1 /*
   2  * Copyright (c) 2020, 2026, Oracle and/or its affiliates. All rights reserved.
   3  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
   4  *
   5  * This code is free software; you can redistribute it and/or modify it
   6  * under the terms of the GNU General Public License version 2 only, as
   7  * published by the Free Software Foundation.
   8  *
   9  * This code is distributed in the hope that it will be useful, but WITHOUT
  10  * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
  11  * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
  12  * version 2 for more details (a copy is included in the LICENSE file that
  13  * accompanied this code).
  14  *
  15  * You should have received a copy of the GNU General Public License version
  16  * 2 along with this work; if not, write to the Free Software Foundation,
  17  * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
  18  *
  19  * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
  20  * or visit www.oracle.com if you need additional information or have any
  21  * questions.
  22  *
  23  */
  24 
  25 #include "cds/aotArtifactFinder.hpp"
  26 #include "cds/aotClassLinker.hpp"
  27 #include "cds/aotCompressedPointers.hpp"
  28 #include "cds/aotLogging.hpp"
  29 #include "cds/aotMapLogger.hpp"
  30 #include "cds/aotMetaspace.hpp"
  31 #include "cds/archiveBuilder.hpp"
  32 #include "cds/archiveUtils.hpp"
  33 #include "cds/cdsConfig.hpp"
  34 #include "cds/cppVtables.hpp"
  35 #include "cds/dumpAllocStats.hpp"
  36 #include "cds/dynamicArchive.hpp"
  37 #include "cds/heapShared.hpp"
  38 #include "cds/regeneratedClasses.hpp"
  39 #include "classfile/classLoader.hpp"
  40 #include "classfile/classLoaderDataShared.hpp"
  41 #include "classfile/javaClasses.hpp"
  42 #include "classfile/symbolTable.hpp"
  43 #include "classfile/systemDictionaryShared.hpp"
  44 #include "classfile/vmClasses.hpp"
  45 #include "code/aotCodeCache.hpp"
  46 #include "interpreter/abstractInterpreter.hpp"
  47 #include "jvm.h"
  48 #include "logging/log.hpp"
  49 #include "memory/allStatic.hpp"
  50 #include "memory/memoryReserver.hpp"
  51 #include "memory/memRegion.hpp"
  52 #include "memory/resourceArea.hpp"
  53 #include "oops/compressedKlass.inline.hpp"
  54 #include "oops/instanceKlass.hpp"
  55 #include "oops/methodCounters.hpp"
  56 #include "oops/methodData.hpp"
  57 #include "oops/objArrayKlass.hpp"
  58 #include "oops/objArrayOop.inline.hpp"
  59 #include "oops/oopHandle.inline.hpp"
  60 #include "oops/trainingData.hpp"
  61 #include "runtime/arguments.hpp"
  62 #include "runtime/globals_extension.hpp"
  63 #include "runtime/javaThread.hpp"
  64 #include "runtime/sharedRuntime.hpp"
  65 #include "utilities/align.hpp"
  66 #include "utilities/bitMap.inline.hpp"
  67 #include "utilities/formatBuffer.hpp"
  68 
  69 ArchiveBuilder* ArchiveBuilder::_current = nullptr;
  70 
  71 ArchiveBuilder::OtherROAllocMark::~OtherROAllocMark() {
  72   char* newtop = ArchiveBuilder::current()->_ro_region.top();
  73   ArchiveBuilder::alloc_stats()->record_other_type(int(newtop - _oldtop), true);
  74 }
  75 
  76 ArchiveBuilder::SourceObjList::SourceObjList() : _ptrmap(16 * K, mtClassShared) {
  77   _total_bytes = 0;
  78   _objs = new (mtClassShared) GrowableArray<SourceObjInfo*>(128 * K, mtClassShared);
  79 }
  80 
  81 ArchiveBuilder::SourceObjList::~SourceObjList() {
  82   delete _objs;
  83 }
  84 
  85 void ArchiveBuilder::SourceObjList::append(SourceObjInfo* src_info) {
  86   // Save this source object for copying
  87   src_info->set_id(_objs->length());
  88   _objs->append(src_info);
  89 
  90   // Prepare for marking the pointers in this source object
  91   assert(is_aligned(_total_bytes, sizeof(address)), "must be");
  92   src_info->set_ptrmap_start(_total_bytes / sizeof(address));
  93   _total_bytes = align_up(_total_bytes + (uintx)src_info->size_in_bytes(), sizeof(address));
  94   src_info->set_ptrmap_end(_total_bytes / sizeof(address));
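  // For example (assuming a 64-bit build, so sizeof(address) == 8): if the objects
  // appended so far occupy 0x40 bytes in total, this object gets ptrmap_start() == 8,
  // and each bit k in [ptrmap_start(), ptrmap_end()) stands for the pointer-sized word
  // at byte offset (k - ptrmap_start()) * 8 within this object; see
  // remember_embedded_pointer() below.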
  95 
  96   BitMap::idx_t bitmap_size_needed = BitMap::idx_t(src_info->ptrmap_end());
  97   if (_ptrmap.size() <= bitmap_size_needed) {
  98     _ptrmap.resize((bitmap_size_needed + 1) * 2);
  99   }
 100 }
 101 
 102 void ArchiveBuilder::SourceObjList::remember_embedded_pointer(SourceObjInfo* src_info, MetaspaceClosure::Ref* ref) {
 103   // src_obj contains a pointer. Remember the location of this pointer in _ptrmap,
 104   // so that we can copy/relocate it later.
 105   src_info->set_has_embedded_pointer();
 106   address src_obj = src_info->source_addr();
 107   address* field_addr = ref->addr();
 108   assert(src_info->ptrmap_start() < _total_bytes, "sanity");
 109   assert(src_info->ptrmap_end() <= _total_bytes, "sanity");
 110   assert(*field_addr != nullptr, "should have checked");
 111 
 112   intx field_offset_in_bytes = ((address)field_addr) - src_obj;
 113   DEBUG_ONLY(int src_obj_size = src_info->size_in_bytes();)
 114   assert(field_offset_in_bytes >= 0, "must be");
 115   assert(field_offset_in_bytes + intx(sizeof(intptr_t)) <= intx(src_obj_size), "must be");
 116   assert(is_aligned(field_offset_in_bytes, sizeof(address)), "must be");
 117 
 118   BitMap::idx_t idx = BitMap::idx_t(src_info->ptrmap_start() + (uintx)(field_offset_in_bytes / sizeof(address)));
 119   _ptrmap.set_bit(BitMap::idx_t(idx));
 120 }
 121 
 122 class RelocateEmbeddedPointers : public BitMapClosure {
 123   ArchiveBuilder* _builder;
 124   address _buffered_obj;
 125   BitMap::idx_t _start_idx;
 126 public:
 127   RelocateEmbeddedPointers(ArchiveBuilder* builder, address buffered_obj, BitMap::idx_t start_idx) :
 128     _builder(builder), _buffered_obj(buffered_obj), _start_idx(start_idx) {}
 129 
 130   bool do_bit(BitMap::idx_t bit_offset) {
 131     size_t field_offset = size_t(bit_offset - _start_idx) * sizeof(address);
 132     address* ptr_loc = (address*)(_buffered_obj + field_offset);
 133 
 134     address old_p_with_tags = *ptr_loc;
 135     assert(old_p_with_tags != nullptr, "null ptrs shouldn't have been marked");
 136 
 137     address old_p = MetaspaceClosure::strip_tags(old_p_with_tags);
 138     uintx tags = MetaspaceClosure::decode_tags(old_p_with_tags);
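    // The tag bits recorded by MetaspaceClosure are stripped before the table lookup and
    // re-applied to the relocated pointer below, so the tags are preserved across relocation.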
 139     address new_p = _builder->get_buffered_addr(old_p);
 140 
 141     bool nulled;
 142     if (new_p == nullptr) {
 143       // old_p had a FollowMode of set_to_null
 144       nulled = true;
 145     } else {
 146       new_p = MetaspaceClosure::add_tags(new_p, tags);
 147       nulled = false;
 148     }
 149 
 150     log_trace(aot)("Ref: [" PTR_FORMAT "] -> " PTR_FORMAT " => " PTR_FORMAT " %zu",
 151                    p2i(ptr_loc), p2i(old_p) + tags, p2i(new_p), tags);
 152 
 153     ArchivePtrMarker::set_and_mark_pointer(ptr_loc, new_p);
 154     ArchiveBuilder::current()->count_relocated_pointer(tags != 0, nulled);
 155     return true; // keep iterating the bitmap
 156   }
 157 };
 158 
 159 void ArchiveBuilder::SourceObjList::relocate(int i, ArchiveBuilder* builder) {
 160   SourceObjInfo* src_info = objs()->at(i);
 161   assert(src_info->should_copy(), "must be");
 162   BitMap::idx_t start = BitMap::idx_t(src_info->ptrmap_start()); // inclusive
 163   BitMap::idx_t end = BitMap::idx_t(src_info->ptrmap_end());     // exclusive
 164 
 165   RelocateEmbeddedPointers relocator(builder, src_info->buffered_addr(), start);
 166   _ptrmap.iterate(&relocator, start, end);
 167 }
 168 
 169 ArchiveBuilder::ArchiveBuilder() :
 170   _current_dump_region(nullptr),
 171   _buffer_bottom(nullptr),
 172   _requested_static_archive_bottom(nullptr),
 173   _requested_static_archive_top(nullptr),
 174   _requested_dynamic_archive_bottom(nullptr),
 175   _requested_dynamic_archive_top(nullptr),
 176   _mapped_static_archive_bottom(nullptr),
 177   _mapped_static_archive_top(nullptr),
 178   _buffer_to_requested_delta(0),
 179   _pz_region("pz"), // protection zone -- used only during dumping; does NOT exist in cds archive.
 180   _rw_region("rw"),
 181   _ro_region("ro"),
 182   _ac_region("ac"),
 183   _ptrmap(mtClassShared),
 184   _rw_ptrmap(mtClassShared),
 185   _ro_ptrmap(mtClassShared),
 186   _rw_src_objs(),
 187   _ro_src_objs(),
 188   _src_obj_table(INITIAL_TABLE_SIZE, MAX_TABLE_SIZE),
 189   _buffered_to_src_table(INITIAL_TABLE_SIZE, MAX_TABLE_SIZE),
 190   _total_heap_region_size(0)
 191 {
 192   _klasses = new (mtClassShared) GrowableArray<Klass*>(4 * K, mtClassShared);
 193   _symbols = new (mtClassShared) GrowableArray<Symbol*>(256 * K, mtClassShared);
 194   _entropy_seed = 0x12345678;
 195   _relocated_ptr_info._num_ptrs = 0;
 196   _relocated_ptr_info._num_tagged_ptrs = 0;
 197   _relocated_ptr_info._num_nulled_ptrs = 0;
 198   assert(_current == nullptr, "must be");
 199   _current = this;
 200 }
 201 
 202 ArchiveBuilder::~ArchiveBuilder() {
 203   assert(_current == this, "must be");
 204   _current = nullptr;
 205 
 206   for (int i = 0; i < _symbols->length(); i++) {
 207     _symbols->at(i)->decrement_refcount();
 208   }
 209 
 210   delete _klasses;
 211   delete _symbols;
 212   if (_shared_rs.is_reserved()) {
 213     MemoryReserver::release(_shared_rs);
 214   }
 215 
 216   AOTArtifactFinder::dispose();
 217 }
 218 
 219 // Returns a deterministic sequence of pseudo random numbers. The main purpose is NOT
 220 // for randomness but to get good entropy for the identity_hash() of archived Symbols,
 221 // while keeping the contents of static CDS archives deterministic to ensure
 222 // reproducibility of JDK builds.
 223 int ArchiveBuilder::entropy() {
 224   assert(SafepointSynchronize::is_at_safepoint(), "needed to ensure deterministic sequence");
 225   _entropy_seed = os::next_random(_entropy_seed);
 226   return static_cast<int>(_entropy_seed);
 227 }
 228 
 229 class GatherKlassesAndSymbols : public UniqueMetaspaceClosure {
 230   ArchiveBuilder* _builder;
 231 
 232 public:
 233   GatherKlassesAndSymbols(ArchiveBuilder* builder) : _builder(builder) {}
 234 
 235   virtual bool do_unique_ref(Ref* ref, bool read_only) {
 236     return _builder->gather_klass_and_symbol(ref, read_only);
 237   }
 238 };
 239 
 240 bool ArchiveBuilder::gather_klass_and_symbol(MetaspaceClosure::Ref* ref, bool read_only) {
 241   if (ref->obj() == nullptr) {
 242     return false;
 243   }
 244   if (get_follow_mode(ref) != make_a_copy) {
 245     return false;
 246   }
 247   if (ref->type() == MetaspaceClosureType::ClassType) {
 248     Klass* klass = (Klass*)ref->obj();
 249     assert(klass->is_klass(), "must be");
 250     if (!is_excluded(klass)) {
 251       _klasses->append(klass);
 252       if (klass->is_hidden()) {
 253         assert(klass->is_instance_klass(), "must be");
 254       }
 255     }
 256   } else if (ref->type() == MetaspaceClosureType::SymbolType) {
 257     // Make sure the symbol won't be GC'ed while we are dumping the archive.
 258     Symbol* sym = (Symbol*)ref->obj();
 259     sym->increment_refcount();
 260     _symbols->append(sym);
 261   }
 262 
 263   return true; // recurse
 264 }
 265 
 266 void ArchiveBuilder::gather_klasses_and_symbols() {
 267   ResourceMark rm;
 268 
 269   AOTArtifactFinder::initialize();
 270   AOTArtifactFinder::find_artifacts();
 271 
 272   aot_log_info(aot)("Gathering classes and symbols ... ");
 273   GatherKlassesAndSymbols doit(this);
 274   iterate_roots(&doit);
 275   doit.finish();
 276 
 277   if (CDSConfig::is_dumping_static_archive()) {
 278     // To ensure deterministic contents in the static archive, we need to ensure that
 279     // we iterate the MetaspaceObjs in a deterministic order. It doesn't matter where
 280     // the MetaspaceObjs are located originally, as they are copied sequentially into
 281     // the archive during the iteration.
 282     //
    // The only issue here is that the symbol table and the system dictionaries may be
 284     // randomly ordered, so we copy the symbols and klasses into two arrays and sort
 285     // them deterministically.
 286     //
 287     // During -Xshare:dump, the order of Symbol creation is strictly determined by
 288     // the SharedClassListFile (class loading is done in a single thread and the JIT
 289     // is disabled). Also, Symbols are allocated in monotonically increasing addresses
 290     // (see Symbol::operator new(size_t, int)). So if we iterate the Symbols by
 291     // ascending address order, we ensure that all Symbols are copied into deterministic
 292     // locations in the archive.
 293     //
 294     // TODO: in the future, if we want to produce deterministic contents in the
 295     // dynamic archive, we might need to sort the symbols alphabetically (also see
 296     // DynamicArchiveBuilder::sort_methods()).
 297     aot_log_info(aot)("Sorting symbols ... ");
 298     _symbols->sort(compare_symbols_by_address);
 299     sort_klasses();
 300   }
 301 
 302   AOTClassLinker::add_candidates();
 303 }
 304 
 305 int ArchiveBuilder::compare_symbols_by_address(Symbol** a, Symbol** b) {
 306   if (a[0] < b[0]) {
 307     return -1;
 308   } else {
 309     assert(a[0] > b[0], "Duplicated symbol %s unexpected", (*a)->as_C_string());
 310     return 1;
 311   }
 312 }
 313 
 314 int ArchiveBuilder::compare_klass_by_name(Klass** a, Klass** b) {
 315   return a[0]->name()->fast_compare(b[0]->name());
 316 }
 317 
 318 void ArchiveBuilder::sort_klasses() {
 319   aot_log_info(aot)("Sorting classes ... ");
 320   _klasses->sort(compare_klass_by_name);
 321 }
 322 
 323 address ArchiveBuilder::reserve_buffer() {
 324   // On 64-bit: reserve address space for archives up to the max encoded offset limit.
 325   // On 32-bit: use 256MB + AOT code size due to limited virtual address space.
 326   size_t buffer_size = LP64_ONLY(AOTCompressedPointers::MaxMetadataOffsetBytes)
 327                        NOT_LP64(256 * M + AOTCodeCache::max_aot_code_size());
 328   ReservedSpace rs = MemoryReserver::reserve(buffer_size,
 329                                              AOTMetaspace::core_region_alignment(),
 330                                              os::vm_page_size(),
 331                                              mtNone);
 332   if (!rs.is_reserved()) {
 333     aot_log_error(aot)("Failed to reserve %zu bytes of output buffer.", buffer_size);
 334     AOTMetaspace::unrecoverable_writing_error();
 335   }
 336 
 337   // buffer_bottom is the lowest address of the 2 core regions (rw, ro) when
 338   // we are copying the class metadata into the buffer.
 339   address buffer_bottom = (address)rs.base();
 340   aot_log_info(aot)("Reserved output buffer space at " PTR_FORMAT " [%zu bytes]",
 341                 p2i(buffer_bottom), buffer_size);
 342   _shared_rs = rs;
 343 
 344   _buffer_bottom = buffer_bottom;
 345 
 346   if (CDSConfig::is_dumping_static_archive()) {
 347     _current_dump_region = &_pz_region;
 348   } else {
 349     _current_dump_region = &_rw_region;
 350   }
 351   _current_dump_region->init(&_shared_rs, &_shared_vs);
 352 
 353   ArchivePtrMarker::initialize(&_ptrmap, &_shared_vs);
 354 
 355   // The bottom of the static archive should be mapped at this address by default.
 356   _requested_static_archive_bottom = (address)AOTMetaspace::requested_base_address();
 357 
 358   // The bottom of the archive (that I am writing now) should be mapped at this address by default.
 359   address my_archive_requested_bottom;
 360 
 361   if (CDSConfig::is_dumping_static_archive()) {
 362     my_archive_requested_bottom = _requested_static_archive_bottom;
 363   } else {
 364     _mapped_static_archive_bottom = (address)MetaspaceObj::aot_metaspace_base();
 365     _mapped_static_archive_top  = (address)MetaspaceObj::aot_metaspace_top();
 366     assert(_mapped_static_archive_top >= _mapped_static_archive_bottom, "must be");
 367     size_t static_archive_size = _mapped_static_archive_top - _mapped_static_archive_bottom;
 368 
 369     // At run time, we will mmap the dynamic archive at my_archive_requested_bottom
 370     _requested_static_archive_top = _requested_static_archive_bottom + static_archive_size;
 371     my_archive_requested_bottom = align_up(_requested_static_archive_top, AOTMetaspace::core_region_alignment());
 372 
 373     _requested_dynamic_archive_bottom = my_archive_requested_bottom;
 374   }
 375 
 376   _buffer_to_requested_delta = my_archive_requested_bottom - _buffer_bottom;
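  // A purely hypothetical example: if the buffer happened to be reserved at 0x7f0000000000
  // and the archive is requested to be mapped at 0x800000000, the delta is negative, and
  // adding it to any buffered address (see to_requested()) yields the corresponding
  // requested address.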
 377 
 378   address my_archive_requested_top = my_archive_requested_bottom + buffer_size;
 379   if (my_archive_requested_bottom <  _requested_static_archive_bottom ||
 380       my_archive_requested_top    <= _requested_static_archive_bottom) {
 381     // Size overflow.
 382     aot_log_error(aot)("my_archive_requested_bottom = " INTPTR_FORMAT, p2i(my_archive_requested_bottom));
 383     aot_log_error(aot)("my_archive_requested_top    = " INTPTR_FORMAT, p2i(my_archive_requested_top));
 384     aot_log_error(aot)("SharedBaseAddress (" INTPTR_FORMAT ") is too high. "
 385                    "Please rerun java -Xshare:dump with a lower value", p2i(_requested_static_archive_bottom));
 386     AOTMetaspace::unrecoverable_writing_error();
 387   }
 388 
 389   if (CDSConfig::is_dumping_static_archive()) {
 390     // We don't want any valid object to be at the very bottom of the archive.
 391     // See ArchivePtrMarker::mark_pointer().
 392     _pz_region.allocate(AOTMetaspace::protection_zone_size());
 393     start_dump_region(&_rw_region);
 394   }
 395 
 396   return buffer_bottom;
 397 }
 398 
 399 void ArchiveBuilder::iterate_sorted_roots(MetaspaceClosure* it) {
 400   int num_symbols = _symbols->length();
 401   for (int i = 0; i < num_symbols; i++) {
 402     it->push(_symbols->adr_at(i));
 403   }
 404 
 405   int num_klasses = _klasses->length();
 406   for (int i = 0; i < num_klasses; i++) {
 407     it->push(_klasses->adr_at(i));
 408   }
 409 
 410   iterate_roots(it);
 411 }
 412 
 413 class GatherSortedSourceObjs : public MetaspaceClosure {
 414   ArchiveBuilder* _builder;
 415 
 416 public:
 417   GatherSortedSourceObjs(ArchiveBuilder* builder) : _builder(builder) {}
 418 
 419   virtual bool do_ref(Ref* ref, bool read_only) {
 420     return _builder->gather_one_source_obj(ref, read_only);
 421   }
 422 };
 423 
 424 bool ArchiveBuilder::gather_one_source_obj(MetaspaceClosure::Ref* ref, bool read_only) {
 425   address src_obj = ref->obj();
 426   if (src_obj == nullptr) {
 427     return false;
 428   }
 429 
 430   remember_embedded_pointer_in_enclosing_obj(ref);
 431   if (RegeneratedClasses::has_been_regenerated(src_obj)) {
 432     // No need to copy it. We will later relocate it to point to the regenerated klass/method.
 433     return false;
 434   }
 435 
 436   FollowMode follow_mode = get_follow_mode(ref);
 437   SourceObjInfo src_info(ref, read_only, follow_mode);
 438   bool created;
 439   SourceObjInfo* p = _src_obj_table.put_if_absent(src_obj, src_info, &created);
 440   if (created) {
 441     if (_src_obj_table.maybe_grow()) {
 442       log_info(aot, hashtables)("Expanded _src_obj_table table to %d", _src_obj_table.table_size());
 443     }
 444   }
 445 
 446 #ifdef ASSERT
 447   if (ref->type() == MetaspaceClosureType::MethodType) {
 448     Method* m = (Method*)ref->obj();
 449     assert(!RegeneratedClasses::has_been_regenerated((address)m->method_holder()),
 450            "Should not archive methods in a class that has been regenerated");
 451   }
 452 #endif
 453 
 454   if (ref->type() == MetaspaceClosureType::MethodDataType) {
 455     MethodData* md = (MethodData*)ref->obj();
 456     md->clean_method_data(false /* always_clean */);
 457   }
 458 
 459   assert(p->read_only() == src_info.read_only(), "must be");
 460 
 461   if (created && src_info.should_copy()) {
 462     if (read_only) {
 463       _ro_src_objs.append(p);
 464     } else {
 465       _rw_src_objs.append(p);
 466     }
 467     return true; // Need to recurse into this ref only if we are copying it
 468   } else {
 469     return false;
 470   }
 471 }
 472 
 473 void ArchiveBuilder::record_regenerated_object(address orig_src_obj, address regen_src_obj) {
 474   // Record the fact that orig_src_obj has been replaced by regen_src_obj. All calls to get_buffered_addr(orig_src_obj)
 475   // should return the same value as get_buffered_addr(regen_src_obj).
 476   SourceObjInfo* p = _src_obj_table.get(regen_src_obj);
 477   assert(p != nullptr, "regenerated object should always be dumped");
 478   SourceObjInfo orig_src_info(orig_src_obj, p);
 479   bool created;
 480   _src_obj_table.put_if_absent(orig_src_obj, orig_src_info, &created);
 481   assert(created, "We shouldn't have archived the original copy of a regenerated object");
 482 }
 483 
 484 // Remember that we have a pointer inside ref->enclosing_obj() that points to ref->obj()
 485 void ArchiveBuilder::remember_embedded_pointer_in_enclosing_obj(MetaspaceClosure::Ref* ref) {
 486   assert(ref->obj() != nullptr, "should have checked");
 487 
 488   address enclosing_obj = ref->enclosing_obj();
 489   if (enclosing_obj == nullptr) {
 490     return;
 491   }
 492 
 493   // We are dealing with 3 addresses:
 494   // address o    = ref->obj(): We have found an object whose address is o.
 495   // address* mpp = ref->mpp(): The object o is pointed to by a pointer whose address is mpp.
 496   //                            I.e., (*mpp == o)
 497   // enclosing_obj            : If non-null, it is the object which has a field that points to o.
  //                            mpp is the address of that field.
 499   //
 500   // Example: We have an array whose first element points to a Method:
 501   //     Method* o                     = 0x0000abcd;
 502   //     Array<Method*>* enclosing_obj = 0x00001000;
 503   //     enclosing_obj->at_put(0, o);
 504   //
  // When the MetaspaceClosure iterates over the very first element of this array, we have
 506   //     ref->obj()           == 0x0000abcd   (the Method)
 507   //     ref->mpp()           == 0x00001008   (the location of the first element in the array)
 508   //     ref->enclosing_obj() == 0x00001000   (the Array that contains the Method)
 509   //
  // We use the above information to mark the bitmap to indicate that there's a pointer at address 0x00001008.
 511   SourceObjInfo* src_info = _src_obj_table.get(enclosing_obj);
 512   if (src_info == nullptr || !src_info->should_copy()) {
 513     // source objects of point_to_it/set_to_null types are not copied
 514     // so we don't need to remember their pointers.
 515   } else {
 516     if (src_info->read_only()) {
 517       _ro_src_objs.remember_embedded_pointer(src_info, ref);
 518     } else {
 519       _rw_src_objs.remember_embedded_pointer(src_info, ref);
 520     }
 521   }
 522 }
 523 
 524 void ArchiveBuilder::gather_source_objs() {
 525   ResourceMark rm;
 526   aot_log_info(aot)("Gathering all archivable objects ... ");
 527   gather_klasses_and_symbols();
 528   GatherSortedSourceObjs doit(this);
 529   iterate_sorted_roots(&doit);
 530   doit.finish();
 531 }
 532 
 533 bool ArchiveBuilder::is_excluded(Klass* klass) {
 534   if (klass->is_instance_klass()) {
 535     InstanceKlass* ik = InstanceKlass::cast(klass);
 536     return SystemDictionaryShared::is_excluded_class(ik);
 537   } else if (klass->is_objArray_klass()) {
 538     Klass* bottom = ObjArrayKlass::cast(klass)->bottom_klass();
 539     if (CDSConfig::is_dumping_dynamic_archive() && AOTMetaspace::in_aot_cache_static_region(bottom)) {
 540       // The bottom class is in the static archive so it's clearly not excluded.
 541       return false;
 542     } else if (bottom->is_instance_klass()) {
 543       return SystemDictionaryShared::is_excluded_class(InstanceKlass::cast(bottom));
 544     }
 545   }
 546 
 547   return false;
 548 }
 549 
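// Decide how a reference to this object should be handled when it is copied into the archive:
// point_to_it  -- keep pointing at the existing copy (e.g., metadata already in the mapped
//                 static archive during a dynamic dump);
// set_to_null  -- clear the reference (e.g., excluded classes);
// make_a_copy  -- copy the object into the buffer and relocate references to it.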
 550 ArchiveBuilder::FollowMode ArchiveBuilder::get_follow_mode(MetaspaceClosure::Ref *ref) {
 551   address obj = ref->obj();
 552   if (CDSConfig::is_dumping_dynamic_archive() && AOTMetaspace::in_aot_cache(obj)) {
 553     // Don't dump existing shared metadata again.
 554     return point_to_it;
 555   } else if (ref->type() == MetaspaceClosureType::MethodDataType ||
 556              ref->type() == MetaspaceClosureType::MethodCountersType ||
 557              ref->type() == MetaspaceClosureType::KlassTrainingDataType ||
 558              ref->type() == MetaspaceClosureType::MethodTrainingDataType ||
 559              ref->type() == MetaspaceClosureType::CompileTrainingDataType) {
 560     return (TrainingData::need_data() || TrainingData::assembling_data()) ? make_a_copy : set_to_null;
 561   } else if (ref->type() == MetaspaceClosureType::AdapterHandlerEntryType) {
 562     return CDSConfig::is_dumping_adapters() ? make_a_copy : set_to_null;
 563   } else {
 564     if (ref->type() == MetaspaceClosureType::ClassType) {
 565       Klass* klass = (Klass*)ref->obj();
 566       assert(klass->is_klass(), "must be");
 567       if (RegeneratedClasses::has_been_regenerated(klass)) {
 568         klass = RegeneratedClasses::get_regenerated_object(klass);
 569       }
 570       if (is_excluded(klass)) {
 571         ResourceMark rm;
 572         aot_log_trace(aot)("pointer set to null: class (excluded): %s", klass->external_name());
 573         return set_to_null;
 574       }
 575       if (klass->is_array_klass() && CDSConfig::is_dumping_dynamic_archive()) {
 576         ResourceMark rm;
 577         aot_log_trace(aot)("pointer set to null: array class not supported in dynamic region: %s", klass->external_name());
 578         return set_to_null;
 579       }
 580     }
 581 
 582     return make_a_copy;
 583   }
 584 }
 585 
 586 void ArchiveBuilder::start_dump_region(DumpRegion* next) {
 587   current_dump_region()->pack(next);
 588   _current_dump_region = next;
 589 }
 590 
 591 char* ArchiveBuilder::ro_strdup(const char* s) {
 592   char* archived_str = ro_region_alloc((int)strlen(s) + 1);
 593   strcpy(archived_str, s);
 594   return archived_str;
 595 }
 596 
 597 // The objects that have embedded pointers will sink
 598 // towards the end of the list. This ensures we have a maximum
 599 // number of leading zero bits in the relocation bitmap.
 600 int ArchiveBuilder::compare_src_objs(SourceObjInfo** a, SourceObjInfo** b) {
 601   if ((*a)->has_embedded_pointer() && !(*b)->has_embedded_pointer()) {
 602     return 1;
 603   } else if (!(*a)->has_embedded_pointer() && (*b)->has_embedded_pointer()) {
 604     return -1;
 605   } else {
 606     // This is necessary to keep the sorting order stable. Otherwise the
 607     // archive's contents may not be deterministic.
 608     return (*a)->id() - (*b)->id();
 609   }
 610 }
 611 
 612 void ArchiveBuilder::sort_metadata_objs() {
 613   _rw_src_objs.objs()->sort(compare_src_objs);
 614   _ro_src_objs.objs()->sort(compare_src_objs);
 615 }
 616 
 617 void ArchiveBuilder::dump_rw_metadata() {
 618   ResourceMark rm;
 619   aot_log_info(aot)("Allocating RW objects ... ");
 620   make_shallow_copies(&_rw_region, &_rw_src_objs);
 621 }
 622 
 623 void ArchiveBuilder::dump_ro_metadata() {
 624   ResourceMark rm;
 625   aot_log_info(aot)("Allocating RO objects ... ");
 626 
 627   start_dump_region(&_ro_region);
 628   make_shallow_copies(&_ro_region, &_ro_src_objs);
 629   RegeneratedClasses::record_regenerated_objects();
 630 }
 631 
 632 void ArchiveBuilder::make_shallow_copies(DumpRegion *dump_region,
 633                                          const ArchiveBuilder::SourceObjList* src_objs) {
 634   for (int i = 0; i < src_objs->objs()->length(); i++) {
 635     make_shallow_copy(dump_region, src_objs->objs()->at(i));
 636   }
 637   aot_log_info(aot)("done (%d objects)", src_objs->objs()->length());
 638 }
 639 
 640 void ArchiveBuilder::make_shallow_copy(DumpRegion *dump_region, SourceObjInfo* src_info) {
 641   address src = src_info->source_addr();
 642   int bytes = src_info->size_in_bytes(); // word-aligned
 643   size_t alignment = SharedSpaceObjectAlignment; // alignment for the dest pointer
 644 
 645   char* oldtop = dump_region->top();
 646   if (src_info->type() == MetaspaceClosureType::ClassType) {
 647     // Allocate space for a pointer directly in front of the future InstanceKlass, so
 648     // we can do a quick lookup from InstanceKlass* -> RunTimeClassInfo*
 649     // without building another hashtable. See RunTimeClassInfo::get_for()
 650     // in systemDictionaryShared.cpp.
 651     Klass* klass = (Klass*)src;
 652     if (klass->is_instance_klass()) {
 653       SystemDictionaryShared::validate_before_archiving(InstanceKlass::cast(klass));
 654       dump_region->allocate(sizeof(address));
 655     }
 656 #ifdef _LP64
 657     // More strict alignments needed for UseCompressedClassPointers
 658     if (UseCompressedClassPointers) {
 659       alignment = nth_bit(ArchiveBuilder::precomputed_narrow_klass_shift());
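      // Illustrative example (the actual shift depends on the configuration; see
      // precomputed_narrow_klass_shift()): with a shift of 10, nth_bit(10) == 1024, so
      // every buffered Klass is placed at a 1 KB boundary, keeping its requested address
      // encodable as a narrowKlass with that shift.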
 660     }
 661 #endif
 662   } else if (src_info->type() == MetaspaceClosureType::SymbolType) {
    // Symbols may have been allocated with AllocateHeap, so their actual sizes
    // may be smaller than what size_in_bytes() indicates.
 665     bytes = ((Symbol*)src)->byte_size();
 666   }
 667 
 668   char* dest = dump_region->allocate(bytes, alignment);
 669   memcpy(dest, src, bytes);
 670 
 671   // Update the hash of buffered sorted symbols for static dump so that the symbols have deterministic contents
 672   if (CDSConfig::is_dumping_static_archive() && (src_info->type() == MetaspaceClosureType::SymbolType)) {
 673     Symbol* buffered_symbol = (Symbol*)dest;
 674     assert(((Symbol*)src)->is_permanent(), "archived symbols must be permanent");
 675     buffered_symbol->update_identity_hash();
 676   }
 677 
 678   {
 679     bool created;
 680     _buffered_to_src_table.put_if_absent((address)dest, src, &created);
 681     assert(created, "must be");
 682     if (_buffered_to_src_table.maybe_grow()) {
 683       log_info(aot, hashtables)("Expanded _buffered_to_src_table table to %d", _buffered_to_src_table.table_size());
 684     }
 685   }
 686 
 687   intptr_t* archived_vtable = CppVtables::get_archived_vtable(src_info->type(), (address)dest);
 688   if (archived_vtable != nullptr) {
 689     *(address*)dest = (address)archived_vtable;
 690     ArchivePtrMarker::mark_pointer((address*)dest);
 691   }
 692 
 693   log_trace(aot)("Copy: " PTR_FORMAT " ==> " PTR_FORMAT " %d", p2i(src), p2i(dest), bytes);
 694   src_info->set_buffered_addr((address)dest);
 695 
 696   char* newtop = dump_region->top();
 697   _alloc_stats.record(src_info->type(), int(newtop - oldtop), src_info->read_only());
 698 
 699   DEBUG_ONLY(_alloc_stats.verify((int)dump_region->used(), src_info->read_only()));
 700 }
 701 
 702 // This is used by code that hand-assembles data structures, such as the LambdaProxyClassKey, that are
 703 // not handled by MetaspaceClosure.
 704 void ArchiveBuilder::write_pointer_in_buffer(address* ptr_location, address src_addr) {
 705   assert(is_in_buffer_space(ptr_location), "must be");
 706   if (src_addr == nullptr) {
 707     *ptr_location = nullptr;
 708     ArchivePtrMarker::clear_pointer(ptr_location);
 709   } else {
 710     *ptr_location = get_buffered_addr(src_addr);
 711     ArchivePtrMarker::mark_pointer(ptr_location);
 712   }
 713 }
 714 
 715 void ArchiveBuilder::mark_and_relocate_to_buffered_addr(address* ptr_location) {
 716   assert(*ptr_location != nullptr, "sanity");
 717   if (!is_in_mapped_static_archive(*ptr_location)) {
 718     *ptr_location = get_buffered_addr(*ptr_location);
 719   }
 720   ArchivePtrMarker::mark_pointer(ptr_location);
 721 }
 722 
 723 bool ArchiveBuilder::has_been_archived(address src_addr) const {
 724   SourceObjInfo* p = _src_obj_table.get(src_addr);
 725   if (p == nullptr) {
 726     // This object has never been seen by ArchiveBuilder
 727     return false;
 728   }
 729   if (p->buffered_addr() == nullptr) {
    // ArchiveBuilder has seen this object, but decided not to archive it, so any
    // reference to this object will be modified to nullptr inside the buffer.
 732     assert(p->follow_mode() == set_to_null, "must be");
 733     return false;
 734   }
 735 
 736   DEBUG_ONLY({
    // If src_addr is a class/method that belongs to one of the "original" classes that
    // have been regenerated by lambdaFormInvokers.cpp, we must have archived the
    // "regenerated" version of it.
 740     if (RegeneratedClasses::has_been_regenerated(src_addr)) {
 741       address regen_obj = RegeneratedClasses::get_regenerated_object(src_addr);
 742       precond(regen_obj != nullptr && regen_obj != src_addr);
 743       assert(has_been_archived(regen_obj), "must be");
 744       assert(get_buffered_addr(src_addr) == get_buffered_addr(regen_obj), "must be");
 745     }});
 746 
 747   return true;
 748 }
 749 
 750 address ArchiveBuilder::get_buffered_addr(address src_addr) const {
 751   SourceObjInfo* p = _src_obj_table.get(src_addr);
 752   assert(p != nullptr, "src_addr " INTPTR_FORMAT " is used but has not been archived",
 753          p2i(src_addr));
 754 
 755   return p->buffered_addr();
 756 }
 757 
 758 address ArchiveBuilder::get_source_addr(address buffered_addr) const {
 759   assert(is_in_buffer_space(buffered_addr), "must be");
 760   address* src_p = _buffered_to_src_table.get(buffered_addr);
 761   assert(src_p != nullptr && *src_p != nullptr, "must be");
 762   return *src_p;
 763 }
 764 
 765 void ArchiveBuilder::relocate_embedded_pointers(ArchiveBuilder::SourceObjList* src_objs) {
 766   for (int i = 0; i < src_objs->objs()->length(); i++) {
 767     src_objs->relocate(i, this);
 768   }
 769 }
 770 
 771 void ArchiveBuilder::relocate_metaspaceobj_embedded_pointers() {
 772   aot_log_info(aot)("Relocating embedded pointers in core regions ... ");
 773   relocate_embedded_pointers(&_rw_src_objs);
 774   relocate_embedded_pointers(&_ro_src_objs);
 775   log_info(cds)("Relocating %zu pointers, %zu tagged, %zu nulled",
 776                 _relocated_ptr_info._num_ptrs,
 777                 _relocated_ptr_info._num_tagged_ptrs,
 778                 _relocated_ptr_info._num_nulled_ptrs);
 779 }
 780 
 781 #define ADD_COUNT(x) \
 782   x += 1; \
 783   x ## _a += aotlinked ? 1 : 0; \
 784   x ## _i += inited ? 1 : 0;
 785 
 786 #define DECLARE_INSTANCE_KLASS_COUNTER(x) \
 787   int x = 0; \
 788   int x ## _a = 0; \
 789   int x ## _i = 0;
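// For each counter x declared above, x counts all classes in that category, x_a counts the
// aot-linked subset, and x_i counts the subset with aot-initialized mirrors (see ADD_COUNT
// and the "aot-linked"/"inited" columns printed at the end of make_klasses_shareable()).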
 790 
 791 void ArchiveBuilder::make_klasses_shareable() {
 792   DECLARE_INSTANCE_KLASS_COUNTER(num_instance_klasses);
 793   DECLARE_INSTANCE_KLASS_COUNTER(num_boot_klasses);
 794   DECLARE_INSTANCE_KLASS_COUNTER(num_vm_klasses);
 795   DECLARE_INSTANCE_KLASS_COUNTER(num_platform_klasses);
 796   DECLARE_INSTANCE_KLASS_COUNTER(num_app_klasses);
 797   DECLARE_INSTANCE_KLASS_COUNTER(num_old_klasses);
 798   DECLARE_INSTANCE_KLASS_COUNTER(num_hidden_klasses);
 799   DECLARE_INSTANCE_KLASS_COUNTER(num_enum_klasses);
 800   DECLARE_INSTANCE_KLASS_COUNTER(num_unregistered_klasses);
 801   int num_unlinked_klasses = 0;
 802   int num_obj_array_klasses = 0;
 803   int num_type_array_klasses = 0;
 804 
 805   int boot_unlinked = 0;
 806   int platform_unlinked = 0;
 807   int app_unlinked = 0;
 808   int unreg_unlinked = 0;
 809 
 810   for (int i = 0; i < klasses()->length(); i++) {
 811     // Some of the code in ConstantPool::remove_unshareable_info() requires the classes
    // to be in the linked state, so it must be called here before the next loop, which returns
 813     // all classes to unlinked state.
 814     Klass* k = get_buffered_addr(klasses()->at(i));
 815     if (k->is_instance_klass()) {
 816       InstanceKlass::cast(k)->constants()->remove_unshareable_info();
 817     }
 818   }
 819 
 820   for (int i = 0; i < klasses()->length(); i++) {
 821     const char* type;
 822     const char* unlinked = "";
 823     const char* kind = "";
 824     const char* hidden = "";
 825     const char* old = "";
 826     const char* generated = "";
 827     const char* aotlinked_msg = "";
 828     const char* inited_msg = "";
 829     Klass* k = get_buffered_addr(klasses()->at(i));
 830     bool inited = false;
 831     k->remove_java_mirror();
 832 #ifdef _LP64
 833     if (UseCompactObjectHeaders) {
 834       Klass* requested_k = to_requested(k);
 835       address narrow_klass_base = _requested_static_archive_bottom; // runtime encoding base == runtime mapping start
 836       const int narrow_klass_shift = precomputed_narrow_klass_shift();
 837       narrowKlass nk = CompressedKlassPointers::encode_not_null_without_asserts(requested_k, narrow_klass_base, narrow_klass_shift);
 838       k->set_prototype_header(markWord::prototype().set_narrow_klass(nk));
 839     }
 840 #endif //_LP64
 841     if (k->is_objArray_klass()) {
 842       // InstanceKlass and TypeArrayKlass will in turn call remove_unshareable_info
 843       // on their array classes.
 844       num_obj_array_klasses ++;
 845       type = "array";
 846     } else if (k->is_typeArray_klass()) {
 847       num_type_array_klasses ++;
 848       type = "array";
 849       k->remove_unshareable_info();
 850     } else {
 851       assert(k->is_instance_klass(), " must be");
 852       InstanceKlass* ik = InstanceKlass::cast(k);
 853       InstanceKlass* src_ik = get_source_addr(ik);
 854       bool aotlinked = AOTClassLinker::is_candidate(src_ik);
 855       inited = ik->has_aot_initialized_mirror();
 856       ADD_COUNT(num_instance_klasses);
 857       if (ik->is_hidden()) {
 858         ADD_COUNT(num_hidden_klasses);
 859         hidden = " hidden";
 860         oop loader = k->class_loader();
 861         if (loader == nullptr) {
 862           type = "boot";
 863           ADD_COUNT(num_boot_klasses);
 864         } else if (loader == SystemDictionary::java_platform_loader()) {
 865           type = "plat";
 866           ADD_COUNT(num_platform_klasses);
 867         } else if (loader == SystemDictionary::java_system_loader()) {
 868           type = "app";
 869           ADD_COUNT(num_app_klasses);
 870         } else {
 871           type = "bad";
 872           assert(0, "shouldn't happen");
 873         }
 874         if (CDSConfig::is_dumping_method_handles()) {
 875           assert(HeapShared::is_archivable_hidden_klass(ik), "sanity");
 876         } else {
 877           // Legacy CDS support for lambda proxies
 878           CDS_JAVA_HEAP_ONLY(assert(HeapShared::is_lambda_proxy_klass(ik), "sanity");)
 879         }
 880       } else if (ik->defined_by_boot_loader()) {
 881         type = "boot";
 882         ADD_COUNT(num_boot_klasses);
 883       } else if (ik->defined_by_platform_loader()) {
 884         type = "plat";
 885         ADD_COUNT(num_platform_klasses);
 886       } else if (ik->defined_by_app_loader()) {
 887         type = "app";
 888         ADD_COUNT(num_app_klasses);
 889       } else {
 890         assert(ik->defined_by_other_loaders(), "must be");
 891         type = "unreg";
 892         ADD_COUNT(num_unregistered_klasses);
 893       }
 894 
 895       if (AOTClassLinker::is_vm_class(src_ik)) {
 896         ADD_COUNT(num_vm_klasses);
 897       }
 898 
 899       if (!ik->is_linked()) {
 900         num_unlinked_klasses ++;
 901         unlinked = " unlinked";
 902         if (ik->defined_by_boot_loader()) {
 903           boot_unlinked ++;
 904         } else if (ik->defined_by_platform_loader()) {
 905           platform_unlinked ++;
 906         } else if (ik->defined_by_app_loader()) {
 907           app_unlinked ++;
 908         } else {
 909           unreg_unlinked ++;
 910         }
 911       }
 912 
 913       if (ik->is_interface()) {
 914         kind = " interface";
 915       } else if (src_ik->is_enum_subclass()) {
 916         kind = " enum";
 917         ADD_COUNT(num_enum_klasses);
 918       }
 919 
 920       if (CDSConfig::is_old_class_for_verifier(ik)) {
 921         ADD_COUNT(num_old_klasses);
 922         old = " old";
 923       }
 924 
 925       if (ik->is_aot_generated_class()) {
 926         generated = " generated";
 927       }
 928       if (aotlinked) {
 929         aotlinked_msg = " aot-linked";
 930       }
 931       if (inited) {
 932         if (InstanceKlass::cast(k)->static_field_size() == 0) {
 933           inited_msg = " inited (no static fields)";
 934         } else {
 935           inited_msg = " inited";
 936         }
 937       }
 938 
 939       AOTMetaspace::rewrite_bytecodes_and_calculate_fingerprints(Thread::current(), ik);
 940       ik->remove_unshareable_info();
 941     }
 942 
 943     if (aot_log_is_enabled(Debug, aot, class)) {
 944       ResourceMark rm;
 945       aot_log_debug(aot, class)("klasses[%5d] = " PTR_FORMAT " %-5s %s%s%s%s%s%s%s%s", i,
 946                             p2i(to_requested(k)), type, k->external_name(),
 947                             kind, hidden, old, unlinked, generated, aotlinked_msg, inited_msg);
 948     }
 949   }
 950 
 951 #define STATS_FORMAT    "= %5d, aot-linked = %5d, inited = %5d"
 952 #define STATS_PARAMS(x) num_ ## x, num_ ## x ## _a, num_ ## x ## _i
 953 
 954   aot_log_info(aot)("Number of classes %d", num_instance_klasses + num_obj_array_klasses + num_type_array_klasses);
 955   aot_log_info(aot)("    instance classes   " STATS_FORMAT, STATS_PARAMS(instance_klasses));
 956   aot_log_info(aot)("      boot             " STATS_FORMAT, STATS_PARAMS(boot_klasses));
 957   aot_log_info(aot)("        vm             " STATS_FORMAT, STATS_PARAMS(vm_klasses));
 958   aot_log_info(aot)("      platform         " STATS_FORMAT, STATS_PARAMS(platform_klasses));
 959   aot_log_info(aot)("      app              " STATS_FORMAT, STATS_PARAMS(app_klasses));
 960   aot_log_info(aot)("      unregistered     " STATS_FORMAT, STATS_PARAMS(unregistered_klasses));
 961   aot_log_info(aot)("      (enum)           " STATS_FORMAT, STATS_PARAMS(enum_klasses));
 962   aot_log_info(aot)("      (hidden)         " STATS_FORMAT, STATS_PARAMS(hidden_klasses));
 963   aot_log_info(aot)("      (old)            " STATS_FORMAT, STATS_PARAMS(old_klasses));
 964   aot_log_info(aot)("      (unlinked)       = %5d, boot = %d, plat = %d, app = %d, unreg = %d",
 965                 num_unlinked_klasses, boot_unlinked, platform_unlinked, app_unlinked, unreg_unlinked);
 966   aot_log_info(aot)("    obj array classes  = %5d", num_obj_array_klasses);
 967   aot_log_info(aot)("    type array classes = %5d", num_type_array_klasses);
 968   aot_log_info(aot)("               symbols = %5d", _symbols->length());
 969 
 970 #undef STATS_FORMAT
 971 #undef STATS_PARAMS
 972 }
 973 
 974 void ArchiveBuilder::make_training_data_shareable() {
 975   auto clean_td = [&] (address& src_obj,  SourceObjInfo& info) {
 976     if (!is_in_buffer_space(info.buffered_addr())) {
 977       return;
 978     }
 979 
 980     if (info.type() == MetaspaceClosureType::KlassTrainingDataType ||
 981         info.type() == MetaspaceClosureType::MethodTrainingDataType ||
 982         info.type() == MetaspaceClosureType::CompileTrainingDataType) {
 983       TrainingData* buffered_td = (TrainingData*)info.buffered_addr();
 984       buffered_td->remove_unshareable_info();
 985     } else if (info.type() == MetaspaceClosureType::MethodDataType) {
 986       MethodData* buffered_mdo = (MethodData*)info.buffered_addr();
 987       buffered_mdo->remove_unshareable_info();
 988     } else if (info.type() == MetaspaceClosureType::MethodCountersType) {
 989       MethodCounters* buffered_mc = (MethodCounters*)info.buffered_addr();
 990       buffered_mc->remove_unshareable_info();
 991     }
 992   };
 993   _src_obj_table.iterate_all(clean_td);
 994 }
 995 
 996 size_t ArchiveBuilder::buffer_to_offset(address p) const {
 997   address requested_p = to_requested(p);
 998   return pointer_delta(requested_p, _requested_static_archive_bottom, 1);
 999 }
1000 
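// Accepts a buffered address, a "source" address, or (during a dynamic dump) an address
// inside the mapped static archive, and returns its offset relative to the requested base
// of the static archive.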
1001 size_t ArchiveBuilder::any_to_offset(address p) const {
1002   if (is_in_mapped_static_archive(p)) {
1003     assert(CDSConfig::is_dumping_dynamic_archive(), "must be");
1004     return pointer_delta(p, _mapped_static_archive_bottom, 1);
1005   }
1006   if (!is_in_buffer_space(p)) {
1007     // p must be a "source" address
1008     p = get_buffered_addr(p);
1009   }
1010   return buffer_to_offset(p);
1011 }
1012 
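// Inverse of buffer_to_offset(): map an offset (relative to the requested base of the
// static archive) back to the corresponding address inside the dump buffer.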
1013 address ArchiveBuilder::offset_to_buffered_address(size_t offset) const {
1014   address requested_addr = _requested_static_archive_bottom + offset;
1015   address buffered_addr = requested_addr - _buffer_to_requested_delta;
1016   assert(is_in_buffer_space(buffered_addr), "bad offset");
1017   return buffered_addr;
1018 }
1019 
1020 void ArchiveBuilder::start_ac_region() {
1021   ro_region()->pack();
1022   start_dump_region(&_ac_region);
1023 }
1024 
1025 void ArchiveBuilder::end_ac_region() {
1026   _ac_region.pack();
1027 }
1028 
1029 #if INCLUDE_CDS_JAVA_HEAP
1030 narrowKlass ArchiveBuilder::get_requested_narrow_klass(Klass* k) {
1031   assert(CDSConfig::is_dumping_heap(), "sanity");
1032   k = get_buffered_klass(k);
1033   Klass* requested_k = to_requested(k);
1034   const int narrow_klass_shift = ArchiveBuilder::precomputed_narrow_klass_shift();
1035 #ifdef ASSERT
1036   const size_t klass_alignment = MAX2(SharedSpaceObjectAlignment, (size_t)nth_bit(narrow_klass_shift));
1037   assert(is_aligned(k, klass_alignment), "Klass " PTR_FORMAT " misaligned.", p2i(k));
1038 #endif
1039   address narrow_klass_base = _requested_static_archive_bottom; // runtime encoding base == runtime mapping start
1040   // Note: use the "raw" version of encode that takes explicit narrow klass base and shift. Don't use any
1041   // of the variants that do sanity checks, nor any of those that use the current - dump - JVM's encoding setting.
1042   return CompressedKlassPointers::encode_not_null_without_asserts(requested_k, narrow_klass_base, narrow_klass_shift);
1043 }
1044 #endif // INCLUDE_CDS_JAVA_HEAP
1045 
1046 // RelocateBufferToRequested --- Relocate all the pointers in rw/ro,
1047 // so that the archive can be mapped to the "requested" location without runtime relocation.
1048 //
1049 // - See ArchiveBuilder header for the definition of "buffer", "mapped" and "requested"
1050 // - ArchivePtrMarker::ptrmap() marks all the pointers in the rw/ro regions
1051 // - Every pointer must have one of the following values:
1052 //   [a] nullptr:
1053 //       No relocation is needed. Remove this pointer from ptrmap so we don't need to
1054 //       consider it at runtime.
1055 //   [b] Points into an object X which is inside the buffer:
1056 //       Adjust this pointer by _buffer_to_requested_delta, so it points to X
1057 //       when the archive is mapped at the requested location.
1058 //   [c] Points into an object Y which is inside mapped static archive:
1059 //       - This happens only during dynamic dump
1060 //       - Adjust this pointer by _mapped_to_requested_static_archive_delta,
1061 //         so it points to Y when the static archive is mapped at the requested location.
1062 template <bool STATIC_DUMP>
1063 class RelocateBufferToRequested : public BitMapClosure {
1064   ArchiveBuilder* _builder;
1065   address _buffer_bottom;
1066   intx _buffer_to_requested_delta;
1067   intx _mapped_to_requested_static_archive_delta;
1068   size_t _max_non_null_offset;
1069 
1070  public:
1071   RelocateBufferToRequested(ArchiveBuilder* builder) {
1072     _builder = builder;
1073     _buffer_bottom = _builder->buffer_bottom();
1074     _buffer_to_requested_delta = builder->buffer_to_requested_delta();
1075     _mapped_to_requested_static_archive_delta = builder->requested_static_archive_bottom() - builder->mapped_static_archive_bottom();
1076     _max_non_null_offset = 0;
1077 
1078     address bottom = _builder->buffer_bottom();
1079     address top = _builder->buffer_top();
1080     address new_bottom = bottom + _buffer_to_requested_delta;
1081     address new_top = top + _buffer_to_requested_delta;
1082     aot_log_debug(aot)("Relocating archive from [" INTPTR_FORMAT " - " INTPTR_FORMAT "] to "
1083                    "[" INTPTR_FORMAT " - " INTPTR_FORMAT "]",
1084                    p2i(bottom), p2i(top),
1085                    p2i(new_bottom), p2i(new_top));
1086   }
1087 
1088   bool do_bit(size_t offset) {
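    // Each bit in the ptrmap covers one pointer-sized slot of the buffer, so slot 'offset'
    // lives at _buffer_bottom + offset * sizeof(address).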
1089     address* p = (address*)_buffer_bottom + offset;
1090     assert(_builder->is_in_buffer_space(p), "pointer must live in buffer space");
1091 
1092     if (*p == nullptr) {
1093       // todo -- clear bit, etc
1094       ArchivePtrMarker::ptrmap()->clear_bit(offset);
1095     } else {
1096       if (STATIC_DUMP) {
1097         assert(_builder->is_in_buffer_space(*p), "old pointer must point inside buffer space");
1098         *p += _buffer_to_requested_delta;
1099         assert(_builder->is_in_requested_static_archive(*p), "new pointer must point inside requested archive");
1100       } else {
1101         if (_builder->is_in_buffer_space(*p)) {
1102           *p += _buffer_to_requested_delta;
1103           // assert is in requested dynamic archive
1104         } else {
1105           assert(_builder->is_in_mapped_static_archive(*p), "old pointer must point inside buffer space or mapped static archive");
1106           *p += _mapped_to_requested_static_archive_delta;
1107           assert(_builder->is_in_requested_static_archive(*p), "new pointer must point inside requested archive");
1108         }
1109       }
1110       _max_non_null_offset = offset;
1111     }
1112 
1113     return true; // keep iterating
1114   }
1115 
1116   void doit() {
1117     ArchivePtrMarker::ptrmap()->iterate(this);
1118     ArchivePtrMarker::compact(_max_non_null_offset);
1119   }
1120 };
1121 
1122 #ifdef _LP64
1123 int ArchiveBuilder::precomputed_narrow_klass_shift() {
1124   // Legacy Mode:
1125   //    We use 32 bits for narrowKlass, which should cover the full 4G Klass range. Shift can be 0.
1126   // CompactObjectHeader Mode:
1127   //    narrowKlass is much smaller, and we use the highest possible shift value to later get the maximum
1128   //    Klass encoding range.
1129   //
1130   // Note that all of this may change in the future, if we decide to correct the pre-calculated
1131   // narrow Klass IDs at archive load time.
1132   assert(UseCompressedClassPointers, "Only needed for compressed class pointers");
1133   return UseCompactObjectHeaders ?  CompressedKlassPointers::max_shift() : 0;
1134 }
1135 #endif // _LP64
1136 
1137 void ArchiveBuilder::relocate_to_requested() {
1138   if (!ro_region()->is_packed()) {
1139     ro_region()->pack();
1140   }
1141   size_t my_archive_size = buffer_top() - buffer_bottom();
1142 
1143   if (CDSConfig::is_dumping_static_archive()) {
1144     _requested_static_archive_top = _requested_static_archive_bottom + my_archive_size;
1145     RelocateBufferToRequested<true> patcher(this);
1146     patcher.doit();
1147   } else {
1148     assert(CDSConfig::is_dumping_dynamic_archive(), "must be");
1149     _requested_dynamic_archive_top = _requested_dynamic_archive_bottom + my_archive_size;
1150     RelocateBufferToRequested<false> patcher(this);
1151     patcher.doit();
1152   }
1153 }
1154 
1155 void ArchiveBuilder::print_stats() {
1156   _alloc_stats.print_stats(int(_ro_region.used()), int(_rw_region.used()));
1157 }
1158 
1159 void ArchiveBuilder::write_archive(FileMapInfo* mapinfo, AOTMappedHeapInfo* mapped_heap_info, AOTStreamedHeapInfo* streamed_heap_info) {
1160   // Make sure NUM_CDS_REGIONS (exported in cds.h) agrees with
1161   // AOTMetaspace::n_regions (internal to hotspot).
1162   assert(NUM_CDS_REGIONS == AOTMetaspace::n_regions, "sanity");
1163 
1164   ResourceMark rm;
1165 
1166   write_region(mapinfo, AOTMetaspace::rw, &_rw_region, /*read_only=*/false,/*allow_exec=*/false);
1167   write_region(mapinfo, AOTMetaspace::ro, &_ro_region, /*read_only=*/true, /*allow_exec=*/false);
1168   write_region(mapinfo, AOTMetaspace::ac, &_ac_region, /*read_only=*/false,/*allow_exec=*/false);
1169 
1170   // Split pointer map into read-write and read-only bitmaps
1171   ArchivePtrMarker::initialize_rw_ro_maps(&_rw_ptrmap, &_ro_ptrmap);
1172 
1173   size_t bitmap_size_in_bytes;
1174   char* bitmap = mapinfo->write_bitmap_region(ArchivePtrMarker::rw_ptrmap(),
1175                                               ArchivePtrMarker::ro_ptrmap(),
1176                                               mapped_heap_info,
1177                                               streamed_heap_info,
1178                                               bitmap_size_in_bytes);
1179 
1180   if (mapped_heap_info != nullptr && mapped_heap_info->is_used()) {
1181     _total_heap_region_size = mapinfo->write_mapped_heap_region(mapped_heap_info);
1182   } else if (streamed_heap_info != nullptr && streamed_heap_info->is_used()) {
1183     _total_heap_region_size = mapinfo->write_streamed_heap_region(streamed_heap_info);
1184   }
1185 
1186   print_region_stats(mapinfo, mapped_heap_info, streamed_heap_info);
1187 
1188   mapinfo->set_requested_base((char*)AOTMetaspace::requested_base_address());
1189   mapinfo->set_header_crc(mapinfo->compute_header_crc());
  // After this point, we should not write any data into mapinfo->header(), since doing
  // so would invalidate the header checksum we have just computed.
1192   mapinfo->write_header();
1193   mapinfo->close();
1194 
1195   if (log_is_enabled(Info, aot)) {
1196     log_info(aot)("Full module graph = %s", CDSConfig::is_dumping_full_module_graph() ? "enabled" : "disabled");
1197     print_stats();
1198   }
1199 
1200   if (log_is_enabled(Info, aot, map)) {
1201     AOTMapLogger::dumptime_log(this, mapinfo, mapped_heap_info, streamed_heap_info, bitmap, bitmap_size_in_bytes);
1202   }
1203   CDS_JAVA_HEAP_ONLY(HeapShared::destroy_archived_object_cache());
1204   FREE_C_HEAP_ARRAY(char, bitmap);
1205 }
1206 
1207 void ArchiveBuilder::write_region(FileMapInfo* mapinfo, int region_idx, DumpRegion* dump_region, bool read_only,  bool allow_exec) {
1208   mapinfo->write_region(region_idx, dump_region->base(), dump_region->used(), read_only, allow_exec);
1209 }
1210 
1211 void ArchiveBuilder::count_relocated_pointer(bool tagged, bool nulled) {
1212   _relocated_ptr_info._num_ptrs ++;
1213   _relocated_ptr_info._num_tagged_ptrs += tagged ? 1 : 0;
1214   _relocated_ptr_info._num_nulled_ptrs += nulled ? 1 : 0;
1215 }
1216 
1217 void ArchiveBuilder::print_region_stats(FileMapInfo *mapinfo,
1218                                         AOTMappedHeapInfo* mapped_heap_info,
1219                                         AOTStreamedHeapInfo* streamed_heap_info) {
1220   // Print statistics of all the regions
1221   const size_t bitmap_used = mapinfo->region_at(AOTMetaspace::bm)->used();
1222   const size_t bitmap_reserved = mapinfo->region_at(AOTMetaspace::bm)->used_aligned();
1223   const size_t total_reserved = _ro_region.reserved()  + _rw_region.reserved() +
1224                                 bitmap_reserved +
1225                                 _total_heap_region_size;
1226   const size_t total_bytes = _ro_region.used()  + _rw_region.used() +
1227                              bitmap_used +
1228                              _total_heap_region_size;
1229   const double total_u_perc = percent_of(total_bytes, total_reserved);
1230 
1231   _rw_region.print(total_reserved);
1232   _ro_region.print(total_reserved);
1233   _ac_region.print(total_reserved);
1234 
1235   print_bitmap_region_stats(bitmap_used, total_reserved);
1236 
1237   if (mapped_heap_info != nullptr && mapped_heap_info->is_used()) {
1238     print_heap_region_stats(mapped_heap_info->buffer_start(), mapped_heap_info->buffer_byte_size(), total_reserved);
1239   } else if (streamed_heap_info != nullptr && streamed_heap_info->is_used()) {
1240     print_heap_region_stats(streamed_heap_info->buffer_start(), streamed_heap_info->buffer_byte_size(), total_reserved);
1241   }
1242 
1243   aot_log_debug(aot)("total   : %9zu [100.0%% of total] out of %9zu bytes [%5.1f%% used]",
1244                      total_bytes, total_reserved, total_u_perc);
1245 }
1246 
1247 void ArchiveBuilder::print_bitmap_region_stats(size_t size, size_t total_size) {
1248   aot_log_debug(aot)("bm space: %9zu [ %4.1f%% of total] out of %9zu bytes [100.0%% used]",
1249                      size, size/double(total_size)*100.0, size);
1250 }
1251 
1252 void ArchiveBuilder::print_heap_region_stats(char* start, size_t size, size_t total_size) {
1253   char* top = start + size;
1254   aot_log_debug(aot)("hp space: %9zu [ %4.1f%% of total] out of %9zu bytes [100.0%% used] at " INTPTR_FORMAT,
1255                      size, size/double(total_size)*100.0, size, p2i(start));
1256 }
1257 
1258 void ArchiveBuilder::report_out_of_space(const char* name, size_t needed_bytes) {
  // This is highly unlikely to happen on 64-bit platforms because we have reserved a 4GB space.
  // On 32-bit we reserve only 256MB, so you could run out of space with 100,000 classes
  // or so.
1262   _rw_region.print_out_of_space_msg(name, needed_bytes);
1263   _ro_region.print_out_of_space_msg(name, needed_bytes);
1264 
1265   log_error(aot)("Unable to allocate from '%s' region: Please reduce the number of shared classes.", name);
1266   AOTMetaspace::unrecoverable_writing_error();
1267 }