1 /*
   2  * Copyright (c) 2020, 2021, Oracle and/or its affiliates. All rights reserved.
   3  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
   4  *
   5  * This code is free software; you can redistribute it and/or modify it
   6  * under the terms of the GNU General Public License version 2 only, as
   7  * published by the Free Software Foundation.
   8  *
   9  * This code is distributed in the hope that it will be useful, but WITHOUT
  10  * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
  11  * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
  12  * version 2 for more details (a copy is included in the LICENSE file that
  13  * accompanied this code).
  14  *
  15  * You should have received a copy of the GNU General Public License version
  16  * 2 along with this work; if not, write to the Free Software Foundation,
  17  * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
  18  *
  19  * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
  20  * or visit www.oracle.com if you need additional information or have any
  21  * questions.
  22  *
  23  */
  24 
  25 #include "precompiled.hpp"
  26 #include "cds/archiveBuilder.hpp"
  27 #include "cds/archiveUtils.hpp"
  28 #include "cds/cppVtables.hpp"
  29 #include "cds/dumpAllocStats.hpp"
  30 #include "cds/metaspaceShared.hpp"
  31 #include "classfile/classLoaderDataShared.hpp"
  32 #include "classfile/symbolTable.hpp"
  33 #include "classfile/systemDictionaryShared.hpp"
  34 #include "classfile/vmClasses.hpp"
  35 #include "interpreter/abstractInterpreter.hpp"
  36 #include "logging/log.hpp"
  37 #include "logging/logStream.hpp"
  38 #include "memory/allStatic.hpp"
  39 #include "memory/memRegion.hpp"
  40 #include "memory/resourceArea.hpp"
  41 #include "oops/instanceKlass.hpp"
  42 #include "oops/objArrayKlass.hpp"
  43 #include "oops/oopHandle.inline.hpp"
  44 #include "runtime/arguments.hpp"
  45 #include "runtime/globals_extension.hpp"
  46 #include "runtime/sharedRuntime.hpp"
  47 #include "runtime/thread.hpp"
  48 #include "utilities/align.hpp"
  49 #include "utilities/bitMap.inline.hpp"
  50 #include "utilities/formatBuffer.hpp"
  51 #include "utilities/hashtable.inline.hpp"
  52 
  53 ArchiveBuilder* ArchiveBuilder::_current = NULL;
  54 
  55 ArchiveBuilder::OtherROAllocMark::~OtherROAllocMark() {
  56   char* newtop = ArchiveBuilder::current()->_ro_region.top();
  57   ArchiveBuilder::alloc_stats()->record_other_type(int(newtop - _oldtop), true);
  58 }
  59 
  60 ArchiveBuilder::SourceObjList::SourceObjList() : _ptrmap(16 * K) {
  61   _total_bytes = 0;
  62   _objs = new (ResourceObj::C_HEAP, mtClassShared) GrowableArray<SourceObjInfo*>(128 * K, mtClassShared);
  63 }
  64 
  65 ArchiveBuilder::SourceObjList::~SourceObjList() {
  66   delete _objs;
  67 }
  68 
  69 void ArchiveBuilder::SourceObjList::append(MetaspaceClosure::Ref* enclosing_ref, SourceObjInfo* src_info) {
  70   // Save this source object for copying
  71   _objs->append(src_info);
  72 
  73   // Prepare for marking the pointers in this source object
  74   assert(is_aligned(_total_bytes, sizeof(address)), "must be");
  75   src_info->set_ptrmap_start(_total_bytes / sizeof(address));
  76   _total_bytes = align_up(_total_bytes + (uintx)src_info->size_in_bytes(), sizeof(address));
  77   src_info->set_ptrmap_end(_total_bytes / sizeof(address));
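  // For illustration only (hypothetical sizes, 64-bit): if _total_bytes was 64 when a
  // 24-byte object is appended, it gets ptrmap_start() == 8 and ptrmap_end() == 11,
  // i.e. bits [8, 11) of _ptrmap -- one bit per pointer-sized word of the source object.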
  78 
  79   BitMap::idx_t bitmap_size_needed = BitMap::idx_t(src_info->ptrmap_end());
  80   if (_ptrmap.size() <= bitmap_size_needed) {
  81     _ptrmap.resize((bitmap_size_needed + 1) * 2);
  82   }
  83 }
  84 
  85 void ArchiveBuilder::SourceObjList::remember_embedded_pointer(SourceObjInfo* src_info, MetaspaceClosure::Ref* ref) {
  86   // src_obj contains a pointer. Remember the location of this pointer in _ptrmap,
  87   // so that we can copy/relocate it later. E.g., if we have
  88  //    class Foo { intx scalar; Bar* ptr; }
  89   //    Foo *f = 0x100;
  90   // To mark the f->ptr pointer on 64-bit platform, this function is called with
  91   //    src_info()->obj() == 0x100
  92   //    ref->addr() == 0x108
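  // Continuing the (made-up) example on a 64-bit platform: field_offset_in_bytes is
  // 0x108 - 0x100 == 8, so the bit set below is src_info->ptrmap_start() + 8 / sizeof(address),
  // i.e. the bit for the second word of Foo.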
  93   address src_obj = src_info->obj();
  94   address* field_addr = ref->addr();
  95   assert(src_info->ptrmap_start() < _total_bytes, "sanity");
  96   assert(src_info->ptrmap_end() <= _total_bytes, "sanity");
  97   assert(*field_addr != NULL, "should have checked");
  98 
  99   intx field_offset_in_bytes = ((address)field_addr) - src_obj;
 100   DEBUG_ONLY(int src_obj_size = src_info->size_in_bytes();)
 101   assert(field_offset_in_bytes >= 0, "must be");
 102   assert(field_offset_in_bytes + intx(sizeof(intptr_t)) <= intx(src_obj_size), "must be");
 103   assert(is_aligned(field_offset_in_bytes, sizeof(address)), "must be");
 104 
 105   BitMap::idx_t idx = BitMap::idx_t(src_info->ptrmap_start() + (uintx)(field_offset_in_bytes / sizeof(address)));
 106   _ptrmap.set_bit(BitMap::idx_t(idx));
 107 }
 108 
 109 class RelocateEmbeddedPointers : public BitMapClosure {
 110   ArchiveBuilder* _builder;
 111   address _dumped_obj;
 112   BitMap::idx_t _start_idx;
 113 public:
 114   RelocateEmbeddedPointers(ArchiveBuilder* builder, address dumped_obj, BitMap::idx_t start_idx) :
 115     _builder(builder), _dumped_obj(dumped_obj), _start_idx(start_idx) {}
 116 
 117   bool do_bit(BitMap::idx_t bit_offset) {
 118     uintx FLAG_MASK = 0x03; // See comments around MetaspaceClosure::FLAG_MASK
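    // Illustrative sketch with made-up values: if *ptr_loc == 0x00007f0001234561, then
    // flag_bits == 0x01 and old_p == 0x00007f0001234560; the dumped copy of old_p is
    // looked up and the flag bits are re-attached to form new_p_and_bits below.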
 119     size_t field_offset = size_t(bit_offset - _start_idx) * sizeof(address);
 120     address* ptr_loc = (address*)(_dumped_obj + field_offset);
 121 
 122     uintx old_p_and_bits = (uintx)(*ptr_loc);
 123     uintx flag_bits = (old_p_and_bits & FLAG_MASK);
 124     address old_p = (address)(old_p_and_bits & (~FLAG_MASK));
 125     address new_p = _builder->get_dumped_addr(old_p);
 126     uintx new_p_and_bits = ((uintx)new_p) | flag_bits;
 127 
 128     log_trace(cds)("Ref: [" PTR_FORMAT "] -> " PTR_FORMAT " => " PTR_FORMAT,
 129                    p2i(ptr_loc), p2i(old_p), p2i(new_p));
 130 
 131     ArchivePtrMarker::set_and_mark_pointer(ptr_loc, (address)(new_p_and_bits));
 132     return true; // keep iterating the bitmap
 133   }
 134 };
 135 
 136 void ArchiveBuilder::SourceObjList::relocate(int i, ArchiveBuilder* builder) {
 137   SourceObjInfo* src_info = objs()->at(i);
 138   assert(src_info->should_copy(), "must be");
 139   BitMap::idx_t start = BitMap::idx_t(src_info->ptrmap_start()); // inclusive
 140   BitMap::idx_t end = BitMap::idx_t(src_info->ptrmap_end());     // exclusive
 141 
 142   RelocateEmbeddedPointers relocator(builder, src_info->dumped_addr(), start);
 143   _ptrmap.iterate(&relocator, start, end);
 144 }
 145 
 146 ArchiveBuilder::ArchiveBuilder() :
 147   _current_dump_space(NULL),
 148   _buffer_bottom(NULL),
 149   _last_verified_top(NULL),
 150   _num_dump_regions_used(0),
 151   _other_region_used_bytes(0),
 152   _requested_static_archive_bottom(NULL),
 153   _requested_static_archive_top(NULL),
 154   _requested_dynamic_archive_bottom(NULL),
 155   _requested_dynamic_archive_top(NULL),
 156   _mapped_static_archive_bottom(NULL),
 157   _mapped_static_archive_top(NULL),
 158   _buffer_to_requested_delta(0),
 159   _rw_region("rw", MAX_SHARED_DELTA),
 160   _ro_region("ro", MAX_SHARED_DELTA),
 161   _rw_src_objs(),
 162   _ro_src_objs(),
 163   _src_obj_table(INITIAL_TABLE_SIZE),
 164   _num_instance_klasses(0),
 165   _num_obj_array_klasses(0),
 166   _num_type_array_klasses(0),
 167   _total_closed_heap_region_size(0),
 168   _total_open_heap_region_size(0),
 169   _estimated_metaspaceobj_bytes(0),
 170   _estimated_hashtable_bytes(0)
 171 {
 172   _klasses = new (ResourceObj::C_HEAP, mtClassShared) GrowableArray<Klass*>(4 * K, mtClassShared);
 173   _symbols = new (ResourceObj::C_HEAP, mtClassShared) GrowableArray<Symbol*>(256 * K, mtClassShared);
 174   _special_refs = new (ResourceObj::C_HEAP, mtClassShared) GrowableArray<SpecialRefInfo>(24 * K, mtClassShared);
 175 
 176   assert(_current == NULL, "must be");
 177   _current = this;
 178 }
 179 
 180 ArchiveBuilder::~ArchiveBuilder() {
 181   assert(_current == this, "must be");
 182   _current = NULL;
 183 
 184   clean_up_src_obj_table();
 185 
 186   for (int i = 0; i < _symbols->length(); i++) {
 187     _symbols->at(i)->decrement_refcount();
 188   }
 189 
 190   delete _klasses;
 191   delete _symbols;
 192   delete _special_refs;
 193 }
 194 
 195 bool ArchiveBuilder::is_dumping_full_module_graph() {
 196   return DumpSharedSpaces && MetaspaceShared::use_full_module_graph();
 197 }
 198 
 199 class GatherKlassesAndSymbols : public UniqueMetaspaceClosure {
 200   ArchiveBuilder* _builder;
 201 
 202 public:
 203   GatherKlassesAndSymbols(ArchiveBuilder* builder) : _builder(builder) {}
 204 
 205   virtual bool do_unique_ref(Ref* ref, bool read_only) {
 206     return _builder->gather_klass_and_symbol(ref, read_only);
 207   }
 208 };
 209 
 210 bool ArchiveBuilder::gather_klass_and_symbol(MetaspaceClosure::Ref* ref, bool read_only) {
 211   if (ref->obj() == NULL) {
 212     return false;
 213   }
 214   if (get_follow_mode(ref) != make_a_copy) {
 215     return false;
 216   }
 217   if (ref->msotype() == MetaspaceObj::ClassType) {
 218     Klass* klass = (Klass*)ref->obj();
 219     assert(klass->is_klass(), "must be");
 220     if (!is_excluded(klass)) {
 221       _klasses->append(klass);
 222       if (klass->is_instance_klass()) {
 223         _num_instance_klasses ++;
 224       } else if (klass->is_objArray_klass()) {
 225         _num_obj_array_klasses ++;
 226       } else {
 227         assert(klass->is_typeArray_klass(), "sanity");
 228         _num_type_array_klasses ++;
 229       }
 230     }
 231     // See RunTimeSharedClassInfo::get_for()
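    // (This extra word presumably accounts for the pointer-sized slot that
    // make_shallow_copy() places in front of each archived InstanceKlass.)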
 232     _estimated_metaspaceobj_bytes += align_up(BytesPerWord, SharedSpaceObjectAlignment);
 233   } else if (ref->msotype() == MetaspaceObj::SymbolType) {
 234     // Make sure the symbol won't be GC'ed while we are dumping the archive.
 235     Symbol* sym = (Symbol*)ref->obj();
 236     sym->increment_refcount();
 237     _symbols->append(sym);
 238   }
 239 
 240   int bytes = ref->size() * BytesPerWord;
 241   _estimated_metaspaceobj_bytes += align_up(bytes, SharedSpaceObjectAlignment);
 242 
 243   return true; // recurse
 244 }
 245 
 246 void ArchiveBuilder::gather_klasses_and_symbols() {
 247   ResourceMark rm;
 248   log_info(cds)("Gathering classes and symbols ... ");
 249   GatherKlassesAndSymbols doit(this);
 250   iterate_roots(&doit, /*is_relocating_pointers=*/false);
 251 #if INCLUDE_CDS_JAVA_HEAP
 252   if (is_dumping_full_module_graph()) {
 253     ClassLoaderDataShared::iterate_symbols(&doit);
 254   }
 255 #endif
 256   doit.finish();
 257 
 258   log_info(cds)("Number of classes %d", _num_instance_klasses + _num_obj_array_klasses + _num_type_array_klasses);
 259   log_info(cds)("    instance classes   = %5d", _num_instance_klasses);
 260   log_info(cds)("    obj array classes  = %5d", _num_obj_array_klasses);
 261   log_info(cds)("    type array classes = %5d", _num_type_array_klasses);
 262   log_info(cds)("               symbols = %5d", _symbols->length());
 263 
 264   if (DumpSharedSpaces) {
 265     // To ensure deterministic contents in the static archive, we need to iterate
 266     // the MetaspaceObjs in a deterministic order. It doesn't matter where
 267     // the MetaspaceObjs are located originally, as they are copied sequentially into
 268     // the archive during the iteration.
 269     //
 270     // The only issue here is that the symbol table and the system dictionaries may be
 271     // randomly ordered, so we copy the symbols and klasses into two arrays and sort
 272     // them deterministically.
 273     //
 274     // During -Xshare:dump, the order of Symbol creation is strictly determined by
 275     // the SharedClassListFile (class loading is done in a single thread and the JIT
 276     // is disabled). Also, Symbols are allocated in monotonically increasing addresses
 277     // (see Symbol::operator new(size_t, int)). So if we iterate the Symbols by
 278     // ascending address order, we ensure that all Symbols are copied into deterministic
 279     // locations in the archive.
 280     //
 281     // TODO: in the future, if we want to produce deterministic contents in the
 282     // dynamic archive, we might need to sort the symbols alphabetically (also see
 283     // DynamicArchiveBuilder::sort_methods()).
 284     sort_symbols_and_fix_hash();
 285     sort_klasses();
 286 
 287     // TODO -- we need a proper estimate for the archived modules, etc,
 288     // but this should be enough for now
 289     _estimated_metaspaceobj_bytes += 200 * 1024 * 1024;
 290   }
 291 }
 292 
 293 int ArchiveBuilder::compare_symbols_by_address(Symbol** a, Symbol** b) {
 294   if (a[0] < b[0]) {
 295     return -1;
 296   } else {
 297     assert(a[0] > b[0], "Duplicated symbol %s unexpected", (*a)->as_C_string());
 298     return 1;
 299   }
 300 }
 301 
 302 void ArchiveBuilder::sort_symbols_and_fix_hash() {
 303   log_info(cds)("Sorting symbols and fixing identity hash ... ");
 304   os::init_random(0x12345678);
 305   _symbols->sort(compare_symbols_by_address);
 306   for (int i = 0; i < _symbols->length(); i++) {
 307     assert(_symbols->at(i)->is_permanent(), "archived symbols must be permanent");
 308     _symbols->at(i)->update_identity_hash();
 309   }
 310 }
 311 
 312 int ArchiveBuilder::compare_klass_by_name(Klass** a, Klass** b) {
 313   return a[0]->name()->fast_compare(b[0]->name());
 314 }
 315 
 316 void ArchiveBuilder::sort_klasses() {
 317   log_info(cds)("Sorting classes ... ");
 318   _klasses->sort(compare_klass_by_name);
 319 }
 320 
 321 size_t ArchiveBuilder::estimate_archive_size() {
 322   // size of the symbol table and two dictionaries, plus the RunTimeSharedClassInfo's
 323   size_t symbol_table_est = SymbolTable::estimate_size_for_archive();
 324   size_t dictionary_est = SystemDictionaryShared::estimate_size_for_archive();
 325   _estimated_hashtable_bytes = symbol_table_est + dictionary_est;
 326 
 327   size_t total = 0;
 328 
 329   total += _estimated_metaspaceobj_bytes;
 330   total += _estimated_hashtable_bytes;
 331 
 332   // allow fragmentation at the end of each dump region
 333   total += _total_dump_regions * MetaspaceShared::core_region_alignment();
 334 
 335   log_info(cds)("_estimated_hashtable_bytes = " SIZE_FORMAT " + " SIZE_FORMAT " = " SIZE_FORMAT,
 336                 symbol_table_est, dictionary_est, _estimated_hashtable_bytes);
 337   log_info(cds)("_estimated_metaspaceobj_bytes = " SIZE_FORMAT, _estimated_metaspaceobj_bytes);
 338   log_info(cds)("total estimate bytes = " SIZE_FORMAT, total);
 339 
 340   return align_up(total, MetaspaceShared::core_region_alignment());
 341 }
 342 
 343 address ArchiveBuilder::reserve_buffer() {
 344   size_t buffer_size = estimate_archive_size();
 345   ReservedSpace rs(buffer_size, MetaspaceShared::core_region_alignment(), os::vm_page_size());
 346   if (!rs.is_reserved()) {
 347     log_error(cds)("Failed to reserve " SIZE_FORMAT " bytes of output buffer.", buffer_size);
 348     vm_direct_exit(0);
 349   }
 350 
 351   // buffer_bottom is the lowest address of the 2 core regions (rw, ro) when
 352   // we are copying the class metadata into the buffer.
 353   address buffer_bottom = (address)rs.base();
 354   log_info(cds)("Reserved output buffer space at " PTR_FORMAT " [" SIZE_FORMAT " bytes]",
 355                 p2i(buffer_bottom), buffer_size);
 356   _shared_rs = rs;
 357 
 358   _buffer_bottom = buffer_bottom;
 359   _last_verified_top = buffer_bottom;
 360   _current_dump_space = &_rw_region;
 361   _num_dump_regions_used = 1;
 362   _other_region_used_bytes = 0;
 363   _current_dump_space->init(&_shared_rs, &_shared_vs);
 364 
 365   ArchivePtrMarker::initialize(&_ptrmap, &_shared_vs);
 366 
 367   // The bottom of the static archive should be mapped at this address by default.
 368   _requested_static_archive_bottom = (address)MetaspaceShared::requested_base_address();
 369 
 370   // The bottom of the archive (that I am writing now) should be mapped at this address by default.
 371   address my_archive_requested_bottom;
 372 
 373   if (DumpSharedSpaces) {
 374     my_archive_requested_bottom = _requested_static_archive_bottom;
 375   } else {
 376     _mapped_static_archive_bottom = (address)MetaspaceObj::shared_metaspace_base();
 377     _mapped_static_archive_top  = (address)MetaspaceObj::shared_metaspace_top();
 378     assert(_mapped_static_archive_top >= _mapped_static_archive_bottom, "must be");
 379     size_t static_archive_size = _mapped_static_archive_top - _mapped_static_archive_bottom;
 380 
 381     // At run time, we will mmap the dynamic archive at my_archive_requested_bottom
 382     _requested_static_archive_top = _requested_static_archive_bottom + static_archive_size;
 383     my_archive_requested_bottom = align_up(_requested_static_archive_top, MetaspaceShared::core_region_alignment());
 384 
 385     _requested_dynamic_archive_bottom = my_archive_requested_bottom;
 386   }
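  // (Sketch of the requested run-time layout for a dynamic dump, addresses hypothetical:
  //    [_requested_static_archive_bottom .. _requested_static_archive_top)   base archive
  //    [_requested_dynamic_archive_bottom .. )                               this archive,
  //  where the dynamic bottom is the static top aligned up to core_region_alignment().)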
 387 
 388   _buffer_to_requested_delta = my_archive_requested_bottom - _buffer_bottom;
 389 
 390   address my_archive_requested_top = my_archive_requested_bottom + buffer_size;
 391   if (my_archive_requested_bottom <  _requested_static_archive_bottom ||
 392       my_archive_requested_top    <= _requested_static_archive_bottom) {
 393     // Size overflow.
 394     log_error(cds)("my_archive_requested_bottom = " INTPTR_FORMAT, p2i(my_archive_requested_bottom));
 395     log_error(cds)("my_archive_requested_top    = " INTPTR_FORMAT, p2i(my_archive_requested_top));
 396     log_error(cds)("SharedBaseAddress (" INTPTR_FORMAT ") is too high. "
 397                    "Please rerun java -Xshare:dump with a lower value", p2i(_requested_static_archive_bottom));
 398     vm_direct_exit(0);
 399   }
 400 
 401   if (DumpSharedSpaces) {
 402     // We don't want any valid object to be at the very bottom of the archive.
 403     // See ArchivePtrMarker::mark_pointer().
 404     rw_region()->allocate(16);
 405   }
 406 
 407   return buffer_bottom;
 408 }
 409 
 410 void ArchiveBuilder::iterate_sorted_roots(MetaspaceClosure* it, bool is_relocating_pointers) {
 411   int i;
 412 
 413   if (!is_relocating_pointers) {
 414     // Don't relocate _symbols, so we can safely call decrement_refcount on the
 415     // original symbols.
 416     int num_symbols = _symbols->length();
 417     for (i = 0; i < num_symbols; i++) {
 418       it->push(_symbols->adr_at(i));
 419     }
 420   }
 421 
 422   int num_klasses = _klasses->length();
 423   for (i = 0; i < num_klasses; i++) {
 424     it->push(_klasses->adr_at(i));
 425   }
 426 
 427   iterate_roots(it, is_relocating_pointers);
 428 }
 429 
 430 class GatherSortedSourceObjs : public MetaspaceClosure {
 431   ArchiveBuilder* _builder;
 432 
 433 public:
 434   GatherSortedSourceObjs(ArchiveBuilder* builder) : _builder(builder) {}
 435 
 436   virtual bool do_ref(Ref* ref, bool read_only) {
 437     return _builder->gather_one_source_obj(enclosing_ref(), ref, read_only);
 438   }
 439 
 440   virtual void push_special(SpecialRef type, Ref* ref, intptr_t* p) {
 441     assert(type == _method_entry_ref, "only special type allowed for now");
 442     address src_obj = ref->obj();
 443     size_t field_offset = pointer_delta(p, src_obj,  sizeof(u1));
 444     _builder->add_special_ref(type, src_obj, field_offset);
 445   };
 446 
 447   virtual void do_pending_ref(Ref* ref) {
 448     if (ref->obj() != NULL) {
 449       _builder->remember_embedded_pointer_in_copied_obj(enclosing_ref(), ref);
 450     }
 451   }
 452 };
 453 
 454 bool ArchiveBuilder::gather_one_source_obj(MetaspaceClosure::Ref* enclosing_ref,
 455                                            MetaspaceClosure::Ref* ref, bool read_only) {
 456   address src_obj = ref->obj();
 457   if (src_obj == NULL) {
 458     return false;
 459   }
 460   ref->set_keep_after_pushing();
 461   remember_embedded_pointer_in_copied_obj(enclosing_ref, ref);
 462 
 463   FollowMode follow_mode = get_follow_mode(ref);
 464   SourceObjInfo src_info(ref, read_only, follow_mode);
 465   bool created;
 466   SourceObjInfo* p = _src_obj_table.add_if_absent(src_obj, src_info, &created);
 467   if (created) {
 468     if (_src_obj_table.maybe_grow(MAX_TABLE_SIZE)) {
 469       log_info(cds, hashtables)("Expanded _src_obj_table table to %d", _src_obj_table.table_size());
 470     }
 471   }
 472 
 473   assert(p->read_only() == src_info.read_only(), "must be");
 474 
 475   if (created && src_info.should_copy()) {
 476     ref->set_user_data((void*)p);
 477     if (read_only) {
 478       _ro_src_objs.append(enclosing_ref, p);
 479     } else {
 480       _rw_src_objs.append(enclosing_ref, p);
 481     }
 482     return true; // Need to recurse into this ref only if we are copying it
 483   } else {
 484     return false;
 485   }
 486 }
 487 
 488 void ArchiveBuilder::add_special_ref(MetaspaceClosure::SpecialRef type, address src_obj, size_t field_offset) {
 489   _special_refs->append(SpecialRefInfo(type, src_obj, field_offset));
 490 }
 491 
 492 void ArchiveBuilder::remember_embedded_pointer_in_copied_obj(MetaspaceClosure::Ref* enclosing_ref,
 493                                                              MetaspaceClosure::Ref* ref) {
 494   assert(ref->obj() != NULL, "should have checked");
 495 
 496   if (enclosing_ref != NULL) {
 497     SourceObjInfo* src_info = (SourceObjInfo*)enclosing_ref->user_data();
 498     if (src_info == NULL) {
 499       // source objects of point_to_it/set_to_null types are not copied
 500       // so we don't need to remember their pointers.
 501     } else {
 502       if (src_info->read_only()) {
 503         _ro_src_objs.remember_embedded_pointer(src_info, ref);
 504       } else {
 505         _rw_src_objs.remember_embedded_pointer(src_info, ref);
 506       }
 507     }
 508   }
 509 }
 510 
 511 void ArchiveBuilder::gather_source_objs() {
 512   ResourceMark rm;
 513   log_info(cds)("Gathering all archivable objects ... ");
 514   gather_klasses_and_symbols();
 515   GatherSortedSourceObjs doit(this);
 516   iterate_sorted_roots(&doit, /*is_relocating_pointers=*/false);
 517   doit.finish();
 518 }
 519 
 520 bool ArchiveBuilder::is_excluded(Klass* klass) {
 521   if (klass->is_instance_klass()) {
 522     InstanceKlass* ik = InstanceKlass::cast(klass);
 523     return SystemDictionaryShared::is_excluded_class(ik);
 524   } else if (klass->is_objArray_klass()) {
 525     if (DynamicDumpSharedSpaces) {
 526       // Don't support archiving of array klasses for now (WHY???)
 527       return true;
 528     }
 529     Klass* bottom = ObjArrayKlass::cast(klass)->bottom_klass();
 530     if (bottom->is_instance_klass()) {
 531       return SystemDictionaryShared::is_excluded_class(InstanceKlass::cast(bottom));
 532     }
 533   }
 534 
 535   return false;
 536 }
 537 
 538 ArchiveBuilder::FollowMode ArchiveBuilder::get_follow_mode(MetaspaceClosure::Ref *ref) {
 539   address obj = ref->obj();
 540   if (MetaspaceShared::is_in_shared_metaspace(obj)) {
 541     // Don't dump existing shared metadata again.
 542     return point_to_it;
 543   } else if (ref->msotype() == MetaspaceObj::MethodDataType) {
 544     return set_to_null;
 545   } else {
 546     if (ref->msotype() == MetaspaceObj::ClassType) {
 547       Klass* klass = (Klass*)ref->obj();
 548       assert(klass->is_klass(), "must be");
 549       if (is_excluded(klass)) {
 550         ResourceMark rm;
 551         log_debug(cds, dynamic)("Skipping class (excluded): %s", klass->external_name());
 552         return set_to_null;
 553       }
 554     }
 555 
 556     return make_a_copy;
 557   }
 558 }
 559 
 560 void ArchiveBuilder::start_dump_space(DumpRegion* next) {
 561   address bottom = _last_verified_top;
 562   address top = (address)(current_dump_space()->top());
 563   _other_region_used_bytes += size_t(top - bottom);
 564 
 565   current_dump_space()->pack(next);
 566   _current_dump_space = next;
 567   _num_dump_regions_used ++;
 568 
 569   _last_verified_top = (address)(current_dump_space()->top());
 570 }
 571 
 572 void ArchiveBuilder::verify_estimate_size(size_t estimate, const char* which) {
 573   address bottom = _last_verified_top;
 574   address top = (address)(current_dump_space()->top());
 575   size_t used = size_t(top - bottom) + _other_region_used_bytes;
 576   int diff = int(estimate) - int(used);
 577 
 578   log_info(cds)("%s estimate = " SIZE_FORMAT " used = " SIZE_FORMAT "; diff = %d bytes", which, estimate, used, diff);
 579   assert(diff >= 0, "Estimate is too small");
 580 
 581   _last_verified_top = top;
 582   _other_region_used_bytes = 0;
 583 }
 584 
 585 void ArchiveBuilder::dump_rw_metadata() {
 586   ResourceMark rm;
 587   log_info(cds)("Allocating RW objects ... ");
 588   make_shallow_copies(&_rw_region, &_rw_src_objs);
 589 
 590 #if INCLUDE_CDS_JAVA_HEAP
 591   if (is_dumping_full_module_graph()) {
 592     // Archive the ModuleEntry's and PackageEntry's of the 3 built-in loaders
 593     char* start = rw_region()->top();
 594     ClassLoaderDataShared::allocate_archived_tables();
 595     alloc_stats()->record_modules(rw_region()->top() - start, /*read_only*/false);
 596   }
 597 #endif
 598 }
 599 
 600 void ArchiveBuilder::dump_ro_metadata() {
 601   ResourceMark rm;
 602   log_info(cds)("Allocating RO objects ... ");
 603 
 604   start_dump_space(&_ro_region);
 605   make_shallow_copies(&_ro_region, &_ro_src_objs);
 606 
 607 #if INCLUDE_CDS_JAVA_HEAP
 608   if (is_dumping_full_module_graph()) {
 609     char* start = ro_region()->top();
 610     ClassLoaderDataShared::init_archived_tables();
 611     alloc_stats()->record_modules(ro_region()->top() - start, /*read_only*/true);
 612   }
 613 #endif
 614 }
 615 
 616 void ArchiveBuilder::make_shallow_copies(DumpRegion *dump_region,
 617                                          const ArchiveBuilder::SourceObjList* src_objs) {
 618   for (int i = 0; i < src_objs->objs()->length(); i++) {
 619     make_shallow_copy(dump_region, src_objs->objs()->at(i));
 620   }
 621   log_info(cds)("done (%d objects)", src_objs->objs()->length());
 622 }
 623 
 624 void ArchiveBuilder::make_shallow_copy(DumpRegion *dump_region, SourceObjInfo* src_info) {
 625   MetaspaceClosure::Ref* ref = src_info->ref();
 626   address src = ref->obj();
 627   int bytes = src_info->size_in_bytes();
 628   char* dest;
 629   char* oldtop;
 630   char* newtop;
 631 
 632   oldtop = dump_region->top();
 633   if (ref->msotype() == MetaspaceObj::ClassType) {
 634     // Save a pointer immediately in front of an InstanceKlass, so
 635     // we can do a quick lookup from InstanceKlass* -> RunTimeSharedClassInfo*
 636     // without building another hashtable. See RunTimeSharedClassInfo::get_for()
 637     // in systemDictionaryShared.cpp.
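    // Rough sketch of what this produces in the dump region (sizes not to scale):
    //   [ 1 pointer-sized slot ][ InstanceKlass body ... ]
    // The slot is presumably filled in later, when the RunTimeSharedClassInfo is written.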
 638     Klass* klass = (Klass*)src;
 639     if (klass->is_instance_klass()) {
 640       SystemDictionaryShared::validate_before_archiving(InstanceKlass::cast(klass));
 641       dump_region->allocate(sizeof(address));
 642     }
 643   }
 644   dest = dump_region->allocate(bytes);
 645   newtop = dump_region->top();
 646 
 647   memcpy(dest, src, bytes);
 648 
 649   intptr_t* archived_vtable = CppVtables::get_archived_vtable(ref->msotype(), (address)dest);
 650   if (archived_vtable != NULL) {
 651     *(address*)dest = (address)archived_vtable;
 652     ArchivePtrMarker::mark_pointer((address*)dest);
 653   }
 654 
 655   log_trace(cds)("Copy: " PTR_FORMAT " ==> " PTR_FORMAT " %d", p2i(src), p2i(dest), bytes);
 656   src_info->set_dumped_addr((address)dest);
 657 
 658   _alloc_stats.record(ref->msotype(), int(newtop - oldtop), src_info->read_only());
 659 }
 660 
 661 address ArchiveBuilder::get_dumped_addr(address src_obj) const {
 662   SourceObjInfo* p = _src_obj_table.lookup(src_obj);
 663   assert(p != NULL, "must be");
 664 
 665   return p->dumped_addr();
 666 }
 667 
 668 void ArchiveBuilder::relocate_embedded_pointers(ArchiveBuilder::SourceObjList* src_objs) {
 669   for (int i = 0; i < src_objs->objs()->length(); i++) {
 670     src_objs->relocate(i, this);
 671   }
 672 }
 673 
 674 void ArchiveBuilder::update_special_refs() {
 675   for (int i = 0; i < _special_refs->length(); i++) {
 676     SpecialRefInfo s = _special_refs->at(i);
 677     size_t field_offset = s.field_offset();
 678     address src_obj = s.src_obj();
 679     address dst_obj = get_dumped_addr(src_obj);
 680     intptr_t* src_p = (intptr_t*)(src_obj + field_offset);
 681     intptr_t* dst_p = (intptr_t*)(dst_obj + field_offset);
 682     assert(s.type() == MetaspaceClosure::_method_entry_ref, "only special type allowed for now");
 683 
 684     assert(*src_p == *dst_p, "must be a copy");
 685     ArchivePtrMarker::mark_pointer((address*)dst_p);
 686   }
 687 }
 688 
 689 class RefRelocator: public MetaspaceClosure {
 690   ArchiveBuilder* _builder;
 691 
 692 public:
 693   RefRelocator(ArchiveBuilder* builder) : _builder(builder) {}
 694 
 695   virtual bool do_ref(Ref* ref, bool read_only) {
 696     if (ref->not_null()) {
 697       ref->update(_builder->get_dumped_addr(ref->obj()));
 698       ArchivePtrMarker::mark_pointer(ref->addr());
 699     }
 700     return false; // Do not recurse.
 701   }
 702 };
 703 
 704 void ArchiveBuilder::relocate_roots() {
 705   log_info(cds)("Relocating external roots ... ");
 706   ResourceMark rm;
 707   RefRelocator doit(this);
 708   iterate_sorted_roots(&doit, /*is_relocating_pointers=*/true);
 709   doit.finish();
 710   log_info(cds)("done");
 711 }
 712 
 713 void ArchiveBuilder::relocate_metaspaceobj_embedded_pointers() {
 714   log_info(cds)("Relocating embedded pointers in core regions ... ");
 715   relocate_embedded_pointers(&_rw_src_objs);
 716   relocate_embedded_pointers(&_ro_src_objs);
 717   update_special_refs();
 718 }
 719 
 720 // We must relocate vmClasses::_klasses[] only after we have copied the
 721 // java objects during dump_java_heap_objects(): during the object copy, we operate on
 722 // old objects which assert that their klass is the original klass.
 723 void ArchiveBuilder::relocate_vm_classes() {
 724   log_info(cds)("Relocating vmClasses::_klasses[] ... ");
 725   ResourceMark rm;
 726   RefRelocator doit(this);
 727   vmClasses::metaspace_pointers_do(&doit);
 728 }
 729 
 730 void ArchiveBuilder::make_klasses_shareable() {
 731   for (int i = 0; i < klasses()->length(); i++) {
 732     Klass* k = klasses()->at(i);
 733     k->remove_java_mirror();
 734     if (k->is_objArray_klass()) {
 735       // InstanceKlass and TypeArrayKlass will in turn call remove_unshareable_info
 736       // on their array classes.
 737     } else if (k->is_typeArray_klass()) {
 738       k->remove_unshareable_info();
 739     } else {
 740       assert(k->is_instance_klass(), " must be");
 741       InstanceKlass* ik = InstanceKlass::cast(k);
 742       if (DynamicDumpSharedSpaces) {
 743         // For static dump, class loader types are already set.
 744         ik->assign_class_loader_type();
 745       }
 746 
 747       MetaspaceShared::rewrite_nofast_bytecodes_and_calculate_fingerprints(Thread::current(), ik);
 748       ik->remove_unshareable_info();
 749 
 750       if (log_is_enabled(Debug, cds, class)) {
 751         ResourceMark rm;
 752         log_debug(cds, class)("klasses[%4d] = " PTR_FORMAT " %s", i, p2i(to_requested(ik)), ik->external_name());
 753       }
 754     }
 755   }
 756 }
 757 
 758 uintx ArchiveBuilder::buffer_to_offset(address p) const {
 759   address requested_p = to_requested(p);
 760   assert(requested_p >= _requested_static_archive_bottom, "must be");
 761   return requested_p - _requested_static_archive_bottom;
 762 }
 763 
 764 uintx ArchiveBuilder::any_to_offset(address p) const {
 765   if (is_in_mapped_static_archive(p)) {
 766     assert(DynamicDumpSharedSpaces, "must be");
 767     return p - _mapped_static_archive_bottom;
 768   }
 769   return buffer_to_offset(p);
 770 }
 771 
 772 // Update a Java object to point its Klass* to the new location after
 773 // shared archive has been compacted.
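// (A sketch of what happens below, assuming compressed class pointers: the narrowKlass
// written into the object is the relocated Klass' offset from
// _requested_static_archive_bottom, possibly right-shifted by the compressed-klass shift,
// so that it decodes correctly once the archive is mapped at the requested base.)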
 774 void ArchiveBuilder::relocate_klass_ptr(oop o) {
 775   assert(DumpSharedSpaces, "sanity");
 776   Klass* k = get_relocated_klass(o->klass());
 777   Klass* requested_k = to_requested(k);
 778   narrowKlass nk = CompressedKlassPointers::encode_not_null(requested_k, _requested_static_archive_bottom);
 779   o->set_narrow_klass(nk);
 780 }
 781 
 782 // RelocateBufferToRequested --- Relocate all the pointers in rw/ro,
 783 // so that the archive can be mapped to the "requested" location without runtime relocation.
 784 //
 785 // - See ArchiveBuilder header for the definition of "buffer", "mapped" and "requested"
 786 // - ArchivePtrMarker::ptrmap() marks all the pointers in the rw/ro regions
 787 // - Every pointer must have one of the following values:
 788 //   [a] NULL:
 789 //       No relocation is needed. Remove this pointer from ptrmap so we don't need to
 790 //       consider it at runtime.
 791 //   [b] Points into an object X which is inside the buffer:
 792 //       Adjust this pointer by _buffer_to_requested_delta, so it points to X
 793 //       when the archive is mapped at the requested location.
 794 //   [c] Points into an object Y which is inside mapped static archive:
 795 //       - This happens only during dynamic dump
 796 //       - Adjust this pointer by _mapped_to_requested_static_archive_delta,
 797 //         so it points to Y when the static archive is mapped at the requested location.
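// A made-up example of case [b] during a static dump: if the buffer was reserved at
// 0x0000000801000000 and the requested base (SharedBaseAddress) is 0x0000000800000000,
// then _buffer_to_requested_delta == -0x1000000, and a marked slot holding
// 0x0000000801000420 is rewritten to 0x0000000800000420 -- the address its pointee
// will have once the archive is mapped at the requested location.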
 798 template <bool STATIC_DUMP>
 799 class RelocateBufferToRequested : public BitMapClosure {
 800   ArchiveBuilder* _builder;
 801   address _buffer_bottom;
 802   intx _buffer_to_requested_delta;
 803   intx _mapped_to_requested_static_archive_delta;
 804   size_t _max_non_null_offset;
 805 
 806  public:
 807   RelocateBufferToRequested(ArchiveBuilder* builder) {
 808     _builder = builder;
 809     _buffer_bottom = _builder->buffer_bottom();
 810     _buffer_to_requested_delta = builder->buffer_to_requested_delta();
 811     _mapped_to_requested_static_archive_delta = builder->requested_static_archive_bottom() - builder->mapped_static_archive_bottom();
 812     _max_non_null_offset = 0;
 813 
 814     address bottom = _builder->buffer_bottom();
 815     address top = _builder->buffer_top();
 816     address new_bottom = bottom + _buffer_to_requested_delta;
 817     address new_top = top + _buffer_to_requested_delta;
 818     log_debug(cds)("Relocating archive from [" INTPTR_FORMAT " - " INTPTR_FORMAT "] to "
 819                    "[" INTPTR_FORMAT " - " INTPTR_FORMAT "]",
 820                    p2i(bottom), p2i(top),
 821                    p2i(new_bottom), p2i(new_top));
 822   }
 823 
 824   bool do_bit(size_t offset) {
 825     address* p = (address*)_buffer_bottom + offset;
 826     assert(_builder->is_in_buffer_space(p), "pointer must live in buffer space");
 827 
 828     if (*p == NULL) {
 829       // todo -- clear bit, etc
 830       ArchivePtrMarker::ptrmap()->clear_bit(offset);
 831     } else {
 832       if (STATIC_DUMP) {
 833         assert(_builder->is_in_buffer_space(*p), "old pointer must point inside buffer space");
 834         *p += _buffer_to_requested_delta;
 835         assert(_builder->is_in_requested_static_archive(*p), "new pointer must point inside requested archive");
 836       } else {
 837         if (_builder->is_in_buffer_space(*p)) {
 838           *p += _buffer_to_requested_delta;
 839           // assert is in requested dynamic archive
 840         } else {
 841           assert(_builder->is_in_mapped_static_archive(*p), "old pointer must point inside buffer space or mapped static archive");
 842           *p += _mapped_to_requested_static_archive_delta;
 843           assert(_builder->is_in_requested_static_archive(*p), "new pointer must point inside requested archive");
 844         }
 845       }
 846       _max_non_null_offset = offset;
 847     }
 848 
 849     return true; // keep iterating
 850   }
 851 
 852   void doit() {
 853     ArchivePtrMarker::ptrmap()->iterate(this);
 854     ArchivePtrMarker::compact(_max_non_null_offset);
 855   }
 856 };
 857 
 858 
 859 void ArchiveBuilder::relocate_to_requested() {
 860   ro_region()->pack();
 861 
 862   size_t my_archive_size = buffer_top() - buffer_bottom();
 863 
 864   if (DumpSharedSpaces) {
 865     _requested_static_archive_top = _requested_static_archive_bottom + my_archive_size;
 866     RelocateBufferToRequested<true> patcher(this);
 867     patcher.doit();
 868   } else {
 869     assert(DynamicDumpSharedSpaces, "must be");
 870     _requested_dynamic_archive_top = _requested_dynamic_archive_bottom + my_archive_size;
 871     RelocateBufferToRequested<false> patcher(this);
 872     patcher.doit();
 873   }
 874 }
 875 
 876 // Write detailed info to a mapfile to analyze contents of the archive.
 877 // static dump:
 878 //   java -Xshare:dump -Xlog:cds+map=trace:file=cds.map:none:filesize=0
 879 // dynamic dump:
 880 //   java -cp MyApp.jar -XX:ArchiveClassesAtExit=MyApp.jsa \
 881 //        -Xlog:cds+map=trace:file=cds.map:none:filesize=0 MyApp
 882 //
 883 // We need to do some address translation because the buffers used at dump time may be mapped to
 884 // a different location at runtime. At dump time, the buffers may be at arbitrary locations
 885 // picked by the OS. At runtime, we try to map at a fixed location (SharedBaseAddress). For
 886 // consistency, we log everything using runtime addresses.
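// For example (hypothetical values), a region line produced by write_region() below
// looks roughly like:
//   [rw region          0x0000000800000000 - 0x0000000800542000   5513216 bytes]
// where the addresses shown are the requested run-time locations, not the dump-time buffer.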
 887 class ArchiveBuilder::CDSMapLogger : AllStatic {
 888   static intx buffer_to_runtime_delta() {
 889     // Translate the buffers used by the RW/RO regions to their eventual (requested) locations
 890     // at runtime.
 891     return ArchiveBuilder::current()->buffer_to_requested_delta();
 892   }
 893 
 894   // rw/ro regions only
 895   static void write_dump_region(const char* name, DumpRegion* region) {
 896     address region_base = address(region->base());
 897     address region_top  = address(region->top());
 898     write_region(name, region_base, region_top, region_base + buffer_to_runtime_delta());
 899   }
 900 
 901 #define _LOG_PREFIX PTR_FORMAT ": @@ %-17s %d"
 902 
 903   static void write_klass(Klass* k, address runtime_dest, const char* type_name, int bytes, Thread* current) {
 904     ResourceMark rm(current);
 905     log_debug(cds, map)(_LOG_PREFIX " %s",
 906                         p2i(runtime_dest), type_name, bytes, k->external_name());
 907   }
 908   static void write_method(Method* m, address runtime_dest, const char* type_name, int bytes, Thread* current) {
 909     ResourceMark rm(current);
 910     log_debug(cds, map)(_LOG_PREFIX " %s",
 911                         p2i(runtime_dest), type_name, bytes,  m->external_name());
 912   }
 913 
 914   // rw/ro regions only
 915   static void write_objects(DumpRegion* region, const ArchiveBuilder::SourceObjList* src_objs) {
 916     address last_obj_base = address(region->base());
 917     address last_obj_end  = address(region->base());
 918     address region_end    = address(region->end());
 919     Thread* current = Thread::current();
 920     for (int i = 0; i < src_objs->objs()->length(); i++) {
 921       SourceObjInfo* src_info = src_objs->at(i);
 922       address src = src_info->orig_obj();
 923       address dest = src_info->dumped_addr();
 924       write_data(last_obj_base, dest, last_obj_base + buffer_to_runtime_delta());
 925       address runtime_dest = dest + buffer_to_runtime_delta();
 926       int bytes = src_info->size_in_bytes();
 927 
 928       MetaspaceObj::Type type = src_info->msotype();
 929       const char* type_name = MetaspaceObj::type_name(type);
 930 
 931       switch (type) {
 932       case MetaspaceObj::ClassType:
 933         write_klass((Klass*)src, runtime_dest, type_name, bytes, current);
 934         break;
 935       case MetaspaceObj::ConstantPoolType:
 936         write_klass(((ConstantPool*)src)->pool_holder(),
 937                     runtime_dest, type_name, bytes, current);
 938         break;
 939       case MetaspaceObj::ConstantPoolCacheType:
 940         write_klass(((ConstantPoolCache*)src)->constant_pool()->pool_holder(),
 941                     runtime_dest, type_name, bytes, current);
 942         break;
 943       case MetaspaceObj::MethodType:
 944         write_method((Method*)src, runtime_dest, type_name, bytes, current);
 945         break;
 946       case MetaspaceObj::ConstMethodType:
 947         write_method(((ConstMethod*)src)->method(), runtime_dest, type_name, bytes, current);
 948         break;
 949       case MetaspaceObj::SymbolType:
 950         {
 951           ResourceMark rm(current);
 952           Symbol* s = (Symbol*)src;
 953           log_debug(cds, map)(_LOG_PREFIX " %s", p2i(runtime_dest), type_name, bytes,
 954                               s->as_quoted_ascii());
 955         }
 956         break;
 957       default:
 958         log_debug(cds, map)(_LOG_PREFIX, p2i(runtime_dest), type_name, bytes);
 959         break;
 960       }
 961 
 962       last_obj_base = dest;
 963       last_obj_end  = dest + bytes;
 964     }
 965 
 966     write_data(last_obj_base, last_obj_end, last_obj_base + buffer_to_runtime_delta());
 967     if (last_obj_end < region_end) {
 968       log_debug(cds, map)(PTR_FORMAT ": @@ Misc data " SIZE_FORMAT " bytes",
 969                           p2i(last_obj_end + buffer_to_runtime_delta()),
 970                           size_t(region_end - last_obj_end));
 971       write_data(last_obj_end, region_end, last_obj_end + buffer_to_runtime_delta());
 972     }
 973   }
 974 
 975 #undef _LOG_PREFIX
 976 
 977   // Write information about a region, whose address at dump time is [base .. top). At
 978   // runtime, this region will be mapped to runtime_base.  runtime_base is 0 if this
 979   // region will be mapped at os-selected addresses (such as the bitmap region), or will
 980   // be accessed with os::read (the header).
 981   static void write_region(const char* name, address base, address top, address runtime_base) {
 982     size_t size = top - base;
 983     base = runtime_base;
 984     top = runtime_base + size;
 985     log_info(cds, map)("[%-18s " PTR_FORMAT " - " PTR_FORMAT " " SIZE_FORMAT_W(9) " bytes]",
 986                        name, p2i(base), p2i(top), size);
 987   }
 988 
 989   // open and closed archive regions
 990   static void write_heap_region(const char* which, GrowableArray<MemRegion> *regions) {
 991     for (int i = 0; i < regions->length(); i++) {
 992       address start = address(regions->at(i).start());
 993       address end = address(regions->at(i).end());
 994       write_region(which, start, end, start);
 995       write_data(start, end, start);
 996     }
 997   }
 998 
 999   // Dump all the data [base...top). Pretend that the base address
1000   // will be mapped to runtime_base at run-time.
1001   static void write_data(address base, address top, address runtime_base) {
1002     assert(top >= base, "must be");
1003 
1004     LogStreamHandle(Trace, cds, map) lsh;
1005     if (lsh.is_enabled()) {
1006       os::print_hex_dump(&lsh, base, top, sizeof(address), 32, runtime_base);
1007     }
1008   }
1009 
1010   static void write_header(FileMapInfo* mapinfo) {
1011     LogStreamHandle(Info, cds, map) lsh;
1012     if (lsh.is_enabled()) {
1013       mapinfo->print(&lsh);
1014     }
1015   }
1016 
1017 public:
1018   static void write(ArchiveBuilder* builder, FileMapInfo* mapinfo,
1019              GrowableArray<MemRegion> *closed_heap_regions,
1020              GrowableArray<MemRegion> *open_heap_regions,
1021              char* bitmap, size_t bitmap_size_in_bytes) {
1022     log_info(cds, map)("%s CDS archive map for %s", DumpSharedSpaces ? "Static" : "Dynamic", mapinfo->full_path());
1023 
1024     address header = address(mapinfo->header());
1025     address header_end = header + mapinfo->header()->header_size();
1026     write_region("header", header, header_end, 0);
1027     write_header(mapinfo);
1028     write_data(header, header_end, 0);
1029 
1030     DumpRegion* rw_region = &builder->_rw_region;
1031     DumpRegion* ro_region = &builder->_ro_region;
1032 
1033     write_dump_region("rw region", rw_region);
1034     write_objects(rw_region, &builder->_rw_src_objs);
1035 
1036     write_dump_region("ro region", ro_region);
1037     write_objects(ro_region, &builder->_ro_src_objs);
1038 
1039     address bitmap_end = address(bitmap + bitmap_size_in_bytes);
1040     write_region("bitmap", address(bitmap), bitmap_end, 0);
1041     write_data(address(bitmap), bitmap_end, 0);
1042 
1043     if (closed_heap_regions != NULL) {
1044       write_heap_region("closed heap region", closed_heap_regions);
1045     }
1046     if (open_heap_regions != NULL) {
1047       write_heap_region("open heap region", open_heap_regions);
1048     }
1049 
1050     log_info(cds, map)("[End of CDS archive map]");
1051   }
1052 };
1053 
1054 void ArchiveBuilder::print_stats() {
1055   _alloc_stats.print_stats(int(_ro_region.used()), int(_rw_region.used()));
1056 }
1057 
1058 void ArchiveBuilder::clean_up_src_obj_table() {
1059   SrcObjTableCleaner cleaner;
1060   _src_obj_table.iterate(&cleaner);
1061 }
1062 
1063 void ArchiveBuilder::write_archive(FileMapInfo* mapinfo,
1064                                    GrowableArray<MemRegion>* closed_heap_regions,
1065                                    GrowableArray<MemRegion>* open_heap_regions,
1066                                    GrowableArray<ArchiveHeapOopmapInfo>* closed_heap_oopmaps,
1067                                    GrowableArray<ArchiveHeapOopmapInfo>* open_heap_oopmaps) {
1068   // Make sure NUM_CDS_REGIONS (exported in cds.h) agrees with
1069   // MetaspaceShared::n_regions (internal to hotspot).
1070   assert(NUM_CDS_REGIONS == MetaspaceShared::n_regions, "sanity");
1071 
1072   write_region(mapinfo, MetaspaceShared::rw, &_rw_region, /*read_only=*/false,/*allow_exec=*/false);
1073   write_region(mapinfo, MetaspaceShared::ro, &_ro_region, /*read_only=*/true, /*allow_exec=*/false);
1074 
1075   size_t bitmap_size_in_bytes;
1076   char* bitmap = mapinfo->write_bitmap_region(ArchivePtrMarker::ptrmap(), closed_heap_oopmaps, open_heap_oopmaps,
1077                                               bitmap_size_in_bytes);
1078 
1079   if (closed_heap_regions != NULL) {
1080     _total_closed_heap_region_size = mapinfo->write_archive_heap_regions(
1081                                         closed_heap_regions,
1082                                         closed_heap_oopmaps,
1083                                         MetaspaceShared::first_closed_archive_heap_region,
1084                                         MetaspaceShared::max_closed_archive_heap_region);
1085     _total_open_heap_region_size = mapinfo->write_archive_heap_regions(
1086                                         open_heap_regions,
1087                                         open_heap_oopmaps,
1088                                         MetaspaceShared::first_open_archive_heap_region,
1089                                         MetaspaceShared::max_open_archive_heap_region);
1090   }
1091 
1092   print_region_stats(mapinfo, closed_heap_regions, open_heap_regions);
1093 
1094   mapinfo->set_requested_base((char*)MetaspaceShared::requested_base_address());
1095   if (mapinfo->header()->magic() == CDS_DYNAMIC_ARCHIVE_MAGIC) {
1096     mapinfo->set_header_base_archive_name_size(strlen(Arguments::GetSharedArchivePath()) + 1);
1097     mapinfo->set_header_base_archive_is_default(FLAG_IS_DEFAULT(SharedArchiveFile));
1098   }
1099   mapinfo->set_header_crc(mapinfo->compute_header_crc());
1100   // After this point, we should not write any data into mapinfo->header() since this
1101   // would corrupt the checksum we have just calculated.
1102   mapinfo->write_header();
1103   mapinfo->close();
1104 
1105   if (log_is_enabled(Info, cds)) {
1106     print_stats();
1107   }
1108 
1109   if (log_is_enabled(Info, cds, map)) {
1110     CDSMapLogger::write(this, mapinfo, closed_heap_regions, open_heap_regions,
1111                         bitmap, bitmap_size_in_bytes);
1112   }
1113   FREE_C_HEAP_ARRAY(char, bitmap);
1114 }
1115 
1116 void ArchiveBuilder::write_region(FileMapInfo* mapinfo, int region_idx, DumpRegion* dump_region, bool read_only,  bool allow_exec) {
1117   mapinfo->write_region(region_idx, dump_region->base(), dump_region->used(), read_only, allow_exec);
1118 }
1119 
1120 void ArchiveBuilder::print_region_stats(FileMapInfo *mapinfo,
1121                                         GrowableArray<MemRegion>* closed_heap_regions,
1122                                         GrowableArray<MemRegion>* open_heap_regions) {
1123   // Print statistics of all the regions
1124   const size_t bitmap_used = mapinfo->space_at(MetaspaceShared::bm)->used();
1125   const size_t bitmap_reserved = mapinfo->space_at(MetaspaceShared::bm)->used_aligned();
1126   const size_t total_reserved = _ro_region.reserved()  + _rw_region.reserved() +
1127                                 bitmap_reserved +
1128                                 _total_closed_heap_region_size +
1129                                 _total_open_heap_region_size;
1130   const size_t total_bytes = _ro_region.used()  + _rw_region.used() +
1131                              bitmap_used +
1132                              _total_closed_heap_region_size +
1133                              _total_open_heap_region_size;
1134   const double total_u_perc = percent_of(total_bytes, total_reserved);
1135 
1136   _rw_region.print(total_reserved);
1137   _ro_region.print(total_reserved);
1138 
1139   print_bitmap_region_stats(bitmap_used, total_reserved);
1140 
1141   if (closed_heap_regions != NULL) {
1142     print_heap_region_stats(closed_heap_regions, "ca", total_reserved);
1143     print_heap_region_stats(open_heap_regions, "oa", total_reserved);
1144   }
1145 
1146   log_debug(cds)("total    : " SIZE_FORMAT_W(9) " [100.0%% of total] out of " SIZE_FORMAT_W(9) " bytes [%5.1f%% used]",
1147                  total_bytes, total_reserved, total_u_perc);
1148 }
1149 
1150 void ArchiveBuilder::print_bitmap_region_stats(size_t size, size_t total_size) {
1151   log_debug(cds)("bm  space: " SIZE_FORMAT_W(9) " [ %4.1f%% of total] out of " SIZE_FORMAT_W(9) " bytes [100.0%% used]",
1152                  size, size/double(total_size)*100.0, size);
1153 }
1154 
1155 void ArchiveBuilder::print_heap_region_stats(GrowableArray<MemRegion> *heap_mem,
1156                                              const char *name, size_t total_size) {
1157   int arr_len = heap_mem == NULL ? 0 : heap_mem->length();
1158   for (int i = 0; i < arr_len; i++) {
1159       char* start = (char*)heap_mem->at(i).start();
1160       size_t size = heap_mem->at(i).byte_size();
1161       char* top = start + size;
1162       log_debug(cds)("%s%d space: " SIZE_FORMAT_W(9) " [ %4.1f%% of total] out of " SIZE_FORMAT_W(9) " bytes [100.0%% used] at " INTPTR_FORMAT,
1163                      name, i, size, size/double(total_size)*100.0, size, p2i(start));
1164   }
1165 }
1166 
1167 void ArchiveBuilder::report_out_of_space(const char* name, size_t needed_bytes) {
1168   // This is highly unlikely to happen on 64-bits because we have reserved a 4GB space.
1169   // On 32-bit we reserve only 256MB so you could run out of space with 100,000 classes
1170   // or so.
1171   _rw_region.print_out_of_space_msg(name, needed_bytes);
1172   _ro_region.print_out_of_space_msg(name, needed_bytes);
1173 
1174   vm_exit_during_initialization(err_msg("Unable to allocate from '%s' region", name),
1175                                 "Please reduce the number of shared classes.");
1176 }
1177 
1178 
1179 #ifndef PRODUCT
1180 void ArchiveBuilder::assert_is_vm_thread() {
1181   assert(Thread::current()->is_VM_thread(), "ArchiveBuilder should be used only inside the VMThread");
1182 }
1183 #endif