/*
 * Copyright (c) 2020, 2022, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#include "precompiled.hpp"
#include "cds/archiveBuilder.hpp"
#include "cds/archiveUtils.hpp"
#include "cds/cppVtables.hpp"
#include "cds/dumpAllocStats.hpp"
#include "cds/heapShared.hpp"
#include "cds/metaspaceShared.hpp"
#include "classfile/classLoaderDataShared.hpp"
#include "classfile/symbolTable.hpp"
#include "classfile/systemDictionaryShared.hpp"
#include "classfile/vmClasses.hpp"
#include "interpreter/abstractInterpreter.hpp"
#include "logging/log.hpp"
#include "logging/logStream.hpp"
#include "memory/allStatic.hpp"
#include "memory/memRegion.hpp"
#include "memory/resourceArea.hpp"
#include "oops/instanceKlass.hpp"
#include "oops/objArrayKlass.hpp"
#include "oops/objArrayOop.inline.hpp"
#include "oops/oopHandle.inline.hpp"
#include "runtime/arguments.hpp"
#include "runtime/globals_extension.hpp"
#include "runtime/sharedRuntime.hpp"
#include "runtime/thread.hpp"
#include "utilities/align.hpp"
#include "utilities/bitMap.inline.hpp"
#include "utilities/formatBuffer.hpp"
#include "utilities/hashtable.inline.hpp"

ArchiveBuilder* ArchiveBuilder::_current = NULL;

ArchiveBuilder::OtherROAllocMark::~OtherROAllocMark() {
  char* newtop = ArchiveBuilder::current()->_ro_region.top();
  ArchiveBuilder::alloc_stats()->record_other_type(int(newtop - _oldtop), true);
}

ArchiveBuilder::SourceObjList::SourceObjList() : _ptrmap(16 * K) {
  _total_bytes = 0;
  _objs = new (ResourceObj::C_HEAP, mtClassShared) GrowableArray<SourceObjInfo*>(128 * K, mtClassShared);
}

ArchiveBuilder::SourceObjList::~SourceObjList() {
  delete _objs;
}

void ArchiveBuilder::SourceObjList::append(MetaspaceClosure::Ref* enclosing_ref, SourceObjInfo* src_info) {
  // Save this source object for copying
  _objs->append(src_info);

  // Prepare for marking the pointers in this source object
  assert(is_aligned(_total_bytes, sizeof(address)), "must be");
  src_info->set_ptrmap_start(_total_bytes / sizeof(address));
  _total_bytes = align_up(_total_bytes + (uintx)src_info->size_in_bytes(), sizeof(address));
  src_info->set_ptrmap_end(_total_bytes / sizeof(address));

  BitMap::idx_t bitmap_size_needed = BitMap::idx_t(src_info->ptrmap_end());
  if (_ptrmap.size() <= bitmap_size_needed) {
    _ptrmap.resize((bitmap_size_needed + 1) * 2);
  }
}

void ArchiveBuilder::SourceObjList::remember_embedded_pointer(SourceObjInfo* src_info, MetaspaceClosure::Ref* ref) {
  // src_obj contains a pointer. Remember the location of this pointer in _ptrmap,
  // so that we can copy/relocate it later. E.g., if we have
  //    class Foo { intx scalar; Bar* ptr; }
  //    Foo *f = 0x100;
  // To mark the f->ptr pointer on a 64-bit platform, this function is called with
  //    src_info()->obj() == 0x100
  //    ref->addr() == 0x108
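  // Working through the arithmetic below for this example (illustrative
  // addresses only): field_offset_in_bytes == 0x108 - 0x100 == 8, so if
  // ptrmap_start() were, say, 0x20, the bit set in _ptrmap would be
  //    idx == 0x20 + 8 / sizeof(address) == 0x21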
  address src_obj = src_info->obj();
  address* field_addr = ref->addr();
  assert(src_info->ptrmap_start() < _total_bytes, "sanity");
  assert(src_info->ptrmap_end() <= _total_bytes, "sanity");
  assert(*field_addr != NULL, "should have checked");

  intx field_offset_in_bytes = ((address)field_addr) - src_obj;
  DEBUG_ONLY(int src_obj_size = src_info->size_in_bytes();)
  assert(field_offset_in_bytes >= 0, "must be");
  assert(field_offset_in_bytes + intx(sizeof(intptr_t)) <= intx(src_obj_size), "must be");
  assert(is_aligned(field_offset_in_bytes, sizeof(address)), "must be");

  BitMap::idx_t idx = BitMap::idx_t(src_info->ptrmap_start() + (uintx)(field_offset_in_bytes / sizeof(address)));
  _ptrmap.set_bit(BitMap::idx_t(idx));
}

class RelocateEmbeddedPointers : public BitMapClosure {
  ArchiveBuilder* _builder;
  address _dumped_obj;
  BitMap::idx_t _start_idx;
public:
  RelocateEmbeddedPointers(ArchiveBuilder* builder, address dumped_obj, BitMap::idx_t start_idx) :
    _builder(builder), _dumped_obj(dumped_obj), _start_idx(start_idx) {}

  bool do_bit(BitMap::idx_t bit_offset) {
    size_t field_offset = size_t(bit_offset - _start_idx) * sizeof(address);
    address* ptr_loc = (address*)(_dumped_obj + field_offset);

    address old_p = *ptr_loc;
    address new_p = _builder->get_dumped_addr(old_p);

    log_trace(cds)("Ref: [" PTR_FORMAT "] -> " PTR_FORMAT " => " PTR_FORMAT,
                   p2i(ptr_loc), p2i(old_p), p2i(new_p));

    ArchivePtrMarker::set_and_mark_pointer(ptr_loc, new_p);
    return true; // keep iterating the bitmap
  }
};

void ArchiveBuilder::SourceObjList::relocate(int i, ArchiveBuilder* builder) {
  SourceObjInfo* src_info = objs()->at(i);
  assert(src_info->should_copy(), "must be");
  BitMap::idx_t start = BitMap::idx_t(src_info->ptrmap_start()); // inclusive
  BitMap::idx_t end = BitMap::idx_t(src_info->ptrmap_end());     // exclusive

  RelocateEmbeddedPointers relocator(builder, src_info->dumped_addr(), start);
  _ptrmap.iterate(&relocator, start, end);
}

ArchiveBuilder::ArchiveBuilder() :
  _current_dump_space(NULL),
  _buffer_bottom(NULL),
  _last_verified_top(NULL),
  _num_dump_regions_used(0),
  _other_region_used_bytes(0),
  _requested_static_archive_bottom(NULL),
  _requested_static_archive_top(NULL),
  _requested_dynamic_archive_bottom(NULL),
  _requested_dynamic_archive_top(NULL),
  _mapped_static_archive_bottom(NULL),
  _mapped_static_archive_top(NULL),
  _buffer_to_requested_delta(0),
  _rw_region("rw", MAX_SHARED_DELTA),
  _ro_region("ro", MAX_SHARED_DELTA),
  _rw_src_objs(),
  _ro_src_objs(),
  _src_obj_table(INITIAL_TABLE_SIZE, MAX_TABLE_SIZE),
  _total_closed_heap_region_size(0),
  _total_open_heap_region_size(0),
  _estimated_metaspaceobj_bytes(0),
  _estimated_hashtable_bytes(0)
{
  _klasses = new (ResourceObj::C_HEAP, mtClassShared) GrowableArray<Klass*>(4 * K, mtClassShared);
  _symbols = new (ResourceObj::C_HEAP, mtClassShared) GrowableArray<Symbol*>(256 * K, mtClassShared);
  _special_refs = new (ResourceObj::C_HEAP, mtClassShared) GrowableArray<SpecialRefInfo>(24 * K, mtClassShared);

  assert(_current == NULL, "must be");
  _current = this;
}

ArchiveBuilder::~ArchiveBuilder() {
  assert(_current == this, "must be");
  _current = NULL;

  clean_up_src_obj_table();

  for (int i = 0; i < _symbols->length(); i++) {
    _symbols->at(i)->decrement_refcount();
  }

  delete _klasses;
  delete _symbols;
  delete _special_refs;
  if (_shared_rs.is_reserved()) {
    _shared_rs.release();
  }
}

bool ArchiveBuilder::is_dumping_full_module_graph() {
  return DumpSharedSpaces && MetaspaceShared::use_full_module_graph();
}

class GatherKlassesAndSymbols : public UniqueMetaspaceClosure {
  ArchiveBuilder* _builder;

public:
  GatherKlassesAndSymbols(ArchiveBuilder* builder) : _builder(builder) {}

  virtual bool do_unique_ref(Ref* ref, bool read_only) {
    return _builder->gather_klass_and_symbol(ref, read_only);
  }
};

bool ArchiveBuilder::gather_klass_and_symbol(MetaspaceClosure::Ref* ref, bool read_only) {
  if (ref->obj() == NULL) {
    return false;
  }
  if (get_follow_mode(ref) != make_a_copy) {
    return false;
  }
  if (ref->msotype() == MetaspaceObj::ClassType) {
    Klass* klass = (Klass*)ref->obj();
    assert(klass->is_klass(), "must be");
    if (!is_excluded(klass)) {
      _klasses->append(klass);
    }
    // See RunTimeClassInfo::get_for()
    _estimated_metaspaceobj_bytes += align_up(BytesPerWord, SharedSpaceObjectAlignment);
  } else if (ref->msotype() == MetaspaceObj::SymbolType) {
    // Make sure the symbol won't be GC'ed while we are dumping the archive.
    Symbol* sym = (Symbol*)ref->obj();
    sym->increment_refcount();
    _symbols->append(sym);
  }

  int bytes = ref->size() * BytesPerWord;
  _estimated_metaspaceobj_bytes += align_up(bytes, SharedSpaceObjectAlignment);

  return true; // recurse
}

void ArchiveBuilder::gather_klasses_and_symbols() {
  ResourceMark rm;
  log_info(cds)("Gathering classes and symbols ... ");
  GatherKlassesAndSymbols doit(this);
  iterate_roots(&doit, /*is_relocating_pointers=*/false);
#if INCLUDE_CDS_JAVA_HEAP
  if (is_dumping_full_module_graph()) {
    ClassLoaderDataShared::iterate_symbols(&doit);
  }
#endif
  doit.finish();

  if (DumpSharedSpaces) {
    // To ensure deterministic contents in the static archive, we need to ensure that
    // we iterate the MetaspaceObjs in a deterministic order. It doesn't matter where
    // the MetaspaceObjs are located originally, as they are copied sequentially into
    // the archive during the iteration.
    //
    // The only issue here is that the symbol table and the system dictionaries may be
    // randomly ordered, so we copy the symbols and klasses into two arrays and sort
    // them deterministically.
    //
    // During -Xshare:dump, the order of Symbol creation is strictly determined by
    // the SharedClassListFile (class loading is done in a single thread and the JIT
    // is disabled). Also, Symbols are allocated in monotonically increasing addresses
    // (see Symbol::operator new(size_t, int)). So if we iterate the Symbols by
    // ascending address order, we ensure that all Symbols are copied into deterministic
    // locations in the archive.
    //
    // TODO: in the future, if we want to produce deterministic contents in the
    // dynamic archive, we might need to sort the symbols alphabetically (also see
    // DynamicArchiveBuilder::sort_methods()).
    sort_symbols_and_fix_hash();
    sort_klasses();

    // TODO -- we need a proper estimate for the archived modules, etc,
    // but this should be enough for now
    _estimated_metaspaceobj_bytes += 200 * 1024 * 1024;
  }
}

int ArchiveBuilder::compare_symbols_by_address(Symbol** a, Symbol** b) {
  if (a[0] < b[0]) {
    return -1;
  } else {
    assert(a[0] > b[0], "Duplicated symbol %s unexpected", (*a)->as_C_string());
    return 1;
  }
}

void ArchiveBuilder::sort_symbols_and_fix_hash() {
  log_info(cds)("Sorting symbols and fixing identity hash ... ");
  os::init_random(0x12345678);
  _symbols->sort(compare_symbols_by_address);
  for (int i = 0; i < _symbols->length(); i++) {
    assert(_symbols->at(i)->is_permanent(), "archived symbols must be permanent");
    _symbols->at(i)->update_identity_hash();
  }
}

int ArchiveBuilder::compare_klass_by_name(Klass** a, Klass** b) {
  return a[0]->name()->fast_compare(b[0]->name());
}

void ArchiveBuilder::sort_klasses() {
  log_info(cds)("Sorting classes ... ");
  _klasses->sort(compare_klass_by_name);
}

size_t ArchiveBuilder::estimate_archive_size() {
  // size of the symbol table and two dictionaries, plus the RunTimeClassInfo's
  size_t symbol_table_est = SymbolTable::estimate_size_for_archive();
  size_t dictionary_est = SystemDictionaryShared::estimate_size_for_archive();
  _estimated_hashtable_bytes = symbol_table_est + dictionary_est;

  size_t total = 0;

  total += _estimated_metaspaceobj_bytes;
  total += _estimated_hashtable_bytes;

  // allow fragmentation at the end of each dump region
  total += _total_dump_regions * MetaspaceShared::core_region_alignment();

  log_info(cds)("_estimated_hashtable_bytes = " SIZE_FORMAT " + " SIZE_FORMAT " = " SIZE_FORMAT,
                symbol_table_est, dictionary_est, _estimated_hashtable_bytes);
  log_info(cds)("_estimated_metaspaceobj_bytes = " SIZE_FORMAT, _estimated_metaspaceobj_bytes);
  log_info(cds)("total estimate bytes = " SIZE_FORMAT, total);

  return align_up(total, MetaspaceShared::core_region_alignment());
}

address ArchiveBuilder::reserve_buffer() {
  size_t buffer_size = estimate_archive_size();
  ReservedSpace rs(buffer_size, MetaspaceShared::core_region_alignment(), os::vm_page_size());
  if (!rs.is_reserved()) {
    log_error(cds)("Failed to reserve " SIZE_FORMAT " bytes of output buffer.", buffer_size);
    vm_direct_exit(0);
  }

  // buffer_bottom is the lowest address of the 2 core regions (rw, ro) when
  // we are copying the class metadata into the buffer.
  address buffer_bottom = (address)rs.base();
  log_info(cds)("Reserved output buffer space at " PTR_FORMAT " [" SIZE_FORMAT " bytes]",
                p2i(buffer_bottom), buffer_size);
  _shared_rs = rs;

  _buffer_bottom = buffer_bottom;
  _last_verified_top = buffer_bottom;
  _current_dump_space = &_rw_region;
  _num_dump_regions_used = 1;
  _other_region_used_bytes = 0;
  _current_dump_space->init(&_shared_rs, &_shared_vs);

  ArchivePtrMarker::initialize(&_ptrmap, &_shared_vs);

  // The bottom of the static archive should be mapped at this address by default.
  _requested_static_archive_bottom = (address)MetaspaceShared::requested_base_address();

  // The bottom of the archive (that I am writing now) should be mapped at this address by default.
  address my_archive_requested_bottom;

  if (DumpSharedSpaces) {
    my_archive_requested_bottom = _requested_static_archive_bottom;
  } else {
    _mapped_static_archive_bottom = (address)MetaspaceObj::shared_metaspace_base();
    _mapped_static_archive_top  = (address)MetaspaceObj::shared_metaspace_top();
    assert(_mapped_static_archive_top >= _mapped_static_archive_bottom, "must be");
    size_t static_archive_size = _mapped_static_archive_top - _mapped_static_archive_bottom;

    // At run time, we will mmap the dynamic archive at my_archive_requested_bottom
    _requested_static_archive_top = _requested_static_archive_bottom + static_archive_size;
    my_archive_requested_bottom = align_up(_requested_static_archive_top, MetaspaceShared::core_region_alignment());

    _requested_dynamic_archive_bottom = my_archive_requested_bottom;
  }

  _buffer_to_requested_delta = my_archive_requested_bottom - _buffer_bottom;

  address my_archive_requested_top = my_archive_requested_bottom + buffer_size;
  if (my_archive_requested_bottom <  _requested_static_archive_bottom ||
      my_archive_requested_top    <= _requested_static_archive_bottom) {
    // Size overflow.
    log_error(cds)("my_archive_requested_bottom = " INTPTR_FORMAT, p2i(my_archive_requested_bottom));
    log_error(cds)("my_archive_requested_top    = " INTPTR_FORMAT, p2i(my_archive_requested_top));
    log_error(cds)("SharedBaseAddress (" INTPTR_FORMAT ") is too high. "
                   "Please rerun java -Xshare:dump with a lower value", p2i(_requested_static_archive_bottom));
    vm_direct_exit(0);
  }

  if (DumpSharedSpaces) {
    // We don't want any valid object to be at the very bottom of the archive.
    // See ArchivePtrMarker::mark_pointer().
    rw_region()->allocate(16);
  }

  return buffer_bottom;
}

void ArchiveBuilder::iterate_sorted_roots(MetaspaceClosure* it, bool is_relocating_pointers) {
  int i;

  if (!is_relocating_pointers) {
    // Don't relocate _symbols, so we can safely call decrement_refcount on the
    // original symbols.
    int num_symbols = _symbols->length();
    for (i = 0; i < num_symbols; i++) {
      it->push(_symbols->adr_at(i));
    }
  }

  int num_klasses = _klasses->length();
  for (i = 0; i < num_klasses; i++) {
    it->push(_klasses->adr_at(i));
  }

  iterate_roots(it, is_relocating_pointers);
}

class GatherSortedSourceObjs : public MetaspaceClosure {
  ArchiveBuilder* _builder;

public:
  GatherSortedSourceObjs(ArchiveBuilder* builder) : _builder(builder) {}

  virtual bool do_ref(Ref* ref, bool read_only) {
    return _builder->gather_one_source_obj(enclosing_ref(), ref, read_only);
  }

  virtual void push_special(SpecialRef type, Ref* ref, intptr_t* p) {
    assert(type == _method_entry_ref, "only special type allowed for now");
    address src_obj = ref->obj();
    size_t field_offset = pointer_delta(p, src_obj, sizeof(u1));
    _builder->add_special_ref(type, src_obj, field_offset);
  };

  virtual void do_pending_ref(Ref* ref) {
    if (ref->obj() != NULL) {
      _builder->remember_embedded_pointer_in_copied_obj(enclosing_ref(), ref);
    }
  }
};

bool ArchiveBuilder::gather_one_source_obj(MetaspaceClosure::Ref* enclosing_ref,
                                           MetaspaceClosure::Ref* ref, bool read_only) {
  address src_obj = ref->obj();
  if (src_obj == NULL) {
    return false;
  }
  ref->set_keep_after_pushing();
  remember_embedded_pointer_in_copied_obj(enclosing_ref, ref);

  FollowMode follow_mode = get_follow_mode(ref);
  SourceObjInfo src_info(ref, read_only, follow_mode);
  bool created;
  SourceObjInfo* p = _src_obj_table.put_if_absent(src_obj, src_info, &created);
  if (created) {
    if (_src_obj_table.maybe_grow()) {
      log_info(cds, hashtables)("Expanded _src_obj_table to %d", _src_obj_table.table_size());
    }
  }

  assert(p->read_only() == src_info.read_only(), "must be");

  if (created && src_info.should_copy()) {
    ref->set_user_data((void*)p);
    if (read_only) {
      _ro_src_objs.append(enclosing_ref, p);
    } else {
      _rw_src_objs.append(enclosing_ref, p);
    }
    return true; // Need to recurse into this ref only if we are copying it
  } else {
    return false;
  }
}

void ArchiveBuilder::add_special_ref(MetaspaceClosure::SpecialRef type, address src_obj, size_t field_offset) {
  _special_refs->append(SpecialRefInfo(type, src_obj, field_offset));
}

void ArchiveBuilder::remember_embedded_pointer_in_copied_obj(MetaspaceClosure::Ref* enclosing_ref,
                                                             MetaspaceClosure::Ref* ref) {
  assert(ref->obj() != NULL, "should have checked");

  if (enclosing_ref != NULL) {
    SourceObjInfo* src_info = (SourceObjInfo*)enclosing_ref->user_data();
    if (src_info == NULL) {
      // source objects of point_to_it/set_to_null types are not copied
      // so we don't need to remember their pointers.
    } else {
      if (src_info->read_only()) {
        _ro_src_objs.remember_embedded_pointer(src_info, ref);
      } else {
        _rw_src_objs.remember_embedded_pointer(src_info, ref);
      }
    }
  }
}

void ArchiveBuilder::gather_source_objs() {
  ResourceMark rm;
  log_info(cds)("Gathering all archivable objects ... ");
  gather_klasses_and_symbols();
  GatherSortedSourceObjs doit(this);
  iterate_sorted_roots(&doit, /*is_relocating_pointers=*/false);
  doit.finish();
}

bool ArchiveBuilder::is_excluded(Klass* klass) {
  if (klass->is_instance_klass()) {
    InstanceKlass* ik = InstanceKlass::cast(klass);
    return SystemDictionaryShared::is_excluded_class(ik);
  } else if (klass->is_objArray_klass()) {
    if (DynamicDumpSharedSpaces) {
      // Archiving of array klasses is not supported in the dynamic archive for now.
      return true;
    }
    Klass* bottom = ObjArrayKlass::cast(klass)->bottom_klass();
    if (bottom->is_instance_klass()) {
      return SystemDictionaryShared::is_excluded_class(InstanceKlass::cast(bottom));
    }
  }

  return false;
}

ArchiveBuilder::FollowMode ArchiveBuilder::get_follow_mode(MetaspaceClosure::Ref *ref) {
  address obj = ref->obj();
  if (MetaspaceShared::is_in_shared_metaspace(obj)) {
    // Don't dump existing shared metadata again.
    return point_to_it;
  } else if (ref->msotype() == MetaspaceObj::MethodDataType ||
             ref->msotype() == MetaspaceObj::MethodCountersType) {
    return set_to_null;
  } else {
    if (ref->msotype() == MetaspaceObj::ClassType) {
      Klass* klass = (Klass*)ref->obj();
      assert(klass->is_klass(), "must be");
      if (is_excluded(klass)) {
        ResourceMark rm;
        log_debug(cds, dynamic)("Skipping class (excluded): %s", klass->external_name());
        return set_to_null;
      }
    }

    return make_a_copy;
  }
}

void ArchiveBuilder::start_dump_space(DumpRegion* next) {
  address bottom = _last_verified_top;
  address top = (address)(current_dump_space()->top());
  _other_region_used_bytes += size_t(top - bottom);

  current_dump_space()->pack(next);
  _current_dump_space = next;
  _num_dump_regions_used ++;

  _last_verified_top = (address)(current_dump_space()->top());
}

void ArchiveBuilder::verify_estimate_size(size_t estimate, const char* which) {
  address bottom = _last_verified_top;
  address top = (address)(current_dump_space()->top());
  size_t used = size_t(top - bottom) + _other_region_used_bytes;
  int diff = int(estimate) - int(used);

  log_info(cds)("%s estimate = " SIZE_FORMAT " used = " SIZE_FORMAT "; diff = %d bytes", which, estimate, used, diff);
  assert(diff >= 0, "Estimate is too small");

  _last_verified_top = top;
  _other_region_used_bytes = 0;
}

void ArchiveBuilder::dump_rw_metadata() {
  ResourceMark rm;
  log_info(cds)("Allocating RW objects ... ");
  make_shallow_copies(&_rw_region, &_rw_src_objs);

#if INCLUDE_CDS_JAVA_HEAP
  if (is_dumping_full_module_graph()) {
    // Archive the ModuleEntry's and PackageEntry's of the 3 built-in loaders
    char* start = rw_region()->top();
    ClassLoaderDataShared::allocate_archived_tables();
    alloc_stats()->record_modules(rw_region()->top() - start, /*read_only*/false);
  }
#endif
}

void ArchiveBuilder::dump_ro_metadata() {
  ResourceMark rm;
  log_info(cds)("Allocating RO objects ... ");

  start_dump_space(&_ro_region);
  make_shallow_copies(&_ro_region, &_ro_src_objs);

#if INCLUDE_CDS_JAVA_HEAP
  if (is_dumping_full_module_graph()) {
    char* start = ro_region()->top();
    ClassLoaderDataShared::init_archived_tables();
    alloc_stats()->record_modules(ro_region()->top() - start, /*read_only*/true);
  }
#endif
}

void ArchiveBuilder::make_shallow_copies(DumpRegion *dump_region,
                                         const ArchiveBuilder::SourceObjList* src_objs) {
  for (int i = 0; i < src_objs->objs()->length(); i++) {
    make_shallow_copy(dump_region, src_objs->objs()->at(i));
  }
  log_info(cds)("done (%d objects)", src_objs->objs()->length());
}

void ArchiveBuilder::make_shallow_copy(DumpRegion *dump_region, SourceObjInfo* src_info) {
  MetaspaceClosure::Ref* ref = src_info->ref();
  address src = ref->obj();
  int bytes = src_info->size_in_bytes();
  char* dest;
  char* oldtop;
  char* newtop;

  oldtop = dump_region->top();
  if (ref->msotype() == MetaspaceObj::ClassType) {
    // Save a pointer immediately in front of an InstanceKlass, so
    // we can do a quick lookup from InstanceKlass* -> RunTimeClassInfo*
    // without building another hashtable. See RunTimeClassInfo::get_for()
    // in systemDictionaryShared.cpp.
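    // Illustratively, the dump region then looks like
    //    [ one pointer slot ][ InstanceKlass ... ]
    // so the RunTimeClassInfo* can later be read back just below the
    // archived InstanceKlass (the exact lookup is in RunTimeClassInfo::get_for()).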
    Klass* klass = (Klass*)src;
    if (klass->is_instance_klass()) {
      SystemDictionaryShared::validate_before_archiving(InstanceKlass::cast(klass));
      dump_region->allocate(sizeof(address));
    }
  }
  dest = dump_region->allocate(bytes);
  newtop = dump_region->top();

  memcpy(dest, src, bytes);

  intptr_t* archived_vtable = CppVtables::get_archived_vtable(ref->msotype(), (address)dest);
  if (archived_vtable != NULL) {
    *(address*)dest = (address)archived_vtable;
    ArchivePtrMarker::mark_pointer((address*)dest);
  }

  log_trace(cds)("Copy: " PTR_FORMAT " ==> " PTR_FORMAT " %d", p2i(src), p2i(dest), bytes);
  src_info->set_dumped_addr((address)dest);

  _alloc_stats.record(ref->msotype(), int(newtop - oldtop), src_info->read_only());
}

address ArchiveBuilder::get_dumped_addr(address src_obj) const {
  SourceObjInfo* p = _src_obj_table.get(src_obj);
  assert(p != NULL, "must be");

  return p->dumped_addr();
}

void ArchiveBuilder::relocate_embedded_pointers(ArchiveBuilder::SourceObjList* src_objs) {
  for (int i = 0; i < src_objs->objs()->length(); i++) {
    src_objs->relocate(i, this);
  }
}

void ArchiveBuilder::update_special_refs() {
  for (int i = 0; i < _special_refs->length(); i++) {
    SpecialRefInfo s = _special_refs->at(i);
    size_t field_offset = s.field_offset();
    address src_obj = s.src_obj();
    address dst_obj = get_dumped_addr(src_obj);
    intptr_t* src_p = (intptr_t*)(src_obj + field_offset);
    intptr_t* dst_p = (intptr_t*)(dst_obj + field_offset);
    assert(s.type() == MetaspaceClosure::_method_entry_ref, "only special type allowed for now");

    assert(*src_p == *dst_p, "must be a copy");
    ArchivePtrMarker::mark_pointer((address*)dst_p);
  }
}

class RefRelocator: public MetaspaceClosure {
  ArchiveBuilder* _builder;

public:
  RefRelocator(ArchiveBuilder* builder) : _builder(builder) {}

  virtual bool do_ref(Ref* ref, bool read_only) {
    if (ref->not_null()) {
      ref->update(_builder->get_dumped_addr(ref->obj()));
      ArchivePtrMarker::mark_pointer(ref->addr());
    }
    return false; // Do not recurse.
  }
};

void ArchiveBuilder::relocate_roots() {
  log_info(cds)("Relocating external roots ... ");
  ResourceMark rm;
  RefRelocator doit(this);
  iterate_sorted_roots(&doit, /*is_relocating_pointers=*/true);
  doit.finish();
  log_info(cds)("done");
}

void ArchiveBuilder::relocate_metaspaceobj_embedded_pointers() {
  log_info(cds)("Relocating embedded pointers in core regions ... ");
  relocate_embedded_pointers(&_rw_src_objs);
  relocate_embedded_pointers(&_ro_src_objs);
  update_special_refs();
}

// We must relocate vmClasses::_klasses[] only after we have copied the
// java objects during dump_java_heap_objects(): during the object copy, we operate on
// old objects which assert that their klass is the original klass.
void ArchiveBuilder::relocate_vm_classes() {
  log_info(cds)("Relocating vmClasses::_klasses[] ... ");
  ResourceMark rm;
  RefRelocator doit(this);
  vmClasses::metaspace_pointers_do(&doit);
}

void ArchiveBuilder::make_klasses_shareable() {
  int num_instance_klasses = 0;
  int num_boot_klasses = 0;
  int num_platform_klasses = 0;
  int num_app_klasses = 0;
  int num_hidden_klasses = 0;
  int num_unlinked_klasses = 0;
  int num_unregistered_klasses = 0;
  int num_obj_array_klasses = 0;
  int num_type_array_klasses = 0;

  for (int i = 0; i < klasses()->length(); i++) {
    const char* type;
    const char* unlinked = "";
    const char* hidden = "";
    Klass* k = klasses()->at(i);
    k->remove_java_mirror();
    if (k->is_objArray_klass()) {
      // InstanceKlass and TypeArrayKlass will in turn call remove_unshareable_info
      // on their array classes.
      num_obj_array_klasses ++;
      type = "array";
    } else if (k->is_typeArray_klass()) {
      num_type_array_klasses ++;
      type = "array";
      k->remove_unshareable_info();
    } else {
      assert(k->is_instance_klass(), " must be");
      num_instance_klasses ++;
      InstanceKlass* ik = InstanceKlass::cast(k);
      if (DynamicDumpSharedSpaces) {
        // For a static dump, the class loader types are already set.
        ik->assign_class_loader_type();
      }
      if (ik->is_shared_boot_class()) {
        type = "boot";
        num_boot_klasses ++;
      } else if (ik->is_shared_platform_class()) {
        type = "plat";
        num_platform_klasses ++;
      } else if (ik->is_shared_app_class()) {
        type = "app";
        num_app_klasses ++;
      } else {
        assert(ik->is_shared_unregistered_class(), "must be");
        type = "unreg";
        num_unregistered_klasses ++;
      }

      if (!ik->is_linked()) {
        num_unlinked_klasses ++;
        unlinked = " ** unlinked";
      }

      if (ik->is_hidden()) {
        num_hidden_klasses ++;
        hidden = " ** hidden";
      }

      MetaspaceShared::rewrite_nofast_bytecodes_and_calculate_fingerprints(Thread::current(), ik);
      ik->remove_unshareable_info();
    }

    if (log_is_enabled(Debug, cds, class)) {
      ResourceMark rm;
      log_debug(cds, class)("klasses[%5d] = " PTR_FORMAT " %-5s %s%s%s", i, p2i(to_requested(k)), type, k->external_name(), hidden, unlinked);
    }
  }

  log_info(cds)("Number of classes %d", num_instance_klasses + num_obj_array_klasses + num_type_array_klasses);
  log_info(cds)("    instance classes   = %5d", num_instance_klasses);
  log_info(cds)("      boot             = %5d", num_boot_klasses);
  log_info(cds)("      app              = %5d", num_app_klasses);
  log_info(cds)("      platform         = %5d", num_platform_klasses);
  log_info(cds)("      unregistered     = %5d", num_unregistered_klasses);
  log_info(cds)("      (hidden)         = %5d", num_hidden_klasses);
  log_info(cds)("      (unlinked)       = %5d", num_unlinked_klasses);
  log_info(cds)("    obj array classes  = %5d", num_obj_array_klasses);
  log_info(cds)("    type array classes = %5d", num_type_array_klasses);
  log_info(cds)("               symbols = %5d", _symbols->length());
}

uintx ArchiveBuilder::buffer_to_offset(address p) const {
  address requested_p = to_requested(p);
  assert(requested_p >= _requested_static_archive_bottom, "must be");
  return requested_p - _requested_static_archive_bottom;
}

uintx ArchiveBuilder::any_to_offset(address p) const {
  if (is_in_mapped_static_archive(p)) {
    assert(DynamicDumpSharedSpaces, "must be");
    return p - _mapped_static_archive_bottom;
  }
  return buffer_to_offset(p);
}
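
// For example (illustrative addresses only): during a dynamic dump, if the mapped
// static archive starts at 0x800000000 and p == 0x800000010 points into it,
// any_to_offset(p) returns 0x10. A pointer into the buffer is instead translated
// to its requested address first and measured from _requested_static_archive_bottom.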

// Update a Java object to point its Klass* to the new location after
// the shared archive has been compacted.
void ArchiveBuilder::relocate_klass_ptr(oop o) {
  assert(DumpSharedSpaces, "sanity");
  Klass* k = get_relocated_klass(o->klass());
  Klass* requested_k = to_requested(k);
  narrowKlass nk = CompressedKlassPointers::encode_not_null(requested_k, _requested_static_archive_bottom);
  o->set_narrow_klass(nk);
}

// RelocateBufferToRequested --- Relocate all the pointers in rw/ro,
// so that the archive can be mapped to the "requested" location without runtime relocation.
//
// - See ArchiveBuilder header for the definition of "buffer", "mapped" and "requested"
// - ArchivePtrMarker::ptrmap() marks all the pointers in the rw/ro regions
// - Every pointer must have one of the following values:
//   [a] NULL:
//       No relocation is needed. Remove this pointer from ptrmap so we don't need to
//       consider it at runtime.
//   [b] Points into an object X which is inside the buffer:
//       Adjust this pointer by _buffer_to_requested_delta, so it points to X
//       when the archive is mapped at the requested location.
//   [c] Points into an object Y which is inside the mapped static archive:
//       - This happens only during a dynamic dump
//       - Adjust this pointer by _mapped_to_requested_static_archive_delta,
//         so it points to Y when the static archive is mapped at the requested location.
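//
// For example (illustrative addresses only): with the buffer at 0x7f8000000000 and
// the requested base at 0x800000000, case [b] adds
//     _buffer_to_requested_delta == 0x800000000 - 0x7f8000000000
// to each marked pointer; case [c] instead shifts the pointer by the
// (mapped -> requested) distance of the static archive.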
template <bool STATIC_DUMP>
class RelocateBufferToRequested : public BitMapClosure {
  ArchiveBuilder* _builder;
  address _buffer_bottom;
  intx _buffer_to_requested_delta;
  intx _mapped_to_requested_static_archive_delta;
  size_t _max_non_null_offset;

 public:
  RelocateBufferToRequested(ArchiveBuilder* builder) {
    _builder = builder;
    _buffer_bottom = _builder->buffer_bottom();
    _buffer_to_requested_delta = builder->buffer_to_requested_delta();
    _mapped_to_requested_static_archive_delta = builder->requested_static_archive_bottom() - builder->mapped_static_archive_bottom();
    _max_non_null_offset = 0;

    address bottom = _builder->buffer_bottom();
    address top = _builder->buffer_top();
    address new_bottom = bottom + _buffer_to_requested_delta;
    address new_top = top + _buffer_to_requested_delta;
    log_debug(cds)("Relocating archive from [" INTPTR_FORMAT " - " INTPTR_FORMAT "] to "
                   "[" INTPTR_FORMAT " - " INTPTR_FORMAT "]",
                   p2i(bottom), p2i(top),
                   p2i(new_bottom), p2i(new_top));
  }

  bool do_bit(size_t offset) {
    address* p = (address*)_buffer_bottom + offset;
    assert(_builder->is_in_buffer_space(p), "pointer must live in buffer space");

    if (*p == NULL) {
      // Clear the bit: a NULL pointer needs no relocation at runtime (case [a] above).
      ArchivePtrMarker::ptrmap()->clear_bit(offset);
    } else {
      if (STATIC_DUMP) {
        assert(_builder->is_in_buffer_space(*p), "old pointer must point inside buffer space");
        *p += _buffer_to_requested_delta;
        assert(_builder->is_in_requested_static_archive(*p), "new pointer must point inside requested archive");
      } else {
        if (_builder->is_in_buffer_space(*p)) {
          *p += _buffer_to_requested_delta;
          // TODO: assert that *p is now inside the requested dynamic archive
        } else {
          assert(_builder->is_in_mapped_static_archive(*p), "old pointer must point inside buffer space or mapped static archive");
          *p += _mapped_to_requested_static_archive_delta;
          assert(_builder->is_in_requested_static_archive(*p), "new pointer must point inside requested archive");
        }
      }
      _max_non_null_offset = offset;
    }

    return true; // keep iterating
  }

  void doit() {
    ArchivePtrMarker::ptrmap()->iterate(this);
    ArchivePtrMarker::compact(_max_non_null_offset);
  }
};


void ArchiveBuilder::relocate_to_requested() {
  ro_region()->pack();

  size_t my_archive_size = buffer_top() - buffer_bottom();

  if (DumpSharedSpaces) {
    _requested_static_archive_top = _requested_static_archive_bottom + my_archive_size;
    RelocateBufferToRequested<true> patcher(this);
    patcher.doit();
  } else {
    assert(DynamicDumpSharedSpaces, "must be");
    _requested_dynamic_archive_top = _requested_dynamic_archive_bottom + my_archive_size;
    RelocateBufferToRequested<false> patcher(this);
    patcher.doit();
  }
}

// Write detailed info to a mapfile to analyze contents of the archive.
// static dump:
//   java -Xshare:dump -Xlog:cds+map=trace:file=cds.map:none:filesize=0
// dynamic dump:
//   java -cp MyApp.jar -XX:ArchiveClassesAtExit=MyApp.jsa \
//        -Xlog:cds+map=trace:file=cds.map:none:filesize=0 MyApp
//
// We need to do some address translation because the buffers used at dump time may be mapped to
// a different location at runtime. At dump time, the buffers may be at arbitrary locations
// picked by the OS. At runtime, we try to map at a fixed location (SharedBaseAddress). For
// consistency, we log everything using runtime addresses.
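//
// For example, an archived Symbol might appear in the map file as a line like
// (illustrative values only):
//     0x0000000800000868: @@ Symbol            40 java/lang/Object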
class ArchiveBuilder::CDSMapLogger : AllStatic {
  static intx buffer_to_runtime_delta() {
    // Translate the buffers used by the RW/RO regions to their eventual (requested) locations
    // at runtime.
    return ArchiveBuilder::current()->buffer_to_requested_delta();
  }

  // rw/ro regions only
  static void log_metaspace_region(const char* name, DumpRegion* region,
                                   const ArchiveBuilder::SourceObjList* src_objs) {
    address region_base = address(region->base());
    address region_top  = address(region->top());
    log_region(name, region_base, region_top, region_base + buffer_to_runtime_delta());
    log_metaspace_objects(region, src_objs);
  }

#define _LOG_PREFIX PTR_FORMAT ": @@ %-17s %d"

  static void log_klass(Klass* k, address runtime_dest, const char* type_name, int bytes, Thread* current) {
    ResourceMark rm(current);
    log_debug(cds, map)(_LOG_PREFIX " %s",
                        p2i(runtime_dest), type_name, bytes, k->external_name());
  }
  static void log_method(Method* m, address runtime_dest, const char* type_name, int bytes, Thread* current) {
    ResourceMark rm(current);
    log_debug(cds, map)(_LOG_PREFIX " %s",
                        p2i(runtime_dest), type_name, bytes, m->external_name());
  }

  // rw/ro regions only
  static void log_metaspace_objects(DumpRegion* region, const ArchiveBuilder::SourceObjList* src_objs) {
    address last_obj_base = address(region->base());
    address last_obj_end  = address(region->base());
    address region_end    = address(region->end());
    Thread* current = Thread::current();
    for (int i = 0; i < src_objs->objs()->length(); i++) {
      SourceObjInfo* src_info = src_objs->at(i);
      address src = src_info->orig_obj();
      address dest = src_info->dumped_addr();
      log_data(last_obj_base, dest, last_obj_base + buffer_to_runtime_delta());
      address runtime_dest = dest + buffer_to_runtime_delta();
      int bytes = src_info->size_in_bytes();

      MetaspaceObj::Type type = src_info->msotype();
      const char* type_name = MetaspaceObj::type_name(type);

      switch (type) {
      case MetaspaceObj::ClassType:
        log_klass((Klass*)src, runtime_dest, type_name, bytes, current);
        break;
      case MetaspaceObj::ConstantPoolType:
        log_klass(((ConstantPool*)src)->pool_holder(),
                    runtime_dest, type_name, bytes, current);
        break;
      case MetaspaceObj::ConstantPoolCacheType:
        log_klass(((ConstantPoolCache*)src)->constant_pool()->pool_holder(),
                    runtime_dest, type_name, bytes, current);
        break;
      case MetaspaceObj::MethodType:
        log_method((Method*)src, runtime_dest, type_name, bytes, current);
        break;
      case MetaspaceObj::ConstMethodType:
        log_method(((ConstMethod*)src)->method(), runtime_dest, type_name, bytes, current);
        break;
      case MetaspaceObj::SymbolType:
        {
          ResourceMark rm(current);
          Symbol* s = (Symbol*)src;
          log_debug(cds, map)(_LOG_PREFIX " %s", p2i(runtime_dest), type_name, bytes,
                              s->as_quoted_ascii());
        }
        break;
      default:
        log_debug(cds, map)(_LOG_PREFIX, p2i(runtime_dest), type_name, bytes);
        break;
      }

      last_obj_base = dest;
      last_obj_end  = dest + bytes;
    }

    log_data(last_obj_base, last_obj_end, last_obj_base + buffer_to_runtime_delta());
    if (last_obj_end < region_end) {
      log_debug(cds, map)(PTR_FORMAT ": @@ Misc data " SIZE_FORMAT " bytes",
                          p2i(last_obj_end + buffer_to_runtime_delta()),
                          size_t(region_end - last_obj_end));
      log_data(last_obj_end, region_end, last_obj_end + buffer_to_runtime_delta());
    }
  }

#undef _LOG_PREFIX

  // Log information about a region, whose address at dump time is [base .. top). At
  // runtime, this region will be mapped to runtime_base.  runtime_base is 0 if this
  // region will be mapped at os-selected addresses (such as the bitmap region), or will
  // be accessed with os::read (the header).
  static void log_region(const char* name, address base, address top, address runtime_base) {
    size_t size = top - base;
    base = runtime_base;
    top = runtime_base + size;
    log_info(cds, map)("[%-18s " PTR_FORMAT " - " PTR_FORMAT " " SIZE_FORMAT_W(9) " bytes]",
                       name, p2i(base), p2i(top), size);
  }

  // open and closed archive regions
  static void log_heap_regions(const char* which, GrowableArray<MemRegion> *regions) {
#if INCLUDE_CDS_JAVA_HEAP
    for (int i = 0; i < regions->length(); i++) {
      address start = address(regions->at(i).start());
      address end = address(regions->at(i).end());
      log_region(which, start, end, start);

      while (start < end) {
        size_t byte_size;
        oop archived_oop = cast_to_oop(start);
        oop original_oop = HeapShared::get_original_object(archived_oop);
        if (original_oop != NULL) {
          ResourceMark rm;
          log_info(cds, map)(PTR_FORMAT ": @@ Object %s",
                             p2i(start), original_oop->klass()->external_name());
          byte_size = original_oop->size() * BytesPerWord;
        } else if (archived_oop == HeapShared::roots()) {
          // HeapShared::roots() is copied specially so it doesn't exist in
          // HeapShared::OriginalObjectTable. See HeapShared::copy_roots().
          log_info(cds, map)(PTR_FORMAT ": @@ Object HeapShared::roots (ObjArray)",
                             p2i(start));
          byte_size = objArrayOopDesc::object_size(HeapShared::roots()->length()) * BytesPerWord;
        } else {
          // We have reached the end of the region
          break;
        }
        address oop_end = start + byte_size;
        log_data(start, oop_end, start, /*is_heap=*/true);
        start = oop_end;
      }
      if (start < end) {
        log_info(cds, map)(PTR_FORMAT ": @@ Unused heap space " SIZE_FORMAT " bytes",
                           p2i(start), size_t(end - start));
        log_data(start, end, start, /*is_heap=*/true);
      }
    }
#endif
  }

  // Log all the data [base...top). Pretend that the base address
  // will be mapped to runtime_base at run-time.
  static void log_data(address base, address top, address runtime_base, bool is_heap = false) {
    assert(top >= base, "must be");

    LogStreamHandle(Trace, cds, map) lsh;
    if (lsh.is_enabled()) {
      int unitsize = sizeof(address);
      if (is_heap && UseCompressedOops) {
        // This makes the compressed oop pointers easier to read, but
        // longs and doubles will be split into two words.
        unitsize = sizeof(narrowOop);
      }
      os::print_hex_dump(&lsh, base, top, unitsize, 32, runtime_base);
    }
  }

  static void log_header(FileMapInfo* mapinfo) {
    LogStreamHandle(Info, cds, map) lsh;
    if (lsh.is_enabled()) {
      mapinfo->print(&lsh);
    }
  }

public:
  static void log(ArchiveBuilder* builder, FileMapInfo* mapinfo,
                  GrowableArray<MemRegion> *closed_heap_regions,
                  GrowableArray<MemRegion> *open_heap_regions,
                  char* bitmap, size_t bitmap_size_in_bytes) {
    log_info(cds, map)("%s CDS archive map for %s", DumpSharedSpaces ? "Static" : "Dynamic", mapinfo->full_path());

    address header = address(mapinfo->header());
    address header_end = header + mapinfo->header()->header_size();
    log_region("header", header, header_end, 0);
    log_header(mapinfo);
    log_data(header, header_end, 0);

    DumpRegion* rw_region = &builder->_rw_region;
    DumpRegion* ro_region = &builder->_ro_region;

    log_metaspace_region("rw region", rw_region, &builder->_rw_src_objs);
    log_metaspace_region("ro region", ro_region, &builder->_ro_src_objs);

    address bitmap_end = address(bitmap + bitmap_size_in_bytes);
    log_region("bitmap", address(bitmap), bitmap_end, 0);
    log_data((address)bitmap, bitmap_end, 0);

    if (closed_heap_regions != NULL) {
      log_heap_regions("closed heap region", closed_heap_regions);
    }
    if (open_heap_regions != NULL) {
      log_heap_regions("open heap region", open_heap_regions);
    }

    log_info(cds, map)("[End of CDS archive map]");
  }
}; // end ArchiveBuilder::CDSMapLogger

void ArchiveBuilder::print_stats() {
  _alloc_stats.print_stats(int(_ro_region.used()), int(_rw_region.used()));
}

void ArchiveBuilder::clean_up_src_obj_table() {
  SrcObjTableCleaner cleaner;
  _src_obj_table.iterate(&cleaner);
}

void ArchiveBuilder::write_archive(FileMapInfo* mapinfo,
                                   GrowableArray<MemRegion>* closed_heap_regions,
                                   GrowableArray<MemRegion>* open_heap_regions,
                                   GrowableArray<ArchiveHeapOopmapInfo>* closed_heap_oopmaps,
                                   GrowableArray<ArchiveHeapOopmapInfo>* open_heap_oopmaps) {
  // Make sure NUM_CDS_REGIONS (exported in cds.h) agrees with
  // MetaspaceShared::n_regions (internal to hotspot).
  assert(NUM_CDS_REGIONS == MetaspaceShared::n_regions, "sanity");

  write_region(mapinfo, MetaspaceShared::rw, &_rw_region, /*read_only=*/false, /*allow_exec=*/false);
  write_region(mapinfo, MetaspaceShared::ro, &_ro_region, /*read_only=*/true, /*allow_exec=*/false);

  size_t bitmap_size_in_bytes;
  char* bitmap = mapinfo->write_bitmap_region(ArchivePtrMarker::ptrmap(), closed_heap_oopmaps, open_heap_oopmaps,
                                              bitmap_size_in_bytes);

  if (closed_heap_regions != NULL) {
    _total_closed_heap_region_size = mapinfo->write_heap_regions(
                                        closed_heap_regions,
                                        closed_heap_oopmaps,
                                        MetaspaceShared::first_closed_heap_region,
                                        MetaspaceShared::max_num_closed_heap_regions);
    _total_open_heap_region_size = mapinfo->write_heap_regions(
                                        open_heap_regions,
                                        open_heap_oopmaps,
                                        MetaspaceShared::first_open_heap_region,
                                        MetaspaceShared::max_num_open_heap_regions);
  }

  print_region_stats(mapinfo, closed_heap_regions, open_heap_regions);

  mapinfo->set_requested_base((char*)MetaspaceShared::requested_base_address());
  mapinfo->set_header_crc(mapinfo->compute_header_crc());
  // After this point, we should not write any data into mapinfo->header() since doing
  // so would invalidate the CRC we have just computed.
  mapinfo->write_header();
  mapinfo->close();

  if (log_is_enabled(Info, cds)) {
    print_stats();
  }

  if (log_is_enabled(Info, cds, map)) {
    CDSMapLogger::log(this, mapinfo, closed_heap_regions, open_heap_regions,
                      bitmap, bitmap_size_in_bytes);
  }
  CDS_JAVA_HEAP_ONLY(HeapShared::destroy_archived_object_cache());
  FREE_C_HEAP_ARRAY(char, bitmap);
}

void ArchiveBuilder::write_region(FileMapInfo* mapinfo, int region_idx, DumpRegion* dump_region, bool read_only, bool allow_exec) {
  mapinfo->write_region(region_idx, dump_region->base(), dump_region->used(), read_only, allow_exec);
}

void ArchiveBuilder::print_region_stats(FileMapInfo *mapinfo,
                                        GrowableArray<MemRegion>* closed_heap_regions,
                                        GrowableArray<MemRegion>* open_heap_regions) {
  // Print statistics of all the regions
  const size_t bitmap_used = mapinfo->space_at(MetaspaceShared::bm)->used();
  const size_t bitmap_reserved = mapinfo->space_at(MetaspaceShared::bm)->used_aligned();
  const size_t total_reserved = _ro_region.reserved()  + _rw_region.reserved() +
                                bitmap_reserved +
                                _total_closed_heap_region_size +
                                _total_open_heap_region_size;
  const size_t total_bytes = _ro_region.used()  + _rw_region.used() +
                             bitmap_used +
                             _total_closed_heap_region_size +
                             _total_open_heap_region_size;
  const double total_u_perc = percent_of(total_bytes, total_reserved);

  _rw_region.print(total_reserved);
  _ro_region.print(total_reserved);

  print_bitmap_region_stats(bitmap_used, total_reserved);

  if (closed_heap_regions != NULL) {
    print_heap_region_stats(closed_heap_regions, "ca", total_reserved);
    print_heap_region_stats(open_heap_regions, "oa", total_reserved);
  }

  log_debug(cds)("total    : " SIZE_FORMAT_W(9) " [100.0%% of total] out of " SIZE_FORMAT_W(9) " bytes [%5.1f%% used]",
                 total_bytes, total_reserved, total_u_perc);
}

void ArchiveBuilder::print_bitmap_region_stats(size_t size, size_t total_size) {
  log_debug(cds)("bm  space: " SIZE_FORMAT_W(9) " [ %4.1f%% of total] out of " SIZE_FORMAT_W(9) " bytes [100.0%% used]",
                 size, size/double(total_size)*100.0, size);
}

void ArchiveBuilder::print_heap_region_stats(GrowableArray<MemRegion>* regions,
                                             const char *name, size_t total_size) {
  int arr_len = regions == NULL ? 0 : regions->length();
  for (int i = 0; i < arr_len; i++) {
      char* start = (char*)regions->at(i).start();
      size_t size = regions->at(i).byte_size();
      char* top = start + size;
      log_debug(cds)("%s%d space: " SIZE_FORMAT_W(9) " [ %4.1f%% of total] out of " SIZE_FORMAT_W(9) " bytes [100.0%% used] at " INTPTR_FORMAT,
                     name, i, size, size/double(total_size)*100.0, size, p2i(start));
  }
}

void ArchiveBuilder::report_out_of_space(const char* name, size_t needed_bytes) {
  // This is highly unlikely to happen on 64-bit platforms, because we have reserved
  // a 4GB space. On 32-bit we reserve only 256MB, so you could run out of space with
  // 100,000 classes or so.
  _rw_region.print_out_of_space_msg(name, needed_bytes);
  _ro_region.print_out_of_space_msg(name, needed_bytes);

  vm_exit_during_initialization(err_msg("Unable to allocate from '%s' region", name),
                                "Please reduce the number of shared classes.");
}


#ifndef PRODUCT
void ArchiveBuilder::assert_is_vm_thread() {
  assert(Thread::current()->is_VM_thread(), "ArchiveBuilder should be used only inside the VMThread");
}
#endif