/*
 * Copyright (c) 2020, 2022, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#include "precompiled.hpp"
#include "cds/archiveBuilder.hpp"
#include "cds/archiveUtils.hpp"
#include "cds/cppVtables.hpp"
#include "cds/dumpAllocStats.hpp"
#include "cds/heapShared.hpp"
#include "cds/metaspaceShared.hpp"
#include "classfile/classLoaderDataShared.hpp"
#include "classfile/symbolTable.hpp"
#include "classfile/systemDictionaryShared.hpp"
#include "classfile/vmClasses.hpp"
#include "interpreter/abstractInterpreter.hpp"
#include "logging/log.hpp"
#include "logging/logStream.hpp"
#include "memory/allStatic.hpp"
#include "memory/memRegion.hpp"
#include "memory/resourceArea.hpp"
#include "oops/compressedKlass.inline.hpp"
#include "oops/instanceKlass.hpp"
#include "oops/klass.inline.hpp"
#include "oops/objArrayKlass.hpp"
#include "oops/objArrayOop.inline.hpp"
#include "oops/oopHandle.inline.hpp"
#include "runtime/arguments.hpp"
#include "runtime/globals_extension.hpp"
#include "runtime/javaThread.hpp"
#include "runtime/sharedRuntime.hpp"
#include "utilities/align.hpp"
#include "utilities/bitMap.inline.hpp"
#include "utilities/formatBuffer.hpp"

ArchiveBuilder* ArchiveBuilder::_current = NULL;

ArchiveBuilder::OtherROAllocMark::~OtherROAllocMark() {
  char* newtop = ArchiveBuilder::current()->_ro_region.top();
  ArchiveBuilder::alloc_stats()->record_other_type(int(newtop - _oldtop), true);
}

ArchiveBuilder::SourceObjList::SourceObjList() : _ptrmap(16 * K, mtClassShared) {
  _total_bytes = 0;
  _objs = new (mtClassShared) GrowableArray<SourceObjInfo*>(128 * K, mtClassShared);
}

ArchiveBuilder::SourceObjList::~SourceObjList() {
  delete _objs;
}

void ArchiveBuilder::SourceObjList::append(MetaspaceClosure::Ref* enclosing_ref, SourceObjInfo* src_info) {
  // Save this source object for copying
  _objs->append(src_info);

  // Prepare for marking the pointers in this source object
  assert(is_aligned(_total_bytes, sizeof(address)), "must be");
  src_info->set_ptrmap_start(_total_bytes / sizeof(address));
  _total_bytes = align_up(_total_bytes + (uintx)src_info->size_in_bytes(), sizeof(address));
  src_info->set_ptrmap_end(_total_bytes / sizeof(address));

  BitMap::idx_t bitmap_size_needed = BitMap::idx_t(src_info->ptrmap_end());
  if (_ptrmap.size() <= bitmap_size_needed) {
    _ptrmap.resize((bitmap_size_needed + 1) * 2);
  }
}
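
// Growth sketch (editor's note, hypothetical numbers): _ptrmap starts at
// 16 K bits. When an appended object makes ptrmap_end() reach 16384, the map
// is resized to (16384 + 1) * 2 == 32770 bits. Roughly doubling on each
// resize keeps the cost of repeated appends amortized constant.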

void ArchiveBuilder::SourceObjList::remember_embedded_pointer(SourceObjInfo* src_info, MetaspaceClosure::Ref* ref) {
  // src_obj contains a pointer. Remember the location of this pointer in _ptrmap,
  // so that we can copy/relocate it later. E.g., if we have
  //    class Foo { intx scalar; Bar* ptr; }
  //    Foo *f = 0x100;
  // To mark the f->ptr pointer on a 64-bit platform, this function is called with
  //    src_info()->obj() == 0x100
  //    ref->addr() == 0x108
  address src_obj = src_info->obj();
  address* field_addr = ref->addr();
  assert(src_info->ptrmap_start() < _total_bytes, "sanity");
  assert(src_info->ptrmap_end() <= _total_bytes, "sanity");
  assert(*field_addr != NULL, "should have checked");

  intx field_offset_in_bytes = ((address)field_addr) - src_obj;
  DEBUG_ONLY(int src_obj_size = src_info->size_in_bytes();)
  assert(field_offset_in_bytes >= 0, "must be");
  assert(field_offset_in_bytes + intx(sizeof(intptr_t)) <= intx(src_obj_size), "must be");
  assert(is_aligned(field_offset_in_bytes, sizeof(address)), "must be");

  BitMap::idx_t idx = BitMap::idx_t(src_info->ptrmap_start() + (uintx)(field_offset_in_bytes / sizeof(address)));
  _ptrmap.set_bit(idx);
}
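
// A minimal sketch (editor's illustration) of the bit-index arithmetic above,
// reusing the hypothetical Foo/Bar example from the comment. Suppose Foo is
// the first object appended, so ptrmap_start() == 0:
//
//   field_offset_in_bytes = 0x108 - 0x100 = 8
//   idx = 0 + 8 / sizeof(address)          // bit 1 on a 64-bit platform
//   _ptrmap.set_bit(1);                    // bit 1 marks the f->ptr slot
//
// SourceObjList::relocate() later iterates the marked bits and patches the
// corresponding slots in the buffered copy (see RelocateEmbeddedPointers below).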

class RelocateEmbeddedPointers : public BitMapClosure {
  ArchiveBuilder* _builder;
  address _buffered_obj;
  BitMap::idx_t _start_idx;
public:
  RelocateEmbeddedPointers(ArchiveBuilder* builder, address buffered_obj, BitMap::idx_t start_idx) :
    _builder(builder), _buffered_obj(buffered_obj), _start_idx(start_idx) {}

  bool do_bit(BitMap::idx_t bit_offset) {
    size_t field_offset = size_t(bit_offset - _start_idx) * sizeof(address);
    address* ptr_loc = (address*)(_buffered_obj + field_offset);

    address old_p = *ptr_loc;
    address new_p = _builder->get_buffered_addr(old_p);

    log_trace(cds)("Ref: [" PTR_FORMAT "] -> " PTR_FORMAT " => " PTR_FORMAT,
                   p2i(ptr_loc), p2i(old_p), p2i(new_p));

    ArchivePtrMarker::set_and_mark_pointer(ptr_loc, new_p);
    return true; // keep iterating the bitmap
  }
};

void ArchiveBuilder::SourceObjList::relocate(int i, ArchiveBuilder* builder) {
  SourceObjInfo* src_info = objs()->at(i);
  assert(src_info->should_copy(), "must be");
  BitMap::idx_t start = BitMap::idx_t(src_info->ptrmap_start()); // inclusive
  BitMap::idx_t end = BitMap::idx_t(src_info->ptrmap_end());     // exclusive

  RelocateEmbeddedPointers relocator(builder, src_info->buffered_addr(), start);
  _ptrmap.iterate(&relocator, start, end);
}

ArchiveBuilder::ArchiveBuilder() :
  _current_dump_space(NULL),
  _buffer_bottom(NULL),
  _last_verified_top(NULL),
  _num_dump_regions_used(0),
  _other_region_used_bytes(0),
  _requested_static_archive_bottom(NULL),
  _requested_static_archive_top(NULL),
  _requested_dynamic_archive_bottom(NULL),
  _requested_dynamic_archive_top(NULL),
  _mapped_static_archive_bottom(NULL),
  _mapped_static_archive_top(NULL),
  _buffer_to_requested_delta(0),
  _rw_region("rw", MAX_SHARED_DELTA),
  _ro_region("ro", MAX_SHARED_DELTA),
  _ptrmap(mtClassShared),
  _rw_src_objs(),
  _ro_src_objs(),
  _src_obj_table(INITIAL_TABLE_SIZE, MAX_TABLE_SIZE),
  _buffered_to_src_table(INITIAL_TABLE_SIZE, MAX_TABLE_SIZE),
  _total_closed_heap_region_size(0),
  _total_open_heap_region_size(0),
  _estimated_metaspaceobj_bytes(0),
  _estimated_hashtable_bytes(0)
{
  _klasses = new (mtClassShared) GrowableArray<Klass*>(4 * K, mtClassShared);
  _symbols = new (mtClassShared) GrowableArray<Symbol*>(256 * K, mtClassShared);
  _special_refs = new (mtClassShared) GrowableArray<SpecialRefInfo>(24 * K, mtClassShared);

  assert(_current == NULL, "must be");
  _current = this;
}

ArchiveBuilder::~ArchiveBuilder() {
  assert(_current == this, "must be");
  _current = NULL;

  clean_up_src_obj_table();

  for (int i = 0; i < _symbols->length(); i++) {
    _symbols->at(i)->decrement_refcount();
  }

  delete _klasses;
  delete _symbols;
  delete _special_refs;
  if (_shared_rs.is_reserved()) {
    _shared_rs.release();
  }
}

bool ArchiveBuilder::is_dumping_full_module_graph() {
  return DumpSharedSpaces && MetaspaceShared::use_full_module_graph();
}

class GatherKlassesAndSymbols : public UniqueMetaspaceClosure {
  ArchiveBuilder* _builder;

public:
  GatherKlassesAndSymbols(ArchiveBuilder* builder) : _builder(builder) {}

  virtual bool do_unique_ref(Ref* ref, bool read_only) {
    return _builder->gather_klass_and_symbol(ref, read_only);
  }
};

bool ArchiveBuilder::gather_klass_and_symbol(MetaspaceClosure::Ref* ref, bool read_only) {
  if (ref->obj() == NULL) {
    return false;
  }
  if (get_follow_mode(ref) != make_a_copy) {
    return false;
  }
  if (ref->msotype() == MetaspaceObj::ClassType) {
    Klass* klass = (Klass*)ref->obj();
    assert(klass->is_klass(), "must be");
    if (!is_excluded(klass)) {
      _klasses->append(klass);
    }
    // See ArchiveBuilder::make_shallow_copy: make sure we have enough space for both the maximum
    // Klass alignment and the RunTimeClassInfo* pointer we will embed in front of an InstanceKlass.
    _estimated_metaspaceobj_bytes += align_up(BytesPerWord, KlassAlignmentInBytes) +
        align_up(sizeof(void*), SharedSpaceObjectAlignment);
  } else if (ref->msotype() == MetaspaceObj::SymbolType) {
    // Make sure the symbol won't be GC'ed while we are dumping the archive.
    Symbol* sym = (Symbol*)ref->obj();
    sym->increment_refcount();
    _symbols->append(sym);
  }

  int bytes = ref->size() * BytesPerWord;
  _estimated_metaspaceobj_bytes += align_up(bytes, SharedSpaceObjectAlignment);

  return true; // recurse
}

void ArchiveBuilder::gather_klasses_and_symbols() {
  ResourceMark rm;
  log_info(cds)("Gathering classes and symbols ... ");
  GatherKlassesAndSymbols doit(this);
  iterate_roots(&doit, /*is_relocating_pointers=*/false);
#if INCLUDE_CDS_JAVA_HEAP
  if (is_dumping_full_module_graph()) {
    ClassLoaderDataShared::iterate_symbols(&doit);
  }
#endif
  doit.finish();

  if (DumpSharedSpaces) {
    // To ensure deterministic contents in the static archive, we need to ensure that
    // we iterate the MetaspaceObjs in a deterministic order. It doesn't matter where
    // the MetaspaceObjs are located originally, as they are copied sequentially into
    // the archive during the iteration.
    //
    // The only issue here is that the symbol table and the system dictionaries may be
    // randomly ordered, so we copy the symbols and klasses into two arrays and sort
    // them deterministically.
    //
    // During -Xshare:dump, the order of Symbol creation is strictly determined by
    // the SharedClassListFile (class loading is done in a single thread and the JIT
    // is disabled). Also, Symbols are allocated in monotonically increasing addresses
    // (see Symbol::operator new(size_t, int)). So if we iterate the Symbols by
    // ascending address order, we ensure that all Symbols are copied into deterministic
    // locations in the archive.
    //
    // TODO: in the future, if we want to produce deterministic contents in the
    // dynamic archive, we might need to sort the symbols alphabetically (also see
    // DynamicArchiveBuilder::sort_methods()).
    sort_symbols_and_fix_hash();
    sort_klasses();

    // TODO -- we need a proper estimate for the archived modules, etc,
    // but this should be enough for now
    _estimated_metaspaceobj_bytes += 200 * 1024 * 1024;
  }
}

int ArchiveBuilder::compare_symbols_by_address(Symbol** a, Symbol** b) {
  if (a[0] < b[0]) {
    return -1;
  } else {
    assert(a[0] > b[0], "Duplicated symbol %s unexpected", (*a)->as_C_string());
    return 1;
  }
}

void ArchiveBuilder::sort_symbols_and_fix_hash() {
  log_info(cds)("Sorting symbols and fixing identity hash ... ");
  os::init_random(0x12345678);
  _symbols->sort(compare_symbols_by_address);
  for (int i = 0; i < _symbols->length(); i++) {
    assert(_symbols->at(i)->is_permanent(), "archived symbols must be permanent");
    _symbols->at(i)->update_identity_hash();
  }
}

int ArchiveBuilder::compare_klass_by_name(Klass** a, Klass** b) {
  return a[0]->name()->fast_compare(b[0]->name());
}

void ArchiveBuilder::sort_klasses() {
  log_info(cds)("Sorting classes ... ");
  _klasses->sort(compare_klass_by_name);
}

size_t ArchiveBuilder::estimate_archive_size() {
  // size of the symbol table and two dictionaries, plus the RunTimeClassInfo's
  size_t symbol_table_est = SymbolTable::estimate_size_for_archive();
  size_t dictionary_est = SystemDictionaryShared::estimate_size_for_archive();
  _estimated_hashtable_bytes = symbol_table_est + dictionary_est;

  size_t total = 0;

  total += _estimated_metaspaceobj_bytes;
  total += _estimated_hashtable_bytes;

  // allow fragmentation at the end of each dump region
  total += _total_dump_regions * MetaspaceShared::core_region_alignment();

  log_info(cds)("_estimated_hashtable_bytes = " SIZE_FORMAT " + " SIZE_FORMAT " = " SIZE_FORMAT,
                symbol_table_est, dictionary_est, _estimated_hashtable_bytes);
  log_info(cds)("_estimated_metaspaceobj_bytes = " SIZE_FORMAT, _estimated_metaspaceobj_bytes);
  log_info(cds)("total estimate bytes = " SIZE_FORMAT, total);

  return align_up(total, MetaspaceShared::core_region_alignment());
}
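
// Worked example (editor's note, hypothetical numbers): with
// _estimated_metaspaceobj_bytes == 210 MB (including the 200 MB placeholder
// added in gather_klasses_and_symbols()), _estimated_hashtable_bytes == 10 MB,
// core_region_alignment() == 4 KB and _total_dump_regions == 2, the result is
//   align_up(210 MB + 10 MB + 2 * 4 KB, 4 KB) == 220 MB + 8 KB.
// The estimate only needs to be a safe upper bound: verify_estimate_size()
// below asserts that actual usage never exceeds it.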

address ArchiveBuilder::reserve_buffer() {
  size_t buffer_size = estimate_archive_size();
  ReservedSpace rs(buffer_size, MetaspaceShared::core_region_alignment(), os::vm_page_size());
  if (!rs.is_reserved()) {
    log_error(cds)("Failed to reserve " SIZE_FORMAT " bytes of output buffer.", buffer_size);
    os::_exit(0);
  }

  // buffer_bottom is the lowest address of the 2 core regions (rw, ro) when
  // we are copying the class metadata into the buffer.
  address buffer_bottom = (address)rs.base();
  log_info(cds)("Reserved output buffer space at " PTR_FORMAT " [" SIZE_FORMAT " bytes]",
                p2i(buffer_bottom), buffer_size);
  _shared_rs = rs;

  _buffer_bottom = buffer_bottom;
  _last_verified_top = buffer_bottom;
  _current_dump_space = &_rw_region;
  _num_dump_regions_used = 1;
  _other_region_used_bytes = 0;
  _current_dump_space->init(&_shared_rs, &_shared_vs);

  ArchivePtrMarker::initialize(&_ptrmap, &_shared_vs);

  // The bottom of the static archive should be mapped at this address by default.
  _requested_static_archive_bottom = (address)MetaspaceShared::requested_base_address();

  // The bottom of the archive (that I am writing now) should be mapped at this address by default.
  address my_archive_requested_bottom;

  if (DumpSharedSpaces) {
    my_archive_requested_bottom = _requested_static_archive_bottom;
  } else {
    _mapped_static_archive_bottom = (address)MetaspaceObj::shared_metaspace_base();
    _mapped_static_archive_top = (address)MetaspaceObj::shared_metaspace_top();
    assert(_mapped_static_archive_top >= _mapped_static_archive_bottom, "must be");
    size_t static_archive_size = _mapped_static_archive_top - _mapped_static_archive_bottom;

    // At run time, we will mmap the dynamic archive at my_archive_requested_bottom
    _requested_static_archive_top = _requested_static_archive_bottom + static_archive_size;
    my_archive_requested_bottom = align_up(_requested_static_archive_top, MetaspaceShared::core_region_alignment());

    _requested_dynamic_archive_bottom = my_archive_requested_bottom;
  }

  _buffer_to_requested_delta = my_archive_requested_bottom - _buffer_bottom;

  address my_archive_requested_top = my_archive_requested_bottom + buffer_size;
  if (my_archive_requested_bottom <  _requested_static_archive_bottom ||
      my_archive_requested_top    <= _requested_static_archive_bottom) {
    // Size overflow.
    log_error(cds)("my_archive_requested_bottom = " INTPTR_FORMAT, p2i(my_archive_requested_bottom));
    log_error(cds)("my_archive_requested_top    = " INTPTR_FORMAT, p2i(my_archive_requested_top));
    log_error(cds)("SharedBaseAddress (" INTPTR_FORMAT ") is too high. "
                   "Please rerun java -Xshare:dump with a lower value", p2i(_requested_static_archive_bottom));
    os::_exit(0);
  }

  if (DumpSharedSpaces) {
    // We don't want any valid object to be at the very bottom of the archive.
    // See ArchivePtrMarker::mark_pointer().
    rw_region()->allocate(16);
  }

  return buffer_bottom;
}
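
// Address layout sketch (editor's illustration, hypothetical addresses) for a
// dynamic dump, assuming a SharedBaseAddress of 0x800000000 and a 16 MB
// static archive:
//
//   _requested_static_archive_bottom = 0x800000000
//   _requested_static_archive_top    = 0x801000000  (bottom + 16 MB)
//   my_archive_requested_bottom      = 0x801000000  (aligned up; dynamic base)
//   _buffer_to_requested_delta       = 0x801000000 - _buffer_bottom
//
// Adding the delta to any address in the dump-time buffer yields the address
// the same byte is expected to have when the archive is mapped at run time.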

void ArchiveBuilder::iterate_sorted_roots(MetaspaceClosure* it, bool is_relocating_pointers) {
  int i;

  if (!is_relocating_pointers) {
    // Don't relocate _symbols, so we can safely call decrement_refcount on the
    // original symbols.
    int num_symbols = _symbols->length();
    for (i = 0; i < num_symbols; i++) {
      it->push(_symbols->adr_at(i));
    }
  }

  int num_klasses = _klasses->length();
  for (i = 0; i < num_klasses; i++) {
    it->push(_klasses->adr_at(i));
  }

  iterate_roots(it, is_relocating_pointers);
}

class GatherSortedSourceObjs : public MetaspaceClosure {
  ArchiveBuilder* _builder;

public:
  GatherSortedSourceObjs(ArchiveBuilder* builder) : _builder(builder) {}

  virtual bool do_ref(Ref* ref, bool read_only) {
    return _builder->gather_one_source_obj(enclosing_ref(), ref, read_only);
  }

  virtual void push_special(SpecialRef type, Ref* ref, intptr_t* p) {
    assert(type == _method_entry_ref, "only special type allowed for now");
    address src_obj = ref->obj();
    size_t field_offset = pointer_delta(p, src_obj, sizeof(u1));
    _builder->add_special_ref(type, src_obj, field_offset);
  }

  virtual void do_pending_ref(Ref* ref) {
    if (ref->obj() != NULL) {
      _builder->remember_embedded_pointer_in_copied_obj(enclosing_ref(), ref);
    }
  }
};

bool ArchiveBuilder::gather_one_source_obj(MetaspaceClosure::Ref* enclosing_ref,
                                           MetaspaceClosure::Ref* ref, bool read_only) {
  address src_obj = ref->obj();
  if (src_obj == NULL) {
    return false;
  }
  ref->set_keep_after_pushing();
  remember_embedded_pointer_in_copied_obj(enclosing_ref, ref);

  FollowMode follow_mode = get_follow_mode(ref);
  SourceObjInfo src_info(ref, read_only, follow_mode);
  bool created;
  SourceObjInfo* p = _src_obj_table.put_if_absent(src_obj, src_info, &created);
  if (created) {
    if (_src_obj_table.maybe_grow()) {
      log_info(cds, hashtables)("Expanded _src_obj_table table to %d", _src_obj_table.table_size());
    }
  }

  assert(p->read_only() == src_info.read_only(), "must be");

  if (created && src_info.should_copy()) {
    ref->set_user_data((void*)p);
    if (read_only) {
      _ro_src_objs.append(enclosing_ref, p);
    } else {
      _rw_src_objs.append(enclosing_ref, p);
    }
    return true; // Need to recurse into this ref only if we are copying it
  } else {
    return false;
  }
}

void ArchiveBuilder::add_special_ref(MetaspaceClosure::SpecialRef type, address src_obj, size_t field_offset) {
  _special_refs->append(SpecialRefInfo(type, src_obj, field_offset));
}

void ArchiveBuilder::remember_embedded_pointer_in_copied_obj(MetaspaceClosure::Ref* enclosing_ref,
                                                             MetaspaceClosure::Ref* ref) {
  assert(ref->obj() != NULL, "should have checked");

  if (enclosing_ref != NULL) {
    SourceObjInfo* src_info = (SourceObjInfo*)enclosing_ref->user_data();
    if (src_info == NULL) {
      // source objects of point_to_it/set_to_null types are not copied
      // so we don't need to remember their pointers.
    } else {
      if (src_info->read_only()) {
        _ro_src_objs.remember_embedded_pointer(src_info, ref);
      } else {
        _rw_src_objs.remember_embedded_pointer(src_info, ref);
      }
    }
  }
}

void ArchiveBuilder::gather_source_objs() {
  ResourceMark rm;
  log_info(cds)("Gathering all archivable objects ... ");
  gather_klasses_and_symbols();
  GatherSortedSourceObjs doit(this);
  iterate_sorted_roots(&doit, /*is_relocating_pointers=*/false);
  doit.finish();
}

bool ArchiveBuilder::is_excluded(Klass* klass) {
  if (klass->is_instance_klass()) {
    InstanceKlass* ik = InstanceKlass::cast(klass);
    return SystemDictionaryShared::is_excluded_class(ik);
  } else if (klass->is_objArray_klass()) {
    if (DynamicDumpSharedSpaces) {
      // Archiving of array klasses is not yet supported for dynamic dumps, so exclude them.
      return true;
    }
    Klass* bottom = ObjArrayKlass::cast(klass)->bottom_klass();
    if (bottom->is_instance_klass()) {
      return SystemDictionaryShared::is_excluded_class(InstanceKlass::cast(bottom));
    }
  }

  return false;
}

ArchiveBuilder::FollowMode ArchiveBuilder::get_follow_mode(MetaspaceClosure::Ref *ref) {
  address obj = ref->obj();
  if (MetaspaceShared::is_in_shared_metaspace(obj)) {
    // Don't dump existing shared metadata again.
    return point_to_it;
  } else if (ref->msotype() == MetaspaceObj::MethodDataType ||
             ref->msotype() == MetaspaceObj::MethodCountersType) {
    return set_to_null;
  } else {
    if (ref->msotype() == MetaspaceObj::ClassType) {
      Klass* klass = (Klass*)ref->obj();
      assert(klass->is_klass(), "must be");
      if (is_excluded(klass)) {
        ResourceMark rm;
        log_debug(cds, dynamic)("Skipping class (excluded): %s", klass->external_name());
        return set_to_null;
      }
    }

    return make_a_copy;
  }
}
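
// Summary of the three follow modes (editor's note):
//   point_to_it - the object already lives in the mapped static archive
//                 (in practice, dynamic dump); keep pointers to it and
//                 don't copy it again.
//   set_to_null - the object must not be archived (MethodData, MethodCounters,
//                 or an excluded class); pointers to it are cleared.
//   make_a_copy - the object is shallow-copied into the rw or ro region, and
//                 its embedded pointers are relocated afterwards.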

void ArchiveBuilder::start_dump_space(DumpRegion* next) {
  address bottom = _last_verified_top;
  address top = (address)(current_dump_space()->top());
  _other_region_used_bytes += size_t(top - bottom);

  current_dump_space()->pack(next);
  _current_dump_space = next;
  _num_dump_regions_used ++;

  _last_verified_top = (address)(current_dump_space()->top());
}

void ArchiveBuilder::verify_estimate_size(size_t estimate, const char* which) {
  address bottom = _last_verified_top;
  address top = (address)(current_dump_space()->top());
  size_t used = size_t(top - bottom) + _other_region_used_bytes;
  int diff = int(estimate) - int(used);

  log_info(cds)("%s estimate = " SIZE_FORMAT " used = " SIZE_FORMAT "; diff = %d bytes", which, estimate, used, diff);
  assert(diff >= 0, "Estimate is too small");

  _last_verified_top = top;
  _other_region_used_bytes = 0;
}

void ArchiveBuilder::dump_rw_metadata() {
  ResourceMark rm;
  log_info(cds)("Allocating RW objects ... ");
  make_shallow_copies(&_rw_region, &_rw_src_objs);

#if INCLUDE_CDS_JAVA_HEAP
  if (is_dumping_full_module_graph()) {
    // Archive the ModuleEntry's and PackageEntry's of the 3 built-in loaders
    char* start = rw_region()->top();
    ClassLoaderDataShared::allocate_archived_tables();
    alloc_stats()->record_modules(rw_region()->top() - start, /*read_only*/false);
  }
#endif
}

void ArchiveBuilder::dump_ro_metadata() {
  ResourceMark rm;
  log_info(cds)("Allocating RO objects ... ");

  start_dump_space(&_ro_region);
  make_shallow_copies(&_ro_region, &_ro_src_objs);

#if INCLUDE_CDS_JAVA_HEAP
  if (is_dumping_full_module_graph()) {
    char* start = ro_region()->top();
    ClassLoaderDataShared::init_archived_tables();
    alloc_stats()->record_modules(ro_region()->top() - start, /*read_only*/true);
  }
#endif
}

void ArchiveBuilder::make_shallow_copies(DumpRegion *dump_region,
                                         const ArchiveBuilder::SourceObjList* src_objs) {
  for (int i = 0; i < src_objs->objs()->length(); i++) {
    make_shallow_copy(dump_region, src_objs->objs()->at(i));
  }
  log_info(cds)("done (%d objects)", src_objs->objs()->length());
}

void ArchiveBuilder::make_shallow_copy(DumpRegion *dump_region, SourceObjInfo* src_info) {
  MetaspaceClosure::Ref* ref = src_info->ref();
  address src = ref->obj();
  int bytes = src_info->size_in_bytes();
  char* dest;
  char* oldtop;
  char* newtop;

  oldtop = dump_region->top();
  if (ref->msotype() == MetaspaceObj::ClassType) {
    // Reserve space for a pointer immediately in front of an InstanceKlass. That space will
    // later be used to store the RunTimeClassInfo* pointer directly in front of the archived
    // InstanceKlass, so that we get a quick lookup InstanceKlass* -> RunTimeClassInfo*
    // without building another hashtable. See RunTimeClassInfo::get_for()/::set_for() for
    // details.
    Klass* klass = (Klass*)src;
    if (klass->is_instance_klass()) {
      SystemDictionaryShared::validate_before_archiving(InstanceKlass::cast(klass));
      dump_region->allocate(sizeof(address));
    }
    dest = dump_region->allocate(bytes, KlassAlignmentInBytes);
  } else {
    dest = dump_region->allocate(bytes);
  }
  newtop = dump_region->top();

  memcpy(dest, src, bytes);
  {
    bool created;
    _buffered_to_src_table.put_if_absent((address)dest, src, &created);
    assert(created, "must be");
    if (_buffered_to_src_table.maybe_grow()) {
      log_info(cds, hashtables)("Expanded _buffered_to_src_table table to %d", _buffered_to_src_table.table_size());
    }
  }

  intptr_t* archived_vtable = CppVtables::get_archived_vtable(ref->msotype(), (address)dest);
  if (archived_vtable != NULL) {
    *(address*)dest = (address)archived_vtable;
    ArchivePtrMarker::mark_pointer((address*)dest);
  }

  log_trace(cds)("Copy: " PTR_FORMAT " ==> " PTR_FORMAT " %d (%s)", p2i(src), p2i(dest), bytes,
                 MetaspaceObj::type_name(ref->msotype()));
  src_info->set_buffered_addr((address)dest);

  _alloc_stats.record(ref->msotype(), int(newtop - oldtop), src_info->read_only());

  DEBUG_ONLY(_alloc_stats.verify((int)dump_region->used(), src_info->read_only()));
}
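
// Resulting buffer layout around an archived InstanceKlass (editor's sketch):
//
//   oldtop -> +----------------------------+
//             | reserved word; the         |
//             | RunTimeClassInfo* is later |
//             | stored directly in front   |
//             | of the InstanceKlass       |
//   dest   -> +----------------------------+  aligned to KlassAlignmentInBytes
//             | shallow copy of the        |
//             | InstanceKlass (bytes)      |
//   newtop -> +----------------------------+
//
// RunTimeClassInfo::set_for() fills the reserved word and
// RunTimeClassInfo::get_for() reads it back with a single load, so no extra
// hashtable is needed at run time.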

address ArchiveBuilder::get_buffered_addr(address src_addr) const {
  SourceObjInfo* p = _src_obj_table.get(src_addr);
  assert(p != NULL, "must be");

  return p->buffered_addr();
}

address ArchiveBuilder::get_source_addr(address buffered_addr) const {
  assert(is_in_buffer_space(buffered_addr), "must be");
  address* src_p = _buffered_to_src_table.get(buffered_addr);
  assert(src_p != NULL && *src_p != NULL, "must be");
  return *src_p;
}

void ArchiveBuilder::relocate_embedded_pointers(ArchiveBuilder::SourceObjList* src_objs) {
  for (int i = 0; i < src_objs->objs()->length(); i++) {
    src_objs->relocate(i, this);
  }
}

void ArchiveBuilder::update_special_refs() {
  for (int i = 0; i < _special_refs->length(); i++) {
    SpecialRefInfo s = _special_refs->at(i);
    size_t field_offset = s.field_offset();
    address src_obj = s.src_obj();
    address dst_obj = get_buffered_addr(src_obj);
    intptr_t* src_p = (intptr_t*)(src_obj + field_offset);
    intptr_t* dst_p = (intptr_t*)(dst_obj + field_offset);
    assert(s.type() == MetaspaceClosure::_method_entry_ref, "only special type allowed for now");

    assert(*src_p == *dst_p, "must be a copy");
    ArchivePtrMarker::mark_pointer((address*)dst_p);
  }
}

class RefRelocator: public MetaspaceClosure {
  ArchiveBuilder* _builder;

public:
  RefRelocator(ArchiveBuilder* builder) : _builder(builder) {}

  virtual bool do_ref(Ref* ref, bool read_only) {
    if (ref->not_null()) {
      ref->update(_builder->get_buffered_addr(ref->obj()));
      ArchivePtrMarker::mark_pointer(ref->addr());
    }
    return false; // Do not recurse.
  }
};

void ArchiveBuilder::relocate_roots() {
  log_info(cds)("Relocating external roots ... ");
  ResourceMark rm;
  RefRelocator doit(this);
  iterate_sorted_roots(&doit, /*is_relocating_pointers=*/true);
  doit.finish();
  log_info(cds)("done");
}

void ArchiveBuilder::relocate_metaspaceobj_embedded_pointers() {
  log_info(cds)("Relocating embedded pointers in core regions ... ");
  relocate_embedded_pointers(&_rw_src_objs);
  relocate_embedded_pointers(&_ro_src_objs);
  update_special_refs();
}

// We must relocate vmClasses::_klasses[] only after the java objects have been
// copied by dump_java_heap_objects(): during the object copy, we operate on the
// old objects, which assert that their klass is the original klass.
void ArchiveBuilder::relocate_vm_classes() {
  log_info(cds)("Relocating vmClasses::_klasses[] ... ");
  ResourceMark rm;
  RefRelocator doit(this);
  vmClasses::metaspace_pointers_do(&doit);
}

void ArchiveBuilder::make_klasses_shareable() {
  int num_instance_klasses = 0;
  int num_boot_klasses = 0;
  int num_platform_klasses = 0;
  int num_app_klasses = 0;
  int num_hidden_klasses = 0;
  int num_unlinked_klasses = 0;
  int num_unregistered_klasses = 0;
  int num_obj_array_klasses = 0;
  int num_type_array_klasses = 0;

  for (int i = 0; i < klasses()->length(); i++) {
    const char* type;
    const char* unlinked = "";
    const char* hidden = "";
    const char* generated = "";
    Klass* k = klasses()->at(i);
    k->remove_java_mirror();
    Klass* requested_k = to_requested(k);
#ifdef _LP64
    narrowKlass nk = CompressedKlassPointers::encode_not_null(requested_k, _requested_static_archive_bottom);
    k->set_prototype_header(markWord::prototype().set_narrow_klass(nk));
#else
    k->set_prototype_header(markWord::prototype());
#endif
    if (k->is_objArray_klass()) {
      // InstanceKlass and TypeArrayKlass will in turn call remove_unshareable_info
      // on their array classes.
      num_obj_array_klasses ++;
      type = "array";
    } else if (k->is_typeArray_klass()) {
      num_type_array_klasses ++;
      type = "array";
      k->remove_unshareable_info();
    } else {
      assert(k->is_instance_klass(), "must be");
      num_instance_klasses ++;
      InstanceKlass* ik = InstanceKlass::cast(k);
      if (DynamicDumpSharedSpaces) {
        // For a static dump, the class loader types have already been set.
        ik->assign_class_loader_type();
      }
      if (ik->is_shared_boot_class()) {
        type = "boot";
        num_boot_klasses ++;
      } else if (ik->is_shared_platform_class()) {
        type = "plat";
        num_platform_klasses ++;
      } else if (ik->is_shared_app_class()) {
        type = "app";
        num_app_klasses ++;
      } else {
        assert(ik->is_shared_unregistered_class(), "must be");
        type = "unreg";
        num_unregistered_klasses ++;
      }

      if (!ik->is_linked()) {
        num_unlinked_klasses ++;
        unlinked = " ** unlinked";
      }

      if (ik->is_hidden()) {
        num_hidden_klasses ++;
        hidden = " ** hidden";
      }

      if (ik->is_generated_shared_class()) {
        generated = " ** generated";
      }
      MetaspaceShared::rewrite_nofast_bytecodes_and_calculate_fingerprints(Thread::current(), ik);
      ik->remove_unshareable_info();
    }

    if (log_is_enabled(Debug, cds, class)) {
      ResourceMark rm;
      log_debug(cds, class)("klasses[%5d] = " PTR_FORMAT " %-5s %s%s%s%s", i,
                            p2i(to_requested(k)), type, k->external_name(),
                            hidden, unlinked, generated);
    }
  }

  log_info(cds)("Number of classes %d", num_instance_klasses + num_obj_array_klasses + num_type_array_klasses);
  log_info(cds)("    instance classes   = %5d", num_instance_klasses);
  log_info(cds)("      boot             = %5d", num_boot_klasses);
  log_info(cds)("      app              = %5d", num_app_klasses);
  log_info(cds)("      platform         = %5d", num_platform_klasses);
  log_info(cds)("      unregistered     = %5d", num_unregistered_klasses);
  log_info(cds)("      (hidden)         = %5d", num_hidden_klasses);
  log_info(cds)("      (unlinked)       = %5d", num_unlinked_klasses);
  log_info(cds)("    obj array classes  = %5d", num_obj_array_klasses);
  log_info(cds)("    type array classes = %5d", num_type_array_klasses);
  log_info(cds)("               symbols = %5d", _symbols->length());
}

uintx ArchiveBuilder::buffer_to_offset(address p) const {
  address requested_p = to_requested(p);
  assert(requested_p >= _requested_static_archive_bottom, "must be");
  return requested_p - _requested_static_archive_bottom;
}

uintx ArchiveBuilder::any_to_offset(address p) const {
  if (is_in_mapped_static_archive(p)) {
    assert(DynamicDumpSharedSpaces, "must be");
    return p - _mapped_static_archive_bottom;
  }
  return buffer_to_offset(p);
}
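
// Offset example (editor's note, hypothetical addresses): with
// _mapped_static_archive_bottom == 0x800000000, a pointer p == 0x800000040
// into the mapped static archive yields offset 0x40. A pointer into the dump
// buffer is instead translated with to_requested() and measured from
// _requested_static_archive_bottom. Either way, the resulting offset is the
// same at dump time and at run time, so it can safely be stored in the archive.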

// Update a Java object to point its Klass* to the address where
// the class would be mapped at runtime.
void ArchiveBuilder::relocate_klass_ptr_of_oop(oop o) {
  assert(DumpSharedSpaces, "sanity");
  Klass* k = get_buffered_klass(o->klass());
  Klass* requested_k = to_requested(k);
  narrowKlass nk = CompressedKlassPointers::encode_not_null(requested_k, _requested_static_archive_bottom);
#ifdef _LP64
  o->set_mark(o->mark().set_narrow_klass(nk));
#endif
}

// RelocateBufferToRequested --- Relocate all the pointers in rw/ro,
// so that the archive can be mapped to the "requested" location without runtime relocation.
//
// - See ArchiveBuilder header for the definition of "buffer", "mapped" and "requested"
// - ArchivePtrMarker::ptrmap() marks all the pointers in the rw/ro regions
// - Every pointer must have one of the following values:
//   [a] NULL:
//       No relocation is needed. Remove this pointer from ptrmap so we don't need to
//       consider it at runtime.
//   [b] Points into an object X which is inside the buffer:
//       Adjust this pointer by _buffer_to_requested_delta, so it points to X
//       when the archive is mapped at the requested location.
//   [c] Points into an object Y which is inside the mapped static archive:
//       - This happens only during dynamic dump
//       - Adjust this pointer by _mapped_to_requested_static_archive_delta,
//         so it points to Y when the static archive is mapped at the requested location.
template <bool STATIC_DUMP>
class RelocateBufferToRequested : public BitMapClosure {
  ArchiveBuilder* _builder;
  address _buffer_bottom;
  intx _buffer_to_requested_delta;
  intx _mapped_to_requested_static_archive_delta;
  size_t _max_non_null_offset;

 public:
  RelocateBufferToRequested(ArchiveBuilder* builder) {
    _builder = builder;
    _buffer_bottom = _builder->buffer_bottom();
    _buffer_to_requested_delta = builder->buffer_to_requested_delta();
    _mapped_to_requested_static_archive_delta = builder->requested_static_archive_bottom() - builder->mapped_static_archive_bottom();
    _max_non_null_offset = 0;

    address bottom = _builder->buffer_bottom();
    address top = _builder->buffer_top();
    address new_bottom = bottom + _buffer_to_requested_delta;
    address new_top = top + _buffer_to_requested_delta;
    log_debug(cds)("Relocating archive from [" INTPTR_FORMAT " - " INTPTR_FORMAT "] to "
                   "[" INTPTR_FORMAT " - " INTPTR_FORMAT "]",
                   p2i(bottom), p2i(top),
                   p2i(new_bottom), p2i(new_top));
  }

  bool do_bit(size_t offset) {
    address* p = (address*)_buffer_bottom + offset;
    assert(_builder->is_in_buffer_space(p), "pointer must live in buffer space");

    if (*p == NULL) {
      // Case [a]: a NULL pointer needs no relocation; remove it from the ptrmap
      // so the runtime never has to consider it.
      ArchivePtrMarker::ptrmap()->clear_bit(offset);
    } else {
      if (STATIC_DUMP) {
        assert(_builder->is_in_buffer_space(*p), "old pointer must point inside buffer space");
        *p += _buffer_to_requested_delta;
        assert(_builder->is_in_requested_static_archive(*p), "new pointer must point inside requested archive");
      } else {
        if (_builder->is_in_buffer_space(*p)) {
          *p += _buffer_to_requested_delta;
          // assert is in requested dynamic archive
        } else {
          assert(_builder->is_in_mapped_static_archive(*p), "old pointer must point inside buffer space or mapped static archive");
          *p += _mapped_to_requested_static_archive_delta;
          assert(_builder->is_in_requested_static_archive(*p), "new pointer must point inside requested archive");
        }
      }
      _max_non_null_offset = offset;
    }

    return true; // keep iterating
  }

  void doit() {
    ArchivePtrMarker::ptrmap()->iterate(this);
    ArchivePtrMarker::compact(_max_non_null_offset);
  }
};
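
// Worked example (editor's note, hypothetical addresses) of the three cases
// in do_bit(), with _buffer_to_requested_delta == 0x10000 and
// _mapped_to_requested_static_archive_delta == -0x2000:
//
//   [a] *p == NULL            -> bit cleared, pointer left untouched
//   [b] *p == 0x7f0000001000  (inside the buffer)
//                             -> 0x7f0000011000 (requested rw/ro address)
//   [c] *p == 0x800042000     (inside the mapped static archive; dynamic dump only)
//                             -> 0x800040000 (requested static archive address)
//
// compact(_max_non_null_offset) then trims the bitmap past the last marked
// bit, so the archived bitmap need not cover trailing slots with no pointers.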


void ArchiveBuilder::relocate_to_requested() {
  ro_region()->pack();

  size_t my_archive_size = buffer_top() - buffer_bottom();

  if (DumpSharedSpaces) {
    _requested_static_archive_top = _requested_static_archive_bottom + my_archive_size;
    RelocateBufferToRequested<true> patcher(this);
    patcher.doit();
  } else {
    assert(DynamicDumpSharedSpaces, "must be");
    _requested_dynamic_archive_top = _requested_dynamic_archive_bottom + my_archive_size;
    RelocateBufferToRequested<false> patcher(this);
    patcher.doit();
  }
}

// Write detailed info to a mapfile to analyze contents of the archive.
// static dump:
//   java -Xshare:dump -Xlog:cds+map=trace:file=cds.map:none:filesize=0
// dynamic dump:
//   java -cp MyApp.jar -XX:ArchiveClassesAtExit=MyApp.jsa \
//        -Xlog:cds+map=trace:file=cds.map:none:filesize=0 MyApp
//
// We need to do some address translation because the buffers used at dump time may be mapped to
// a different location at runtime. At dump time, the buffers may be at arbitrary locations
// picked by the OS. At runtime, we try to map at a fixed location (SharedBaseAddress). For
// consistency, we log everything using runtime addresses.
class ArchiveBuilder::CDSMapLogger : AllStatic {
  static intx buffer_to_runtime_delta() {
    // Translate the buffers used by the RW/RO regions to their eventual (requested) locations
    // at runtime.
    return ArchiveBuilder::current()->buffer_to_requested_delta();
  }

  // rw/ro regions only
  static void log_metaspace_region(const char* name, DumpRegion* region,
                                   const ArchiveBuilder::SourceObjList* src_objs) {
    address region_base = address(region->base());
    address region_top  = address(region->top());
    log_region(name, region_base, region_top, region_base + buffer_to_runtime_delta());
    log_metaspace_objects(region, src_objs);
  }

#define _LOG_PREFIX PTR_FORMAT ": @@ %-17s %d"

  static void log_klass(Klass* k, address runtime_dest, const char* type_name, int bytes, Thread* current) {
    ResourceMark rm(current);
    log_debug(cds, map)(_LOG_PREFIX " %s",
                        p2i(runtime_dest), type_name, bytes, k->external_name());
  }
  static void log_method(Method* m, address runtime_dest, const char* type_name, int bytes, Thread* current) {
    ResourceMark rm(current);
    log_debug(cds, map)(_LOG_PREFIX " %s",
                        p2i(runtime_dest), type_name, bytes, m->external_name());
  }

  // rw/ro regions only
  static void log_metaspace_objects(DumpRegion* region, const ArchiveBuilder::SourceObjList* src_objs) {
    address last_obj_base = address(region->base());
    address last_obj_end  = address(region->base());
    address region_end    = address(region->end());
    Thread* current = Thread::current();
    for (int i = 0; i < src_objs->objs()->length(); i++) {
      SourceObjInfo* src_info = src_objs->at(i);
      address src = src_info->source_addr();
      address dest = src_info->buffered_addr();
      log_data(last_obj_base, dest, last_obj_base + buffer_to_runtime_delta());
      address runtime_dest = dest + buffer_to_runtime_delta();
      int bytes = src_info->size_in_bytes();

      MetaspaceObj::Type type = src_info->msotype();
      const char* type_name = MetaspaceObj::type_name(type);

      switch (type) {
      case MetaspaceObj::ClassType:
        log_klass((Klass*)src, runtime_dest, type_name, bytes, current);
        break;
      case MetaspaceObj::ConstantPoolType:
        log_klass(((ConstantPool*)src)->pool_holder(),
                    runtime_dest, type_name, bytes, current);
        break;
      case MetaspaceObj::ConstantPoolCacheType:
        log_klass(((ConstantPoolCache*)src)->constant_pool()->pool_holder(),
                    runtime_dest, type_name, bytes, current);
        break;
      case MetaspaceObj::MethodType:
        log_method((Method*)src, runtime_dest, type_name, bytes, current);
        break;
      case MetaspaceObj::ConstMethodType:
        log_method(((ConstMethod*)src)->method(), runtime_dest, type_name, bytes, current);
        break;
      case MetaspaceObj::SymbolType:
        {
          ResourceMark rm(current);
          Symbol* s = (Symbol*)src;
          log_debug(cds, map)(_LOG_PREFIX " %s", p2i(runtime_dest), type_name, bytes,
                              s->as_quoted_ascii());
        }
        break;
      default:
        log_debug(cds, map)(_LOG_PREFIX, p2i(runtime_dest), type_name, bytes);
        break;
      }

      last_obj_base = dest;
      last_obj_end  = dest + bytes;
    }

    log_data(last_obj_base, last_obj_end, last_obj_base + buffer_to_runtime_delta());
    if (last_obj_end < region_end) {
      log_debug(cds, map)(PTR_FORMAT ": @@ Misc data " SIZE_FORMAT " bytes",
                          p2i(last_obj_end + buffer_to_runtime_delta()),
                          size_t(region_end - last_obj_end));
      log_data(last_obj_end, region_end, last_obj_end + buffer_to_runtime_delta());
    }
  }

#undef _LOG_PREFIX

  // Log information about a region, whose address at dump time is [base .. top). At
  // runtime, this region will be mapped to requested_base. requested_base is 0 if this
  // region will be mapped at os-selected addresses (such as the bitmap region), or will
  // be accessed with os::read (the header).
  //
  // Note: across -Xshare:dump runs, base may differ, but requested_base should
  // be the same, since the archive contents should be deterministic.
  static void log_region(const char* name, address base, address top, address requested_base) {
    size_t size = top - base;
    base = requested_base;
    top = requested_base + size;
    log_info(cds, map)("[%-18s " PTR_FORMAT " - " PTR_FORMAT " " SIZE_FORMAT_W(9) " bytes]",
                       name, p2i(base), p2i(top), size);
  }

#if INCLUDE_CDS_JAVA_HEAP
  // open and closed archive regions
  static void log_heap_regions(const char* which, GrowableArray<MemRegion> *regions) {
    for (int i = 0; i < regions->length(); i++) {
      address start = address(regions->at(i).start());
      address end = address(regions->at(i).end());
      log_region(which, start, end, to_requested(start));

      while (start < end) {
        size_t byte_size;
        oop archived_oop = cast_to_oop(start);
        oop original_oop = HeapShared::get_original_object(archived_oop);
        if (original_oop != NULL) {
          ResourceMark rm;
          log_info(cds, map)(PTR_FORMAT ": @@ Object %s",
                             p2i(to_requested(start)), original_oop->klass()->external_name());
          byte_size = original_oop->size() * BytesPerWord;
        } else if (archived_oop == HeapShared::roots()) {
          // HeapShared::roots() is copied specially so it doesn't exist in
          // HeapShared::OriginalObjectTable. See HeapShared::copy_roots().
          log_info(cds, map)(PTR_FORMAT ": @@ Object HeapShared::roots (ObjArray)",
                             p2i(to_requested(start)));
          byte_size = objArrayOopDesc::object_size(HeapShared::roots()->length()) * BytesPerWord;
        } else {
          // We have reached the end of the region
          break;
        }
        address oop_end = start + byte_size;
        log_data(start, oop_end, to_requested(start), /*is_heap=*/true);
        start = oop_end;
      }
      if (start < end) {
        log_info(cds, map)(PTR_FORMAT ": @@ Unused heap space " SIZE_FORMAT " bytes",
                           p2i(to_requested(start)), size_t(end - start));
        log_data(start, end, to_requested(start), /*is_heap=*/true);
      }
    }
  }
  static address to_requested(address p) {
    return HeapShared::to_requested_address(p);
  }
#endif

  // Log all the data [base...top). Pretend that the base address
  // will be mapped to requested_base at run-time.
  static void log_data(address base, address top, address requested_base, bool is_heap = false) {
    assert(top >= base, "must be");

    LogStreamHandle(Trace, cds, map) lsh;
    if (lsh.is_enabled()) {
      int unitsize = sizeof(address);
      if (is_heap && UseCompressedOops) {
        // This makes the compressed oop pointers easier to read, but
        // longs and doubles will be split into two words.
        unitsize = sizeof(narrowOop);
      }
      os::print_hex_dump(&lsh, base, top, unitsize, 32, requested_base);
    }
  }

  static void log_header(FileMapInfo* mapinfo) {
    LogStreamHandle(Info, cds, map) lsh;
    if (lsh.is_enabled()) {
      mapinfo->print(&lsh);
    }
  }

public:
  static void log(ArchiveBuilder* builder, FileMapInfo* mapinfo,
                  GrowableArray<MemRegion> *closed_heap_regions,
                  GrowableArray<MemRegion> *open_heap_regions,
                  char* bitmap, size_t bitmap_size_in_bytes) {
    log_info(cds, map)("%s CDS archive map for %s", DumpSharedSpaces ? "Static" : "Dynamic", mapinfo->full_path());

    address header = address(mapinfo->header());
    address header_end = header + mapinfo->header()->header_size();
    log_region("header", header, header_end, 0);
    log_header(mapinfo);
    log_data(header, header_end, 0);

    DumpRegion* rw_region = &builder->_rw_region;
    DumpRegion* ro_region = &builder->_ro_region;

    log_metaspace_region("rw region", rw_region, &builder->_rw_src_objs);
    log_metaspace_region("ro region", ro_region, &builder->_ro_src_objs);

    address bitmap_end = address(bitmap + bitmap_size_in_bytes);
    log_region("bitmap", address(bitmap), bitmap_end, 0);
    log_data((address)bitmap, bitmap_end, 0);

#if INCLUDE_CDS_JAVA_HEAP
    if (closed_heap_regions != NULL) {
      log_heap_regions("closed heap region", closed_heap_regions);
    }
    if (open_heap_regions != NULL) {
      log_heap_regions("open heap region", open_heap_regions);
    }
#endif

    log_info(cds, map)("[End of CDS archive map]");
  }
}; // end ArchiveBuilder::CDSMapLogger

void ArchiveBuilder::print_stats() {
  _alloc_stats.print_stats(int(_ro_region.used()), int(_rw_region.used()));
}

void ArchiveBuilder::clean_up_src_obj_table() {
  SrcObjTableCleaner cleaner;
  _src_obj_table.iterate(&cleaner);
}

void ArchiveBuilder::write_archive(FileMapInfo* mapinfo,
                                   GrowableArray<MemRegion>* closed_heap_regions,
                                   GrowableArray<MemRegion>* open_heap_regions,
                                   GrowableArray<ArchiveHeapBitmapInfo>* closed_heap_bitmaps,
                                   GrowableArray<ArchiveHeapBitmapInfo>* open_heap_bitmaps) {
  // Make sure NUM_CDS_REGIONS (exported in cds.h) agrees with
  // MetaspaceShared::n_regions (internal to hotspot).
  assert(NUM_CDS_REGIONS == MetaspaceShared::n_regions, "sanity");

  write_region(mapinfo, MetaspaceShared::rw, &_rw_region, /*read_only=*/false, /*allow_exec=*/false);
  write_region(mapinfo, MetaspaceShared::ro, &_ro_region, /*read_only=*/true, /*allow_exec=*/false);

  size_t bitmap_size_in_bytes;
  char* bitmap = mapinfo->write_bitmap_region(ArchivePtrMarker::ptrmap(), closed_heap_bitmaps, open_heap_bitmaps,
                                              bitmap_size_in_bytes);

  if (closed_heap_regions != NULL) {
    _total_closed_heap_region_size = mapinfo->write_heap_regions(
                                        closed_heap_regions,
                                        closed_heap_bitmaps,
                                        MetaspaceShared::first_closed_heap_region,
                                        MetaspaceShared::max_num_closed_heap_regions);
    _total_open_heap_region_size = mapinfo->write_heap_regions(
                                        open_heap_regions,
                                        open_heap_bitmaps,
                                        MetaspaceShared::first_open_heap_region,
                                        MetaspaceShared::max_num_open_heap_regions);
  }

  print_region_stats(mapinfo, closed_heap_regions, open_heap_regions);

  mapinfo->set_requested_base((char*)MetaspaceShared::requested_base_address());
  mapinfo->set_header_crc(mapinfo->compute_header_crc());
  // After this point, we must not write any data into mapinfo->header(), since that
  // would invalidate the checksum we have just computed.
  mapinfo->write_header();
  mapinfo->close();

  if (log_is_enabled(Info, cds)) {
    print_stats();
  }

  if (log_is_enabled(Info, cds, map)) {
    CDSMapLogger::log(this, mapinfo, closed_heap_regions, open_heap_regions,
                      bitmap, bitmap_size_in_bytes);
  }
  CDS_JAVA_HEAP_ONLY(HeapShared::destroy_archived_object_cache());
  FREE_C_HEAP_ARRAY(char, bitmap);
}

void ArchiveBuilder::write_region(FileMapInfo* mapinfo, int region_idx, DumpRegion* dump_region, bool read_only, bool allow_exec) {
  mapinfo->write_region(region_idx, dump_region->base(), dump_region->used(), read_only, allow_exec);
}

void ArchiveBuilder::print_region_stats(FileMapInfo *mapinfo,
                                        GrowableArray<MemRegion>* closed_heap_regions,
                                        GrowableArray<MemRegion>* open_heap_regions) {
  // Print statistics of all the regions
  const size_t bitmap_used = mapinfo->region_at(MetaspaceShared::bm)->used();
  const size_t bitmap_reserved = mapinfo->region_at(MetaspaceShared::bm)->used_aligned();
  const size_t total_reserved = _ro_region.reserved() + _rw_region.reserved() +
                                bitmap_reserved +
                                _total_closed_heap_region_size +
                                _total_open_heap_region_size;
  const size_t total_bytes = _ro_region.used() + _rw_region.used() +
                             bitmap_used +
                             _total_closed_heap_region_size +
                             _total_open_heap_region_size;
  const double total_u_perc = percent_of(total_bytes, total_reserved);

  _rw_region.print(total_reserved);
  _ro_region.print(total_reserved);

  print_bitmap_region_stats(bitmap_used, total_reserved);

  if (closed_heap_regions != NULL) {
    print_heap_region_stats(closed_heap_regions, "ca", total_reserved);
    print_heap_region_stats(open_heap_regions, "oa", total_reserved);
  }

  log_debug(cds)("total    : " SIZE_FORMAT_W(9) " [100.0%% of total] out of " SIZE_FORMAT_W(9) " bytes [%5.1f%% used]",
                 total_bytes, total_reserved, total_u_perc);
}

void ArchiveBuilder::print_bitmap_region_stats(size_t size, size_t total_size) {
  log_debug(cds)("bm  space: " SIZE_FORMAT_W(9) " [ %4.1f%% of total] out of " SIZE_FORMAT_W(9) " bytes [100.0%% used]",
                 size, size/double(total_size)*100.0, size);
}

void ArchiveBuilder::print_heap_region_stats(GrowableArray<MemRegion>* regions,
                                             const char *name, size_t total_size) {
  int arr_len = regions == NULL ? 0 : regions->length();
  for (int i = 0; i < arr_len; i++) {
      char* start = (char*)regions->at(i).start();
      size_t size = regions->at(i).byte_size();
      char* top = start + size;
      log_debug(cds)("%s%d space: " SIZE_FORMAT_W(9) " [ %4.1f%% of total] out of " SIZE_FORMAT_W(9) " bytes [100.0%% used] at " INTPTR_FORMAT,
                     name, i, size, size/double(total_size)*100.0, size, p2i(start));
  }
}

void ArchiveBuilder::report_out_of_space(const char* name, size_t needed_bytes) {
  // This is highly unlikely to happen on 64-bit platforms, because we have reserved a 4GB space.
  // On 32-bit platforms we reserve only 256MB, so you could run out of space with 100,000
  // classes or so.
  _rw_region.print_out_of_space_msg(name, needed_bytes);
  _ro_region.print_out_of_space_msg(name, needed_bytes);

  vm_exit_during_initialization(err_msg("Unable to allocate from '%s' region", name),
                                "Please reduce the number of shared classes.");
}


#ifndef PRODUCT
void ArchiveBuilder::assert_is_vm_thread() {
  assert(Thread::current()->is_VM_thread(), "ArchiveBuilder should be used only inside the VMThread");
}
#endif