/*
 * Copyright (c) 2020, 2023, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#include "precompiled.hpp"
#include "cds/archiveBuilder.hpp"
#include "cds/archiveHeapWriter.hpp"
#include "cds/archiveUtils.hpp"
#include "cds/cppVtables.hpp"
#include "cds/dumpAllocStats.hpp"
#include "cds/heapShared.hpp"
#include "cds/metaspaceShared.hpp"
#include "classfile/classLoaderDataShared.hpp"
#include "classfile/symbolTable.hpp"
#include "classfile/systemDictionaryShared.hpp"
#include "classfile/vmClasses.hpp"
#include "interpreter/abstractInterpreter.hpp"
#include "logging/log.hpp"
#include "logging/logStream.hpp"
#include "memory/allStatic.hpp"
#include "memory/memRegion.hpp"
#include "memory/resourceArea.hpp"
#include "oops/instanceKlass.hpp"
#include "oops/objArrayKlass.hpp"
#include "oops/objArrayOop.inline.hpp"
#include "oops/oopHandle.inline.hpp"
#include "runtime/arguments.hpp"
#include "runtime/globals_extension.hpp"
#include "runtime/javaThread.hpp"
#include "runtime/sharedRuntime.hpp"
#include "utilities/align.hpp"
#include "utilities/bitMap.inline.hpp"
#include "utilities/formatBuffer.hpp"

ArchiveBuilder* ArchiveBuilder::_current = nullptr;

ArchiveBuilder::OtherROAllocMark::~OtherROAllocMark() {
  char* newtop = ArchiveBuilder::current()->_ro_region.top();
  ArchiveBuilder::alloc_stats()->record_other_type(int(newtop - _oldtop), true);
}

ArchiveBuilder::SourceObjList::SourceObjList() : _ptrmap(16 * K, mtClassShared) {
  _total_bytes = 0;
  _objs = new (mtClassShared) GrowableArray<SourceObjInfo*>(128 * K, mtClassShared);
}

ArchiveBuilder::SourceObjList::~SourceObjList() {
  delete _objs;
}

void ArchiveBuilder::SourceObjList::append(MetaspaceClosure::Ref* enclosing_ref, SourceObjInfo* src_info) {
  // Save this source object for copying
  _objs->append(src_info);

  // Prepare for marking the pointers in this source object
  assert(is_aligned(_total_bytes, sizeof(address)), "must be");
  src_info->set_ptrmap_start(_total_bytes / sizeof(address));
  _total_bytes = align_up(_total_bytes + (uintx)src_info->size_in_bytes(), sizeof(address));
  src_info->set_ptrmap_end(_total_bytes / sizeof(address));

  BitMap::idx_t bitmap_size_needed = BitMap::idx_t(src_info->ptrmap_end());
  if (_ptrmap.size() <= bitmap_size_needed) {
    _ptrmap.resize((bitmap_size_needed + 1) * 2);
  }
}

void ArchiveBuilder::SourceObjList::remember_embedded_pointer(SourceObjInfo* src_info, MetaspaceClosure::Ref* ref) {
  // src_obj contains a pointer. Remember the location of this pointer in _ptrmap,
  // so that we can copy/relocate it later. E.g., if we have
  //     class Foo { intx scalar; Bar* ptr; }
  //     Foo *f = 0x100;
  // To mark the f->ptr pointer on a 64-bit platform, this function is called with
  //     src_info->source_addr() == 0x100
  //     ref->addr() == 0x108
  address src_obj = src_info->source_addr();
  address* field_addr = ref->addr();
  assert(src_info->ptrmap_start() < _total_bytes, "sanity");
  assert(src_info->ptrmap_end() <= _total_bytes, "sanity");
  assert(*field_addr != nullptr, "should have checked");

  intx field_offset_in_bytes = ((address)field_addr) - src_obj;
  DEBUG_ONLY(int src_obj_size = src_info->size_in_bytes();)
  assert(field_offset_in_bytes >= 0, "must be");
  assert(field_offset_in_bytes + intx(sizeof(intptr_t)) <= intx(src_obj_size), "must be");
  assert(is_aligned(field_offset_in_bytes, sizeof(address)), "must be");

  BitMap::idx_t idx = BitMap::idx_t(src_info->ptrmap_start() + (uintx)(field_offset_in_bytes / sizeof(address)));
  _ptrmap.set_bit(BitMap::idx_t(idx));
}

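// Closure applied to the bits recorded by SourceObjList::remember_embedded_pointer().
// Each set bit identifies one pointer-sized field inside a buffered object; do_bit()
// rewrites that field from its "source" address to the corresponding "buffered" address.
// Illustrative sketch (the addresses are made up): continuing the Foo example above, if
// the copy of *f is at buffered address 0x2000 and the copy of *f->ptr is at 0x3000,
// then the field at 0x2008 is overwritten with 0x3000 and the location is marked in the
// archive's pointer bitmap.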
class RelocateEmbeddedPointers : public BitMapClosure {
  ArchiveBuilder* _builder;
  address _buffered_obj;
  BitMap::idx_t _start_idx;
public:
  RelocateEmbeddedPointers(ArchiveBuilder* builder, address buffered_obj, BitMap::idx_t start_idx) :
    _builder(builder), _buffered_obj(buffered_obj), _start_idx(start_idx) {}

  bool do_bit(BitMap::idx_t bit_offset) {
    size_t field_offset = size_t(bit_offset - _start_idx) * sizeof(address);
    address* ptr_loc = (address*)(_buffered_obj + field_offset);

    address old_p = *ptr_loc;
    address new_p = _builder->get_buffered_addr(old_p);

    log_trace(cds)("Ref: [" PTR_FORMAT "] -> " PTR_FORMAT " => " PTR_FORMAT,
                   p2i(ptr_loc), p2i(old_p), p2i(new_p));

    ArchivePtrMarker::set_and_mark_pointer(ptr_loc, new_p);
    return true; // keep iterating the bitmap
  }
};

void ArchiveBuilder::SourceObjList::relocate(int i, ArchiveBuilder* builder) {
  SourceObjInfo* src_info = objs()->at(i);
  assert(src_info->should_copy(), "must be");
  BitMap::idx_t start = BitMap::idx_t(src_info->ptrmap_start()); // inclusive
  BitMap::idx_t end = BitMap::idx_t(src_info->ptrmap_end());     // exclusive

  RelocateEmbeddedPointers relocator(builder, src_info->buffered_addr(), start);
  _ptrmap.iterate(&relocator, start, end);
}

ArchiveBuilder::ArchiveBuilder() :
  _current_dump_space(nullptr),
  _buffer_bottom(nullptr),
  _last_verified_top(nullptr),
  _num_dump_regions_used(0),
  _other_region_used_bytes(0),
  _requested_static_archive_bottom(nullptr),
  _requested_static_archive_top(nullptr),
  _requested_dynamic_archive_bottom(nullptr),
  _requested_dynamic_archive_top(nullptr),
  _mapped_static_archive_bottom(nullptr),
  _mapped_static_archive_top(nullptr),
  _buffer_to_requested_delta(0),
  _rw_region("rw", MAX_SHARED_DELTA),
  _ro_region("ro", MAX_SHARED_DELTA),
  _ptrmap(mtClassShared),
  _rw_src_objs(),
  _ro_src_objs(),
  _src_obj_table(INITIAL_TABLE_SIZE, MAX_TABLE_SIZE),
  _buffered_to_src_table(INITIAL_TABLE_SIZE, MAX_TABLE_SIZE),
  _total_heap_region_size(0),
  _estimated_metaspaceobj_bytes(0),
  _estimated_hashtable_bytes(0)
{
  _klasses = new (mtClassShared) GrowableArray<Klass*>(4 * K, mtClassShared);
  _symbols = new (mtClassShared) GrowableArray<Symbol*>(256 * K, mtClassShared);

  assert(_current == nullptr, "must be");
  _current = this;
}

ArchiveBuilder::~ArchiveBuilder() {
  assert(_current == this, "must be");
  _current = nullptr;

  for (int i = 0; i < _symbols->length(); i++) {
    _symbols->at(i)->decrement_refcount();
  }

  delete _klasses;
  delete _symbols;
  if (_shared_rs.is_reserved()) {
    _shared_rs.release();
  }
}

bool ArchiveBuilder::is_dumping_full_module_graph() {
  return DumpSharedSpaces && MetaspaceShared::use_full_module_graph();
}

class GatherKlassesAndSymbols : public UniqueMetaspaceClosure {
  ArchiveBuilder* _builder;

public:
  GatherKlassesAndSymbols(ArchiveBuilder* builder) : _builder(builder) {}

  virtual bool do_unique_ref(Ref* ref, bool read_only) {
    return _builder->gather_klass_and_symbol(ref, read_only);
  }
};

bool ArchiveBuilder::gather_klass_and_symbol(MetaspaceClosure::Ref* ref, bool read_only) {
  if (ref->obj() == nullptr) {
    return false;
  }
  if (get_follow_mode(ref) != make_a_copy) {
    return false;
  }
  if (ref->msotype() == MetaspaceObj::ClassType) {
    Klass* klass = (Klass*)ref->obj();
    assert(klass->is_klass(), "must be");
    if (!is_excluded(klass)) {
      _klasses->append(klass);
    }
    // See RunTimeClassInfo::get_for()
    _estimated_metaspaceobj_bytes += align_up(BytesPerWord, SharedSpaceObjectAlignment);
  } else if (ref->msotype() == MetaspaceObj::SymbolType) {
    // Make sure the symbol won't be GC'ed while we are dumping the archive.
    Symbol* sym = (Symbol*)ref->obj();
    sym->increment_refcount();
    _symbols->append(sym);
  }

  int bytes = ref->size() * BytesPerWord;
  _estimated_metaspaceobj_bytes += align_up(bytes, SharedSpaceObjectAlignment);

  return true; // recurse
}

void ArchiveBuilder::gather_klasses_and_symbols() {
  ResourceMark rm;
  log_info(cds)("Gathering classes and symbols ... ");
  GatherKlassesAndSymbols doit(this);
  iterate_roots(&doit);
#if INCLUDE_CDS_JAVA_HEAP
  if (is_dumping_full_module_graph()) {
    ClassLoaderDataShared::iterate_symbols(&doit);
  }
#endif
  doit.finish();

  if (DumpSharedSpaces) {
    // To ensure deterministic contents in the static archive, we need to ensure that
    // we iterate the MetaspaceObjs in a deterministic order. It doesn't matter where
    // the MetaspaceObjs are located originally, as they are copied sequentially into
    // the archive during the iteration.
    //
    // The only issue here is that the symbol table and the system dictionaries may be
    // randomly ordered, so we copy the symbols and klasses into two arrays and sort
    // them deterministically.
    //
    // During -Xshare:dump, the order of Symbol creation is strictly determined by
    // the SharedClassListFile (class loading is done in a single thread and the JIT
    // is disabled). Also, Symbols are allocated in monotonically increasing addresses
    // (see Symbol::operator new(size_t, int)). So if we iterate the Symbols by
    // ascending address order, we ensure that all Symbols are copied into deterministic
    // locations in the archive.
    //
    // TODO: in the future, if we want to produce deterministic contents in the
    // dynamic archive, we might need to sort the symbols alphabetically (also see
    // DynamicArchiveBuilder::sort_methods()).
    sort_symbols_and_fix_hash();
    sort_klasses();

    // TODO -- we need a proper estimate for the archived modules, etc,
    // but this should be enough for now
    _estimated_metaspaceobj_bytes += 200 * 1024 * 1024;
  }
}

int ArchiveBuilder::compare_symbols_by_address(Symbol** a, Symbol** b) {
  if (a[0] < b[0]) {
    return -1;
  } else {
    assert(a[0] > b[0], "Duplicated symbol %s unexpected", (*a)->as_C_string());
    return 1;
  }
}

void ArchiveBuilder::sort_symbols_and_fix_hash() {
  log_info(cds)("Sorting symbols and fixing identity hash ... ");
  os::init_random(0x12345678);
  _symbols->sort(compare_symbols_by_address);
  for (int i = 0; i < _symbols->length(); i++) {
    assert(_symbols->at(i)->is_permanent(), "archived symbols must be permanent");
    _symbols->at(i)->update_identity_hash();
  }
}

int ArchiveBuilder::compare_klass_by_name(Klass** a, Klass** b) {
  return a[0]->name()->fast_compare(b[0]->name());
}

void ArchiveBuilder::sort_klasses() {
  log_info(cds)("Sorting classes ... ");
  _klasses->sort(compare_klass_by_name);
}

size_t ArchiveBuilder::estimate_archive_size() {
  // size of the symbol table and two dictionaries, plus the RunTimeClassInfo's
  size_t symbol_table_est = SymbolTable::estimate_size_for_archive();
  size_t dictionary_est = SystemDictionaryShared::estimate_size_for_archive();
  _estimated_hashtable_bytes = symbol_table_est + dictionary_est;

  size_t total = 0;

  total += _estimated_metaspaceobj_bytes;
  total += _estimated_hashtable_bytes;

  // allow fragmentation at the end of each dump region
  total += _total_dump_regions * MetaspaceShared::core_region_alignment();

  log_info(cds)("_estimated_hashtable_bytes = " SIZE_FORMAT " + " SIZE_FORMAT " = " SIZE_FORMAT,
                symbol_table_est, dictionary_est, _estimated_hashtable_bytes);
  log_info(cds)("_estimated_metaspaceobj_bytes = " SIZE_FORMAT, _estimated_metaspaceobj_bytes);
  log_info(cds)("total estimate bytes = " SIZE_FORMAT, total);

  return align_up(total, MetaspaceShared::core_region_alignment());
}

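// Reserve an output buffer large enough (per estimate_archive_size()) to hold the rw/ro
// regions while they are being assembled. The buffer lives wherever the OS places it;
// the pointers inside it are patched later so that the archive works when mapped at its
// "requested" address.
//
// Illustrative sketch (the addresses are made up, not what any particular run produces):
// during a dynamic dump with the static archive mapped at 0x800000000 and spanning
// 0x1000000 bytes, the dynamic archive is requested at
//     align_up(0x801000000, MetaspaceShared::core_region_alignment())
// and _buffer_to_requested_delta = requested_bottom - buffer_bottom is the value that
// RelocateBufferToRequested later adds to every pointer that points into the buffer.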
address ArchiveBuilder::reserve_buffer() {
  size_t buffer_size = estimate_archive_size();
  ReservedSpace rs(buffer_size, MetaspaceShared::core_region_alignment(), os::vm_page_size());
  if (!rs.is_reserved()) {
    log_error(cds)("Failed to reserve " SIZE_FORMAT " bytes of output buffer.", buffer_size);
    MetaspaceShared::unrecoverable_writing_error();
  }

  // buffer_bottom is the lowest address of the 2 core regions (rw, ro) when
  // we are copying the class metadata into the buffer.
  address buffer_bottom = (address)rs.base();
  log_info(cds)("Reserved output buffer space at " PTR_FORMAT " [" SIZE_FORMAT " bytes]",
                p2i(buffer_bottom), buffer_size);
  _shared_rs = rs;

  _buffer_bottom = buffer_bottom;
  _last_verified_top = buffer_bottom;
  _current_dump_space = &_rw_region;
  _num_dump_regions_used = 1;
  _other_region_used_bytes = 0;
  _current_dump_space->init(&_shared_rs, &_shared_vs);

  ArchivePtrMarker::initialize(&_ptrmap, &_shared_vs);

  // The bottom of the static archive should be mapped at this address by default.
  _requested_static_archive_bottom = (address)MetaspaceShared::requested_base_address();

  // The bottom of the archive (that I am writing now) should be mapped at this address by default.
  address my_archive_requested_bottom;

  if (DumpSharedSpaces) {
    my_archive_requested_bottom = _requested_static_archive_bottom;
  } else {
    _mapped_static_archive_bottom = (address)MetaspaceObj::shared_metaspace_base();
    _mapped_static_archive_top = (address)MetaspaceObj::shared_metaspace_top();
    assert(_mapped_static_archive_top >= _mapped_static_archive_bottom, "must be");
    size_t static_archive_size = _mapped_static_archive_top - _mapped_static_archive_bottom;

    // At run time, we will mmap the dynamic archive at my_archive_requested_bottom
    _requested_static_archive_top = _requested_static_archive_bottom + static_archive_size;
    my_archive_requested_bottom = align_up(_requested_static_archive_top, MetaspaceShared::core_region_alignment());

    _requested_dynamic_archive_bottom = my_archive_requested_bottom;
  }

  _buffer_to_requested_delta = my_archive_requested_bottom - _buffer_bottom;

  address my_archive_requested_top = my_archive_requested_bottom + buffer_size;
  if (my_archive_requested_bottom < _requested_static_archive_bottom ||
      my_archive_requested_top <= _requested_static_archive_bottom) {
    // Size overflow.
    log_error(cds)("my_archive_requested_bottom = " INTPTR_FORMAT, p2i(my_archive_requested_bottom));
    log_error(cds)("my_archive_requested_top = " INTPTR_FORMAT, p2i(my_archive_requested_top));
    log_error(cds)("SharedBaseAddress (" INTPTR_FORMAT ") is too high. "
                   "Please rerun java -Xshare:dump with a lower value", p2i(_requested_static_archive_bottom));
    MetaspaceShared::unrecoverable_writing_error();
  }

  if (DumpSharedSpaces) {
    // We don't want any valid object to be at the very bottom of the archive.
    // See ArchivePtrMarker::mark_pointer().
    rw_region()->allocate(16);
  }

  return buffer_bottom;
}

void ArchiveBuilder::iterate_sorted_roots(MetaspaceClosure* it) {
  int num_symbols = _symbols->length();
  for (int i = 0; i < num_symbols; i++) {
    it->push(_symbols->adr_at(i));
  }

  int num_klasses = _klasses->length();
  for (int i = 0; i < num_klasses; i++) {
    it->push(_klasses->adr_at(i));
  }

  iterate_roots(it);
}

class GatherSortedSourceObjs : public MetaspaceClosure {
  ArchiveBuilder* _builder;

public:
  GatherSortedSourceObjs(ArchiveBuilder* builder) : _builder(builder) {}

  virtual bool do_ref(Ref* ref, bool read_only) {
    return _builder->gather_one_source_obj(enclosing_ref(), ref, read_only);
  }

  virtual void do_pending_ref(Ref* ref) {
    if (ref->obj() != nullptr) {
      _builder->remember_embedded_pointer_in_gathered_obj(enclosing_ref(), ref);
    }
  }
};

bool ArchiveBuilder::gather_one_source_obj(MetaspaceClosure::Ref* enclosing_ref,
                                           MetaspaceClosure::Ref* ref, bool read_only) {
  address src_obj = ref->obj();
  if (src_obj == nullptr) {
    return false;
  }
  remember_embedded_pointer_in_gathered_obj(enclosing_ref, ref);

  FollowMode follow_mode = get_follow_mode(ref);
  SourceObjInfo src_info(ref, read_only, follow_mode);
  bool created;
  SourceObjInfo* p = _src_obj_table.put_if_absent(src_obj, src_info, &created);
  if (created) {
    if (_src_obj_table.maybe_grow()) {
      log_info(cds, hashtables)("Expanded _src_obj_table table to %d", _src_obj_table.table_size());
    }
  }

  assert(p->read_only() == src_info.read_only(), "must be");

  if (created && src_info.should_copy()) {
    ref->set_user_data((void*)p);
    if (read_only) {
      _ro_src_objs.append(enclosing_ref, p);
    } else {
      _rw_src_objs.append(enclosing_ref, p);
    }
    return true; // Need to recurse into this ref only if we are copying it
  } else {
    return false;
  }
}

void ArchiveBuilder::remember_embedded_pointer_in_gathered_obj(MetaspaceClosure::Ref* enclosing_ref,
                                                               MetaspaceClosure::Ref* ref) {
  assert(ref->obj() != nullptr, "should have checked");

  if (enclosing_ref != nullptr) {
    SourceObjInfo* src_info = (SourceObjInfo*)enclosing_ref->user_data();
    if (src_info == nullptr) {
      // source objects of point_to_it/set_to_null types are not copied
      // so we don't need to remember their pointers.
    } else {
      if (src_info->read_only()) {
        _ro_src_objs.remember_embedded_pointer(src_info, ref);
      } else {
        _rw_src_objs.remember_embedded_pointer(src_info, ref);
      }
    }
  }
}

void ArchiveBuilder::gather_source_objs() {
  ResourceMark rm;
  log_info(cds)("Gathering all archivable objects ... ");
  gather_klasses_and_symbols();
  GatherSortedSourceObjs doit(this);
  iterate_sorted_roots(&doit);
  doit.finish();
}

bool ArchiveBuilder::is_excluded(Klass* klass) {
  if (klass->is_instance_klass()) {
    InstanceKlass* ik = InstanceKlass::cast(klass);
    return SystemDictionaryShared::is_excluded_class(ik);
  } else if (klass->is_objArray_klass()) {
    if (DynamicDumpSharedSpaces) {
      // Archiving of array klasses is not supported in dynamic dumps for now.
      return true;
    }
    Klass* bottom = ObjArrayKlass::cast(klass)->bottom_klass();
    if (bottom->is_instance_klass()) {
      return SystemDictionaryShared::is_excluded_class(InstanceKlass::cast(bottom));
    }
  }

  return false;
}

ArchiveBuilder::FollowMode ArchiveBuilder::get_follow_mode(MetaspaceClosure::Ref *ref) {
  address obj = ref->obj();
  if (MetaspaceShared::is_in_shared_metaspace(obj)) {
    // Don't dump existing shared metadata again.
    return point_to_it;
  } else if (ref->msotype() == MetaspaceObj::MethodDataType ||
             ref->msotype() == MetaspaceObj::MethodCountersType) {
    return set_to_null;
  } else {
    if (ref->msotype() == MetaspaceObj::ClassType) {
      Klass* klass = (Klass*)ref->obj();
      assert(klass->is_klass(), "must be");
      if (is_excluded(klass)) {
        ResourceMark rm;
        log_debug(cds, dynamic)("Skipping class (excluded): %s", klass->external_name());
        return set_to_null;
      }
    }

    return make_a_copy;
  }
}

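// Finish the current dump region and direct subsequent allocations to 'next'
// (currently the only transition is from the rw region to the ro region).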
void ArchiveBuilder::start_dump_space(DumpRegion* next) {
  address bottom = _last_verified_top;
  address top = (address)(current_dump_space()->top());
  _other_region_used_bytes += size_t(top - bottom);

  current_dump_space()->pack(next);
  _current_dump_space = next;
  _num_dump_regions_used ++;

  _last_verified_top = (address)(current_dump_space()->top());
}

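// Compare an earlier size estimate against the number of bytes actually used since the
// last checkpoint; dumping cannot proceed if the estimate was too small.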
void ArchiveBuilder::verify_estimate_size(size_t estimate, const char* which) {
  address bottom = _last_verified_top;
  address top = (address)(current_dump_space()->top());
  size_t used = size_t(top - bottom) + _other_region_used_bytes;
  int diff = int(estimate) - int(used);

  log_info(cds)("%s estimate = " SIZE_FORMAT " used = " SIZE_FORMAT "; diff = %d bytes", which, estimate, used, diff);
  assert(diff >= 0, "Estimate is too small");

  _last_verified_top = top;
  _other_region_used_bytes = 0;
}

void ArchiveBuilder::dump_rw_metadata() {
  ResourceMark rm;
  log_info(cds)("Allocating RW objects ... ");
  make_shallow_copies(&_rw_region, &_rw_src_objs);

#if INCLUDE_CDS_JAVA_HEAP
  if (is_dumping_full_module_graph()) {
    // Archive the ModuleEntry's and PackageEntry's of the 3 built-in loaders
    char* start = rw_region()->top();
    ClassLoaderDataShared::allocate_archived_tables();
    alloc_stats()->record_modules(rw_region()->top() - start, /*read_only*/false);
  }
#endif
}

void ArchiveBuilder::dump_ro_metadata() {
  ResourceMark rm;
  log_info(cds)("Allocating RO objects ... ");

  start_dump_space(&_ro_region);
  make_shallow_copies(&_ro_region, &_ro_src_objs);

#if INCLUDE_CDS_JAVA_HEAP
  if (is_dumping_full_module_graph()) {
    char* start = ro_region()->top();
    ClassLoaderDataShared::init_archived_tables();
    alloc_stats()->record_modules(ro_region()->top() - start, /*read_only*/true);
  }
#endif
}

void ArchiveBuilder::make_shallow_copies(DumpRegion *dump_region,
                                         const ArchiveBuilder::SourceObjList* src_objs) {
  for (int i = 0; i < src_objs->objs()->length(); i++) {
    make_shallow_copy(dump_region, src_objs->objs()->at(i));
  }
  log_info(cds)("done (%d objects)", src_objs->objs()->length());
}

void ArchiveBuilder::make_shallow_copy(DumpRegion *dump_region, SourceObjInfo* src_info) {
  address src = src_info->source_addr();
  int bytes = src_info->size_in_bytes();
  char* dest;
  char* oldtop;
  char* newtop;

  oldtop = dump_region->top();
  if (src_info->msotype() == MetaspaceObj::ClassType) {
    // Save a pointer immediately in front of an InstanceKlass, so
    // we can do a quick lookup from InstanceKlass* -> RunTimeClassInfo*
    // without building another hashtable. See RunTimeClassInfo::get_for()
    // in systemDictionaryShared.cpp.
    Klass* klass = (Klass*)src;
    if (klass->is_instance_klass()) {
      SystemDictionaryShared::validate_before_archiving(InstanceKlass::cast(klass));
      dump_region->allocate(sizeof(address));
    }
  }
  dest = dump_region->allocate(bytes);
  newtop = dump_region->top();

  memcpy(dest, src, bytes);
  {
    bool created;
    _buffered_to_src_table.put_if_absent((address)dest, src, &created);
    assert(created, "must be");
    if (_buffered_to_src_table.maybe_grow()) {
      log_info(cds, hashtables)("Expanded _buffered_to_src_table table to %d", _buffered_to_src_table.table_size());
    }
  }

  intptr_t* archived_vtable = CppVtables::get_archived_vtable(src_info->msotype(), (address)dest);
  if (archived_vtable != nullptr) {
    *(address*)dest = (address)archived_vtable;
    ArchivePtrMarker::mark_pointer((address*)dest);
  }

  log_trace(cds)("Copy: " PTR_FORMAT " ==> " PTR_FORMAT " %d", p2i(src), p2i(dest), bytes);
  src_info->set_buffered_addr((address)dest);

  _alloc_stats.record(src_info->msotype(), int(newtop - oldtop), src_info->read_only());
}

// This is used by code that hand-assembles data structures, such as the LambdaProxyClassKey, that are
// not handled by MetaspaceClosure.
void ArchiveBuilder::write_pointer_in_buffer(address* ptr_location, address src_addr) {
  assert(is_in_buffer_space(ptr_location), "must be");
  if (src_addr == nullptr) {
    *ptr_location = nullptr;
    ArchivePtrMarker::clear_pointer(ptr_location);
  } else {
    *ptr_location = get_buffered_addr(src_addr);
    ArchivePtrMarker::mark_pointer(ptr_location);
  }
}

address ArchiveBuilder::get_buffered_addr(address src_addr) const {
  SourceObjInfo* p = _src_obj_table.get(src_addr);
  assert(p != nullptr, "must be");

  return p->buffered_addr();
}

address ArchiveBuilder::get_source_addr(address buffered_addr) const {
  assert(is_in_buffer_space(buffered_addr), "must be");
  address* src_p = _buffered_to_src_table.get(buffered_addr);
  assert(src_p != nullptr && *src_p != nullptr, "must be");
  return *src_p;
}

void ArchiveBuilder::relocate_embedded_pointers(ArchiveBuilder::SourceObjList* src_objs) {
  for (int i = 0; i < src_objs->objs()->length(); i++) {
    src_objs->relocate(i, this);
  }
}

void ArchiveBuilder::relocate_metaspaceobj_embedded_pointers() {
  log_info(cds)("Relocating embedded pointers in core regions ... ");
  relocate_embedded_pointers(&_rw_src_objs);
  relocate_embedded_pointers(&_ro_src_objs);
}

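// Scrub the buffered copy of every gathered class so it can be stored in the archive:
// drop the java mirror, remove other unshareable state, and log a per-category summary
// of how many classes were archived.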
void ArchiveBuilder::make_klasses_shareable() {
  int num_instance_klasses = 0;
  int num_boot_klasses = 0;
  int num_platform_klasses = 0;
  int num_app_klasses = 0;
  int num_hidden_klasses = 0;
  int num_unlinked_klasses = 0;
  int num_unregistered_klasses = 0;
  int num_obj_array_klasses = 0;
  int num_type_array_klasses = 0;

  for (int i = 0; i < klasses()->length(); i++) {
    const char* type;
    const char* unlinked = "";
    const char* hidden = "";
    const char* generated = "";
    Klass* k = get_buffered_addr(klasses()->at(i));
    k->remove_java_mirror();
    if (k->is_objArray_klass()) {
      // InstanceKlass and TypeArrayKlass will in turn call remove_unshareable_info
      // on their array classes.
      num_obj_array_klasses ++;
      type = "array";
    } else if (k->is_typeArray_klass()) {
      num_type_array_klasses ++;
      type = "array";
      k->remove_unshareable_info();
    } else {
      assert(k->is_instance_klass(), " must be");
      num_instance_klasses ++;
      InstanceKlass* ik = InstanceKlass::cast(k);
      if (DynamicDumpSharedSpaces) {
        // For static dumps, the class loader type has already been set.
        ik->assign_class_loader_type();
      }
      if (ik->is_shared_boot_class()) {
        type = "boot";
        num_boot_klasses ++;
      } else if (ik->is_shared_platform_class()) {
        type = "plat";
        num_platform_klasses ++;
      } else if (ik->is_shared_app_class()) {
        type = "app";
        num_app_klasses ++;
      } else {
        assert(ik->is_shared_unregistered_class(), "must be");
        type = "unreg";
        num_unregistered_klasses ++;
      }

      if (!ik->is_linked()) {
        num_unlinked_klasses ++;
        unlinked = " ** unlinked";
      }

      if (ik->is_hidden()) {
        num_hidden_klasses ++;
        hidden = " ** hidden";
      }

      if (ik->is_generated_shared_class()) {
        generated = " ** generated";
      }
      MetaspaceShared::rewrite_nofast_bytecodes_and_calculate_fingerprints(Thread::current(), ik);
      ik->remove_unshareable_info();
    }

    if (log_is_enabled(Debug, cds, class)) {
      ResourceMark rm;
      log_debug(cds, class)("klasses[%5d] = " PTR_FORMAT " %-5s %s%s%s%s", i,
                            p2i(to_requested(k)), type, k->external_name(),
                            hidden, unlinked, generated);
    }
  }

  log_info(cds)("Number of classes %d", num_instance_klasses + num_obj_array_klasses + num_type_array_klasses);
  log_info(cds)("    instance classes   = %5d", num_instance_klasses);
  log_info(cds)("      boot             = %5d", num_boot_klasses);
  log_info(cds)("      app              = %5d", num_app_klasses);
  log_info(cds)("      platform         = %5d", num_platform_klasses);
  log_info(cds)("      unregistered     = %5d", num_unregistered_klasses);
  log_info(cds)("      (hidden)         = %5d", num_hidden_klasses);
  log_info(cds)("      (unlinked)       = %5d", num_unlinked_klasses);
  log_info(cds)("    obj array classes  = %5d", num_obj_array_klasses);
  log_info(cds)("    type array classes = %5d", num_type_array_klasses);
  log_info(cds)("               symbols = %5d", _symbols->length());
}

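// Offset helpers: archived pointers can be stored as offsets from the requested bottom
// of the static archive; unlike buffer addresses, such offsets are stable across dump
// runs. buffer_to_offset() handles addresses inside the output buffer; any_to_offset()
// additionally accepts "source" addresses and, during dynamic dumps, addresses inside
// the mapped static archive.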
uintx ArchiveBuilder::buffer_to_offset(address p) const {
  address requested_p = to_requested(p);
  assert(requested_p >= _requested_static_archive_bottom, "must be");
  return requested_p - _requested_static_archive_bottom;
}

uintx ArchiveBuilder::any_to_offset(address p) const {
  if (is_in_mapped_static_archive(p)) {
    assert(DynamicDumpSharedSpaces, "must be");
    return p - _mapped_static_archive_bottom;
  }
  if (!is_in_buffer_space(p)) {
    // p must be a "source" address
    p = get_buffered_addr(p);
  }
  return buffer_to_offset(p);
}

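// Compute the narrowKlass value that will decode to this class once the static archive
// is mapped at its requested base address. A minimal usage sketch (assuming 'ik' is an
// InstanceKlass that has been gathered for copying):
//
//   narrowKlass nk = ArchiveBuilder::current()->get_requested_narrow_klass(ik);
//
// The resulting value can be stored directly in archived data that needs a compressed
// class pointer.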
narrowKlass ArchiveBuilder::get_requested_narrow_klass(Klass* k) {
  assert(DumpSharedSpaces, "sanity");
  k = get_buffered_klass(k);
  Klass* requested_k = to_requested(k);
  return CompressedKlassPointers::encode_not_null(requested_k, _requested_static_archive_bottom);
}

// RelocateBufferToRequested --- Relocate all the pointers in rw/ro,
// so that the archive can be mapped to the "requested" location without runtime relocation.
//
// - See ArchiveBuilder header for the definition of "buffer", "mapped" and "requested"
// - ArchivePtrMarker::ptrmap() marks all the pointers in the rw/ro regions
// - Every pointer must have one of the following values:
//   [a] nullptr:
//       No relocation is needed. Remove this pointer from ptrmap so we don't need to
//       consider it at runtime.
//   [b] Points into an object X which is inside the buffer:
//       Adjust this pointer by _buffer_to_requested_delta, so it points to X
//       when the archive is mapped at the requested location.
//   [c] Points into an object Y which is inside mapped static archive:
//       - This happens only during dynamic dump
//       - Adjust this pointer by _mapped_to_requested_static_archive_delta,
//         so it points to Y when the static archive is mapped at the requested location.
template <bool STATIC_DUMP>
class RelocateBufferToRequested : public BitMapClosure {
  ArchiveBuilder* _builder;
  address _buffer_bottom;
  intx _buffer_to_requested_delta;
  intx _mapped_to_requested_static_archive_delta;
  size_t _max_non_null_offset;

public:
  RelocateBufferToRequested(ArchiveBuilder* builder) {
    _builder = builder;
    _buffer_bottom = _builder->buffer_bottom();
    _buffer_to_requested_delta = builder->buffer_to_requested_delta();
    _mapped_to_requested_static_archive_delta = builder->requested_static_archive_bottom() - builder->mapped_static_archive_bottom();
    _max_non_null_offset = 0;

    address bottom = _builder->buffer_bottom();
    address top = _builder->buffer_top();
    address new_bottom = bottom + _buffer_to_requested_delta;
    address new_top = top + _buffer_to_requested_delta;
    log_debug(cds)("Relocating archive from [" INTPTR_FORMAT " - " INTPTR_FORMAT "] to "
                   "[" INTPTR_FORMAT " - " INTPTR_FORMAT "]",
                   p2i(bottom), p2i(top),
                   p2i(new_bottom), p2i(new_top));
  }

  bool do_bit(size_t offset) {
    address* p = (address*)_buffer_bottom + offset;
    assert(_builder->is_in_buffer_space(p), "pointer must live in buffer space");

    if (*p == nullptr) {
      // Null pointers need no relocation; clear the bit so the runtime won't look at this entry.
      ArchivePtrMarker::ptrmap()->clear_bit(offset);
    } else {
      if (STATIC_DUMP) {
        assert(_builder->is_in_buffer_space(*p), "old pointer must point inside buffer space");
        *p += _buffer_to_requested_delta;
        assert(_builder->is_in_requested_static_archive(*p), "new pointer must point inside requested archive");
      } else {
        if (_builder->is_in_buffer_space(*p)) {
          *p += _buffer_to_requested_delta;
          // assert is in requested dynamic archive
        } else {
          assert(_builder->is_in_mapped_static_archive(*p), "old pointer must point inside buffer space or mapped static archive");
          *p += _mapped_to_requested_static_archive_delta;
          assert(_builder->is_in_requested_static_archive(*p), "new pointer must point inside requested archive");
        }
      }
      _max_non_null_offset = offset;
    }

    return true; // keep iterating
  }

  void doit() {
    ArchivePtrMarker::ptrmap()->iterate(this);
    ArchivePtrMarker::compact(_max_non_null_offset);
  }
};


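// Patch every marked pointer in the rw/ro regions so that the finished archive can be
// mapped at its requested address and used without any further pointer relocation.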
void ArchiveBuilder::relocate_to_requested() {
  ro_region()->pack();

  size_t my_archive_size = buffer_top() - buffer_bottom();

  if (DumpSharedSpaces) {
    _requested_static_archive_top = _requested_static_archive_bottom + my_archive_size;
    RelocateBufferToRequested<true> patcher(this);
    patcher.doit();
  } else {
    assert(DynamicDumpSharedSpaces, "must be");
    _requested_dynamic_archive_top = _requested_dynamic_archive_bottom + my_archive_size;
    RelocateBufferToRequested<false> patcher(this);
    patcher.doit();
  }
}

// Write detailed info to a mapfile to analyze contents of the archive.
// static dump:
//   java -Xshare:dump -Xlog:cds+map=trace:file=cds.map:none:filesize=0
// dynamic dump:
//   java -cp MyApp.jar -XX:ArchiveClassesAtExit=MyApp.jsa \
//        -Xlog:cds+map=trace:file=cds.map:none:filesize=0 MyApp
//
// We need to do some address translation because the buffers used at dump time may be mapped to
// a different location at runtime. At dump time, the buffers may be at arbitrary locations
// picked by the OS. At runtime, we try to map at a fixed location (SharedBaseAddress). For
// consistency, we log everything using runtime addresses.
class ArchiveBuilder::CDSMapLogger : AllStatic {
  static intx buffer_to_runtime_delta() {
    // Translate the buffers used by the RW/RO regions to their eventual (requested) locations
    // at runtime.
    return ArchiveBuilder::current()->buffer_to_requested_delta();
  }

  // rw/ro regions only
  static void log_metaspace_region(const char* name, DumpRegion* region,
                                   const ArchiveBuilder::SourceObjList* src_objs) {
    address region_base = address(region->base());
    address region_top = address(region->top());
    log_region(name, region_base, region_top, region_base + buffer_to_runtime_delta());
    log_metaspace_objects(region, src_objs);
  }

#define _LOG_PREFIX PTR_FORMAT ": @@ %-17s %d"

  static void log_klass(Klass* k, address runtime_dest, const char* type_name, int bytes, Thread* current) {
    ResourceMark rm(current);
    log_debug(cds, map)(_LOG_PREFIX " %s",
                        p2i(runtime_dest), type_name, bytes, k->external_name());
  }
  static void log_method(Method* m, address runtime_dest, const char* type_name, int bytes, Thread* current) {
    ResourceMark rm(current);
    log_debug(cds, map)(_LOG_PREFIX " %s",
                        p2i(runtime_dest), type_name, bytes, m->external_name());
  }

  // rw/ro regions only
  static void log_metaspace_objects(DumpRegion* region, const ArchiveBuilder::SourceObjList* src_objs) {
    address last_obj_base = address(region->base());
    address last_obj_end = address(region->base());
    address region_end = address(region->end());
    Thread* current = Thread::current();
    for (int i = 0; i < src_objs->objs()->length(); i++) {
      SourceObjInfo* src_info = src_objs->at(i);
      address src = src_info->source_addr();
      address dest = src_info->buffered_addr();
      log_data(last_obj_base, dest, last_obj_base + buffer_to_runtime_delta());
      address runtime_dest = dest + buffer_to_runtime_delta();
      int bytes = src_info->size_in_bytes();

      MetaspaceObj::Type type = src_info->msotype();
      const char* type_name = MetaspaceObj::type_name(type);

      switch (type) {
      case MetaspaceObj::ClassType:
        log_klass((Klass*)src, runtime_dest, type_name, bytes, current);
        break;
      case MetaspaceObj::ConstantPoolType:
        log_klass(((ConstantPool*)src)->pool_holder(),
                  runtime_dest, type_name, bytes, current);
        break;
      case MetaspaceObj::ConstantPoolCacheType:
        log_klass(((ConstantPoolCache*)src)->constant_pool()->pool_holder(),
                  runtime_dest, type_name, bytes, current);
        break;
      case MetaspaceObj::MethodType:
        log_method((Method*)src, runtime_dest, type_name, bytes, current);
        break;
      case MetaspaceObj::ConstMethodType:
        log_method(((ConstMethod*)src)->method(), runtime_dest, type_name, bytes, current);
        break;
      case MetaspaceObj::SymbolType:
        {
          ResourceMark rm(current);
          Symbol* s = (Symbol*)src;
          log_debug(cds, map)(_LOG_PREFIX " %s", p2i(runtime_dest), type_name, bytes,
                              s->as_quoted_ascii());
        }
        break;
      default:
        log_debug(cds, map)(_LOG_PREFIX, p2i(runtime_dest), type_name, bytes);
        break;
      }

      last_obj_base = dest;
      last_obj_end = dest + bytes;
    }

    log_data(last_obj_base, last_obj_end, last_obj_base + buffer_to_runtime_delta());
    if (last_obj_end < region_end) {
      log_debug(cds, map)(PTR_FORMAT ": @@ Misc data " SIZE_FORMAT " bytes",
                          p2i(last_obj_end + buffer_to_runtime_delta()),
                          size_t(region_end - last_obj_end));
      log_data(last_obj_end, region_end, last_obj_end + buffer_to_runtime_delta());
    }
  }

#undef _LOG_PREFIX

  // Log information about a region, whose address at dump time is [base .. top). At
  // runtime, this region will be mapped to requested_base. requested_base is 0 if this
  // region will be mapped at os-selected addresses (such as the bitmap region), or will
  // be accessed with os::read (the header).
  //
  // Note: across -Xshare:dump runs, base may be different, but requested_base should
  // be the same, as the archive contents should be deterministic.
  static void log_region(const char* name, address base, address top, address requested_base) {
    size_t size = top - base;
    base = requested_base;
    top = requested_base + size;
    log_info(cds, map)("[%-18s " PTR_FORMAT " - " PTR_FORMAT " " SIZE_FORMAT_W(9) " bytes]",
                       name, p2i(base), p2i(top), size);
  }

#if INCLUDE_CDS_JAVA_HEAP
  static void log_heap_region(ArchiveHeapInfo* heap_info) {
    MemRegion r = heap_info->memregion();
    address start = address(r.start());
    address end = address(r.end());
    log_region("heap", start, end, to_requested(start));

    while (start < end) {
      size_t byte_size;
      oop original_oop = ArchiveHeapWriter::buffered_addr_to_source_obj(start);
      if (original_oop != nullptr) {
        ResourceMark rm;
        log_info(cds, map)(PTR_FORMAT ": @@ Object %s",
                           p2i(to_requested(start)), original_oop->klass()->external_name());
        byte_size = original_oop->size() * BytesPerWord;
      } else if (start == ArchiveHeapWriter::buffered_heap_roots_addr()) {
        // HeapShared::roots() is copied specially so it doesn't exist in
        // HeapShared::OriginalObjectTable. See HeapShared::copy_roots().
        log_info(cds, map)(PTR_FORMAT ": @@ Object HeapShared::roots (ObjArray)",
                           p2i(to_requested(start)));
        byte_size = ArchiveHeapWriter::heap_roots_word_size() * BytesPerWord;
      } else {
        // We have reached the end of the region, but have some unused space
        // at the end.
        log_info(cds, map)(PTR_FORMAT ": @@ Unused heap space " SIZE_FORMAT " bytes",
                           p2i(to_requested(start)), size_t(end - start));
        log_data(start, end, to_requested(start), /*is_heap=*/true);
        break;
      }
      address oop_end = start + byte_size;
      log_data(start, oop_end, to_requested(start), /*is_heap=*/true);
      start = oop_end;
    }
  }

  static address to_requested(address p) {
    return ArchiveHeapWriter::buffered_addr_to_requested_addr(p);
  }
#endif

  // Log all the data [base...top). Pretend that the base address
  // will be mapped to requested_base at run-time.
  static void log_data(address base, address top, address requested_base, bool is_heap = false) {
    assert(top >= base, "must be");

    LogStreamHandle(Trace, cds, map) lsh;
    if (lsh.is_enabled()) {
      int unitsize = sizeof(address);
      if (is_heap && UseCompressedOops) {
        // This makes the compressed oop pointers easier to read, but
        // longs and doubles will be split into two words.
        unitsize = sizeof(narrowOop);
      }
      os::print_hex_dump(&lsh, base, top, unitsize, 32, requested_base);
    }
  }

  static void log_header(FileMapInfo* mapinfo) {
    LogStreamHandle(Info, cds, map) lsh;
    if (lsh.is_enabled()) {
      mapinfo->print(&lsh);
    }
  }

public:
  static void log(ArchiveBuilder* builder, FileMapInfo* mapinfo,
                  ArchiveHeapInfo* heap_info,
                  char* bitmap, size_t bitmap_size_in_bytes) {
    log_info(cds, map)("%s CDS archive map for %s", DumpSharedSpaces ? "Static" : "Dynamic", mapinfo->full_path());

    address header = address(mapinfo->header());
    address header_end = header + mapinfo->header()->header_size();
    log_region("header", header, header_end, 0);
    log_header(mapinfo);
    log_data(header, header_end, 0);

    DumpRegion* rw_region = &builder->_rw_region;
    DumpRegion* ro_region = &builder->_ro_region;

    log_metaspace_region("rw region", rw_region, &builder->_rw_src_objs);
    log_metaspace_region("ro region", ro_region, &builder->_ro_src_objs);

    address bitmap_end = address(bitmap + bitmap_size_in_bytes);
    log_region("bitmap", address(bitmap), bitmap_end, 0);
    log_data((address)bitmap, bitmap_end, 0);

#if INCLUDE_CDS_JAVA_HEAP
    if (heap_info->is_used()) {
      log_heap_region(heap_info);
    }
#endif

    log_info(cds, map)("[End of CDS archive map]");
  }
}; // end ArchiveBuilder::CDSMapLogger

void ArchiveBuilder::print_stats() {
  _alloc_stats.print_stats(int(_ro_region.used()), int(_rw_region.used()));
}

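// Write the rw/ro regions, the bitmap region, and (if used) the heap region into the
// archive file, then compute the header checksum and write out the header.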
void ArchiveBuilder::write_archive(FileMapInfo* mapinfo, ArchiveHeapInfo* heap_info) {
  // Make sure NUM_CDS_REGIONS (exported in cds.h) agrees with
  // MetaspaceShared::n_regions (internal to hotspot).
  assert(NUM_CDS_REGIONS == MetaspaceShared::n_regions, "sanity");

  write_region(mapinfo, MetaspaceShared::rw, &_rw_region, /*read_only=*/false,/*allow_exec=*/false);
  write_region(mapinfo, MetaspaceShared::ro, &_ro_region, /*read_only=*/true, /*allow_exec=*/false);

  size_t bitmap_size_in_bytes;
  char* bitmap = mapinfo->write_bitmap_region(ArchivePtrMarker::ptrmap(), heap_info,
                                              bitmap_size_in_bytes);

  if (heap_info->is_used()) {
    _total_heap_region_size = mapinfo->write_heap_region(heap_info);
  }

  print_region_stats(mapinfo, heap_info);

  mapinfo->set_requested_base((char*)MetaspaceShared::requested_base_address());
  mapinfo->set_header_crc(mapinfo->compute_header_crc());
  // After this point, we should not write any data into mapinfo->header(), since that
  // would invalidate the checksum we have just computed.
  mapinfo->write_header();
  mapinfo->close();

  if (log_is_enabled(Info, cds)) {
    print_stats();
  }

  if (log_is_enabled(Info, cds, map)) {
    CDSMapLogger::log(this, mapinfo, heap_info,
                      bitmap, bitmap_size_in_bytes);
  }
  CDS_JAVA_HEAP_ONLY(HeapShared::destroy_archived_object_cache());
  FREE_C_HEAP_ARRAY(char, bitmap);
}

void ArchiveBuilder::write_region(FileMapInfo* mapinfo, int region_idx, DumpRegion* dump_region, bool read_only, bool allow_exec) {
  mapinfo->write_region(region_idx, dump_region->base(), dump_region->used(), read_only, allow_exec);
}

void ArchiveBuilder::print_region_stats(FileMapInfo *mapinfo, ArchiveHeapInfo* heap_info) {
  // Print statistics of all the regions
  const size_t bitmap_used = mapinfo->region_at(MetaspaceShared::bm)->used();
  const size_t bitmap_reserved = mapinfo->region_at(MetaspaceShared::bm)->used_aligned();
  const size_t total_reserved = _ro_region.reserved() + _rw_region.reserved() +
                                bitmap_reserved +
                                _total_heap_region_size;
  const size_t total_bytes = _ro_region.used() + _rw_region.used() +
                             bitmap_used +
                             _total_heap_region_size;
  const double total_u_perc = percent_of(total_bytes, total_reserved);

  _rw_region.print(total_reserved);
  _ro_region.print(total_reserved);

  print_bitmap_region_stats(bitmap_used, total_reserved);

  if (heap_info->is_used()) {
    print_heap_region_stats(heap_info, total_reserved);
  }

  log_debug(cds)("total    : " SIZE_FORMAT_W(9) " [100.0%% of total] out of " SIZE_FORMAT_W(9) " bytes [%5.1f%% used]",
                 total_bytes, total_reserved, total_u_perc);
}

void ArchiveBuilder::print_bitmap_region_stats(size_t size, size_t total_size) {
  log_debug(cds)("bm  space: " SIZE_FORMAT_W(9) " [ %4.1f%% of total] out of " SIZE_FORMAT_W(9) " bytes [100.0%% used]",
                 size, size/double(total_size)*100.0, size);
}

void ArchiveBuilder::print_heap_region_stats(ArchiveHeapInfo *info, size_t total_size) {
  char* start = info->start();
  size_t size = info->byte_size();
  char* top = start + size;
  log_debug(cds)("hp  space: " SIZE_FORMAT_W(9) " [ %4.1f%% of total] out of " SIZE_FORMAT_W(9) " bytes [100.0%% used] at " INTPTR_FORMAT,
                 size, size/double(total_size)*100.0, size, p2i(start));
}

void ArchiveBuilder::report_out_of_space(const char* name, size_t needed_bytes) {
  // This is highly unlikely to happen on 64-bits because we have reserved a 4GB space.
  // On 32-bit we reserve only 256MB so you could run out of space with 100,000 classes
  // or so.
  _rw_region.print_out_of_space_msg(name, needed_bytes);
  _ro_region.print_out_of_space_msg(name, needed_bytes);

  log_error(cds)("Unable to allocate from '%s' region: Please reduce the number of shared classes.", name);
  MetaspaceShared::unrecoverable_writing_error();
}


#ifndef PRODUCT
void ArchiveBuilder::assert_is_vm_thread() {
  assert(Thread::current()->is_VM_thread(), "ArchiveBuilder should be used only inside the VMThread");
}
#endif