/*
 * Copyright (c) 2020, 2026, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#include "cds/aotArtifactFinder.hpp"
#include "cds/aotClassLinker.hpp"
#include "cds/aotCompressedPointers.hpp"
#include "cds/aotLogging.hpp"
#include "cds/aotMapLogger.hpp"
#include "cds/aotMetaspace.hpp"
#include "cds/archiveBuilder.hpp"
#include "cds/archiveUtils.hpp"
#include "cds/cdsConfig.hpp"
#include "cds/cppVtables.hpp"
#include "cds/dumpAllocStats.hpp"
#include "cds/dynamicArchive.hpp"
#include "cds/heapShared.hpp"
#include "cds/regeneratedClasses.hpp"
#include "classfile/classLoader.hpp"
#include "classfile/classLoaderDataShared.hpp"
#include "classfile/javaClasses.hpp"
#include "classfile/symbolTable.hpp"
#include "classfile/systemDictionaryShared.hpp"
#include "classfile/vmClasses.hpp"
#include "code/aotCodeCache.hpp"
#include "interpreter/abstractInterpreter.hpp"
#include "jvm.h"
#include "logging/log.hpp"
#include "memory/allStatic.hpp"
#include "memory/memoryReserver.hpp"
#include "memory/memRegion.hpp"
#include "memory/resourceArea.hpp"
#include "oops/compressedKlass.inline.hpp"
#include "oops/instanceKlass.hpp"
#include "oops/methodCounters.hpp"
#include "oops/methodData.hpp"
#include "oops/objArrayKlass.hpp"
#include "oops/objArrayOop.inline.hpp"
#include "oops/oopHandle.inline.hpp"
#include "oops/trainingData.hpp"
#include "runtime/arguments.hpp"
#include "runtime/globals_extension.hpp"
#include "runtime/javaThread.hpp"
#include "runtime/sharedRuntime.hpp"
#include "utilities/align.hpp"
#include "utilities/bitMap.inline.hpp"
#include "utilities/formatBuffer.hpp"

ArchiveBuilder* ArchiveBuilder::_current = nullptr;

ArchiveBuilder::OtherROAllocMark::~OtherROAllocMark() {
  char* newtop = ArchiveBuilder::current()->_ro_region.top();
  ArchiveBuilder::alloc_stats()->record_other_type(int(newtop - _oldtop), true);
}

ArchiveBuilder::SourceObjList::SourceObjList() : _ptrmap(16 * K, mtClassShared) {
  _total_bytes = 0;
  _objs = new (mtClassShared) GrowableArray<SourceObjInfo*>(128 * K, mtClassShared);
}

ArchiveBuilder::SourceObjList::~SourceObjList() {
  delete _objs;
}

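// Note on the _ptrmap layout (illustrative sizes, assuming 64-bit pointers):
// the bitmap spans the concatenation of all appended source objects, one bit
// per pointer-sized word. Appending a 24-byte object and then a 16-byte object
// gives the first object bits [0, 3) and the second bits [3, 5). A set bit i
// means that the word at byte offset (i - ptrmap_start()) * sizeof(address)
// inside the owning object holds an embedded pointer to be relocated later.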
void ArchiveBuilder::SourceObjList::append(SourceObjInfo* src_info) {
  // Save this source object for copying
  src_info->set_id(_objs->length());
  _objs->append(src_info);

  // Prepare for marking the pointers in this source object
  assert(is_aligned(_total_bytes, sizeof(address)), "must be");
  src_info->set_ptrmap_start(_total_bytes / sizeof(address));
  _total_bytes = align_up(_total_bytes + (uintx)src_info->size_in_bytes(), sizeof(address));
  src_info->set_ptrmap_end(_total_bytes / sizeof(address));

  BitMap::idx_t bitmap_size_needed = BitMap::idx_t(src_info->ptrmap_end());
  if (_ptrmap.size() <= bitmap_size_needed) {
    _ptrmap.resize((bitmap_size_needed + 1) * 2);
  }
}

void ArchiveBuilder::SourceObjList::remember_embedded_pointer(SourceObjInfo* src_info, MetaspaceClosure::Ref* ref) {
  // src_obj contains a pointer. Remember the location of this pointer in _ptrmap,
  // so that we can copy/relocate it later.
  src_info->set_has_embedded_pointer();
  address src_obj = src_info->source_addr();
  address* field_addr = ref->addr();
  assert(src_info->ptrmap_start() < _total_bytes, "sanity");
  assert(src_info->ptrmap_end() <= _total_bytes, "sanity");
  assert(*field_addr != nullptr, "should have checked");

  intx field_offset_in_bytes = ((address)field_addr) - src_obj;
  DEBUG_ONLY(int src_obj_size = src_info->size_in_bytes();)
  assert(field_offset_in_bytes >= 0, "must be");
  assert(field_offset_in_bytes + intx(sizeof(intptr_t)) <= intx(src_obj_size), "must be");
  assert(is_aligned(field_offset_in_bytes, sizeof(address)), "must be");

  BitMap::idx_t idx = BitMap::idx_t(src_info->ptrmap_start() + (uintx)(field_offset_in_bytes / sizeof(address)));
  _ptrmap.set_bit(BitMap::idx_t(idx));
}

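// Maps each set bit of an object's slice of the bitmap back to the pointer
// field it marks: the field lives at byte offset
// (bit_offset - _start_idx) * sizeof(address) within the buffered copy of the
// object. E.g. (hypothetical numbers) bit 4 of an object whose slice starts
// at index 3 denotes the pointer stored at byte offset 8 of its buffered copy.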
class RelocateEmbeddedPointers : public BitMapClosure {
  ArchiveBuilder* _builder;
  address _buffered_obj;
  BitMap::idx_t _start_idx;
public:
  RelocateEmbeddedPointers(ArchiveBuilder* builder, address buffered_obj, BitMap::idx_t start_idx) :
    _builder(builder), _buffered_obj(buffered_obj), _start_idx(start_idx) {}

  bool do_bit(BitMap::idx_t bit_offset) {
    size_t field_offset = size_t(bit_offset - _start_idx) * sizeof(address);
    address* ptr_loc = (address*)(_buffered_obj + field_offset);

    address old_p_with_tags = *ptr_loc;
    assert(old_p_with_tags != nullptr, "null ptrs shouldn't have been marked");

    address old_p = MetaspaceClosure::strip_tags(old_p_with_tags);
    uintx tags = MetaspaceClosure::decode_tags(old_p_with_tags);
    address new_p = _builder->get_buffered_addr(old_p);

    bool nulled;
    if (new_p == nullptr) {
      // old_p had a FollowMode of set_to_null
      nulled = true;
    } else {
      new_p = MetaspaceClosure::add_tags(new_p, tags);
      nulled = false;
    }

    log_trace(aot)("Ref: [" PTR_FORMAT "] -> " PTR_FORMAT " => " PTR_FORMAT " %zu",
                   p2i(ptr_loc), p2i(old_p) + tags, p2i(new_p), tags);

    ArchivePtrMarker::set_and_mark_pointer(ptr_loc, new_p);
    ArchiveBuilder::current()->count_relocated_pointer(tags != 0, nulled);
    return true; // keep iterating the bitmap
  }
};

void ArchiveBuilder::SourceObjList::relocate(int i, ArchiveBuilder* builder) {
  SourceObjInfo* src_info = objs()->at(i);
  assert(src_info->should_copy(), "must be");
  BitMap::idx_t start = BitMap::idx_t(src_info->ptrmap_start()); // inclusive
  BitMap::idx_t end = BitMap::idx_t(src_info->ptrmap_end());     // exclusive

  RelocateEmbeddedPointers relocator(builder, src_info->buffered_addr(), start);
  _ptrmap.iterate(&relocator, start, end);
}

ArchiveBuilder::ArchiveBuilder() :
  _current_dump_region(nullptr),
  _buffer_bottom(nullptr),
  _requested_static_archive_bottom(nullptr),
  _requested_static_archive_top(nullptr),
  _requested_dynamic_archive_bottom(nullptr),
  _requested_dynamic_archive_top(nullptr),
  _mapped_static_archive_bottom(nullptr),
  _mapped_static_archive_top(nullptr),
  _buffer_to_requested_delta(0),
  _pz_region("pz"), // protection zone -- used only during dumping; does NOT exist in cds archive.
  _rw_region("rw"),
  _ro_region("ro"),
  _ac_region("ac"),
  _ptrmap(mtClassShared),
  _rw_ptrmap(mtClassShared),
  _ro_ptrmap(mtClassShared),
  _rw_src_objs(),
  _ro_src_objs(),
  _src_obj_table(INITIAL_TABLE_SIZE, MAX_TABLE_SIZE),
  _buffered_to_src_table(INITIAL_TABLE_SIZE, MAX_TABLE_SIZE),
  _total_heap_region_size(0)
{
  _klasses = new (mtClassShared) GrowableArray<Klass*>(4 * K, mtClassShared);
  _symbols = new (mtClassShared) GrowableArray<Symbol*>(256 * K, mtClassShared);
  _entropy_seed = 0x12345678;
  _relocated_ptr_info._num_ptrs = 0;
  _relocated_ptr_info._num_tagged_ptrs = 0;
  _relocated_ptr_info._num_nulled_ptrs = 0;
  assert(_current == nullptr, "must be");
  _current = this;
}

ArchiveBuilder::~ArchiveBuilder() {
  assert(_current == this, "must be");
  _current = nullptr;

  for (int i = 0; i < _symbols->length(); i++) {
    _symbols->at(i)->decrement_refcount();
  }

  delete _klasses;
  delete _symbols;
  if (_shared_rs.is_reserved()) {
    MemoryReserver::release(_shared_rs);
  }

  AOTArtifactFinder::dispose();
}

// Returns a deterministic sequence of pseudo random numbers. The main purpose is NOT
// for randomness but to get good entropy for the identity_hash() of archived Symbols,
// while keeping the contents of static CDS archives deterministic to ensure
// reproducibility of JDK builds.
int ArchiveBuilder::entropy() {
  assert(SafepointSynchronize::is_at_safepoint(), "needed to ensure deterministic sequence");
  _entropy_seed = os::next_random(_entropy_seed);
  return static_cast<int>(_entropy_seed);
}

class GatherKlassesAndSymbols : public UniqueMetaspaceClosure {
  ArchiveBuilder* _builder;

public:
  GatherKlassesAndSymbols(ArchiveBuilder* builder) : _builder(builder) {}

  virtual bool do_unique_ref(Ref* ref, bool read_only) {
    return _builder->gather_klass_and_symbol(ref, read_only);
  }
};

bool ArchiveBuilder::gather_klass_and_symbol(MetaspaceClosure::Ref* ref, bool read_only) {
  if (ref->obj() == nullptr) {
    return false;
  }
  if (get_follow_mode(ref) != make_a_copy) {
    return false;
  }
  if (ref->type() == MetaspaceClosureType::ClassType) {
    Klass* klass = (Klass*)ref->obj();
    assert(klass->is_klass(), "must be");
    if (!is_excluded(klass)) {
      _klasses->append(klass);
      if (klass->is_hidden()) {
        assert(klass->is_instance_klass(), "must be");
      }
    }
  } else if (ref->type() == MetaspaceClosureType::SymbolType) {
    // Make sure the symbol won't be GC'ed while we are dumping the archive.
    Symbol* sym = (Symbol*)ref->obj();
    sym->increment_refcount();
    _symbols->append(sym);
  }

  return true; // recurse
}

void ArchiveBuilder::gather_klasses_and_symbols() {
  ResourceMark rm;

  AOTArtifactFinder::initialize();
  AOTArtifactFinder::find_artifacts();

  aot_log_info(aot)("Gathering classes and symbols ... ");
  GatherKlassesAndSymbols doit(this);
  iterate_roots(&doit);
  doit.finish();

  if (CDSConfig::is_dumping_static_archive()) {
    // To ensure deterministic contents in the static archive, we need to ensure that
    // we iterate the MetaspaceObjs in a deterministic order. It doesn't matter where
    // the MetaspaceObjs are located originally, as they are copied sequentially into
    // the archive during the iteration.
    //
    // The only issue here is that the symbol table and the system dictionaries may be
    // randomly ordered, so we copy the symbols and klasses into two arrays and sort
    // them deterministically.
    //
    // During -Xshare:dump, the order of Symbol creation is strictly determined by
    // the SharedClassListFile (class loading is done in a single thread and the JIT
    // is disabled). Also, Symbols are allocated in monotonically increasing addresses
    // (see Symbol::operator new(size_t, int)). So if we iterate the Symbols by
    // ascending address order, we ensure that all Symbols are copied into deterministic
    // locations in the archive.
    //
    // TODO: in the future, if we want to produce deterministic contents in the
    // dynamic archive, we might need to sort the symbols alphabetically (also see
    // DynamicArchiveBuilder::sort_methods()).
    aot_log_info(aot)("Sorting symbols ... ");
    _symbols->sort(compare_symbols_by_address);
    sort_klasses();
  }

  AOTClassLinker::add_candidates();
}

int ArchiveBuilder::compare_symbols_by_address(Symbol** a, Symbol** b) {
  if (a[0] < b[0]) {
    return -1;
  } else {
    assert(a[0] > b[0], "Duplicated symbol %s unexpected", (*a)->as_C_string());
    return 1;
  }
}

int ArchiveBuilder::compare_klass_by_name(Klass** a, Klass** b) {
  return a[0]->name()->fast_compare(b[0]->name());
}

void ArchiveBuilder::sort_klasses() {
  aot_log_info(aot)("Sorting classes ... ");
  _klasses->sort(compare_klass_by_name);
}

address ArchiveBuilder::reserve_buffer() {
  // AOTCodeCache::max_aot_code_size() accounts for aot code region.
  size_t buffer_size = LP64_ONLY(CompressedClassSpaceSize) NOT_LP64(256 * M) + AOTCodeCache::max_aot_code_size();
  ReservedSpace rs = MemoryReserver::reserve(buffer_size,
                                             AOTMetaspace::core_region_alignment(),
                                             os::vm_page_size(),
                                             mtNone);
  if (!rs.is_reserved()) {
    aot_log_error(aot)("Failed to reserve %zu bytes of output buffer.", buffer_size);
    AOTMetaspace::unrecoverable_writing_error();
  }

  // buffer_bottom is the lowest address of the 2 core regions (rw, ro) when
  // we are copying the class metadata into the buffer.
  address buffer_bottom = (address)rs.base();
  aot_log_info(aot)("Reserved output buffer space at " PTR_FORMAT " [%zu bytes]",
                    p2i(buffer_bottom), buffer_size);
  _shared_rs = rs;

  _buffer_bottom = buffer_bottom;

  if (CDSConfig::is_dumping_static_archive()) {
    _current_dump_region = &_pz_region;
  } else {
    _current_dump_region = &_rw_region;
  }
  _current_dump_region->init(&_shared_rs, &_shared_vs);

  ArchivePtrMarker::initialize(&_ptrmap, &_shared_vs);

  // The bottom of the static archive should be mapped at this address by default.
  _requested_static_archive_bottom = (address)AOTMetaspace::requested_base_address();

  // The bottom of the archive (that I am writing now) should be mapped at this address by default.
  address my_archive_requested_bottom;

  if (CDSConfig::is_dumping_static_archive()) {
    my_archive_requested_bottom = _requested_static_archive_bottom;
  } else {
    _mapped_static_archive_bottom = (address)MetaspaceObj::aot_metaspace_base();
    _mapped_static_archive_top = (address)MetaspaceObj::aot_metaspace_top();
    assert(_mapped_static_archive_top >= _mapped_static_archive_bottom, "must be");
    size_t static_archive_size = _mapped_static_archive_top - _mapped_static_archive_bottom;

    // At run time, we will mmap the dynamic archive at my_archive_requested_bottom
    _requested_static_archive_top = _requested_static_archive_bottom + static_archive_size;
    my_archive_requested_bottom = align_up(_requested_static_archive_top, AOTMetaspace::core_region_alignment());

    _requested_dynamic_archive_bottom = my_archive_requested_bottom;
  }

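  // Worked example with hypothetical addresses: if the buffer happened to be
  // reserved at 0x7f1234500000 and the archive is requested to map at
  // 0x800000000 (a typical default SharedBaseAddress), then
  // _buffer_to_requested_delta = 0x800000000 - 0x7f1234500000 (negative in
  // this example); adding the delta to any buffered address yields the
  // address that byte is expected to have at run time.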
  _buffer_to_requested_delta = my_archive_requested_bottom - _buffer_bottom;

  address my_archive_requested_top = my_archive_requested_bottom + buffer_size;
  if (my_archive_requested_bottom < _requested_static_archive_bottom ||
      my_archive_requested_top <= _requested_static_archive_bottom) {
    // Size overflow.
    aot_log_error(aot)("my_archive_requested_bottom = " INTPTR_FORMAT, p2i(my_archive_requested_bottom));
    aot_log_error(aot)("my_archive_requested_top = " INTPTR_FORMAT, p2i(my_archive_requested_top));
    aot_log_error(aot)("SharedBaseAddress (" INTPTR_FORMAT ") is too high. "
                       "Please rerun java -Xshare:dump with a lower value", p2i(_requested_static_archive_bottom));
    AOTMetaspace::unrecoverable_writing_error();
  }

  if (CDSConfig::is_dumping_static_archive()) {
    // We don't want any valid object to be at the very bottom of the archive.
    // See ArchivePtrMarker::mark_pointer().
    _pz_region.allocate(AOTMetaspace::protection_zone_size());
    start_dump_region(&_rw_region);
  }

  return buffer_bottom;
}

void ArchiveBuilder::iterate_sorted_roots(MetaspaceClosure* it) {
  int num_symbols = _symbols->length();
  for (int i = 0; i < num_symbols; i++) {
    it->push(_symbols->adr_at(i));
  }

  int num_klasses = _klasses->length();
  for (int i = 0; i < num_klasses; i++) {
    it->push(_klasses->adr_at(i));
  }

  iterate_roots(it);
}

class GatherSortedSourceObjs : public MetaspaceClosure {
  ArchiveBuilder* _builder;

public:
  GatherSortedSourceObjs(ArchiveBuilder* builder) : _builder(builder) {}

  virtual bool do_ref(Ref* ref, bool read_only) {
    return _builder->gather_one_source_obj(ref, read_only);
  }
};

bool ArchiveBuilder::gather_one_source_obj(MetaspaceClosure::Ref* ref, bool read_only) {
  address src_obj = ref->obj();
  if (src_obj == nullptr) {
    return false;
  }

  remember_embedded_pointer_in_enclosing_obj(ref);
  if (RegeneratedClasses::has_been_regenerated(src_obj)) {
    // No need to copy it. We will later relocate it to point to the regenerated klass/method.
    return false;
  }

  FollowMode follow_mode = get_follow_mode(ref);
  SourceObjInfo src_info(ref, read_only, follow_mode);
  bool created;
  SourceObjInfo* p = _src_obj_table.put_if_absent(src_obj, src_info, &created);
  if (created) {
    if (_src_obj_table.maybe_grow()) {
      log_info(aot, hashtables)("Expanded _src_obj_table table to %d", _src_obj_table.table_size());
    }
  }

#ifdef ASSERT
  if (ref->type() == MetaspaceClosureType::MethodType) {
    Method* m = (Method*)ref->obj();
    assert(!RegeneratedClasses::has_been_regenerated((address)m->method_holder()),
           "Should not archive methods in a class that has been regenerated");
  }
#endif

  if (ref->type() == MetaspaceClosureType::MethodDataType) {
    MethodData* md = (MethodData*)ref->obj();
    md->clean_method_data(false /* always_clean */);
  }

  assert(p->read_only() == src_info.read_only(), "must be");

  if (created && src_info.should_copy()) {
    if (read_only) {
      _ro_src_objs.append(p);
    } else {
      _rw_src_objs.append(p);
    }
    return true; // Need to recurse into this ref only if we are copying it
  } else {
    return false;
  }
}

void ArchiveBuilder::record_regenerated_object(address orig_src_obj, address regen_src_obj) {
  // Record the fact that orig_src_obj has been replaced by regen_src_obj. All calls to get_buffered_addr(orig_src_obj)
  // should return the same value as get_buffered_addr(regen_src_obj).
  SourceObjInfo* p = _src_obj_table.get(regen_src_obj);
  assert(p != nullptr, "regenerated object should always be dumped");
  SourceObjInfo orig_src_info(orig_src_obj, p);
  bool created;
  _src_obj_table.put_if_absent(orig_src_obj, orig_src_info, &created);
  assert(created, "We shouldn't have archived the original copy of a regenerated object");
}

// Remember that we have a pointer inside ref->enclosing_obj() that points to ref->obj()
void ArchiveBuilder::remember_embedded_pointer_in_enclosing_obj(MetaspaceClosure::Ref* ref) {
  assert(ref->obj() != nullptr, "should have checked");

  address enclosing_obj = ref->enclosing_obj();
  if (enclosing_obj == nullptr) {
    return;
  }

  // We are dealing with 3 addresses:
  // address o    = ref->obj(): We have found an object whose address is o.
  // address* mpp = ref->mpp(): The object o is pointed to by a pointer whose address is mpp.
  //                            I.e., (*mpp == o)
  // enclosing_obj : If non-null, it is the object which has a field that points to o.
  //                 mpp is the address of that field.
  //
  // Example: We have an array whose first element points to a Method:
  //     Method* o                     = 0x0000abcd;
  //     Array<Method*>* enclosing_obj = 0x00001000;
  //     enclosing_obj->at_put(0, o);
  //
  // When the MetaspaceClosure iterates on the very first element of this array, we have
  //     ref->obj()           == 0x0000abcd   (the Method)
  //     ref->mpp()           == 0x00001008   (the location of the first element in the array)
  //     ref->enclosing_obj() == 0x00001000   (the Array that contains the Method)
  //
  // We use the above information to mark the bitmap to indicate that there's a pointer at address 0x00001008.
  SourceObjInfo* src_info = _src_obj_table.get(enclosing_obj);
  if (src_info == nullptr || !src_info->should_copy()) {
    // source objects of point_to_it/set_to_null types are not copied
    // so we don't need to remember their pointers.
  } else {
    if (src_info->read_only()) {
      _ro_src_objs.remember_embedded_pointer(src_info, ref);
    } else {
      _rw_src_objs.remember_embedded_pointer(src_info, ref);
    }
  }
}

void ArchiveBuilder::gather_source_objs() {
  ResourceMark rm;
  aot_log_info(aot)("Gathering all archivable objects ... ");
  gather_klasses_and_symbols();
  GatherSortedSourceObjs doit(this);
  iterate_sorted_roots(&doit);
  doit.finish();
}

bool ArchiveBuilder::is_excluded(Klass* klass) {
  if (klass->is_instance_klass()) {
    InstanceKlass* ik = InstanceKlass::cast(klass);
    return SystemDictionaryShared::is_excluded_class(ik);
  } else if (klass->is_objArray_klass()) {
    Klass* bottom = ObjArrayKlass::cast(klass)->bottom_klass();
    if (CDSConfig::is_dumping_dynamic_archive() && AOTMetaspace::in_aot_cache_static_region(bottom)) {
      // The bottom class is in the static archive so it's clearly not excluded.
      return false;
    } else if (bottom->is_instance_klass()) {
      return SystemDictionaryShared::is_excluded_class(InstanceKlass::cast(bottom));
    }
  }

  return false;
}

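// Decides how a pointer discovered by MetaspaceClosure should be treated:
//   point_to_it -- keep the pointer as is; the target already lives in the
//                  mapped static archive (dynamic dump only), so no copy is made;
//   make_a_copy -- copy the target into the buffer and relocate the pointer;
//   set_to_null -- clear the pointer; the target is excluded, or its kind is
//                  not being dumped (e.g., training data when it is neither
//                  needed nor being assembled).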
ArchiveBuilder::FollowMode ArchiveBuilder::get_follow_mode(MetaspaceClosure::Ref *ref) {
  address obj = ref->obj();
  if (CDSConfig::is_dumping_dynamic_archive() && AOTMetaspace::in_aot_cache(obj)) {
    // Don't dump existing shared metadata again.
    return point_to_it;
  } else if (ref->type() == MetaspaceClosureType::MethodDataType ||
             ref->type() == MetaspaceClosureType::MethodCountersType ||
             ref->type() == MetaspaceClosureType::KlassTrainingDataType ||
             ref->type() == MetaspaceClosureType::MethodTrainingDataType ||
             ref->type() == MetaspaceClosureType::CompileTrainingDataType) {
    return (TrainingData::need_data() || TrainingData::assembling_data()) ? make_a_copy : set_to_null;
  } else if (ref->type() == MetaspaceClosureType::AdapterHandlerEntryType) {
    return CDSConfig::is_dumping_adapters() ? make_a_copy : set_to_null;
  } else {
    if (ref->type() == MetaspaceClosureType::ClassType) {
      Klass* klass = (Klass*)ref->obj();
      assert(klass->is_klass(), "must be");
      if (RegeneratedClasses::has_been_regenerated(klass)) {
        klass = RegeneratedClasses::get_regenerated_object(klass);
      }
      if (is_excluded(klass)) {
        ResourceMark rm;
        aot_log_trace(aot)("pointer set to null: class (excluded): %s", klass->external_name());
        return set_to_null;
      }
      if (klass->is_array_klass() && CDSConfig::is_dumping_dynamic_archive()) {
        ResourceMark rm;
        aot_log_trace(aot)("pointer set to null: array class not supported in dynamic region: %s", klass->external_name());
        return set_to_null;
      }
    }

    return make_a_copy;
  }
}

void ArchiveBuilder::start_dump_region(DumpRegion* next) {
  current_dump_region()->pack(next);
  _current_dump_region = next;
}

char* ArchiveBuilder::ro_strdup(const char* s) {
  char* archived_str = ro_region_alloc((int)strlen(s) + 1);
  strcpy(archived_str, s);
  return archived_str;
}

// The objects that have embedded pointers will sink
// towards the end of the list. This ensures we have a maximum
// number of leading zero bits in the relocation bitmap.
int ArchiveBuilder::compare_src_objs(SourceObjInfo** a, SourceObjInfo** b) {
  if ((*a)->has_embedded_pointer() && !(*b)->has_embedded_pointer()) {
    return 1;
  } else if (!(*a)->has_embedded_pointer() && (*b)->has_embedded_pointer()) {
    return -1;
  } else {
    // This is necessary to keep the sorting order stable. Otherwise the
    // archive's contents may not be deterministic.
    return (*a)->id() - (*b)->id();
  }
}

void ArchiveBuilder::sort_metadata_objs() {
  _rw_src_objs.objs()->sort(compare_src_objs);
  _ro_src_objs.objs()->sort(compare_src_objs);
}

void ArchiveBuilder::dump_rw_metadata() {
  ResourceMark rm;
  aot_log_info(aot)("Allocating RW objects ... ");
  make_shallow_copies(&_rw_region, &_rw_src_objs);
}

void ArchiveBuilder::dump_ro_metadata() {
  ResourceMark rm;
  aot_log_info(aot)("Allocating RO objects ... ");

  start_dump_region(&_ro_region);
  make_shallow_copies(&_ro_region, &_ro_src_objs);
  RegeneratedClasses::record_regenerated_objects();
}

void ArchiveBuilder::make_shallow_copies(DumpRegion *dump_region,
                                         const ArchiveBuilder::SourceObjList* src_objs) {
  for (int i = 0; i < src_objs->objs()->length(); i++) {
    make_shallow_copy(dump_region, src_objs->objs()->at(i));
  }
  aot_log_info(aot)("done (%d objects)", src_objs->objs()->length());
}

void ArchiveBuilder::make_shallow_copy(DumpRegion *dump_region, SourceObjInfo* src_info) {
  address src = src_info->source_addr();
  int bytes = src_info->size_in_bytes(); // word-aligned
  size_t alignment = SharedSpaceObjectAlignment; // alignment for the dest pointer

  char* oldtop = dump_region->top();
  if (src_info->type() == MetaspaceClosureType::ClassType) {
    // Allocate space for a pointer directly in front of the future InstanceKlass, so
    // we can do a quick lookup from InstanceKlass* -> RunTimeClassInfo*
    // without building another hashtable. See RunTimeClassInfo::get_for()
    // in systemDictionaryShared.cpp.
    Klass* klass = (Klass*)src;
    if (klass->is_instance_klass()) {
      SystemDictionaryShared::validate_before_archiving(InstanceKlass::cast(klass));
      dump_region->allocate(sizeof(address));
    }
#ifdef _LP64
    // More strict alignments needed for UseCompressedClassPointers
    if (UseCompressedClassPointers) {
      alignment = nth_bit(ArchiveBuilder::precomputed_narrow_klass_shift());
    }
#endif
  } else if (src_info->type() == MetaspaceClosureType::SymbolType) {
    // Symbols may be allocated by using AllocateHeap, so their sizes
    // may be less than size_in_bytes() indicates.
    bytes = ((Symbol*)src)->byte_size();
  }

  char* dest = dump_region->allocate(bytes, alignment);
  memcpy(dest, src, bytes);

  // Update the hash of buffered sorted symbols for static dump so that the symbols have deterministic contents
  if (CDSConfig::is_dumping_static_archive() && (src_info->type() == MetaspaceClosureType::SymbolType)) {
    Symbol* buffered_symbol = (Symbol*)dest;
    assert(((Symbol*)src)->is_permanent(), "archived symbols must be permanent");
    buffered_symbol->update_identity_hash();
  }

  {
    bool created;
    _buffered_to_src_table.put_if_absent((address)dest, src, &created);
    assert(created, "must be");
    if (_buffered_to_src_table.maybe_grow()) {
      log_info(aot, hashtables)("Expanded _buffered_to_src_table table to %d", _buffered_to_src_table.table_size());
    }
  }

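  // If this metadata type is a polymorphic C++ object, overwrite the vptr word
  // at the start of the copy with the pre-built archived vtable address, and
  // mark that word so it is relocated along with all other archived pointers.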
  intptr_t* archived_vtable = CppVtables::get_archived_vtable(src_info->type(), (address)dest);
  if (archived_vtable != nullptr) {
    *(address*)dest = (address)archived_vtable;
    ArchivePtrMarker::mark_pointer((address*)dest);
  }

  log_trace(aot)("Copy: " PTR_FORMAT " ==> " PTR_FORMAT " %d", p2i(src), p2i(dest), bytes);
  src_info->set_buffered_addr((address)dest);

  char* newtop = dump_region->top();
  _alloc_stats.record(src_info->type(), int(newtop - oldtop), src_info->read_only());

  DEBUG_ONLY(_alloc_stats.verify((int)dump_region->used(), src_info->read_only()));
}

// This is used by code that hand-assembles data structures, such as the LambdaProxyClassKey, that are
// not handled by MetaspaceClosure.
void ArchiveBuilder::write_pointer_in_buffer(address* ptr_location, address src_addr) {
  assert(is_in_buffer_space(ptr_location), "must be");
  if (src_addr == nullptr) {
    *ptr_location = nullptr;
    ArchivePtrMarker::clear_pointer(ptr_location);
  } else {
    *ptr_location = get_buffered_addr(src_addr);
    ArchivePtrMarker::mark_pointer(ptr_location);
  }
}

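// Relocates a pointer that is already stored inside the buffer: if it points
// at a source-space object, rewrite it to that object's buffered copy;
// pointers into the mapped static archive (possible only during a dynamic
// dump) are left unchanged. In both cases the location is marked in the ptrmap.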
void ArchiveBuilder::mark_and_relocate_to_buffered_addr(address* ptr_location) {
  assert(*ptr_location != nullptr, "sanity");
  if (!is_in_mapped_static_archive(*ptr_location)) {
    *ptr_location = get_buffered_addr(*ptr_location);
  }
  ArchivePtrMarker::mark_pointer(ptr_location);
}

bool ArchiveBuilder::has_been_archived(address src_addr) const {
  SourceObjInfo* p = _src_obj_table.get(src_addr);
  if (p == nullptr) {
    // This object has never been seen by ArchiveBuilder
    return false;
  }
  if (p->buffered_addr() == nullptr) {
    // ArchiveBuilder has seen this object, but decided not to archive it, so
    // any reference to this object will be modified to nullptr inside the buffer.
    assert(p->follow_mode() == set_to_null, "must be");
    return false;
  }

  DEBUG_ONLY({
    // This is a class/method that belongs to one of the "original" classes that
    // have been regenerated by lambdaFormInvokers.cpp. We must have archived
    // the "regenerated" version of it.
    if (RegeneratedClasses::has_been_regenerated(src_addr)) {
      address regen_obj = RegeneratedClasses::get_regenerated_object(src_addr);
      precond(regen_obj != nullptr && regen_obj != src_addr);
      assert(has_been_archived(regen_obj), "must be");
      assert(get_buffered_addr(src_addr) == get_buffered_addr(regen_obj), "must be");
    }});

  return true;
}

address ArchiveBuilder::get_buffered_addr(address src_addr) const {
  SourceObjInfo* p = _src_obj_table.get(src_addr);
  assert(p != nullptr, "src_addr " INTPTR_FORMAT " is used but has not been archived",
         p2i(src_addr));

  return p->buffered_addr();
}

address ArchiveBuilder::get_source_addr(address buffered_addr) const {
  assert(is_in_buffer_space(buffered_addr), "must be");
  address* src_p = _buffered_to_src_table.get(buffered_addr);
  assert(src_p != nullptr && *src_p != nullptr, "must be");
  return *src_p;
}

void ArchiveBuilder::relocate_embedded_pointers(ArchiveBuilder::SourceObjList* src_objs) {
  for (int i = 0; i < src_objs->objs()->length(); i++) {
    src_objs->relocate(i, this);
  }
}

void ArchiveBuilder::relocate_metaspaceobj_embedded_pointers() {
  aot_log_info(aot)("Relocating embedded pointers in core regions ... ");
  relocate_embedded_pointers(&_rw_src_objs);
  relocate_embedded_pointers(&_ro_src_objs);
  log_info(cds)("Relocating %zu pointers, %zu tagged, %zu nulled",
                _relocated_ptr_info._num_ptrs,
                _relocated_ptr_info._num_tagged_ptrs,
                _relocated_ptr_info._num_nulled_ptrs);
}

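// DECLARE_INSTANCE_KLASS_COUNTER(x) expands to three tallies: x (the total),
// x_a (of those, how many are aot-linked) and x_i (of those, how many have an
// aot-initialized mirror). ADD_COUNT(x) bumps all three using the 'aotlinked'
// and 'inited' locals at the use site, e.g. ADD_COUNT(num_boot_klasses)
// increments num_boot_klasses and, conditionally, num_boot_klasses_a and
// num_boot_klasses_i.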
#define ADD_COUNT(x) \
  x += 1; \
  x ## _a += aotlinked ? 1 : 0; \
  x ## _i += inited ? 1 : 0;

#define DECLARE_INSTANCE_KLASS_COUNTER(x) \
  int x = 0; \
  int x ## _a = 0; \
  int x ## _i = 0;

void ArchiveBuilder::make_klasses_shareable() {
  DECLARE_INSTANCE_KLASS_COUNTER(num_instance_klasses);
  DECLARE_INSTANCE_KLASS_COUNTER(num_boot_klasses);
  DECLARE_INSTANCE_KLASS_COUNTER(num_vm_klasses);
  DECLARE_INSTANCE_KLASS_COUNTER(num_platform_klasses);
  DECLARE_INSTANCE_KLASS_COUNTER(num_app_klasses);
  DECLARE_INSTANCE_KLASS_COUNTER(num_old_klasses);
  DECLARE_INSTANCE_KLASS_COUNTER(num_hidden_klasses);
  DECLARE_INSTANCE_KLASS_COUNTER(num_enum_klasses);
  DECLARE_INSTANCE_KLASS_COUNTER(num_unregistered_klasses);
  int num_unlinked_klasses = 0;
  int num_obj_array_klasses = 0;
  int num_type_array_klasses = 0;

  int boot_unlinked = 0;
  int platform_unlinked = 0;
  int app_unlinked = 0;
  int unreg_unlinked = 0;

  for (int i = 0; i < klasses()->length(); i++) {
    // Some of the code in ConstantPool::remove_unshareable_info() requires the classes
    // to be in linked state, so it must be called here before the next loop, which returns
    // all classes to unlinked state.
    Klass* k = get_buffered_addr(klasses()->at(i));
    if (k->is_instance_klass()) {
      InstanceKlass::cast(k)->constants()->remove_unshareable_info();
    }
  }

  for (int i = 0; i < klasses()->length(); i++) {
    const char* type;
    const char* unlinked = "";
    const char* kind = "";
    const char* hidden = "";
    const char* old = "";
    const char* generated = "";
    const char* aotlinked_msg = "";
    const char* inited_msg = "";
    Klass* k = get_buffered_addr(klasses()->at(i));
    bool inited = false;
    k->remove_java_mirror();
#ifdef _LP64
    if (UseCompactObjectHeaders) {
      Klass* requested_k = to_requested(k);
      address narrow_klass_base = _requested_static_archive_bottom; // runtime encoding base == runtime mapping start
      const int narrow_klass_shift = precomputed_narrow_klass_shift();
      narrowKlass nk = CompressedKlassPointers::encode_not_null_without_asserts(requested_k, narrow_klass_base, narrow_klass_shift);
      k->set_prototype_header(markWord::prototype().set_narrow_klass(nk));
    }
#endif //_LP64
    if (k->is_objArray_klass()) {
      // InstanceKlass and TypeArrayKlass will in turn call remove_unshareable_info
      // on their array classes.
      num_obj_array_klasses ++;
      type = "array";
    } else if (k->is_typeArray_klass()) {
      num_type_array_klasses ++;
      type = "array";
      k->remove_unshareable_info();
    } else {
      assert(k->is_instance_klass(), " must be");
      InstanceKlass* ik = InstanceKlass::cast(k);
      InstanceKlass* src_ik = get_source_addr(ik);
      bool aotlinked = AOTClassLinker::is_candidate(src_ik);
      inited = ik->has_aot_initialized_mirror();
      ADD_COUNT(num_instance_klasses);
      if (ik->is_hidden()) {
        ADD_COUNT(num_hidden_klasses);
        hidden = " hidden";
        oop loader = k->class_loader();
        if (loader == nullptr) {
          type = "boot";
          ADD_COUNT(num_boot_klasses);
        } else if (loader == SystemDictionary::java_platform_loader()) {
          type = "plat";
          ADD_COUNT(num_platform_klasses);
        } else if (loader == SystemDictionary::java_system_loader()) {
          type = "app";
          ADD_COUNT(num_app_klasses);
        } else {
          type = "bad";
          assert(0, "shouldn't happen");
        }
        if (CDSConfig::is_dumping_method_handles()) {
          assert(HeapShared::is_archivable_hidden_klass(ik), "sanity");
        } else {
          // Legacy CDS support for lambda proxies
          CDS_JAVA_HEAP_ONLY(assert(HeapShared::is_lambda_proxy_klass(ik), "sanity");)
        }
      } else if (ik->defined_by_boot_loader()) {
        type = "boot";
        ADD_COUNT(num_boot_klasses);
      } else if (ik->defined_by_platform_loader()) {
        type = "plat";
        ADD_COUNT(num_platform_klasses);
      } else if (ik->defined_by_app_loader()) {
        type = "app";
        ADD_COUNT(num_app_klasses);
      } else {
        assert(ik->defined_by_other_loaders(), "must be");
        type = "unreg";
        ADD_COUNT(num_unregistered_klasses);
      }

      if (AOTClassLinker::is_vm_class(src_ik)) {
        ADD_COUNT(num_vm_klasses);
      }

      if (!ik->is_linked()) {
        num_unlinked_klasses ++;
        unlinked = " unlinked";
        if (ik->defined_by_boot_loader()) {
          boot_unlinked ++;
        } else if (ik->defined_by_platform_loader()) {
          platform_unlinked ++;
        } else if (ik->defined_by_app_loader()) {
          app_unlinked ++;
        } else {
          unreg_unlinked ++;
        }
      }

      if (ik->is_interface()) {
        kind = " interface";
      } else if (src_ik->is_enum_subclass()) {
        kind = " enum";
        ADD_COUNT(num_enum_klasses);
      }

      if (CDSConfig::is_old_class_for_verifier(ik)) {
        ADD_COUNT(num_old_klasses);
        old = " old";
      }

      if (ik->is_aot_generated_class()) {
        generated = " generated";
      }
      if (aotlinked) {
        aotlinked_msg = " aot-linked";
      }
      if (inited) {
        if (InstanceKlass::cast(k)->static_field_size() == 0) {
          inited_msg = " inited (no static fields)";
        } else {
          inited_msg = " inited";
        }
      }

      AOTMetaspace::rewrite_bytecodes_and_calculate_fingerprints(Thread::current(), ik);
      ik->remove_unshareable_info();
    }

    if (aot_log_is_enabled(Debug, aot, class)) {
      ResourceMark rm;
      aot_log_debug(aot, class)("klasses[%5d] = " PTR_FORMAT " %-5s %s%s%s%s%s%s%s%s", i,
                                p2i(to_requested(k)), type, k->external_name(),
                                kind, hidden, old, unlinked, generated, aotlinked_msg, inited_msg);
    }
  }

#define STATS_FORMAT    "= %5d, aot-linked = %5d, inited = %5d"
#define STATS_PARAMS(x) num_ ## x, num_ ## x ## _a, num_ ## x ## _i

  aot_log_info(aot)("Number of classes %d", num_instance_klasses + num_obj_array_klasses + num_type_array_klasses);
  aot_log_info(aot)("    instance classes   " STATS_FORMAT, STATS_PARAMS(instance_klasses));
  aot_log_info(aot)("      boot             " STATS_FORMAT, STATS_PARAMS(boot_klasses));
  aot_log_info(aot)("        vm             " STATS_FORMAT, STATS_PARAMS(vm_klasses));
  aot_log_info(aot)("      platform         " STATS_FORMAT, STATS_PARAMS(platform_klasses));
  aot_log_info(aot)("      app              " STATS_FORMAT, STATS_PARAMS(app_klasses));
  aot_log_info(aot)("      unregistered     " STATS_FORMAT, STATS_PARAMS(unregistered_klasses));
  aot_log_info(aot)("      (enum)           " STATS_FORMAT, STATS_PARAMS(enum_klasses));
  aot_log_info(aot)("      (hidden)         " STATS_FORMAT, STATS_PARAMS(hidden_klasses));
  aot_log_info(aot)("      (old)            " STATS_FORMAT, STATS_PARAMS(old_klasses));
  aot_log_info(aot)("      (unlinked)       = %5d, boot = %d, plat = %d, app = %d, unreg = %d",
                    num_unlinked_klasses, boot_unlinked, platform_unlinked, app_unlinked, unreg_unlinked);
  aot_log_info(aot)("    obj array classes  = %5d", num_obj_array_klasses);
  aot_log_info(aot)("    type array classes = %5d", num_type_array_klasses);
  aot_log_info(aot)("               symbols = %5d", _symbols->length());

#undef STATS_FORMAT
#undef STATS_PARAMS
}

void ArchiveBuilder::make_training_data_shareable() {
  auto clean_td = [&] (address& src_obj, SourceObjInfo& info) {
    if (!is_in_buffer_space(info.buffered_addr())) {
      return;
    }

    if (info.type() == MetaspaceClosureType::KlassTrainingDataType ||
        info.type() == MetaspaceClosureType::MethodTrainingDataType ||
        info.type() == MetaspaceClosureType::CompileTrainingDataType) {
      TrainingData* buffered_td = (TrainingData*)info.buffered_addr();
      buffered_td->remove_unshareable_info();
    } else if (info.type() == MetaspaceClosureType::MethodDataType) {
      MethodData* buffered_mdo = (MethodData*)info.buffered_addr();
      buffered_mdo->remove_unshareable_info();
    } else if (info.type() == MetaspaceClosureType::MethodCountersType) {
      MethodCounters* buffered_mc = (MethodCounters*)info.buffered_addr();
      buffered_mc->remove_unshareable_info();
    }
  };
  _src_obj_table.iterate_all(clean_td);
}

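// Offsets used in the archived tables are measured in bytes from
// _requested_static_archive_bottom, so the same value is meaningful at both
// dump time and run time. buffer_to_offset() accepts a buffered address;
// any_to_offset() additionally accepts source addresses and, during dynamic
// dumps, addresses inside the mapped static archive;
// offset_to_buffered_address() is the inverse of buffer_to_offset().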
size_t ArchiveBuilder::buffer_to_offset(address p) const {
  address requested_p = to_requested(p);
  return pointer_delta(requested_p, _requested_static_archive_bottom, 1);
}

size_t ArchiveBuilder::any_to_offset(address p) const {
  if (is_in_mapped_static_archive(p)) {
    assert(CDSConfig::is_dumping_dynamic_archive(), "must be");
    return pointer_delta(p, _mapped_static_archive_bottom, 1);
  }
  if (!is_in_buffer_space(p)) {
    // p must be a "source" address
    p = get_buffered_addr(p);
  }
  return buffer_to_offset(p);
}

address ArchiveBuilder::offset_to_buffered_address(size_t offset) const {
  address requested_addr = _requested_static_archive_bottom + offset;
  address buffered_addr = requested_addr - _buffer_to_requested_delta;
  assert(is_in_buffer_space(buffered_addr), "bad offset");
  return buffered_addr;
}

void ArchiveBuilder::start_ac_region() {
  ro_region()->pack();
  start_dump_region(&_ac_region);
}

void ArchiveBuilder::end_ac_region() {
  _ac_region.pack();
}

#if INCLUDE_CDS_JAVA_HEAP
narrowKlass ArchiveBuilder::get_requested_narrow_klass(Klass* k) {
  assert(CDSConfig::is_dumping_heap(), "sanity");
  k = get_buffered_klass(k);
  Klass* requested_k = to_requested(k);
  const int narrow_klass_shift = ArchiveBuilder::precomputed_narrow_klass_shift();
#ifdef ASSERT
  const size_t klass_alignment = MAX2(SharedSpaceObjectAlignment, (size_t)nth_bit(narrow_klass_shift));
  assert(is_aligned(k, klass_alignment), "Klass " PTR_FORMAT " misaligned.", p2i(k));
#endif
  address narrow_klass_base = _requested_static_archive_bottom; // runtime encoding base == runtime mapping start
  // Note: use the "raw" version of encode that takes explicit narrow klass base and shift. Don't use any
  // of the variants that do sanity checks, nor any of those that use the current - dump - JVM's encoding setting.
  return CompressedKlassPointers::encode_not_null_without_asserts(requested_k, narrow_klass_base, narrow_klass_shift);
}
#endif // INCLUDE_CDS_JAVA_HEAP

// RelocateBufferToRequested --- Relocate all the pointers in rw/ro,
// so that the archive can be mapped to the "requested" location without runtime relocation.
//
// - See ArchiveBuilder header for the definition of "buffer", "mapped" and "requested"
// - ArchivePtrMarker::ptrmap() marks all the pointers in the rw/ro regions
// - Every pointer must have one of the following values:
//   [a] nullptr:
//       No relocation is needed. Remove this pointer from ptrmap so we don't need to
//       consider it at runtime.
//   [b] Points into an object X which is inside the buffer:
//       Adjust this pointer by _buffer_to_requested_delta, so it points to X
//       when the archive is mapped at the requested location.
//   [c] Points into an object Y which is inside mapped static archive:
//       - This happens only during dynamic dump
//       - Adjust this pointer by _mapped_to_requested_static_archive_delta,
//         so it points to Y when the static archive is mapped at the requested location.
template <bool STATIC_DUMP>
class RelocateBufferToRequested : public BitMapClosure {
  ArchiveBuilder* _builder;
  address _buffer_bottom;
  intx _buffer_to_requested_delta;
  intx _mapped_to_requested_static_archive_delta;
  size_t _max_non_null_offset;

public:
  RelocateBufferToRequested(ArchiveBuilder* builder) {
    _builder = builder;
    _buffer_bottom = _builder->buffer_bottom();
    _buffer_to_requested_delta = builder->buffer_to_requested_delta();
    _mapped_to_requested_static_archive_delta = builder->requested_static_archive_bottom() - builder->mapped_static_archive_bottom();
    _max_non_null_offset = 0;

    address bottom = _builder->buffer_bottom();
    address top = _builder->buffer_top();
    address new_bottom = bottom + _buffer_to_requested_delta;
    address new_top = top + _buffer_to_requested_delta;
    aot_log_debug(aot)("Relocating archive from [" INTPTR_FORMAT " - " INTPTR_FORMAT "] to "
                       "[" INTPTR_FORMAT " - " INTPTR_FORMAT "]",
                       p2i(bottom), p2i(top),
                       p2i(new_bottom), p2i(new_top));
  }

  bool do_bit(size_t offset) {
    address* p = (address*)_buffer_bottom + offset;
    assert(_builder->is_in_buffer_space(p), "pointer must live in buffer space");

    if (*p == nullptr) {
      // todo -- clear bit, etc
      ArchivePtrMarker::ptrmap()->clear_bit(offset);
    } else {
      if (STATIC_DUMP) {
        assert(_builder->is_in_buffer_space(*p), "old pointer must point inside buffer space");
        *p += _buffer_to_requested_delta;
        assert(_builder->is_in_requested_static_archive(*p), "new pointer must point inside requested archive");
      } else {
        if (_builder->is_in_buffer_space(*p)) {
          *p += _buffer_to_requested_delta;
          // assert is in requested dynamic archive
        } else {
          assert(_builder->is_in_mapped_static_archive(*p), "old pointer must point inside buffer space or mapped static archive");
          *p += _mapped_to_requested_static_archive_delta;
          assert(_builder->is_in_requested_static_archive(*p), "new pointer must point inside requested archive");
        }
      }
      _max_non_null_offset = offset;
    }

    return true; // keep iterating
  }

  void doit() {
    ArchivePtrMarker::ptrmap()->iterate(this);
    ArchivePtrMarker::compact(_max_non_null_offset);
  }
};

#ifdef _LP64
int ArchiveBuilder::precomputed_narrow_klass_shift() {
  // Legacy Mode:
  //    We use 32 bits for narrowKlass, which should cover the full 4G Klass range. Shift can be 0.
  // CompactObjectHeader Mode:
  //    narrowKlass is much smaller, and we use the highest possible shift value to later get the maximum
  //    Klass encoding range.
  //
  // Note that all of this may change in the future, if we decide to correct the pre-calculated
  // narrow Klass IDs at archive load time.
  assert(UseCompressedClassPointers, "Only needed for compressed class pointers");
  return UseCompactObjectHeaders ? CompressedKlassPointers::max_shift() : 0;
}
#endif // _LP64

void ArchiveBuilder::relocate_to_requested() {
  if (!ro_region()->is_packed()) {
    ro_region()->pack();
  }
  size_t my_archive_size = buffer_top() - buffer_bottom();

  if (CDSConfig::is_dumping_static_archive()) {
    _requested_static_archive_top = _requested_static_archive_bottom + my_archive_size;
    RelocateBufferToRequested<true> patcher(this);
    patcher.doit();
  } else {
    assert(CDSConfig::is_dumping_dynamic_archive(), "must be");
    _requested_dynamic_archive_top = _requested_dynamic_archive_bottom + my_archive_size;
    RelocateBufferToRequested<false> patcher(this);
    patcher.doit();
  }
}

void ArchiveBuilder::print_stats() {
  _alloc_stats.print_stats(int(_ro_region.used()), int(_rw_region.used()));
}

void ArchiveBuilder::write_archive(FileMapInfo* mapinfo, AOTMappedHeapInfo* mapped_heap_info, AOTStreamedHeapInfo* streamed_heap_info) {
  // Make sure NUM_CDS_REGIONS (exported in cds.h) agrees with
  // AOTMetaspace::n_regions (internal to hotspot).
  assert(NUM_CDS_REGIONS == AOTMetaspace::n_regions, "sanity");

  ResourceMark rm;

  write_region(mapinfo, AOTMetaspace::rw, &_rw_region, /*read_only=*/false,/*allow_exec=*/false);
  write_region(mapinfo, AOTMetaspace::ro, &_ro_region, /*read_only=*/true, /*allow_exec=*/false);
  write_region(mapinfo, AOTMetaspace::ac, &_ac_region, /*read_only=*/false,/*allow_exec=*/false);

  // Split pointer map into read-write and read-only bitmaps
  ArchivePtrMarker::initialize_rw_ro_maps(&_rw_ptrmap, &_ro_ptrmap);

  size_t bitmap_size_in_bytes;
  char* bitmap = mapinfo->write_bitmap_region(ArchivePtrMarker::rw_ptrmap(),
                                              ArchivePtrMarker::ro_ptrmap(),
                                              mapped_heap_info,
                                              streamed_heap_info,
                                              bitmap_size_in_bytes);

  if (mapped_heap_info != nullptr && mapped_heap_info->is_used()) {
    _total_heap_region_size = mapinfo->write_mapped_heap_region(mapped_heap_info);
  } else if (streamed_heap_info != nullptr && streamed_heap_info->is_used()) {
    _total_heap_region_size = mapinfo->write_streamed_heap_region(streamed_heap_info);
  }

  print_region_stats(mapinfo, mapped_heap_info, streamed_heap_info);

  mapinfo->set_requested_base((char*)AOTMetaspace::requested_base_address());
  mapinfo->set_header_crc(mapinfo->compute_header_crc());
  // After this point, we should not write any data into mapinfo->header() since this
  // would corrupt the checksum we have just computed.
  mapinfo->write_header();
  mapinfo->close();

  if (log_is_enabled(Info, aot)) {
    log_info(aot)("Full module graph = %s", CDSConfig::is_dumping_full_module_graph() ? "enabled" : "disabled");
    print_stats();
  }

  if (log_is_enabled(Info, aot, map)) {
    AOTMapLogger::dumptime_log(this, mapinfo, mapped_heap_info, streamed_heap_info, bitmap, bitmap_size_in_bytes);
  }
  CDS_JAVA_HEAP_ONLY(HeapShared::destroy_archived_object_cache());
  FREE_C_HEAP_ARRAY(char, bitmap);
}

void ArchiveBuilder::write_region(FileMapInfo* mapinfo, int region_idx, DumpRegion* dump_region, bool read_only, bool allow_exec) {
  mapinfo->write_region(region_idx, dump_region->base(), dump_region->used(), read_only, allow_exec);
}

void ArchiveBuilder::count_relocated_pointer(bool tagged, bool nulled) {
  _relocated_ptr_info._num_ptrs ++;
  _relocated_ptr_info._num_tagged_ptrs += tagged ? 1 : 0;
  _relocated_ptr_info._num_nulled_ptrs += nulled ? 1 : 0;
}

void ArchiveBuilder::print_region_stats(FileMapInfo *mapinfo,
                                        AOTMappedHeapInfo* mapped_heap_info,
                                        AOTStreamedHeapInfo* streamed_heap_info) {
  // Print statistics of all the regions
  const size_t bitmap_used = mapinfo->region_at(AOTMetaspace::bm)->used();
  const size_t bitmap_reserved = mapinfo->region_at(AOTMetaspace::bm)->used_aligned();
  const size_t total_reserved = _ro_region.reserved() + _rw_region.reserved() +
                                bitmap_reserved +
                                _total_heap_region_size;
  const size_t total_bytes = _ro_region.used() + _rw_region.used() +
                             bitmap_used +
                             _total_heap_region_size;
  const double total_u_perc = percent_of(total_bytes, total_reserved);

  _rw_region.print(total_reserved);
  _ro_region.print(total_reserved);
  _ac_region.print(total_reserved);

  print_bitmap_region_stats(bitmap_used, total_reserved);

  if (mapped_heap_info != nullptr && mapped_heap_info->is_used()) {
    print_heap_region_stats(mapped_heap_info->buffer_start(), mapped_heap_info->buffer_byte_size(), total_reserved);
  } else if (streamed_heap_info != nullptr && streamed_heap_info->is_used()) {
    print_heap_region_stats(streamed_heap_info->buffer_start(), streamed_heap_info->buffer_byte_size(), total_reserved);
  }

  aot_log_debug(aot)("total    : %9zu [100.0%% of total] out of %9zu bytes [%5.1f%% used]",
                     total_bytes, total_reserved, total_u_perc);
}

void ArchiveBuilder::print_bitmap_region_stats(size_t size, size_t total_size) {
  aot_log_debug(aot)("bm space: %9zu [ %4.1f%% of total] out of %9zu bytes [100.0%% used]",
                     size, size/double(total_size)*100.0, size);
}

void ArchiveBuilder::print_heap_region_stats(char* start, size_t size, size_t total_size) {
  char* top = start + size;
  aot_log_debug(aot)("hp space: %9zu [ %4.1f%% of total] out of %9zu bytes [100.0%% used] at " INTPTR_FORMAT,
                     size, size/double(total_size)*100.0, size, p2i(start));
}

void ArchiveBuilder::report_out_of_space(const char* name, size_t needed_bytes) {
  // This is highly unlikely to happen on 64-bits because we have reserved a 4GB space.
  // On 32-bit we reserve only 256MB so you could run out of space with 100,000 classes
  // or so.
  _rw_region.print_out_of_space_msg(name, needed_bytes);
  _ro_region.print_out_of_space_msg(name, needed_bytes);

  log_error(aot)("Unable to allocate from '%s' region: Please reduce the number of shared classes.", name);
  AOTMetaspace::unrecoverable_writing_error();
}