1 /*
2 * Copyright (c) 2020, 2026, Oracle and/or its affiliates. All rights reserved.
3 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
4 *
5 * This code is free software; you can redistribute it and/or modify it
6 * under the terms of the GNU General Public License version 2 only, as
7 * published by the Free Software Foundation.
8 *
9 * This code is distributed in the hope that it will be useful, but WITHOUT
10 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
11 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
12 * version 2 for more details (a copy is included in the LICENSE file that
13 * accompanied this code).
14 *
15 * You should have received a copy of the GNU General Public License version
16 * 2 along with this work; if not, write to the Free Software Foundation,
17 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
18 *
19 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
20 * or visit www.oracle.com if you need additional information or have any
21 * questions.
22 *
23 */
24
25 #include "cds/aotArtifactFinder.hpp"
26 #include "cds/aotClassLinker.hpp"
27 #include "cds/aotLogging.hpp"
28 #include "cds/aotMapLogger.hpp"
29 #include "cds/aotMetaspace.hpp"
30 #include "cds/archiveBuilder.hpp"
31 #include "cds/archiveUtils.hpp"
32 #include "cds/cdsConfig.hpp"
33 #include "cds/cppVtables.hpp"
34 #include "cds/dumpAllocStats.hpp"
35 #include "cds/dynamicArchive.hpp"
36 #include "cds/heapShared.hpp"
37 #include "cds/regeneratedClasses.hpp"
38 #include "classfile/classLoader.hpp"
39 #include "classfile/classLoaderDataShared.hpp"
40 #include "classfile/javaClasses.hpp"
41 #include "classfile/symbolTable.hpp"
42 #include "classfile/systemDictionaryShared.hpp"
43 #include "classfile/vmClasses.hpp"
44 #include "code/aotCodeCache.hpp"
45 #include "interpreter/abstractInterpreter.hpp"
46 #include "jvm.h"
47 #include "logging/log.hpp"
48 #include "memory/allStatic.hpp"
49 #include "memory/memoryReserver.hpp"
50 #include "memory/memRegion.hpp"
51 #include "memory/resourceArea.hpp"
52 #include "oops/compressedKlass.inline.hpp"
53 #include "oops/instanceKlass.hpp"
54 #include "oops/methodCounters.hpp"
55 #include "oops/methodData.hpp"
56 #include "oops/objArrayKlass.hpp"
57 #include "oops/objArrayOop.inline.hpp"
58 #include "oops/oopHandle.inline.hpp"
59 #include "oops/trainingData.hpp"
60 #include "runtime/arguments.hpp"
61 #include "runtime/globals_extension.hpp"
62 #include "runtime/javaThread.hpp"
63 #include "runtime/sharedRuntime.hpp"
64 #include "utilities/align.hpp"
65 #include "utilities/bitMap.inline.hpp"
66 #include "utilities/formatBuffer.hpp"
67
68 ArchiveBuilder* ArchiveBuilder::_current = nullptr;
69
70 ArchiveBuilder::OtherROAllocMark::~OtherROAllocMark() {
71 char* newtop = ArchiveBuilder::current()->_ro_region.top();
72 ArchiveBuilder::alloc_stats()->record_other_type(int(newtop - _oldtop), true);
73 }
74
75 ArchiveBuilder::SourceObjList::SourceObjList() : _ptrmap(16 * K, mtClassShared) {
76 _total_bytes = 0;
77 _objs = new (mtClassShared) GrowableArray<SourceObjInfo*>(128 * K, mtClassShared);
78 }
79
80 ArchiveBuilder::SourceObjList::~SourceObjList() {
81 delete _objs;
82 }
83
84 void ArchiveBuilder::SourceObjList::append(SourceObjInfo* src_info) {
85 // Save this source object for copying
86 src_info->set_id(_objs->length());
87 _objs->append(src_info);
88
89 // Prepare for marking the pointers in this source object
90 assert(is_aligned(_total_bytes, sizeof(address)), "must be");
91 src_info->set_ptrmap_start(_total_bytes / sizeof(address));
92 _total_bytes = align_up(_total_bytes + (uintx)src_info->size_in_bytes(), sizeof(address));
93 src_info->set_ptrmap_end(_total_bytes / sizeof(address));
94
95 BitMap::idx_t bitmap_size_needed = BitMap::idx_t(src_info->ptrmap_end());
96 if (_ptrmap.size() <= bitmap_size_needed) {
97 _ptrmap.resize((bitmap_size_needed + 1) * 2);
98 }
99 }
100
101 void ArchiveBuilder::SourceObjList::remember_embedded_pointer(SourceObjInfo* src_info, MetaspaceClosure::Ref* ref) {
102 // src_obj contains a pointer. Remember the location of this pointer in _ptrmap,
103 // so that we can copy/relocate it later.
104 src_info->set_has_embedded_pointer();
105 address src_obj = src_info->source_addr();
106 address* field_addr = ref->addr();
107 assert(src_info->ptrmap_start() < _total_bytes, "sanity");
108 assert(src_info->ptrmap_end() <= _total_bytes, "sanity");
109 assert(*field_addr != nullptr, "should have checked");
110
111 intx field_offset_in_bytes = ((address)field_addr) - src_obj;
112 DEBUG_ONLY(int src_obj_size = src_info->size_in_bytes();)
113 assert(field_offset_in_bytes >= 0, "must be");
114 assert(field_offset_in_bytes + intx(sizeof(intptr_t)) <= intx(src_obj_size), "must be");
115 assert(is_aligned(field_offset_in_bytes, sizeof(address)), "must be");
116
117 BitMap::idx_t idx = BitMap::idx_t(src_info->ptrmap_start() + (uintx)(field_offset_in_bytes / sizeof(address)));
118 _ptrmap.set_bit(BitMap::idx_t(idx));
119 }
120
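// A BitMapClosure that patches the embedded pointers of one buffered object: every set bit
// identifies a pointer-sized field whose (possibly tagged) source address is rewritten to the
// corresponding buffered address, or to null if the target was not copied.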
121 class RelocateEmbeddedPointers : public BitMapClosure {
122 ArchiveBuilder* _builder;
123 address _buffered_obj;
124 BitMap::idx_t _start_idx;
125 public:
126 RelocateEmbeddedPointers(ArchiveBuilder* builder, address buffered_obj, BitMap::idx_t start_idx) :
127 _builder(builder), _buffered_obj(buffered_obj), _start_idx(start_idx) {}
128
129 bool do_bit(BitMap::idx_t bit_offset) {
130 size_t field_offset = size_t(bit_offset - _start_idx) * sizeof(address);
131 address* ptr_loc = (address*)(_buffered_obj + field_offset);
132
133 address old_p_with_tags = *ptr_loc;
134 assert(old_p_with_tags != nullptr, "null ptrs shouldn't have been marked");
135
136 address old_p = MetaspaceClosure::strip_tags(old_p_with_tags);
137 uintx tags = MetaspaceClosure::decode_tags(old_p_with_tags);
138 address new_p = _builder->get_buffered_addr(old_p);
139
140 bool nulled;
141 if (new_p == nullptr) {
142 // old_p had a FollowMode of set_to_null
143 nulled = true;
144 } else {
145 new_p = MetaspaceClosure::add_tags(new_p, tags);
146 nulled = false;
147 }
148
149 log_trace(aot)("Ref: [" PTR_FORMAT "] -> " PTR_FORMAT " => " PTR_FORMAT " %zu",
150 p2i(ptr_loc), p2i(old_p) + tags, p2i(new_p), tags);
151
152 ArchivePtrMarker::set_and_mark_pointer(ptr_loc, new_p);
153 ArchiveBuilder::current()->count_relocated_pointer(tags != 0, nulled);
154 return true; // keep iterating the bitmap
155 }
156 };
157
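// Relocate the embedded pointers of the i-th source object by iterating over its slice of _ptrmap.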
158 void ArchiveBuilder::SourceObjList::relocate(int i, ArchiveBuilder* builder) {
159 SourceObjInfo* src_info = objs()->at(i);
160 assert(src_info->should_copy(), "must be");
161 BitMap::idx_t start = BitMap::idx_t(src_info->ptrmap_start()); // inclusive
162 BitMap::idx_t end = BitMap::idx_t(src_info->ptrmap_end()); // exclusive
163
164 RelocateEmbeddedPointers relocator(builder, src_info->buffered_addr(), start);
165 _ptrmap.iterate(&relocator, start, end);
166 }
167
168 ArchiveBuilder::ArchiveBuilder() :
169 _current_dump_region(nullptr),
170 _buffer_bottom(nullptr),
171 _requested_static_archive_bottom(nullptr),
172 _requested_static_archive_top(nullptr),
173 _requested_dynamic_archive_bottom(nullptr),
174 _requested_dynamic_archive_top(nullptr),
175 _mapped_static_archive_bottom(nullptr),
176 _mapped_static_archive_top(nullptr),
177 _buffer_to_requested_delta(0),
178 _pz_region("pz", MAX_SHARED_DELTA), // protection zone -- used only during dumping; does NOT exist in cds archive.
179 _rw_region("rw", MAX_SHARED_DELTA),
180 _ro_region("ro", MAX_SHARED_DELTA),
181 _ac_region("ac", MAX_SHARED_DELTA),
182 _ptrmap(mtClassShared),
183 _rw_ptrmap(mtClassShared),
184 _ro_ptrmap(mtClassShared),
185 _rw_src_objs(),
186 _ro_src_objs(),
187 _src_obj_table(INITIAL_TABLE_SIZE, MAX_TABLE_SIZE),
188 _buffered_to_src_table(INITIAL_TABLE_SIZE, MAX_TABLE_SIZE),
189 _total_heap_region_size(0)
190 {
191 _klasses = new (mtClassShared) GrowableArray<Klass*>(4 * K, mtClassShared);
192 _symbols = new (mtClassShared) GrowableArray<Symbol*>(256 * K, mtClassShared);
193 _entropy_seed = 0x12345678;
194 _relocated_ptr_info._num_ptrs = 0;
195 _relocated_ptr_info._num_tagged_ptrs = 0;
196 _relocated_ptr_info._num_nulled_ptrs = 0;
197 assert(_current == nullptr, "must be");
198 _current = this;
199 }
200
201 ArchiveBuilder::~ArchiveBuilder() {
202 assert(_current == this, "must be");
203 _current = nullptr;
204
205 for (int i = 0; i < _symbols->length(); i++) {
206 _symbols->at(i)->decrement_refcount();
207 }
208
209 delete _klasses;
210 delete _symbols;
211 if (_shared_rs.is_reserved()) {
212 MemoryReserver::release(_shared_rs);
213 }
214
215 AOTArtifactFinder::dispose();
216 }
217
218 // Returns a deterministic sequence of pseudo random numbers. The main purpose is NOT
219 // for randomness but to get good entropy for the identity_hash() of archived Symbols,
220 // while keeping the contents of static CDS archives deterministic to ensure
221 // reproducibility of JDK builds.
222 int ArchiveBuilder::entropy() {
223 assert(SafepointSynchronize::is_at_safepoint(), "needed to ensure deterministic sequence");
224 _entropy_seed = os::next_random(_entropy_seed);
225 return static_cast<int>(_entropy_seed);
226 }
227
228 class GatherKlassesAndSymbols : public UniqueMetaspaceClosure {
229 ArchiveBuilder* _builder;
230
231 public:
232 GatherKlassesAndSymbols(ArchiveBuilder* builder) : _builder(builder) {}
233
234 virtual bool do_unique_ref(Ref* ref, bool read_only) {
235 return _builder->gather_klass_and_symbol(ref, read_only);
236 }
237 };
238
239 bool ArchiveBuilder::gather_klass_and_symbol(MetaspaceClosure::Ref* ref, bool read_only) {
240 if (ref->obj() == nullptr) {
241 return false;
242 }
243 if (get_follow_mode(ref) != make_a_copy) {
244 return false;
245 }
246 if (ref->msotype() == MetaspaceObj::ClassType) {
247 Klass* klass = (Klass*)ref->obj();
248 assert(klass->is_klass(), "must be");
249 if (!is_excluded(klass)) {
250 _klasses->append(klass);
251 if (klass->is_hidden()) {
252 assert(klass->is_instance_klass(), "must be");
253 }
254 }
255 } else if (ref->msotype() == MetaspaceObj::SymbolType) {
256 // Make sure the symbol won't be GC'ed while we are dumping the archive.
257 Symbol* sym = (Symbol*)ref->obj();
258 sym->increment_refcount();
259 _symbols->append(sym);
260 }
261
262 return true; // recurse
263 }
264
265 void ArchiveBuilder::gather_klasses_and_symbols() {
266 ResourceMark rm;
267
268 AOTArtifactFinder::initialize();
269 AOTArtifactFinder::find_artifacts();
270
271 aot_log_info(aot)("Gathering classes and symbols ... ");
272 GatherKlassesAndSymbols doit(this);
273 iterate_roots(&doit);
274 #if INCLUDE_CDS_JAVA_HEAP
275 if (CDSConfig::is_dumping_full_module_graph()) {
276 ClassLoaderDataShared::iterate_symbols(&doit);
277 }
278 #endif
279 doit.finish();
280
281 if (CDSConfig::is_dumping_static_archive()) {
282 // To ensure deterministic contents in the static archive, we need to ensure that
283 // we iterate the MetaspaceObjs in a deterministic order. It doesn't matter where
284 // the MetaspaceObjs are located originally, as they are copied sequentially into
285 // the archive during the iteration.
286 //
// The only issue here is that the symbol table and the system dictionaries may be
288 // randomly ordered, so we copy the symbols and klasses into two arrays and sort
289 // them deterministically.
290 //
291 // During -Xshare:dump, the order of Symbol creation is strictly determined by
292 // the SharedClassListFile (class loading is done in a single thread and the JIT
293 // is disabled). Also, Symbols are allocated in monotonically increasing addresses
294 // (see Symbol::operator new(size_t, int)). So if we iterate the Symbols by
295 // ascending address order, we ensure that all Symbols are copied into deterministic
296 // locations in the archive.
297 //
298 // TODO: in the future, if we want to produce deterministic contents in the
299 // dynamic archive, we might need to sort the symbols alphabetically (also see
300 // DynamicArchiveBuilder::sort_methods()).
301 aot_log_info(aot)("Sorting symbols ... ");
302 _symbols->sort(compare_symbols_by_address);
303 sort_klasses();
304 }
305
306 AOTClassLinker::add_candidates();
307 }
308
309 int ArchiveBuilder::compare_symbols_by_address(Symbol** a, Symbol** b) {
310 if (a[0] < b[0]) {
311 return -1;
312 } else {
313 assert(a[0] > b[0], "Duplicated symbol %s unexpected", (*a)->as_C_string());
314 return 1;
315 }
316 }
317
318 int ArchiveBuilder::compare_klass_by_name(Klass** a, Klass** b) {
319 return a[0]->name()->fast_compare(b[0]->name());
320 }
321
322 void ArchiveBuilder::sort_klasses() {
323 aot_log_info(aot)("Sorting classes ... ");
324 _klasses->sort(compare_klass_by_name);
325 }
326
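// Reserve the temporary output buffer into which the metadata will be copied, initialize the
// first dump region, and compute the delta between the buffer and the address at which the
// archive is requested to be mapped at run time.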
327 address ArchiveBuilder::reserve_buffer() {
328 // AOTCodeCache::max_aot_code_size() accounts for aot code region.
329 size_t buffer_size = LP64_ONLY(CompressedClassSpaceSize) NOT_LP64(256 * M) + AOTCodeCache::max_aot_code_size();
330 ReservedSpace rs = MemoryReserver::reserve(buffer_size,
331 AOTMetaspace::core_region_alignment(),
332 os::vm_page_size(),
333 mtNone);
334 if (!rs.is_reserved()) {
335 aot_log_error(aot)("Failed to reserve %zu bytes of output buffer.", buffer_size);
336 AOTMetaspace::unrecoverable_writing_error();
337 }
338
339 // buffer_bottom is the lowest address of the 2 core regions (rw, ro) when
340 // we are copying the class metadata into the buffer.
341 address buffer_bottom = (address)rs.base();
342 aot_log_info(aot)("Reserved output buffer space at " PTR_FORMAT " [%zu bytes]",
343 p2i(buffer_bottom), buffer_size);
344 _shared_rs = rs;
345
346 _buffer_bottom = buffer_bottom;
347
348 if (CDSConfig::is_dumping_static_archive()) {
349 _current_dump_region = &_pz_region;
350 } else {
351 _current_dump_region = &_rw_region;
352 }
353 _current_dump_region->init(&_shared_rs, &_shared_vs);
354
355 ArchivePtrMarker::initialize(&_ptrmap, &_shared_vs);
356
357 // The bottom of the static archive should be mapped at this address by default.
358 _requested_static_archive_bottom = (address)AOTMetaspace::requested_base_address();
359
360 // The bottom of the archive (that I am writing now) should be mapped at this address by default.
361 address my_archive_requested_bottom;
362
363 if (CDSConfig::is_dumping_static_archive()) {
364 my_archive_requested_bottom = _requested_static_archive_bottom;
365 } else {
366 _mapped_static_archive_bottom = (address)MetaspaceObj::aot_metaspace_base();
367 _mapped_static_archive_top = (address)MetaspaceObj::aot_metaspace_top();
368 assert(_mapped_static_archive_top >= _mapped_static_archive_bottom, "must be");
369 size_t static_archive_size = _mapped_static_archive_top - _mapped_static_archive_bottom;
370
371 // At run time, we will mmap the dynamic archive at my_archive_requested_bottom
372 _requested_static_archive_top = _requested_static_archive_bottom + static_archive_size;
373 my_archive_requested_bottom = align_up(_requested_static_archive_top, AOTMetaspace::core_region_alignment());
374
375 _requested_dynamic_archive_bottom = my_archive_requested_bottom;
376 }
377
378 _buffer_to_requested_delta = my_archive_requested_bottom - _buffer_bottom;
379
380 address my_archive_requested_top = my_archive_requested_bottom + buffer_size;
381 if (my_archive_requested_bottom < _requested_static_archive_bottom ||
382 my_archive_requested_top <= _requested_static_archive_bottom) {
383 // Size overflow.
384 aot_log_error(aot)("my_archive_requested_bottom = " INTPTR_FORMAT, p2i(my_archive_requested_bottom));
385 aot_log_error(aot)("my_archive_requested_top = " INTPTR_FORMAT, p2i(my_archive_requested_top));
386 aot_log_error(aot)("SharedBaseAddress (" INTPTR_FORMAT ") is too high. "
387 "Please rerun java -Xshare:dump with a lower value", p2i(_requested_static_archive_bottom));
388 AOTMetaspace::unrecoverable_writing_error();
389 }
390
391 if (CDSConfig::is_dumping_static_archive()) {
392 // We don't want any valid object to be at the very bottom of the archive.
393 // See ArchivePtrMarker::mark_pointer().
394 _pz_region.allocate(AOTMetaspace::protection_zone_size());
395 start_dump_region(&_rw_region);
396 }
397
398 return buffer_bottom;
399 }
400
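// Push the previously gathered symbols and klasses (sorted during a static dump) before
// iterating the remaining roots.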
401 void ArchiveBuilder::iterate_sorted_roots(MetaspaceClosure* it) {
402 int num_symbols = _symbols->length();
403 for (int i = 0; i < num_symbols; i++) {
404 it->push(_symbols->adr_at(i));
405 }
406
407 int num_klasses = _klasses->length();
408 for (int i = 0; i < num_klasses; i++) {
409 it->push(_klasses->adr_at(i));
410 }
411
412 iterate_roots(it);
413 }
414
415 class GatherSortedSourceObjs : public MetaspaceClosure {
416 ArchiveBuilder* _builder;
417
418 public:
419 GatherSortedSourceObjs(ArchiveBuilder* builder) : _builder(builder) {}
420
421 virtual bool do_ref(Ref* ref, bool read_only) {
422 return _builder->gather_one_source_obj(ref, read_only);
423 }
424 };
425
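// Record a single source object in _src_obj_table. Newly seen objects that should be copied are
// appended to the read-write or read-only list; only for those do we return true, so that the
// closure recurses into their embedded pointers.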
426 bool ArchiveBuilder::gather_one_source_obj(MetaspaceClosure::Ref* ref, bool read_only) {
427 address src_obj = ref->obj();
428 if (src_obj == nullptr) {
429 return false;
430 }
431
432 remember_embedded_pointer_in_enclosing_obj(ref);
433 if (RegeneratedClasses::has_been_regenerated(src_obj)) {
434 // No need to copy it. We will later relocate it to point to the regenerated klass/method.
435 return false;
436 }
437
438 FollowMode follow_mode = get_follow_mode(ref);
439 SourceObjInfo src_info(ref, read_only, follow_mode);
440 bool created;
441 SourceObjInfo* p = _src_obj_table.put_if_absent(src_obj, src_info, &created);
442 if (created) {
443 if (_src_obj_table.maybe_grow()) {
444 log_info(aot, hashtables)("Expanded _src_obj_table table to %d", _src_obj_table.table_size());
445 }
446 }
447
448 #ifdef ASSERT
449 if (ref->msotype() == MetaspaceObj::MethodType) {
450 Method* m = (Method*)ref->obj();
451 assert(!RegeneratedClasses::has_been_regenerated((address)m->method_holder()),
452 "Should not archive methods in a class that has been regenerated");
453 }
454 #endif
455
456 if (ref->msotype() == MetaspaceObj::MethodDataType) {
457 MethodData* md = (MethodData*)ref->obj();
458 md->clean_method_data(false /* always_clean */);
459 }
460
461 assert(p->read_only() == src_info.read_only(), "must be");
462
463 if (created && src_info.should_copy()) {
464 if (read_only) {
465 _ro_src_objs.append(p);
466 } else {
467 _rw_src_objs.append(p);
468 }
469 return true; // Need to recurse into this ref only if we are copying it
470 } else {
471 return false;
472 }
473 }
474
475 void ArchiveBuilder::record_regenerated_object(address orig_src_obj, address regen_src_obj) {
476 // Record the fact that orig_src_obj has been replaced by regen_src_obj. All calls to get_buffered_addr(orig_src_obj)
477 // should return the same value as get_buffered_addr(regen_src_obj).
478 SourceObjInfo* p = _src_obj_table.get(regen_src_obj);
479 assert(p != nullptr, "regenerated object should always be dumped");
480 SourceObjInfo orig_src_info(orig_src_obj, p);
481 bool created;
482 _src_obj_table.put_if_absent(orig_src_obj, orig_src_info, &created);
483 assert(created, "We shouldn't have archived the original copy of a regenerated object");
484 }
485
486 // Remember that we have a pointer inside ref->enclosing_obj() that points to ref->obj()
487 void ArchiveBuilder::remember_embedded_pointer_in_enclosing_obj(MetaspaceClosure::Ref* ref) {
488 assert(ref->obj() != nullptr, "should have checked");
489
490 address enclosing_obj = ref->enclosing_obj();
491 if (enclosing_obj == nullptr) {
492 return;
493 }
494
495 // We are dealing with 3 addresses:
496 // address o = ref->obj(): We have found an object whose address is o.
497 // address* mpp = ref->mpp(): The object o is pointed to by a pointer whose address is mpp.
498 // I.e., (*mpp == o)
499 // enclosing_obj : If non-null, it is the object which has a field that points to o.
// mpp is the address of that field.
501 //
502 // Example: We have an array whose first element points to a Method:
503 // Method* o = 0x0000abcd;
504 // Array<Method*>* enclosing_obj = 0x00001000;
505 // enclosing_obj->at_put(0, o);
506 //
// When the MetaspaceClosure iterates on the very first element of this array, we have
508 // ref->obj() == 0x0000abcd (the Method)
509 // ref->mpp() == 0x00001008 (the location of the first element in the array)
510 // ref->enclosing_obj() == 0x00001000 (the Array that contains the Method)
511 //
// We use the above information to mark the bitmap to indicate that there's a pointer at address 0x00001008.
513 SourceObjInfo* src_info = _src_obj_table.get(enclosing_obj);
514 if (src_info == nullptr || !src_info->should_copy()) {
515 // source objects of point_to_it/set_to_null types are not copied
516 // so we don't need to remember their pointers.
517 } else {
518 if (src_info->read_only()) {
519 _ro_src_objs.remember_embedded_pointer(src_info, ref);
520 } else {
521 _rw_src_objs.remember_embedded_pointer(src_info, ref);
522 }
523 }
524 }
525
526 void ArchiveBuilder::gather_source_objs() {
527 ResourceMark rm;
528 aot_log_info(aot)("Gathering all archivable objects ... ");
529 gather_klasses_and_symbols();
530 GatherSortedSourceObjs doit(this);
531 iterate_sorted_roots(&doit);
532 doit.finish();
533 }
534
535 bool ArchiveBuilder::is_excluded(Klass* klass) {
536 if (klass->is_instance_klass()) {
537 InstanceKlass* ik = InstanceKlass::cast(klass);
538 return SystemDictionaryShared::is_excluded_class(ik);
539 } else if (klass->is_objArray_klass()) {
540 Klass* bottom = ObjArrayKlass::cast(klass)->bottom_klass();
541 if (CDSConfig::is_dumping_dynamic_archive() && AOTMetaspace::in_aot_cache_static_region(bottom)) {
542 // The bottom class is in the static archive so it's clearly not excluded.
543 return false;
544 } else if (bottom->is_instance_klass()) {
545 return SystemDictionaryShared::is_excluded_class(InstanceKlass::cast(bottom));
546 }
547 }
548
549 return false;
550 }
551
552 ArchiveBuilder::FollowMode ArchiveBuilder::get_follow_mode(MetaspaceClosure::Ref *ref) {
553 address obj = ref->obj();
554 if (CDSConfig::is_dumping_dynamic_archive() && AOTMetaspace::in_aot_cache(obj)) {
555 // Don't dump existing shared metadata again.
556 return point_to_it;
557 } else if (ref->msotype() == MetaspaceObj::MethodDataType ||
558 ref->msotype() == MetaspaceObj::MethodCountersType ||
559 ref->msotype() == MetaspaceObj::KlassTrainingDataType ||
560 ref->msotype() == MetaspaceObj::MethodTrainingDataType ||
561 ref->msotype() == MetaspaceObj::CompileTrainingDataType) {
562 return (TrainingData::need_data() || TrainingData::assembling_data()) ? make_a_copy : set_to_null;
563 } else if (ref->msotype() == MetaspaceObj::AdapterHandlerEntryType) {
564 return CDSConfig::is_dumping_adapters() ? make_a_copy : set_to_null;
565 } else {
566 if (ref->msotype() == MetaspaceObj::ClassType) {
567 Klass* klass = (Klass*)ref->obj();
568 assert(klass->is_klass(), "must be");
569 if (RegeneratedClasses::has_been_regenerated(klass)) {
570 klass = RegeneratedClasses::get_regenerated_object(klass);
571 }
572 if (is_excluded(klass)) {
573 ResourceMark rm;
574 aot_log_trace(aot)("pointer set to null: class (excluded): %s", klass->external_name());
575 return set_to_null;
576 }
577 if (klass->is_array_klass() && CDSConfig::is_dumping_dynamic_archive()) {
578 ResourceMark rm;
579 aot_log_trace(aot)("pointer set to null: array class not supported in dynamic region: %s", klass->external_name());
580 return set_to_null;
581 }
582 }
583
584 return make_a_copy;
585 }
586 }
587
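// Pack the current dump region and direct all subsequent allocations into 'next'.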
588 void ArchiveBuilder::start_dump_region(DumpRegion* next) {
589 current_dump_region()->pack(next);
590 _current_dump_region = next;
591 }
592
593 char* ArchiveBuilder::ro_strdup(const char* s) {
594 char* archived_str = ro_region_alloc((int)strlen(s) + 1);
595 strcpy(archived_str, s);
596 return archived_str;
597 }
598
599 // The objects that have embedded pointers will sink
600 // towards the end of the list. This ensures we have a maximum
601 // number of leading zero bits in the relocation bitmap.
602 int ArchiveBuilder::compare_src_objs(SourceObjInfo** a, SourceObjInfo** b) {
603 if ((*a)->has_embedded_pointer() && !(*b)->has_embedded_pointer()) {
604 return 1;
605 } else if (!(*a)->has_embedded_pointer() && (*b)->has_embedded_pointer()) {
606 return -1;
607 } else {
608 // This is necessary to keep the sorting order stable. Otherwise the
609 // archive's contents may not be deterministic.
610 return (*a)->id() - (*b)->id();
611 }
612 }
613
614 void ArchiveBuilder::sort_metadata_objs() {
615 _rw_src_objs.objs()->sort(compare_src_objs);
616 _ro_src_objs.objs()->sort(compare_src_objs);
617 }
618
619 void ArchiveBuilder::dump_rw_metadata() {
620 ResourceMark rm;
621 aot_log_info(aot)("Allocating RW objects ... ");
622 make_shallow_copies(&_rw_region, &_rw_src_objs);
623
624 #if INCLUDE_CDS_JAVA_HEAP
625 if (CDSConfig::is_dumping_full_module_graph()) {
626 // Archive the ModuleEntry's and PackageEntry's of the 3 built-in loaders
627 char* start = rw_region()->top();
628 ClassLoaderDataShared::allocate_archived_tables();
629 alloc_stats()->record_modules(rw_region()->top() - start, /*read_only*/false);
630 }
631 #endif
632 }
633
634 void ArchiveBuilder::dump_ro_metadata() {
635 ResourceMark rm;
636 aot_log_info(aot)("Allocating RO objects ... ");
637
638 start_dump_region(&_ro_region);
639 make_shallow_copies(&_ro_region, &_ro_src_objs);
640
641 #if INCLUDE_CDS_JAVA_HEAP
642 if (CDSConfig::is_dumping_full_module_graph()) {
643 char* start = ro_region()->top();
644 ClassLoaderDataShared::init_archived_tables();
645 alloc_stats()->record_modules(ro_region()->top() - start, /*read_only*/true);
646 }
647 #endif
648
649 RegeneratedClasses::record_regenerated_objects();
650 }
651
652 void ArchiveBuilder::make_shallow_copies(DumpRegion *dump_region,
653 const ArchiveBuilder::SourceObjList* src_objs) {
654 for (int i = 0; i < src_objs->objs()->length(); i++) {
655 make_shallow_copy(dump_region, src_objs->objs()->at(i));
656 }
657 aot_log_info(aot)("done (%d objects)", src_objs->objs()->length());
658 }
659
660 void ArchiveBuilder::make_shallow_copy(DumpRegion *dump_region, SourceObjInfo* src_info) {
661 address src = src_info->source_addr();
662 int bytes = src_info->size_in_bytes(); // word-aligned
663 size_t alignment = SharedSpaceObjectAlignment; // alignment for the dest pointer
664
665 char* oldtop = dump_region->top();
666 if (src_info->msotype() == MetaspaceObj::ClassType) {
667 // Allocate space for a pointer directly in front of the future InstanceKlass, so
668 // we can do a quick lookup from InstanceKlass* -> RunTimeClassInfo*
669 // without building another hashtable. See RunTimeClassInfo::get_for()
670 // in systemDictionaryShared.cpp.
671 Klass* klass = (Klass*)src;
672 if (klass->is_instance_klass()) {
673 SystemDictionaryShared::validate_before_archiving(InstanceKlass::cast(klass));
674 dump_region->allocate(sizeof(address));
675 }
676 #ifdef _LP64
677 // More strict alignments needed for UseCompressedClassPointers
678 if (UseCompressedClassPointers) {
679 alignment = nth_bit(ArchiveBuilder::precomputed_narrow_klass_shift());
680 }
681 #endif
682 } else if (src_info->msotype() == MetaspaceObj::SymbolType) {
683 // Symbols may be allocated by using AllocateHeap, so their sizes
684 // may be less than size_in_bytes() indicates.
685 bytes = ((Symbol*)src)->byte_size();
686 }
687
688 char* dest = dump_region->allocate(bytes, alignment);
689 memcpy(dest, src, bytes);
690
691 // Update the hash of buffered sorted symbols for static dump so that the symbols have deterministic contents
692 if (CDSConfig::is_dumping_static_archive() && (src_info->msotype() == MetaspaceObj::SymbolType)) {
693 Symbol* buffered_symbol = (Symbol*)dest;
694 assert(((Symbol*)src)->is_permanent(), "archived symbols must be permanent");
695 buffered_symbol->update_identity_hash();
696 }
697
698 {
699 bool created;
700 _buffered_to_src_table.put_if_absent((address)dest, src, &created);
701 assert(created, "must be");
702 if (_buffered_to_src_table.maybe_grow()) {
703 log_info(aot, hashtables)("Expanded _buffered_to_src_table table to %d", _buffered_to_src_table.table_size());
704 }
705 }
706
707 intptr_t* archived_vtable = CppVtables::get_archived_vtable(src_info->msotype(), (address)dest);
708 if (archived_vtable != nullptr) {
709 *(address*)dest = (address)archived_vtable;
710 ArchivePtrMarker::mark_pointer((address*)dest);
711 }
712
713 log_trace(aot)("Copy: " PTR_FORMAT " ==> " PTR_FORMAT " %d", p2i(src), p2i(dest), bytes);
714 src_info->set_buffered_addr((address)dest);
715
716 char* newtop = dump_region->top();
717 _alloc_stats.record(src_info->msotype(), int(newtop - oldtop), src_info->read_only());
718
719 DEBUG_ONLY(_alloc_stats.verify((int)dump_region->used(), src_info->read_only()));
720 }
721
722 // This is used by code that hand-assembles data structures, such as the LambdaProxyClassKey, that are
723 // not handled by MetaspaceClosure.
724 void ArchiveBuilder::write_pointer_in_buffer(address* ptr_location, address src_addr) {
725 assert(is_in_buffer_space(ptr_location), "must be");
726 if (src_addr == nullptr) {
727 *ptr_location = nullptr;
728 ArchivePtrMarker::clear_pointer(ptr_location);
729 } else {
730 *ptr_location = get_buffered_addr(src_addr);
731 ArchivePtrMarker::mark_pointer(ptr_location);
732 }
733 }
734
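// Rewrite *ptr_location to point to the buffered copy of its target (unless the target lives
// inside the mapped static archive) and mark the location in the pointer bitmap.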
735 void ArchiveBuilder::mark_and_relocate_to_buffered_addr(address* ptr_location) {
736 assert(*ptr_location != nullptr, "sanity");
737 if (!is_in_mapped_static_archive(*ptr_location)) {
738 *ptr_location = get_buffered_addr(*ptr_location);
739 }
740 ArchivePtrMarker::mark_pointer(ptr_location);
741 }
742
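// Returns true iff src_addr has been copied into the buffer. Objects that were never seen by the
// builder, or that were seen but follow the set_to_null mode, return false.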
743 bool ArchiveBuilder::has_been_archived(address src_addr) const {
744 SourceObjInfo* p = _src_obj_table.get(src_addr);
745 if (p == nullptr) {
746 // This object has never been seen by ArchiveBuilder
747 return false;
748 }
749 if (p->buffered_addr() == nullptr) {
// ArchiveBuilder has seen this object but decided not to archive it, so any
// reference to this object will be modified to nullptr inside the buffer.
752 assert(p->follow_mode() == set_to_null, "must be");
753 return false;
754 }
755
756 DEBUG_ONLY({
757 // This is a class/method that belongs to one of the "original" classes that
758 // have been regenerated by lambdaFormInvokers.cpp. We must have archived
759 // the "regenerated" version of it.
760 if (RegeneratedClasses::has_been_regenerated(src_addr)) {
761 address regen_obj = RegeneratedClasses::get_regenerated_object(src_addr);
762 precond(regen_obj != nullptr && regen_obj != src_addr);
763 assert(has_been_archived(regen_obj), "must be");
764 assert(get_buffered_addr(src_addr) == get_buffered_addr(regen_obj), "must be");
765 }});
766
767 return true;
768 }
769
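// Map a source address to the address of its copy in the buffer (nullptr if the object was
// deliberately set to null). The object must have been seen by the builder.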
770 address ArchiveBuilder::get_buffered_addr(address src_addr) const {
771 SourceObjInfo* p = _src_obj_table.get(src_addr);
772 assert(p != nullptr, "src_addr " INTPTR_FORMAT " is used but has not been archived",
773 p2i(src_addr));
774
775 return p->buffered_addr();
776 }
777
778 address ArchiveBuilder::get_source_addr(address buffered_addr) const {
779 assert(is_in_buffer_space(buffered_addr), "must be");
780 address* src_p = _buffered_to_src_table.get(buffered_addr);
781 assert(src_p != nullptr && *src_p != nullptr, "must be");
782 return *src_p;
783 }
784
785 void ArchiveBuilder::relocate_embedded_pointers(ArchiveBuilder::SourceObjList* src_objs) {
786 for (int i = 0; i < src_objs->objs()->length(); i++) {
787 src_objs->relocate(i, this);
788 }
789 }
790
791 void ArchiveBuilder::relocate_metaspaceobj_embedded_pointers() {
792 aot_log_info(aot)("Relocating embedded pointers in core regions ... ");
793 relocate_embedded_pointers(&_rw_src_objs);
794 relocate_embedded_pointers(&_ro_src_objs);
795 log_info(cds)("Relocating %zu pointers, %zu tagged, %zu nulled",
796 _relocated_ptr_info._num_ptrs,
797 _relocated_ptr_info._num_tagged_ptrs,
798 _relocated_ptr_info._num_nulled_ptrs);
799 }
800
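// For each instance-klass counter 'x', DECLARE_INSTANCE_KLASS_COUNTER also declares 'x_a' and
// 'x_i', which count the aot-linked and aot-initialized subsets; ADD_COUNT updates all three.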
801 #define ADD_COUNT(x) \
802 x += 1; \
803 x ## _a += aotlinked ? 1 : 0; \
804 x ## _i += inited ? 1 : 0;
805
806 #define DECLARE_INSTANCE_KLASS_COUNTER(x) \
807 int x = 0; \
808 int x ## _a = 0; \
809 int x ## _i = 0;
810
811 void ArchiveBuilder::make_klasses_shareable() {
812 DECLARE_INSTANCE_KLASS_COUNTER(num_instance_klasses);
813 DECLARE_INSTANCE_KLASS_COUNTER(num_boot_klasses);
814 DECLARE_INSTANCE_KLASS_COUNTER(num_vm_klasses);
815 DECLARE_INSTANCE_KLASS_COUNTER(num_platform_klasses);
816 DECLARE_INSTANCE_KLASS_COUNTER(num_app_klasses);
817 DECLARE_INSTANCE_KLASS_COUNTER(num_old_klasses);
818 DECLARE_INSTANCE_KLASS_COUNTER(num_hidden_klasses);
819 DECLARE_INSTANCE_KLASS_COUNTER(num_enum_klasses);
820 DECLARE_INSTANCE_KLASS_COUNTER(num_unregistered_klasses);
821 int num_unlinked_klasses = 0;
822 int num_obj_array_klasses = 0;
823 int num_type_array_klasses = 0;
824
825 int boot_unlinked = 0;
826 int platform_unlinked = 0;
827 int app_unlinked = 0;
828 int unreg_unlinked = 0;
829
830 for (int i = 0; i < klasses()->length(); i++) {
831 // Some of the code in ConstantPool::remove_unshareable_info() requires the classes
// to be in linked state, so it must be called here before the next loop, which returns
833 // all classes to unlinked state.
834 Klass* k = get_buffered_addr(klasses()->at(i));
835 if (k->is_instance_klass()) {
836 InstanceKlass::cast(k)->constants()->remove_unshareable_info();
837 }
838 }
839
840 for (int i = 0; i < klasses()->length(); i++) {
841 const char* type;
842 const char* unlinked = "";
843 const char* kind = "";
844 const char* hidden = "";
845 const char* old = "";
846 const char* generated = "";
847 const char* aotlinked_msg = "";
848 const char* inited_msg = "";
849 Klass* k = get_buffered_addr(klasses()->at(i));
850 bool inited = false;
851 k->remove_java_mirror();
852 #ifdef _LP64
853 if (UseCompactObjectHeaders) {
854 Klass* requested_k = to_requested(k);
855 address narrow_klass_base = _requested_static_archive_bottom; // runtime encoding base == runtime mapping start
856 const int narrow_klass_shift = precomputed_narrow_klass_shift();
857 narrowKlass nk = CompressedKlassPointers::encode_not_null_without_asserts(requested_k, narrow_klass_base, narrow_klass_shift);
858 k->set_prototype_header_klass(nk);
859 }
860 #endif //_LP64
861 if (k->is_flatArray_klass()) {
862 num_obj_array_klasses ++;
863 type = "flat array";
864 } else if (k->is_refArray_klass()) {
865 num_obj_array_klasses ++;
866 type = "ref array";
867 } else if (k->is_objArray_klass()) {
868 // InstanceKlass and TypeArrayKlass will in turn call remove_unshareable_info
869 // on their array classes.
870 num_obj_array_klasses ++;
871 type = "obj array";
872 } else if (k->is_typeArray_klass()) {
873 num_type_array_klasses ++;
type = "type array";
875 k->remove_unshareable_info();
876 } else {
877 assert(k->is_instance_klass(), " must be");
878 InstanceKlass* ik = InstanceKlass::cast(k);
879 InstanceKlass* src_ik = get_source_addr(ik);
880 bool aotlinked = AOTClassLinker::is_candidate(src_ik);
881 inited = ik->has_aot_initialized_mirror();
882 ADD_COUNT(num_instance_klasses);
883 if (ik->is_hidden()) {
884 ADD_COUNT(num_hidden_klasses);
885 hidden = " hidden";
886 oop loader = k->class_loader();
887 if (loader == nullptr) {
888 type = "boot";
889 ADD_COUNT(num_boot_klasses);
890 } else if (loader == SystemDictionary::java_platform_loader()) {
891 type = "plat";
892 ADD_COUNT(num_platform_klasses);
893 } else if (loader == SystemDictionary::java_system_loader()) {
894 type = "app";
895 ADD_COUNT(num_app_klasses);
896 } else {
897 type = "bad";
898 assert(0, "shouldn't happen");
899 }
900 if (CDSConfig::is_dumping_method_handles()) {
901 assert(HeapShared::is_archivable_hidden_klass(ik), "sanity");
902 } else {
903 // Legacy CDS support for lambda proxies
904 CDS_JAVA_HEAP_ONLY(assert(HeapShared::is_lambda_proxy_klass(ik), "sanity");)
905 }
906 } else if (ik->defined_by_boot_loader()) {
907 type = "boot";
908 ADD_COUNT(num_boot_klasses);
909 } else if (ik->defined_by_platform_loader()) {
910 type = "plat";
911 ADD_COUNT(num_platform_klasses);
912 } else if (ik->defined_by_app_loader()) {
913 type = "app";
914 ADD_COUNT(num_app_klasses);
915 } else {
916 assert(ik->defined_by_other_loaders(), "must be");
917 type = "unreg";
918 ADD_COUNT(num_unregistered_klasses);
919 }
920
921 if (AOTClassLinker::is_vm_class(src_ik)) {
922 ADD_COUNT(num_vm_klasses);
923 }
924
925 if (!ik->is_linked()) {
926 num_unlinked_klasses ++;
927 unlinked = " unlinked";
928 if (ik->defined_by_boot_loader()) {
929 boot_unlinked ++;
930 } else if (ik->defined_by_platform_loader()) {
931 platform_unlinked ++;
932 } else if (ik->defined_by_app_loader()) {
933 app_unlinked ++;
934 } else {
935 unreg_unlinked ++;
936 }
937 }
938
939 if (ik->is_interface()) {
940 kind = " interface";
941 } else if (src_ik->is_enum_subclass()) {
942 kind = " enum";
943 ADD_COUNT(num_enum_klasses);
944 }
945
946 if (CDSConfig::is_old_class_for_verifier(ik)) {
947 ADD_COUNT(num_old_klasses);
948 old = " old";
949 }
950
951 if (ik->is_aot_generated_class()) {
952 generated = " generated";
953 }
954 if (aotlinked) {
955 aotlinked_msg = " aot-linked";
956 }
957 if (inited) {
958 if (InstanceKlass::cast(k)->static_field_size() == 0) {
959 inited_msg = " inited (no static fields)";
960 } else {
961 inited_msg = " inited";
962 }
963 }
964
965 AOTMetaspace::rewrite_bytecodes_and_calculate_fingerprints(Thread::current(), ik);
966 ik->remove_unshareable_info();
967 }
968
969 if (aot_log_is_enabled(Debug, aot, class)) {
970 ResourceMark rm;
971 aot_log_debug(aot, class)("klasses[%5d] = " PTR_FORMAT " %-5s %s%s%s%s%s%s%s%s", i,
972 p2i(to_requested(k)), type, k->external_name(),
973 kind, hidden, old, unlinked, generated, aotlinked_msg, inited_msg);
974 }
975 }
976
977 #define STATS_FORMAT "= %5d, aot-linked = %5d, inited = %5d"
978 #define STATS_PARAMS(x) num_ ## x, num_ ## x ## _a, num_ ## x ## _i
979
980 aot_log_info(aot)("Number of classes %d", num_instance_klasses + num_obj_array_klasses + num_type_array_klasses);
981 aot_log_info(aot)(" instance classes " STATS_FORMAT, STATS_PARAMS(instance_klasses));
982 aot_log_info(aot)(" boot " STATS_FORMAT, STATS_PARAMS(boot_klasses));
983 aot_log_info(aot)(" vm " STATS_FORMAT, STATS_PARAMS(vm_klasses));
984 aot_log_info(aot)(" platform " STATS_FORMAT, STATS_PARAMS(platform_klasses));
985 aot_log_info(aot)(" app " STATS_FORMAT, STATS_PARAMS(app_klasses));
986 aot_log_info(aot)(" unregistered " STATS_FORMAT, STATS_PARAMS(unregistered_klasses));
987 aot_log_info(aot)(" (enum) " STATS_FORMAT, STATS_PARAMS(enum_klasses));
988 aot_log_info(aot)(" (hidden) " STATS_FORMAT, STATS_PARAMS(hidden_klasses));
989 aot_log_info(aot)(" (old) " STATS_FORMAT, STATS_PARAMS(old_klasses));
990 aot_log_info(aot)(" (unlinked) = %5d, boot = %d, plat = %d, app = %d, unreg = %d",
991 num_unlinked_klasses, boot_unlinked, platform_unlinked, app_unlinked, unreg_unlinked);
992 aot_log_info(aot)(" obj array classes = %5d", num_obj_array_klasses);
993 aot_log_info(aot)(" type array classes = %5d", num_type_array_klasses);
994 aot_log_info(aot)(" symbols = %5d", _symbols->length());
995
996 #undef STATS_FORMAT
997 #undef STATS_PARAMS
998 }
999
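// Remove the unshareable parts of every buffered TrainingData, MethodData and MethodCounters
// object.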
1000 void ArchiveBuilder::make_training_data_shareable() {
1001 auto clean_td = [&] (address& src_obj, SourceObjInfo& info) {
1002 if (!is_in_buffer_space(info.buffered_addr())) {
1003 return;
1004 }
1005
1006 if (info.msotype() == MetaspaceObj::KlassTrainingDataType ||
1007 info.msotype() == MetaspaceObj::MethodTrainingDataType ||
1008 info.msotype() == MetaspaceObj::CompileTrainingDataType) {
1009 TrainingData* buffered_td = (TrainingData*)info.buffered_addr();
1010 buffered_td->remove_unshareable_info();
1011 } else if (info.msotype() == MetaspaceObj::MethodDataType) {
1012 MethodData* buffered_mdo = (MethodData*)info.buffered_addr();
1013 buffered_mdo->remove_unshareable_info();
1014 } else if (info.msotype() == MetaspaceObj::MethodCountersType) {
1015 MethodCounters* buffered_mc = (MethodCounters*)info.buffered_addr();
1016 buffered_mc->remove_unshareable_info();
1017 }
1018 };
1019 _src_obj_table.iterate_all(clean_td);
1020 }
1021
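// The helpers below convert between addresses and offsets measured from the requested bottom of
// the static archive. During a dynamic dump, any_to_offset() also accepts addresses inside the
// mapped static archive.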
1022 uintx ArchiveBuilder::buffer_to_offset(address p) const {
1023 address requested_p = to_requested(p);
1024 assert(requested_p >= _requested_static_archive_bottom, "must be");
1025 return requested_p - _requested_static_archive_bottom;
1026 }
1027
1028 uintx ArchiveBuilder::any_to_offset(address p) const {
1029 if (is_in_mapped_static_archive(p)) {
1030 assert(CDSConfig::is_dumping_dynamic_archive(), "must be");
1031 return p - _mapped_static_archive_bottom;
1032 }
1033 if (!is_in_buffer_space(p)) {
1034 // p must be a "source" address
1035 p = get_buffered_addr(p);
1036 }
1037 return buffer_to_offset(p);
1038 }
1039
1040 address ArchiveBuilder::offset_to_buffered_address(u4 offset) const {
1041 address requested_addr = _requested_static_archive_bottom + offset;
1042 address buffered_addr = requested_addr - _buffer_to_requested_delta;
1043 assert(is_in_buffer_space(buffered_addr), "bad offset");
1044 return buffered_addr;
1045 }
1046
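// The "ac" (AOT code) region is dumped after the ro region has been packed.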
1047 void ArchiveBuilder::start_ac_region() {
1048 ro_region()->pack();
1049 start_dump_region(&_ac_region);
1050 }
1051
1052 void ArchiveBuilder::end_ac_region() {
1053 _ac_region.pack();
1054 }
1055
1056 #if INCLUDE_CDS_JAVA_HEAP
1057 narrowKlass ArchiveBuilder::get_requested_narrow_klass(Klass* k) {
1058 assert(CDSConfig::is_dumping_heap(), "sanity");
1059 k = get_buffered_klass(k);
1060 Klass* requested_k = to_requested(k);
1061 const int narrow_klass_shift = ArchiveBuilder::precomputed_narrow_klass_shift();
1062 #ifdef ASSERT
1063 const size_t klass_alignment = MAX2(SharedSpaceObjectAlignment, (size_t)nth_bit(narrow_klass_shift));
1064 assert(is_aligned(k, klass_alignment), "Klass " PTR_FORMAT " misaligned.", p2i(k));
1065 #endif
1066 address narrow_klass_base = _requested_static_archive_bottom; // runtime encoding base == runtime mapping start
1067 // Note: use the "raw" version of encode that takes explicit narrow klass base and shift. Don't use any
1068 // of the variants that do sanity checks, nor any of those that use the current - dump - JVM's encoding setting.
1069 return CompressedKlassPointers::encode_not_null_without_asserts(requested_k, narrow_klass_base, narrow_klass_shift);
1070 }
1071 #endif // INCLUDE_CDS_JAVA_HEAP
1072
1073 // RelocateBufferToRequested --- Relocate all the pointers in rw/ro,
1074 // so that the archive can be mapped to the "requested" location without runtime relocation.
1075 //
1076 // - See ArchiveBuilder header for the definition of "buffer", "mapped" and "requested"
1077 // - ArchivePtrMarker::ptrmap() marks all the pointers in the rw/ro regions
1078 // - Every pointer must have one of the following values:
1079 // [a] nullptr:
1080 // No relocation is needed. Remove this pointer from ptrmap so we don't need to
1081 // consider it at runtime.
1082 // [b] Points into an object X which is inside the buffer:
1083 // Adjust this pointer by _buffer_to_requested_delta, so it points to X
1084 // when the archive is mapped at the requested location.
1085 // [c] Points into an object Y which is inside mapped static archive:
1086 // - This happens only during dynamic dump
1087 // - Adjust this pointer by _mapped_to_requested_static_archive_delta,
1088 // so it points to Y when the static archive is mapped at the requested location.
1089 template <bool STATIC_DUMP>
1090 class RelocateBufferToRequested : public BitMapClosure {
1091 ArchiveBuilder* _builder;
1092 address _buffer_bottom;
1093 intx _buffer_to_requested_delta;
1094 intx _mapped_to_requested_static_archive_delta;
1095 size_t _max_non_null_offset;
1096
1097 public:
1098 RelocateBufferToRequested(ArchiveBuilder* builder) {
1099 _builder = builder;
1100 _buffer_bottom = _builder->buffer_bottom();
1101 _buffer_to_requested_delta = builder->buffer_to_requested_delta();
1102 _mapped_to_requested_static_archive_delta = builder->requested_static_archive_bottom() - builder->mapped_static_archive_bottom();
1103 _max_non_null_offset = 0;
1104
1105 address bottom = _builder->buffer_bottom();
1106 address top = _builder->buffer_top();
1107 address new_bottom = bottom + _buffer_to_requested_delta;
1108 address new_top = top + _buffer_to_requested_delta;
1109 aot_log_debug(aot)("Relocating archive from [" INTPTR_FORMAT " - " INTPTR_FORMAT "] to "
1110 "[" INTPTR_FORMAT " - " INTPTR_FORMAT "]",
1111 p2i(bottom), p2i(top),
1112 p2i(new_bottom), p2i(new_top));
1113 }
1114
1115 bool do_bit(size_t offset) {
1116 address* p = (address*)_buffer_bottom + offset;
1117 assert(_builder->is_in_buffer_space(p), "pointer must live in buffer space");
1118
1119 if (*p == nullptr) {
1120 // todo -- clear bit, etc
1121 ArchivePtrMarker::ptrmap()->clear_bit(offset);
1122 } else {
1123 if (STATIC_DUMP) {
1124 assert(_builder->is_in_buffer_space(*p), "old pointer must point inside buffer space");
1125 *p += _buffer_to_requested_delta;
1126 assert(_builder->is_in_requested_static_archive(*p), "new pointer must point inside requested archive");
1127 } else {
1128 if (_builder->is_in_buffer_space(*p)) {
1129 *p += _buffer_to_requested_delta;
1130 // assert is in requested dynamic archive
1131 } else {
1132 assert(_builder->is_in_mapped_static_archive(*p), "old pointer must point inside buffer space or mapped static archive");
1133 *p += _mapped_to_requested_static_archive_delta;
1134 assert(_builder->is_in_requested_static_archive(*p), "new pointer must point inside requested archive");
1135 }
1136 }
1137 _max_non_null_offset = offset;
1138 }
1139
1140 return true; // keep iterating
1141 }
1142
1143 void doit() {
1144 ArchivePtrMarker::ptrmap()->iterate(this);
1145 ArchivePtrMarker::compact(_max_non_null_offset);
1146 }
1147 };
1148
1149 #ifdef _LP64
1150 int ArchiveBuilder::precomputed_narrow_klass_shift() {
1151 // Legacy Mode:
1152 // We use 32 bits for narrowKlass, which should cover the full 4G Klass range. Shift can be 0.
1153 // CompactObjectHeader Mode:
1154 // narrowKlass is much smaller, and we use the highest possible shift value to later get the maximum
1155 // Klass encoding range.
1156 //
1157 // Note that all of this may change in the future, if we decide to correct the pre-calculated
1158 // narrow Klass IDs at archive load time.
1159 assert(UseCompressedClassPointers, "Only needed for compressed class pointers");
1160 return UseCompactObjectHeaders ? CompressedKlassPointers::max_shift() : 0;
1161 }
1162 #endif // _LP64
1163
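// Patch all marked pointers in the buffer so that the archive can be mapped at the requested
// address without any runtime relocation (see RelocateBufferToRequested above).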
1164 void ArchiveBuilder::relocate_to_requested() {
1165 if (!ro_region()->is_packed()) {
1166 ro_region()->pack();
1167 }
1168 size_t my_archive_size = buffer_top() - buffer_bottom();
1169
1170 if (CDSConfig::is_dumping_static_archive()) {
1171 _requested_static_archive_top = _requested_static_archive_bottom + my_archive_size;
1172 RelocateBufferToRequested<true> patcher(this);
1173 patcher.doit();
1174 } else {
1175 assert(CDSConfig::is_dumping_dynamic_archive(), "must be");
1176 _requested_dynamic_archive_top = _requested_dynamic_archive_bottom + my_archive_size;
1177 RelocateBufferToRequested<false> patcher(this);
1178 patcher.doit();
1179 }
1180 }
1181
1182 void ArchiveBuilder::print_stats() {
1183 _alloc_stats.print_stats(int(_ro_region.used()), int(_rw_region.used()));
1184 }
1185
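// Write the core regions, the pointer bitmaps and (if present) a heap region to the archive
// file, then compute the header checksum and write the header.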
1186 void ArchiveBuilder::write_archive(FileMapInfo* mapinfo, ArchiveMappedHeapInfo* mapped_heap_info, ArchiveStreamedHeapInfo* streamed_heap_info) {
1187 // Make sure NUM_CDS_REGIONS (exported in cds.h) agrees with
1188 // AOTMetaspace::n_regions (internal to hotspot).
1189 assert(NUM_CDS_REGIONS == AOTMetaspace::n_regions, "sanity");
1190
1191 ResourceMark rm;
1192
1193 write_region(mapinfo, AOTMetaspace::rw, &_rw_region, /*read_only=*/false,/*allow_exec=*/false);
1194 write_region(mapinfo, AOTMetaspace::ro, &_ro_region, /*read_only=*/true, /*allow_exec=*/false);
1195 write_region(mapinfo, AOTMetaspace::ac, &_ac_region, /*read_only=*/false,/*allow_exec=*/false);
1196
1197 // Split pointer map into read-write and read-only bitmaps
1198 ArchivePtrMarker::initialize_rw_ro_maps(&_rw_ptrmap, &_ro_ptrmap);
1199
1200 size_t bitmap_size_in_bytes;
1201 char* bitmap = mapinfo->write_bitmap_region(ArchivePtrMarker::rw_ptrmap(),
1202 ArchivePtrMarker::ro_ptrmap(),
1203 mapped_heap_info,
1204 streamed_heap_info,
1205 bitmap_size_in_bytes);
1206
1207 if (mapped_heap_info != nullptr && mapped_heap_info->is_used()) {
1208 _total_heap_region_size = mapinfo->write_mapped_heap_region(mapped_heap_info);
1209 } else if (streamed_heap_info != nullptr && streamed_heap_info->is_used()) {
1210 _total_heap_region_size = mapinfo->write_streamed_heap_region(streamed_heap_info);
1211 }
1212
1213 print_region_stats(mapinfo, mapped_heap_info, streamed_heap_info);
1214
1215 mapinfo->set_requested_base((char*)AOTMetaspace::requested_base_address());
1216 mapinfo->set_header_crc(mapinfo->compute_header_crc());
1217 // After this point, we should not write any data into mapinfo->header() since this
// would invalidate the checksum we have just calculated.
1219 mapinfo->write_header();
1220 mapinfo->close();
1221
1222 if (log_is_enabled(Info, aot)) {
1223 log_info(aot)("Full module graph = %s", CDSConfig::is_dumping_full_module_graph() ? "enabled" : "disabled");
1224 print_stats();
1225 }
1226
1227 if (log_is_enabled(Info, aot, map)) {
1228 AOTMapLogger::dumptime_log(this, mapinfo, mapped_heap_info, streamed_heap_info, bitmap, bitmap_size_in_bytes);
1229 }
1230 CDS_JAVA_HEAP_ONLY(HeapShared::destroy_archived_object_cache());
1231 FREE_C_HEAP_ARRAY(char, bitmap);
1232 }
1233
1234 void ArchiveBuilder::write_region(FileMapInfo* mapinfo, int region_idx, DumpRegion* dump_region, bool read_only, bool allow_exec) {
1235 mapinfo->write_region(region_idx, dump_region->base(), dump_region->used(), read_only, allow_exec);
1236 }
1237
1238 void ArchiveBuilder::count_relocated_pointer(bool tagged, bool nulled) {
1239 _relocated_ptr_info._num_ptrs ++;
1240 _relocated_ptr_info._num_tagged_ptrs += tagged ? 1 : 0;
1241 _relocated_ptr_info._num_nulled_ptrs += nulled ? 1 : 0;
1242 }
1243
1244 void ArchiveBuilder::print_region_stats(FileMapInfo *mapinfo,
1245 ArchiveMappedHeapInfo* mapped_heap_info,
1246 ArchiveStreamedHeapInfo* streamed_heap_info) {
1247 // Print statistics of all the regions
1248 const size_t bitmap_used = mapinfo->region_at(AOTMetaspace::bm)->used();
1249 const size_t bitmap_reserved = mapinfo->region_at(AOTMetaspace::bm)->used_aligned();
1250 const size_t total_reserved = _ro_region.reserved() + _rw_region.reserved() +
1251 bitmap_reserved +
1252 _total_heap_region_size;
1253 const size_t total_bytes = _ro_region.used() + _rw_region.used() +
1254 bitmap_used +
1255 _total_heap_region_size;
1256 const double total_u_perc = percent_of(total_bytes, total_reserved);
1257
1258 _rw_region.print(total_reserved);
1259 _ro_region.print(total_reserved);
1260 _ac_region.print(total_reserved);
1261
1262 print_bitmap_region_stats(bitmap_used, total_reserved);
1263
1264 if (mapped_heap_info != nullptr && mapped_heap_info->is_used()) {
1265 print_heap_region_stats(mapped_heap_info->buffer_start(), mapped_heap_info->buffer_byte_size(), total_reserved);
1266 } else if (streamed_heap_info != nullptr && streamed_heap_info->is_used()) {
1267 print_heap_region_stats(streamed_heap_info->buffer_start(), streamed_heap_info->buffer_byte_size(), total_reserved);
1268 }
1269
1270 aot_log_debug(aot)("total : %9zu [100.0%% of total] out of %9zu bytes [%5.1f%% used]",
1271 total_bytes, total_reserved, total_u_perc);
1272 }
1273
1274 void ArchiveBuilder::print_bitmap_region_stats(size_t size, size_t total_size) {
1275 aot_log_debug(aot)("bm space: %9zu [ %4.1f%% of total] out of %9zu bytes [100.0%% used]",
1276 size, size/double(total_size)*100.0, size);
1277 }
1278
1279 void ArchiveBuilder::print_heap_region_stats(char* start, size_t size, size_t total_size) {
1280 char* top = start + size;
1281 aot_log_debug(aot)("hp space: %9zu [ %4.1f%% of total] out of %9zu bytes [100.0%% used] at " INTPTR_FORMAT,
1282 size, size/double(total_size)*100.0, size, p2i(start));
1283 }
1284
1285 void ArchiveBuilder::report_out_of_space(const char* name, size_t needed_bytes) {
1286 // This is highly unlikely to happen on 64-bits because we have reserved a 4GB space.
1287 // On 32-bit we reserve only 256MB so you could run out of space with 100,000 classes
1288 // or so.
1289 _rw_region.print_out_of_space_msg(name, needed_bytes);
1290 _ro_region.print_out_of_space_msg(name, needed_bytes);
1291
1292 log_error(aot)("Unable to allocate from '%s' region: Please reduce the number of shared classes.", name);
1293 AOTMetaspace::unrecoverable_writing_error();
1294 }