1 /*
2 * Copyright (c) 2023, 2026, Oracle and/or its affiliates. All rights reserved.
3 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
4 *
5 * This code is free software; you can redistribute it and/or modify it
6 * under the terms of the GNU General Public License version 2 only, as
7 * published by the Free Software Foundation.
8 *
9 * This code is distributed in the hope that it will be useful, but WITHOUT
10 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
11 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
12 * version 2 for more details (a copy is included in the LICENSE file that
13 * accompanied this code).
14 *
15 * You should have received a copy of the GNU General Public License version
16 * 2 along with this work; if not, write to the Free Software Foundation,
17 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
18 *
19 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
20 * or visit www.oracle.com if you need additional information or have any
21 * questions.
22 *
23 */
24
25
26 #include "asm/macroAssembler.hpp"
27 #include "cds/aotCacheAccess.hpp"
28 #include "cds/aotMetaspace.hpp"
29 #include "cds/cds_globals.hpp"
30 #include "cds/cdsConfig.hpp"
31 #include "cds/heapShared.hpp"
32 #include "ci/ciUtilities.hpp"
33 #include "classfile/javaAssertions.hpp"
34 #include "code/aotCodeCache.hpp"
35 #include "code/codeCache.hpp"
36 #include "gc/shared/cardTableBarrierSet.hpp"
37 #include "gc/shared/gcConfig.hpp"
38 #include "logging/logStream.hpp"
39 #include "memory/memoryReserver.hpp"
40 #include "runtime/deoptimization.hpp"
41 #include "runtime/flags/flagSetting.hpp"
42 #include "runtime/globals_extension.hpp"
43 #include "runtime/java.hpp"
44 #include "runtime/mutexLocker.hpp"
45 #include "runtime/os.inline.hpp"
46 #include "runtime/sharedRuntime.hpp"
47 #include "runtime/stubInfo.hpp"
48 #include "runtime/stubRoutines.hpp"
49 #include "utilities/copy.hpp"
50 #ifdef COMPILER1
51 #include "c1/c1_Runtime1.hpp"
52 #endif
53 #ifdef COMPILER2
54 #include "opto/runtime.hpp"
55 #endif
56 #if INCLUDE_G1GC
57 #include "gc/g1/g1BarrierSetRuntime.hpp"
58 #include "gc/g1/g1HeapRegion.hpp"
59 #endif
60 #if INCLUDE_SHENANDOAHGC
61 #include "gc/shenandoah/shenandoahRuntime.hpp"
62 #endif
63 #if INCLUDE_ZGC
64 #include "gc/z/zBarrierSetRuntime.hpp"
65 #endif
66
67 #include <errno.h>
68 #include <sys/stat.h>
69
// Human-readable names for each AOTCodeEntry kind, generated by stringizing
// the enumerators supplied by the DO_AOTCODEENTRY_KIND x-macro.
// The array is indexed by the AOTCodeEntry::Kind enum value.
const char* aot_code_entry_kind_name[] = {
#define DECL_KIND_STRING(kind) XSTR(kind),
  DO_AOTCODEENTRY_KIND(DECL_KIND_STRING)
#undef DECL_KIND_STRING
};
75
// Report that an existing AOT Code Cache cannot be used by this VM.
// When AbortVMOnAOTCodeFailure is set, VM startup is aborted; otherwise
// the failure is logged and all AOT code caching is disabled.
static void report_load_failure() {
  if (AbortVMOnAOTCodeFailure) {
    vm_exit_during_initialization("Unable to use AOT Code Cache.", nullptr);
  }
  log_info(aot, codecache, init)("Unable to use AOT Code Cache.");
  AOTCodeCache::disable_caching();
}
83
// Report that an AOT Code Cache cannot be created (assembly/dump phase).
// When AbortVMOnAOTCodeFailure is set, the VM is aborted; otherwise the
// failure is logged and all AOT code caching is disabled.
static void report_store_failure() {
  if (AbortVMOnAOTCodeFailure) {
    tty->print_cr("Unable to create AOT Code Cache.");
    vm_abort(false);
  }
  log_info(aot, codecache, exit)("Unable to create AOT Code Cache.");
  AOTCodeCache::disable_caching();
}
92
// The sequence of AOT code caching flag and parameter settings.
94 //
95 // 1. The initial AOT code caching flags setting is done
96 // during call to CDSConfig::check_vm_args_consistency().
97 //
// 2. The earliest AOT code state check is done in compilationPolicy_init()
//    where we set the number of compiler threads for the AOT assembly phase.
100 //
// 3. We determine presence of AOT code in AOT Cache in
//    AOTMetaspace::open_static_archive() which is called
//    after compilationPolicy_init() but before codeCache_init().
104 //
105 // 4. AOTCodeCache::initialize() is called during universe_init()
106 // and does final AOT state and flags settings.
107 //
108 // 5. Finally AOTCodeCache::init2() is called after universe_init()
109 // when all GC settings are finalized.
110
111 // Next methods determine which action we do with AOT code depending
112 // on phase of AOT process: assembly or production.
113
114 bool AOTCodeCache::is_dumping_adapter() {
115 return AOTAdapterCaching && is_on_for_dump();
116 }
117
118 bool AOTCodeCache::is_using_adapter() {
119 return AOTAdapterCaching && is_on_for_use();
120 }
121
122 bool AOTCodeCache::is_dumping_stub() {
123 return AOTStubCaching && is_on_for_dump();
124 }
125
126 bool AOTCodeCache::is_using_stub() {
127 return AOTStubCaching && is_on_for_use();
128 }
129
// The next methods can be called regardless of the AOT code cache status.
// Initially they are called during flags parsing and finalized
// in AOTCodeCache::initialize().
// Turn AOT code caching on ergonomically, but only for flags still at their
// default values so that explicit command-line settings are respected.
void AOTCodeCache::enable_caching() {
  FLAG_SET_ERGO_IF_DEFAULT(AOTStubCaching, true);
  FLAG_SET_ERGO_IF_DEFAULT(AOTAdapterCaching, true);
}
137
// Turn all AOT code caching off unconditionally (overrides any
// command-line setting via ergonomic flag update).
void AOTCodeCache::disable_caching() {
  FLAG_SET_ERGO(AOTStubCaching, false);
  FLAG_SET_ERGO(AOTAdapterCaching, false);
}
142
143 bool AOTCodeCache::is_caching_enabled() {
144 return AOTStubCaching || AOTAdapterCaching;
145 }
146
147 static uint32_t encode_id(AOTCodeEntry::Kind kind, int id) {
148 assert(AOTCodeEntry::is_valid_entry_kind(kind), "invalid AOTCodeEntry kind %d", (int)kind);
149 // There can be a conflict of id between an Adapter and *Blob, but that should not cause any functional issue
150 // becasue both id and kind are used to find an entry, and that combination should be unique
151 if (kind == AOTCodeEntry::Adapter) {
152 return id;
153 } else if (kind == AOTCodeEntry::SharedBlob) {
154 assert(StubInfo::is_shared(static_cast<BlobId>(id)), "not a shared blob id %d", id);
155 return id;
156 } else if (kind == AOTCodeEntry::C1Blob) {
157 assert(StubInfo::is_c1(static_cast<BlobId>(id)), "not a c1 blob id %d", id);
158 return id;
159 } else {
160 // kind must be AOTCodeEntry::C2Blob
161 assert(StubInfo::is_c2(static_cast<BlobId>(id)), "not a c2 blob id %d", id);
162 return id;
163 }
164 }
165
// Upper bound (in bytes) for the AOT code store buffer. Derived from
// AOTCodeMaxSize (possibly aligned up) in AOTCodeCache::initialize().
static uint _max_aot_code_size = 0;
uint AOTCodeCache::max_aot_code_size() {
  return _max_aot_code_size;
}
170
// It is called from AOTMetaspace::initialize_shared_spaces()
// which is called from universe_init().
// At this point all AOT class linking settings are finalized
// and AOT cache is open so we can map AOT code region.
void AOTCodeCache::initialize() {
#if defined(ZERO) || !(defined(AMD64) || defined(AARCH64))
  // AOT code caching is only implemented for AMD64 and AARCH64 (non-Zero).
  log_info(aot, codecache, init)("AOT Code Cache is not supported on this platform.");
  disable_caching();
  return;
#else
  if (FLAG_IS_DEFAULT(AOTCache)) {
    log_info(aot, codecache, init)("AOT Code Cache is not used: AOTCache is not specified.");
    disable_caching();
    return; // AOTCache must be specified to dump and use AOT code
  }

  // Disable stubs caching until JDK-8357398 is fixed.
  FLAG_SET_ERGO(AOTStubCaching, false);

  if (VerifyOops) {
    // Disable AOT stub caching when VerifyOops flag is on.
    // Verify oops code generated a lot of C strings which overflow
    // AOT C string table (which has fixed size).
    // AOT C string table will be reworked later to handle such cases.
    log_info(aot, codecache, init)("AOT Stub Caching is not supported with VerifyOops.");
    FLAG_SET_ERGO(AOTStubCaching, false);
    if (InlineTypePassFieldsAsArgs) {
      log_info(aot, codecache, init)("AOT Adapter Caching is not supported with VerifyOops + InlineTypePassFieldsAsArgs.");
      FLAG_SET_ERGO(AOTAdapterCaching, false);
    }
  }

  // Determine whether this run dumps AOT code (final static archive creation
  // with AOT-linked classes) or uses previously cached AOT code.
  bool is_dumping = false;
  bool is_using = false;
  if (CDSConfig::is_dumping_final_static_archive() && CDSConfig::is_dumping_aot_linked_classes()) {
    is_dumping = true; // NOTE(review): immediately recomputed below; dead store?
    enable_caching();
    is_dumping = is_caching_enabled();
  } else if (CDSConfig::is_using_archive() && CDSConfig::is_using_aot_linked_classes()) {
    enable_caching();
    is_using = is_caching_enabled();
  } else {
    log_info(aot, codecache, init)("AOT Code Cache is not used: AOT Class Linking is not used.");
    disable_caching();
    return; // nothing to do
  }
  if (!(is_dumping || is_using)) {
    disable_caching();
    return; // AOT code caching disabled on command line
  }
  // Size the store buffer; align an explicit AOTCodeMaxSize up to the
  // allocation granularity if necessary.
  _max_aot_code_size = AOTCodeMaxSize;
  if (!FLAG_IS_DEFAULT(AOTCodeMaxSize)) {
    if (!is_aligned(AOTCodeMaxSize, os::vm_allocation_granularity())) {
      _max_aot_code_size = align_up(AOTCodeMaxSize, os::vm_allocation_granularity());
      log_debug(aot,codecache,init)("Max AOT Code Cache size is aligned up to %uK", (int)(max_aot_code_size()/K));
    }
  }
  // An empty AOT code region in the archive means there is nothing to load.
  size_t aot_code_size = is_using ? AOTCacheAccess::get_aot_code_region_size() : 0;
  if (is_using && aot_code_size == 0) {
    log_info(aot, codecache, init)("AOT Code Cache is empty");
    disable_caching();
    return;
  }
  if (!open_cache(is_dumping, is_using)) {
    if (is_using) {
      report_load_failure();
    } else {
      report_store_failure();
    }
    return;
  }
  if (is_dumping) {
    FLAG_SET_DEFAULT(ForceUnreachable, true);
  }
  FLAG_SET_DEFAULT(DelayCompilerStubsGeneration, false);
#endif // defined(ZERO) || !(defined(AMD64) || defined(AARCH64))
}
248
// Cache instance opened by initialize(); it is promoted to _cache in init2()
// only after its recorded configuration has been verified for this runtime.
static AOTCodeCache* opened_cache = nullptr; // Use this until we verify the cache
AOTCodeCache* AOTCodeCache::_cache = nullptr;
DEBUG_ONLY( bool AOTCodeCache::_passed_init2 = false; )
252
// It is called after universe_init() when all GC settings are finalized.
// Verifies the opened cache's recorded configuration against this runtime
// and, on success, publishes it as the active cache (_cache).
void AOTCodeCache::init2() {
  DEBUG_ONLY( _passed_init2 = true; )
  if (opened_cache == nullptr) {
    return; // initialize() did not open a cache
  }
  if (!opened_cache->verify_config()) {
    // Incompatible configuration: discard the cache and report.
    delete opened_cache;
    opened_cache = nullptr;
    report_load_failure();
    return;
  }

  // initialize aot runtime constants as appropriate to this runtime
  AOTRuntimeConstants::initialize_from_runtime();

  // initialize the table of external routines so we can save
  // generated code blobs that reference them
  AOTCodeAddressTable* table = opened_cache->_table;
  assert(table != nullptr, "should be initialized already");
  table->init_extrs();

  // Now cache and address table are ready for AOT code generation
  _cache = opened_cache;
}
278
279 bool AOTCodeCache::open_cache(bool is_dumping, bool is_using) {
280 opened_cache = new AOTCodeCache(is_dumping, is_using);
281 if (opened_cache->failed()) {
282 delete opened_cache;
283 opened_cache = nullptr;
284 return false;
285 }
286 return true;
287 }
288
// Write out all collected AOT code entries; a no-op when the cache is off.
// NOTE(review): holds Compile_lock while writing — presumably to keep the
// set of compiled code stable during finish_write(); confirm.
void AOTCodeCache::dump() {
  if (is_on()) {
    assert(is_on_for_dump(), "should be called only when dumping AOT code");
    MutexLocker ml(Compile_lock);
    _cache->finish_write();
  }
}
296
297 #define DATA_ALIGNMENT HeapWordSize
298
// Construct the cache for the requested phase(s).
// For use: reserves memory, maps the archived AOT code region into it and
// verifies the header. For dump: allocates the store buffer sized by
// max_aot_code_size(). On any failure, set_failed() is called and the
// caller (open_cache) destroys the instance.
AOTCodeCache::AOTCodeCache(bool is_dumping, bool is_using) :
  _load_header(nullptr),
  _load_buffer(nullptr),
  _store_buffer(nullptr),
  _C_store_buffer(nullptr),
  _write_position(0),
  _load_size(0),
  _store_size(0),
  _for_use(is_using),
  _for_dump(is_dumping),
  _failed(false),
  _lookup_failed(false),
  _table(nullptr),
  _load_entries(nullptr),
  _search_entries(nullptr),
  _store_entries(nullptr),
  _C_strings_buf(nullptr),
  _store_entries_cnt(0)
{
  // Read header at the beginning of cache
  if (_for_use) {
    // Read cache
    size_t load_size = AOTCacheAccess::get_aot_code_region_size();
    ReservedSpace rs = MemoryReserver::reserve(load_size, mtCode);
    if (!rs.is_reserved()) {
      log_warning(aot, codecache, init)("Failed to reserved %u bytes of memory for mapping AOT code region into AOT Code Cache", (uint)load_size);
      set_failed();
      return;
    }
    if (!AOTCacheAccess::map_aot_code_region(rs)) {
      log_warning(aot, codecache, init)("Failed to read/mmap cached code region into AOT Code Cache");
      set_failed();
      return;
    }

    _load_size = (uint)load_size;
    _load_buffer = (char*)rs.base();
    assert(is_aligned(_load_buffer, DATA_ALIGNMENT), "load_buffer is not aligned");
    log_debug(aot, codecache, init)("Mapped %u bytes at address " INTPTR_FORMAT " at AOT Code Cache", _load_size, p2i(_load_buffer));

    // Header::verify() rejects version mismatches and truncated regions.
    _load_header = (Header*)addr(0);
    if (!_load_header->verify(_load_size)) {
      set_failed();
      return;
    }
    log_info (aot, codecache, init)("Loaded %u AOT code entries from AOT Code Cache", _load_header->entries_count());
    log_debug(aot, codecache, init)("  Adapters:  total=%u", _load_header->adapters_count());
    log_debug(aot, codecache, init)("  Shared Blobs: total=%u", _load_header->shared_blobs_count());
    log_debug(aot, codecache, init)("  C1 Blobs: total=%u", _load_header->C1_blobs_count());
    log_debug(aot, codecache, init)("  C2 Blobs: total=%u", _load_header->C2_blobs_count());
    log_debug(aot, codecache, init)("  AOT code cache size: %u bytes", _load_header->cache_size());

    // Read strings
    load_strings();
  }
  if (_for_dump) {
    // Over-allocate by DATA_ALIGNMENT so the aligned pointer still leaves
    // max_aot_code_size() usable bytes.
    _C_store_buffer = NEW_C_HEAP_ARRAY(char, max_aot_code_size() + DATA_ALIGNMENT, mtCode);
    _store_buffer = align_up(_C_store_buffer, DATA_ALIGNMENT);
    // Entries allocated at the end of buffer in reverse (as on stack).
    _store_entries = (AOTCodeEntry*)align_up(_C_store_buffer + max_aot_code_size(), DATA_ALIGNMENT);
    log_debug(aot, codecache, init)("Allocated store buffer at address " INTPTR_FORMAT " of size %u", p2i(_store_buffer), max_aot_code_size());
  }
  _table = new AOTCodeAddressTable();
}
363
364 void AOTCodeCache::init_early_stubs_table() {
365 AOTCodeAddressTable* table = addr_table();
366 if (table != nullptr) {
367 table->init_early_stubs();
368 }
369 }
370
371 void AOTCodeCache::init_shared_blobs_table() {
372 AOTCodeAddressTable* table = addr_table();
373 if (table != nullptr) {
374 table->init_shared_blobs();
375 }
376 }
377
378 void AOTCodeCache::init_early_c1_table() {
379 AOTCodeAddressTable* table = addr_table();
380 if (table != nullptr) {
381 table->init_early_c1();
382 }
383 }
384
// macro to record which flags are set -- flag_type selects the
// relevant accessor e.g. set_flag, set_x86_flag, set_x86_use_flag.
// n.b. flag_enum_name and global_flag_name are both needed because we
// don't have consistent conventions for naming global flags e.g.
// EnableContended vs UseMulAddIntrinsic vs UseCRC32Intrinsics
// The recorded bits are later compared against the runtime's flags by
// the CHECK_FLAG macro in Config::verify().

#define RECORD_FLAG(flag_type, flag_enum_name, global_flag_name) \
  if (global_flag_name) {                                        \
    set_ ## flag_type ## flag(flag_enum_name);                   \
  }
395
// Record the VM configuration that influenced the generated AOT code into
// the cache header, so a loading VM can verify compatibility in
// Config::verify(). cpu_features_offset is the cache offset where the CPU
// feature data was stored.
void AOTCodeCache::Config::record(uint cpu_features_offset) {
  _flags = 0;
#ifdef ASSERT
  // Caches produced by a debug VM cannot be used by a product VM (and
  // vice versa); record which kind produced this one.
  set_flag(debugVM);
#endif
  RECORD_FLAG(, compressedOops, UseCompressedOops);
  RECORD_FLAG(, useTLAB, UseTLAB);
  if (JavaAssertions::systemClassDefault()) {
    set_flag(systemClassAssertions);
  }
  if (JavaAssertions::userClassDefault()) {
    set_flag(userClassAssertions);
  }
  RECORD_FLAG(, enableContendedPadding, EnableContended);
  RECORD_FLAG(, restrictContendedPadding, RestrictContended);

  // Scalar configuration values, each checked individually in verify().
  _compressedOopShift = CompressedOops::shift();
  _compressedOopBase = CompressedOops::base();
  _compressedKlassShift = CompressedKlassPointers::shift();
  _contendedPaddingWidth = ContendedPaddingWidth;
  _gc = (uint)Universe::heap()->kind();
  _optoLoopAlignment = (uint)OptoLoopAlignment;
  _codeEntryAlignment = (uint)CodeEntryAlignment;
  _allocatePrefetchLines = (uint)AllocatePrefetchLines;
  _allocateInstancePrefetchLines = (uint)AllocateInstancePrefetchLines;
  _allocatePrefetchDistance = (uint)AllocatePrefetchDistance;
  _allocatePrefetchStepSize = (uint)AllocatePrefetchStepSize;
  // Intrinsic-enabling flags shared by all platforms.
  _use_intrinsics_flags = 0;
  RECORD_FLAG(use_, useCRC32, UseCRC32Intrinsics);
  RECORD_FLAG(use_, useCRC32C, UseCRC32CIntrinsics);
#ifdef COMPILER2
  _maxVectorSize = (uint)MaxVectorSize;
  _arrayOperationPartialInlineSize = (uint)ArrayOperationPartialInlineSize;
  RECORD_FLAG(use_, useMultiplyToLen, UseMultiplyToLenIntrinsic);
  RECORD_FLAG(use_, useSquareToLen, UseSquareToLenIntrinsic);
  RECORD_FLAG(use_, useMulAdd, UseMulAddIntrinsic);
  RECORD_FLAG(use_, useMontgomeryMultiply, UseMontgomeryMultiplyIntrinsic);
  RECORD_FLAG(use_, useMontgomerySquare, UseMontgomerySquareIntrinsic);
#endif // COMPILER2
  RECORD_FLAG(use_, useChaCha20, UseChaCha20Intrinsics);
  RECORD_FLAG(use_, useDilithium, UseDilithiumIntrinsics);
  RECORD_FLAG(use_, useKyber, UseKyberIntrinsics);
  RECORD_FLAG(use_, useBASE64, UseBASE64Intrinsics);
  RECORD_FLAG(use_, useAdler32, UseAdler32Intrinsics);
  RECORD_FLAG(use_, useAES, UseAESIntrinsics);
  RECORD_FLAG(use_, useAESCTR, UseAESCTRIntrinsics);
  RECORD_FLAG(use_, useGHASH, UseGHASHIntrinsics);
  RECORD_FLAG(use_, useMD5, UseMD5Intrinsics);
  RECORD_FLAG(use_, useSHA1, UseSHA1Intrinsics);
  RECORD_FLAG(use_, useSHA256, UseSHA256Intrinsics);
  RECORD_FLAG(use_, useSHA512, UseSHA512Intrinsics);
  RECORD_FLAG(use_, useSHA3, UseSHA3Intrinsics);
  RECORD_FLAG(use_, usePoly1305, UsePoly1305Intrinsics);
  RECORD_FLAG(use_, useVectorizedMismatch, UseVectorizedMismatchIntrinsic);
  RECORD_FLAG(use_, useSecondarySupersTable, UseSecondarySupersTable);
#if defined(X86) && !defined(ZERO)
  // x86-specific configuration and flags.
  _avx3threshold = (uint)AVX3Threshold;
  _useAVX = (uint)UseAVX;
  _x86_flags = 0;
  RECORD_FLAG(x86_, x86_enableX86ECoreOpts, EnableX86ECoreOpts);
  RECORD_FLAG(x86_, x86_useUnalignedLoadStores, UseUnalignedLoadStores);
  RECORD_FLAG(x86_, x86_useAPX, UseAPX);

  _x86_use_intrinsics_flags = 0;
  RECORD_FLAG(x86_use_, x86_useLibm, UseLibmIntrinsic);
  RECORD_FLAG(x86_use_, x86_useIntPoly, UseIntPolyIntrinsics);
#endif // defined(X86) && !defined(ZERO)
#if defined(AARCH64) && !defined(ZERO)
  // aarch64-specific configuration and flags.
  _prefetchCopyIntervalInBytes = (uint)PrefetchCopyIntervalInBytes;
  _blockZeroingLowLimit = (uint)BlockZeroingLowLimit;
  _softwarePrefetchHintDistance = (uint)SoftwarePrefetchHintDistance;
  _useSVE = (uint)UseSVE;
  _aarch64_flags = 0;
  RECORD_FLAG(aarch64_, aarch64_avoidUnalignedAccesses, AvoidUnalignedAccesses);
  RECORD_FLAG(aarch64_, aarch64_useSIMDForMemoryOps, UseSIMDForMemoryOps);
  RECORD_FLAG(aarch64_, aarch64_useSIMDForArrayEquals, UseSIMDForArrayEquals);
  RECORD_FLAG(aarch64_, aarch64_useSIMDForSHA3, UseSIMDForSHA3Intrinsic);
  RECORD_FLAG(aarch64_, aarch64_useLSE, UseLSE);

  _aarch64_use_intrinsics_flags = 0;
  RECORD_FLAG(aarch64_use_, aarch64_useBlockZeroing, UseBlockZeroing);
  RECORD_FLAG(aarch64_use_, aarch64_useSIMDForBigIntegerShift, UseSIMDForBigIntegerShiftIntrinsics);
  RECORD_FLAG(aarch64_use_, aarch64_useSimpleArrayEquals, UseSimpleArrayEquals);
  RECORD_FLAG(aarch64_use_, aarch64_useSecondarySupersCache, UseSecondarySupersCache);
#endif // defined(AARCH64) && !defined(ZERO)
#if INCLUDE_JVMCI
  _enableJVMCI = (uint)EnableJVMCI;
#endif
  _cpu_features_offset = cpu_features_offset;
}
486
487 #undef RECORD_FLAG
488
// Check that the CPU features recorded when the cache was created are all
// supported by the current CPU. Returns false (disabling the cache) when a
// required feature is missing. Feature data layout at _cpu_features_offset:
// a uint size followed by the serialized feature buffer.
bool AOTCodeCache::Config::verify_cpu_features(AOTCodeCache* cache) const {
  LogStreamHandle(Debug, aot, codecache, init) log;
  uint offset = _cpu_features_offset;
  uint cpu_features_size = *(uint *)cache->addr(offset);
  assert(cpu_features_size == (uint)VM_Version::cpu_features_size(), "must be");
  offset += sizeof(uint);

  void* cached_cpu_features_buffer = (void *)cache->addr(offset);
  if (log.is_enabled()) {
    ResourceMark rm; // required for stringStream::as_string()
    stringStream ss;
    VM_Version::get_cpu_features_name(cached_cpu_features_buffer, ss);
    log.print_cr("CPU features recorded in AOTCodeCache: %s", ss.as_string());
  }

  if (VM_Version::supports_features(cached_cpu_features_buffer)) {
    if (log.is_enabled()) {
      ResourceMark rm; // required for stringStream::as_string()
      stringStream ss;
      char* runtime_cpu_features = NEW_RESOURCE_ARRAY(char, VM_Version::cpu_features_size());
      VM_Version::store_cpu_features(runtime_cpu_features);
      // NOTE(review): argument order here is (runtime, cached) but the
      // failure path below uses (cached, runtime) — confirm this ordering
      // yields "features in runtime but not in cache" as the log implies.
      VM_Version::get_missing_features_name(runtime_cpu_features, cached_cpu_features_buffer, ss);
      if (!ss.is_empty()) {
        log.print_cr("Additional runtime CPU features: %s", ss.as_string());
      }
    }
  } else {
    if (log.is_enabled()) {
      ResourceMark rm; // required for stringStream::as_string()
      stringStream ss;
      char* runtime_cpu_features = NEW_RESOURCE_ARRAY(char, VM_Version::cpu_features_size());
      VM_Version::store_cpu_features(runtime_cpu_features);
      VM_Version::get_missing_features_name(cached_cpu_features_buffer, runtime_cpu_features, ss);
      log.print_cr("AOT Code Cache disabled: required cpu features are missing: %s", ss.as_string());
    }
    return false;
  }
  return true;
}
528
// macro to do *standard* flag eq checks -- flag_type selects the
// relevant accessor e.g. test_flag, test_x86_flag, test_x86_use_flag.
// n.b. flag_enum_name and global_flag_name are both needed because we
// don't have consistent conventions for naming global flags e.g.
// EnableContended vs UseMulAddIntrinsic vs UseCRC32Intrinsics
// On mismatch the enclosing verify() returns false, disabling the cache.

#define CHECK_FLAG(flag_type, flag_enum_name, global_flag_name) \
  if (test_ ## flag_type ## flag(flag_enum_name) != global_flag_name) { \
    log_debug(aot, codecache, init)("AOT Code Cache disabled: it was created with " # global_flag_name " = %s vs current %s" , (global_flag_name ? "false" : "true"), (global_flag_name ? "true" : "false")); \
    return false; \
  }
540
// Verify that the configuration recorded in the cache is compatible with
// the current runtime. Returns false to disable the whole cache; the final
// checks instead only disable AOT stub/adapter caching via global flags.
bool AOTCodeCache::Config::verify(AOTCodeCache* cache) const {
  // First checks affect all cached AOT code
#ifdef ASSERT
  if (!test_flag(debugVM)) {
    log_debug(aot, codecache, init)("AOT Code Cache disabled: it was created by product VM, it can't be used by debug VM");
    return false;
  }
#else
  if (test_flag(debugVM)) {
    log_debug(aot, codecache, init)("AOT Code Cache disabled: it was created by debug VM, it can't be used by product VM");
    return false;
  }
#endif

  // The cache must have been produced with the same GC.
  CollectedHeap::Name aot_gc = (CollectedHeap::Name)_gc;
  if (aot_gc != Universe::heap()->kind()) {
    log_debug(aot, codecache, init)("AOT Code Cache disabled: it was created with different GC: %s vs current %s", GCConfig::hs_err_name(aot_gc), GCConfig::hs_err_name());
    return false;
  }

  if (_compressedKlassShift != (uint)CompressedKlassPointers::shift()) {
    log_debug(aot, codecache, init)("AOT Code Cache disabled: it was created with CompressedKlassPointers::shift() = %d vs current %d", _compressedKlassShift, CompressedKlassPointers::shift());
    return false;
  }

  // check CPU features before checking flags that may be
  // auto-configured in response to them
  if (!verify_cpu_features(cache)) {
    return false;
  }

  // change to EnableContended can affect validity of nmethods
  CHECK_FLAG(, enableContendedPadding, EnableContended);
  // change to RestrictContended can affect validity of nmethods
  CHECK_FLAG(, restrictContendedPadding, RestrictContended);

  // Tests for config options which might affect validity of adapters,
  // stubs or nmethods. Currently we take a pessimistic stand and
  // drop the whole cache if any of these are changed.

  // change to opto alignment can affect performance of array copy
  // stubs and nmethods
  if (_optoLoopAlignment != (uint)OptoLoopAlignment) {
    log_debug(aot, codecache, init)("AOT Code Cache disabled: it was created with OptoLoopAlignment = %d vs current %d", (int)_optoLoopAlignment, (int)OptoLoopAlignment);
    return false;
  }

  // change to CodeEntryAlignment can affect performance of array
  // copy stubs and nmethods
  if (_codeEntryAlignment != CodeEntryAlignment) {
    log_debug(aot, codecache, init)("AOT Code Cache disabled: it was created with CodeEntryAlignment = %d vs current %d", _codeEntryAlignment, CodeEntryAlignment);
    return false;
  }

  // changing Prefetch configuration can affect validity of nmethods
  // and stubs
  if (_allocatePrefetchLines != (uint)AllocatePrefetchLines) {
    log_debug(aot, codecache, init)("AOT Code Cache disabled: it was created with = %d vs current %d", (int)_allocatePrefetchLines, (int)AllocatePrefetchLines);
    return false;
  }
  if (_allocateInstancePrefetchLines != (uint)AllocateInstancePrefetchLines) {
    log_debug(aot, codecache, init)("AOT Code Cache disabled: it was created with = %d vs current %d", (int)_allocateInstancePrefetchLines, (int)AllocateInstancePrefetchLines);
    return false;
  }
  if (_allocatePrefetchDistance != (uint)AllocatePrefetchDistance) {
    log_debug(aot, codecache, init)("AOT Code Cache disabled: it was created with = %d vs current %d", (int)_allocatePrefetchDistance, (int)AllocatePrefetchDistance);
    return false;
  }
  if (_allocatePrefetchStepSize != (uint)AllocatePrefetchStepSize) {
    log_debug(aot, codecache, init)("AOT Code Cache disabled: it was created with = %d vs current %d", (int)_allocatePrefetchStepSize, (int)AllocatePrefetchStepSize);
    return false;
  }

  // check intrinsic use settings are compatible

  CHECK_FLAG(use_, useCRC32, UseCRC32Intrinsics);
  CHECK_FLAG(use_, useCRC32C, UseCRC32CIntrinsics);

#ifdef COMPILER2
  // change to MaxVectorSize can affect validity of array copy/fill
  // stubs
  if (_maxVectorSize != (uint)MaxVectorSize) {
    log_debug(aot, codecache, init)("AOT Code Cache disabled: it was created with MaxVectorSize = %d vs current %d", (int)_maxVectorSize, (int)MaxVectorSize);
    return false;
  }

  // changing ArrayOperationPartialInlineSize can affect validity of
  // nmethods and stubs
  if (_arrayOperationPartialInlineSize != (uint)ArrayOperationPartialInlineSize) {
    log_debug(aot, codecache, init)("AOT Code Cache disabled: it was created with ArrayOperationPartialInlineSize = %d vs current %d", (int)_arrayOperationPartialInlineSize, (int)ArrayOperationPartialInlineSize);
    return false;
  }
  CHECK_FLAG(use_, useMultiplyToLen, UseMultiplyToLenIntrinsic);
  CHECK_FLAG(use_, useSquareToLen, UseSquareToLenIntrinsic);
  CHECK_FLAG(use_, useMulAdd, UseMulAddIntrinsic);
  CHECK_FLAG(use_, useMontgomeryMultiply,UseMontgomeryMultiplyIntrinsic);
  CHECK_FLAG(use_, useMontgomerySquare, UseMontgomerySquareIntrinsic);
#endif // COMPILER2
  CHECK_FLAG(use_, useChaCha20, UseChaCha20Intrinsics);
  CHECK_FLAG(use_, useDilithium, UseDilithiumIntrinsics);
  CHECK_FLAG(use_, useKyber, UseKyberIntrinsics);
  CHECK_FLAG(use_, useBASE64, UseBASE64Intrinsics);
  CHECK_FLAG(use_, useAES, UseAESIntrinsics);
  CHECK_FLAG(use_, useAESCTR, UseAESCTRIntrinsics);
  CHECK_FLAG(use_, useGHASH, UseGHASHIntrinsics);
  CHECK_FLAG(use_, useMD5, UseMD5Intrinsics);
  CHECK_FLAG(use_, useSHA1, UseSHA1Intrinsics);
  CHECK_FLAG(use_, useSHA256, UseSHA256Intrinsics);
  CHECK_FLAG(use_, useSHA512, UseSHA512Intrinsics);
  CHECK_FLAG(use_, useSHA3, UseSHA3Intrinsics);
  CHECK_FLAG(use_, usePoly1305, UsePoly1305Intrinsics);
  CHECK_FLAG(use_, useVectorizedMismatch, UseVectorizedMismatchIntrinsic);
  CHECK_FLAG(use_, useSecondarySupersTable, UseSecondarySupersTable);

#if defined(X86) && !defined(ZERO)
  // change to AVX3Threshold may affect validity of array copy stubs
  // and nmethods
  if (_avx3threshold != (uint)AVX3Threshold) {
    log_debug(aot, codecache, init)("AOT Code Cache disabled: it was created with AVX3Threshold = %d vs current %d", (int)_avx3threshold, AVX3Threshold);
    return false;
  }

  // change to UseAVX may affect validity of array copy stubs and
  // nmethods
  if (_useAVX != (uint)UseAVX) {
    log_debug(aot, codecache, init)("AOT Code Cache disabled: it was created with useAVX = %d vs current %d", (int)_useAVX, UseAVX);
    return false;
  }

  // change to EnableX86ECoreOpts may affect validity of nmethods
  CHECK_FLAG(x86_, x86_enableX86ECoreOpts, EnableX86ECoreOpts);

  // switching off UseUnalignedLoadStores can affect validity of fill
  // stubs (only the recorded-on/now-off direction is a problem)
  if (test_x86_flag(x86_useUnalignedLoadStores) && !UseUnalignedLoadStores) {
    log_debug(aot, codecache, init)("AOT Code Cache disabled: it was created with UseUnalignedLoadStores = true vs current = false");
    return false;
  }

  // change to UseAPX can affect validity of nmethods and stubs
  CHECK_FLAG(x86_, x86_useAPX, UseAPX);

  // check x86-specific intrinsic use settings are compatible

  CHECK_FLAG(x86_use_, x86_useLibm, UseLibmIntrinsic);
  CHECK_FLAG(x86_use_, x86_useIntPoly, UseIntPolyIntrinsics);
#endif // defined(X86) && !defined(ZERO)

#if defined(AARCH64) && !defined(ZERO)
  // change to PrefetchCopyIntervalInBytes may affect validity of
  // array copy stubs
  if (_prefetchCopyIntervalInBytes != (uint)PrefetchCopyIntervalInBytes) {
    log_debug(aot, codecache, init)("AOT Code Cache disabled: it was created with PrefetchCopyIntervalInBytes = %d vs current %d", (int)_prefetchCopyIntervalInBytes, (int)PrefetchCopyIntervalInBytes);
    return false;
  }

  // change to BlockZeroingLowLimit may affect validity of array fill
  // stubs
  if (_blockZeroingLowLimit != (uint)BlockZeroingLowLimit) {
    log_debug(aot, codecache, init)("AOT Code Cache disabled: it was created with BlockZeroingLowLimit = %d vs current %d", (int)_blockZeroingLowLimit, (int)BlockZeroingLowLimit);
    return false;
  }

  // change to SoftwarePrefetchHintDistance may affect validity of array fill
  // stubs
  if (_softwarePrefetchHintDistance != (uint)SoftwarePrefetchHintDistance) {
    log_debug(aot, codecache, init)("AOT Code Cache disabled: it was created with SoftwarePrefetchHintDistance = %d vs current %d", (int)_softwarePrefetchHintDistance, (int)SoftwarePrefetchHintDistance);
    return false;
  }

  // change to UseSVE may affect validity of stubs and nmethods
  if (_useSVE != (uint)UseSVE) {
    log_debug(aot, codecache, init)("AOT Code Cache disabled: it was created with UseSVE = %d vs current %d",(int)_useSVE, UseSVE);
    return false;
  }

  // switching on AvoidUnalignedAccesses may affect validity of array
  // copy stubs and nmethods (only the recorded-off/now-on direction)
  if (!test_aarch64_flag(aarch64_avoidUnalignedAccesses) && AvoidUnalignedAccesses) {
    log_debug(aot, codecache, init)("AOT Code Cache disabled: it was created with AvoidUnalignedAccesses = false vs current = true");
    return false;
  }

  // change to UseSIMDForMemoryOps may affect validity of array
  // copy stubs and nmethods
  CHECK_FLAG(aarch64_, aarch64_useSIMDForMemoryOps, UseSIMDForMemoryOps);
  // change to UseSIMDForArrayEquals may affect validity of array
  // copy stubs and nmethods
  CHECK_FLAG(aarch64_, aarch64_useSIMDForArrayEquals, UseSIMDForArrayEquals);
  // change to useSIMDForSHA3 may affect validity of SHA3 stubs
  CHECK_FLAG(aarch64_, aarch64_useSIMDForSHA3, UseSIMDForSHA3Intrinsic);
  // change to UseLSE may affect validity of stubs and nmethods
  CHECK_FLAG(aarch64_, aarch64_useLSE, UseLSE);

  // check aarch64-specific intrinsic use settings are compatible

  CHECK_FLAG(aarch64_use_, aarch64_useBlockZeroing, UseBlockZeroing);
  CHECK_FLAG(aarch64_use_, aarch64_useSIMDForBigIntegerShift, UseSIMDForBigIntegerShiftIntrinsics);
  CHECK_FLAG(aarch64_use_, aarch64_useSimpleArrayEquals, UseSimpleArrayEquals);
  CHECK_FLAG(aarch64_use_, aarch64_useSecondarySupersCache, UseSecondarySupersCache);
#endif // defined(AARCH64) && !defined(ZERO)

#if INCLUDE_JVMCI
  // change to EnableJVMCI will affect validity of adapters and
  // nmethods
  if (_enableJVMCI != (uint)EnableJVMCI) {
    log_debug(aot, codecache, init)("AOT Code Cache disabled: it was created with EnableJVMCI = %s vs current %s", (_enableJVMCI ? "true" : "false"), (EnableJVMCI ? "true" : "false"));
    return false;
  }
#endif // INCLUDE_JVMCI

  // The following checks do not affect AOT code, but can disable
  // AOT stub/adapters caching if they are incompatible with runtime settings
  // (adapters too as they access oops when buffering scalarized value objects).
  // NOTE(review): these assign the flags directly instead of using
  // FLAG_SET_ERGO as disable_caching() does — flag origin is not updated.

  if (test_flag(compressedOops) != UseCompressedOops) {
    log_debug(aot, codecache, init)("AOT Stub/Adapter Cache disabled: it was created with UseCompressedOops = %s", UseCompressedOops ? "false" : "true");
    AOTStubCaching = false;
    if (InlineTypePassFieldsAsArgs) {
      AOTAdapterCaching = false;
    }
  }
  if (_compressedOopShift != (uint)CompressedOops::shift()) {
    log_debug(aot, codecache, init)("AOT Stub/Adapter Cache disabled: it was created with different CompressedOops::shift(): %d vs current %d", _compressedOopShift, CompressedOops::shift());
    AOTStubCaching = false;
    if (InlineTypePassFieldsAsArgs) {
      AOTAdapterCaching = false;
    }
  }

  // This should be the last check as it only disables AOTStub/AdapterCaching
  if ((_compressedOopBase == nullptr || CompressedOops::base() == nullptr) && (_compressedOopBase != CompressedOops::base())) {
    log_debug(aot, codecache, init)("AOT Stub/Adapter Cache disabled: incompatible CompressedOops::base(): %p vs current %p", _compressedOopBase, CompressedOops::base());
    AOTStubCaching = false;
    if (InlineTypePassFieldsAsArgs) {
      AOTAdapterCaching = false;
    }
  }

  return true;
}
782
783 #undef TEST_FLAG
784
785 bool AOTCodeCache::Header::verify(uint load_size) const {
786 if (_version != AOT_CODE_VERSION) {
787 log_debug(aot, codecache, init)("AOT Code Cache disabled: different AOT Code version %d vs %d recorded in AOT Code header", AOT_CODE_VERSION, _version);
788 return false;
789 }
790 if (load_size < _cache_size) {
791 log_debug(aot, codecache, init)("AOT Code Cache disabled: AOT Code Cache size %d < %d recorded in AOT Code header", load_size, _cache_size);
792 return false;
793 }
794 return true;
795 }
796
797 AOTCodeCache* AOTCodeCache::open_for_use() {
798 if (AOTCodeCache::is_on_for_use()) {
799 return AOTCodeCache::cache();
800 }
801 return nullptr;
802 }
803
804 AOTCodeCache* AOTCodeCache::open_for_dump() {
805 if (AOTCodeCache::is_on_for_dump()) {
806 AOTCodeCache* cache = AOTCodeCache::cache();
807 cache->clear_lookup_failed(); // Reset bit
808 return cache;
809 }
810 return nullptr;
811 }
812
// Raw copy of 'size' bytes from the cache buffer at 'from' into 'to',
// with trace logging of the addresses involved.
void copy_bytes(const char* from, address to, uint size) {
  assert((int)size > 0, "sanity"); // also rejects sizes large enough to go negative as int
  memcpy(to, from, size);
  log_trace(aot, codecache)("Copied %d bytes from " INTPTR_FORMAT " to " INTPTR_FORMAT, size, p2i(from), p2i(to));
}
818
// Position a reader over 'entry' within 'cache'; all subsequent reads are
// relative to the cache's load buffer.
AOTCodeReader::AOTCodeReader(AOTCodeCache* cache, AOTCodeEntry* entry) {
  _cache = cache;
  _entry = entry;
  _load_buffer = cache->cache_buffer();
  _read_position = 0;      // cursor into the load buffer
  _lookup_failed = false;
  _name = nullptr;         // filled in by compile_code_blob()
  _reloc_data = nullptr;   // located while reading the archived blob
  _oop_maps = nullptr;     // only set when the entry has oop maps
}
829
830 void AOTCodeReader::set_read_position(uint pos) {
831 if (pos == _read_position) {
832 return;
833 }
834 assert(pos < _cache->load_size(), "offset:%d >= file size:%d", pos, _cache->load_size());
835 _read_position = pos;
836 }
837
838 bool AOTCodeCache::set_write_position(uint pos) {
839 if (pos == _write_position) {
840 return true;
841 }
842 if (_store_size < _write_position) {
843 _store_size = _write_position; // Adjust during write
844 }
845 assert(pos < _store_size, "offset:%d >= file size:%d", pos, _store_size);
846 _write_position = pos;
847 return true;
848 }
849
850 static char align_buffer[256] = { 0 };
851
852 bool AOTCodeCache::align_write() {
853 // We are not executing code from cache - we copy it by bytes first.
854 // No need for big alignment (or at all).
855 uint padding = DATA_ALIGNMENT - (_write_position & (DATA_ALIGNMENT - 1));
856 if (padding == DATA_ALIGNMENT) {
857 return true;
858 }
859 uint n = write_bytes((const void*)&align_buffer, padding);
860 if (n != padding) {
861 return false;
862 }
863 log_trace(aot, codecache)("Adjust write alignment in AOT Code Cache");
864 return true;
865 }
866
// Check to see if AOT code cache has required space to store "nbytes" of data.
// On success, returns a pointer into the store buffer and advances the write
// position; on overflow, marks the cache failed and returns nullptr.
address AOTCodeCache::reserve_bytes(uint nbytes) {
  assert(for_dump(), "Code Cache file is not created");
  uint new_position = _write_position + nbytes;
  // Data must stay below the entries area, which is allocated from the other
  // end of the store buffer (entries grow in reverse - see finish_write()).
  if (new_position >= (uint)((char*)_store_entries - _store_buffer)) {
    log_warning(aot,codecache)("Failed to ensure %d bytes at offset %d in AOT Code Cache. Increase AOTCodeMaxSize.",
                               nbytes, _write_position);
    set_failed();
    report_store_failure();
    return nullptr;
  }
  address buffer = (address)(_store_buffer + _write_position);
  log_trace(aot, codecache)("Reserved %d bytes at offset %d in AOT Code Cache", nbytes, _write_position);
  _write_position += nbytes;
  if (_store_size < _write_position) {
    _store_size = _write_position; // track high-water mark of stored data
  }
  return buffer;
}
886
// Append 'nbytes' from 'buffer' at the current write position. Returns the
// number of bytes written: nbytes on success, 0 on failure (after marking
// the cache failed) or when nbytes is 0.
uint AOTCodeCache::write_bytes(const void* buffer, uint nbytes) {
  assert(for_dump(), "Code Cache file is not created");
  if (nbytes == 0) {
    return 0;
  }
  uint new_position = _write_position + nbytes;
  // Data must stay below the entries area allocated from the buffer's end
  // (entries grow in reverse - see finish_write()).
  if (new_position >= (uint)((char*)_store_entries - _store_buffer)) {
    log_warning(aot, codecache)("Failed to write %d bytes at offset %d to AOT Code Cache. Increase AOTCodeMaxSize.",
                                nbytes, _write_position);
    set_failed();
    report_store_failure();
    return 0;
  }
  copy_bytes((const char* )buffer, (address)(_store_buffer + _write_position), nbytes);
  log_trace(aot, codecache)("Wrote %d bytes at offset %d to AOT Code Cache", nbytes, _write_position);
  _write_position += nbytes;
  if (_store_size < _write_position) {
    _store_size = _write_position; // track high-water mark of stored data
  }
  return nbytes;
}
908
909 void* AOTCodeEntry::operator new(size_t x, AOTCodeCache* cache) {
910 return (void*)(cache->add_entry());
911 }
912
913 static bool check_entry(AOTCodeEntry::Kind kind, uint id, AOTCodeEntry* entry) {
914 if (entry->kind() == kind) {
915 assert(entry->id() == id, "sanity");
916 return true; // Found
917 }
918 return false;
919 }
920
// Look up the entry with the given kind and (encoded) id in the loaded cache.
// The search table is an id-sorted array of [id, index] pairs; ids can
// collide across kinds, so after the binary search hits we also scan
// linearly over the run of equal ids in both directions until the kind
// matches. Returns nullptr when no matching entry exists.
AOTCodeEntry* AOTCodeCache::find_entry(AOTCodeEntry::Kind kind, uint id) {
  assert(_for_use, "sanity");
  uint count = _load_header->entries_count();
  if (_load_entries == nullptr) {
    // Lazily locate the search table and entries array in the mapped cache
    _search_entries = (uint*)addr(_load_header->entries_offset()); // [id, index]
    _load_entries = (AOTCodeEntry*)(_search_entries + 2 * count);
    log_debug(aot, codecache, init)("Read %d entries table at offset %d from AOT Code Cache", count, _load_header->entries_offset());
  }
  // Binary search
  int l = 0;
  int h = count - 1;
  while (l <= h) {
    int mid = (l + h) >> 1;
    int ix = mid * 2;
    uint is = _search_entries[ix];
    if (is == id) {
      int index = _search_entries[ix + 1];
      AOTCodeEntry* entry = &(_load_entries[index]);
      if (check_entry(kind, id, entry)) {
        return entry; // Found
      }
      // Linear search around to handle id collision
      for (int i = mid - 1; i >= l; i--) { // search back
        ix = i * 2;
        is = _search_entries[ix];
        if (is != id) {
          break;
        }
        index = _search_entries[ix + 1];
        AOTCodeEntry* entry = &(_load_entries[index]);
        if (check_entry(kind, id, entry)) {
          return entry; // Found
        }
      }
      for (int i = mid + 1; i <= h; i++) { // search forward
        ix = i * 2;
        is = _search_entries[ix];
        if (is != id) {
          break;
        }
        index = _search_entries[ix + 1];
        AOTCodeEntry* entry = &(_load_entries[index]);
        if (check_entry(kind, id, entry)) {
          return entry; // Found
        }
      }
      break; // No entry of the requested kind among the equal ids
    } else if (is < id) {
      l = mid + 1;
    } else {
      h = mid - 1;
    }
  }
  return nullptr;
}
977
978 extern "C" {
979 static int uint_cmp(const void *i, const void *j) {
980 uint a = *(uint *)i;
981 uint b = *(uint *)j;
982 return a > b ? 1 : a < b ? -1 : 0;
983 }
984 }
985
986 void AOTCodeCache::store_cpu_features(char*& buffer, uint buffer_size) {
987 uint* size_ptr = (uint *)buffer;
988 *size_ptr = buffer_size;
989 buffer += sizeof(uint);
990
991 VM_Version::store_cpu_features(buffer);
992 log_debug(aot, codecache, exit)("CPU features recorded in AOTCodeCache: %s", VM_Version::features_string());
993 buffer += buffer_size;
994 buffer = align_up(buffer, DATA_ALIGNMENT);
995 }
996
997 bool AOTCodeCache::finish_write() {
998 if (!align_write()) {
999 return false;
1000 }
1001 uint strings_offset = _write_position;
1002 int strings_count = store_strings();
1003 if (strings_count < 0) {
1004 return false;
1005 }
1006 if (!align_write()) {
1007 return false;
1008 }
1009 uint strings_size = _write_position - strings_offset;
1010
1011 uint entries_count = 0; // Number of entrant (useful) code entries
1012 uint entries_offset = _write_position;
1013
1014 uint store_count = _store_entries_cnt;
1015 if (store_count > 0) {
1016 uint header_size = (uint)align_up(sizeof(AOTCodeCache::Header), DATA_ALIGNMENT);
1017 uint code_count = store_count;
1018 uint search_count = code_count * 2;
1019 uint search_size = search_count * sizeof(uint);
1020 uint entries_size = (uint)align_up(code_count * sizeof(AOTCodeEntry), DATA_ALIGNMENT); // In bytes
1021 // _write_position includes size of code and strings
1022 uint code_alignment = code_count * DATA_ALIGNMENT; // We align_up code size when storing it.
1023 uint cpu_features_size = VM_Version::cpu_features_size();
1024 uint total_cpu_features_size = sizeof(uint) + cpu_features_size; // sizeof(uint) to store cpu_features_size
1025 uint total_size = header_size + _write_position + code_alignment + search_size + entries_size +
1026 align_up(total_cpu_features_size, DATA_ALIGNMENT);
1027 assert(total_size < max_aot_code_size(), "AOT Code size (" UINT32_FORMAT " bytes) is greater than AOTCodeMaxSize(" UINT32_FORMAT " bytes).", total_size, max_aot_code_size());
1028
1029 // Allocate in AOT Cache buffer
1030 char* buffer = (char *)AOTCacheAccess::allocate_aot_code_region(total_size + DATA_ALIGNMENT);
1031 char* start = align_up(buffer, DATA_ALIGNMENT);
1032 char* current = start + header_size; // Skip header
1033
1034 uint cpu_features_offset = current - start;
1035 store_cpu_features(current, cpu_features_size);
1036 assert(is_aligned(current, DATA_ALIGNMENT), "sanity check");
1037 assert(current < start + total_size, "sanity check");
1038
1039 // Create ordered search table for entries [id, index];
1040 uint* search = NEW_C_HEAP_ARRAY(uint, search_count, mtCode);
1041
1042 AOTCodeEntry* entries_address = _store_entries; // Pointer to latest entry
1043 uint adapters_count = 0;
1044 uint shared_blobs_count = 0;
1045 uint C1_blobs_count = 0;
1046 uint C2_blobs_count = 0;
1047 uint max_size = 0;
1048 // AOTCodeEntry entries were allocated in reverse in store buffer.
1049 // Process them in reverse order to cache first code first.
1050 for (int i = store_count - 1; i >= 0; i--) {
1051 entries_address[i].set_next(nullptr); // clear pointers before storing data
1052 uint size = align_up(entries_address[i].size(), DATA_ALIGNMENT);
1053 if (size > max_size) {
1054 max_size = size;
1055 }
1056 copy_bytes((_store_buffer + entries_address[i].offset()), (address)current, size);
1057 entries_address[i].set_offset(current - start); // New offset
1058 current += size;
1059 uint n = write_bytes(&(entries_address[i]), sizeof(AOTCodeEntry));
1060 if (n != sizeof(AOTCodeEntry)) {
1061 FREE_C_HEAP_ARRAY(uint, search);
1062 return false;
1063 }
1064 search[entries_count*2 + 0] = entries_address[i].id();
1065 search[entries_count*2 + 1] = entries_count;
1066 entries_count++;
1067 AOTCodeEntry::Kind kind = entries_address[i].kind();
1068 if (kind == AOTCodeEntry::Adapter) {
1069 adapters_count++;
1070 } else if (kind == AOTCodeEntry::SharedBlob) {
1071 shared_blobs_count++;
1072 } else if (kind == AOTCodeEntry::C1Blob) {
1073 C1_blobs_count++;
1074 } else if (kind == AOTCodeEntry::C2Blob) {
1075 C2_blobs_count++;
1076 }
1077 }
1078 if (entries_count == 0) {
1079 log_info(aot, codecache, exit)("AOT Code Cache was not created: no entires");
1080 FREE_C_HEAP_ARRAY(uint, search);
1081 return true; // Nothing to write
1082 }
1083 assert(entries_count <= store_count, "%d > %d", entries_count, store_count);
1084 // Write strings
1085 if (strings_count > 0) {
1086 copy_bytes((_store_buffer + strings_offset), (address)current, strings_size);
1087 strings_offset = (current - start); // New offset
1088 current += strings_size;
1089 }
1090
1091 uint new_entries_offset = (current - start); // New offset
1092 // Sort and store search table
1093 qsort(search, entries_count, 2*sizeof(uint), uint_cmp);
1094 search_size = 2 * entries_count * sizeof(uint);
1095 copy_bytes((const char*)search, (address)current, search_size);
1096 FREE_C_HEAP_ARRAY(uint, search);
1097 current += search_size;
1098
1099 // Write entries
1100 entries_size = entries_count * sizeof(AOTCodeEntry); // New size
1101 copy_bytes((_store_buffer + entries_offset), (address)current, entries_size);
1102 current += entries_size;
1103 uint size = (current - start);
1104 assert(size <= total_size, "%d > %d", size , total_size);
1105
1106 log_debug(aot, codecache, exit)(" Adapters: total=%u", adapters_count);
1107 log_debug(aot, codecache, exit)(" Shared Blobs: total=%d", shared_blobs_count);
1108 log_debug(aot, codecache, exit)(" C1 Blobs: total=%d", C1_blobs_count);
1109 log_debug(aot, codecache, exit)(" C2 Blobs: total=%d", C2_blobs_count);
1110 log_debug(aot, codecache, exit)(" AOT code cache size: %u bytes, max entry's size: %u bytes", size, max_size);
1111
1112 // Finalize header
1113 AOTCodeCache::Header* header = (AOTCodeCache::Header*)start;
1114 header->init(size, (uint)strings_count, strings_offset,
1115 entries_count, new_entries_offset,
1116 adapters_count, shared_blobs_count,
1117 C1_blobs_count, C2_blobs_count, cpu_features_offset);
1118
1119 log_info(aot, codecache, exit)("Wrote %d AOT code entries to AOT Code Cache", entries_count);
1120 }
1121 return true;
1122 }
1123
1124 //------------------Store/Load AOT code ----------------------
1125
// Serialize 'blob' into the AOT code cache under the given kind/id/name.
// Layout written, in order: name, archived CodeBlob, relocation data,
// optional oop map set, extra relocation ids, and (non-product builds)
// asm remarks and dbg strings. Returns false when caching is off for this
// kind or any write fails.
bool AOTCodeCache::store_code_blob(CodeBlob& blob, AOTCodeEntry::Kind entry_kind, uint id, const char* name) {
  AOTCodeCache* cache = open_for_dump();
  if (cache == nullptr) {
    return false;
  }
  assert(AOTCodeEntry::is_valid_entry_kind(entry_kind), "invalid entry_kind %d", entry_kind);

  // Respect the per-category dump switches.
  if (AOTCodeEntry::is_adapter(entry_kind) && !is_dumping_adapter()) {
    return false;
  }
  if (AOTCodeEntry::is_blob(entry_kind) && !is_dumping_stub()) {
    return false;
  }
  log_debug(aot, codecache, stubs)("Writing blob '%s' (id=%u, kind=%s) to AOT Code Cache", name, id, aot_code_entry_kind_name[entry_kind]);

#ifdef ASSERT
  LogStreamHandle(Trace, aot, codecache, stubs) log;
  if (log.is_enabled()) {
    FlagSetting fs(PrintRelocations, true);
    blob.print_on(&log);
  }
#endif
  // we need to take a lock to prevent race between compiler threads generating AOT code
  // and the main thread generating adapter
  MutexLocker ml(Compile_lock);
  if (!is_on()) {
    return false; // AOT code cache was already dumped and closed.
  }
  if (!cache->align_write()) {
    return false;
  }
  uint entry_position = cache->_write_position;

  // Write name
  uint name_offset = cache->_write_position - entry_position;
  uint name_size = (uint)strlen(name) + 1; // Includes terminating '\0'
  uint n = cache->write_bytes(name, name_size);
  if (n != name_size) {
    return false;
  }

  // Write CodeBlob
  if (!cache->align_write()) {
    return false;
  }
  uint blob_offset = cache->_write_position - entry_position;
  address archive_buffer = cache->reserve_bytes(blob.size());
  if (archive_buffer == nullptr) {
    return false;
  }
  CodeBlob::archive_blob(&blob, archive_buffer);

  // Relocation data follows the archived blob body
  uint reloc_data_size = blob.relocation_size();
  n = cache->write_bytes((address)blob.relocation_begin(), reloc_data_size);
  if (n != reloc_data_size) {
    return false;
  }

  bool has_oop_maps = false;
  if (blob.oop_maps() != nullptr) {
    if (!cache->write_oop_map_set(blob)) {
      return false;
    }
    has_oop_maps = true;
  }

  if (!cache->write_relocations(blob)) {
    if (!cache->failed()) {
      // We may miss an address in AOT table - skip this code blob.
      cache->set_write_position(entry_position);
    }
    return false;
  }

#ifndef PRODUCT
  // Write asm remarks after relocation info
  if (!cache->write_asm_remarks(blob)) {
    return false;
  }
  if (!cache->write_dbg_strings(blob)) {
    return false;
  }
#endif /* PRODUCT */

  // Record the new entry (placement-allocated inside the cache's entry area)
  uint entry_size = cache->_write_position - entry_position;
  AOTCodeEntry* entry = new(cache) AOTCodeEntry(entry_kind, encode_id(entry_kind, id),
                                                entry_position, entry_size, name_offset, name_size,
                                                blob_offset, has_oop_maps, blob.content_begin());
  log_debug(aot, codecache, stubs)("Wrote code blob '%s' (id=%u, kind=%s) to AOT Code Cache", name, id, aot_code_entry_kind_name[entry_kind]);
  return true;
}
1217
1218 bool AOTCodeCache::store_code_blob(CodeBlob& blob, AOTCodeEntry::Kind entry_kind, BlobId id) {
1219 assert(AOTCodeEntry::is_blob(entry_kind),
1220 "wrong entry kind for blob id %s", StubInfo::name(id));
1221 return store_code_blob(blob, entry_kind, (uint)id, StubInfo::name(id));
1222 }
1223
1224 CodeBlob* AOTCodeCache::load_code_blob(AOTCodeEntry::Kind entry_kind, uint id, const char* name) {
1225 AOTCodeCache* cache = open_for_use();
1226 if (cache == nullptr) {
1227 return nullptr;
1228 }
1229 assert(AOTCodeEntry::is_valid_entry_kind(entry_kind), "invalid entry_kind %d", entry_kind);
1230
1231 if (AOTCodeEntry::is_adapter(entry_kind) && !is_using_adapter()) {
1232 return nullptr;
1233 }
1234 if (AOTCodeEntry::is_blob(entry_kind) && !is_using_stub()) {
1235 return nullptr;
1236 }
1237 log_debug(aot, codecache, stubs)("Reading blob '%s' (id=%u, kind=%s) from AOT Code Cache", name, id, aot_code_entry_kind_name[entry_kind]);
1238
1239 AOTCodeEntry* entry = cache->find_entry(entry_kind, encode_id(entry_kind, id));
1240 if (entry == nullptr) {
1241 return nullptr;
1242 }
1243 AOTCodeReader reader(cache, entry);
1244 CodeBlob* blob = reader.compile_code_blob(name);
1245
1246 log_debug(aot, codecache, stubs)("%sRead blob '%s' (id=%u, kind=%s) from AOT Code Cache",
1247 (blob == nullptr? "Failed to " : ""), name, id, aot_code_entry_kind_name[entry_kind]);
1248 return blob;
1249 }
1250
1251 CodeBlob* AOTCodeCache::load_code_blob(AOTCodeEntry::Kind entry_kind, BlobId id) {
1252 assert(AOTCodeEntry::is_blob(entry_kind),
1253 "wrong entry kind for blob id %s", StubInfo::name(id));
1254 return load_code_blob(entry_kind, (uint)id, StubInfo::name(id));
1255 }
1256
// Materialize a CodeBlob from this reader's cache entry: verify the stored
// name matches, locate the archived blob and its trailing relocation data
// and oop maps, then hand the archived blob to CodeBlob::create(), which
// calls back into AOTCodeReader::restore(). Returns nullptr on a name
// mismatch or when the CodeCache has no space left.
CodeBlob* AOTCodeReader::compile_code_blob(const char* name) {
  uint entry_position = _entry->offset();

  // Read name
  uint name_offset = entry_position + _entry->name_offset();
  uint name_size = _entry->name_size(); // Includes terminating '\0'
  const char* stored_name = addr(name_offset);

  if (strncmp(stored_name, name, (name_size - 1)) != 0) {
    log_warning(aot, codecache, stubs)("Saved blob's name '%s' is different from the expected name '%s'",
                                       stored_name, name);
    set_lookup_failed(); // Skip this blob
    return nullptr;
  }
  _name = stored_name;

  // Read archived code blob
  uint offset = entry_position + _entry->blob_offset();
  CodeBlob* archived_blob = (CodeBlob*)addr(offset);
  offset += archived_blob->size();

  // Relocation data was written immediately after the blob body
  _reloc_data = (address)addr(offset);
  offset += archived_blob->relocation_size();
  set_read_position(offset);

  if (_entry->has_oop_maps()) {
    _oop_maps = read_oop_map_set();
  }

  // CodeBlob::restore() calls AOTCodeReader::restore()
  CodeBlob* code_blob = CodeBlob::create(archived_blob, this);

  if (code_blob == nullptr) { // no space left in CodeCache
    return nullptr;
  }

#ifdef ASSERT
  LogStreamHandle(Trace, aot, codecache, stubs) log;
  if (log.is_enabled()) {
    FlagSetting fs(PrintRelocations, true);
    code_blob->print_on(&log);
  }
#endif
  return code_blob;
}
1302
// Callback from CodeBlob creation: fix up the freshly copied blob using the
// data this reader located in compile_code_blob() (name, relocation data,
// oop maps), then re-target its relocations for the current runtime.
void AOTCodeReader::restore(CodeBlob* code_blob) {
  precond(AOTCodeCache::is_on_for_use());
  precond(_name != nullptr);
  precond(_reloc_data != nullptr);

  code_blob->set_name(_name);
  code_blob->restore_mutable_data(_reloc_data);
  code_blob->set_oop_maps(_oop_maps);

  // Translate dump-time addresses recorded in relocations to current ones.
  fix_relocations(code_blob);

#ifndef PRODUCT
  code_blob->asm_remarks().init();
  read_asm_remarks(code_blob->asm_remarks());
  code_blob->dbg_strings().init();
  read_dbg_strings(code_blob->dbg_strings());
#endif // PRODUCT
}
1321
// ------------ process code and data --------------

// Can't use -1: it is a valid value for a jump-to-itself destination
// used by the static call stub: see NativeJump::jump_destination().
#define BAD_ADDRESS_ID -2
1327
1328 bool AOTCodeCache::write_relocations(CodeBlob& code_blob) {
1329 GrowableArray<uint> reloc_data;
1330 RelocIterator iter(&code_blob);
1331 LogStreamHandle(Trace, aot, codecache, reloc) log;
1332 while (iter.next()) {
1333 int idx = reloc_data.append(0); // default value
1334 switch (iter.type()) {
1335 case relocInfo::none:
1336 break;
1337 case relocInfo::runtime_call_type: {
1338 // Record offset of runtime destination
1339 CallRelocation* r = (CallRelocation*)iter.reloc();
1340 address dest = r->destination();
1341 if (dest == r->addr()) { // possible call via trampoline on Aarch64
1342 dest = (address)-1; // do nothing in this case when loading this relocation
1343 }
1344 int id = _table->id_for_address(dest, iter, &code_blob);
1345 if (id == BAD_ADDRESS_ID) {
1346 return false;
1347 }
1348 reloc_data.at_put(idx, id);
1349 break;
1350 }
1351 case relocInfo::runtime_call_w_cp_type:
1352 log_debug(aot, codecache, reloc)("runtime_call_w_cp_type relocation is not implemented");
1353 return false;
1354 case relocInfo::external_word_type: {
1355 // Record offset of runtime target
1356 address target = ((external_word_Relocation*)iter.reloc())->target();
1357 int id = _table->id_for_address(target, iter, &code_blob);
1358 if (id == BAD_ADDRESS_ID) {
1359 return false;
1360 }
1361 reloc_data.at_put(idx, id);
1362 break;
1363 }
1364 case relocInfo::internal_word_type:
1365 break;
1366 case relocInfo::section_word_type:
1367 break;
1368 case relocInfo::post_call_nop_type:
1369 break;
1370 default:
1371 log_debug(aot, codecache, reloc)("relocation %d unimplemented", (int)iter.type());
1372 return false;
1373 break;
1374 }
1375 if (log.is_enabled()) {
1376 iter.print_current_on(&log);
1377 }
1378 }
1379
1380 // Write additional relocation data: uint per relocation
1381 // Write the count first
1382 int count = reloc_data.length();
1383 write_bytes(&count, sizeof(int));
1384 for (GrowableArrayIterator<uint> iter = reloc_data.begin();
1385 iter != reloc_data.end(); ++iter) {
1386 uint value = *iter;
1387 int n = write_bytes(&value, sizeof(uint));
1388 if (n != sizeof(uint)) {
1389 return false;
1390 }
1391 }
1392 return true;
1393 }
1394
// Re-apply relocations to a blob copied out of the AOT cache, consuming the
// per-relocation ids written by write_relocations(). The iteration order
// must match the writer's so that reloc_data[j] lines up with relocation j.
void AOTCodeReader::fix_relocations(CodeBlob* code_blob) {
  LogStreamHandle(Trace, aot, reloc) log;
  uint offset = read_position();
  int count = *(int*)addr(offset); // number of extra relocation data words
  offset += sizeof(int);
  if (log.is_enabled()) {
    log.print_cr("======== extra relocations count=%d", count);
  }
  uint* reloc_data = (uint*)addr(offset);
  offset += (count * sizeof(uint));
  set_read_position(offset);

  RelocIterator iter(code_blob);
  int j = 0; // index into reloc_data, advanced once per relocation
  while (iter.next()) {
    switch (iter.type()) {
      case relocInfo::none:
        break;
      case relocInfo::runtime_call_type: {
        // Map the stored address-table id back to a runtime address
        address dest = _cache->address_for_id(reloc_data[j]);
        if (dest != (address)-1) { // -1 marks "leave as-is" (see write_relocations)
          ((CallRelocation*)iter.reloc())->set_destination(dest);
        }
        break;
      }
      case relocInfo::runtime_call_w_cp_type:
        // this relocation should not be in cache (see write_relocations)
        assert(false, "runtime_call_w_cp_type relocation is not implemented");
        break;
      case relocInfo::external_word_type: {
        address target = _cache->address_for_id(reloc_data[j]);
        // Add external address to global table
        int index = ExternalsRecorder::find_index(target);
        // Update index in relocation
        Relocation::add_jint(iter.data(), index);
        external_word_Relocation* reloc = (external_word_Relocation*)iter.reloc();
        assert(reloc->target() == target, "sanity");
        reloc->set_value(target); // Patch address in the code
        break;
      }
      case relocInfo::internal_word_type: {
        // Rebase against the blob's new content start address
        internal_word_Relocation* r = (internal_word_Relocation*)iter.reloc();
        r->fix_relocation_after_aot_load(aot_code_entry()->dumptime_content_start_addr(), code_blob->content_begin());
        break;
      }
      case relocInfo::section_word_type: {
        section_word_Relocation* r = (section_word_Relocation*)iter.reloc();
        r->fix_relocation_after_aot_load(aot_code_entry()->dumptime_content_start_addr(), code_blob->content_begin());
        break;
      }
      case relocInfo::post_call_nop_type:
        break;
      default:
        assert(false,"relocation %d unimplemented", (int)iter.type());
        break;
    }
    if (log.is_enabled()) {
      iter.print_current_on(&log);
    }
    j++;
  }
  assert(j == count, "sanity");
}
1458
1459 bool AOTCodeCache::write_oop_map_set(CodeBlob& cb) {
1460 ImmutableOopMapSet* oopmaps = cb.oop_maps();
1461 int oopmaps_size = oopmaps->nr_of_bytes();
1462 if (!write_bytes(&oopmaps_size, sizeof(int))) {
1463 return false;
1464 }
1465 uint n = write_bytes(oopmaps, oopmaps->nr_of_bytes());
1466 if (n != (uint)oopmaps->nr_of_bytes()) {
1467 return false;
1468 }
1469 return true;
1470 }
1471
1472 ImmutableOopMapSet* AOTCodeReader::read_oop_map_set() {
1473 uint offset = read_position();
1474 int size = *(int *)addr(offset);
1475 offset += sizeof(int);
1476 ImmutableOopMapSet* oopmaps = (ImmutableOopMapSet *)addr(offset);
1477 offset += size;
1478 set_read_position(offset);
1479 return oopmaps;
1480 }
1481
#ifndef PRODUCT
// Store the blob's asm remarks: a count slot (patched once iteration is
// done) followed by (code offset, C-string table id) pairs.
// Returns false when reservation or any write fails.
bool AOTCodeCache::write_asm_remarks(CodeBlob& cb) {
  // Write asm remarks
  uint* count_ptr = (uint *)reserve_bytes(sizeof(uint)); // patched below
  if (count_ptr == nullptr) {
    return false;
  }
  uint count = 0;
  bool result = cb.asm_remarks().iterate([&] (uint offset, const char* str) -> bool {
    log_trace(aot, codecache, stubs)("asm remark offset=%d, str='%s'", offset, str);
    uint n = write_bytes(&offset, sizeof(uint));
    if (n != sizeof(uint)) {
      return false; // abort iteration on write failure
    }
    // Intern the remark text and store its address-table id instead of the text
    const char* cstr = add_C_string(str);
    int id = _table->id_for_C_string((address)cstr);
    assert(id != -1, "asm remark string '%s' not found in AOTCodeAddressTable", str);
    n = write_bytes(&id, sizeof(int));
    if (n != sizeof(int)) {
      return false;
    }
    count += 1;
    return true;
  });
  *count_ptr = count; // patch the reserved count slot
  return result;
}
1509
1510 void AOTCodeReader::read_asm_remarks(AsmRemarks& asm_remarks) {
1511 // Read asm remarks
1512 uint offset = read_position();
1513 uint count = *(uint *)addr(offset);
1514 offset += sizeof(uint);
1515 for (uint i = 0; i < count; i++) {
1516 uint remark_offset = *(uint *)addr(offset);
1517 offset += sizeof(uint);
1518 int remark_string_id = *(uint *)addr(offset);
1519 offset += sizeof(int);
1520 const char* remark = (const char*)_cache->address_for_C_string(remark_string_id);
1521 asm_remarks.insert(remark_offset, remark);
1522 }
1523 set_read_position(offset);
1524 }
1525
// Store the blob's debug strings: a count slot (patched once iteration is
// done) followed by one C-string table id per string.
// Returns false when reservation or any write fails.
bool AOTCodeCache::write_dbg_strings(CodeBlob& cb) {
  // Write dbg strings
  uint* count_ptr = (uint *)reserve_bytes(sizeof(uint)); // patched below
  if (count_ptr == nullptr) {
    return false;
  }
  uint count = 0;
  bool result = cb.dbg_strings().iterate([&] (const char* str) -> bool {
    log_trace(aot, codecache, stubs)("dbg string=%s", str);
    // Intern the string and store its address-table id instead of the text
    const char* cstr = add_C_string(str);
    int id = _table->id_for_C_string((address)cstr);
    assert(id != -1, "db string '%s' not found in AOTCodeAddressTable", str);
    uint n = write_bytes(&id, sizeof(int));
    if (n != sizeof(int)) {
      return false; // abort iteration on write failure
    }
    count += 1;
    return true;
  });
  *count_ptr = count; // patch the reserved count slot
  return result;
}
1548
1549 void AOTCodeReader::read_dbg_strings(DbgStrings& dbg_strings) {
1550 // Read dbg strings
1551 uint offset = read_position();
1552 uint count = *(uint *)addr(offset);
1553 offset += sizeof(uint);
1554 for (uint i = 0; i < count; i++) {
1555 int string_id = *(uint *)addr(offset);
1556 offset += sizeof(int);
1557 const char* str = (const char*)_cache->address_for_C_string(string_id);
1558 dbg_strings.insert(str);
1559 }
1560 set_read_position(offset);
1561 }
1562 #endif // PRODUCT
1563
1564 //======================= AOTCodeAddressTable ===============
1565
1566 // address table ids for generated routines, external addresses and C
1567 // string addresses are partitioned into positive integer ranges
1568 // defined by the following positive base and max values
1569 // i.e. [_extrs_base, _extrs_base + _extrs_max -1],
1570 // [_blobs_base, _blobs_base + _blobs_max -1],
1571 // ...
1572 // [_c_str_base, _c_str_base + _c_str_max -1],
1573
1574 #define _extrs_max 100
1575 #define _stubs_max 3
1576
1577 #define _shared_blobs_max 20
1578 #define _C1_blobs_max 10
1579 #define _blobs_max (_shared_blobs_max+_C1_blobs_max)
1580 #define _all_max (_extrs_max+_stubs_max+_blobs_max)
1581
1582 #define _extrs_base 0
1583 #define _stubs_base (_extrs_base + _extrs_max)
1584 #define _shared_blobs_base (_stubs_base + _stubs_max)
1585 #define _C1_blobs_base (_shared_blobs_base + _shared_blobs_max)
1586 #define _blobs_end (_shared_blobs_base + _blobs_max)
1587
1588 #define SET_ADDRESS(type, addr) \
1589 { \
1590 type##_addr[type##_length++] = (address) (addr); \
1591 assert(type##_length <= type##_max, "increase size"); \
1592 }
1593
1594 static bool initializing_extrs = false;
1595
1596 void AOTCodeAddressTable::init_extrs() {
1597 if (_extrs_complete || initializing_extrs) return; // Done already
1598
1599 assert(_blobs_end <= _all_max, "AOTCodeAddress table ranges need adjusting");
1600
1601 initializing_extrs = true;
1602 _extrs_addr = NEW_C_HEAP_ARRAY(address, _extrs_max, mtCode);
1603
1604 _extrs_length = 0;
1605
1606 // Record addresses of VM runtime methods
1607 SET_ADDRESS(_extrs, SharedRuntime::fixup_callers_callsite);
1608 SET_ADDRESS(_extrs, SharedRuntime::handle_wrong_method);
1609 SET_ADDRESS(_extrs, SharedRuntime::handle_wrong_method_abstract);
1610 SET_ADDRESS(_extrs, SharedRuntime::handle_wrong_method_ic_miss);
1611 SET_ADDRESS(_extrs, SharedRuntime::allocate_inline_types);
1612 #if defined(AARCH64) && !defined(ZERO)
1613 SET_ADDRESS(_extrs, JavaThread::aarch64_get_thread_helper);
1614 #endif
1615 {
1616 // Required by Shared blobs
1617 SET_ADDRESS(_extrs, Deoptimization::fetch_unroll_info);
1618 SET_ADDRESS(_extrs, Deoptimization::unpack_frames);
1619 SET_ADDRESS(_extrs, SafepointSynchronize::handle_polling_page_exception);
1620 SET_ADDRESS(_extrs, SharedRuntime::resolve_opt_virtual_call_C);
1621 SET_ADDRESS(_extrs, SharedRuntime::resolve_virtual_call_C);
1622 SET_ADDRESS(_extrs, SharedRuntime::resolve_static_call_C);
1623 SET_ADDRESS(_extrs, SharedRuntime::throw_StackOverflowError);
1624 SET_ADDRESS(_extrs, SharedRuntime::throw_delayed_StackOverflowError);
1625 SET_ADDRESS(_extrs, SharedRuntime::throw_AbstractMethodError);
1626 SET_ADDRESS(_extrs, SharedRuntime::throw_IncompatibleClassChangeError);
1627 SET_ADDRESS(_extrs, SharedRuntime::throw_NullPointerException_at_call);
1628 }
1629
1630 #ifdef COMPILER1
1631 {
1632 // Required by C1 blobs
1633 SET_ADDRESS(_extrs, static_cast<int (*)(oopDesc*)>(SharedRuntime::dtrace_object_alloc));
1634 SET_ADDRESS(_extrs, SharedRuntime::exception_handler_for_return_address);
1635 SET_ADDRESS(_extrs, SharedRuntime::register_finalizer);
1636 SET_ADDRESS(_extrs, Runtime1::is_instance_of);
1637 SET_ADDRESS(_extrs, Runtime1::exception_handler_for_pc);
1638 SET_ADDRESS(_extrs, Runtime1::check_abort_on_vm_exception);
1639 SET_ADDRESS(_extrs, Runtime1::new_instance);
1640 SET_ADDRESS(_extrs, Runtime1::counter_overflow);
1641 SET_ADDRESS(_extrs, Runtime1::new_type_array);
1642 SET_ADDRESS(_extrs, Runtime1::new_object_array);
1643 SET_ADDRESS(_extrs, Runtime1::new_multi_array);
1644 SET_ADDRESS(_extrs, Runtime1::throw_range_check_exception);
1645 SET_ADDRESS(_extrs, Runtime1::throw_index_exception);
1646 SET_ADDRESS(_extrs, Runtime1::throw_div0_exception);
1647 SET_ADDRESS(_extrs, Runtime1::throw_null_pointer_exception);
1648 SET_ADDRESS(_extrs, Runtime1::throw_array_store_exception);
1649 SET_ADDRESS(_extrs, Runtime1::throw_class_cast_exception);
1650 SET_ADDRESS(_extrs, Runtime1::throw_incompatible_class_change_error);
1651 SET_ADDRESS(_extrs, Runtime1::is_instance_of);
1652 SET_ADDRESS(_extrs, Runtime1::monitorenter);
1653 SET_ADDRESS(_extrs, Runtime1::monitorexit);
1654 SET_ADDRESS(_extrs, Runtime1::deoptimize);
1655 SET_ADDRESS(_extrs, Runtime1::access_field_patching);
1656 SET_ADDRESS(_extrs, Runtime1::move_klass_patching);
1657 SET_ADDRESS(_extrs, Runtime1::move_mirror_patching);
1658 SET_ADDRESS(_extrs, Runtime1::move_appendix_patching);
1659 SET_ADDRESS(_extrs, Runtime1::predicate_failed_trap);
1660 SET_ADDRESS(_extrs, Runtime1::unimplemented_entry);
1661 SET_ADDRESS(_extrs, Runtime1::new_null_free_array);
1662 SET_ADDRESS(_extrs, Runtime1::load_flat_array);
1663 SET_ADDRESS(_extrs, Runtime1::store_flat_array);
1664 SET_ADDRESS(_extrs, Runtime1::substitutability_check);
1665 SET_ADDRESS(_extrs, Runtime1::buffer_inline_args);
1666 SET_ADDRESS(_extrs, Runtime1::buffer_inline_args_no_receiver);
1667 SET_ADDRESS(_extrs, Runtime1::throw_identity_exception);
1668 SET_ADDRESS(_extrs, Runtime1::throw_illegal_monitor_state_exception);
1669 SET_ADDRESS(_extrs, Thread::current);
1670 SET_ADDRESS(_extrs, CompressedKlassPointers::base_addr());
1671 #ifndef PRODUCT
1672 SET_ADDRESS(_extrs, os::breakpoint);
1673 #endif
1674 }
1675 #endif
1676
1677 #ifdef COMPILER2
1678 {
1679 // Required by C2 blobs
1680 SET_ADDRESS(_extrs, Deoptimization::uncommon_trap);
1681 SET_ADDRESS(_extrs, OptoRuntime::handle_exception_C);
1682 SET_ADDRESS(_extrs, OptoRuntime::new_instance_C);
1683 SET_ADDRESS(_extrs, OptoRuntime::new_array_C);
1684 SET_ADDRESS(_extrs, OptoRuntime::new_array_nozero_C);
1685 SET_ADDRESS(_extrs, OptoRuntime::multianewarray2_C);
1686 SET_ADDRESS(_extrs, OptoRuntime::multianewarray3_C);
1687 SET_ADDRESS(_extrs, OptoRuntime::multianewarray4_C);
1688 SET_ADDRESS(_extrs, OptoRuntime::multianewarray5_C);
1689 SET_ADDRESS(_extrs, OptoRuntime::multianewarrayN_C);
1690 SET_ADDRESS(_extrs, OptoRuntime::complete_monitor_locking_C);
1691 SET_ADDRESS(_extrs, OptoRuntime::monitor_notify_C);
1692 SET_ADDRESS(_extrs, OptoRuntime::monitor_notifyAll_C);
1693 SET_ADDRESS(_extrs, OptoRuntime::rethrow_C);
1694 SET_ADDRESS(_extrs, OptoRuntime::slow_arraycopy_C);
1695 SET_ADDRESS(_extrs, OptoRuntime::register_finalizer_C);
1696 SET_ADDRESS(_extrs, OptoRuntime::load_unknown_inline_C);
1697 SET_ADDRESS(_extrs, OptoRuntime::store_unknown_inline_C);
1698 SET_ADDRESS(_extrs, OptoRuntime::vthread_end_first_transition_C);
1699 SET_ADDRESS(_extrs, OptoRuntime::vthread_start_final_transition_C);
1700 SET_ADDRESS(_extrs, OptoRuntime::vthread_start_transition_C);
1701 SET_ADDRESS(_extrs, OptoRuntime::vthread_end_transition_C);
1702 #if defined(AARCH64)
1703 SET_ADDRESS(_extrs, JavaThread::verify_cross_modify_fence_failure);
1704 #endif // AARCH64
1705 }
1706 #endif // COMPILER2
1707
1708 #if INCLUDE_G1GC
1709 SET_ADDRESS(_extrs, G1BarrierSetRuntime::write_ref_field_pre_entry);
1710 #endif
1711 #if INCLUDE_SHENANDOAHGC
1712 SET_ADDRESS(_extrs, ShenandoahRuntime::write_barrier_pre);
1713 SET_ADDRESS(_extrs, ShenandoahRuntime::load_reference_barrier_phantom);
1714 SET_ADDRESS(_extrs, ShenandoahRuntime::load_reference_barrier_phantom_narrow);
1715 #endif
1716 #if INCLUDE_ZGC
1717 SET_ADDRESS(_extrs, ZBarrierSetRuntime::load_barrier_on_oop_field_preloaded_addr());
1718 SET_ADDRESS(_extrs, ZBarrierSetRuntime::load_barrier_on_phantom_oop_field_preloaded_addr());
1719 #if defined(AMD64)
1720 SET_ADDRESS(_extrs, &ZPointerLoadShift);
1721 #endif
1722 #endif
1723 #ifndef ZERO
1724 #if defined(AMD64) || defined(AARCH64) || defined(RISCV64)
1725 SET_ADDRESS(_extrs, MacroAssembler::debug64);
1726 #endif
1727 #endif // ZERO
1728
1729 if (UseCompressedOops) {
1730 SET_ADDRESS(_extrs, CompressedOops::base_addr());
1731 }
1732
1733 // addresses of fields in AOT runtime constants area
1734 address* p = AOTRuntimeConstants::field_addresses_list();
1735 while (*p != nullptr) {
1736 SET_ADDRESS(_extrs, *p++);
1737 }
1738
1739 _extrs_complete = true;
1740 log_debug(aot, codecache, init)("External addresses recorded");
1741 }
1742
static bool initializing_early_stubs = false;

// Record addresses of stubs that are generated early in VM startup into the
// _stubs section of the address table. As with init_extrs(), the order of
// SET_ADDRESS calls is positional and must match between dump and load time.
void AOTCodeAddressTable::init_early_stubs() {
  if (_complete || initializing_early_stubs) return; // Done already
  initializing_early_stubs = true;
  _stubs_addr = NEW_C_HEAP_ARRAY(address, _stubs_max, mtCode);
  _stubs_length = 0;
  SET_ADDRESS(_stubs, StubRoutines::forward_exception_entry());

  {
    // Required by C1 blobs
#if defined(AMD64) && !defined(ZERO)
    // x86-only float/long conversion constants used by C1-generated code.
    SET_ADDRESS(_stubs, StubRoutines::x86::double_sign_flip());
    SET_ADDRESS(_stubs, StubRoutines::x86::d2l_fixup());
#endif // AMD64
  }

  _early_stubs_complete = true;
  // NOTE(review): uses log_info while the sibling init functions use
  // log_debug — confirm whether this difference is intentional.
  log_info(aot, codecache, init)("Early stubs recorded");
}
1763
static bool initializing_shared_blobs = false;

// Record the entry addresses of shared runtime code blobs (wrong-method,
// ic-miss, deopt blob entry points) into the _shared_blobs section. A single
// allocation backs both the shared-blobs chunk and the C1-blobs chunk; the
// C1 chunk is filled later by init_early_c1().
void AOTCodeAddressTable::init_shared_blobs() {
  if (_complete || initializing_shared_blobs) return; // Done already
  initializing_shared_blobs = true;
  address* blobs_addr = NEW_C_HEAP_ARRAY(address, _blobs_max, mtCode);

  // Divide _shared_blobs_addr array to chunks because they could be initialized in parallel
  _shared_blobs_addr = blobs_addr;
  _C1_blobs_addr = _shared_blobs_addr + _shared_blobs_max;

  _shared_blobs_length = 0;
  _C1_blobs_length = 0;

  // clear the address table (unfilled slots must not match a real address)
  memset(blobs_addr, 0, sizeof(address)* _blobs_max);

  // Record addresses of generated code blobs
  SET_ADDRESS(_shared_blobs, SharedRuntime::get_handle_wrong_method_stub());
  SET_ADDRESS(_shared_blobs, SharedRuntime::get_ic_miss_stub());
  SET_ADDRESS(_shared_blobs, SharedRuntime::deopt_blob()->unpack());
  SET_ADDRESS(_shared_blobs, SharedRuntime::deopt_blob()->unpack_with_exception());
  SET_ADDRESS(_shared_blobs, SharedRuntime::deopt_blob()->unpack_with_reexecution());
  SET_ADDRESS(_shared_blobs, SharedRuntime::deopt_blob()->unpack_with_exception_in_tls());
#if INCLUDE_JVMCI
  if (EnableJVMCI) {
    // JVMCI-only deopt entries; recorded conditionally, so EnableJVMCI must
    // agree between dump and load time for the positional ids to line up.
    SET_ADDRESS(_shared_blobs, SharedRuntime::deopt_blob()->uncommon_trap());
    SET_ADDRESS(_shared_blobs, SharedRuntime::deopt_blob()->implicit_exception_uncommon_trap());
  }
#endif

  _shared_blobs_complete = true;
  log_debug(aot, codecache, init)("Early shared blobs recorded");
  _complete = true;
}
1799
// Record the entry addresses of early C1 (Runtime1) blobs into the _C1_blobs
// chunk of the blobs table. Iterates the C1 stub ids from the group base up
// to (and including) c1_forward_exception_id; blobs that are missing or have
// no entry are skipped with a log message rather than treated as fatal.
void AOTCodeAddressTable::init_early_c1() {
#ifdef COMPILER1
  // Runtime1 Blobs
  StubId id = StubInfo::stub_base(StubGroup::C1);
  // include forward_exception in range we publish
  StubId limit = StubInfo::next(StubId::c1_forward_exception_id);
  for (; id != limit; id = StubInfo::next(id)) {
    if (Runtime1::blob_for(id) == nullptr) {
      log_info(aot, codecache, init)("C1 blob %s is missing", Runtime1::name_for(id));
      continue;
    }
    if (Runtime1::entry_for(id) == nullptr) {
      log_info(aot, codecache, init)("C1 blob %s is missing entry", Runtime1::name_for(id));
      continue;
    }
    address entry = Runtime1::entry_for(id);
    SET_ADDRESS(_C1_blobs, entry);
  }
#endif // COMPILER1
  assert(_C1_blobs_length <= _C1_blobs_max, "increase _C1_blobs_max to %d", _C1_blobs_length);
  _early_c1_complete = true;
}
1822
#undef SET_ADDRESS

// Maximum number of C strings that can be interned for AOT code.
// Non-product builds record more strings (e.g. for assert messages).
#ifdef PRODUCT
#define MAX_STR_COUNT 200
#else
#define MAX_STR_COUNT 500
#endif
// C string ids occupy the id range immediately after all address-table sections.
#define _c_str_max MAX_STR_COUNT
static const int _c_str_base = _all_max;

static const char* _C_strings_in[MAX_STR_COUNT] = {nullptr}; // Incoming strings
static const char* _C_strings[MAX_STR_COUNT] = {nullptr};    // Our duplicates
static int _C_strings_count = 0;              // Number of distinct strings collected
static int _C_strings_s[MAX_STR_COUNT] = {0}; // Maps assigned id -> index into _C_strings
static int _C_strings_id[MAX_STR_COUNT] = {0}; // Maps _C_strings index -> assigned id (-1 = none yet)
static int _C_strings_used = 0;               // Number of strings actually referenced (ids handed out)
1839
// Load the interned C strings from the cache header area into a C-heap buffer
// and rebuild the id/index maps. The per-string lengths are stored as a uint
// array immediately before the concatenated string bytes.
void AOTCodeCache::load_strings() {
  uint strings_count = _load_header->strings_count();
  if (strings_count == 0) {
    return;
  }
  uint strings_offset = _load_header->strings_offset();
  uint* string_lengths = (uint*)addr(strings_offset);
  strings_offset += (strings_count * sizeof(uint));
  uint strings_size = _load_header->entries_offset() - strings_offset;
  // We have to keep cached strings longer than _cache buffer
  // because they are referenced from compiled code which may
  // still be executed on VM exit after _cache is freed.
  char* p = NEW_C_HEAP_ARRAY(char, strings_size+1, mtCode);
  memcpy(p, addr(strings_offset), strings_size);
  _C_strings_buf = p;
  assert(strings_count <= MAX_STR_COUNT, "sanity");
  for (uint i = 0; i < strings_count; i++) {
    _C_strings[i] = p;
    uint len = string_lengths[i];
    // At load time ids and indices coincide: string i has id i.
    _C_strings_s[i] = i;
    _C_strings_id[i] = i;
    p += len; // lengths include the terminating NUL (see store_strings)
  }
  assert((uint)(p - _C_strings_buf) <= strings_size, "(" INTPTR_FORMAT " - " INTPTR_FORMAT ") = %d > %d ", p2i(p), p2i(_C_strings_buf), (uint)(p - _C_strings_buf), strings_size);
  _C_strings_count = strings_count;
  _C_strings_used  = strings_count;
  log_debug(aot, codecache, init)("  Loaded %d C strings of total length %d at offset %d from AOT Code Cache", _C_strings_count, strings_size, strings_offset);
}
1868
1869 int AOTCodeCache::store_strings() {
1870 if (_C_strings_used > 0) {
1871 MutexLocker ml(AOTCodeCStrings_lock, Mutex::_no_safepoint_check_flag);
1872 uint offset = _write_position;
1873 uint length = 0;
1874 uint* lengths = (uint *)reserve_bytes(sizeof(uint) * _C_strings_used);
1875 if (lengths == nullptr) {
1876 return -1;
1877 }
1878 for (int i = 0; i < _C_strings_used; i++) {
1879 const char* str = _C_strings[_C_strings_s[i]];
1880 uint len = (uint)strlen(str) + 1;
1881 length += len;
1882 assert(len < 1000, "big string: %s", str);
1883 lengths[i] = len;
1884 uint n = write_bytes(str, len);
1885 if (n != len) {
1886 return -1;
1887 }
1888 }
1889 log_debug(aot, codecache, exit)(" Wrote %d C strings of total length %d at offset %d to AOT Code Cache",
1890 _C_strings_used, length, offset);
1891 }
1892 return _C_strings_used;
1893 }
1894
1895 const char* AOTCodeCache::add_C_string(const char* str) {
1896 if (is_on_for_dump() && str != nullptr) {
1897 MutexLocker ml(AOTCodeCStrings_lock, Mutex::_no_safepoint_check_flag);
1898 AOTCodeAddressTable* table = addr_table();
1899 if (table != nullptr) {
1900 return table->add_C_string(str);
1901 }
1902 }
1903 return str;
1904 }
1905
1906 const char* AOTCodeAddressTable::add_C_string(const char* str) {
1907 if (_extrs_complete) {
1908 // Check previous strings address
1909 for (int i = 0; i < _C_strings_count; i++) {
1910 if (_C_strings_in[i] == str) {
1911 return _C_strings[i]; // Found previous one - return our duplicate
1912 } else if (strcmp(_C_strings[i], str) == 0) {
1913 return _C_strings[i];
1914 }
1915 }
1916 // Add new one
1917 if (_C_strings_count < MAX_STR_COUNT) {
1918 // Passed in string can be freed and used space become inaccessible.
1919 // Keep original address but duplicate string for future compare.
1920 _C_strings_id[_C_strings_count] = -1; // Init
1921 _C_strings_in[_C_strings_count] = str;
1922 const char* dup = os::strdup(str);
1923 _C_strings[_C_strings_count++] = dup;
1924 log_trace(aot, codecache, stringtable)("add_C_string: [%d] " INTPTR_FORMAT " '%s'", _C_strings_count, p2i(dup), dup);
1925 return dup;
1926 } else {
1927 assert(false, "Number of C strings >= MAX_STR_COUNT");
1928 }
1929 }
1930 return str;
1931 }
1932
// Return the id for an interned C string address, assigning a fresh id on
// first use. Only strings actually referenced from generated code get ids;
// returns -1 when 'str' is not an interned string at all.
int AOTCodeAddressTable::id_for_C_string(address str) {
  if (str == nullptr) {
    return -1;
  }
  MutexLocker ml(AOTCodeCStrings_lock, Mutex::_no_safepoint_check_flag);
  for (int i = 0; i < _C_strings_count; i++) {
    if (_C_strings[i] == (const char*)str) { // found
      int id = _C_strings_id[i];
      if (id >= 0) {
        assert(id < _C_strings_used, "%d >= %d", id , _C_strings_used);
        return id; // Found recorded
      }
      // Not found in recorded, add new: next free id, and keep the two maps
      // (id -> index, index -> id) in sync.
      id = _C_strings_used++;
      _C_strings_s[id] = i;
      _C_strings_id[i] = id;
      return id;
    }
  }
  return -1;
}
1954
1955 address AOTCodeAddressTable::address_for_C_string(int idx) {
1956 assert(idx < _C_strings_count, "sanity");
1957 return (address)_C_strings[idx];
1958 }
1959
1960 static int search_address(address addr, address* table, uint length) {
1961 for (int i = 0; i < (int)length; i++) {
1962 if (table[i] == addr) {
1963 return i;
1964 }
1965 }
1966 return BAD_ADDRESS_ID;
1967 }
1968
// Map a relocation id back to a runtime address. Ids are partitioned into
// consecutive ranges: _extrs, _stubs, _shared_blobs, _C1_blobs, then C
// strings; ids beyond all ranges encode a distance from os::init (see
// id_for_address), so the address is reconstructed by adding idx to os::init.
address AOTCodeAddressTable::address_for_id(int idx) {
  assert(_extrs_complete, "AOT Code Cache VM runtime addresses table is not complete");
  if (idx == -1) {
    return (address)-1;
  }
  uint id = (uint)idx;
  // special case for symbols based relative to os::init
  if (id > (_c_str_base + _c_str_max)) {
    return (address)os::init + idx;
  }
  // NOTE(review): any negative idx other than -1 becomes a huge uint and is
  // consumed by the os::init branch above, so this fatal is effectively
  // unreachable; the ordering is kept because large os::init distances may
  // legitimately appear negative when stored as int — confirm intent.
  if (idx < 0) {
    fatal("Incorrect id %d for AOT Code Cache addresses table", id);
    return nullptr;
  }
  // no need to compare unsigned id against 0
  if (/* id >= _extrs_base && */ id < _extrs_length) {
    return _extrs_addr[id - _extrs_base];
  }
  if (id >= _stubs_base && id < _stubs_base + _stubs_length) {
    return _stubs_addr[id - _stubs_base];
  }
  if (id >= _shared_blobs_base && id < _shared_blobs_base + _shared_blobs_length) {
    return _shared_blobs_addr[id - _shared_blobs_base];
  }
  if (id >= _C1_blobs_base && id < _C1_blobs_base + _C1_blobs_length) {
    return _C1_blobs_addr[id - _C1_blobs_base];
  }
  if (id >= _c_str_base && id < (_c_str_base + (uint)_C_strings_count)) {
    return address_for_C_string(id - _c_str_base);
  }
  fatal("Incorrect id %d for AOT Code Cache addresses table", id);
  return nullptr;
}
2002
// Map a runtime address found in a relocation to a stable id for storing in
// the AOT code cache (inverse of address_for_id). Searches, in order: interned
// C strings, stub routines, code blobs, then external runtime functions; as a
// last resort encodes a dynamic-library symbol as its distance from os::init.
// Returns -1 for the self-referencing static call stub sentinel, or a
// BAD_ADDRESS_ID-style negative result when nothing matches (asserts first in
// debug builds).
int AOTCodeAddressTable::id_for_address(address addr, RelocIterator reloc, CodeBlob* code_blob) {
  assert(_extrs_complete, "AOT Code Cache VM runtime addresses table is not complete");
  int id = -1;
  if (addr == (address)-1) { // Static call stub has jump to itself
    return id;
  }
  // Check card_table_base address first since it can point to any address
  BarrierSet* bs = BarrierSet::barrier_set();
  bool is_const_card_table_base = !UseG1GC && !UseShenandoahGC && bs->is_a(BarrierSet::CardTableBarrierSet);
  guarantee(!is_const_card_table_base || addr != ci_card_table_address_const(), "sanity");

  // Search for C string
  id = id_for_C_string(addr);
  if (id >= 0) {
    return id + _c_str_base;
  }
  if (StubRoutines::contains(addr)) {
    // Search in stubs
    id = search_address(addr, _stubs_addr, _stubs_length);
    if (id < 0) {
      // Retry with pc_return_offset in case 'addr' is a return address.
      StubCodeDesc* desc = StubCodeDesc::desc_for(addr);
      if (desc == nullptr) {
        desc = StubCodeDesc::desc_for(addr + frame::pc_return_offset);
      }
      const char* sub_name = (desc != nullptr) ? desc->name() : "<unknown>";
      assert(false, "Address " INTPTR_FORMAT " for Stub:%s is missing in AOT Code Cache addresses table", p2i(addr), sub_name);
    } else {
      return id + _stubs_base;
    }
  } else {
    CodeBlob* cb = CodeCache::find_blob(addr);
    if (cb != nullptr) {
      // Search in code blobs. The whole blobs array (_blobs_max slots, i.e.
      // both the shared and C1 chunks) is scanned; unfilled slots were
      // zeroed in init_shared_blobs so they cannot match.
      int id_base = _shared_blobs_base;
      id = search_address(addr, _shared_blobs_addr, _blobs_max);
      if (id < 0) {
        assert(false, "Address " INTPTR_FORMAT " for Blob:%s is missing in AOT Code Cache addresses table", p2i(addr), cb->name());
      } else {
        return id_base + id;
      }
    } else {
      // Search in runtime functions
      id = search_address(addr, _extrs_addr, _extrs_length);
      if (id < 0) {
        ResourceMark rm;
        const int buflen = 1024;
        char* func_name = NEW_RESOURCE_ARRAY(char, buflen);
        int offset = 0;
        if (os::dll_address_to_function_name(addr, func_name, buflen, &offset)) {
          if (offset > 0) {
            // Could be address of C string: encode it as its distance from
            // os::init; address_for_id() reverses this. The assert below
            // guards against this encoding colliding with table id ranges.
            uint dist = (uint)pointer_delta(addr, (address)os::init, 1);
            log_debug(aot, codecache)("Address " INTPTR_FORMAT " (offset %d) for runtime target '%s' is missing in AOT Code Cache addresses table",
                                      p2i(addr), dist, (const char*)addr);
            assert(dist > (uint)(_all_max + MAX_STR_COUNT), "change encoding of distance");
            return dist;
          }
#ifdef ASSERT
          reloc.print_current_on(tty);
          code_blob->print_on(tty);
          code_blob->print_code_on(tty);
          assert(false, "Address " INTPTR_FORMAT " for runtime target '%s+%d' is missing in AOT Code Cache addresses table", p2i(addr), func_name, offset);
#endif
        } else {
#ifdef ASSERT
          reloc.print_current_on(tty);
          code_blob->print_on(tty);
          code_blob->print_code_on(tty);
          os::find(addr, tty);
          assert(false, "Address " INTPTR_FORMAT " for <unknown>/('%s') is missing in AOT Code Cache addresses table", p2i(addr), (const char*)addr);
#endif
        }
      } else {
        return _extrs_base + id;
      }
    }
  }
  return id;
}
2082
// Singleton holding GC-dependent constants that AOT code reads indirectly.
AOTRuntimeConstants AOTRuntimeConstants::_aot_runtime_constants;

// Fill the runtime-constants area from the current GC configuration:
// the card table base (only constant for card-table GCs without G1/Shenandoah
// refinement) and the heap-region grain shift used by generated barriers.
void AOTRuntimeConstants::initialize_from_runtime() {
  BarrierSet* bs = BarrierSet::barrier_set();
  address card_table_base = nullptr;
  uint grain_shift = 0;
#if INCLUDE_G1GC
  if (bs->is_a(BarrierSet::G1BarrierSet)) {
    grain_shift = G1HeapRegion::LogOfHRGrainBytes;
  } else
#endif
#if INCLUDE_SHENANDOAHGC
  if (bs->is_a(BarrierSet::ShenandoahBarrierSet)) {
    grain_shift = 0; // Shenandoah: no grain-shift-based barrier math
  } else
#endif
  if (bs->is_a(BarrierSet::CardTableBarrierSet)) {
    CardTable::CardValue* base = ci_card_table_address_const();
    assert(base != nullptr, "unexpected byte_map_base");
    card_table_base = base;
    CardTableBarrierSet* ctbs = barrier_set_cast<CardTableBarrierSet>(bs);
    grain_shift = ctbs->grain_shift();
  }
  _aot_runtime_constants._card_table_base = card_table_base;
  _aot_runtime_constants._grain_shift = grain_shift;
}
2109
// Null-terminated list of addresses of the runtime-constants fields; consumed
// by AOTCodeAddressTable::init_extrs() to register each field in the
// external-addresses table.
address AOTRuntimeConstants::_field_addresses_list[] = {
  ((address)&_aot_runtime_constants._card_table_base),
  ((address)&_aot_runtime_constants._grain_shift),
  nullptr
};

// Address of the slot holding the cached card table base. Only meaningful for
// GCs whose card table base is a compile-time-stable constant.
address AOTRuntimeConstants::card_table_base_address() {
  assert(UseSerialGC || UseParallelGC, "Only these GCs have constant card table base");
  return (address)&_aot_runtime_constants._card_table_base;
}
2120
// This is called after initialize() but before init2()
// and _cache is not set yet.
// Prints one line per cached code entry (kind, index, id, size, saved name),
// ordered by the sorted search_entries table.
void AOTCodeCache::print_on(outputStream* st) {
  if (opened_cache != nullptr && opened_cache->for_use()) {
    st->print_cr("\nAOT Code Cache");
    uint count = opened_cache->_load_header->entries_count();
    // search_entries is an array of [id, index] pairs; entry records follow it.
    uint* search_entries = (uint*)opened_cache->addr(opened_cache->_load_header->entries_offset()); // [id, index]
    AOTCodeEntry* load_entries = (AOTCodeEntry*)(search_entries + 2 * count);

    for (uint i = 0; i < count; i++) {
      // Use search_entries[] to order output
      int index = search_entries[2*i + 1];
      AOTCodeEntry* entry = &(load_entries[index]);

      uint entry_position = entry->offset();
      // Name bytes are stored relative to the entry's own position.
      uint name_offset = entry->name_offset() + entry_position;
      const char* saved_name = opened_cache->addr(name_offset);

      st->print_cr("%4u: %10s idx:%4u Id:%u size=%u '%s'",
                   i, aot_code_entry_kind_name[entry->kind()], index, entry->id(), entry->size(), saved_name);
    }
  }
}