1 /*
   2  * Copyright (c) 2023, 2026, Oracle and/or its affiliates. All rights reserved.
   3  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
   4  *
   5  * This code is free software; you can redistribute it and/or modify it
   6  * under the terms of the GNU General Public License version 2 only, as
   7  * published by the Free Software Foundation.
   8  *
   9  * This code is distributed in the hope that it will be useful, but WITHOUT
  10  * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
  11  * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
  12  * version 2 for more details (a copy is included in the LICENSE file that
  13  * accompanied this code).
  14  *
  15  * You should have received a copy of the GNU General Public License version
  16  * 2 along with this work; if not, write to the Free Software Foundation,
  17  * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
  18  *
  19  * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
  20  * or visit www.oracle.com if you need additional information or have any
  21  * questions.
  22  *
  23  */
  24 
  25 
  26 #include "asm/macroAssembler.hpp"
  27 #include "cds/aotCacheAccess.hpp"
  28 #include "cds/aotMetaspace.hpp"
  29 #include "cds/cds_globals.hpp"
  30 #include "cds/cdsConfig.hpp"
  31 #include "cds/heapShared.hpp"
  32 #include "ci/ciUtilities.hpp"
  33 #include "classfile/javaAssertions.hpp"
  34 #include "code/aotCodeCache.hpp"
  35 #include "code/codeCache.hpp"
  36 #include "gc/shared/cardTableBarrierSet.hpp"
  37 #include "gc/shared/gcConfig.hpp"
  38 #include "logging/logStream.hpp"
  39 #include "memory/memoryReserver.hpp"
  40 #include "runtime/deoptimization.hpp"
  41 #include "runtime/flags/flagSetting.hpp"
  42 #include "runtime/globals_extension.hpp"
  43 #include "runtime/java.hpp"
  44 #include "runtime/mutexLocker.hpp"
  45 #include "runtime/os.inline.hpp"
  46 #include "runtime/sharedRuntime.hpp"
  47 #include "runtime/stubInfo.hpp"
  48 #include "runtime/stubRoutines.hpp"
  49 #include "utilities/copy.hpp"
  50 #ifdef COMPILER1
  51 #include "c1/c1_Runtime1.hpp"
  52 #endif
  53 #ifdef COMPILER2
  54 #include "opto/runtime.hpp"
  55 #endif
  56 #if INCLUDE_G1GC
  57 #include "gc/g1/g1BarrierSetRuntime.hpp"
  58 #include "gc/g1/g1HeapRegion.hpp"
  59 #endif
  60 #if INCLUDE_SHENANDOAHGC
  61 #include "gc/shenandoah/shenandoahRuntime.hpp"
  62 #endif
  63 #if INCLUDE_ZGC
  64 #include "gc/z/zBarrierSetRuntime.hpp"
  65 #endif
  66 
  67 #include <errno.h>
  68 #include <sys/stat.h>
  69 
// Human-readable names for AOTCodeEntry kinds, generated from the
// DO_AOTCODEENTRY_KIND enumeration macro (indexed by kind value).
const char* aot_code_entry_kind_name[] = {
#define DECL_KIND_STRING(kind) XSTR(kind),
  DO_AOTCODEENTRY_KIND(DECL_KIND_STRING)
#undef DECL_KIND_STRING
};
  75 
// Report inability to use the AOT code cache in the production phase:
// either abort VM startup (when AbortVMOnAOTCodeFailure is set) or log
// the condition and continue with AOT code caching disabled.
static void report_load_failure() {
  if (AbortVMOnAOTCodeFailure) {
    vm_exit_during_initialization("Unable to use AOT Code Cache.", nullptr);
  }
  log_info(aot, codecache, init)("Unable to use AOT Code Cache.");
  AOTCodeCache::disable_caching();
}
  83 
// Report inability to create the AOT code cache in the assembly phase:
// either abort the VM (when AbortVMOnAOTCodeFailure is set) or log the
// condition and continue with AOT code caching disabled.
// Note: unlike report_load_failure() this may run late in the VM's life,
// hence tty + vm_abort() and the `exit` log tag.
static void report_store_failure() {
  if (AbortVMOnAOTCodeFailure) {
    tty->print_cr("Unable to create AOT Code Cache.");
    vm_abort(false);
  }
  log_info(aot, codecache, exit)("Unable to create AOT Code Cache.");
  AOTCodeCache::disable_caching();
}
  92 
// The sequence of AOT code caching flags and parameters settings.
  94 //
  95 // 1. The initial AOT code caching flags setting is done
  96 // during call to CDSConfig::check_vm_args_consistency().
  97 //
  98 // 2. The earliest AOT code state check done in compilationPolicy_init()
  99 // where we set number of compiler threads for AOT assembly phase.
 100 //
// 3. We determine presence of AOT code in AOT Cache in
// AOTMetaspace::open_static_archive() which is called
// after compilationPolicy_init() but before codeCache_init().
 104 //
 105 // 4. AOTCodeCache::initialize() is called during universe_init()
 106 // and does final AOT state and flags settings.
 107 //
 108 // 5. Finally AOTCodeCache::init2() is called after universe_init()
 109 // when all GC settings are finalized.
 110 
 111 // Next methods determine which action we do with AOT code depending
 112 // on phase of AOT process: assembly or production.
 113 
 114 bool AOTCodeCache::is_dumping_adapter() {
 115   return AOTAdapterCaching && is_on_for_dump();
 116 }
 117 
 118 bool AOTCodeCache::is_using_adapter()   {
 119   return AOTAdapterCaching && is_on_for_use();
 120 }
 121 
 122 bool AOTCodeCache::is_dumping_stub() {
 123   return AOTStubCaching && is_on_for_dump();
 124 }
 125 
 126 bool AOTCodeCache::is_using_stub()   {
 127   return AOTStubCaching && is_on_for_use();
 128 }
 129 
// Next methods could be called regardless of AOT code cache status.
// Initially they are called during flags parsing and finalized
// in AOTCodeCache::initialize().
// Turn AOT code caching on ergonomically, but only for flags still at
// their default value (an explicit command-line setting wins).
void AOTCodeCache::enable_caching() {
  FLAG_SET_ERGO_IF_DEFAULT(AOTStubCaching, true);
  FLAG_SET_ERGO_IF_DEFAULT(AOTAdapterCaching, true);
}
 137 
// Turn AOT code caching off unconditionally (overrides any earlier
// command-line or ergonomic setting).
void AOTCodeCache::disable_caching() {
  FLAG_SET_ERGO(AOTStubCaching, false);
  FLAG_SET_ERGO(AOTAdapterCaching, false);
}
 142 
 143 bool AOTCodeCache::is_caching_enabled() {
 144   return AOTStubCaching || AOTAdapterCaching;
 145 }
 146 
 147 static uint32_t encode_id(AOTCodeEntry::Kind kind, int id) {
 148   assert(AOTCodeEntry::is_valid_entry_kind(kind), "invalid AOTCodeEntry kind %d", (int)kind);
 149   // There can be a conflict of id between an Adapter and *Blob, but that should not cause any functional issue
 150   // becasue both id and kind are used to find an entry, and that combination should be unique
 151   if (kind == AOTCodeEntry::Adapter) {
 152     return id;
 153   } else if (kind == AOTCodeEntry::SharedBlob) {
 154     assert(StubInfo::is_shared(static_cast<BlobId>(id)), "not a shared blob id %d", id);
 155     return id;
 156   } else if (kind == AOTCodeEntry::C1Blob) {
 157     assert(StubInfo::is_c1(static_cast<BlobId>(id)), "not a c1 blob id %d", id);
 158     return id;
 159   } else {
 160     // kind must be AOTCodeEntry::C2Blob
 161     assert(StubInfo::is_c2(static_cast<BlobId>(id)), "not a c2 blob id %d", id);
 162     return id;
 163   }
 164 }
 165 
// Effective maximum cache size: AOTCodeMaxSize, aligned up to the VM
// allocation granularity by AOTCodeCache::initialize().
static uint _max_aot_code_size = 0;
uint AOTCodeCache::max_aot_code_size() {
  return _max_aot_code_size;
}
 170 
// It is called from AOTMetaspace::initialize_shared_spaces()
// which is called from universe_init().
// At this point all AOT class linking settings are finalized
// and AOT cache is open so we can map AOT code region.
 175 void AOTCodeCache::initialize() {
 176 #if defined(ZERO) || !(defined(AMD64) || defined(AARCH64))
 177   log_info(aot, codecache, init)("AOT Code Cache is not supported on this platform.");
 178   disable_caching();
 179   return;
 180 #else
 181   if (FLAG_IS_DEFAULT(AOTCache)) {
 182     log_info(aot, codecache, init)("AOT Code Cache is not used: AOTCache is not specified.");
 183     disable_caching();
 184     return; // AOTCache must be specified to dump and use AOT code
 185   }
 186 
 187   // Disable stubs caching until JDK-8357398 is fixed.
 188   FLAG_SET_ERGO(AOTStubCaching, false);
 189 
 190   if (VerifyOops) {
 191     // Disable AOT stubs caching when VerifyOops flag is on.
 192     // Verify oops code generated a lot of C strings which overflow
 193     // AOT C string table (which has fixed size).
 194     // AOT C string table will be reworked later to handle such cases.
 195     //
 196     // Note: AOT adapters are not affected - they don't have oop operations.
 197     log_info(aot, codecache, init)("AOT Stubs Caching is not supported with VerifyOops.");
 198     FLAG_SET_ERGO(AOTStubCaching, false);
 199   }
 200 
 201   bool is_dumping = false;
 202   bool is_using   = false;
 203   if (CDSConfig::is_dumping_final_static_archive() && CDSConfig::is_dumping_aot_linked_classes()) {
 204     is_dumping = true;
 205     enable_caching();
 206     is_dumping = is_caching_enabled();
 207   } else if (CDSConfig::is_using_archive() && CDSConfig::is_using_aot_linked_classes()) {
 208     enable_caching();
 209     is_using = is_caching_enabled();
 210   } else {
 211     log_info(aot, codecache, init)("AOT Code Cache is not used: AOT Class Linking is not used.");
 212     disable_caching();
 213     return; // nothing to do
 214   }
 215   if (!(is_dumping || is_using)) {
 216     disable_caching();
 217     return; // AOT code caching disabled on command line
 218   }
 219   _max_aot_code_size = AOTCodeMaxSize;
 220   if (!FLAG_IS_DEFAULT(AOTCodeMaxSize)) {
 221     if (!is_aligned(AOTCodeMaxSize, os::vm_allocation_granularity())) {
 222       _max_aot_code_size = align_up(AOTCodeMaxSize, os::vm_allocation_granularity());
 223       log_debug(aot,codecache,init)("Max AOT Code Cache size is aligned up to %uK", (int)(max_aot_code_size()/K));
 224     }
 225   }
 226   size_t aot_code_size = is_using ? AOTCacheAccess::get_aot_code_region_size() : 0;
 227   if (is_using && aot_code_size == 0) {
 228     log_info(aot, codecache, init)("AOT Code Cache is empty");
 229     disable_caching();
 230     return;
 231   }
 232   if (!open_cache(is_dumping, is_using)) {
 233     if (is_using) {
 234       report_load_failure();
 235     } else {
 236       report_store_failure();
 237     }
 238     return;
 239   }
 240   if (is_dumping) {
 241     FLAG_SET_DEFAULT(ForceUnreachable, true);
 242   }
 243   FLAG_SET_DEFAULT(DelayCompilerStubsGeneration, false);
 244 #endif // defined(AMD64) || defined(AARCH64)
 245 }
 246 
static AOTCodeCache*  opened_cache = nullptr; // Use this until we verify the cache
AOTCodeCache* AOTCodeCache::_cache = nullptr; // Published by init2() after config verification
DEBUG_ONLY( bool AOTCodeCache::_passed_init2 = false; ) // Sanity check: init2() has run
 250 
// It is called after universe_init() when all GC settings are finalized.
// Verifies the opened cache's recorded configuration against the current
// runtime and, on success, publishes it as the active cache (_cache).
void AOTCodeCache::init2() {
  DEBUG_ONLY( _passed_init2 = true; )
  if (opened_cache == nullptr) {
    return; // no cache was opened by initialize()
  }
  // Config verification requires final GC settings, hence it happens here
  // rather than in initialize().
  if (!opened_cache->verify_config()) {
    delete opened_cache;
    opened_cache = nullptr;
    report_load_failure();
    return;
  }

  // initialize aot runtime constants as appropriate to this runtime
  AOTRuntimeConstants::initialize_from_runtime();

  // initialize the table of external routines so we can save
  // generated code blobs that reference them
  AOTCodeAddressTable* table = opened_cache->_table;
  assert(table != nullptr, "should be initialized already");
  table->init_extrs();

  // Now cache and address table are ready for AOT code generation
  _cache = opened_cache;
}
 276 
 277 bool AOTCodeCache::open_cache(bool is_dumping, bool is_using) {
 278   opened_cache = new AOTCodeCache(is_dumping, is_using);
 279   if (opened_cache->failed()) {
 280     delete opened_cache;
 281     opened_cache = nullptr;
 282     return false;
 283   }
 284   return true;
 285 }
 286 
// Write out all collected AOT code entries at the end of the assembly
// phase. Holds Compile_lock so no new entries are added while writing.
void AOTCodeCache::dump() {
  if (is_on()) {
    assert(is_on_for_dump(), "should be called only when dumping AOT code");
    MutexLocker ml(Compile_lock);
    _cache->finish_write();
  }
}
 294 
 295 #define DATA_ALIGNMENT HeapWordSize
 296 
 297 AOTCodeCache::AOTCodeCache(bool is_dumping, bool is_using) :
 298   _load_header(nullptr),
 299   _load_buffer(nullptr),
 300   _store_buffer(nullptr),
 301   _C_store_buffer(nullptr),
 302   _write_position(0),
 303   _load_size(0),
 304   _store_size(0),
 305   _for_use(is_using),
 306   _for_dump(is_dumping),
 307   _failed(false),
 308   _lookup_failed(false),
 309   _table(nullptr),
 310   _load_entries(nullptr),
 311   _search_entries(nullptr),
 312   _store_entries(nullptr),
 313   _C_strings_buf(nullptr),
 314   _store_entries_cnt(0)
 315 {
 316   // Read header at the begining of cache
 317   if (_for_use) {
 318     // Read cache
 319     size_t load_size = AOTCacheAccess::get_aot_code_region_size();
 320     ReservedSpace rs = MemoryReserver::reserve(load_size, mtCode);
 321     if (!rs.is_reserved()) {
 322       log_warning(aot, codecache, init)("Failed to reserved %u bytes of memory for mapping AOT code region into AOT Code Cache", (uint)load_size);
 323       set_failed();
 324       return;
 325     }
 326     if (!AOTCacheAccess::map_aot_code_region(rs)) {
 327       log_warning(aot, codecache, init)("Failed to read/mmap cached code region into AOT Code Cache");
 328       set_failed();
 329       return;
 330     }
 331 
 332     _load_size = (uint)load_size;
 333     _load_buffer = (char*)rs.base();
 334     assert(is_aligned(_load_buffer, DATA_ALIGNMENT), "load_buffer is not aligned");
 335     log_debug(aot, codecache, init)("Mapped %u bytes at address " INTPTR_FORMAT " at AOT Code Cache", _load_size, p2i(_load_buffer));
 336 
 337     _load_header = (Header*)addr(0);
 338     if (!_load_header->verify(_load_size)) {
 339       set_failed();
 340       return;
 341     }
 342     log_info (aot, codecache, init)("Loaded %u AOT code entries from AOT Code Cache", _load_header->entries_count());
 343     log_debug(aot, codecache, init)("  Adapters:  total=%u", _load_header->adapters_count());
 344     log_debug(aot, codecache, init)("  Shared Blobs: total=%u", _load_header->shared_blobs_count());
 345     log_debug(aot, codecache, init)("  C1 Blobs: total=%u", _load_header->C1_blobs_count());
 346     log_debug(aot, codecache, init)("  C2 Blobs: total=%u", _load_header->C2_blobs_count());
 347     log_debug(aot, codecache, init)("  AOT code cache size: %u bytes", _load_header->cache_size());
 348 
 349     // Read strings
 350     load_strings();
 351   }
 352   if (_for_dump) {
 353     _C_store_buffer = NEW_C_HEAP_ARRAY(char, max_aot_code_size() + DATA_ALIGNMENT, mtCode);
 354     _store_buffer = align_up(_C_store_buffer, DATA_ALIGNMENT);
 355     // Entries allocated at the end of buffer in reverse (as on stack).
 356     _store_entries = (AOTCodeEntry*)align_up(_C_store_buffer + max_aot_code_size(), DATA_ALIGNMENT);
 357     log_debug(aot, codecache, init)("Allocated store buffer at address " INTPTR_FORMAT " of size %u", p2i(_store_buffer), max_aot_code_size());
 358   }
 359   _table = new AOTCodeAddressTable();
 360 }
 361 
 362 void AOTCodeCache::init_early_stubs_table() {
 363   AOTCodeAddressTable* table = addr_table();
 364   if (table != nullptr) {
 365     table->init_early_stubs();
 366   }
 367 }
 368 
 369 void AOTCodeCache::init_shared_blobs_table() {
 370   AOTCodeAddressTable* table = addr_table();
 371   if (table != nullptr) {
 372     table->init_shared_blobs();
 373   }
 374 }
 375 
 376 void AOTCodeCache::init_early_c1_table() {
 377   AOTCodeAddressTable* table = addr_table();
 378   if (table != nullptr) {
 379     table->init_early_c1();
 380   }
 381 }
 382 
 383 // macro to record which flags are set -- flag_type selects the
 384 // relevant accessor e.g. set_flag, set_x86_flag, set_x86_use_flag.
 385 // n.b. flag_enum_name and global_flag_name are both needed because we
 386 // don't have consistent conventions for naming global flags e.g.
 387 // EnableContended vs UseMulAddIntrinsic vs UseCRC32Intrinsics
 388 
// Note: expands to set_flag(...), set_use_flag(...), set_x86_flag(...),
// etc. depending on flag_type; records the flag bit only when the
// corresponding global flag is currently true.
#define RECORD_FLAG(flag_type, flag_enum_name, global_flag_name)        \
  if (global_flag_name) {                                               \
    set_ ## flag_type ## flag(flag_enum_name);                          \
  }
 393 
// Record the VM configuration under which AOT code is being generated so
// that a later run can verify compatibility before using the cache (see
// Config::verify()). cpu_features_offset is the offset in the cache of
// the serialized CPU feature set recorded alongside these flags.
void AOTCodeCache::Config::record(uint cpu_features_offset) {
  _flags = 0;
#ifdef ASSERT
  // Mark caches produced by debug VMs; they can't be reused by product VMs.
  set_flag(debugVM);
#endif
  RECORD_FLAG(, compressedOops, UseCompressedOops);
  RECORD_FLAG(, useTLAB, UseTLAB);
  if (JavaAssertions::systemClassDefault()) {
    set_flag(systemClassAssertions);
  }
  if (JavaAssertions::userClassDefault()) {
    set_flag(userClassAssertions);
  }
  RECORD_FLAG(, enableContendedPadding, EnableContended);
  RECORD_FLAG(, restrictContendedPadding, RestrictContended);

  // Values (rather than booleans) that affect generated code.
  _compressedOopShift    = CompressedOops::shift();
  _compressedOopBase     = CompressedOops::base();
  _compressedKlassShift  = CompressedKlassPointers::shift();
  _contendedPaddingWidth = ContendedPaddingWidth;
  _gc                    = (uint)Universe::heap()->kind();
  _optoLoopAlignment               = (uint)OptoLoopAlignment;
  _codeEntryAlignment              = (uint)CodeEntryAlignment;
  _allocatePrefetchLines           = (uint)AllocatePrefetchLines;
  _allocateInstancePrefetchLines   = (uint)AllocateInstancePrefetchLines;
  _allocatePrefetchDistance        = (uint)AllocatePrefetchDistance;
  _allocatePrefetchStepSize        = (uint)AllocatePrefetchStepSize;
  // Intrinsic-use flags shared by all platforms.
  _use_intrinsics_flags = 0;
  RECORD_FLAG(use_, useCRC32, UseCRC32Intrinsics);
  RECORD_FLAG(use_, useCRC32C, UseCRC32CIntrinsics);
#ifdef COMPILER2
  _maxVectorSize                   = (uint)MaxVectorSize;
  _arrayOperationPartialInlineSize = (uint)ArrayOperationPartialInlineSize;
  RECORD_FLAG(use_, useMultiplyToLen, UseMultiplyToLenIntrinsic);
  RECORD_FLAG(use_, useSquareToLen, UseSquareToLenIntrinsic);
  RECORD_FLAG(use_, useMulAdd, UseMulAddIntrinsic);
  RECORD_FLAG(use_, useMontgomeryMultiply, UseMontgomeryMultiplyIntrinsic);
  RECORD_FLAG(use_, useMontgomerySquare, UseMontgomerySquareIntrinsic);
#endif // COMPILER2
  RECORD_FLAG(use_, useChaCha20, UseChaCha20Intrinsics);
  RECORD_FLAG(use_, useDilithium, UseDilithiumIntrinsics);
  RECORD_FLAG(use_, useKyber, UseKyberIntrinsics);
  RECORD_FLAG(use_, useBASE64, UseBASE64Intrinsics);
  RECORD_FLAG(use_, useAdler32, UseAdler32Intrinsics);
  RECORD_FLAG(use_, useAES, UseAESIntrinsics);
  RECORD_FLAG(use_, useAESCTR, UseAESCTRIntrinsics);
  RECORD_FLAG(use_, useGHASH, UseGHASHIntrinsics);
  RECORD_FLAG(use_, useMD5, UseMD5Intrinsics);
  RECORD_FLAG(use_, useSHA1, UseSHA1Intrinsics);
  RECORD_FLAG(use_, useSHA256, UseSHA256Intrinsics);
  RECORD_FLAG(use_, useSHA512, UseSHA512Intrinsics);
  RECORD_FLAG(use_, useSHA3, UseSHA3Intrinsics);
  RECORD_FLAG(use_, usePoly1305, UsePoly1305Intrinsics);
  RECORD_FLAG(use_, useVectorizedMismatch,UseVectorizedMismatchIntrinsic );
  RECORD_FLAG(use_, useSecondarySupersTable, UseSecondarySupersTable);
#if defined(X86) && !defined(ZERO)
  // x86-specific values and flags.
  _avx3threshold                   = (uint)AVX3Threshold;
  _useAVX                          = (uint)UseAVX;
  _x86_flags                       = 0;
  RECORD_FLAG(x86_, x86_enableX86ECoreOpts, EnableX86ECoreOpts);
  RECORD_FLAG(x86_, x86_useUnalignedLoadStores, UseUnalignedLoadStores);
  RECORD_FLAG(x86_, x86_useAPX, UseAPX);

  _x86_use_intrinsics_flags            = 0;
  RECORD_FLAG(x86_use_, x86_useLibm, UseLibmIntrinsic);
  RECORD_FLAG(x86_use_, x86_useIntPoly, UseIntPolyIntrinsics);
#endif // defined(X86) && !defined(ZERO)
#if defined(AARCH64)  && !defined(ZERO)
  // aarch64-specific values and flags.
  _prefetchCopyIntervalInBytes     = (uint)PrefetchCopyIntervalInBytes;
  _blockZeroingLowLimit            = (uint)BlockZeroingLowLimit;
  _softwarePrefetchHintDistance    = (uint)SoftwarePrefetchHintDistance;
  _useSVE                          = (uint)UseSVE;
  _aarch64_flags                   = 0;
  RECORD_FLAG(aarch64_, aarch64_avoidUnalignedAccesses, AvoidUnalignedAccesses);
  RECORD_FLAG(aarch64_, aarch64_useSIMDForMemoryOps, UseSIMDForMemoryOps);
  RECORD_FLAG(aarch64_, aarch64_useSIMDForArrayEquals, UseSIMDForArrayEquals);
  RECORD_FLAG(aarch64_, aarch64_useSIMDForSHA3, UseSIMDForSHA3Intrinsic);
  RECORD_FLAG(aarch64_, aarch64_useLSE, UseLSE);

  _aarch64_use_intrinsics_flags     = 0;
  RECORD_FLAG(aarch64_use_, aarch64_useBlockZeroing, UseBlockZeroing);
  RECORD_FLAG(aarch64_use_, aarch64_useSIMDForBigIntegerShift, UseSIMDForBigIntegerShiftIntrinsics);
  RECORD_FLAG(aarch64_use_, aarch64_useSimpleArrayEquals, UseSimpleArrayEquals);
  RECORD_FLAG(aarch64_use_, aarch64_useSecondarySupersCache, UseSecondarySupersCache);
#endif // defined(AARCH64) && !defined(ZERO)
#if INCLUDE_JVMCI
  _enableJVMCI                     = (uint)EnableJVMCI;
#endif
  _cpu_features_offset   = cpu_features_offset;
}
 484 
 485 #undef RECORD_FLAG
 486 
// Check that the CPU features recorded in the cache are a subset of the
// features supported by the current CPU. The data at _cpu_features_offset
// is a uint size followed by the serialized feature buffer.
// Returns false (cache unusable) when a recorded feature is missing.
bool AOTCodeCache::Config::verify_cpu_features(AOTCodeCache* cache) const {
  LogStreamHandle(Debug, aot, codecache, init) log;
  uint offset = _cpu_features_offset;
  uint cpu_features_size = *(uint *)cache->addr(offset);
  assert(cpu_features_size == (uint)VM_Version::cpu_features_size(), "must be");
  offset += sizeof(uint);

  void* cached_cpu_features_buffer = (void *)cache->addr(offset);
  if (log.is_enabled()) {
    ResourceMark rm; // required for stringStream::as_string()
    stringStream ss;
    VM_Version::get_cpu_features_name(cached_cpu_features_buffer, ss);
    log.print_cr("CPU features recorded in AOTCodeCache: %s", ss.as_string());
  }

  if (VM_Version::supports_features(cached_cpu_features_buffer)) {
    // Compatible. Optionally log features the runtime CPU has beyond
    // those recorded in the cache (note the argument order below).
    if (log.is_enabled()) {
      ResourceMark rm; // required for stringStream::as_string()
      stringStream ss;
      char* runtime_cpu_features = NEW_RESOURCE_ARRAY(char, VM_Version::cpu_features_size());
      VM_Version::store_cpu_features(runtime_cpu_features);
      VM_Version::get_missing_features_name(runtime_cpu_features, cached_cpu_features_buffer, ss);
      if (!ss.is_empty()) {
        log.print_cr("Additional runtime CPU features: %s", ss.as_string());
      }
    }
  } else {
    // Incompatible. Log the recorded features that the runtime CPU lacks
    // (arguments reversed relative to the branch above).
    if (log.is_enabled()) {
      ResourceMark rm; // required for stringStream::as_string()
      stringStream ss;
      char* runtime_cpu_features = NEW_RESOURCE_ARRAY(char, VM_Version::cpu_features_size());
      VM_Version::store_cpu_features(runtime_cpu_features);
      VM_Version::get_missing_features_name(cached_cpu_features_buffer, runtime_cpu_features, ss);
      log.print_cr("AOT Code Cache disabled: required cpu features are missing: %s", ss.as_string());
    }
    return false;
  }
  return true;
}
 526 
 527 // macro to do *standard* flag eq checks -- flag_type selects the
 528 // relevant accessor e.g. test_flag, test_x86_flag, test_x86_use_flag.
 529 // n.b. flag_enum_name and global_flag_name are both needed because we
 530 // don't have consistent conventions for naming global flags e.g.
 531 // EnableContended vs UseMulAddIntrinsic vs UseCRC32Intrinsics
 532 
// Note: expands inside AOTCodeCache::Config::verify(); performs an early
// `return false` from the enclosing method when the recorded flag value
// differs from the current global flag value.
#define CHECK_FLAG(flag_type, flag_enum_name, global_flag_name)         \
  if (test_ ## flag_type ## flag(flag_enum_name) != global_flag_name) {   \
    log_debug(aot, codecache, init)("AOT Code Cache disabled: it was created with " # global_flag_name " = %s vs current %s" , (global_flag_name ? "false" : "true"), (global_flag_name ? "true" : "false")); \
    return false;                                                       \
  }
 538 
 539 bool AOTCodeCache::Config::verify(AOTCodeCache* cache) const {
 540   // First checks affect all cached AOT code
 541 #ifdef ASSERT
 542   if (!test_flag(debugVM)) {
 543     log_debug(aot, codecache, init)("AOT Code Cache disabled: it was created by product VM, it can't be used by debug VM");
 544     return false;
 545   }
 546 #else
 547   if (test_flag(debugVM)) {
 548     log_debug(aot, codecache, init)("AOT Code Cache disabled: it was created by debug VM, it can't be used by product VM");
 549     return false;
 550   }
 551 #endif
 552 
 553   CollectedHeap::Name aot_gc = (CollectedHeap::Name)_gc;
 554   if (aot_gc != Universe::heap()->kind()) {
 555     log_debug(aot, codecache, init)("AOT Code Cache disabled: it was created with different GC: %s vs current %s", GCConfig::hs_err_name(aot_gc), GCConfig::hs_err_name());
 556     return false;
 557   }
 558 
 559   if (_compressedKlassShift != (uint)CompressedKlassPointers::shift()) {
 560     log_debug(aot, codecache, init)("AOT Code Cache disabled: it was created with CompressedKlassPointers::shift() = %d vs current %d", _compressedKlassShift, CompressedKlassPointers::shift());
 561     return false;
 562   }
 563 
 564   // check CPU features before checking flags that may be
 565   // auto-configured in response to them
 566   if (!verify_cpu_features(cache)) {
 567     return false;
 568   }
 569 
 570   // change to EnableContended can affect validity of nmethods
 571   CHECK_FLAG(, enableContendedPadding, EnableContended);
 572   // change to RestrictContended can affect validity of nmethods
 573   CHECK_FLAG(, restrictContendedPadding, RestrictContended);
 574 
 575   // Tests for config options which might affect validity of adapters,
// stubs or nmethods. Currently we take a pessimistic stand and
 577   // drop the whole cache if any of these are changed.
 578 
 579   // change to opto alignment can affect performance of array copy
 580   // stubs and nmethods
 581   if (_optoLoopAlignment != (uint)OptoLoopAlignment) {
 582     log_debug(aot, codecache, init)("AOT Code Cache disabled: it was created with OptoLoopAlignment = %d vs current %d", (int)_optoLoopAlignment, (int)OptoLoopAlignment);
 583     return false;
 584   }
 585 
 586   // change to CodeEntryAlignment can affect performance of array
 587   // copy stubs and nmethods
 588   if (_codeEntryAlignment != CodeEntryAlignment) {
 589     log_debug(aot, codecache, init)("AOT Code Cache disabled: it was created with CodeEntryAlignment = %d vs current %d", _codeEntryAlignment, CodeEntryAlignment);
 590     return false;
 591   }
 592 
 593   // changing Prefetch configuration can affect validity of nmethods
 594   // and stubs
 595   if (_allocatePrefetchLines != (uint)AllocatePrefetchLines) {
 596     log_debug(aot, codecache, init)("AOT Code Cache disabled: it was created with  = %d vs current %d", (int)_allocatePrefetchLines, (int)AllocatePrefetchLines);
 597     return false;
 598   }
 599   if (_allocateInstancePrefetchLines != (uint)AllocateInstancePrefetchLines) {
 600     log_debug(aot, codecache, init)("AOT Code Cache disabled: it was created with  = %d vs current %d", (int)_allocateInstancePrefetchLines, (int)AllocateInstancePrefetchLines);
 601     return false;
 602   }
 603   if (_allocatePrefetchDistance != (uint)AllocatePrefetchDistance) {
 604     log_debug(aot, codecache, init)("AOT Code Cache disabled: it was created with  = %d vs current %d", (int)_allocatePrefetchDistance, (int)AllocatePrefetchDistance);
 605     return false;
 606   }
 607   if (_allocatePrefetchStepSize != (uint)AllocatePrefetchStepSize) {
 608     log_debug(aot, codecache, init)("AOT Code Cache disabled: it was created with  = %d vs current %d", (int)_allocatePrefetchStepSize, (int)AllocatePrefetchStepSize);
 609     return false;
 610   }
 611 
 612   // check intrinsic use settings are compatible
 613 
 614   CHECK_FLAG(use_, useCRC32, UseCRC32Intrinsics);
 615   CHECK_FLAG(use_, useCRC32C, UseCRC32CIntrinsics);
 616 
 617 #ifdef COMPILER2
 618   // change to MaxVectorSize can affect validity of array copy/fill
 619   // stubs
 620   if (_maxVectorSize != (uint)MaxVectorSize) {
 621     log_debug(aot, codecache, init)("AOT Code Cache disabled: it was created with MaxVectorSize = %d vs current %d", (int)_maxVectorSize, (int)MaxVectorSize);
 622     return false;
 623   }
 624 
 625   // changing ArrayOperationPartialInlineSize can affect validity of
 626   // nmethods and stubs
 627   if (_arrayOperationPartialInlineSize != (uint)ArrayOperationPartialInlineSize) {
 628     log_debug(aot, codecache, init)("AOT Code Cache disabled: it was created with ArrayOperationPartialInlineSize = %d vs current %d", (int)_arrayOperationPartialInlineSize, (int)ArrayOperationPartialInlineSize);
 629     return false;
 630   }
 631   CHECK_FLAG(use_, useMultiplyToLen, UseMultiplyToLenIntrinsic);
 632   CHECK_FLAG(use_, useSquareToLen, UseSquareToLenIntrinsic);
 633   CHECK_FLAG(use_, useMulAdd, UseMulAddIntrinsic);
 634   CHECK_FLAG(use_, useMontgomeryMultiply,UseMontgomeryMultiplyIntrinsic);
 635   CHECK_FLAG(use_, useMontgomerySquare, UseMontgomerySquareIntrinsic);
 636 #endif // COMPILER2
 637   CHECK_FLAG(use_, useChaCha20, UseChaCha20Intrinsics);
 638   CHECK_FLAG(use_, useDilithium, UseDilithiumIntrinsics);
 639   CHECK_FLAG(use_, useKyber, UseKyberIntrinsics);
 640   CHECK_FLAG(use_, useBASE64, UseBASE64Intrinsics);
 641   CHECK_FLAG(use_, useAES, UseAESIntrinsics);
 642   CHECK_FLAG(use_, useAESCTR, UseAESCTRIntrinsics);
 643   CHECK_FLAG(use_, useGHASH, UseGHASHIntrinsics);
 644   CHECK_FLAG(use_, useMD5, UseMD5Intrinsics);
 645   CHECK_FLAG(use_, useSHA1, UseSHA1Intrinsics);
 646   CHECK_FLAG(use_, useSHA256, UseSHA256Intrinsics);
 647   CHECK_FLAG(use_, useSHA512, UseSHA512Intrinsics);
 648   CHECK_FLAG(use_, useSHA3, UseSHA3Intrinsics);
 649   CHECK_FLAG(use_, usePoly1305, UsePoly1305Intrinsics);
 650   CHECK_FLAG(use_, useVectorizedMismatch, UseVectorizedMismatchIntrinsic);
 651   CHECK_FLAG(use_, useSecondarySupersTable, UseSecondarySupersTable);
 652 
 653 #if defined(X86) && !defined(ZERO)
 654   // change to AVX3Threshold may affect validity of array copy stubs
 655   // and nmethods
 656   if (_avx3threshold != (uint)AVX3Threshold) {
 657     log_debug(aot, codecache, init)("AOT Code Cache disabled: it was created with AVX3Threshold = %d vs current %d", (int)_avx3threshold, AVX3Threshold);
 658     return false;
 659   }
 660 
 661   // change to UseAVX may affect validity of array copy stubs and
 662   // nmethods
 663   if (_useAVX != (uint)UseAVX) {
 664     log_debug(aot, codecache, init)("AOT Code Cache disabled: it was created with useAVX = %d vs current %d", (int)_useAVX, UseAVX);
 665     return false;
 666   }
 667 
 668   // change to EnableX86ECoreOpts may affect validity of nmethods
 669   CHECK_FLAG(x86_, x86_enableX86ECoreOpts, EnableX86ECoreOpts);
 670 
 671   // switching off UseUnalignedLoadStores can affect validity of fill
 672   // stubs
 673   if (test_x86_flag(x86_useUnalignedLoadStores) && !UseUnalignedLoadStores) {
 674     log_debug(aot, codecache, init)("AOT Code Cache disabled: it was created with UseUnalignedLoadStores = true vs current = false");
 675     return false;
 676   }
 677 
 678   // change to UseAPX can affect validity of nmethods and stubs
 679   CHECK_FLAG(x86_, x86_useAPX, UseAPX);
 680 
 681   // check x86-specific intrinsic use settings are compatible
 682 
 683   CHECK_FLAG(x86_use_, x86_useLibm, UseLibmIntrinsic);
 684   CHECK_FLAG(x86_use_, x86_useIntPoly, UseIntPolyIntrinsics);
 685 #endif // defined(X86) && !defined(ZERO)
 686 
 687 #if defined(AARCH64) && !defined(ZERO)
 688   // change to PrefetchCopyIntervalInBytes may affect validity of
 689   // array copy stubs
 690   if (_prefetchCopyIntervalInBytes != (uint)PrefetchCopyIntervalInBytes) {
 691     log_debug(aot, codecache, init)("AOT Code Cache disabled: it was created with PrefetchCopyIntervalInBytes = %d vs current %d", (int)_prefetchCopyIntervalInBytes, (int)PrefetchCopyIntervalInBytes);
 692     return false;
 693   }
 694 
 695   // change to BlockZeroingLowLimit may affect validity of array fill
 696   // stubs
 697   if (_blockZeroingLowLimit != (uint)BlockZeroingLowLimit) {
 698     log_debug(aot, codecache, init)("AOT Code Cache disabled: it was created with BlockZeroingLowLimit = %d vs current %d", (int)_blockZeroingLowLimit, (int)BlockZeroingLowLimit);
 699     return false;
 700   }
 701 
 702   // change to SoftwarePrefetchHintDistance may affect validity of array fill
 703   // stubs
 704   if (_softwarePrefetchHintDistance != (uint)SoftwarePrefetchHintDistance) {
 705     log_debug(aot, codecache, init)("AOT Code Cache disabled: it was created with SoftwarePrefetchHintDistance = %d vs current %d", (int)_softwarePrefetchHintDistance, (int)SoftwarePrefetchHintDistance);
 706     return false;
 707   }
 708 
 709   // change to UseSVE may affect validity of stubs and nmethods
 710   if (_useSVE != (uint)UseSVE) {
 711   log_debug(aot, codecache, init)("AOT Code Cache disabled: it was created with UseSVE = %d vs current %d",(int)_useSVE, UseSVE);
 712     return false;
 713   }
 714 
 715   // switching on AvoidUnalignedAccesses may affect validity of array
 716   // copy stubs and nmethods
 717   if (!test_aarch64_flag(aarch64_avoidUnalignedAccesses) && AvoidUnalignedAccesses) {
 718     log_debug(aot, codecache, init)("AOT Code Cache disabled: it was created with AvoidUnalignedAccesses = false vs current = true");
 719     return false;
 720   }
 721 
 722   // change to UseSIMDForMemoryOps may affect validity of array
 723   // copy stubs and nmethods
 724   CHECK_FLAG(aarch64_, aarch64_useSIMDForMemoryOps, UseSIMDForMemoryOps);
 725   // change to UseSIMDForArrayEquals may affect validity of array
 726   // copy stubs and nmethods
 727   CHECK_FLAG(aarch64_, aarch64_useSIMDForArrayEquals, UseSIMDForArrayEquals);
 728   // change to useSIMDForSHA3 may affect validity of SHA3 stubs
 729   CHECK_FLAG(aarch64_, aarch64_useSIMDForSHA3, UseSIMDForSHA3Intrinsic);
 730   // change to UseLSE may affect validity of stubs and nmethods
 731   CHECK_FLAG(aarch64_, aarch64_useLSE, UseLSE);
 732 
 733   // check aarch64-specific intrinsic use settings are compatible
 734 
 735   CHECK_FLAG(aarch64_use_, aarch64_useBlockZeroing, UseBlockZeroing);
 736   CHECK_FLAG(aarch64_use_, aarch64_useSIMDForBigIntegerShift, UseSIMDForBigIntegerShiftIntrinsics);
 737   CHECK_FLAG(aarch64_use_, aarch64_useSimpleArrayEquals, UseSimpleArrayEquals);
 738   CHECK_FLAG(aarch64_use_, aarch64_useSecondarySupersCache, UseSecondarySupersCache);
 739 #endif // defined(AARCH64) && !defined(ZERO)
 740 
 741 #if INCLUDE_JVMCI
 742   // change to EnableJVMCI will affect validity of adapters and
 743   // nmethods
 744   if (_enableJVMCI != (uint)EnableJVMCI) {
 745     log_debug(aot, codecache, init)("AOT Code Cache disabled: it was created with EnableJVMCI = %s vs current %s", (_enableJVMCI ? "true" : "false"), (EnableJVMCI ? "true" : "false"));
 746     return false;
 747   }
 748 #endif // INCLUDE_JVMCI
 749 
 750   // The following checks do not affect AOT adapters caching
 751 
 752   if (test_flag(compressedOops) != UseCompressedOops) {
 753     log_debug(aot, codecache, init)("AOT Code Cache disabled: it was created with UseCompressedOops = %s", UseCompressedOops ? "false" : "true");
 754     AOTStubCaching = false;
 755   }
 756   if (_compressedOopShift != (uint)CompressedOops::shift()) {
 757     log_debug(aot, codecache, init)("AOT Code Cache disabled: it was created with different CompressedOops::shift(): %d vs current %d", _compressedOopShift, CompressedOops::shift());
 758     AOTStubCaching = false;
 759   }
 760 
 761   // This should be the last check as it only disables AOTStubCaching
 762   if ((_compressedOopBase == nullptr || CompressedOops::base() == nullptr) && (_compressedOopBase != CompressedOops::base())) {
 763     log_debug(aot, codecache, init)("AOTStubCaching is disabled: incompatible CompressedOops::base(): %p vs current %p", _compressedOopBase, CompressedOops::base());
 764     AOTStubCaching = false;
 765   }
 766 
 767   return true;
 768 }
 769 
 770 #undef TEST_FLAG
 771 
// Sanity-check a cache header read from an AOT archive: the cache must have
// been written by the same AOT code format version, and the mapped region
// (load_size) must be large enough to hold the recorded cache contents.
bool AOTCodeCache::Header::verify(uint load_size) const {
  if (_version != AOT_CODE_VERSION) {
    log_debug(aot, codecache, init)("AOT Code Cache disabled: different AOT Code version %d vs %d recorded in AOT Code header", AOT_CODE_VERSION, _version);
    return false;
  }
  // The available region must cover the full recorded cache size.
  if (load_size < _cache_size) {
    log_debug(aot, codecache, init)("AOT Code Cache disabled: AOT Code Cache size %d < %d recorded in AOT Code header", load_size, _cache_size);
    return false;
  }
  return true;
}
 783 
 784 AOTCodeCache* AOTCodeCache::open_for_use() {
 785   if (AOTCodeCache::is_on_for_use()) {
 786     return AOTCodeCache::cache();
 787   }
 788   return nullptr;
 789 }
 790 
 791 AOTCodeCache* AOTCodeCache::open_for_dump() {
 792   if (AOTCodeCache::is_on_for_dump()) {
 793     AOTCodeCache* cache = AOTCodeCache::cache();
 794     cache->clear_lookup_failed(); // Reset bit
 795     return cache;
 796   }
 797   return nullptr;
 798 }
 799 
// Copy 'size' bytes from 'from' to 'to' with trace logging.
// The (int) cast in the assert rejects both a zero size and sizes
// large enough to overflow a signed int.
void copy_bytes(const char* from, address to, uint size) {
  assert((int)size > 0, "sanity");
  memcpy(to, from, size);
  log_trace(aot, codecache)("Copied %d bytes from " INTPTR_FORMAT " to " INTPTR_FORMAT, size, p2i(from), p2i(to));
}
 805 
// Reader over a single cached entry. The read position starts at the
// beginning of the load buffer; the section pointers below are filled in
// lazily while the entry is parsed (see compile_code_blob()).
AOTCodeReader::AOTCodeReader(AOTCodeCache* cache, AOTCodeEntry* entry) {
  _cache = cache;
  _entry = entry;
  _load_buffer = cache->cache_buffer();
  _read_position = 0;
  _lookup_failed = false;
  // Set during reading of the entry's sections.
  _name          = nullptr;
  _reloc_data    = nullptr;
  _oop_maps      = nullptr;
}
 816 
 817 void AOTCodeReader::set_read_position(uint pos) {
 818   if (pos == _read_position) {
 819     return;
 820   }
 821   assert(pos < _cache->load_size(), "offset:%d >= file size:%d", pos, _cache->load_size());
 822   _read_position = pos;
 823 }
 824 
// Move the write cursor to 'pos' (used e.g. to rewind after an abandoned
// partially-written entry). Always returns true; bounds are checked by
// assert only.
bool AOTCodeCache::set_write_position(uint pos) {
  if (pos == _write_position) {
    return true;
  }
  // Record the furthest position written so far before rewinding, so that
  // _store_size still covers all data ever written.
  if (_store_size < _write_position) {
    _store_size = _write_position; // Adjust during write
  }
  assert(pos < _store_size, "offset:%d >= file size:%d", pos, _store_size);
  _write_position = pos;
  return true;
}
 836 
 837 static char align_buffer[256] = { 0 };
 838 
 839 bool AOTCodeCache::align_write() {
 840   // We are not executing code from cache - we copy it by bytes first.
 841   // No need for big alignment (or at all).
 842   uint padding = DATA_ALIGNMENT - (_write_position & (DATA_ALIGNMENT - 1));
 843   if (padding == DATA_ALIGNMENT) {
 844     return true;
 845   }
 846   uint n = write_bytes((const void*)&align_buffer, padding);
 847   if (n != padding) {
 848     return false;
 849   }
 850   log_trace(aot, codecache)("Adjust write alignment in AOT Code Cache");
 851   return true;
 852 }
 853 
 854 // Check to see if AOT code cache has required space to store "nbytes" of data
// Check to see if AOT code cache has required space to store "nbytes" of data.
// On success returns the address of the reserved region and advances the
// write position; on overflow marks the cache failed and returns nullptr.
address AOTCodeCache::reserve_bytes(uint nbytes) {
  assert(for_dump(), "Code Cache file is not created");
  uint new_position = _write_position + nbytes;
  // AOTCodeEntry records are allocated from the opposite end of the store
  // buffer (see finish_write), so the write position must not run into them.
  if (new_position >= (uint)((char*)_store_entries - _store_buffer)) {
    log_warning(aot,codecache)("Failed to ensure %d bytes at offset %d in AOT Code Cache. Increase AOTCodeMaxSize.",
                               nbytes, _write_position);
    set_failed();
    report_store_failure();
    return nullptr;
  }
  address buffer = (address)(_store_buffer + _write_position);
  log_trace(aot, codecache)("Reserved %d bytes at offset %d in AOT Code Cache", nbytes, _write_position);
  _write_position += nbytes;
  if (_store_size < _write_position) {
    _store_size = _write_position; // track high-water mark
  }
  return buffer;
}
 873 
// Append 'nbytes' from 'buffer' at the current write position.
// Returns the number of bytes written: 'nbytes' on success, 0 on overflow
// (in which case the cache is marked failed).
uint AOTCodeCache::write_bytes(const void* buffer, uint nbytes) {
  assert(for_dump(), "Code Cache file is not created");
  if (nbytes == 0) {
    return 0;
  }
  uint new_position = _write_position + nbytes;
  // See reserve_bytes(): the store must not collide with the entries area.
  if (new_position >= (uint)((char*)_store_entries - _store_buffer)) {
    log_warning(aot, codecache)("Failed to write %d bytes at offset %d to AOT Code Cache. Increase AOTCodeMaxSize.",
                                nbytes, _write_position);
    set_failed();
    report_store_failure();
    return 0;
  }
  copy_bytes((const char* )buffer, (address)(_store_buffer + _write_position), nbytes);
  log_trace(aot, codecache)("Wrote %d bytes at offset %d to AOT Code Cache", nbytes, _write_position);
  _write_position += nbytes;
  if (_store_size < _write_position) {
    _store_size = _write_position; // track high-water mark
  }
  return nbytes;
}
 895 
// Placement allocation: AOTCodeEntry instances are carved out of the
// cache's entries area instead of the C heap.
void* AOTCodeEntry::operator new(size_t x, AOTCodeCache* cache) {
  return (void*)(cache->add_entry());
}
 899 
 900 static bool check_entry(AOTCodeEntry::Kind kind, uint id, AOTCodeEntry* entry) {
 901   if (entry->kind() == kind) {
 902     assert(entry->id() == id, "sanity");
 903     return true; // Found
 904   }
 905   return false;
 906 }
 907 
// Look up the AOTCodeEntry with the given (kind, id) in the loaded cache.
// The search table is an array of [id, index] pairs sorted by id; equal ids
// can occur, so after the binary search hits an id, neighbors with the same
// id are scanned linearly until one of the matching kind is found.
// Returns nullptr when no matching entry exists.
AOTCodeEntry* AOTCodeCache::find_entry(AOTCodeEntry::Kind kind, uint id) {
  assert(_for_use, "sanity");
  uint count = _load_header->entries_count();
  if (_load_entries == nullptr) {
    // First lookup: locate the search table and entries array in the image.
    _search_entries = (uint*)addr(_load_header->entries_offset()); // [id, index]
    _load_entries = (AOTCodeEntry*)(_search_entries + 2 * count);
    log_debug(aot, codecache, init)("Read %d entries table at offset %d from AOT Code Cache", count, _load_header->entries_offset());
  }
  // Binary search
  int l = 0;
  int h = count - 1;
  while (l <= h) {
    int mid = (l + h) >> 1;
    int ix = mid * 2;
    uint is = _search_entries[ix];
    if (is == id) {
      int index = _search_entries[ix + 1];
      AOTCodeEntry* entry = &(_load_entries[index]);
      if (check_entry(kind, id, entry)) {
        return entry; // Found
      }
      // Linear search around to handle id collision
      for (int i = mid - 1; i >= l; i--) { // search back
        ix = i * 2;
        is = _search_entries[ix];
        if (is != id) {
          break;
        }
        index = _search_entries[ix + 1];
        AOTCodeEntry* entry = &(_load_entries[index]);
        if (check_entry(kind, id, entry)) {
          return entry; // Found
        }
      }
      for (int i = mid + 1; i <= h; i++) { // search forward
        ix = i * 2;
        is = _search_entries[ix];
        if (is != id) {
          break;
        }
        index = _search_entries[ix + 1];
        AOTCodeEntry* entry = &(_load_entries[index]);
        if (check_entry(kind, id, entry)) {
          return entry; // Found
        }
      }
      break; // Not found match
    } else if (is < id) {
      l = mid + 1;
    } else {
      h = mid - 1;
    }
  }
  return nullptr;
}
 964 
 965 extern "C" {
 966   static int uint_cmp(const void *i, const void *j) {
 967     uint a = *(uint *)i;
 968     uint b = *(uint *)j;
 969     return a > b ? 1 : a < b ? -1 : 0;
 970   }
 971 }
 972 
// Record the CPU feature set at 'buffer': a uint size word followed by the
// feature data itself. 'buffer' is advanced past the data and aligned up to
// DATA_ALIGNMENT for the next section.
void AOTCodeCache::store_cpu_features(char*& buffer, uint buffer_size) {
  uint* size_ptr = (uint *)buffer;
  *size_ptr = buffer_size; // size word precedes the feature data
  buffer += sizeof(uint);

  VM_Version::store_cpu_features(buffer);
  log_debug(aot, codecache, exit)("CPU features recorded in AOTCodeCache: %s", VM_Version::features_string());
  buffer += buffer_size;
  buffer = align_up(buffer, DATA_ALIGNMENT);
}
 983 
 984 bool AOTCodeCache::finish_write() {
 985   if (!align_write()) {
 986     return false;
 987   }
 988   uint strings_offset = _write_position;
 989   int strings_count = store_strings();
 990   if (strings_count < 0) {
 991     return false;
 992   }
 993   if (!align_write()) {
 994     return false;
 995   }
 996   uint strings_size = _write_position - strings_offset;
 997 
 998   uint entries_count = 0; // Number of entrant (useful) code entries
 999   uint entries_offset = _write_position;
1000 
1001   uint store_count = _store_entries_cnt;
1002   if (store_count > 0) {
1003     uint header_size = (uint)align_up(sizeof(AOTCodeCache::Header), DATA_ALIGNMENT);
1004     uint code_count = store_count;
1005     uint search_count = code_count * 2;
1006     uint search_size = search_count * sizeof(uint);
1007     uint entries_size = (uint)align_up(code_count * sizeof(AOTCodeEntry), DATA_ALIGNMENT); // In bytes
1008     // _write_position includes size of code and strings
1009     uint code_alignment = code_count * DATA_ALIGNMENT; // We align_up code size when storing it.
1010     uint cpu_features_size = VM_Version::cpu_features_size();
1011     uint total_cpu_features_size = sizeof(uint) + cpu_features_size; // sizeof(uint) to store cpu_features_size
1012     uint total_size = header_size + _write_position + code_alignment + search_size + entries_size +
1013                       align_up(total_cpu_features_size, DATA_ALIGNMENT);
1014     assert(total_size < max_aot_code_size(), "AOT Code size (" UINT32_FORMAT " bytes) is greater than AOTCodeMaxSize(" UINT32_FORMAT " bytes).", total_size, max_aot_code_size());
1015 
1016     // Allocate in AOT Cache buffer
1017     char* buffer = (char *)AOTCacheAccess::allocate_aot_code_region(total_size + DATA_ALIGNMENT);
1018     char* start = align_up(buffer, DATA_ALIGNMENT);
1019     char* current = start + header_size; // Skip header
1020 
1021     uint cpu_features_offset = current - start;
1022     store_cpu_features(current, cpu_features_size);
1023     assert(is_aligned(current, DATA_ALIGNMENT), "sanity check");
1024     assert(current < start + total_size, "sanity check");
1025 
1026     // Create ordered search table for entries [id, index];
1027     uint* search = NEW_C_HEAP_ARRAY(uint, search_count, mtCode);
1028 
1029     AOTCodeEntry* entries_address = _store_entries; // Pointer to latest entry
1030     uint adapters_count = 0;
1031     uint shared_blobs_count = 0;
1032     uint C1_blobs_count = 0;
1033     uint C2_blobs_count = 0;
1034     uint max_size = 0;
1035     // AOTCodeEntry entries were allocated in reverse in store buffer.
1036     // Process them in reverse order to cache first code first.
1037     for (int i = store_count - 1; i >= 0; i--) {
1038       entries_address[i].set_next(nullptr); // clear pointers before storing data
1039       uint size = align_up(entries_address[i].size(), DATA_ALIGNMENT);
1040       if (size > max_size) {
1041         max_size = size;
1042       }
1043       copy_bytes((_store_buffer + entries_address[i].offset()), (address)current, size);
1044       entries_address[i].set_offset(current - start); // New offset
1045       current += size;
1046       uint n = write_bytes(&(entries_address[i]), sizeof(AOTCodeEntry));
1047       if (n != sizeof(AOTCodeEntry)) {
1048         FREE_C_HEAP_ARRAY(uint, search);
1049         return false;
1050       }
1051       search[entries_count*2 + 0] = entries_address[i].id();
1052       search[entries_count*2 + 1] = entries_count;
1053       entries_count++;
1054       AOTCodeEntry::Kind kind = entries_address[i].kind();
1055       if (kind == AOTCodeEntry::Adapter) {
1056         adapters_count++;
1057       } else if (kind == AOTCodeEntry::SharedBlob) {
1058         shared_blobs_count++;
1059       } else if (kind == AOTCodeEntry::C1Blob) {
1060         C1_blobs_count++;
1061       } else if (kind == AOTCodeEntry::C2Blob) {
1062         C2_blobs_count++;
1063       }
1064     }
1065     if (entries_count == 0) {
1066       log_info(aot, codecache, exit)("AOT Code Cache was not created: no entires");
1067       FREE_C_HEAP_ARRAY(uint, search);
1068       return true; // Nothing to write
1069     }
1070     assert(entries_count <= store_count, "%d > %d", entries_count, store_count);
1071     // Write strings
1072     if (strings_count > 0) {
1073       copy_bytes((_store_buffer + strings_offset), (address)current, strings_size);
1074       strings_offset = (current - start); // New offset
1075       current += strings_size;
1076     }
1077 
1078     uint new_entries_offset = (current - start); // New offset
1079     // Sort and store search table
1080     qsort(search, entries_count, 2*sizeof(uint), uint_cmp);
1081     search_size = 2 * entries_count * sizeof(uint);
1082     copy_bytes((const char*)search, (address)current, search_size);
1083     FREE_C_HEAP_ARRAY(uint, search);
1084     current += search_size;
1085 
1086     // Write entries
1087     entries_size = entries_count * sizeof(AOTCodeEntry); // New size
1088     copy_bytes((_store_buffer + entries_offset), (address)current, entries_size);
1089     current += entries_size;
1090     uint size = (current - start);
1091     assert(size <= total_size, "%d > %d", size , total_size);
1092 
1093     log_debug(aot, codecache, exit)("  Adapters:  total=%u", adapters_count);
1094     log_debug(aot, codecache, exit)("  Shared Blobs:  total=%d", shared_blobs_count);
1095     log_debug(aot, codecache, exit)("  C1 Blobs:      total=%d", C1_blobs_count);
1096     log_debug(aot, codecache, exit)("  C2 Blobs:      total=%d", C2_blobs_count);
1097     log_debug(aot, codecache, exit)("  AOT code cache size: %u bytes, max entry's size: %u bytes", size, max_size);
1098 
1099     // Finalize header
1100     AOTCodeCache::Header* header = (AOTCodeCache::Header*)start;
1101     header->init(size, (uint)strings_count, strings_offset,
1102                  entries_count, new_entries_offset,
1103                  adapters_count, shared_blobs_count,
1104                  C1_blobs_count, C2_blobs_count, cpu_features_offset);
1105 
1106     log_info(aot, codecache, exit)("Wrote %d AOT code entries to AOT Code Cache", entries_count);
1107   }
1108   return true;
1109 }
1110 
1111 //------------------Store/Load AOT code ----------------------
1112 
// Write a code blob to the AOT code cache: its name, the archived CodeBlob
// image, relocation data, oop maps, per-relocation address-table ids and
// (non-product) asm remarks / debug strings, then register an AOTCodeEntry
// describing the whole record. Returns false when the blob is not stored.
bool AOTCodeCache::store_code_blob(CodeBlob& blob, AOTCodeEntry::Kind entry_kind, uint id, const char* name) {
  AOTCodeCache* cache = open_for_dump();
  if (cache == nullptr) {
    return false;
  }
  assert(AOTCodeEntry::is_valid_entry_kind(entry_kind), "invalid entry_kind %d", entry_kind);

  // Only store entry kinds the current dump configuration asked for.
  if (AOTCodeEntry::is_adapter(entry_kind) && !is_dumping_adapter()) {
    return false;
  }
  if (AOTCodeEntry::is_blob(entry_kind) && !is_dumping_stub()) {
    return false;
  }
  log_debug(aot, codecache, stubs)("Writing blob '%s' (id=%u, kind=%s) to AOT Code Cache", name, id, aot_code_entry_kind_name[entry_kind]);

#ifdef ASSERT
  LogStreamHandle(Trace, aot, codecache, stubs) log;
  if (log.is_enabled()) {
    FlagSetting fs(PrintRelocations, true);
    blob.print_on(&log);
  }
#endif
  // we need to take a lock to prevent race between compiler threads generating AOT code
  // and the main thread generating adapter
  MutexLocker ml(Compile_lock);
  if (!is_on()) {
    return false; // AOT code cache was already dumped and closed.
  }
  if (!cache->align_write()) {
    return false;
  }
  uint entry_position = cache->_write_position;

  // Write name
  uint name_offset = cache->_write_position - entry_position;
  uint name_size = (uint)strlen(name) + 1; // Includes terminating '\0'
  uint n = cache->write_bytes(name, name_size);
  if (n != name_size) {
    return false;
  }

  // Write CodeBlob
  if (!cache->align_write()) {
    return false;
  }
  uint blob_offset = cache->_write_position - entry_position;
  address archive_buffer = cache->reserve_bytes(blob.size());
  if (archive_buffer == nullptr) {
    return false;
  }
  CodeBlob::archive_blob(&blob, archive_buffer);

  // Raw relocation data follows the archived blob image.
  uint reloc_data_size = blob.relocation_size();
  n = cache->write_bytes((address)blob.relocation_begin(), reloc_data_size);
  if (n != reloc_data_size) {
    return false;
  }

  bool has_oop_maps = false;
  if (blob.oop_maps() != nullptr) {
    if (!cache->write_oop_map_set(blob)) {
      return false;
    }
    has_oop_maps = true;
  }

  if (!cache->write_relocations(blob)) {
    if (!cache->failed()) {
      // We may miss an address in AOT table - skip this code blob.
      cache->set_write_position(entry_position);
    }
    return false;
  }

#ifndef PRODUCT
  // Write asm remarks after relocation info
  if (!cache->write_asm_remarks(blob)) {
    return false;
  }
  if (!cache->write_dbg_strings(blob)) {
    return false;
  }
#endif /* PRODUCT */

  // Register the finished record; offsets above are relative to entry_position.
  uint entry_size = cache->_write_position - entry_position;
  AOTCodeEntry* entry = new(cache) AOTCodeEntry(entry_kind, encode_id(entry_kind, id),
                                                entry_position, entry_size, name_offset, name_size,
                                                blob_offset, has_oop_maps, blob.content_begin());
  log_debug(aot, codecache, stubs)("Wrote code blob '%s' (id=%u, kind=%s) to AOT Code Cache", name, id, aot_code_entry_kind_name[entry_kind]);
  return true;
}
1204 
1205 bool AOTCodeCache::store_code_blob(CodeBlob& blob, AOTCodeEntry::Kind entry_kind, BlobId id) {
1206   assert(AOTCodeEntry::is_blob(entry_kind),
1207          "wrong entry kind for blob id %s", StubInfo::name(id));
1208   return store_code_blob(blob, entry_kind, (uint)id, StubInfo::name(id));
1209 }
1210 
// Load a code blob previously stored with store_code_blob(): find its entry
// by (kind, encoded id) and materialize a CodeBlob in the code cache.
// Returns nullptr when loading is disabled, the entry is missing, or the
// blob cannot be materialized.
CodeBlob* AOTCodeCache::load_code_blob(AOTCodeEntry::Kind entry_kind, uint id, const char* name) {
  AOTCodeCache* cache = open_for_use();
  if (cache == nullptr) {
    return nullptr;
  }
  assert(AOTCodeEntry::is_valid_entry_kind(entry_kind), "invalid entry_kind %d", entry_kind);

  // Only load entry kinds the current configuration asked for.
  if (AOTCodeEntry::is_adapter(entry_kind) && !is_using_adapter()) {
    return nullptr;
  }
  if (AOTCodeEntry::is_blob(entry_kind) && !is_using_stub()) {
    return nullptr;
  }
  log_debug(aot, codecache, stubs)("Reading blob '%s' (id=%u, kind=%s) from AOT Code Cache", name, id, aot_code_entry_kind_name[entry_kind]);

  AOTCodeEntry* entry = cache->find_entry(entry_kind, encode_id(entry_kind, id));
  if (entry == nullptr) {
    return nullptr;
  }
  AOTCodeReader reader(cache, entry);
  CodeBlob* blob = reader.compile_code_blob(name);

  log_debug(aot, codecache, stubs)("%sRead blob '%s' (id=%u, kind=%s) from AOT Code Cache",
                                   (blob == nullptr? "Failed to " : ""), name, id, aot_code_entry_kind_name[entry_kind]);
  return blob;
}
1237 
1238 CodeBlob* AOTCodeCache::load_code_blob(AOTCodeEntry::Kind entry_kind, BlobId id) {
1239   assert(AOTCodeEntry::is_blob(entry_kind),
1240          "wrong entry kind for blob id %s", StubInfo::name(id));
1241   return load_code_blob(entry_kind, (uint)id, StubInfo::name(id));
1242 }
1243 
// Materialize the reader's entry as a live CodeBlob: verify the stored name,
// locate the archived blob image, relocation data and oop maps within the
// entry record (same order store_code_blob() wrote them), then let
// CodeBlob::create() allocate in the code cache and call back into restore().
// Returns nullptr on name mismatch or when the code cache is full.
CodeBlob* AOTCodeReader::compile_code_blob(const char* name) {
  uint entry_position = _entry->offset();

  // Read name
  uint name_offset = entry_position + _entry->name_offset();
  uint name_size = _entry->name_size(); // Includes terminating '\0'
  const char* stored_name = addr(name_offset);

  if (strncmp(stored_name, name, (name_size - 1)) != 0) {
    log_warning(aot, codecache, stubs)("Saved blob's name '%s' is different from the expected name '%s'",
                                       stored_name, name);
    set_lookup_failed(); // Skip this blob
    return nullptr;
  }
  _name = stored_name;

  // Read archived code blob
  uint offset = entry_position + _entry->blob_offset();
  CodeBlob* archived_blob = (CodeBlob*)addr(offset);
  offset += archived_blob->size();

  // Raw relocation data immediately follows the archived blob image.
  _reloc_data = (address)addr(offset);
  offset += archived_blob->relocation_size();
  set_read_position(offset);

  if (_entry->has_oop_maps()) {
    _oop_maps = read_oop_map_set();
  }

  // CodeBlob::restore() calls AOTCodeReader::restore()
  CodeBlob* code_blob = CodeBlob::create(archived_blob, this);

  if (code_blob == nullptr) { // no space left in CodeCache
    return nullptr;
  }

#ifdef ASSERT
  LogStreamHandle(Trace, aot, codecache, stubs) log;
  if (log.is_enabled()) {
    FlagSetting fs(PrintRelocations, true);
    code_blob->print_on(&log);
  }
#endif
  return code_blob;
}
1289 
// Callback from CodeBlob::restore(): attach the data gathered by
// compile_code_blob() (name, relocation data, oop maps) to the freshly
// allocated blob and re-resolve its relocations for the current process.
void AOTCodeReader::restore(CodeBlob* code_blob) {
  precond(AOTCodeCache::is_on_for_use());
  precond(_name != nullptr);
  precond(_reloc_data != nullptr);

  code_blob->set_name(_name);
  code_blob->restore_mutable_data(_reloc_data);
  code_blob->set_oop_maps(_oop_maps);

  // Patch call/word targets recorded at dump time (see fix_relocations()).
  fix_relocations(code_blob);

#ifndef PRODUCT
  code_blob->asm_remarks().init();
  read_asm_remarks(code_blob->asm_remarks());
  code_blob->dbg_strings().init();
  read_dbg_strings(code_blob->dbg_strings());
#endif // PRODUCT
}
1308 
1309 // ------------ process code and data --------------
1310 
// Can't use -1. It is a valid value for a jump-to-itself destination
// used by the static call stub: see NativeJump::jump_destination().
1313 #define BAD_ADDRESS_ID -2
1314 
1315 bool AOTCodeCache::write_relocations(CodeBlob& code_blob) {
1316   GrowableArray<uint> reloc_data;
1317   RelocIterator iter(&code_blob);
1318   LogStreamHandle(Trace, aot, codecache, reloc) log;
1319   while (iter.next()) {
1320     int idx = reloc_data.append(0); // default value
1321     switch (iter.type()) {
1322       case relocInfo::none:
1323         break;
1324       case relocInfo::runtime_call_type: {
1325         // Record offset of runtime destination
1326         CallRelocation* r = (CallRelocation*)iter.reloc();
1327         address dest = r->destination();
1328         if (dest == r->addr()) { // possible call via trampoline on Aarch64
1329           dest = (address)-1;    // do nothing in this case when loading this relocation
1330         }
1331         int id = _table->id_for_address(dest, iter, &code_blob);
1332         if (id == BAD_ADDRESS_ID) {
1333           return false;
1334         }
1335         reloc_data.at_put(idx, id);
1336         break;
1337       }
1338       case relocInfo::runtime_call_w_cp_type:
1339         log_debug(aot, codecache, reloc)("runtime_call_w_cp_type relocation is not implemented");
1340         return false;
1341       case relocInfo::external_word_type: {
1342         // Record offset of runtime target
1343         address target = ((external_word_Relocation*)iter.reloc())->target();
1344         int id = _table->id_for_address(target, iter, &code_blob);
1345         if (id == BAD_ADDRESS_ID) {
1346           return false;
1347         }
1348         reloc_data.at_put(idx, id);
1349         break;
1350       }
1351       case relocInfo::internal_word_type:
1352         break;
1353       case relocInfo::section_word_type:
1354         break;
1355       case relocInfo::post_call_nop_type:
1356         break;
1357       default:
1358         log_debug(aot, codecache, reloc)("relocation %d unimplemented", (int)iter.type());
1359         return false;
1360         break;
1361     }
1362     if (log.is_enabled()) {
1363       iter.print_current_on(&log);
1364     }
1365   }
1366 
1367   // Write additional relocation data: uint per relocation
1368   // Write the count first
1369   int count = reloc_data.length();
1370   write_bytes(&count, sizeof(int));
1371   for (GrowableArrayIterator<uint> iter = reloc_data.begin();
1372        iter != reloc_data.end(); ++iter) {
1373     uint value = *iter;
1374     int n = write_bytes(&value, sizeof(uint));
1375     if (n != sizeof(uint)) {
1376       return false;
1377     }
1378   }
1379   return true;
1380 }
1381 
// Re-resolve the relocations of a freshly loaded code blob using the
// per-relocation data written by write_relocations(): an int count followed
// by one uint per relocation. The iteration order must match the dump-time
// RelocIterator walk, which the trailing assert verifies.
void AOTCodeReader::fix_relocations(CodeBlob* code_blob) {
  LogStreamHandle(Trace, aot, reloc) log;
  uint offset = read_position();
  int count = *(int*)addr(offset);
  offset += sizeof(int);
  if (log.is_enabled()) {
    log.print_cr("======== extra relocations count=%d", count);
  }
  uint* reloc_data = (uint*)addr(offset);
  offset += (count * sizeof(uint));
  set_read_position(offset);

  RelocIterator iter(code_blob);
  int j = 0;
  while (iter.next()) {
    switch (iter.type()) {
      case relocInfo::none:
        break;
      case relocInfo::runtime_call_type: {
        // Map the recorded address-table id back to the current process.
        address dest = _cache->address_for_id(reloc_data[j]);
        if (dest != (address)-1) { // -1 was recorded for a self-referencing call
          ((CallRelocation*)iter.reloc())->set_destination(dest);
        }
        break;
      }
      case relocInfo::runtime_call_w_cp_type:
        // this relocation should not be in cache (see write_relocations)
        assert(false, "runtime_call_w_cp_type relocation is not implemented");
        break;
      case relocInfo::external_word_type: {
        address target = _cache->address_for_id(reloc_data[j]);
        // Add external address to global table
        int index = ExternalsRecorder::find_index(target);
        // Update index in relocation
        Relocation::add_jint(iter.data(), index);
        external_word_Relocation* reloc = (external_word_Relocation*)iter.reloc();
        assert(reloc->target() == target, "sanity");
        reloc->set_value(target); // Patch address in the code
        break;
      }
      case relocInfo::internal_word_type: {
        // Rebase against the blob's new content start.
        internal_word_Relocation* r = (internal_word_Relocation*)iter.reloc();
        r->fix_relocation_after_aot_load(aot_code_entry()->dumptime_content_start_addr(), code_blob->content_begin());
        break;
      }
      case relocInfo::section_word_type: {
        section_word_Relocation* r = (section_word_Relocation*)iter.reloc();
        r->fix_relocation_after_aot_load(aot_code_entry()->dumptime_content_start_addr(), code_blob->content_begin());
        break;
      }
      case relocInfo::post_call_nop_type:
        break;
      default:
        assert(false,"relocation %d unimplemented", (int)iter.type());
        break;
    }
    if (log.is_enabled()) {
      iter.print_current_on(&log);
    }
    j++;
  }
  assert(j == count, "sanity");
}
1445 
1446 bool AOTCodeCache::write_oop_map_set(CodeBlob& cb) {
1447   ImmutableOopMapSet* oopmaps = cb.oop_maps();
1448   int oopmaps_size = oopmaps->nr_of_bytes();
1449   if (!write_bytes(&oopmaps_size, sizeof(int))) {
1450     return false;
1451   }
1452   uint n = write_bytes(oopmaps, oopmaps->nr_of_bytes());
1453   if (n != (uint)oopmaps->nr_of_bytes()) {
1454     return false;
1455   }
1456   return true;
1457 }
1458 
1459 ImmutableOopMapSet* AOTCodeReader::read_oop_map_set() {
1460   uint offset = read_position();
1461   int size = *(int *)addr(offset);
1462   offset += sizeof(int);
1463   ImmutableOopMapSet* oopmaps = (ImmutableOopMapSet *)addr(offset);
1464   offset += size;
1465   set_read_position(offset);
1466   return oopmaps;
1467 }
1468 
1469 #ifndef PRODUCT
// Write the blob's asm remarks: a uint count (reserved up front, patched in
// at the end) followed by (code offset, string id) pairs. Remark strings are
// interned via the AOT address table so only their ids are stored.
bool AOTCodeCache::write_asm_remarks(CodeBlob& cb) {
  // Write asm remarks
  uint* count_ptr = (uint *)reserve_bytes(sizeof(uint));
  if (count_ptr == nullptr) {
    return false;
  }
  uint count = 0;
  bool result = cb.asm_remarks().iterate([&] (uint offset, const char* str) -> bool {
    log_trace(aot, codecache, stubs)("asm remark offset=%d, str='%s'", offset, str);
    uint n = write_bytes(&offset, sizeof(uint));
    if (n != sizeof(uint)) {
      return false; // stop iterating; 'result' becomes false
    }
    // Intern the string and store its table id instead of the text.
    const char* cstr = add_C_string(str);
    int id = _table->id_for_C_string((address)cstr);
    assert(id != -1, "asm remark string '%s' not found in AOTCodeAddressTable", str);
    n = write_bytes(&id, sizeof(int));
    if (n != sizeof(int)) {
      return false;
    }
    count += 1;
    return true;
  });
  // Patch the actual number of pairs into the reserved slot.
  *count_ptr = count;
  return result;
}
1496 
1497 void AOTCodeReader::read_asm_remarks(AsmRemarks& asm_remarks) {
1498   // Read asm remarks
1499   uint offset = read_position();
1500   uint count = *(uint *)addr(offset);
1501   offset += sizeof(uint);
1502   for (uint i = 0; i < count; i++) {
1503     uint remark_offset = *(uint *)addr(offset);
1504     offset += sizeof(uint);
1505     int remark_string_id = *(uint *)addr(offset);
1506     offset += sizeof(int);
1507     const char* remark = (const char*)_cache->address_for_C_string(remark_string_id);
1508     asm_remarks.insert(remark_offset, remark);
1509   }
1510   set_read_position(offset);
1511 }
1512 
1513 bool AOTCodeCache::write_dbg_strings(CodeBlob& cb) {
1514   // Write dbg strings
1515   uint* count_ptr = (uint *)reserve_bytes(sizeof(uint));
1516   if (count_ptr == nullptr) {
1517     return false;
1518   }
1519   uint count = 0;
1520   bool result = cb.dbg_strings().iterate([&] (const char* str) -> bool {
1521     log_trace(aot, codecache, stubs)("dbg string=%s", str);
1522     const char* cstr = add_C_string(str);
1523     int id = _table->id_for_C_string((address)cstr);
1524     assert(id != -1, "db string '%s' not found in AOTCodeAddressTable", str);
1525     uint n = write_bytes(&id, sizeof(int));
1526     if (n != sizeof(int)) {
1527       return false;
1528     }
1529     count += 1;
1530     return true;
1531   });
1532   *count_ptr = count;
1533   return result;
1534 }
1535 
1536 void AOTCodeReader::read_dbg_strings(DbgStrings& dbg_strings) {
1537   // Read dbg strings
1538   uint offset = read_position();
1539   uint count = *(uint *)addr(offset);
1540   offset += sizeof(uint);
1541   for (uint i = 0; i < count; i++) {
1542     int string_id = *(uint *)addr(offset);
1543     offset += sizeof(int);
1544     const char* str = (const char*)_cache->address_for_C_string(string_id);
1545     dbg_strings.insert(str);
1546   }
1547   set_read_position(offset);
1548 }
1549 #endif // PRODUCT
1550 
1551 //======================= AOTCodeAddressTable ===============
1552 
1553 // address table ids for generated routines, external addresses and C
1554 // string addresses are partitioned into positive integer ranges
1555 // defined by the following positive base and max values
1556 // i.e. [_extrs_base, _extrs_base + _extrs_max -1],
1557 //      [_blobs_base, _blobs_base + _blobs_max -1],
1558 //      ...
1559 //      [_c_str_base, _c_str_base + _c_str_max -1],
1560 
1561 #define _extrs_max 100
1562 #define _stubs_max 3
1563 
1564 #define _shared_blobs_max 20
1565 #define _C1_blobs_max 10
1566 #define _blobs_max (_shared_blobs_max+_C1_blobs_max)
1567 #define _all_max (_extrs_max+_stubs_max+_blobs_max)
1568 
1569 #define _extrs_base 0
1570 #define _stubs_base (_extrs_base + _extrs_max)
1571 #define _shared_blobs_base (_stubs_base + _stubs_max)
1572 #define _C1_blobs_base (_shared_blobs_base + _shared_blobs_max)
1573 #define _blobs_end  (_shared_blobs_base + _blobs_max)
1574 
1575 #define SET_ADDRESS(type, addr)                           \
1576   {                                                       \
1577     type##_addr[type##_length++] = (address) (addr);      \
1578     assert(type##_length <= type##_max, "increase size"); \
1579   }
1580 
// Guards against re-entry while the external-address table is being built.
static bool initializing_extrs = false;

// Record the addresses of VM runtime entry points ("external" addresses)
// that AOT-compiled code may reference, so relocations can be encoded as
// stable table ids instead of raw pointers. Idempotent: returns immediately
// once complete or while another initialization is in progress.
void AOTCodeAddressTable::init_extrs() {
  if (_extrs_complete || initializing_extrs) return; // Done already

  assert(_blobs_end <= _all_max, "AOTCodeAddress table ranges need adjusting");

  initializing_extrs = true;
  _extrs_addr = NEW_C_HEAP_ARRAY(address, _extrs_max, mtCode);

  _extrs_length = 0;

  // Record addresses of VM runtime methods
  SET_ADDRESS(_extrs, SharedRuntime::fixup_callers_callsite);
  SET_ADDRESS(_extrs, SharedRuntime::handle_wrong_method);
  SET_ADDRESS(_extrs, SharedRuntime::handle_wrong_method_abstract);
  SET_ADDRESS(_extrs, SharedRuntime::handle_wrong_method_ic_miss);
#if defined(AARCH64) && !defined(ZERO)
  SET_ADDRESS(_extrs, JavaThread::aarch64_get_thread_helper);
#endif
  {
    // Required by Shared blobs
    SET_ADDRESS(_extrs, Deoptimization::fetch_unroll_info);
    SET_ADDRESS(_extrs, Deoptimization::unpack_frames);
    SET_ADDRESS(_extrs, SafepointSynchronize::handle_polling_page_exception);
    SET_ADDRESS(_extrs, SharedRuntime::resolve_opt_virtual_call_C);
    SET_ADDRESS(_extrs, SharedRuntime::resolve_virtual_call_C);
    SET_ADDRESS(_extrs, SharedRuntime::resolve_static_call_C);
    SET_ADDRESS(_extrs, SharedRuntime::throw_StackOverflowError);
    SET_ADDRESS(_extrs, SharedRuntime::throw_delayed_StackOverflowError);
    SET_ADDRESS(_extrs, SharedRuntime::throw_AbstractMethodError);
    SET_ADDRESS(_extrs, SharedRuntime::throw_IncompatibleClassChangeError);
    SET_ADDRESS(_extrs, SharedRuntime::throw_NullPointerException_at_call);
  }

#ifdef COMPILER1
  {
    // Required by C1 blobs
    SET_ADDRESS(_extrs, static_cast<int (*)(oopDesc*)>(SharedRuntime::dtrace_object_alloc));
    SET_ADDRESS(_extrs, SharedRuntime::exception_handler_for_return_address);
    SET_ADDRESS(_extrs, SharedRuntime::register_finalizer);
    SET_ADDRESS(_extrs, Runtime1::is_instance_of);
    SET_ADDRESS(_extrs, Runtime1::exception_handler_for_pc);
    SET_ADDRESS(_extrs, Runtime1::check_abort_on_vm_exception);
    SET_ADDRESS(_extrs, Runtime1::new_instance);
    SET_ADDRESS(_extrs, Runtime1::counter_overflow);
    SET_ADDRESS(_extrs, Runtime1::new_type_array);
    SET_ADDRESS(_extrs, Runtime1::new_object_array);
    SET_ADDRESS(_extrs, Runtime1::new_multi_array);
    SET_ADDRESS(_extrs, Runtime1::throw_range_check_exception);
    SET_ADDRESS(_extrs, Runtime1::throw_index_exception);
    SET_ADDRESS(_extrs, Runtime1::throw_div0_exception);
    SET_ADDRESS(_extrs, Runtime1::throw_null_pointer_exception);
    SET_ADDRESS(_extrs, Runtime1::throw_array_store_exception);
    SET_ADDRESS(_extrs, Runtime1::throw_class_cast_exception);
    SET_ADDRESS(_extrs, Runtime1::throw_incompatible_class_change_error);
    // NOTE(review): Runtime1::is_instance_of was already recorded above in this
    // block; the duplicate consumes an extra _extrs slot. Removing it would
    // shift the ids of every subsequent entry, so it is only flagged here.
    SET_ADDRESS(_extrs, Runtime1::is_instance_of);
    SET_ADDRESS(_extrs, Runtime1::monitorenter);
    SET_ADDRESS(_extrs, Runtime1::monitorexit);
    SET_ADDRESS(_extrs, Runtime1::deoptimize);
    SET_ADDRESS(_extrs, Runtime1::access_field_patching);
    SET_ADDRESS(_extrs, Runtime1::move_klass_patching);
    SET_ADDRESS(_extrs, Runtime1::move_mirror_patching);
    SET_ADDRESS(_extrs, Runtime1::move_appendix_patching);
    SET_ADDRESS(_extrs, Runtime1::predicate_failed_trap);
    SET_ADDRESS(_extrs, Runtime1::unimplemented_entry);
    SET_ADDRESS(_extrs, Thread::current);
    SET_ADDRESS(_extrs, CompressedKlassPointers::base_addr());
#ifndef PRODUCT
    SET_ADDRESS(_extrs, os::breakpoint);
#endif
  }
#endif

#ifdef COMPILER2
  {
    // Required by C2 blobs
    SET_ADDRESS(_extrs, Deoptimization::uncommon_trap);
    SET_ADDRESS(_extrs, OptoRuntime::handle_exception_C);
    SET_ADDRESS(_extrs, OptoRuntime::new_instance_C);
    SET_ADDRESS(_extrs, OptoRuntime::new_array_C);
    SET_ADDRESS(_extrs, OptoRuntime::new_array_nozero_C);
    SET_ADDRESS(_extrs, OptoRuntime::multianewarray2_C);
    SET_ADDRESS(_extrs, OptoRuntime::multianewarray3_C);
    SET_ADDRESS(_extrs, OptoRuntime::multianewarray4_C);
    SET_ADDRESS(_extrs, OptoRuntime::multianewarray5_C);
    SET_ADDRESS(_extrs, OptoRuntime::multianewarrayN_C);
    SET_ADDRESS(_extrs, OptoRuntime::complete_monitor_locking_C);
    SET_ADDRESS(_extrs, OptoRuntime::monitor_notify_C);
    SET_ADDRESS(_extrs, OptoRuntime::monitor_notifyAll_C);
    SET_ADDRESS(_extrs, OptoRuntime::rethrow_C);
    SET_ADDRESS(_extrs, OptoRuntime::slow_arraycopy_C);
    SET_ADDRESS(_extrs, OptoRuntime::register_finalizer_C);
    SET_ADDRESS(_extrs, OptoRuntime::vthread_end_first_transition_C);
    SET_ADDRESS(_extrs, OptoRuntime::vthread_start_final_transition_C);
    SET_ADDRESS(_extrs, OptoRuntime::vthread_start_transition_C);
    SET_ADDRESS(_extrs, OptoRuntime::vthread_end_transition_C);
#if defined(AARCH64)
    SET_ADDRESS(_extrs, JavaThread::verify_cross_modify_fence_failure);
#endif // AARCH64
  }
#endif // COMPILER2

  // GC barrier runtime entries, per configured collector.
#if INCLUDE_G1GC
  SET_ADDRESS(_extrs, G1BarrierSetRuntime::write_ref_field_pre_entry);
#endif
#if INCLUDE_SHENANDOAHGC
  SET_ADDRESS(_extrs, ShenandoahRuntime::write_barrier_pre);
  SET_ADDRESS(_extrs, ShenandoahRuntime::load_reference_barrier_phantom);
  SET_ADDRESS(_extrs, ShenandoahRuntime::load_reference_barrier_phantom_narrow);
#endif
#if INCLUDE_ZGC
  SET_ADDRESS(_extrs, ZBarrierSetRuntime::load_barrier_on_oop_field_preloaded_addr());
  SET_ADDRESS(_extrs, ZBarrierSetRuntime::load_barrier_on_phantom_oop_field_preloaded_addr());
#if defined(AMD64)
  SET_ADDRESS(_extrs, &ZPointerLoadShift);
#endif
#endif
#ifndef ZERO
#if defined(AMD64) || defined(AARCH64) || defined(RISCV64)
  SET_ADDRESS(_extrs, MacroAssembler::debug64);
#endif
#endif // ZERO

  // addresses of fields in AOT runtime constants area
  address* p = AOTRuntimeConstants::field_addresses_list();
  while (*p != nullptr) {
    SET_ADDRESS(_extrs, *p++);
  }

  _extrs_complete = true;
  log_debug(aot, codecache, init)("External addresses recorded");
}
1714 
// Guards against re-entry while the early stubs table is being built.
static bool initializing_early_stubs = false;

// Record the addresses of stubs generated early in VM startup that AOT code
// may reference. Skipped entirely once the whole table is complete.
void AOTCodeAddressTable::init_early_stubs() {
  if (_complete || initializing_early_stubs) return; // Done already
  initializing_early_stubs = true;
  _stubs_addr = NEW_C_HEAP_ARRAY(address, _stubs_max, mtCode);
  _stubs_length = 0;
  SET_ADDRESS(_stubs, StubRoutines::forward_exception_entry());

  {
    // Required by C1 blobs
#if defined(AMD64) && !defined(ZERO)
    SET_ADDRESS(_stubs, StubRoutines::x86::double_sign_flip());
    SET_ADDRESS(_stubs, StubRoutines::x86::d2l_fixup());
#endif // AMD64
  }

  _early_stubs_complete = true;
  log_info(aot, codecache, init)("Early stubs recorded");
}
1735 
// Guards against re-entry while the shared-blobs table is being built.
static bool initializing_shared_blobs = false;

// Record the entry points of shared runtime code blobs (wrong-method stubs,
// deopt blob entries, ...) that AOT code may reference. Marks the whole
// address table complete at the end.
void AOTCodeAddressTable::init_shared_blobs() {
  if (_complete || initializing_shared_blobs) return; // Done already
  initializing_shared_blobs = true;
  address* blobs_addr = NEW_C_HEAP_ARRAY(address, _blobs_max, mtCode);

  // Divide _shared_blobs_addr array to chunks because they could be initialized in parallel
  _shared_blobs_addr = blobs_addr;
  _C1_blobs_addr = _shared_blobs_addr + _shared_blobs_max;

  _shared_blobs_length = 0;
  _C1_blobs_length = 0;

  // clear the address table
  memset(blobs_addr, 0, sizeof(address)* _blobs_max);

  // Record addresses of generated code blobs
  SET_ADDRESS(_shared_blobs, SharedRuntime::get_handle_wrong_method_stub());
  SET_ADDRESS(_shared_blobs, SharedRuntime::get_ic_miss_stub());
  SET_ADDRESS(_shared_blobs, SharedRuntime::deopt_blob()->unpack());
  SET_ADDRESS(_shared_blobs, SharedRuntime::deopt_blob()->unpack_with_exception());
  SET_ADDRESS(_shared_blobs, SharedRuntime::deopt_blob()->unpack_with_reexecution());
  SET_ADDRESS(_shared_blobs, SharedRuntime::deopt_blob()->unpack_with_exception_in_tls());
#if INCLUDE_JVMCI
  if (EnableJVMCI) {
    // These deopt blob entries only exist when JVMCI is enabled.
    SET_ADDRESS(_shared_blobs, SharedRuntime::deopt_blob()->uncommon_trap());
    SET_ADDRESS(_shared_blobs, SharedRuntime::deopt_blob()->implicit_exception_uncommon_trap());
  }
#endif

  _shared_blobs_complete = true;
  log_debug(aot, codecache, init)("Early shared blobs recorded");
  _complete = true;
}
1771 
// Record the entry points of the C1 (Runtime1) stubs generated early in
// startup, up to and including c1_forward_exception_id. Blobs or entries
// that are missing are logged and skipped rather than treated as fatal.
void AOTCodeAddressTable::init_early_c1() {
#ifdef COMPILER1
  // Runtime1 Blobs
  StubId id = StubInfo::stub_base(StubGroup::C1);
  // include forward_exception in range we publish
  StubId limit = StubInfo::next(StubId::c1_forward_exception_id);
  for (; id != limit; id = StubInfo::next(id)) {
    if (Runtime1::blob_for(id) == nullptr) {
      log_info(aot, codecache, init)("C1 blob %s is missing", Runtime1::name_for(id));
      continue;
    }
    if (Runtime1::entry_for(id) == nullptr) {
      log_info(aot, codecache, init)("C1 blob %s is missing entry", Runtime1::name_for(id));
      continue;
    }
    address entry = Runtime1::entry_for(id);
    SET_ADDRESS(_C1_blobs, entry);
  }
#endif // COMPILER1
  assert(_C1_blobs_length <= _C1_blobs_max, "increase _C1_blobs_max to %d", _C1_blobs_length);
  _early_c1_complete = true;
}
1794 
1795 #undef SET_ADDRESS
1796 
#ifdef PRODUCT
#define MAX_STR_COUNT 200
#else
#define MAX_STR_COUNT 500
#endif
#define _c_str_max  MAX_STR_COUNT
// C string ids occupy the id range directly above the address-table ranges.
static const int _c_str_base = _all_max;

static const char* _C_strings_in[MAX_STR_COUNT] = {nullptr}; // Incoming strings
static const char* _C_strings[MAX_STR_COUNT]    = {nullptr}; // Our duplicates
static int _C_strings_count = 0;         // Number of distinct strings interned
static int _C_strings_s[MAX_STR_COUNT] = {0}; // id -> index into _C_strings
static int _C_strings_id[MAX_STR_COUNT] = {0}; // index -> id (-1 until assigned; see add_C_string/id_for_C_string)
static int _C_strings_used = 0;          // Number of strings with assigned ids (i.e. actually referenced)
1811 
// Load the interned C strings from the cache header region into a C-heap
// copy and rebuild the id/index tables so ids resolve to the same strings
// that were recorded at dump time.
void AOTCodeCache::load_strings() {
  uint strings_count  = _load_header->strings_count();
  if (strings_count == 0) {
    return;
  }
  uint strings_offset = _load_header->strings_offset();
  // The region starts with an array of per-string lengths, followed by the
  // NUL-terminated string bytes themselves.
  uint* string_lengths = (uint*)addr(strings_offset);
  strings_offset += (strings_count * sizeof(uint));
  uint strings_size = _load_header->entries_offset() - strings_offset;
  // We have to keep cached strings longer than _cache buffer
  // because they are referenced from compiled code which may
  // still be executed on VM exit after _cache is freed.
  char* p = NEW_C_HEAP_ARRAY(char, strings_size+1, mtCode);
  memcpy(p, addr(strings_offset), strings_size);
  _C_strings_buf = p;
  assert(strings_count <= MAX_STR_COUNT, "sanity");
  for (uint i = 0; i < strings_count; i++) {
    _C_strings[i] = p;
    uint len = string_lengths[i];
    // At load time id == index: both mapping tables are the identity.
    _C_strings_s[i] = i;
    _C_strings_id[i] = i;
    p += len;
  }
  assert((uint)(p - _C_strings_buf) <= strings_size, "(" INTPTR_FORMAT " - " INTPTR_FORMAT ") = %d > %d ", p2i(p), p2i(_C_strings_buf), (uint)(p - _C_strings_buf), strings_size);
  _C_strings_count = strings_count;
  _C_strings_used  = strings_count;
  log_debug(aot, codecache, init)("  Loaded %d C strings of total length %d at offset %d from AOT Code Cache", _C_strings_count, strings_size, strings_offset);
}
1840 
// Write the referenced (id-assigned) C strings to the cache: an array of
// per-string lengths followed by the NUL-terminated string bytes, in id
// order. Returns the number of strings written, or -1 on write failure.
int AOTCodeCache::store_strings() {
  if (_C_strings_used > 0) {
    MutexLocker ml(AOTCodeCStrings_lock, Mutex::_no_safepoint_check_flag);
    uint offset = _write_position;
    uint length = 0;
    uint* lengths = (uint *)reserve_bytes(sizeof(uint) * _C_strings_used);
    if (lengths == nullptr) {
      return -1;
    }
    for (int i = 0; i < _C_strings_used; i++) {
      // _C_strings_s maps id -> index, so iteration is in id order.
      const char* str = _C_strings[_C_strings_s[i]];
      uint len = (uint)strlen(str) + 1; // include the NUL terminator
      length += len;
      assert(len < 1000, "big string: %s", str);
      // NOTE(review): writing through 'lengths' after subsequent write_bytes
      // calls assumes the write buffer is never relocated — confirm against
      // the cache buffer implementation.
      lengths[i] = len;
      uint n = write_bytes(str, len);
      if (n != len) {
        return -1;
      }
    }
    log_debug(aot, codecache, exit)("  Wrote %d C strings of total length %d at offset %d to AOT Code Cache",
                                   _C_strings_used, length, offset);
  }
  return _C_strings_used;
}
1866 
1867 const char* AOTCodeCache::add_C_string(const char* str) {
1868   if (is_on_for_dump() && str != nullptr) {
1869     MutexLocker ml(AOTCodeCStrings_lock, Mutex::_no_safepoint_check_flag);
1870     AOTCodeAddressTable* table = addr_table();
1871     if (table != nullptr) {
1872       return table->add_C_string(str);
1873     }
1874   }
1875   return str;
1876 }
1877 
// Intern 'str': return the table's own duplicate if an identical string was
// seen before (matched by pointer or by content), otherwise strdup it and
// record both the incoming pointer and the duplicate.
// Caller (AOTCodeCache::add_C_string) holds AOTCodeCStrings_lock.
const char* AOTCodeAddressTable::add_C_string(const char* str) {
  if (_extrs_complete) {
    // Check previous strings address
    for (int i = 0; i < _C_strings_count; i++) {
      if (_C_strings_in[i] == str) {
        return _C_strings[i]; // Found previous one - return our duplicate
      } else if (strcmp(_C_strings[i], str) == 0) {
        // Same content arriving via a different pointer.
        return _C_strings[i];
      }
    }
    // Add new one
    if (_C_strings_count < MAX_STR_COUNT) {
      // Passed in string can be freed and used space become inaccessible.
      // Keep original address but duplicate string for future compare.
      _C_strings_id[_C_strings_count] = -1; // Init: no id assigned yet (see id_for_C_string)
      _C_strings_in[_C_strings_count] = str;
      const char* dup = os::strdup(str);
      _C_strings[_C_strings_count++] = dup;
      log_trace(aot, codecache, stringtable)("add_C_string: [%d] " INTPTR_FORMAT " '%s'", _C_strings_count, p2i(dup), dup);
      return dup;
    } else {
      assert(false, "Number of C strings >= MAX_STR_COUNT");
    }
  }
  // Table not initialized or full: hand back the caller's pointer unchanged.
  return str;
}
1904 
// Return the serialization id for an interned C string address, assigning a
// fresh id on first use (ids are handed out in order of first reference).
// Returns -1 when 'str' is null or not present in the table.
int AOTCodeAddressTable::id_for_C_string(address str) {
  if (str == nullptr) {
    return -1;
  }
  MutexLocker ml(AOTCodeCStrings_lock, Mutex::_no_safepoint_check_flag);
  for (int i = 0; i < _C_strings_count; i++) {
    if (_C_strings[i] == (const char*)str) { // found
      int id = _C_strings_id[i];
      if (id >= 0) {
        assert(id < _C_strings_used, "%d >= %d", id , _C_strings_used);
        return id; // Found recorded
      }
      // Not found in recorded, add new
      id = _C_strings_used++;
      _C_strings_s[id] = i; // id -> index
      _C_strings_id[i] = id; // index -> id
      return id;
    }
  }
  return -1;
}
1926 
1927 address AOTCodeAddressTable::address_for_C_string(int idx) {
1928   assert(idx < _C_strings_count, "sanity");
1929   return (address)_C_strings[idx];
1930 }
1931 
1932 static int search_address(address addr, address* table, uint length) {
1933   for (int i = 0; i < (int)length; i++) {
1934     if (table[i] == addr) {
1935       return i;
1936     }
1937   }
1938   return BAD_ADDRESS_ID;
1939 }
1940 
1941 address AOTCodeAddressTable::address_for_id(int idx) {
1942   assert(_extrs_complete, "AOT Code Cache VM runtime addresses table is not complete");
1943   if (idx == -1) {
1944     return (address)-1;
1945   }
1946   uint id = (uint)idx;
1947   // special case for symbols based relative to os::init
1948   if (id > (_c_str_base + _c_str_max)) {
1949     return (address)os::init + idx;
1950   }
1951   if (idx < 0) {
1952     fatal("Incorrect id %d for AOT Code Cache addresses table", id);
1953     return nullptr;
1954   }
1955   // no need to compare unsigned id against 0
1956   if (/* id >= _extrs_base && */ id < _extrs_length) {
1957     return _extrs_addr[id - _extrs_base];
1958   }
1959   if (id >= _stubs_base && id < _stubs_base + _stubs_length) {
1960     return _stubs_addr[id - _stubs_base];
1961   }
1962   if (id >= _shared_blobs_base && id < _shared_blobs_base + _shared_blobs_length) {
1963     return _shared_blobs_addr[id - _shared_blobs_base];
1964   }
1965   if (id >= _C1_blobs_base && id < _C1_blobs_base + _C1_blobs_length) {
1966     return _C1_blobs_addr[id - _C1_blobs_base];
1967   }
1968   if (id >= _c_str_base && id < (_c_str_base + (uint)_C_strings_count)) {
1969     return address_for_C_string(id - _c_str_base);
1970   }
1971   fatal("Incorrect id %d for AOT Code Cache addresses table", id);
1972   return nullptr;
1973 }
1974 
// Encode a runtime address as a stable id for serialization: C strings first,
// then stubs, code blobs, and external runtime entries. As a last resort, an
// address inside the VM binary is encoded as its distance from os::init.
// Returns -1 for the (address)-1 sentinel; asserts (debug) when an address
// cannot be encoded. 'reloc' and 'code_blob' are only used for diagnostics.
int AOTCodeAddressTable::id_for_address(address addr, RelocIterator reloc, CodeBlob* code_blob) {
  assert(_extrs_complete, "AOT Code Cache VM runtime addresses table is not complete");
  int id = -1;
  if (addr == (address)-1) { // Static call stub has jump to itself
    return id;
  }
  // Check card_table_base address first since it can point to any address
  BarrierSet* bs = BarrierSet::barrier_set();
  bool is_const_card_table_base = !UseG1GC && !UseShenandoahGC && bs->is_a(BarrierSet::CardTableBarrierSet);
  guarantee(!is_const_card_table_base || addr != ci_card_table_address_const(), "sanity");

  // Search for C string
  id = id_for_C_string(addr);
  if (id >= 0) {
    return id + _c_str_base;
  }
  if (StubRoutines::contains(addr)) {
    // Search in stubs
    id = search_address(addr, _stubs_addr, _stubs_length);
    if (id < 0) {
      // Not recorded: build a diagnostic name for the assert below.
      StubCodeDesc* desc = StubCodeDesc::desc_for(addr);
      if (desc == nullptr) {
        // The address may be a return address; retry with the call adjustment.
        desc = StubCodeDesc::desc_for(addr + frame::pc_return_offset);
      }
      const char* sub_name = (desc != nullptr) ? desc->name() : "<unknown>";
      assert(false, "Address " INTPTR_FORMAT " for Stub:%s is missing in AOT Code Cache addresses table", p2i(addr), sub_name);
    } else {
      return id + _stubs_base;
    }
  } else {
    CodeBlob* cb = CodeCache::find_blob(addr);
    if (cb != nullptr) {
      // Search in code blobs
      int id_base = _shared_blobs_base;
      id = search_address(addr, _shared_blobs_addr, _blobs_max);
      if (id < 0) {
        assert(false, "Address " INTPTR_FORMAT " for Blob:%s is missing in AOT Code Cache addresses table", p2i(addr), cb->name());
      } else {
        return id_base + id;
      }
    } else {
      // Search in runtime functions
      id = search_address(addr, _extrs_addr, _extrs_length);
      if (id < 0) {
        ResourceMark rm;
        const int buflen = 1024;
        char* func_name = NEW_RESOURCE_ARRAY(char, buflen);
        int offset = 0;
        if (os::dll_address_to_function_name(addr, func_name, buflen, &offset)) {
          if (offset > 0) {
            // Could be address of C string
            // Encode as a distance from os::init; address_for_id() reverses this.
            uint dist = (uint)pointer_delta(addr, (address)os::init, 1);
            log_debug(aot, codecache)("Address " INTPTR_FORMAT " (offset %d) for runtime target '%s' is missing in AOT Code Cache addresses table",
                                      p2i(addr), dist, (const char*)addr);
            // Distances must stay above the id ranges to remain distinguishable.
            assert(dist > (uint)(_all_max + MAX_STR_COUNT), "change encoding of distance");
            return dist;
          }
#ifdef ASSERT
          reloc.print_current_on(tty);
          code_blob->print_on(tty);
          code_blob->print_code_on(tty);
          assert(false, "Address " INTPTR_FORMAT " for runtime target '%s+%d' is missing in AOT Code Cache addresses table", p2i(addr), func_name, offset);
#endif
        } else {
#ifdef ASSERT
          reloc.print_current_on(tty);
          code_blob->print_on(tty);
          code_blob->print_code_on(tty);
          os::find(addr, tty);
          assert(false, "Address " INTPTR_FORMAT " for <unknown>/('%s') is missing in AOT Code Cache addresses table", p2i(addr), (const char*)addr);
#endif
        }
      } else {
        return _extrs_base + id;
      }
    }
  }
  return id;
}
2054 
2055 AOTRuntimeConstants AOTRuntimeConstants::_aot_runtime_constants;
2056 
// Populate the AOT runtime constants area from the live GC configuration:
// the (constant) card table base for card-table collectors and the heap
// region grain shift where applicable.
void AOTRuntimeConstants::initialize_from_runtime() {
  BarrierSet* bs = BarrierSet::barrier_set();
  address card_table_base = nullptr;
  uint grain_shift = 0;
#if INCLUDE_G1GC
  if (bs->is_a(BarrierSet::G1BarrierSet)) {
    grain_shift = G1HeapRegion::LogOfHRGrainBytes;
  } else
#endif
#if INCLUDE_SHENANDOAHGC
  if (bs->is_a(BarrierSet::ShenandoahBarrierSet)) {
    grain_shift = 0;
  } else
#endif
  if (bs->is_a(BarrierSet::CardTableBarrierSet)) {
    // Serial/Parallel: the card table base is a compile-time-stable constant.
    CardTable::CardValue* base = ci_card_table_address_const();
    assert(base != nullptr, "unexpected byte_map_base");
    card_table_base = base;
    CardTableBarrierSet* ctbs = barrier_set_cast<CardTableBarrierSet>(bs);
    grain_shift = ctbs->grain_shift();
  }
  _aot_runtime_constants._card_table_base = card_table_base;
  _aot_runtime_constants._grain_shift = grain_shift;
}
2081 
// Null-terminated list of the constants-area field addresses; consumed by
// AOTCodeAddressTable::init_extrs() to register each field as an external
// address.
address AOTRuntimeConstants::_field_addresses_list[] = {
  ((address)&_aot_runtime_constants._card_table_base),
  ((address)&_aot_runtime_constants._grain_shift),
  nullptr
};
2087 
// Address of the _card_table_base slot in the constants area (not the card
// table base itself); only meaningful for GCs with a constant card table base.
address AOTRuntimeConstants::card_table_base_address() {
  assert(UseSerialGC || UseParallelGC, "Only these GCs have constant card table base");
  return (address)&_aot_runtime_constants._card_table_base;
}
2092 
// This is called after initialize() but before init2()
// and _cache is not set yet.
// Print a listing of every entry in the opened AOT code cache (kind, index,
// id, size, name), ordered by the search table.
void AOTCodeCache::print_on(outputStream* st) {
  if (opened_cache != nullptr && opened_cache->for_use()) {
    st->print_cr("\nAOT Code Cache");
    uint count = opened_cache->_load_header->entries_count();
    uint* search_entries = (uint*)opened_cache->addr(opened_cache->_load_header->entries_offset()); // [id, index]
    AOTCodeEntry* load_entries = (AOTCodeEntry*)(search_entries + 2 * count);

    for (uint i = 0; i < count; i++) {
      // Use search_entries[] to order output
      int index = search_entries[2*i + 1];
      AOTCodeEntry* entry = &(load_entries[index]);

      uint entry_position = entry->offset();
      // Entry names are stored relative to the entry's own offset.
      uint name_offset = entry->name_offset() + entry_position;
      const char* saved_name = opened_cache->addr(name_offset);

      st->print_cr("%4u: %10s idx:%4u Id:%u size=%u '%s'",
                   i, aot_code_entry_kind_name[entry->kind()], index, entry->id(), entry->size(), saved_name);
    }
  }
}