1 /*
   2  * Copyright (c) 2023, 2025, Oracle and/or its affiliates. All rights reserved.
   3  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
   4  *
   5  * This code is free software; you can redistribute it and/or modify it
   6  * under the terms of the GNU General Public License version 2 only, as
   7  * published by the Free Software Foundation.
   8  *
   9  * This code is distributed in the hope that it will be useful, but WITHOUT
  10  * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
  11  * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
  12  * version 2 for more details (a copy is included in the LICENSE file that
  13  * accompanied this code).
  14  *
  15  * You should have received a copy of the GNU General Public License version
  16  * 2 along with this work; if not, write to the Free Software Foundation,
  17  * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
  18  *
  19  * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
  20  * or visit www.oracle.com if you need additional information or have any
  21  * questions.
  22  *
  23  */
  24 
  25 
  26 #include "asm/macroAssembler.hpp"
  27 #include "cds/aotCacheAccess.hpp"
  28 #include "cds/aotMetaspace.hpp"
  29 #include "cds/cds_globals.hpp"
  30 #include "cds/cdsConfig.hpp"
  31 #include "cds/heapShared.hpp"
  32 #include "classfile/javaAssertions.hpp"
  33 #include "code/aotCodeCache.hpp"
  34 #include "code/codeCache.hpp"
  35 #include "gc/shared/gcConfig.hpp"
  36 #include "logging/logStream.hpp"
  37 #include "memory/memoryReserver.hpp"
  38 #include "runtime/deoptimization.hpp"
  39 #include "runtime/flags/flagSetting.hpp"
  40 #include "runtime/globals_extension.hpp"
  41 #include "runtime/java.hpp"
  42 #include "runtime/mutexLocker.hpp"
  43 #include "runtime/os.inline.hpp"
  44 #include "runtime/sharedRuntime.hpp"
  45 #include "runtime/stubInfo.hpp"
  46 #include "runtime/stubRoutines.hpp"
  47 #include "utilities/copy.hpp"
  48 #ifdef COMPILER1
  49 #include "c1/c1_Runtime1.hpp"
  50 #endif
  51 #ifdef COMPILER2
  52 #include "opto/runtime.hpp"
  53 #endif
  54 #if INCLUDE_G1GC
  55 #include "gc/g1/g1BarrierSetRuntime.hpp"
  56 #endif
  57 #if INCLUDE_SHENANDOAHGC
  58 #include "gc/shenandoah/shenandoahRuntime.hpp"
  59 #endif
  60 #if INCLUDE_ZGC
  61 #include "gc/z/zBarrierSetRuntime.hpp"
  62 #endif
  63 
  64 #include <errno.h>
  65 #include <sys/stat.h>
  66 
  67 const char* aot_code_entry_kind_name[] = {
  68 #define DECL_KIND_STRING(kind) XSTR(kind),
  69   DO_AOTCODEENTRY_KIND(DECL_KIND_STRING)
  70 #undef DECL_KIND_STRING
  71 };
  72 
  73 static void report_load_failure() {
  74   if (AbortVMOnAOTCodeFailure) {
  75     vm_exit_during_initialization("Unable to use AOT Code Cache.", nullptr);
  76   }
  77   log_info(aot, codecache, init)("Unable to use AOT Code Cache.");
  78   AOTCodeCache::disable_caching();
  79 }
  80 
  81 static void report_store_failure() {
  82   if (AbortVMOnAOTCodeFailure) {
  83     tty->print_cr("Unable to create AOT Code Cache.");
  84     vm_abort(false);
  85   }
  86   log_info(aot, codecache, exit)("Unable to create AOT Code Cache.");
  87   AOTCodeCache::disable_caching();
  88 }
  89 
// The sequence of AOT code caching flag and parameter settings
// (a condensed call-order sketch follows this list):
//
// 1. The initial AOT code caching flags are set during the call to
// CDSConfig::check_vm_args_consistency().
//
// 2. The earliest AOT code state check is done in compilationPolicy_init(),
// where we set the number of compiler threads for the AOT assembly phase.
//
// 3. We determine the presence of AOT code in the AOT cache in
// AOTMetaspace::open_static_archive(), which is called
// after compilationPolicy_init() but before codeCache_init().
//
// 4. AOTCodeCache::initialize() is called during universe_init()
// and does the final AOT state and flag settings.
//
// 5. Finally, AOTCodeCache::init2() is called after universe_init(),
// when all GC settings are finalized.
 107 
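// A condensed sketch of the call order implied by the steps above (not exact
// code; the actual call sites live in the VM startup sequence):
//
//   CDSConfig::check_vm_args_consistency();  // (1) initial flag setup
//   compilationPolicy_init();                // (2) compiler threads for AOT assembly
//   AOTMetaspace::open_static_archive();     // (3) detect AOT code (before codeCache_init())
//   universe_init();
//     -> AOTCodeCache::initialize();         // (4) final AOT state and flag settings
//   AOTCodeCache::init2();                   // (5) after all GC settings are finalized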
// The following methods determine what we do with AOT code depending
// on the phase of the AOT process: assembly or production.
 110 
 111 bool AOTCodeCache::is_dumping_adapter() {
 112   return AOTAdapterCaching && is_on_for_dump();
 113 }
 114 
 115 bool AOTCodeCache::is_using_adapter()   {
 116   return AOTAdapterCaching && is_on_for_use();
 117 }
 118 
 119 bool AOTCodeCache::is_dumping_stub() {
 120   return AOTStubCaching && is_on_for_dump();
 121 }
 122 
 123 bool AOTCodeCache::is_using_stub()   {
 124   return AOTStubCaching && is_on_for_use();
 125 }
 126 
// The following methods can be called regardless of the AOT code cache status.
// Initially they are called during flag parsing and finalized
// in AOTCodeCache::initialize().
 130 void AOTCodeCache::enable_caching() {
 131   FLAG_SET_ERGO_IF_DEFAULT(AOTStubCaching, true);
 132   FLAG_SET_ERGO_IF_DEFAULT(AOTAdapterCaching, true);
 133 }
 134 
 135 void AOTCodeCache::disable_caching() {
 136   FLAG_SET_ERGO(AOTStubCaching, false);
 137   FLAG_SET_ERGO(AOTAdapterCaching, false);
 138 }
 139 
 140 bool AOTCodeCache::is_caching_enabled() {
 141   return AOTStubCaching || AOTAdapterCaching;
 142 }
 143 
 144 static uint32_t encode_id(AOTCodeEntry::Kind kind, int id) {
 145   assert(AOTCodeEntry::is_valid_entry_kind(kind), "invalid AOTCodeEntry kind %d", (int)kind);
  // There can be a conflict of id between an Adapter and a *Blob, but that should not cause any functional issue
  // because both id and kind are used to find an entry, and that combination should be unique
 148   if (kind == AOTCodeEntry::Adapter) {
 149     return id;
 150   } else if (kind == AOTCodeEntry::SharedBlob) {
 151     assert(StubInfo::is_shared(static_cast<BlobId>(id)), "not a shared blob id %d", id);
 152     return id;
 153   } else if (kind == AOTCodeEntry::C1Blob) {
 154     assert(StubInfo::is_c1(static_cast<BlobId>(id)), "not a c1 blob id %d", id);
 155     return id;
 156   } else {
 157     // kind must be AOTCodeEntry::C2Blob
 158     assert(StubInfo::is_c2(static_cast<BlobId>(id)), "not a c2 blob id %d", id);
 159     return id;
 160   }
 161 }
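// Usage sketch (this mirrors load_code_blob() further below): the same
// (kind, id) pair is used both to encode the stored id and to look it up again:
//
//   AOTCodeEntry* entry = cache->find_entry(entry_kind, encode_id(entry_kind, id));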
 162 
 163 static uint _max_aot_code_size = 0;
 164 uint AOTCodeCache::max_aot_code_size() {
 165   return _max_aot_code_size;
 166 }
 167 
// It is called from AOTMetaspace::initialize_shared_spaces()
// which is called from universe_init().
// At this point all AOT class linking settings are finalized
// and the AOT cache is open, so we can map the AOT code region.
 172 void AOTCodeCache::initialize() {
 173 #if defined(ZERO) || !(defined(AMD64) || defined(AARCH64))
 174   log_info(aot, codecache, init)("AOT Code Cache is not supported on this platform.");
 175   disable_caching();
 176   return;
 177 #else
 178   if (FLAG_IS_DEFAULT(AOTCache)) {
 179     log_info(aot, codecache, init)("AOT Code Cache is not used: AOTCache is not specified.");
 180     disable_caching();
 181     return; // AOTCache must be specified to dump and use AOT code
 182   }
 183 
 184   // Disable stubs caching until JDK-8357398 is fixed.
 185   FLAG_SET_ERGO(AOTStubCaching, false);
 186 
 187   if (VerifyOops) {
 188     // Disable AOT stubs caching when VerifyOops flag is on.
    // The VerifyOops code generates a lot of C strings which overflow
    // the AOT C string table (which has a fixed size).
    // The AOT C string table will be reworked later to handle such cases.
 192     //
 193     // Note: AOT adapters are not affected - they don't have oop operations.
 194     log_info(aot, codecache, init)("AOT Stubs Caching is not supported with VerifyOops.");
 195     FLAG_SET_ERGO(AOTStubCaching, false);
 196   }
 197 
 198   bool is_dumping = false;
 199   bool is_using   = false;
 200   if (CDSConfig::is_dumping_final_static_archive() && CDSConfig::is_dumping_aot_linked_classes()) {
 201     is_dumping = true;
 202     enable_caching();
 203     is_dumping = is_caching_enabled();
 204   } else if (CDSConfig::is_using_archive() && CDSConfig::is_using_aot_linked_classes()) {
 205     enable_caching();
 206     is_using = is_caching_enabled();
 207   } else {
 208     log_info(aot, codecache, init)("AOT Code Cache is not used: AOT Class Linking is not used.");
 209     disable_caching();
 210     return; // nothing to do
 211   }
 212   if (!(is_dumping || is_using)) {
 213     disable_caching();
 214     return; // AOT code caching disabled on command line
 215   }
 216   _max_aot_code_size = AOTCodeMaxSize;
 217   if (!FLAG_IS_DEFAULT(AOTCodeMaxSize)) {
 218     if (!is_aligned(AOTCodeMaxSize, os::vm_allocation_granularity())) {
 219       _max_aot_code_size = align_up(AOTCodeMaxSize, os::vm_allocation_granularity());
      log_debug(aot, codecache, init)("Max AOT Code Cache size is aligned up to %uK", (uint)(max_aot_code_size()/K));
 221     }
 222   }
 223   size_t aot_code_size = is_using ? AOTCacheAccess::get_aot_code_region_size() : 0;
 224   if (is_using && aot_code_size == 0) {
 225     log_info(aot, codecache, init)("AOT Code Cache is empty");
 226     disable_caching();
 227     return;
 228   }
 229   if (!open_cache(is_dumping, is_using)) {
 230     if (is_using) {
 231       report_load_failure();
 232     } else {
 233       report_store_failure();
 234     }
 235     return;
 236   }
 237   if (is_dumping) {
 238     FLAG_SET_DEFAULT(ForceUnreachable, true);
 239   }
 240   FLAG_SET_DEFAULT(DelayCompilerStubsGeneration, false);
#endif // defined(ZERO) || !(defined(AMD64) || defined(AARCH64))
 242 }
 243 
 244 static AOTCodeCache*  opened_cache = nullptr; // Use this until we verify the cache
 245 AOTCodeCache* AOTCodeCache::_cache = nullptr;
 246 DEBUG_ONLY( bool AOTCodeCache::_passed_init2 = false; )
 247 
 248 // It is called after universe_init() when all GC settings are finalized.
 249 void AOTCodeCache::init2() {
 250   DEBUG_ONLY( _passed_init2 = true; )
 251   if (opened_cache == nullptr) {
 252     return;
 253   }
 254   if (!opened_cache->verify_config()) {
 255     delete opened_cache;
 256     opened_cache = nullptr;
 257     report_load_failure();
 258     return;
 259   }
 260 
 261   // initialize the table of external routines so we can save
 262   // generated code blobs that reference them
 263   AOTCodeAddressTable* table = opened_cache->_table;
 264   assert(table != nullptr, "should be initialized already");
 265   table->init_extrs();
 266 
 267   // Now cache and address table are ready for AOT code generation
 268   _cache = opened_cache;
 269 }
 270 
 271 bool AOTCodeCache::open_cache(bool is_dumping, bool is_using) {
 272   opened_cache = new AOTCodeCache(is_dumping, is_using);
 273   if (opened_cache->failed()) {
 274     delete opened_cache;
 275     opened_cache = nullptr;
 276     return false;
 277   }
 278   return true;
 279 }
 280 
 281 void AOTCodeCache::close() {
 282   if (is_on()) {
 283     delete _cache; // Free memory
 284     _cache = nullptr;
 285     opened_cache = nullptr;
 286   }
 287 }
 288 
 289 #define DATA_ALIGNMENT HeapWordSize
 290 
 291 AOTCodeCache::AOTCodeCache(bool is_dumping, bool is_using) :
 292   _load_header(nullptr),
 293   _load_buffer(nullptr),
 294   _store_buffer(nullptr),
 295   _C_store_buffer(nullptr),
 296   _write_position(0),
 297   _load_size(0),
 298   _store_size(0),
 299   _for_use(is_using),
 300   _for_dump(is_dumping),
 301   _closing(false),
 302   _failed(false),
 303   _lookup_failed(false),
 304   _table(nullptr),
 305   _load_entries(nullptr),
 306   _search_entries(nullptr),
 307   _store_entries(nullptr),
 308   _C_strings_buf(nullptr),
 309   _store_entries_cnt(0)
 310 {
  // Read the header at the beginning of the cache
 312   if (_for_use) {
 313     // Read cache
 314     size_t load_size = AOTCacheAccess::get_aot_code_region_size();
 315     ReservedSpace rs = MemoryReserver::reserve(load_size, mtCode);
 316     if (!rs.is_reserved()) {
      log_warning(aot, codecache, init)("Failed to reserve %u bytes of memory for mapping AOT code region into AOT Code Cache", (uint)load_size);
 318       set_failed();
 319       return;
 320     }
 321     if (!AOTCacheAccess::map_aot_code_region(rs)) {
 322       log_warning(aot, codecache, init)("Failed to read/mmap cached code region into AOT Code Cache");
 323       set_failed();
 324       return;
 325     }
 326 
 327     _load_size = (uint)load_size;
 328     _load_buffer = (char*)rs.base();
 329     assert(is_aligned(_load_buffer, DATA_ALIGNMENT), "load_buffer is not aligned");
 330     log_debug(aot, codecache, init)("Mapped %u bytes at address " INTPTR_FORMAT " at AOT Code Cache", _load_size, p2i(_load_buffer));
 331 
 332     _load_header = (Header*)addr(0);
 333     if (!_load_header->verify(_load_size)) {
 334       set_failed();
 335       return;
 336     }
 337     log_info (aot, codecache, init)("Loaded %u AOT code entries from AOT Code Cache", _load_header->entries_count());
 338     log_debug(aot, codecache, init)("  Adapters:  total=%u", _load_header->adapters_count());
 339     log_debug(aot, codecache, init)("  Shared Blobs: total=%u", _load_header->shared_blobs_count());
 340     log_debug(aot, codecache, init)("  C1 Blobs: total=%u", _load_header->C1_blobs_count());
 341     log_debug(aot, codecache, init)("  C2 Blobs: total=%u", _load_header->C2_blobs_count());
 342     log_debug(aot, codecache, init)("  AOT code cache size: %u bytes", _load_header->cache_size());
 343 
 344     // Read strings
 345     load_strings();
 346   }
 347   if (_for_dump) {
 348     _C_store_buffer = NEW_C_HEAP_ARRAY(char, max_aot_code_size() + DATA_ALIGNMENT, mtCode);
 349     _store_buffer = align_up(_C_store_buffer, DATA_ALIGNMENT);
    // Entries are allocated at the end of the buffer in reverse order (as on a stack).
 351     _store_entries = (AOTCodeEntry*)align_up(_C_store_buffer + max_aot_code_size(), DATA_ALIGNMENT);
 352     log_debug(aot, codecache, init)("Allocated store buffer at address " INTPTR_FORMAT " of size %u", p2i(_store_buffer), max_aot_code_size());
 353   }
 354   _table = new AOTCodeAddressTable();
 355 }
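// A rough sketch of the store buffer layout used while dumping: data written via
// write_bytes()/reserve_bytes() grows up from _store_buffer, while AOTCodeEntry
// records handed out by add_entry() grow down from the end (as on a stack):
//
//   _store_buffer                                              _store_entries
//   |--> code, names, strings ... _write_position |   free   | entries <--|
//
// reserve_bytes() and write_bytes() fail once the two regions would meet.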
 356 
 357 void AOTCodeCache::init_early_stubs_table() {
 358   AOTCodeAddressTable* table = addr_table();
 359   if (table != nullptr) {
 360     table->init_early_stubs();
 361   }
 362 }
 363 
 364 void AOTCodeCache::init_shared_blobs_table() {
 365   AOTCodeAddressTable* table = addr_table();
 366   if (table != nullptr) {
 367     table->init_shared_blobs();
 368   }
 369 }
 370 
 371 void AOTCodeCache::init_early_c1_table() {
 372   AOTCodeAddressTable* table = addr_table();
 373   if (table != nullptr) {
 374     table->init_early_c1();
 375   }
 376 }
 377 
 378 AOTCodeCache::~AOTCodeCache() {
 379   if (_closing) {
 380     return; // Already closed
 381   }
 382   // Stop any further access to cache.
 383   _closing = true;
 384 
 385   MutexLocker ml(Compile_lock);
 386   if (for_dump()) { // Finalize cache
 387     finish_write();
 388   }
 389   _load_buffer = nullptr;
 390   if (_C_store_buffer != nullptr) {
 391     FREE_C_HEAP_ARRAY(char, _C_store_buffer);
 392     _C_store_buffer = nullptr;
 393     _store_buffer = nullptr;
 394   }
 395   if (_table != nullptr) {
 396     MutexLocker ml(AOTCodeCStrings_lock, Mutex::_no_safepoint_check_flag);
 397     delete _table;
 398     _table = nullptr;
 399   }
 400 }
 401 
 402 void AOTCodeCache::Config::record() {
 403   _flags = 0;
 404 #ifdef ASSERT
 405   _flags |= debugVM;
 406 #endif
 407   if (UseCompressedOops) {
 408     _flags |= compressedOops;
 409   }
 410   if (UseCompressedClassPointers) {
 411     _flags |= compressedClassPointers;
 412   }
 413   if (UseTLAB) {
 414     _flags |= useTLAB;
 415   }
 416   if (JavaAssertions::systemClassDefault()) {
 417     _flags |= systemClassAssertions;
 418   }
 419   if (JavaAssertions::userClassDefault()) {
 420     _flags |= userClassAssertions;
 421   }
 422   if (EnableContended) {
 423     _flags |= enableContendedPadding;
 424   }
 425   if (RestrictContended) {
 426     _flags |= restrictContendedPadding;
 427   }
 428   _compressedOopShift    = CompressedOops::shift();
 429   _compressedOopBase     = CompressedOops::base();
 430   _compressedKlassShift  = CompressedKlassPointers::shift();
 431   _contendedPaddingWidth = ContendedPaddingWidth;
 432   _gc                    = (uint)Universe::heap()->kind();
 433 }
 434 
 435 bool AOTCodeCache::Config::verify() const {
 436   // First checks affect all cached AOT code
 437 #ifdef ASSERT
 438   if ((_flags & debugVM) == 0) {
 439     log_debug(aot, codecache, init)("AOT Code Cache disabled: it was created by product VM, it can't be used by debug VM");
 440     return false;
 441   }
 442 #else
 443   if ((_flags & debugVM) != 0) {
 444     log_debug(aot, codecache, init)("AOT Code Cache disabled: it was created by debug VM, it can't be used by product VM");
 445     return false;
 446   }
 447 #endif
 448 
 449   CollectedHeap::Name aot_gc = (CollectedHeap::Name)_gc;
 450   if (aot_gc != Universe::heap()->kind()) {
 451     log_debug(aot, codecache, init)("AOT Code Cache disabled: it was created with different GC: %s vs current %s", GCConfig::hs_err_name(aot_gc), GCConfig::hs_err_name());
 452     return false;
 453   }
 454 
 455   if (((_flags & compressedClassPointers) != 0) != UseCompressedClassPointers) {
 456     log_debug(aot, codecache, init)("AOT Code Cache disabled: it was created with UseCompressedClassPointers = %s", UseCompressedClassPointers ? "false" : "true");
 457     return false;
 458   }
 459   if (_compressedKlassShift != (uint)CompressedKlassPointers::shift()) {
 460     log_debug(aot, codecache, init)("AOT Code Cache disabled: it was created with CompressedKlassPointers::shift() = %d vs current %d", _compressedKlassShift, CompressedKlassPointers::shift());
 461     return false;
 462   }
 463 
 464   // The following checks do not affect AOT adapters caching
 465 
 466   if (((_flags & compressedOops) != 0) != UseCompressedOops) {
 467     log_debug(aot, codecache, init)("AOT Code Cache disabled: it was created with UseCompressedOops = %s", UseCompressedOops ? "false" : "true");
 468     AOTStubCaching = false;
 469   }
 470   if (_compressedOopShift != (uint)CompressedOops::shift()) {
 471     log_debug(aot, codecache, init)("AOT Code Cache disabled: it was created with different CompressedOops::shift(): %d vs current %d", _compressedOopShift, CompressedOops::shift());
 472     AOTStubCaching = false;
 473   }
 474 
 475   // This should be the last check as it only disables AOTStubCaching
 476   if ((_compressedOopBase == nullptr || CompressedOops::base() == nullptr) && (_compressedOopBase != CompressedOops::base())) {
 477     log_debug(aot, codecache, init)("AOTStubCaching is disabled: incompatible CompressedOops::base(): %p vs current %p", _compressedOopBase, CompressedOops::base());
 478     AOTStubCaching = false;
 479   }
 480 
 481   return true;
 482 }
 483 
 484 bool AOTCodeCache::Header::verify(uint load_size) const {
 485   if (_version != AOT_CODE_VERSION) {
 486     log_debug(aot, codecache, init)("AOT Code Cache disabled: different AOT Code version %d vs %d recorded in AOT Code header", AOT_CODE_VERSION, _version);
 487     return false;
 488   }
 489   if (load_size < _cache_size) {
 490     log_debug(aot, codecache, init)("AOT Code Cache disabled: AOT Code Cache size %d < %d recorded in AOT Code header", load_size, _cache_size);
 491     return false;
 492   }
 493   return true;
 494 }
 495 
 496 AOTCodeCache* AOTCodeCache::open_for_use() {
 497   if (AOTCodeCache::is_on_for_use()) {
 498     return AOTCodeCache::cache();
 499   }
 500   return nullptr;
 501 }
 502 
 503 AOTCodeCache* AOTCodeCache::open_for_dump() {
 504   if (AOTCodeCache::is_on_for_dump()) {
 505     AOTCodeCache* cache = AOTCodeCache::cache();
 506     cache->clear_lookup_failed(); // Reset bit
 507     return cache;
 508   }
 509   return nullptr;
 510 }
 511 
 512 void copy_bytes(const char* from, address to, uint size) {
 513   assert((int)size > 0, "sanity");
 514   memcpy(to, from, size);
 515   log_trace(aot, codecache)("Copied %d bytes from " INTPTR_FORMAT " to " INTPTR_FORMAT, size, p2i(from), p2i(to));
 516 }
 517 
 518 AOTCodeReader::AOTCodeReader(AOTCodeCache* cache, AOTCodeEntry* entry) {
 519   _cache = cache;
 520   _entry = entry;
 521   _load_buffer = cache->cache_buffer();
 522   _read_position = 0;
 523   _lookup_failed = false;
 524 }
 525 
 526 void AOTCodeReader::set_read_position(uint pos) {
 527   if (pos == _read_position) {
 528     return;
 529   }
 530   assert(pos < _cache->load_size(), "offset:%d >= file size:%d", pos, _cache->load_size());
 531   _read_position = pos;
 532 }
 533 
 534 bool AOTCodeCache::set_write_position(uint pos) {
 535   if (pos == _write_position) {
 536     return true;
 537   }
 538   if (_store_size < _write_position) {
 539     _store_size = _write_position; // Adjust during write
 540   }
 541   assert(pos < _store_size, "offset:%d >= file size:%d", pos, _store_size);
 542   _write_position = pos;
 543   return true;
 544 }
 545 
 546 static char align_buffer[256] = { 0 };
 547 
 548 bool AOTCodeCache::align_write() {
  // We do not execute code directly from the cache - we copy it out byte by byte first,
  // so no big alignment is needed (or any at all).
 551   uint padding = DATA_ALIGNMENT - (_write_position & (DATA_ALIGNMENT - 1));
 552   if (padding == DATA_ALIGNMENT) {
 553     return true;
 554   }
 555   uint n = write_bytes((const void*)&align_buffer, padding);
 556   if (n != padding) {
 557     return false;
 558   }
 559   log_trace(aot, codecache)("Adjust write alignment in AOT Code Cache");
 560   return true;
 561 }
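// Worked example (assuming a 64-bit VM where DATA_ALIGNMENT == HeapWordSize == 8):
// with _write_position == 13, padding == 8 - (13 & 7) == 3, so three zero bytes
// from align_buffer are written and _write_position advances to 16. If the
// position is already aligned, padding == DATA_ALIGNMENT and nothing is written.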
 562 
// Check whether the AOT code cache has the required space to store "nbytes" of data
 564 address AOTCodeCache::reserve_bytes(uint nbytes) {
 565   assert(for_dump(), "Code Cache file is not created");
 566   uint new_position = _write_position + nbytes;
 567   if (new_position >= (uint)((char*)_store_entries - _store_buffer)) {
 568     log_warning(aot,codecache)("Failed to ensure %d bytes at offset %d in AOT Code Cache. Increase AOTCodeMaxSize.",
 569                                nbytes, _write_position);
 570     set_failed();
 571     report_store_failure();
 572     return nullptr;
 573   }
 574   address buffer = (address)(_store_buffer + _write_position);
 575   log_trace(aot, codecache)("Reserved %d bytes at offset %d in AOT Code Cache", nbytes, _write_position);
 576   _write_position += nbytes;
 577   if (_store_size < _write_position) {
 578     _store_size = _write_position;
 579   }
 580   return buffer;
 581 }
 582 
 583 uint AOTCodeCache::write_bytes(const void* buffer, uint nbytes) {
 584   assert(for_dump(), "Code Cache file is not created");
 585   if (nbytes == 0) {
 586     return 0;
 587   }
 588   uint new_position = _write_position + nbytes;
 589   if (new_position >= (uint)((char*)_store_entries - _store_buffer)) {
 590     log_warning(aot, codecache)("Failed to write %d bytes at offset %d to AOT Code Cache. Increase AOTCodeMaxSize.",
 591                                 nbytes, _write_position);
 592     set_failed();
 593     report_store_failure();
 594     return 0;
 595   }
 596   copy_bytes((const char* )buffer, (address)(_store_buffer + _write_position), nbytes);
 597   log_trace(aot, codecache)("Wrote %d bytes at offset %d to AOT Code Cache", nbytes, _write_position);
 598   _write_position += nbytes;
 599   if (_store_size < _write_position) {
 600     _store_size = _write_position;
 601   }
 602   return nbytes;
 603 }
 604 
 605 void* AOTCodeEntry::operator new(size_t x, AOTCodeCache* cache) {
 606   return (void*)(cache->add_entry());
 607 }
 608 
 609 static bool check_entry(AOTCodeEntry::Kind kind, uint id, AOTCodeEntry* entry) {
 610   if (entry->kind() == kind) {
 611     assert(entry->id() == id, "sanity");
 612     return true; // Found
 613   }
 614   return false;
 615 }
 616 
 617 AOTCodeEntry* AOTCodeCache::find_entry(AOTCodeEntry::Kind kind, uint id) {
 618   assert(_for_use, "sanity");
 619   uint count = _load_header->entries_count();
 620   if (_load_entries == nullptr) {
 621     // Read it
 622     _search_entries = (uint*)addr(_load_header->entries_offset()); // [id, index]
 623     _load_entries = (AOTCodeEntry*)(_search_entries + 2 * count);
 624     log_debug(aot, codecache, init)("Read %d entries table at offset %d from AOT Code Cache", count, _load_header->entries_offset());
 625   }
 626   // Binary search
 627   int l = 0;
 628   int h = count - 1;
 629   while (l <= h) {
 630     int mid = (l + h) >> 1;
 631     int ix = mid * 2;
 632     uint is = _search_entries[ix];
 633     if (is == id) {
 634       int index = _search_entries[ix + 1];
 635       AOTCodeEntry* entry = &(_load_entries[index]);
 636       if (check_entry(kind, id, entry)) {
 637         return entry; // Found
 638       }
      // Linear search around the match to handle id collisions
 640       for (int i = mid - 1; i >= l; i--) { // search back
 641         ix = i * 2;
 642         is = _search_entries[ix];
 643         if (is != id) {
 644           break;
 645         }
 646         index = _search_entries[ix + 1];
 647         AOTCodeEntry* entry = &(_load_entries[index]);
 648         if (check_entry(kind, id, entry)) {
 649           return entry; // Found
 650         }
 651       }
 652       for (int i = mid + 1; i <= h; i++) { // search forward
 653         ix = i * 2;
 654         is = _search_entries[ix];
 655         if (is != id) {
 656           break;
 657         }
 658         index = _search_entries[ix + 1];
 659         AOTCodeEntry* entry = &(_load_entries[index]);
 660         if (check_entry(kind, id, entry)) {
 661           return entry; // Found
 662         }
 663       }
      break; // No matching entry found
 665     } else if (is < id) {
 666       l = mid + 1;
 667     } else {
 668       h = mid - 1;
 669     }
 670   }
 671   return nullptr;
 672 }
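// A sketch of the search table consumed above: _search_entries holds (id, index)
// pairs sorted by id (see finish_write() below), for example:
//
//   pair position:       0      1      2      3
//   (id, index) pairs:  (3,5)  (7,0)  (7,2)  (9,1) ...
//
// The binary search lands on some pair with a matching id; the linear scans then
// walk the neighbouring pairs with the same id until the kind also matches.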
 673 
 674 extern "C" {
 675   static int uint_cmp(const void *i, const void *j) {
 676     uint a = *(uint *)i;
 677     uint b = *(uint *)j;
 678     return a > b ? 1 : a < b ? -1 : 0;
 679   }
 680 }
 681 
 682 bool AOTCodeCache::finish_write() {
 683   if (!align_write()) {
 684     return false;
 685   }
 686   uint strings_offset = _write_position;
 687   int strings_count = store_strings();
 688   if (strings_count < 0) {
 689     return false;
 690   }
 691   if (!align_write()) {
 692     return false;
 693   }
 694   uint strings_size = _write_position - strings_offset;
 695 
 696   uint entries_count = 0; // Number of entrant (useful) code entries
 697   uint entries_offset = _write_position;
 698 
 699   uint store_count = _store_entries_cnt;
 700   if (store_count > 0) {
 701     uint header_size = (uint)align_up(sizeof(AOTCodeCache::Header),  DATA_ALIGNMENT);
 702     uint code_count = store_count;
 703     uint search_count = code_count * 2;
 704     uint search_size = search_count * sizeof(uint);
 705     uint entries_size = (uint)align_up(code_count * sizeof(AOTCodeEntry), DATA_ALIGNMENT); // In bytes
 706     // _write_position includes size of code and strings
 707     uint code_alignment = code_count * DATA_ALIGNMENT; // We align_up code size when storing it.
 708     uint total_size = header_size + _write_position + code_alignment + search_size + entries_size;
 709     assert(total_size < max_aot_code_size(), "AOT Code size (" UINT32_FORMAT " bytes) is greater than AOTCodeMaxSize(" UINT32_FORMAT " bytes).", total_size, max_aot_code_size());
 710 
 711     // Create ordered search table for entries [id, index];
 712     uint* search = NEW_C_HEAP_ARRAY(uint, search_count, mtCode);
 713     // Allocate in AOT Cache buffer
 714     char* buffer = (char *)AOTCacheAccess::allocate_aot_code_region(total_size + DATA_ALIGNMENT);
 715     char* start = align_up(buffer, DATA_ALIGNMENT);
 716     char* current = start + header_size; // Skip header
 717 
 718     AOTCodeEntry* entries_address = _store_entries; // Pointer to latest entry
 719     uint adapters_count = 0;
 720     uint shared_blobs_count = 0;
 721     uint C1_blobs_count = 0;
 722     uint C2_blobs_count = 0;
 723     uint max_size = 0;
    // AOTCodeEntry entries were allocated in reverse order in the store buffer.
    // Process them in reverse so that the code stored first is cached first.
 726     for (int i = store_count - 1; i >= 0; i--) {
 727       entries_address[i].set_next(nullptr); // clear pointers before storing data
 728       uint size = align_up(entries_address[i].size(), DATA_ALIGNMENT);
 729       if (size > max_size) {
 730         max_size = size;
 731       }
 732       copy_bytes((_store_buffer + entries_address[i].offset()), (address)current, size);
 733       entries_address[i].set_offset(current - start); // New offset
 734       current += size;
 735       uint n = write_bytes(&(entries_address[i]), sizeof(AOTCodeEntry));
 736       if (n != sizeof(AOTCodeEntry)) {
 737         FREE_C_HEAP_ARRAY(uint, search);
 738         return false;
 739       }
 740       search[entries_count*2 + 0] = entries_address[i].id();
 741       search[entries_count*2 + 1] = entries_count;
 742       entries_count++;
 743       AOTCodeEntry::Kind kind = entries_address[i].kind();
 744       if (kind == AOTCodeEntry::Adapter) {
 745         adapters_count++;
 746       } else if (kind == AOTCodeEntry::SharedBlob) {
 747         shared_blobs_count++;
 748       } else if (kind == AOTCodeEntry::C1Blob) {
 749         C1_blobs_count++;
 750       } else if (kind == AOTCodeEntry::C2Blob) {
 751         C2_blobs_count++;
 752       }
 753     }
 754     if (entries_count == 0) {
      log_info(aot, codecache, exit)("AOT Code Cache was not created: no entries");
 756       FREE_C_HEAP_ARRAY(uint, search);
 757       return true; // Nothing to write
 758     }
 759     assert(entries_count <= store_count, "%d > %d", entries_count, store_count);
 760     // Write strings
 761     if (strings_count > 0) {
 762       copy_bytes((_store_buffer + strings_offset), (address)current, strings_size);
 763       strings_offset = (current - start); // New offset
 764       current += strings_size;
 765     }
 766 
 767     uint new_entries_offset = (current - start); // New offset
 768     // Sort and store search table
 769     qsort(search, entries_count, 2*sizeof(uint), uint_cmp);
 770     search_size = 2 * entries_count * sizeof(uint);
 771     copy_bytes((const char*)search, (address)current, search_size);
 772     FREE_C_HEAP_ARRAY(uint, search);
 773     current += search_size;
 774 
 775     // Write entries
 776     entries_size = entries_count * sizeof(AOTCodeEntry); // New size
 777     copy_bytes((_store_buffer + entries_offset), (address)current, entries_size);
 778     current += entries_size;
 779     uint size = (current - start);
 780     assert(size <= total_size, "%d > %d", size , total_size);
 781 
 782     log_debug(aot, codecache, exit)("  Adapters:  total=%u", adapters_count);
    log_debug(aot, codecache, exit)("  Shared Blobs:  total=%u", shared_blobs_count);
    log_debug(aot, codecache, exit)("  C1 Blobs:      total=%u", C1_blobs_count);
    log_debug(aot, codecache, exit)("  C2 Blobs:      total=%u", C2_blobs_count);
 786     log_debug(aot, codecache, exit)("  AOT code cache size: %u bytes, max entry's size: %u bytes", size, max_size);
 787 
 788     // Finalize header
 789     AOTCodeCache::Header* header = (AOTCodeCache::Header*)start;
 790     header->init(size, (uint)strings_count, strings_offset,
 791                  entries_count, new_entries_offset,
 792                  adapters_count, shared_blobs_count,
 793                  C1_blobs_count, C2_blobs_count);
 794 
 795     log_info(aot, codecache, exit)("Wrote %d AOT code entries to AOT Code Cache", entries_count);
 796   }
 797   return true;
 798 }
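// A rough sketch of the AOT code region layout produced by finish_write() above
// (offsets are relative to the aligned 'start' of the allocated region):
//
//   [ Header | copied code entries and their data | C strings |
//     sorted (id, index) search table | AOTCodeEntry[entries_count] ]
//
// The header records the counts and the new strings/entries offsets so that
// find_entry() and load_strings() can locate the tables once the region is mapped.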
 799 
 800 //------------------Store/Load AOT code ----------------------
 801 
 802 bool AOTCodeCache::store_code_blob(CodeBlob& blob, AOTCodeEntry::Kind entry_kind, uint id, const char* name) {
 803   AOTCodeCache* cache = open_for_dump();
 804   if (cache == nullptr) {
 805     return false;
 806   }
 807   assert(AOTCodeEntry::is_valid_entry_kind(entry_kind), "invalid entry_kind %d", entry_kind);
 808 
 809   if (AOTCodeEntry::is_adapter(entry_kind) && !is_dumping_adapter()) {
 810     return false;
 811   }
 812   if (AOTCodeEntry::is_blob(entry_kind) && !is_dumping_stub()) {
 813     return false;
 814   }
 815   log_debug(aot, codecache, stubs)("Writing blob '%s' (id=%u, kind=%s) to AOT Code Cache", name, id, aot_code_entry_kind_name[entry_kind]);
 816 
 817 #ifdef ASSERT
 818   LogStreamHandle(Trace, aot, codecache, stubs) log;
 819   if (log.is_enabled()) {
 820     FlagSetting fs(PrintRelocations, true);
 821     blob.print_on(&log);
 822   }
 823 #endif
  // We need to take a lock to prevent a race between compiler threads generating AOT code
  // and the main thread generating adapters.
 826   MutexLocker ml(Compile_lock);
 827   if (!is_on()) {
 828     return false; // AOT code cache was already dumped and closed.
 829   }
 830   if (!cache->align_write()) {
 831     return false;
 832   }
 833   uint entry_position = cache->_write_position;
 834 
 835   // Write name
 836   uint name_offset = cache->_write_position - entry_position;
  uint name_size = (uint)strlen(name) + 1; // Includes '\0'
 838   uint n = cache->write_bytes(name, name_size);
 839   if (n != name_size) {
 840     return false;
 841   }
 842 
 843   // Write CodeBlob
 844   if (!cache->align_write()) {
 845     return false;
 846   }
 847   uint blob_offset = cache->_write_position - entry_position;
 848   address archive_buffer = cache->reserve_bytes(blob.size());
 849   if (archive_buffer == nullptr) {
 850     return false;
 851   }
 852   CodeBlob::archive_blob(&blob, archive_buffer);
 853 
 854   uint reloc_data_size = blob.relocation_size();
 855   n = cache->write_bytes((address)blob.relocation_begin(), reloc_data_size);
 856   if (n != reloc_data_size) {
 857     return false;
 858   }
 859 
 860   bool has_oop_maps = false;
 861   if (blob.oop_maps() != nullptr) {
 862     if (!cache->write_oop_map_set(blob)) {
 863       return false;
 864     }
 865     has_oop_maps = true;
 866   }
 867 
 868 #ifndef PRODUCT
 869   // Write asm remarks
 870   if (!cache->write_asm_remarks(blob)) {
 871     return false;
 872   }
 873   if (!cache->write_dbg_strings(blob)) {
 874     return false;
 875   }
 876 #endif /* PRODUCT */
 877 
 878   if (!cache->write_relocations(blob)) {
 879     if (!cache->failed()) {
 880       // We may miss an address in AOT table - skip this code blob.
 881       cache->set_write_position(entry_position);
 882     }
 883     return false;
 884   }
 885 
 886   uint entry_size = cache->_write_position - entry_position;
 887   AOTCodeEntry* entry = new(cache) AOTCodeEntry(entry_kind, encode_id(entry_kind, id),
 888                                                 entry_position, entry_size, name_offset, name_size,
 889                                                 blob_offset, has_oop_maps, blob.content_begin());
 890   log_debug(aot, codecache, stubs)("Wrote code blob '%s' (id=%u, kind=%s) to AOT Code Cache", name, id, aot_code_entry_kind_name[entry_kind]);
 891   return true;
 892 }
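// A sketch of the per-entry layout written by store_code_blob() above and read
// back by AOTCodeReader::compile_code_blob() below (offsets relative to entry_position):
//
//   [ name + '\0' | padding | archived CodeBlob | relocation data |
//     oop maps (if present) | asm remarks and dbg strings (non-product builds) |
//     extra relocation ids ]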
 893 
 894 bool AOTCodeCache::store_code_blob(CodeBlob& blob, AOTCodeEntry::Kind entry_kind, BlobId id) {
 895   assert(AOTCodeEntry::is_blob(entry_kind),
 896          "wrong entry kind for blob id %s", StubInfo::name(id));
 897   return store_code_blob(blob, entry_kind, (uint)id, StubInfo::name(id));
 898 }
 899 
 900 CodeBlob* AOTCodeCache::load_code_blob(AOTCodeEntry::Kind entry_kind, uint id, const char* name) {
 901   AOTCodeCache* cache = open_for_use();
 902   if (cache == nullptr) {
 903     return nullptr;
 904   }
 905   assert(AOTCodeEntry::is_valid_entry_kind(entry_kind), "invalid entry_kind %d", entry_kind);
 906 
 907   if (AOTCodeEntry::is_adapter(entry_kind) && !is_using_adapter()) {
 908     return nullptr;
 909   }
 910   if (AOTCodeEntry::is_blob(entry_kind) && !is_using_stub()) {
 911     return nullptr;
 912   }
 913   log_debug(aot, codecache, stubs)("Reading blob '%s' (id=%u, kind=%s) from AOT Code Cache", name, id, aot_code_entry_kind_name[entry_kind]);
 914 
 915   AOTCodeEntry* entry = cache->find_entry(entry_kind, encode_id(entry_kind, id));
 916   if (entry == nullptr) {
 917     return nullptr;
 918   }
 919   AOTCodeReader reader(cache, entry);
 920   CodeBlob* blob = reader.compile_code_blob(name);
 921 
 922   log_debug(aot, codecache, stubs)("%sRead blob '%s' (id=%u, kind=%s) from AOT Code Cache",
 923                                    (blob == nullptr? "Failed to " : ""), name, id, aot_code_entry_kind_name[entry_kind]);
 924   return blob;
 925 }
 926 
 927 CodeBlob* AOTCodeCache::load_code_blob(AOTCodeEntry::Kind entry_kind, BlobId id) {
 928   assert(AOTCodeEntry::is_blob(entry_kind),
 929          "wrong entry kind for blob id %s", StubInfo::name(id));
 930   return load_code_blob(entry_kind, (uint)id, StubInfo::name(id));
 931 }
 932 
 933 CodeBlob* AOTCodeReader::compile_code_blob(const char* name) {
 934   uint entry_position = _entry->offset();
 935 
 936   // Read name
 937   uint name_offset = entry_position + _entry->name_offset();
  uint name_size = _entry->name_size(); // Includes '\0'
 939   const char* stored_name = addr(name_offset);
 940 
 941   if (strncmp(stored_name, name, (name_size - 1)) != 0) {
 942     log_warning(aot, codecache, stubs)("Saved blob's name '%s' is different from the expected name '%s'",
 943                                        stored_name, name);
 944     set_lookup_failed(); // Skip this blob
 945     return nullptr;
 946   }
 947 
 948   // Read archived code blob
 949   uint offset = entry_position + _entry->blob_offset();
 950   CodeBlob* archived_blob = (CodeBlob*)addr(offset);
 951   offset += archived_blob->size();
 952 
 953   address reloc_data = (address)addr(offset);
 954   offset += archived_blob->relocation_size();
 955   set_read_position(offset);
 956 
 957   ImmutableOopMapSet* oop_maps = nullptr;
 958   if (_entry->has_oop_maps()) {
 959     oop_maps = read_oop_map_set();
 960   }
 961 
 962   CodeBlob* code_blob = CodeBlob::create(archived_blob,
 963                                          stored_name,
 964                                          reloc_data,
 965                                          oop_maps
 966                                         );
 967   if (code_blob == nullptr) { // no space left in CodeCache
 968     return nullptr;
 969   }
 970 
 971 #ifndef PRODUCT
 972   code_blob->asm_remarks().init();
 973   read_asm_remarks(code_blob->asm_remarks());
 974   code_blob->dbg_strings().init();
 975   read_dbg_strings(code_blob->dbg_strings());
 976 #endif // PRODUCT
 977 
 978   fix_relocations(code_blob);
 979 
 980 #ifdef ASSERT
 981   LogStreamHandle(Trace, aot, codecache, stubs) log;
 982   if (log.is_enabled()) {
 983     FlagSetting fs(PrintRelocations, true);
 984     code_blob->print_on(&log);
 985   }
 986 #endif
 987   return code_blob;
 988 }
 989 
 990 // ------------ process code and data --------------
 991 
// Can't use -1. It is a valid value for a jump-to-itself destination
// used by the static call stub: see NativeJump::jump_destination().
 994 #define BAD_ADDRESS_ID -2
 995 
 996 bool AOTCodeCache::write_relocations(CodeBlob& code_blob) {
 997   GrowableArray<uint> reloc_data;
 998   RelocIterator iter(&code_blob);
 999   LogStreamHandle(Trace, aot, codecache, reloc) log;
1000   while (iter.next()) {
1001     int idx = reloc_data.append(0); // default value
1002     switch (iter.type()) {
1003       case relocInfo::none:
1004         break;
1005       case relocInfo::runtime_call_type: {
1006         // Record offset of runtime destination
1007         CallRelocation* r = (CallRelocation*)iter.reloc();
1008         address dest = r->destination();
        if (dest == r->addr()) { // possible call via a trampoline on AArch64
1010           dest = (address)-1;    // do nothing in this case when loading this relocation
1011         }
1012         int id = _table->id_for_address(dest, iter, &code_blob);
1013         if (id == BAD_ADDRESS_ID) {
1014           return false;
1015         }
1016         reloc_data.at_put(idx, id);
1017         break;
1018       }
1019       case relocInfo::runtime_call_w_cp_type:
1020         log_debug(aot, codecache, reloc)("runtime_call_w_cp_type relocation is not implemented");
1021         return false;
1022       case relocInfo::external_word_type: {
1023         // Record offset of runtime target
1024         address target = ((external_word_Relocation*)iter.reloc())->target();
1025         int id = _table->id_for_address(target, iter, &code_blob);
1026         if (id == BAD_ADDRESS_ID) {
1027           return false;
1028         }
1029         reloc_data.at_put(idx, id);
1030         break;
1031       }
1032       case relocInfo::internal_word_type:
1033         break;
1034       case relocInfo::section_word_type:
1035         break;
1036       case relocInfo::post_call_nop_type:
1037         break;
1038       default:
1039         log_debug(aot, codecache, reloc)("relocation %d unimplemented", (int)iter.type());
1040         return false;
1041         break;
1042     }
1043     if (log.is_enabled()) {
1044       iter.print_current_on(&log);
1045     }
1046   }
1047 
1048   // Write additional relocation data: uint per relocation
1049   // Write the count first
1050   int count = reloc_data.length();
1051   write_bytes(&count, sizeof(int));
1052   for (GrowableArrayIterator<uint> iter = reloc_data.begin();
1053        iter != reloc_data.end(); ++iter) {
1054     uint value = *iter;
1055     int n = write_bytes(&value, sizeof(uint));
1056     if (n != sizeof(uint)) {
1057       return false;
1058     }
1059   }
1060   return true;
1061 }
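// Format of the extra relocation data written above and consumed by
// AOTCodeReader::fix_relocations() below:
//
//   int  count;        // number of relocations visited by the RelocIterator
//   uint data[count];  // one value per relocation: 0 (default) or an AOT
//                      // address table id for the call or word target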
1062 
1063 void AOTCodeReader::fix_relocations(CodeBlob* code_blob) {
1064   LogStreamHandle(Trace, aot, reloc) log;
1065   uint offset = read_position();
1066   int count = *(int*)addr(offset);
1067   offset += sizeof(int);
1068   if (log.is_enabled()) {
1069     log.print_cr("======== extra relocations count=%d", count);
1070   }
1071   uint* reloc_data = (uint*)addr(offset);
1072   offset += (count * sizeof(uint));
1073   set_read_position(offset);
1074 
1075   RelocIterator iter(code_blob);
1076   int j = 0;
1077   while (iter.next()) {
1078     switch (iter.type()) {
1079       case relocInfo::none:
1080         break;
1081       case relocInfo::runtime_call_type: {
1082         address dest = _cache->address_for_id(reloc_data[j]);
1083         if (dest != (address)-1) {
1084           ((CallRelocation*)iter.reloc())->set_destination(dest);
1085         }
1086         break;
1087       }
1088       case relocInfo::runtime_call_w_cp_type:
1089         // this relocation should not be in cache (see write_relocations)
1090         assert(false, "runtime_call_w_cp_type relocation is not implemented");
1091         break;
1092       case relocInfo::external_word_type: {
1093         address target = _cache->address_for_id(reloc_data[j]);
1094         // Add external address to global table
1095         int index = ExternalsRecorder::find_index(target);
1096         // Update index in relocation
1097         Relocation::add_jint(iter.data(), index);
1098         external_word_Relocation* reloc = (external_word_Relocation*)iter.reloc();
1099         assert(reloc->target() == target, "sanity");
1100         reloc->set_value(target); // Patch address in the code
1101         break;
1102       }
1103       case relocInfo::internal_word_type: {
1104         internal_word_Relocation* r = (internal_word_Relocation*)iter.reloc();
1105         r->fix_relocation_after_aot_load(aot_code_entry()->dumptime_content_start_addr(), code_blob->content_begin());
1106         break;
1107       }
1108       case relocInfo::section_word_type: {
1109         section_word_Relocation* r = (section_word_Relocation*)iter.reloc();
1110         r->fix_relocation_after_aot_load(aot_code_entry()->dumptime_content_start_addr(), code_blob->content_begin());
1111         break;
1112       }
1113       case relocInfo::post_call_nop_type:
1114         break;
1115       default:
1116         assert(false,"relocation %d unimplemented", (int)iter.type());
1117         break;
1118     }
1119     if (log.is_enabled()) {
1120       iter.print_current_on(&log);
1121     }
1122     j++;
1123   }
1124   assert(j == count, "sanity");
1125 }
1126 
1127 bool AOTCodeCache::write_oop_map_set(CodeBlob& cb) {
1128   ImmutableOopMapSet* oopmaps = cb.oop_maps();
1129   int oopmaps_size = oopmaps->nr_of_bytes();
1130   if (!write_bytes(&oopmaps_size, sizeof(int))) {
1131     return false;
1132   }
1133   uint n = write_bytes(oopmaps, oopmaps->nr_of_bytes());
1134   if (n != (uint)oopmaps->nr_of_bytes()) {
1135     return false;
1136   }
1137   return true;
1138 }
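// Format of the oop map data written above and mapped in place by
// AOTCodeReader::read_oop_map_set() below:
//
//   int  size;             // ImmutableOopMapSet::nr_of_bytes()
//   char oopmaps[size];    // the raw ImmutableOopMapSet contents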
1139 
1140 ImmutableOopMapSet* AOTCodeReader::read_oop_map_set() {
1141   uint offset = read_position();
1142   int size = *(int *)addr(offset);
1143   offset += sizeof(int);
1144   ImmutableOopMapSet* oopmaps = (ImmutableOopMapSet *)addr(offset);
1145   offset += size;
1146   set_read_position(offset);
1147   return oopmaps;
1148 }
1149 
1150 #ifndef PRODUCT
1151 bool AOTCodeCache::write_asm_remarks(CodeBlob& cb) {
1152   // Write asm remarks
1153   uint* count_ptr = (uint *)reserve_bytes(sizeof(uint));
1154   if (count_ptr == nullptr) {
1155     return false;
1156   }
1157   uint count = 0;
1158   bool result = cb.asm_remarks().iterate([&] (uint offset, const char* str) -> bool {
1159     log_trace(aot, codecache, stubs)("asm remark offset=%d, str='%s'", offset, str);
1160     uint n = write_bytes(&offset, sizeof(uint));
1161     if (n != sizeof(uint)) {
1162       return false;
1163     }
1164     const char* cstr = add_C_string(str);
1165     int id = _table->id_for_C_string((address)cstr);
1166     assert(id != -1, "asm remark string '%s' not found in AOTCodeAddressTable", str);
1167     n = write_bytes(&id, sizeof(int));
1168     if (n != sizeof(int)) {
1169       return false;
1170     }
1171     count += 1;
1172     return true;
1173   });
1174   *count_ptr = count;
1175   return result;
1176 }
1177 
1178 void AOTCodeReader::read_asm_remarks(AsmRemarks& asm_remarks) {
1179   // Read asm remarks
1180   uint offset = read_position();
1181   uint count = *(uint *)addr(offset);
1182   offset += sizeof(uint);
1183   for (uint i = 0; i < count; i++) {
1184     uint remark_offset = *(uint *)addr(offset);
1185     offset += sizeof(uint);
1186     int remark_string_id = *(uint *)addr(offset);
1187     offset += sizeof(int);
1188     const char* remark = (const char*)_cache->address_for_C_string(remark_string_id);
1189     asm_remarks.insert(remark_offset, remark);
1190   }
1191   set_read_position(offset);
1192 }
1193 
1194 bool AOTCodeCache::write_dbg_strings(CodeBlob& cb) {
1195   // Write dbg strings
1196   uint* count_ptr = (uint *)reserve_bytes(sizeof(uint));
1197   if (count_ptr == nullptr) {
1198     return false;
1199   }
1200   uint count = 0;
1201   bool result = cb.dbg_strings().iterate([&] (const char* str) -> bool {
1202     log_trace(aot, codecache, stubs)("dbg string=%s", str);
1203     const char* cstr = add_C_string(str);
1204     int id = _table->id_for_C_string((address)cstr);
    assert(id != -1, "dbg string '%s' not found in AOTCodeAddressTable", str);
1206     uint n = write_bytes(&id, sizeof(int));
1207     if (n != sizeof(int)) {
1208       return false;
1209     }
1210     count += 1;
1211     return true;
1212   });
1213   *count_ptr = count;
1214   return result;
1215 }
1216 
1217 void AOTCodeReader::read_dbg_strings(DbgStrings& dbg_strings) {
1218   // Read dbg strings
1219   uint offset = read_position();
1220   uint count = *(uint *)addr(offset);
1221   offset += sizeof(uint);
1222   for (uint i = 0; i < count; i++) {
1223     int string_id = *(uint *)addr(offset);
1224     offset += sizeof(int);
1225     const char* str = (const char*)_cache->address_for_C_string(string_id);
1226     dbg_strings.insert(str);
1227   }
1228   set_read_position(offset);
1229 }
1230 #endif // PRODUCT
1231 
1232 //======================= AOTCodeAddressTable ===============
1233 
// Address table ids for generated routines, external addresses and C
// string addresses are partitioned into positive integer ranges
// defined by the following base and max values, i.e.
//      [_extrs_base, _extrs_base + _extrs_max - 1],
//      [_stubs_base, _stubs_base + _stubs_max - 1],
//      ...
//      [_c_str_base, _c_str_base + _c_str_max - 1]
1241 
1242 #define _extrs_max 100
1243 #define _stubs_max 3
1244 
1245 #define _shared_blobs_max 20
1246 #define _C1_blobs_max 10
1247 #define _blobs_max (_shared_blobs_max+_C1_blobs_max)
1248 #define _all_max (_extrs_max+_stubs_max+_blobs_max)
1249 
1250 #define _extrs_base 0
1251 #define _stubs_base (_extrs_base + _extrs_max)
1252 #define _shared_blobs_base (_stubs_base + _stubs_max)
1253 #define _C1_blobs_base (_shared_blobs_base + _shared_blobs_max)
1254 #define _blobs_end  (_shared_blobs_base + _blobs_max)
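// With the values above, the id ranges work out to:
//
//   _extrs         [  0,  99]
//   _stubs         [100, 102]
//   _shared_blobs  [103, 122]
//   _C1_blobs      [123, 132]
//   C strings      [133, 133 + _c_str_max - 1]   (_c_str_base == _all_max == 133, defined further below)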
1255 
1256 #define SET_ADDRESS(type, addr)                           \
1257   {                                                       \
1258     type##_addr[type##_length++] = (address) (addr);      \
1259     assert(type##_length <= type##_max, "increase size"); \
1260   }
1261 
1262 static bool initializing_extrs = false;
1263 
1264 void AOTCodeAddressTable::init_extrs() {
1265   if (_extrs_complete || initializing_extrs) return; // Done already
1266 
1267   assert(_blobs_end <= _all_max, "AOTCodeAddress table ranges need adjusting");
1268 
1269   initializing_extrs = true;
1270   _extrs_addr = NEW_C_HEAP_ARRAY(address, _extrs_max, mtCode);
1271 
1272   _extrs_length = 0;
1273 
1274   // Record addresses of VM runtime methods
1275   SET_ADDRESS(_extrs, SharedRuntime::fixup_callers_callsite);
1276   SET_ADDRESS(_extrs, SharedRuntime::handle_wrong_method);
1277   SET_ADDRESS(_extrs, SharedRuntime::handle_wrong_method_abstract);
1278   SET_ADDRESS(_extrs, SharedRuntime::handle_wrong_method_ic_miss);
1279 #if defined(AARCH64) && !defined(ZERO)
1280   SET_ADDRESS(_extrs, JavaThread::aarch64_get_thread_helper);
1281 #endif
1282   {
1283     // Required by Shared blobs
1284     SET_ADDRESS(_extrs, Deoptimization::fetch_unroll_info);
1285     SET_ADDRESS(_extrs, Deoptimization::unpack_frames);
1286     SET_ADDRESS(_extrs, SafepointSynchronize::handle_polling_page_exception);
1287     SET_ADDRESS(_extrs, SharedRuntime::resolve_opt_virtual_call_C);
1288     SET_ADDRESS(_extrs, SharedRuntime::resolve_virtual_call_C);
1289     SET_ADDRESS(_extrs, SharedRuntime::resolve_static_call_C);
1290     SET_ADDRESS(_extrs, SharedRuntime::throw_StackOverflowError);
1291     SET_ADDRESS(_extrs, SharedRuntime::throw_delayed_StackOverflowError);
1292     SET_ADDRESS(_extrs, SharedRuntime::throw_AbstractMethodError);
1293     SET_ADDRESS(_extrs, SharedRuntime::throw_IncompatibleClassChangeError);
1294     SET_ADDRESS(_extrs, SharedRuntime::throw_NullPointerException_at_call);
1295   }
1296 
1297 #ifdef COMPILER1
1298   {
1299     // Required by C1 blobs
1300     SET_ADDRESS(_extrs, static_cast<int (*)(oopDesc*)>(SharedRuntime::dtrace_object_alloc));
1301     SET_ADDRESS(_extrs, SharedRuntime::exception_handler_for_return_address);
1302     SET_ADDRESS(_extrs, SharedRuntime::register_finalizer);
1303     SET_ADDRESS(_extrs, Runtime1::is_instance_of);
1304     SET_ADDRESS(_extrs, Runtime1::exception_handler_for_pc);
1305     SET_ADDRESS(_extrs, Runtime1::check_abort_on_vm_exception);
1306     SET_ADDRESS(_extrs, Runtime1::new_instance);
1307     SET_ADDRESS(_extrs, Runtime1::counter_overflow);
1308     SET_ADDRESS(_extrs, Runtime1::new_type_array);
1309     SET_ADDRESS(_extrs, Runtime1::new_object_array);
1310     SET_ADDRESS(_extrs, Runtime1::new_multi_array);
1311     SET_ADDRESS(_extrs, Runtime1::throw_range_check_exception);
1312     SET_ADDRESS(_extrs, Runtime1::throw_index_exception);
1313     SET_ADDRESS(_extrs, Runtime1::throw_div0_exception);
1314     SET_ADDRESS(_extrs, Runtime1::throw_null_pointer_exception);
1315     SET_ADDRESS(_extrs, Runtime1::throw_array_store_exception);
1316     SET_ADDRESS(_extrs, Runtime1::throw_class_cast_exception);
1317     SET_ADDRESS(_extrs, Runtime1::throw_incompatible_class_change_error);
1318     SET_ADDRESS(_extrs, Runtime1::is_instance_of);
1319     SET_ADDRESS(_extrs, Runtime1::monitorenter);
1320     SET_ADDRESS(_extrs, Runtime1::monitorexit);
1321     SET_ADDRESS(_extrs, Runtime1::deoptimize);
1322     SET_ADDRESS(_extrs, Runtime1::access_field_patching);
1323     SET_ADDRESS(_extrs, Runtime1::move_klass_patching);
1324     SET_ADDRESS(_extrs, Runtime1::move_mirror_patching);
1325     SET_ADDRESS(_extrs, Runtime1::move_appendix_patching);
1326     SET_ADDRESS(_extrs, Runtime1::predicate_failed_trap);
1327     SET_ADDRESS(_extrs, Runtime1::unimplemented_entry);
1328     SET_ADDRESS(_extrs, Thread::current);
1329     SET_ADDRESS(_extrs, CompressedKlassPointers::base_addr());
1330 #ifndef PRODUCT
1331     SET_ADDRESS(_extrs, os::breakpoint);
1332 #endif
1333   }
1334 #endif
1335 
1336 #ifdef COMPILER2
1337   {
1338     // Required by C2 blobs
1339     SET_ADDRESS(_extrs, Deoptimization::uncommon_trap);
1340     SET_ADDRESS(_extrs, OptoRuntime::handle_exception_C);
1341     SET_ADDRESS(_extrs, OptoRuntime::new_instance_C);
1342     SET_ADDRESS(_extrs, OptoRuntime::new_array_C);
1343     SET_ADDRESS(_extrs, OptoRuntime::new_array_nozero_C);
1344     SET_ADDRESS(_extrs, OptoRuntime::multianewarray2_C);
1345     SET_ADDRESS(_extrs, OptoRuntime::multianewarray3_C);
1346     SET_ADDRESS(_extrs, OptoRuntime::multianewarray4_C);
1347     SET_ADDRESS(_extrs, OptoRuntime::multianewarray5_C);
1348     SET_ADDRESS(_extrs, OptoRuntime::multianewarrayN_C);
1349 #if INCLUDE_JVMTI
1350     SET_ADDRESS(_extrs, SharedRuntime::notify_jvmti_vthread_start);
1351     SET_ADDRESS(_extrs, SharedRuntime::notify_jvmti_vthread_end);
1352     SET_ADDRESS(_extrs, SharedRuntime::notify_jvmti_vthread_mount);
1353     SET_ADDRESS(_extrs, SharedRuntime::notify_jvmti_vthread_unmount);
1354 #endif
1355     SET_ADDRESS(_extrs, OptoRuntime::complete_monitor_locking_C);
1356     SET_ADDRESS(_extrs, OptoRuntime::monitor_notify_C);
1357     SET_ADDRESS(_extrs, OptoRuntime::monitor_notifyAll_C);
1358     SET_ADDRESS(_extrs, OptoRuntime::rethrow_C);
1359     SET_ADDRESS(_extrs, OptoRuntime::slow_arraycopy_C);
1360     SET_ADDRESS(_extrs, OptoRuntime::register_finalizer_C);
1361 #if defined(AARCH64)
1362     SET_ADDRESS(_extrs, JavaThread::verify_cross_modify_fence_failure);
1363 #endif // AARCH64
1364   }
1365 #endif // COMPILER2
1366 
1367 #if INCLUDE_G1GC
1368   SET_ADDRESS(_extrs, G1BarrierSetRuntime::write_ref_field_post_entry);
1369   SET_ADDRESS(_extrs, G1BarrierSetRuntime::write_ref_field_pre_entry);
1370 #endif
1371 #if INCLUDE_SHENANDOAHGC
1372   SET_ADDRESS(_extrs, ShenandoahRuntime::write_barrier_pre);
1373   SET_ADDRESS(_extrs, ShenandoahRuntime::load_reference_barrier_phantom);
1374   SET_ADDRESS(_extrs, ShenandoahRuntime::load_reference_barrier_phantom_narrow);
1375 #endif
1376 #if INCLUDE_ZGC
1377   SET_ADDRESS(_extrs, ZBarrierSetRuntime::load_barrier_on_phantom_oop_field_preloaded_addr());
1378 #if defined(AMD64)
1379   SET_ADDRESS(_extrs, &ZPointerLoadShift);
1380 #endif
1381 #endif
1382 #ifndef ZERO
1383 #if defined(AMD64) || defined(AARCH64) || defined(RISCV64)
1384   SET_ADDRESS(_extrs, MacroAssembler::debug64);
1385 #endif
1386 #endif // ZERO
1387 
1388   _extrs_complete = true;
1389   log_debug(aot, codecache, init)("External addresses recorded");
1390 }
1391 
1392 static bool initializing_early_stubs = false;
1393 
1394 void AOTCodeAddressTable::init_early_stubs() {
1395   if (_complete || initializing_early_stubs) return; // Done already
1396   initializing_early_stubs = true;
1397   _stubs_addr = NEW_C_HEAP_ARRAY(address, _stubs_max, mtCode);
1398   _stubs_length = 0;
1399   SET_ADDRESS(_stubs, StubRoutines::forward_exception_entry());
1400 
1401   {
1402     // Required by C1 blobs
1403 #if defined(AMD64) && !defined(ZERO)
1404     SET_ADDRESS(_stubs, StubRoutines::x86::double_sign_flip());
1405     SET_ADDRESS(_stubs, StubRoutines::x86::d2l_fixup());
#endif // AMD64 && !ZERO
1407   }
1408 
1409   _early_stubs_complete = true;
1410   log_info(aot, codecache, init)("Early stubs recorded");
1411 }
1412 
1413 static bool initializing_shared_blobs = false;
1414 
1415 void AOTCodeAddressTable::init_shared_blobs() {
1416   if (_complete || initializing_shared_blobs) return; // Done already
1417   initializing_shared_blobs = true;
1418   address* blobs_addr = NEW_C_HEAP_ARRAY(address, _blobs_max, mtCode);
1419 
1420   // Divide the blobs_addr array into chunks because they can be initialized in parallel
1421   _shared_blobs_addr = blobs_addr;
1422   _C1_blobs_addr = _shared_blobs_addr + _shared_blobs_max;
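       // Layout of the blobs_addr allocation: slots [0, _shared_blobs_max) hold the
       // shared runtime blobs, the remaining [_shared_blobs_max, _blobs_max) slots
       // are reserved for the C1 blobs recorded later by init_early_c1().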
1423 
1424   _shared_blobs_length = 0;
1425   _C1_blobs_length = 0;
1426 
1427   // Clear the address table
1428   memset(blobs_addr, 0, sizeof(address) * _blobs_max);
1429 
1430   // Record addresses of generated code blobs
1431   SET_ADDRESS(_shared_blobs, SharedRuntime::get_handle_wrong_method_stub());
1432   SET_ADDRESS(_shared_blobs, SharedRuntime::get_ic_miss_stub());
1433   SET_ADDRESS(_shared_blobs, SharedRuntime::deopt_blob()->unpack());
1434   SET_ADDRESS(_shared_blobs, SharedRuntime::deopt_blob()->unpack_with_exception());
1435   SET_ADDRESS(_shared_blobs, SharedRuntime::deopt_blob()->unpack_with_reexecution());
1436   SET_ADDRESS(_shared_blobs, SharedRuntime::deopt_blob()->unpack_with_exception_in_tls());
1437 #if INCLUDE_JVMCI
1438   if (EnableJVMCI) {
1439     SET_ADDRESS(_shared_blobs, SharedRuntime::deopt_blob()->uncommon_trap());
1440     SET_ADDRESS(_shared_blobs, SharedRuntime::deopt_blob()->implicit_exception_uncommon_trap());
1441   }
1442 #endif
1443 
1444   _shared_blobs_complete = true;
1445   log_debug(aot, codecache, init)("Early shared blobs recorded");
1446   _complete = true;
1447 }
1448 
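     // Record entry points of the C1 runtime blobs generated so far, from the first
     // C1 stub up to and including the forward-exception blob; blobs that are not
     // generated yet (or have no entry) are skipped and logged.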
1449 void AOTCodeAddressTable::init_early_c1() {
1450 #ifdef COMPILER1
1451   // Runtime1 Blobs
1452   StubId id = StubInfo::stub_base(StubGroup::C1);
1453   // Include forward_exception in the range we publish
1454   StubId limit = StubInfo::next(StubId::c1_forward_exception_id);
1455   for (; id != limit; id = StubInfo::next(id)) {
1456     if (Runtime1::blob_for(id) == nullptr) {
1457       log_info(aot, codecache, init)("C1 blob %s is missing", Runtime1::name_for(id));
1458       continue;
1459     }
1460     if (Runtime1::entry_for(id) == nullptr) {
1461       log_info(aot, codecache, init)("C1 blob %s is missing entry", Runtime1::name_for(id));
1462       continue;
1463     }
1464     address entry = Runtime1::entry_for(id);
1465     SET_ADDRESS(_C1_blobs, entry);
1466   }
1467 #endif // COMPILER1
1468   assert(_C1_blobs_length <= _C1_blobs_max, "increase _C1_blobs_max to %d", _C1_blobs_length);
1469   _early_c1_complete = true;
1470 }
1471 
1472 #undef SET_ADDRESS
1473 
1474 AOTCodeAddressTable::~AOTCodeAddressTable() {
1475   if (_extrs_addr != nullptr) {
1476     FREE_C_HEAP_ARRAY(address, _extrs_addr);
1477   }
1478   if (_stubs_addr != nullptr) {
1479     FREE_C_HEAP_ARRAY(address, _stubs_addr);
1480   }
1481   if (_shared_blobs_addr != nullptr) {
1482     FREE_C_HEAP_ARRAY(address, _shared_blobs_addr);
1483   }
1484 }
1485 
1486 #ifdef PRODUCT
1487 #define MAX_STR_COUNT 200
1488 #else
1489 #define MAX_STR_COUNT 500
1490 #endif
1491 #define _c_str_max  MAX_STR_COUNT
1492 static const int _c_str_base = _all_max;
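     // Address table ids below _all_max identify VM addresses recorded above (external
     // addresses, stubs, shared and C1 blobs). The next _c_str_max ids, starting at
     // _c_str_base, identify cached C strings. Anything larger is decoded as a byte
     // offset relative to os::init (see address_for_id() and id_for_address()).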
1493 
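     // Bookkeeping for cached C strings:
     //   _C_strings_in[i] - the address originally passed to add_C_string() (identity compare)
     //   _C_strings[i]    - our duplicated copy which is actually stored and handed out
     //   _C_strings_id[i] - persistent id assigned to slot i, or -1 if not referenced yet
     //   _C_strings_s[id] - reverse map from a persistent id back to its slot index
     //   _C_strings_used  - number of persistent ids assigned so far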
1494 static const char* _C_strings_in[MAX_STR_COUNT] = {nullptr}; // Incoming strings
1495 static const char* _C_strings[MAX_STR_COUNT]    = {nullptr}; // Our duplicates
1496 static int _C_strings_count = 0;
1497 static int _C_strings_s[MAX_STR_COUNT] = {0};
1498 static int _C_strings_id[MAX_STR_COUNT] = {0};
1499 static int _C_strings_used = 0;
1500 
1501 void AOTCodeCache::load_strings() {
1502   uint strings_count  = _load_header->strings_count();
1503   if (strings_count == 0) {
1504     return;
1505   }
1506   uint strings_offset = _load_header->strings_offset();
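       // Layout of the strings section in the cache: an array of strings_count uint
       // lengths (each including the terminating '\0'), followed immediately by the
       // concatenated string bodies.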
1507   uint* string_lengths = (uint*)addr(strings_offset);
1508   strings_offset += (strings_count * sizeof(uint));
1509   uint strings_size = _load_header->entries_offset() - strings_offset;
1510   // We have to keep the cached strings longer than the _cache buffer
1511   // because they are referenced from compiled code which may
1512   // still be executed on VM exit after _cache is freed.
1513   char* p = NEW_C_HEAP_ARRAY(char, strings_size+1, mtCode);
1514   memcpy(p, addr(strings_offset), strings_size);
1515   _C_strings_buf = p;
1516   assert(strings_count <= MAX_STR_COUNT, "sanity");
1517   for (uint i = 0; i < strings_count; i++) {
1518     _C_strings[i] = p;
1519     uint len = string_lengths[i];
1520     _C_strings_s[i] = i;
1521     _C_strings_id[i] = i;
1522     p += len;
1523   }
1524   assert((uint)(p - _C_strings_buf) <= strings_size, "(" INTPTR_FORMAT " - " INTPTR_FORMAT ") = %d > %d ", p2i(p), p2i(_C_strings_buf), (uint)(p - _C_strings_buf), strings_size);
1525   _C_strings_count = strings_count;
1526   _C_strings_used  = strings_count;
1527   log_debug(aot, codecache, init)("  Loaded %d C strings of total length %d at offset %d from AOT Code Cache", _C_strings_count, strings_size, strings_offset);
1528 }
1529 
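     // Write out only the C strings that received a persistent id (i.e. were actually
     // referenced from cached code), in id order, so that after loading the same id
     // resolves to the same string via address_for_id().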
1530 int AOTCodeCache::store_strings() {
1531   if (_C_strings_used > 0) {
1532     MutexLocker ml(AOTCodeCStrings_lock, Mutex::_no_safepoint_check_flag);
1533     uint offset = _write_position;
1534     uint length = 0;
1535     uint* lengths = (uint*)reserve_bytes(sizeof(uint) * _C_strings_used);
1536     if (lengths == nullptr) {
1537       return -1;
1538     }
1539     for (int i = 0; i < _C_strings_used; i++) {
1540       const char* str = _C_strings[_C_strings_s[i]];
1541       uint len = (uint)strlen(str) + 1;
1542       length += len;
1543       assert(len < 1000, "big string: %s", str);
1544       lengths[i] = len;
1545       uint n = write_bytes(str, len);
1546       if (n != len) {
1547         return -1;
1548       }
1549     }
1550     log_debug(aot, codecache, exit)("  Wrote %d C strings of total length %d at offset %d to AOT Code Cache",
1551                                    _C_strings_used, length, offset);
1552   }
1553   return _C_strings_used;
1554 }
1555 
1556 const char* AOTCodeCache::add_C_string(const char* str) {
1557   if (is_on_for_dump() && str != nullptr) {
1558     MutexLocker ml(AOTCodeCStrings_lock, Mutex::_no_safepoint_check_flag);
1559     AOTCodeAddressTable* table = addr_table();
1560     if (table != nullptr) {
1561       return table->add_C_string(str);
1562     }
1563   }
1564   return str;
1565 }
1566 
1567 const char* AOTCodeAddressTable::add_C_string(const char* str) {
1568   if (_extrs_complete) {
1569     // Check whether the string was recorded before
1570     for (int i = 0; i < _C_strings_count; i++) {
1571       if (_C_strings_in[i] == str) {
1572         return _C_strings[i]; // Found previous one - return our duplicate
1573       } else if (strcmp(_C_strings[i], str) == 0) {
1574         return _C_strings[i];
1575       }
1576     }
1577     // Add new one
1578     if (_C_strings_count < MAX_STR_COUNT) {
1579       // The passed-in string can be freed and its space become inaccessible.
1580       // Keep the original address but duplicate the string for future comparisons.
1581       _C_strings_id[_C_strings_count] = -1; // Init
1582       _C_strings_in[_C_strings_count] = str;
1583       const char* dup = os::strdup(str);
1584       _C_strings[_C_strings_count++] = dup;
1585       log_trace(aot, codecache, stringtable)("add_C_string: [%d] " INTPTR_FORMAT " '%s'", _C_strings_count, p2i(dup), dup);
1586       return dup;
1587     } else {
1588       assert(false, "Number of C strings >= MAX_STR_COUNT");
1589     }
1590   }
1591   return str;
1592 }
1593 
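     // Return the persistent id for a previously recorded C string, assigning a new
     // id lazily on first use; returns -1 if the address is not a recorded string.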
1594 int AOTCodeAddressTable::id_for_C_string(address str) {
1595   if (str == nullptr) {
1596     return -1;
1597   }
1598   MutexLocker ml(AOTCodeCStrings_lock, Mutex::_no_safepoint_check_flag);
1599   for (int i = 0; i < _C_strings_count; i++) {
1600     if (_C_strings[i] == (const char*)str) { // found
1601       int id = _C_strings_id[i];
1602       if (id >= 0) {
1603         assert(id < _C_strings_used, "%d >= %d", id, _C_strings_used);
1604         return id; // Found recorded
1605       }
1606       // Not recorded yet - assign a new id
1607       id = _C_strings_used++;
1608       _C_strings_s[id] = i;
1609       _C_strings_id[i] = id;
1610       return id;
1611     }
1612   }
1613   return -1;
1614 }
1615 
1616 address AOTCodeAddressTable::address_for_C_string(int idx) {
1617   assert(idx < _C_strings_count, "sanity");
1618   return (address)_C_strings[idx];
1619 }
1620 
1621 static int search_address(address addr, address* table, uint length) {
1622   for (int i = 0; i < (int)length; i++) {
1623     if (table[i] == addr) {
1624       return i;
1625     }
1626   }
1627   return BAD_ADDRESS_ID;
1628 }
1629 
1630 address AOTCodeAddressTable::address_for_id(int idx) {
1631   assert(_extrs_complete, "AOT Code Cache VM runtime addresses table is not complete");
1632   if (idx == -1) {
1633     return (address)-1;
1634   }
1635   uint id = (uint)idx;
1636   // Special case: ids above the address table range encode symbols relative to os::init
1637   if (id > (_c_str_base + _c_str_max)) {
1638     return (address)os::init + idx;
1639   }
1640   if (idx < 0) {
1641     fatal("Incorrect id %d for AOT Code Cache addresses table", id);
1642     return nullptr;
1643   }
1644   // no need to compare unsigned id against 0
1645   if (/* id >= _extrs_base && */ id < _extrs_length) {
1646     return _extrs_addr[id - _extrs_base];
1647   }
1648   if (id >= _stubs_base && id < _stubs_base + _stubs_length) {
1649     return _stubs_addr[id - _stubs_base];
1650   }
1651   if (id >= _shared_blobs_base && id < _shared_blobs_base + _shared_blobs_length) {
1652     return _shared_blobs_addr[id - _shared_blobs_base];
1653   }
1654   if (id >= _C1_blobs_base && id < _C1_blobs_base + _C1_blobs_length) {
1655     return _C1_blobs_addr[id - _C1_blobs_base];
1656   }
1657   if (id >= _c_str_base && id < (_c_str_base + (uint)_C_strings_count)) {
1658     return address_for_C_string(id - _c_str_base);
1659   }
1660   fatal("Incorrect id %d for AOT Code Cache addresses table", id);
1661   return nullptr;
1662 }
1663 
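     // Map an address referenced from generated code to its table id. The search
     // order is: C strings, then stubs, then code blobs, then external runtime
     // entries. As a last resort, an address that resolves to a native symbol at a
     // non-zero offset (typically a C string constant in the VM image) is encoded
     // as its distance from os::init.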
1664 int AOTCodeAddressTable::id_for_address(address addr, RelocIterator reloc, CodeBlob* code_blob) {
1665   assert(_extrs_complete, "AOT Code Cache VM runtime addresses table is not complete");
1666   int id = -1;
1667   if (addr == (address)-1) { // Static call stub has jump to itself
1668     return id;
1669   }
1670   // Search for C string
1671   id = id_for_C_string(addr);
1672   if (id >= 0) {
1673     return id + _c_str_base;
1674   }
1675   if (StubRoutines::contains(addr)) {
1676     // Search in stubs
1677     id = search_address(addr, _stubs_addr, _stubs_length);
1678     if (id < 0) {
1679       StubCodeDesc* desc = StubCodeDesc::desc_for(addr);
1680       if (desc == nullptr) {
1681         desc = StubCodeDesc::desc_for(addr + frame::pc_return_offset);
1682       }
1683       const char* stub_name = (desc != nullptr) ? desc->name() : "<unknown>";
1684       assert(false, "Address " INTPTR_FORMAT " for Stub:%s is missing in AOT Code Cache addresses table", p2i(addr), stub_name);
1685     } else {
1686       return id + _stubs_base;
1687     }
1688   } else {
1689     CodeBlob* cb = CodeCache::find_blob(addr);
1690     if (cb != nullptr) {
1691       // Search in code blobs
1692       int id_base = _shared_blobs_base;
1693       id = search_address(addr, _shared_blobs_addr, _blobs_max);
1694       if (id < 0) {
1695         assert(false, "Address " INTPTR_FORMAT " for Blob:%s is missing in AOT Code Cache addresses table", p2i(addr), cb->name());
1696       } else {
1697         return id_base + id;
1698       }
1699     } else {
1700       // Search in runtime functions
1701       id = search_address(addr, _extrs_addr, _extrs_length);
1702       if (id < 0) {
1703         ResourceMark rm;
1704         const int buflen = 1024;
1705         char* func_name = NEW_RESOURCE_ARRAY(char, buflen);
1706         int offset = 0;
1707         if (os::dll_address_to_function_name(addr, func_name, buflen, &offset)) {
1708           if (offset > 0) {
1709             // Could be the address of a C string
1710             uint dist = (uint)pointer_delta(addr, (address)os::init, 1);
1711             log_debug(aot, codecache)("Address " INTPTR_FORMAT " (offset %d) for runtime target '%s' is missing in AOT Code Cache addresses table",
1712                                       p2i(addr), dist, (const char*)addr);
1713             assert(dist > (uint)(_all_max + MAX_STR_COUNT), "change encoding of distance");
1714             return dist;
1715           }
1716 #ifdef ASSERT
1717           reloc.print_current_on(tty);
1718           code_blob->print_on(tty);
1719           code_blob->print_code_on(tty);
1720           assert(false, "Address " INTPTR_FORMAT " for runtime target '%s+%d' is missing in AOT Code Cache addresses table", p2i(addr), func_name, offset);
1721 #endif
1722         } else {
1723 #ifdef ASSERT
1724           reloc.print_current_on(tty);
1725           code_blob->print_on(tty);
1726           code_blob->print_code_on(tty);
1727           os::find(addr, tty);
1728           assert(false, "Address " INTPTR_FORMAT " for <unknown>/('%s') is missing in AOT Code Cache addresses table", p2i(addr), (const char*)addr);
1729 #endif
1730         }
1731       } else {
1732         return _extrs_base + id;
1733       }
1734     }
1735   }
1736   return id;
1737 }
1738 
1739 // This is called after initialize() but before init2()
1740 // and _cache is not set yet.
1741 void AOTCodeCache::print_on(outputStream* st) {
1742   if (opened_cache != nullptr && opened_cache->for_use()) {
1743     st->print_cr("\nAOT Code Cache");
1744     uint count = opened_cache->_load_header->entries_count();
1745     uint* search_entries = (uint*)opened_cache->addr(opened_cache->_load_header->entries_offset()); // [id, index]
1746     AOTCodeEntry* load_entries = (AOTCodeEntry*)(search_entries + 2 * count);
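         // search_entries holds (id, index) pairs; each index selects the
         // corresponding AOTCodeEntry in the load_entries array.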
1747 
1748     for (uint i = 0; i < count; i++) {
1749       // Use search_entries[] to order output
1750       int index = search_entries[2*i + 1];
1751       AOTCodeEntry* entry = &(load_entries[index]);
1752 
1753       uint entry_position = entry->offset();
1754       uint name_offset = entry->name_offset() + entry_position;
1755       const char* saved_name = opened_cache->addr(name_offset);
1756 
1757       st->print_cr("%4u: %10s idx:%4u Id:%u size=%u '%s'",
1758                    i, aot_code_entry_kind_name[entry->kind()], index, entry->id(), entry->size(), saved_name);
1759     }
1760   }
1761 }