1 /*
   2  * Copyright (c) 2023, 2026, Oracle and/or its affiliates. All rights reserved.
   3  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
   4  *
   5  * This code is free software; you can redistribute it and/or modify it
   6  * under the terms of the GNU General Public License version 2 only, as
   7  * published by the Free Software Foundation.
   8  *
   9  * This code is distributed in the hope that it will be useful, but WITHOUT
  10  * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
  11  * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
  12  * version 2 for more details (a copy is included in the LICENSE file that
  13  * accompanied this code).
  14  *
  15  * You should have received a copy of the GNU General Public License version
  16  * 2 along with this work; if not, write to the Free Software Foundation,
  17  * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
  18  *
  19  * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
  20  * or visit www.oracle.com if you need additional information or have any
  21  * questions.
  22  *
  23  */
  24 
  25 
  26 #include "asm/macroAssembler.hpp"
  27 #include "cds/aotCacheAccess.hpp"
  28 #include "cds/aotMetaspace.hpp"
  29 #include "cds/cds_globals.hpp"
  30 #include "cds/cdsConfig.hpp"
  31 #include "cds/heapShared.hpp"
  32 #include "ci/ciUtilities.hpp"
  33 #include "classfile/javaAssertions.hpp"
  34 #include "code/aotCodeCache.hpp"
  35 #include "code/codeCache.hpp"
  36 #include "gc/shared/barrierSetAssembler.hpp"
  37 #include "gc/shared/barrierSetNMethod.hpp"
  38 #include "gc/shared/cardTableBarrierSet.hpp"
  39 #include "gc/shared/gcConfig.hpp"
  40 #include "logging/logStream.hpp"
  41 #include "memory/memoryReserver.hpp"
  42 #include "prims/jvmtiThreadState.hpp"
  43 #include "prims/upcallLinker.hpp"
  44 #include "runtime/deoptimization.hpp"
  45 #include "runtime/flags/flagSetting.hpp"
  46 #include "runtime/globals_extension.hpp"
  47 #include "runtime/icache.hpp"
  48 #include "runtime/java.hpp"
  49 #include "runtime/mutexLocker.hpp"
  50 #include "runtime/os.inline.hpp"
  51 #include "runtime/sharedRuntime.hpp"
  52 #include "runtime/stubInfo.hpp"
  53 #include "runtime/stubRoutines.hpp"
  54 #include "utilities/copy.hpp"
  55 #ifdef COMPILER1
  56 #include "c1/c1_Runtime1.hpp"
  57 #endif
  58 #ifdef COMPILER2
  59 #include "opto/runtime.hpp"
  60 #endif
  61 #if INCLUDE_G1GC
  62 #include "gc/g1/g1BarrierSetRuntime.hpp"
  63 #include "gc/g1/g1HeapRegion.hpp"
  64 #endif
  65 #if INCLUDE_SHENANDOAHGC
  66 #include "gc/shenandoah/shenandoahRuntime.hpp"
  67 #endif
  68 #if INCLUDE_ZGC
  69 #include "gc/z/zBarrierSetRuntime.hpp"
  70 #endif
  71 
  72 #include <errno.h>
  73 #include <sys/stat.h>
  74 
// Human-readable names for AOTCodeEntry kinds. Generated from the
// DO_AOTCODEENTRY_KIND x-macro so the table stays in sync with the enum.
const char* aot_code_entry_kind_name[] = {
#define DECL_KIND_STRING(kind) XSTR(kind),
  DO_AOTCODEENTRY_KIND(DECL_KIND_STRING)
#undef DECL_KIND_STRING
};
  80 
// Stream for printing AOTCodeCache loading failures.
// Prints to the error channel when -XX:AOTMode is set to "on".
  83 static LogStream& load_failure_log() {
  84   static LogStream err_stream(LogLevel::Error, LogTagSetMapping<LOG_TAGS(aot, codecache, init)>::tagset());
  85   static LogStream dbg_stream(LogLevel::Debug, LogTagSetMapping<LOG_TAGS(aot, codecache, init)>::tagset());
  86   if (RequireSharedSpaces) {
  87     return err_stream;
  88   } else {
  89     return dbg_stream;
  90   }
  91 }
  92 
// Report that the AOT Code Cache could not be used for loading.
// With -XX:+AbortVMOnAOTCodeFailure the VM exits during initialization;
// otherwise the failure is logged and AOT code caching is disabled.
static void report_load_failure() {
  if (AbortVMOnAOTCodeFailure) {
    vm_exit_during_initialization("Unable to use AOT Code Cache.", nullptr);
  }
  load_failure_log().print_cr("Unable to use AOT Code Cache.");
  AOTCodeCache::disable_caching();
}
 100 
// Report that the AOT Code Cache could not be created during dumping.
// With -XX:+AbortVMOnAOTCodeFailure the VM aborts; otherwise the failure
// is logged and AOT code caching is disabled.
static void report_store_failure() {
  if (AbortVMOnAOTCodeFailure) {
    tty->print_cr("Unable to create AOT Code Cache.");
    vm_abort(false);
  }
  log_error(aot, codecache, exit)("Unable to create AOT Code Cache.");
  AOTCodeCache::disable_caching();
}
 109 
// The sequence of AOT code caching flags and parameter settings.
 111 //
 112 // 1. The initial AOT code caching flags setting is done
 113 // during call to CDSConfig::check_vm_args_consistency().
 114 //
 115 // 2. The earliest AOT code state check done in compilationPolicy_init()
 116 // where we set number of compiler threads for AOT assembly phase.
 117 //
// 3. We determine presence of AOT code in AOT Cache in
// AOTMetaspace::open_static_archive() which is called
// after compilationPolicy_init() but before codeCache_init().
 121 //
 122 // 4. AOTCodeCache::initialize() is called during universe_init()
 123 // and does final AOT state and flags settings.
 124 //
 125 // 5. Finally AOTCodeCache::init2() is called after universe_init()
 126 // when all GC settings are finalized.
 127 
 128 // Next methods determine which action we do with AOT code depending
 129 // on phase of AOT process: assembly or production.
 130 
// True when adapters should be stored into the AOT code cache (assembly phase).
bool AOTCodeCache::is_dumping_adapter() {
  return AOTAdapterCaching && is_on_for_dump();
}

// True when adapters should be loaded from the AOT code cache (production phase).
bool AOTCodeCache::is_using_adapter()   {
  return AOTAdapterCaching && is_on_for_use();
}

// True when stubs should be stored into the AOT code cache (assembly phase).
bool AOTCodeCache::is_dumping_stub() {
  return AOTStubCaching && is_on_for_dump();
}

// True when stubs should be loaded from the AOT code cache (production phase).
bool AOTCodeCache::is_using_stub()   {
  return AOTStubCaching && is_on_for_use();
}
 146 
// Next methods could be called regardless of AOT code cache status.
// Initially they are called during flags parsing and finalized
// in AOTCodeCache::initialize().
// Turn on both kinds of AOT code caching, but only if the user has not
// set the corresponding flags explicitly on the command line.
void AOTCodeCache::enable_caching() {
  FLAG_SET_ERGO_IF_DEFAULT(AOTStubCaching, true);
  FLAG_SET_ERGO_IF_DEFAULT(AOTAdapterCaching, true);
}

// Unconditionally turn off both kinds of AOT code caching.
void AOTCodeCache::disable_caching() {
  FLAG_SET_ERGO(AOTStubCaching, false);
  FLAG_SET_ERGO(AOTAdapterCaching, false);
}

// Caching is considered enabled if at least one kind remains on.
bool AOTCodeCache::is_caching_enabled() {
  return AOTStubCaching || AOTAdapterCaching;
}
 163 
 164 static uint32_t encode_id(AOTCodeEntry::Kind kind, int id) {
 165   assert(AOTCodeEntry::is_valid_entry_kind(kind), "invalid AOTCodeEntry kind %d", (int)kind);
 166   // There can be a conflict of id between an Adapter and *Blob, but that should not cause any functional issue
 167   // becasue both id and kind are used to find an entry, and that combination should be unique
 168   if (kind == AOTCodeEntry::Adapter) {
 169     return id;
 170   } else if (kind == AOTCodeEntry::SharedBlob) {
 171     assert(StubInfo::is_shared(static_cast<BlobId>(id)), "not a shared blob id %d", id);
 172     return id;
 173   } else if (kind == AOTCodeEntry::C1Blob) {
 174     assert(StubInfo::is_c1(static_cast<BlobId>(id)), "not a c1 blob id %d", id);
 175     return id;
 176   } else if (kind == AOTCodeEntry::C2Blob) {
 177     assert(StubInfo::is_c2(static_cast<BlobId>(id)), "not a c2 blob id %d", id);
 178     return id;
 179   } else {
 180     // kind must be AOTCodeEntry::StubGenBlob
 181     assert(StubInfo::is_stubgen(static_cast<BlobId>(id)), "not a stubgen blob id %d", id);
 182     return id;
 183   }
 184 }
 185 
// Effective maximum size of the AOT code cache. Set from AOTCodeMaxSize
// (possibly aligned up) in AOTCodeCache::initialize().
static uint _max_aot_code_size = 0;
uint AOTCodeCache::max_aot_code_size() {
  return _max_aot_code_size;
}
 190 
// It is called from AOTMetaspace::initialize_shared_spaces()
// which is called from universe_init().
// At this point all AOT class linking settings are finalized
// and AOT cache is open so we can map AOT code region.
 195 void AOTCodeCache::initialize() {
 196 #if defined(ZERO) || !(defined(AMD64) || defined(AARCH64))
 197   log_info(aot, codecache, init)("AOT Code Cache is not supported on this platform.");
 198   disable_caching();
 199   return;
 200 #else
 201   if (FLAG_IS_DEFAULT(AOTCache)) {
 202     log_info(aot, codecache, init)("AOT Code Cache is not used: AOTCache is not specified.");
 203     disable_caching();
 204     return; // AOTCache must be specified to dump and use AOT code
 205   }
 206 
 207   if (VerifyOops) {
 208     // Disable AOT stub caching when VerifyOops flag is on.
 209     // Verify oops code generated a lot of C strings which overflow
 210     // AOT C string table (which has fixed size).
 211     // AOT C string table will be reworked later to handle such cases.
 212     log_info(aot, codecache, init)("AOT Stub Caching is not supported with VerifyOops.");
 213     FLAG_SET_ERGO(AOTStubCaching, false);
 214     if (InlineTypePassFieldsAsArgs) {
 215       log_info(aot, codecache, init)("AOT Adapter Caching is not supported with VerifyOops + InlineTypePassFieldsAsArgs.");
 216       FLAG_SET_ERGO(AOTAdapterCaching, false);
 217     }
 218   }
 219 
 220   bool is_dumping = false;
 221   bool is_using   = false;
 222   if (CDSConfig::is_dumping_final_static_archive() && CDSConfig::is_dumping_aot_linked_classes()) {
 223     is_dumping = true;
 224     enable_caching();
 225     is_dumping = is_caching_enabled();
 226   } else if (CDSConfig::is_using_archive() && CDSConfig::is_using_aot_linked_classes()) {
 227     enable_caching();
 228     is_using = is_caching_enabled();
 229   } else {
 230     log_info(aot, codecache, init)("AOT Code Cache is not used: AOT Class Linking is not used.");
 231     disable_caching();
 232     return; // nothing to do
 233   }
 234   if (!(is_dumping || is_using)) {
 235     disable_caching();
 236     return; // AOT code caching disabled on command line
 237   }
 238   _max_aot_code_size = AOTCodeMaxSize;
 239   if (!FLAG_IS_DEFAULT(AOTCodeMaxSize)) {
 240     if (!is_aligned(AOTCodeMaxSize, os::vm_allocation_granularity())) {
 241       _max_aot_code_size = align_up(AOTCodeMaxSize, os::vm_allocation_granularity());
 242       log_debug(aot,codecache,init)("Max AOT Code Cache size is aligned up to %uK", (int)(max_aot_code_size()/K));
 243     }
 244   }
 245   size_t aot_code_size = is_using ? AOTCacheAccess::get_aot_code_region_size() : 0;
 246   if (is_using && aot_code_size == 0) {
 247     log_info(aot, codecache, init)("AOT Code Cache is empty");
 248     disable_caching();
 249     return;
 250   }
 251   if (!open_cache(is_dumping, is_using)) {
 252     if (is_using) {
 253       report_load_failure();
 254     } else {
 255       report_store_failure();
 256     }
 257     return;
 258   }
 259   if (is_dumping) {
 260     FLAG_SET_DEFAULT(ForceUnreachable, true);
 261   }
 262   FLAG_SET_DEFAULT(DelayCompilerStubsGeneration, false);
 263 #endif // defined(AMD64) || defined(AARCH64)
 264 }
 265 
// Cache instance under construction/verification; it is promoted to _cache
// in init2() only after verify_config() succeeds.
static AOTCodeCache*  opened_cache = nullptr; // Use this until we verify the cache
AOTCodeCache* AOTCodeCache::_cache = nullptr;
DEBUG_ONLY( bool AOTCodeCache::_passed_init2 = false; )
 269 
// It is called after universe_init() when all GC settings are finalized.
// Verifies the opened cache's recorded configuration against this runtime;
// on mismatch the cache is discarded and the failure reported. On success
// the address table is primed and the cache is published via _cache.
void AOTCodeCache::init2() {
  DEBUG_ONLY( _passed_init2 = true; )
  if (opened_cache == nullptr) {
    return;
  }
  if (!opened_cache->verify_config()) {
    delete opened_cache;
    opened_cache = nullptr;
    report_load_failure();
    return;
  }

  // initialize aot runtime constants as appropriate to this runtime
  AOTRuntimeConstants::initialize_from_runtime();

  // initialize the table of external routines so we can save
  // generated code blobs that reference them
  AOTCodeAddressTable* table = opened_cache->_table;
  assert(table != nullptr, "should be initialized already");
  table->init_extrs();

  // Now cache and address table are ready for AOT code generation
  _cache = opened_cache;
}
 295 
// Allocate the AOTCodeCache instance for the requested mode(s).
// On construction failure the instance is destroyed and opened_cache is
// reset so later init phases see no cache. Returns true on success.
bool AOTCodeCache::open_cache(bool is_dumping, bool is_using) {
  opened_cache = new AOTCodeCache(is_dumping, is_using);
  if (opened_cache->failed()) {
    delete opened_cache;
    opened_cache = nullptr;
    return false;
  }
  return true;
}
 305 
// Called after continuations_init() when continuation stub callouts
// have been initialized. No-op if no cache was opened.
void AOTCodeCache::init3() {
  if (opened_cache == nullptr) {
    return;
  }
  // initialize external routines for continuations so we can save
  // generated continuation blob that references them
  AOTCodeAddressTable* table = opened_cache->_table;
  assert(table != nullptr, "should be initialized already");
  table->init_extrs2();
}
 318 
// Write all collected AOT code entries out to the cache.
// Takes Compile_lock so no compilation can publish entries concurrently.
void AOTCodeCache::dump() {
  if (is_on()) {
    assert(is_on_for_dump(), "should be called only when dumping AOT code");
    MutexLocker ml(Compile_lock);
    _cache->finish_write();
  }
}
 326 
 327 #define DATA_ALIGNMENT HeapWordSize
 328 
 329 AOTCodeCache::AOTCodeCache(bool is_dumping, bool is_using) :
 330   _load_header(nullptr),
 331   _load_buffer(nullptr),
 332   _store_buffer(nullptr),
 333   _C_store_buffer(nullptr),
 334   _write_position(0),
 335   _load_size(0),
 336   _store_size(0),
 337   _for_use(is_using),
 338   _for_dump(is_dumping),
 339   _failed(false),
 340   _lookup_failed(false),
 341   _table(nullptr),
 342   _load_entries(nullptr),
 343   _search_entries(nullptr),
 344   _store_entries(nullptr),
 345   _C_strings_buf(nullptr),
 346   _store_entries_cnt(0)
 347 {
 348   // Read header at the begining of cache
 349   if (_for_use) {
 350     // Read cache
 351     size_t load_size = AOTCacheAccess::get_aot_code_region_size();
 352     ReservedSpace rs = MemoryReserver::reserve(load_size, mtCode);
 353     if (!rs.is_reserved()) {
 354       log_warning(aot, codecache, init)("Failed to reserved %u bytes of memory for mapping AOT code region into AOT Code Cache", (uint)load_size);
 355       set_failed();
 356       return;
 357     }
 358     if (!AOTCacheAccess::map_aot_code_region(rs)) {
 359       log_warning(aot, codecache, init)("Failed to read/mmap cached code region into AOT Code Cache");
 360       set_failed();
 361       return;
 362     }
 363 
 364     _load_size = (uint)load_size;
 365     _load_buffer = (char*)rs.base();
 366     assert(is_aligned(_load_buffer, DATA_ALIGNMENT), "load_buffer is not aligned");
 367     log_debug(aot, codecache, init)("Mapped %u bytes at address " INTPTR_FORMAT " at AOT Code Cache", _load_size, p2i(_load_buffer));
 368 
 369     _load_header = (Header*)addr(0);
 370     if (!_load_header->verify(_load_size)) {
 371       set_failed();
 372       return;
 373     }
 374     log_info (aot, codecache, init)("Loaded %u AOT code entries from AOT Code Cache", _load_header->entries_count());
 375     log_debug(aot, codecache, init)("  Adapters:  total=%u", _load_header->adapters_count());
 376     log_debug(aot, codecache, init)("  Shared Blobs: total=%u", _load_header->shared_blobs_count());
 377     log_debug(aot, codecache, init)("  StubGen Blobs:  total=%d", _load_header->stubgen_blobs_count());
 378     log_debug(aot, codecache, init)("  C1 Blobs: total=%u", _load_header->C1_blobs_count());
 379     log_debug(aot, codecache, init)("  C2 Blobs: total=%u", _load_header->C2_blobs_count());
 380     log_debug(aot, codecache, init)("  AOT code cache size: %u bytes", _load_header->cache_size());
 381 
 382     // Read strings
 383     load_strings();
 384   }
 385   if (_for_dump) {
 386     _C_store_buffer = NEW_C_HEAP_ARRAY(char, max_aot_code_size() + DATA_ALIGNMENT, mtCode);
 387     _store_buffer = align_up(_C_store_buffer, DATA_ALIGNMENT);
 388     // Entries allocated at the end of buffer in reverse (as on stack).
 389     _store_entries = (AOTCodeEntry*)align_up(_C_store_buffer + max_aot_code_size(), DATA_ALIGNMENT);
 390     log_debug(aot, codecache, init)("Allocated store buffer at address " INTPTR_FORMAT " of size %u", p2i(_store_buffer), max_aot_code_size());
 391   }
 392   _table = new AOTCodeAddressTable();
 393 }
 394 
 395 void AOTCodeCache::add_stub_entries(StubId stub_id, address start, GrowableArray<address> *entries, int begin_idx) {
 396   EntryId entry_id = StubInfo::entry_base(stub_id);
 397   add_stub_entry(entry_id, start);
 398   // skip past first entry
 399   entry_id = StubInfo::next_in_stub(stub_id, entry_id);
 400   // now check for any more entries
 401   int count = StubInfo::entry_count(stub_id) - 1;
 402   assert(start != nullptr, "invalid start address for stub %s", StubInfo::name(stub_id));
 403   assert(entries == nullptr || begin_idx + count <= entries->length(), "sanity");
 404   // write any extra entries
 405   for (int i = 0; i < count; i++) {
 406     assert(entry_id != EntryId::NO_ENTRYID, "not enough entries for stub %s", StubInfo::name(stub_id));
 407     address a = entries->at(begin_idx + i);
 408     add_stub_entry(entry_id, a);
 409     entry_id = StubInfo::next_in_stub(stub_id, entry_id);
 410   }
 411   assert(entry_id == EntryId::NO_ENTRYID, "too many entries for stub %s", StubInfo::name(stub_id));
 412 }
 413 
 414 void AOTCodeCache::add_stub_entry(EntryId entry_id, address a) {
 415   if (a != nullptr) {
 416     if (_table != nullptr) {
 417       log_trace(aot, codecache, stubs)("Publishing stub entry %s at address " INTPTR_FORMAT, StubInfo::name(entry_id), p2i(a));
 418       return _table->add_stub_entry(entry_id, a);
 419     }
 420   }
 421 }
 422 
// The following four methods forward "this stub generation phase is done"
// notifications to the address table. Each is a no-op when no cache (and
// hence no table) was created.

void AOTCodeCache::set_shared_stubs_complete() {
  AOTCodeAddressTable* table = addr_table();
  if (table != nullptr) {
    table->set_shared_stubs_complete();
  }
}

void AOTCodeCache::set_c1_stubs_complete() {
  AOTCodeAddressTable* table = addr_table();
  if (table != nullptr) {
    table->set_c1_stubs_complete();
  }
}

void AOTCodeCache::set_c2_stubs_complete() {
  AOTCodeAddressTable* table = addr_table();
  if (table != nullptr) {
    table->set_c2_stubs_complete();
  }
}

void AOTCodeCache::set_stubgen_stubs_complete() {
  AOTCodeAddressTable* table = addr_table();
  if (table != nullptr) {
    table->set_stubgen_stubs_complete();
  }
}
 450 
// Snapshot the current VM configuration into this Config so that a later
// run can compare against it in verify(). Each value listed in the
// AOTCODECACHE_CONFIGS_DO x-macro is saved into its _saved_<name> field.
void AOTCodeCache::Config::record(uint cpu_features_offset) {

#define AOTCODECACHE_SAVE_VAR(type, name) _saved_ ## name =  name;
#define AOTCODECACHE_SAVE_FUN(type, name, fun) _saved_ ## name =  fun;

  AOTCODECACHE_CONFIGS_DO(AOTCODECACHE_SAVE_VAR, AOTCODECACHE_SAVE_FUN);

  // Special configs that cannot be checked with macros
  _compressedOopBase     = CompressedOops::base();

#if defined(X86) && !defined(ZERO)
  _useUnalignedLoadStores = UseUnalignedLoadStores;
#endif

#if defined(AARCH64)  && !defined(ZERO)
  _avoidUnalignedAccesses = AvoidUnalignedAccesses;
#endif

  // Offset of the recorded CPU feature set within the cache buffer
  _cpu_features_offset   = cpu_features_offset;
}
 471 
// Check that the CPU features recorded in the cache are supported by the
// current CPU. Returns false (and logs the missing features) when the cache
// requires features this CPU does not have; otherwise logs any extra runtime
// features at debug level and returns true.
bool AOTCodeCache::Config::verify_cpu_features(AOTCodeCache* cache) const {
  LogStreamHandle(Debug, aot, codecache, init) log;
  // The feature record is a uint size prefix followed by the feature data
  // (see store_cpu_features()).
  uint offset = _cpu_features_offset;
  uint cpu_features_size = *(uint *)cache->addr(offset);
  assert(cpu_features_size == (uint)VM_Version::cpu_features_size(), "must be");
  offset += sizeof(uint);

  void* cached_cpu_features_buffer = (void *)cache->addr(offset);
  if (log.is_enabled()) {
    ResourceMark rm; // required for stringStream::as_string()
    stringStream ss;
    VM_Version::get_cpu_features_name(cached_cpu_features_buffer, ss);
    log.print_cr("CPU features recorded in AOTCodeCache: %s", ss.as_string());
  }

  if (VM_Version::supports_features(cached_cpu_features_buffer)) {
    if (log.is_enabled()) {
      ResourceMark rm; // required for stringStream::as_string()
      stringStream ss;
      char* runtime_cpu_features = NEW_RESOURCE_ARRAY(char, VM_Version::cpu_features_size());
      VM_Version::store_cpu_features(runtime_cpu_features);
      // NOTE(review): argument order is reversed relative to the failure path
      // below — presumably this direction reports features the runtime has
      // but the cache does not; confirm against VM_Version.
      VM_Version::get_missing_features_name(runtime_cpu_features, cached_cpu_features_buffer, ss);
      if (!ss.is_empty()) {
        log.print_cr("Additional runtime CPU features: %s", ss.as_string());
      }
    }
  } else {
    if (load_failure_log().is_enabled()) {
      ResourceMark rm; // required for stringStream::as_string()
      stringStream ss;
      char* runtime_cpu_features = NEW_RESOURCE_ARRAY(char, VM_Version::cpu_features_size());
      VM_Version::store_cpu_features(runtime_cpu_features);
      VM_Version::get_missing_features_name(cached_cpu_features_buffer, runtime_cpu_features, ss);
      load_failure_log().print_cr("AOT Code Cache disabled: required cpu features are missing: %s", ss.as_string());
    }
    return false;
  }
  return true;
}
 511 
 512 #define AOTCODECACHE_DISABLED_MSG "AOT Code Cache disabled: it was created with %s = "
 513 
 514 // Special case, print "GC = ..." to be more understandable.
// Overloads used by check_config() to report a saved-vs-current config
// mismatch for each supported value type.

// Special case, print "GC = ..." to be more understandable.
inline void log_config_mismatch(CollectedHeap::Name saved, CollectedHeap::Name current, const char* name/*unused*/) {
  load_failure_log().print_cr("AOT Code Cache disabled: it was created with GC = \"%s\" vs current \"%s\"",
                              GCConfig::hs_err_name(saved), GCConfig::hs_err_name(current));
}

inline void log_config_mismatch(bool saved, bool current, const char* name) {
  load_failure_log().print_cr(AOTCODECACHE_DISABLED_MSG "%s vs current %s", name,
                              saved ? "true" : "false", current ? "true" : "false");
}

inline void log_config_mismatch(int saved, int current, const char* name) {
  load_failure_log().print_cr(AOTCODECACHE_DISABLED_MSG "%d vs current %d", name, saved, current);
}

inline void log_config_mismatch(uint saved, uint current, const char* name) {
  load_failure_log().print_cr(AOTCODECACHE_DISABLED_MSG "%u vs current %u", name, saved, current);
}

// On LP64, intx/uintx are distinct from int/uint and need their own overloads.
#ifdef _LP64
inline void log_config_mismatch(intx saved, intx current, const char* name) {
  load_failure_log().print_cr(AOTCODECACHE_DISABLED_MSG "%zd vs current %zd", name, saved, current);
}

inline void log_config_mismatch(uintx saved, uintx current, const char* name) {
  load_failure_log().print_cr(AOTCODECACHE_DISABLED_MSG "%zu vs current %zu", name, saved, current);
}
#endif
 542 
 543 template <typename T>
 544 bool check_config(T saved, T current, const char* name) {
 545   if (saved != current) {
 546     log_config_mismatch(saved, current, name);
 547     return false;
 548   } else {
 549     return true;
 550   }
 551 }
 552 
// Verify that the configuration recorded at dump time is compatible with
// the current runtime. Any mismatch invalidates the whole cache.
bool AOTCodeCache::Config::verify(AOTCodeCache* cache) const {
  // check CPU features before checking flags that may be
  // auto-configured in response to them
  if (!verify_cpu_features(cache)) {
    return false;
  }

  // Tests for config options which might affect validity of adapters,
  // stubs or nmethods. Currently we take a pessimistic stand and
  // drop the whole cache if any of these are changed.

#define AOTCODECACHE_CHECK_VAR(type, name) \
  if (!check_config(_saved_ ## name, name, #name)) { return false; }
#define AOTCODECACHE_CHECK_FUN(type, name, fun) \
  if (!check_config(_saved_ ## name, fun, #fun)) { return false; }

  AOTCODECACHE_CONFIGS_DO(AOTCODECACHE_CHECK_VAR, AOTCODECACHE_CHECK_FUN);

  // Special configs that cannot be checked with macros

  // Only a null <-> non-null transition of the compressed oops base is
  // incompatible; two different non-null bases are accepted.
  if ((_compressedOopBase == nullptr || CompressedOops::base() == nullptr) && (_compressedOopBase != CompressedOops::base())) {
    load_failure_log().print_cr("AOT Code Cache disabled: incompatible CompressedOops::base(): %p vs current %p",
                                _compressedOopBase, CompressedOops::base());
    return false;
  }

#if defined(X86) && !defined(ZERO)
  // switching off UseUnalignedLoadStores can affect validity of fill
  // stubs
  if (_useUnalignedLoadStores && !UseUnalignedLoadStores) {
    log_config_mismatch(_useUnalignedLoadStores, UseUnalignedLoadStores, "UseUnalignedLoadStores");
    return false;
  }
#endif // defined(X86) && !defined(ZERO)

#if defined(AARCH64) && !defined(ZERO)
  // switching on AvoidUnalignedAccesses may affect validity of array
  // copy stubs and nmethods
  if (!_avoidUnalignedAccesses && AvoidUnalignedAccesses) {
    log_config_mismatch(_avoidUnalignedAccesses, AvoidUnalignedAccesses, "AvoidUnalignedAccesses");
    return false;
  }
#endif // defined(AARCH64) && !defined(ZERO)

  return true;
}
 599 
 600 bool AOTCodeCache::Header::verify(uint load_size) const {
 601   if (_version != AOT_CODE_VERSION) {
 602     load_failure_log().print_cr("AOT Code Cache disabled: different AOT Code version %d vs %d recorded in AOT Code header", AOT_CODE_VERSION, _version);
 603     return false;
 604   }
 605   if (load_size < _cache_size) {
 606     load_failure_log().print_cr("AOT Code Cache disabled: AOT Code Cache size %d < %d recorded in AOT Code header", load_size, _cache_size);
 607     return false;
 608   }
 609   return true;
 610 }
 611 
 612 AOTCodeCache* AOTCodeCache::open_for_use() {
 613   if (AOTCodeCache::is_on_for_use()) {
 614     return AOTCodeCache::cache();
 615   }
 616   return nullptr;
 617 }
 618 
 619 AOTCodeCache* AOTCodeCache::open_for_dump() {
 620   if (AOTCodeCache::is_on_for_dump()) {
 621     AOTCodeCache* cache = AOTCodeCache::cache();
 622     cache->clear_lookup_failed(); // Reset bit
 623     return cache;
 624   }
 625   return nullptr;
 626 }
 627 
// Copy 'size' bytes from 'from' to 'to' with trace logging.
// The assert rejects size == 0 (and, via the int cast, sizes that do not
// fit a positive int) — callers are expected to skip empty copies.
void copy_bytes(const char* from, address to, uint size) {
  assert((int)size > 0, "sanity");
  memcpy(to, from, size);
  log_trace(aot, codecache)("Copied %d bytes from " INTPTR_FORMAT " to " INTPTR_FORMAT, size, p2i(from), p2i(to));
}
 633 
// Construct a reader positioned at the start of the cache's load buffer,
// with all per-entry decoding state cleared.
AOTCodeReader::AOTCodeReader(AOTCodeCache* cache, AOTCodeEntry* entry) {
  _cache = cache;
  _entry = entry;
  _load_buffer = cache->cache_buffer();
  _read_position = 0;
  _lookup_failed = false;
  _name          = nullptr;
  _reloc_data    = nullptr;
  _reloc_count   = 0;
  _oop_maps      = nullptr;
  _entry_kind    = AOTCodeEntry::None;
  _stub_data     = nullptr;
  _id            = -1;
}
 648 
// Move the read cursor to 'pos' (bounds-checked against the cache size).
void AOTCodeReader::set_read_position(uint pos) {
  if (pos == _read_position) {
    return;
  }
  assert(pos < _cache->load_size(), "offset:%d >= file size:%d", pos, _cache->load_size());
  _read_position = pos;
}

// Return the read position rounded up to int alignment.
// NOTE(review): this only computes the aligned offset, it does not advance
// _read_position — the caller is expected to act on the returned value.
uint AOTCodeReader::align_read_int() {
  return align_up(_read_position, sizeof(int));
}
 660 
// Move the write cursor to 'pos'. If the cursor is currently past the
// recorded store size, the store size is first extended to cover the data
// written so far — the cursor may be moved back to patch earlier data.
bool AOTCodeCache::set_write_position(uint pos) {
  if (pos == _write_position) {
    return true;
  }
  if (_store_size < _write_position) {
    _store_size = _write_position; // Adjust during write
  }
  assert(pos < _store_size, "offset:%d >= file size:%d", pos, _store_size);
  _write_position = pos;
  return true;
}
 672 
// Zero-filled scratch block used to emit alignment padding.
static char align_buffer[256] = { 0 };

// Pad the write position up to 'alignment'.
// Uses mask arithmetic, so 'alignment' must be a power of two, and the
// padding must fit in align_buffer (alignment <= 256).
bool AOTCodeCache::align_write_bytes(uint alignment) {
  uint padding = alignment - (_write_position & (alignment - 1));
  if (padding == alignment) {
    return true; // already aligned
  }
  uint n = write_bytes((const void*)&align_buffer, padding);
  if (n != padding) {
    return false;
  }
  log_trace(aot, codecache)("Adjust write alignment to %d bytes in AOT Code Cache", alignment);
  return true;
}

// Align the write position for general data.
bool AOTCodeCache::align_write() {
  // We are not executing code from cache - we copy it by bytes first.
  // No need for big alignment (or at all).
  return align_write_bytes(DATA_ALIGNMENT);
}

// Align the write position for an int-sized write.
bool AOTCodeCache::align_write_int() {
  return align_write_bytes(sizeof(int));
}
 697 
// Check to see if AOT code cache has required space to store "nbytes" of data.
// On success returns the address of the reserved region and advances the
// write position; on overflow (the region would collide with the entry
// array growing down from the end of the buffer) the cache is failed.
address AOTCodeCache::reserve_bytes(uint nbytes) {
  assert(for_dump(), "Code Cache file is not created");
  uint new_position = _write_position + nbytes;
  // _store_entries grows down from the buffer end; writes must stay below it.
  if (new_position >= (uint)((char*)_store_entries - _store_buffer)) {
    log_warning(aot,codecache)("Failed to ensure %d bytes at offset %d in AOT Code Cache. Increase AOTCodeMaxSize.",
                               nbytes, _write_position);
    set_failed();
    report_store_failure();
    return nullptr;
  }
  address buffer = (address)(_store_buffer + _write_position);
  log_trace(aot, codecache)("Reserved %d bytes at offset %d in AOT Code Cache", nbytes, _write_position);
  _write_position += nbytes;
  if (_store_size < _write_position) {
    _store_size = _write_position;
  }
  return buffer;
}
 717 
// Copy 'nbytes' from 'buffer' into the store buffer at the current write
// position. Returns the number of bytes written (0 on overflow, in which
// case the cache is failed), and keeps _store_size in sync.
uint AOTCodeCache::write_bytes(const void* buffer, uint nbytes) {
  assert(for_dump(), "Code Cache file is not created");
  if (nbytes == 0) {
    return 0;
  }
  uint new_position = _write_position + nbytes;
  // _store_entries grows down from the buffer end; writes must stay below it.
  if (new_position >= (uint)((char*)_store_entries - _store_buffer)) {
    log_warning(aot, codecache)("Failed to write %d bytes at offset %d to AOT Code Cache. Increase AOTCodeMaxSize.",
                                nbytes, _write_position);
    set_failed();
    report_store_failure();
    return 0;
  }
  copy_bytes((const char* )buffer, (address)(_store_buffer + _write_position), nbytes);
  log_trace(aot, codecache)("Wrote %d bytes at offset %d to AOT Code Cache", nbytes, _write_position);
  _write_position += nbytes;
  if (_store_size < _write_position) {
    _store_size = _write_position;
  }
  return nbytes;
}
 739 
// Placement allocation: AOTCodeEntry instances are placed in storage
// obtained from cache->add_entry(), not on the C heap.
void* AOTCodeEntry::operator new(size_t x, AOTCodeCache* cache) {
  return (void*)(cache->add_entry());
}
 743 
 744 static bool check_entry(AOTCodeEntry::Kind kind, uint id, AOTCodeEntry* entry) {
 745   if (entry->kind() == kind) {
 746     assert(entry->id() == id, "sanity");
 747     return true; // Found
 748   }
 749   return false;
 750 }
 751 
// Find the loaded entry with the given (kind, id), or nullptr.
// The search table is an array of [id, index] uint pairs sorted by id,
// followed by the entry array itself; both are lazily located on first use.
// Because different kinds may share an id, a binary search on id is
// followed by a linear scan of neighboring equal-id pairs to match the kind.
AOTCodeEntry* AOTCodeCache::find_entry(AOTCodeEntry::Kind kind, uint id) {
  assert(_for_use, "sanity");
  uint count = _load_header->entries_count();
  if (_load_entries == nullptr) {
    // Read it
    _search_entries = (uint*)addr(_load_header->entries_offset()); // [id, index]
    _load_entries = (AOTCodeEntry*)(_search_entries + 2 * count);
    log_debug(aot, codecache, init)("Read %d entries table at offset %d from AOT Code Cache", count, _load_header->entries_offset());
  }
  // Binary search
  int l = 0;
  int h = count - 1;
  while (l <= h) {
    int mid = (l + h) >> 1;
    int ix = mid * 2;
    uint is = _search_entries[ix];
    if (is == id) {
      int index = _search_entries[ix + 1];
      AOTCodeEntry* entry = &(_load_entries[index]);
      if (check_entry(kind, id, entry)) {
        return entry; // Found
      }
      // Linear search around to handle id collision
      for (int i = mid - 1; i >= l; i--) { // search back
        ix = i * 2;
        is = _search_entries[ix];
        if (is != id) {
          break;
        }
        index = _search_entries[ix + 1];
        AOTCodeEntry* entry = &(_load_entries[index]);
        if (check_entry(kind, id, entry)) {
          return entry; // Found
        }
      }
      for (int i = mid + 1; i <= h; i++) { // search forward
        ix = i * 2;
        is = _search_entries[ix];
        if (is != id) {
          break;
        }
        index = _search_entries[ix + 1];
        AOTCodeEntry* entry = &(_load_entries[index]);
        if (check_entry(kind, id, entry)) {
          return entry; // Found
        }
      }
      break; // Not found match
    } else if (is < id) {
      l = mid + 1;
    } else {
      h = mid - 1;
    }
  }
  return nullptr;
}
 808 
 809 extern "C" {
 810   static int uint_cmp(const void *i, const void *j) {
 811     uint a = *(uint *)i;
 812     uint b = *(uint *)j;
 813     return a > b ? 1 : a < b ? -1 : 0;
 814   }
 815 }
 816 
 817 void AOTCodeCache::store_cpu_features(char*& buffer, uint buffer_size) {
 818   uint* size_ptr = (uint *)buffer;
 819   *size_ptr = buffer_size;
 820   buffer += sizeof(uint);
 821 
 822   VM_Version::store_cpu_features(buffer);
 823   log_debug(aot, codecache, exit)("CPU features recorded in AOTCodeCache: %s", VM_Version::features_string());
 824   buffer += buffer_size;
 825   buffer = align_up(buffer, DATA_ALIGNMENT);
 826 }
 827 
 828 bool AOTCodeCache::finish_write() {
 829   if (!align_write()) {
 830     return false;
 831   }
 832   uint strings_offset = _write_position;
 833   int strings_count = store_strings();
 834   if (strings_count < 0) {
 835     return false;
 836   }
 837   if (!align_write()) {
 838     return false;
 839   }
 840   uint strings_size = _write_position - strings_offset;
 841 
 842   uint entries_count = 0; // Number of entrant (useful) code entries
 843   uint entries_offset = _write_position;
 844 
 845   uint store_count = _store_entries_cnt;
 846   if (store_count > 0) {
 847     uint header_size = (uint)align_up(sizeof(AOTCodeCache::Header), DATA_ALIGNMENT);
 848     uint code_count = store_count;
 849     uint search_count = code_count * 2;
 850     uint search_size = search_count * sizeof(uint);
 851     uint entries_size = (uint)align_up(code_count * sizeof(AOTCodeEntry), DATA_ALIGNMENT); // In bytes
 852     // _write_position includes size of code and strings
 853     uint code_alignment = code_count * DATA_ALIGNMENT; // We align_up code size when storing it.
 854     uint cpu_features_size = VM_Version::cpu_features_size();
 855     uint total_cpu_features_size = sizeof(uint) + cpu_features_size; // sizeof(uint) to store cpu_features_size
 856     uint total_size = header_size + _write_position + code_alignment + search_size + entries_size +
 857                       align_up(total_cpu_features_size, DATA_ALIGNMENT);
 858     assert(total_size < max_aot_code_size(), "AOT Code size (" UINT32_FORMAT " bytes) is greater than AOTCodeMaxSize(" UINT32_FORMAT " bytes).", total_size, max_aot_code_size());
 859 
 860     // Allocate in AOT Cache buffer
 861     char* buffer = (char *)AOTCacheAccess::allocate_aot_code_region(total_size + DATA_ALIGNMENT);
 862     char* start = align_up(buffer, DATA_ALIGNMENT);
 863     char* current = start + header_size; // Skip header
 864 
 865     uint cpu_features_offset = current - start;
 866     store_cpu_features(current, cpu_features_size);
 867     assert(is_aligned(current, DATA_ALIGNMENT), "sanity check");
 868     assert(current < start + total_size, "sanity check");
 869 
 870     // Create ordered search table for entries [id, index];
 871     uint* search = NEW_C_HEAP_ARRAY(uint, search_count, mtCode);
 872 
 873     AOTCodeEntry* entries_address = _store_entries; // Pointer to latest entry
 874     uint adapters_count = 0;
 875     uint shared_blobs_count = 0;
 876     uint stubgen_blobs_count = 0;
 877     uint C1_blobs_count = 0;
 878     uint C2_blobs_count = 0;
 879     uint max_size = 0;
 880     // AOTCodeEntry entries were allocated in reverse in store buffer.
 881     // Process them in reverse order to cache first code first.
 882     for (int i = store_count - 1; i >= 0; i--) {
 883       entries_address[i].set_next(nullptr); // clear pointers before storing data
 884       uint size = align_up(entries_address[i].size(), DATA_ALIGNMENT);
 885       if (size > max_size) {
 886         max_size = size;
 887       }
 888       copy_bytes((_store_buffer + entries_address[i].offset()), (address)current, size);
 889       entries_address[i].set_offset(current - start); // New offset
 890       current += size;
 891       uint n = write_bytes(&(entries_address[i]), sizeof(AOTCodeEntry));
 892       if (n != sizeof(AOTCodeEntry)) {
 893         FREE_C_HEAP_ARRAY(search);
 894         return false;
 895       }
 896       search[entries_count*2 + 0] = entries_address[i].id();
 897       search[entries_count*2 + 1] = entries_count;
 898       entries_count++;
 899       AOTCodeEntry::Kind kind = entries_address[i].kind();
 900       if (kind == AOTCodeEntry::Adapter) {
 901         adapters_count++;
 902       } else if (kind == AOTCodeEntry::SharedBlob) {
 903         shared_blobs_count++;
 904       } else if (kind == AOTCodeEntry::StubGenBlob) {
 905         stubgen_blobs_count++;
 906       } else if (kind == AOTCodeEntry::C1Blob) {
 907         C1_blobs_count++;
 908       } else if (kind == AOTCodeEntry::C2Blob) {
 909         C2_blobs_count++;
 910       }
 911     }
 912     if (entries_count == 0) {
 913       log_info(aot, codecache, exit)("AOT Code Cache was not created: no entires");
 914       FREE_C_HEAP_ARRAY(search);
 915       return true; // Nothing to write
 916     }
 917     assert(entries_count <= store_count, "%d > %d", entries_count, store_count);
 918     // Write strings
 919     if (strings_count > 0) {
 920       copy_bytes((_store_buffer + strings_offset), (address)current, strings_size);
 921       strings_offset = (current - start); // New offset
 922       current += strings_size;
 923     }
 924 
 925     uint new_entries_offset = (current - start); // New offset
 926     // Sort and store search table
 927     qsort(search, entries_count, 2*sizeof(uint), uint_cmp);
 928     search_size = 2 * entries_count * sizeof(uint);
 929     copy_bytes((const char*)search, (address)current, search_size);
 930     FREE_C_HEAP_ARRAY(search);
 931     current += search_size;
 932 
 933     // Write entries
 934     entries_size = entries_count * sizeof(AOTCodeEntry); // New size
 935     copy_bytes((_store_buffer + entries_offset), (address)current, entries_size);
 936     current += entries_size;
 937     uint size = (current - start);
 938     assert(size <= total_size, "%d > %d", size , total_size);
 939 
 940     log_debug(aot, codecache, exit)("  Adapters:  total=%u", adapters_count);
 941     log_debug(aot, codecache, exit)("  Shared Blobs:  total=%d", shared_blobs_count);
 942     log_debug(aot, codecache, exit)("  StubGen Blobs:  total=%d", stubgen_blobs_count);
 943     log_debug(aot, codecache, exit)("  C1 Blobs:      total=%d", C1_blobs_count);
 944     log_debug(aot, codecache, exit)("  C2 Blobs:      total=%d", C2_blobs_count);
 945     log_debug(aot, codecache, exit)("  AOT code cache size: %u bytes, max entry's size: %u bytes", size, max_size);
 946 
 947     // Finalize header
 948     AOTCodeCache::Header* header = (AOTCodeCache::Header*)start;
 949     header->init(size, (uint)strings_count, strings_offset,
 950                  entries_count, new_entries_offset,
 951                  adapters_count, shared_blobs_count,
 952                  stubgen_blobs_count, C1_blobs_count,
 953                  C2_blobs_count, cpu_features_offset);
 954 
 955     log_info(aot, codecache, exit)("Wrote %d AOT code entries to AOT Code Cache", entries_count);
 956   }
 957   return true;
 958 }
 959 
 960 //------------------Store/Load AOT code ----------------------
 961 
// Serialize 'blob' into the AOT code cache as an entry of 'entry_kind'.
// For a multi-stub blob, 'stub_data' and 'code_buffer' describe the stub
// address ranges and the transient relocations; for all other kinds both
// must be null. Returns true when the blob was written; false when the
// cache is unavailable, the kind is excluded by configuration, or a write
// fails (a recoverable reloc failure just skips this blob).
bool AOTCodeCache::store_code_blob(CodeBlob& blob, AOTCodeEntry::Kind entry_kind, uint id, const char* name, AOTStubData* stub_data, CodeBuffer* code_buffer) {
  assert(AOTCodeEntry::is_valid_entry_kind(entry_kind), "invalid entry_kind %d", entry_kind);

  // we only expect stub data and a code buffer for a multi stub blob
  assert(AOTCodeEntry::is_multi_stub_blob(entry_kind) == (stub_data != nullptr),
         "entry_kind %d does not match stub_data pointer %p",
         entry_kind, stub_data);

  assert((stub_data == nullptr) == (code_buffer == nullptr),
         "stub data and code buffer must both be null or both non null");

  // If this is a stub and the cache is on for either load or dump we
  // need to insert the stub entries into the AOTCacheAddressTable so
  // that relocs which refer to entries defined by this blob get
  // translated correctly.
  //
  // Entry insertion needs to be be done up front before writing the
  // blob because some blobs rely on internal daisy-chain references
  // from one entry to another.
  //
  // Entry insertion also needs to be done even if the cache is open
  // for use but not for dump. This may be needed when an archived
  // blob omits some entries -- either because of a config change or a
  // load failure -- with the result that the entries end up being
  // generated. These generated entry addresses may be needed to
  // resolve references from subsequently loaded blobs (for either
  // stubs or nmethods).

  if (is_on() && AOTCodeEntry::is_blob(entry_kind)) {
    publish_stub_addresses(blob, (BlobId)id, stub_data);
  }

  // From here on we only proceed if the cache is open for dump and the
  // entry kind is enabled by the current configuration.
  AOTCodeCache* cache = open_for_dump();
  if (cache == nullptr) {
    return false;
  }
  if (AOTCodeEntry::is_adapter(entry_kind) && !is_dumping_adapter()) {
    return false;
  }
  if (AOTCodeEntry::is_blob(entry_kind) && !is_dumping_stub()) {
    return false;
  }
  // we do not currently store C2 stubs because we are seeing weird
  // memory errors when loading them -- see JDK-8357593
  if (entry_kind == AOTCodeEntry::C2Blob) {
    return false;
  }
  log_debug(aot, codecache, stubs)("Writing blob '%s' (id=%u, kind=%s) to AOT Code Cache", name, id, aot_code_entry_kind_name[entry_kind]);

#ifdef ASSERT
  LogStreamHandle(Trace, aot, codecache, stubs) log;
  if (log.is_enabled()) {
    FlagSetting fs(PrintRelocations, true);
    blob.print_on(&log);
  }
#endif
  // we need to take a lock to prevent race between compiler threads generating AOT code
  // and the main thread generating adapter
  MutexLocker ml(Compile_lock);
  if (!is_on()) {
    return false; // AOT code cache was already dumped and closed.
  }
  if (!cache->align_write()) {
    return false;
  }
  uint entry_position = cache->_write_position;

  // blob_offset is relative to entry_position; nothing is written between
  // the two statements, so it is currently always 0.
  uint blob_offset = cache->_write_position - entry_position;
  // Code blob's size is aligned to oopSize
  address archive_buffer = cache->reserve_bytes(blob.size());
  if (archive_buffer == nullptr) {
    return false;
  }
  CodeBlob::archive_blob(&blob, archive_buffer);

  // For a relocatable code blob its relocations are linked from the
  // blob. However, for a non-relocatable (stubgen) blob we only have
  // transient relocations attached to the code buffer that are added
  // in order to support AOT-load time patching. in either case, we
  // need to explicitly save these relocs when storing the blob to the
  // archive so we can then reload them and reattach them to either
  // the blob or to a code buffer when we reload the blob into a
  // production JVM.
  //
  // Either way we are then in a position to iterate over the relocs
  // and AOT patch the ones that refer to code that may move between
  // assembly and production time. We also need to save and restore
  // AOT address table indexes for the target addresses of affected
  // relocs. That happens below.

  int reloc_count;
  address reloc_data;
  if (AOTCodeEntry::is_multi_stub_blob(entry_kind)) {
    // Relocs live in the generator's code buffer, not the blob.
    CodeSection* cs = code_buffer->code_section(CodeBuffer::SECT_INSTS);
    reloc_count = (cs->has_locs() ? cs->locs_count() : 0);
    reloc_data = (reloc_count > 0 ? (address)cs->locs_start() : nullptr);
  } else {
    reloc_count = blob.relocation_size() / sizeof(relocInfo);
    reloc_data = (address)blob.relocation_begin();
  }
  uint n = cache->write_bytes(&reloc_count, sizeof(int));
  if (n != sizeof(int)) {
    return false;
  }
  if (AOTCodeEntry::is_multi_stub_blob(entry_kind)) {
    // align to heap word size before writing the relocs so we can
    // install them into a code buffer when they get restored
    if (!cache->align_write()) {
      return false;
    }
  }
  uint reloc_data_size = (uint)(reloc_count * sizeof(relocInfo));
  n = cache->write_bytes(reloc_data, reloc_data_size);
  if (n != reloc_data_size) {
    return false;
  }

  bool has_oop_maps = false;
  if (blob.oop_maps() != nullptr) {
    if (!cache->write_oop_map_set(blob)) {
      return false;
    }
    has_oop_maps = true;
  }

  // In the case of a multi-stub blob we need to write start, end,
  // secondary entries and extras. For any other blob entry addresses
  // beyond the blob start will be stored in the blob as offsets.
  if (stub_data != nullptr) {
    if (!cache->write_stub_data(blob, stub_data)) {
      return false;
    }
  }

  // now we have added all the other data we can write details of any
  // extra the AOT relocations

  bool write_ok = true;
  if (AOTCodeEntry::is_multi_stub_blob(entry_kind)) {
    if (reloc_count > 0) {
      CodeSection* cs = code_buffer->code_section(CodeBuffer::SECT_INSTS);
      RelocIterator iter(cs);
      write_ok = cache->write_relocations(blob, iter);
    }
  } else {
    RelocIterator iter(&blob);
    write_ok = cache->write_relocations(blob, iter);
  }

  if (!write_ok) {
    if (!cache->failed()) {
      // We may miss an address in AOT table - skip this code blob.
      cache->set_write_position(entry_position);
    }
    return false;
  }

#ifndef PRODUCT
  // Write asm remarks after relocation info
  if (!cache->write_asm_remarks(blob)) {
    return false;
  }
  if (!cache->write_dbg_strings(blob)) {
    return false;
  }
#endif /* PRODUCT */

  // Write name after code comments
  uint name_offset = cache->_write_position - entry_position;
  uint name_size = (uint)strlen(name) + 1; // Includes terminating '\0'
  n = cache->write_bytes(name, name_size);
  if (n != name_size) {
    return false;
  }

  uint entry_size = cache->_write_position - entry_position;

  // Record the entry in the cache's entries list (placement new, see
  // AOTCodeEntry::operator new).
  AOTCodeEntry* entry = new(cache) AOTCodeEntry(entry_kind, encode_id(entry_kind, id),
                                                entry_position, entry_size, name_offset, name_size,
                                                blob_offset, has_oop_maps, blob.content_begin());
  log_debug(aot, codecache, stubs)("Wrote code blob '%s' (id=%u, kind=%s) to AOT Code Cache", name, id, aot_code_entry_kind_name[entry_kind]);
  return true;
}
1145 
1146 bool AOTCodeCache::store_code_blob(CodeBlob& blob, AOTCodeEntry::Kind entry_kind, uint id, const char* name) {
1147   assert(!AOTCodeEntry::is_blob(entry_kind),
1148          "wrong entry kind for numeric id %d", id);
1149   return store_code_blob(blob, entry_kind, (uint)id, name, nullptr, nullptr);
1150 }
1151 
1152 bool AOTCodeCache::store_code_blob(CodeBlob& blob, AOTCodeEntry::Kind entry_kind, BlobId id) {
1153   assert(AOTCodeEntry::is_single_stub_blob(entry_kind),
1154          "wrong entry kind for blob id %s", StubInfo::name(id));
1155   return store_code_blob(blob, entry_kind, (uint)id, StubInfo::name(id), nullptr, nullptr);
1156 }
1157 
1158 bool AOTCodeCache::store_code_blob(CodeBlob& blob, AOTCodeEntry::Kind entry_kind, BlobId id, AOTStubData* stub_data, CodeBuffer* code_buffer) {
1159   assert(AOTCodeEntry::is_multi_stub_blob(entry_kind),
1160          "wrong entry kind for multi stub blob id %s", StubInfo::name(id));
1161   return store_code_blob(blob, entry_kind, (uint)id, StubInfo::name(id), stub_data, code_buffer);
1162 }
1163 
1164 bool AOTCodeCache::write_stub_data(CodeBlob &blob, AOTStubData *stub_data) {
1165   if (!align_write_int()) {
1166     return false;
1167   }
1168   BlobId blob_id = stub_data->blob_id();
1169   StubId stub_id = StubInfo::stub_base(blob_id);
1170   address blob_base = blob.code_begin();
1171   int stub_cnt = StubInfo::stub_count(blob_id);
1172   int n;
1173 
1174   LogStreamHandle(Trace, aot, codecache, stubs) log;
1175 
1176   if (log.is_enabled()) {
1177     log.print_cr("======== Stub data starts at offset %d", _write_position);
1178   }
1179 
1180   for (int i = 0; i < stub_cnt; i++, stub_id = StubInfo::next_in_blob(blob_id, stub_id)) {
1181     // for each stub we find in the ranges list we write an int
1182     // sequence <stubid,start,end,N,offset1, ... offsetN> where
1183     //
1184     // - start_pos is the stub start address encoded as a code section offset
1185     //
1186     // - end is the stub end address encoded as an offset from start
1187     //
1188     // - N counts the number of stub-local entries/extras
1189     //
1190     // - offseti is a stub-local entry/extra address encoded as len for
1191     // a null address otherwise as an offset in range [1,len-1]
1192 
1193     StubAddrRange& range = stub_data->get_range(i);
1194     GrowableArray<address>& addresses = stub_data->address_array();
1195     int base = range.start_index();
1196     if (base >= 0) {
1197       n = write_bytes(&stub_id, sizeof(StubId));
1198       if (n != sizeof(StubId)) {
1199         return false;
1200       }
1201       address start = addresses.at(base);
1202       assert (blob_base <= start, "sanity");
1203       uint offset = (uint)(start - blob_base);
1204       n = write_bytes(&offset, sizeof(uint));
1205       if (n != sizeof(int)) {
1206         return false;
1207       }
1208       address end = addresses.at(base + 1);
1209       assert (start < end, "sanity");
1210       offset = (uint)(end - start);
1211       n = write_bytes(&offset, sizeof(uint));
1212       if (n != sizeof(int)) {
1213         return false;
1214       }
1215       // write number of secondary and extra entries
1216       int count =  range.count() - 2;
1217       n = write_bytes(&count, sizeof(int));
1218       if (n != sizeof(int)) {
1219         return false;
1220       }
1221       for (int j = 0; j < count; j++) {
1222         address next = addresses.at(base + 2 + j);
1223         if (next != nullptr) {
1224           // n.b. This maps next == end to the stub length which
1225           // means we will reconstitute the address as nullptr. That
1226           // happens when we have a handler range covers the end of
1227           // a stub and needs to be handled specially by the client
1228           // that restores the extras.
1229           assert(start <= next && next <= end, "sanity");
1230           offset = (uint)(next - start);
1231         } else {
1232           // this can happen when a stub is not generated or an
1233           // extra is the common handler target
1234           offset = NULL_ADDRESS_MARKER;
1235         }
1236         n = write_bytes(&offset, sizeof(uint));
1237         if (n != sizeof(int)) {
1238           return false;
1239         }
1240       }
1241       if (log.is_enabled()) {
1242         log.print_cr("======== wrote stub %s and %d addresses up to offset %d",
1243                      StubInfo::name(stub_id), range.count(), _write_position);
1244       }
1245     }
1246   }
1247   // we should have exhausted all stub ids in the blob
1248   assert(stub_id == StubId::NO_STUBID, "sanity");
1249   // write NO_STUBID as an end marker
1250   n = write_bytes(&stub_id, sizeof(StubId));
1251   if (n != sizeof(StubId)) {
1252     return false;
1253   }
1254 
1255   if (log.is_enabled()) {
1256     log.print_cr("======== Stub data ends at offset %d", _write_position);
1257   }
1258 
1259   return true;
1260 }
1261 
1262 CodeBlob* AOTCodeCache::load_code_blob(AOTCodeEntry::Kind entry_kind, uint id, const char* name, AOTStubData* stub_data) {
1263   AOTCodeCache* cache = open_for_use();
1264   if (cache == nullptr) {
1265     return nullptr;
1266   }
1267   assert(AOTCodeEntry::is_valid_entry_kind(entry_kind), "invalid entry_kind %d", entry_kind);
1268 
1269   assert(AOTCodeEntry::is_multi_stub_blob(entry_kind) == (stub_data != nullptr),
1270          "entry_kind %d does not match stub_data pointer %p",
1271          entry_kind, stub_data);
1272 
1273   if (AOTCodeEntry::is_adapter(entry_kind) && !is_using_adapter()) {
1274     return nullptr;
1275   }
1276   if (AOTCodeEntry::is_blob(entry_kind) && !is_using_stub()) {
1277     return nullptr;
1278   }
1279   // we do not currently load C2 stubs because we are seeing weird
1280   // memory errors when loading them -- see JDK-8357593
1281   if (entry_kind == AOTCodeEntry::C2Blob) {
1282     return nullptr;
1283   }
1284   log_debug(aot, codecache, stubs)("Reading blob '%s' (id=%u, kind=%s) from AOT Code Cache", name, id, aot_code_entry_kind_name[entry_kind]);
1285 
1286   AOTCodeEntry* entry = cache->find_entry(entry_kind, encode_id(entry_kind, id));
1287   if (entry == nullptr) {
1288     return nullptr;
1289   }
1290   AOTCodeReader reader(cache, entry);
1291   CodeBlob* blob = reader.compile_code_blob(name, entry_kind, id, stub_data);
1292 
1293   log_debug(aot, codecache, stubs)("%sRead blob '%s' (id=%u, kind=%s) from AOT Code Cache",
1294                                    (blob == nullptr? "Failed to " : ""), name, id, aot_code_entry_kind_name[entry_kind]);
1295   return blob;
1296 }
1297 
1298 CodeBlob* AOTCodeCache::load_code_blob(AOTCodeEntry::Kind entry_kind, uint id, const char* name) {
1299   assert(!AOTCodeEntry::is_blob(entry_kind),
1300          "wrong entry kind for numeric id %d", id);
1301   return load_code_blob(entry_kind, (uint)id, name, nullptr);
1302 }
1303 
1304 CodeBlob* AOTCodeCache::load_code_blob(AOTCodeEntry::Kind entry_kind, BlobId id) {
1305   assert(AOTCodeEntry::is_single_stub_blob(entry_kind),
1306          "wrong entry kind for blob id %s", StubInfo::name(id));
1307   return load_code_blob(entry_kind, (uint)id, StubInfo::name(id), nullptr);
1308 }
1309 
1310 CodeBlob* AOTCodeCache::load_code_blob(AOTCodeEntry::Kind entry_kind, BlobId id, AOTStubData* stub_data) {
1311   assert(AOTCodeEntry::is_multi_stub_blob(entry_kind),
1312          "wrong entry kind for blob id %s", StubInfo::name(id));
1313   return load_code_blob(entry_kind, (uint)id, StubInfo::name(id), stub_data);
1314 }
1315 
// Reconstitute a CodeBlob from this reader's archived entry. Validates the
// stored name against 'name', locates the archived blob, its relocation
// data and (optionally) oop maps, then asks CodeBlob::create() to build a
// live blob -- which calls back into AOTCodeReader::restore() using the
// context recorded here. Returns nullptr on a name mismatch or when the
// CodeCache has no space left.
CodeBlob* AOTCodeReader::compile_code_blob(const char* name, AOTCodeEntry::Kind entry_kind, int id, AOTStubData* stub_data) {
  uint entry_position = _entry->offset();

  // Read name
  uint name_offset = entry_position + _entry->name_offset();
  uint name_size = _entry->name_size(); // Includes terminating '\0'
  const char* stored_name = addr(name_offset);

  if (strncmp(stored_name, name, (name_size - 1)) != 0) {
    log_warning(aot, codecache, stubs)("Saved blob's name '%s' is different from the expected name '%s'",
                                       stored_name, name);
    set_lookup_failed(); // Skip this blob
    return nullptr;
  }
  _name = stored_name;

  // Read archived code blob and related info
  uint offset = entry_position + _entry->blob_offset();
  CodeBlob* archived_blob = (CodeBlob*)addr(offset);
  offset += archived_blob->size();

  // Relocation count precedes the relocation data (see store_code_blob).
  _reloc_count = *(int*)addr(offset);
  offset += sizeof(int);
  if (AOTCodeEntry::is_multi_stub_blob(entry_kind)) {
    // position of relocs will have been aligned to heap word size so
    // we can install them into a code buffer
    offset = align_up(offset, DATA_ALIGNMENT);
  }
  _reloc_data = (address)addr(offset);
  offset += _reloc_count * sizeof(relocInfo);
  set_read_position(offset);

  if (_entry->has_oop_maps()) {
    _oop_maps = read_oop_map_set();
  }

  // record current context for use by that callback
  _stub_data = stub_data;
  _entry_kind = entry_kind;
  _id = id;

  // CodeBlob::restore() calls AOTCodeReader::restore()

  CodeBlob* code_blob = CodeBlob::create(archived_blob, this);

  if (code_blob == nullptr) { // no space left in CodeCache
    return nullptr;
  }

#ifdef ASSERT
  LogStreamHandle(Trace, aot, codecache, stubs) log;
  if (log.is_enabled()) {
    FlagSetting fs(PrintRelocations, true);
    code_blob->print_on(&log);
  }
#endif
  return code_blob;
}
1374 
// Callback from CodeBlob::create()/restore(): finish reconstituting the
// freshly allocated 'code_blob' using the context captured by
// compile_code_blob() (_name, _reloc_data, _oop_maps, _entry_kind, _id,
// _stub_data). Restores mutable data, publishes stub entry addresses, and
// patches relocations for the blob's new location.
void AOTCodeReader::restore(CodeBlob* code_blob) {
  precond(AOTCodeCache::is_on_for_use());
  precond(_name != nullptr);
  precond(_reloc_data != nullptr);

  code_blob->set_name(_name);
  // Saved relocations need restoring except for the case of a
  // multi-stub blob which has no runtime relocations. However, we may
  // still have saved some (re-)load time relocs that were attached to
  // the generator's code buffer. We don't attach them to the blob but
  // they get processed below by fix_relocations.
  if (!AOTCodeEntry::is_multi_stub_blob(_entry_kind)) {
    code_blob->restore_mutable_data(_reloc_data);
  }
  code_blob->set_oop_maps(_oop_maps);

  // if this is a multi stub blob load its entries
  if (AOTCodeEntry::is_blob(_entry_kind)) {
    BlobId blob_id = static_cast<BlobId>(_id);
    if (StubInfo::is_stubgen(blob_id)) {
      assert(_stub_data != nullptr, "sanity");
      read_stub_data(code_blob, _stub_data);
    }
    // publish entries found either in stub_data or as offsets in blob
    AOTCodeCache::publish_stub_addresses(*code_blob, blob_id, _stub_data);
  }

  // Now that all the entry points are in the address table we can
  // read all the extra reloc info and fix up any addresses that need
  // patching to adjust for a new location in a new JVM. We can be
  // sure to correctly update all runtime references, including
  // cross-linked stubs that are internally daisy-chained. If
  // relocation fails and we have to re-generate any of the stubs then
  // the entry points for newly generated stubs will get updated,
  // ensuring that any other stubs or nmethods we need to relocate
  // will use the correct address.

  // if we have a relocatable code blob then the relocs are already
  // attached to the blob and we can iterate over it to find the ones
  // we need to patch. With a non-relocatable code blob we need to
  // wrap it with a CodeBuffer and then reattach the relocs to the
  // code buffer.

  if (AOTCodeEntry::is_multi_stub_blob(_entry_kind)) {
    // the blob doesn't have any proper runtime relocs but we can
    // reinstate the AOT-load time relocs we saved from the code
    // buffer that generated this blob in a new code buffer and use
    // the latter to iterate over them
    if (_reloc_count > 0) {
      CodeBuffer code_buffer(code_blob);
      relocInfo* locs = (relocInfo*)_reloc_data;
      code_buffer.insts()->initialize_shared_locs(locs, _reloc_count);
      code_buffer.insts()->set_locs_end(locs + _reloc_count);
      CodeSection *cs = code_buffer.code_section(CodeBuffer::SECT_INSTS);
      RelocIterator reloc_iter(cs);
      fix_relocations(code_blob, reloc_iter);
    }
  } else {
    // the AOT-load time relocs will be in the blob's restored relocs
    RelocIterator reloc_iter(code_blob);
    fix_relocations(code_blob, reloc_iter);
  }

#ifndef PRODUCT
  // Restore auxiliary debug info saved after the relocations.
  code_blob->asm_remarks().init();
  read_asm_remarks(code_blob->asm_remarks());
  code_blob->dbg_strings().init();
  read_dbg_strings(code_blob->dbg_strings());
#endif // PRODUCT
}
1445 
// Parse the per-stub address records written by write_stub_data() --
// sequences <stub_id, start, end, N, offset1 ... offsetN> terminated by
// StubId::NO_STUBID -- decode the offsets back into absolute addresses
// relative to the restored blob, and record them in 'stub_data'.
void AOTCodeReader::read_stub_data(CodeBlob* code_blob, AOTStubData* stub_data) {
  GrowableArray<address>& addresses = stub_data->address_array();
  // Read the list of stub ids and associated start, end, secondary
  // and extra addresses and install them in the stub data.
  //
  // Also insert all start and secondary addresses into the AOTCache
  // address table so we correctly relocate this blob and any followng
  // blobs/nmethods.
  //
  // n.b. if an error occurs and we need to regenerate any of these
  // stubs the address table will be updated as a side-effect of
  // regeneration.

  address blob_base = code_blob->code_begin();
  uint blob_size = (uint)(code_blob->code_end() - blob_base);
  uint offset = align_read_int();
  LogStreamHandle(Trace, aot, codecache, stubs) log;
  if (log.is_enabled()) {
    log.print_cr("======== Stub data starts at offset %d", offset);
  }
  // read stub and entries until we see NO_STUBID
  StubId stub_id = *(StubId*)addr(offset); offset += sizeof(StubId);
  // we ought to have at least one saved stub in the blob
  assert(stub_id != StubId::NO_STUBID, "blob %s contains no stubs!", StubInfo::name(stub_data->blob_id()));
  while (stub_id != StubId::NO_STUBID) {
    assert(StubInfo::blob(stub_id) == stub_data->blob_id(), "sanity");
    int idx = StubInfo::stubgen_offset_in_blob(stub_data->blob_id(), stub_id);
    StubAddrRange& range = stub_data->get_range(idx);
    // we should only see a stub once
    assert(range.start_index() < 0, "repeated entry for stub %s", StubInfo::name(stub_id));
    // this stub's addresses begin at the current end of the array
    int address_base = addresses.length();
    // start is an offset from the blob base
    uint start = *(uint*)addr(offset); offset += sizeof(uint);
    assert(start < blob_size, "stub %s start offset %d exceeds buffer length %d", StubInfo::name(stub_id), start, blob_size);
    address stub_start = blob_base + start;
    addresses.append(stub_start);
    // end is an offset from the stub start
    uint end = *(uint*)addr(offset); offset += sizeof(uint);
    assert(start + end <= blob_size, "stub %s end offset %d exceeds remaining buffer length %d", StubInfo::name(stub_id), end, blob_size - start);
    addresses.append(stub_start + end);
    // read count of secondary entries plus extras
    int entries_count = *(int*)addr(offset); offset += sizeof(int);
    assert(entries_count >= (StubInfo::entry_count(stub_id) - 1), "not enough entries for %s", StubInfo::name(stub_id));
    for (int i = 0; i < entries_count; i++) {
      // entry offset is an offset from the stub start less than or
      // equal to end
      uint entry = *(uint*)addr(offset); offset += sizeof(uint);
      if (entry <= end) {
        // entry addresses may not address end but extras can
        assert(entry < end || i >= StubInfo::entry_count(stub_id),
               "entry offset 0x%x exceeds stub length 0x%x for stub %s",
               entry, end, StubInfo::name(stub_id));
        addresses.append(stub_start + entry);
      } else {
        // special case: entry encodes a nullptr
        assert(entry == AOTCodeCache::NULL_ADDRESS_MARKER, "stub %s entry offset %d lies beyond stub end %d and does not equal NULL_ADDRESS_MARKER", StubInfo::name(stub_id), entry, end);
        addresses.append(nullptr);
      }
    }
    if (log.is_enabled()) {
      log.print_cr("======== read stub %s and %d addresses up to offset %d",
                   StubInfo::name(stub_id),  2 + entries_count, offset);
    }
    // record where this stub's addresses sit in the array (+2 for start/end)
    range.init_entry(address_base, 2 + entries_count);
    // move on to next stub or NO_STUBID
    stub_id = *(StubId*)addr(offset); offset += sizeof(StubId);
  }
  if (log.is_enabled()) {
    log.print_cr("======== Stub data ends at offset %d", offset);
  }

  // leave the read position just past the end marker
  set_read_position(offset);
}
1519 
1520 void AOTCodeCache::publish_external_addresses(GrowableArray<address>& addresses) {
1521   DEBUG_ONLY( _passed_init2 = true; )
1522   if (opened_cache == nullptr) {
1523     return;
1524   }
1525 
1526   cache()->_table->add_external_addresses(addresses);
1527 }
1528 
// Publish the entry addresses of the stub(s) contained in code_blob into
// the AOT code address table. When stub_data is non-null the blob holds
// multiple stubs whose start/end/entry addresses were collected by the
// reader; otherwise the blob holds exactly one stub and its entries are
// derived directly from the blob itself.
void AOTCodeCache::publish_stub_addresses(CodeBlob &code_blob, BlobId blob_id, AOTStubData *stub_data) {
  if (stub_data != nullptr) {
    // register all entries in stub
    assert(StubInfo::stub_count(blob_id) > 1,
           "multiple stub data provided for single stub blob %s",
           StubInfo::name(blob_id));
    assert(blob_id == stub_data->blob_id(),
           "blob id %s does not match id in stub data %s",
           StubInfo::name(blob_id),
           StubInfo::name(stub_data->blob_id()));
    // iterate over all stubs in the blob
    StubId stub_id = StubInfo::stub_base(blob_id);
    int stub_cnt = StubInfo::stub_count(blob_id);
    GrowableArray<address>& addresses = stub_data->address_array();
    for (int i = 0; i < stub_cnt; i++) {
      assert(stub_id != StubId::NO_STUBID, "sanity");
      StubAddrRange& range = stub_data->get_range(i);
      int base = range.start_index();
      // a negative start index means this stub was not read from the cache
      if (base >= 0) {
        // addresses[base] is the stub start; the two slots at base and
        // base+1 hold start/end, so secondary entries begin at base+2
        cache()->add_stub_entries(stub_id, addresses.at(base), &addresses, base + 2);
      }
      stub_id = StubInfo::next_in_blob(blob_id, stub_id);
    }
    // we should have exhausted all stub ids in the blob
    assert(stub_id == StubId::NO_STUBID, "sanity");
  } else {
    // register entry or entries for a single stub blob
    StubId stub_id = StubInfo::stub_base(blob_id);
    assert(StubInfo::stub_count(blob_id) == 1,
           "multiple stub blob %s provided without stub data",
           StubInfo::name(blob_id));
    address start = code_blob.code_begin();
    if (StubInfo::entry_count(stub_id) == 1) {
      assert(!code_blob.is_deoptimization_stub(), "expecting multiple entries for stub %s", StubInfo::name(stub_id));
      // register the blob base address as the only entry
      cache()->add_stub_entries(stub_id, start);
    } else {
      // the deopt blob is the only single-stub blob with multiple entries
      assert(code_blob.is_deoptimization_stub(), "only expecting one entry for stub %s", StubInfo::name(stub_id));
      DeoptimizationBlob *deopt_blob = code_blob.as_deoptimization_blob();
      assert(deopt_blob->unpack() == start, "unexpected offset 0x%x for deopt stub entry", (int)(deopt_blob->unpack() - start));
      GrowableArray<address> addresses;
      addresses.append(deopt_blob->unpack_with_exception());
      addresses.append(deopt_blob->unpack_with_reexecution());
      addresses.append(deopt_blob->unpack_with_exception_in_tls());
#if INCLUDE_JVMCI
      addresses.append(deopt_blob->uncommon_trap());
      addresses.append(deopt_blob->implicit_exception_uncommon_trap());
#endif // INCLUDE_JVMCI
      cache()->add_stub_entries(stub_id, start, &addresses, 0);
    }
  }
}
1581 
1582 // ------------ process code and data --------------
1583 
// Can't use -1. It is a valid value for a jump-to-itself destination
// used by the static call stub: see NativeJump::jump_destination().
1586 #define BAD_ADDRESS_ID -2
1587 
1588 bool AOTCodeCache::write_relocations(CodeBlob& code_blob, RelocIterator& iter) {
1589   if (!align_write_int()) {
1590     return false;
1591   }
1592   GrowableArray<uint> reloc_data;
1593   LogStreamHandle(Trace, aot, codecache, reloc) log;
1594   while (iter.next()) {
1595     int idx = reloc_data.append(0); // default value
1596     switch (iter.type()) {
1597       case relocInfo::none:
1598         break;
1599       case relocInfo::runtime_call_type: {
1600         // Record offset of runtime destination
1601         CallRelocation* r = (CallRelocation*)iter.reloc();
1602         address dest = r->destination();
1603         if (dest == r->addr()) { // possible call via trampoline on Aarch64
1604           dest = (address)-1;    // do nothing in this case when loading this relocation
1605         }
1606         int id = _table->id_for_address(dest, iter, &code_blob);
1607         if (id == BAD_ADDRESS_ID) {
1608           return false;
1609         }
1610         reloc_data.at_put(idx, id);
1611         break;
1612       }
1613       case relocInfo::runtime_call_w_cp_type:
1614         log_debug(aot, codecache, reloc)("runtime_call_w_cp_type relocation is not implemented");
1615         return false;
1616       case relocInfo::external_word_type: {
1617         // Record offset of runtime target
1618         address target = ((external_word_Relocation*)iter.reloc())->target();
1619         int id = _table->id_for_address(target, iter, &code_blob);
1620         if (id == BAD_ADDRESS_ID) {
1621           return false;
1622         }
1623         reloc_data.at_put(idx, id);
1624         break;
1625       }
1626       case relocInfo::internal_word_type:
1627         break;
1628       case relocInfo::section_word_type:
1629         break;
1630       case relocInfo::post_call_nop_type:
1631         break;
1632       default:
1633         log_debug(aot, codecache, reloc)("relocation %d unimplemented", (int)iter.type());
1634         return false;
1635         break;
1636     }
1637     if (log.is_enabled()) {
1638       iter.print_current_on(&log);
1639     }
1640   }
1641 
1642   // Write additional relocation data: uint per relocation
1643   // Write the count first
1644   int count = reloc_data.length();
1645   write_bytes(&count, sizeof(int));
1646   if (log.is_enabled()) {
1647     log.print_cr("======== extra relocations count=%d", count);
1648     log.print(   "  {");
1649   }
1650   bool first = true;
1651   for (GrowableArrayIterator<uint> iter = reloc_data.begin();
1652        iter != reloc_data.end(); ++iter) {
1653     uint value = *iter;
1654     int n = write_bytes(&value, sizeof(uint));
1655     if (n != sizeof(uint)) {
1656       return false;
1657     }
1658     if (log.is_enabled()) {
1659       if (first) {
1660         first = false;
1661         log.print("%d", value);
1662       } else {
1663         log.print(", %d", value);
1664       }
1665     }
1666   }
1667   if (log.is_enabled()) {
1668     log.print_cr("}");
1669   }
1670   return true;
1671 }
1672 
// Re-apply relocations to an AOT-loaded code blob. Walks the blob's
// relocations in the same order as AOTCodeCache::write_relocations and
// consumes exactly one uint of auxiliary data per relocation (an
// address-table id for calls/external words; unused for other types).
void AOTCodeReader::fix_relocations(CodeBlob *code_blob, RelocIterator& iter) {
  uint offset = align_read_int();
  int reloc_count = *(int*)addr(offset);
  offset += sizeof(int);
  uint* reloc_data = (uint*)addr(offset);
  offset += (reloc_count * sizeof(uint));
  set_read_position(offset);

  LogStreamHandle(Trace, aot, codecache, reloc) log;
  if (log.is_enabled()) {
    log.print_cr("======== extra relocations count=%d", reloc_count);
    log.print("  {");
    for(int i = 0; i < reloc_count; i++) {
      if (i == 0) {
        log.print("%d", reloc_data[i]);
      } else {
        log.print(", %d", reloc_data[i]);
      }
    }
    log.print_cr("}");
  }

  // j indexes the auxiliary data; it advances once per relocation, matching
  // the one-uint-per-relocation layout produced by write_relocations
  int j = 0;
  while (iter.next()) {
    switch (iter.type()) {
      case relocInfo::none:
        break;
      case relocInfo::runtime_call_type: {
        // (address)-1 marks a call that needed no patching at dump time
        // (e.g. trampoline call on AArch64) — leave it untouched
        address dest = _cache->address_for_id(reloc_data[j]);
        if (dest != (address)-1) {
          ((CallRelocation*)iter.reloc())->set_destination(dest);
        }
        break;
      }
      case relocInfo::runtime_call_w_cp_type:
        // this relocation should not be in cache (see write_relocations)
        assert(false, "runtime_call_w_cp_type relocation is not implemented");
        break;
      case relocInfo::external_word_type: {
        address target = _cache->address_for_id(reloc_data[j]);
        // Add external address to global table
        int index = ExternalsRecorder::find_index(target);
        // Update index in relocation
        Relocation::add_jint(iter.data(), index);
        external_word_Relocation* reloc = (external_word_Relocation*)iter.reloc();
        assert(reloc->target() == target, "sanity");
        reloc->set_value(target); // Patch address in the code
        break;
      }
      case relocInfo::internal_word_type: {
        // rebase a pointer into the blob from its dump-time location
        internal_word_Relocation* r = (internal_word_Relocation*)iter.reloc();
        r->fix_relocation_after_aot_load(aot_code_entry()->dumptime_content_start_addr(), code_blob->content_begin());
        break;
      }
      case relocInfo::section_word_type: {
        section_word_Relocation* r = (section_word_Relocation*)iter.reloc();
        r->fix_relocation_after_aot_load(aot_code_entry()->dumptime_content_start_addr(), code_blob->content_begin());
        break;
      }
      case relocInfo::post_call_nop_type:
        break;
      default:
        assert(false,"relocation %d unimplemented", (int)iter.type());
        break;
    }
    if (log.is_enabled()) {
      iter.print_current_on(&log);
    }
    j++;
  }
  // every auxiliary data slot must have been consumed
  assert(j == reloc_count, "sanity");
}
1745 
1746 bool AOTCodeCache::write_oop_map_set(CodeBlob& cb) {
1747   if (!align_write_int()) {
1748     return false;
1749   }
1750   ImmutableOopMapSet* oopmaps = cb.oop_maps();
1751   int oopmaps_size = oopmaps->nr_of_bytes();
1752   if (!write_bytes(&oopmaps_size, sizeof(int))) {
1753     return false;
1754   }
1755   uint n = write_bytes(oopmaps, oopmaps->nr_of_bytes());
1756   if (n != (uint)oopmaps->nr_of_bytes()) {
1757     return false;
1758   }
1759   return true;
1760 }
1761 
1762 ImmutableOopMapSet* AOTCodeReader::read_oop_map_set() {
1763   uint offset = align_read_int();
1764   int size = *(int *)addr(offset);
1765   offset += sizeof(int);
1766   ImmutableOopMapSet* oopmaps = (ImmutableOopMapSet *)addr(offset);
1767   offset += size;
1768   set_read_position(offset);
1769   return oopmaps;
1770 }
1771 
1772 #ifndef PRODUCT
// Persist the blob's asm remarks (non-product builds only).
// Layout: uint count, then per remark a uint code offset and an int id for
// the remark string in the AOT C-string table. The count slot is reserved
// up front and patched once iteration is done.
bool AOTCodeCache::write_asm_remarks(CodeBlob& cb) {
  if (!align_write_int()) {
    return false;
  }
  // Write asm remarks
  uint* count_ptr = (uint *)reserve_bytes(sizeof(uint));
  if (count_ptr == nullptr) {
    return false;
  }
  uint count = 0;
  bool result = cb.asm_remarks().iterate([&] (uint offset, const char* str) -> bool {
    log_trace(aot, codecache, stubs)("asm remark offset=%d, str='%s'", offset, str);
    uint n = write_bytes(&offset, sizeof(uint));
    if (n != sizeof(uint)) {
      return false; // abort iteration on write failure
    }
    // intern the string so it can be referenced by a stable id
    const char* cstr = add_C_string(str);
    int id = _table->id_for_C_string((address)cstr);
    assert(id != -1, "asm remark string '%s' not found in AOTCodeAddressTable", str);
    n = write_bytes(&id, sizeof(int));
    if (n != sizeof(int)) {
      return false;
    }
    count += 1;
    return true;
  });
  // patch the reserved slot with the number of remarks actually written
  *count_ptr = count;
  return result;
}
1802 
1803 void AOTCodeReader::read_asm_remarks(AsmRemarks& asm_remarks) {
1804   // Read asm remarks
1805   uint offset = align_read_int();
1806   uint count = *(uint *)addr(offset);
1807   offset += sizeof(uint);
1808   for (uint i = 0; i < count; i++) {
1809     uint remark_offset = *(uint *)addr(offset);
1810     offset += sizeof(uint);
1811     int remark_string_id = *(uint *)addr(offset);
1812     offset += sizeof(int);
1813     const char* remark = (const char*)_cache->address_for_C_string(remark_string_id);
1814     asm_remarks.insert(remark_offset, remark);
1815   }
1816   set_read_position(offset);
1817 }
1818 
1819 bool AOTCodeCache::write_dbg_strings(CodeBlob& cb) {
1820   if (!align_write_int()) {
1821     return false;
1822   }
1823   // Write dbg strings
1824   uint* count_ptr = (uint *)reserve_bytes(sizeof(uint));
1825   if (count_ptr == nullptr) {
1826     return false;
1827   }
1828   uint count = 0;
1829   bool result = cb.dbg_strings().iterate([&] (const char* str) -> bool {
1830     log_trace(aot, codecache, stubs)("dbg string=%s", str);
1831     const char* cstr = add_C_string(str);
1832     int id = _table->id_for_C_string((address)cstr);
1833     assert(id != -1, "db string '%s' not found in AOTCodeAddressTable", str);
1834     uint n = write_bytes(&id, sizeof(int));
1835     if (n != sizeof(int)) {
1836       return false;
1837     }
1838     count += 1;
1839     return true;
1840   });
1841   *count_ptr = count;
1842   return result;
1843 }
1844 
1845 void AOTCodeReader::read_dbg_strings(DbgStrings& dbg_strings) {
1846   // Read dbg strings
1847   uint offset = align_read_int();
1848   uint count = *(uint *)addr(offset);
1849   offset += sizeof(uint);
1850   for (uint i = 0; i < count; i++) {
1851     int string_id = *(uint *)addr(offset);
1852     offset += sizeof(int);
1853     const char* str = (const char*)_cache->address_for_C_string(string_id);
1854     dbg_strings.insert(str);
1855   }
1856   set_read_position(offset);
1857 }
1858 #endif // PRODUCT
1859 
1860 //======================= AOTCodeAddressTable ===============
1861 
// Address table ids for generated routine entry addresses, external
// addresses and C string addresses are partitioned into positive
// integer ranges defined by the following positive base and max
// values, i.e. [_extrs_base, _extrs_base + _extrs_max - 1],
// [_stubs_base, _stubs_base + _stubs_max - 1] and [_c_str_base,
// _c_str_base + _c_str_max - 1].
1868 
1869 #define _extrs_max 380
1870 #define _stubs_max static_cast<int>(EntryId::NUM_ENTRYIDS)
1871 
1872 #define _extrs_base 0
1873 #define _stubs_base (_extrs_base + _extrs_max)
1874 #define _all_max    (_stubs_base + _stubs_max)
1875 
// The setter for external addresses and string addresses inserts new
// addresses in the order they are encountered, which must remain the
// same across an assembly run and a subsequent production run.
1879 
1880 #define ADD_EXTERNAL_ADDRESS(addr)                               \
1881   {                                                              \
1882     hash_address((address) addr, _extrs_base + _extrs_length);   \
1883     _extrs_addr[_extrs_length++] = (address) (addr);             \
1884     assert(_extrs_length <= _extrs_max, "increase size");        \
1885   }
1886 
1887 // insert into to the address hash table the index of an external
1888 // address or a stub address in the list of external or stub
1889 // addresses, respectively, keyed by the relevant address
1890 
1891 void AOTCodeAddressTable::hash_address(address addr, int idx) {
1892   // only do this if we have a non-null address to record and the
1893   // cache is open for dumping
1894   if (addr == nullptr) {
1895     return;
1896   }
1897   // check opened_cache because this can be called before the cache is
1898   // properly initialized and only continue when dumping is enabled
1899   if (opened_cache != nullptr && opened_cache->for_dump()) {
1900     if (_hash_table == nullptr) {
1901       _hash_table = new (mtCode) AOTCodeAddressHashTable();
1902     }
1903     assert(_hash_table->get(addr) == nullptr, "repeated insert of address " INTPTR_FORMAT, p2i(addr));
1904     _hash_table->put(addr, idx);
1905     log_trace(aot, codecache)("Address " INTPTR_FORMAT " inserted into AOT Code Cache address hash table with index '%d'",
1906                               p2i(addr), idx);
1907   }
1908 }
1909 
// Set while external-address registration is in progress (between
// init_extrs() and init_extrs2()); guards against re-entry and is checked
// by the sequencing asserts below.
static bool initializing_extrs = false;
1911 
// First phase of external-address registration: allocate the external
// address array and record the addresses of VM runtime entry points,
// GC barrier routines and other non-code targets that AOT-compiled code
// may reference. NOTE: the registration order assigns the table ids and
// must therefore stay identical between the assembly run and the
// production run (see the comment above ADD_EXTERNAL_ADDRESS).
void AOTCodeAddressTable::init_extrs() {
  if (_extrs_complete || initializing_extrs) return; // Done already

  initializing_extrs = true;
  _extrs_addr = NEW_C_HEAP_ARRAY(address, _extrs_max, mtCode);

  _extrs_length = 0;

  {
    // Required by initial stubs
    ADD_EXTERNAL_ADDRESS(SharedRuntime::exception_handler_for_return_address); // used by forward_exception
    ADD_EXTERNAL_ADDRESS(CompressedOops::base_addr()); // used by call_stub
    ADD_EXTERNAL_ADDRESS(Thread::current); // used by call_stub
    ADD_EXTERNAL_ADDRESS(SharedRuntime::throw_StackOverflowError);
    ADD_EXTERNAL_ADDRESS(SharedRuntime::throw_delayed_StackOverflowError);
  }

  // Record addresses of VM runtime methods
  ADD_EXTERNAL_ADDRESS(SharedRuntime::fixup_callers_callsite);
  ADD_EXTERNAL_ADDRESS(SharedRuntime::handle_wrong_method);
  ADD_EXTERNAL_ADDRESS(SharedRuntime::handle_wrong_method_abstract);
  ADD_EXTERNAL_ADDRESS(SharedRuntime::handle_wrong_method_ic_miss);
  ADD_EXTERNAL_ADDRESS(SharedRuntime::allocate_inline_types);
#if defined(AARCH64) && !defined(ZERO)
  ADD_EXTERNAL_ADDRESS(JavaThread::aarch64_get_thread_helper);
  ADD_EXTERNAL_ADDRESS(BarrierSetAssembler::patching_epoch_addr());
#endif

#ifndef PRODUCT
  ADD_EXTERNAL_ADDRESS(&SharedRuntime::_jbyte_array_copy_ctr); // used by arraycopy stub on arm32 and x86_64
  ADD_EXTERNAL_ADDRESS(&SharedRuntime::_jshort_array_copy_ctr); // used by arraycopy stub
  ADD_EXTERNAL_ADDRESS(&SharedRuntime::_jint_array_copy_ctr); // used by arraycopy stub
  ADD_EXTERNAL_ADDRESS(&SharedRuntime::_jlong_array_copy_ctr); // used by arraycopy stub
  ADD_EXTERNAL_ADDRESS(&SharedRuntime::_oop_array_copy_ctr); // used by arraycopy stub
  ADD_EXTERNAL_ADDRESS(&SharedRuntime::_checkcast_array_copy_ctr); // used by arraycopy stub
  ADD_EXTERNAL_ADDRESS(&SharedRuntime::_unsafe_array_copy_ctr); // used by arraycopy stub
  ADD_EXTERNAL_ADDRESS(&SharedRuntime::_generic_array_copy_ctr); // used by arraycopy stub
  ADD_EXTERNAL_ADDRESS(&SharedRuntime::_unsafe_set_memory_ctr); // used by arraycopy stub
#endif /* PRODUCT */

  ADD_EXTERNAL_ADDRESS(SharedRuntime::enable_stack_reserved_zone);

#if defined(AMD64) && !defined(ZERO)
  ADD_EXTERNAL_ADDRESS(SharedRuntime::montgomery_multiply);
  ADD_EXTERNAL_ADDRESS(SharedRuntime::montgomery_square);
#endif // defined(AMD64) && !defined(ZERO)

  // Arithmetic and math runtime helpers used by compiled code
  ADD_EXTERNAL_ADDRESS(SharedRuntime::d2f);
  ADD_EXTERNAL_ADDRESS(SharedRuntime::d2i);
  ADD_EXTERNAL_ADDRESS(SharedRuntime::d2l);
  ADD_EXTERNAL_ADDRESS(SharedRuntime::dcos);
  ADD_EXTERNAL_ADDRESS(SharedRuntime::dexp);
  ADD_EXTERNAL_ADDRESS(SharedRuntime::dlog);
  ADD_EXTERNAL_ADDRESS(SharedRuntime::dlog10);
  ADD_EXTERNAL_ADDRESS(SharedRuntime::dpow);
#ifndef ZERO
  ADD_EXTERNAL_ADDRESS(SharedRuntime::drem);
#endif
  ADD_EXTERNAL_ADDRESS(SharedRuntime::dsin);
  ADD_EXTERNAL_ADDRESS(SharedRuntime::dtan);
  ADD_EXTERNAL_ADDRESS(SharedRuntime::f2i);
  ADD_EXTERNAL_ADDRESS(SharedRuntime::f2l);
#ifndef ZERO
  ADD_EXTERNAL_ADDRESS(SharedRuntime::frem);
#endif
  ADD_EXTERNAL_ADDRESS(SharedRuntime::l2d);
  ADD_EXTERNAL_ADDRESS(SharedRuntime::l2f);
  ADD_EXTERNAL_ADDRESS(SharedRuntime::ldiv);
  ADD_EXTERNAL_ADDRESS(SharedRuntime::lmul);
  ADD_EXTERNAL_ADDRESS(SharedRuntime::lrem);

#if INCLUDE_JVMTI
  ADD_EXTERNAL_ADDRESS(&JvmtiExport::_should_notify_object_alloc);
#endif /* INCLUDE_JVMTI */

  ADD_EXTERNAL_ADDRESS(ThreadIdentifier::unsafe_offset());
  // already added
  // ADD_EXTERNAL_ADDRESS(Thread::current);

  ADD_EXTERNAL_ADDRESS(os::javaTimeMillis);
  ADD_EXTERNAL_ADDRESS(os::javaTimeNanos);
#ifndef PRODUCT
  ADD_EXTERNAL_ADDRESS(os::breakpoint);
#endif

  ADD_EXTERNAL_ADDRESS(StubRoutines::crc_table_addr());
#ifndef PRODUCT
  ADD_EXTERNAL_ADDRESS(&SharedRuntime::_partial_subtype_ctr);
#endif

#if INCLUDE_JFR
  ADD_EXTERNAL_ADDRESS(JfrIntrinsicSupport::write_checkpoint);
  ADD_EXTERNAL_ADDRESS(JfrIntrinsicSupport::return_lease);
#endif

  ADD_EXTERNAL_ADDRESS(UpcallLinker::handle_uncaught_exception); // used by upcall_stub_exception_handler

  {
    // Required by Shared blobs
    ADD_EXTERNAL_ADDRESS(Deoptimization::fetch_unroll_info);
    ADD_EXTERNAL_ADDRESS(Deoptimization::unpack_frames);
    ADD_EXTERNAL_ADDRESS(SafepointSynchronize::handle_polling_page_exception);
    ADD_EXTERNAL_ADDRESS(SharedRuntime::resolve_opt_virtual_call_C);
    ADD_EXTERNAL_ADDRESS(SharedRuntime::resolve_virtual_call_C);
    ADD_EXTERNAL_ADDRESS(SharedRuntime::resolve_static_call_C);
    // already added
    // ADD_EXTERNAL_ADDRESS(SharedRuntime::throw_delayed_StackOverflowError);
    ADD_EXTERNAL_ADDRESS(SharedRuntime::throw_AbstractMethodError);
    ADD_EXTERNAL_ADDRESS(SharedRuntime::throw_IncompatibleClassChangeError);
    ADD_EXTERNAL_ADDRESS(SharedRuntime::throw_NullPointerException_at_call);
  }

#ifdef COMPILER1
  {
    // Required by C1 blobs
    ADD_EXTERNAL_ADDRESS(static_cast<int (*)(oopDesc*)>(SharedRuntime::dtrace_object_alloc));
    ADD_EXTERNAL_ADDRESS(SharedRuntime::register_finalizer);
    ADD_EXTERNAL_ADDRESS(Runtime1::is_instance_of);
    ADD_EXTERNAL_ADDRESS(Runtime1::exception_handler_for_pc);
    ADD_EXTERNAL_ADDRESS(Runtime1::check_abort_on_vm_exception);
    ADD_EXTERNAL_ADDRESS(Runtime1::new_instance);
    ADD_EXTERNAL_ADDRESS(Runtime1::counter_overflow);
    ADD_EXTERNAL_ADDRESS(Runtime1::new_type_array);
    ADD_EXTERNAL_ADDRESS(Runtime1::new_object_array);
    ADD_EXTERNAL_ADDRESS(Runtime1::new_multi_array);
    ADD_EXTERNAL_ADDRESS(Runtime1::throw_range_check_exception);
    ADD_EXTERNAL_ADDRESS(Runtime1::throw_index_exception);
    ADD_EXTERNAL_ADDRESS(Runtime1::throw_div0_exception);
    ADD_EXTERNAL_ADDRESS(Runtime1::throw_null_pointer_exception);
    ADD_EXTERNAL_ADDRESS(Runtime1::throw_array_store_exception);
    ADD_EXTERNAL_ADDRESS(Runtime1::throw_class_cast_exception);
    ADD_EXTERNAL_ADDRESS(Runtime1::throw_incompatible_class_change_error);
    ADD_EXTERNAL_ADDRESS(Runtime1::monitorenter);
    ADD_EXTERNAL_ADDRESS(Runtime1::monitorexit);
    ADD_EXTERNAL_ADDRESS(Runtime1::deoptimize);
    ADD_EXTERNAL_ADDRESS(Runtime1::access_field_patching);
    ADD_EXTERNAL_ADDRESS(Runtime1::move_klass_patching);
    ADD_EXTERNAL_ADDRESS(Runtime1::move_mirror_patching);
    ADD_EXTERNAL_ADDRESS(Runtime1::move_appendix_patching);
    ADD_EXTERNAL_ADDRESS(Runtime1::predicate_failed_trap);
    ADD_EXTERNAL_ADDRESS(Runtime1::unimplemented_entry);
    ADD_EXTERNAL_ADDRESS(Runtime1::new_null_free_array);
    ADD_EXTERNAL_ADDRESS(Runtime1::load_flat_array);
    ADD_EXTERNAL_ADDRESS(Runtime1::store_flat_array);
    ADD_EXTERNAL_ADDRESS(Runtime1::substitutability_check);
    ADD_EXTERNAL_ADDRESS(Runtime1::buffer_inline_args);
    ADD_EXTERNAL_ADDRESS(Runtime1::buffer_inline_args_no_receiver);
    ADD_EXTERNAL_ADDRESS(Runtime1::throw_identity_exception);
    ADD_EXTERNAL_ADDRESS(Runtime1::throw_illegal_monitor_state_exception);
    // already added
    // ADD_EXTERNAL_ADDRESS(Thread::current);
    ADD_EXTERNAL_ADDRESS(CompressedKlassPointers::base_addr());
  }
#endif

#ifdef COMPILER2
  {
    // Required by C2 blobs
    ADD_EXTERNAL_ADDRESS(Deoptimization::uncommon_trap);
    ADD_EXTERNAL_ADDRESS(OptoRuntime::handle_exception_C);
    ADD_EXTERNAL_ADDRESS(OptoRuntime::new_instance_C);
    ADD_EXTERNAL_ADDRESS(OptoRuntime::new_array_C);
    ADD_EXTERNAL_ADDRESS(OptoRuntime::new_array_nozero_C);
    ADD_EXTERNAL_ADDRESS(OptoRuntime::multianewarray2_C);
    ADD_EXTERNAL_ADDRESS(OptoRuntime::multianewarray3_C);
    ADD_EXTERNAL_ADDRESS(OptoRuntime::multianewarray4_C);
    ADD_EXTERNAL_ADDRESS(OptoRuntime::multianewarray5_C);
    ADD_EXTERNAL_ADDRESS(OptoRuntime::multianewarrayN_C);
    ADD_EXTERNAL_ADDRESS(OptoRuntime::complete_monitor_locking_C);
    ADD_EXTERNAL_ADDRESS(OptoRuntime::monitor_notify_C);
    ADD_EXTERNAL_ADDRESS(OptoRuntime::monitor_notifyAll_C);
    ADD_EXTERNAL_ADDRESS(OptoRuntime::rethrow_C);
    ADD_EXTERNAL_ADDRESS(OptoRuntime::slow_arraycopy_C);
    ADD_EXTERNAL_ADDRESS(OptoRuntime::register_finalizer_C);
    ADD_EXTERNAL_ADDRESS(OptoRuntime::load_unknown_inline_C);
    ADD_EXTERNAL_ADDRESS(OptoRuntime::store_unknown_inline_C);
    ADD_EXTERNAL_ADDRESS(OptoRuntime::vthread_end_first_transition_C);
    ADD_EXTERNAL_ADDRESS(OptoRuntime::vthread_start_final_transition_C);
    ADD_EXTERNAL_ADDRESS(OptoRuntime::vthread_start_transition_C);
    ADD_EXTERNAL_ADDRESS(OptoRuntime::vthread_end_transition_C);
    // already added
#if defined(AARCH64) && ! defined(PRODUCT)
    ADD_EXTERNAL_ADDRESS(JavaThread::verify_cross_modify_fence_failure);
#endif // AARCH64 && !PRODUCT
  }
#endif // COMPILER2

#if INCLUDE_G1GC
  ADD_EXTERNAL_ADDRESS(G1BarrierSetRuntime::write_ref_field_pre_entry);
  ADD_EXTERNAL_ADDRESS(G1BarrierSetRuntime::write_ref_array_pre_narrow_oop_entry); // used by arraycopy stubs
  ADD_EXTERNAL_ADDRESS(G1BarrierSetRuntime::write_ref_array_pre_oop_entry); // used by arraycopy stubs
  ADD_EXTERNAL_ADDRESS(G1BarrierSetRuntime::write_ref_array_post_entry); // used by arraycopy stubs
  ADD_EXTERNAL_ADDRESS(BarrierSetNMethod::nmethod_stub_entry_barrier); // used by method_entry_barrier

#endif
#if INCLUDE_SHENANDOAHGC
  ADD_EXTERNAL_ADDRESS(ShenandoahRuntime::write_barrier_pre);
  ADD_EXTERNAL_ADDRESS(ShenandoahRuntime::load_reference_barrier_strong);
  ADD_EXTERNAL_ADDRESS(ShenandoahRuntime::load_reference_barrier_strong_narrow);
  ADD_EXTERNAL_ADDRESS(ShenandoahRuntime::load_reference_barrier_weak);
  ADD_EXTERNAL_ADDRESS(ShenandoahRuntime::load_reference_barrier_weak_narrow);
  ADD_EXTERNAL_ADDRESS(ShenandoahRuntime::load_reference_barrier_phantom);
  ADD_EXTERNAL_ADDRESS(ShenandoahRuntime::load_reference_barrier_phantom_narrow);
  ADD_EXTERNAL_ADDRESS(ShenandoahRuntime::arraycopy_barrier_oop);
  ADD_EXTERNAL_ADDRESS(ShenandoahRuntime::arraycopy_barrier_narrow_oop);
#endif
#if INCLUDE_ZGC
  ADD_EXTERNAL_ADDRESS(ZBarrierSetRuntime::load_barrier_on_oop_field_preloaded_addr());
  ADD_EXTERNAL_ADDRESS(ZBarrierSetRuntime::load_barrier_on_oop_field_preloaded_store_good_addr());
  ADD_EXTERNAL_ADDRESS(ZBarrierSetRuntime::load_barrier_on_weak_oop_field_preloaded_addr());
  ADD_EXTERNAL_ADDRESS(ZBarrierSetRuntime::load_barrier_on_phantom_oop_field_preloaded_addr());
  ADD_EXTERNAL_ADDRESS(ZBarrierSetRuntime::no_keepalive_load_barrier_on_weak_oop_field_preloaded_addr());
  ADD_EXTERNAL_ADDRESS(ZBarrierSetRuntime::no_keepalive_load_barrier_on_phantom_oop_field_preloaded_addr());
  ADD_EXTERNAL_ADDRESS(ZBarrierSetRuntime::store_barrier_on_oop_field_with_healing_addr());
  ADD_EXTERNAL_ADDRESS(ZBarrierSetRuntime::store_barrier_on_oop_field_without_healing_addr());
  ADD_EXTERNAL_ADDRESS(ZBarrierSetRuntime::no_keepalive_store_barrier_on_oop_field_without_healing_addr());
  ADD_EXTERNAL_ADDRESS(ZBarrierSetRuntime::store_barrier_on_native_oop_field_without_healing_addr());
  ADD_EXTERNAL_ADDRESS(ZBarrierSetRuntime::load_barrier_on_oop_array_addr());

  ADD_EXTERNAL_ADDRESS(ZPointerVectorLoadBadMask);
  ADD_EXTERNAL_ADDRESS(ZPointerVectorStoreBadMask);
  ADD_EXTERNAL_ADDRESS(ZPointerVectorStoreGoodMask);
#if defined(AMD64)
  ADD_EXTERNAL_ADDRESS(&ZPointerLoadShift);
  ADD_EXTERNAL_ADDRESS(&ZPointerLoadShiftTable);
#endif
#endif
#ifndef ZERO
#if defined(AMD64) || defined(AARCH64) || defined(RISCV64)
  ADD_EXTERNAL_ADDRESS(MacroAssembler::debug64);
#endif // defined(AMD64) || defined(AARCH64) || defined(RISCV64)
#if defined(AMD64)
  ADD_EXTERNAL_ADDRESS(warning);
#endif // defined(AMD64)
#endif // ZERO

  // addresses of fields in AOT runtime constants area
  address* p = AOTRuntimeConstants::field_addresses_list();
  while (*p != nullptr) {
    address to_add = (address)*p++;
    ADD_EXTERNAL_ADDRESS(to_add);
  }

  log_debug(aot, codecache, init)("External addresses opened and recorded");
  // allocate storage for stub entries
  _stubs_addr = NEW_C_HEAP_ARRAY(address, _stubs_max, mtCode);
  log_debug(aot, codecache, init)("Stub addresses opened");
}
2160 
// Second (final) phase of external-address registration: record the
// continuation-support entries, then mark the external address table
// complete so no further external addresses can be added.
void AOTCodeAddressTable::init_extrs2() {
  assert(initializing_extrs && !_extrs_complete,
         "invalid sequence for init_extrs2");

  {
  ADD_EXTERNAL_ADDRESS(Continuation::prepare_thaw); // used by cont_thaw
  ADD_EXTERNAL_ADDRESS(Continuation::thaw_entry()); // used by cont_thaw
  ADD_EXTERNAL_ADDRESS(ContinuationEntry::thaw_call_pc_address()); // used by cont_preempt_stub
  }
  _extrs_complete = true;
  initializing_extrs = false;
  log_debug(aot, codecache, init)("External addresses recorded and closed");
}
2174 
2175 void AOTCodeAddressTable::add_external_addresses(GrowableArray<address>& addresses) {
2176   assert(initializing_extrs && !_extrs_complete,
2177          "invalid sequence for add_external_addresses");
2178   for (int i = 0; i < addresses.length(); i++) {
2179     ADD_EXTERNAL_ADDRESS(addresses.at(i));
2180   }
2181   log_debug(aot, codecache, init)("Recorded %d additional external addresses",
2182                                   addresses.length());
2183 }
2184 
// Record the address of a single stub entry in the address table, keyed
// by its EntryId. Valid only after external-address registration has
// started and before the entry's stub group has been closed.
void AOTCodeAddressTable::add_stub_entry(EntryId entry_id, address a) {
  assert(_extrs_complete || initializing_extrs,
         "recording stub entry address before external addresses complete");
  assert(!(StubInfo::is_shared(StubInfo::stub(entry_id)) && _shared_stubs_complete), "too late to add shared entry");
  assert(!(StubInfo::is_stubgen(StubInfo::stub(entry_id)) && _stubgen_stubs_complete), "too late to add stubgen entry");
  assert(!(StubInfo::is_c1(StubInfo::stub(entry_id)) && _c1_stubs_complete), "too late to add c1 entry");
  assert(!(StubInfo::is_c2(StubInfo::stub(entry_id)) && _c2_stubs_complete), "too late to add c2 entry");
  log_debug(aot, stubs)("Recording address 0x%p for %s entry %s", a, StubInfo::name(StubInfo::stubgroup(entry_id)), StubInfo::name(entry_id));
  // the stub-table id is the entry id offset by the stubs base range
  int idx = static_cast<int>(entry_id);
  hash_address(a, _stubs_base + idx);
  _stubs_addr[idx] = a;
}
2197 
// Close the shared-stub section of the table; further shared stub entries
// are rejected by the asserts in add_stub_entry.
void AOTCodeAddressTable::set_shared_stubs_complete() {
  assert(!_shared_stubs_complete, "repeated close for shared stubs!");
  _shared_stubs_complete = true;
  log_debug(aot, codecache, init)("Shared stubs closed");
}
2203 
// Close the C1-stub section of the table; further C1 stub entries are
// rejected by the asserts in add_stub_entry.
void AOTCodeAddressTable::set_c1_stubs_complete() {
  assert(!_c1_stubs_complete, "repeated close for c1 stubs!");
  _c1_stubs_complete = true;
  log_debug(aot, codecache, init)("C1 stubs closed");
}
2209 
// Close the C2-stub section of the table; further C2 stub entries are
// rejected by the asserts in add_stub_entry.
void AOTCodeAddressTable::set_c2_stubs_complete() {
  assert(!_c2_stubs_complete, "repeated close for c2 stubs!");
  _c2_stubs_complete = true;
  log_debug(aot, codecache, init)("C2 stubs closed");
}
2215 
// Close the StubGenerator-stub section of the table; later attempts to add
// a stubgen entry trip the corresponding assert in add_stub_entry().
void AOTCodeAddressTable::set_stubgen_stubs_complete() {
  assert(!_stubgen_stubs_complete, "repeated close for stubgen stubs!");
  _stubgen_stubs_complete = true;
  log_debug(aot, codecache, init)("StubGen stubs closed");
}
2221 
// Capacity of the C-string tables below. Non-product builds intern many
// more strings (assert messages etc.), so they get a larger limit.
#ifdef PRODUCT
#define MAX_STR_COUNT 200
#else
#define MAX_STR_COUNT 2000
#endif
#define _c_str_max  MAX_STR_COUNT
// Ids for C strings start right after all other address-table sections.
static const int _c_str_base = _all_max;

static const char* _C_strings_in[MAX_STR_COUNT] = {nullptr}; // Incoming strings
static const char* _C_strings[MAX_STR_COUNT]    = {nullptr}; // Our duplicates
static int _C_strings_count = 0;                             // Number of interned (duplicated) strings
static int _C_strings_s[MAX_STR_COUNT] = {0};                // Maps assigned id -> index in _C_strings
static int _C_strings_id[MAX_STR_COUNT] = {0};               // Maps index in _C_strings -> assigned id (-1 = none)
static int _C_strings_used = 0;                              // Number of strings with assigned ids
2236 
// Reconstruct the C-string tables from the AOT Code Cache on load.
// Section layout in the cache: strings_count uint lengths, immediately
// followed by the concatenated NUL-terminated string bodies.
void AOTCodeCache::load_strings() {
  uint strings_count  = _load_header->strings_count();
  if (strings_count == 0) {
    return;
  }
  if (strings_count > MAX_STR_COUNT) {
    fatal("Invalid strings_count loaded from AOT Code Cache: %d > MAX_STR_COUNT [%d]", strings_count, MAX_STR_COUNT);
    return;
  }
  uint strings_offset = _load_header->strings_offset();
  uint* string_lengths = (uint*)addr(strings_offset);
  // String bodies follow the length array.
  strings_offset += (strings_count * sizeof(uint));
  uint strings_size = _load_header->entries_offset() - strings_offset;
  // We have to keep cached strings longer than _cache buffer
  // because they are referenced from compiled code which may
  // still be executed on VM exit after _cache is freed.
  char* p = NEW_C_HEAP_ARRAY(char, strings_size+1, mtCode);
  memcpy(p, addr(strings_offset), strings_size);
  _C_strings_buf = p;
  for (uint i = 0; i < strings_count; i++) {
    _C_strings[i] = p;
    uint len = string_lengths[i];
    // Loaded strings get identity id mappings: id i <-> index i.
    _C_strings_s[i] = i;
    _C_strings_id[i] = i;
    log_trace(aot, codecache, stringtable)("load_strings: _C_strings[%d] " INTPTR_FORMAT " '%s'", i, p2i(p), p);
    p += len;
  }
  assert((uint)(p - _C_strings_buf) <= strings_size, "(" INTPTR_FORMAT " - " INTPTR_FORMAT ") = %d > %d ", p2i(p), p2i(_C_strings_buf), (uint)(p - _C_strings_buf), strings_size);
  _C_strings_count = strings_count;
  _C_strings_used  = strings_count;
  log_debug(aot, codecache, init)("  Loaded %d C strings of total length %d at offset %d from AOT Code Cache", _C_strings_count, strings_size, strings_offset);
}
2269 
// Write all id-assigned C strings to the cache: first an array of uint
// lengths, then the NUL-terminated bodies in id order. Returns the number
// of strings written, or -1 on a write failure.
int AOTCodeCache::store_strings() {
  if (_C_strings_used > 0) {
    MutexLocker ml(AOTCodeCStrings_lock, Mutex::_no_safepoint_check_flag);
    uint offset = _write_position;
    uint length = 0;
    // Reserve space for the length array up front; bodies follow.
    uint* lengths = (uint *)reserve_bytes(sizeof(uint) * _C_strings_used);
    if (lengths == nullptr) {
      return -1;
    }
    for (int i = 0; i < _C_strings_used; i++) {
      // _C_strings_s maps the assigned id back to the table index.
      const char* str = _C_strings[_C_strings_s[i]];
      log_trace(aot, codecache, stringtable)("store_strings: _C_strings[%d] " INTPTR_FORMAT " '%s'", i, p2i(str), str);
      uint len = (uint)strlen(str) + 1; // include trailing NUL
      length += len;
      assert(len < 1000, "big string: %s", str);
      lengths[i] = len;
      uint n = write_bytes(str, len);
      if (n != len) {
        return -1;
      }
    }
    log_debug(aot, codecache, exit)("  Wrote %d C strings of total length %d at offset %d to AOT Code Cache",
                                   _C_strings_used, length, offset);
  }
  return _C_strings_used;
}
2296 
2297 const char* AOTCodeCache::add_C_string(const char* str) {
2298   if (is_on_for_dump() && str != nullptr) {
2299     MutexLocker ml(AOTCodeCStrings_lock, Mutex::_no_safepoint_check_flag);
2300     AOTCodeAddressTable* table = addr_table();
2301     if (table != nullptr) {
2302       return table->add_C_string(str);
2303     }
2304   }
2305   return str;
2306 }
2307 
// Intern a C string: return an existing duplicate if the same pointer or
// equal contents were seen before, otherwise strdup() and record it.
// NOTE(review): appears to rely on the caller holding AOTCodeCStrings_lock
// (see AOTCodeCache::add_C_string) — confirm with other callers.
const char* AOTCodeAddressTable::add_C_string(const char* str) {
  if (_extrs_complete || initializing_extrs) {
    // Check previous strings address
    for (int i = 0; i < _C_strings_count; i++) {
      if (_C_strings_in[i] == str) {
        return _C_strings[i]; // Found previous one - return our duplicate
      } else if (strcmp(_C_strings[i], str) == 0) {
        // Different pointer but equal contents - reuse the duplicate.
        return _C_strings[i];
      }
    }
    // Add new one
    if (_C_strings_count < MAX_STR_COUNT) {
      // Passed in string can be freed and used space become inaccessible.
      // Keep original address but duplicate string for future compare.
      _C_strings_id[_C_strings_count] = -1; // Init: no id assigned yet
      _C_strings_in[_C_strings_count] = str;
      const char* dup = os::strdup(str);
      _C_strings[_C_strings_count++] = dup;
      log_trace(aot, codecache, stringtable)("add_C_string: [%d] " INTPTR_FORMAT " '%s'", _C_strings_count, p2i(dup), dup);
      return dup;
    } else {
      assert(false, "Number of C strings >= MAX_STR_COUNT");
    }
  }
  // Table closed or overflowed - hand the original back untouched.
  return str;
}
2334 
// Return the stable id for an interned C string address, lazily assigning
// the next id on first request. Returns BAD_ADDRESS_ID if str is null or
// was never interned via add_C_string().
int AOTCodeAddressTable::id_for_C_string(address str) {
  if (str == nullptr) {
    return BAD_ADDRESS_ID;
  }
  MutexLocker ml(AOTCodeCStrings_lock, Mutex::_no_safepoint_check_flag);
  for (int i = 0; i < _C_strings_count; i++) {
    if (_C_strings[i] == (const char*)str) { // found
      int id = _C_strings_id[i];
      if (id >= 0) {
        assert(id < _C_strings_used, "%d >= %d", id , _C_strings_used);
        return id; // Found recorded
      }
      log_trace(aot, codecache, stringtable)("id_for_C_string: _C_strings[%d ==> %d] " INTPTR_FORMAT " '%s'", i, _C_strings_used, p2i(str), str);
      // Not found in recorded, add new
      id = _C_strings_used++;
      // Record the two-way mapping between id and table index.
      _C_strings_s[id] = i;
      _C_strings_id[i] = id;
      return id;
    }
  }
  return BAD_ADDRESS_ID;
}
2357 
2358 address AOTCodeAddressTable::address_for_C_string(int idx) {
2359   assert(idx < _C_strings_count, "sanity");
2360   return (address)_C_strings[idx];
2361 }
2362 
2363 static int search_address(address addr, address* table, uint length) {
2364   for (int i = 0; i < (int)length; i++) {
2365     if (table[i] == addr) {
2366       return i;
2367     }
2368   }
2369   return BAD_ADDRESS_ID;
2370 }
2371 
2372 address AOTCodeAddressTable::address_for_id(int idx) {
2373   assert(_extrs_complete || initializing_extrs, "AOT Code Cache VM runtime addresses table is not complete");
2374   if (idx == -1) {
2375     return (address)-1;
2376   }
2377   uint id = (uint)idx;
2378   // special case for symbols based relative to os::init
2379   if (id > (_c_str_base + _c_str_max)) {
2380     return (address)os::init + idx;
2381   }
2382   if (idx < 0) {
2383     fatal("Incorrect id %d for AOT Code Cache addresses table", id);
2384     return nullptr;
2385   }
2386   // no need to compare unsigned id against 0
2387   if (/* id >= _extrs_base && */ id < _extrs_length) {
2388     return _extrs_addr[id - _extrs_base];
2389   }
2390   if (id >= _stubs_base && id < _c_str_base) {
2391     return _stubs_addr[id - _stubs_base];
2392   }
2393   if (id >= _c_str_base && id < (_c_str_base + (uint)_C_strings_count)) {
2394     return address_for_C_string(id - _c_str_base);
2395   }
2396   fatal("Incorrect id %d for AOT Code Cache addresses table", id);
2397   return nullptr;
2398 }
2399 
// Map a runtime address to a stable id for storing in the AOT Code Cache;
// inverse of address_for_id(). Lookup order: hash table (stubs/externals),
// interned C strings, stub table scan, external-address scan; as a last
// resort a dll symbol offset is encoded as a distance from os::init.
// Returns -1 for the (address)-1 self-jump sentinel.
int AOTCodeAddressTable::id_for_address(address addr, RelocIterator reloc, CodeBlob* code_blob) {
  assert(_extrs_complete || initializing_extrs, "AOT Code Cache VM runtime addresses table is not complete");
  int id = -1;
  if (addr == (address)-1) { // Static call stub has jump to itself
    return id;
  }
  // Check card_table_base address first since it can point to any address
  BarrierSet* bs = BarrierSet::barrier_set();
  bool is_const_card_table_base = !UseG1GC && !UseShenandoahGC && bs->is_a(BarrierSet::CardTableBarrierSet);
  guarantee(!is_const_card_table_base || addr != ci_card_table_address_const(), "sanity");
  // fast path for stubs and external addresses
  if (_hash_table != nullptr) {
    int *result = _hash_table->get(addr);
    if (result != nullptr) {
      id = *result;
      log_trace(aot, codecache)("Address " INTPTR_FORMAT " retrieved from AOT Code Cache address hash table with index '%d'",
                                p2i(addr), id);
      return id;
    }
  }
  // Search for C string
  id = id_for_C_string(addr);
  if (id != BAD_ADDRESS_ID) {
    return id + _c_str_base; // bias into the C-string id section
  }
  if (StubRoutines::contains(addr) || CodeCache::find_blob(addr) != nullptr) {
    // Search for a matching stub entry
    id = search_address(addr, _stubs_addr, _stubs_max);
    if (id == BAD_ADDRESS_ID) {
      StubCodeDesc* desc = StubCodeDesc::desc_for(addr);
      if (desc == nullptr) {
        // The address may be a return pc just past the stub.
        desc = StubCodeDesc::desc_for(addr + frame::pc_return_offset);
      }
      const char* sub_name = (desc != nullptr) ? desc->name() : "<unknown>";
      assert(false, "Address " INTPTR_FORMAT " for Stub:%s is missing in AOT Code Cache addresses table", p2i(addr), sub_name);
    } else {
      return id + _stubs_base; // bias into the stubs id section
    }
  } else {
    // Search in runtime functions
    id = search_address(addr, _extrs_addr, _extrs_length);
    if (id == BAD_ADDRESS_ID) {
      ResourceMark rm;
      const int buflen = 1024;
      char* func_name = NEW_RESOURCE_ARRAY(char, buflen);
      int offset = 0;
      if (os::dll_address_to_function_name(addr, func_name, buflen, &offset)) {
        if (offset > 0) {
          // Could be address of C string
          // Encode as the distance from os::init; address_for_id() decodes
          // any id above the table range this way.
          uint dist = (uint)pointer_delta(addr, (address)os::init, 1);
          log_debug(aot, codecache)("Address " INTPTR_FORMAT " (offset %d) for runtime target '%s' is missing in AOT Code Cache addresses table",
                                    p2i(addr), dist, (const char*)addr);
          assert(dist > (uint)(_all_max + MAX_STR_COUNT), "change encoding of distance");
          return dist;
        }
#ifdef ASSERT
        reloc.print_current_on(tty);
        code_blob->print_on(tty);
        code_blob->print_code_on(tty);
        assert(false, "Address " INTPTR_FORMAT " for runtime target '%s+%d' is missing in AOT Code Cache addresses table", p2i(addr), func_name, offset);
#endif
      } else {
#ifdef ASSERT
        reloc.print_current_on(tty);
        code_blob->print_on(tty);
        code_blob->print_code_on(tty);
        os::find(addr, tty);
        assert(false, "Address " INTPTR_FORMAT " for <unknown>/('%s') is missing in AOT Code Cache addresses table", p2i(addr), (const char*)addr);
#endif
      }
    } else {
      return _extrs_base + id; // bias into the externals id section
    }
  }
  return id;
}
2476 
// Single global instance holding GC-related constants captured for AOT code.
AOTRuntimeConstants AOTRuntimeConstants::_aot_runtime_constants;
2478 
// Capture GC configuration constants (card table base and heap grain
// shift) from the currently selected barrier set into the global
// _aot_runtime_constants instance.
void AOTRuntimeConstants::initialize_from_runtime() {
  BarrierSet* bs = BarrierSet::barrier_set();
  address card_table_base = nullptr;
  uint grain_shift = 0;
#if INCLUDE_G1GC
  if (bs->is_a(BarrierSet::G1BarrierSet)) {
    grain_shift = G1HeapRegion::LogOfHRGrainBytes;
  } else
#endif
#if INCLUDE_SHENANDOAHGC
  if (bs->is_a(BarrierSet::ShenandoahBarrierSet)) {
    // Shenandoah has no fixed grain shift here.
    grain_shift = 0;
  } else
#endif
  if (bs->is_a(BarrierSet::CardTableBarrierSet)) {
    CardTable::CardValue* base = ci_card_table_address_const();
    assert(base != nullptr, "unexpected byte_map_base");
    card_table_base = base;
    CardTableBarrierSet* ctbs = barrier_set_cast<CardTableBarrierSet>(bs);
    grain_shift = ctbs->grain_shift();
  }
  _aot_runtime_constants._card_table_base = card_table_base;
  _aot_runtime_constants._grain_shift = grain_shift;
}
2503 
// Null-terminated list of the addresses of the runtime-constant fields
// above; presumably consumed by relocation/patching code that needs to
// recognize references to these fields — confirm with callers.
address AOTRuntimeConstants::_field_addresses_list[] = {
  ((address)&_aot_runtime_constants._card_table_base),
  ((address)&_aot_runtime_constants._grain_shift),
  nullptr
};
2509 
// Address of the stored card table base field; only meaningful for GCs
// with a constant card table base (Serial/Parallel).
address AOTRuntimeConstants::card_table_base_address() {
  assert(UseSerialGC || UseParallelGC, "Only these GCs have constant card table base");
  return (address)&_aot_runtime_constants._card_table_base;
}
2514 
// This is called after initialize() but before init2()
// and _cache is not set yet.
// Print a summary line for each entry in the opened AOT Code Cache,
// ordered by the cache's search table.
void AOTCodeCache::print_on(outputStream* st) {
  if (opened_cache != nullptr && opened_cache->for_use()) {
    st->print_cr("\nAOT Code Cache");
    uint count = opened_cache->_load_header->entries_count();
    uint* search_entries = (uint*)opened_cache->addr(opened_cache->_load_header->entries_offset()); // [id, index]
    // The entries array follows the (id, index) search-pair array.
    AOTCodeEntry* load_entries = (AOTCodeEntry*)(search_entries + 2 * count);

    for (uint i = 0; i < count; i++) {
      // Use search_entries[] to order output
      int index = search_entries[2*i + 1];
      AOTCodeEntry* entry = &(load_entries[index]);

      uint entry_position = entry->offset();
      // Entry names are stored relative to the entry's own offset.
      uint name_offset = entry->name_offset() + entry_position;
      const char* saved_name = opened_cache->addr(name_offset);

      st->print_cr("%4u: %10s idx:%4u Id:%u size=%u '%s'",
                   i, aot_code_entry_kind_name[entry->kind()], index, entry->id(), entry->size(), saved_name);
    }
  }
}
2538 
2539 // methods for managing entries in multi-stub blobs
2540 
2541 
// Construct the per-blob bookkeeping for a StubGenerator multi-stub blob.
// Sets USING/DUMPING flags from the cache configuration and allocates one
// StubAddrRange per stub in the blob; marked INVALID for the pre-universe
// blob, which cannot use the cache.
AOTStubData::AOTStubData(BlobId blob_id) :
  _blob_id(blob_id),
  _cached_blob(nullptr),
  _stub_cnt(0),
  _ranges(nullptr),
  _flags(0) {
  assert(StubInfo::is_stubgen(blob_id),
         "AOTStubData expects a multi-stub blob not %s",
         StubInfo::name(blob_id));

  // we cannot save or restore pre-universe stubs because the cache
  // cannot be accessed before initialising the universe
  if (blob_id == BlobId::stubgen_preuniverse_id) {
    // invalidate any attempt to use this
    _flags = INVALID;
    return;
  }
  if (AOTCodeCache::is_on()) {
    _flags = OPEN;
    // allow update of stub entry addresses
    if (AOTCodeCache::is_using_stub()) {
      // allow stub loading
      _flags |= USING;
    }
    if (AOTCodeCache::is_dumping_stub()) {
      // allow stub saving
      _flags |= DUMPING;
    }
    // we need to track all the blob's entries
    _stub_cnt = StubInfo::stub_count(_blob_id);
    _ranges = NEW_C_HEAP_ARRAY(StubAddrRange, _stub_cnt, mtCode);
    for (int i = 0; i < _stub_cnt; i++) {
      _ranges[i].default_init();
    }
  }
}
2578 
2579 bool AOTStubData::load_code_blob() {
2580   assert(is_using(), "should not call");
2581   assert(!is_invalid() && _cached_blob == nullptr, "repeated init");
2582   _cached_blob = AOTCodeCache::load_code_blob(AOTCodeEntry::StubGenBlob,
2583                                               _blob_id,
2584                                               this);
2585   if (_cached_blob == nullptr) {
2586     set_invalid();
2587     return false;
2588   } else {
2589     return true;
2590   }
2591 }
2592 
2593 bool AOTStubData::store_code_blob(CodeBlob& new_blob, CodeBuffer *code_buffer) {
2594   assert(is_dumping(), "should not call");
2595   assert(_cached_blob == nullptr, "should not be loading and storing!");
2596   if (!AOTCodeCache::store_code_blob(new_blob,
2597                                      AOTCodeEntry::StubGenBlob,
2598                                      _blob_id, this, code_buffer)) {
2599     set_invalid();
2600     return false;
2601   } else {
2602     return true;
2603   }
2604 }
2605 
2606 address AOTStubData::load_archive_data(StubId stub_id, address& end, GrowableArray<address>* entries, GrowableArray<address>* extras) {
2607   assert(StubInfo::blob(stub_id) == _blob_id, "sanity check");
2608   if (is_invalid()) {
2609     return nullptr;
2610   }
2611   int idx = StubInfo::stubgen_offset_in_blob(_blob_id, stub_id);
2612   assert(idx >= 0 && idx < _stub_cnt, "invalid index %d for stub count %d", idx, _stub_cnt);
2613   // ensure we have a valid associated range
2614   StubAddrRange &range = _ranges[idx];
2615   int base = range.start_index();
2616   if (base < 0) {
2617 #ifdef DEBUG
2618     // reset index so we can idenitfy which ones we failed to find
2619     range.init_entry(-2, 0);
2620 #endif
2621     return nullptr;
2622   }
2623   int count = range.count();
2624   assert(base >= 0, "sanity");
2625   assert(count >= 2, "sanity");
2626   // first two saved addresses are start and end
2627   address start = _address_array.at(base);
2628   end = _address_array.at(base + 1);
2629   assert(start != nullptr, "failed to load start address of stub %s", StubInfo::name(stub_id));
2630   assert(end != nullptr, "failed to load end address of stub %s", StubInfo::name(stub_id));
2631   assert(start < end, "start address %p should be less than end %p address for stub %s", start, end, StubInfo::name(stub_id));
2632 
2633   int entry_count = StubInfo::entry_count(stub_id);
2634   // the address count must at least include the stub start, end
2635   // and secondary addresses
2636   assert(count >= entry_count + 1, "stub %s requires %d saved addresses but only has %d", StubInfo::name(stub_id), entry_count + 1, count);
2637 
2638   // caller must retrieve secondary entries if and only if they exist
2639   assert((entry_count == 1) == (entries == nullptr), "trying to retrieve wrong number of entries for stub %s", StubInfo::name(stub_id));
2640   int index = 2;
2641   if (entries != nullptr) {
2642     assert(entries->length() == 0, "non-empty array when retrieving entries for stub %s!", StubInfo::name(stub_id));
2643     while (index < entry_count + 1) {
2644       address entry = _address_array.at(base + index++);
2645       assert(entry == nullptr || (start < entry && entry < end), "entry address %p not in range (%p, %p) for stub %s", entry, start, end, StubInfo::name(stub_id));
2646       entries->append(entry);
2647     }
2648   }
2649   // caller must retrieve extras if and only if they exist
2650   assert((index < count) == (extras != nullptr), "trying to retrieve wrong number of extras for stub %s", StubInfo::name(stub_id));
2651   if (extras != nullptr) {
2652     assert(extras->length() == 0, "non-empty array when retrieving extras for stub %s!", StubInfo::name(stub_id));
2653     while (index < count) {
2654       address extra = _address_array.at(base + index++);
2655       assert(extra == nullptr || (start <= extra && extra <= end), "extra address %p not in range (%p, %p) for stub %s", extra, start, end, StubInfo::name(stub_id));
2656       extras->append(extra);
2657     }
2658   }
2659 
2660   return start;
2661 }
2662 
// Record the addresses for one stub of this blob into _address_array and
// remember the (base, count) range in _ranges[idx]. Layout per stub:
// [start, end, secondaries..., extras...] — mirrored by load_archive_data().
void AOTStubData::store_archive_data(StubId stub_id, address start, address end, GrowableArray<address>* entries, GrowableArray<address>* extras) {
  assert(StubInfo::blob(stub_id) == _blob_id, "sanity check");
  assert(start != nullptr, "start address cannot be null");
  assert(end != nullptr, "end address cannot be null");
  assert(start < end, "start address %p should be less than end %p address for stub %s", start, end, StubInfo::name(stub_id));
  int idx = StubInfo::stubgen_offset_in_blob(_blob_id, stub_id);
  StubAddrRange& range = _ranges[idx];
  assert(range.start_index() == -1, "sanity");
  int base = _address_array.length();
  assert(base >= 0, "sanity");
  // first two saved addresses are start and end
  _address_array.append(start);
  _address_array.append(end);
  // caller must save secondary entries if and only if they exist
  assert((StubInfo::entry_count(stub_id) == 1) == (entries == nullptr), "trying to save wrong number of entries for stub %s", StubInfo::name(stub_id));
  if (entries != nullptr) {
    assert(entries->length() == StubInfo::entry_count(stub_id) - 1, "incorrect entry count %d when saving entries for stub %s!", entries->length(), StubInfo::name(stub_id));
    for (int i = 0; i < entries->length(); i++) {
      address entry = entries->at(i);
      assert(entry == nullptr || (start < entry && entry < end), "entry address %p not in range (%p, %p) for stub %s", entry, start, end, StubInfo::name(stub_id));
      _address_array.append(entry);
    }
  }
  // caller may wish to save extra addresses
  if (extras != nullptr) {
    for (int i = 0; i < extras->length(); i++) {
      address extra = extras->at(i);
      // handler range end may be end -- it gets restored as nullptr
      assert(extra == nullptr || (start <= extra && extra <= end), "extra address %p not in range (%p, %p) for stub %s", extra, start, end, StubInfo::name(stub_id));
      _address_array.append(extra);
    }
  }
  // Record where this stub's addresses live and how many there are.
  range.init_entry(base, _address_array.length() - base);
}