1 /*
   2  * Copyright (c) 2023, 2025, Oracle and/or its affiliates. All rights reserved.
   3  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
   4  *
   5  * This code is free software; you can redistribute it and/or modify it
   6  * under the terms of the GNU General Public License version 2 only, as
   7  * published by the Free Software Foundation.
   8  *
   9  * This code is distributed in the hope that it will be useful, but WITHOUT
  10  * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
  11  * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
  12  * version 2 for more details (a copy is included in the LICENSE file that
  13  * accompanied this code).
  14  *
  15  * You should have received a copy of the GNU General Public License version
  16  * 2 along with this work; if not, write to the Free Software Foundation,
  17  * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
  18  *
  19  * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
  20  * or visit www.oracle.com if you need additional information or have any
  21  * questions.
  22  *
  23  */
  24 
  25 
  26 #include "asm/macroAssembler.hpp"
  27 #include "cds/aotCacheAccess.hpp"
  28 #include "cds/aotMetaspace.hpp"
  29 #include "cds/cds_globals.hpp"
  30 #include "cds/cdsConfig.hpp"
  31 #include "cds/heapShared.hpp"
  32 #include "ci/ciConstant.hpp"
  33 #include "ci/ciEnv.hpp"
  34 #include "ci/ciField.hpp"
  35 #include "ci/ciMethod.hpp"
  36 #include "ci/ciMethodData.hpp"
  37 #include "ci/ciObject.hpp"
  38 #include "ci/ciUtilities.inline.hpp"
  39 #include "classfile/javaAssertions.hpp"
  40 #include "classfile/stringTable.hpp"
  41 #include "classfile/symbolTable.hpp"
  42 #include "classfile/systemDictionary.hpp"
  43 #include "classfile/vmClasses.hpp"
  44 #include "classfile/vmIntrinsics.hpp"
  45 #include "code/aotCodeCache.hpp"
  46 #include "code/codeBlob.hpp"
  47 #include "code/codeCache.hpp"
  48 #include "code/oopRecorder.inline.hpp"
  49 #include "compiler/abstractCompiler.hpp"
  50 #include "compiler/compilationPolicy.hpp"
  51 #include "compiler/compileBroker.hpp"
  52 #include "compiler/compileTask.hpp"
  53 #include "gc/g1/g1BarrierSetRuntime.hpp"
  54 #include "gc/shared/gcConfig.hpp"
  55 #include "logging/logStream.hpp"
  56 #include "memory/memoryReserver.hpp"
  57 #include "memory/universe.hpp"
  58 #include "oops/klass.inline.hpp"
  59 #include "oops/method.inline.hpp"
  60 #include "oops/trainingData.hpp"
  61 #include "prims/jvmtiThreadState.hpp"
  62 #include "runtime/atomic.hpp"
  63 #include "runtime/deoptimization.hpp"
  64 #include "runtime/flags/flagSetting.hpp"
  65 #include "runtime/globals_extension.hpp"
  66 #include "runtime/handles.inline.hpp"
  67 #include "runtime/java.hpp"
  68 #include "runtime/jniHandles.inline.hpp"
  69 #include "runtime/mutexLocker.hpp"
  70 #include "runtime/os.inline.hpp"
  71 #include "runtime/sharedRuntime.hpp"
  72 #include "runtime/stubCodeGenerator.hpp"
  73 #include "runtime/stubRoutines.hpp"
  74 #include "runtime/threadIdentifier.hpp"
  75 #include "runtime/timerTrace.hpp"
  76 #include "utilities/copy.hpp"
  77 #include "utilities/formatBuffer.hpp"
  78 #include "utilities/ostream.hpp"
  79 #include "utilities/spinYield.hpp"
  80 #ifdef COMPILER1
  81 #include "c1/c1_LIRAssembler.hpp"
  82 #include "c1/c1_Runtime1.hpp"
  83 #include "gc/g1/c1/g1BarrierSetC1.hpp"
  84 #include "gc/shared/c1/barrierSetC1.hpp"
  85 #if INCLUDE_SHENANDOAHGC
  86 #include "gc/shenandoah/c1/shenandoahBarrierSetC1.hpp"
  87 #endif // INCLUDE_SHENANDOAHGC
  88 #include "gc/z/c1/zBarrierSetC1.hpp"
  89 #endif // COMPILER1
  90 #ifdef COMPILER2
  91 #include "opto/runtime.hpp"
  92 #endif
  93 #if INCLUDE_JVMCI
  94 #include "jvmci/jvmci.hpp"
  95 #endif
  96 #if INCLUDE_G1GC
  97 #include "gc/g1/g1BarrierSetRuntime.hpp"
  98 #endif
  99 #if INCLUDE_SHENANDOAHGC
 100 #include "gc/shenandoah/shenandoahRuntime.hpp"
 101 #endif
 102 #if INCLUDE_ZGC
 103 #include "gc/z/zBarrierSetRuntime.hpp"
 104 #endif
 105 #if defined(X86) && !defined(ZERO)
 106 #include "rdtsc_x86.hpp"
 107 #endif
 108 
 109 #include <errno.h>
 110 #include <sys/stat.h>
 111 
 112 const char* aot_code_entry_kind_name[] = {
 113 #define DECL_KIND_STRING(kind) XSTR(kind),
 114   DO_AOTCODEENTRY_KIND(DECL_KIND_STRING)
 115 #undef DECL_KIND_STRING
 116 };
 117 
 118 static elapsedTimer _t_totalLoad;
 119 static elapsedTimer _t_totalPreload;
 120 static elapsedTimer _t_totalRegister;
 121 static elapsedTimer _t_totalFind;
 122 static elapsedTimer _t_totalStore;
 123 
 124 static bool enable_timers() {
 125   return CITime || log_is_enabled(Info, init);
 126 }
 127 
 128 static void report_load_failure() {
 129   if (AbortVMOnAOTCodeFailure) {
 130     vm_exit_during_initialization("Unable to use AOT Code Cache.", nullptr);
 131   }
 132   log_info(aot, codecache, init)("Unable to use AOT Code Cache.");
 133   AOTCodeCache::disable_caching();
 134 }
 135 
 136 static void report_store_failure() {
 137   if (AbortVMOnAOTCodeFailure) {
 138     tty->print_cr("Unable to create AOT Code Cache.");
 139     vm_abort(false);
 140   }
 141   log_info(aot, codecache, exit)("Unable to create AOT Code Cache.");
 142   AOTCodeCache::disable_caching();
 143 }
 144 
 145 // The sequence of AOT code caching flag and parameter settings:
 146 //
 147 // 1. The initial AOT code caching flags are set during the call
 148 // to CDSConfig::check_vm_args_consistency().
 149 //
 150 // 2. The earliest AOT code state check is done in compilationPolicy_init(),
 151 // where we set the number of compiler threads for the AOT assembly phase.
 152 //
 153 // 3. We determine the presence of AOT code in the AOT Cache in
 154 // AOTMetaspace::open_static_archive(), which is called
 155 // after compilationPolicy_init() but before codeCache_init().
 156 //
 157 // 4. AOTCodeCache::initialize() is called during universe_init()
 158 // and performs the final AOT state and flag settings.
 159 //
 160 // 5. Finally, AOTCodeCache::init2() is called after universe_init(),
 161 // when all GC settings are finalized.
 162 
 163 // The following methods determine what we do with AOT code depending
 164 // on the phase of the AOT process: assembly or production.
 165 
 166 bool AOTCodeCache::is_dumping_adapter() {
 167   return AOTAdapterCaching && is_on_for_dump();
 168 }
 169 
 170 bool AOTCodeCache::is_using_adapter()   {
 171   return AOTAdapterCaching && is_on_for_use();
 172 }
 173 
 174 bool AOTCodeCache::is_dumping_stub() {
 175   return AOTStubCaching && is_on_for_dump();
 176 }
 177 
 178 bool AOTCodeCache::is_using_stub()   {
 179   return AOTStubCaching && is_on_for_use();
 180 }
 181 
 182 bool AOTCodeCache::is_dumping_code() {
 183   return AOTCodeCaching && is_on_for_dump();
 184 }
 185 
 186 bool AOTCodeCache::is_using_code() {
 187   return AOTCodeCaching && is_on_for_use();
 188 }
 189 
 190 // This is used before AOTCodeCache is initialized
 191 // but after AOT (CDS) Cache flag consistency has been checked.
 192 bool AOTCodeCache::maybe_dumping_code() {
 193   return AOTCodeCaching && CDSConfig::is_dumping_final_static_archive();
 194 }
 195 
 196 // The following methods can be called regardless of the AOT code cache status.
 197 // They are initially called during AOT flag parsing and finalized
 198 // in AOTCodeCache::initialize().
 199 void AOTCodeCache::enable_caching() {
 200   FLAG_SET_ERGO_IF_DEFAULT(AOTCodeCaching, true);
 201   FLAG_SET_ERGO_IF_DEFAULT(AOTStubCaching, true);
 202   FLAG_SET_ERGO_IF_DEFAULT(AOTAdapterCaching, true);
 203 }
 204 
 205 void AOTCodeCache::disable_caching() {
 206   FLAG_SET_ERGO(AOTCodeCaching, false);
 207   FLAG_SET_ERGO(AOTStubCaching, false);
 208   FLAG_SET_ERGO(AOTAdapterCaching, false);
 209 }
 210 
 211 bool AOTCodeCache::is_caching_enabled() {
 212   return AOTCodeCaching || AOTStubCaching || AOTAdapterCaching;
 213 }
 214 
 215 static uint32_t encode_id(AOTCodeEntry::Kind kind, int id) {
 216   assert(AOTCodeEntry::is_valid_entry_kind(kind), "invalid AOTCodeEntry kind %d", (int)kind);
 217   // There can be a conflict of id between an Adapter and a *Blob, but that should not cause any functional issue
 218   // because both id and kind are used to find an entry, and that combination should be unique.
 219   if (kind == AOTCodeEntry::Adapter) {
 220     return id;
 221   } else if (kind == AOTCodeEntry::SharedBlob) {
 222     assert(StubInfo::is_shared(static_cast<BlobId>(id)), "not a shared blob id %d", id);
 223     return id;
 224   } else if (kind == AOTCodeEntry::C1Blob) {
 225     assert(StubInfo::is_c1(static_cast<BlobId>(id)), "not a c1 blob id %d", id);
 226     return id;
 227   } else {
 228     // kind must be AOTCodeEntry::C2Blob
 229     assert(StubInfo::is_c2(static_cast<BlobId>(id)), "not a c2 blob id %d", id);
 230     return id;
 231   }
 232 }
 233 
 234 static uint _max_aot_code_size = 0;
 235 uint AOTCodeCache::max_aot_code_size() {
 236   return _max_aot_code_size;
 237 }
 238 
 239 bool AOTCodeCache::is_code_load_thread_on() {
 240   return UseAOTCodeLoadThread && AOTCodeCaching;
 241 }
 242 
 243 bool AOTCodeCache::allow_const_field(ciConstant& value) {
 244   ciEnv* env = CURRENT_ENV;
 245   precond(env != nullptr);
 246   assert(!env->is_precompile() || is_dumping_code(), "AOT compilation should be enabled");
 247   return !env->is_precompile() // Restrict only when we generate AOT code
 248         // Can not trust primitive too   || !is_reference_type(value.basic_type())
 249         // May disable this too for now  || is_reference_type(value.basic_type()) && value.as_object()->should_be_constant()
 250         ;
 251 }
 252 
 253 // This is called from AOTMetaspace::initialize_shared_spaces(),
 254 // which is called from universe_init().
 255 // At this point all AOT class linking settings are finalized
 256 // and the AOT cache is open, so we can map the AOT code region.
 257 void AOTCodeCache::initialize() {
 258   if (!is_caching_enabled()) {
 259     log_info(aot, codecache, init)("AOT Code Cache is not used: disabled.");
 260     return;
 261   }
 262 #if defined(ZERO) || !(defined(AMD64) || defined(AARCH64))
 263   log_info(aot, codecache, init)("AOT Code Cache is not supported on this platform.");
 264   disable_caching();
 265   return;
 266 #else
 267   assert(!FLAG_IS_DEFAULT(AOTCache), "AOTCache should be specified");
 268 
 269   // Disable stubs caching until JDK-8357398 is fixed.
 270   FLAG_SET_ERGO(AOTStubCaching, false);
 271 
 272   if (VerifyOops) {
 273     // Disable AOT stub caching when the VerifyOops flag is on.
 274     // VerifyOops code generates a lot of C strings which overflow
 275     // the AOT C string table (which has a fixed size).
 276     // The AOT C string table will be reworked later to handle such cases.
 277     //
 278     // Note: AOT adapters are not affected - they don't have oop operations.
 279     log_info(aot, codecache, init)("AOT Stubs Caching is not supported with VerifyOops.");
 280     FLAG_SET_ERGO(AOTStubCaching, false);
 281   }
 282 
 283   bool is_dumping = false;
 284   bool is_using   = false;
 285   if (CDSConfig::is_dumping_final_static_archive() && CDSConfig::is_dumping_aot_linked_classes()) {
 286     is_dumping = is_caching_enabled();
 287   } else if (CDSConfig::is_using_archive() && CDSConfig::is_using_aot_linked_classes()) {
 288     is_using = is_caching_enabled();
 289   }
 290   if (ClassInitBarrierMode > 0 && !(is_dumping && AOTCodeCaching)) {
 291     log_info(aot, codecache, init)("Set ClassInitBarrierMode to 0 because AOT Code dumping is off.");
 292     FLAG_SET_ERGO(ClassInitBarrierMode, 0);
 293   }
 294   if (!(is_dumping || is_using)) {
 295     log_info(aot, codecache, init)("AOT Code Cache is not used: AOT Class Linking is not used.");
 296     disable_caching();
 297     return; // AOT code caching disabled on command line
 298   }
 299   // Reserve the AOT Cache region when we are dumping AOT code.
 300   _max_aot_code_size = AOTCodeMaxSize;
 301   if (is_dumping && !FLAG_IS_DEFAULT(AOTCodeMaxSize)) {
 302     if (!is_aligned(AOTCodeMaxSize, os::vm_allocation_granularity())) {
 303       _max_aot_code_size = align_up(AOTCodeMaxSize, os::vm_allocation_granularity());
 304       log_debug(aot,codecache,init)("Max AOT Code Cache size is aligned up to %uK", (int)(max_aot_code_size()/K));
 305     }
 306   }
 307   size_t aot_code_size = is_using ? AOTCacheAccess::get_aot_code_region_size() : 0;
 308   if (is_using && aot_code_size == 0) {
 309     log_info(aot, codecache, init)("AOT Code Cache is empty");
 310     disable_caching();
 311     return;
 312   }
 313   if (!open_cache(is_dumping, is_using)) {
 314     if (is_using) {
 315       report_load_failure();
 316     } else {
 317       report_store_failure();
 318     }
 319     return;
 320   }
 321   if (is_dumping) {
 322     FLAG_SET_DEFAULT(FoldStableValues, false);
 323     FLAG_SET_DEFAULT(ForceUnreachable, true);
 324   }
 325   FLAG_SET_DEFAULT(DelayCompilerStubsGeneration, false);
 326 #endif // defined(ZERO) || !(defined(AMD64) || defined(AARCH64))
 327 }
 328 
 329 static AOTCodeCache*  opened_cache = nullptr; // Use this until we verify the cache
 330 AOTCodeCache* AOTCodeCache::_cache = nullptr;
 331 DEBUG_ONLY( bool AOTCodeCache::_passed_init2 = false; )
 332 
 333 // It is called after universe_init() when all GC settings are finalized.
 334 void AOTCodeCache::init2() {
 335   DEBUG_ONLY( _passed_init2 = true; )
 336   if (opened_cache == nullptr) {
 337     return;
 338   }
 339   // After Universe initialized
 340   BarrierSet* bs = BarrierSet::barrier_set();
 341   if (bs->is_a(BarrierSet::CardTableBarrierSet)) {
 342     address byte_map_base = ci_card_table_address_as<address>();
 343     if (is_on_for_dump() && !external_word_Relocation::can_be_relocated(byte_map_base)) {
 344       // Bail out since we can't encode card table base address with relocation
 345       log_warning(aot, codecache, init)("Can't create AOT Code Cache because card table base address is not relocatable: " INTPTR_FORMAT, p2i(byte_map_base));
 346       close();
 347       report_load_failure();
 348       return;
 349     }
 350   }
 351   if (!opened_cache->verify_config_on_use()) { // Check on AOT code loading
 352     delete opened_cache;
 353     opened_cache = nullptr;
 354     report_load_failure();
 355     return;
 356   }
 357 
 358   // initialize aot runtime constants as appropriate to this runtime
 359   AOTRuntimeConstants::initialize_from_runtime();
 360 
 361   // initialize the table of external routines and initial stubs so we can save
 362   // generated code blobs that reference them
 363   AOTCodeAddressTable* table = opened_cache->_table;
 364   assert(table != nullptr, "should be initialized already");
 365   table->init_extrs();
 366 
 367   // Now cache and address table are ready for AOT code generation
 368   _cache = opened_cache;
 369 
 370   // Set ClassInitBarrierMode after all checks since it affects code generation
 371   if (is_dumping_code()) {
 372     FLAG_SET_ERGO_IF_DEFAULT(ClassInitBarrierMode, 1);
 373   } else {
 374     FLAG_SET_ERGO(ClassInitBarrierMode, 0);
 375   }
 376 }
 377 
 378 bool AOTCodeCache::open_cache(bool is_dumping, bool is_using) {
 379   opened_cache = new AOTCodeCache(is_dumping, is_using);
 380   if (opened_cache->failed()) {
 381     delete opened_cache;
 382     opened_cache = nullptr;
 383     return false;
 384   }
 385   return true;
 386 }
 387 
 388 static void print_helper(nmethod* nm, outputStream* st) {
 389   AOTCodeCache::iterate([&](AOTCodeEntry* e) {
 390     if (e->method() == nm->method()) {
 391       ResourceMark rm;
 392       stringStream ss;
 393       ss.print("A%s%d", (e->for_preload() ? "P" : ""), e->comp_level());
 394       ss.print("[%s%s%s]",
 395                (e->is_loaded()   ? "L" : ""),
 396                (e->load_fail()   ? "F" : ""),
 397                (e->not_entrant() ? "I" : ""));
 398       ss.print("#%d", e->comp_id());
 399 
 400       st->print(" %s", ss.freeze());
 401     }
 402   });
 403 }
 404 
 405 void AOTCodeCache::close() {
 406   if (is_on()) {
 407     delete _cache; // Free memory
 408     _cache = nullptr;
 409     opened_cache = nullptr;
 410   }
 411 }
 412 
 413 class CachedCodeDirectory : public CachedCodeDirectoryInternal {
 414 public:
 415   uint _aot_code_size;
 416   char* _aot_code_data;
 417 
 418   void set_aot_code_data(uint size, char* aot_data) {
 419     _aot_code_size = size;
 420     AOTCacheAccess::set_pointer(&_aot_code_data, aot_data);
 421   }
 422 
 423   static CachedCodeDirectory* create();
 424 };
 425 
 426 // Storing AOT code in the AOT code region (ac) of AOT Cache:
 427 //
 428 // [1] Use CachedCodeDirectory to keep track of all of the data related to AOT code.
 429 //     E.g., you can build a hashtable to record what methods have been archived.
 430 //
 431 // [2] Memory for all data for AOT code, including CachedCodeDirectory, should be
 432 //     allocated using AOTCacheAccess::allocate_aot_code_region().
 433 //
 434 // [3] CachedCodeDirectory must be the very first allocation.
 435 //
 436 // [4] Two kinds of pointers can be stored:
 437 //     - A pointer p that points to metadata. AOTCacheAccess::can_generate_aot_code(p) must return true.
 438 //     - A pointer to a buffer returned by AOTCacheAccess::allocate_aot_code_region().
 439 //       (It's OK to point to an interior location within this buffer).
 440 //     Such pointers must be stored using AOTCacheAccess::set_pointer()
 441 //
 442 // The buffers allocated by AOTCacheAccess::allocate_aot_code_region() are in a contiguous region. At runtime, this
 443 // region is mapped to the process address space. All the pointers in this buffer are relocated as necessary
 444 // (e.g., to account for the runtime location of the CodeCache).
 445 //
 446 // This is always at the very beginning of the mmapped CDS "ac" (AOT code) region
 447 static CachedCodeDirectory* _aot_code_directory = nullptr;
 448 
 449 CachedCodeDirectory* CachedCodeDirectory::create() {
 450   assert(AOTCacheAccess::is_aot_code_region_empty(), "must be");
 451   CachedCodeDirectory* dir = (CachedCodeDirectory*)AOTCacheAccess::allocate_aot_code_region(sizeof(CachedCodeDirectory));
 452   dir->dumptime_init_internal();
 453   return dir;
 454 }
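
     // For illustration only (a sketch, not code used in this file): a hypothetical
     // extra field CachedCodeDirectory::_my_data pointing into the "ac" region would
     // be recorded following rules [2] and [4] above, roughly as:
     //
     //   char* buf = (char*)AOTCacheAccess::allocate_aot_code_region(my_size);
     //   // ... fill buf ...
     //   AOTCacheAccess::set_pointer(&_aot_code_directory->_my_data, buf);
     //
     // See set_aot_code_data() above for the real use of this pattern.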
 455 
 456 #define DATA_ALIGNMENT HeapWordSize
 457 
 458 AOTCodeCache::AOTCodeCache(bool is_dumping, bool is_using) :
 459   _load_header(nullptr),
 460   _load_buffer(nullptr),
 461   _store_buffer(nullptr),
 462   _C_store_buffer(nullptr),
 463   _write_position(0),
 464   _load_size(0),
 465   _store_size(0),
 466   _for_use(is_using),
 467   _for_dump(is_dumping),
 468   _closing(false),
 469   _failed(false),
 470   _lookup_failed(false),
 471   _for_preload(false),
 472   _has_clinit_barriers(false),
 473   _table(nullptr),
 474   _load_entries(nullptr),
 475   _search_entries(nullptr),
 476   _store_entries(nullptr),
 477   _C_strings_buf(nullptr),
 478   _store_entries_cnt(0),
 479   _compile_id(0),
 480   _comp_level(0)
 481 {
 482   // Read the header at the beginning of the cache
 483   if (_for_use) {
 484     // Read cache
 485     size_t load_size = AOTCacheAccess::get_aot_code_region_size();
 486     ReservedSpace rs = MemoryReserver::reserve(load_size, mtCode);
 487     if (!rs.is_reserved()) {
 488       log_warning(aot, codecache, init)("Failed to reserve %u bytes of memory for mapping AOT code region into AOT Code Cache", (uint)load_size);
 489       set_failed();
 490       return;
 491     }
 492     if (!AOTCacheAccess::map_aot_code_region(rs)) {
 493       log_warning(aot, codecache, init)("Failed to read/mmap AOT code region (ac) into AOT Code Cache");
 494       set_failed();
 495       return;
 496     }
 497     _aot_code_directory = (CachedCodeDirectory*)rs.base();
 498     _aot_code_directory->runtime_init_internal();
 499 
 500     _load_size = _aot_code_directory->_aot_code_size;
 501     _load_buffer = _aot_code_directory->_aot_code_data;
 502     assert(is_aligned(_load_buffer, DATA_ALIGNMENT), "load_buffer is not aligned");
 503     log_info(aot, codecache, init)("Mapped %u bytes at address " INTPTR_FORMAT " from AOT Code Cache", _load_size, p2i(_load_buffer));
 504 
 505     _load_header = (Header*)addr(0);
 506     if (!_load_header->verify(_load_size)) {
 507       set_failed();
 508       return;
 509     }
 510     log_info (aot, codecache, init)("Loaded %u AOT code entries from AOT Code Cache", _load_header->entries_count());
 511     log_debug(aot, codecache, init)("  %s: total=%u", aot_code_entry_kind_name[AOTCodeEntry::Adapter], _load_header->adapters_count());
 512     log_debug(aot, codecache, init)("  %s: total=%u", aot_code_entry_kind_name[AOTCodeEntry::SharedBlob], _load_header->shared_blobs_count());
 513     log_debug(aot, codecache, init)("  %s: total=%u", aot_code_entry_kind_name[AOTCodeEntry::C1Blob], _load_header->C1_blobs_count());
 514     log_debug(aot, codecache, init)("  %s: total=%u", aot_code_entry_kind_name[AOTCodeEntry::C2Blob], _load_header->C2_blobs_count());
 515     log_debug(aot, codecache, init)("  %s: total=%u", aot_code_entry_kind_name[AOTCodeEntry::Stub], _load_header->stubs_count());
 516     log_debug(aot, codecache, init)("  %s: total=%u", aot_code_entry_kind_name[AOTCodeEntry::Nmethod], _load_header->nmethods_count());
 517     log_debug(aot, codecache, init)("  AOT code cache size: %u bytes", _load_header->cache_size());
 518 
 519     // Read strings
 520     load_strings();
 521   }
 522   if (_for_dump) {
 523     _C_store_buffer = NEW_C_HEAP_ARRAY(char, max_aot_code_size() + DATA_ALIGNMENT, mtCode);
 524     _store_buffer = align_up(_C_store_buffer, DATA_ALIGNMENT);
 525     // Entries are allocated at the end of the buffer in reverse order (as on a stack).
 526     _store_entries = (AOTCodeEntry*)align_up(_C_store_buffer + max_aot_code_size(), DATA_ALIGNMENT);
 527     log_debug(aot, codecache, init)("Allocated store buffer at address " INTPTR_FORMAT " of size %u", p2i(_store_buffer), max_aot_code_size());
 528   }
 529   _table = new AOTCodeAddressTable();
 530 }
 531 
 532 void AOTCodeCache::invalidate(AOTCodeEntry* entry) {
 533   // This may be executed concurrently
 534   if (entry != nullptr && is_on()) { // Request could come after cache is closed.
 535     _cache->invalidate_entry(entry);
 536   }
 537 }
 538 
 539 void AOTCodeCache::init_early_stubs_table() {
 540   AOTCodeAddressTable* table = addr_table();
 541   if (table != nullptr) {
 542     table->init_early_stubs();
 543   }
 544 }
 545 
 546 void AOTCodeCache::init_shared_blobs_table() {
 547   AOTCodeAddressTable* table = addr_table();
 548   if (table != nullptr) {
 549     table->init_shared_blobs();
 550   }
 551 }
 552 
 553 void AOTCodeCache::init_stubs_table() {
 554   AOTCodeAddressTable* table = addr_table();
 555   if (table != nullptr) {
 556     table->init_stubs();
 557   }
 558 }
 559 
 560 void AOTCodeCache::init_early_c1_table() {
 561   AOTCodeAddressTable* table = addr_table();
 562   if (table != nullptr) {
 563     table->init_early_c1();
 564   }
 565 }
 566 
 567 void AOTCodeCache::init_c1_table() {
 568   AOTCodeAddressTable* table = addr_table();
 569   if (table != nullptr) {
 570     table->init_c1();
 571   }
 572 }
 573 
 574 void AOTCodeCache::init_c2_table() {
 575   AOTCodeAddressTable* table = addr_table();
 576   if (table != nullptr) {
 577     table->init_c2();
 578   }
 579 }
 580 
 581 AOTCodeCache::~AOTCodeCache() {
 582   if (_closing) {
 583     return; // Already closed
 584   }
 585   // Stop any further access to cache.
 586   // Checked on entry to load_nmethod() and store_nmethod().
 587   _closing = true;
 588   if (_for_use) {
 589     // Wait for all load_nmethod() calls to finish.
 590     wait_for_no_nmethod_readers();
 591   }
 592   // Prevent writing code into the cache while we are closing it.
 593   // This lock is held by ciEnv::register_method(), which calls store_nmethod().
 594   MutexLocker ml(Compile_lock);
 595   if (for_dump()) { // Finalize cache
 596     finish_write();
 597   }
 598   _load_buffer = nullptr;
 599   if (_C_store_buffer != nullptr) {
 600     FREE_C_HEAP_ARRAY(char, _C_store_buffer);
 601     _C_store_buffer = nullptr;
 602     _store_buffer = nullptr;
 603   }
 604   if (_table != nullptr) {
 605     MutexLocker ml(AOTCodeCStrings_lock, Mutex::_no_safepoint_check_flag);
 606     delete _table;
 607     _table = nullptr;
 608   }
 609 }
 610 
 611 void AOTCodeCache::Config::record(uint cpu_features_offset) {
 612   _flags = 0;
 613 #ifdef ASSERT
 614   _flags |= debugVM;
 615 #endif
 616   if (UseCompressedOops) {
 617     _flags |= compressedOops;
 618   }
 619   if (UseCompressedClassPointers) {
 620     _flags |= compressedClassPointers;
 621   }
 622   if (UseTLAB) {
 623     _flags |= useTLAB;
 624   }
 625   if (JavaAssertions::systemClassDefault()) {
 626     _flags |= systemClassAssertions;
 627   }
 628   if (JavaAssertions::userClassDefault()) {
 629     _flags |= userClassAssertions;
 630   }
 631   if (EnableContended) {
 632     _flags |= enableContendedPadding;
 633   }
 634   if (RestrictContended) {
 635     _flags |= restrictContendedPadding;
 636   }
 637   if (PreserveFramePointer) {
 638     _flags |= preserveFramePointer;
 639   }
 640   _codeCacheSize         = pointer_delta(CodeCache::high_bound(), CodeCache::low_bound(), 1);
 641   _compressedOopShift    = CompressedOops::shift();
 642   _compressedOopBase     = CompressedOops::base();
 643   _compressedKlassShift  = CompressedKlassPointers::shift();
 644   _compressedKlassBase   = CompressedKlassPointers::base();
 645   _contendedPaddingWidth = ContendedPaddingWidth;
 646   _objectAlignment       = ObjectAlignmentInBytes;
 647   _gc                    = (uint)Universe::heap()->kind();
 648   _cpu_features_offset   = cpu_features_offset;
 649 }
 650 
 651 bool AOTCodeCache::Config::verify(AOTCodeCache* cache) const {
 652   // The first checks affect all cached AOT code
 653 #ifdef ASSERT
 654   if ((_flags & debugVM) == 0) {
 655     log_debug(aot, codecache, init)("AOT Code Cache disabled: it was created by product VM, it can't be used by debug VM");
 656     return false;
 657   }
 658 #else
 659   if ((_flags & debugVM) != 0) {
 660     log_debug(aot, codecache, init)("AOT Code Cache disabled: it was created by debug VM, it can't be used by product VM");
 661     return false;
 662   }
 663 #endif
 664 
 665   size_t codeCacheSize = pointer_delta(CodeCache::high_bound(), CodeCache::low_bound(), 1);
 666   if (_codeCacheSize != codeCacheSize) {
 667     log_debug(aot, codecache, init)("AOT Code Cache disabled: it was created with CodeCache size = %dKb vs current %dKb", (int)(_codeCacheSize/K), (int)(codeCacheSize/K));
 668     return false;
 669   }
 670 
 671   CollectedHeap::Name aot_gc = (CollectedHeap::Name)_gc;
 672   if (aot_gc != Universe::heap()->kind()) {
 673     log_debug(aot, codecache, init)("AOT Code Cache disabled: it was created with different GC: %s vs current %s", GCConfig::hs_err_name(aot_gc), GCConfig::hs_err_name());
 674     return false;
 675   }
 676 
 677   if (_objectAlignment != (uint)ObjectAlignmentInBytes) {
 678     log_debug(aot, codecache, init)("AOT Code Cache disabled: it was created with ObjectAlignmentInBytes = %d vs current %d", _objectAlignment, ObjectAlignmentInBytes);
 679     return false;
 680   }
 681 
 682   if (((_flags & enableContendedPadding) != 0) != EnableContended) {
 683     log_debug(aot, codecache, init)("AOT Code Cache disabled: it was created with EnableContended = %s vs current %s", (EnableContended ? "false" : "true"), (EnableContended ? "true" : "false"));
 684     return false;
 685   }
 686   if (((_flags & restrictContendedPadding) != 0) != RestrictContended) {
 687     log_debug(aot, codecache, init)("AOT Code Cache disabled: it was created with RestrictContended = %s vs current %s", (RestrictContended ? "false" : "true"), (RestrictContended ? "true" : "false"));
 688     return false;
 689   }
 690   if (_contendedPaddingWidth != (uint)ContendedPaddingWidth) {
 691     log_debug(aot, codecache, init)("AOT Code Cache disabled: it was created with ContendedPaddingWidth = %d vs current %d", _contendedPaddingWidth, ContendedPaddingWidth);
 692     return false;
 693   }
 694 
 695   if (((_flags & preserveFramePointer) != 0) != PreserveFramePointer) {
 696     log_debug(aot, codecache, init)("AOT Code Cache disabled: it was created with PreserveFramePointer = %s vs current %s", (PreserveFramePointer ? "false" : "true"), (PreserveFramePointer ? "true" : "false"));
 697     return false;
 698   }
 699 
 700   if (((_flags & compressedClassPointers) != 0) != UseCompressedClassPointers) {
 701     log_debug(aot, codecache, init)("AOT Code Cache disabled: it was created with UseCompressedClassPointers = %s vs current %s", (UseCompressedClassPointers ? "false" : "true"), (UseCompressedClassPointers ? "true" : "false"));
 702     return false;
 703   }
 704   if (_compressedKlassShift != (uint)CompressedKlassPointers::shift()) {
 705     log_debug(aot, codecache, init)("AOT Code Cache disabled: it was created with CompressedKlassPointers::shift() = %d vs current %d", _compressedKlassShift, CompressedKlassPointers::shift());
 706     return false;
 707   }
 708   if ((_compressedKlassBase == nullptr || CompressedKlassPointers::base() == nullptr) && (_compressedKlassBase != CompressedKlassPointers::base())) {
 709     log_debug(aot, codecache, init)("AOT Code Cache disabled: incompatible CompressedKlassPointers::base(): %p vs current %p", _compressedKlassBase, CompressedKlassPointers::base());
 710     return false;
 711   }
 712 
 713   if (((_flags & compressedOops) != 0) != UseCompressedOops) {
 714     log_debug(aot, codecache, init)("AOT Code Cache disabled: it was created with UseCompressedOops = %s vs current %s", (UseCompressedOops ? "false" : "true"), (UseCompressedOops ? "true" : "false"));
 715     return false;
 716   }
 717   if (_compressedOopShift != (uint)CompressedOops::shift()) {
 718     log_debug(aot, codecache, init)("AOT Code Cache disabled: it was created with different CompressedOops::shift(): %d vs current %d", _compressedOopShift, CompressedOops::shift());
 719     return false;
 720   }
 721   if ((_compressedOopBase == nullptr || CompressedOops::base() == nullptr) && (_compressedOopBase != CompressedOops::base())) {
 722     log_debug(aot, codecache, init)("AOT Code Cache disabled: incompatible CompressedOops::base(): %p vs current %p", _compressedOopBase, CompressedOops::base());
 723     return false;
 724   }
 725 
 726   LogStreamHandle(Debug, aot, codecache, init) log;
 727   if (log.is_enabled()) {
 728     log.print_cr("Available CPU features: %s", VM_Version::features_string());
 729   }
 730 
 731   uint offset = _cpu_features_offset;
 732   uint cpu_features_size = *(uint *)cache->addr(offset);
 733   assert(cpu_features_size == (uint)VM_Version::cpu_features_size(), "must be");
 734   offset += sizeof(uint);
 735 
 736   void* cached_cpu_features_buffer = (void *)cache->addr(offset);
 737   if (log.is_enabled()) {
 738     ResourceMark rm; // required for stringStream::as_string()
 739     stringStream ss;
 740     VM_Version::get_cpu_features_name(cached_cpu_features_buffer, ss);
 741     log.print_cr("CPU features recorded in AOTCodeCache: %s", ss.as_string());
 742   }
 743 
 744   if (AOTCodeCPUFeatureCheck && !VM_Version::supports_features(cached_cpu_features_buffer)) {
 745     if (log.is_enabled()) {
 746       ResourceMark rm; // required for stringStream::as_string()
 747       stringStream ss;
 748       VM_Version::get_missing_features_name(cached_cpu_features_buffer, ss);
 749       log.print_cr("AOT Code Cache disabled: required cpu features are missing: %s", ss.as_string());
 750     }
 751     return false;
 752   }
 753 
 754   // The next checks affect only AOT nmethods
 755   if (((_flags & systemClassAssertions) != 0) != JavaAssertions::systemClassDefault()) {
 756     log_debug(aot, codecache, init)("AOT Code Cache disabled: it was created with JavaAssertions::systemClassDefault() = %s vs current %s", (JavaAssertions::systemClassDefault() ? "disabled" : "enabled"), (JavaAssertions::systemClassDefault() ? "enabled" : "disabled"));
 757     FLAG_SET_ERGO(AOTCodeCaching, false);
 758   }
 759   if (((_flags & userClassAssertions) != 0) != JavaAssertions::userClassDefault()) {
 760     log_debug(aot, codecache, init)("AOT Code Cache disabled: it was created with JavaAssertions::userClassDefault() = %s vs current %s", (JavaAssertions::userClassDefault() ? "disabled" : "enabled"), (JavaAssertions::userClassDefault() ? "enabled" : "disabled"));
 761     FLAG_SET_ERGO(AOTCodeCaching, false);
 762   }
 763 
 764   return true;
 765 }
 766 
 767 bool AOTCodeCache::Header::verify(uint load_size) const {
 768   if (_version != AOT_CODE_VERSION) {
 769     log_debug(aot, codecache, init)("AOT Code Cache disabled: different AOT Code version %d vs %d recorded in AOT Code header", AOT_CODE_VERSION, _version);
 770     return false;
 771   }
 772   if (load_size < _cache_size) {
 773     log_debug(aot, codecache, init)("AOT Code Cache disabled: AOT Code Cache size %d < %d recorded in AOT Code header", load_size, _cache_size);
 774     return false;
 775   }
 776   return true;
 777 }
 778 
 779 volatile int AOTCodeCache::_nmethod_readers = 0;
 780 
 781 AOTCodeCache* AOTCodeCache::open_for_use() {
 782   if (AOTCodeCache::is_on_for_use()) {
 783     return AOTCodeCache::cache();
 784   }
 785   return nullptr;
 786 }
 787 
 788 AOTCodeCache* AOTCodeCache::open_for_dump() {
 789   if (AOTCodeCache::is_on_for_dump()) {
 790     AOTCodeCache* cache = AOTCodeCache::cache();
 791     cache->clear_lookup_failed(); // Reset bit
 792     return cache;
 793   }
 794   return nullptr;
 795 }
 796 
 797 bool AOTCodeCache::is_address_in_aot_cache(address p) {
 798   AOTCodeCache* cache = open_for_use();
 799   if (cache == nullptr) {
 800     return false;
 801   }
 802   if ((p >= (address)cache->cache_buffer()) &&
 803       (p < (address)(cache->cache_buffer() + cache->load_size()))) {
 804     return true;
 805   }
 806   return false;
 807 }
 808 
 809 static void copy_bytes(const char* from, address to, uint size) {
 810   assert((int)size > 0, "sanity");
 811   memcpy(to, from, size);
 812   log_trace(aot, codecache)("Copied %d bytes from " INTPTR_FORMAT " to " INTPTR_FORMAT, size, p2i(from), p2i(to));
 813 }
 814 
 815 AOTCodeReader::AOTCodeReader(AOTCodeCache* cache, AOTCodeEntry* entry, CompileTask* task) {
 816   _cache = cache;
 817   _entry = entry;
 818   _load_buffer = cache->cache_buffer();
 819   _read_position = 0;
 820   if (task != nullptr) {
 821     _compile_id = task->compile_id();
 822     _comp_level = task->comp_level();
 823     _preload    = task->preload();
 824   } else {
 825     _compile_id = 0;
 826     _comp_level = 0;
 827     _preload    = false;
 828   }
 829   _lookup_failed = false;
 830 }
 831 
 832 void AOTCodeReader::set_read_position(uint pos) {
 833   if (pos == _read_position) {
 834     return;
 835   }
 836   assert(pos < _cache->load_size(), "offset:%d >= file size:%d", pos, _cache->load_size());
 837   _read_position = pos;
 838 }
 839 
 840 bool AOTCodeCache::set_write_position(uint pos) {
 841   if (pos == _write_position) {
 842     return true;
 843   }
 844   if (_store_size < _write_position) {
 845     _store_size = _write_position; // Adjust during write
 846   }
 847   assert(pos < _store_size, "offset:%d >= file size:%d", pos, _store_size);
 848   _write_position = pos;
 849   return true;
 850 }
 851 
 852 static char align_buffer[256] = { 0 };
 853 
 854 bool AOTCodeCache::align_write() {
 855   // We are not executing code from the cache - we copy it byte by byte first.
 856   // No need for big alignment (or any alignment at all).
 857   uint padding = DATA_ALIGNMENT - (_write_position & (DATA_ALIGNMENT - 1));
 858   if (padding == DATA_ALIGNMENT) {
 859     return true;
 860   }
 861   uint n = write_bytes((const void*)&align_buffer, padding);
 862   if (n != padding) {
 863     return false;
 864   }
 865   log_trace(aot, codecache)("Adjust write alignment in AOT Code Cache");
 866   return true;
 867 }
 868 
 869 // Check to see if AOT code cache has required space to store "nbytes" of data
 870 address AOTCodeCache::reserve_bytes(uint nbytes) {
 871   assert(for_dump(), "Code Cache file is not created");
 872   uint new_position = _write_position + nbytes;
 873   if (new_position >= (uint)((char*)_store_entries - _store_buffer)) {
 874     log_warning(aot,codecache)("Failed to ensure %d bytes at offset %d in AOT Code Cache. Increase AOTCodeMaxSize.",
 875                                nbytes, _write_position);
 876     set_failed();
 877     report_store_failure();
 878     return nullptr;
 879   }
 880   address buffer = (address)(_store_buffer + _write_position);
 881   log_trace(aot, codecache)("Reserved %d bytes at offset %d in AOT Code Cache", nbytes, _write_position);
 882   _write_position += nbytes;
 883   if (_store_size < _write_position) {
 884     _store_size = _write_position;
 885   }
 886   return buffer;
 887 }
 888 
 889 uint AOTCodeCache::write_bytes(const void* buffer, uint nbytes) {
 890   assert(for_dump(), "Code Cache file is not created");
 891   if (nbytes == 0) {
 892     return 0;
 893   }
 894   uint new_position = _write_position + nbytes;
 895   if (new_position >= (uint)((char*)_store_entries - _store_buffer)) {
 896     log_warning(aot, codecache)("Failed to write %d bytes at offset %d to AOT Code Cache. Increase AOTCodeMaxSize.",
 897                                 nbytes, _write_position);
 898     set_failed();
 899     report_store_failure();
 900     return 0;
 901   }
 902   copy_bytes((const char* )buffer, (address)(_store_buffer + _write_position), nbytes);
 903   log_trace(aot, codecache)("Wrote %d bytes at offset %d to AOT Code Cache", nbytes, _write_position);
 904   _write_position += nbytes;
 905   if (_store_size < _write_position) {
 906     _store_size = _write_position;
 907   }
 908   return nbytes;
 909 }
 910 
 911 AOTCodeEntry* AOTCodeCache::find_code_entry(const methodHandle& method, uint comp_level) {
 912   assert(is_using_code(), "AOT code caching should be enabled");
 913   if (!method->in_aot_cache()) {
 914     return nullptr;
 915   }
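       // DisableAOTCode is treated below as a bit mask over compilation levels:
       // bit 0 disables AOT code for CompLevel_simple (level 1), bit 1 for
       // CompLevel_limited_profile (level 2), and bit 2 for
       // CompLevel_full_optimization (level 4).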
 916   switch (comp_level) {
 917     case CompLevel_simple:
 918       if ((DisableAOTCode & (1 << 0)) != 0) {
 919         return nullptr;
 920       }
 921       break;
 922     case CompLevel_limited_profile:
 923       if ((DisableAOTCode & (1 << 1)) != 0) {
 924         return nullptr;
 925       }
 926       break;
 927     case CompLevel_full_optimization:
 928       if ((DisableAOTCode & (1 << 2)) != 0) {
 929         return nullptr;
 930       }
 931       break;
 932 
 933     default: return nullptr; // Level 1, 2, and 4 only
 934   }
 935   TraceTime t1("Total time to find AOT code", &_t_totalFind, enable_timers(), false);
 936   if (is_on() && _cache->cache_buffer() != nullptr) {
 937     uint id = AOTCacheAccess::convert_method_to_offset(method());
 938     AOTCodeEntry* entry = _cache->find_entry(AOTCodeEntry::Nmethod, id, comp_level);
 939     if (entry == nullptr) {
 940       LogStreamHandle(Info, aot, codecache, nmethod) log;
 941       if (log.is_enabled()) {
 942         ResourceMark rm;
 943         const char* target_name = method->name_and_sig_as_C_string();
 944         log.print("Missing entry for '%s' (comp_level %d, id: " UINT32_FORMAT_X_0 ")", target_name, (uint)comp_level, id);
 945       }
 946 #ifdef ASSERT
 947     } else {
 948       ResourceMark rm;
 949       assert(method() == entry->method(), "AOTCodeCache: saved nmethod's method %p (name: %s id: " UINT32_FORMAT_X_0
 950              ") is different from the method %p (name: %s, id: " UINT32_FORMAT_X_0 ") being looked up",
 951              entry->method(), entry->method()->name_and_sig_as_C_string(), entry->id(), method(), method()->name_and_sig_as_C_string(), id);
 952 #endif
 953     }
 954 
 955     DirectiveSet* directives = DirectivesStack::getMatchingDirective(method, nullptr);
 956     if (directives->IgnorePrecompiledOption) {
 957       LogStreamHandle(Info, aot, codecache, compilation) log;
 958       if (log.is_enabled()) {
 959         log.print("Ignore AOT code entry on level %d for ", comp_level);
 960         method->print_value_on(&log);
 961       }
 962       return nullptr;
 963     }
 964 
 965     return entry;
 966   }
 967   return nullptr;
 968 }
 969 
 970 Method* AOTCodeEntry::method() {
 971   assert(_kind == Nmethod, "invalid kind %d", _kind);
 972   assert(AOTCodeCache::is_on_for_use(), "must be");
 973   return AOTCacheAccess::convert_offset_to_method(_id);
 974 }
 975 
 976 void* AOTCodeEntry::operator new(size_t x, AOTCodeCache* cache) {
 977   return (void*)(cache->add_entry());
 978 }
 979 
 980 static bool check_entry(AOTCodeEntry::Kind kind, uint id, uint comp_level, AOTCodeEntry* entry) {
 981   if (entry->kind() == kind) {
 982     assert(entry->id() == id, "sanity");
 983     if (kind != AOTCodeEntry::Nmethod || // adapters and stubs have only one version
 984         // Look only for normal AOT code entry, preload code is handled separately
 985         (!entry->not_entrant() && !entry->has_clinit_barriers() && (entry->comp_level() == comp_level))) {
 986       return true; // Found
 987     }
 988   }
 989   return false;
 990 }
 991 
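     // The search table consulted below is an array of [id, index] pairs sorted by
     // id (see finish_write()). Binary search locates some pair with a matching id;
     // because several entries may share an id (e.g. nmethods of the same method at
     // different comp levels), a short linear scan around the hit picks the entry
     // that also matches the requested kind and comp level.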
 992 AOTCodeEntry* AOTCodeCache::find_entry(AOTCodeEntry::Kind kind, uint id, uint comp_level) {
 993   assert(_for_use, "sanity");
 994   uint count = _load_header->entries_count();
 995   if (_load_entries == nullptr) {
 996     // Read it
 997     _search_entries = (uint*)addr(_load_header->search_table_offset()); // [id, index]
 998     _load_entries = (AOTCodeEntry*)addr(_load_header->entries_offset());
 999     log_debug(aot, codecache, init)("Read %d entries table at offset %d from AOT Code Cache", count, _load_header->entries_offset());
1000   }
1001   // Binary search
1002   int l = 0;
1003   int h = count - 1;
1004   while (l <= h) {
1005     int mid = (l + h) >> 1;
1006     int ix = mid * 2;
1007     uint is = _search_entries[ix];
1008     if (is == id) {
1009       int index = _search_entries[ix + 1];
1010       AOTCodeEntry* entry = &(_load_entries[index]);
1011       if (check_entry(kind, id, comp_level, entry)) {
1012         return entry; // Found
1013       }
1014       // Linear search around
1015       for (int i = mid - 1; i >= l; i--) { // search back
1016         ix = i * 2;
1017         is = _search_entries[ix];
1018         if (is != id) {
1019           break;
1020         }
1021         index = _search_entries[ix + 1];
1022         AOTCodeEntry* entry = &(_load_entries[index]);
1023         if (check_entry(kind, id, comp_level, entry)) {
1024           return entry; // Found
1025         }
1026       }
1027       for (int i = mid + 1; i <= h; i++) { // search forward
1028         ix = i * 2;
1029         is = _search_entries[ix];
1030         if (is != id) {
1031           break;
1032         }
1033         index = _search_entries[ix + 1];
1034         AOTCodeEntry* entry = &(_load_entries[index]);
1035         if (check_entry(kind, id, comp_level, entry)) {
1036           return entry; // Found
1037         }
1038       }
1039       break; // No match found
1040     } else if (is < id) {
1041       l = mid + 1;
1042     } else {
1043       h = mid - 1;
1044     }
1045   }
1046   return nullptr;
1047 }
1048 
1049 void AOTCodeCache::invalidate_entry(AOTCodeEntry* entry) {
1050   assert(entry != nullptr, "all entries should be read already");
1051   if (entry->not_entrant()) {
1052     return; // Someone invalidated it already
1053   }
1054 #ifdef ASSERT
1055   assert(_load_entries != nullptr, "sanity");
1056   {
1057     uint name_offset = entry->offset() + entry->name_offset();
1058     const char* name = _load_buffer + name_offset;
1059     uint level       = entry->comp_level();
1060     uint comp_id     = entry->comp_id();
1061     bool for_preload = entry->for_preload();
1062     bool clinit_brs  = entry->has_clinit_barriers();
1063     log_info(aot, codecache, nmethod)("Invalidating entry for '%s' (comp_id %d, comp_level %d, hash: " UINT32_FORMAT_X_0 "%s%s)",
1064                                       name, comp_id, level, entry->id(), (for_preload ? "P" : "A"), (clinit_brs ? ", has clinit barriers" : ""));
1065   }
1066   assert(entry->is_loaded() || entry->for_preload(), "invalidate only AOT code in use or a preload code");
1067   bool found = false;
1068   uint i = 0;
1069   uint count = 0;
1070   if (entry->for_preload()) {
1071     count = _load_header->preload_entries_count();
1072     AOTCodeEntry* preload_entry = (AOTCodeEntry*)addr(_load_header->preload_entries_offset());
1073     for (; i < count; i++) {
1074       if (entry == &preload_entry[i]) {
1075         break;
1076       }
1077     }
1078   } else {
1079     count = _load_header->entries_count();
1080     for(; i < count; i++) {
1081       if (entry == &(_load_entries[i])) {
1082         break;
1083       }
1084     }
1085   }
1086   found = (i < count);
1087   assert(found, "entry should exist");
1088 #endif
1089   entry->set_not_entrant();
1090   uint name_offset = entry->offset() + entry->name_offset();
1091   const char* name = _load_buffer + name_offset;
1092   uint level       = entry->comp_level();
1093   uint comp_id     = entry->comp_id();
1094   bool for_preload = entry->for_preload();
1095   bool clinit_brs  = entry->has_clinit_barriers();
1096   log_info(aot, codecache, nmethod)("Invalidated entry for '%s' (comp_id %d, comp_level %d, hash: " UINT32_FORMAT_X_0 "%s%s)",
1097                                     name, comp_id, level, entry->id(), (for_preload ? "P" : "A"), (clinit_brs ? ", has clinit barriers" : ""));
1098 
1099   if (!for_preload && (entry->comp_level() == CompLevel_full_optimization)) {
1100     // Invalidate preload code if normal AOT C2 code is invalidated,
1101     // most likely because some dependencies changed during run.
1102     // We can still use normal AOT code if preload code is
1103     // invalidated - normal AOT code has less restrictions.
1104     Method* method = entry->method();
1105     AOTCodeEntry* preload_entry = method->aot_code_entry();
1106     if (preload_entry != nullptr) {
1107       assert(preload_entry->for_preload(), "expecting only such entries here");
1108       invalidate_entry(preload_entry);
1109     }
1110   }
1111 }
1112 
1113 static int uint_cmp(const void *i, const void *j) {
1114   uint a = *(uint *)i;
1115   uint b = *(uint *)j;
1116   return a > b ? 1 : a < b ? -1 : 0;
1117 }
1118 
1119 void AOTCodeCache::store_cpu_features(char*& buffer, uint buffer_size) {
1120   uint* size_ptr = (uint *)buffer;
1121   *size_ptr = buffer_size;
1122   buffer += sizeof(uint);
1123 
1124   VM_Version::store_cpu_features(buffer);
1125   log_debug(aot, codecache, exit)("CPU features recorded in AOTCodeCache: %s", VM_Version::features_string());
1126   buffer += buffer_size;
1127   buffer = align_up(buffer, DATA_ALIGNMENT);
1128 }
1129 
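     // In outline, the write order below produces the following layout in the
     // AOT code data (the CachedCodeDirectory itself is the first allocation in
     // the "ac" region and records where this data starts, via set_aot_code_data()):
     //
     //   Header (filled in last, via Header::init())
     //   cpu features size + cpu features buffer
     //   AOTCodeEntry array for preload code, followed by the code data it refers to
     //   AOTCodeEntry array for normal code, followed by the code data it refers to
     //   C strings
     //   search table: [id, index] pairs sorted by id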
1130 bool AOTCodeCache::finish_write() {
1131   if (!align_write()) {
1132     return false;
1133   }
1134   uint strings_offset = _write_position;
1135   int strings_count = store_strings();
1136   if (strings_count < 0) {
1137     return false;
1138   }
1139   if (!align_write()) {
1140     return false;
1141   }
1142   uint strings_size = _write_position - strings_offset;
1143 
1144   uint code_count = _store_entries_cnt;
1145   if (code_count > 0) {
1146     _aot_code_directory = CachedCodeDirectory::create();
1147     assert(_aot_code_directory != nullptr, "Sanity check");
1148 
1149     uint header_size = (uint)align_up(sizeof(AOTCodeCache::Header), DATA_ALIGNMENT);
1150     uint search_count = code_count * 2;
1151     uint search_size = search_count * sizeof(uint);
1152     uint entries_size = (uint)align_up(code_count * sizeof(AOTCodeEntry), DATA_ALIGNMENT); // In bytes
1153     // _write_position should include code and strings
1154     uint code_alignment = code_count * DATA_ALIGNMENT; // We align_up code size when storing it.
1155     uint cpu_features_size = VM_Version::cpu_features_size();
1156     uint total_cpu_features_size = sizeof(uint) + cpu_features_size; // sizeof(uint) to store cpu_features_size
1157     uint total_size = _write_position + header_size + code_alignment +
1158                       search_size + entries_size +
1159                       align_up(total_cpu_features_size, DATA_ALIGNMENT);
1160     assert(total_size < max_aot_code_size(), "AOT Code size (" UINT32_FORMAT " bytes) is greater than AOTCodeMaxSize(" UINT32_FORMAT " bytes).", total_size, max_aot_code_size());
1161 
1162     // Allocate in AOT Cache buffer
1163     char* buffer = (char *)AOTCacheAccess::allocate_aot_code_region(total_size + DATA_ALIGNMENT);
1164     char* start = align_up(buffer, DATA_ALIGNMENT);
1165     char* current = start + header_size; // Skip header
1166 
1167     uint cpu_features_offset = current - start;
1168     store_cpu_features(current, cpu_features_size);
1169     assert(is_aligned(current, DATA_ALIGNMENT), "sanity check");
1170     assert(current < start + total_size, "sanity check");
1171 
1172     // Create ordered search table for entries [id, index];
1173     uint* search = NEW_C_HEAP_ARRAY(uint, search_count, mtCode);
1174 
1175     AOTCodeEntry* entries_address = _store_entries; // Pointer to latest entry
1176     AOTCodeStats stats;
1177     uint max_size = 0;
1178     // AOTCodeEntry entries were allocated in reverse order in the store buffer.
1179     // Process them in reverse order so that the first stored code is cached first.
1180 
1181     // Store AOTCodeEntry-s for preload code
1182     current = align_up(current, DATA_ALIGNMENT);
1183     uint preload_entries_cnt = 0;
1184     uint preload_entries_offset = current - start;
1185     AOTCodeEntry* preload_entries = (AOTCodeEntry*)current;
1186     for (int i = code_count - 1; i >= 0; i--) {
1187       AOTCodeEntry* entry = &entries_address[i];
1188       if (entry->load_fail()) {
1189         continue;
1190       }
1191       if (entry->for_preload()) {
1192         if (entry->not_entrant()) {
1193           // Skip not entrant preload code:
1194           // we can't pre-load code which may have failing dependencies.
1195           log_info(aot, codecache, exit)("Skip not entrant preload code comp_id: %d, comp_level: %d, hash: " UINT32_FORMAT_X_0 "%s",
1196                                          entry->comp_id(), entry->comp_level(), entry->id(), (entry->has_clinit_barriers() ? ", has clinit barriers" : ""));
1197         } else {
1198           copy_bytes((const char*)entry, (address)current, sizeof(AOTCodeEntry));
1199           stats.collect_entry_stats(entry);
1200           current += sizeof(AOTCodeEntry);
1201           preload_entries_cnt++;
1202         }
1203       }
1204     }
1205 
1206     // Now write the data for preload AOTCodeEntry
1207     for (int i = 0; i < (int)preload_entries_cnt; i++) {
1208       AOTCodeEntry* entry = &preload_entries[i];
1209       uint size = align_up(entry->size(), DATA_ALIGNMENT);
1210       if (size > max_size) {
1211         max_size = size;
1212       }
1213       copy_bytes((_store_buffer + entry->offset()), (address)current, size);
1214       entry->set_offset(current - start); // New offset
1215       current += size;
1216     }
1217 
1218     current = align_up(current, DATA_ALIGNMENT);
1219     uint entries_count = 0;
1220     uint new_entries_offset = current - start;
1221     AOTCodeEntry* code_entries = (AOTCodeEntry*)current;
1222     // Now scan normal entries
1223     for (int i = code_count - 1; i >= 0; i--) {
1224       AOTCodeEntry* entry = &entries_address[i];
1225       if (entry->load_fail() || entry->for_preload()) {
1226         continue;
1227       }
1228       if (entry->not_entrant()) {
1229         log_info(aot, codecache, exit)("Not entrant new entry comp_id: %d, comp_level: %d, hash: " UINT32_FORMAT_X_0 "%s",
1230                                        entry->comp_id(), entry->comp_level(), entry->id(), (entry->has_clinit_barriers() ? ", has clinit barriers" : ""));
1231         entry->set_entrant(); // Reset
1232       }
1233       copy_bytes((const char*)entry, (address)current, sizeof(AOTCodeEntry));
1234       stats.collect_entry_stats(entry);
1235       current += sizeof(AOTCodeEntry);
1236       search[entries_count*2 + 0] = entry->id();
1237       search[entries_count*2 + 1] = entries_count;
1238       entries_count++;
1239     }
1240 
1241     // Now write the data for normal AOTCodeEntry
1242     for (int i = 0; i < (int)entries_count; i++) {
1243       AOTCodeEntry* entry = &code_entries[i];
1244       uint size = align_up(entry->size(), DATA_ALIGNMENT);
1245       if (size > max_size) {
1246         max_size = size;
1247       }
1248       copy_bytes((_store_buffer + entry->offset()), (address)current, size);
1249       entry->set_offset(current - start); // New offset
1250       current += size;
1251     }
1252 
1253     if (preload_entries_cnt == 0 && entries_count == 0) {
1254       log_info(aot, codecache, exit)("AOT Code Cache was not created: no entries");
1255       FREE_C_HEAP_ARRAY(uint, search);
1256       return true; // Nothing to write
1257     }
1258     uint total_entries_cnt = preload_entries_cnt + entries_count;
1259     assert(total_entries_cnt <= code_count, "%d > %d", total_entries_cnt, code_count);
1260     // Write strings
1261     if (strings_count > 0) {
1262       copy_bytes((_store_buffer + strings_offset), (address)current, strings_size);
1263       strings_offset = (current - start); // New offset
1264       current += strings_size;
1265     }
1266 
1267     uint search_table_offset = current - start;
1268     // Sort and store search table
1269     qsort(search, entries_count, 2*sizeof(uint), uint_cmp);
1270     search_size = 2 * entries_count * sizeof(uint);
1271     copy_bytes((const char*)search, (address)current, search_size);
1272     FREE_C_HEAP_ARRAY(uint, search);
1273     current += search_size;
1274 
1275     log_stats_on_exit(stats);
1276 
1277     uint size = (current - start);
1278     assert(size <= total_size, "%d > %d", size , total_size);
1279     log_debug(aot, codecache, exit)("  AOT code cache size: %u bytes, max entry's size: %u bytes", size, max_size);
1280 
1281     // Finalize header
1282     AOTCodeCache::Header* header = (AOTCodeCache::Header*)start;
1283     header->init(size, (uint)strings_count, strings_offset,
1284                  entries_count, search_table_offset, new_entries_offset,
1285                  preload_entries_cnt, preload_entries_offset,
1286                  stats.entry_count(AOTCodeEntry::Adapter), stats.entry_count(AOTCodeEntry::SharedBlob),
1287                  stats.entry_count(AOTCodeEntry::C1Blob), stats.entry_count(AOTCodeEntry::C2Blob),
1288                  stats.entry_count(AOTCodeEntry::Stub), cpu_features_offset);
1289 
1290     log_info(aot, codecache, exit)("Wrote %d AOT code entries to AOT Code Cache", total_entries_cnt);
1291 
1292     _aot_code_directory->set_aot_code_data(size, start);
1293   }
1294   return true;
1295 }
1296 
1297 //------------------Store/Load AOT code ----------------------
1298 
1299 bool AOTCodeCache::store_code_blob(CodeBlob& blob, AOTCodeEntry::Kind entry_kind, uint id, const char* name) {
1300   AOTCodeCache* cache = open_for_dump();
1301   if (cache == nullptr) {
1302     return false;
1303   }
1304   assert(AOTCodeEntry::is_valid_entry_kind(entry_kind), "invalid entry_kind %d", entry_kind);
1305 
1306   if (AOTCodeEntry::is_adapter(entry_kind) && !is_dumping_adapter()) {
1307     return false;
1308   }
1309   if (AOTCodeEntry::is_blob(entry_kind) && !is_dumping_stub()) {
1310     return false;
1311   }
1312   log_debug(aot, codecache, stubs)("Writing blob '%s' (id=%u, kind=%s) to AOT Code Cache", name, id, aot_code_entry_kind_name[entry_kind]);
1313 
1314 #ifdef ASSERT
1315   LogStreamHandle(Trace, aot, codecache, stubs) log;
1316   if (log.is_enabled()) {
1317     FlagSetting fs(PrintRelocations, true);
1318     blob.print_on(&log);
1319   }
1320 #endif
1321   // We need to take a lock to prevent a race between compiler threads generating AOT code
1322   // and the main thread generating adapters.
1323   MutexLocker ml(Compile_lock);
1324   if (!is_on()) {
1325     return false; // AOT code cache was already dumped and closed.
1326   }
1327   if (!cache->align_write()) {
1328     return false;
1329   }
1330   uint entry_position = cache->_write_position;
1331 
1332   // Write name
1333   uint name_offset = cache->_write_position - entry_position;
1334   uint name_size = (uint)strlen(name) + 1; // Includes '\0'
1335   uint n = cache->write_bytes(name, name_size);
1336   if (n != name_size) {
1337     return false;
1338   }
1339 
1340   // Write CodeBlob
1341   if (!cache->align_write()) {
1342     return false;
1343   }
1344   uint blob_offset = cache->_write_position - entry_position;
1345   address archive_buffer = cache->reserve_bytes(blob.size());
1346   if (archive_buffer == nullptr) {
1347     return false;
1348   }
1349   CodeBlob::archive_blob(&blob, archive_buffer);
1350 
1351   uint reloc_data_size = blob.relocation_size();
1352   n = cache->write_bytes((address)blob.relocation_begin(), reloc_data_size);
1353   if (n != reloc_data_size) {
1354     return false;
1355   }
1356 
1357   bool has_oop_maps = false;
1358   if (blob.oop_maps() != nullptr) {
1359     if (!cache->write_oop_map_set(blob)) {
1360       return false;
1361     }
1362     has_oop_maps = true;
1363   }
1364 
1365 #ifndef PRODUCT
1366   // Write asm remarks
1367   if (!cache->write_asm_remarks(blob.asm_remarks(), /* use_string_table */ true)) {
1368     return false;
1369   }
1370   if (!cache->write_dbg_strings(blob.dbg_strings(), /* use_string_table */ true)) {
1371     return false;
1372   }
1373 #endif /* PRODUCT */
1374 
1375   if (!cache->write_relocations(blob)) {
1376     if (!cache->failed()) {
1377       // We may miss an address in the AOT table - skip this code blob.
1378       cache->set_write_position(entry_position);
1379     }
1380     return false;
1381   }
1382 
1383   uint entry_size = cache->_write_position - entry_position;
1384   AOTCodeEntry* entry = new(cache) AOTCodeEntry(entry_kind, encode_id(entry_kind, id),
1385                                                 entry_position, entry_size, name_offset, name_size,
1386                                                 blob_offset, has_oop_maps, blob.content_begin());
1387   log_debug(aot, codecache, stubs)("Wrote code blob '%s' (id=%u, kind=%s) to AOT Code Cache", name, id, aot_code_entry_kind_name[entry_kind]);
1388   return true;
1389 }
1390 
1391 bool AOTCodeCache::store_code_blob(CodeBlob& blob, AOTCodeEntry::Kind entry_kind, BlobId id) {
1392   assert(AOTCodeEntry::is_blob(entry_kind),
1393          "wrong entry kind for blob id %s", StubInfo::name(id));
1394   return store_code_blob(blob, entry_kind, (uint)id, StubInfo::name(id));
1395 }
1396 
1397 CodeBlob* AOTCodeCache::load_code_blob(AOTCodeEntry::Kind entry_kind, uint id, const char* name) {
1398   AOTCodeCache* cache = open_for_use();
1399   if (cache == nullptr) {
1400     return nullptr;
1401   }
1402   assert(AOTCodeEntry::is_valid_entry_kind(entry_kind), "invalid entry_kind %d", entry_kind);
1403 
1404   if (AOTCodeEntry::is_adapter(entry_kind) && !is_using_adapter()) {
1405     return nullptr;
1406   }
1407   if (AOTCodeEntry::is_blob(entry_kind) && !is_using_stub()) {
1408     return nullptr;
1409   }
1410   log_debug(aot, codecache, stubs)("Reading blob '%s' (id=%u, kind=%s) from AOT Code Cache", name, id, aot_code_entry_kind_name[entry_kind]);
1411 
1412   AOTCodeEntry* entry = cache->find_entry(entry_kind, encode_id(entry_kind, id));
1413   if (entry == nullptr) {
1414     return nullptr;
1415   }
1416   AOTCodeReader reader(cache, entry, nullptr);
1417   CodeBlob* blob = reader.compile_code_blob(name);
1418 
1419   log_debug(aot, codecache, stubs)("%sRead blob '%s' (id=%u, kind=%s) from AOT Code Cache",
1420                                    (blob == nullptr? "Failed to " : ""), name, id, aot_code_entry_kind_name[entry_kind]);
1421   return blob;
1422 }
1423 
1424 CodeBlob* AOTCodeCache::load_code_blob(AOTCodeEntry::Kind entry_kind, BlobId id) {
1425   assert(AOTCodeEntry::is_blob(entry_kind),
1426          "wrong entry kind for blob id %s", StubInfo::name(id));
1427   return load_code_blob(entry_kind, (uint)id, StubInfo::name(id));
1428 }
1429 
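     // Materialize a CodeBlob from a cache entry written by store_code_blob().
     // The data is consumed in the order it was written: name (checked against
     // the expected name), archived blob, relocation data, oop maps,
     // (non-product) asm remarks and dbg strings, and finally the relocation
     // fixup data applied by fix_relocations().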
1430 CodeBlob* AOTCodeReader::compile_code_blob(const char* name) {
1431   uint entry_position = _entry->offset();
1432 
1433   // Read name
1434   uint name_offset = entry_position + _entry->name_offset();
1435   uint name_size = _entry->name_size(); // Includes '\0'
1436   const char* stored_name = addr(name_offset);
1437 
1438   if (strncmp(stored_name, name, (name_size - 1)) != 0) {
1439     log_warning(aot, codecache, stubs)("Saved blob's name '%s' is different from the expected name '%s'",
1440                                        stored_name, name);
1441     set_lookup_failed(); // Skip this blob
1442     return nullptr;
1443   }
1444 
1445   // Read archived code blob
1446   uint offset = entry_position + _entry->code_offset();
1447   CodeBlob* archived_blob = (CodeBlob*)addr(offset);
1448   offset += archived_blob->size();
1449 
1450   address reloc_data = (address)addr(offset);
1451   offset += archived_blob->relocation_size();
1452   set_read_position(offset);
1453 
1454   ImmutableOopMapSet* oop_maps = nullptr;
1455   if (_entry->has_oop_maps()) {
1456     oop_maps = read_oop_map_set();
1457   }
1458 
1459   CodeBlob* code_blob = CodeBlob::create(archived_blob,
1460                                          stored_name,
1461                                          reloc_data,
1462                                          oop_maps
1463                                         );
1464   if (code_blob == nullptr) { // no space left in CodeCache
1465     return nullptr;
1466   }
1467 
1468 #ifndef PRODUCT
1469   code_blob->asm_remarks().init();
1470   read_asm_remarks(code_blob->asm_remarks(), /* use_string_table */ true);
1471   code_blob->dbg_strings().init();
1472   read_dbg_strings(code_blob->dbg_strings(), /* use_string_table */ true);
1473 #endif // PRODUCT
1474 
1475   fix_relocations(code_blob);
1476 
1477 #ifdef ASSERT
1478   LogStreamHandle(Trace, aot, codecache, stubs) log;
1479   if (log.is_enabled()) {
1480     FlagSetting fs(PrintRelocations, true);
1481     code_blob->print_on(&log);
1482   }
1483 #endif
1484   return code_blob;
1485 }
1486 
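     // A stub entry stores the generated code bytes followed by the stub's name;
     // it is keyed by AOTCodeEntry::Stub and the vmIntrinsicID.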
1487 bool AOTCodeCache::store_stub(StubCodeGenerator* cgen, vmIntrinsicID id, const char* name, address start) {
1488   if (!is_dumping_stub()) {
1489     return false;
1490   }
1491   AOTCodeCache* cache = open_for_dump();
1492   if (cache == nullptr) {
1493     return false;
1494   }
1495   log_info(aot, codecache, stubs)("Writing stub '%s' id:%d to AOT Code Cache", name, (int)id);
1496   if (!cache->align_write()) {
1497     return false;
1498   }
1499 #ifdef ASSERT
1500   CodeSection* cs = cgen->assembler()->code_section();
1501   if (cs->has_locs()) {
1502     uint reloc_count = cs->locs_count();
1503     tty->print_cr("======== write stubs code section relocations [%d]:", reloc_count);
1504     // Collect additional data
1505     RelocIterator iter(cs);
1506     while (iter.next()) {
1507       switch (iter.type()) {
1508         case relocInfo::none:
1509           break;
1510         default: {
1511           iter.print_current_on(tty);
1512           fatal("stub's relocation %d unimplemented", (int)iter.type());
1513           break;
1514         }
1515       }
1516     }
1517   }
1518 #endif
1519   uint entry_position = cache->_write_position;
1520 
1521   // Write code
1522   uint code_offset = 0;
1523   uint code_size = cgen->assembler()->pc() - start;
1524   uint n = cache->write_bytes(start, code_size);
1525   if (n != code_size) {
1526     return false;
1527   }
1528   // Write name
1529   uint name_offset = cache->_write_position - entry_position;
1530   uint name_size = (uint)strlen(name) + 1; // Includes '\0'
1531   n = cache->write_bytes(name, name_size);
1532   if (n != name_size) {
1533     return false;
1534   }
1535   uint entry_size = cache->_write_position - entry_position;
1536   AOTCodeEntry* entry = new(cache) AOTCodeEntry(entry_position, entry_size, name_offset, name_size,
1537                                                 code_offset, code_size,
1538                                                 AOTCodeEntry::Stub, (uint32_t)id);
1539   log_info(aot, codecache, stubs)("Wrote stub '%s' id:%d to AOT Code Cache", name, (int)id);
1540   return true;
1541 }
1542 
1543 bool AOTCodeCache::load_stub(StubCodeGenerator* cgen, vmIntrinsicID id, const char* name, address start) {
1544   if (!is_using_stub()) {
1545     return false;
1546   }
1547   assert(start == cgen->assembler()->pc(), "wrong buffer");
1548   AOTCodeCache* cache = open_for_use();
1549   if (cache == nullptr) {
1550     return false;
1551   }
1552   AOTCodeEntry* entry = cache->find_entry(AOTCodeEntry::Stub, (uint)id);
1553   if (entry == nullptr) {
1554     return false;
1555   }
1556   uint entry_position = entry->offset();
1557   // Read name
1558   uint name_offset = entry->name_offset() + entry_position;
1559   uint name_size   = entry->name_size(); // Includes '\0'
1560   const char* saved_name = cache->addr(name_offset);
1561   if (strncmp(name, saved_name, (name_size - 1)) != 0) {
1562     log_warning(aot, codecache)("Saved stub's name '%s' is different from '%s' for id:%d", saved_name, name, (int)id);
1563     cache->set_failed();
1564     report_load_failure();
1565     return false;
1566   }
1567   log_info(aot, codecache, stubs)("Reading stub '%s' id:%d from AOT Code Cache", name, (int)id);
1568   // Read code
1569   uint code_offset = entry->code_offset() + entry_position;
1570   uint code_size   = entry->code_size();
1571   copy_bytes(cache->addr(code_offset), start, code_size);
1572   cgen->assembler()->code_section()->set_end(start + code_size);
1573   log_info(aot, codecache, stubs)("Read stub '%s' id:%d from AOT Code Cache", name, (int)id);
1574   return true;
1575 }
1576 
1577 AOTCodeEntry* AOTCodeCache::store_nmethod(nmethod* nm, AbstractCompiler* compiler, bool for_preload) {
1578   if (!is_dumping_code()) {
1579     return nullptr;
1580   }
1581   assert(CDSConfig::is_dumping_aot_code(), "should be called only when allowed");
1582   AOTCodeCache* cache = open_for_dump();
1583   precond(cache != nullptr);
1584   precond(!nm->is_osr_method()); // AOT compilation is requested only during AOT cache assembly phase
1585   if (!compiler->is_c1() && !compiler->is_c2()) {
1586     // Only c1 and c2 compilers
1587     return nullptr;
1588   }
1589   int comp_level = nm->comp_level();
1590   if (comp_level == CompLevel_full_profile) {
1591     // Do not cache C1 compilations with full profile, i.e. tier 3
1592     return nullptr;
1593   }
1594   assert(comp_level == CompLevel_simple || comp_level == CompLevel_limited_profile || comp_level == CompLevel_full_optimization, "must be");
1595 
1596   TraceTime t1("Total time to store AOT code", &_t_totalStore, enable_timers(), false);
1597   AOTCodeEntry* entry = nullptr;
1598   entry = cache->write_nmethod(nm, for_preload);
1599   if (entry == nullptr) {
1600     log_info(aot, codecache, nmethod)("%d (L%d): nmethod store attempt failed", nm->compile_id(), comp_level);
1601   }
1602   return entry;
1603 }
1604 
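     // An nmethod entry is written as:
     //   [method name] [padding] [archived nmethod] [relocation data]
     //   [oops] [metadata] [oop map set, if present] [immutable data]
     //   [relocation-immediate oops and metadata] [per-relocation fixup data]
     //   [asm remarks + dbg strings (non-product only)]
     // The entry id is the method's offset from the AOT cache base address.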
1605 AOTCodeEntry* AOTCodeCache::write_nmethod(nmethod* nm, bool for_preload) {
1606   AOTCodeCache* cache = open_for_dump();
1607   assert(cache != nullptr, "sanity check");
1608   assert(!nm->has_clinit_barriers() || (ClassInitBarrierMode > 0), "sanity");
1609   uint comp_id = nm->compile_id();
1610   uint comp_level = nm->comp_level();
1611   Method* method = nm->method();
1612   if (!AOTCacheAccess::can_generate_aot_code(method)) {
1613     ResourceMark rm;
1614     log_info(aot, codecache, nmethod)("%d (L%d): Skip method '%s' for AOT%s compile: not in AOT cache", comp_id, (int)comp_level, method->name_and_sig_as_C_string(), (for_preload ? " preload" : ""));
1615     assert(AOTCacheAccess::can_generate_aot_code(method), "sanity");
1616     return nullptr;
1617   }
1618   InstanceKlass* holder = method->method_holder();
1619   bool builtin_loader = holder->class_loader_data()->is_builtin_class_loader_data();
1620   if (!builtin_loader) {
1621     ResourceMark rm;
1622     log_info(aot, codecache, nmethod)("%d (L%d): Skip method '%s' loaded by custom class loader %s", comp_id, (int)comp_level, method->name_and_sig_as_C_string(), holder->class_loader_data()->loader_name());
1623     assert(builtin_loader, "sanity");
1624     return nullptr;
1625   }
1626 
1627   _for_preload = for_preload;
1628   _has_clinit_barriers = nm->has_clinit_barriers();
1629 
1630   if (!align_write()) {
1631     return nullptr;
1632   }
1633 
1634   uint entry_position = _write_position;
1635 
1636   // Write name
1637   uint name_offset = 0;
1638   uint name_size   = 0;
1639   uint id = 0;
1640   uint n;
1641   {
1642     ResourceMark rm;
1643     const char* name = method->name_and_sig_as_C_string();
1644     log_info(aot, codecache, nmethod)("%d (L%d): Writing nmethod '%s' (comp level: %d%s) to AOT Code Cache",
1645                                       comp_id, (int)comp_level, name, comp_level,
1646                                       (nm->has_clinit_barriers() ? ", has clinit barriers" : ""));
1647 
1648     LogStreamHandle(Info, aot, codecache, loader) log;
1649     if (log.is_enabled()) {
1650       oop loader = holder->class_loader();
1651       oop domain = holder->protection_domain();
1652       log.print("Holder: ");
1653       holder->print_value_on(&log);
1654       log.print(" loader: ");
1655       if (loader == nullptr) {
1656         log.print("nullptr");
1657       } else {
1658         loader->print_value_on(&log);
1659       }
1660       log.print(" domain: ");
1661       if (domain == nullptr) {
1662         log.print("nullptr");
1663       } else {
1664         domain->print_value_on(&log);
1665       }
1666       log.cr();
1667     }
1668     name_offset = _write_position - entry_position;
1669     name_size   = (uint)strlen(name) + 1; // Includes '\0'
1670     n = write_bytes(name, name_size);
1671     if (n != name_size) {
1672       return nullptr;
1673     }
1674   }
1675   id = AOTCacheAccess::delta_from_base_address((address)nm->method());
1676 
1677   // Write CodeBlob
1678   if (!cache->align_write()) {
1679     return nullptr;
1680   }
1681   uint blob_offset = cache->_write_position - entry_position;
1682   address archive_buffer = cache->reserve_bytes(nm->size());
1683   if (archive_buffer == nullptr) {
1684     return nullptr;
1685   }
1686   CodeBlob::archive_blob(nm, archive_buffer);
1687 
1688   uint reloc_data_size = nm->relocation_size();
1689   n = write_bytes((address)nm->relocation_begin(), reloc_data_size);
1690   if (n != reloc_data_size) {
1691     return nullptr;
1692   }
1693 
1694   // Write oops and metadata present in the nmethod's data region
1695   if (!write_oops(nm)) {
1696     if (lookup_failed() && !failed()) {
1697       // Skip this method and reposition file
1698       set_write_position(entry_position);
1699     }
1700     return nullptr;
1701   }
1702   if (!write_metadata(nm)) {
1703     if (lookup_failed() && !failed()) {
1704       // Skip this method and reposition file
1705       set_write_position(entry_position);
1706     }
1707     return nullptr;
1708   }
1709 
1710   bool has_oop_maps = false;
1711   if (nm->oop_maps() != nullptr) {
1712     if (!cache->write_oop_map_set(*nm)) {
1713       return nullptr;
1714     }
1715     has_oop_maps = true;
1716   }
1717 
1718   uint immutable_data_size = nm->immutable_data_size();
1719   n = write_bytes(nm->immutable_data_begin(), immutable_data_size);
1720   if (n != immutable_data_size) {
1721     return nullptr;
1722   }
1723 
1724   JavaThread* thread = JavaThread::current();
1725   HandleMark hm(thread);
1726   GrowableArray<Handle> oop_list;
1727   GrowableArray<Metadata*> metadata_list;
1728 
1729   nm->create_reloc_immediates_list(thread, oop_list, metadata_list);
1730   if (!write_nmethod_reloc_immediates(oop_list, metadata_list)) {
1731     if (lookup_failed() && !failed()) {
1732       // Skip this method and reposition file
1733       set_write_position(entry_position);
1734     }
1735     return nullptr;
1736   }
1737 
1738   if (!write_relocations(*nm, &oop_list, &metadata_list)) {
1739     return nullptr;
1740   }
1741 
1742 #ifndef PRODUCT
1743   if (!cache->write_asm_remarks(nm->asm_remarks(), /* use_string_table */ false)) {
1744     return nullptr;
1745   }
1746   if (!cache->write_dbg_strings(nm->dbg_strings(), /* use_string_table */ false)) {
1747     return nullptr;
1748   }
1749 #endif /* PRODUCT */
1750 
1751   uint entry_size = _write_position - entry_position;
1752   AOTCodeEntry* entry = new (this) AOTCodeEntry(AOTCodeEntry::Nmethod, id,
1753                                                 entry_position, entry_size,
1754                                                 name_offset, name_size,
1755                                                 blob_offset, has_oop_maps,
1756                                                 nm->content_begin(), comp_level, comp_id,
1757                                                 nm->has_clinit_barriers(), for_preload);
1758 #ifdef ASSERT
1759   if (nm->has_clinit_barriers() || for_preload) {
1760     assert(for_preload, "sanity");
1761   }
1762 #endif
1763   {
1764     ResourceMark rm;
1765     const char* name = nm->method()->name_and_sig_as_C_string();
1766     log_info(aot, codecache, nmethod)("%d (L%d): Wrote nmethod '%s'%s to AOT Code Cache",
1767                            comp_id, (int)comp_level, name, (for_preload ? " (for preload)" : ""));
1768   }
1769   if (VerifyAOTCode) {
1770     return nullptr;
1771   }
1772   return entry;
1773 }
1774 
1775 bool AOTCodeCache::load_nmethod(ciEnv* env, ciMethod* target, int entry_bci, AbstractCompiler* compiler, CompLevel comp_level) {
1776   if (!is_using_code()) {
1777     return false;
1778   }
1779   AOTCodeCache* cache = open_for_use();
1780   if (cache == nullptr) {
1781     return false;
1782   }
1783   assert(entry_bci == InvocationEntryBci, "unexpected entry_bci=%d", entry_bci);
1784   TraceTime t1("Total time to load AOT code", &_t_totalLoad, enable_timers(), false);
1785   CompileTask* task = env->task();
1786   task->mark_aot_load_start(os::elapsed_counter());
1787   AOTCodeEntry* entry = task->aot_code_entry();
1788   bool preload = task->preload();
1789   assert(entry != nullptr, "sanity");
1790   if (log_is_enabled(Info, aot, codecache, nmethod)) {
1791     VM_ENTRY_MARK;
1792     ResourceMark rm;
1793     methodHandle method(THREAD, target->get_Method());
1794     const char* target_name = method->name_and_sig_as_C_string();
1795     uint id = AOTCacheAccess::convert_method_to_offset(method());
1796     bool clinit_brs = entry->has_clinit_barriers();
1797     log_info(aot, codecache, nmethod)("%d (L%d): %s nmethod '%s' (id: " UINT32_FORMAT_X_0 "%s)",
1798                                       task->compile_id(), task->comp_level(), (preload ? "Preloading" : "Reading"),
1799                                       target_name, id, (clinit_brs ? ", has clinit barriers" : ""));
1800   }
1801   ReadingMark rdmk;
1802   if (rdmk.failed()) {
1803     // Cache is closed, cannot touch anything.
1804     return false;
1805   }
1806 
1807   AOTCodeReader reader(cache, entry, task);
1808   bool success = reader.compile_nmethod(env, target, compiler);
1809   if (success) {
1810     task->set_num_inlined_bytecodes(entry->num_inlined_bytecodes());
1811   } else {
1812     entry->set_load_fail();
1813     entry->set_not_entrant();
1814   }
1815   task->mark_aot_load_finish(os::elapsed_counter());
1816   return success;
1817 }
1818 
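     // Reconstruct and register an nmethod from the cache entry attached to the
     // compile task. The sections are read back in the order write_nmethod()
     // wrote them (archived nmethod, relocation data, oops/metadata, oop maps,
     // immutable data, relocation immediates) and passed to
     // ciEnv::register_aot_method() together with this reader; the dependencies
     // are taken from the archived immutable data.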
1819 bool AOTCodeReader::compile_nmethod(ciEnv* env, ciMethod* target, AbstractCompiler* compiler) {
1820   CompileTask* task = env->task();
1821   AOTCodeEntry* aot_code_entry = (AOTCodeEntry*)_entry;
1822   nmethod* nm = nullptr;
1823 
1824   uint entry_position = aot_code_entry->offset();
1825   uint archived_nm_offset = entry_position + aot_code_entry->code_offset();
1826   nmethod* archived_nm = (nmethod*)addr(archived_nm_offset);
1827   set_read_position(archived_nm_offset + archived_nm->size());
1828 
1829   OopRecorder* oop_recorder = new OopRecorder(env->arena());
1830   env->set_oop_recorder(oop_recorder);
1831 
1832   uint offset;
1833 
1834   offset = read_position();
1835   address reloc_data = (address)addr(offset);
1836   offset += archived_nm->relocation_size();
1837   set_read_position(offset);
1838 
1839   // Read oops and metadata
1840   VM_ENTRY_MARK
1841   GrowableArray<Handle> oop_list;
1842   GrowableArray<Metadata*> metadata_list;
1843 
1844   if (!read_oop_metadata_list(THREAD, target, oop_list, metadata_list, oop_recorder)) {
1845     return false;
1846   }
1847 
1848   ImmutableOopMapSet* oopmaps = read_oop_map_set();
1849 
1850   offset = read_position();
1851   address immutable_data = (address)addr(offset);
1852   offset += archived_nm->immutable_data_size();
1853   set_read_position(offset);
1854 
1855   GrowableArray<Handle> reloc_immediate_oop_list;
1856   GrowableArray<Metadata*> reloc_immediate_metadata_list;
1857   if (!read_oop_metadata_list(THREAD, target, reloc_immediate_oop_list, reloc_immediate_metadata_list, nullptr)) {
1858     return false;
1859   }
1860 
1861   // Read Dependencies (compressed already)
1862   Dependencies* dependencies = new Dependencies(env);
1863   dependencies->set_content(immutable_data, archived_nm->dependencies_size());
1864   env->set_dependencies(dependencies);
1865 
1866   const char* name = addr(entry_position + aot_code_entry->name_offset());
1867 
1868   if (VerifyAOTCode) {
1869     return false;
1870   }
1871 
1872   TraceTime t1("Total time to register AOT nmethod", &_t_totalRegister, enable_timers(), false);
1873   nm = env->register_aot_method(THREAD,
1874                                 target,
1875                                 compiler,
1876                                 archived_nm,
1877                                 reloc_data,
1878                                 oop_list,
1879                                 metadata_list,
1880                                 oopmaps,
1881                                 immutable_data,
1882                                 reloc_immediate_oop_list,
1883                                 reloc_immediate_metadata_list,
1884                                 this);
1885   bool success = task->is_success();
1886   if (success) {
1887     log_info(aot, codecache, nmethod)("%d (L%d): Read nmethod '%s' from AOT Code Cache", compile_id(), comp_level(), name);
1888 #ifdef ASSERT
1889     LogStreamHandle(Debug, aot, codecache, nmethod) log;
1890     if (log.is_enabled()) {
1891       FlagSetting fs(PrintRelocations, true);
1892       nm->print_on(&log);
1893       nm->decode2(&log);
1894     }
1895 #endif
1896   }
1897 
1898   return success;
1899 }
1900 
1901 bool skip_preload(methodHandle mh) {
1902   if (!mh->method_holder()->is_loaded()) {
1903     return true;
1904   }
1905   DirectiveSet* directives = DirectivesStack::getMatchingDirective(mh, nullptr);
1906   if (directives->DontPreloadOption) {
1907     LogStreamHandle(Info, aot, codecache, init) log;
1908     if (log.is_enabled()) {
1909       log.print("Exclude preloading code for ");
1910       mh->print_value_on(&log);
1911     }
1912     return true;
1913   }
1914   return false;
1915 }
1916 
1917 void AOTCodeCache::preload_code(JavaThread* thread) {
1918   if (!is_using_code()) {
1919     return;
1920   }
1921   if ((DisableAOTCode & (1 << 3)) != 0) {
1922     return; // no preloaded code (level 5)
1923   }
1924   _cache->preload_aot_code(thread);
1925 }
1926 
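     // Iterate over the preload entries recorded in the cache header (the range
     // [AOTCodePreloadStart, AOTCodePreloadStop) capped by the preload entry
     // count), link the method holders if needed, attach each AOTCodeEntry to
     // its Method and submit a CompLevel_full_optimization compilation with
     // CompileTask::Reason_Preload so the AOT code is installed through the
     // regular CompileBroker path.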
1927 void AOTCodeCache::preload_aot_code(TRAPS) {
1928   if (CompilationPolicy::compiler_count(CompLevel_full_optimization) == 0) {
1929     // Since we reuse the CompileBroker API to install AOT code, we're required to have a JIT compiler for the
1930     // level we want (that is CompLevel_full_optimization).
1931     return;
1932   }
1933   TraceTime t1("Total time to preload AOT code", &_t_totalPreload, enable_timers(), false);
1934   assert(_for_use, "sanity");
1936   uint preload_entries_count = _load_header->preload_entries_count();
1937   if (preload_entries_count > 0) {
1938     log_info(aot, codecache, init)("Load %d preload entries from AOT Code Cache", preload_entries_count);
1939     AOTCodeEntry* preload_entry = (AOTCodeEntry*)addr(_load_header->preload_entries_offset());
1940     uint count = MIN2(preload_entries_count, AOTCodePreloadStop);
1941     for (uint i = AOTCodePreloadStart; i < count; i++) {
1942       AOTCodeEntry* entry = &preload_entry[i];
1943       if (entry->not_entrant()) {
1944         continue;
1945       }
1946       methodHandle mh(THREAD, entry->method());
1947       assert((mh.not_null() && AOTMetaspace::in_aot_cache((address)mh())), "sanity");
1948       if (skip_preload(mh)) {
1949         continue; // Exclude preloading for this method
1950       }
1951       assert(mh->method_holder()->is_loaded(), "");
1952       if (!mh->method_holder()->is_linked()) {
1953         assert(!HAS_PENDING_EXCEPTION, "");
1954         mh->method_holder()->link_class(THREAD);
1955         if (HAS_PENDING_EXCEPTION) {
1956           LogStreamHandle(Info, aot, codecache) log;
1957           if (log.is_enabled()) {
1958             ResourceMark rm;
1959             log.print("Linkage failed for %s: ", mh->method_holder()->external_name());
1960             THREAD->pending_exception()->print_value_on(&log);
1961             if (log_is_enabled(Debug, aot, codecache)) {
1962               THREAD->pending_exception()->print_on(&log);
1963             }
1964           }
1965           CLEAR_PENDING_EXCEPTION;
1966         }
1967       }
1968       if (mh->aot_code_entry() != nullptr) {
1969       // A second C2 compilation of the same method could happen for
1970       // different reasons without marking the first entry as not entrant.
1971         continue; // Keep old entry to avoid issues
1972       }
1973       mh->set_aot_code_entry(entry);
1974       CompileBroker::compile_method(mh, InvocationEntryBci, CompLevel_full_optimization, 0, false, CompileTask::Reason_Preload, CHECK);
1975     }
1976   }
1977 }
1978 
1979 // ------------ process code and data --------------
1980 
1981 // Can't use -1. It is a valid value for a jump-to-itself destination
1982 // used by a static call stub: see NativeJump::jump_destination().
1983 #define BAD_ADDRESS_ID -2
1984 
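     // For every relocation in the blob one uint of side data is collected:
     // the index into the supplied oop/metadata lists for immediate oop and
     // metadata relocations, the AOT address-table id of the target for call,
     // trampoline, runtime-call and external-word relocations, and 0 for
     // relocation types that need no extra data. The count is written first,
     // followed by one uint per relocation. Returning false makes the caller
     // skip the entry (unsupported relocation types or addresses missing from
     // the table).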
1985 bool AOTCodeCache::write_relocations(CodeBlob& code_blob, GrowableArray<Handle>* oop_list, GrowableArray<Metadata*>* metadata_list) {
1986   GrowableArray<uint> reloc_data;
1987   RelocIterator iter(&code_blob);
1988   LogStreamHandle(Trace, aot, codecache, reloc) log;
1989   while (iter.next()) {
1990     int idx = reloc_data.append(0); // default value
1991     switch (iter.type()) {
1992       case relocInfo::none:
1993       break;
1994       case relocInfo::oop_type: {
1995         oop_Relocation* r = (oop_Relocation*)iter.reloc();
1996         if (r->oop_is_immediate()) {
1997           assert(oop_list != nullptr, "sanity check");
1998           // store index of oop in the reloc immediate oop list
1999           Handle h(JavaThread::current(), r->oop_value());
2000           int oop_idx = oop_list->find(h);
2001           assert(oop_idx != -1, "sanity check");
2002           reloc_data.at_put(idx, (uint)oop_idx);
2003         }
2004         break;
2005       }
2006       case relocInfo::metadata_type: {
2007         metadata_Relocation* r = (metadata_Relocation*)iter.reloc();
2008         if (r->metadata_is_immediate()) {
2009           assert(metadata_list != nullptr, "sanity check");
2010           // store index of metadata in the reloc immediate metadata list
2011           int metadata_idx = metadata_list->find(r->metadata_value());
2012           assert(metadata_idx != -1, "sanity check");
2013           reloc_data.at_put(idx, (uint)metadata_idx);
2014         }
2015         break;
2016       }
2017       case relocInfo::virtual_call_type:  // Fall through. They all call resolve_*_call blobs.
2018       case relocInfo::opt_virtual_call_type:
2019       case relocInfo::static_call_type: {
2020         CallRelocation* r = (CallRelocation*)iter.reloc();
2021         address dest = r->destination();
2022         if (dest == r->addr()) { // possible call via trampoline on Aarch64
2023           dest = (address)-1;    // do nothing in this case when loading this relocation
2024         }
2025         int id = _table->id_for_address(dest, iter, &code_blob);
2026         if (id == BAD_ADDRESS_ID) {
2027           return false;
2028         }
2029         reloc_data.at_put(idx, id);
2030         break;
2031       }
2032       case relocInfo::trampoline_stub_type: {
2033         address dest = ((trampoline_stub_Relocation*)iter.reloc())->destination();
2034         int id = _table->id_for_address(dest, iter, &code_blob);
2035         if (id == BAD_ADDRESS_ID) {
2036           return false;
2037         }
2038         reloc_data.at_put(idx, id);
2039         break;
2040       }
2041       case relocInfo::static_stub_type:
2042         break;
2043       case relocInfo::runtime_call_type: {
2044         // Record the address-table id of the runtime destination
2045         CallRelocation* r = (CallRelocation*)iter.reloc();
2046         address dest = r->destination();
2047         if (dest == r->addr()) { // possible call via trampoline on Aarch64
2048           dest = (address)-1;    // do nothing in this case when loading this relocation
2049         }
2050         int id = _table->id_for_address(dest, iter, &code_blob);
2051         if (id == BAD_ADDRESS_ID) {
2052           return false;
2053         }
2054         reloc_data.at_put(idx, id);
2055         break;
2056       }
2057       case relocInfo::runtime_call_w_cp_type:
2058         log_debug(aot, codecache, reloc)("runtime_call_w_cp_type relocation is not implemented");
2059         return false;
2060       case relocInfo::external_word_type: {
2061         // Record the address-table id of the external target
2062         address target = ((external_word_Relocation*)iter.reloc())->target();
2063         int id = _table->id_for_address(target, iter, &code_blob);
2064         if (id == BAD_ADDRESS_ID) {
2065           return false;
2066         }
2067         reloc_data.at_put(idx, id);
2068         break;
2069       }
2070       case relocInfo::internal_word_type:
2071         break;
2072       case relocInfo::section_word_type:
2073         break;
2074       case relocInfo::poll_type:
2075         break;
2076       case relocInfo::poll_return_type:
2077         break;
2078       case relocInfo::post_call_nop_type:
2079         break;
2080       case relocInfo::entry_guard_type:
2081         break;
2082       default:
2083         log_debug(aot, codecache, reloc)("relocation %d unimplemented", (int)iter.type());
2084         return false;
2085         break;
2086     }
2087     if (log.is_enabled()) {
2088       iter.print_current_on(&log);
2089     }
2090   }
2091 
2092   // Write additional relocation data: uint per relocation
2093   // Write the count first
2094   int count = reloc_data.length();
2095   write_bytes(&count, sizeof(int));
2096   for (GrowableArrayIterator<uint> iter = reloc_data.begin();
2097        iter != reloc_data.end(); ++iter) {
2098     uint value = *iter;
2099     int n = write_bytes(&value, sizeof(uint));
2100     if (n != sizeof(uint)) {
2101       return false;
2102     }
2103   }
2104   return true;
2105 }
2106 
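     // Walk the relocations of the freshly loaded blob in the same order
     // write_relocations() recorded them, consuming one uint of side data per
     // relocation: immediate oops/metadata are installed from the supplied
     // lists, call and runtime targets are resolved through the cache's address
     // table, and internal/section word relocations are rebased from the
     // dump-time content start to the new blob's content start.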
2107 void AOTCodeReader::fix_relocations(CodeBlob* code_blob, GrowableArray<Handle>* oop_list, GrowableArray<Metadata*>* metadata_list) {
2108   LogStreamHandle(Trace, aot, reloc) log;
2109   uint offset = read_position();
2110   int count = *(int*)addr(offset);
2111   offset += sizeof(int);
2112   if (log.is_enabled()) {
2113     log.print_cr("======== extra relocations count=%d", count);
2114   }
2115   uint* reloc_data = (uint*)addr(offset);
2116   offset += (count * sizeof(uint));
2117   set_read_position(offset);
2118 
2119   RelocIterator iter(code_blob);
2120   int j = 0;
2121   while (iter.next()) {
2122     switch (iter.type()) {
2123       case relocInfo::none:
2124         break;
2125       case relocInfo::oop_type: {
2126         assert(code_blob->is_nmethod(), "sanity check");
2127         oop_Relocation* r = (oop_Relocation*)iter.reloc();
2128         if (r->oop_is_immediate()) {
2129           assert(oop_list != nullptr, "sanity check");
2130           Handle h = oop_list->at(reloc_data[j]);
2131           r->set_value(cast_from_oop<address>(h()));
2132         } else {
2133           r->fix_oop_relocation();
2134         }
2135         break;
2136       }
2137       case relocInfo::metadata_type: {
2138         assert(code_blob->is_nmethod(), "sanity check");
2139         metadata_Relocation* r = (metadata_Relocation*)iter.reloc();
2140         Metadata* m;
2141         if (r->metadata_is_immediate()) {
2142           assert(metadata_list != nullptr, "sanity check");
2143           m = metadata_list->at(reloc_data[j]);
2144         } else {
2145           // Get already updated value from nmethod.
2146           int index = r->metadata_index();
2147           m = code_blob->as_nmethod()->metadata_at(index);
2148         }
2149         r->set_value((address)m);
2150         break;
2151       }
2152       case relocInfo::virtual_call_type:   // Fall through. They all call resolve_*_call blobs.
2153       case relocInfo::opt_virtual_call_type:
2154       case relocInfo::static_call_type: {
2155         address dest = _cache->address_for_id(reloc_data[j]);
2156         if (dest != (address)-1) {
2157           ((CallRelocation*)iter.reloc())->set_destination(dest);
2158         }
2159         break;
2160       }
2161       case relocInfo::trampoline_stub_type: {
2162         address dest = _cache->address_for_id(reloc_data[j]);
2163         if (dest != (address)-1) {
2164           ((trampoline_stub_Relocation*)iter.reloc())->set_destination(dest);
2165         }
2166         break;
2167       }
2168       case relocInfo::static_stub_type:
2169         break;
2170       case relocInfo::runtime_call_type: {
2171         address dest = _cache->address_for_id(reloc_data[j]);
2172         if (dest != (address)-1) {
2173           ((CallRelocation*)iter.reloc())->set_destination(dest);
2174         }
2175         break;
2176       }
2177       case relocInfo::runtime_call_w_cp_type:
2178         // this relocation should not be in cache (see write_relocations)
2179         assert(false, "runtime_call_w_cp_type relocation is not implemented");
2180         break;
2181       case relocInfo::external_word_type: {
2182         address target = _cache->address_for_id(reloc_data[j]);
2183         // Add external address to global table
2184         int index = ExternalsRecorder::find_index(target);
2185         // Update index in relocation
2186         Relocation::add_jint(iter.data(), index);
2187         external_word_Relocation* reloc = (external_word_Relocation*)iter.reloc();
2188         assert(reloc->target() == target, "sanity");
2189         reloc->set_value(target); // Patch address in the code
2190         break;
2191       }
2192       case relocInfo::internal_word_type: {
2193         internal_word_Relocation* r = (internal_word_Relocation*)iter.reloc();
2194         r->fix_relocation_after_aot_load(aot_code_entry()->dumptime_content_start_addr(), code_blob->content_begin());
2195         break;
2196       }
2197       case relocInfo::section_word_type: {
2198         section_word_Relocation* r = (section_word_Relocation*)iter.reloc();
2199         r->fix_relocation_after_aot_load(aot_code_entry()->dumptime_content_start_addr(), code_blob->content_begin());
2200         break;
2201       }
2202       case relocInfo::poll_type:
2203         break;
2204       case relocInfo::poll_return_type:
2205         break;
2206       case relocInfo::post_call_nop_type:
2207         break;
2208       case relocInfo::entry_guard_type:
2209         break;
2210       default:
2211         assert(false,"relocation %d unimplemented", (int)iter.type());
2212         break;
2213     }
2214     if (log.is_enabled()) {
2215       iter.print_current_on(&log);
2216     }
2217     j++;
2218   }
2219   assert(j == count, "sanity");
2220 }
2221 
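     // Write the oops and metadata referenced as immediates from the nmethod's
     // relocations: an int count followed by the encoded oops, then an int
     // count followed by the encoded metadata.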
2222 bool AOTCodeCache::write_nmethod_reloc_immediates(GrowableArray<Handle>& oop_list, GrowableArray<Metadata*>& metadata_list) {
2223   int count = oop_list.length();
2224   if (!write_bytes(&count, sizeof(int))) {
2225     return false;
2226   }
2227   for (GrowableArrayIterator<Handle> iter = oop_list.begin();
2228        iter != oop_list.end(); ++iter) {
2229     Handle h = *iter;
2230     if (!write_oop(h())) {
2231       return false;
2232     }
2233   }
2234 
2235   count = metadata_list.length();
2236   if (!write_bytes(&count, sizeof(int))) {
2237     return false;
2238   }
2239   for (GrowableArrayIterator<Metadata*> iter = metadata_list.begin();
2240        iter != metadata_list.end(); ++iter) {
2241     Metadata* m = *iter;
2242     if (!write_metadata(m)) {
2243       return false;
2244     }
2245   }
2246   return true;
2247 }
2248 
2249 bool AOTCodeCache::write_metadata(nmethod* nm) {
2250   int count = nm->metadata_count()-1;
2251   if (!write_bytes(&count, sizeof(int))) {
2252     return false;
2253   }
2254   for (Metadata** p = nm->metadata_begin(); p < nm->metadata_end(); p++) {
2255     if (!write_metadata(*p)) {
2256       return false;
2257     }
2258   }
2259   return true;
2260 }
2261 
2262 bool AOTCodeCache::write_metadata(Metadata* m) {
2263   uint n = 0;
2264   if (m == nullptr) {
2265     DataKind kind = DataKind::Null;
2266     n = write_bytes(&kind, sizeof(int));
2267     if (n != sizeof(int)) {
2268       return false;
2269     }
2270   } else if (m == (Metadata*)Universe::non_oop_word()) {
2271     DataKind kind = DataKind::No_Data;
2272     n = write_bytes(&kind, sizeof(int));
2273     if (n != sizeof(int)) {
2274       return false;
2275     }
2276   } else if (m->is_klass()) {
2277     if (!write_klass((Klass*)m)) {
2278       return false;
2279     }
2280   } else if (m->is_method()) {
2281     if (!write_method((Method*)m)) {
2282       return false;
2283     }
2284   } else if (m->is_methodCounters()) {
2285     DataKind kind = DataKind::MethodCnts;
2286     n = write_bytes(&kind, sizeof(int));
2287     if (n != sizeof(int)) {
2288       return false;
2289     }
2290     if (!write_method(((MethodCounters*)m)->method())) {
2291       return false;
2292     }
2293     log_debug(aot, codecache, metadata)("%d (L%d): Write MethodCounters : " INTPTR_FORMAT, compile_id(), comp_level(), p2i(m));
2294   } else { // Not supported
2295     fatal("metadata : " INTPTR_FORMAT " unimplemented", p2i(m));
2296     return false;
2297   }
2298   return true;
2299 }
2300 
2301 Metadata* AOTCodeReader::read_metadata(const methodHandle& comp_method) {
2302   uint code_offset = read_position();
2303   Metadata* m = nullptr;
2304   DataKind kind = *(DataKind*)addr(code_offset);
2305   code_offset += sizeof(DataKind);
2306   set_read_position(code_offset);
2307   if (kind == DataKind::Null) {
2308     m = (Metadata*)nullptr;
2309   } else if (kind == DataKind::No_Data) {
2310     m = (Metadata*)Universe::non_oop_word();
2311   } else if (kind == DataKind::Klass) {
2312     m = (Metadata*)read_klass(comp_method);
2313   } else if (kind == DataKind::Method) {
2314     m = (Metadata*)read_method(comp_method);
2315   } else if (kind == DataKind::MethodCnts) {
2316     kind = *(DataKind*)addr(code_offset);
2317     code_offset += sizeof(DataKind);
2318     set_read_position(code_offset);
2319     m = (Metadata*)read_method(comp_method);
2320     if (m != nullptr) {
2321       Method* method = (Method*)m;
2322       m = method->get_method_counters(Thread::current());
2323       if (m == nullptr) {
2324         set_lookup_failed();
2325         log_debug(aot, codecache, metadata)("%d (L%d): Failed to get MethodCounters", compile_id(), comp_level());
2326       } else {
2327         log_debug(aot, codecache, metadata)("%d (L%d): Read MethodCounters : " INTPTR_FORMAT, compile_id(), comp_level(), p2i(m));
2328       }
2329     }
2330   } else {
2331     set_lookup_failed();
2332     log_debug(aot, codecache, metadata)("%d (L%d): Unknown metadata's kind: %d", compile_id(), comp_level(), (int)kind);
2333   }
2334   return m;
2335 }
2336 
2337 bool AOTCodeCache::write_method(Method* method) {
2338   ResourceMark rm; // For printing the method's name
2339   if (AOTCacheAccess::can_generate_aot_code(method)) {
2340     DataKind kind = DataKind::Method;
2341     uint n = write_bytes(&kind, sizeof(int));
2342     if (n != sizeof(int)) {
2343       return false;
2344     }
2345     uint method_offset = AOTCacheAccess::delta_from_base_address((address)method);
2346     n = write_bytes(&method_offset, sizeof(uint));
2347     if (n != sizeof(uint)) {
2348       return false;
2349     }
2350     log_debug(aot, codecache, metadata)("%d (L%d): Wrote method: %s @ 0x%08x",
2351              compile_id(), comp_level(), method->name_and_sig_as_C_string(), method_offset);
2352     return true;
2353   }
2354   log_debug(aot, codecache, metadata)("%d (L%d): Method is not archived: %s",
2355               compile_id(), comp_level(), method->name_and_sig_as_C_string());
2356   set_lookup_failed();
2357   return false;
2358 }
2359 
2360 Method* AOTCodeReader::read_method(const methodHandle& comp_method) {
2361   uint code_offset = read_position();
2362   uint method_offset = *(uint*)addr(code_offset);
2363   code_offset += sizeof(uint);
2364   set_read_position(code_offset);
2365   Method* m = AOTCacheAccess::convert_offset_to_method(method_offset);
2366   if (!AOTMetaspace::in_aot_cache((address)m)) {
2367     // Something changed in CDS
2368     set_lookup_failed();
2369     log_debug(aot, codecache, metadata)("Lookup failed for shared method: " INTPTR_FORMAT " is not in CDS ", p2i((address)m));
2370     return nullptr;
2371   }
2372   assert(m->is_method(), "sanity");
2373   ResourceMark rm;
2374   Klass* k = m->method_holder();
2375   if (!k->is_instance_klass()) {
2376     set_lookup_failed();
2377     log_debug(aot, codecache, metadata)("%d '%s' (L%d): Lookup failed for holder %s: not instance klass",
2378                   compile_id(), comp_method->name_and_sig_as_C_string(), comp_level(), k->external_name());
2379     return nullptr;
2380   } else if (!AOTMetaspace::in_aot_cache((address)k)) {
2381     set_lookup_failed();
2382     log_debug(aot, codecache, metadata)("%d '%s' (L%d): Lookup failed for holder %s: not in CDS",
2383                   compile_id(), comp_method->name_and_sig_as_C_string(), comp_level(), k->external_name());
2384     return nullptr;
2385   } else if (!InstanceKlass::cast(k)->is_loaded()) {
2386     set_lookup_failed();
2387     log_debug(aot, codecache, metadata)("%d '%s' (L%d): Lookup failed for holder %s: not loaded",
2388                   compile_id(), comp_method->name_and_sig_as_C_string(), comp_level(), k->external_name());
2389     return nullptr;
2390   } else if (!InstanceKlass::cast(k)->is_linked()) {
2391     set_lookup_failed();
2392     log_debug(aot, codecache, metadata)("%d '%s' (L%d): Lookup failed for holder %s: not linked%s",
2393                   compile_id(), comp_method->name_and_sig_as_C_string(), comp_level(), k->external_name(), (_preload ? " for code preload" : ""));
2394     return nullptr;
2395   }
2396   log_debug(aot, codecache, metadata)("%d (L%d): Shared method lookup: %s",
2397                 compile_id(), comp_level(), m->name_and_sig_as_C_string());
2398   return m;
2399 }
2400 
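     // A klass reference is encoded as DataKind::Klass, a state word packing the
     // object-array dimension (upper bits) and the instance klass initialization
     // state (bit 0), and the klass's offset from the AOT cache base address.
     // Object array klasses are stored as their bottom klass plus the dimension.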
2401 bool AOTCodeCache::write_klass(Klass* klass) {
2402   uint array_dim = 0;
2403   if (klass->is_objArray_klass()) {
2404     array_dim = ObjArrayKlass::cast(klass)->dimension();
2405     klass     = ObjArrayKlass::cast(klass)->bottom_klass(); // overwrites klass
2406   }
2407   uint init_state = 0;
2408   bool can_write = true;
2409   if (klass->is_instance_klass()) {
2410     InstanceKlass* ik = InstanceKlass::cast(klass);
2411     init_state = (ik->is_initialized() ? 1 : 0);
2412     can_write = AOTCacheAccess::can_generate_aot_code_for(ik);
2413   } else {
2414     can_write = AOTCacheAccess::can_generate_aot_code(klass);
2415   }
2416   ResourceMark rm;
2417   uint state = (array_dim << 1) | (init_state & 1);
2418   if (can_write) {
2419     DataKind kind = DataKind::Klass;
2420     uint n = write_bytes(&kind, sizeof(int));
2421     if (n != sizeof(int)) {
2422       return false;
2423     }
2424     // Record the state of instance klass initialization and array dimensions.
2425     n = write_bytes(&state, sizeof(int));
2426     if (n != sizeof(int)) {
2427       return false;
2428     }
2429     uint klass_offset = AOTCacheAccess::delta_from_base_address((address)klass);
2430     n = write_bytes(&klass_offset, sizeof(uint));
2431     if (n != sizeof(uint)) {
2432       return false;
2433     }
2434     log_debug(aot, codecache, metadata)("%d (L%d): Registered klass: %s%s%s @ 0x%08x",
2435              compile_id(), comp_level(), klass->external_name(),
2436              (!klass->is_instance_klass() ? "" : (init_state == 1 ? " (initialized)" : " (not-initialized)")),
2437              (array_dim > 0 ? " (object array)" : ""), klass_offset);
2438     return true;
2439   }
2440   log_debug(aot, codecache, metadata)("%d (L%d): Klass is not archived: %s%s%s",
2441               compile_id(), comp_level(), klass->external_name(),
2442               (!klass->is_instance_klass() ? "" : (init_state == 1 ? " (initialized)" : " (not-initialized)")),
2443               (array_dim > 0 ? " (object array)" : ""));
2444   set_lookup_failed();
2445   return false;
2446 }
2447 
2448 Klass* AOTCodeReader::read_klass(const methodHandle& comp_method) {
2449   uint code_offset = read_position();
2450   uint state = *(uint*)addr(code_offset);
2451   uint init_state = (state  & 1);
2452   uint array_dim  = (state >> 1);
2453   code_offset += sizeof(int);
2454   uint klass_offset = *(uint*)addr(code_offset);
2455   code_offset += sizeof(uint);
2456   set_read_position(code_offset);
2457   Klass* k = AOTCacheAccess::convert_offset_to_klass(klass_offset);
2458   if (!AOTMetaspace::in_aot_cache((address)k)) {
2459     // Something changed in CDS
2460     set_lookup_failed();
2461     log_debug(aot, codecache, metadata)("Lookup failed for shared klass: " INTPTR_FORMAT " is not in CDS ", p2i((address)k));
2462     return nullptr;
2463   }
2464   assert(k->is_klass(), "sanity");
2465   ResourceMark rm;
2466   if (k->is_instance_klass() && !InstanceKlass::cast(k)->is_loaded()) {
2467     set_lookup_failed();
2468     log_debug(aot, codecache, metadata)("%d '%s' (L%d): Lookup failed for klass %s: not loaded",
2469                      compile_id(), comp_method->name_and_sig_as_C_string(), comp_level(), k->external_name());
2470     return nullptr;
2471   } else
2472   // Allow a klass that is not yet initialized if it was also uninitialized during code caching, or when preloading code
2473   if (k->is_instance_klass() && !InstanceKlass::cast(k)->is_initialized() && (init_state == 1) && !_preload) {
2474     set_lookup_failed();
2475     log_debug(aot, codecache, metadata)("%d '%s' (L%d): Lookup failed for klass %s: not initialized",
2476                      compile_id(), comp_method->name_and_sig_as_C_string(), comp_level(), k->external_name());
2477     return nullptr;
2478   }
2479   if (array_dim > 0) {
2480     assert(k->is_instance_klass() || k->is_typeArray_klass(), "sanity check");
2481     Klass* ak = k->array_klass_or_null(array_dim);
2482     // FIXME: what would it take to create an array class on the fly?
2483 //    Klass* ak = k->array_klass(dim, JavaThread::current());
2484 //    guarantee(JavaThread::current()->pending_exception() == nullptr, "");
2485     if (ak == nullptr) {
2486       set_lookup_failed();
2487       log_debug(aot, codecache, metadata)("%d (L%d): %d-dimension array klass lookup failed: %s",
2488                        compile_id(), comp_level(), array_dim, k->external_name());
2489     }
2490     log_debug(aot, codecache, metadata)("%d (L%d): Klass lookup: %s (object array)", compile_id(), comp_level(), k->external_name());
2491     return ak;
2492   } else {
2493     log_debug(aot, codecache, metadata)("%d (L%d): Shared klass lookup: %s",
2494                   compile_id(), comp_level(), k->external_name());
2495     return k;
2496   }
2497 }
2498 
2499 bool AOTCodeCache::write_oop(jobject& jo) {
2500   oop obj = JNIHandles::resolve(jo);
2501   return write_oop(obj);
2502 }
2503 
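     // Oops are encoded with a DataKind tag: Null, No_Data (the non-oop word),
     // Primitive (basic type of a primitive mirror), Klass (other class
     // mirrors), String or MH_Oop (a permanent-heap-object index obtained from
     // AOTCacheAccess), or SysLoader/PlaLoader for the two builtin class
     // loaders. Objects that are not archived as permanent heap objects make
     // the lookup fail and the entry is abandoned.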
2504 bool AOTCodeCache::write_oop(oop obj) {
2505   DataKind kind;
2506   uint n = 0;
2507   if (obj == nullptr) {
2508     kind = DataKind::Null;
2509     n = write_bytes(&kind, sizeof(int));
2510     if (n != sizeof(int)) {
2511       return false;
2512     }
2513   } else if (cast_from_oop<void *>(obj) == Universe::non_oop_word()) {
2514     kind = DataKind::No_Data;
2515     n = write_bytes(&kind, sizeof(int));
2516     if (n != sizeof(int)) {
2517       return false;
2518     }
2519   } else if (java_lang_Class::is_instance(obj)) {
2520     if (java_lang_Class::is_primitive(obj)) {
2521       int bt = (int)java_lang_Class::primitive_type(obj);
2522       kind = DataKind::Primitive;
2523       n = write_bytes(&kind, sizeof(int));
2524       if (n != sizeof(int)) {
2525         return false;
2526       }
2527       n = write_bytes(&bt, sizeof(int));
2528       if (n != sizeof(int)) {
2529         return false;
2530       }
2531       log_debug(aot, codecache, oops)("%d (L%d): Write primitive type klass: %s", compile_id(), comp_level(), type2name((BasicType)bt));
2532     } else {
2533       Klass* klass = java_lang_Class::as_Klass(obj);
2534       if (!write_klass(klass)) {
2535         return false;
2536       }
2537     }
2538   } else if (java_lang_String::is_instance(obj)) {
2539     int k = AOTCacheAccess::get_archived_object_permanent_index(obj);  // k >= 0 means obj is a "permanent heap object"
2540     ResourceMark rm;
2541     size_t length_sz = 0;
2542     const char* string = java_lang_String::as_utf8_string(obj, length_sz);
2543     if (k >= 0) {
2544       kind = DataKind::String;
2545       n = write_bytes(&kind, sizeof(int));
2546       if (n != sizeof(int)) {
2547         return false;
2548       }
2549       n = write_bytes(&k, sizeof(int));
2550       if (n != sizeof(int)) {
2551         return false;
2552       }
2553       log_debug(aot, codecache, oops)("%d (L%d): Write String object: " PTR_FORMAT " : %s", compile_id(), comp_level(), p2i(obj), string);
2554       return true;
2555     }
2556     // Not archived String object - bailout
2557     set_lookup_failed();
2558     log_debug(aot, codecache, oops)("%d (L%d): Not archived String object: " PTR_FORMAT " : %s",
2559                                       compile_id(), comp_level(), p2i(obj), string);
2560     return false;
2561   } else if (java_lang_Module::is_instance(obj)) {
2562     fatal("Module object unimplemented");
2563   } else if (java_lang_ClassLoader::is_instance(obj)) {
2564     if (obj == SystemDictionary::java_system_loader()) {
2565       kind = DataKind::SysLoader;
2566       log_debug(aot, codecache, oops)("%d (L%d): Write ClassLoader: java_system_loader", compile_id(), comp_level());
2567     } else if (obj == SystemDictionary::java_platform_loader()) {
2568       kind = DataKind::PlaLoader;
2569       log_debug(aot, codecache, oops)("%d (L%d): Write ClassLoader: java_platform_loader", compile_id(), comp_level());
2570     } else {
2571       ResourceMark rm;
2572       set_lookup_failed();
2573       log_debug(aot, codecache, oops)("%d (L%d): Not supported Class Loader: " PTR_FORMAT " : %s",
2574                                       compile_id(), comp_level(), p2i(obj), obj->klass()->external_name());
2575       return false;
2576     }
2577     n = write_bytes(&kind, sizeof(int));
2578     if (n != sizeof(int)) {
2579       return false;
2580     }
2581   } else {
2582     ResourceMark rm;
2583     int k = AOTCacheAccess::get_archived_object_permanent_index(obj);  // k >= 0 means obj is a "permanent heap object"
2584     if (k >= 0) {
2585       kind = DataKind::MH_Oop;
2586       n = write_bytes(&kind, sizeof(int));
2587       if (n != sizeof(int)) {
2588         return false;
2589       }
2590       n = write_bytes(&k, sizeof(int));
2591       if (n != sizeof(int)) {
2592         return false;
2593       }
2594       log_debug(aot, codecache, oops)("%d (L%d): Write MH object: " PTR_FORMAT " : %s",
2595                               compile_id(), comp_level(), p2i(obj), obj->klass()->external_name());
2596       return true;
2597     }
2598     // Not archived Java object - bailout
2599     set_lookup_failed();
2600     log_debug(aot, codecache, oops)("%d (L%d): Not archived Java object: " PTR_FORMAT " : %s",
2601                               compile_id(), comp_level(), p2i(obj), obj->klass()->external_name());
2602     return false;
2603   }
2604   return true;
2605 }
2606 
2607 oop AOTCodeReader::read_oop(JavaThread* thread, const methodHandle& comp_method) {
2608   uint code_offset = read_position();
2609   oop obj = nullptr;
2610   DataKind kind = *(DataKind*)addr(code_offset);
2611   code_offset += sizeof(DataKind);
2612   set_read_position(code_offset);
2613   if (kind == DataKind::Null) {
2614     return nullptr;
2615   } else if (kind == DataKind::No_Data) {
2616     return cast_to_oop(Universe::non_oop_word());
2617   } else if (kind == DataKind::Klass) {
2618     Klass* k = read_klass(comp_method);
2619     if (k == nullptr) {
2620       return nullptr;
2621     }
2622     obj = k->java_mirror();
2623     if (obj == nullptr) {
2624       set_lookup_failed();
2625       log_debug(aot, codecache, oops)("Lookup failed for java_mirror of klass %s", k->external_name());
2626       return nullptr;
2627     }
2628   } else if (kind == DataKind::Primitive) {
2629     code_offset = read_position();
2630     int t = *(int*)addr(code_offset);
2631     code_offset += sizeof(int);
2632     set_read_position(code_offset);
2633     BasicType bt = (BasicType)t;
2634     obj = java_lang_Class::primitive_mirror(bt);
2635     log_debug(aot, codecache, oops)("%d (L%d): Read primitive type klass: %s", compile_id(), comp_level(), type2name(bt));
2636   } else if (kind == DataKind::String) {
2637     code_offset = read_position();
2638     int k = *(int*)addr(code_offset);
2639     code_offset += sizeof(int);
2640     set_read_position(code_offset);
2641     obj = AOTCacheAccess::get_archived_object(k);
2642     if (obj == nullptr) {
2643       set_lookup_failed();
2644       log_debug(aot, codecache, oops)("Lookup failed for String object");
2645       return nullptr;
2646     }
2647     assert(java_lang_String::is_instance(obj), "must be string");
2648 
2649     ResourceMark rm;
2650     size_t length_sz = 0;
2651     const char* string = java_lang_String::as_utf8_string(obj, length_sz);
2652     log_debug(aot, codecache, oops)("%d (L%d): Read String object: %s", compile_id(), comp_level(), string);
2653   } else if (kind == DataKind::SysLoader) {
2654     obj = SystemDictionary::java_system_loader();
2655     log_debug(aot, codecache, oops)("%d (L%d): Read java_system_loader", compile_id(), comp_level());
2656   } else if (kind == DataKind::PlaLoader) {
2657     obj = SystemDictionary::java_platform_loader();
2658     log_debug(aot, codecache, oops)("%d (L%d): Read java_platform_loader", compile_id(), comp_level());
2659   } else if (kind == DataKind::MH_Oop) {
2660     code_offset = read_position();
2661     int k = *(int*)addr(code_offset);
2662     code_offset += sizeof(int);
2663     set_read_position(code_offset);
2664     obj = AOTCacheAccess::get_archived_object(k);
2665     if (obj == nullptr) {
2666       set_lookup_failed();
2667       log_debug(aot, codecache, oops)("Lookup failed for MH object");
2668       return nullptr;
2669     }
2670     ResourceMark rm;
2671     log_debug(aot, codecache, oops)("%d (L%d): Read MH object: " PTR_FORMAT " : %s",
2672                               compile_id(), comp_level(), p2i(obj), obj->klass()->external_name());
2673   } else {
2674     set_lookup_failed();
2675     log_debug(aot, codecache, oops)("%d (L%d): Unknown oop's kind: %d",
2676                      compile_id(), comp_level(), (int)kind);
2677     return nullptr;
2678   }
2679   return obj;
2680 }
2681 
2682 bool AOTCodeReader::read_oop_metadata_list(JavaThread* thread, ciMethod* target, GrowableArray<Handle> &oop_list, GrowableArray<Metadata*> &metadata_list, OopRecorder* oop_recorder) {
2683   methodHandle comp_method(JavaThread::current(), target->get_Method());
2684   JavaThread* current = JavaThread::current();
2685   uint offset = read_position();
2686   int count = *(int *)addr(offset);
2687   offset += sizeof(int);
2688   set_read_position(offset);
2689   for (int i = 0; i < count; i++) {
2690     oop obj = read_oop(current, comp_method);
2691     if (lookup_failed()) {
2692       return false;
2693     }
2694     Handle h(thread, obj);
2695     oop_list.append(h);
2696     if (oop_recorder != nullptr) {
2697       jobject jo = JNIHandles::make_local(thread, obj);
2698       if (oop_recorder->is_real(jo)) {
2699         oop_recorder->find_index(jo);
2700       } else {
2701         oop_recorder->allocate_oop_index(jo);
2702       }
2703     }
2704     LogStreamHandle(Debug, aot, codecache, oops) log;
2705     if (log.is_enabled()) {
2706       log.print("%d: " INTPTR_FORMAT " ", i, p2i(obj));
2707       if (obj == Universe::non_oop_word()) {
2708         log.print("non-oop word");
2709       } else if (obj == nullptr) {
2710         log.print("nullptr-oop");
2711       } else {
2712         obj->print_value_on(&log);
2713       }
2714       log.cr();
2715     }
2716   }
2717 
2718   offset = read_position();
2719   count = *(int *)addr(offset);
2720   offset += sizeof(int);
2721   set_read_position(offset);
2722   for (int i = 0; i < count; i++) {
2723     Metadata* m = read_metadata(comp_method);
2724     if (lookup_failed()) {
2725       return false;
2726     }
2727     metadata_list.append(m);
2728     if (oop_recorder != nullptr) {
2729       if (oop_recorder->is_real(m)) {
2730         oop_recorder->find_index(m);
2731       } else {
2732         oop_recorder->allocate_metadata_index(m);
2733       }
2734     }
2735     LogTarget(Debug, aot, codecache, metadata) log;
2736     if (log.is_enabled()) {
2737       LogStream ls(log);
2738       ls.print("%d: " INTPTR_FORMAT " ", i, p2i(m));
2739       if (m == (Metadata*)Universe::non_oop_word()) {
2740         ls.print("non-metadata word");
2741       } else if (m == nullptr) {
2742         ls.print("nullptr-metadata");
2743       } else {
2744         Metadata::print_value_on_maybe_null(&ls, m);
2745       }
2746       ls.cr();
2747     }
2748   }
2749   return true;
2750 }
2751 
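     // Oop map sets are stored as a raw byte image: an int size followed by the
     // ImmutableOopMapSet bytes. The reader below returns a pointer directly
     // into the cache buffer instead of copying the data.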
2752 bool AOTCodeCache::write_oop_map_set(CodeBlob& cb) {
2753   ImmutableOopMapSet* oopmaps = cb.oop_maps();
2754   int oopmaps_size = oopmaps->nr_of_bytes();
2755   if (!write_bytes(&oopmaps_size, sizeof(int))) {
2756     return false;
2757   }
2758   uint n = write_bytes(oopmaps, oopmaps_size);
2759   if (n != (uint)oopmaps_size) {
2760     return false;
2761   }
2762   return true;
2763 }
2764 
2765 ImmutableOopMapSet* AOTCodeReader::read_oop_map_set() {
2766   uint offset = read_position();
2767   int size = *(int *)addr(offset);
2768   offset += sizeof(int);
2769   ImmutableOopMapSet* oopmaps = (ImmutableOopMapSet *)addr(offset);
2770   offset += size;
2771   set_read_position(offset);
2772   return oopmaps;
2773 }
2774 
2775 bool AOTCodeCache::write_oops(nmethod* nm) {
2776   int count = nm->oops_count()-1;
2777   if (!write_bytes(&count, sizeof(int))) {
2778     return false;
2779   }
2780   for (oop* p = nm->oops_begin(); p < nm->oops_end(); p++) {
2781     if (!write_oop(*p)) {
2782       return false;
2783     }
2784   }
2785   return true;
2786 }
2787 
2788 #ifndef PRODUCT
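     // Asm remarks and debug strings are stored as a count (reserved up front
     // and patched after iteration) followed by one record per entry. Asm
     // remark records also carry the code offset the remark is attached to.
     // With use_string_table the string is stored as an id into the AOT C
     // string table, otherwise it is stored inline as a NUL-terminated string.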
2789 bool AOTCodeCache::write_asm_remarks(AsmRemarks& asm_remarks, bool use_string_table) {
2790   // Write asm remarks
2791   uint* count_ptr = (uint *)reserve_bytes(sizeof(uint));
2792   if (count_ptr == nullptr) {
2793     return false;
2794   }
2795   uint count = 0;
2796   bool result = asm_remarks.iterate([&] (uint offset, const char* str) -> bool {
2797     log_trace(aot, codecache, stubs)("asm remark offset=%d, str='%s'", offset, str);
2798     uint n = write_bytes(&offset, sizeof(uint));
2799     if (n != sizeof(uint)) {
2800       return false;
2801     }
2802     if (use_string_table) {
2803       const char* cstr = add_C_string(str);
2804       int id = _table->id_for_C_string((address)cstr);
2805       assert(id != -1, "asm remark string '%s' not found in AOTCodeAddressTable", str);
2806       n = write_bytes(&id, sizeof(int));
2807       if (n != sizeof(int)) {
2808         return false;
2809       }
2810     } else {
2811       n = write_bytes(str, (uint)strlen(str) + 1);
2812       if (n != strlen(str) + 1) {
2813         return false;
2814       }
2815     }
2816     count += 1;
2817     return true;
2818   });
2819   *count_ptr = count;
2820   return result;
2821 }
2822 
2823 void AOTCodeReader::read_asm_remarks(AsmRemarks& asm_remarks, bool use_string_table) {
2824   // Read asm remarks
2825   uint offset = read_position();
2826   uint count = *(uint *)addr(offset);
2827   offset += sizeof(uint);
2828   for (uint i = 0; i < count; i++) {
2829     uint remark_offset = *(uint *)addr(offset);
2830     offset += sizeof(uint);
2831     const char* remark = nullptr;
2832     if (use_string_table) {
2833       int remark_string_id = *(int *)addr(offset);
2834       offset += sizeof(int);
2835       remark = (const char*)_cache->address_for_C_string(remark_string_id);
2836     } else {
2837       remark = (const char*)addr(offset);
2838       offset += (uint)strlen(remark)+1;
2839     }
2840     asm_remarks.insert(remark_offset, remark);
2841   }
2842   set_read_position(offset);
2843 }
2844 
2845 bool AOTCodeCache::write_dbg_strings(DbgStrings& dbg_strings, bool use_string_table) {
2846   // Write dbg strings
2847   uint* count_ptr = (uint *)reserve_bytes(sizeof(uint));
2848   if (count_ptr == nullptr) {
2849     return false;
2850   }
2851   uint count = 0;
2852   bool result = dbg_strings.iterate([&] (const char* str) -> bool {
2853     log_trace(aot, codecache, stubs)("dbg string=%s", str);
2854     if (use_string_table) {
2855       const char* cstr = add_C_string(str);
2856       int id = _table->id_for_C_string((address)cstr);
2857       assert(id != -1, "dbg string '%s' not found in AOTCodeAddressTable", str);
2858       uint n = write_bytes(&id, sizeof(int));
2859       if (n != sizeof(int)) {
2860         return false;
2861       }
2862     } else {
2863       uint n = write_bytes(str, (uint)strlen(str) + 1);
2864       if (n != strlen(str) + 1) {
2865         return false;
2866       }
2867     }
2868     count += 1;
2869     return true;
2870   });
2871   *count_ptr = count;
2872   return result;
2873 }
2874 
2875 void AOTCodeReader::read_dbg_strings(DbgStrings& dbg_strings, bool use_string_table) {
2876   // Read dbg strings
2877   uint offset = read_position();
2878   uint count = *(uint *)addr(offset);
2879   offset += sizeof(uint);
2880   for (uint i = 0; i < count; i++) {
2881     const char* str = nullptr;
2882     if (use_string_table) {
2883       int string_id = *(int *)addr(offset);
2884       offset += sizeof(int);
2885       str = (const char*)_cache->address_for_C_string(string_id);
2886     } else {
2887       str = (const char*)addr(offset);
2888       offset += (uint)strlen(str)+1;
2889     }
2890     dbg_strings.insert(str);
2891   }
2892   set_read_position(offset);
2893 }
2894 #endif // PRODUCT
2895 
2896 //======================= AOTCodeAddressTable ===============
2897 
2898 // Address table ids for generated routines, external addresses and C
2899 // string addresses are partitioned into positive integer ranges
2900 // defined by the following base and max values,
2901 // i.e. [_extrs_base, _extrs_base + _extrs_max - 1],
2902 //      [_stubs_base, _stubs_base + _stubs_max - 1],
2903 //      ...
2904 //      [_c_str_base, _c_str_base + _c_str_max - 1].
2905 #define _extrs_max 140
2906 #define _stubs_max 210
2907 #define _shared_blobs_max 25
2908 #define _C1_blobs_max 50
2909 #define _C2_blobs_max 25
2910 #define _blobs_max (_shared_blobs_max+_C1_blobs_max+_C2_blobs_max)
2911 #define _all_max (_extrs_max+_stubs_max+_blobs_max)
2912 
2913 #define _extrs_base 0
2914 #define _stubs_base (_extrs_base + _extrs_max)
2915 #define _shared_blobs_base (_stubs_base + _stubs_max)
2916 #define _C1_blobs_base (_shared_blobs_base + _shared_blobs_max)
2917 #define _C2_blobs_base (_C1_blobs_base + _C1_blobs_max)
2918 #define _blobs_end  (_shared_blobs_base + _blobs_max)
2919 #if (_C2_blobs_base >= _all_max)
2920 #error AOTCodeAddressTable ranges need adjusting
2921 #endif
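     // With the current max values the id ranges are:
     //   _extrs        [  0, 139]
     //   _stubs        [140, 349]
     //   _shared_blobs [350, 374]
     //   _C1_blobs     [375, 424]
     //   _C2_blobs     [425, 449]
     // with _all_max == _blobs_end == 450. C string ids start at _all_max
     // (see _c_str_base below).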
2922 
2923 #define SET_ADDRESS(type, addr)                           \
2924   {                                                       \
2925     type##_addr[type##_length++] = (address) (addr);      \
2926     assert(type##_length <= type##_max, "increase size"); \
2927   }
2928 
2929 static bool initializing_extrs = false;
2930 
2931 void AOTCodeAddressTable::init_extrs() {
2932   if (_extrs_complete || initializing_extrs) return; // Done already
2933 
2934   assert(_blobs_end <= _all_max, "AOTCodeAddressTable ranges need adjusting");
2935 
2936   initializing_extrs = true;
2937   _extrs_addr = NEW_C_HEAP_ARRAY(address, _extrs_max, mtCode);
2938 
2939   _extrs_length = 0;
2940 
2941   // Record addresses of VM runtime methods
2942   SET_ADDRESS(_extrs, SharedRuntime::fixup_callers_callsite);
2943   SET_ADDRESS(_extrs, SharedRuntime::handle_wrong_method);
2944   SET_ADDRESS(_extrs, SharedRuntime::handle_wrong_method_abstract);
2945   SET_ADDRESS(_extrs, SharedRuntime::handle_wrong_method_ic_miss);
2946   {
2947     // Required by Shared blobs
2948     SET_ADDRESS(_extrs, Deoptimization::fetch_unroll_info);
2949     SET_ADDRESS(_extrs, Deoptimization::unpack_frames);
2950     SET_ADDRESS(_extrs, SafepointSynchronize::handle_polling_page_exception);
2951     SET_ADDRESS(_extrs, SharedRuntime::resolve_opt_virtual_call_C);
2952     SET_ADDRESS(_extrs, SharedRuntime::resolve_virtual_call_C);
2953     SET_ADDRESS(_extrs, SharedRuntime::resolve_static_call_C);
2954     SET_ADDRESS(_extrs, SharedRuntime::throw_StackOverflowError);
2955     SET_ADDRESS(_extrs, SharedRuntime::throw_delayed_StackOverflowError);
2956     SET_ADDRESS(_extrs, SharedRuntime::throw_AbstractMethodError);
2957     SET_ADDRESS(_extrs, SharedRuntime::throw_IncompatibleClassChangeError);
2958     SET_ADDRESS(_extrs, SharedRuntime::throw_NullPointerException_at_call);
2960     SET_ADDRESS(_extrs, CompressedOops::base_addr());
2961     SET_ADDRESS(_extrs, CompressedKlassPointers::base_addr());
2962   }
2963   {
2964     // Required by initial stubs
2965     SET_ADDRESS(_extrs, StubRoutines::crc_table_addr());
2966 #if defined(AMD64)
2967     SET_ADDRESS(_extrs, StubRoutines::crc32c_table_addr());
2968 #endif
2969   }
2970 
2971 #ifdef COMPILER1
2972   {
2973     // Required by C1 blobs
2974     SET_ADDRESS(_extrs, static_cast<int (*)(oopDesc*)>(SharedRuntime::dtrace_object_alloc));
2975     SET_ADDRESS(_extrs, SharedRuntime::exception_handler_for_return_address);
2976     SET_ADDRESS(_extrs, SharedRuntime::register_finalizer);
2977     SET_ADDRESS(_extrs, Runtime1::is_instance_of);
2978     SET_ADDRESS(_extrs, Runtime1::exception_handler_for_pc);
2979     SET_ADDRESS(_extrs, Runtime1::check_abort_on_vm_exception);
2980     SET_ADDRESS(_extrs, Runtime1::new_instance);
2981     SET_ADDRESS(_extrs, Runtime1::counter_overflow);
2982     SET_ADDRESS(_extrs, Runtime1::new_type_array);
2983     SET_ADDRESS(_extrs, Runtime1::new_object_array);
2984     SET_ADDRESS(_extrs, Runtime1::new_multi_array);
2985     SET_ADDRESS(_extrs, Runtime1::throw_range_check_exception);
2986     SET_ADDRESS(_extrs, Runtime1::throw_index_exception);
2987     SET_ADDRESS(_extrs, Runtime1::throw_div0_exception);
2988     SET_ADDRESS(_extrs, Runtime1::throw_null_pointer_exception);
2989     SET_ADDRESS(_extrs, Runtime1::throw_array_store_exception);
2990     SET_ADDRESS(_extrs, Runtime1::throw_class_cast_exception);
2991     SET_ADDRESS(_extrs, Runtime1::throw_incompatible_class_change_error);
2992     SET_ADDRESS(_extrs, Runtime1::monitorenter);
2993     SET_ADDRESS(_extrs, Runtime1::monitorexit);
2994     SET_ADDRESS(_extrs, Runtime1::deoptimize);
2995     SET_ADDRESS(_extrs, Runtime1::access_field_patching);
2996     SET_ADDRESS(_extrs, Runtime1::move_klass_patching);
2997     SET_ADDRESS(_extrs, Runtime1::move_mirror_patching);
2998     SET_ADDRESS(_extrs, Runtime1::move_appendix_patching);
2999     SET_ADDRESS(_extrs, Runtime1::predicate_failed_trap);
3000     SET_ADDRESS(_extrs, Runtime1::unimplemented_entry);
3001     SET_ADDRESS(_extrs, Runtime1::trace_block_entry);
3002 #ifdef X86
3003     SET_ADDRESS(_extrs, LIR_Assembler::float_signmask_pool);
3004     SET_ADDRESS(_extrs, LIR_Assembler::double_signmask_pool);
3005     SET_ADDRESS(_extrs, LIR_Assembler::float_signflip_pool);
3006     SET_ADDRESS(_extrs, LIR_Assembler::double_signflip_pool);
3007 #endif
3008 #ifndef PRODUCT
3009     SET_ADDRESS(_extrs, os::breakpoint);
3010 #endif
3011   }
3012 #endif // COMPILER1
3013 
3014 #ifdef COMPILER2
3015   {
3016     // Required by C2 blobs
3017     SET_ADDRESS(_extrs, Deoptimization::uncommon_trap);
3018     SET_ADDRESS(_extrs, OptoRuntime::handle_exception_C);
3019     SET_ADDRESS(_extrs, OptoRuntime::new_instance_C);
3020     SET_ADDRESS(_extrs, OptoRuntime::new_array_C);
3021     SET_ADDRESS(_extrs, OptoRuntime::new_array_nozero_C);
3022     SET_ADDRESS(_extrs, OptoRuntime::multianewarray2_C);
3023     SET_ADDRESS(_extrs, OptoRuntime::multianewarray3_C);
3024     SET_ADDRESS(_extrs, OptoRuntime::multianewarray4_C);
3025     SET_ADDRESS(_extrs, OptoRuntime::multianewarray5_C);
3026     SET_ADDRESS(_extrs, OptoRuntime::multianewarrayN_C);
3027 #if INCLUDE_JVMTI
3028     SET_ADDRESS(_extrs, SharedRuntime::notify_jvmti_vthread_start);
3029     SET_ADDRESS(_extrs, SharedRuntime::notify_jvmti_vthread_end);
3030     SET_ADDRESS(_extrs, SharedRuntime::notify_jvmti_vthread_mount);
3031     SET_ADDRESS(_extrs, SharedRuntime::notify_jvmti_vthread_unmount);
3032 #endif
3033     SET_ADDRESS(_extrs, OptoRuntime::complete_monitor_locking_C);
3034     SET_ADDRESS(_extrs, OptoRuntime::monitor_notify_C);
3035     SET_ADDRESS(_extrs, OptoRuntime::monitor_notifyAll_C);
3036     SET_ADDRESS(_extrs, OptoRuntime::rethrow_C);
3037     SET_ADDRESS(_extrs, OptoRuntime::slow_arraycopy_C);
3038     SET_ADDRESS(_extrs, OptoRuntime::register_finalizer_C);
3039     SET_ADDRESS(_extrs, OptoRuntime::class_init_barrier_C);
3040 #if defined(AMD64)
3041     // Used by C2 intrinsics
3042     SET_ADDRESS(_extrs, StubRoutines::x86::arrays_hashcode_powers_of_31());
3043 #endif
3044   }
3045 #endif // COMPILER2
3046 
3047   // Record addresses of VM runtime methods and data structs
3048   BarrierSet* bs = BarrierSet::barrier_set();
3049   if (bs->is_a(BarrierSet::CardTableBarrierSet)) {
3050     SET_ADDRESS(_extrs, ci_card_table_address_as<address>());
3051   }
3052 
3053 #if INCLUDE_G1GC
3054   SET_ADDRESS(_extrs, G1BarrierSetRuntime::write_ref_field_post_entry);
3055   SET_ADDRESS(_extrs, G1BarrierSetRuntime::write_ref_field_pre_entry);
3056 #endif
3057 
3058 #if INCLUDE_SHENANDOAHGC
3059   SET_ADDRESS(_extrs, ShenandoahRuntime::arraycopy_barrier_oop);
3060   SET_ADDRESS(_extrs, ShenandoahRuntime::arraycopy_barrier_narrow_oop);
3061   SET_ADDRESS(_extrs, ShenandoahRuntime::clone_barrier);
3062   SET_ADDRESS(_extrs, ShenandoahRuntime::load_reference_barrier_strong);
3063   SET_ADDRESS(_extrs, ShenandoahRuntime::load_reference_barrier_strong_narrow);
3064   SET_ADDRESS(_extrs, ShenandoahRuntime::load_reference_barrier_weak);
3065   SET_ADDRESS(_extrs, ShenandoahRuntime::load_reference_barrier_weak_narrow);
3066   SET_ADDRESS(_extrs, ShenandoahRuntime::load_reference_barrier_phantom);
3067   SET_ADDRESS(_extrs, ShenandoahRuntime::load_reference_barrier_phantom_narrow);
3068   SET_ADDRESS(_extrs, ShenandoahRuntime::write_barrier_pre);
3069 #endif
3070 
3071 #if INCLUDE_ZGC
3072   SET_ADDRESS(_extrs, ZBarrierSetRuntime::load_barrier_on_phantom_oop_field_preloaded_addr());
3073 #if defined(AMD64)
3074   SET_ADDRESS(_extrs, &ZPointerLoadShift);
3075 #endif
3076 #endif // INCLUDE_ZGC
3077 
3078   SET_ADDRESS(_extrs, SharedRuntime::log_jni_monitor_still_held);
3079   SET_ADDRESS(_extrs, SharedRuntime::rc_trace_method_entry);
3080   SET_ADDRESS(_extrs, SharedRuntime::reguard_yellow_pages);
3081   SET_ADDRESS(_extrs, SharedRuntime::dtrace_method_exit);
3082 
3083   SET_ADDRESS(_extrs, SharedRuntime::complete_monitor_unlocking_C);
3084   SET_ADDRESS(_extrs, SharedRuntime::enable_stack_reserved_zone);
3085 #if defined(AMD64) && !defined(ZERO)
3086   SET_ADDRESS(_extrs, SharedRuntime::montgomery_multiply);
3087   SET_ADDRESS(_extrs, SharedRuntime::montgomery_square);
3088 #endif // AMD64
3089   SET_ADDRESS(_extrs, SharedRuntime::d2f);
3090   SET_ADDRESS(_extrs, SharedRuntime::d2i);
3091   SET_ADDRESS(_extrs, SharedRuntime::d2l);
3092   SET_ADDRESS(_extrs, SharedRuntime::dcos);
3093   SET_ADDRESS(_extrs, SharedRuntime::dexp);
3094   SET_ADDRESS(_extrs, SharedRuntime::dlog);
3095   SET_ADDRESS(_extrs, SharedRuntime::dlog10);
3096   SET_ADDRESS(_extrs, SharedRuntime::dpow);
3097   SET_ADDRESS(_extrs, SharedRuntime::dsin);
3098   SET_ADDRESS(_extrs, SharedRuntime::dtan);
3099   SET_ADDRESS(_extrs, SharedRuntime::f2i);
3100   SET_ADDRESS(_extrs, SharedRuntime::f2l);
3101 #ifndef ZERO
3102   SET_ADDRESS(_extrs, SharedRuntime::drem);
3103   SET_ADDRESS(_extrs, SharedRuntime::frem);
3104 #endif
3105   SET_ADDRESS(_extrs, SharedRuntime::l2d);
3106   SET_ADDRESS(_extrs, SharedRuntime::l2f);
3107   SET_ADDRESS(_extrs, SharedRuntime::ldiv);
3108   SET_ADDRESS(_extrs, SharedRuntime::lmul);
3109   SET_ADDRESS(_extrs, SharedRuntime::lrem);
3110 
3111   SET_ADDRESS(_extrs, ThreadIdentifier::unsafe_offset());
3112   SET_ADDRESS(_extrs, Thread::current);
3113 
3114   SET_ADDRESS(_extrs, os::javaTimeMillis);
3115   SET_ADDRESS(_extrs, os::javaTimeNanos);
3116   // For JFR
3117   SET_ADDRESS(_extrs, os::elapsed_counter);
3118 #if defined(X86) && !defined(ZERO)
3119   SET_ADDRESS(_extrs, Rdtsc::elapsed_counter);
3120 #endif
3121 
3122 #if INCLUDE_JVMTI
3123   SET_ADDRESS(_extrs, &JvmtiExport::_should_notify_object_alloc);
3124   SET_ADDRESS(_extrs, &JvmtiVTMSTransitionDisabler::_VTMS_notify_jvmti_events);
3125 #endif /* INCLUDE_JVMTI */
3126 
3127 #ifndef PRODUCT
3128   SET_ADDRESS(_extrs, &SharedRuntime::_partial_subtype_ctr);
3129   SET_ADDRESS(_extrs, JavaThread::verify_cross_modify_fence_failure);
3130 #endif
3131 
3132 #ifndef ZERO
3133 #if defined(AMD64) || defined(AARCH64) || defined(RISCV64)
3134   SET_ADDRESS(_extrs, MacroAssembler::debug64);
3135 #endif
3136 #if defined(AARCH64)
3137   SET_ADDRESS(_extrs, JavaThread::aarch64_get_thread_helper);
3138 #endif
3139 #endif // ZERO
3140 
3141   // addresses of fields in AOT runtime constants area
3142   address* p = AOTRuntimeConstants::field_addresses_list();
3143   while (*p != nullptr) {
3144     SET_ADDRESS(_extrs, *p++);
3145   }
3146 
3147   _extrs_complete = true;
3148   log_info(aot, codecache, init)("External addresses recorded");
3149 }
3150 
3151 static bool initializing_early_stubs = false;
3152 
3153 void AOTCodeAddressTable::init_early_stubs() {
3154   if (_complete || initializing_early_stubs) return; // Done already
3155   initializing_early_stubs = true;
3156   _stubs_addr = NEW_C_HEAP_ARRAY(address, _stubs_max, mtCode);
3157   _stubs_length = 0;
3158   SET_ADDRESS(_stubs, StubRoutines::forward_exception_entry());
3159 
3160   {
3161     // Required by C1 blobs
3162 #if defined(AMD64) && !defined(ZERO)
3163     SET_ADDRESS(_stubs, StubRoutines::x86::double_sign_flip());
3164     SET_ADDRESS(_stubs, StubRoutines::x86::d2l_fixup());
3165 #endif // AMD64
3166   }
3167 
3168   _early_stubs_complete = true;
3169   log_info(aot, codecache, init)("Early stubs recorded");
3170 }
3171 
3172 static bool initializing_shared_blobs = false;
3173 
3174 void AOTCodeAddressTable::init_shared_blobs() {
3175   if (_complete || initializing_shared_blobs) return; // Done already
3176   initializing_shared_blobs = true;
3177   address* blobs_addr = NEW_C_HEAP_ARRAY(address, _blobs_max, mtCode);
3178 
3179   // Divide the blobs_addr array into chunks because they could be initialized in parallel
3180   _shared_blobs_addr = blobs_addr;
3181   _C1_blobs_addr = _shared_blobs_addr + _shared_blobs_max; // C1 blobs addresses are stored after shared blobs
3182   _C2_blobs_addr = _C1_blobs_addr + _C1_blobs_max;         // C2 blobs addresses are stored after C1 blobs
3183 
3184   _shared_blobs_length = 0;
3185   _C1_blobs_length = 0;
3186   _C2_blobs_length = 0;
3187 
3188   // clear the address table
3189   memset(blobs_addr, 0, sizeof(address) * _blobs_max);
3190 
3191   // Record addresses of generated code blobs
3192   SET_ADDRESS(_shared_blobs, SharedRuntime::get_handle_wrong_method_stub());
3193   SET_ADDRESS(_shared_blobs, SharedRuntime::get_ic_miss_stub());
3194   SET_ADDRESS(_shared_blobs, SharedRuntime::deopt_blob()->unpack());
3195   SET_ADDRESS(_shared_blobs, SharedRuntime::deopt_blob()->unpack_with_exception());
3196   SET_ADDRESS(_shared_blobs, SharedRuntime::deopt_blob()->unpack_with_reexecution());
3197   SET_ADDRESS(_shared_blobs, SharedRuntime::deopt_blob()->unpack_with_exception_in_tls());
3198   SET_ADDRESS(_shared_blobs, SharedRuntime::get_resolve_opt_virtual_call_stub());
3199   SET_ADDRESS(_shared_blobs, SharedRuntime::get_resolve_virtual_call_stub());
3200   SET_ADDRESS(_shared_blobs, SharedRuntime::get_resolve_static_call_stub());
3201   SET_ADDRESS(_shared_blobs, SharedRuntime::deopt_blob()->entry_point());
3202   SET_ADDRESS(_shared_blobs, SharedRuntime::polling_page_safepoint_handler_blob()->entry_point());
3203   SET_ADDRESS(_shared_blobs, SharedRuntime::polling_page_return_handler_blob()->entry_point());
3204 #ifdef COMPILER2
3205   // polling_page_vectors_safepoint_handler_blob can be nullptr if AVX feature is not present or is disabled
3206   if (SharedRuntime::polling_page_vectors_safepoint_handler_blob() != nullptr) {
3207     SET_ADDRESS(_shared_blobs, SharedRuntime::polling_page_vectors_safepoint_handler_blob()->entry_point());
3208   }
3209 #endif
3210 #if INCLUDE_JVMCI
3211   if (EnableJVMCI) {
3212     SET_ADDRESS(_shared_blobs, SharedRuntime::deopt_blob()->uncommon_trap());
3213     SET_ADDRESS(_shared_blobs, SharedRuntime::deopt_blob()->implicit_exception_uncommon_trap());
3214   }
3215 #endif
3216   SET_ADDRESS(_shared_blobs, SharedRuntime::throw_AbstractMethodError_entry());
3217   SET_ADDRESS(_shared_blobs, SharedRuntime::throw_IncompatibleClassChangeError_entry());
3218   SET_ADDRESS(_shared_blobs, SharedRuntime::throw_NullPointerException_at_call_entry());
3219   SET_ADDRESS(_shared_blobs, SharedRuntime::throw_StackOverflowError_entry());
3220   SET_ADDRESS(_shared_blobs, SharedRuntime::throw_delayed_StackOverflowError_entry());
3221 
3222   assert(_shared_blobs_length <= _shared_blobs_max, "increase _shared_blobs_max to %d", _shared_blobs_length);
3223   _shared_blobs_complete = true;
3224   log_info(aot, codecache, init)("All shared blobs recorded");
3225 }
3226 
3227 static bool initializing_stubs = false;
3228 void AOTCodeAddressTable::init_stubs() {
3229   if (_complete || initializing_stubs) return; // Done already
3230   assert(_early_stubs_complete, "early stubs should be initialized");
3231   initializing_stubs = true;
3232 
3233   // Stubs
3234   SET_ADDRESS(_stubs, StubRoutines::method_entry_barrier());
3235   SET_ADDRESS(_stubs, StubRoutines::atomic_xchg_entry());
3236   SET_ADDRESS(_stubs, StubRoutines::atomic_cmpxchg_entry());
3237   SET_ADDRESS(_stubs, StubRoutines::atomic_cmpxchg_long_entry());
3238   SET_ADDRESS(_stubs, StubRoutines::atomic_add_entry());
3239   SET_ADDRESS(_stubs, StubRoutines::fence_entry());
3240 
3241   SET_ADDRESS(_stubs, StubRoutines::cont_thaw());
3242   SET_ADDRESS(_stubs, StubRoutines::cont_returnBarrier());
3243   SET_ADDRESS(_stubs, StubRoutines::cont_returnBarrierExc());
3244 
3245   JFR_ONLY(SET_ADDRESS(_stubs, SharedRuntime::jfr_write_checkpoint());)
3246 
3247   SET_ADDRESS(_stubs, StubRoutines::jbyte_arraycopy());
3248   SET_ADDRESS(_stubs, StubRoutines::jshort_arraycopy());
3249   SET_ADDRESS(_stubs, StubRoutines::jint_arraycopy());
3250   SET_ADDRESS(_stubs, StubRoutines::jlong_arraycopy());
3251   SET_ADDRESS(_stubs, StubRoutines::_oop_arraycopy);
3252   SET_ADDRESS(_stubs, StubRoutines::_oop_arraycopy_uninit);
3253 
3254   SET_ADDRESS(_stubs, StubRoutines::jbyte_disjoint_arraycopy());
3255   SET_ADDRESS(_stubs, StubRoutines::jshort_disjoint_arraycopy());
3256   SET_ADDRESS(_stubs, StubRoutines::jint_disjoint_arraycopy());
3257   SET_ADDRESS(_stubs, StubRoutines::jlong_disjoint_arraycopy());
3258   SET_ADDRESS(_stubs, StubRoutines::_oop_disjoint_arraycopy);
3259   SET_ADDRESS(_stubs, StubRoutines::_oop_disjoint_arraycopy_uninit);
3260 
3261   SET_ADDRESS(_stubs, StubRoutines::arrayof_jbyte_arraycopy());
3262   SET_ADDRESS(_stubs, StubRoutines::arrayof_jshort_arraycopy());
3263   SET_ADDRESS(_stubs, StubRoutines::arrayof_jint_arraycopy());
3264   SET_ADDRESS(_stubs, StubRoutines::arrayof_jlong_arraycopy());
3265   SET_ADDRESS(_stubs, StubRoutines::_arrayof_oop_arraycopy);
3266   SET_ADDRESS(_stubs, StubRoutines::_arrayof_oop_arraycopy_uninit);
3267 
3268   SET_ADDRESS(_stubs, StubRoutines::arrayof_jbyte_disjoint_arraycopy());
3269   SET_ADDRESS(_stubs, StubRoutines::arrayof_jshort_disjoint_arraycopy());
3270   SET_ADDRESS(_stubs, StubRoutines::arrayof_jint_disjoint_arraycopy());
3271   SET_ADDRESS(_stubs, StubRoutines::arrayof_jlong_disjoint_arraycopy());
3272   SET_ADDRESS(_stubs, StubRoutines::_arrayof_oop_disjoint_arraycopy);
3273   SET_ADDRESS(_stubs, StubRoutines::_arrayof_oop_disjoint_arraycopy_uninit);
3274 
3275   SET_ADDRESS(_stubs, StubRoutines::_checkcast_arraycopy);
3276   SET_ADDRESS(_stubs, StubRoutines::_checkcast_arraycopy_uninit);
3277 
3278   SET_ADDRESS(_stubs, StubRoutines::unsafe_arraycopy());
3279   SET_ADDRESS(_stubs, StubRoutines::generic_arraycopy());
3280 
3281   SET_ADDRESS(_stubs, StubRoutines::jbyte_fill());
3282   SET_ADDRESS(_stubs, StubRoutines::jshort_fill());
3283   SET_ADDRESS(_stubs, StubRoutines::jint_fill());
3284   SET_ADDRESS(_stubs, StubRoutines::arrayof_jbyte_fill());
3285   SET_ADDRESS(_stubs, StubRoutines::arrayof_jshort_fill());
3286   SET_ADDRESS(_stubs, StubRoutines::arrayof_jint_fill());
3287 
3288   SET_ADDRESS(_stubs, StubRoutines::data_cache_writeback());
3289   SET_ADDRESS(_stubs, StubRoutines::data_cache_writeback_sync());
3290 
3291   SET_ADDRESS(_stubs, StubRoutines::aescrypt_encryptBlock());
3292   SET_ADDRESS(_stubs, StubRoutines::aescrypt_decryptBlock());
3293   SET_ADDRESS(_stubs, StubRoutines::cipherBlockChaining_encryptAESCrypt());
3294   SET_ADDRESS(_stubs, StubRoutines::cipherBlockChaining_decryptAESCrypt());
3295   SET_ADDRESS(_stubs, StubRoutines::electronicCodeBook_encryptAESCrypt());
3296   SET_ADDRESS(_stubs, StubRoutines::electronicCodeBook_decryptAESCrypt());
3297   SET_ADDRESS(_stubs, StubRoutines::poly1305_processBlocks());
3298   SET_ADDRESS(_stubs, StubRoutines::counterMode_AESCrypt());
3299   SET_ADDRESS(_stubs, StubRoutines::ghash_processBlocks());
3300   SET_ADDRESS(_stubs, StubRoutines::chacha20Block());
3301   SET_ADDRESS(_stubs, StubRoutines::base64_encodeBlock());
3302   SET_ADDRESS(_stubs, StubRoutines::base64_decodeBlock());
3303   SET_ADDRESS(_stubs, StubRoutines::md5_implCompress());
3304   SET_ADDRESS(_stubs, StubRoutines::md5_implCompressMB());
3305   SET_ADDRESS(_stubs, StubRoutines::sha1_implCompress());
3306   SET_ADDRESS(_stubs, StubRoutines::sha1_implCompressMB());
3307   SET_ADDRESS(_stubs, StubRoutines::sha256_implCompress());
3308   SET_ADDRESS(_stubs, StubRoutines::sha256_implCompressMB());
3309   SET_ADDRESS(_stubs, StubRoutines::sha512_implCompress());
3310   SET_ADDRESS(_stubs, StubRoutines::sha512_implCompressMB());
3311   SET_ADDRESS(_stubs, StubRoutines::sha3_implCompress());
3312   SET_ADDRESS(_stubs, StubRoutines::sha3_implCompressMB());
3313   SET_ADDRESS(_stubs, StubRoutines::double_keccak());
3314   SET_ADDRESS(_stubs, StubRoutines::intpoly_assign());
3315   SET_ADDRESS(_stubs, StubRoutines::intpoly_montgomeryMult_P256());
3316   SET_ADDRESS(_stubs, StubRoutines::dilithiumAlmostNtt());
3317   SET_ADDRESS(_stubs, StubRoutines::dilithiumAlmostInverseNtt());
3318   SET_ADDRESS(_stubs, StubRoutines::dilithiumNttMult());
3319   SET_ADDRESS(_stubs, StubRoutines::dilithiumMontMulByConstant());
3320   SET_ADDRESS(_stubs, StubRoutines::dilithiumDecomposePoly());
3321 
3322   SET_ADDRESS(_stubs, StubRoutines::updateBytesCRC32());
3323   SET_ADDRESS(_stubs, StubRoutines::updateBytesCRC32C());
3324   SET_ADDRESS(_stubs, StubRoutines::updateBytesAdler32());
3325 
3326   SET_ADDRESS(_stubs, StubRoutines::multiplyToLen());
3327   SET_ADDRESS(_stubs, StubRoutines::squareToLen());
3328   SET_ADDRESS(_stubs, StubRoutines::mulAdd());
3329   SET_ADDRESS(_stubs, StubRoutines::montgomeryMultiply());
3330   SET_ADDRESS(_stubs, StubRoutines::montgomerySquare());
3331   SET_ADDRESS(_stubs, StubRoutines::bigIntegerRightShift());
3332   SET_ADDRESS(_stubs, StubRoutines::bigIntegerLeftShift());
3333   SET_ADDRESS(_stubs, StubRoutines::galoisCounterMode_AESCrypt());
3334 
3335   SET_ADDRESS(_stubs, StubRoutines::vectorizedMismatch());
3336 
3337   SET_ADDRESS(_stubs, StubRoutines::unsafe_setmemory());
3338 
3339   SET_ADDRESS(_stubs, StubRoutines::dexp());
3340   SET_ADDRESS(_stubs, StubRoutines::dlog());
3341   SET_ADDRESS(_stubs, StubRoutines::dlog10());
3342   SET_ADDRESS(_stubs, StubRoutines::dpow());
3343   SET_ADDRESS(_stubs, StubRoutines::dsin());
3344   SET_ADDRESS(_stubs, StubRoutines::dcos());
3345   SET_ADDRESS(_stubs, StubRoutines::dlibm_reduce_pi04l());
3346   SET_ADDRESS(_stubs, StubRoutines::dlibm_sin_cos_huge());
3347   SET_ADDRESS(_stubs, StubRoutines::dlibm_tan_cot_huge());
3348   SET_ADDRESS(_stubs, StubRoutines::dtan());
3349 
3350   SET_ADDRESS(_stubs, StubRoutines::f2hf_adr());
3351   SET_ADDRESS(_stubs, StubRoutines::hf2f_adr());
3352 
3353   for (int slot = 0; slot < Klass::SECONDARY_SUPERS_TABLE_SIZE; slot++) {
3354     SET_ADDRESS(_stubs, StubRoutines::lookup_secondary_supers_table_stub(slot));
3355   }
3356   SET_ADDRESS(_stubs, StubRoutines::lookup_secondary_supers_table_slow_path_stub());
3357 
3358 #if defined(AMD64) && !defined(ZERO)
3359   SET_ADDRESS(_stubs, StubRoutines::x86::d2i_fixup());
3360   SET_ADDRESS(_stubs, StubRoutines::x86::f2i_fixup());
3361   SET_ADDRESS(_stubs, StubRoutines::x86::f2l_fixup());
3362   SET_ADDRESS(_stubs, StubRoutines::x86::float_sign_mask());
3363   SET_ADDRESS(_stubs, StubRoutines::x86::float_sign_flip());
3364   SET_ADDRESS(_stubs, StubRoutines::x86::double_sign_mask());
3365   SET_ADDRESS(_stubs, StubRoutines::x86::vector_popcount_lut());
3366   SET_ADDRESS(_stubs, StubRoutines::x86::vector_float_sign_mask());
3367   SET_ADDRESS(_stubs, StubRoutines::x86::vector_float_sign_flip());
3368   SET_ADDRESS(_stubs, StubRoutines::x86::vector_double_sign_mask());
3369   SET_ADDRESS(_stubs, StubRoutines::x86::vector_double_sign_flip());
3370   SET_ADDRESS(_stubs, StubRoutines::x86::vector_int_shuffle_mask());
3371   SET_ADDRESS(_stubs, StubRoutines::x86::vector_byte_shuffle_mask());
3372   SET_ADDRESS(_stubs, StubRoutines::x86::vector_short_shuffle_mask());
3373   SET_ADDRESS(_stubs, StubRoutines::x86::vector_long_shuffle_mask());
3374   SET_ADDRESS(_stubs, StubRoutines::x86::vector_long_sign_mask());
3375   SET_ADDRESS(_stubs, StubRoutines::x86::vector_reverse_byte_perm_mask_int());
3376   SET_ADDRESS(_stubs, StubRoutines::x86::vector_reverse_byte_perm_mask_short());
3377   SET_ADDRESS(_stubs, StubRoutines::x86::vector_reverse_byte_perm_mask_long());
3378   // The iota indices are ordered by type B/S/I/L/F/D, and the offset between two types is 64.
3379   // See C2_MacroAssembler::load_iota_indices().
3380   for (int i = 0; i < 6; i++) {
3381     SET_ADDRESS(_stubs, StubRoutines::x86::vector_iota_indices() + i * 64);
3382   }
3383 #endif
3384 #if defined(AARCH64) && !defined(ZERO)
3385   SET_ADDRESS(_stubs, StubRoutines::aarch64::zero_blocks());
3386   SET_ADDRESS(_stubs, StubRoutines::aarch64::count_positives());
3387   SET_ADDRESS(_stubs, StubRoutines::aarch64::count_positives_long());
3388   SET_ADDRESS(_stubs, StubRoutines::aarch64::large_array_equals());
3389   SET_ADDRESS(_stubs, StubRoutines::aarch64::compare_long_string_LL());
3390   SET_ADDRESS(_stubs, StubRoutines::aarch64::compare_long_string_UU());
3391   SET_ADDRESS(_stubs, StubRoutines::aarch64::compare_long_string_LU());
3392   SET_ADDRESS(_stubs, StubRoutines::aarch64::compare_long_string_UL());
3393   SET_ADDRESS(_stubs, StubRoutines::aarch64::string_indexof_linear_ul());
3394   SET_ADDRESS(_stubs, StubRoutines::aarch64::string_indexof_linear_ll());
3395   SET_ADDRESS(_stubs, StubRoutines::aarch64::string_indexof_linear_uu());
3396   SET_ADDRESS(_stubs, StubRoutines::aarch64::large_byte_array_inflate());
3397   SET_ADDRESS(_stubs, StubRoutines::aarch64::spin_wait());
3398 
3399   SET_ADDRESS(_stubs, StubRoutines::aarch64::large_arrays_hashcode(T_BOOLEAN));
3400   SET_ADDRESS(_stubs, StubRoutines::aarch64::large_arrays_hashcode(T_BYTE));
3401   SET_ADDRESS(_stubs, StubRoutines::aarch64::large_arrays_hashcode(T_SHORT));
3402   SET_ADDRESS(_stubs, StubRoutines::aarch64::large_arrays_hashcode(T_CHAR));
3403   SET_ADDRESS(_stubs, StubRoutines::aarch64::large_arrays_hashcode(T_INT));
3404 #endif
3405 
3406   _complete = true;
3407   log_info(aot, codecache, init)("Stubs recorded");
3408 }
3409 
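     // C1 blob addresses are recorded in two passes: init_early_c1() covers the
     // C1 stubs up to and including c1_forward_exception_id, while init_c1()
     // records the remaining C1 stubs plus the GC barrier runtime blobs.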
3410 void AOTCodeAddressTable::init_early_c1() {
3411 #ifdef COMPILER1
3412   // Runtime1 Blobs
3413   StubId id = StubInfo::stub_base(StubGroup::C1);
3414   // include forward_exception in range we publish
3415   StubId limit = StubInfo::next(StubId::c1_forward_exception_id);
3416   for (; id != limit; id = StubInfo::next(id)) {
3417     if (Runtime1::blob_for(id) == nullptr) {
3418       log_info(aot, codecache, init)("C1 blob %s is missing", Runtime1::name_for(id));
3419       continue;
3420     }
3421     if (Runtime1::entry_for(id) == nullptr) {
3422       log_info(aot, codecache, init)("C1 blob %s is missing entry", Runtime1::name_for(id));
3423       continue;
3424     }
3425     address entry = Runtime1::entry_for(id);
3426     SET_ADDRESS(_C1_blobs, entry);
3427   }
3428 #endif // COMPILER1
3429   assert(_C1_blobs_length <= _C1_blobs_max, "increase _C1_blobs_max to %d", _C1_blobs_length);
3430   _early_c1_complete = true;
3431 }
3432 
3433 void AOTCodeAddressTable::init_c1() {
3434 #ifdef COMPILER1
3435   // Runtime1 Blobs
3436   assert(_early_c1_complete, "early C1 blobs should be initialized");
3437   StubId id = StubInfo::next(StubId::c1_forward_exception_id);
3438   StubId limit = StubInfo::next(StubInfo::stub_max(StubGroup::C1));
3439   for (; id != limit; id = StubInfo::next(id)) {
3440     if (Runtime1::blob_for(id) == nullptr) {
3441       log_info(aot, codecache, init)("C1 blob %s is missing", Runtime1::name_for(id));
3442       continue;
3443     }
3444     if (Runtime1::entry_for(id) == nullptr) {
3445       log_info(aot, codecache, init)("C1 blob %s is missing entry", Runtime1::name_for(id));
3446       continue;
3447     }
3448     address entry = Runtime1::entry_for(id);
3449     SET_ADDRESS(_C1_blobs, entry);
3450   }
3451 #if INCLUDE_G1GC
3452   if (UseG1GC) {
3453     G1BarrierSetC1* bs = (G1BarrierSetC1*)BarrierSet::barrier_set()->barrier_set_c1();
3454     address entry = bs->pre_barrier_c1_runtime_code_blob()->code_begin();
3455     SET_ADDRESS(_C1_blobs, entry);
3456     entry = bs->post_barrier_c1_runtime_code_blob()->code_begin();
3457     SET_ADDRESS(_C1_blobs, entry);
3458   }
3459 #endif // INCLUDE_G1GC
3460 #if INCLUDE_ZGC
3461   if (UseZGC) {
3462     ZBarrierSetC1* bs = (ZBarrierSetC1*)BarrierSet::barrier_set()->barrier_set_c1();
3463     SET_ADDRESS(_C1_blobs, bs->_load_barrier_on_oop_field_preloaded_runtime_stub);
3464     SET_ADDRESS(_C1_blobs, bs->_load_barrier_on_weak_oop_field_preloaded_runtime_stub);
3465     SET_ADDRESS(_C1_blobs, bs->_store_barrier_on_oop_field_with_healing);
3466     SET_ADDRESS(_C1_blobs, bs->_store_barrier_on_oop_field_without_healing);
3467   }
3468 #endif // INCLUDE_ZGC
3469 #if INCLUDE_SHENANDOAHGC
3470   if (UseShenandoahGC) {
3471     ShenandoahBarrierSetC1* bs = (ShenandoahBarrierSetC1*)BarrierSet::barrier_set()->barrier_set_c1();
3472     SET_ADDRESS(_C1_blobs, bs->pre_barrier_c1_runtime_code_blob()->code_begin());
3473     SET_ADDRESS(_C1_blobs, bs->load_reference_barrier_strong_rt_code_blob()->code_begin());
3474     SET_ADDRESS(_C1_blobs, bs->load_reference_barrier_strong_native_rt_code_blob()->code_begin());
3475     SET_ADDRESS(_C1_blobs, bs->load_reference_barrier_weak_rt_code_blob()->code_begin());
3476     SET_ADDRESS(_C1_blobs, bs->load_reference_barrier_phantom_rt_code_blob()->code_begin());
3477   }
3478 #endif // INCLUDE_SHENANDOAHGC
3479 #endif // COMPILER1
3480 
3481   assert(_C1_blobs_length <= _C1_blobs_max, "increase _C1_blobs_max to %d", _C1_blobs_length);
3482   _c1_complete = true;
3483   log_info(aot, codecache, init)("Runtime1 Blobs recorded");
3484 }
3485 
3486 void AOTCodeAddressTable::init_c2() {
3487 #ifdef COMPILER2
3488   // OptoRuntime Blobs
3489   SET_ADDRESS(_C2_blobs, OptoRuntime::uncommon_trap_blob()->entry_point());
3490   SET_ADDRESS(_C2_blobs, OptoRuntime::exception_blob()->entry_point());
3491   SET_ADDRESS(_C2_blobs, OptoRuntime::new_instance_Java());
3492   SET_ADDRESS(_C2_blobs, OptoRuntime::new_array_Java());
3493   SET_ADDRESS(_C2_blobs, OptoRuntime::new_array_nozero_Java());
3494   SET_ADDRESS(_C2_blobs, OptoRuntime::multianewarray2_Java());
3495   SET_ADDRESS(_C2_blobs, OptoRuntime::multianewarray3_Java());
3496   SET_ADDRESS(_C2_blobs, OptoRuntime::multianewarray4_Java());
3497   SET_ADDRESS(_C2_blobs, OptoRuntime::multianewarray5_Java());
3498   SET_ADDRESS(_C2_blobs, OptoRuntime::multianewarrayN_Java());
3499   SET_ADDRESS(_C2_blobs, OptoRuntime::vtable_must_compile_stub());
3500   SET_ADDRESS(_C2_blobs, OptoRuntime::complete_monitor_locking_Java());
3501   SET_ADDRESS(_C2_blobs, OptoRuntime::monitor_notify_Java());
3502   SET_ADDRESS(_C2_blobs, OptoRuntime::monitor_notifyAll_Java());
3503   SET_ADDRESS(_C2_blobs, OptoRuntime::rethrow_stub());
3504   SET_ADDRESS(_C2_blobs, OptoRuntime::slow_arraycopy_Java());
3505   SET_ADDRESS(_C2_blobs, OptoRuntime::register_finalizer_Java());
3506   SET_ADDRESS(_C2_blobs, OptoRuntime::class_init_barrier_Java());
3507 #if INCLUDE_JVMTI
3508   SET_ADDRESS(_C2_blobs, OptoRuntime::notify_jvmti_vthread_start());
3509   SET_ADDRESS(_C2_blobs, OptoRuntime::notify_jvmti_vthread_end());
3510   SET_ADDRESS(_C2_blobs, OptoRuntime::notify_jvmti_vthread_mount());
3511   SET_ADDRESS(_C2_blobs, OptoRuntime::notify_jvmti_vthread_unmount());
3512 #endif /* INCLUDE_JVMTI */
3513 #endif
3514 
3515   assert(_C2_blobs_length <= _C2_blobs_max, "increase _C2_blobs_max to %d", _C2_blobs_length);
3516   _c2_complete = true;
3517   log_info(aot, codecache, init)("OptoRuntime Blobs recorded");
3518 }
3519 #undef SET_ADDRESS
3520 
3521 AOTCodeAddressTable::~AOTCodeAddressTable() {
3522   if (_extrs_addr != nullptr) {
3523     FREE_C_HEAP_ARRAY(address, _extrs_addr);
3524   }
3525   if (_stubs_addr != nullptr) {
3526     FREE_C_HEAP_ARRAY(address, _stubs_addr);
3527   }
3528   if (_shared_blobs_addr != nullptr) {
3529     FREE_C_HEAP_ARRAY(address, _shared_blobs_addr);
3530   }
3531 }
3532 
3533 #ifdef PRODUCT
3534 #define MAX_STR_COUNT 200
3535 #else
3536 #define MAX_STR_COUNT 500
3537 #endif
3538 #define _c_str_max  MAX_STR_COUNT
3539 static const int _c_str_base = _all_max;
3540 
3541 static const char* _C_strings_in[MAX_STR_COUNT] = {nullptr}; // Incoming strings
3542 static const char* _C_strings[MAX_STR_COUNT]    = {nullptr}; // Our duplicates
3543 static int _C_strings_count = 0;
3544 static int _C_strings_s[MAX_STR_COUNT] = {0};
3545 static int _C_strings_id[MAX_STR_COUNT] = {0};
3546 static int _C_strings_used = 0;
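     // _C_strings_in keeps the original incoming pointers and _C_strings our
     // strdup'ed copies (what gets written to the cache). _C_strings_id maps a
     // table slot to its assigned output id (-1 until the string is actually
     // referenced) and _C_strings_s maps an output id back to its slot.
     // _C_strings_count counts distinct strings seen, _C_strings_used the ids
     // handed out.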
3547 
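     // The strings section consists of an array of per-string lengths (one uint
     // each) followed by the concatenated NUL-terminated strings. They are
     // copied into a C-heap buffer so they outlive the mapped cache.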
3548 void AOTCodeCache::load_strings() {
3549   uint strings_count  = _load_header->strings_count();
3550   if (strings_count == 0) {
3551     return;
3552   }
3553   uint strings_offset = _load_header->strings_offset();
3554   uint* string_lengths = (uint*)addr(strings_offset);
3555   strings_offset += (strings_count * sizeof(uint));
3556   uint strings_size = _load_header->search_table_offset() - strings_offset;
3557   // We have to keep cached strings longer than the _cache buffer
3558   // because they are referenced from compiled code which may
3559   // still be executed on VM exit after _cache is freed.
3560   char* p = NEW_C_HEAP_ARRAY(char, strings_size+1, mtCode);
3561   memcpy(p, addr(strings_offset), strings_size);
3562   _C_strings_buf = p;
3563   assert(strings_count <= MAX_STR_COUNT, "sanity");
3564   for (uint i = 0; i < strings_count; i++) {
3565     _C_strings[i] = p;
3566     uint len = string_lengths[i];
3567     _C_strings_s[i] = i;
3568     _C_strings_id[i] = i;
3569     p += len;
3570   }
3571   assert((uint)(p - _C_strings_buf) <= strings_size, "(" INTPTR_FORMAT " - " INTPTR_FORMAT ") = %u > %u", p2i(p), p2i(_C_strings_buf), (uint)(p - _C_strings_buf), strings_size);
3572   _C_strings_count = strings_count;
3573   _C_strings_used  = strings_count;
3574   log_debug(aot, codecache, init)("  Loaded %d C strings of total length %d at offset %d from AOT Code Cache", _C_strings_count, strings_size, strings_offset);
3575 }
3576 
3577 int AOTCodeCache::store_strings() {
3578   if (_C_strings_used > 0) {
3579     MutexLocker ml(AOTCodeCStrings_lock, Mutex::_no_safepoint_check_flag);
3580     uint offset = _write_position;
3581     uint length = 0;
3582     uint* lengths = (uint *)reserve_bytes(sizeof(uint) * _C_strings_used);
3583     if (lengths == nullptr) {
3584       return -1;
3585     }
3586     for (int i = 0; i < _C_strings_used; i++) {
3587       const char* str = _C_strings[_C_strings_s[i]];
3588       uint len = (uint)strlen(str) + 1;
3589       length += len;
3590       assert(len < 1000, "big string: %s", str);
3591       lengths[i] = len;
3592       uint n = write_bytes(str, len);
3593       if (n != len) {
3594         return -1;
3595       }
3596     }
3597     log_debug(aot, codecache, exit)("  Wrote %d C strings of total length %d at offset %d to AOT Code Cache",
3598                                    _C_strings_used, length, offset);
3599   }
3600   return _C_strings_used;
3601 }
3602 
3603 const char* AOTCodeCache::add_C_string(const char* str) {
3604   if (is_on_for_dump() && str != nullptr) {
3605     MutexLocker ml(AOTCodeCStrings_lock, Mutex::_no_safepoint_check_flag);
3606     AOTCodeAddressTable* table = addr_table();
3607     if (table != nullptr) {
3608       return table->add_C_string(str);
3609     }
3610   }
3611   return str;
3612 }
3613 
3614 const char* AOTCodeAddressTable::add_C_string(const char* str) {
3615   if (_extrs_complete) {
3616     // Check previously recorded strings first
3617     for (int i = 0; i < _C_strings_count; i++) {
3618       if (_C_strings_in[i] == str) {
3619         return _C_strings[i]; // Found previous one - return our duplicate
3620       } else if (strcmp(_C_strings[i], str) == 0) {
3621         return _C_strings[i];
3622       }
3623     }
3624     // Add new one
3625     if (_C_strings_count < MAX_STR_COUNT) {
3626       // The passed-in string may be freed and its memory become inaccessible.
3627       // Keep the original address but duplicate the string for future compares.
3628       _C_strings_id[_C_strings_count] = -1; // Init
3629       _C_strings_in[_C_strings_count] = str;
3630       const char* dup = os::strdup(str);
3631       _C_strings[_C_strings_count++] = dup;
3632       log_trace(aot, codecache, stringtable)("add_C_string: [%d] " INTPTR_FORMAT " '%s'", _C_strings_count, p2i(dup), dup);
3633       return dup;
3634     } else {
3635       assert(false, "Number of C strings >= MAX_STR_COUNT");
3636     }
3637   }
3638   return str;
3639 }
3640 
3641 int AOTCodeAddressTable::id_for_C_string(address str) {
3642   if (str == nullptr) {
3643     return -1;
3644   }
3645   MutexLocker ml(AOTCodeCStrings_lock, Mutex::_no_safepoint_check_flag);
3646   for (int i = 0; i < _C_strings_count; i++) {
3647     if (_C_strings[i] == (const char*)str) { // found
3648       int id = _C_strings_id[i];
3649       if (id >= 0) {
3650         assert(id < _C_strings_used, "%d >= %d", id , _C_strings_used);
3651         return id; // Found recorded
3652       }
3653       // Not recorded yet, assign a new id
3654       id = _C_strings_used++;
3655       _C_strings_s[id] = i;
3656       _C_strings_id[i] = id;
3657       return id;
3658     }
3659   }
3660   return -1;
3661 }
3662 
3663 address AOTCodeAddressTable::address_for_C_string(int idx) {
3664   assert(idx < _C_strings_count, "sanity");
3665   return (address)_C_strings[idx];
3666 }
3667 
3668 static int search_address(address addr, address* table, uint length) {
3669   for (int i = 0; i < (int)length; i++) {
3670     if (table[i] == addr) {
3671       return i;
3672     }
3673   }
3674   return BAD_ADDRESS_ID;
3675 }
3676 
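     // Map an id recorded at dump time back to a live address: ids inside the
     // table ranges resolve through the per-range address arrays, ids in the C
     // string range resolve through the C string table, and ids beyond the C
     // string range are interpreted as a byte distance from os::init (the
     // fallback encoding produced by id_for_address() below).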
3677 address AOTCodeAddressTable::address_for_id(int idx) {
3678   assert(_extrs_complete, "AOT Code Cache VM runtime addresses table is not complete");
3679   if (idx == -1) {
3680     return (address)-1;
3681   }
3682   uint id = (uint)idx;
3683   // Special case for symbols encoded as a distance relative to os::init
3684   if (id > (_c_str_base + _c_str_max)) {
3685     return (address)os::init + idx;
3686   }
3687   if (idx < 0) {
3688     fatal("Incorrect id %d for AOT Code Cache addresses table", id);
3689     return nullptr;
3690   }
3691   // no need to compare unsigned id against 0
3692   if (/* id >= _extrs_base && */ id < _extrs_length) {
3693     return _extrs_addr[id - _extrs_base];
3694   }
3695   if (id >= _stubs_base && id < _stubs_base + _stubs_length) {
3696     return _stubs_addr[id - _stubs_base];
3697   }
3701   if (id >= _shared_blobs_base && id < _shared_blobs_base + _shared_blobs_length) {
3702     return _shared_blobs_addr[id - _shared_blobs_base];
3703   }
3704   if (id >= _C1_blobs_base && id < _C1_blobs_base + _C1_blobs_length) {
3705     return _C1_blobs_addr[id - _C1_blobs_base];
3706   }
3710   if (id >= _C2_blobs_base && id < _C2_blobs_base + _C2_blobs_length) {
3711     return _C2_blobs_addr[id - _C2_blobs_base];
3712   }
3713   if (id >= _c_str_base && id < (_c_str_base + (uint)_C_strings_count)) {
3714     return address_for_C_string(id - _c_str_base);
3715   }
3716   fatal("Incorrect id %d for AOT Code Cache addresses table", id);
3717   return nullptr;
3718 }
3719 
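     // Map an address found in relocation info to a stable id: the card table
     // base and C strings are checked first, then stubs (for addresses inside
     // StubRoutines), then shared/C1/C2 code blobs, and finally external
     // runtime entries. Unknown addresses that resolve into the VM image are
     // encoded as a distance from os::init; anything else asserts.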
3720 int AOTCodeAddressTable::id_for_address(address addr, RelocIterator reloc, CodeBlob* blob) {
3721   assert(_extrs_complete, "AOT Code Cache VM runtime addresses table is not complete");
3722   int id = -1;
3723   if (addr == (address)-1) { // Static call stub has jump to itself
3724     return id;
3725   }
3726   // Check card_table_base address first since it can point to any address
3727   BarrierSet* bs = BarrierSet::barrier_set();
3728   if (bs->is_a(BarrierSet::CardTableBarrierSet)) {
3729     if (addr == ci_card_table_address_as<address>()) {
3730       id = search_address(addr, _extrs_addr, _extrs_length);
3731       assert(id > 0 && _extrs_addr[id - _extrs_base] == addr, "sanity");
3732       return id;
3733     }
3734   }
3735 
3736   // Search for C string
3737   id = id_for_C_string(addr);
3738   if (id >= 0) {
3739     return id + _c_str_base;
3740   }
3741   if (StubRoutines::contains(addr)) {
3742     // Search in stubs
3743     id = search_address(addr, _stubs_addr, _stubs_length);
3744     if (id == BAD_ADDRESS_ID) {
3745       StubCodeDesc* desc = StubCodeDesc::desc_for(addr);
3746       if (desc == nullptr) {
3747         desc = StubCodeDesc::desc_for(addr + frame::pc_return_offset);
3748       }
3749       const char* sub_name = (desc != nullptr) ? desc->name() : "<unknown>";
3750       assert(false, "Address " INTPTR_FORMAT " for Stub:%s is missing in AOT Code Cache addresses table", p2i(addr), sub_name);
3751     } else {
3752       return _stubs_base + id;
3753     }
3754   } else {
3755     CodeBlob* cb = CodeCache::find_blob(addr);
3756     if (cb != nullptr) {
3757       int id_base = _shared_blobs_base;
3758       // Search in code blobs
3759       id = search_address(addr, _shared_blobs_addr, _shared_blobs_length);
3760       if (id == BAD_ADDRESS_ID) {
3761         id_base = _C1_blobs_base;
3762         // search C1 blobs
3763         id = search_address(addr, _C1_blobs_addr, _C1_blobs_length);
3764       }
3765       if (id == BAD_ADDRESS_ID) {
3766         id_base = _C2_blobs_base;
3767         // search C2 blobs
3768         id = search_address(addr, _C2_blobs_addr, _C2_blobs_length);
3769       }
3770       if (id == BAD_ADDRESS_ID) {
3771         assert(false, "Address " INTPTR_FORMAT " for Blob:%s is missing in AOT Code Cache addresses table", p2i(addr), cb->name());
3772       } else {
3773         return id_base + id;
3774       }
3775     } else {
3776       // Search in runtime functions
3777       id = search_address(addr, _extrs_addr, _extrs_length);
3778       if (id == BAD_ADDRESS_ID) {
3779         ResourceMark rm;
3780         const int buflen = 1024;
3781         char* func_name = NEW_RESOURCE_ARRAY(char, buflen);
3782         int offset = 0;
3783         if (os::dll_address_to_function_name(addr, func_name, buflen, &offset)) {
3784           if (offset > 0) {
3785             // Could be address of C string
3786             uint dist = (uint)pointer_delta(addr, (address)os::init, 1);
3787             CompileTask* task = ciEnv::current()->task();
3788             uint compile_id = 0;
3789             uint comp_level = 0;
3790             if (task != nullptr) { // this could be called from compiler runtime initialization (compiler blobs)
3791               compile_id = task->compile_id();
3792               comp_level = task->comp_level();
3793             }
3794             log_debug(aot, codecache)("%d (L%d): Address " INTPTR_FORMAT " (offset %d) for runtime target '%s' is missing in AOT Code Cache addresses table",
3795                           compile_id, comp_level, p2i(addr), dist, (const char*)addr);
3796             assert(dist > (uint)(_all_max + MAX_STR_COUNT), "change encoding of distance");
3797             return dist;
3798           }
3799           reloc.print_current_on(tty);
3800           blob->print_on(tty);
3801           blob->print_code_on(tty);
3802           assert(false, "Address " INTPTR_FORMAT " for runtime target '%s+%d' is missing in AOT Code Cache addresses table", p2i(addr), func_name, offset);
3803         } else {
3804           reloc.print_current_on(tty);
3805           blob->print_on(tty);
3806           blob->print_code_on(tty);
3807           os::find(addr, tty);
3808           assert(false, "Address " INTPTR_FORMAT " for <unknown>/('%s') is missing in AOT Code Cache addresses table", p2i(addr), (const char*)addr);
3809         }
3810       } else {
3811         return _extrs_base + id;
3812       }
3813     }
3814   }
3815   return id;
3816 }
3817 
3818 #undef _extrs_max
3819 #undef _stubs_max
3820 #undef _shared_blobs_max
3821 #undef _C1_blobs_max
3822 #undef _C2_blobs_max
3823 #undef _blobs_max
3824 #undef _extrs_base
3825 #undef _stubs_base
3826 #undef _shared_blobs_base
3827 #undef _C1_blobs_base
3828 #undef _C2_blobs_base
3829 #undef _blobs_end
3830 
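     // AOTRuntimeConstants captures the card table barrier set's grain and card
     // shifts at runtime so AOT-compiled code can reference them indirectly.
     // The field addresses are published via field_addresses_list() and
     // registered in the external addresses table by init_extrs().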
3831 void AOTRuntimeConstants::initialize_from_runtime() {
3832   BarrierSet* bs = BarrierSet::barrier_set();
3833   if (bs->is_a(BarrierSet::CardTableBarrierSet)) {
3834     CardTableBarrierSet* ctbs = ((CardTableBarrierSet*)bs);
3835     _aot_runtime_constants._grain_shift = ctbs->grain_shift();
3836     _aot_runtime_constants._card_shift = ctbs->card_shift();
3837   }
3838 }
3839 
3840 AOTRuntimeConstants AOTRuntimeConstants::_aot_runtime_constants;
3841 
3842 address AOTRuntimeConstants::_field_addresses_list[] = {
3843   grain_shift_address(),
3844   card_shift_address(),
3845   nullptr
3846 };
3847 
3848 
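     // _nmethod_readers implements a simple read gate: while the cache is open
     // it holds the number of active ReadingMarks (>= 0).
     // wait_for_no_nmethod_readers() flips it to -(readers + 1) so new readers
     // fail fast, then spins until the remaining readers drain and the counter
     // reaches -1. ReadingMark's destructor counts down toward 0 while the
     // cache is open and up toward -1 once it has been closed.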
3849 void AOTCodeCache::wait_for_no_nmethod_readers() {
3850   while (true) {
3851     int cur = Atomic::load(&_nmethod_readers);
3852     int upd = -(cur + 1);
3853     if (cur >= 0 && Atomic::cmpxchg(&_nmethod_readers, cur, upd) == cur) {
3854       // Success, no new readers should appear.
3855       break;
3856     }
3857   }
3858 
3859   // Now wait for all readers to leave.
3860   SpinYield w;
3861   while (Atomic::load(&_nmethod_readers) != -1) {
3862     w.wait();
3863   }
3864 }
3865 
3866 AOTCodeCache::ReadingMark::ReadingMark() {
3867   while (true) {
3868     int cur = Atomic::load(&_nmethod_readers);
3869     if (cur < 0) {
3870       // Cache is already closed, cannot proceed.
3871       _failed = true;
3872       return;
3873     }
3874     if (Atomic::cmpxchg(&_nmethod_readers, cur, cur + 1) == cur) {
3875       // Successfully recorded ourselves as entered.
3876       _failed = false;
3877       return;
3878     }
3879   }
3880 }
3881 
3882 AOTCodeCache::ReadingMark::~ReadingMark() {
3883   if (_failed) {
3884     return;
3885   }
3886   while (true) {
3887     int cur = Atomic::load(&_nmethod_readers);
3888     if (cur > 0) {
3889       // Cache is open, we are counting down towards 0.
3890       if (Atomic::cmpxchg(&_nmethod_readers, cur, cur - 1) == cur) {
3891         return;
3892       }
3893     } else {
3894       // Cache is closed, we are counting up towards -1.
3895       if (Atomic::cmpxchg(&_nmethod_readers, cur, cur + 1) == cur) {
3896         return;
3897       }
3898     }
3899   }
3900 }
3901 
3902 void AOTCodeCache::print_timers_on(outputStream* st) {
3903   if (is_using_code()) {
3904     st->print_cr ("    AOT Code Preload Time:  %7.3f s", _t_totalPreload.seconds());
3905     st->print_cr ("    AOT Code Load Time:     %7.3f s", _t_totalLoad.seconds());
3906     st->print_cr ("      nmethod register:     %7.3f s", _t_totalRegister.seconds());
3907     st->print_cr ("      find AOT code entry:  %7.3f s", _t_totalFind.seconds());
3908   }
3909   if (is_dumping_code()) {
3910     st->print_cr ("    AOT Code Store Time:  %7.3f s", _t_totalStore.seconds());
3911   }
3912 }
3913 
3914 AOTCodeStats AOTCodeStats::add_aot_code_stats(AOTCodeStats stats1, AOTCodeStats stats2) {
3915   AOTCodeStats result;
3916   for (int kind = AOTCodeEntry::None; kind < AOTCodeEntry::Kind_count; kind++) {
3917     result.ccstats._kind_cnt[kind] = stats1.entry_count(kind) + stats2.entry_count(kind);
3918   }
3919 
3920   for (int lvl = CompLevel_none; lvl < AOTCompLevel_count; lvl++) {
3921     result.ccstats._nmethod_cnt[lvl] = stats1.nmethod_count(lvl) + stats2.nmethod_count(lvl);
3922   }
3923   result.ccstats._clinit_barriers_cnt = stats1.clinit_barriers_count() + stats2.clinit_barriers_count();
3924   return result;
3925 }
3926 
3927 void AOTCodeCache::log_stats_on_exit(AOTCodeStats& stats) {
3928   LogStreamHandle(Debug, aot, codecache, exit) log;
3929   if (log.is_enabled()) {
3930     for (uint kind = AOTCodeEntry::None; kind < AOTCodeEntry::Kind_count; kind++) {
3931       log.print_cr("  %s: total=%u", aot_code_entry_kind_name[kind], stats.entry_count(kind));
3932       if (kind == AOTCodeEntry::Nmethod) {
3933         for (uint lvl = CompLevel_none; lvl < AOTCompLevel_count; lvl++) {
3934           log.print_cr("    Tier %d: total=%u", lvl, stats.nmethod_count(lvl));
3935         }
3936       }
3937     }
3938   }
3939 }
3940 
3941 static void print_helper1(outputStream* st, const char* name, int count) {
3942   if (count > 0) {
3943     st->print(" %s=%d", name, count);
3944   }
3945 }
3946 
3947 void AOTCodeCache::print_statistics_on(outputStream* st) {
3948   AOTCodeCache* cache = open_for_use();
3949   if (cache != nullptr) {
3950     ReadingMark rdmk;
3951     if (rdmk.failed()) {
3952       // Cache is closed, cannot touch anything.
3953       return;
3954     }
3955     AOTCodeStats stats;
3956 
3957     uint preload_count = cache->_load_header->preload_entries_count();
3958     AOTCodeEntry* preload_entries = (AOTCodeEntry*)cache->addr(cache->_load_header->preload_entries_offset());
3959     for (uint i = 0; i < preload_count; i++) {
3960       stats.collect_all_stats(&preload_entries[i]);
3961     }
3962 
3963     uint count = cache->_load_header->entries_count();
3964     AOTCodeEntry* load_entries = (AOTCodeEntry*)cache->addr(cache->_load_header->entries_offset());
3965     for (uint i = 0; i < count; i++) {
3966       stats.collect_all_stats(&load_entries[i]);
3967     }
3968 
3969     for (uint kind = AOTCodeEntry::None; kind < AOTCodeEntry::Kind_count; kind++) {
3970       if (stats.entry_count(kind) > 0) {
3971         st->print("  %s:", aot_code_entry_kind_name[kind]);
3972         print_helper1(st, "total", stats.entry_count(kind));
3973         print_helper1(st, "loaded", stats.entry_loaded_count(kind));
3974         print_helper1(st, "invalidated", stats.entry_invalidated_count(kind));
3975         print_helper1(st, "failed", stats.entry_load_failed_count(kind));
3976         st->cr();
3977       }
3978       if (kind == AOTCodeEntry::Nmethod) {
3979         for (uint lvl = CompLevel_none; lvl < AOTCompLevel_count; lvl++) {
3980           if (stats.nmethod_count(lvl) > 0) {
3981             st->print("    AOT Code T%d", lvl);
3982             print_helper1(st, "total", stats.nmethod_count(lvl));
3983             print_helper1(st, "loaded", stats.nmethod_loaded_count(lvl));
3984             print_helper1(st, "invalidated", stats.nmethod_invalidated_count(lvl));
3985             print_helper1(st, "failed", stats.nmethod_load_failed_count(lvl));
3986             if (lvl == AOTCompLevel_count-1) {
3987               print_helper1(st, "has_clinit_barriers", stats.clinit_barriers_count());
3988             }
3989             st->cr();
3990           }
3991         }
3992       }
3993     }
3994     LogStreamHandle(Debug, aot, codecache, init) log;
3995     if (log.is_enabled()) {
3996       AOTCodeCache::print_unused_entries_on(&log);
3997     }
3998     LogStreamHandle(Trace, aot, codecache) aot_info;
    // Traversing the code cache requires holding CodeCache_lock.
4000     if (aot_info.is_enabled()) {
4001       MutexLocker locker(CodeCache_lock, Mutex::_no_safepoint_check_flag);
4002       NMethodIterator iter(NMethodIterator::all);
4003       while (iter.next()) {
4004         nmethod* nm = iter.method();
4005         if (nm->is_in_use() && !nm->is_native_method() && !nm->is_osr_method()) {
4006           aot_info.print("%5d:%c%c%c%d:", nm->compile_id(),
4007                          (nm->method()->in_aot_cache() ? 'S' : ' '),
4008                          (nm->is_aot() ? 'A' : ' '),
4009                          (nm->preloaded() ? 'P' : ' '),
4010                          nm->comp_level());
4011           print_helper(nm, &aot_info);
4012           aot_info.print(": ");
4013           CompileTask::print(&aot_info, nm, nullptr, true /*short_form*/);
4014           LogStreamHandle(Trace, aot, codecache) aot_debug;
4015           if (aot_debug.is_enabled()) {
4016             MethodTrainingData* mtd = MethodTrainingData::find(methodHandle(Thread::current(), nm->method()));
4017             if (mtd != nullptr) {
4018               mtd->iterate_compiles([&](CompileTrainingData* ctd) {
4019                 aot_debug.print("     CTD: "); ctd->print_on(&aot_debug); aot_debug.cr();
4020               });
4021             }
4022           }
4023         }
4024       }
4025     }
4026   }
4027 }
4028 
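// One-line dump of a single AOT code cache entry: kind, id, offset, size,
// compilation level/id and state flags.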
4029 void AOTCodeEntry::print(outputStream* st) const {
4030   st->print_cr(" AOT Code Cache entry " INTPTR_FORMAT " [kind: %d, id: " UINT32_FORMAT_X_0 ", offset: %d, size: %d, comp_level: %d, comp_id: %d, %s%s%s%s]",
4031                p2i(this), (int)_kind, _id, _offset, _size, _comp_level, _comp_id,
4032                (_not_entrant? "not_entrant" : "entrant"),
4033                (_loaded ? ", loaded" : ""),
4034                (_has_clinit_barriers ? ", has_clinit_barriers" : ""),
4035                (_for_preload ? ", for_preload" : ""));
4036 }
4037 
// This is called after initialize() but before init2(), when _cache is not set yet,
// so entries are accessed through opened_cache directly.
4040 void AOTCodeCache::print_on(outputStream* st) {
4041   if (opened_cache != nullptr && opened_cache->for_use()) {
4042     ReadingMark rdmk;
4043     if (rdmk.failed()) {
4044       // Cache is closed, cannot touch anything.
4045       return;
4046     }
4047 
4048     st->print_cr("\nAOT Code Cache Preload entries");
4049 
4050     uint preload_count = opened_cache->_load_header->preload_entries_count();
4051     AOTCodeEntry* preload_entries = (AOTCodeEntry*)opened_cache->addr(opened_cache->_load_header->preload_entries_offset());
4052     for (uint i = 0; i < preload_count; i++) {
4053       AOTCodeEntry* entry = &preload_entries[i];
4054 
4055       uint entry_position = entry->offset();
4056       uint name_offset = entry->name_offset() + entry_position;
4057       const char* saved_name = opened_cache->addr(name_offset);
4058 
4059       st->print_cr("%4u: %10s Id:%u L%u size=%u '%s' %s%s%s",
4060                    i, aot_code_entry_kind_name[entry->kind()], entry->id(), entry->comp_level(),
4061                    entry->size(),  saved_name,
4062                    entry->has_clinit_barriers() ? " has_clinit_barriers" : "",
4063                    entry->is_loaded()           ? " loaded"              : "",
4064                    entry->not_entrant()         ? " not_entrant"         : "");
4065 
4066       st->print_raw("         ");
4067       AOTCodeReader reader(opened_cache, entry, nullptr);
4068       reader.print_on(st);
4069     }
4070 
4071     st->print_cr("\nAOT Code Cache entries");
4072 
4073     uint count = opened_cache->_load_header->entries_count();
4074     uint* search_entries = (uint*)opened_cache->addr(opened_cache->_load_header->search_table_offset()); // [id, index]
4075     AOTCodeEntry* load_entries = (AOTCodeEntry*)opened_cache->addr(opened_cache->_load_header->entries_offset());
4076 
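    // Each search table record is an {id, index} pair; the index selects the
    // corresponding entry in the load_entries array.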
4077     for (uint i = 0; i < count; i++) {
4078       int index = search_entries[2*i + 1];
4079       AOTCodeEntry* entry = &(load_entries[index]);
4080 
4081       uint entry_position = entry->offset();
4082       uint name_offset = entry->name_offset() + entry_position;
4083       const char* saved_name = opened_cache->addr(name_offset);
4084 
4085       st->print_cr("%4u: %10s idx:%4u Id:%u L%u size=%u '%s' %s%s%s%s",
4086                    i, aot_code_entry_kind_name[entry->kind()], index, entry->id(), entry->comp_level(),
4087                    entry->size(),  saved_name,
4088                    entry->has_clinit_barriers() ? " has_clinit_barriers" : "",
4089                    entry->for_preload()         ? " for_preload"         : "",
4090                    entry->is_loaded()           ? " loaded"              : "",
4091                    entry->not_entrant()         ? " not_entrant"         : "");
4092 
4093       st->print_raw("         ");
4094       AOTCodeReader reader(opened_cache, entry, nullptr);
4095       reader.print_on(st);
4096     }
4097   }
4098 }
4099 
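// Report nmethod entries that were never loaded from the cache even though their
// holder class is initialized and their recorded init dependencies are resolved,
// unless an equal-or-higher-tier compilation already exists or is queued.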
4100 void AOTCodeCache::print_unused_entries_on(outputStream* st) {
4101   LogStreamHandle(Info, aot, codecache, init) info;
4102   if (info.is_enabled()) {
4103     AOTCodeCache::iterate([&](AOTCodeEntry* entry) {
4104       if (entry->is_nmethod() && !entry->is_loaded()) {
4105         MethodTrainingData* mtd = MethodTrainingData::find(methodHandle(Thread::current(), entry->method()));
4106         if (mtd != nullptr) {
4107           if (mtd->has_holder()) {
4108             if (mtd->holder()->method_holder()->is_initialized()) {
4109               ResourceMark rm;
4110               mtd->iterate_compiles([&](CompileTrainingData* ctd) {
4111                 if ((uint)ctd->level() == entry->comp_level()) {
4112                   if (ctd->init_deps_left_acquire() == 0) {
4113                     nmethod* nm = mtd->holder()->code();
4114                     if (nm == nullptr) {
4115                       if (mtd->holder()->queued_for_compilation()) {
4116                         return; // scheduled for compilation
4117                       }
4118                     } else if ((uint)nm->comp_level() >= entry->comp_level()) {
                      return; // already compiled online at the same or a higher tier
4120                     }
4121                     info.print("AOT Code Cache entry not loaded: ");
4122                     ctd->print_on(&info);
4123                     info.cr();
4124                   }
4125                 }
4126               });
4127             } else {
              // Holder class is not initialized yet.
4129             }
4130           } else {
4131             info.print("AOT Code Cache entry doesn't have a holder: ");
4132             mtd->print_on(&info);
4133             info.cr();
4134           }
4135         }
4136       }
4137     });
4138   }
4139 }
4140 
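// Print the name saved with the entry this reader was created for.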
4141 void AOTCodeReader::print_on(outputStream* st) {
4142   uint entry_position = _entry->offset();
4143   set_read_position(entry_position);
4144 
4145   // Read name
4146   uint name_offset = entry_position + _entry->name_offset();
  uint name_size = _entry->name_size(); // Includes terminating '\0'
4148   const char* name = addr(name_offset);
4149 
4150   st->print_cr("  name: %s", name);
4151 }
4152