1 /*
   2  * Copyright (c) 2023, 2025, Oracle and/or its affiliates. All rights reserved.
   3  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
   4  *
   5  * This code is free software; you can redistribute it and/or modify it
   6  * under the terms of the GNU General Public License version 2 only, as
   7  * published by the Free Software Foundation.
   8  *
   9  * This code is distributed in the hope that it will be useful, but WITHOUT
  10  * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
  11  * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
  12  * version 2 for more details (a copy is included in the LICENSE file that
  13  * accompanied this code).
  14  *
  15  * You should have received a copy of the GNU General Public License version
  16  * 2 along with this work; if not, write to the Free Software Foundation,
  17  * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
  18  *
  19  * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
  20  * or visit www.oracle.com if you need additional information or have any
  21  * questions.
  22  *
  23  */
  24 
  25 #include "asm/macroAssembler.hpp"
  26 #include "cds/cdsAccess.hpp"
  27 #include "cds/cdsConfig.hpp"
  28 #include "cds/heapShared.hpp"
  29 #include "cds/metaspaceShared.hpp"
  30 #include "ci/ciConstant.hpp"
  31 #include "ci/ciEnv.hpp"
  32 #include "ci/ciField.hpp"
  33 #include "ci/ciMethod.hpp"
  34 #include "ci/ciMethodData.hpp"
  35 #include "ci/ciObject.hpp"
  36 #include "ci/ciUtilities.inline.hpp"
  37 #include "classfile/javaAssertions.hpp"
  38 #include "classfile/stringTable.hpp"
  39 #include "classfile/symbolTable.hpp"
  40 #include "classfile/systemDictionary.hpp"
  41 #include "classfile/vmClasses.hpp"
  42 #include "classfile/vmIntrinsics.hpp"
  43 #include "code/codeBlob.hpp"
  44 #include "code/codeCache.hpp"
  45 #include "code/oopRecorder.inline.hpp"
  46 #include "code/SCCache.hpp"
  47 #include "compiler/abstractCompiler.hpp"
  48 #include "compiler/compilationPolicy.hpp"
  49 #include "compiler/compileBroker.hpp"
  50 #include "compiler/compileTask.hpp"
  51 #include "gc/g1/g1BarrierSetRuntime.hpp"
  52 #include "gc/shared/gcConfig.hpp"
  53 #include "logging/log.hpp"
  54 #include "memory/memoryReserver.hpp"
  55 #include "memory/universe.hpp"
  56 #include "oops/klass.inline.hpp"
  57 #include "oops/method.inline.hpp"
  58 #include "oops/trainingData.hpp"
  59 #include "prims/jvmtiThreadState.hpp"
  60 #include "runtime/atomic.hpp"
  61 #include "runtime/flags/flagSetting.hpp"
  62 #include "runtime/globals_extension.hpp"
  63 #include "runtime/handles.inline.hpp"
  64 #include "runtime/java.hpp"
  65 #include "runtime/jniHandles.inline.hpp"
  66 #include "runtime/os.inline.hpp"
  67 #include "runtime/sharedRuntime.hpp"
  68 #include "runtime/stubCodeGenerator.hpp"
  69 #include "runtime/stubRoutines.hpp"
  70 #include "runtime/timerTrace.hpp"
  71 #include "runtime/threadIdentifier.hpp"
  72 #include "utilities/ostream.hpp"
  73 #include "utilities/spinYield.hpp"
  74 #ifdef COMPILER1
  75 #include "c1/c1_Runtime1.hpp"
  76 #include "c1/c1_LIRAssembler.hpp"
  77 #include "gc/shared/c1/barrierSetC1.hpp"
  78 #include "gc/g1/c1/g1BarrierSetC1.hpp"
  79 #if INCLUDE_SHENANDOAHGC
  80 #include "gc/shenandoah/c1/shenandoahBarrierSetC1.hpp"
  81 #endif
  82 #include "gc/z/c1/zBarrierSetC1.hpp"
  83 #endif
  84 #ifdef COMPILER2
  85 #include "opto/runtime.hpp"
  86 #endif
  87 #if INCLUDE_JVMCI
  88 #include "jvmci/jvmci.hpp"
  89 #endif
  90 #if INCLUDE_SHENANDOAHGC
  91 #include "gc/shenandoah/shenandoahRuntime.hpp"
  92 #endif
  93 
  94 #include <sys/stat.h>
  95 #include <errno.h>
  96 
  97 #ifndef O_BINARY       // if defined (Win32) use binary files.
  98 #define O_BINARY 0     // otherwise do nothing.
  99 #endif
 100 
 101 const char* sccentry_kind_name[] = {
 102 #define DECL_KIND_STRING(kind) XSTR(kind),
 103   DO_SCCENTRY_KIND(DECL_KIND_STRING)
 104 #undef DECL_KIND_STRING
 105 };
 106 
 107 static elapsedTimer _t_totalLoad;
 108 static elapsedTimer _t_totalRegister;
 109 static elapsedTimer _t_totalFind;
 110 static elapsedTimer _t_totalStore;
 111 
 112 SCCache* SCCache::_cache = nullptr;
 113 
 114 static bool enable_timers() {
 115   return CITime || log_is_enabled(Info, init);
 116 }
 117 
 118 static void exit_vm_on_load_failure() {
 119   // Treat SCC warnings as errors when RequireSharedSpaces is on.
 120   if (RequireSharedSpaces) {
 121     vm_exit_during_initialization("Unable to use AOT Code Cache.", nullptr);
 122   }
 123 }
 124 
 125 static void exit_vm_on_store_failure() {
 126   // Treat SCC warnings as errors when RequireSharedSpaces is on.
 127   if (RequireSharedSpaces) {
 128     tty->print_cr("Unable to create startup cached code.");
 129     // Failure during AOT code caching; we don't want to dump core
 130     vm_abort(false);
 131   }
 132 }
 133 
 134 uint SCCache::max_aot_code_size() {
 135   return (uint)CachedCodeMaxSize;
 136 }
 137 
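     // Startup initialization of the AOT code cache. Loading cached code requires the CDS
     // archive (UseSharedSpaces) and a non-empty cached code region; otherwise LoadCachedCode
     // is turned off. ClassInitBarrierMode defaults to 1 while caching is in use and is reset
     // to 0 otherwise. Compiler flags are adjusted below before any code is stored or loaded.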
 138 void SCCache::initialize() {
 139   if (LoadCachedCode && !UseSharedSpaces) {
 140     return;
 141   }
 142   if (LoadCachedCode && CDSAccess::get_cached_code_size() == 0) {
 143     LoadCachedCode = false;
 144     return;
 145   }
 146   if (StoreCachedCode || LoadCachedCode) {
 147     if (FLAG_IS_DEFAULT(ClassInitBarrierMode)) {
 148       FLAG_SET_DEFAULT(ClassInitBarrierMode, 1);
 149     }
 150   } else if (ClassInitBarrierMode > 0) {
 151     log_info(scc, init)("Set ClassInitBarrierMode to 0 because StoreCachedCode and LoadCachedCode are false.");
 152     FLAG_SET_DEFAULT(ClassInitBarrierMode, 0);
 153   }
 154   if (LoadCachedCode || StoreCachedCode) {
 155     if (!open_cache()) {
 156       exit_vm_on_load_failure();
 157       return;
 158     }
 159     if (StoreCachedCode) {
 160       FLAG_SET_DEFAULT(FoldStableValues, false);
 161       FLAG_SET_DEFAULT(ForceUnreachable, true);
 162     }
 163     FLAG_SET_DEFAULT(DelayCompilerStubsGeneration, false);
 164   }
 165 }
 166 
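     // Second phase of initialization, called after the Universe (heap and barrier set) is set
     // up: verify that the card table base can be encoded in relocations when storing code,
     // capture the AOT runtime constants, verify the recorded VM configuration, and populate
     // the address tables for external routines and early stubs referenced by cached code.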
 167 void SCCache::init2() {
 168   if (!is_on()) {
 169     return;
 170   }
 171   // After Universe initialized
 172   BarrierSet* bs = BarrierSet::barrier_set();
 173   if (bs->is_a(BarrierSet::CardTableBarrierSet)) {
 174     address byte_map_base = ci_card_table_address_as<address>();
 175     if (is_on_for_write() && !external_word_Relocation::can_be_relocated(byte_map_base)) {
 176       // Bail out since we can't encode card table base address with relocation
 177       log_warning(scc, init)("Can't create AOT Code Cache because card table base address is not relocatable: " INTPTR_FORMAT, p2i(byte_map_base));
 178       close();
 179       exit_vm_on_load_failure();
 180     }
 181   }
 182   // initialize aot runtime constants as appropriate to this runtime
 183   AOTRuntimeConstants::initialize_from_runtime();
 184 
 185   if (!verify_vm_config()) {
 186     close();
 187     exit_vm_on_load_failure();
 188   }
 189 
 190   // initialize the table of external routines so we can save
 191   // generated code blobs that reference them
 192   init_extrs_table();
 193   // initialize the table of initial stubs so we can save
 194   // generated code blobs that reference them
 195   init_early_stubs_table();
 196 }
 197 
 198 void SCCache::print_timers_on(outputStream* st) {
 199   if (LoadCachedCode) {
 200     st->print_cr ("    SC Load Time:         %7.3f s", _t_totalLoad.seconds());
 201     st->print_cr ("      nmethod register:     %7.3f s", _t_totalRegister.seconds());
 202     st->print_cr ("      find cached code:     %7.3f s", _t_totalFind.seconds());
 203   }
 204   if (StoreCachedCode) {
 205     st->print_cr ("    SC Store Time:        %7.3f s", _t_totalStore.seconds());
 206   }
 207 }
 208 
 209 bool SCCache::is_C3_on() {
 210 #if INCLUDE_JVMCI
 211   if (UseJVMCICompiler) {
 212     return (StoreCachedCode || LoadCachedCode) && UseC2asC3;
 213   }
 214 #endif
 215   return false;
 216 }
 217 
 218 bool SCCache::is_code_load_thread_on() {
 219   return UseCodeLoadThread && LoadCachedCode;
 220 }
 221 
 222 bool SCCache::gen_preload_code(ciMethod* m, int entry_bci) {
 223   VM_ENTRY_MARK;
 224   return (entry_bci == InvocationEntryBci) && is_on() && _cache->gen_preload_code() &&
 225          CDSAccess::can_generate_cached_code(m->get_Method());
 226 }
 227 
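     // Print a compact summary of every SCC entry matching the given nmethod's method:
     // "A" (AOT), optional "P" (preload), comp level, "+D<n>" decompile count,
     // flags [L|F|I] for loaded / load-failed / not-entrant, and "#<comp_id>".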
 228 static void print_helper(nmethod* nm, outputStream* st) {
 229   SCCache::iterate([&](SCCEntry* e) {
 230     if (e->method() == nm->method()) {
 231       ResourceMark rm;
 232       stringStream ss;
 233       ss.print("A%s%d", (e->for_preload() ? "P" : ""), e->comp_level());
 234       if (e->decompile() > 0) {
 235         ss.print("+D%d", e->decompile());
 236       }
 237       ss.print("[%s%s%s]",
 238                (e->is_loaded()   ? "L" : ""),
 239                (e->load_fail()   ? "F" : ""),
 240                (e->not_entrant() ? "I" : ""));
 241       ss.print("#%d", e->comp_id());
 242 
 243       st->print(" %s", ss.freeze());
 244     }
 245   });
 246 }
 247 
 248 void SCCache::close() {
 249   if (is_on()) {
 250     if (SCCache::is_on_for_read()) {
 251       LogStreamHandle(Info, init) log;
 252       if (log.is_enabled()) {
 253         log.print_cr("AOT Code Cache statistics (when closed): ");
 254         SCCache::print_statistics_on(&log);
 255         log.cr();
 256         SCCache::print_timers_on(&log);
 257 
 258         LogStreamHandle(Info, scc, init) log1;
 259         if (log1.is_enabled()) {
 260           SCCache::print_unused_entries_on(&log1);
 261         }
 262 
 263         LogStreamHandle(Info, scc, codecache) info_scc;
 264         // need a lock to traverse the code cache
 265         MutexLocker locker(CodeCache_lock, Mutex::_no_safepoint_check_flag);
 266         if (info_scc.is_enabled()) {
 267           NMethodIterator iter(NMethodIterator::all);
 268           while (iter.next()) {
 269             nmethod* nm = iter.method();
 270             if (nm->is_in_use() && !nm->is_native_method() && !nm->is_osr_method()) {
 271               info_scc.print("%5d:%c%c%c%d:", nm->compile_id(),
 272                              (nm->method()->is_shared() ? 'S' : ' '),
 273                              (nm->is_scc() ? 'A' : ' '),
 274                              (nm->preloaded() ? 'P' : ' '),
 275                              nm->comp_level());
 276               print_helper(nm, &info_scc);
 277               info_scc.print(": ");
 278               CompileTask::print(&info_scc, nm, nullptr, true /*short_form*/);
 279 
 280               LogStreamHandle(Debug, scc, codecache) debug_scc;
 281               if (debug_scc.is_enabled()) {
 282                 MethodTrainingData* mtd = MethodTrainingData::find(methodHandle(Thread::current(), nm->method()));
 283                 if (mtd != nullptr) {
 284                   mtd->iterate_compiles([&](CompileTrainingData* ctd) {
 285                     debug_scc.print("     CTD: "); ctd->print_on(&debug_scc); debug_scc.cr();
 286                   });
 287                 }
 288               }
 289             }
 290           }
 291         }
 292       }
 293     }
 294 
 295     delete _cache; // Free memory
 296     _cache = nullptr;
 297   }
 298 }
 299 
 300 void SCCache::invalidate(SCCEntry* entry) {
 301   // This could be concurrent execution
 302   if (entry != nullptr && is_on()) { // Request could come after cache is closed.
 303     _cache->invalidate_entry(entry);
 304   }
 305 }
 306 
 307 bool SCCache::is_loaded(SCCEntry* entry) {
 308   if (is_on() && _cache->cache_buffer() != nullptr) {
 309     return (uint)((char*)entry - _cache->cache_buffer()) < _cache->load_size();
 310   }
 311   return false;
 312 }
 313 
 314 void SCCache::preload_code(JavaThread* thread) {
 315   if ((ClassInitBarrierMode == 0) || !is_on_for_read()) {
 316     return;
 317   }
 318   if ((DisableCachedCode & (1 << 3)) != 0) {
 319     return; // no preloaded code (level 5);
 320   }
 321   _cache->preload_startup_code(thread);
 322 }
 323 
 324 SCCEntry* SCCache::find_code_entry(const methodHandle& method, uint comp_level) {
 325   switch (comp_level) {
 326     case CompLevel_simple:
 327       if ((DisableCachedCode & (1 << 0)) != 0) {
 328         return nullptr;
 329       }
 330       break;
 331     case CompLevel_limited_profile:
 332       if ((DisableCachedCode & (1 << 1)) != 0) {
 333         return nullptr;
 334       }
 335       break;
 336     case CompLevel_full_optimization:
 337       if ((DisableCachedCode & (1 << 2)) != 0) {
 338         return nullptr;
 339       }
 340       break;
 341 
 342     default: return nullptr; // Level 1, 2, and 4 only
 343   }
 344   TraceTime t1("SC total find code time", &_t_totalFind, enable_timers(), false);
 345   if (is_on() && _cache->cache_buffer() != nullptr) {
 346     MethodData* md = method->method_data();
 347     uint decomp = (md == nullptr) ? 0 : md->decompile_count();
 348 
 349     ResourceMark rm;
 350     const char* target_name = method->name_and_sig_as_C_string();
 351     uint hash = java_lang_String::hash_code((const jbyte*)target_name, (int)strlen(target_name));
 352     SCCEntry* entry = _cache->find_entry(SCCEntry::Code, hash, comp_level, decomp);
 353     if (entry == nullptr) {
 354       log_info(scc, nmethod)("Missing entry for '%s' (comp_level %d, decomp: %d, hash: " UINT32_FORMAT_X_0 ")", target_name, (uint)comp_level, decomp, hash);
 355 #ifdef ASSERT
 356     } else {
 357       uint name_offset = entry->offset() + entry->name_offset();
 358       uint name_size   = entry->name_size(); // Includes '\0'
 359       const char* name = _cache->cache_buffer() + name_offset;
 360       if (strncmp(target_name, name, name_size) != 0) {
 361         assert(false, "SCA: saved nmethod's name '%s' is different from '%s', hash: " UINT32_FORMAT_X_0, name, target_name, hash);
 362       }
 363 #endif
 364     }
 365 
 366     DirectiveSet* directives = DirectivesStack::getMatchingDirective(method, nullptr);
 367     if (directives->IgnorePrecompiledOption) {
 368       LogStreamHandle(Info, scc, compilation) log;
 369       if (log.is_enabled()) {
 370         log.print("Ignore cached code entry on level %d for ", comp_level);
 371         method->print_value_on(&log);
 372       }
 373       return nullptr;
 374     }
 375 
 376     return entry;
 377   }
 378   return nullptr;
 379 }
 380 
 381 void SCCache::add_C_string(const char* str) {
 382   if (is_on_for_write()) {
 383     _cache->add_new_C_string(str);
 384   }
 385 }
 386 
 387 bool SCCache::allow_const_field(ciConstant& value) {
 388   return !is_on() || !StoreCachedCode // Restrict only when we generate cache
 389         // Can not trust primitive too   || !is_reference_type(value.basic_type())
 390         // May disable this too for now  || is_reference_type(value.basic_type()) && value.as_object()->should_be_constant()
 391         ;
 392 }
 393 
 394 
 395 bool SCCache::open_cache() {
 396   SCCache* cache = new SCCache();
 397   if (cache->failed()) {
 398     delete cache;
 399     _cache = nullptr;
 400     return false;
 401   }
 402   _cache = cache;
 403   return true;
 404 }
 405 
 406 class CachedCodeDirectory : public CachedCodeDirectoryInternal {
 407 public:
 408   uint _aot_code_size;
 409   char* _aot_code_data;
 410 
 411   void set_aot_code_data(uint size, char* aot_data) {
 412     _aot_code_size = size;
 413     CDSAccess::set_pointer(&_aot_code_data, aot_data);
 414   }
 415 
 416   static CachedCodeDirectory* create();
 417 };
 418 
 419 // Storing AOT code in the cached code region of AOT Cache:
 420 //
 421 // [1] Use CachedCodeDirectory to keep track of all of data related to cached code.
 422 //     E.g., you can build a hashtable to record what methods have been archived.
 423 //
 424 // [2] Memory for all data for cached code, including CachedCodeDirectory, should be
 425 //     allocated using CDSAccess::allocate_from_code_cache().
 426 //
 427 // [3] CachedCodeDirectory must be the very first allocation.
 428 //
 429 // [4] Two kinds of pointer can be stored:
 430 //     - A pointer p that points to metadata. CDSAccess::can_generate_cached_code(p) must return true.
 431 //     - A pointer to a buffer returned by CDSAccess::allocate_from_code_cache().
 432 //       (It's OK to point to an interior location within this buffer).
 433 //     Such pointers must be stored using CDSAccess::set_pointer()
 434 //
 435 // The buffers allocated by CDSAccess::allocate_from_code_cache() are in a contiguous region. At runtime, this
 436 // region is mapped to the process address space. All the pointers in this buffer are relocated as necessary
 437 // (e.g., to account for the runtime location of the CodeCache).
 438 //
 439 // This is always at the very beginning of the mmapped CDS "cc" (cached code) region
 440 static CachedCodeDirectory* _cached_code_directory = nullptr;
 441 
 442 CachedCodeDirectory* CachedCodeDirectory::create() {
 443   assert(CDSAccess::is_cached_code_region_empty(), "must be");
 444   CachedCodeDirectory* dir = (CachedCodeDirectory*)CDSAccess::allocate_from_code_cache(sizeof(CachedCodeDirectory));
 445   dir->dumptime_init_internal();
 446   return dir;
 447 }
 448 
 449 #define DATA_ALIGNMENT HeapWordSize
 450 
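     // Construct the cache object. For reading, the cached code ("cc") region of the AOT Cache
     // is mapped and the SCCHeader is verified; for writing, a C-heap store buffer of
     // max_aot_code_size() bytes is allocated, with code written from the bottom and SCCEntry
     // records growing down from the top.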
 451 SCCache::SCCache() {
 452   _load_header = nullptr;
 453   _for_read  = LoadCachedCode;
 454   _for_write = StoreCachedCode;
 455   _load_size = 0;
 456   _store_size = 0;
 457   _write_position = 0;
 458   _closing  = false;
 459   _failed = false;
 460   _lookup_failed = false;
 461   _table = nullptr;
 462   _load_entries = nullptr;
 463   _store_entries  = nullptr;
 464   _C_strings_buf  = nullptr;
 465   _load_buffer = nullptr;
 466   _store_buffer = nullptr;
 467   _C_store_buffer = nullptr;
 468   _store_entries_cnt = 0;
 469   _gen_preload_code = false;
 470   _for_preload = false;       // changed while storing entry data
 471   _has_clinit_barriers = false;
 472 
 473   _compile_id = 0;
 474   _comp_level = 0;
 475 
 476   _use_meta_ptrs = UseSharedSpaces ? UseMetadataPointers : false;
 477 
 478   if (_for_read) {
 479     // Read cache
 480     ReservedSpace rs = MemoryReserver::reserve(CDSAccess::get_cached_code_size(), mtCode);
 481     if (!rs.is_reserved()) {
 482       log_warning(scc, init)("Failed to reserve %u bytes of memory for mapping cached code region in AOT Cache", (uint)CDSAccess::get_cached_code_size());
 483       set_failed();
 484       return;
 485     }
 486     if (!CDSAccess::map_cached_code(rs)) {
 487       log_warning(scc, init)("Failed to read/mmap cached code region in AOT Cache");
 488       set_failed();
 489       return;
 490     }
 491     _cached_code_directory = (CachedCodeDirectory*)rs.base();
 492     _cached_code_directory->runtime_init_internal();
 493 
 494     _load_size = _cached_code_directory->_aot_code_size;
 495     _load_buffer = _cached_code_directory->_aot_code_data;
 496     assert(is_aligned(_load_buffer, DATA_ALIGNMENT), "load_buffer is not aligned");
 497     log_info(scc, init)("Mapped %u bytes at address " INTPTR_FORMAT " from AOT Code Cache", _load_size, p2i(_load_buffer));
 498 
 499     _load_header = (SCCHeader*)addr(0);
 500     if (!_load_header->verify_config(_load_size)) {
 501       set_failed();
 502       return;
 503     }
 504     log_info(scc, init)("Read header from AOT Code Cache");
 505     if (_load_header->has_meta_ptrs()) {
 506       assert(UseSharedSpaces, "should be verified already");
 507       _use_meta_ptrs = true; // Regardless of UseMetadataPointers
 508       UseMetadataPointers = true;
 509     }
 510     // Read strings
 511     load_strings();
 512   }
 513   if (_for_write) {
 514     _gen_preload_code = _use_meta_ptrs && (ClassInitBarrierMode > 0);
 515 
 516     _C_store_buffer = NEW_C_HEAP_ARRAY(char, max_aot_code_size() + DATA_ALIGNMENT, mtCode);
 517     _store_buffer = align_up(_C_store_buffer, DATA_ALIGNMENT);
 518     // Entries are allocated at the end of the buffer and grow backwards (like a stack).
 519     _store_entries = (SCCEntry*)align_up(_C_store_buffer + max_aot_code_size(), DATA_ALIGNMENT);
 520     log_info(scc, init)("Allocated store buffer at address " INTPTR_FORMAT " of size " UINT32_FORMAT " bytes", p2i(_store_buffer), max_aot_code_size());
 521   }
 522   _table = new SCAddressTable();
 523 }
 524 
 525 void SCCache::init_extrs_table() {
 526   SCAddressTable* table = addr_table();
 527   if (table != nullptr) {
 528     table->init_extrs();
 529   }
 530 }
 531 void SCCache::init_early_stubs_table() {
 532   SCAddressTable* table = addr_table();
 533   if (table != nullptr) {
 534     table->init_early_stubs();
 535   }
 536 }
 537 void SCCache::init_shared_blobs_table() {
 538   SCAddressTable* table = addr_table();
 539   if (table != nullptr) {
 540     table->init_shared_blobs();
 541   }
 542 }
 543 void SCCache::init_stubs_table() {
 544   SCAddressTable* table = addr_table();
 545   if (table != nullptr) {
 546     table->init_stubs();
 547   }
 548 }
 549 
 550 void SCCache::init_opto_table() {
 551   SCAddressTable* table = addr_table();
 552   if (table != nullptr) {
 553     table->init_opto();
 554   }
 555 }
 556 
 557 void SCCache::init_c1_table() {
 558   SCAddressTable* table = addr_table();
 559   if (table != nullptr) {
 560     table->init_c1();
 561   }
 562 }
 563 
 564 void SCConfig::record(bool use_meta_ptrs) {
 565   _flags = 0;
 566   if (use_meta_ptrs) {
 567     _flags |= metadataPointers;
 568   }
 569 #ifdef ASSERT
 570   _flags |= debugVM;
 571 #endif
 572   if (UseCompressedOops) {
 573     _flags |= compressedOops;
 574   }
 575   if (UseCompressedClassPointers) {
 576     _flags |= compressedClassPointers;
 577   }
 578   if (UseTLAB) {
 579     _flags |= useTLAB;
 580   }
 581   if (JavaAssertions::systemClassDefault()) {
 582     _flags |= systemClassAssertions;
 583   }
 584   if (JavaAssertions::userClassDefault()) {
 585     _flags |= userClassAssertions;
 586   }
 587   if (EnableContended) {
 588     _flags |= enableContendedPadding;
 589   }
 590   if (RestrictContended) {
 591     _flags |= restrictContendedPadding;
 592   }
 593   _compressedOopShift    = CompressedOops::shift();
 594   _compressedKlassShift  = CompressedKlassPointers::shift();
 595   _contendedPaddingWidth = ContendedPaddingWidth;
 596   _objectAlignment       = ObjectAlignmentInBytes;
 597   _gc                    = (uint)Universe::heap()->kind();
 598 }
 599 
 600 bool SCConfig::verify() const {
 601 #ifdef ASSERT
 602   if ((_flags & debugVM) == 0) {
 603     log_warning(scc, init)("Disable AOT Code: it was created by a product VM and cannot be used by a debug VM");
 604     return false;
 605   }
 606 #else
 607   if ((_flags & debugVM) != 0) {
 608     log_warning(scc, init)("Disable AOT Code: it was created by a debug VM and cannot be used by a product VM");
 609     return false;
 610   }
 611 #endif
 612 
 613   CollectedHeap::Name scc_gc = (CollectedHeap::Name)_gc;
 614   if (scc_gc != Universe::heap()->kind()) {
 615     log_warning(scc, init)("Disable AOT Code: it was created with different GC: %s vs current %s", GCConfig::hs_err_name(scc_gc), GCConfig::hs_err_name());
 616     return false;
 617   }
 618 
 619   if (((_flags & compressedOops) != 0) != UseCompressedOops) {
 620     log_warning(scc, init)("Disable AOT Code: it was created with UseCompressedOops = %s", UseCompressedOops ? "false" : "true");
 621     return false;
 622   }
 623   if (((_flags & compressedClassPointers) != 0) != UseCompressedClassPointers) {
 624     log_warning(scc, init)("Disable AOT Code: it was created with UseCompressedClassPointers = %s", UseCompressedClassPointers ? "false" : "true");
 625     return false;
 626   }
 627 
 628   if (((_flags & systemClassAssertions) != 0) != JavaAssertions::systemClassDefault()) {
 629     log_warning(scc, init)("Disable AOT Code: it was created with JavaAssertions::systemClassDefault() = %s", JavaAssertions::systemClassDefault() ? "disabled" : "enabled");
 630     return false;
 631   }
 632   if (((_flags & userClassAssertions) != 0) != JavaAssertions::userClassDefault()) {
 633     log_warning(scc, init)("Disable AOT Code: it was created with JavaAssertions::userClassDefault() = %s", JavaAssertions::userClassDefault() ? "disabled" : "enabled");
 634     return false;
 635   }
 636 
 637   if (((_flags & enableContendedPadding) != 0) != EnableContended) {
 638     log_warning(scc, init)("Disable AOT Code: it was created with EnableContended = %s", EnableContended ? "false" : "true");
 639     return false;
 640   }
 641   if (((_flags & restrictContendedPadding) != 0) != RestrictContended) {
 642     log_warning(scc, init)("Disable AOT Code: it was created with RestrictContended = %s", RestrictContended ? "false" : "true");
 643     return false;
 644   }
 645   if (_compressedOopShift != (uint)CompressedOops::shift()) {
 646     log_warning(scc, init)("Disable AOT Code: it was created with CompressedOops::shift() = %d vs current %d", _compressedOopShift, CompressedOops::shift());
 647     return false;
 648   }
 649   if (_compressedKlassShift != (uint)CompressedKlassPointers::shift()) {
 650     log_warning(scc, init)("Disable AOT Code: it was created with CompressedKlassPointers::shift() = %d vs current %d", _compressedKlassShift, CompressedKlassPointers::shift());
 651     return false;
 652   }
 653   if (_contendedPaddingWidth != (uint)ContendedPaddingWidth) {
 654     log_warning(scc, init)("Disable AOT Code: it was created with ContendedPaddingWidth = %d vs current %d", _contendedPaddingWidth, ContendedPaddingWidth);
 655     return false;
 656   }
 657   if (_objectAlignment != (uint)ObjectAlignmentInBytes) {
 658     log_warning(scc, init)("Disable AOT Code: it was created with ObjectAlignmentInBytes = %d vs current %d", _objectAlignment, ObjectAlignmentInBytes);
 659     return false;
 660   }
 661   return true;
 662 }
 663 
 664 bool SCCHeader::verify_config(uint load_size) const {
 665   if (_version != SCC_VERSION) {
 666     log_warning(scc, init)("Disable AOT Code: different SCC version %d vs %d recorded in AOT Cache", SCC_VERSION, _version);
 667     return false;
 668   }
 669   if (_cache_size != load_size) {
 670     log_warning(scc, init)("Disable AOT Code: different cached code size %d vs %d recorded in AOT Cache", load_size, _cache_size);
 671     return false;
 672   }
 673   return true;
 674 }
 675 
 676 volatile int SCCache::_nmethod_readers = 0;
 677 
 678 SCCache::~SCCache() {
 679   if (_closing) {
 680     return; // Already closed
 681   }
 682   // Stop any further access to cache.
 683   // Checked on entry to load_nmethod() and store_nmethod().
 684   _closing = true;
 685   if (_for_read) {
 686     // Wait for all load_nmethod() finish.
 687     wait_for_no_nmethod_readers();
 688   }
 689   // Prevent writing code into cache while we are closing it.
 690   // This lock is held by ciEnv::register_method(), which calls store_nmethod().
 691   MutexLocker ml(Compile_lock);
 692   if (for_write()) { // Finalize cache
 693     finish_write();
 694   }
 695   _load_buffer = nullptr;
 696   if (_C_store_buffer != nullptr) {
 697     FREE_C_HEAP_ARRAY(char, _C_store_buffer);
 698     _C_store_buffer = nullptr;
 699     _store_buffer = nullptr;
 700   }
 701   if (_table != nullptr) {
 702     delete _table;
 703     _table = nullptr;
 704   }
 705 }
 706 
 707 SCCache* SCCache::open_for_read() {
 708   if (SCCache::is_on_for_read()) {
 709     return SCCache::cache();
 710   }
 711   return nullptr;
 712 }
 713 
 714 SCCache* SCCache::open_for_write() {
 715   if (SCCache::is_on_for_write()) {
 716     SCCache* cache = SCCache::cache();
 717     cache->clear_lookup_failed(); // Reset bit
 718     return cache;
 719   }
 720   return nullptr;
 721 }
 722 
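     // Copy 'size' bytes from 'from' to 'to'. A word-wise disjoint copy is used when both
     // addresses are HeapWord-aligned and the size is large enough; otherwise a plain byte
     // copy is done.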
 723 void copy_bytes(const char* from, address to, uint size) {
 724   assert(size > 0, "sanity");
 725   bool by_words = true;
 726   if ((size > 2 * HeapWordSize) && (((intptr_t)from | (intptr_t)to) & (HeapWordSize - 1)) == 0) {
 727     // Use wordwise copies if possible:
 728     Copy::disjoint_words((HeapWord*)from,
 729                          (HeapWord*)to,
 730                          ((size_t)size + HeapWordSize-1) / HeapWordSize);
 731   } else {
 732     by_words = false;
 733     Copy::conjoint_jbytes(from, to, (size_t)size);
 734   }
 735   log_trace(scc)("Copied %d bytes as %s from " INTPTR_FORMAT " to " INTPTR_FORMAT, size, (by_words ? "HeapWord" : "bytes"), p2i(from), p2i(to));
 736 }
 737 
 738 void SCCReader::set_read_position(uint pos) {
 739   if (pos == _read_position) {
 740     return;
 741   }
 742   assert(pos < _cache->load_size(), "offset:%d >= file size:%d", pos, _cache->load_size());
 743   _read_position = pos;
 744 }
 745 
 746 bool SCCache::set_write_position(uint pos) {
 747   if (pos == _write_position) {
 748     return true;
 749   }
 750   if (_store_size < _write_position) {
 751     _store_size = _write_position; // Adjust during write
 752   }
 753   assert(pos < _store_size, "offset:%d >= file size:%d", pos, _store_size);
 754   _write_position = pos;
 755   return true;
 756 }
 757 
 758 static char align_buffer[256] = { 0 };
 759 
 760 bool SCCache::align_write() {
 761   // We do not execute code directly from the cache - it is copied out byte by byte first,
 762   // so no large alignment is required.
 763   uint padding = DATA_ALIGNMENT - (_write_position & (DATA_ALIGNMENT - 1));
 764   if (padding == DATA_ALIGNMENT) {
 765     return true;
 766   }
 767   uint n = write_bytes((const void*)&align_buffer, padding);
 768   if (n != padding) {
 769     return false;
 770   }
 771   log_trace(scc)("Adjust write alignment in AOT Code Cache");
 772   return true;
 773 }
 774 
 775 uint SCCache::write_bytes(const void* buffer, uint nbytes) {
 776   assert(for_write(), "Code Cache file is not created");
 777   if (nbytes == 0) {
 778     return 0;
 779   }
 780   uint new_position = _write_position + nbytes;
 781   if (new_position >= (uint)((char*)_store_entries - _store_buffer)) {
 782     log_warning(scc)("Failed to write %d bytes at offset %d to AOT Code Cache. Increase CachedCodeMaxSize.",
 783                      nbytes, _write_position);
 784     set_failed();
 785     exit_vm_on_store_failure();
 786     return 0;
 787   }
 788   copy_bytes((const char* )buffer, (address)(_store_buffer + _write_position), nbytes);
 789   log_trace(scc)("Wrote %d bytes at offset %d to AOT Code Cache", nbytes, _write_position);
 790   _write_position += nbytes;
 791   if (_store_size < _write_position) {
 792     _store_size = _write_position;
 793   }
 794   return nbytes;
 795 }
 796 
 797 void SCCEntry::update_method_for_writing() {
 798   if (_method != nullptr) {
 799     _method = CDSAccess::method_in_cached_code(_method);
 800   }
 801 }
 802 
 803 void SCCEntry::print(outputStream* st) const {
 804   st->print_cr(" SCA entry " INTPTR_FORMAT " [kind: %d, id: " UINT32_FORMAT_X_0 ", offset: %d, size: %d, comp_level: %d, comp_id: %d, decompiled: %d, %s%s%s%s%s]",
 805                p2i(this), (int)_kind, _id, _offset, _size, _comp_level, _comp_id, _decompile,
 806                (_not_entrant? "not_entrant" : "entrant"),
 807                (_loaded ? ", loaded" : ""),
 808                (_has_clinit_barriers ? ", has_clinit_barriers" : ""),
 809                (_for_preload ? ", for_preload" : ""),
 810                (_ignore_decompile ? ", ignore_decomp" : ""));
 811 }
 812 
 813 void* SCCEntry::operator new(size_t x, SCCache* cache) {
 814   return (void*)(cache->add_entry());
 815 }
 816 
 817 bool skip_preload(methodHandle mh) {
 818   if (!mh->method_holder()->is_loaded()) {
 819     return true;
 820   }
 821   DirectiveSet* directives = DirectivesStack::getMatchingDirective(mh, nullptr);
 822   if (directives->DontPreloadOption) {
 823     LogStreamHandle(Info, scc, init) log;
 824     if (log.is_enabled()) {
 825       log.print("Exclude preloading code for ");
 826       mh->print_value_on(&log);
 827     }
 828     return true;
 829   }
 830   return false;
 831 }
 832 
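     // Trigger compilation of all preload entries recorded in the cache header: for each entry
     // the holder class is linked if needed, the SCCEntry is attached to the Method, and a
     // CompLevel_full_optimization compilation is requested via CompileBroker with
     // CompileTask::Reason_Preload.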
 833 void SCCache::preload_startup_code(TRAPS) {
 834   if (CompilationPolicy::compiler_count(CompLevel_full_optimization) == 0) {
 835     // Since we reuse the CompileBroker API to install cached code, we need a JIT compiler for the
 836     // level we want (that is, CompLevel_full_optimization).
 837     return;
 838   }
 839   assert(_for_read, "sanity");
 840   uint count = _load_header->entries_count();
 841   if (_load_entries == nullptr) {
 842     // Read it
 843     _search_entries = (uint*)addr(_load_header->entries_offset()); // [id, index]
 844     _load_entries = (SCCEntry*)(_search_entries + 2 * count);
 845     log_info(scc, init)("Read %d entries table at offset %d from AOT Code Cache", count, _load_header->entries_offset());
 846   }
 847   uint preload_entries_count = _load_header->preload_entries_count();
 848   if (preload_entries_count > 0) {
 849     uint* entries_index = (uint*)addr(_load_header->preload_entries_offset());
 850     log_info(scc, init)("Load %d preload entries from AOT Code Cache", preload_entries_count);
 851     uint count = MIN2(preload_entries_count, SCLoadStop);
 852     for (uint i = SCLoadStart; i < count; i++) {
 853       uint index = entries_index[i];
 854       SCCEntry* entry = &(_load_entries[index]);
 855       if (entry->not_entrant()) {
 856         continue;
 857       }
 858       methodHandle mh(THREAD, entry->method());
 859       assert((mh.not_null() && MetaspaceShared::is_in_shared_metaspace((address)mh())), "sanity");
 860       if (skip_preload(mh)) {
 861         continue; // Exclude preloading for this method
 862       }
 863       assert(mh->method_holder()->is_loaded(), "");
 864       if (!mh->method_holder()->is_linked()) {
 865         assert(!HAS_PENDING_EXCEPTION, "");
 866         mh->method_holder()->link_class(THREAD);
 867         if (HAS_PENDING_EXCEPTION) {
 868           LogStreamHandle(Info, scc) log;
 869           if (log.is_enabled()) {
 870             ResourceMark rm;
 871             log.print("Linkage failed for %s: ", mh->method_holder()->external_name());
 872             THREAD->pending_exception()->print_value_on(&log);
 873             if (log_is_enabled(Debug, scc)) {
 874               THREAD->pending_exception()->print_on(&log);
 875             }
 876           }
 877           CLEAR_PENDING_EXCEPTION;
 878         }
 879       }
 880       if (mh->scc_entry() != nullptr) {
 881         // A second C2 compilation of the same method could happen for
 882         // different reasons without marking the first entry as not entrant.
 883         continue; // Keep old entry to avoid issues
 884       }
 885       mh->set_scc_entry(entry);
 886       CompileBroker::compile_method(mh, InvocationEntryBci, CompLevel_full_optimization, methodHandle(), 0, false, CompileTask::Reason_Preload, CHECK);
 887     }
 888   }
 889 }
 890 
 891 static bool check_entry(SCCEntry::Kind kind, uint id, uint comp_level, uint decomp, SCCEntry* entry) {
 892   if (entry->kind() == kind) {
 893     assert(entry->id() == id, "sanity");
 894     if (kind != SCCEntry::Code || (!entry->not_entrant() && !entry->has_clinit_barriers() &&
 895                                   (entry->comp_level() == comp_level) &&
 896                                   (entry->ignore_decompile() || entry->decompile() == decomp))) {
 897       return true; // Found
 898     }
 899   }
 900   return false;
 901 }
 902 
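     // Look up an entry by (kind, id, comp_level, decompile count). The search table is an
     // array of (id, index) pairs sorted by id: a binary search locates one match by id, then
     // a short linear scan of the neighboring pairs with the same id finds the entry whose
     // kind, compilation level and decompile count satisfy check_entry().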
 903 SCCEntry* SCCache::find_entry(SCCEntry::Kind kind, uint id, uint comp_level, uint decomp) {
 904   assert(_for_read, "sanity");
 905   uint count = _load_header->entries_count();
 906   if (_load_entries == nullptr) {
 907     // Read it
 908     _search_entries = (uint*)addr(_load_header->entries_offset()); // [id, index]
 909     _load_entries = (SCCEntry*)(_search_entries + 2 * count);
 910     log_info(scc, init)("Read %d entries table at offset %d from AOT Code Cache", count, _load_header->entries_offset());
 911   }
 912   // Binary search
 913   int l = 0;
 914   int h = count - 1;
 915   while (l <= h) {
 916     int mid = (l + h) >> 1;
 917     int ix = mid * 2;
 918     uint is = _search_entries[ix];
 919     if (is == id) {
 920       int index = _search_entries[ix + 1];
 921       SCCEntry* entry = &(_load_entries[index]);
 922       if (check_entry(kind, id, comp_level, decomp, entry)) {
 923         return entry; // Found
 924       }
 925       // Linear search around the match (could be the same nmethod with a different decompile count)
 926       for (int i = mid - 1; i >= l; i--) { // search back
 927         ix = i * 2;
 928         is = _search_entries[ix];
 929         if (is != id) {
 930           break;
 931         }
 932         index = _search_entries[ix + 1];
 933         SCCEntry* entry = &(_load_entries[index]);
 934         if (check_entry(kind, id, comp_level, decomp, entry)) {
 935           return entry; // Found
 936         }
 937       }
 938       for (int i = mid + 1; i <= h; i++) { // search forward
 939         ix = i * 2;
 940         is = _search_entries[ix];
 941         if (is != id) {
 942           break;
 943         }
 944         index = _search_entries[ix + 1];
 945         SCCEntry* entry = &(_load_entries[index]);
 946         if (check_entry(kind, id, comp_level, decomp, entry)) {
 947           return entry; // Found
 948         }
 949       }
 950       break; // Not found match (different decompile count or not_entrant state).
 951     } else if (is < id) {
 952       l = mid + 1;
 953     } else {
 954       h = mid - 1;
 955     }
 956   }
 957   return nullptr;
 958 }
 959 
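     // Mark an entry (and any chained clinit-barrier variants reachable via next()) as not
     // entrant so it will not be used again; the entry data itself stays in the cache.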
 960 void SCCache::invalidate_entry(SCCEntry* entry) {
 961   assert(entry != nullptr, "all entries should be read already");
 962   if (entry->not_entrant()) {
 963     return; // Someone invalidated it already
 964   }
 965 #ifdef ASSERT
 966   bool found = false;
 967   if (_for_read) {
 968     uint count = _load_header->entries_count();
 969     uint i = 0;
 970     for(; i < count; i++) {
 971       if (entry == &(_load_entries[i])) {
 972         break;
 973       }
 974     }
 975     found = (i < count);
 976   }
 977   if (!found && _for_write) {
 978     uint count = _store_entries_cnt;
 979     uint i = 0;
 980     for(; i < count; i++) {
 981       if (entry == &(_store_entries[i])) {
 982         break;
 983       }
 984     }
 985     found = (i < count);
 986   }
 987   assert(found, "entry should exist");
 988 #endif
 989   entry->set_not_entrant();
 990   {
 991     uint name_offset = entry->offset() + entry->name_offset();
 992     const char* name;
 993     if (SCCache::is_loaded(entry)) {
 994       name = _load_buffer + name_offset;
 995     } else {
 996       name = _store_buffer + name_offset;
 997     }
 998     uint level   = entry->comp_level();
 999     uint comp_id = entry->comp_id();
1000     uint decomp  = entry->decompile();
1001     bool clinit_brs = entry->has_clinit_barriers();
1002     log_info(scc, nmethod)("Invalidated entry for '%s' (comp_id %d, comp_level %d, decomp: %d, hash: " UINT32_FORMAT_X_0 "%s)",
1003                            name, comp_id, level, decomp, entry->id(), (clinit_brs ? ", has clinit barriers" : ""));
1004   }
1005   if (entry->next() != nullptr) {
1006     entry = entry->next();
1007     assert(entry->has_clinit_barriers(), "expecting only such entries here");
1008     invalidate_entry(entry);
1009   }
1010 }
1011 
1012 static int uint_cmp(const void *i, const void *j) {
1013   uint a = *(uint *)i;
1014   uint b = *(uint *)j;
1015   return a > b ? 1 : a < b ? -1 : 0;
1016 }
1017 
1018 AOTCodeStats AOTCodeStats::add_cached_code_stats(AOTCodeStats stats1, AOTCodeStats stats2) {
1019   AOTCodeStats result;
1020   for (int kind = SCCEntry::None; kind < SCCEntry::Kind_count; kind++) {
1021     result.ccstats._kind_cnt[kind] = stats1.entry_count(kind) + stats2.entry_count(kind);
1022   }
1023 
1024   for (int lvl = CompLevel_none; lvl < AOTCompLevel_count; lvl++) {
1025     result.ccstats._nmethod_cnt[lvl] = stats1.nmethod_count(lvl) + stats2.nmethod_count(lvl);
1026   }
1027   result.ccstats._clinit_barriers_cnt = stats1.clinit_barriers_count() + stats2.clinit_barriers_count();
1028   return result;
1029 }
1030 
1031 void SCCache::log_stats_on_exit() {
1032   LogStreamHandle(Info, scc, exit) log;
1033   if (log.is_enabled()) {
1034     AOTCodeStats prev_stats;
1035     AOTCodeStats current_stats;
1036     AOTCodeStats total_stats;
1037     uint max_size = 0;
1038 
1039     uint load_count = (_load_header != nullptr) ? _load_header->entries_count() : 0;
1040 
1041     for (uint i = 0; i < load_count; i++) {
1042       prev_stats.collect_entry_stats(&_load_entries[i]);
1043       if (max_size < _load_entries[i].size()) {
1044         max_size = _load_entries[i].size();
1045       }
1046     }
1047     for (uint i = 0; i < _store_entries_cnt; i++) {
1048       current_stats.collect_entry_stats(&_store_entries[i]);
1049       if (max_size < _store_entries[i].size()) {
1050         max_size = _store_entries[i].size();
1051       }
1052     }
1053     total_stats = AOTCodeStats::add_cached_code_stats(prev_stats, current_stats);
1054 
1055     log.print_cr("Wrote %d SCCEntry entries (%u max size) to AOT Code Cache",
1056                  total_stats.total_count(), max_size);
1057     for (uint kind = SCCEntry::None; kind < SCCEntry::Kind_count; kind++) {
1058       if (total_stats.entry_count(kind) > 0) {
1059         log.print_cr("  %s: total=%u(old=%u+new=%u)",
1060                      sccentry_kind_name[kind], total_stats.entry_count(kind), prev_stats.entry_count(kind), current_stats.entry_count(kind));
1061         if (kind == SCCEntry::Code) {
1062           for (uint lvl = CompLevel_none; lvl < AOTCompLevel_count; lvl++) {
1063             if (total_stats.nmethod_count(lvl) > 0) {
1064               log.print_cr("    Tier %d: total=%u(old=%u+new=%u)",
1065                            lvl, total_stats.nmethod_count(lvl), prev_stats.nmethod_count(lvl), current_stats.nmethod_count(lvl));
1066             }
1067           }
1068         }
1069       }
1070     }
1071     log.print_cr("Total=%u(old=%u+new=%u)", total_stats.total_count(), prev_stats.total_count(), current_stats.total_count());
1072   }
1073 }
1074 
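     // Write the final cached code image into the CDS cached code region. The resulting layout
     // is: SCCHeader, code data for old (previously loaded) entries followed by newly stored
     // entries in compilation order, the C-string table, the preload entry index, the
     // (id, index) search table sorted by id, and finally the SCCEntry array.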
1075 bool SCCache::finish_write() {
1076   if (!align_write()) {
1077     return false;
1078   }
1079   uint strings_offset = _write_position;
1080   int strings_count = store_strings();
1081   if (strings_count < 0) {
1082     return false;
1083   }
1084   if (!align_write()) {
1085     return false;
1086   }
1087   uint strings_size = _write_position - strings_offset;
1088 
1089   uint entries_count = 0; // Number of entrant (useful) code entries
1090   uint entries_offset = _write_position;
1091 
1092   uint store_count = _store_entries_cnt;
1093   if (store_count > 0) {
1094     _cached_code_directory = CachedCodeDirectory::create();
1095     assert(_cached_code_directory != nullptr, "Sanity check");
1096 
1097     uint header_size = (uint)align_up(sizeof(SCCHeader),  DATA_ALIGNMENT);
1098     uint load_count = (_load_header != nullptr) ? _load_header->entries_count() : 0;
1099     uint code_count = store_count + load_count;
1100     uint search_count = code_count * 2;
1101     uint search_size = search_count * sizeof(uint);
1102     uint entries_size = (uint)align_up(code_count * sizeof(SCCEntry), DATA_ALIGNMENT); // In bytes
1103     uint preload_entries_cnt = 0;
1104     uint* preload_entries = NEW_C_HEAP_ARRAY(uint, code_count, mtCode);
1105     uint preload_entries_size = code_count * sizeof(uint);
1106     // _write_position should include code and strings
1107     uint code_alignment = code_count * DATA_ALIGNMENT; // We align_up code size when storing it.
1108     uint total_size = _write_position + _load_size + header_size +
1109                      code_alignment + search_size + preload_entries_size + entries_size;
1110 
1111     assert(total_size < max_aot_code_size(), "Required AOT code size (" UINT32_FORMAT " bytes) exceeds the cached code region size (" UINT32_FORMAT " bytes). Increase CachedCodeMaxSize.",
1112            total_size, max_aot_code_size());
1113 
1114     // Create ordered search table for entries [id, index];
1115     uint* search = NEW_C_HEAP_ARRAY(uint, search_count, mtCode);
1116 
1117     char* buffer = (char *)CDSAccess::allocate_from_code_cache(total_size + DATA_ALIGNMENT); // NEW_C_HEAP_ARRAY(char, total_size + DATA_ALIGNMENT, mtCode);
1118     char* start = align_up(buffer, DATA_ALIGNMENT);
1119     char* current = start + header_size; // Skip header
1120 
1121     SCCEntry* entries_address = _store_entries; // Pointer to latest entry
1122 
1123     // Add old entries first
1124     if (_for_read && (_load_header != nullptr)) {
1125       for(uint i = 0; i < load_count; i++) {
1126         if (_load_entries[i].load_fail()) {
1127           continue;
1128         }
1129         if (_load_entries[i].not_entrant()) {
1130           log_info(scc, exit)("Not entrant load entry id: %d, decomp: %d, hash: " UINT32_FORMAT_X_0, i, _load_entries[i].decompile(), _load_entries[i].id());
1131           if (_load_entries[i].for_preload()) {
1132             // Skip not entrant preload code:
1133             // we can't pre-load code which may have failing dependencies.
1134             continue;
1135           }
1136           _load_entries[i].set_entrant(); // Reset
1137         } else if (_load_entries[i].for_preload() && _load_entries[i].method() != nullptr) {
1138           // record entrant first version code for pre-loading
1139           preload_entries[preload_entries_cnt++] = entries_count;
1140         }
1141         {
1142           uint size = align_up(_load_entries[i].size(), DATA_ALIGNMENT);
1143           copy_bytes((_load_buffer + _load_entries[i].offset()), (address)current, size);
1144           _load_entries[i].set_offset(current - start); // New offset
1145           current += size;
1146           uint n = write_bytes(&(_load_entries[i]), sizeof(SCCEntry));
1147           if (n != sizeof(SCCEntry)) {
1148             FREE_C_HEAP_ARRAY(char, buffer);
1149             FREE_C_HEAP_ARRAY(uint, search);
1150             return false;
1151           }
1152           search[entries_count*2 + 0] = _load_entries[i].id();
1153           search[entries_count*2 + 1] = entries_count;
1154           entries_count++;
1155         }
1156       }
1157     }
1158     // SCCEntry entries were allocated in reverse order in the store buffer.
1159     // Process them in reverse so the code compiled first is cached first.
1160     for (int i = store_count - 1; i >= 0; i--) {
1161       if (entries_address[i].load_fail()) {
1162         continue;
1163       }
1164       if (entries_address[i].not_entrant()) {
1165         log_info(scc, exit)("Not entrant new entry comp_id: %d, comp_level: %d, decomp: %d, hash: " UINT32_FORMAT_X_0 "%s", entries_address[i].comp_id(), entries_address[i].comp_level(), entries_address[i].decompile(), entries_address[i].id(), (entries_address[i].has_clinit_barriers() ? ", has clinit barriers" : ""));
1166         if (entries_address[i].for_preload()) {
1167           // Skip not entrant preload code:
1168           // we can't pre-load code which may have failing dependencies.
1169           continue;
1170         }
1171         entries_address[i].set_entrant(); // Reset
1172       } else if (entries_address[i].for_preload() && entries_address[i].method() != nullptr) {
1173         // record entrant first version code for pre-loading
1174         preload_entries[preload_entries_cnt++] = entries_count;
1175       }
1176       {
1177         entries_address[i].set_next(nullptr); // clear pointers before storing data
1178         uint size = align_up(entries_address[i].size(), DATA_ALIGNMENT);
1179         copy_bytes((_store_buffer + entries_address[i].offset()), (address)current, size);
1180         entries_address[i].set_offset(current - start); // New offset
1181         entries_address[i].update_method_for_writing();
1182         current += size;
1183         uint n = write_bytes(&(entries_address[i]), sizeof(SCCEntry));
1184         if (n != sizeof(SCCEntry)) {
1185           FREE_C_HEAP_ARRAY(char, buffer);
1186           FREE_C_HEAP_ARRAY(uint, search);
1187           return false;
1188         }
1189         search[entries_count*2 + 0] = entries_address[i].id();
1190         search[entries_count*2 + 1] = entries_count;
1191         entries_count++;
1192       }
1193     }
1194 
1195     if (entries_count == 0) {
1196       log_info(scc, exit)("No entries written to AOT Code Cache");
1197       FREE_C_HEAP_ARRAY(char, buffer);
1198       FREE_C_HEAP_ARRAY(uint, search);
1199       return true; // Nothing to write
1200     }
1201     assert(entries_count <= (store_count + load_count), "%d > (%d + %d)", entries_count, store_count, load_count);
1202     // Write strings
1203     if (strings_count > 0) {
1204       copy_bytes((_store_buffer + strings_offset), (address)current, strings_size);
1205       strings_offset = (current - start); // New offset
1206       current += strings_size;
1207     }
1208     uint preload_entries_offset = (current - start);
1209     preload_entries_size = preload_entries_cnt * sizeof(uint);
1210     if (preload_entries_size > 0) {
1211       copy_bytes((const char*)preload_entries, (address)current, preload_entries_size);
1212       current += preload_entries_size;
1213       log_info(scc, exit)("Wrote %d preload entries to AOT Code Cache", preload_entries_cnt);
1214     }
1215     if (preload_entries != nullptr) {
1216       FREE_C_HEAP_ARRAY(uint, preload_entries);
1217     }
1218 
1219     uint new_entries_offset = (current - start); // New offset
1220     // Sort and store search table
1221     qsort(search, entries_count, 2*sizeof(uint), uint_cmp);
1222     search_size = 2 * entries_count * sizeof(uint);
1223     copy_bytes((const char*)search, (address)current, search_size);
1224     FREE_C_HEAP_ARRAY(uint, search);
1225     current += search_size;
1226 
1227     // Write entries
1228     entries_size = entries_count * sizeof(SCCEntry); // New size
1229     copy_bytes((_store_buffer + entries_offset), (address)current, entries_size);
1230     current += entries_size;
1231 
1232     log_stats_on_exit();
1233 
1234     uint size = (current - start);
1235     assert(size <= total_size, "%d > %d", size , total_size);
1236 
1237     // Finalize header
1238     SCCHeader* header = (SCCHeader*)start;
1239     header->init(size,
1240                  (uint)strings_count, strings_offset,
1241                  entries_count, new_entries_offset,
1242                  preload_entries_cnt, preload_entries_offset,
1243                  _use_meta_ptrs);
1244     log_info(scc, init)("Wrote SCCache header to AOT Code Cache");
1245     log_info(scc, exit)("Wrote %d bytes of data to AOT Code Cache", size);
1246 
1247     _cached_code_directory->set_aot_code_data(size, start);
1248   }
1249   return true;
1250 }
1251 
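     // Load a previously cached stub: find the SCCEntry::Stub entry by intrinsic id, verify
     // the saved name, copy the code bytes to 'start' and advance the code section end.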
1252 bool SCCache::load_stub(StubCodeGenerator* cgen, vmIntrinsicID id, const char* name, address start) {
1253   assert(start == cgen->assembler()->pc(), "wrong buffer");
1254   SCCache* cache = open_for_read();
1255   if (cache == nullptr) {
1256     return false;
1257   }
1258   SCCEntry* entry = cache->find_entry(SCCEntry::Stub, (uint)id);
1259   if (entry == nullptr) {
1260     return false;
1261   }
1262   uint entry_position = entry->offset();
1263   // Read name
1264   uint name_offset = entry->name_offset() + entry_position;
1265   uint name_size   = entry->name_size(); // Includes '\0'
1266   const char* saved_name = cache->addr(name_offset);
1267   if (strncmp(name, saved_name, (name_size - 1)) != 0) {
1268     log_warning(scc)("Saved stub's name '%s' is different from '%s' for id:%d", saved_name, name, (int)id);
1269     cache->set_failed();
1270     exit_vm_on_load_failure();
1271     return false;
1272   }
1273   log_info(scc,stubs)("Reading stub '%s' id:%d from AOT Code Cache", name, (int)id);
1274   // Read code
1275   uint code_offset = entry->code_offset() + entry_position;
1276   uint code_size   = entry->code_size();
1277   copy_bytes(cache->addr(code_offset), start, code_size);
1278   cgen->assembler()->code_section()->set_end(start + code_size);
1279   log_info(scc,stubs)("Read stub '%s' id:%d from AOT Code Cache", name, (int)id);
1280   return true;
1281 }
1282 
1283 bool SCCache::store_stub(StubCodeGenerator* cgen, vmIntrinsicID id, const char* name, address start) {
1284   SCCache* cache = open_for_write();
1285   if (cache == nullptr) {
1286     return false;
1287   }
1288   log_info(scc, stubs)("Writing stub '%s' id:%d to AOT Code Cache", name, (int)id);
1289   if (!cache->align_write()) {
1290     return false;
1291   }
1292 #ifdef ASSERT
1293   CodeSection* cs = cgen->assembler()->code_section();
1294   if (cs->has_locs()) {
1295     uint reloc_count = cs->locs_count();
1296     tty->print_cr("======== write stubs code section relocations [%d]:", reloc_count);
1297     // Collect additional data
1298     RelocIterator iter(cs);
1299     while (iter.next()) {
1300       switch (iter.type()) {
1301         case relocInfo::none:
1302           break;
1303         default: {
1304           iter.print_current_on(tty);
1305           fatal("stub's relocation %d unimplemented", (int)iter.type());
1306           break;
1307         }
1308       }
1309     }
1310   }
1311 #endif
1312   uint entry_position = cache->_write_position;
1313 
1314   // Write code
1315   uint code_offset = 0;
1316   uint code_size = cgen->assembler()->pc() - start;
1317   uint n = cache->write_bytes(start, code_size);
1318   if (n != code_size) {
1319     return false;
1320   }
1321   // Write name
1322   uint name_offset = cache->_write_position - entry_position;
1323   uint name_size = (uint)strlen(name) + 1; // Includes '\0'
1324   n = cache->write_bytes(name, name_size);
1325   if (n != name_size) {
1326     return false;
1327   }
1328   uint entry_size = cache->_write_position - entry_position;
1329   SCCEntry* entry = new(cache) SCCEntry(entry_position, entry_size, name_offset, name_size,
1330                                           code_offset, code_size, 0, 0,
1331                                           SCCEntry::Stub, (uint32_t)id);
1332   log_info(scc, stubs)("Wrote stub '%s' id:%d to AOT Code Cache", name, (int)id);
1333   return true;
1334 }
1335 
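     // Read a Klass reference from the cache stream. The record starts with a state word
     // (low bit: was-initialized flag, upper bits: array dimension), followed either by an
     // offset from SharedBaseAddress (when metadata pointers are used and the klass is shared)
     // or by a length-prefixed class name that is resolved through the SymbolTable and
     // SystemDictionary using the compiled method's class loader.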
1336 Klass* SCCReader::read_klass(const methodHandle& comp_method, bool shared) {
1337   uint code_offset = read_position();
1338   uint state = *(uint*)addr(code_offset);
1339   uint init_state = (state  & 1);
1340   uint array_dim  = (state >> 1);
1341   code_offset += sizeof(int);
1342   if (_cache->use_meta_ptrs() && shared) {
1343     uint klass_offset = *(uint*)addr(code_offset);
1344     code_offset += sizeof(uint);
1345     set_read_position(code_offset);
1346     Klass* k = (Klass*)((address)SharedBaseAddress + klass_offset);
1347     if (!MetaspaceShared::is_in_shared_metaspace((address)k)) {
1348       // Something changed in CDS
1349       set_lookup_failed();
1350       log_info(scc)("Lookup failed for shared klass: " INTPTR_FORMAT " is not in CDS ", p2i((address)k));
1351       return nullptr;
1352     }
1353     assert(k->is_klass(), "sanity");
1354     ResourceMark rm;
1355     if (k->is_instance_klass() && !InstanceKlass::cast(k)->is_loaded()) {
1356       set_lookup_failed();
1357       log_info(scc)("%d '%s' (L%d): Lookup failed for klass %s: not loaded",
1358                        compile_id(), comp_method->name_and_sig_as_C_string(), comp_level(), k->external_name());
1359       return nullptr;
1360     } else
1361     // Allow a not-initialized klass if it was also uninitialized during code caching, or for preload code
1362     if (k->is_instance_klass() && !InstanceKlass::cast(k)->is_initialized() && (init_state == 1) && !_preload) {
1363       set_lookup_failed();
1364       log_info(scc)("%d '%s' (L%d): Lookup failed for klass %s: not initialized",
1365                        compile_id(), comp_method->name_and_sig_as_C_string(), comp_level(), k->external_name());
1366       return nullptr;
1367     }
1368     if (array_dim > 0) {
1369       assert(k->is_instance_klass() || k->is_typeArray_klass(), "sanity check");
1370       Klass* ak = k->array_klass_or_null(array_dim);
1371       // FIXME: what would it take to create an array class on the fly?
1372 //      Klass* ak = k->array_klass(dim, JavaThread::current());
1373 //      guarantee(JavaThread::current()->pending_exception() == nullptr, "");
1374       if (ak == nullptr) {
1375         set_lookup_failed();
1376         log_info(scc)("%d (L%d): %d-dimension array klass lookup failed: %s",
1377                          compile_id(), comp_level(), array_dim, k->external_name());
             return nullptr;
1378       }
1379       log_info(scc)("%d (L%d): Klass lookup: %s (object array)", compile_id(), comp_level(), k->external_name());
1380       return ak;
1381     } else {
1382       log_info(scc)("%d (L%d): Shared klass lookup: %s",
1383                     compile_id(), comp_level(), k->external_name());
1384       return k;
1385     }
1386   }
1387   int name_length = *(int*)addr(code_offset);
1388   code_offset += sizeof(int);
1389   const char* dest = addr(code_offset);
1390   code_offset += name_length + 1;
1391   set_read_position(code_offset);
1392   TempNewSymbol klass_sym = SymbolTable::probe(&(dest[0]), name_length);
1393   if (klass_sym == nullptr) {
1394     set_lookup_failed();
1395     log_info(scc)("%d (L%d): Probe failed for class %s",
1396                      compile_id(), comp_level(), &(dest[0]));
1397     return nullptr;
1398   }
1399   // Use class loader of compiled method.
1400   Thread* thread = Thread::current();
1401   Handle loader(thread, comp_method->method_holder()->class_loader());
1402   Klass* k = SystemDictionary::find_instance_or_array_klass(thread, klass_sym, loader);
1403   assert(!thread->has_pending_exception(), "should not throw");
1404   if (k == nullptr && !loader.is_null()) {
1405     // Try default loader and domain
1406     k = SystemDictionary::find_instance_or_array_klass(thread, klass_sym, Handle());
1407     assert(!thread->has_pending_exception(), "should not throw");
1408   }
1409   if (k != nullptr) {
1410     // Allow a not-yet-initialized klass if it was also not initialized during code caching
1411     if (k->is_instance_klass() && !InstanceKlass::cast(k)->is_initialized() && (init_state == 1)) {
1412       set_lookup_failed();
1413       log_info(scc)("%d (L%d): Lookup failed for klass %s: not initialized", compile_id(), comp_level(), &(dest[0]));
1414       return nullptr;
1415     }
1416     log_info(scc)("%d (L%d): Klass lookup %s", compile_id(), comp_level(), k->external_name());
1417   } else {
1418     set_lookup_failed();
1419     log_info(scc)("%d (L%d): Lookup failed for class %s", compile_id(), comp_level(), &(dest[0]));
1420     return nullptr;
1421   }
1422   return k;
1423 }
1424 
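     // Reads a method reference written by SCCache::write_method. A CDS-shared method is recovered
     // from its offset against SharedBaseAddress and its holder is checked to be a loaded and linked
     // InstanceKlass in the shared metaspace. Otherwise the holder, name and signature strings are
     // probed in the SymbolTable, the holder is resolved through the SystemDictionary and the method
     // is looked up in it. Failures mark the lookup as failed and return nullptr.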
1425 Method* SCCReader::read_method(const methodHandle& comp_method, bool shared) {
1426   uint code_offset = read_position();
1427   if (_cache->use_meta_ptrs() && shared) {
1428     uint method_offset = *(uint*)addr(code_offset);
1429     code_offset += sizeof(uint);
1430     set_read_position(code_offset);
1431     Method* m = (Method*)((address)SharedBaseAddress + method_offset);
1432     if (!MetaspaceShared::is_in_shared_metaspace((address)m)) {
1433       // Something changed in CDS
1434       set_lookup_failed();
1435       log_info(scc)("Lookup failed for shared method: " INTPTR_FORMAT " is not in CDS ", p2i((address)m));
1436       return nullptr;
1437     }
1438     assert(m->is_method(), "sanity");
1439     ResourceMark rm;
1440     Klass* k = m->method_holder();
1441     if (!k->is_instance_klass()) {
1442       set_lookup_failed();
1443       log_info(scc)("%d '%s' (L%d): Lookup failed for holder %s: not instance klass",
1444                     compile_id(), comp_method->name_and_sig_as_C_string(), comp_level(), k->external_name());
1445       return nullptr;
1446     } else if (!MetaspaceShared::is_in_shared_metaspace((address)k)) {
1447       set_lookup_failed();
1448       log_info(scc)("%d '%s' (L%d): Lookup failed for holder %s: not in CDS",
1449                     compile_id(), comp_method->name_and_sig_as_C_string(), comp_level(), k->external_name());
1450       return nullptr;
1451     } else if (!InstanceKlass::cast(k)->is_loaded()) {
1452       set_lookup_failed();
1453       log_info(scc)("%d '%s' (L%d): Lookup failed for holder %s: not loaded",
1454                     compile_id(), comp_method->name_and_sig_as_C_string(), comp_level(), k->external_name());
1455       return nullptr;
1456     } else if (!InstanceKlass::cast(k)->is_linked()) {
1457       set_lookup_failed();
1458       log_info(scc)("%d '%s' (L%d): Lookup failed for holder %s: not linked%s",
1459                     compile_id(), comp_method->name_and_sig_as_C_string(), comp_level(), k->external_name(), (_preload ? " for code preload" : ""));
1460       return nullptr;
1461     }
1462     log_info(scc)("%d (L%d): Shared method lookup: %s",
1463                   compile_id(), comp_level(), m->name_and_sig_as_C_string());
1464     return m;
1465   }
1466   int holder_length = *(int*)addr(code_offset);
1467   code_offset += sizeof(int);
1468   int name_length = *(int*)addr(code_offset);
1469   code_offset += sizeof(int);
1470   int signat_length = *(int*)addr(code_offset);
1471   code_offset += sizeof(int);
1472 
1473   const char* dest = addr(code_offset);
1474   code_offset += holder_length + 1 + name_length + 1 + signat_length + 1;
1475   set_read_position(code_offset);
1476   TempNewSymbol klass_sym = SymbolTable::probe(&(dest[0]), holder_length);
1477   if (klass_sym == nullptr) {
1478     set_lookup_failed();
1479     log_info(scc)("%d (L%d): Probe failed for class %s", compile_id(), comp_level(), &(dest[0]));
1480     return nullptr;
1481   }
1482   // Use class loader of compiled method.
1483   Thread* thread = Thread::current();
1484   Handle loader(thread, comp_method->method_holder()->class_loader());
1485   Klass* k = SystemDictionary::find_instance_or_array_klass(thread, klass_sym, loader);
1486   assert(!thread->has_pending_exception(), "should not throw");
1487   if (k == nullptr && !loader.is_null()) {
1488     // Try default loader and domain
1489     k = SystemDictionary::find_instance_or_array_klass(thread, klass_sym, Handle());
1490     assert(!thread->has_pending_exception(), "should not throw");
1491   }
1492   if (k != nullptr) {
1493     if (!k->is_instance_klass()) {
1494       set_lookup_failed();
1495       log_info(scc)("%d (L%d): Lookup failed for holder %s: not instance klass",
1496                        compile_id(), comp_level(), &(dest[0]));
1497       return nullptr;
1498     } else if (!InstanceKlass::cast(k)->is_linked()) {
1499       set_lookup_failed();
1500       log_info(scc)("%d (L%d): Lookup failed for holder %s: not linked",
1501                        compile_id(), comp_level(), &(dest[0]));
1502       return nullptr;
1503     }
1504     log_info(scc)("%d (L%d): Holder lookup: %s", compile_id(), comp_level(), k->external_name());
1505   } else {
1506     set_lookup_failed();
1507     log_info(scc)("%d (L%d): Lookup failed for holder %s",
1508                   compile_id(), comp_level(), &(dest[0]));
1509     return nullptr;
1510   }
1511   TempNewSymbol name_sym = SymbolTable::probe(&(dest[holder_length + 1]), name_length);
1512   int pos = holder_length + 1 + name_length + 1;
1513   TempNewSymbol sign_sym = SymbolTable::probe(&(dest[pos]), signat_length);
1514   if (name_sym == nullptr) {
1515     set_lookup_failed();
1516     log_info(scc)("%d (L%d): Probe failed for method name %s",
1517                      compile_id(), comp_level(), &(dest[holder_length + 1]));
1518     return nullptr;
1519   }
1520   if (sign_sym == nullptr) {
1521     set_lookup_failed();
1522     log_info(scc)("%d (L%d): Probe failed for method signature %s",
1523                      compile_id(), comp_level(), &(dest[pos]));
1524     return nullptr;
1525   }
1526   Method* m = InstanceKlass::cast(k)->find_method(name_sym, sign_sym);
1527   if (m != nullptr) {
1528     ResourceMark rm;
1529     log_info(scc)("%d (L%d): Method lookup: %s", compile_id(), comp_level(), m->name_and_sig_as_C_string());
1530   } else {
1531     set_lookup_failed();
1532     log_info(scc)("%d (L%d): Lookup failed for method %s::%s%s",
1533                      compile_id(), comp_level(), &(dest[0]), &(dest[holder_length + 1]), &(dest[pos]));
1534     return nullptr;
1535   }
1536   return m;
1537 }
1538 
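     // Writes a klass reference in the format expected by SCCReader::read_klass: a DataKind tag,
     // a packed state word (initialization bit plus array dimension) and either the klass' offset
     // from SharedBaseAddress (Klass_Shared) or its UTF-8 name (Klass). Object array klasses are
     // stored via their bottom klass plus the dimension. Klasses from non-builtin class loaders
     // and hidden classes cannot be cached.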
1539 bool SCCache::write_klass(Klass* klass) {
1540   bool can_use_meta_ptrs = _use_meta_ptrs;
1541   uint array_dim = 0;
1542   if (klass->is_objArray_klass()) {
1543     array_dim = ObjArrayKlass::cast(klass)->dimension();
1544     klass     = ObjArrayKlass::cast(klass)->bottom_klass(); // overwrites klass
1545   }
1546   uint init_state = 0;
1547   if (klass->is_instance_klass()) {
1548     InstanceKlass* ik = InstanceKlass::cast(klass);
1549     ClassLoaderData* cld = ik->class_loader_data();
1550     if (!cld->is_builtin_class_loader_data()) {
1551       set_lookup_failed();
1552       return false;
1553     }
1554     if (_for_preload && !CDSAccess::can_generate_cached_code(ik)) {
1555       _for_preload = false;
1556       // Bail out if the code has clinit barriers:
1557       // the method will be recompiled without them in any case
1558       if (_has_clinit_barriers) {
1559         set_lookup_failed();
1560         return false;
1561       }
1562       can_use_meta_ptrs = false;
1563     }
1564     init_state = (ik->is_initialized() ? 1 : 0);
1565   }
1566   ResourceMark rm;
1567   uint state = (array_dim << 1) | (init_state & 1);
1568   if (can_use_meta_ptrs && CDSAccess::can_generate_cached_code(klass)) {
1569     DataKind kind = DataKind::Klass_Shared;
1570     uint n = write_bytes(&kind, sizeof(int));
1571     if (n != sizeof(int)) {
1572       return false;
1573     }
1574     // Record state of instance klass initialization.
1575     n = write_bytes(&state, sizeof(int));
1576     if (n != sizeof(int)) {
1577       return false;
1578     }
1579     uint klass_offset = CDSAccess::delta_from_shared_address_base((address)klass);
1580     n = write_bytes(&klass_offset, sizeof(uint));
1581     if (n != sizeof(uint)) {
1582       return false;
1583     }
1584     log_info(scc)("%d (L%d): Wrote shared klass: %s%s%s @ 0x%08x", compile_id(), comp_level(), klass->external_name(),
1585                   (!klass->is_instance_klass() ? "" : (init_state == 1 ? " (initialized)" : " (not-initialized)")),
1586                   (array_dim > 0 ? " (object array)" : ""),
1587                   klass_offset);
1588     return true;
1589   }
1590   // Bail out if the code has clinit barriers:
1591   // the method will be recompiled without them in any case
1592   if (_for_preload && _has_clinit_barriers) {
1593     set_lookup_failed();
1594     return false;
1595   }
1596   _for_preload = false;
1597   log_info(scc,cds)("%d (L%d): Not shared klass: %s", compile_id(), comp_level(), klass->external_name());
1598   if (klass->is_hidden()) { // Skip such nmethod
1599     set_lookup_failed();
1600     return false;
1601   }
1602   DataKind kind = DataKind::Klass;
1603   uint n = write_bytes(&kind, sizeof(int));
1604   if (n != sizeof(int)) {
1605     return false;
1606   }
1607   // Record state of instance klass initialization.
1608   n = write_bytes(&state, sizeof(int));
1609   if (n != sizeof(int)) {
1610     return false;
1611   }
1612   Symbol* name = klass->name();
1613   int name_length = name->utf8_length();
1614   int total_length = name_length + 1;
1615   char* dest = NEW_RESOURCE_ARRAY(char, total_length);
1616   name->as_C_string(dest, total_length);
1617   dest[total_length - 1] = '\0';
1618   LogTarget(Info, scc, loader) log;
1619   if (log.is_enabled()) {
1620     LogStream ls(log);
1621     oop loader = klass->class_loader();
1622     oop domain = klass->protection_domain();
1623     ls.print("Class %s loader: ", dest);
1624     if (loader == nullptr) {
1625       ls.print("nullptr");
1626     } else {
1627       loader->print_value_on(&ls);
1628     }
1629     ls.print(" domain: ");
1630     if (domain == nullptr) {
1631       ls.print("nullptr");
1632     } else {
1633       domain->print_value_on(&ls);
1634     }
1635     ls.cr();
1636   }
1637   n = write_bytes(&name_length, sizeof(int));
1638   if (n != sizeof(int)) {
1639     return false;
1640   }
1641   n = write_bytes(dest, total_length);
1642   if (n != (uint)total_length) {
1643     return false;
1644   }
1645   log_info(scc)("%d (L%d): Wrote klass: %s%s%s",
1646                 compile_id(), comp_level(),
1647                 dest, (!klass->is_instance_klass() ? "" : (init_state == 1 ? " (initialized)" : " (not-initialized)")),
1648                 (array_dim > 0 ? " (object array)" : ""));
1649   return true;
1650 }
1651 
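     // Writes a method reference in the format expected by SCCReader::read_method: a DataKind tag
     // followed by either the method's offset from SharedBaseAddress (Method_Shared) or the lengths
     // and UTF-8 strings of its holder, name and signature (Method). Methods of non-builtin class
     // loaders and hidden methods cannot be cached.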
1652 bool SCCache::write_method(Method* method) {
1653   bool can_use_meta_ptrs = _use_meta_ptrs;
1654   Klass* klass = method->method_holder();
1655   if (klass->is_instance_klass()) {
1656     InstanceKlass* ik = InstanceKlass::cast(klass);
1657     ClassLoaderData* cld = ik->class_loader_data();
1658     if (!cld->is_builtin_class_loader_data()) {
1659       set_lookup_failed();
1660       return false;
1661     }
1662     if (_for_preload && !CDSAccess::can_generate_cached_code(ik)) {
1663       _for_preload = false;
1664       // Bail out if the code has clinit barriers:
1665       // the method will be recompiled without them in any case
1666       if (_has_clinit_barriers) {
1667         set_lookup_failed();
1668         return false;
1669       }
1670       can_use_meta_ptrs = false;
1671     }
1672   }
1673   ResourceMark rm;
1674   if (can_use_meta_ptrs && CDSAccess::can_generate_cached_code(method)) {
1675     DataKind kind = DataKind::Method_Shared;
1676     uint n = write_bytes(&kind, sizeof(int));
1677     if (n != sizeof(int)) {
1678       return false;
1679     }
1680     uint method_offset = CDSAccess::delta_from_shared_address_base((address)method);
1681     n = write_bytes(&method_offset, sizeof(uint));
1682     if (n != sizeof(uint)) {
1683       return false;
1684     }
1685     log_info(scc)("%d (L%d): Wrote shared method: %s @ 0x%08x", compile_id(), comp_level(), method->name_and_sig_as_C_string(), method_offset);
1686     return true;
1687   }
1688   // Bail out if the code has clinit barriers:
1689   // the method will be recompiled without them in any case
1690   if (_for_preload && _has_clinit_barriers) {
1691     set_lookup_failed();
1692     return false;
1693   }
1694   _for_preload = false;
1695   log_info(scc,cds)("%d (L%d): Not shared method: %s", compile_id(), comp_level(), method->name_and_sig_as_C_string());
1696   if (method->is_hidden()) { // Skip such nmethod
1697     set_lookup_failed();
1698     return false;
1699   }
1700   DataKind kind = DataKind::Method;
1701   uint n = write_bytes(&kind, sizeof(int));
1702   if (n != sizeof(int)) {
1703     return false;
1704   }
1705   Symbol* name   = method->name();
1706   Symbol* holder = method->klass_name();
1707   Symbol* signat = method->signature();
1708   int name_length   = name->utf8_length();
1709   int holder_length = holder->utf8_length();
1710   int signat_length = signat->utf8_length();
1711 
1712   // Write sizes and strings
1713   int total_length = holder_length + 1 + name_length + 1 + signat_length + 1;
1714   char* dest = NEW_RESOURCE_ARRAY(char, total_length);
1715   holder->as_C_string(dest, total_length);
1716   dest[holder_length] = '\0';
1717   int pos = holder_length + 1;
1718   name->as_C_string(&(dest[pos]), (total_length - pos));
1719   pos += name_length;
1720   dest[pos++] = '\0';
1721   signat->as_C_string(&(dest[pos]), (total_length - pos));
1722   dest[total_length - 1] = '\0';
1723 
1724   LogTarget(Info, scc, loader) log;
1725   if (log.is_enabled()) {
1726     LogStream ls(log);
1727     oop loader = klass->class_loader();
1728     oop domain = klass->protection_domain();
1729     ls.print("Holder %s loader: ", dest);
1730     if (loader == nullptr) {
1731       ls.print("nullptr");
1732     } else {
1733       loader->print_value_on(&ls);
1734     }
1735     ls.print(" domain: ");
1736     if (domain == nullptr) {
1737       ls.print("nullptr");
1738     } else {
1739       domain->print_value_on(&ls);
1740     }
1741     ls.cr();
1742   }
1743 
1744   n = write_bytes(&holder_length, sizeof(int));
1745   if (n != sizeof(int)) {
1746     return false;
1747   }
1748   n = write_bytes(&name_length, sizeof(int));
1749   if (n != sizeof(int)) {
1750     return false;
1751   }
1752   n = write_bytes(&signat_length, sizeof(int));
1753   if (n != sizeof(int)) {
1754     return false;
1755   }
1756   n = write_bytes(dest, total_length);
1757   if (n != (uint)total_length) {
1758     return false;
1759   }
1760   dest[holder_length] = ' ';
1761   dest[holder_length + 1 + name_length] = ' ';
1762   log_info(scc)("%d (L%d): Wrote method: %s", compile_id(), comp_level(), dest);
1763   return true;
1764 }
1765 
1766 // Repair the pc-relative information in the code after it has been loaded
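     // For every code section the cached data is: the relocation count, the _locs_point offset,
     // the raw relocInfo records, and one uint of additional data per relocation (an address table
     // id for call/trampoline/runtime-call/external-word targets, or an index flagging an immediate
     // oop/metadata that is streamed separately). See SCCache::write_relocations for the write side.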
1767 bool SCCReader::read_relocations(CodeBuffer* buffer, CodeBuffer* orig_buffer,
1768                                  OopRecorder* oop_recorder, ciMethod* target) {
1769   bool success = true;
1770   for (int i = 0; i < (int)CodeBuffer::SECT_LIMIT; i++) {
1771     uint code_offset = read_position();
1772     int reloc_count = *(int*)addr(code_offset);
1773     code_offset += sizeof(int);
1774     if (reloc_count == 0) {
1775       set_read_position(code_offset);
1776       continue;
1777     }
1778     // Read _locs_point (as offset from start)
1779     int locs_point_off = *(int*)addr(code_offset);
1780     code_offset += sizeof(int);
1781     uint reloc_size = reloc_count * sizeof(relocInfo);
1782     CodeSection* cs  = buffer->code_section(i);
1783     if (cs->locs_capacity() < reloc_count) {
1784       cs->expand_locs(reloc_count);
1785     }
1786     relocInfo* reloc_start = cs->locs_start();
1787     copy_bytes(addr(code_offset), (address)reloc_start, reloc_size);
1788     code_offset += reloc_size;
1789     cs->set_locs_end(reloc_start + reloc_count);
1790     cs->set_locs_point(cs->start() + locs_point_off);
1791 
1792     // Read additional relocation data: uint per relocation
1793     uint  data_size  = reloc_count * sizeof(uint);
1794     uint* reloc_data = (uint*)addr(code_offset);
1795     code_offset += data_size;
1796     set_read_position(code_offset);
1797     LogStreamHandle(Info, scc, reloc) log;
1798     if (log.is_enabled()) {
1799       log.print_cr("======== read code section %d relocations [%d]:", i, reloc_count);
1800     }
1801     RelocIterator iter(cs);
1802     int j = 0;
1803     while (iter.next()) {
1804       switch (iter.type()) {
1805         case relocInfo::none:
1806           break;
1807         case relocInfo::oop_type: {
1808           VM_ENTRY_MARK;
1809           oop_Relocation* r = (oop_Relocation*)iter.reloc();
1810           if (r->oop_is_immediate()) {
1811             assert(reloc_data[j] == (uint)j, "should be");
1812             methodHandle comp_method(THREAD, target->get_Method());
1813             jobject jo = read_oop(THREAD, comp_method);
1814             if (lookup_failed()) {
1815               success = false;
1816               break;
1817             }
1818             r->set_value((address)jo);
1819           } else if (false) {
1820             // Get already updated value from OopRecorder.
1821             assert(oop_recorder != nullptr, "sanity");
1822             int index = r->oop_index();
1823             jobject jo = oop_recorder->oop_at(index);
1824             oop obj = JNIHandles::resolve(jo);
1825             r->set_value(*reinterpret_cast<address*>(&obj));
1826           }
1827           break;
1828         }
1829         case relocInfo::metadata_type: {
1830           VM_ENTRY_MARK;
1831           metadata_Relocation* r = (metadata_Relocation*)iter.reloc();
1832           Metadata* m;
1833           if (r->metadata_is_immediate()) {
1834             assert(reloc_data[j] == (uint)j, "should be");
1835             methodHandle comp_method(THREAD, target->get_Method());
1836             m = read_metadata(comp_method);
1837             if (lookup_failed()) {
1838               success = false;
1839               break;
1840             }
1841           } else {
1842             // Get already updated value from OopRecorder.
1843             assert(oop_recorder != nullptr, "sanity");
1844             int index = r->metadata_index();
1845             m = oop_recorder->metadata_at(index);
1846           }
1847           r->set_value((address)m);
1848           break;
1849         }
1850         case relocInfo::virtual_call_type:   // Fall through. They all call resolve_*_call blobs.
1851         case relocInfo::opt_virtual_call_type:
1852         case relocInfo::static_call_type: {
1853           address dest = _cache->address_for_id(reloc_data[j]);
1854           if (dest != (address)-1) {
1855             ((CallRelocation*)iter.reloc())->set_destination(dest);
1856           }
1857           break;
1858         }
1859         case relocInfo::trampoline_stub_type: {
1860           address dest = _cache->address_for_id(reloc_data[j]);
1861           if (dest != (address)-1) {
1862             ((trampoline_stub_Relocation*)iter.reloc())->set_destination(dest);
1863           }
1864           break;
1865         }
1866         case relocInfo::static_stub_type:
1867           break;
1868         case relocInfo::runtime_call_type: {
1869           address dest = _cache->address_for_id(reloc_data[j]);
1870           if (dest != (address)-1) {
1871             ((CallRelocation*)iter.reloc())->set_destination(dest);
1872           }
1873           break;
1874         }
1875         case relocInfo::runtime_call_w_cp_type:
1876           fatal("runtime_call_w_cp_type unimplemented");
1877           //address destination = iter.reloc()->value();
1878           break;
1879         case relocInfo::external_word_type: {
1880           address target = _cache->address_for_id(reloc_data[j]);
1881           // Add external address to global table
1882           int index = ExternalsRecorder::find_index(target);
1883           // Update index in relocation
1884           Relocation::add_jint(iter.data(), index);
1885           external_word_Relocation* reloc = (external_word_Relocation*)iter.reloc();
1886           assert(reloc->target() == target, "sanity");
1887           reloc->set_value(target); // Patch address in the code
1888           iter.reloc()->fix_relocation_after_move(orig_buffer, buffer);
1889           break;
1890         }
1891         case relocInfo::internal_word_type:
1892           iter.reloc()->fix_relocation_after_move(orig_buffer, buffer);
1893           break;
1894         case relocInfo::section_word_type:
1895           iter.reloc()->fix_relocation_after_move(orig_buffer, buffer);
1896           break;
1897         case relocInfo::poll_type:
1898           break;
1899         case relocInfo::poll_return_type:
1900           break;
1901         case relocInfo::post_call_nop_type:
1902           break;
1903         case relocInfo::entry_guard_type:
1904           break;
1905         default:
1906           fatal("relocation %d unimplemented", (int)iter.type());
1907           break;
1908       }
1909       if (success && log.is_enabled()) {
1910         iter.print_current_on(&log);
1911       }
1912       j++;
1913     }
1914     assert(j <= (int)reloc_count, "sanity");
1915   }
1916   return success;
1917 }
1918 
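     // Copies the cached code sections into 'buffer'. The cached data starts with an array of
     // SCCodeSection descriptors (size, original start address, offset); the original start address
     // is used to initialize the matching section of the fake 'orig_buffer' so that relocations can
     // compute the address delta to the new buffer.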
1919 bool SCCReader::read_code(CodeBuffer* buffer, CodeBuffer* orig_buffer, uint code_offset) {
1920   assert(code_offset == align_up(code_offset, DATA_ALIGNMENT), "%d not aligned to %d", code_offset, DATA_ALIGNMENT);
1921   SCCodeSection* scc_cs = (SCCodeSection*)addr(code_offset);
1922   for (int i = 0; i < (int)CodeBuffer::SECT_LIMIT; i++) {
1923     CodeSection* cs = buffer->code_section(i);
1924     // Read original section size and address.
1925     uint orig_size = scc_cs[i]._size;
1926     log_debug(scc)("======== read code section %d [%d]:", i, orig_size);
1927     uint orig_size_align = align_up(orig_size, DATA_ALIGNMENT);
1928     if (i != (int)CodeBuffer::SECT_INSTS) {
1929       buffer->initialize_section_size(cs, orig_size_align);
1930     }
1931     if (orig_size_align > (uint)cs->capacity()) { // Will not fit
1932       log_info(scc)("%d (L%d): original code section %d size %d > current capacity %d",
1933                        compile_id(), comp_level(), i, orig_size, cs->capacity());
1934       return false;
1935     }
1936     if (orig_size == 0) {
1937       assert(cs->size() == 0, "should match");
1938       continue;  // skip trivial section
1939     }
1940     address orig_start = scc_cs[i]._origin_address;
1941 
1942     // Populate fake original buffer (no code allocation in CodeCache).
1943     // It is used by relocations to compute the delta between the original and new section addresses.
1944     CodeSection* orig_cs = orig_buffer->code_section(i);
1945     assert(!orig_cs->is_allocated(), "This %d section should not be set", i);
1946     orig_cs->initialize(orig_start, orig_size);
1947 
1948     // Load code to new buffer.
1949     address code_start = cs->start();
1950     copy_bytes(addr(scc_cs[i]._offset + code_offset), code_start, orig_size_align);
1951     cs->set_end(code_start + orig_size);
1952   }
1953 
1954   return true;
1955 }
1956 
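     // Loads a generated adapter from the AOT Code Cache: looks up the SCCEntry of kind Adapter
     // stored under 'id' and lets SCCReader::compile_adapter restore the code, the relocations and
     // the four offsets recorded with the adapter. Returns false if the cache or the entry is not
     // available.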
1957 bool SCCache::load_adapter(CodeBuffer* buffer, uint32_t id, const char* name, uint32_t offsets[4]) {
1958 #ifdef ASSERT
1959   LogStreamHandle(Debug, scc, stubs) log;
1960   if (log.is_enabled()) {
1961     FlagSetting fs(PrintRelocations, true);
1962     buffer->print_on(&log);
1963   }
1964 #endif
1965   SCCache* cache = open_for_read();
1966   if (cache == nullptr) {
1967     return false;
1968   }
1969   log_info(scc, stubs)("Looking up adapter %s (0x%x) in AOT Code Cache", name, id);
1970   SCCEntry* entry = cache->find_entry(SCCEntry::Adapter, id);
1971   if (entry == nullptr) {
1972     return false;
1973   }
1974   SCCReader reader(cache, entry, nullptr);
1975   return reader.compile_adapter(buffer, name, offsets);
1976 }
1977 bool SCCReader::compile_adapter(CodeBuffer* buffer, const char* name, uint32_t offsets[4]) {
1978   uint entry_position = _entry->offset();
1979   // Read name
1980   uint name_offset = entry_position + _entry->name_offset();
1981   uint name_size = _entry->name_size(); // Includes '\0'
1982   const char* stored_name = addr(name_offset);
1983   log_info(scc, stubs)("%d (L%d): Reading adapter '%s' from AOT Code Cache",
1984                        compile_id(), comp_level(), name);
1985   if (strncmp(stored_name, name, (name_size - 1)) != 0) {
1986     log_warning(scc)("%d (L%d): Saved adapter's name '%s' is different from '%s'",
1987                      compile_id(), comp_level(), stored_name, name);
1988     // n.b. this is not fatal -- we have just seen a hash id clash
1989     // so no need to call cache->set_failed()
1990     return false;
1991   }
1992   // Create fake original CodeBuffer
1993   CodeBuffer orig_buffer(name);
1994   // Read code
1995   uint code_offset = entry_position + _entry->code_offset();
1996   if (!read_code(buffer, &orig_buffer, code_offset)) {
1997     return false;
1998   }
1999   // Read relocations
2000   uint reloc_offset = entry_position + _entry->reloc_offset();
2001   set_read_position(reloc_offset);
2002   if (!read_relocations(buffer, &orig_buffer, nullptr, nullptr)) {
2003     return false;
2004   }
2005   uint offset = read_position();
2006   int offsets_count = *(int*)addr(offset);
2007   offset += sizeof(int);
2008   assert(offsets_count == 4, "wrong caller expectations");
2009   set_read_position(offset);
2010   for (int i = 0; i < offsets_count; i++) {
2011     uint32_t arg = *(uint32_t*)addr(offset);
2012     offset += sizeof(uint32_t);
2013     log_debug(scc, stubs)("%d (L%d): Reading adapter '%s' offsets[%d] == 0x%x from AOT Code Cache",
2014                          compile_id(), comp_level(), stored_name, i, arg);
2015     offsets[i] = arg;
2016   }
2017   log_debug(scc, stubs)("%d (L%d): Read adapter '%s' with %d offsets from AOT Code Cache",
2018                        compile_id(), comp_level(), stored_name, offsets_count);
2019 #ifdef ASSERT
2020   LogStreamHandle(Debug, scc, stubs) log;
2021   if (log.is_enabled()) {
2022     FlagSetting fs(PrintRelocations, true);
2023     buffer->print_on(&log);
2024     buffer->decode();
2025   }
2026 #endif
2027   // mark entry as loaded
2028   ((SCCEntry *)_entry)->set_loaded();
2029   return true;
2030 }
2031 
2032 bool SCCache::load_exception_blob(CodeBuffer* buffer, int* pc_offset) {
2033 #ifdef ASSERT
2034   LogStreamHandle(Debug, scc, nmethod) log;
2035   if (log.is_enabled()) {
2036     FlagSetting fs(PrintRelocations, true);
2037     buffer->print_on(&log);
2038   }
2039 #endif
2040   SCCache* cache = open_for_read();
2041   if (cache == nullptr) {
2042     return false;
2043   }
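       // The exception blob is stored under the fixed id 999 (see store_exception_blob).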
2044   SCCEntry* entry = cache->find_entry(SCCEntry::Blob, 999);
2045   if (entry == nullptr) {
2046     return false;
2047   }
2048   SCCReader reader(cache, entry, nullptr);
2049   return reader.compile_blob(buffer, pc_offset);
2050 }
2051 
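     // Restores a cached blob (currently only the exception blob) into 'buffer': reads the recorded
     // pc_offset, verifies that the stored name matches the buffer name (a mismatch fails the whole
     // cache), then reads the code sections and relocations.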
2052 bool SCCReader::compile_blob(CodeBuffer* buffer, int* pc_offset) {
2053   uint entry_position = _entry->offset();
2054 
2055   // Read pc_offset
2056   *pc_offset = *(int*)addr(entry_position);
2057 
2058   // Read name
2059   uint name_offset = entry_position + _entry->name_offset();
2060   uint name_size = _entry->name_size(); // Includes '\0'
2061   const char* name = addr(name_offset);
2062 
2063   log_info(scc, stubs)("%d (L%d): Reading blob '%s' with pc_offset %d from AOT Code Cache",
2064                        compile_id(), comp_level(), name, *pc_offset);
2065 
2066   if (strncmp(buffer->name(), name, (name_size - 1)) != 0) {
2067     log_warning(scc)("%d (L%d): Saved blob's name '%s' is different from '%s'",
2068                      compile_id(), comp_level(), name, buffer->name());
2069     ((SCCache*)_cache)->set_failed();
2070     exit_vm_on_load_failure();
2071     return false;
2072   }
2073 
2074   // Create fake original CodeBuffer
2075   CodeBuffer orig_buffer(name);
2076 
2077   // Read code
2078   uint code_offset = entry_position + _entry->code_offset();
2079   if (!read_code(buffer, &orig_buffer, code_offset)) {
2080     return false;
2081   }
2082 
2083   // Read relocations
2084   uint reloc_offset = entry_position + _entry->reloc_offset();
2085   set_read_position(reloc_offset);
2086   if (!read_relocations(buffer, &orig_buffer, nullptr, nullptr)) {
2087     return false;
2088   }
2089 
2090   log_info(scc, stubs)("%d (L%d): Read blob '%s' from AOT Code Cache",
2091                        compile_id(), comp_level(), name);
2092 #ifdef ASSERT
2093   LogStreamHandle(Debug, scc, nmethod) log;
2094   if (log.is_enabled()) {
2095     FlagSetting fs(PrintRelocations, true);
2096     buffer->print_on(&log);
2097     buffer->decode();
2098   }
2099 #endif
2100   return true;
2101 }
2102 
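     // Writes relocation info for all code sections of 'buffer' in the format consumed by
     // SCCReader::read_relocations: per section the relocation count, the _locs_point offset, the
     // raw relocInfo records and one uint of additional data per relocation. Immediate oops and
     // metadata referenced by relocations are appended afterwards via write_oop/write_metadata.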
2103 bool SCCache::write_relocations(CodeBuffer* buffer, uint& all_reloc_size) {
2104   uint all_reloc_count = 0;
2105   for (int i = 0; i < (int)CodeBuffer::SECT_LIMIT; i++) {
2106     CodeSection* cs = buffer->code_section(i);
2107     uint reloc_count = cs->has_locs() ? cs->locs_count() : 0;
2108     all_reloc_count += reloc_count;
2109   }
2110   all_reloc_size = all_reloc_count * sizeof(relocInfo);
2111   bool success = true;
2112   uint* reloc_data = NEW_C_HEAP_ARRAY(uint, all_reloc_count, mtCode);
2113   for (int i = 0; i < (int)CodeBuffer::SECT_LIMIT; i++) {
2114     CodeSection* cs = buffer->code_section(i);
2115     int reloc_count = cs->has_locs() ? cs->locs_count() : 0;
2116     uint n = write_bytes(&reloc_count, sizeof(int));
2117     if (n != sizeof(int)) {
2118       success = false;
2119       break;
2120     }
2121     if (reloc_count == 0) {
2122       continue;
2123     }
2124     // Write _locs_point (as offset from start)
2125     int locs_point_off = cs->locs_point_off();
2126     n = write_bytes(&locs_point_off, sizeof(int));
2127     if (n != sizeof(int)) {
2128       success = false;
2129       break;
2130     }
2131     relocInfo* reloc_start = cs->locs_start();
2132     uint reloc_size      = reloc_count * sizeof(relocInfo);
2133     n = write_bytes(reloc_start, reloc_size);
2134     if (n != reloc_size) {
2135       success = false;
2136       break;
2137     }
2138     LogStreamHandle(Info, scc, reloc) log;
2139     if (log.is_enabled()) {
2140       log.print_cr("======== write code section %d relocations [%d]:", i, reloc_count);
2141     }
2142     // Collect additional data
2143     RelocIterator iter(cs);
2144     bool has_immediate = false;
2145     int j = 0;
2146     while (iter.next()) {
2147       reloc_data[j] = 0; // initialize
2148       switch (iter.type()) {
2149         case relocInfo::none:
2150           break;
2151         case relocInfo::oop_type: {
2152           oop_Relocation* r = (oop_Relocation*)iter.reloc();
2153           if (r->oop_is_immediate()) {
2154             reloc_data[j] = (uint)j; // Indication that we need to restore immediate
2155             has_immediate = true;
2156           }
2157           break;
2158         }
2159         case relocInfo::metadata_type: {
2160           metadata_Relocation* r = (metadata_Relocation*)iter.reloc();
2161           if (r->metadata_is_immediate()) {
2162             reloc_data[j] = (uint)j; // Indication that we need to restore immediate
2163             has_immediate = true;
2164           }
2165           break;
2166         }
2167         case relocInfo::virtual_call_type:  // Fall through. They all call resolve_*_call blobs.
2168         case relocInfo::opt_virtual_call_type:
2169         case relocInfo::static_call_type: {
2170           CallRelocation* r = (CallRelocation*)iter.reloc();
2171           address dest = r->destination();
2172           if (dest == r->addr()) { // possible call via trampoline on AArch64
2173             dest = (address)-1;    // do nothing in this case when loading this relocation
2174           }
2175           reloc_data[j] = _table->id_for_address(dest, iter, buffer);
2176           break;
2177         }
2178         case relocInfo::trampoline_stub_type: {
2179           address dest = ((trampoline_stub_Relocation*)iter.reloc())->destination();
2180           reloc_data[j] = _table->id_for_address(dest, iter, buffer);
2181           break;
2182         }
2183         case relocInfo::static_stub_type:
2184           break;
2185         case relocInfo::runtime_call_type: {
2186           // Record offset of runtime destination
2187           CallRelocation* r = (CallRelocation*)iter.reloc();
2188           address dest = r->destination();
2189           if (dest == r->addr()) { // possible call via trampoline on AArch64
2190             dest = (address)-1;    // do nothing in this case when loading this relocation
2191           }
2192           reloc_data[j] = _table->id_for_address(dest, iter, buffer);
2193           break;
2194         }
2195         case relocInfo::runtime_call_w_cp_type:
2196           fatal("runtime_call_w_cp_type unimplemented");
2197           break;
2198         case relocInfo::external_word_type: {
2199           // Record offset of runtime target
2200           address target = ((external_word_Relocation*)iter.reloc())->target();
2201           reloc_data[j] = _table->id_for_address(target, iter, buffer);
2202           break;
2203         }
2204         case relocInfo::internal_word_type:
2205           break;
2206         case relocInfo::section_word_type:
2207           break;
2208         case relocInfo::poll_type:
2209           break;
2210         case relocInfo::poll_return_type:
2211           break;
2212         case relocInfo::post_call_nop_type:
2213           break;
2214         case relocInfo::entry_guard_type:
2215           break;
2216         default:
2217           fatal("relocation %d unimplemented", (int)iter.type());
2218           break;
2219       }
2220       if (log.is_enabled()) {
2221         iter.print_current_on(&log);
2222       }
2223       j++;
2224     }
2225     assert(j <= (int)reloc_count, "sanity");
2226     // Write additional relocation data: uint per relocation
2227     uint data_size = reloc_count * sizeof(uint);
2228     n = write_bytes(reloc_data, data_size);
2229     if (n != data_size) {
2230       success = false;
2231       break;
2232     }
2233     if (has_immediate) {
2234       // Save information about immediates in this Code Section
2235       RelocIterator iter_imm(cs);
2236       int j = 0;
2237       while (iter_imm.next()) {
2238         switch (iter_imm.type()) {
2239           case relocInfo::oop_type: {
2240             oop_Relocation* r = (oop_Relocation*)iter_imm.reloc();
2241             if (r->oop_is_immediate()) {
2242               assert(reloc_data[j] == (uint)j, "should be");
2243               jobject jo = *(jobject*)(r->oop_addr()); // Handle currently
2244               if (!write_oop(jo)) {
2245                 success = false;
2246               }
2247             }
2248             break;
2249           }
2250           case relocInfo::metadata_type: {
2251             metadata_Relocation* r = (metadata_Relocation*)iter_imm.reloc();
2252             if (r->metadata_is_immediate()) {
2253               assert(reloc_data[j] == (uint)j, "should be");
2254               Metadata* m = r->metadata_value();
2255               if (!write_metadata(m)) {
2256                 success = false;
2257               }
2258             }
2259             break;
2260           }
2261           default:
2262             break;
2263         }
2264         if (!success) {
2265           break;
2266         }
2267         j++;
2268       } // while (iter_imm.next())
2269     } // if (has_immediate)
2270   } // for(i < SECT_LIMIT)
2271   FREE_C_HEAP_ARRAY(uint, reloc_data);
2272   return success;
2273 }
2274 
2275 bool SCCache::store_adapter(CodeBuffer* buffer, uint32_t id, const char* name, uint32_t offsets[4]) {
2276   assert(CDSConfig::is_dumping_adapters(), "must be");
2277   SCCache* cache = open_for_write();
2278   if (cache == nullptr) {
2279     return false;
2280   }
2281   log_info(scc, stubs)("Writing adapter '%s' (0x%x) to AOT Code Cache", name, id);
2282 #ifdef ASSERT
2283   LogStreamHandle(Debug, scc, stubs) log;
2284   if (log.is_enabled()) {
2285     FlagSetting fs(PrintRelocations, true);
2286     buffer->print_on(&log);
2287     buffer->decode();
2288   }
2289 #endif
2290   // We need to take a lock to keep the main thread from racing with C1 and C2 compiler threads,
2291   // which may write blobs in parallel with each other or with later nmethods
2292   MutexLocker ml(Compile_lock);
2293   if (!cache->align_write()) {
2294     return false;
2295   }
2296   uint entry_position = cache->_write_position;
2297   // Write name
2298   uint name_offset = cache->_write_position - entry_position;
2299   uint name_size = (uint)strlen(name) + 1; // Includes '\0'
2300   uint n = cache->write_bytes(name, name_size);
2301   if (n != name_size) {
2302     return false;
2303   }
2304   // Write code section
2305   if (!cache->align_write()) {
2306     return false;
2307   }
2308   uint code_offset = cache->_write_position - entry_position;
2309   uint code_size = 0;
2310   if (!cache->write_code(buffer, code_size)) {
2311     return false;
2312   }
2313   // Write relocInfo array
2314   uint reloc_offset = cache->_write_position - entry_position;
2315   uint reloc_size = 0;
2316   if (!cache->write_relocations(buffer, reloc_size)) {
2317     return false;
2318   }
2319   int extras_count = 4;
2320   n = cache->write_bytes(&extras_count, sizeof(int));
2321   if (n != sizeof(int)) {
2322     return false;
2323   }
2324   for (int i = 0; i < 4; i++) {
2325     uint32_t arg = offsets[i];
2326     log_debug(scc, stubs)("Writing adapter '%s' (0x%x) offsets[%d] == 0x%x to AOT Code Cache", name, id, i, arg);
2327     n = cache->write_bytes(&arg, sizeof(uint32_t));
2328     if (n != sizeof(uint32_t)) {
2329       return false;
2330     }
2331   }
2332   uint entry_size = cache->_write_position - entry_position;
2333   SCCEntry* entry = new (cache) SCCEntry(entry_position, entry_size, name_offset, name_size,
2334                                           code_offset, code_size, reloc_offset, reloc_size,
2335                                         SCCEntry::Adapter, id, 0);
2336   log_info(scc, stubs)("Wrote adapter '%s' (0x%x) to AOT Code Cache", name, id);
2337   return true;
2338 }
2339 
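     // Writes the code sections of 'buffer': first an array of SCCodeSection descriptors (size,
     // original start address, offset within the entry), then each non-empty section's contents
     // aligned to DATA_ALIGNMENT. 'code_size' returns the total aligned size that read_code will
     // copy back.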
2340 bool SCCache::write_code(CodeBuffer* buffer, uint& code_size) {
2341   assert(_write_position == align_up(_write_position, DATA_ALIGNMENT), "%d not aligned to %d", _write_position, DATA_ALIGNMENT);
2342   //assert(buffer->blob() != nullptr, "sanity");
2343   uint code_offset = _write_position;
2344   uint cb_total_size = (uint)buffer->total_content_size();
2345   // Write information about Code sections first.
2346   SCCodeSection scc_cs[CodeBuffer::SECT_LIMIT];
2347   uint scc_cs_size = (uint)(sizeof(SCCodeSection) * CodeBuffer::SECT_LIMIT);
2348   uint offset = align_up(scc_cs_size, DATA_ALIGNMENT);
2349   uint total_size = 0;
2350   for (int i = 0; i < (int)CodeBuffer::SECT_LIMIT; i++) {
2351     const CodeSection* cs = buffer->code_section(i);
2352     assert(cs->mark() == nullptr, "CodeSection::_mark is not implemented");
2353     uint cs_size = (uint)cs->size();
2354     scc_cs[i]._size = cs_size;
2355     scc_cs[i]._origin_address = (cs_size == 0) ? nullptr : cs->start();
2356     scc_cs[i]._offset = (cs_size == 0) ? 0 : (offset + total_size);
2358     total_size += align_up(cs_size, DATA_ALIGNMENT);
2359   }
2360   uint n = write_bytes(scc_cs, scc_cs_size);
2361   if (n != scc_cs_size) {
2362     return false;
2363   }
2364   if (!align_write()) {
2365     return false;
2366   }
2367   assert(_write_position == (code_offset + offset), "%d  != (%d + %d)", _write_position, code_offset, offset);
2368   for (int i = 0; i < (int)CodeBuffer::SECT_LIMIT; i++) {
2369     const CodeSection* cs = buffer->code_section(i);
2370     uint cs_size = (uint)cs->size();
2371     if (cs_size == 0) {
2372       continue;  // skip trivial section
2373     }
2374     assert((_write_position - code_offset) == scc_cs[i]._offset, "%d != %d", _write_position, scc_cs[i]._offset);
2375     // Write code
2376     n = write_bytes(cs->start(), cs_size);
2377     if (n != cs_size) {
2378       return false;
2379     }
2380     if (!align_write()) {
2381       return false;
2382     }
2383   }
2384   assert((_write_position - code_offset) == (offset + total_size), "(%d - %d) != (%d + %d)", _write_position, code_offset, offset, total_size);
2385   code_size = total_size;
2386   return true;
2387 }
2388 
2389 bool SCCache::store_exception_blob(CodeBuffer* buffer, int pc_offset) {
2390   SCCache* cache = open_for_write();
2391   if (cache == nullptr) {
2392     return false;
2393   }
2394   log_info(scc, stubs)("Writing blob '%s' to AOT Code Cache", buffer->name());
2395 
2396 #ifdef ASSERT
2397   LogStreamHandle(Debug, scc, nmethod) log;
2398   if (log.is_enabled()) {
2399     FlagSetting fs(PrintRelocations, true);
2400     buffer->print_on(&log);
2401     buffer->decode();
2402   }
2403 #endif
2404   // We need to take a lock to prevent a race between a compiler thread generating the blob and the main thread generating an adapter
2405   MutexLocker ml(Compile_lock);
2406   if (!cache->align_write()) {
2407     return false;
2408   }
2409   uint entry_position = cache->_write_position;
2410 
2411   // Write pc_offset
2412   uint n = cache->write_bytes(&pc_offset, sizeof(int));
2413   if (n != sizeof(int)) {
2414     return false;
2415   }
2416 
2417   // Write name
2418   const char* name = buffer->name();
2419   uint name_offset = cache->_write_position - entry_position;
2420   uint name_size = (uint)strlen(name) + 1; // Includes '\0'
2421   n = cache->write_bytes(name, name_size);
2422   if (n != name_size) {
2423     return false;
2424   }
2425 
2426   // Write code section
2427   if (!cache->align_write()) {
2428     return false;
2429   }
2430   uint code_offset = cache->_write_position - entry_position;
2431   uint code_size = 0;
2432   if (!cache->write_code(buffer, code_size)) {
2433     return false;
2434   }
2435   // Write relocInfo array
2436   uint reloc_offset = cache->_write_position - entry_position;
2437   uint reloc_size = 0;
2438   if (!cache->write_relocations(buffer, reloc_size)) {
2439     return false;
2440   }
2441 
2442   uint entry_size = cache->_write_position - entry_position;
2443   SCCEntry* entry = new(cache) SCCEntry(entry_position, entry_size, name_offset, name_size,
2444                                           code_offset, code_size, reloc_offset, reloc_size,
2445                                           SCCEntry::Blob, (uint32_t)999);
2446   log_info(scc, stubs)("Wrote blob '%s' to AOT Code Cache", name);
2447   return true;
2448 }
2449 
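     // Rebuilds a DebugInformationRecorder from the cached data written by write_debug_info: the
     // serialized debug info stream followed by the PcDesc array, both copied into a freshly
     // allocated recorder backed by the given OopRecorder.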
2450 DebugInformationRecorder* SCCReader::read_debug_info(OopRecorder* oop_recorder) {
2451   uint code_offset = align_up(read_position(), DATA_ALIGNMENT);
2452   int data_size  = *(int*)addr(code_offset);
2453   code_offset   += sizeof(int);
2454   int pcs_length = *(int*)addr(code_offset);
2455   code_offset   += sizeof(int);
2456 
2457   log_debug(scc)("======== read DebugInfo [%d, %d]:", data_size, pcs_length);
2458 
2459   // Aligned initial sizes
2460   int data_size_align  = align_up(data_size, DATA_ALIGNMENT);
2461   int pcs_length_align = pcs_length + 1;
2462   assert(sizeof(PcDesc) > DATA_ALIGNMENT, "sanity");
2463   DebugInformationRecorder* recorder = new DebugInformationRecorder(oop_recorder, data_size_align, pcs_length);
2464 
2465   copy_bytes(addr(code_offset), recorder->stream()->buffer(), data_size_align);
2466   recorder->stream()->set_position(data_size);
2467   code_offset += data_size;
2468 
2469   uint pcs_size = pcs_length * sizeof(PcDesc);
2470   copy_bytes(addr(code_offset), (address)recorder->pcs(), pcs_size);
2471   code_offset += pcs_size;
2472   set_read_position(code_offset);
2473   return recorder;
2474 }
2475 
2476 bool SCCache::write_debug_info(DebugInformationRecorder* recorder) {
2477   if (!align_write()) {
2478     return false;
2479   }
2480   // Don't call data_size() and pcs_size(). They will freeze OopRecorder.
2481   int data_size = recorder->stream()->position(); // In bytes
2482   uint n = write_bytes(&data_size, sizeof(int));
2483   if (n != sizeof(int)) {
2484     return false;
2485   }
2486   int pcs_length = recorder->pcs_length(); // Number of PcDesc entries
2487   n = write_bytes(&pcs_length, sizeof(int));
2488   if (n != sizeof(int)) {
2489     return false;
2490   }
2491   n = write_bytes(recorder->stream()->buffer(), data_size);
2492   if (n != (uint)data_size) {
2493     return false;
2494   }
2495   uint pcs_size = pcs_length * sizeof(PcDesc);
2496   n = write_bytes(recorder->pcs(), pcs_size);
2497   if (n != pcs_size) {
2498     return false;
2499   }
2500   return true;
2501 }
2502 
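     // Rebuilds the OopMapSet written by write_oop_maps: for every map the data size, the OopMap
     // header (its freshly allocated write stream is preserved across the copy) and the compressed
     // map data are read back.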
2503 OopMapSet* SCCReader::read_oop_maps() {
2504   uint code_offset = read_position();
2505   int om_count = *(int*)addr(code_offset);
2506   code_offset += sizeof(int);
2507 
2508   log_debug(scc)("======== read oop maps [%d]:", om_count);
2509 
2510   OopMapSet* oop_maps = new OopMapSet(om_count);
2511   for (int i = 0; i < (int)om_count; i++) {
2512     int data_size = *(int*)addr(code_offset);
2513     code_offset += sizeof(int);
2514 
2515     OopMap* oop_map = new OopMap(data_size);
2516     // Preserve allocated stream
2517     CompressedWriteStream* stream = oop_map->write_stream();
2518 
2519     // Read data which overwrites default data
2520     copy_bytes(addr(code_offset), (address)oop_map, sizeof(OopMap));
2521     code_offset += sizeof(OopMap);
2522     stream->set_position(data_size);
2523     oop_map->set_write_stream(stream);
2524     if (data_size > 0) {
2525       copy_bytes(addr(code_offset), (address)(oop_map->data()), (uint)data_size);
2526       code_offset += data_size;
2527     }
2528 #ifdef ASSERT
2529     oop_map->_locs_length = 0;
2530     oop_map->_locs_used   = nullptr;
2531 #endif
2532     oop_maps->add(oop_map);
2533   }
2534   set_read_position(code_offset);
2535   return oop_maps;
2536 }
2537 
2538 bool SCCache::write_oop_maps(OopMapSet* oop_maps) {
2539   uint om_count = oop_maps->size();
2540   uint n = write_bytes(&om_count, sizeof(int));
2541   if (n != sizeof(int)) {
2542     return false;
2543   }
2544   for (int i = 0; i < (int)om_count; i++) {
2545     OopMap* om = oop_maps->at(i);
2546     int data_size = om->data_size();
2547     n = write_bytes(&data_size, sizeof(int));
2548     if (n != sizeof(int)) {
2549       return false;
2550     }
2551     n = write_bytes(om, sizeof(OopMap));
2552     if (n != sizeof(OopMap)) {
2553       return false;
2554     }
2555     n = write_bytes(om->data(), (uint)data_size);
2556     if (n != (uint)data_size) {
2557       return false;
2558     }
2559   }
2560   return true;
2561 }
2562 
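     // Reads one oop reference tagged with a DataKind and returns it as a JNI local handle: nullptr,
     // the non-oop word, a klass or primitive mirror, an archived object referenced by its permanent
     // index, a string interned from its UTF-8 bytes, or the system/platform class loader. Unknown
     // kinds mark the lookup as failed.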
2563 jobject SCCReader::read_oop(JavaThread* thread, const methodHandle& comp_method) {
2564   uint code_offset = read_position();
2565   oop obj = nullptr;
2566   DataKind kind = *(DataKind*)addr(code_offset);
2567   code_offset += sizeof(DataKind);
2568   set_read_position(code_offset);
2569   if (kind == DataKind::Null) {
2570     return nullptr;
2571   } else if (kind == DataKind::No_Data) {
2572     return (jobject)Universe::non_oop_word();
2573   } else if (kind == DataKind::Klass || kind == DataKind::Klass_Shared) {
2574     Klass* k = read_klass(comp_method, (kind == DataKind::Klass_Shared));
2575     if (k == nullptr) {
2576       return nullptr;
2577     }
2578     obj = k->java_mirror();
2579     if (obj == nullptr) {
2580       set_lookup_failed();
2581       log_info(scc)("Lookup failed for java_mirror of klass %s", k->external_name());
2582       return nullptr;
2583     }
2584   } else if (kind == DataKind::Primitive) {
2585     code_offset = read_position();
2586     int t = *(int*)addr(code_offset);
2587     code_offset += sizeof(int);
2588     set_read_position(code_offset);
2589     BasicType bt = (BasicType)t;
2590     obj = java_lang_Class::primitive_mirror(bt);
2591     log_info(scc)("%d (L%d): Read primitive type klass: %s", compile_id(), comp_level(), type2name(bt));
2592   } else if (kind == DataKind::String_Shared) {
2593     code_offset = read_position();
2594     int k = *(int*)addr(code_offset);
2595     code_offset += sizeof(int);
2596     set_read_position(code_offset);
2597     obj = CDSAccess::get_archived_object(k);
2598   } else if (kind == DataKind::String) {
2599     code_offset = read_position();
2600     int length = *(int*)addr(code_offset);
2601     code_offset += sizeof(int);
2602     set_read_position(code_offset);
2603     const char* dest = addr(code_offset);
2604     set_read_position(code_offset + length);
2605     obj = StringTable::intern(&(dest[0]), thread);
2606     if (obj == nullptr) {
2607       set_lookup_failed();
2608       log_info(scc)("%d (L%d): Lookup failed for String %s",
2609                        compile_id(), comp_level(), &(dest[0]));
2610       return nullptr;
2611     }
2612     assert(java_lang_String::is_instance(obj), "must be string");
2613     log_info(scc)("%d (L%d): Read String: %s", compile_id(), comp_level(), dest);
2614   } else if (kind == DataKind::SysLoader) {
2615     obj = SystemDictionary::java_system_loader();
2616     log_info(scc)("%d (L%d): Read java_system_loader", compile_id(), comp_level());
2617   } else if (kind == DataKind::PlaLoader) {
2618     obj = SystemDictionary::java_platform_loader();
2619     log_info(scc)("%d (L%d): Read java_platform_loader", compile_id(), comp_level());
2620   } else if (kind == DataKind::MH_Oop_Shared) {
2621     code_offset = read_position();
2622     int k = *(int*)addr(code_offset);
2623     code_offset += sizeof(int);
2624     set_read_position(code_offset);
2625     obj = CDSAccess::get_archived_object(k);
2626   } else {
2627     set_lookup_failed();
2628     log_info(scc)("%d (L%d): Unknown oop kind: %d",
2629                      compile_id(), comp_level(), (int)kind);
2630     return nullptr;
2631   }
2632   return JNIHandles::make_local(thread, obj);
2633 }
2634 
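     // Re-populates the OopRecorder with the cached oops. The recorded count includes the recorder's
     // reserved first slot, so reading starts at index 1.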
2635 bool SCCReader::read_oops(OopRecorder* oop_recorder, ciMethod* target) {
2636   uint code_offset = read_position();
2637   int oop_count = *(int*)addr(code_offset);
2638   code_offset += sizeof(int);
2639   set_read_position(code_offset);
2640   log_debug(scc)("======== read oops [%d]:", oop_count);
2641   if (oop_count == 0) {
2642     return true;
2643   }
2644   {
2645     VM_ENTRY_MARK;
2646     methodHandle comp_method(THREAD, target->get_Method());
2647     for (int i = 1; i < oop_count; i++) {
2648       jobject jo = read_oop(THREAD, comp_method);
2649       if (lookup_failed()) {
2650         return false;
2651       }
2652       if (oop_recorder->is_real(jo)) {
2653         oop_recorder->find_index(jo);
2654       } else {
2655         oop_recorder->allocate_oop_index(jo);
2656       }
2657       LogStreamHandle(Debug, scc, oops) log;
2658       if (log.is_enabled()) {
2659         log.print("%d: " INTPTR_FORMAT " ", i, p2i(jo));
2660         if (jo == (jobject)Universe::non_oop_word()) {
2661           log.print("non-oop word");
2662         } else if (jo == nullptr) {
2663           log.print("nullptr-oop");
2664         } else {
2665           JNIHandles::resolve(jo)->print_value_on(&log);
2666         }
2667         log.cr();
2668       }
2669     }
2670   }
2671   return true;
2672 }
2673 
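     // Reads one Metadata reference tagged with a DataKind: nullptr, the non-oop word, a Klass, a
     // Method, or a Method's MethodCounters (DataKind::MethodCnts wraps a method reference). Unknown
     // kinds mark the lookup as failed.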
2674 Metadata* SCCReader::read_metadata(const methodHandle& comp_method) {
2675   uint code_offset = read_position();
2676   Metadata* m = nullptr;
2677   DataKind kind = *(DataKind*)addr(code_offset);
2678   code_offset += sizeof(DataKind);
2679   set_read_position(code_offset);
2680   if (kind == DataKind::Null) {
2681     m = (Metadata*)nullptr;
2682   } else if (kind == DataKind::No_Data) {
2683     m = (Metadata*)Universe::non_oop_word();
2684   } else if (kind == DataKind::Klass || kind == DataKind::Klass_Shared) {
2685     m = (Metadata*)read_klass(comp_method, (kind == DataKind::Klass_Shared));
2686   } else if (kind == DataKind::Method || kind == DataKind::Method_Shared) {
2687     m = (Metadata*)read_method(comp_method, (kind == DataKind::Method_Shared));
2688   } else if (kind == DataKind::MethodCnts) {
2689     kind = *(DataKind*)addr(code_offset);
2690     bool shared = (kind == DataKind::Method_Shared);
2691     assert(kind == DataKind::Method || shared, "Sanity");
2692     code_offset += sizeof(DataKind);
2693     set_read_position(code_offset);
2694     m = (Metadata*)read_method(comp_method, shared);
2695     if (m != nullptr) {
2696       Method* method = (Method*)m;
2697       m = method->get_method_counters(Thread::current());
2698       if (m == nullptr) {
2699         set_lookup_failed();
2700         log_info(scc)("%d (L%d): Failed to get MethodCounters", compile_id(), comp_level());
2701       } else {
2702         log_info(scc)("%d (L%d): Read MethodCounters : " INTPTR_FORMAT, compile_id(), comp_level(), p2i(m));
2703       }
2704     }
2705   } else {
2706     set_lookup_failed();
2707     log_info(scc)("%d (L%d): Unknown metadata kind: %d", compile_id(), comp_level(), (int)kind);
2708   }
2709   return m;
2710 }
2711 
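     // Re-populates the OopRecorder's metadata section with the cached metadata. As in read_oops,
     // reading starts at index 1.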
2712 bool SCCReader::read_metadata(OopRecorder* oop_recorder, ciMethod* target) {
2713   uint code_offset = read_position();
2714   int metadata_count = *(int*)addr(code_offset);
2715   code_offset += sizeof(int);
2716   set_read_position(code_offset);
2717 
2718   log_debug(scc)("======== read metadata [%d]:", metadata_count);
2719 
2720   if (metadata_count == 0) {
2721     return true;
2722   }
2723   {
2724     VM_ENTRY_MARK;
2725     methodHandle comp_method(THREAD, target->get_Method());
2726 
2727     for (int i = 1; i < metadata_count; i++) {
2728       Metadata* m = read_metadata(comp_method);
2729       if (lookup_failed()) {
2730         return false;
2731       }
2732       if (oop_recorder->is_real(m)) {
2733         oop_recorder->find_index(m);
2734       } else {
2735         oop_recorder->allocate_metadata_index(m);
2736       }
2737       LogTarget(Debug, scc, metadata) log;
2738       if (log.is_enabled()) {
2739         LogStream ls(log);
2740         ls.print("%d: " INTPTR_FORMAT " ", i, p2i(m));
2741         if (m == (Metadata*)Universe::non_oop_word()) {
2742           ls.print("non-metadata word");
2743         } else if (m == nullptr) {
2744           ls.print("nullptr-oop");
2745         } else {
2746           Metadata::print_value_on_maybe_null(&ls, m);
2747         }
2748         ls.cr();
2749       }
2750     }
2751   }
2752   return true;
2753 }
2754 
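     // Writes one oop tagged with a DataKind in the format read back by SCCReader::read_oop: nullptr,
     // the non-oop word, class mirrors (primitive type or Klass), strings (archived permanent index
     // when available, UTF-8 bytes otherwise), the system or platform class loader, or other archived
     // "permanent" heap objects by index. Any other object marks the lookup as failed.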
2755 bool SCCache::write_oop(jobject& jo) {
2756   DataKind kind;
2757   uint n = 0;
2758   oop obj = JNIHandles::resolve(jo);
2759   if (jo == nullptr) {
2760     kind = DataKind::Null;
2761     n = write_bytes(&kind, sizeof(int));
2762     if (n != sizeof(int)) {
2763       return false;
2764     }
2765   } else if (jo == (jobject)Universe::non_oop_word()) {
2766     kind = DataKind::No_Data;
2767     n = write_bytes(&kind, sizeof(int));
2768     if (n != sizeof(int)) {
2769       return false;
2770     }
2771   } else if (java_lang_Class::is_instance(obj)) {
2772     if (java_lang_Class::is_primitive(obj)) {
2773       int bt = (int)java_lang_Class::primitive_type(obj);
2774       kind = DataKind::Primitive;
2775       n = write_bytes(&kind, sizeof(int));
2776       if (n != sizeof(int)) {
2777         return false;
2778       }
2779       n = write_bytes(&bt, sizeof(int));
2780       if (n != sizeof(int)) {
2781         return false;
2782       }
2783       log_info(scc)("%d (L%d): Write primitive type klass: %s", compile_id(), comp_level(), type2name((BasicType)bt));
2784     } else {
2785       Klass* klass = java_lang_Class::as_Klass(obj);
2786       if (!write_klass(klass)) {
2787         return false;
2788       }
2789     }
2790   } else if (java_lang_String::is_instance(obj)) {
2791     int k = CDSAccess::get_archived_object_permanent_index(obj);  // k >= 0 means obj is a "permanent heap object"
2792     if (k >= 0) {
2793       kind = DataKind::String_Shared;
2794       n = write_bytes(&kind, sizeof(int));
2795       if (n != sizeof(int)) {
2796         return false;
2797       }
2798       n = write_bytes(&k, sizeof(int));
2799       if (n != sizeof(int)) {
2800         return false;
2801       }
2802       return true;
2803     }
2804     kind = DataKind::String;
2805     n = write_bytes(&kind, sizeof(int));
2806     if (n != sizeof(int)) {
2807       return false;
2808     }
2809     ResourceMark rm;
2810     size_t length_sz = 0;
2811     const char* string = java_lang_String::as_utf8_string(obj, length_sz);
2812     int length = (int)length_sz; // FIXME -- cast
2813     length++; // write trailing '\0'
2814     n = write_bytes(&length, sizeof(int));
2815     if (n != sizeof(int)) {
2816       return false;
2817     }
2818     n = write_bytes(string, (uint)length);
2819     if (n != (uint)length) {
2820       return false;
2821     }
2822     log_info(scc)("%d (L%d): Write String: %s", compile_id(), comp_level(), string);
2823   } else if (java_lang_Module::is_instance(obj)) {
2824     fatal("Module object unimplemented");
2825   } else if (java_lang_ClassLoader::is_instance(obj)) {
2826     if (obj == SystemDictionary::java_system_loader()) {
2827       kind = DataKind::SysLoader;
2828       log_info(scc)("%d (L%d): Write ClassLoader: java_system_loader", compile_id(), comp_level());
2829     } else if (obj == SystemDictionary::java_platform_loader()) {
2830       kind = DataKind::PlaLoader;
2831       log_info(scc)("%d (L%d): Write ClassLoader: java_platform_loader", compile_id(), comp_level());
2832     } else {
2833       fatal("ClassLoader object unimplemented");
2834       return false;
2835     }
2836     n = write_bytes(&kind, sizeof(int));
2837     if (n != sizeof(int)) {
2838       return false;
2839     }
2840   } else {
2841     int k = CDSAccess::get_archived_object_permanent_index(obj);  // k >= 0 means obj is a "permanent heap object"
2842     if (k >= 0) {
2843       kind = DataKind::MH_Oop_Shared;
2844       n = write_bytes(&kind, sizeof(int));
2845       if (n != sizeof(int)) {
2846         return false;
2847       }
2848       n = write_bytes(&k, sizeof(int));
2849       if (n != sizeof(int)) {
2850         return false;
2851       }
2852       return true;
2853     }
2854     // Unhandled oop - bailout
2855     set_lookup_failed();
2856     log_info(scc, nmethod)("%d (L%d): Unhandled obj: " PTR_FORMAT " : %s",
2857                               compile_id(), comp_level(), p2i(obj), obj->klass()->external_name());
2858     return false;
2859   }
2860   return true;
2861 }
2862 
2863 bool SCCache::write_oops(OopRecorder* oop_recorder) {
2864   int oop_count = oop_recorder->oop_count();
2865   uint n = write_bytes(&oop_count, sizeof(int));
2866   if (n != sizeof(int)) {
2867     return false;
2868   }
2869   log_debug(scc)("======== write oops [%d]:", oop_count);
2870 
2871   for (int i = 1; i < oop_count; i++) { // skip first virtual nullptr
2872     jobject jo = oop_recorder->oop_at(i);
2873     LogStreamHandle(Info, scc, oops) log;
2874     if (log.is_enabled()) {
2875       log.print("%d: " INTPTR_FORMAT " ", i, p2i(jo));
2876       if (jo == (jobject)Universe::non_oop_word()) {
2877         log.print("non-oop word");
2878       } else if (jo == nullptr) {
2879         log.print("nullptr-oop");
2880       } else {
2881         JNIHandles::resolve(jo)->print_value_on(&log);
2882       }
2883       log.cr();
2884     }
2885     if (!write_oop(jo)) {
2886       return false;
2887     }
2888   }
2889   return true;
2890 }
2891 
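     // Metadata is tagged the same way as oops: Null, No_Data, Klass/Method
     // (via write_klass()/write_method()), or MethodCnts followed by the owning Method.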
2892 bool SCCache::write_metadata(Metadata* m) {
2893   uint n = 0;
2894   if (m == nullptr) {
2895     DataKind kind = DataKind::Null;
2896     n = write_bytes(&kind, sizeof(int));
2897     if (n != sizeof(int)) {
2898       return false;
2899     }
2900   } else if (m == (Metadata*)Universe::non_oop_word()) {
2901     DataKind kind = DataKind::No_Data;
2902     n = write_bytes(&kind, sizeof(int));
2903     if (n != sizeof(int)) {
2904       return false;
2905     }
2906   } else if (m->is_klass()) {
2907     if (!write_klass((Klass*)m)) {
2908       return false;
2909     }
2910   } else if (m->is_method()) {
2911     if (!write_method((Method*)m)) {
2912       return false;
2913     }
2914   } else if (m->is_methodCounters()) {
2915     DataKind kind = DataKind::MethodCnts;
2916     n = write_bytes(&kind, sizeof(int));
2917     if (n != sizeof(int)) {
2918       return false;
2919     }
2920     if (!write_method(((MethodCounters*)m)->method())) {
2921       return false;
2922     }
2923     log_info(scc)("%d (L%d): Write MethodCounters : " INTPTR_FORMAT, compile_id(), comp_level(), p2i(m));
2924   } else { // Not supported
2925     fatal("metadata : " INTPTR_FORMAT " unimplemented", p2i(m));
2926     return false;
2927   }
2928   return true;
2929 }
2930 
2931 bool SCCache::write_metadata(OopRecorder* oop_recorder) {
2932   int metadata_count = oop_recorder->metadata_count();
2933   uint n = write_bytes(&metadata_count, sizeof(int));
2934   if (n != sizeof(int)) {
2935     return false;
2936   }
2937 
2938   log_debug(scc)("======== write metadata [%d]:", metadata_count);
2939 
2940   for (int i = 1; i < metadata_count; i++) { // skip first virtual nullptr
2941     Metadata* m = oop_recorder->metadata_at(i);
2942     LogStreamHandle(Debug, scc, metadata) log;
2943     if (log.is_enabled()) {
2944       log.print("%d: " INTPTR_FORMAT " ", i, p2i(m));
2945       if (m == (Metadata*)Universe::non_oop_word()) {
2946         log.print("non-metadata word");
2947       } else if (m == nullptr) {
2948         log.print("nullptr-oop");
2949       } else {
2950         Metadata::print_value_on_maybe_null(&log, m);
2951       }
2952       log.cr();
2953     }
2954     if (!write_metadata(m)) {
2955       return false;
2956     }
2957   }
2958   return true;
2959 }
2960 
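     // Dependencies were stored as raw (already compressed) bytes; the reader
     // simply points the Dependencies object at the mapped cache buffer.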
2961 bool SCCReader::read_dependencies(Dependencies* dependencies) {
2962   uint code_offset = read_position();
2963   int dependencies_size = *(int*)addr(code_offset);
2964 
2965   log_debug(scc)("======== read dependencies [%d]:", dependencies_size);
2966 
2967   code_offset += sizeof(int);
2968   code_offset = align_up(code_offset, DATA_ALIGNMENT);
2969   if (dependencies_size > 0) {
2970     dependencies->set_content((u_char*)addr(code_offset), dependencies_size);
2971   }
2972   code_offset += dependencies_size;
2973   set_read_position(code_offset);
2974   return true;
2975 }
2976 
2977 bool SCCache::load_nmethod(ciEnv* env, ciMethod* target, int entry_bci, AbstractCompiler* compiler, CompLevel comp_level) {
2978   TraceTime t1("SC total load time", &_t_totalLoad, enable_timers(), false);
2979   CompileTask* task = env->task();
2980   SCCEntry* entry = task->scc_entry();
2981   bool preload = task->preload();
2982   assert(entry != nullptr, "sanity");
2983   SCCache* cache = open_for_read();
2984   if (cache == nullptr) {
2985     return false;
2986   }
2987   if (log_is_enabled(Info, scc, nmethod)) {
2988     uint decomp = (target->method_data() == nullptr) ? 0 : target->method_data()->decompile_count();
2989     VM_ENTRY_MARK;
2990     ResourceMark rm;
2991     methodHandle method(THREAD, target->get_Method());
2992     const char* target_name = method->name_and_sig_as_C_string();
2993     uint hash = java_lang_String::hash_code((const jbyte*)target_name, (int)strlen(target_name));
2994     bool clinit_brs = entry->has_clinit_barriers();
2995     log_info(scc, nmethod)("%d (L%d): %s nmethod '%s' (decomp: %d, hash: " UINT32_FORMAT_X_0 "%s%s)",
2996                            task->compile_id(), task->comp_level(), (preload ? "Preloading" : "Reading"),
2997                            target_name, decomp, hash, (clinit_brs ? ", has clinit barriers" : ""),
2998                            (entry->ignore_decompile() ? ", ignore_decomp" : ""));
2999   }
3000   ReadingMark rdmk;
3001   if (rdmk.failed()) {
3002     // Cache is closed, cannot touch anything.
3003     return false;
3004   }
3005 
3006   SCCReader reader(cache, entry, task);
3007   bool success = reader.compile(env, target, entry_bci, compiler);
3008   if (success) {
3009     task->set_num_inlined_bytecodes(entry->num_inlined_bytecodes());
3010   } else {
3011     entry->set_load_fail();
3012   }
3013   return success;
3014 }
3015 
3016 SCCReader::SCCReader(SCCache* cache, SCCEntry* entry, CompileTask* task) {
3017   _cache = cache;
3018   _entry   = entry;
3019   _load_buffer = cache->cache_buffer();
3020   _read_position = 0;
3021   if (task != nullptr) {
3022     _compile_id = task->compile_id();
3023     _comp_level = task->comp_level();
3024     _preload    = task->preload();
3025   } else {
3026     _compile_id = 0;
3027     _comp_level = 0;
3028     _preload    = false;
3029   }
3030   _lookup_failed = false;
3031 }
3032 
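     // Reads a cached nmethod entry and registers it with the VM. The read order
     // below mirrors the layout produced by SCCache::write_nmethod(): flags,
     // orig_pc_offset, frame_size, CodeOffsets, oops, metadata, debug info,
     // dependencies, oop maps, exception handler table, null check table,
     // code, relocations.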
3033 bool SCCReader::compile(ciEnv* env, ciMethod* target, int entry_bci, AbstractCompiler* compiler) {
3034   uint entry_position = _entry->offset();
3035   uint code_offset = entry_position + _entry->code_offset();
3036   set_read_position(code_offset);
3037 
3038   // Read flags
3039   int flags = *(int*)addr(code_offset);
3040   code_offset += sizeof(int);
3041   bool has_monitors      = (flags & 0x1) != 0;
3042   bool has_wide_vectors  = (flags & 0x2) != 0;
3043   bool has_unsafe_access = (flags & 0x4) != 0;
3044   bool has_scoped_access = (flags & 0x8) != 0;
3045 
3046   int orig_pc_offset = *(int*)addr(code_offset);
3047   code_offset += sizeof(int);
3048   int frame_size = *(int*)addr(code_offset);
3049   code_offset += sizeof(int);
3050 
3051   // Read offsets
3052   CodeOffsets* offsets = (CodeOffsets*)addr(code_offset);
3053   code_offset += sizeof(CodeOffsets);
3054 
3055   // Create Debug Information Recorder to record scopes, oopmaps, etc.
3056   OopRecorder* oop_recorder = new OopRecorder(env->arena());
3057   env->set_oop_recorder(oop_recorder);
3058 
3059   set_read_position(code_offset);
3060 
3061   // Read OopRecorder data
3062   if (!read_oops(oop_recorder, target)) {
3063     return false;
3064   }
3065   if (!read_metadata(oop_recorder, target)) {
3066     return false;
3067   }
3068 
3069   // Read Debug info
3070   DebugInformationRecorder* recorder = read_debug_info(oop_recorder);
3071   if (recorder == nullptr) {
3072     return false;
3073   }
3074   env->set_debug_info(recorder);
3075 
3076   // Read Dependencies (compressed already)
3077   Dependencies* dependencies = new Dependencies(env);
3078   if (!read_dependencies(dependencies)) {
3079     return false;
3080   }
3081   env->set_dependencies(dependencies);
3082 
3083   // Read oop maps
3084   OopMapSet* oop_maps = read_oop_maps();
3085   if (oop_maps == nullptr) {
3086     return false;
3087   }
3088 
3089   // Read exception handler table
3090   code_offset = read_position();
3091   int exc_table_length = *(int*)addr(code_offset);
3092   code_offset += sizeof(int);
3093   ExceptionHandlerTable handler_table(MAX2(exc_table_length, 4));
3094   if (exc_table_length > 0) {
3095     handler_table.set_length(exc_table_length);
3096     uint exc_table_size = handler_table.size_in_bytes();
3097     copy_bytes(addr(code_offset), (address)handler_table.table(), exc_table_size);
3098     code_offset += exc_table_size;
3099   }
3100 
3101   // Read null check table
3102   int nul_chk_length = *(int*)addr(code_offset);
3103   code_offset += sizeof(int);
3104   ImplicitExceptionTable nul_chk_table;
3105   if (nul_chk_length > 0) {
3106     nul_chk_table.set_size(nul_chk_length);
3107     nul_chk_table.set_len(nul_chk_length);
3108     uint nul_chk_size = nul_chk_table.size_in_bytes();
3109     copy_bytes(addr(code_offset), (address)nul_chk_table.data(), nul_chk_size - sizeof(implicit_null_entry));
3110     code_offset += nul_chk_size;
3111   }
3112 
3113   uint reloc_size = _entry->reloc_size();
3114   CodeBuffer buffer("Compile::Fill_buffer", _entry->code_size(), reloc_size);
3115   buffer.initialize_oop_recorder(oop_recorder);
3116 
3117   const char* name = addr(entry_position + _entry->name_offset());
3118 
3119   // Create fake original CodeBuffer
3120   CodeBuffer orig_buffer(name);
3121 
3122   // Read code
3123   if (!read_code(&buffer, &orig_buffer, align_up(code_offset, DATA_ALIGNMENT))) {
3124     return false;
3125   }
3126 
3127   // Read relocations
3128   uint reloc_offset = entry_position + _entry->reloc_offset();
3129   set_read_position(reloc_offset);
3130   if (!read_relocations(&buffer, &orig_buffer, oop_recorder, target)) {
3131     return false;
3132   }
3133 
3134   log_info(scc, nmethod)("%d (L%d): Read nmethod '%s' from AOT Code Cache", compile_id(), comp_level(), name);
3135 #ifdef ASSERT
3136   LogStreamHandle(Debug, scc, nmethod) log;
3137   if (log.is_enabled()) {
3138     FlagSetting fs(PrintRelocations, true);
3139     buffer.print_on(&log);
3140     buffer.decode();
3141   }
3142 #endif
3143 
3144   if (VerifyCachedCode) {
3145     return false;
3146   }
3147 
3148   // Register nmethod
3149   TraceTime t1("SC total nmethod register time", &_t_totalRegister, enable_timers(), false);
3150   env->register_method(target, entry_bci,
3151                        offsets, orig_pc_offset,
3152                        &buffer, frame_size,
3153                        oop_maps, &handler_table,
3154                        &nul_chk_table, compiler,
3155                        _entry->has_clinit_barriers(),
3156                        false,
3157                        has_unsafe_access,
3158                        has_wide_vectors,
3159                        has_monitors,
3160                        has_scoped_access,
3161                        0, true /* install_code */,
3162                        (SCCEntry *)_entry);
3163   CompileTask* task = env->task();
3164   bool success = task->is_success();
3165   if (success) {
3166     ((SCCEntry *)_entry)->set_loaded();
3167   }
3168   return success;
3169 }
3170 
3171 // No concurrency for writing to the cache file because this method is called from
3172 // ciEnv::register_method() under the MethodCompileQueue_lock and Compile_lock locks.
3173 SCCEntry* SCCache::store_nmethod(const methodHandle& method,
3174                      int comp_id,
3175                      int entry_bci,
3176                      CodeOffsets* offsets,
3177                      int orig_pc_offset,
3178                      DebugInformationRecorder* recorder,
3179                      Dependencies* dependencies,
3180                      CodeBuffer* buffer,
3181                      int frame_size,
3182                      OopMapSet* oop_maps,
3183                      ExceptionHandlerTable* handler_table,
3184                      ImplicitExceptionTable* nul_chk_table,
3185                      AbstractCompiler* compiler,
3186                      CompLevel comp_level,
3187                      bool has_clinit_barriers,
3188                      bool for_preload,
3189                      bool has_unsafe_access,
3190                      bool has_wide_vectors,
3191                      bool has_monitors,
3192                      bool has_scoped_access) {
3193   if (!CDSConfig::is_dumping_cached_code()) {
3194     return nullptr; // The metadata and heap in the CDS image haven't been finalized yet.
3195   }
3196   if (entry_bci != InvocationEntryBci) {
3197     return nullptr; // No OSR
3198   }
3199   if (compiler->is_c1() && (comp_level == CompLevel_simple || comp_level == CompLevel_limited_profile)) {
3200     // Cache tier1 compilations
3201   } else if (!compiler->is_c2()) {
3202     return nullptr; // Only C2 now
3203   }
3204   TraceTime t1("SC total store time", &_t_totalStore, enable_timers(), false);
3205   SCCache* cache = open_for_write();
3206   if (cache == nullptr) {
3207     return nullptr; // Cache file is closed
3208   }
3209   SCCEntry* entry = cache->write_nmethod(method, comp_id, entry_bci, offsets, orig_pc_offset, recorder, dependencies, buffer,
3210                                   frame_size, oop_maps, handler_table, nul_chk_table, compiler, comp_level,
3211                                   has_clinit_barriers, for_preload, has_unsafe_access, has_wide_vectors, has_monitors, has_scoped_access);
3212   if (entry == nullptr) {
3213     ResourceMark rm;
3214     log_warning(scc, nmethod)("%d (L%d): Cannot store nmethod '%s'", comp_id, (int)comp_level, method->name_and_sig_as_C_string());
3215   }
3216   return entry;
3217 }
3218 
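     // Writer counterpart of SCCReader::compile(): serializes the method name
     // followed by flags, offsets, OopRecorder data, debug info, dependencies,
     // oop maps, exception/null-check tables, code and relocations, and creates
     // the SCCEntry describing the stored blob.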
3219 SCCEntry* SCCache::write_nmethod(const methodHandle& method,
3220                                  int comp_id,
3221                                  int entry_bci,
3222                                  CodeOffsets* offsets,
3223                                  int orig_pc_offset,
3224                                  DebugInformationRecorder* recorder,
3225                                  Dependencies* dependencies,
3226                                  CodeBuffer* buffer,
3227                                  int frame_size,
3228                                  OopMapSet* oop_maps,
3229                                  ExceptionHandlerTable* handler_table,
3230                                  ImplicitExceptionTable* nul_chk_table,
3231                                  AbstractCompiler* compiler,
3232                                  CompLevel comp_level,
3233                                  bool has_clinit_barriers,
3234                                  bool for_preload,
3235                                  bool has_unsafe_access,
3236                                  bool has_wide_vectors,
3237                                  bool has_monitors,
3238                                  bool has_scoped_access) {
3239 //  if (method->is_hidden()) {
3240 //    ResourceMark rm;
3241 //    log_info(scc, nmethod)("%d (L%d): Skip hidden method '%s'", comp_id, (int)comp_level, method->name_and_sig_as_C_string());
3242 //    return nullptr;
3243 //  }
3244   if (buffer->before_expand() != nullptr) {
3245     ResourceMark rm;
3246     log_warning(scc, nmethod)("%d (L%d): Skip nmethod with expanded buffer '%s'", comp_id, (int)comp_level, method->name_and_sig_as_C_string());
3247     return nullptr;
3248   }
3249 #ifdef ASSERT
3250   LogStreamHandle(Debug, scc, nmethod) log;
3251   if (log.is_enabled()) {
3252     tty->print_cr(" == store_nmethod");
3253     FlagSetting fs(PrintRelocations, true);
3254     buffer->print_on(&log);
3255     buffer->decode();
3256   }
3257 #endif
3258   assert(!has_clinit_barriers || _gen_preload_code, "sanity");
3259   Method* m = method();
3260   bool method_in_cds = MetaspaceShared::is_in_shared_metaspace((address)m);
3261   InstanceKlass* holder = m->method_holder();
3262   bool klass_in_cds = holder->is_shared() && !holder->is_shared_unregistered_class();
3263   bool builtin_loader = holder->class_loader_data()->is_builtin_class_loader_data();
3264   if (!builtin_loader) {
3265     ResourceMark rm;
3266     log_info(scc, nmethod)("%d (L%d): Skip method '%s' loaded by custom class loader %s", comp_id, (int)comp_level, method->name_and_sig_as_C_string(), holder->class_loader_data()->loader_name());
3267     return nullptr;
3268   }
3269   if (for_preload && !(method_in_cds && klass_in_cds)) {
3270     ResourceMark rm;
3271     log_info(scc, nmethod)("%d (L%d): Skip method '%s' for preload: not in CDS", comp_id, (int)comp_level, method->name_and_sig_as_C_string());
3272     return nullptr;
3273   }
3274   assert(!for_preload || method_in_cds, "sanity");
3275   _for_preload = for_preload;
3276   _has_clinit_barriers = has_clinit_barriers;
3277 
3278   if (!align_write()) {
3279     return nullptr;
3280   }
3281   _compile_id = comp_id;
3282   _comp_level = (int)comp_level;
3283 
3284   uint entry_position = _write_position;
3285 
3286   uint decomp = (method->method_data() == nullptr) ? 0 : method->method_data()->decompile_count();
3287 
3288   // Is this the assembly phase of the one-step workflow?
3289   // In that phase compilation is driven by saved profiling data
3290   // without an application run, so decompilation counters are ignored.
3291   // They are also ignored for C1 code because it is unconditionally
3292   // decompiled when the C2-generated code is published.
3293   bool ignore_decompile = (comp_level == CompLevel_limited_profile) ||
3294                           CDSConfig::is_dumping_final_static_archive();
3295 
3296   // Write name
3297   uint name_offset = 0;
3298   uint name_size   = 0;
3299   uint hash = 0;
3300   uint n;
3301   {
3302     ResourceMark rm;
3303     const char* name   = method->name_and_sig_as_C_string();
3304     log_info(scc, nmethod)("%d (L%d): Writing nmethod '%s' (comp level: %d, decomp: %d%s%s) to AOT Code Cache",
3305                            comp_id, (int)comp_level, name, comp_level, decomp,
3306                            (ignore_decompile ? ", ignore_decomp" : ""),
3307                            (has_clinit_barriers ? ", has clinit barriers" : ""));
3308 
3309     LogStreamHandle(Info, scc, loader) log;
3310     if (log.is_enabled()) {
3311       oop loader = holder->class_loader();
3312       oop domain = holder->protection_domain();
3313       log.print("Holder: ");
3314       holder->print_value_on(&log);
3315       log.print(" loader: ");
3316       if (loader == nullptr) {
3317         log.print("nullptr");
3318       } else {
3319         loader->print_value_on(&log);
3320       }
3321       log.print(" domain: ");
3322       if (domain == nullptr) {
3323         log.print("nullptr");
3324       } else {
3325         domain->print_value_on(&log);
3326       }
3327       log.cr();
3328     }
3329     name_offset = _write_position - entry_position;
3330     name_size   = (uint)strlen(name) + 1; // Includes terminating '\0'
3331     n = write_bytes(name, name_size);
3332     if (n != name_size) {
3333       return nullptr;
3334     }
3335     hash = java_lang_String::hash_code((const jbyte*)name, (int)strlen(name));
3336   }
3337 
3338   if (!align_write()) {
3339     return nullptr;
3340   }
3341 
3342   uint code_offset = _write_position - entry_position;
3343 
3344   int flags = (has_scoped_access ? 0x8 : 0) |
3345               (has_unsafe_access ? 0x4 : 0) |
3346               (has_wide_vectors  ? 0x2 : 0) |
3347               (has_monitors      ? 0x1 : 0);
3348   n = write_bytes(&flags, sizeof(int));
3349   if (n != sizeof(int)) {
3350     return nullptr;
3351   }
3352 
3353   n = write_bytes(&orig_pc_offset, sizeof(int));
3354   if (n != sizeof(int)) {
3355     return nullptr;
3356   }
3357 
3358   n = write_bytes(&frame_size, sizeof(int));
3359   if (n != sizeof(int)) {
3360     return nullptr;
3361   }
3362 
3363   // Write offsets
3364   n = write_bytes(offsets, sizeof(CodeOffsets));
3365   if (n != sizeof(CodeOffsets)) {
3366     return nullptr;
3367   }
3368 
3369   // Write OopRecorder data
3370   if (!write_oops(buffer->oop_recorder())) {
3371     if (lookup_failed() && !failed()) {
3372       // Skip this method and reposition file
3373       set_write_position(entry_position);
3374     }
3375     return nullptr;
3376   }
3377   if (!write_metadata(buffer->oop_recorder())) {
3378     if (lookup_failed() && !failed()) {
3379       // Skip this method and reposition file
3380       set_write_position(entry_position);
3381     }
3382     return nullptr;
3383   }
3384 
3385   // Write Debug info
3386   if (!write_debug_info(recorder)) {
3387     return nullptr;
3388   }
3389   // Write Dependencies
3390   int dependencies_size = (int)dependencies->size_in_bytes();
3391   n = write_bytes(&dependencies_size, sizeof(int));
3392   if (n != sizeof(int)) {
3393     return nullptr;
3394   }
3395   if (!align_write()) {
3396     return nullptr;
3397   }
3398   n = write_bytes(dependencies->content_bytes(), dependencies_size);
3399   if (n != (uint)dependencies_size) {
3400     return nullptr;
3401   }
3402 
3403   // Write oop maps
3404   if (!write_oop_maps(oop_maps)) {
3405     return nullptr;
3406   }
3407 
3408   // Write exception handler table
3409   int exc_table_length = handler_table->length();
3410   n = write_bytes(&exc_table_length, sizeof(int));
3411   if (n != sizeof(int)) {
3412     return nullptr;
3413   }
3414   uint exc_table_size = handler_table->size_in_bytes();
3415   n = write_bytes(handler_table->table(), exc_table_size);
3416   if (n != exc_table_size) {
3417     return nullptr;
3418   }
3419 
3420   // Write null check table
3421   int nul_chk_length = nul_chk_table->len();
3422   n = write_bytes(&nul_chk_length, sizeof(int));
3423   if (n != sizeof(int)) {
3424     return nullptr;
3425   }
3426   uint nul_chk_size = nul_chk_table->size_in_bytes();
3427   n = write_bytes(nul_chk_table->data(), nul_chk_size);
3428   if (n != nul_chk_size) {
3429     return nullptr;
3430   }
3431 
3432   // Write code section
3433   if (!align_write()) {
3434     return nullptr;
3435   }
3436   uint code_size = 0;
3437   if (!write_code(buffer, code_size)) {
3438     return nullptr;
3439   }
3440   // Write relocInfo array
3441   uint reloc_offset = _write_position - entry_position;
3442   uint reloc_size = 0;
3443   if (!write_relocations(buffer, reloc_size)) {
3444     if (lookup_failed() && !failed()) {
3445       // Skip this method and reposition file
3446       set_write_position(entry_position);
3447     }
3448     return nullptr;
3449   }
3450   uint entry_size = _write_position - entry_position;
3451 
3452   SCCEntry* entry = new (this) SCCEntry(entry_position, entry_size, name_offset, name_size,
3453                                         code_offset, code_size, reloc_offset, reloc_size,
3454                                         SCCEntry::Code, hash, (uint)comp_level, (uint)comp_id, decomp,
3455                                         has_clinit_barriers, _for_preload, ignore_decompile);
3456   if (method_in_cds) {
3457     entry->set_method(m);
3458   }
3459 #ifdef ASSERT
3460   if (has_clinit_barriers || _for_preload) {
3461     assert(for_preload, "sanity");
3462     assert(entry->method() != nullptr, "sanity");
3463   }
3464 #endif
3465   {
3466     ResourceMark rm;
3467     log_info(scc, nmethod)("%d (L%d): Wrote nmethod '%s'%s to AOT Code Cache",
3468                            comp_id, (int)comp_level, method->name_and_sig_as_C_string(), (_for_preload ? " (for preload)" : ""));
3469   }
3470   if (VerifyCachedCode) {
3471     return nullptr;
3472   }
3473   return entry;
3474 }
3475 
3476 static void print_helper1(outputStream* st, const char* name, int count) {
3477   if (count > 0) {
3478     st->print(" %s=%d", name, count);
3479   }
3480 }
3481 
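     // The entries section of the cache is a table of (id, index) uint pairs
     // followed by the SCCEntry array itself; the statistics and listing code
     // below both walk that table.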
3482 void SCCache::print_statistics_on(outputStream* st) {
3483   SCCache* cache = open_for_read();
3484   if (cache != nullptr) {
3485     ReadingMark rdmk;
3486     if (rdmk.failed()) {
3487       // Cache is closed, cannot touch anything.
3488       return;
3489     }
3490 
3491     uint count = cache->_load_header->entries_count();
3492     uint* search_entries = (uint*)cache->addr(cache->_load_header->entries_offset()); // [id, index]
3493     SCCEntry* load_entries = (SCCEntry*)(search_entries + 2 * count);
3494 
3495     AOTCodeStats stats;
3496     for (uint i = 0; i < count; i++) {
3497       stats.collect_all_stats(&load_entries[i]);
3498     }
3499 
3500     for (uint kind = SCCEntry::None; kind < SCCEntry::Kind_count; kind++) {
3501       if (stats.entry_count(kind) > 0) {
3502         st->print("  %s:", sccentry_kind_name[kind]);
3503         print_helper1(st, "total", stats.entry_count(kind));
3504         print_helper1(st, "loaded", stats.entry_loaded_count(kind));
3505         print_helper1(st, "invalidated", stats.entry_invalidated_count(kind));
3506         print_helper1(st, "failed", stats.entry_load_failed_count(kind));
3507         st->cr();
3508       }
3509       if (kind == SCCEntry::Code) {
3510         for (uint lvl = CompLevel_none; lvl < AOTCompLevel_count; lvl++) {
3511           if (stats.nmethod_count(lvl) > 0) {
3512             st->print("    SC T%d", lvl);
3513             print_helper1(st, "total", stats.nmethod_count(lvl));
3514             print_helper1(st, "loaded", stats.nmethod_loaded_count(lvl));
3515             print_helper1(st, "invalidated", stats.nmethod_invalidated_count(lvl));
3516             print_helper1(st, "failed", stats.nmethod_load_failed_count(lvl));
3517             if (lvl == AOTCompLevel_count-1) {
3518               print_helper1(st, "has_clinit_barriers", stats.clinit_barriers_count());
3519             }
3520             st->cr();
3521           }
3522         }
3523       }
3524     }
3525   } else {
3526     st->print_cr("failed to map code cache");
3527   }
3528 }
3529 
3530 void SCCache::print_on(outputStream* st) {
3531   SCCache* cache = open_for_read();
3532   if (cache != nullptr) {
3533     ReadingMark rdmk;
3534     if (rdmk.failed()) {
3535       // Cache is closed, cannot touch anything.
3536       return;
3537     }
3538 
3539     uint count = cache->_load_header->entries_count();
3540     uint* search_entries = (uint*)cache->addr(cache->_load_header->entries_offset()); // [id, index]
3541     SCCEntry* load_entries = (SCCEntry*)(search_entries + 2 * count);
3542 
3543     for (uint i = 0; i < count; i++) {
3544       int index = search_entries[2*i + 1];
3545       SCCEntry* entry = &(load_entries[index]);
3546 
3547       st->print_cr("%4u: %4u: K%u L%u offset=%u decompile=%u size=%u code_size=%u%s%s%s%s",
3548                 i, index, entry->kind(), entry->comp_level(), entry->offset(),
3549                 entry->decompile(), entry->size(), entry->code_size(),
3550                 entry->has_clinit_barriers() ? " has_clinit_barriers" : "",
3551                 entry->for_preload()         ? " for_preload"         : "",
3552                 entry->is_loaded()           ? " loaded"              : "",
3553                 entry->not_entrant()         ? " not_entrant"         : "");
3554       st->print_raw("         ");
3555       SCCReader reader(cache, entry, nullptr);
3556       reader.print_on(st);
3557     }
3558   } else {
3559     st->print_cr("failed to map code cache");
3560   }
3561 }
3562 
3563 void SCCache::print_unused_entries_on(outputStream* st) {
3564   LogStreamHandle(Info, scc, init) info;
3565   if (info.is_enabled()) {
3566     SCCache::iterate([&](SCCEntry* entry) {
3567       if (!entry->is_loaded()) {
3568         MethodTrainingData* mtd = MethodTrainingData::find(methodHandle(Thread::current(), entry->method()));
3569         if (mtd != nullptr) {
3570           if (mtd->has_holder()) {
3571             if (mtd->holder()->method_holder()->is_initialized()) {
3572               ResourceMark rm;
3573               mtd->iterate_compiles([&](CompileTrainingData* ctd) {
3574                 if ((uint)ctd->level() == entry->comp_level()) {
3575                   if (ctd->init_deps_left() == 0) {
3576                     nmethod* nm = mtd->holder()->code();
3577                     if (nm == nullptr) {
3578                       if (mtd->holder()->queued_for_compilation()) {
3579                         return; // scheduled for compilation
3580                       }
3581                     } else if ((uint)nm->comp_level() >= entry->comp_level()) {
3582                       return; // already compiled online at an equal or higher level, superseding this entry
3583                     }
3584                     info.print("SCC entry not loaded: ");
3585                     ctd->print_on(&info);
3586                     info.cr();
3587                   }
3588                 }
3589               });
3590             } else {
3591               // not yet initialized
3592             }
3593           } else {
3594             info.print("SCC entry doesn't have a holder: ");
3595             mtd->print_on(&info);
3596             info.cr();
3597           }
3598         }
3599       }
3600     });
3601   }
3602 }
3603 
3604 void SCCReader::print_on(outputStream* st) {
3605   uint entry_position = _entry->offset();
3606   set_read_position(entry_position);
3607 
3608   // Read name
3609   uint name_offset = entry_position + _entry->name_offset();
3610   uint name_size = _entry->name_size(); // Includes '/0'
3611   const char* name = addr(name_offset);
3612 
3613   st->print_cr("  name: %s", name);
3614 }
3615 
3616 // Address table ids for generated routines, external addresses and C
3617 // string addresses are partitioned into non-overlapping positive integer
3618 // ranges defined by the following base and max values,
3619 // i.e. [_extrs_base, _extrs_base + _extrs_max - 1],
3620 //      [_stubs_base, _stubs_base + _stubs_max - 1],
3621 //      ...
3622 //      [_c_str_base, _c_str_base + _c_str_max - 1].
3623 #define _extrs_max 80
3624 #define _stubs_max 120
3625 #define _all_blobs_max 100
3626 #define _blobs_max 24
3627 #define _C2_blobs_max 25
3628 #define _C1_blobs_max (_all_blobs_max - _blobs_max - _C2_blobs_max)
3629 #define _all_max 300
3630 
3631 #define _c_str_max MAX_STR_COUNT
3632 #define _extrs_base 0
3633 #define _stubs_base (_extrs_base + _extrs_max)
3634 #define _blobs_base (_stubs_base + _stubs_max)
3635 #define _C1_blobs_base (_blobs_base + _blobs_max)
3636 #define _C2_blobs_base (_C1_blobs_base + _C1_blobs_max)
3637 #if (_C2_blobs_base >= _all_max)
3638 #error SCAddress table ranges need adjusting
3639 #endif
3640 #define _c_str_base _all_max
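     // With the values above, the derived id ranges are (for illustration):
     //   _extrs:    [  0,  79]    _stubs:    [ 80, 199]    _blobs: [200, 223]
     //   _C1_blobs: [224, 274]    _C2_blobs: [275, 299]
     //   _c_str:    [300, 300 + MAX_STR_COUNT - 1]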
3641 
3642 #define SET_ADDRESS(type, addr)                           \
3643   {                                                       \
3644     type##_addr[type##_length++] = (address) (addr);      \
3645     assert(type##_length <= type##_max, "increase size"); \
3646   }
3647 
3648 static bool initializing_extrs = false;
3649 void SCAddressTable::init_extrs() {
3650   if (_extrs_complete || initializing_extrs) return; // Done already
3651   initializing_extrs = true;
3652   _extrs_addr = NEW_C_HEAP_ARRAY(address, _extrs_max, mtCode);
3653 
3654   _extrs_length = 0;
3655   _stubs_length = 0;
3656 
3657   // Runtime methods
3658 #ifdef COMPILER2
3659   SET_ADDRESS(_extrs, OptoRuntime::handle_exception_C);
3660 #endif
3661 #ifdef COMPILER1
3662   SET_ADDRESS(_extrs, Runtime1::is_instance_of);
3663   SET_ADDRESS(_extrs, Runtime1::trace_block_entry);
3664 #endif
3665 
3666   SET_ADDRESS(_extrs, CompressedOops::base_addr());
3667 #if INCLUDE_G1GC
3668   SET_ADDRESS(_extrs, G1BarrierSetRuntime::write_ref_field_post_entry);
3669   SET_ADDRESS(_extrs, G1BarrierSetRuntime::write_ref_field_pre_entry);
3670 #endif
3671 
3672 #if INCLUDE_SHENANDOAHGC
3673   SET_ADDRESS(_extrs, ShenandoahRuntime::arraycopy_barrier_oop);
3674   SET_ADDRESS(_extrs, ShenandoahRuntime::arraycopy_barrier_narrow_oop);
3675   SET_ADDRESS(_extrs, ShenandoahRuntime::write_ref_field_pre);
3676   SET_ADDRESS(_extrs, ShenandoahRuntime::clone_barrier);
3677   SET_ADDRESS(_extrs, ShenandoahRuntime::load_reference_barrier_strong);
3678   SET_ADDRESS(_extrs, ShenandoahRuntime::load_reference_barrier_strong_narrow);
3679   SET_ADDRESS(_extrs, ShenandoahRuntime::load_reference_barrier_weak);
3680   SET_ADDRESS(_extrs, ShenandoahRuntime::load_reference_barrier_weak_narrow);
3681   SET_ADDRESS(_extrs, ShenandoahRuntime::load_reference_barrier_phantom);
3682   SET_ADDRESS(_extrs, ShenandoahRuntime::load_reference_barrier_phantom_narrow);
3683 #endif
3684   SET_ADDRESS(_extrs, SharedRuntime::fixup_callers_callsite);
3685 
3686   SET_ADDRESS(_extrs, SharedRuntime::log_jni_monitor_still_held);
3687   SET_ADDRESS(_extrs, SharedRuntime::rc_trace_method_entry);
3688   SET_ADDRESS(_extrs, SharedRuntime::reguard_yellow_pages);
3689   SET_ADDRESS(_extrs, SharedRuntime::dtrace_method_exit);
3690 
3691   SET_ADDRESS(_extrs, SharedRuntime::handle_wrong_method);
3692   SET_ADDRESS(_extrs, SharedRuntime::handle_wrong_method_abstract);
3693   SET_ADDRESS(_extrs, SharedRuntime::handle_wrong_method_ic_miss);
3694   SET_ADDRESS(_extrs, SharedRuntime::resolve_opt_virtual_call_C);
3695   SET_ADDRESS(_extrs, SharedRuntime::resolve_virtual_call_C);
3696   SET_ADDRESS(_extrs, SharedRuntime::resolve_static_call_C);
3697 
3698   SET_ADDRESS(_extrs, SharedRuntime::complete_monitor_unlocking_C);
3699   SET_ADDRESS(_extrs, SharedRuntime::enable_stack_reserved_zone);
3700 #if defined(AMD64) && !defined(ZERO)
3701   SET_ADDRESS(_extrs, SharedRuntime::montgomery_multiply);
3702   SET_ADDRESS(_extrs, SharedRuntime::montgomery_square);
3703 #endif // AMD64
3704   SET_ADDRESS(_extrs, SharedRuntime::d2f);
3705   SET_ADDRESS(_extrs, SharedRuntime::d2i);
3706   SET_ADDRESS(_extrs, SharedRuntime::d2l);
3707   SET_ADDRESS(_extrs, SharedRuntime::dcos);
3708   SET_ADDRESS(_extrs, SharedRuntime::dexp);
3709   SET_ADDRESS(_extrs, SharedRuntime::dlog);
3710   SET_ADDRESS(_extrs, SharedRuntime::dlog10);
3711   SET_ADDRESS(_extrs, SharedRuntime::dpow);
3712   SET_ADDRESS(_extrs, SharedRuntime::dsin);
3713   SET_ADDRESS(_extrs, SharedRuntime::dtan);
3714   SET_ADDRESS(_extrs, SharedRuntime::f2i);
3715   SET_ADDRESS(_extrs, SharedRuntime::f2l);
3716 #ifndef ZERO
3717   SET_ADDRESS(_extrs, SharedRuntime::drem);
3718   SET_ADDRESS(_extrs, SharedRuntime::frem);
3719 #endif
3720   SET_ADDRESS(_extrs, SharedRuntime::l2d);
3721   SET_ADDRESS(_extrs, SharedRuntime::l2f);
3722   SET_ADDRESS(_extrs, SharedRuntime::ldiv);
3723   SET_ADDRESS(_extrs, SharedRuntime::lmul);
3724   SET_ADDRESS(_extrs, SharedRuntime::lrem);
3725 #if INCLUDE_JVMTI
3726   SET_ADDRESS(_extrs, &JvmtiExport::_should_notify_object_alloc);
3727 #endif /* INCLUDE_JVMTI */
3728   BarrierSet* bs = BarrierSet::barrier_set();
3729   if (bs->is_a(BarrierSet::CardTableBarrierSet)) {
3730     SET_ADDRESS(_extrs, ci_card_table_address_as<address>());
3731   }
3732   SET_ADDRESS(_extrs, ThreadIdentifier::unsafe_offset());
3733   SET_ADDRESS(_extrs, Thread::current);
3734 
3735   SET_ADDRESS(_extrs, os::javaTimeMillis);
3736   SET_ADDRESS(_extrs, os::javaTimeNanos);
3737 
3738 #if INCLUDE_JVMTI
3739   SET_ADDRESS(_extrs, &JvmtiVTMSTransitionDisabler::_VTMS_notify_jvmti_events);
3740 #endif /* INCLUDE_JVMTI */
3741   SET_ADDRESS(_extrs, StubRoutines::crc_table_addr());
3742 #ifndef PRODUCT
3743   SET_ADDRESS(_extrs, &SharedRuntime::_partial_subtype_ctr);
3744   SET_ADDRESS(_extrs, JavaThread::verify_cross_modify_fence_failure);
3745 #endif
3746 
3747 #ifndef ZERO
3748 #if defined(AMD64) || defined(AARCH64) || defined(RISCV64)
3749   SET_ADDRESS(_extrs, MacroAssembler::debug64);
3750 #endif
3751 #if defined(AMD64)
3752   SET_ADDRESS(_extrs, StubRoutines::x86::arrays_hashcode_powers_of_31());
3753 #endif
3754 #endif
3755 
3756 #ifdef COMPILER1
3757 #ifdef X86
3758   SET_ADDRESS(_extrs, LIR_Assembler::float_signmask_pool);
3759   SET_ADDRESS(_extrs, LIR_Assembler::double_signmask_pool);
3760   SET_ADDRESS(_extrs, LIR_Assembler::float_signflip_pool);
3761   SET_ADDRESS(_extrs, LIR_Assembler::double_signflip_pool);
3762 #endif
3763 #endif
3764 
3765   // addresses of fields in AOT runtime constants area
3766   address* p = AOTRuntimeConstants::field_addresses_list();
3767   while (*p != nullptr) {
3768     SET_ADDRESS(_extrs, *p++);
3769   }
3770 
3771   _extrs_complete = true;
3772   log_info(scc,init)("External addresses recorded");
3773 }
3774 
3775 static bool initializing_early_stubs = false;
3776 void SCAddressTable::init_early_stubs() {
3777   if (_complete || initializing_early_stubs) return; // Done already
3778   initializing_early_stubs = true;
3779   _stubs_addr = NEW_C_HEAP_ARRAY(address, _stubs_max, mtCode);
3780   _stubs_length = 0;
3781   SET_ADDRESS(_stubs, StubRoutines::forward_exception_entry());
3782   _early_stubs_complete = true;
3783   log_info(scc,init)("Early stubs recorded");
3784 }
3785 
3786 static bool initializing_shared_blobs = false;
3787 void SCAddressTable::init_shared_blobs() {
3788   if (_complete || initializing_shared_blobs) return; // Done already
3789   initializing_shared_blobs = true;
3790   _blobs_addr = NEW_C_HEAP_ARRAY(address, _all_blobs_max, mtCode);
3791 
3792   // Divide the _blobs_addr array into chunks because they could be initialized in parallel
3793   _C1_blobs_addr = _blobs_addr + _blobs_max; // C1 blob addresses are stored after shared blobs
3794   _C2_blobs_addr = _C1_blobs_addr + _C1_blobs_max; // C2 blob addresses are stored after C1 blobs
3795 
3796   _blobs_length = 0;       // for shared blobs
3797   _C1_blobs_length = 0;
3798   _C2_blobs_length = 0;
3799 
3800   // Blobs
3801   SET_ADDRESS(_blobs, SharedRuntime::get_handle_wrong_method_stub());
3802   SET_ADDRESS(_blobs, SharedRuntime::get_ic_miss_stub());
3803   SET_ADDRESS(_blobs, SharedRuntime::get_resolve_opt_virtual_call_stub());
3804   SET_ADDRESS(_blobs, SharedRuntime::get_resolve_virtual_call_stub());
3805   SET_ADDRESS(_blobs, SharedRuntime::get_resolve_static_call_stub());
3806   SET_ADDRESS(_blobs, SharedRuntime::deopt_blob()->entry_point());
3807   SET_ADDRESS(_blobs, SharedRuntime::polling_page_safepoint_handler_blob()->entry_point());
3808   SET_ADDRESS(_blobs, SharedRuntime::polling_page_return_handler_blob()->entry_point());
3809 #ifdef COMPILER2
3810   SET_ADDRESS(_blobs, SharedRuntime::polling_page_vectors_safepoint_handler_blob()->entry_point());
3811 #endif
3812 
3813   assert(_blobs_length <= _blobs_max, "increase _blobs_max to %d", _blobs_length);
3814   log_info(scc,init)("Early shared blobs recorded");
3815 }
3816 
3817 static bool initializing_stubs = false;
3818 void SCAddressTable::init_stubs() {
3819   if (_complete || initializing_stubs) return; // Done already
3820   initializing_stubs = true;
3821   // final blobs
3822   SET_ADDRESS(_blobs, SharedRuntime::throw_AbstractMethodError_entry());
3823   SET_ADDRESS(_blobs, SharedRuntime::throw_IncompatibleClassChangeError_entry());
3824   SET_ADDRESS(_blobs, SharedRuntime::throw_NullPointerException_at_call_entry());
3825   SET_ADDRESS(_blobs, SharedRuntime::throw_StackOverflowError_entry());
3826   SET_ADDRESS(_blobs, SharedRuntime::throw_delayed_StackOverflowError_entry());
3827 
3828   assert(_blobs_length <= _blobs_max, "increase _blobs_max to %d", _blobs_length);
3829 
3830   _shared_blobs_complete = true;
3831   log_info(scc,init)("All shared blobs recorded");
3832 
3833   // Stubs
3834   SET_ADDRESS(_stubs, StubRoutines::method_entry_barrier());
3835 /*
3836   SET_ADDRESS(_stubs, StubRoutines::throw_AbstractMethodError_entry());
3837   SET_ADDRESS(_stubs, StubRoutines::throw_IncompatibleClassChangeError_entry());
3838   SET_ADDRESS(_stubs, StubRoutines::throw_NullPointerException_at_call_entry());
3839   SET_ADDRESS(_stubs, StubRoutines::throw_StackOverflowError_entry());
3840   SET_ADDRESS(_stubs, StubRoutines::throw_delayed_StackOverflowError_entry());
3841 */
3842   SET_ADDRESS(_stubs, StubRoutines::atomic_xchg_entry());
3843   SET_ADDRESS(_stubs, StubRoutines::atomic_cmpxchg_entry());
3844   SET_ADDRESS(_stubs, StubRoutines::atomic_cmpxchg_long_entry());
3845   SET_ADDRESS(_stubs, StubRoutines::atomic_add_entry());
3846   SET_ADDRESS(_stubs, StubRoutines::fence_entry());
3847 
3848   SET_ADDRESS(_stubs, StubRoutines::cont_thaw());
3849   SET_ADDRESS(_stubs, StubRoutines::cont_returnBarrier());
3850   SET_ADDRESS(_stubs, StubRoutines::cont_returnBarrierExc());
3851 
3852   JFR_ONLY(SET_ADDRESS(_stubs, SharedRuntime::jfr_write_checkpoint());)
3853 
3854 
3855   SET_ADDRESS(_stubs, StubRoutines::jbyte_arraycopy());
3856   SET_ADDRESS(_stubs, StubRoutines::jshort_arraycopy());
3857   SET_ADDRESS(_stubs, StubRoutines::jint_arraycopy());
3858   SET_ADDRESS(_stubs, StubRoutines::jlong_arraycopy());
3859   SET_ADDRESS(_stubs, StubRoutines::_oop_arraycopy);
3860   SET_ADDRESS(_stubs, StubRoutines::_oop_arraycopy_uninit);
3861 
3862   SET_ADDRESS(_stubs, StubRoutines::jbyte_disjoint_arraycopy());
3863   SET_ADDRESS(_stubs, StubRoutines::jshort_disjoint_arraycopy());
3864   SET_ADDRESS(_stubs, StubRoutines::jint_disjoint_arraycopy());
3865   SET_ADDRESS(_stubs, StubRoutines::jlong_disjoint_arraycopy());
3866   SET_ADDRESS(_stubs, StubRoutines::_oop_disjoint_arraycopy);
3867   SET_ADDRESS(_stubs, StubRoutines::_oop_disjoint_arraycopy_uninit);
3868 
3869   SET_ADDRESS(_stubs, StubRoutines::arrayof_jbyte_arraycopy());
3870   SET_ADDRESS(_stubs, StubRoutines::arrayof_jshort_arraycopy());
3871   SET_ADDRESS(_stubs, StubRoutines::arrayof_jint_arraycopy());
3872   SET_ADDRESS(_stubs, StubRoutines::arrayof_jlong_arraycopy());
3873   SET_ADDRESS(_stubs, StubRoutines::_arrayof_oop_arraycopy);
3874   SET_ADDRESS(_stubs, StubRoutines::_arrayof_oop_arraycopy_uninit);
3875 
3876   SET_ADDRESS(_stubs, StubRoutines::arrayof_jbyte_disjoint_arraycopy());
3877   SET_ADDRESS(_stubs, StubRoutines::arrayof_jshort_disjoint_arraycopy());
3878   SET_ADDRESS(_stubs, StubRoutines::arrayof_jint_disjoint_arraycopy());
3879   SET_ADDRESS(_stubs, StubRoutines::arrayof_jlong_disjoint_arraycopy());
3880   SET_ADDRESS(_stubs, StubRoutines::_arrayof_oop_disjoint_arraycopy);
3881   SET_ADDRESS(_stubs, StubRoutines::_arrayof_oop_disjoint_arraycopy_uninit);
3882 
3883   SET_ADDRESS(_stubs, StubRoutines::_checkcast_arraycopy);
3884   SET_ADDRESS(_stubs, StubRoutines::_checkcast_arraycopy_uninit);
3885 
3886   SET_ADDRESS(_stubs, StubRoutines::unsafe_arraycopy());
3887   SET_ADDRESS(_stubs, StubRoutines::generic_arraycopy());
3888 
3889   SET_ADDRESS(_stubs, StubRoutines::jbyte_fill());
3890   SET_ADDRESS(_stubs, StubRoutines::jshort_fill());
3891   SET_ADDRESS(_stubs, StubRoutines::jint_fill());
3892   SET_ADDRESS(_stubs, StubRoutines::arrayof_jbyte_fill());
3893   SET_ADDRESS(_stubs, StubRoutines::arrayof_jshort_fill());
3894   SET_ADDRESS(_stubs, StubRoutines::arrayof_jint_fill());
3895 
3896   SET_ADDRESS(_stubs, StubRoutines::data_cache_writeback());
3897   SET_ADDRESS(_stubs, StubRoutines::data_cache_writeback_sync());
3898 
3899   SET_ADDRESS(_stubs, StubRoutines::aescrypt_encryptBlock());
3900   SET_ADDRESS(_stubs, StubRoutines::aescrypt_decryptBlock());
3901   SET_ADDRESS(_stubs, StubRoutines::cipherBlockChaining_encryptAESCrypt());
3902   SET_ADDRESS(_stubs, StubRoutines::cipherBlockChaining_decryptAESCrypt());
3903   SET_ADDRESS(_stubs, StubRoutines::electronicCodeBook_encryptAESCrypt());
3904   SET_ADDRESS(_stubs, StubRoutines::electronicCodeBook_decryptAESCrypt());
3905   SET_ADDRESS(_stubs, StubRoutines::poly1305_processBlocks());
3906   SET_ADDRESS(_stubs, StubRoutines::counterMode_AESCrypt());
3907   SET_ADDRESS(_stubs, StubRoutines::ghash_processBlocks());
3908   SET_ADDRESS(_stubs, StubRoutines::chacha20Block());
3909   SET_ADDRESS(_stubs, StubRoutines::base64_encodeBlock());
3910   SET_ADDRESS(_stubs, StubRoutines::base64_decodeBlock());
3911   SET_ADDRESS(_stubs, StubRoutines::md5_implCompress());
3912   SET_ADDRESS(_stubs, StubRoutines::md5_implCompressMB());
3913   SET_ADDRESS(_stubs, StubRoutines::sha1_implCompress());
3914   SET_ADDRESS(_stubs, StubRoutines::sha1_implCompressMB());
3915   SET_ADDRESS(_stubs, StubRoutines::sha256_implCompress());
3916   SET_ADDRESS(_stubs, StubRoutines::sha256_implCompressMB());
3917   SET_ADDRESS(_stubs, StubRoutines::sha512_implCompress());
3918   SET_ADDRESS(_stubs, StubRoutines::sha512_implCompressMB());
3919   SET_ADDRESS(_stubs, StubRoutines::sha3_implCompress());
3920   SET_ADDRESS(_stubs, StubRoutines::sha3_implCompressMB());
3921 
3922   SET_ADDRESS(_stubs, StubRoutines::updateBytesCRC32());
3923 
3924   SET_ADDRESS(_stubs, StubRoutines::crc32c_table_addr());
3925   SET_ADDRESS(_stubs, StubRoutines::updateBytesCRC32C());
3926   SET_ADDRESS(_stubs, StubRoutines::updateBytesAdler32());
3927 
3928   SET_ADDRESS(_stubs, StubRoutines::multiplyToLen());
3929   SET_ADDRESS(_stubs, StubRoutines::squareToLen());
3930   SET_ADDRESS(_stubs, StubRoutines::mulAdd());
3931   SET_ADDRESS(_stubs, StubRoutines::montgomeryMultiply());
3932   SET_ADDRESS(_stubs, StubRoutines::montgomerySquare());
3933   SET_ADDRESS(_stubs, StubRoutines::bigIntegerRightShift());
3934   SET_ADDRESS(_stubs, StubRoutines::bigIntegerLeftShift());
3935   SET_ADDRESS(_stubs, StubRoutines::galoisCounterMode_AESCrypt());
3936 
3937   SET_ADDRESS(_stubs, StubRoutines::vectorizedMismatch());
3938 
3939   SET_ADDRESS(_stubs, StubRoutines::dexp());
3940   SET_ADDRESS(_stubs, StubRoutines::dlog());
3941   SET_ADDRESS(_stubs, StubRoutines::dlog10());
3942   SET_ADDRESS(_stubs, StubRoutines::dpow());
3943   SET_ADDRESS(_stubs, StubRoutines::dsin());
3944   SET_ADDRESS(_stubs, StubRoutines::dcos());
3945   SET_ADDRESS(_stubs, StubRoutines::dlibm_reduce_pi04l());
3946   SET_ADDRESS(_stubs, StubRoutines::dlibm_sin_cos_huge());
3947   SET_ADDRESS(_stubs, StubRoutines::dlibm_tan_cot_huge());
3948   SET_ADDRESS(_stubs, StubRoutines::dtan());
3949 
3950   SET_ADDRESS(_stubs, StubRoutines::f2hf_adr());
3951   SET_ADDRESS(_stubs, StubRoutines::hf2f_adr());
3952 
3953 #if defined(AMD64) && !defined(ZERO)
3954   SET_ADDRESS(_stubs, StubRoutines::x86::d2i_fixup());
3955   SET_ADDRESS(_stubs, StubRoutines::x86::f2i_fixup());
3956   SET_ADDRESS(_stubs, StubRoutines::x86::d2l_fixup());
3957   SET_ADDRESS(_stubs, StubRoutines::x86::f2l_fixup());
3958   SET_ADDRESS(_stubs, StubRoutines::x86::float_sign_mask());
3959   SET_ADDRESS(_stubs, StubRoutines::x86::float_sign_flip());
3960   SET_ADDRESS(_stubs, StubRoutines::x86::double_sign_mask());
3961   SET_ADDRESS(_stubs, StubRoutines::x86::double_sign_flip());
3962   SET_ADDRESS(_stubs, StubRoutines::x86::vector_popcount_lut());
3963   SET_ADDRESS(_stubs, StubRoutines::x86::vector_float_sign_mask());
3964   SET_ADDRESS(_stubs, StubRoutines::x86::vector_float_sign_flip());
3965   SET_ADDRESS(_stubs, StubRoutines::x86::vector_double_sign_mask());
3966   SET_ADDRESS(_stubs, StubRoutines::x86::vector_double_sign_flip());
3967   // The iota indices are ordered by type B/S/I/L/F/D, and the offset between two types is 64.
3968   // See C2_MacroAssembler::load_iota_indices().
3969   for (int i = 0; i < 6; i++) {
3970     SET_ADDRESS(_stubs, StubRoutines::x86::vector_iota_indices() + i * 64);
3971   }
3972 #endif
3973 #if defined(AARCH64) && !defined(ZERO)
3974   SET_ADDRESS(_stubs, StubRoutines::aarch64::zero_blocks());
3975   SET_ADDRESS(_stubs, StubRoutines::aarch64::count_positives());
3976   SET_ADDRESS(_stubs, StubRoutines::aarch64::count_positives_long());
3977   SET_ADDRESS(_stubs, StubRoutines::aarch64::large_array_equals());
3978   SET_ADDRESS(_stubs, StubRoutines::aarch64::compare_long_string_LL());
3979   SET_ADDRESS(_stubs, StubRoutines::aarch64::compare_long_string_UU());
3980   SET_ADDRESS(_stubs, StubRoutines::aarch64::compare_long_string_LU());
3981   SET_ADDRESS(_stubs, StubRoutines::aarch64::compare_long_string_UL());
3982   SET_ADDRESS(_stubs, StubRoutines::aarch64::string_indexof_linear_ul());
3983   SET_ADDRESS(_stubs, StubRoutines::aarch64::string_indexof_linear_ll());
3984   SET_ADDRESS(_stubs, StubRoutines::aarch64::string_indexof_linear_uu());
3985   SET_ADDRESS(_stubs, StubRoutines::aarch64::large_byte_array_inflate());
3986   SET_ADDRESS(_stubs, StubRoutines::aarch64::spin_wait());
3987 
3988   SET_ADDRESS(_stubs, StubRoutines::aarch64::large_arrays_hashcode(T_BOOLEAN));
3989   SET_ADDRESS(_stubs, StubRoutines::aarch64::large_arrays_hashcode(T_BYTE));
3990   SET_ADDRESS(_stubs, StubRoutines::aarch64::large_arrays_hashcode(T_SHORT));
3991   SET_ADDRESS(_stubs, StubRoutines::aarch64::large_arrays_hashcode(T_CHAR));
3992   SET_ADDRESS(_stubs, StubRoutines::aarch64::large_arrays_hashcode(T_INT));
3993 #endif
3994 
3995   _complete = true;
3996   log_info(scc,init)("Stubs recorded");
3997 }
3998 
3999 void SCAddressTable::init_opto() {
4000 #ifdef COMPILER2
4001   // OptoRuntime Blobs
4002   SET_ADDRESS(_C2_blobs, OptoRuntime::uncommon_trap_blob()->entry_point());
4003   SET_ADDRESS(_C2_blobs, OptoRuntime::exception_blob()->entry_point());
4004   SET_ADDRESS(_C2_blobs, OptoRuntime::new_instance_Java());
4005   SET_ADDRESS(_C2_blobs, OptoRuntime::new_array_Java());
4006   SET_ADDRESS(_C2_blobs, OptoRuntime::new_array_nozero_Java());
4007   SET_ADDRESS(_C2_blobs, OptoRuntime::multianewarray2_Java());
4008   SET_ADDRESS(_C2_blobs, OptoRuntime::multianewarray3_Java());
4009   SET_ADDRESS(_C2_blobs, OptoRuntime::multianewarray4_Java());
4010   SET_ADDRESS(_C2_blobs, OptoRuntime::multianewarray5_Java());
4011   SET_ADDRESS(_C2_blobs, OptoRuntime::multianewarrayN_Java());
4012   SET_ADDRESS(_C2_blobs, OptoRuntime::vtable_must_compile_stub());
4013   SET_ADDRESS(_C2_blobs, OptoRuntime::complete_monitor_locking_Java());
4014   SET_ADDRESS(_C2_blobs, OptoRuntime::monitor_notify_Java());
4015   SET_ADDRESS(_C2_blobs, OptoRuntime::monitor_notifyAll_Java());
4016   SET_ADDRESS(_C2_blobs, OptoRuntime::rethrow_stub());
4017   SET_ADDRESS(_C2_blobs, OptoRuntime::slow_arraycopy_Java());
4018   SET_ADDRESS(_C2_blobs, OptoRuntime::register_finalizer_Java());
4019   SET_ADDRESS(_C2_blobs, OptoRuntime::class_init_barrier_Java());
4020 #if INCLUDE_JVMTI
4021   SET_ADDRESS(_C2_blobs, OptoRuntime::notify_jvmti_vthread_start());
4022   SET_ADDRESS(_C2_blobs, OptoRuntime::notify_jvmti_vthread_end());
4023   SET_ADDRESS(_C2_blobs, OptoRuntime::notify_jvmti_vthread_mount());
4024   SET_ADDRESS(_C2_blobs, OptoRuntime::notify_jvmti_vthread_unmount());
4025 #endif /* INCLUDE_JVMTI */
4026 #endif
4027 
4028   assert(_C2_blobs_length <= _C2_blobs_max, "increase _C2_blobs_max to %d", _C2_blobs_length);
4029   _opto_complete = true;
4030   log_info(scc,init)("OptoRuntime Blobs recorded");
4031 }
4032 
4033 void SCAddressTable::init_c1() {
4034 #ifdef COMPILER1
4035   // Runtime1 Blobs
4036   for (int i = 0; i < (int)(C1StubId::NUM_STUBIDS); i++) {
4037     C1StubId id = (C1StubId)i;
4038     if (Runtime1::blob_for(id) == nullptr) {
4039       log_info(scc, init)("C1 blob %s is missing", Runtime1::name_for(id));
4040       continue;
4041     }
4042     if (Runtime1::entry_for(id) == nullptr) {
4043       log_info(scc, init)("C1 blob %s is missing entry", Runtime1::name_for(id));
4044       continue;
4045     }
4046     address entry = Runtime1::entry_for(id);
4047     SET_ADDRESS(_C1_blobs, entry);
4048   }
4049 #if INCLUDE_G1GC
4050   if (UseG1GC) {
4051     G1BarrierSetC1* bs = (G1BarrierSetC1*)BarrierSet::barrier_set()->barrier_set_c1();
4052     address entry = bs->pre_barrier_c1_runtime_code_blob()->code_begin();
4053     SET_ADDRESS(_C1_blobs, entry);
4054     entry = bs->post_barrier_c1_runtime_code_blob()->code_begin();
4055     SET_ADDRESS(_C1_blobs, entry);
4056   }
4057 #endif // INCLUDE_G1GC
4058 #if INCLUDE_ZGC
4059   if (UseZGC) {
4060     ZBarrierSetC1* bs = (ZBarrierSetC1*)BarrierSet::barrier_set()->barrier_set_c1();
4061     SET_ADDRESS(_C1_blobs, bs->_load_barrier_on_oop_field_preloaded_runtime_stub);
4062     SET_ADDRESS(_C1_blobs, bs->_load_barrier_on_weak_oop_field_preloaded_runtime_stub);
4063     SET_ADDRESS(_C1_blobs, bs->_store_barrier_on_oop_field_with_healing);
4064     SET_ADDRESS(_C1_blobs, bs->_store_barrier_on_oop_field_without_healing);
4065   }
4066 #endif // INCLUDE_ZGC
4067 #if INCLUDE_SHENANDOAHGC
4068   if (UseShenandoahGC) {
4069     ShenandoahBarrierSetC1* bs = (ShenandoahBarrierSetC1*)BarrierSet::barrier_set()->barrier_set_c1();
4070     SET_ADDRESS(_C1_blobs, bs->pre_barrier_c1_runtime_code_blob()->code_begin());
4071     SET_ADDRESS(_C1_blobs, bs->load_reference_barrier_strong_rt_code_blob()->code_begin());
4072     SET_ADDRESS(_C1_blobs, bs->load_reference_barrier_strong_native_rt_code_blob()->code_begin());
4073     SET_ADDRESS(_C1_blobs, bs->load_reference_barrier_weak_rt_code_blob()->code_begin());
4074     SET_ADDRESS(_C1_blobs, bs->load_reference_barrier_phantom_rt_code_blob()->code_begin());
4075   }
4076 #endif // INCLUDE_SHENANDOAHGC
4077 #endif // COMPILER1
4078 
4079   assert(_C1_blobs_length <= _C1_blobs_max, "increase _C1_blobs_max to %d", _C1_blobs_length);
4080   _c1_complete = true;
4081   log_info(scc,init)("Runtime1 Blobs recorded");
4082 }
4083 
4084 #undef SET_ADDRESS
4085 
4086 SCAddressTable::~SCAddressTable() {
4087   if (_extrs_addr != nullptr) {
4088     FREE_C_HEAP_ARRAY(address, _extrs_addr);
4089   }
4090   if (_stubs_addr != nullptr) {
4091     FREE_C_HEAP_ARRAY(address, _stubs_addr);
4092   }
4093   if (_blobs_addr != nullptr) {
4094     FREE_C_HEAP_ARRAY(address, _blobs_addr);
4095   }
4096 }
4097 
4098 #ifdef PRODUCT
4099 #define MAX_STR_COUNT 200
4100 #else
4101 #define MAX_STR_COUNT 500
4102 #endif
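// File-scope registry of C string constants referenced from compiled code:
//   _C_strings[]      - addresses of registered strings (after load_strings(),
//                       pointers into the copied _C_strings_buf)
//   _C_strings_count  - number of registered string addresses
//   _C_strings_s[]    - for each recorded id, the _C_strings[] index of its source string
//   _C_strings_id[]   - recorded id for each registered string, -1 until assigned
//   _C_strings_len[]  - length of each recorded string
//   _C_strings_hash[] - java_lang_String style hash of each recorded string
//   _C_strings_used   - number of distinct recorded strings (assigned ids)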
4103 static const char* _C_strings[MAX_STR_COUNT] = {nullptr};
4104 static int _C_strings_count = 0;
4105 static int _C_strings_s[MAX_STR_COUNT] = {0};
4106 static int _C_strings_id[MAX_STR_COUNT] = {0};
4107 static int _C_strings_len[MAX_STR_COUNT] = {0};
4108 static int _C_strings_hash[MAX_STR_COUNT] = {0};
4109 static int _C_strings_used = 0;
4110 
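// Load C strings recorded by store_strings(). Layout of the strings section:
//   [lengths: strings_count * uint][hashes: strings_count * uint][string bytes]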
4111 void SCCache::load_strings() {
4112   uint strings_count  = _load_header->strings_count();
4113   if (strings_count == 0) {
4114     return;
4115   }
4116   uint strings_offset = _load_header->strings_offset();
4117   uint strings_size   = _load_header->entries_offset() - strings_offset;
4118   uint data_size = (uint)(strings_count * sizeof(uint));
4119   uint* sizes = (uint*)addr(strings_offset);
4120   uint* hashs = (uint*)addr(strings_offset + data_size);
4121   strings_size -= 2 * data_size;
4122   // We have to keep cached strings alive longer than the _cache buffer
4123   // because they are referenced from compiled code which may
4124   // still be executed on VM exit after _cache is freed.
4125   char* p = NEW_C_HEAP_ARRAY(char, strings_size+1, mtCode);
4126   memcpy(p, addr(strings_offset + 2 * data_size), strings_size);
4127   _C_strings_buf = p;
4128   assert(strings_count <= MAX_STR_COUNT, "sanity");
4129   for (uint i = 0; i < strings_count; i++) {
4130     _C_strings[i] = p;
4131     uint len = sizes[i];
4132     _C_strings_s[i] = i;
4133     _C_strings_id[i] = i;
4134     _C_strings_len[i] = len;
4135     _C_strings_hash[i] = hashs[i];
4136     p += len;
4137   }
4138   assert((uint)(p - _C_strings_buf) <= strings_size, "(" INTPTR_FORMAT " - " INTPTR_FORMAT ") = %d > %d ", p2i(p), p2i(_C_strings_buf), (uint)(p - _C_strings_buf), strings_size);
4139   _C_strings_count = strings_count;
4140   _C_strings_used  = strings_count;
4141   log_info(scc, init)("Load %d C strings at offset %d from AOT Code Cache", _C_strings_count, strings_offset);
4142 }
4143 
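// Write the recorded C strings to the cache: the lengths (including the
// terminating 0) first, then the hashes, then the string bytes.
// Returns the number of strings written, or -1 on a write failure.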
4144 int SCCache::store_strings() {
4145   uint offset = _write_position;
4146   uint length = 0;
4147   if (_C_strings_used > 0) {
4148     // Write sizes first
4149     for (int i = 0; i < _C_strings_used; i++) {
4150       uint len = _C_strings_len[i] + 1; // Include terminating 0
4151       length += len;
4152       assert(len < 1000, "big string: %s", _C_strings[i]);
4153       uint n = write_bytes(&len, sizeof(uint));
4154       if (n != sizeof(uint)) {
4155         return -1;
4156       }
4157     }
4158     // Write hashes
4159     for (int i = 0; i < _C_strings_used; i++) {
4160       uint n = write_bytes(&(_C_strings_hash[i]), sizeof(uint));
4161       if (n != sizeof(uint)) {
4162         return -1;
4163       }
4164     }
4165     for (int i = 0; i < _C_strings_used; i++) {
4166       uint len = _C_strings_len[i] + 1; // Include terminating 0
4167       uint n = write_bytes(_C_strings[_C_strings_s[i]], len);
4168       if (n != len) {
4169         return -1;
4170       }
4171     }
4172     log_info(scc, exit)("Wrote %d C strings of total length %d at offset %d to AOT Code Cache",
4173                         _C_strings_used, length, offset);
4174   }
4175   return _C_strings_used;
4176 }
4177 
4178 void SCCache::add_new_C_string(const char* str) {
4179   assert(for_write(), "only when storing code");
4180   _table->add_C_string(str);
4181 }
4182 
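// Register a C string address referenced from code being compiled so it can
// later be encoded by id. Addresses already present are ignored; once
// MAX_STR_COUNT is reached, further strings are only reported in the log.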
4183 void SCAddressTable::add_C_string(const char* str) {
4184   if (str != nullptr && _extrs_complete) {
4185     // Check if this string address was already added
4186     for (int i = 0; i < _C_strings_count; i++) {
4187       if (_C_strings[i] == str) {
4188         return; // Found existing one
4189       }
4190     }
4191     // Add new one
4192     if (_C_strings_count < MAX_STR_COUNT) {
4193       log_trace(scc)("add_C_string: [%d] " INTPTR_FORMAT " %s", _C_strings_count, p2i(str), str);
4194       _C_strings_id[_C_strings_count] = -1; // Init
4195       _C_strings[_C_strings_count++] = str;
4196     } else {
4197       if (Thread::current()->is_Compiler_thread()) {
4198         CompileTask* task = ciEnv::current()->task();
4199         log_info(scc)("%d (L%d): Number of C strings > max %d %s",
4200                       task->compile_id(), task->comp_level(), MAX_STR_COUNT, str);
4201       }
4202     }
4203   }
4204 }
4205 
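// Map a registered C string address to its recorded id. Strings with equal
// length and hash share a single recorded id; a new id is assigned on first use.
// Returns -1 if the address was never registered.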
4206 int SCAddressTable::id_for_C_string(address str) {
4207   for (int i = 0; i < _C_strings_count; i++) {
4208     if (_C_strings[i] == (const char*)str) { // found
4209       int id = _C_strings_id[i];
4210       if (id >= 0) {
4211         assert(id < _C_strings_used, "%d >= %d", id , _C_strings_used);
4212         return id; // Found recorded
4213       }
4214       // Search for the same string content
4215       int len = (int)strlen((const char*)str);
4216       int hash = java_lang_String::hash_code((const jbyte*)str, len);
4217       for (int j = 0; j < _C_strings_used; j++) {
4218         if ((_C_strings_len[j] == len) && (_C_strings_hash[j] == hash)) {
4219           _C_strings_id[i] = j; // Found match
4220           return j;
4221         }
4222       }
4223       // Not found among recorded strings, add a new entry
4224       id = _C_strings_used++;
4225       _C_strings_s[id] = i;
4226       _C_strings_id[i] = id;
4227       _C_strings_len[id] = len;
4228       _C_strings_hash[id] = hash;
4229       return id;
4230     }
4231   }
4232   return -1;
4233 }
4234 
4235 address SCAddressTable::address_for_C_string(int idx) {
4236   assert(idx < _C_strings_count, "sanity");
4237   return (address)_C_strings[idx];
4238 }
4239 
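// Linear search for an exact address match in a table; returns the index or -1.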
4240 int search_address(address addr, address* table, uint length) {
4241   for (int i = 0; i < (int)length; i++) {
4242     if (table[i] == addr) {
4243       return i;
4244     }
4245   }
4246   return -1;
4247 }
4248 
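// Decode an id stored in cached code back into a runtime address. The id space
// is split into consecutive ranges: external runtime entries (_extrs), stub
// routines (_stubs), shared blobs (_blobs), C1 blobs, C2 blobs and C strings.
// Ids beyond the C string range encode a byte offset from os::init
// (see id_for_address()).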
4249 address SCAddressTable::address_for_id(int idx) {
4250   if (!_extrs_complete) {
4251     fatal("SCA extrs table is not complete");
4252   }
4253   if (idx == -1) {
4254     return (address)-1;
4255   }
4256   uint id = (uint)idx;
4257   // Special case: ids above the C string range encode an offset from os::init
4258   if (id > (_c_str_base + _c_str_max)) {
4259     return (address)os::init + idx;
4260   }
4261   if (idx < 0) {
4262     fatal("Incorrect id %d for SCA table", id);
4263   }
4264   // no need to compare unsigned id against 0
4265   if (/* id >= _extrs_base && */ id < _extrs_length) {
4266     return _extrs_addr[id - _extrs_base];
4267   }
4268   if (id >= _stubs_base && id < _stubs_base + _stubs_length) {
4269     return _stubs_addr[id - _stubs_base];
4270   }
4271   if (id >= _blobs_base && id < _blobs_base + _blobs_length) {
4272     return _blobs_addr[id - _blobs_base];
4273   }
4274   if (id >= _C1_blobs_base && id < _C1_blobs_base + _C1_blobs_length) {
4275     return _C1_blobs_addr[id - _C1_blobs_base];
4276   }
4277   if (id >= _C2_blobs_base && id < _C2_blobs_base + _C2_blobs_length) {
4278     return _C2_blobs_addr[id - _C2_blobs_base];
4279   }
4280   if (id >= _c_str_base && id < (_c_str_base + (uint)_C_strings_count)) {
4281     return address_for_C_string(id - _c_str_base);
4282   }
4283   fatal("Incorrect id %d for SCA table", id);
4284   return nullptr;
4285 }
4286 
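// Encode an address referenced from compiled code as an id for the cache.
// Lookup order: registered C strings, stub routines (if the address is inside
// StubRoutines), code blobs (shared, then C1, then C2), then external runtime
// entries. An address that resolves to a non-zero offset from a symbol in a
// loaded library is assumed to be a C string and is encoded as its distance
// from os::init; any other unknown address is a fatal error.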
4287 int SCAddressTable::id_for_address(address addr, RelocIterator reloc, CodeBuffer* buffer) {
4288   int id = -1;
4289   if (addr == (address)-1) { // Static call stub has jump to itself
4290     return id;
4291   }
4292   if (!_extrs_complete) {
4293     fatal("SCA table is not complete");
4294   }
4295   // Search for C string
4296   id = id_for_C_string(addr);
4297   if (id >= 0) {
4298     return id + _c_str_base;
4299   }
4300   if (StubRoutines::contains(addr)) {
4301     // Search in stubs
4302     id = search_address(addr, _stubs_addr, _stubs_length);
4303     if (id < 0) {
4304       StubCodeDesc* desc = StubCodeDesc::desc_for(addr);
4305       if (desc == nullptr) {
4306         desc = StubCodeDesc::desc_for(addr + frame::pc_return_offset);
4307       }
4308       const char* sub_name = (desc != nullptr) ? desc->name() : "<unknown>";
4309       fatal("Address " INTPTR_FORMAT " for Stub:%s is missing in SCA table", p2i(addr), sub_name);
4310     } else {
4311       return _stubs_base + id;
4312     }
4313   } else {
4314     CodeBlob* cb = CodeCache::find_blob(addr);
4315     if (cb != nullptr) {
4316       int id_base = _blobs_base;
4317       // Search in code blobs
4318       id = search_address(addr, _blobs_addr, _blobs_length);
4319       if (id == -1) {
4320         id_base = _C1_blobs_base;
4321         // search C1 blobs
4322         id = search_address(addr, _C1_blobs_addr, _C1_blobs_length);
4323       }
4324       if (id == -1) {
4325         id_base = _C2_blobs_base;
4326         // search C2 blobs
4327         id = search_address(addr, _C2_blobs_addr, _C2_blobs_length);
4328       }
4329       if (id < 0) {
4330         fatal("Address " INTPTR_FORMAT " for Blob:%s is missing in SCA table", p2i(addr), cb->name());
4331       } else {
4332         return id_base + id;
4333       }
4334     } else {
4335       // Search in runtime functions
4336       id = search_address(addr, _extrs_addr, _extrs_length);
4337       if (id < 0) {
4338         ResourceMark rm;
4339         const int buflen = 1024;
4340         char* func_name = NEW_RESOURCE_ARRAY(char, buflen);
4341         int offset = 0;
4342         if (os::dll_address_to_function_name(addr, func_name, buflen, &offset)) {
4343           if (offset > 0) {
4344             // Could be address of C string
4345             uint dist = (uint)pointer_delta(addr, (address)os::init, 1);
4346             CompileTask* task = ciEnv::current()->task();
4347             uint compile_id = 0;
4348             uint comp_level = 0;
4349             if (task != nullptr) { // this could be called from compiler runtime initialization (compiler blobs)
4350               compile_id = task->compile_id();
4351               comp_level = task->comp_level();
4352             }
4353             log_info(scc)("%d (L%d): Address " INTPTR_FORMAT " (offset %d) for runtime target '%s' is missing in SCA table",
4354                           compile_id, comp_level, p2i(addr), dist, (const char*)addr);
4355             assert(dist > (uint)(_all_max + MAX_STR_COUNT), "change encoding of distance");
4356             return dist;
4357           }
4358           fatal("Address " INTPTR_FORMAT " for runtime target '%s+%d' is missing in SCA table", p2i(addr), func_name, offset);
4359         } else {
4360           os::print_location(tty, p2i(addr), true);
4361           reloc.print_current_on(tty);
4362 #ifndef PRODUCT
4363           buffer->print_on(tty);
4364           buffer->decode();
4365 #endif // !PRODUCT
4366           fatal("Address " INTPTR_FORMAT " for <unknown> is missing in SCA table", p2i(addr));
4367         }
4368       } else {
4369         return _extrs_base + id;
4370       }
4371     }
4372   }
4373   return id;
4374 }
4375 
4376 #undef _extrs_max
4377 #undef _stubs_max
4378 #undef _all_blobs_max
4379 #undef _blobs_max
4380 #undef _C1_blobs_max
4381 #undef _C2_blobs_max
4382 #undef _extrs_base
4383 #undef _stubs_base
4384 #undef _blobs_base
4385 #undef _C1_blobs_base
4386 #undef _C2_blobs_base
4387 #undef _c_str_base
4388 
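// Capture card table geometry (grain and card shift) from the running GC
// configuration. _field_addresses_list below exposes the addresses of these
// constants, presumably so cached code can be bound to them at load time.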
4389 void AOTRuntimeConstants::initialize_from_runtime() {
4390   BarrierSet* bs = BarrierSet::barrier_set();
4391   if (bs->is_a(BarrierSet::CardTableBarrierSet)) {
4392     CardTableBarrierSet* ctbs = ((CardTableBarrierSet*)bs);
4393     _aot_runtime_constants._grain_shift = ctbs->grain_shift();
4394     _aot_runtime_constants._card_shift = ctbs->card_shift();
4395   }
4396 }
4397 
4398 AOTRuntimeConstants AOTRuntimeConstants::_aot_runtime_constants;
4399 
4400 address AOTRuntimeConstants::_field_addresses_list[] = {
4401   grain_shift_address(),
4402   card_shift_address(),
4403   nullptr
4404 };
4405 
4406 
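// _nmethod_readers protocol: a non-negative value means the cache is open with
// that many active readers. Closing flips the counter to -(readers + 1) so no
// new ReadingMark can enter, then waits for departing readers to bring it up
// to -1.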
4407 void SCCache::wait_for_no_nmethod_readers() {
4408   while (true) {
4409     int cur = Atomic::load(&_nmethod_readers);
4410     int upd = -(cur + 1);
4411     if (cur >= 0 && Atomic::cmpxchg(&_nmethod_readers, cur, upd) == cur) {
4412       // Success, no new readers should appear.
4413       break;
4414     }
4415   }
4416 
4417   // Now wait for all readers to leave.
4418   SpinYield w;
4419   while (Atomic::load(&_nmethod_readers) != -1) {
4420     w.wait();
4421   }
4422 }
4423 
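// Enter as a reader: bump the counter if the cache is still open; otherwise
// record the failure so the destructor does nothing.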
4424 SCCache::ReadingMark::ReadingMark() {
4425   while (true) {
4426     int cur = Atomic::load(&_nmethod_readers);
4427     if (cur < 0) {
4428       // Cache is already closed, cannot proceed.
4429       _failed = true;
4430       return;
4431     }
4432     if (Atomic::cmpxchg(&_nmethod_readers, cur, cur + 1) == cur) {
4433       // Successfully recorded ourselves as entered.
4434       _failed = false;
4435       return;
4436     }
4437   }
4438 }
4439 
4440 SCCache::ReadingMark::~ReadingMark() {
4441   if (_failed) {
4442     return;
4443   }
4444   while (true) {
4445     int cur = Atomic::load(&_nmethod_readers);
4446     if (cur > 0) {
4447       // Cache is open, we are counting down towards 0.
4448       if (Atomic::cmpxchg(&_nmethod_readers, cur, cur - 1) == cur) {
4449         return;
4450       }
4451     } else {
4452       // Cache is closed, we are counting up towards -1.
4453       if (Atomic::cmpxchg(&_nmethod_readers, cur, cur + 1) == cur) {
4454         return;
4455       }
4456     }
4457   }
4458 }