1 /*
   2  * Copyright (c) 2023, 2024, Oracle and/or its affiliates. All rights reserved.
   3  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
   4  *
   5  * This code is free software; you can redistribute it and/or modify it
   6  * under the terms of the GNU General Public License version 2 only, as
   7  * published by the Free Software Foundation.
   8  *
   9  * This code is distributed in the hope that it will be useful, but WITHOUT
  10  * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
  11  * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
  12  * version 2 for more details (a copy is included in the LICENSE file that
  13  * accompanied this code).
  14  *
  15  * You should have received a copy of the GNU General Public License version
  16  * 2 along with this work; if not, write to the Free Software Foundation,
  17  * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
  18  *
  19  * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
  20  * or visit www.oracle.com if you need additional information or have any
  21  * questions.
  22  *
  23  */
  24 
  25 #include "precompiled.hpp"
  26 #include "asm/macroAssembler.hpp"
  27 #include "cds/cdsAccess.hpp"
  28 #include "cds/cdsConfig.hpp"
  29 #include "cds/metaspaceShared.hpp"
  30 #include "ci/ciConstant.hpp"
  31 #include "ci/ciEnv.hpp"
  32 #include "ci/ciField.hpp"
  33 #include "ci/ciMethod.hpp"
  34 #include "ci/ciMethodData.hpp"
  35 #include "ci/ciObject.hpp"
  36 #include "ci/ciUtilities.inline.hpp"
  37 #include "classfile/javaAssertions.hpp"
  38 #include "classfile/stringTable.hpp"
  39 #include "classfile/symbolTable.hpp"
  40 #include "classfile/systemDictionary.hpp"
  41 #include "classfile/vmClasses.hpp"
  42 #include "classfile/vmIntrinsics.hpp"
  43 #include "code/codeBlob.hpp"
  44 #include "code/codeCache.hpp"
  45 #include "code/oopRecorder.inline.hpp"
  46 #include "code/SCCache.hpp"
  47 #include "compiler/abstractCompiler.hpp"
  48 #include "compiler/compilationPolicy.hpp"
  49 #include "compiler/compileBroker.hpp"
  50 #include "compiler/compileTask.hpp"
  51 #include "gc/g1/g1BarrierSetRuntime.hpp"
  52 #include "gc/shared/gcConfig.hpp"
  53 #include "logging/log.hpp"
  54 #include "memory/universe.hpp"
  55 #include "oops/klass.inline.hpp"
  56 #include "oops/method.inline.hpp"
  57 #include "oops/trainingData.hpp"
  58 #include "prims/jvmtiThreadState.hpp"
  59 #include "runtime/atomic.hpp"
  60 #include "runtime/flags/flagSetting.hpp"
  61 #include "runtime/globals_extension.hpp"
  62 #include "runtime/handles.inline.hpp"
  63 #include "runtime/java.hpp"
  64 #include "runtime/jniHandles.inline.hpp"
  65 #include "runtime/os.inline.hpp"
  66 #include "runtime/sharedRuntime.hpp"
  67 #include "runtime/stubCodeGenerator.hpp"
  68 #include "runtime/stubRoutines.hpp"
  69 #include "runtime/timerTrace.hpp"
  70 #include "runtime/threadIdentifier.hpp"
  71 #include "utilities/ostream.hpp"
  72 #include "utilities/spinYield.hpp"
  73 #ifdef COMPILER1
  74 #include "c1/c1_Runtime1.hpp"
  75 #include "c1/c1_LIRAssembler.hpp"
  76 #include "gc/shared/c1/barrierSetC1.hpp"
  77 #include "gc/g1/c1/g1BarrierSetC1.hpp"
  78 #if INCLUDE_SHENANDOAHGC
  79 #include "gc/shenandoah/c1/shenandoahBarrierSetC1.hpp"
  80 #endif
  81 #include "gc/z/c1/zBarrierSetC1.hpp"
  82 #endif
  83 #ifdef COMPILER2
  84 #include "opto/runtime.hpp"
  85 #endif
  86 #if INCLUDE_JVMCI
  87 #include "jvmci/jvmci.hpp"
  88 #endif
  89 #if INCLUDE_SHENANDOAHGC
  90 #include "gc/shenandoah/shenandoahRuntime.hpp"
  91 #endif
  92 
  93 #include <sys/stat.h>
  94 #include <errno.h>
  95 
  96 #ifndef O_BINARY       // if defined (Win32) use binary files.
  97 #define O_BINARY 0     // otherwise do nothing.
  98 #endif
  99 
 100 static elapsedTimer _t_totalLoad;
 101 static elapsedTimer _t_totalRegister;
 102 static elapsedTimer _t_totalFind;
 103 static elapsedTimer _t_totalStore;
 104 
 105 SCCache* SCCache::_cache = nullptr;
 106 
 107 static bool enable_timers() {
 108   return CITime || log_is_enabled(Info, init);
 109 }
 110 
 111 static void exit_vm_on_load_failure() {
  112   // Treat SCC warnings as errors when RequireSharedSpaces is on.
  113   if (RequireSharedSpaces) {
  114     vm_exit_during_initialization("Unable to use startup cached code.", nullptr);
 115   }
 116 }
 117 
 118 static void exit_vm_on_store_failure() {
  119   // Treat SCC warnings as errors when RequireSharedSpaces is on.
  120   if (RequireSharedSpaces) {
  121     tty->print_cr("Unable to create startup cached code.");
  122     // Failure during AOT code caching; we don't want to dump core
 123     vm_abort(false);
 124   }
  125 }

  126 void SCCache::initialize() {
 127   if (LoadCachedCode && !UseSharedSpaces) {
 128     return;
 129   }
 130   if (StoreCachedCode || LoadCachedCode) {
 131     if (FLAG_IS_DEFAULT(ClassInitBarrierMode)) {
 132       FLAG_SET_DEFAULT(ClassInitBarrierMode, 1);
 133     }
 134   } else if (ClassInitBarrierMode > 0) {
 135     log_info(scc, init)("Set ClassInitBarrierMode to 0 because StoreCachedCode and LoadCachedCode are false.");
 136     FLAG_SET_DEFAULT(ClassInitBarrierMode, 0);
 137   }
 138   if ((LoadCachedCode || StoreCachedCode) && CachedCodeFile != nullptr) {
 139     const int len = (int)strlen(CachedCodeFile);
 140     // cache file path
 141     char* path  = NEW_C_HEAP_ARRAY(char, len+1, mtCode);
 142     memcpy(path, CachedCodeFile, len);
 143     path[len] = '\0';
 144     if (!open_cache(path)) {
 145       exit_vm_on_load_failure();
 146       return;
 147     }
 148     if (StoreCachedCode) {
 149       FLAG_SET_DEFAULT(FoldStableValues, false);
 150       FLAG_SET_DEFAULT(ForceUnreachable, true);
 151     }
 152     FLAG_SET_DEFAULT(DelayCompilerStubsGeneration, false);
 153   }
 154 }
 155 
 156 void SCCache::init2() {
 157   if (!is_on()) {
 158     return;
 159   }
 160   // After Universe initialized
 161   BarrierSet* bs = BarrierSet::barrier_set();
 162   if (bs->is_a(BarrierSet::CardTableBarrierSet)) {
 163     address byte_map_base = ci_card_table_address_as<address>();
 164     if (is_on_for_write() && !external_word_Relocation::can_be_relocated(byte_map_base)) {
 165       // Bail out since we can't encode card table base address with relocation
 166       log_warning(scc, init)("Can't create Startup Code Cache because card table base address is not relocatable: " INTPTR_FORMAT, p2i(byte_map_base));
 167       close();
 168       exit_vm_on_load_failure();
 169     }
 170   }
 171   // initialize aot runtime constants as appropriate to this runtime
 172   AOTRuntimeConstants::initialize_from_runtime();
 173 
 174   if (!verify_vm_config()) {
 175     close();
 176     exit_vm_on_load_failure();
 177   }
 178 }
 179 
 180 void SCCache::print_timers_on(outputStream* st) {
 181   if (LoadCachedCode) {
 182     st->print_cr ("    SC Load Time:         %7.3f s", _t_totalLoad.seconds());
 183     st->print_cr ("      nmethod register:     %7.3f s", _t_totalRegister.seconds());
 184     st->print_cr ("      find cached code:     %7.3f s", _t_totalFind.seconds());
 185   }
 186   if (StoreCachedCode) {
 187     st->print_cr ("    SC Store Time:        %7.3f s", _t_totalStore.seconds());
 188   }
 189 }
 190 
 191 bool SCCache::is_C3_on() {
 192 #if INCLUDE_JVMCI
 193   if (UseJVMCICompiler) {
 194     return (StoreCachedCode || LoadCachedCode) && UseC2asC3;
 195   }
 196 #endif
 197   return false;
 198 }
 199 
 200 bool SCCache::is_code_load_thread_on() {
 201   return UseCodeLoadThread && LoadCachedCode;
 202 }
 203 
 204 bool SCCache::gen_preload_code(ciMethod* m, int entry_bci) {
 205   VM_ENTRY_MARK;
 206   return (entry_bci == InvocationEntryBci) && is_on() && _cache->gen_preload_code() &&
 207          CDSAccess::can_generate_cached_code(m->get_Method());
 208 }
 209 
 210 static void print_helper(nmethod* nm, outputStream* st) {
 211   SCCache::iterate([&](SCCEntry* e) {
 212     if (e->method() == nm->method()) {
 213       ResourceMark rm;
 214       stringStream ss;
 215       ss.print("A%s%d", (e->for_preload() ? "P" : ""), e->comp_level());
 216       if (e->decompile() > 0) {
 217         ss.print("+D%d", e->decompile());
 218       }
 219       ss.print("[%s%s%s]",
 220                (e->is_loaded()   ? "L" : ""),
 221                (e->load_fail()   ? "F" : ""),
 222                (e->not_entrant() ? "I" : ""));
 223       ss.print("#%d", e->comp_id());
 224 
 225       st->print(" %s", ss.freeze());
 226     }
 227   });
 228 }
 229 
 230 void SCCache::close() {
 231   if (is_on()) {
 232     if (SCCache::is_on_for_read()) {
 233       LogStreamHandle(Info, init) log;
 234       if (log.is_enabled()) {
 235         log.print_cr("Startup Code Cache statistics (when closed): ");
 236         SCCache::print_statistics_on(&log);
 237         log.cr();
 238         SCCache::print_timers_on(&log);
 239 
 240         LogStreamHandle(Info, scc, init) log1;
 241         if (log1.is_enabled()) {
 242           SCCache::print_unused_entries_on(&log1);
 243         }
 244 
 245         LogStreamHandle(Info, scc, codecache) info_scc;
 246         if (info_scc.is_enabled()) {
 247           NMethodIterator iter(NMethodIterator::all);
 248           while (iter.next()) {
 249             nmethod* nm = iter.method();
 250             if (nm->is_in_use() && !nm->is_native_method() && !nm->is_osr_method()) {
 251               info_scc.print("%5d:%c%c%c%d:", nm->compile_id(),
 252                              (nm->method()->is_shared() ? 'S' : ' '),
 253                              (nm->is_scc() ? 'A' : ' '),
 254                              (nm->preloaded() ? 'P' : ' '),
 255                              nm->comp_level());
 256               print_helper(nm, &info_scc);
 257               info_scc.print(": ");
 258               CompileTask::print(&info_scc, nm, nullptr, true /*short_form*/);
 259 
 260               LogStreamHandle(Debug, scc, codecache) debug_scc;
 261               if (debug_scc.is_enabled()) {
 262                 MethodTrainingData* mtd = MethodTrainingData::lookup_for(nm->method());
 263                 if (mtd != nullptr) {
 264                   mtd->iterate_all_compiles([&](CompileTrainingData* ctd) {
 265                     debug_scc.print("     CTD: "); ctd->print_on(&debug_scc); debug_scc.cr();
 266                   });
 267                 }
 268               }
 269             }
 270           }
 271         }
 272       }
 273     }
 274 
 275     delete _cache; // Free memory
 276     _cache = nullptr;
 277   }
 278 }
 279 
 280 void SCCache::invalidate(SCCEntry* entry) {
  281   // This can be called concurrently
 282   if (entry != nullptr && is_on()) { // Request could come after cache is closed.
 283     _cache->invalidate_entry(entry);
 284   }
 285 }
 286 
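// An entry counts as "loaded" when it lies inside the load buffer read from an existing
// cache file rather than in the in-memory store buffer used for newly compiled code.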
 287 bool SCCache::is_loaded(SCCEntry* entry) {
 288   if (is_on() && _cache->cache_buffer() != nullptr) {
 289     return (uint)((char*)entry - _cache->cache_buffer()) < _cache->load_size();
 290   }
 291   return false;
 292 }
 293 
 294 void SCCache::preload_code(JavaThread* thread) {
 295   if ((ClassInitBarrierMode == 0) || !is_on_for_read()) {
 296     return;
 297   }
 298   if ((DisableCachedCode & (1 << 3)) != 0) {
  299     return; // no preloaded code (level 5)
 300   }
 301   _cache->preload_startup_code(thread);
 302 }
 303 
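// DisableCachedCode is interpreted as a bit mask selecting which kinds of cached code to skip:
// bit 0 - C1 level 1 (simple), bit 1 - C1 level 2 (limited profile),
// bit 2 - C2 level 4 (full optimization), bit 3 - preloaded (level 5) code.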
 304 SCCEntry* SCCache::find_code_entry(const methodHandle& method, uint comp_level) {
 305   switch (comp_level) {
 306     case CompLevel_simple:
 307       if ((DisableCachedCode & (1 << 0)) != 0) {
 308         return nullptr;
 309       }
 310       break;
 311     case CompLevel_limited_profile:
 312       if ((DisableCachedCode & (1 << 1)) != 0) {
 313         return nullptr;
 314       }
 315       break;
 316     case CompLevel_full_optimization:
 317       if ((DisableCachedCode & (1 << 2)) != 0) {
 318         return nullptr;
 319       }
 320       break;
 321 
 322     default: return nullptr; // Level 1, 2, and 4 only
 323   }
 324   TraceTime t1("SC total find code time", &_t_totalFind, enable_timers(), false);
 325   if (is_on() && _cache->cache_buffer() != nullptr) {
 326     MethodData* md = method->method_data();
 327     uint decomp = (md == nullptr) ? 0 : md->decompile_count();
 328 
 329     ResourceMark rm;
 330     const char* target_name = method->name_and_sig_as_C_string();
 331     uint hash = java_lang_String::hash_code((const jbyte*)target_name, (int)strlen(target_name));
 332     SCCEntry* entry = _cache->find_entry(SCCEntry::Code, hash, comp_level, decomp);
 333     if (entry == nullptr) {
 334       log_info(scc, nmethod)("Missing entry for '%s' (comp_level %d, decomp: %d, hash: " UINT32_FORMAT_X_0 ")", target_name, (uint)comp_level, decomp, hash);
 335 #ifdef ASSERT
 336     } else {
 337       uint name_offset = entry->offset() + entry->name_offset();
  338       uint name_size   = entry->name_size(); // Includes '\0'
 339       const char* name = _cache->cache_buffer() + name_offset;
 340       if (strncmp(target_name, name, name_size) != 0) {
 341         assert(false, "SCA: saved nmethod's name '%s' is different from '%s', hash: " UINT32_FORMAT_X_0, name, target_name, hash);
 342       }
 343 #endif
 344     }
 345 
 346     DirectiveSet* directives = DirectivesStack::getMatchingDirective(method, nullptr);
 347     if (directives->IgnorePrecompiledOption) {
 348       LogStreamHandle(Info, scc, compilation) log;
 349       if (log.is_enabled()) {
 350         log.print("Ignore cached code entry on level %d for ", comp_level);
 351         method->print_value_on(&log);
 352       }
 353       return nullptr;
 354     }
 355 
 356     return entry;
 357   }
 358   return nullptr;
 359 }
 360 
 361 void SCCache::add_C_string(const char* str) {
 362   if (is_on_for_write()) {
 363     _cache->add_new_C_string(str);
 364   }
 365 }
 366 
 367 bool SCCache::allow_const_field(ciConstant& value) {
 368   return !is_on() || !StoreCachedCode // Restrict only when we generate cache
 369         // Can not trust primitive too   || !is_reference_type(value.basic_type())
 370         // May disable this too for now  || is_reference_type(value.basic_type()) && value.as_object()->should_be_constant()
 371         ;
 372 }
 373 
 374 bool SCCache::open_cache(const char* cache_path) {
 375   if (LoadCachedCode) {
 376     log_info(scc)("Trying to load Startup Code Cache '%s'", cache_path);
 377     struct stat st;
 378     if (os::stat(cache_path, &st) != 0) {
 379       log_warning(scc, init)("Specified Startup Code Cache file not found '%s'", cache_path);
 380       return false;
 381     } else if ((st.st_mode & S_IFMT) != S_IFREG) {
  382       log_warning(scc, init)("Specified Startup Code Cache '%s' is not a file", cache_path);
 383       return false;
 384     }
 385     int fd = os::open(cache_path, O_RDONLY | O_BINARY, 0);
 386     if (fd < 0) {
 387       if (errno == ENOENT) {
 388         log_warning(scc, init)("Specified Startup Code Cache file not found '%s'", cache_path);
 389       } else {
 390         log_warning(scc, init)("Failed to open Startup Code Cache file '%s': (%s)", cache_path, os::strerror(errno));
 391       }
 392       return false;
 393     } else {
 394       log_info(scc, init)("Opened for read Startup Code Cache '%s'", cache_path);
 395     }
 396     SCCache* cache = new SCCache(cache_path, fd, (uint)st.st_size);
 397     bool failed = cache->failed();
 398     if (::close(fd) < 0) {
 399       log_warning(scc)("Failed to close for read Startup Code Cache file '%s'", cache_path);
 400       failed = true;
 401     }
 402     if (failed) {
 403       delete cache;
 404       _cache = nullptr;
 405       return false;
 406     }
 407     _cache = cache;
 408   }
 409   if (_cache == nullptr && StoreCachedCode) {
 410     SCCache* cache = new SCCache(cache_path, -1 /* fd */, 0 /* size */);
 411     if (cache->failed()) {
 412       delete cache;
 413       _cache = nullptr;
 414       return false;
 415     }
 416     _cache = cache;
 417   }
 418   return true;
 419 }
 420 
 421 class CachedCodeDirectory {
 422 public:
 423   int _some_number;
 424   InstanceKlass* _some_klass;
 425   size_t _my_data_length;
 426   void* _my_data;
 427 };
 428 
 429 // Skeleton code for including cached code in CDS:
 430 //
  431 //     E.g., you can build a hashtable to record what methods have been archived.
 432 //     E.g., you can build a hashtable to record what methods have been archived.
 433 //
 434 // [2] Memory for all data for cached code, including CachedCodeDirectory, should be
 435 //     allocated using CDSAccess::allocate_from_code_cache().
 436 //
 437 // [3] CachedCodeDirectory must be the very first allocation.
 438 //
  439 // [4] Two kinds of pointers can be stored:
  440 //     - A pointer p that points to metadata. CDSAccess::can_generate_cached_code(p) must return true.
  441 //     - A pointer to a buffer returned by CDSAccess::allocate_from_code_cache().
  442 //       (It's OK to point to an interior location within this buffer).
  443 //     Such pointers must be stored using CDSAccess::set_pointer().
 444 //
 445 // The buffers allocated by CDSAccess::allocate_from_code_cache() are in a contiguous region. At runtime, this
 446 // region is mapped to the beginning of the CodeCache (see _cds_code_space in codeCache.cpp). All the pointers
 447 // in this buffer are relocated as necessary (e.g., to account for the runtime location of the CodeCache).
 448 //
 449 // Example:
 450 //
 451 // # make sure hw.cds doesn't exist, so that it's regenerated (1.5 step training)
 452 // $ rm -f hw.cds; java -Xlog:cds,scc::uptime,tags,pid -XX:CacheDataStore=hw.cds -cp ~/tmp/HelloWorld.jar HelloWorld
 453 //
  454 // # After training has finished, hw.cds should contain a CachedCodeDirectory. You can see the effect of relocation
 455 // # from the [scc] log.
 456 // $ java -Xlog:cds,scc -XX:CacheDataStore=hw.cds -cp ~/tmp/HelloWorld.jar HelloWorld
 457 // [0.016s][info][scc] new workflow: cached code mapped at 0x7fef97ebc000
 458 // [0.016s][info][scc] _cached_code_directory->_some_klass     = 0x800009ca8 (java.lang.String)
 459 // [0.016s][info][scc] _cached_code_directory->_some_number    = 0
 460 // [0.016s][info][scc] _cached_code_directory->_my_data_length = 0
 461 // [0.016s][info][scc] _cached_code_directory->_my_data        = 0x7fef97ebc020 (32 bytes offset from base)
 462 //
 463 // The 1.5 step training may be hard to debug. If you want to run in a debugger, run the above training step
 464 // with an additional "-XX:+CDSManualFinalImage" command-line argument.
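//
// Illustrative sketch only (not part of the implementation): a writer following rules
// [1]-[4] above could, at CDS dump time, do roughly the following, where my_data_size is
// a hypothetical size chosen by the caller:
//
//   CachedCodeDirectory* dir =
//     (CachedCodeDirectory*)CDSAccess::allocate_from_code_cache(sizeof(CachedCodeDirectory)); // [2] + [3]
//   CDSAccess::set_pointer(&dir->_some_klass, vmClasses::String_klass());                     // [4] metadata pointer
//   void* data = CDSAccess::allocate_from_code_cache(my_data_size);                           // [2]
//   CDSAccess::set_pointer(&dir->_my_data, data);                                             // [4] buffer pointer
//
// new_workflow_start_writing_cache() below does essentially this.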
 465 
  466 // This is always at the very beginning of the mmapped CDS "cc" (cached code) region
 467 static CachedCodeDirectory* _cached_code_directory = nullptr;
 468 
 469 #if INCLUDE_CDS_JAVA_HEAP
 470 void SCCache::new_workflow_start_writing_cache() {
 471   CachedCodeDirectory* dir = (CachedCodeDirectory*)CDSAccess::allocate_from_code_cache(sizeof(CachedCodeDirectory));
 472   _cached_code_directory = dir;
 473 
 474   CDSAccess::set_pointer(&dir->_some_klass, vmClasses::String_klass());
 475 
 476   size_t n = 120;
 477   void* d = (void*)CDSAccess::allocate_from_code_cache(n);
 478   CDSAccess::set_pointer(&dir->_my_data, d);
 479 }
 480 
 481 void SCCache::new_workflow_end_writing_cache() {
 482 
 483 }
 484 
 485 void SCCache::new_workflow_load_cache() {
 486   void* ptr = CodeCache::map_cached_code();
 487   if (ptr != nullptr) {
 488     // At this point:
 489     // - CodeCache::initialize_heaps() has finished.
 490     // - CDS archive is fully mapped ("metadata", "heap" and "cached_code" regions are mapped)
 491     // - All pointers in the mapped CDS regions are relocated.
 492     // - CDSAccess::get_archived_object() works.
 493     ResourceMark rm;
 494     _cached_code_directory = (CachedCodeDirectory*)ptr;
 495     InstanceKlass* k = _cached_code_directory->_some_klass;
 496     log_info(scc)("new workflow: cached code mapped at %p", ptr);
 497     log_info(scc)("_cached_code_directory->_some_klass     = %p (%s)", k, k->external_name());
 498     log_info(scc)("_cached_code_directory->_some_number    = %d", _cached_code_directory->_some_number);
 499     log_info(scc)("_cached_code_directory->_my_data_length = %zu", _cached_code_directory->_my_data_length);
 500     log_info(scc)("_cached_code_directory->_my_data        = %p (%zu bytes offset from base)", _cached_code_directory->_my_data,
 501                   pointer_delta((address)_cached_code_directory->_my_data, (address)_cached_code_directory, 1));
 502   }
 503 }
 504 #endif // INCLUDE_CDS_JAVA_HEAP
 505 
 506 #define DATA_ALIGNMENT HeapWordSize
 507 
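// The constructor handles both directions: for reading it loads the whole cache file into
// _load_buffer and validates the recorded JVM version string and SCCHeader configuration;
// for writing it allocates a CachedCodeMaxSize store buffer whose SCCEntry descriptors are
// filled in from the end of the buffer downwards.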
 508 SCCache::SCCache(const char* cache_path, int fd, uint load_size) {
 509   _load_header = nullptr;
 510   _cache_path = cache_path;
 511   _for_read  = LoadCachedCode;
 512   _for_write = StoreCachedCode;
 513   _load_size = load_size;
 514   _store_size = 0;
 515   _write_position = 0;
 516   _closing  = false;
 517   _failed = false;
 518   _lookup_failed = false;
 519   _table = nullptr;
 520   _load_entries = nullptr;
 521   _store_entries  = nullptr;
 522   _C_strings_buf  = nullptr;
 523   _load_buffer = nullptr;
 524   _store_buffer = nullptr;
 525   _C_load_buffer = nullptr;
 526   _C_store_buffer = nullptr;
 527   _store_entries_cnt = 0;
 528   _gen_preload_code = false;
 529   _for_preload = false;       // changed while storing entry data
 530   _has_clinit_barriers = false;
 531 
 532   _compile_id = 0;
 533   _comp_level = 0;
 534 
 535   _use_meta_ptrs = UseSharedSpaces ? UseMetadataPointers : false;
 536 
  537   // Read the header at the beginning of the cache
 538   uint header_size = sizeof(SCCHeader);
 539   if (_for_read) {
 540     // Read cache
 541     _C_load_buffer = NEW_C_HEAP_ARRAY(char, load_size + DATA_ALIGNMENT, mtCode);
 542     _load_buffer = align_up(_C_load_buffer, DATA_ALIGNMENT);
 543     uint n = (uint)::read(fd, _load_buffer, load_size);
 544     if (n != load_size) {
 545       log_warning(scc, init)("Failed to read %d bytes at address " INTPTR_FORMAT " from Startup Code Cache file '%s'", load_size, p2i(_load_buffer), _cache_path);
 546       set_failed();
 547       return;
 548     }
 549     log_info(scc, init)("Read %d bytes at address " INTPTR_FORMAT " from Startup Code Cache '%s'", load_size, p2i(_load_buffer), _cache_path);
 550 
 551     _load_header = (SCCHeader*)addr(0);
 552     const char* scc_jvm_version = addr(_load_header->jvm_version_offset());
 553     if (strncmp(scc_jvm_version, VM_Version::internal_vm_info_string(), strlen(scc_jvm_version)) != 0) {
 554       log_warning(scc, init)("Disable Startup Code Cache: JVM version '%s' recorded in '%s' does not match current version '%s'", scc_jvm_version, _cache_path, VM_Version::internal_vm_info_string());
 555       set_failed();
 556       return;
 557     }
 558     if (!_load_header->verify_config(_cache_path, load_size)) {
 559       set_failed();
 560       return;
 561     }
 562     log_info(scc, init)("Read header from Startup Code Cache '%s'", cache_path);
 563     if (_load_header->has_meta_ptrs()) {
 564       assert(UseSharedSpaces, "should be verified already");
  565       _use_meta_ptrs = true; // Regardless of UseMetadataPointers
 566       UseMetadataPointers = true;
 567     }
 568     // Read strings
 569     load_strings();
 570   }
 571   if (_for_write) {
 572     _gen_preload_code = _use_meta_ptrs && (ClassInitBarrierMode > 0);
 573 
 574     _C_store_buffer = NEW_C_HEAP_ARRAY(char, CachedCodeMaxSize + DATA_ALIGNMENT, mtCode);
 575     _store_buffer = align_up(_C_store_buffer, DATA_ALIGNMENT);
  576     // Entries are allocated at the end of the buffer in reverse order (as on a stack).
 577     _store_entries = (SCCEntry*)align_up(_C_store_buffer + CachedCodeMaxSize, DATA_ALIGNMENT);
 578     log_info(scc, init)("Allocated store buffer at address " INTPTR_FORMAT " of size %d", p2i(_store_buffer), CachedCodeMaxSize);
 579   }
 580   _table = new SCAddressTable();
 581 }
 582 
 583 void SCCache::init_table() {
 584   SCCache* cache = SCCache::cache();
 585   if (cache != nullptr && cache->_table != nullptr) {
 586     cache->_table->init();
 587   }
 588 }
 589 
 590 void SCCache::init_opto_table() {
 591   SCCache* cache = SCCache::cache();
 592   if (cache != nullptr && cache->_table != nullptr) {
 593     cache->_table->init_opto();
 594   }
 595 }
 596 
 597 void SCCache::init_c1_table() {
 598   SCCache* cache = SCCache::cache();
 599   if (cache != nullptr && cache->_table != nullptr) {
 600     cache->_table->init_c1();
 601   }
 602 }
 603 
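// SCConfig is a snapshot of the VM configuration taken when the cache is written;
// verify() below rejects the cache if any of the recorded flag, GC, shift or alignment
// settings differ in the VM that tries to load it.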
 604 void SCConfig::record(bool use_meta_ptrs) {
 605   _flags = 0;
 606   if (use_meta_ptrs) {
 607     _flags |= metadataPointers;
 608   }
 609 #ifdef ASSERT
 610   _flags |= debugVM;
 611 #endif
 612   if (UseCompressedOops) {
 613     _flags |= compressedOops;
 614   }
 615   if (UseCompressedClassPointers) {
 616     _flags |= compressedClassPointers;
 617   }
 618   if (UseTLAB) {
 619     _flags |= useTLAB;
 620   }
 621   if (JavaAssertions::systemClassDefault()) {
 622     _flags |= systemClassAssertions;
 623   }
 624   if (JavaAssertions::userClassDefault()) {
 625     _flags |= userClassAssertions;
 626   }
 627   if (EnableContended) {
 628     _flags |= enableContendedPadding;
 629   }
 630   if (RestrictContended) {
 631     _flags |= restrictContendedPadding;
 632   }
 633   _compressedOopShift    = CompressedOops::shift();
 634   _compressedKlassShift  = CompressedKlassPointers::shift();
 635   _contendedPaddingWidth = ContendedPaddingWidth;
 636   _objectAlignment       = ObjectAlignmentInBytes;
 637   _gc                    = (uint)Universe::heap()->kind();
 638 }
 639 
 640 bool SCConfig::verify(const char* cache_path) const {
 641 #ifdef ASSERT
 642   if ((_flags & debugVM) == 0) {
 643     log_warning(scc, init)("Disable Startup Code Cache: '%s' was created by product VM, it can't be used by debug VM", cache_path);
 644     return false;
 645   }
 646 #else
 647   if ((_flags & debugVM) != 0) {
 648     log_warning(scc, init)("Disable Startup Code Cache: '%s' was created by debug VM, it can't be used by product VM", cache_path);
 649     return false;
 650   }
 651 #endif
 652 
 653   CollectedHeap::Name scc_gc = (CollectedHeap::Name)_gc;
 654   if (scc_gc != Universe::heap()->kind()) {
 655     log_warning(scc, init)("Disable Startup Code Cache: '%s' was created with different GC: %s vs current %s", cache_path, GCConfig::hs_err_name(scc_gc), GCConfig::hs_err_name());
 656     return false;
 657   }
 658 
 659   if (((_flags & compressedOops) != 0) != UseCompressedOops) {
 660     log_warning(scc, init)("Disable Startup Code Cache: '%s' was created with UseCompressedOops = %s", cache_path, UseCompressedOops ? "false" : "true");
 661     return false;
 662   }
 663   if (((_flags & compressedClassPointers) != 0) != UseCompressedClassPointers) {
 664     log_warning(scc, init)("Disable Startup Code Cache: '%s' was created with UseCompressedClassPointers = %s", cache_path, UseCompressedClassPointers ? "false" : "true");
 665     return false;
 666   }
 667 
 668   if (((_flags & systemClassAssertions) != 0) != JavaAssertions::systemClassDefault()) {
 669     log_warning(scc, init)("Disable Startup Code Cache: '%s' was created with JavaAssertions::systemClassDefault() = %s", cache_path, JavaAssertions::systemClassDefault() ? "disabled" : "enabled");
 670     return false;
 671   }
 672   if (((_flags & userClassAssertions) != 0) != JavaAssertions::userClassDefault()) {
 673     log_warning(scc, init)("Disable Startup Code Cache: '%s' was created with JavaAssertions::userClassDefault() = %s", cache_path, JavaAssertions::userClassDefault() ? "disabled" : "enabled");
 674     return false;
 675   }
 676 
 677   if (((_flags & enableContendedPadding) != 0) != EnableContended) {
 678     log_warning(scc, init)("Disable Startup Code Cache: '%s' was created with EnableContended = %s", cache_path, EnableContended ? "false" : "true");
 679     return false;
 680   }
 681   if (((_flags & restrictContendedPadding) != 0) != RestrictContended) {
 682     log_warning(scc, init)("Disable Startup Code Cache: '%s' was created with RestrictContended = %s", cache_path, RestrictContended ? "false" : "true");
 683     return false;
 684   }
 685   if (_compressedOopShift != (uint)CompressedOops::shift()) {
 686     log_warning(scc, init)("Disable Startup Code Cache: '%s' was created with CompressedOops::shift() = %d vs current %d", cache_path, _compressedOopShift, CompressedOops::shift());
 687     return false;
 688   }
 689   if (_compressedKlassShift != (uint)CompressedKlassPointers::shift()) {
 690     log_warning(scc, init)("Disable Startup Code Cache: '%s' was created with CompressedKlassPointers::shift() = %d vs current %d", cache_path, _compressedKlassShift, CompressedKlassPointers::shift());
 691     return false;
 692   }
 693   if (_contendedPaddingWidth != (uint)ContendedPaddingWidth) {
 694     log_warning(scc, init)("Disable Startup Code Cache: '%s' was created with ContendedPaddingWidth = %d vs current %d", cache_path, _contendedPaddingWidth, ContendedPaddingWidth);
 695     return false;
 696   }
 697   if (_objectAlignment != (uint)ObjectAlignmentInBytes) {
 698     log_warning(scc, init)("Disable Startup Code Cache: '%s' was created with ObjectAlignmentInBytes = %d vs current %d", cache_path, _objectAlignment, ObjectAlignmentInBytes);
 699     return false;
 700   }
 701   return true;
 702 }
 703 
 704 bool SCCHeader::verify_config(const char* cache_path, uint load_size) const {
 705   if (_version != SCC_VERSION) {
 706     log_warning(scc, init)("Disable Startup Code Cache: different SCC version %d vs %d recorded in '%s'", SCC_VERSION, _version, cache_path);
 707     return false;
 708   }
 709   if (_cache_size != load_size) {
 710     log_warning(scc, init)("Disable Startup Code Cache: different cached code size %d vs %d recorded in '%s'", load_size, _cache_size, cache_path);
 711     return false;
 712   }
 713   if (has_meta_ptrs() && !UseSharedSpaces) {
  714     log_warning(scc, init)("Disable Startup Code Cache: '%s' contains metadata pointers but CDS is off", cache_path);
 715     return false;
 716   }
 717   return true;
 718 }
 719 
 720 volatile int SCCache::_nmethod_readers = 0;
 721 
 722 SCCache::~SCCache() {
 723   if (_closing) {
 724     return; // Already closed
 725   }
 726   // Stop any further access to cache.
 727   // Checked on entry to load_nmethod() and store_nmethod().
 728   _closing = true;
 729   if (_for_read) {
  730     // Wait for all load_nmethod() calls to finish.
 731     wait_for_no_nmethod_readers();
 732   }
 733   // Prevent writing code into cache while we are closing it.
  734   // This lock is held by ciEnv::register_method(), which calls store_nmethod().
 735   MutexLocker ml(Compile_lock);
 736   if (for_write()) { // Finalize cache
 737     finish_write();
 738   }
 739   FREE_C_HEAP_ARRAY(char, _cache_path);
 740   if (_C_load_buffer != nullptr) {
 741     FREE_C_HEAP_ARRAY(char, _C_load_buffer);
 742     _C_load_buffer = nullptr;
 743     _load_buffer = nullptr;
 744   }
 745   if (_C_store_buffer != nullptr) {
 746     FREE_C_HEAP_ARRAY(char, _C_store_buffer);
 747     _C_store_buffer = nullptr;
 748     _store_buffer = nullptr;
 749   }
 750   if (_table != nullptr) {
 751     delete _table;
 752     _table = nullptr;
 753   }
 754 }
 755 
 756 SCCache* SCCache::open_for_read() {
 757   if (SCCache::is_on_for_read()) {
 758     return SCCache::cache();
 759   }
 760   return nullptr;
 761 }
 762 
 763 SCCache* SCCache::open_for_write() {
 764   if (SCCache::is_on_for_write()) {
 765     SCCache* cache = SCCache::cache();
 766     cache->clear_lookup_failed(); // Reset bit
 767     return cache;
 768   }
 769   return nullptr;
 770 }
 771 
 772 void copy_bytes(const char* from, address to, uint size) {
 773   assert(size > 0, "sanity");
 774   bool by_words = true;
 775   if ((size > 2 * HeapWordSize) && (((intptr_t)from | (intptr_t)to) & (HeapWordSize - 1)) == 0) {
 776     // Use wordwise copies if possible:
 777     Copy::disjoint_words((HeapWord*)from,
 778                          (HeapWord*)to,
 779                          ((size_t)size + HeapWordSize-1) / HeapWordSize);
 780   } else {
 781     by_words = false;
 782     Copy::conjoint_jbytes(from, to, (size_t)size);
 783   }
 784   log_trace(scc)("Copied %d bytes as %s from " INTPTR_FORMAT " to " INTPTR_FORMAT, size, (by_words ? "HeapWord" : "bytes"), p2i(from), p2i(to));
 785 }
 786 
 787 void SCCReader::set_read_position(uint pos) {
 788   if (pos == _read_position) {
 789     return;
 790   }
 791   assert(pos < _cache->load_size(), "offset:%d >= file size:%d", pos, _cache->load_size());
 792   _read_position = pos;
 793 }
 794 
 795 bool SCCache::set_write_position(uint pos) {
 796   if (pos == _write_position) {
 797     return true;
 798   }
 799   if (_store_size < _write_position) {
 800     _store_size = _write_position; // Adjust during write
 801   }
 802   assert(pos < _store_size, "offset:%d >= file size:%d", pos, _store_size);
 803   _write_position = pos;
 804   return true;
 805 }
 806 
 807 static char align_buffer[256] = { 0 };
 808 
 809 bool SCCache::align_write() {
  810   // We do not execute code from the cache directly - it is copied out byte by byte first,
  811   // so there is no need for big alignment (or any alignment at all).
 812   uint padding = DATA_ALIGNMENT - (_write_position & (DATA_ALIGNMENT - 1));
 813   if (padding == DATA_ALIGNMENT) {
 814     return true;
 815   }
 816   uint n = write_bytes((const void*)&align_buffer, padding);
 817   if (n != padding) {
 818     return false;
 819   }
 820   log_trace(scc)("Adjust write alignment in Startup Code Cache '%s'", _cache_path);
 821   return true;
 822 }
 823 
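// Data is appended from the start of the store buffer while SCCEntry descriptors grow
// downwards from its end (see the constructor and add_entry()); a write fails once the
// two regions would collide.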
 824 uint SCCache::write_bytes(const void* buffer, uint nbytes) {
 825   assert(for_write(), "Code Cache file is not created");
 826   if (nbytes == 0) {
 827     return 0;
 828   }
 829   uint new_position = _write_position + nbytes;
 830   if (new_position >= (uint)((char*)_store_entries - _store_buffer)) {
 831     log_warning(scc)("Failed to write %d bytes at offset %d to Startup Code Cache file '%s'. Increase CachedCodeMaxSize.",
 832                      nbytes, _write_position, _cache_path);
 833     set_failed();
 834     exit_vm_on_store_failure();
 835     return 0;
 836   }
 837   copy_bytes((const char* )buffer, (address)(_store_buffer + _write_position), nbytes);
 838   log_trace(scc)("Wrote %d bytes at offset %d to Startup Code Cache '%s'", nbytes, _write_position, _cache_path);
 839   _write_position += nbytes;
 840   if (_store_size < _write_position) {
 841     _store_size = _write_position;
 842   }
 843   return nbytes;
 844 }
 845 
 846 void SCCEntry::update_method_for_writing() {
 847   if (_method != nullptr) {
 848     _method = CDSAccess::method_in_cached_code(_method);
 849   }
 850 }
 851 
 852 void SCCEntry::print(outputStream* st) const {
 853   st->print_cr(" SCA entry " INTPTR_FORMAT " [kind: %d, id: " UINT32_FORMAT_X_0 ", offset: %d, size: %d, comp_level: %d, comp_id: %d, decompiled: %d, %s%s%s%s%s]",
 854                p2i(this), (int)_kind, _id, _offset, _size, _comp_level, _comp_id, _decompile,
 855                (_not_entrant? "not_entrant" : "entrant"),
 856                (_loaded ? ", loaded" : ""),
 857                (_has_clinit_barriers ? ", has_clinit_barriers" : ""),
 858                (_for_preload ? ", for_preload" : ""),
 859                (_ignore_decompile ? ", ignore_decomp" : ""));
 860 }
 861 
 862 void* SCCEntry::operator new(size_t x, SCCache* cache) {
 863   return (void*)(cache->add_entry());
 864 }
 865 
 866 bool skip_preload(methodHandle mh) {
 867   if (!mh->method_holder()->is_loaded()) {
 868     return true;
 869   }
 870   DirectiveSet* directives = DirectivesStack::getMatchingDirective(mh, nullptr);
 871   if (directives->DontPreloadOption) {
 872     LogStreamHandle(Info, scc, init) log;
 873     if (log.is_enabled()) {
 874       log.print("Exclude preloading code for ");
 875       mh->print_value_on(&log);
 876     }
 877     return true;
 878   }
 879   return false;
 880 }
 881 
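// Walks the preload entry index table recorded in the cache header. For each entrant entry
// that is not excluded by a compiler directive, the holder class is linked if needed, the
// archived SCCEntry is attached to the Method (unless one is already attached), and a
// CompLevel_full_optimization compilation with CompileTask::Reason_Preload is scheduled so
// the cached code is installed through the regular CompileBroker path.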
 882 void SCCache::preload_startup_code(TRAPS) {
 883   if (CompilationPolicy::compiler_count(CompLevel_full_optimization) == 0) {
  884     // Since we reuse the CompileBroker API to install cached code, we're required to have a JIT compiler for the
  885     // level we want (that is, CompLevel_full_optimization).
 886     return;
 887   }
 888   assert(_for_read, "sanity");
 889   uint count = _load_header->entries_count();
 890   if (_load_entries == nullptr) {
 891     // Read it
 892     _search_entries = (uint*)addr(_load_header->entries_offset()); // [id, index]
 893     _load_entries = (SCCEntry*)(_search_entries + 2 * count);
 894     log_info(scc, init)("Read %d entries table at offset %d from Startup Code Cache '%s'", count, _load_header->entries_offset(), _cache_path);
 895   }
 896   uint preload_entries_count = _load_header->preload_entries_count();
 897   if (preload_entries_count > 0) {
 898     uint* entries_index = (uint*)addr(_load_header->preload_entries_offset());
 899     log_info(scc, init)("Load %d preload entries from Startup Code Cache '%s'", preload_entries_count, _cache_path);
 900     uint count = MIN2(preload_entries_count, SCLoadStop);
 901     for (uint i = SCLoadStart; i < count; i++) {
 902       uint index = entries_index[i];
 903       SCCEntry* entry = &(_load_entries[index]);
 904       if (entry->not_entrant()) {
 905         continue;
 906       }
 907       methodHandle mh(THREAD, entry->method());
 908       assert((mh.not_null() && MetaspaceShared::is_in_shared_metaspace((address)mh())), "sanity");
 909       if (skip_preload(mh)) {
 910         continue; // Exclude preloading for this method
 911       }
 912       assert(mh->method_holder()->is_loaded(), "");
 913       if (!mh->method_holder()->is_linked()) {
 914         assert(!HAS_PENDING_EXCEPTION, "");
 915         mh->method_holder()->link_class(THREAD);
 916         if (HAS_PENDING_EXCEPTION) {
 917           LogStreamHandle(Info, scc) log;
 918           if (log.is_enabled()) {
 919             ResourceMark rm;
 920             log.print("Linkage failed for %s: ", mh->method_holder()->external_name());
 921             THREAD->pending_exception()->print_value_on(&log);
 922             if (log_is_enabled(Debug, scc)) {
 923               THREAD->pending_exception()->print_on(&log);
 924             }
 925           }
 926           CLEAR_PENDING_EXCEPTION;
 927         }
 928       }
 929       if (mh->scc_entry() != nullptr) {
  930         // A second C2 compilation of the same method could happen for
  931         // different reasons without marking the first entry as not entrant.
 932         continue; // Keep old entry to avoid issues
 933       }
 934       mh->set_scc_entry(entry);
 935       CompileBroker::compile_method(mh, InvocationEntryBci, CompLevel_full_optimization, methodHandle(), 0, false, CompileTask::Reason_Preload, CHECK);
 936     }
 937   }
 938 }
 939 
 940 static bool check_entry(SCCEntry::Kind kind, uint id, uint comp_level, uint decomp, SCCEntry* entry) {
 941   if (entry->kind() == kind) {
 942     assert(entry->id() == id, "sanity");
 943     if (kind != SCCEntry::Code || (!entry->not_entrant() && !entry->has_clinit_barriers() &&
 944                                   (entry->comp_level() == comp_level) &&
 945                                   (entry->ignore_decompile() || entry->decompile() == decomp))) {
 946       return true; // Found
 947     }
 948   }
 949   return false;
 950 }
 951 
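// The load image contains a search table of [id, index] pairs sorted by id in front of the
// SCCEntry table. find_entry() binary-searches the id and then scans the neighboring pairs
// with the same id, since several entries (e.g. the same nmethod with different decompile
// counts) can share one id.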
 952 SCCEntry* SCCache::find_entry(SCCEntry::Kind kind, uint id, uint comp_level, uint decomp) {
 953   assert(_for_read, "sanity");
 954   uint count = _load_header->entries_count();
 955   if (_load_entries == nullptr) {
 956     // Read it
 957     _search_entries = (uint*)addr(_load_header->entries_offset()); // [id, index]
 958     _load_entries = (SCCEntry*)(_search_entries + 2 * count);
 959     log_info(scc, init)("Read %d entries table at offset %d from Startup Code Cache '%s'", count, _load_header->entries_offset(), _cache_path);
 960   }
 961   // Binary search
 962   int l = 0;
 963   int h = count - 1;
 964   while (l <= h) {
 965     int mid = (l + h) >> 1;
 966     int ix = mid * 2;
 967     uint is = _search_entries[ix];
 968     if (is == id) {
 969       int index = _search_entries[ix + 1];
 970       SCCEntry* entry = &(_load_entries[index]);
 971       if (check_entry(kind, id, comp_level, decomp, entry)) {
 972         return entry; // Found
 973       }
  974       // Linear search around the hit (could be the same nmethod with a different decompile count)
 975       for (int i = mid - 1; i >= l; i--) { // search back
 976         ix = i * 2;
 977         is = _search_entries[ix];
 978         if (is != id) {
 979           break;
 980         }
 981         index = _search_entries[ix + 1];
 982         SCCEntry* entry = &(_load_entries[index]);
 983         if (check_entry(kind, id, comp_level, decomp, entry)) {
 984           return entry; // Found
 985         }
 986       }
 987       for (int i = mid + 1; i <= h; i++) { // search forward
 988         ix = i * 2;
 989         is = _search_entries[ix];
 990         if (is != id) {
 991           break;
 992         }
 993         index = _search_entries[ix + 1];
 994         SCCEntry* entry = &(_load_entries[index]);
 995         if (check_entry(kind, id, comp_level, decomp, entry)) {
 996           return entry; // Found
 997         }
 998       }
  999       break; // No match found (different decompile count or not_entrant state).
1000     } else if (is < id) {
1001       l = mid + 1;
1002     } else {
1003       h = mid - 1;
1004     }
1005   }
1006   return nullptr;
1007 }
1008 
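// Marks an entry as not entrant so find_entry() will skip it; any chained variant with
// class-init barriers (linked through next()) is invalidated as well. finish_write()
// later decides whether such entries are re-written or dropped from the new cache file.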
1009 void SCCache::invalidate_entry(SCCEntry* entry) {
 1010   assert(entry != nullptr, "all entries should be read already");
1011   if (entry->not_entrant()) {
1012     return; // Someone invalidated it already
1013   }
1014 #ifdef ASSERT
1015   bool found = false;
1016   if (_for_read) {
1017     uint count = _load_header->entries_count();
1018     uint i = 0;
1019     for(; i < count; i++) {
1020       if (entry == &(_load_entries[i])) {
1021         break;
1022       }
1023     }
1024     found = (i < count);
1025   }
1026   if (!found && _for_write) {
1027     uint count = _store_entries_cnt;
1028     uint i = 0;
1029     for(; i < count; i++) {
1030       if (entry == &(_store_entries[i])) {
1031         break;
1032       }
1033     }
1034     found = (i < count);
1035   }
1036   assert(found, "entry should exist");
1037 #endif
1038   entry->set_not_entrant();
1039   {
1040     uint name_offset = entry->offset() + entry->name_offset();
1041     const char* name;
1042     if (SCCache::is_loaded(entry)) {
1043       name = _load_buffer + name_offset;
1044     } else {
1045       name = _store_buffer + name_offset;
1046     }
1047     uint level   = entry->comp_level();
1048     uint comp_id = entry->comp_id();
1049     uint decomp  = entry->decompile();
1050     bool clinit_brs = entry->has_clinit_barriers();
1051     log_info(scc, nmethod)("Invalidated entry for '%s' (comp_id %d, comp_level %d, decomp: %d, hash: " UINT32_FORMAT_X_0 "%s)",
1052                            name, comp_id, level, decomp, entry->id(), (clinit_brs ? ", has clinit barriers" : ""));
1053   }
1054   if (entry->next() != nullptr) {
1055     entry = entry->next();
1056     assert(entry->has_clinit_barriers(), "expecting only such entries here");
1057     invalidate_entry(entry);
1058   }
1059 }
1060 
1061 extern "C" {
1062   static int uint_cmp(const void *i, const void *j) {
1063     uint a = *(uint *)i;
1064     uint b = *(uint *)j;
1065     return a > b ? 1 : a < b ? -1 : 0;
1066   }
1067 }
1068 
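// Layout of the cache file produced below:
//   SCCHeader | JVM version string | copied code blobs | C strings |
//   preload entry indexes | sorted [id, index] search table | SCCEntry table
// Entries loaded from an existing cache file (if any) are re-written first, followed by
// the newly stored entries in the order they were compiled.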
1069 bool SCCache::finish_write() {
1070   if (!align_write()) {
1071     return false;
1072   }
1073   uint strings_offset = _write_position;
1074   int strings_count = store_strings();
1075   if (strings_count < 0) {
1076     return false;
1077   }
1078   if (!align_write()) {
1079     return false;
1080   }
1081   uint strings_size = _write_position - strings_offset;
1082 
1083   uint entries_count = 0; // Number of entrant (useful) code entries
1084   uint entries_offset = _write_position;
1085 
1086   uint store_count = _store_entries_cnt;
1087   if (store_count > 0) {
1088     uint header_size = (uint)align_up(sizeof(SCCHeader),  DATA_ALIGNMENT);
1089     const char* vm_version = VM_Version::internal_vm_info_string();
1090     uint vm_version_size = (uint)align_up(strlen(vm_version) + 1, DATA_ALIGNMENT);
1091     uint load_count = (_load_header != nullptr) ? _load_header->entries_count() : 0;
1092     uint code_count = store_count + load_count;
1093     uint search_count = code_count * 2;
1094     uint search_size = search_count * sizeof(uint);
1095     uint entries_size = (uint)align_up(code_count * sizeof(SCCEntry), DATA_ALIGNMENT); // In bytes
1096     uint preload_entries_cnt = 0;
1097     uint* preload_entries = NEW_C_HEAP_ARRAY(uint, code_count, mtCode);
1098     uint preload_entries_size = code_count * sizeof(uint);
1099     // _write_position should include code and strings
1100     uint code_alignment = code_count * DATA_ALIGNMENT; // We align_up code size when storing it.
1101     uint total_size = _write_position + _load_size + header_size + vm_version_size +
1102                      code_alignment + search_size + preload_entries_size + entries_size;
1103 
1104     // Create ordered search table for entries [id, index];
1105     uint* search = NEW_C_HEAP_ARRAY(uint, search_count, mtCode);
1106     char* buffer = NEW_C_HEAP_ARRAY(char, total_size + DATA_ALIGNMENT, mtCode);
1107     char* start = align_up(buffer, DATA_ALIGNMENT);
1108     char* current = start + header_size; // Skip header
1109     uint jvm_version_offset = current - start;
1110     copy_bytes(vm_version, (address)current, (uint)strlen(vm_version) + 1);
1111     current += vm_version_size;
1112 
1113     SCCEntry* entries_address = _store_entries; // Pointer to latest entry
1114     uint not_entrant_nb = 0;
1115     uint max_size = 0;
1116     // Add old entries first
1117     if (_for_read && (_load_header != nullptr)) {
1118       for(uint i = 0; i < load_count; i++) {
1119         if (_load_entries[i].load_fail()) {
1120           continue;
1121         }
1122         if (_load_entries[i].not_entrant()) {
1123           log_info(scc, exit)("Not entrant load entry id: %d, decomp: %d, hash: " UINT32_FORMAT_X_0, i, _load_entries[i].decompile(), _load_entries[i].id());
1124           not_entrant_nb++;
1125           if (_load_entries[i].for_preload()) {
1126             // Skip not entrant preload code:
1127             // we can't pre-load code which may have failing dependencies.
1128             continue;
1129           }
1130           _load_entries[i].set_entrant(); // Reset
1131         } else if (_load_entries[i].for_preload() && _load_entries[i].method() != nullptr) {
 1132           // Record the entrant first-version code for pre-loading
1133           preload_entries[preload_entries_cnt++] = entries_count;
1134         }
1135         {
1136           uint size = align_up(_load_entries[i].size(), DATA_ALIGNMENT);
1137           if (size > max_size) {
1138             max_size = size;
1139           }
1140           copy_bytes((_load_buffer + _load_entries[i].offset()), (address)current, size);
1141           _load_entries[i].set_offset(current - start); // New offset
1142           current += size;
1143           uint n = write_bytes(&(_load_entries[i]), sizeof(SCCEntry));
1144           if (n != sizeof(SCCEntry)) {
1145             FREE_C_HEAP_ARRAY(char, buffer);
1146             FREE_C_HEAP_ARRAY(uint, search);
1147             return false;
1148           }
1149           search[entries_count*2 + 0] = _load_entries[i].id();
1150           search[entries_count*2 + 1] = entries_count;
1151           entries_count++;
1152         }
1153       }
1154     }
 1155     // SCCEntry entries were allocated in reverse order in the store buffer.
 1156     // Process them in reverse order here so that the code compiled first is cached first.
1157     for (int i = store_count - 1; i >= 0; i--) {
1158       if (entries_address[i].load_fail()) {
1159         continue;
1160       }
1161       if (entries_address[i].not_entrant()) {
1162         log_info(scc, exit)("Not entrant new entry comp_id: %d, comp_level: %d, decomp: %d, hash: " UINT32_FORMAT_X_0 "%s", entries_address[i].comp_id(), entries_address[i].comp_level(), entries_address[i].decompile(), entries_address[i].id(), (entries_address[i].has_clinit_barriers() ? ", has clinit barriers" : ""));
1163         not_entrant_nb++;
1164         if (entries_address[i].for_preload()) {
1165           // Skip not entrant preload code:
1166           // we can't pre-load code which may have failing dependencies.
1167           continue;
1168         }
1169         entries_address[i].set_entrant(); // Reset
1170       } else if (entries_address[i].for_preload() && entries_address[i].method() != nullptr) {
 1171         // Record the entrant first-version code for pre-loading
1172         preload_entries[preload_entries_cnt++] = entries_count;
1173       }
1174       {
1175         entries_address[i].set_next(nullptr); // clear pointers before storing data
1176         uint size = align_up(entries_address[i].size(), DATA_ALIGNMENT);
1177         if (size > max_size) {
1178           max_size = size;
1179         }
1180         copy_bytes((_store_buffer + entries_address[i].offset()), (address)current, size);
1181         entries_address[i].set_offset(current - start); // New offset
1182         entries_address[i].update_method_for_writing();
1183         current += size;
1184         uint n = write_bytes(&(entries_address[i]), sizeof(SCCEntry));
1185         if (n != sizeof(SCCEntry)) {
1186           FREE_C_HEAP_ARRAY(char, buffer);
1187           FREE_C_HEAP_ARRAY(uint, search);
1188           return false;
1189         }
1190         search[entries_count*2 + 0] = entries_address[i].id();
1191         search[entries_count*2 + 1] = entries_count;
1192         entries_count++;
1193       }
1194     }
1195     if (entries_count == 0) {
 1196       log_info(scc, exit)("No new entries, cache file '%s' was not %s", _cache_path, (_for_read ? "updated" : "created"));
1197       FREE_C_HEAP_ARRAY(char, buffer);
1198       FREE_C_HEAP_ARRAY(uint, search);
1199       return true; // Nothing to write
1200     }
1201     assert(entries_count <= (store_count + load_count), "%d > (%d + %d)", entries_count, store_count, load_count);
1202     // Write strings
1203     if (strings_count > 0) {
1204       copy_bytes((_store_buffer + strings_offset), (address)current, strings_size);
1205       strings_offset = (current - start); // New offset
1206       current += strings_size;
1207     }
1208     uint preload_entries_offset = (current - start);
1209     preload_entries_size = preload_entries_cnt * sizeof(uint);
1210     if (preload_entries_size > 0) {
1211       copy_bytes((const char*)preload_entries, (address)current, preload_entries_size);
1212       current += preload_entries_size;
1213       log_info(scc, exit)("Wrote %d preload entries to Startup Code Cache '%s'", preload_entries_cnt, _cache_path);
1214     }
1215     if (preload_entries != nullptr) {
1216       FREE_C_HEAP_ARRAY(uint, preload_entries);
1217     }
1218 
1219     uint new_entries_offset = (current - start); // New offset
1220     // Sort and store search table
1221     qsort(search, entries_count, 2*sizeof(uint), uint_cmp);
1222     search_size = 2 * entries_count * sizeof(uint);
1223     copy_bytes((const char*)search, (address)current, search_size);
1224     FREE_C_HEAP_ARRAY(uint, search);
1225     current += search_size;
1226 
1227     // Write entries
1228     entries_size = entries_count * sizeof(SCCEntry); // New size
1229     copy_bytes((_store_buffer + entries_offset), (address)current, entries_size);
1230     current += entries_size;
1231     log_info(scc, exit)("Wrote %d SCCEntry entries (%d were not entrant, %d max size) to Startup Code Cache '%s'", entries_count, not_entrant_nb, max_size, _cache_path);
1232 
1233     uint size = (current - start);
1234     assert(size <= total_size, "%d > %d", size , total_size);
1235 
1236     // Finalize header
1237     SCCHeader* header = (SCCHeader*)start;
1238     header->init(jvm_version_offset, size,
1239                  (uint)strings_count, strings_offset,
1240                  entries_count, new_entries_offset,
1241                  preload_entries_cnt, preload_entries_offset,
1242                  _use_meta_ptrs);
1243     log_info(scc, init)("Wrote header to Startup Code Cache '%s'", _cache_path);
1244 
1245     // Now store to file
1246 #ifdef _WINDOWS  // On Windows, need WRITE permission to remove the file.
1247     chmod(_cache_path, _S_IREAD | _S_IWRITE);
1248 #endif
1249     // Use remove() to delete the existing file because, on Unix, this will
1250     // allow processes that have it open continued access to the file.
1251     remove(_cache_path);
1252     int fd = os::open(_cache_path, O_RDWR | O_CREAT | O_TRUNC | O_BINARY, 0444);
1253     if (fd < 0) {
1254       log_warning(scc, exit)("Unable to create Startup Code Cache file '%s': (%s)", _cache_path, os::strerror(errno));
1255       FREE_C_HEAP_ARRAY(char, buffer);
1256       exit_vm_on_store_failure();
1257       return false;
1258     } else {
1259       log_info(scc, exit)("Opened for write Startup Code Cache '%s'", _cache_path);
1260     }
1261     bool success = os::write(fd, start, (size_t)size);
1262     if (!success) {
1263       log_warning(scc, exit)("Failed to write %d bytes to Startup Code Cache file '%s': (%s)", size, _cache_path, os::strerror(errno));
1264       FREE_C_HEAP_ARRAY(char, buffer);
1265       exit_vm_on_store_failure();
1266       return false;
1267     }
1268     log_info(scc, exit)("Wrote %d bytes to Startup Code Cache '%s'", size, _cache_path);
1269     if (::close(fd) < 0) {
1270       log_warning(scc, exit)("Failed to close for write Startup Code Cache file '%s'", _cache_path);
1271       exit_vm_on_store_failure();
1272     } else {
1273       log_info(scc, exit)("Closed for write Startup Code Cache '%s'", _cache_path);
1274     }
1275     FREE_C_HEAP_ARRAY(char, buffer);
1276   }
1277   return true;
1278 }
1279 
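// Stubs are cached as raw code bytes keyed by their vmIntrinsicID. On load the recorded
// name is checked against the requested one and the bytes are copied straight into the
// StubCodeGenerator's code section; no relocations are stored for stubs (store_stub()
// asserts, in debug builds, that the stub's code section has none).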
1280 bool SCCache::load_stub(StubCodeGenerator* cgen, vmIntrinsicID id, const char* name, address start) {
1281   assert(start == cgen->assembler()->pc(), "wrong buffer");
1282   SCCache* cache = open_for_read();
1283   if (cache == nullptr) {
1284     return false;
1285   }
1286   SCCEntry* entry = cache->find_entry(SCCEntry::Stub, (uint)id);
1287   if (entry == nullptr) {
1288     return false;
1289   }
1290   uint entry_position = entry->offset();
1291   // Read name
1292   uint name_offset = entry->name_offset() + entry_position;
 1293   uint name_size   = entry->name_size(); // Includes '\0'
1294   const char* saved_name = cache->addr(name_offset);
1295   if (strncmp(name, saved_name, (name_size - 1)) != 0) {
1296     log_warning(scc)("Saved stub's name '%s' is different from '%s' for id:%d", saved_name, name, (int)id);
1297     cache->set_failed();
1298     exit_vm_on_load_failure();
1299     return false;
1300   }
1301   log_info(scc,stubs)("Reading stub '%s' id:%d from Startup Code Cache '%s'", name, (int)id, cache->_cache_path);
1302   // Read code
1303   uint code_offset = entry->code_offset() + entry_position;
1304   uint code_size   = entry->code_size();
1305   copy_bytes(cache->addr(code_offset), start, code_size);
1306   cgen->assembler()->code_section()->set_end(start + code_size);
1307   log_info(scc,stubs)("Read stub '%s' id:%d from Startup Code Cache '%s'", name, (int)id, cache->_cache_path);
1308   return true;
1309 }
1310 
1311 bool SCCache::store_stub(StubCodeGenerator* cgen, vmIntrinsicID id, const char* name, address start) {
1312   SCCache* cache = open_for_write();
1313   if (cache == nullptr) {
1314     return false;
1315   }
1316   log_info(scc, stubs)("Writing stub '%s' id:%d to Startup Code Cache '%s'", name, (int)id, cache->_cache_path);
1317   if (!cache->align_write()) {
1318     return false;
1319   }
1320 #ifdef ASSERT
1321   CodeSection* cs = cgen->assembler()->code_section();
1322   if (cs->has_locs()) {
1323     uint reloc_count = cs->locs_count();
1324     tty->print_cr("======== write stubs code section relocations [%d]:", reloc_count);
1325     // Collect additional data
1326     RelocIterator iter(cs);
1327     while (iter.next()) {
1328       switch (iter.type()) {
1329         case relocInfo::none:
1330           break;
1331         default: {
1332           iter.print_current_on(tty);
1333           fatal("stub's relocation %d unimplemented", (int)iter.type());
1334           break;
1335         }
1336       }
1337     }
1338   }
1339 #endif
1340   uint entry_position = cache->_write_position;
1341 
1342   // Write code
1343   uint code_offset = 0;
1344   uint code_size = cgen->assembler()->pc() - start;
1345   uint n = cache->write_bytes(start, code_size);
1346   if (n != code_size) {
1347     return false;
1348   }
1349   // Write name
1350   uint name_offset = cache->_write_position - entry_position;
1351   uint name_size = (uint)strlen(name) + 1; // Includes '\0'
1352   n = cache->write_bytes(name, name_size);
1353   if (n != name_size) {
1354     return false;
1355   }
1356   uint entry_size = cache->_write_position - entry_position;
1357   SCCEntry* entry = new(cache) SCCEntry(entry_position, entry_size, name_offset, name_size,
1358                                           code_offset, code_size, 0, 0,
1359                                           SCCEntry::Stub, (uint32_t)id);
1360   log_info(scc, stubs)("Wrote stub '%s' id:%d to Startup Code Cache '%s'", name, (int)id, cache->_cache_path);
1361   return true;
1362 }
1363 
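// Resolve a Klass referenced from cached code. Shared klasses are restored from their
// offset relative to SharedBaseAddress; otherwise the klass is looked up by name using
// the compiled method's class loader, falling back to the default loader.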
1364 Klass* SCCReader::read_klass(const methodHandle& comp_method, bool shared) {
1365   uint code_offset = read_position();
1366   uint state = *(uint*)addr(code_offset);
1367   uint init_state = (state  & 1);
1368   uint array_dim  = (state >> 1);
1369   code_offset += sizeof(int);
1370   if (_cache->use_meta_ptrs() && shared) {
1371     uint klass_offset = *(uint*)addr(code_offset);
1372     code_offset += sizeof(uint);
1373     set_read_position(code_offset);
1374     Klass* k = (Klass*)((address)SharedBaseAddress + klass_offset);
1375     if (!MetaspaceShared::is_in_shared_metaspace((address)k)) {
1376       // Something changed in CDS
1377       set_lookup_failed();
1378       log_info(scc)("Lookup failed for shared klass: " INTPTR_FORMAT " is not in CDS ", p2i((address)k));
1379       return nullptr;
1380     }
1381     assert(k->is_klass(), "sanity");
1382     ResourceMark rm;
1383     const char* comp_name = comp_method->name_and_sig_as_C_string();
1384     if (k->is_instance_klass() && !InstanceKlass::cast(k)->is_loaded()) {
1385       set_lookup_failed();
1386       log_info(scc)("%d '%s' (L%d): Lookup failed for klass %s: not loaded",
1387                        compile_id(), comp_name, comp_level(), k->external_name());
1388       return nullptr;
1389     } else
1390     // Accept a klass that is not yet initialized if it was also uninitialized when the code was cached, or when preloading code
1391     if (k->is_instance_klass() && !InstanceKlass::cast(k)->is_initialized() && (init_state == 1) && !_preload) {
1392       set_lookup_failed();
1393       log_info(scc)("%d '%s' (L%d): Lookup failed for klass %s: not initialized",
1394                        compile_id(), comp_name, comp_level(), k->external_name());
1395       return nullptr;
1396     }
1397     if (array_dim > 0) {
1398       assert(k->is_instance_klass() || k->is_typeArray_klass(), "sanity check");
1399       Klass* ak = k->array_klass_or_null(array_dim);
1400       // FIXME: what would it take to create an array class on the fly?
1401 //      Klass* ak = k->array_klass(dim, JavaThread::current());
1402 //      guarantee(JavaThread::current()->pending_exception() == nullptr, "");
1403       if (ak == nullptr) {
1404         set_lookup_failed();
1405         log_info(scc)("%d (L%d): %d-dimension array klass lookup failed: %s",
1406                          compile_id(), comp_level(), array_dim, k->external_name());
              return nullptr;
1407       }
1408       log_info(scc)("%d (L%d): Klass lookup: %s (object array)", compile_id(), comp_level(), k->external_name());
1409       return ak;
1410     } else {
1411       log_info(scc)("%d (L%d): Shared klass lookup: %s",
1412                     compile_id(), comp_level(), k->external_name());
1413       return k;
1414     }
1415   }
1416   int name_length = *(int*)addr(code_offset);
1417   code_offset += sizeof(int);
1418   const char* dest = addr(code_offset);
1419   code_offset += name_length + 1;
1420   set_read_position(code_offset);
1421   TempNewSymbol klass_sym = SymbolTable::probe(&(dest[0]), name_length);
1422   if (klass_sym == nullptr) {
1423     set_lookup_failed();
1424     log_info(scc)("%d (L%d): Probe failed for class %s",
1425                      compile_id(), comp_level(), &(dest[0]));
1426     return nullptr;
1427   }
1428   // Use class loader of compiled method.
1429   Thread* thread = Thread::current();
1430   Handle loader(thread, comp_method->method_holder()->class_loader());
1431   Handle protection_domain(thread, comp_method->method_holder()->protection_domain());
1432   Klass* k = SystemDictionary::find_instance_or_array_klass(thread, klass_sym, loader, protection_domain);
1433   assert(!thread->has_pending_exception(), "should not throw");
1434   if (k == nullptr && !loader.is_null()) {
1435     // Try default loader and domain
1436     k = SystemDictionary::find_instance_or_array_klass(thread, klass_sym, Handle(), Handle());
1437     assert(!thread->has_pending_exception(), "should not throw");
1438   }
1439   if (k != nullptr) {
1440     // Accept a klass that is not yet initialized if it was also uninitialized when the code was cached
1441     if (k->is_instance_klass() && !InstanceKlass::cast(k)->is_initialized() && (init_state == 1)) {
1442       set_lookup_failed();
1443       log_info(scc)("%d (L%d): Lookup failed for klass %s: not initialized", compile_id(), comp_level(), &(dest[0]));
1444       return nullptr;
1445     }
1446     log_info(scc)("%d (L%d): Klass lookup %s", compile_id(), comp_level(), k->external_name());
1447   } else {
1448     set_lookup_failed();
1449     log_info(scc)("%d (L%d): Lookup failed for class %s", compile_id(), comp_level(), &(dest[0]));
1450     return nullptr;
1451   }
1452   return k;
1453 }
1454 
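// Resolve a Method referenced from cached code. Shared methods are restored from their
// offset in the CDS metaspace; otherwise the method is found by holder name, method name
// and signature.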
1455 Method* SCCReader::read_method(const methodHandle& comp_method, bool shared) {
1456   uint code_offset = read_position();
1457   if (_cache->use_meta_ptrs() && shared) {
1458     uint method_offset = *(uint*)addr(code_offset);
1459     code_offset += sizeof(uint);
1460     set_read_position(code_offset);
1461     Method* m = (Method*)((address)SharedBaseAddress + method_offset);
1462     if (!MetaspaceShared::is_in_shared_metaspace((address)m)) {
1463       // Something changed in CDS
1464       set_lookup_failed();
1465       log_info(scc)("Lookup failed for shared method: " INTPTR_FORMAT " is not in CDS ", p2i((address)m));
1466       return nullptr;
1467     }
1468     assert(m->is_method(), "sanity");
1469     ResourceMark rm;
1470     const char* comp_name = comp_method->name_and_sig_as_C_string();
1471     Klass* k = m->method_holder();
1472     if (!k->is_instance_klass()) {
1473       set_lookup_failed();
1474       log_info(scc)("%d '%s' (L%d): Lookup failed for holder %s: not instance klass", compile_id(), comp_name, comp_level(), k->external_name());
1475       return nullptr;
1476     } else if (!MetaspaceShared::is_in_shared_metaspace((address)k)) {
1477       set_lookup_failed();
1478       log_info(scc)("%d '%s' (L%d): Lookup failed for holder %s: not in CDS", compile_id(), comp_name, comp_level(), k->external_name());
1479       return nullptr;
1480     } else if (!InstanceKlass::cast(k)->is_loaded()) {
1481       set_lookup_failed();
1482       log_info(scc)("%d '%s' (L%d): Lookup failed for holder %s: not loaded", compile_id(), comp_name, comp_level(), k->external_name());
1483       return nullptr;
1484     } else if (!InstanceKlass::cast(k)->is_linked()) {
1485       set_lookup_failed();
1486       log_info(scc)("%d '%s' (L%d): Lookup failed for holder %s: not linked%s", compile_id(), comp_name, comp_level(), k->external_name(), (_preload ? " for code preload" : ""));
1487       return nullptr;
1488     }
1489     log_info(scc)("%d (L%d): Shared method lookup: %s", compile_id(), comp_level(), m->name_and_sig_as_C_string());
1490     return m;
1491   }
1492   int holder_length = *(int*)addr(code_offset);
1493   code_offset += sizeof(int);
1494   int name_length = *(int*)addr(code_offset);
1495   code_offset += sizeof(int);
1496   int signat_length = *(int*)addr(code_offset);
1497   code_offset += sizeof(int);
1498 
1499   const char* dest = addr(code_offset);
1500   code_offset += holder_length + 1 + name_length + 1 + signat_length + 1;
1501   set_read_position(code_offset);
1502   TempNewSymbol klass_sym = SymbolTable::probe(&(dest[0]), holder_length);
1503   if (klass_sym == nullptr) {
1504     set_lookup_failed();
1505     log_info(scc)("%d (L%d): Probe failed for class %s", compile_id(), comp_level(), &(dest[0]));
1506     return nullptr;
1507   }
1508   // Use class loader of compiled method.
1509   Thread* thread = Thread::current();
1510   Handle loader(thread, comp_method->method_holder()->class_loader());
1511   Handle protection_domain(thread, comp_method->method_holder()->protection_domain());
1512   Klass* k = SystemDictionary::find_instance_or_array_klass(thread, klass_sym, loader, protection_domain);
1513   assert(!thread->has_pending_exception(), "should not throw");
1514   if (k == nullptr && !loader.is_null()) {
1515     // Try default loader and domain
1516     k = SystemDictionary::find_instance_or_array_klass(thread, klass_sym, Handle(), Handle());
1517     assert(!thread->has_pending_exception(), "should not throw");
1518   }
1519   if (k != nullptr) {
1520     if (!k->is_instance_klass()) {
1521       set_lookup_failed();
1522       log_info(scc)("%d (L%d): Lookup failed for holder %s: not instance klass",
1523                        compile_id(), comp_level(), &(dest[0]));
1524       return nullptr;
1525     } else if (!InstanceKlass::cast(k)->is_linked()) {
1526       set_lookup_failed();
1527       log_info(scc)("%d (L%d): Lookup failed for holder %s: not linked",
1528                        compile_id(), comp_level(), &(dest[0]));
1529       return nullptr;
1530     }
1531     log_info(scc)("%d (L%d): Holder lookup: %s", compile_id(), comp_level(), k->external_name());
1532   } else {
1533     set_lookup_failed();
1534     log_info(scc)("%d (L%d): Lookup failed for holder %s",
1535                   compile_id(), comp_level(), &(dest[0]));
1536     return nullptr;
1537   }
1538   TempNewSymbol name_sym = SymbolTable::probe(&(dest[holder_length + 1]), name_length);
1539   int pos = holder_length + 1 + name_length + 1;
1540   TempNewSymbol sign_sym = SymbolTable::probe(&(dest[pos]), signat_length);
1541   if (name_sym == nullptr) {
1542     set_lookup_failed();
1543     log_info(scc)("%d (L%d): Probe failed for method name %s",
1544                      compile_id(), comp_level(), &(dest[holder_length + 1]));
1545     return nullptr;
1546   }
1547   if (sign_sym == nullptr) {
1548     set_lookup_failed();
1549     log_info(scc)("%d (L%d): Probe failed for method signature %s",
1550                      compile_id(), comp_level(), &(dest[pos]));
1551     return nullptr;
1552   }
1553   Method* m = InstanceKlass::cast(k)->find_method(name_sym, sign_sym);
1554   if (m != nullptr) {
1555     ResourceMark rm;
1556     log_info(scc)("%d (L%d): Method lookup: %s", compile_id(), comp_level(), m->name_and_sig_as_C_string());
1557   } else {
1558     set_lookup_failed();
1559     log_info(scc)("%d (L%d): Lookup failed for method %s::%s%s",
1560                      compile_id(), comp_level(), &(dest[0]), &(dest[holder_length + 1]), &(dest[pos]));
1561     return nullptr;
1562   }
1563   return m;
1564 }
1565 
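// Record a klass reference: a DataKind tag, the array-dimension/initialization state, and
// either the offset from the shared address base (for archived klasses) or the klass name.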
1566 bool SCCache::write_klass(Klass* klass) {
1567   if (klass->is_hidden()) { // Skip such nmethod
1568     set_lookup_failed();
1569     return false;
1570   }
1571   bool can_use_meta_ptrs = _use_meta_ptrs;
1572   uint array_dim = 0;
1573   if (klass->is_objArray_klass()) {
1574     array_dim = ObjArrayKlass::cast(klass)->dimension();
1575     klass     = ObjArrayKlass::cast(klass)->bottom_klass(); // overwrites klass
1576   }
1577   uint init_state = 0;
1578   if (klass->is_instance_klass()) {
1579     InstanceKlass* ik = InstanceKlass::cast(klass);
1580     ClassLoaderData* cld = ik->class_loader_data();
1581     if (!cld->is_builtin_class_loader_data()) {
1582       set_lookup_failed();
1583       return false;
1584     }
1585     if (_for_preload && !CDSAccess::can_generate_cached_code(ik)) {
1586       _for_preload = false;
1587       // Bailout if code has clinit barriers:
1588       // method will be recompiled without them in any case
1589       if (_has_clinit_barriers) {
1590         set_lookup_failed();
1591         return false;
1592       }
1593       can_use_meta_ptrs = false;
1594     }
1595     init_state = (ik->is_initialized() ? 1 : 0);
1596   }
1597   ResourceMark rm;
1598   uint state = (array_dim << 1) | (init_state & 1);
1599   if (can_use_meta_ptrs && CDSAccess::can_generate_cached_code(klass)) {
1600     DataKind kind = DataKind::Klass_Shared;
1601     uint n = write_bytes(&kind, sizeof(int));
1602     if (n != sizeof(int)) {
1603       return false;
1604     }
1605     // Record state of instance klass initialization.
1606     n = write_bytes(&state, sizeof(int));
1607     if (n != sizeof(int)) {
1608       return false;
1609     }
1610     uint klass_offset = CDSAccess::delta_from_shared_address_base((address)klass);
1611     n = write_bytes(&klass_offset, sizeof(uint));
1612     if (n != sizeof(uint)) {
1613       return false;
1614     }
1615     log_info(scc)("%d (L%d): Wrote shared klass: %s%s%s @ 0x%08x", compile_id(), comp_level(), klass->external_name(),
1616                   (!klass->is_instance_klass() ? "" : (init_state == 1 ? " (initialized)" : " (not-initialized)")),
1617                   (array_dim > 0 ? " (object array)" : ""),
1618                   klass_offset);
1619     return true;
1620   }
1621   // Bailout if code has clinit barriers:
1622   // method will be recompiled without them in any case
1623   if (_for_preload && _has_clinit_barriers) {
1624     set_lookup_failed();
1625     return false;
1626   }
1627   _for_preload = false;
1628   log_info(scc,cds)("%d (L%d): Not shared klass: %s", compile_id(), comp_level(), klass->external_name());
1629   DataKind kind = DataKind::Klass;
1630   uint n = write_bytes(&kind, sizeof(int));
1631   if (n != sizeof(int)) {
1632     return false;
1633   }
1634   // Record state of instance klass initialization.
1635   n = write_bytes(&state, sizeof(int));
1636   if (n != sizeof(int)) {
1637     return false;
1638   }
1639   Symbol* name = klass->name();
1640   int name_length = name->utf8_length();
1641   int total_length = name_length + 1;
1642   char* dest = NEW_RESOURCE_ARRAY(char, total_length);
1643   name->as_C_string(dest, total_length);
1644   dest[total_length - 1] = '\0';
1645   LogTarget(Info, scc, loader) log;
1646   if (log.is_enabled()) {
1647     LogStream ls(log);
1648     oop loader = klass->class_loader();
1649     oop domain = klass->protection_domain();
1650     ls.print("Class %s loader: ", dest);
1651     if (loader == nullptr) {
1652       ls.print("nullptr");
1653     } else {
1654       loader->print_value_on(&ls);
1655     }
1656     ls.print(" domain: ");
1657     if (domain == nullptr) {
1658       ls.print("nullptr");
1659     } else {
1660       domain->print_value_on(&ls);
1661     }
1662     ls.cr();
1663   }
1664   n = write_bytes(&name_length, sizeof(int));
1665   if (n != sizeof(int)) {
1666     return false;
1667   }
1668   n = write_bytes(dest, total_length);
1669   if (n != (uint)total_length) {
1670     return false;
1671   }
1672   log_info(scc)("%d (L%d): Wrote klass: %s%s%s",
1673                 compile_id(), comp_level(),
1674                 dest, (!klass->is_instance_klass() ? "" : (init_state == 1 ? " (initialized)" : " (not-initialized)")),
1675                 (array_dim > 0 ? " (object array)" : ""));
1676   return true;
1677 }
1678 
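// Record a method reference: either the offset from the shared address base (for archived
// methods) or the holder name, method name and signature strings.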
1679 bool SCCache::write_method(Method* method) {
1680   bool can_use_meta_ptrs = _use_meta_ptrs;
1681   Klass* klass = method->method_holder();
1682   if (klass->is_instance_klass()) {
1683     InstanceKlass* ik = InstanceKlass::cast(klass);
1684     ClassLoaderData* cld = ik->class_loader_data();
1685     if (!cld->is_builtin_class_loader_data()) {
1686       set_lookup_failed();
1687       return false;
1688     }
1689     if (_for_preload && !CDSAccess::can_generate_cached_code(ik)) {
1690       _for_preload = false;
1691       // Bailout if code has clinit barriers:
1692       // method will be recompiled without them in any case
1693       if (_has_clinit_barriers) {
1694         set_lookup_failed();
1695         return false;
1696       }
1697       can_use_meta_ptrs = false;
1698     }
1699   }
1700   ResourceMark rm;
1701   if (can_use_meta_ptrs && CDSAccess::can_generate_cached_code(method)) {
1702     DataKind kind = DataKind::Method_Shared;
1703     uint n = write_bytes(&kind, sizeof(int));
1704     if (n != sizeof(int)) {
1705       return false;
1706     }
1707     uint method_offset = CDSAccess::delta_from_shared_address_base((address)method);
1708     n = write_bytes(&method_offset, sizeof(uint));
1709     if (n != sizeof(uint)) {
1710       return false;
1711     }
1712     log_info(scc)("%d (L%d): Wrote shared method: %s @ 0x%08x", compile_id(), comp_level(), method->name_and_sig_as_C_string(), method_offset);
1713     return true;
1714   }
1715   // Bailout if code has clinit barriers:
1716   // method will be recompiled without them in any case
1717   if (_for_preload && _has_clinit_barriers) {
1718     set_lookup_failed();
1719     return false;
1720   }
1721   _for_preload = false;
1722   log_info(scc,cds)("%d (L%d): Not shared method: %s", compile_id(), comp_level(), method->name_and_sig_as_C_string());
1723   if (method->is_hidden()) { // Skip such nmethod
1724     set_lookup_failed();
1725     return false;
1726   }
1727   DataKind kind = DataKind::Method;
1728   uint n = write_bytes(&kind, sizeof(int));
1729   if (n != sizeof(int)) {
1730     return false;
1731   }
1732   Symbol* name   = method->name();
1733   Symbol* holder = method->klass_name();
1734   Symbol* signat = method->signature();
1735   int name_length   = name->utf8_length();
1736   int holder_length = holder->utf8_length();
1737   int signat_length = signat->utf8_length();
1738 
1739   // Write sizes and strings
1740   int total_length = holder_length + 1 + name_length + 1 + signat_length + 1;
1741   char* dest = NEW_RESOURCE_ARRAY(char, total_length);
1742   holder->as_C_string(dest, total_length);
1743   dest[holder_length] = '\0';
1744   int pos = holder_length + 1;
1745   name->as_C_string(&(dest[pos]), (total_length - pos));
1746   pos += name_length;
1747   dest[pos++] = '\0';
1748   signat->as_C_string(&(dest[pos]), (total_length - pos));
1749   dest[total_length - 1] = '\0';
1750 
1751   LogTarget(Info, scc, loader) log;
1752   if (log.is_enabled()) {
1753     LogStream ls(log);
1754     oop loader = klass->class_loader();
1755     oop domain = klass->protection_domain();
1756     ls.print("Holder %s loader: ", dest);
1757     if (loader == nullptr) {
1758       ls.print("nullptr");
1759     } else {
1760       loader->print_value_on(&ls);
1761     }
1762     ls.print(" domain: ");
1763     if (domain == nullptr) {
1764       ls.print("nullptr");
1765     } else {
1766       domain->print_value_on(&ls);
1767     }
1768     ls.cr();
1769   }
1770 
1771   n = write_bytes(&holder_length, sizeof(int));
1772   if (n != sizeof(int)) {
1773     return false;
1774   }
1775   n = write_bytes(&name_length, sizeof(int));
1776   if (n != sizeof(int)) {
1777     return false;
1778   }
1779   n = write_bytes(&signat_length, sizeof(int));
1780   if (n != sizeof(int)) {
1781     return false;
1782   }
1783   n = write_bytes(dest, total_length);
1784   if (n != (uint)total_length) {
1785     return false;
1786   }
1787   dest[holder_length] = ' ';
1788   dest[holder_length + 1 + name_length] = ' ';
1789   log_info(scc)("%d (L%d): Wrote method: %s", compile_id(), comp_level(), dest);
1790   return true;
1791 }
1792 
1793 // Repair the pc relative information in the code after load
1794 bool SCCReader::read_relocations(CodeBuffer* buffer, CodeBuffer* orig_buffer,
1795                                  OopRecorder* oop_recorder, ciMethod* target) {
1796   bool success = true;
1797   for (int i = 0; i < (int)CodeBuffer::SECT_LIMIT; i++) {
1798     uint code_offset = read_position();
1799     int reloc_count = *(int*)addr(code_offset);
1800     code_offset += sizeof(int);
1801     if (reloc_count == 0) {
1802       set_read_position(code_offset);
1803       continue;
1804     }
1805     // Read _locs_point (as offset from start)
1806     int locs_point_off = *(int*)addr(code_offset);
1807     code_offset += sizeof(int);
1808     uint reloc_size = reloc_count * sizeof(relocInfo);
1809     CodeSection* cs  = buffer->code_section(i);
1810     if (cs->locs_capacity() < reloc_count) {
1811       cs->expand_locs(reloc_count);
1812     }
1813     relocInfo* reloc_start = cs->locs_start();
1814     copy_bytes(addr(code_offset), (address)reloc_start, reloc_size);
1815     code_offset += reloc_size;
1816     cs->set_locs_end(reloc_start + reloc_count);
1817     cs->set_locs_point(cs->start() + locs_point_off);
1818 
1819     // Read additional relocation data: uint per relocation
1820     uint  data_size  = reloc_count * sizeof(uint);
1821     uint* reloc_data = (uint*)addr(code_offset);
1822     code_offset += data_size;
1823     set_read_position(code_offset);
1824     LogStreamHandle(Info, scc, reloc) log;
1825     if (log.is_enabled()) {
1826       log.print_cr("======== read code section %d relocations [%d]:", i, reloc_count);
1827     }
1828     RelocIterator iter(cs);
1829     int j = 0;
1830     while (iter.next()) {
1831       switch (iter.type()) {
1832         case relocInfo::none:
1833           break;
1834         case relocInfo::oop_type: {
1835           VM_ENTRY_MARK;
1836           oop_Relocation* r = (oop_Relocation*)iter.reloc();
1837           if (r->oop_is_immediate()) {
1838             assert(reloc_data[j] == (uint)j, "should be");
1839             methodHandle comp_method(THREAD, target->get_Method());
1840             jobject jo = read_oop(THREAD, comp_method);
1841             if (lookup_failed()) {
1842               success = false;
1843               break;
1844             }
1845             r->set_value((address)jo);
1846           } else if (false) {
1847             // Get already updated value from OopRecorder.
1848             assert(oop_recorder != nullptr, "sanity");
1849             int index = r->oop_index();
1850             jobject jo = oop_recorder->oop_at(index);
1851             oop obj = JNIHandles::resolve(jo);
1852             r->set_value(*reinterpret_cast<address*>(&obj));
1853           }
1854           break;
1855         }
1856         case relocInfo::metadata_type: {
1857           VM_ENTRY_MARK;
1858           metadata_Relocation* r = (metadata_Relocation*)iter.reloc();
1859           Metadata* m;
1860           if (r->metadata_is_immediate()) {
1861             assert(reloc_data[j] == (uint)j, "should be");
1862             methodHandle comp_method(THREAD, target->get_Method());
1863             m = read_metadata(comp_method);
1864             if (lookup_failed()) {
1865               success = false;
1866               break;
1867             }
1868           } else {
1869             // Get already updated value from OopRecorder.
1870             assert(oop_recorder != nullptr, "sanity");
1871             int index = r->metadata_index();
1872             m = oop_recorder->metadata_at(index);
1873           }
1874           r->set_value((address)m);
1875           break;
1876         }
1877         case relocInfo::virtual_call_type:   // Fall through. They all call resolve_*_call blobs.
1878         case relocInfo::opt_virtual_call_type:
1879         case relocInfo::static_call_type: {
1880           address dest = _cache->address_for_id(reloc_data[j]);
1881           if (dest != (address)-1) {
1882             ((CallRelocation*)iter.reloc())->set_destination(dest);
1883           }
1884           break;
1885         }
1886         case relocInfo::trampoline_stub_type: {
1887           address dest = _cache->address_for_id(reloc_data[j]);
1888           if (dest != (address)-1) {
1889             ((trampoline_stub_Relocation*)iter.reloc())->set_destination(dest);
1890           }
1891           break;
1892         }
1893         case relocInfo::static_stub_type:
1894           break;
1895         case relocInfo::runtime_call_type: {
1896           address dest = _cache->address_for_id(reloc_data[j]);
1897           if (dest != (address)-1) {
1898             ((CallRelocation*)iter.reloc())->set_destination(dest);
1899           }
1900           break;
1901         }
1902         case relocInfo::runtime_call_w_cp_type:
1903           fatal("runtime_call_w_cp_type unimplemented");
1904           //address destination = iter.reloc()->value();
1905           break;
1906         case relocInfo::external_word_type: {
1907           address target = _cache->address_for_id(reloc_data[j]);
1908           // Add external address to global table
1909           int index = ExternalsRecorder::find_index(target);
1910           // Update index in relocation
1911           Relocation::add_jint(iter.data(), index);
1912           external_word_Relocation* reloc = (external_word_Relocation*)iter.reloc();
1913           assert(reloc->target() == target, "sanity");
1914           reloc->set_value(target); // Patch address in the code
1915           iter.reloc()->fix_relocation_after_move(orig_buffer, buffer);
1916           break;
1917         }
1918         case relocInfo::internal_word_type:
1919           iter.reloc()->fix_relocation_after_move(orig_buffer, buffer);
1920           break;
1921         case relocInfo::section_word_type:
1922           iter.reloc()->fix_relocation_after_move(orig_buffer, buffer);
1923           break;
1924         case relocInfo::poll_type:
1925           break;
1926         case relocInfo::poll_return_type:
1927           break;
1928         case relocInfo::post_call_nop_type:
1929           break;
1930         case relocInfo::entry_guard_type:
1931           break;
1932         default:
1933           fatal("relocation %d unimplemented", (int)iter.type());
1934           break;
1935       }
1936       if (success && log.is_enabled()) {
1937         iter.print_current_on(&log);
1938       }
1939       j++;
1940     }
1941     assert(j <= (int)reloc_count, "sanity");
1942   }
1943   return success;
1944 }
1945 
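// Copy the cached code sections into the new CodeBuffer and initialize a fake 'original'
// buffer with the saved section addresses and sizes, which relocation processing uses to
// compute address deltas.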
1946 bool SCCReader::read_code(CodeBuffer* buffer, CodeBuffer* orig_buffer, uint code_offset) {
1947   assert(code_offset == align_up(code_offset, DATA_ALIGNMENT), "%d not aligned to %d", code_offset, DATA_ALIGNMENT);
1948   assert(buffer->blob() != nullptr, "sanity");
1949   SCCodeSection* scc_cs = (SCCodeSection*)addr(code_offset);
1950   for (int i = 0; i < (int)CodeBuffer::SECT_LIMIT; i++) {
1951     CodeSection* cs = buffer->code_section(i);
1952     // Read original section size and address.
1953     uint orig_size = scc_cs[i]._size;
1954     log_debug(scc)("======== read code section %d [%d]:", i, orig_size);
1955     uint orig_size_align = align_up(orig_size, DATA_ALIGNMENT);
1956     if (i != (int)CodeBuffer::SECT_INSTS) {
1957       buffer->initialize_section_size(cs, orig_size_align);
1958     }
1959     if (orig_size_align > (uint)cs->capacity()) { // Will not fit
1960       log_info(scc)("%d (L%d): original code section %d size %d > current capacity %d",
1961                        compile_id(), comp_level(), i, orig_size, cs->capacity());
1962       return false;
1963     }
1964     if (orig_size == 0) {
1965       assert(cs->size() == 0, "should match");
1966       continue;  // skip trivial section
1967     }
1968     address orig_start = scc_cs[i]._origin_address;
1969 
1970     // Populate fake original buffer (no code allocation in CodeCache).
1971     // It is used for relocations to calculate section address deltas.
1972     CodeSection* orig_cs = orig_buffer->code_section(i);
1973     assert(!orig_cs->is_allocated(), "This %d section should not be set", i);
1974     orig_cs->initialize(orig_start, orig_size);
1975 
1976     // Load code to new buffer.
1977     address code_start = cs->start();
1978     copy_bytes(addr(scc_cs[i]._offset + code_offset), code_start, orig_size_align);
1979     cs->set_end(code_start + orig_size);
1980   }
1981 
1982   return true;
1983 }
1984 
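// Load the cached exception blob; blob entries are stored under the fixed id 999.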
1985 bool SCCache::load_exception_blob(CodeBuffer* buffer, int* pc_offset) {
1986 #ifdef ASSERT
1987   LogStreamHandle(Debug, scc, nmethod) log;
1988   if (log.is_enabled()) {
1989     FlagSetting fs(PrintRelocations, true);
1990     buffer->print_on(&log);
1991   }
1992 #endif
1993   SCCache* cache = open_for_read();
1994   if (cache == nullptr) {
1995     return false;
1996   }
1997   SCCEntry* entry = cache->find_entry(SCCEntry::Blob, 999);
1998   if (entry == nullptr) {
1999     return false;
2000   }
2001   SCCReader reader(cache, entry, nullptr);
2002   return reader.compile_blob(buffer, pc_offset);
2003 }
2004 
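// Reconstruct a cached blob into 'buffer': read the saved pc_offset, check the blob name,
// then restore its code sections and relocations.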
2005 bool SCCReader::compile_blob(CodeBuffer* buffer, int* pc_offset) {
2006   uint entry_position = _entry->offset();
2007 
2008   // Read pc_offset
2009   *pc_offset = *(int*)addr(entry_position);
2010 
2011   // Read name
2012   uint name_offset = entry_position + _entry->name_offset();
2013   uint name_size = _entry->name_size(); // Includes '\0'
2014   const char* name = addr(name_offset);
2015 
2016   log_info(scc, stubs)("%d (L%d): Reading blob '%s' with pc_offset %d from Startup Code Cache '%s'",
2017                        compile_id(), comp_level(), name, *pc_offset, _cache->cache_path());
2018 
2019   if (strncmp(buffer->name(), name, (name_size - 1)) != 0) {
2020     log_warning(scc)("%d (L%d): Saved blob's name '%s' is different from '%s'",
2021                      compile_id(), comp_level(), name, buffer->name());
2022     ((SCCache*)_cache)->set_failed();
2023     exit_vm_on_load_failure();
2024     return false;
2025   }
2026 
2027   // Create fake original CodeBuffer
2028   CodeBuffer orig_buffer(name);
2029 
2030   // Read code
2031   uint code_offset = entry_position + _entry->code_offset();
2032   if (!read_code(buffer, &orig_buffer, code_offset)) {
2033     return false;
2034   }
2035 
2036   // Read relocations
2037   uint reloc_offset = entry_position + _entry->reloc_offset();
2038   set_read_position(reloc_offset);
2039   if (!read_relocations(buffer, &orig_buffer, nullptr, nullptr)) {
2040     return false;
2041   }
2042 
2043   log_info(scc, stubs)("%d (L%d): Read blob '%s' from Startup Code Cache '%s'",
2044                        compile_id(), comp_level(), name, _cache->cache_path());
2045 #ifdef ASSERT
2046   LogStreamHandle(Debug, scc, nmethod) log;
2047   if (log.is_enabled()) {
2048     FlagSetting fs(PrintRelocations, true);
2049     buffer->print_on(&log);
2050     buffer->decode();
2051   }
2052 #endif
2053   return true;
2054 }
2055 
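// Write the relocations of every code section plus one uint of auxiliary data per
// relocation: an address-table id for calls and external words, or a marker for immediate
// oops/metadata, which are serialized right after the section's relocation data.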
2056 bool SCCache::write_relocations(CodeBuffer* buffer, uint& all_reloc_size) {
2057   uint all_reloc_count = 0;
2058   for (int i = 0; i < (int)CodeBuffer::SECT_LIMIT; i++) {
2059     CodeSection* cs = buffer->code_section(i);
2060     uint reloc_count = cs->has_locs() ? cs->locs_count() : 0;
2061     all_reloc_count += reloc_count;
2062   }
2063   all_reloc_size = all_reloc_count * sizeof(relocInfo);
2064   bool success = true;
2065   uint* reloc_data = NEW_C_HEAP_ARRAY(uint, all_reloc_count, mtCode);
2066   for (int i = 0; i < (int)CodeBuffer::SECT_LIMIT; i++) {
2067     CodeSection* cs = buffer->code_section(i);
2068     int reloc_count = cs->has_locs() ? cs->locs_count() : 0;
2069     uint n = write_bytes(&reloc_count, sizeof(int));
2070     if (n != sizeof(int)) {
2071       success = false;
2072       break;
2073     }
2074     if (reloc_count == 0) {
2075       continue;
2076     }
2077     // Write _locs_point (as offset from start)
2078     int locs_point_off = cs->locs_point_off();
2079     n = write_bytes(&locs_point_off, sizeof(int));
2080     if (n != sizeof(int)) {
2081       success = false;
2082       break;
2083     }
2084     relocInfo* reloc_start = cs->locs_start();
2085     uint reloc_size      = reloc_count * sizeof(relocInfo);
2086     n = write_bytes(reloc_start, reloc_size);
2087     if (n != reloc_size) {
2088       success = false;
2089       break;
2090     }
2091     LogStreamHandle(Info, scc, reloc) log;
2092     if (log.is_enabled()) {
2093       log.print_cr("======== write code section %d relocations [%d]:", i, reloc_count);
2094     }
2095     // Collect additional data
2096     RelocIterator iter(cs);
2097     bool has_immediate = false;
2098     int j = 0;
2099     while (iter.next()) {
2100       reloc_data[j] = 0; // initialize
2101       switch (iter.type()) {
2102         case relocInfo::none:
2103           break;
2104         case relocInfo::oop_type: {
2105           oop_Relocation* r = (oop_Relocation*)iter.reloc();
2106           if (r->oop_is_immediate()) {
2107             reloc_data[j] = (uint)j; // Indication that we need to restore immediate
2108             has_immediate = true;
2109           }
2110           break;
2111         }
2112         case relocInfo::metadata_type: {
2113           metadata_Relocation* r = (metadata_Relocation*)iter.reloc();
2114           if (r->metadata_is_immediate()) {
2115             reloc_data[j] = (uint)j; // Indication that we need to restore immediate
2116             has_immediate = true;
2117           }
2118           break;
2119         }
2120         case relocInfo::virtual_call_type:  // Fall through. They all call resolve_*_call blobs.
2121         case relocInfo::opt_virtual_call_type:
2122         case relocInfo::static_call_type: {
2123           CallRelocation* r = (CallRelocation*)iter.reloc();
2124           address dest = r->destination();
2125           if (dest == r->addr()) { // possible call via trampoline on Aarch64
2126             dest = (address)-1;    // do nothing in this case when loading this relocation
2127           }
2128           reloc_data[j] = _table->id_for_address(dest, iter, buffer);
2129           break;
2130         }
2131         case relocInfo::trampoline_stub_type: {
2132           address dest = ((trampoline_stub_Relocation*)iter.reloc())->destination();
2133           reloc_data[j] = _table->id_for_address(dest, iter, buffer);
2134           break;
2135         }
2136         case relocInfo::static_stub_type:
2137           break;
2138         case relocInfo::runtime_call_type: {
2139           // Record offset of runtime destination
2140           CallRelocation* r = (CallRelocation*)iter.reloc();
2141           address dest = r->destination();
2142           if (dest == r->addr()) { // possible call via trampoline on Aarch64
2143             dest = (address)-1;    // do nothing in this case when loading this relocation
2144           }
2145           reloc_data[j] = _table->id_for_address(dest, iter, buffer);
2146           break;
2147         }
2148         case relocInfo::runtime_call_w_cp_type:
2149           fatal("runtime_call_w_cp_type unimplemented");
2150           break;
2151         case relocInfo::external_word_type: {
2152           // Record offset of runtime target
2153           address target = ((external_word_Relocation*)iter.reloc())->target();
2154           reloc_data[j] = _table->id_for_address(target, iter, buffer);
2155           break;
2156         }
2157         case relocInfo::internal_word_type:
2158           break;
2159         case relocInfo::section_word_type:
2160           break;
2161         case relocInfo::poll_type:
2162           break;
2163         case relocInfo::poll_return_type:
2164           break;
2165         case relocInfo::post_call_nop_type:
2166           break;
2167         case relocInfo::entry_guard_type:
2168           break;
2169         default:
2170           fatal("relocation %d unimplemented", (int)iter.type());
2171           break;
2172       }
2173       if (log.is_enabled()) {
2174         iter.print_current_on(&log);
2175       }
2176       j++;
2177     }
2178     assert(j <= (int)reloc_count, "sanity");
2179     // Write additional relocation data: uint per relocation
2180     uint data_size = reloc_count * sizeof(uint);
2181     n = write_bytes(reloc_data, data_size);
2182     if (n != data_size) {
2183       success = false;
2184       break;
2185     }
2186     if (has_immediate) {
2187       // Save information about immediates in this Code Section
2188       RelocIterator iter_imm(cs);
2189       int j = 0;
2190       while (iter_imm.next()) {
2191         switch (iter_imm.type()) {
2192           case relocInfo::oop_type: {
2193             oop_Relocation* r = (oop_Relocation*)iter_imm.reloc();
2194             if (r->oop_is_immediate()) {
2195               assert(reloc_data[j] == (uint)j, "should be");
2196               jobject jo = *(jobject*)(r->oop_addr()); // The slot currently holds a jobject handle
2197               if (!write_oop(jo)) {
2198                 success = false;
2199               }
2200             }
2201             break;
2202           }
2203           case relocInfo::metadata_type: {
2204             metadata_Relocation* r = (metadata_Relocation*)iter_imm.reloc();
2205             if (r->metadata_is_immediate()) {
2206               assert(reloc_data[j] == (uint)j, "should be");
2207               Metadata* m = r->metadata_value();
2208               if (!write_metadata(m)) {
2209                 success = false;
2210               }
2211             }
2212             break;
2213           }
2214           default:
2215             break;
2216         }
2217         if (!success) {
2218           break;
2219         }
2220         j++;
2221       } // while (iter_imm.next())
2222     } // if (has_immediate)
2223   } // for(i < SECT_LIMIT)
2224   FREE_C_HEAP_ARRAY(uint, reloc_data);
2225   return success;
2226 }
2227 
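// Write the code sections: a table of SCCodeSection descriptors (size, original start
// address, offset) followed by the aligned contents of each non-empty section.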
2228 bool SCCache::write_code(CodeBuffer* buffer, uint& code_size) {
2229   assert(_write_position == align_up(_write_position, DATA_ALIGNMENT), "%d not aligned to %d", _write_position, DATA_ALIGNMENT);
2230   //assert(buffer->blob() != nullptr, "sanity");
2231   uint code_offset = _write_position;
2232   uint cb_total_size = (uint)buffer->total_content_size();
2233   // Write information about Code sections first.
2234   SCCodeSection scc_cs[CodeBuffer::SECT_LIMIT];
2235   uint scc_cs_size = (uint)(sizeof(SCCodeSection) * CodeBuffer::SECT_LIMIT);
2236   uint offset = align_up(scc_cs_size, DATA_ALIGNMENT);
2237   uint total_size = 0;
2238   for (int i = 0; i < (int)CodeBuffer::SECT_LIMIT; i++) {
2239     const CodeSection* cs = buffer->code_section(i);
2240     assert(cs->mark() == nullptr, "CodeSection::_mark is not implemented");
2241     uint cs_size = (uint)cs->size();
2242     scc_cs[i]._size = cs_size;
2243     scc_cs[i]._origin_address = (cs_size == 0) ? nullptr : cs->start();
2244     scc_cs[i]._offset = (cs_size == 0) ? 0 : (offset + total_size);
2245     assert(cs->mark() == nullptr, "CodeSection::_mark is not implemented");
2246     total_size += align_up(cs_size, DATA_ALIGNMENT);
2247   }
2248   uint n = write_bytes(scc_cs, scc_cs_size);
2249   if (n != scc_cs_size) {
2250     return false;
2251   }
2252   if (!align_write()) {
2253     return false;
2254   }
2255   assert(_write_position == (code_offset + offset), "%d  != (%d + %d)", _write_position, code_offset, offset);
2256   for (int i = 0; i < (int)CodeBuffer::SECT_LIMIT; i++) {
2257     const CodeSection* cs = buffer->code_section(i);
2258     uint cs_size = (uint)cs->size();
2259     if (cs_size == 0) {
2260       continue;  // skip trivial section
2261     }
2262     assert((_write_position - code_offset) == scc_cs[i]._offset, "%d != %d", _write_position, scc_cs[i]._offset);
2263     // Write code
2264     n = write_bytes(cs->start(), cs_size);
2265     if (n != cs_size) {
2266       return false;
2267     }
2268     if (!align_write()) {
2269       return false;
2270     }
2271   }
2272   assert((_write_position - code_offset) == (offset + total_size), "(%d - %d) != (%d + %d)", _write_position, code_offset, offset, total_size);
2273   code_size = total_size;
2274   return true;
2275 }
2276 
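// Store the exception blob: pc_offset, blob name, code sections and relocation data,
// recorded as an SCCEntry::Blob entry.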
2277 bool SCCache::store_exception_blob(CodeBuffer* buffer, int pc_offset) {
2278   SCCache* cache = open_for_write();
2279   if (cache == nullptr) {
2280     return false;
2281   }
2282   log_info(scc, stubs)("Writing blob '%s' to Startup Code Cache '%s'", buffer->name(), cache->_cache_path);
2283 
2284 #ifdef ASSERT
2285   LogStreamHandle(Debug, scc, nmethod) log;
2286   if (log.is_enabled()) {
2287     FlagSetting fs(PrintRelocations, true);
2288     buffer->print_on(&log);
2289     buffer->decode();
2290   }
2291 #endif
2292   if (!cache->align_write()) {
2293     return false;
2294   }
2295   uint entry_position = cache->_write_position;
2296 
2297   // Write pc_offset
2298   uint n = cache->write_bytes(&pc_offset, sizeof(int));
2299   if (n != sizeof(int)) {
2300     return false;
2301   }
2302 
2303   // Write name
2304   const char* name = buffer->name();
2305   uint name_offset = cache->_write_position - entry_position;
2306   uint name_size = (uint)strlen(name) + 1; // Includes '\0'
2307   n = cache->write_bytes(name, name_size);
2308   if (n != name_size) {
2309     return false;
2310   }
2311 
2312   // Write code section
2313   if (!cache->align_write()) {
2314     return false;
2315   }
2316   uint code_offset = cache->_write_position - entry_position;
2317   uint code_size = 0;
2318   if (!cache->write_code(buffer, code_size)) {
2319     return false;
2320   }
2321   // Write relocInfo array
2322   uint reloc_offset = cache->_write_position - entry_position;
2323   uint reloc_size = 0;
2324   if (!cache->write_relocations(buffer, reloc_size)) {
2325     return false;
2326   }
2327 
2328   uint entry_size = cache->_write_position - entry_position;
2329   SCCEntry* entry = new(cache) SCCEntry(entry_position, entry_size, name_offset, name_size,
2330                                           code_offset, code_size, reloc_offset, reloc_size,
2331                                           SCCEntry::Blob, (uint32_t)999);
2332   log_info(scc, stubs)("Wrote blob '%s' to Startup Code Cache '%s'", name, cache->_cache_path);
2333   return true;
2334 }
2335 
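// Rebuild a DebugInformationRecorder from the cache: the serialized debug info stream
// followed by the PcDesc array.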
2336 DebugInformationRecorder* SCCReader::read_debug_info(OopRecorder* oop_recorder) {
2337   uint code_offset = align_up(read_position(), DATA_ALIGNMENT);
2338   int data_size  = *(int*)addr(code_offset);
2339   code_offset   += sizeof(int);
2340   int pcs_length = *(int*)addr(code_offset);
2341   code_offset   += sizeof(int);
2342 
2343   log_debug(scc)("======== read DebugInfo [%d, %d]:", data_size, pcs_length);
2344 
2345   // Aligned initial sizes
2346   int data_size_align  = align_up(data_size, DATA_ALIGNMENT);
2347   int pcs_length_align = pcs_length + 1;
2348   assert(sizeof(PcDesc) > DATA_ALIGNMENT, "sanity");
2349   DebugInformationRecorder* recorder = new DebugInformationRecorder(oop_recorder, data_size_align, pcs_length);
2350 
2351   copy_bytes(addr(code_offset), recorder->stream()->buffer(), data_size_align);
2352   recorder->stream()->set_position(data_size);
2353   code_offset += data_size;
2354 
2355   uint pcs_size = pcs_length * sizeof(PcDesc);
2356   copy_bytes(addr(code_offset), (address)recorder->pcs(), pcs_size);
2357   code_offset += pcs_size;
2358   set_read_position(code_offset);
2359   return recorder;
2360 }
2361 
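// Write debug info: stream size, PcDesc count, the raw stream bytes and the PcDesc array.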
2362 bool SCCache::write_debug_info(DebugInformationRecorder* recorder) {
2363   if (!align_write()) {
2364     return false;
2365   }
2366   // Don't call data_size() and pcs_size(). They will freeze OopRecorder.
2367   int data_size = recorder->stream()->position(); // In bytes
2368   uint n = write_bytes(&data_size, sizeof(int));
2369   if (n != sizeof(int)) {
2370     return false;
2371   }
2372   int pcs_length = recorder->pcs_length(); // Number of PcDesc entries
2373   n = write_bytes(&pcs_length, sizeof(int));
2374   if (n != sizeof(int)) {
2375     return false;
2376   }
2377   n = write_bytes(recorder->stream()->buffer(), data_size);
2378   if (n != (uint)data_size) {
2379     return false;
2380   }
2381   uint pcs_size = pcs_length * sizeof(PcDesc);
2382   n = write_bytes(recorder->pcs(), pcs_size);
2383   if (n != pcs_size) {
2384     return false;
2385   }
2386   return true;
2387 }
2388 
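// Recreate the OopMapSet: each OopMap is rebuilt by overlaying the cached header and
// stream data on a freshly allocated map (its write stream is preserved).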
2389 OopMapSet* SCCReader::read_oop_maps() {
2390   uint code_offset = read_position();
2391   int om_count = *(int*)addr(code_offset);
2392   code_offset += sizeof(int);
2393 
2394   log_debug(scc)("======== read oop maps [%d]:", om_count);
2395 
2396   OopMapSet* oop_maps = new OopMapSet(om_count);
2397   for (int i = 0; i < (int)om_count; i++) {
2398     int data_size = *(int*)addr(code_offset);
2399     code_offset += sizeof(int);
2400 
2401     OopMap* oop_map = new OopMap(data_size);
2402     // Preserve allocated stream
2403     CompressedWriteStream* stream = oop_map->write_stream();
2404 
2405     // Read data which overwrites default data
2406     copy_bytes(addr(code_offset), (address)oop_map, sizeof(OopMap));
2407     code_offset += sizeof(OopMap);
2408     stream->set_position(data_size);
2409     oop_map->set_write_stream(stream);
2410     if (data_size > 0) {
2411       copy_bytes(addr(code_offset), (address)(oop_map->data()), (uint)data_size);
2412       code_offset += data_size;
2413     }
2414 #ifdef ASSERT
2415     oop_map->_locs_length = 0;
2416     oop_map->_locs_used   = nullptr;
2417 #endif
2418     oop_maps->add(oop_map);
2419   }
2420   set_read_position(code_offset);
2421   return oop_maps;
2422 }
2423 
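// Write the OopMapSet: the map count, then for each map its data size, the OopMap header
// and its compressed stream data.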
2424 bool SCCache::write_oop_maps(OopMapSet* oop_maps) {
2425   uint om_count = oop_maps->size();
2426   uint n = write_bytes(&om_count, sizeof(int));
2427   if (n != sizeof(int)) {
2428     return false;
2429   }
2430   for (int i = 0; i < (int)om_count; i++) {
2431     OopMap* om = oop_maps->at(i);
2432     int data_size = om->data_size();
2433     n = write_bytes(&data_size, sizeof(int));
2434     if (n != sizeof(int)) {
2435       return false;
2436     }
2437     n = write_bytes(om, sizeof(OopMap));
2438     if (n != sizeof(OopMap)) {
2439       return false;
2440     }
2441     n = write_bytes(om->data(), (uint)data_size);
2442     if (n != (uint)data_size) {
2443       return false;
2444     }
2445   }
2446   return true;
2447 }
2448 
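// Materialize an oop from its cached DataKind tag and payload and return it as a local
// JNI handle. An unknown kind marks the lookup as failed.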
2449 jobject SCCReader::read_oop(JavaThread* thread, const methodHandle& comp_method) {
2450   uint code_offset = read_position();
2451   oop obj = nullptr;
2452   DataKind kind = *(DataKind*)addr(code_offset);
2453   code_offset += sizeof(DataKind);
2454   set_read_position(code_offset);
2455   if (kind == DataKind::Null) {
2456     return nullptr;
2457   } else if (kind == DataKind::No_Data) {
2458     return (jobject)Universe::non_oop_word();
2459   } else if (kind == DataKind::Klass || kind == DataKind::Klass_Shared) {
2460     Klass* k = read_klass(comp_method, (kind == DataKind::Klass_Shared));
2461     if (k == nullptr) {
2462       return nullptr;
2463     }
2464     obj = k->java_mirror();
2465     if (obj == nullptr) {
2466       set_lookup_failed();
2467       log_info(scc)("Lookup failed for java_mirror of klass %s", k->external_name());
2468       return nullptr;
2469     }
2470   } else if (kind == DataKind::Primitive) {
2471     code_offset = read_position();
2472     int t = *(int*)addr(code_offset);
2473     code_offset += sizeof(int);
2474     set_read_position(code_offset);
2475     BasicType bt = (BasicType)t;
2476     obj = java_lang_Class::primitive_mirror(bt);
2477     log_info(scc)("%d (L%d): Read primitive type klass: %s", compile_id(), comp_level(), type2name(bt));
2478   } else if (kind == DataKind::String_Shared) {
2479     code_offset = read_position();
2480     int k = *(int*)addr(code_offset);
2481     code_offset += sizeof(int);
2482     set_read_position(code_offset);
2483     obj = CDSAccess::get_archived_object(k);
2484     assert(k == CDSAccess::get_archived_object_permanent_index(obj), "sanity");
2485   } else if (kind == DataKind::String) {
2486     code_offset = read_position();
2487     int length = *(int*)addr(code_offset);
2488     code_offset += sizeof(int);
2489     set_read_position(code_offset);
2490     const char* dest = addr(code_offset);
2491     set_read_position(code_offset + length);
2492     obj = StringTable::intern(&(dest[0]), thread);
2493     if (obj == nullptr) {
2494       set_lookup_failed();
2495       log_info(scc)("%d (L%d): Lookup failed for String %s",
2496                        compile_id(), comp_level(), &(dest[0]));
2497       return nullptr;
2498     }
2499     assert(java_lang_String::is_instance(obj), "must be string");
2500     log_info(scc)("%d (L%d): Read String: %s", compile_id(), comp_level(), dest);
2501   } else if (kind == DataKind::SysLoader) {
2502     obj = SystemDictionary::java_system_loader();
2503     log_info(scc)("%d (L%d): Read java_system_loader", compile_id(), comp_level());
2504   } else if (kind == DataKind::PlaLoader) {
2505     obj = SystemDictionary::java_platform_loader();
2506     log_info(scc)("%d (L%d): Read java_platform_loader", compile_id(), comp_level());
2507   } else if (kind == DataKind::MH_Oop_Shared) {
2508     code_offset = read_position();
2509     int k = *(int*)addr(code_offset);
2510     code_offset += sizeof(int);
2511     set_read_position(code_offset);
2512     obj = CDSAccess::get_archived_object(k);
2513     assert(k == CDSAccess::get_archived_object_permanent_index(obj), "sanity");
2514   } else {
2515     set_lookup_failed();
2516     log_info(scc)("%d (L%d): Unknown oop's kind: %d",
2517                      compile_id(), comp_level(), (int)kind);
2518     return nullptr;
2519   }
2520   return JNIHandles::make_local(thread, obj);
2521 }
2522 
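// Restore all recorded oops (skipping the implicit first entry) and register them with
// the OopRecorder.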
2523 bool SCCReader::read_oops(OopRecorder* oop_recorder, ciMethod* target) {
2524   uint code_offset = read_position();
2525   int oop_count = *(int*)addr(code_offset);
2526   code_offset += sizeof(int);
2527   set_read_position(code_offset);
2528   log_debug(scc)("======== read oops [%d]:", oop_count);
2529   if (oop_count == 0) {
2530     return true;
2531   }
2532   {
2533     VM_ENTRY_MARK;
2534     methodHandle comp_method(THREAD, target->get_Method());
2535     for (int i = 1; i < oop_count; i++) {
2536       jobject jo = read_oop(THREAD, comp_method);
2537       if (lookup_failed()) {
2538         return false;
2539       }
2540       if (oop_recorder->is_real(jo)) {
2541         oop_recorder->find_index(jo);
2542       } else {
2543         oop_recorder->allocate_oop_index(jo);
2544       }
2545       LogStreamHandle(Debug, scc, oops) log;
2546       if (log.is_enabled()) {
2547         log.print("%d: " INTPTR_FORMAT " ", i, p2i(jo));
2548         if (jo == (jobject)Universe::non_oop_word()) {
2549           log.print("non-oop word");
2550         } else if (jo == nullptr) {
2551           log.print("nullptr-oop");
2552         } else {
2553           JNIHandles::resolve(jo)->print_value_on(&log);
2554         }
2555         log.cr();
2556       }
2557     }
2558   }
2559   return true;
2560 }
2561 
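// Materialize a Metadata reference (klass, method or method counters) from its cached
// DataKind tag and payload.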
2562 Metadata* SCCReader::read_metadata(const methodHandle& comp_method) {
2563   uint code_offset = read_position();
2564   Metadata* m = nullptr;
2565   DataKind kind = *(DataKind*)addr(code_offset);
2566   code_offset += sizeof(DataKind);
2567   set_read_position(code_offset);
2568   if (kind == DataKind::Null) {
2569     m = (Metadata*)nullptr;
2570   } else if (kind == DataKind::No_Data) {
2571     m = (Metadata*)Universe::non_oop_word();
2572   } else if (kind == DataKind::Klass || kind == DataKind::Klass_Shared) {
2573     m = (Metadata*)read_klass(comp_method, (kind == DataKind::Klass_Shared));
2574   } else if (kind == DataKind::Method || kind == DataKind::Method_Shared) {
2575     m = (Metadata*)read_method(comp_method, (kind == DataKind::Method_Shared));
2576   } else if (kind == DataKind::MethodCnts) {
2577     kind = *(DataKind*)addr(code_offset);
2578     bool shared = (kind == DataKind::Method_Shared);
2579     assert(kind == DataKind::Method || shared, "Sanity");
2580     code_offset += sizeof(DataKind);
2581     set_read_position(code_offset);
2582     m = (Metadata*)read_method(comp_method, shared);
2583     if (m != nullptr) {
2584       Method* method = (Method*)m;
2585       m = method->get_method_counters(Thread::current());
2586       if (m == nullptr) {
2587         set_lookup_failed();
2588         log_info(scc)("%d (L%d): Failed to get MethodCounters", compile_id(), comp_level());
2589       } else {
2590         log_info(scc)("%d (L%d): Read MethodCounters : " INTPTR_FORMAT, compile_id(), comp_level(), p2i(m));
2591       }
2592     }
2593   } else {
2594     set_lookup_failed();
2595     log_info(scc)("%d (L%d): Unknown metadata's kind: %d", compile_id(), comp_level(), (int)kind);
2596   }
2597   return m;
2598 }
2599 
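// Restore all recorded metadata entries (skipping the implicit first entry) and register
// them with the OopRecorder.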
2600 bool SCCReader::read_metadata(OopRecorder* oop_recorder, ciMethod* target) {
2601   uint code_offset = read_position();
2602   int metadata_count = *(int*)addr(code_offset);
2603   code_offset += sizeof(int);
2604   set_read_position(code_offset);
2605 
2606   log_debug(scc)("======== read metadata [%d]:", metadata_count);
2607 
2608   if (metadata_count == 0) {
2609     return true;
2610   }
2611   {
2612     VM_ENTRY_MARK;
2613     methodHandle comp_method(THREAD, target->get_Method());
2614 
2615     for (int i = 1; i < metadata_count; i++) {
2616       Metadata* m = read_metadata(comp_method);
2617       if (lookup_failed()) {
2618         return false;
2619       }
2620       if (oop_recorder->is_real(m)) {
2621         oop_recorder->find_index(m);
2622       } else {
2623         oop_recorder->allocate_metadata_index(m);
2624       }
2625       LogTarget(Debug, scc, metadata) log;
2626       if (log.is_enabled()) {
2627         LogStream ls(log);
2628         ls.print("%d: " INTPTR_FORMAT " ", i, p2i(m));
2629         if (m == (Metadata*)Universe::non_oop_word()) {
2630           ls.print("non-metadata word");
2631         } else if (m == nullptr) {
2632           ls.print("nullptr-oop");
2633         } else {
2634           Metadata::print_value_on_maybe_null(&ls, m);
2635         }
2636         ls.cr();
2637       }
2638     }
2639   }
2640   return true;
2641 }
2642 
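// Serialize an oop as a DataKind tag plus payload. Class mirrors, primitive mirrors,
// strings, the well-known class loaders and archived (permanent) heap objects are handled;
// any other object fails the lookup.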
2643 bool SCCache::write_oop(jobject& jo) {
2644   DataKind kind;
2645   uint n = 0;
2646   oop obj = JNIHandles::resolve(jo);
2647   if (jo == nullptr) {
2648     kind = DataKind::Null;
2649     n = write_bytes(&kind, sizeof(int));
2650     if (n != sizeof(int)) {
2651       return false;
2652     }
2653   } else if (jo == (jobject)Universe::non_oop_word()) {
2654     kind = DataKind::No_Data;
2655     n = write_bytes(&kind, sizeof(int));
2656     if (n != sizeof(int)) {
2657       return false;
2658     }
2659   } else if (java_lang_Class::is_instance(obj)) {
2660     if (java_lang_Class::is_primitive(obj)) {
2661       int bt = (int)java_lang_Class::primitive_type(obj);
2662       kind = DataKind::Primitive;
2663       n = write_bytes(&kind, sizeof(int));
2664       if (n != sizeof(int)) {
2665         return false;
2666       }
2667       n = write_bytes(&bt, sizeof(int));
2668       if (n != sizeof(int)) {
2669         return false;
2670       }
2671       log_info(scc)("%d (L%d): Write primitive type klass: %s", compile_id(), comp_level(), type2name((BasicType)bt));
2672     } else {
2673       Klass* klass = java_lang_Class::as_Klass(obj);
2674       if (!write_klass(klass)) {
2675         return false;
2676       }
2677     }
2678   } else if (java_lang_String::is_instance(obj)) {
2679     int k = CDSAccess::get_archived_object_permanent_index(obj);  // k >= 1 means obj is a "permanent heap object"
2680     if (k > 0) {
2681       kind = DataKind::String_Shared;
2682       n = write_bytes(&kind, sizeof(int));
2683       if (n != sizeof(int)) {
2684         return false;
2685       }
2686       n = write_bytes(&k, sizeof(int));
2687       if (n != sizeof(int)) {
2688         return false;
2689       }
2690       return true;
2691     }
2692     kind = DataKind::String;
2693     n = write_bytes(&kind, sizeof(int));
2694     if (n != sizeof(int)) {
2695       return false;
2696     }
2697     ResourceMark rm;
2698     size_t length_sz = 0;
2699     const char* string = java_lang_String::as_utf8_string(obj, length_sz);
2700     int length = (int)length_sz; // FIXME -- cast
2701     length++; // write trailing '\0'
2702     n = write_bytes(&length, sizeof(int));
2703     if (n != sizeof(int)) {
2704       return false;
2705     }
2706     n = write_bytes(string, (uint)length);
2707     if (n != (uint)length) {
2708       return false;
2709     }
2710     log_info(scc)("%d (L%d): Write String: %s", compile_id(), comp_level(), string);
2711   } else if (java_lang_Module::is_instance(obj)) {
2712     fatal("Module object unimplemented");
2713   } else if (java_lang_ClassLoader::is_instance(obj)) {
2714     if (obj == SystemDictionary::java_system_loader()) {
2715       kind = DataKind::SysLoader;
2716       log_info(scc)("%d (L%d): Write ClassLoader: java_system_loader", compile_id(), comp_level());
2717     } else if (obj == SystemDictionary::java_platform_loader()) {
2718       kind = DataKind::PlaLoader;
2719       log_info(scc)("%d (L%d): Write ClassLoader: java_platform_loader", compile_id(), comp_level());
2720     } else {
2721       fatal("ClassLoader object unimplemented");
2722       return false;
2723     }
2724     n = write_bytes(&kind, sizeof(int));
2725     if (n != sizeof(int)) {
2726       return false;
2727     }
2728   } else {
2729     int k = CDSAccess::get_archived_object_permanent_index(obj);  // k >= 1 means obj is a "permanent heap object"
2730     if (k > 0) {
2731       kind = DataKind::MH_Oop_Shared;
2732       n = write_bytes(&kind, sizeof(int));
2733       if (n != sizeof(int)) {
2734         return false;
2735       }
2736       n = write_bytes(&k, sizeof(int));
2737       if (n != sizeof(int)) {
2738         return false;
2739       }
2740       return true;
2741     }
2742     // Unhandled oop - bailout
2743     set_lookup_failed();
2744     log_info(scc, nmethod)("%d (L%d): Unhandled obj: " PTR_FORMAT " : %s",
2745                               compile_id(), comp_level(), p2i(obj), obj->klass()->external_name());
2746     return false;
2747   }
2748   return true;
2749 }
2750 
2751 bool SCCache::write_oops(OopRecorder* oop_recorder) {
2752   int oop_count = oop_recorder->oop_count();
2753   uint n = write_bytes(&oop_count, sizeof(int));
2754   if (n != sizeof(int)) {
2755     return false;
2756   }
2757   log_debug(scc)("======== write oops [%d]:", oop_count);
2758 
2759   for (int i = 1; i < oop_count; i++) { // skip first virtual nullptr
2760     jobject jo = oop_recorder->oop_at(i);
2761     LogStreamHandle(Info, scc, oops) log;
2762     if (log.is_enabled()) {
2763       log.print("%d: " INTPTR_FORMAT " ", i, p2i(jo));
2764       if (jo == (jobject)Universe::non_oop_word()) {
2765         log.print("non-oop word");
2766       } else if (jo == nullptr) {
2767         log.print("nullptr-oop");
2768       } else {
2769         JNIHandles::resolve(jo)->print_value_on(&log);
2770       }
2771       log.cr();
2772     }
2773     if (!write_oop(jo)) {
2774       return false;
2775     }
2776   }
2777   return true;
2778 }
2779 
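     // Write a single metadata value, encoded as a DataKind tag (Null/No_Data/MethodCnts)
     // or as a Klass/Method reference for klasses and methods. Unsupported metadata is fatal.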
2780 bool SCCache::write_metadata(Metadata* m) {
2781   uint n = 0;
2782   if (m == nullptr) {
2783     DataKind kind = DataKind::Null;
2784     n = write_bytes(&kind, sizeof(int));
2785     if (n != sizeof(int)) {
2786       return false;
2787     }
2788   } else if (m == (Metadata*)Universe::non_oop_word()) {
2789     DataKind kind = DataKind::No_Data;
2790     n = write_bytes(&kind, sizeof(int));
2791     if (n != sizeof(int)) {
2792       return false;
2793     }
2794   } else if (m->is_klass()) {
2795     if (!write_klass((Klass*)m)) {
2796       return false;
2797     }
2798   } else if (m->is_method()) {
2799     if (!write_method((Method*)m)) {
2800       return false;
2801     }
2802   } else if (m->is_methodCounters()) {
2803     DataKind kind = DataKind::MethodCnts;
2804     n = write_bytes(&kind, sizeof(int));
2805     if (n != sizeof(int)) {
2806       return false;
2807     }
2808     if (!write_method(((MethodCounters*)m)->method())) {
2809       return false;
2810     }
2811     log_info(scc)("%d (L%d): Write MethodCounters : " INTPTR_FORMAT, compile_id(), comp_level(), p2i(m));
2812   } else { // Not supported
2813     fatal("metadata : " INTPTR_FORMAT " unimplemented", p2i(m));
2814     return false;
2815   }
2816   return true;
2817 }
2818 
2819 bool SCCache::write_metadata(OopRecorder* oop_recorder) {
2820   int metadata_count = oop_recorder->metadata_count();
2821   uint n = write_bytes(&metadata_count, sizeof(int));
2822   if (n != sizeof(int)) {
2823     return false;
2824   }
2825 
2826   log_debug(scc)("======== write metadata [%d]:", metadata_count);
2827 
2828   for (int i = 1; i < metadata_count; i++) { // skip first virtual nullptr
2829     Metadata* m = oop_recorder->metadata_at(i);
2830     LogStreamHandle(Debug, scc, metadata) log;
2831     if (log.is_enabled()) {
2832       log.print("%d: " INTPTR_FORMAT " ", i, p2i(m));
2833       if (m == (Metadata*)Universe::non_oop_word()) {
2834         log.print("non-metadata word");
2835       } else if (m == nullptr) {
2836         log.print("nullptr-oop");
2837       } else {
2838         Metadata::print_value_on_maybe_null(&log, m);
2839       }
2840       log.cr();
2841     }
2842     if (!write_metadata(m)) {
2843       return false;
2844     }
2845   }
2846   return true;
2847 }
2848 
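     // Dependencies were stored as an already-encoded byte blob preceded by its length.
     // The reader does not copy them: it points the Dependencies object directly at the
     // (aligned) bytes inside the cache buffer.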
2849 bool SCCReader::read_dependencies(Dependencies* dependencies) {
2850   uint code_offset = read_position();
2851   int dependencies_size = *(int*)addr(code_offset);
2852 
2853   log_debug(scc)("======== read dependencies [%d]:", dependencies_size);
2854 
2855   code_offset += sizeof(int);
2856   code_offset = align_up(code_offset, DATA_ALIGNMENT);
2857   if (dependencies_size > 0) {
2858     dependencies->set_content((u_char*)addr(code_offset), dependencies_size);
2859   }
2860   code_offset += dependencies_size;
2861   set_read_position(code_offset);
2862   return true;
2863 }
2864 
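     // Entry point for loading a cached nmethod for the given compile task. The SCCEntry was
     // looked up earlier and attached to the task; here we open the cache for reading, take a
     // ReadingMark, and delegate to SCCReader::compile(). On failure the entry is marked load_fail.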
2865 bool SCCache::load_nmethod(ciEnv* env, ciMethod* target, int entry_bci, AbstractCompiler* compiler, CompLevel comp_level) {
2866   TraceTime t1("SC total load time", &_t_totalLoad, enable_timers(), false);
2867   CompileTask* task = env->task();
2868   SCCEntry* entry = task->scc_entry();
2869   bool preload = task->preload();
2870   assert(entry != nullptr, "sanity");
2871   SCCache* cache = open_for_read();
2872   if (cache == nullptr) {
2873     return false;
2874   }
2875   if (log_is_enabled(Info, scc, nmethod)) {
2876     uint decomp = (target->method_data() == nullptr) ? 0 : target->method_data()->decompile_count();
2877     VM_ENTRY_MARK;
2878     ResourceMark rm;
2879     methodHandle method(THREAD, target->get_Method());
2880     const char* target_name = method->name_and_sig_as_C_string();
2881     uint hash = java_lang_String::hash_code((const jbyte*)target_name, (int)strlen(target_name));
2882     bool clinit_brs = entry->has_clinit_barriers();
2883     log_info(scc, nmethod)("%d (L%d): %s nmethod '%s' (decomp: %d, hash: " UINT32_FORMAT_X_0 "%s%s)",
2884                            task->compile_id(), task->comp_level(), (preload ? "Preloading" : "Reading"),
2885                            target_name, decomp, hash, (clinit_brs ? ", has clinit barriers" : ""),
2886                            (entry->ignore_decompile() ? ", ignore_decomp" : ""));
2887   }
2888   ReadingMark rdmk;
2889   if (rdmk.failed()) {
2890     // Cache is closed, cannot touch anything.
2891     return false;
2892   }
2893 
2894   SCCReader reader(cache, entry, task);
2895   bool success = reader.compile(env, target, entry_bci, compiler);
2896   if (success) {
2897     task->set_num_inlined_bytecodes(entry->num_inlined_bytecodes());
2898   } else {
2899     entry->set_load_fail();
2900   }
2901   return success;
2902 }
2903 
2904 SCCReader::SCCReader(SCCache* cache, SCCEntry* entry, CompileTask* task) {
2905   _cache = cache;
2906   _entry   = entry;
2907   _load_buffer = cache->cache_buffer();
2908   _read_position = 0;
2909   if (task != nullptr) {
2910     _compile_id = task->compile_id();
2911     _comp_level = task->comp_level();
2912     _preload    = task->preload();
2913   } else {
2914     _compile_id = 0;
2915     _comp_level = 0;
2916     _preload    = false;
2917   }
2918   _lookup_failed = false;
2919 }
2920 
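     // Reconstruct an nmethod from its cached entry. The layout read here must mirror what
     // SCCache::write_nmethod() stored: flags, orig_pc_offset, frame_size, CodeOffsets,
     // oops, metadata, debug info, dependencies, oop maps, exception handler table,
     // implicit null check table, code sections and, finally, relocations.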
2921 bool SCCReader::compile(ciEnv* env, ciMethod* target, int entry_bci, AbstractCompiler* compiler) {
2922   uint entry_position = _entry->offset();
2923   uint code_offset = entry_position + _entry->code_offset();
2924   set_read_position(code_offset);
2925 
2926   // Read flags
2927   int flags = *(int*)addr(code_offset);
2928   code_offset += sizeof(int);
2929   bool has_monitors      = (flags & 0x1) != 0;
2930   bool has_wide_vectors  = (flags & 0x2) != 0;
2931   bool has_unsafe_access = (flags & 0x4) != 0;
2932   bool has_scoped_access = (flags & 0x8) != 0;
2933 
2934   int orig_pc_offset = *(int*)addr(code_offset);
2935   code_offset += sizeof(int);
2936   int frame_size = *(int*)addr(code_offset);
2937   code_offset += sizeof(int);
2938 
2939   // Read offsets
2940   CodeOffsets* offsets = (CodeOffsets*)addr(code_offset);
2941   code_offset += sizeof(CodeOffsets);
2942 
2943   // Create Debug Information Recorder to record scopes, oopmaps, etc.
2944   OopRecorder* oop_recorder = new OopRecorder(env->arena());
2945   env->set_oop_recorder(oop_recorder);
2946 
2947   set_read_position(code_offset);
2948 
2949   // Read OopRecorder data
2950   if (!read_oops(oop_recorder, target)) {
2951     return false;
2952   }
2953   if (!read_metadata(oop_recorder, target)) {
2954     return false;
2955   }
2956 
2957   // Read Debug info
2958   DebugInformationRecorder* recorder = read_debug_info(oop_recorder);
2959   if (recorder == nullptr) {
2960     return false;
2961   }
2962   env->set_debug_info(recorder);
2963 
2964   // Read Dependencies (compressed already)
2965   Dependencies* dependencies = new Dependencies(env);
2966   if (!read_dependencies(dependencies)) {
2967     return false;
2968   }
2969   env->set_dependencies(dependencies);
2970 
2971   // Read oop maps
2972   OopMapSet* oop_maps = read_oop_maps();
2973   if (oop_maps == nullptr) {
2974     return false;
2975   }
2976 
2977   // Read exception handles
2978   code_offset = read_position();
2979   int exc_table_length = *(int*)addr(code_offset);
2980   code_offset += sizeof(int);
2981   ExceptionHandlerTable handler_table(MAX2(exc_table_length, 4));
2982   if (exc_table_length > 0) {
2983     handler_table.set_length(exc_table_length);
2984     uint exc_table_size = handler_table.size_in_bytes();
2985     copy_bytes(addr(code_offset), (address)handler_table.table(), exc_table_size);
2986     code_offset += exc_table_size;
2987   }
2988 
2989   // Read null check table
2990   int nul_chk_length = *(int*)addr(code_offset);
2991   code_offset += sizeof(int);
2992   ImplicitExceptionTable nul_chk_table;
2993   if (nul_chk_length > 0) {
2994     nul_chk_table.set_size(nul_chk_length);
2995     nul_chk_table.set_len(nul_chk_length);
2996     uint nul_chk_size = nul_chk_table.size_in_bytes();
2997     copy_bytes(addr(code_offset), (address)nul_chk_table.data(), nul_chk_size - sizeof(implicit_null_entry));
2998     code_offset += nul_chk_size;
2999   }
3000 
3001   uint reloc_size = _entry->reloc_size();
3002   CodeBuffer buffer("Compile::Fill_buffer", _entry->code_size(), reloc_size);
3003   buffer.initialize_oop_recorder(oop_recorder);
3004 
3005   const char* name = addr(entry_position + _entry->name_offset());
3006 
3007   // Create fake original CodeBuffer
3008   CodeBuffer orig_buffer(name);
3009 
3010   // Read code
3011   if (!read_code(&buffer, &orig_buffer, align_up(code_offset, DATA_ALIGNMENT))) {
3012     return false;
3013   }
3014 
3015   // Read relocations
3016   uint reloc_offset = entry_position + _entry->reloc_offset();
3017   set_read_position(reloc_offset);
3018   if (!read_relocations(&buffer, &orig_buffer, oop_recorder, target)) {
3019     return false;
3020   }
3021 
3022   log_info(scc, nmethod)("%d (L%d): Read nmethod '%s' from Startup Code Cache '%s'", compile_id(), comp_level(), name, _cache->cache_path());
3023 #ifdef ASSERT
3024   LogStreamHandle(Debug, scc, nmethod) log;
3025   if (log.is_enabled()) {
3026     FlagSetting fs(PrintRelocations, true);
3027     buffer.print_on(&log);
3028     buffer.decode();
3029   }
3030 #endif
3031 
3032   if (VerifyCachedCode) {
3033     return false;
3034   }
3035 
3036   // Register nmethod
3037   TraceTime t1("SC total nmethod register time", &_t_totalRegister, enable_timers(), false);
3038   env->register_method(target, entry_bci,
3039                        offsets, orig_pc_offset,
3040                        &buffer, frame_size,
3041                        oop_maps, &handler_table,
3042                        &nul_chk_table, compiler,
3043                        _entry->has_clinit_barriers(),
3044                        false,
3045                        has_unsafe_access,
3046                        has_wide_vectors,
3047                        has_monitors,
3048                        has_scoped_access,
3049                        0, true /* install_code */,
3050                        (SCCEntry *)_entry);
3051   CompileTask* task = env->task();
3052   bool success = task->is_success();
3053   if (success) {
3054     ((SCCEntry *)_entry)->set_loaded();
3055   }
3056   return success;
3057 }
3058 
3059 // No concurrency for writing to the cache file because this method is called from
3060 // ciEnv::register_method() under the MethodCompileQueue_lock and Compile_lock locks.
3061 SCCEntry* SCCache::store_nmethod(const methodHandle& method,
3062                      int comp_id,
3063                      int entry_bci,
3064                      CodeOffsets* offsets,
3065                      int orig_pc_offset,
3066                      DebugInformationRecorder* recorder,
3067                      Dependencies* dependencies,
3068                      CodeBuffer* buffer,
3069                      int frame_size,
3070                      OopMapSet* oop_maps,
3071                      ExceptionHandlerTable* handler_table,
3072                      ImplicitExceptionTable* nul_chk_table,
3073                      AbstractCompiler* compiler,
3074                      CompLevel comp_level,
3075                      bool has_clinit_barriers,
3076                      bool for_preload,
3077                      bool has_unsafe_access,
3078                      bool has_wide_vectors,
3079                      bool has_monitors,
3080                      bool has_scoped_access) {
3081   if (!CDSConfig::is_dumping_cached_code()) {
3082     return nullptr; // The metadata and heap in the CDS image haven't been finalized yet.
3083   }
3084   if (entry_bci != InvocationEntryBci) {
3085     return nullptr; // No OSR
3086   }
3087   if (compiler->is_c1() && (comp_level == CompLevel_simple || comp_level == CompLevel_limited_profile)) {
3088     // Cache tier1 (C1) compilations
3089   } else if (!compiler->is_c2()) {
3090     return nullptr; // Besides the C1 tiers above, only C2 code is cached
3091   }
3092   TraceTime t1("SC total store time", &_t_totalStore, enable_timers(), false);
3093   SCCache* cache = open_for_write();
3094   if (cache == nullptr) {
3095     return nullptr; // Cache file is closed
3096   }
3097   SCCEntry* entry = cache->write_nmethod(method, comp_id, entry_bci, offsets, orig_pc_offset, recorder, dependencies, buffer,
3098                                   frame_size, oop_maps, handler_table, nul_chk_table, compiler, comp_level,
3099                                   has_clinit_barriers, for_preload, has_unsafe_access, has_wide_vectors, has_monitors, has_scoped_access);
3100   if (entry == nullptr) {
3101     log_info(scc, nmethod)("%d (L%d): nmethod store attempt failed", comp_id, (int)comp_level);
3102   }
3103   return entry;
3104 }
3105 
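     // Serialize an nmethod into the cache. The layout written here is consumed by
     // SCCReader::compile(). On a lookup failure the write position is reset to the start
     // of the entry, so the partially written entry is discarded.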
3106 SCCEntry* SCCache::write_nmethod(const methodHandle& method,
3107                                  int comp_id,
3108                                  int entry_bci,
3109                                  CodeOffsets* offsets,
3110                                  int orig_pc_offset,
3111                                  DebugInformationRecorder* recorder,
3112                                  Dependencies* dependencies,
3113                                  CodeBuffer* buffer,
3114                                  int frame_size,
3115                                  OopMapSet* oop_maps,
3116                                  ExceptionHandlerTable* handler_table,
3117                                  ImplicitExceptionTable* nul_chk_table,
3118                                  AbstractCompiler* compiler,
3119                                  CompLevel comp_level,
3120                                  bool has_clinit_barriers,
3121                                  bool for_preload,
3122                                  bool has_unsafe_access,
3123                                  bool has_wide_vectors,
3124                                  bool has_monitors,
3125                                  bool has_scoped_access) {
3126 //  if (method->is_hidden()) {
3127 //    ResourceMark rm;
3128 //    log_info(scc, nmethod)("%d (L%d): Skip hidden method '%s'", comp_id, (int)comp_level, method->name_and_sig_as_C_string());
3129 //    return nullptr;
3130 //  }
3131   if (buffer->before_expand() != nullptr) {
3132     ResourceMark rm;
3133     log_info(scc, nmethod)("%d (L%d): Skip nmethod with expanded buffer '%s'", comp_id, (int)comp_level, method->name_and_sig_as_C_string());
3134     return nullptr;
3135   }
3136 #ifdef ASSERT
3137   LogStreamHandle(Debug, scc, nmethod) log;
3138   if (log.is_enabled()) {
3139     tty->print_cr(" == store_nmethod");
3140     FlagSetting fs(PrintRelocations, true);
3141     buffer->print_on(&log);
3142     buffer->decode();
3143   }
3144 #endif
3145   assert(!has_clinit_barriers || _gen_preload_code, "sanity");
3146   Method* m = method();
3147   bool method_in_cds = MetaspaceShared::is_in_shared_metaspace((address)m);
3148   InstanceKlass* holder = m->method_holder();
3149   bool klass_in_cds = holder->is_shared() && !holder->is_shared_unregistered_class();
3150   bool builtin_loader = holder->class_loader_data()->is_builtin_class_loader_data();
3151   if (!builtin_loader) {
3152     ResourceMark rm;
3153     log_info(scc, nmethod)("%d (L%d): Skip method '%s' loaded by custom class loader %s", comp_id, (int)comp_level, method->name_and_sig_as_C_string(), holder->class_loader_data()->loader_name());
3154     return nullptr;
3155   }
3156   if (for_preload && !(method_in_cds && klass_in_cds)) {
3157     ResourceMark rm;
3158     log_info(scc, nmethod)("%d (L%d): Skip method '%s' for preload: not in CDS", comp_id, (int)comp_level, method->name_and_sig_as_C_string());
3159     return nullptr;
3160   }
3161   assert(!for_preload || method_in_cds, "sanity");
3162   _for_preload = for_preload;
3163   _has_clinit_barriers = has_clinit_barriers;
3164 
3165   if (!align_write()) {
3166     return nullptr;
3167   }
3168   _compile_id = comp_id;
3169   _comp_level = (int)comp_level;
3170 
3171   uint entry_position = _write_position;
3172 
3173   uint decomp = (method->method_data() == nullptr) ? 0 : method->method_data()->decompile_count();
3174 
3175   // Is this the assembly phase of the one-step workflow?
3176   // In that phase compilation is done from saved profiling data without
3177   // running the application, so decompilation counters are ignored.
3178   // They are also ignored for C1 code because it is decompiled unconditionally
3179   // when the C2 generated code is published.
3180   bool ignore_decompile = (comp_level == CompLevel_limited_profile) ||
3181                           CDSConfig::is_dumping_final_static_archive();
3182 
3183   // Write name
3184   uint name_offset = 0;
3185   uint name_size   = 0;
3186   uint hash = 0;
3187   uint n;
3188   {
3189     ResourceMark rm;
3190     const char* name   = method->name_and_sig_as_C_string();
3191     log_info(scc, nmethod)("%d (L%d): Writing nmethod '%s' (comp level: %d, decomp: %d%s%s) to Startup Code Cache '%s'",
3192                            comp_id, (int)comp_level, name, comp_level, decomp,
3193                            (ignore_decompile ? ", ignore_decomp" : ""),
3194                            (has_clinit_barriers ? ", has clinit barriers" : ""), _cache_path);
3195 
3196     LogStreamHandle(Info, scc, loader) log;
3197     if (log.is_enabled()) {
3198       oop loader = holder->class_loader();
3199       oop domain = holder->protection_domain();
3200       log.print("Holder: ");
3201       holder->print_value_on(&log);
3202       log.print(" loader: ");
3203       if (loader == nullptr) {
3204         log.print("nullptr");
3205       } else {
3206         loader->print_value_on(&log);
3207       }
3208       log.print(" domain: ");
3209       if (domain == nullptr) {
3210         log.print("nullptr");
3211       } else {
3212         domain->print_value_on(&log);
3213       }
3214       log.cr();
3215     }
3216     name_offset = _write_position - entry_position;
3217     name_size   = (uint)strlen(name) + 1; // Includes terminating '\0'
3218     n = write_bytes(name, name_size);
3219     if (n != name_size) {
3220       return nullptr;
3221     }
3222     hash = java_lang_String::hash_code((const jbyte*)name, (int)strlen(name));
3223   }
3224 
3225   if (!align_write()) {
3226     return nullptr;
3227   }
3228 
3229   uint code_offset = _write_position - entry_position;
3230 
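       // Pack nmethod flags into a bit mask; bit positions must match the decoding in
       // SCCReader::compile() (0x1 monitors, 0x2 wide vectors, 0x4 unsafe, 0x8 scoped access).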
3231   int flags = (has_scoped_access ? 0x8 : 0) |
3232               (has_unsafe_access ? 0x4 : 0) |
3233               (has_wide_vectors  ? 0x2 : 0) |
3234               (has_monitors      ? 0x1 : 0);
3235   n = write_bytes(&flags, sizeof(int));
3236   if (n != sizeof(int)) {
3237     return nullptr;
3238   }
3239 
3240   n = write_bytes(&orig_pc_offset, sizeof(int));
3241   if (n != sizeof(int)) {
3242     return nullptr;
3243   }
3244 
3245   n = write_bytes(&frame_size, sizeof(int));
3246   if (n != sizeof(int)) {
3247     return nullptr;
3248   }
3249 
3250   // Write offsets
3251   n = write_bytes(offsets, sizeof(CodeOffsets));
3252   if (n != sizeof(CodeOffsets)) {
3253     return nullptr;
3254   }
3255 
3256   // Write OopRecorder data
3257   if (!write_oops(buffer->oop_recorder())) {
3258     if (lookup_failed() && !failed()) {
3259       // Skip this method and reposition file
3260       set_write_position(entry_position);
3261     }
3262     return nullptr;
3263   }
3264   if (!write_metadata(buffer->oop_recorder())) {
3265     if (lookup_failed() && !failed()) {
3266       // Skip this method and reposition file
3267       set_write_position(entry_position);
3268     }
3269     return nullptr;
3270   }
3271 
3272   // Write Debug info
3273   if (!write_debug_info(recorder)) {
3274     return nullptr;
3275   }
3276   // Write Dependencies
3277   int dependencies_size = (int)dependencies->size_in_bytes();
3278   n = write_bytes(&dependencies_size, sizeof(int));
3279   if (n != sizeof(int)) {
3280     return nullptr;
3281   }
3282   if (!align_write()) {
3283     return nullptr;
3284   }
3285   n = write_bytes(dependencies->content_bytes(), dependencies_size);
3286   if (n != (uint)dependencies_size) {
3287     return nullptr;
3288   }
3289 
3290   // Write oop maps
3291   if (!write_oop_maps(oop_maps)) {
3292     return nullptr;
3293   }
3294 
3295   // Write exception handles
3296   int exc_table_length = handler_table->length();
3297   n = write_bytes(&exc_table_length, sizeof(int));
3298   if (n != sizeof(int)) {
3299     return nullptr;
3300   }
3301   uint exc_table_size = handler_table->size_in_bytes();
3302   n = write_bytes(handler_table->table(), exc_table_size);
3303   if (n != exc_table_size) {
3304     return nullptr;
3305   }
3306 
3307   // Write null check table
3308   int nul_chk_length = nul_chk_table->len();
3309   n = write_bytes(&nul_chk_length, sizeof(int));
3310   if (n != sizeof(int)) {
3311     return nullptr;
3312   }
3313   uint nul_chk_size = nul_chk_table->size_in_bytes();
3314   n = write_bytes(nul_chk_table->data(), nul_chk_size);
3315   if (n != nul_chk_size) {
3316     return nullptr;
3317   }
3318 
3319   // Write code section
3320   if (!align_write()) {
3321     return nullptr;
3322   }
3323   uint code_size = 0;
3324   if (!write_code(buffer, code_size)) {
3325     return nullptr;
3326   }
3327   // Write relocInfo array
3328   uint reloc_offset = _write_position - entry_position;
3329   uint reloc_size = 0;
3330   if (!write_relocations(buffer, reloc_size)) {
3331     if (lookup_failed() && !failed()) {
3332       // Skip this method and reposition file
3333       set_write_position(entry_position);
3334     }
3335     return nullptr;
3336   }
3337   uint entry_size = _write_position - entry_position;
3338 
3339   SCCEntry* entry = new (this) SCCEntry(entry_position, entry_size, name_offset, name_size,
3340                                         code_offset, code_size, reloc_offset, reloc_size,
3341                                         SCCEntry::Code, hash, (uint)comp_level, (uint)comp_id, decomp,
3342                                         has_clinit_barriers, _for_preload, ignore_decompile);
3343   if (method_in_cds) {
3344     entry->set_method(m);
3345   }
3346 #ifdef ASSERT
3347   if (has_clinit_barriers || _for_preload) {
3348     assert(for_preload, "sanity");
3349     assert(entry->method() != nullptr, "sanity");
3350   }
3351 #endif
3352   {
3353     ResourceMark rm;
3354     const char* name   = method->name_and_sig_as_C_string();
3355     log_info(scc, nmethod)("%d (L%d): Wrote nmethod '%s'%s to Startup Code Cache '%s'",
3356                            comp_id, (int)comp_level, name, (_for_preload ? " (for preload)" : ""), _cache_path);
3357   }
3358   if (VerifyCachedCode) {
3359     return nullptr;
3360   }
3361   return entry;
3362 }
3363 
3364 static void print_helper1(outputStream* st, const char* name, int count) {
3365   if (count > 0) {
3366     st->print(" %s=%d", name, count);
3367   }
3368 }
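     // Print one row of the statistics table. Column layout (filled in print_statistics_on):
     // [0] total, [1] has_clinit_barriers, [2] for_preload, [3] loaded,
     // [4] invalidated (not entrant), [5] load failed. Rows are indexed by SCCEntry kind,
     // with Code entries further split by comp level (+1 when generated for preload).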
3369 static void print_helper(outputStream* st, const char* name, int stats[6+3][6], int idx) {
3370   int total = stats[idx][0];
3371   if (total > 0) {
3372     st->print("  %s:", name);
3373     print_helper1(st, "total",               stats[idx][0]);
3374     //print_helper1(st, "for_preload",         stats[idx][2]); // implied by Tier5
3375     print_helper1(st, "loaded",              stats[idx][3]);
3376     print_helper1(st, "invalidated",         stats[idx][4]);
3377     print_helper1(st, "failed",              stats[idx][5]);
3378     print_helper1(st, "has_clinit_barriers", stats[idx][1]);
3379     st->cr();
3380   }
3381 }
3382 
3383 void SCCache::print_statistics_on(outputStream* st) {
3384   SCCache* cache = open_for_read();
3385   if (cache != nullptr) {
3386     ReadingMark rdmk;
3387     if (rdmk.failed()) {
3388       // Cache is closed, cannot touch anything.
3389       return;
3390     }
3391 
3392     uint count = cache->_load_header->entries_count();
3393     uint* search_entries = (uint*)cache->addr(cache->_load_header->entries_offset()); // [id, index]
3394     SCCEntry* load_entries = (SCCEntry*)(search_entries + 2 * count);
3395 
3396     int stats[6 + 3][6] = {0};
3397     for (uint i = 0; i < count; i++) {
3398       int index = search_entries[2*i + 1];
3399       SCCEntry* entry = &(load_entries[index]);
3400 
3401       int lvl = entry->kind();
3402       if (entry->kind() == SCCEntry::Code) {
3403         lvl += entry->comp_level() + (entry->for_preload() ? 1 : 0);
3404       }
3405       ++stats[lvl][0]; // total
3406       if (entry->has_clinit_barriers()) {
3407         ++stats[lvl][1];
3408       }
3409       if (entry->for_preload()) {
3410         ++stats[lvl][2];
3411       }
3412       if (entry->is_loaded()) {
3413         ++stats[lvl][3];
3414       }
3415       if (entry->not_entrant()) {
3416         ++stats[lvl][4];
3417       }
3418       if (entry->load_fail()) {
3419         ++stats[lvl][5];
3420       }
3421     }
3422 
3423     print_helper(st, "None", stats, SCCEntry::None);
3424     print_helper(st, "Stub", stats, SCCEntry::Stub);
3425     print_helper(st, "Blob", stats, SCCEntry::Blob);
3426     for (int lvl = 0; lvl <= CompLevel_full_optimization + 1; lvl++) {
3427       ResourceMark rm;
3428       stringStream ss;
3429       ss.print("SC T%d", lvl);
3430       print_helper(st, ss.freeze(), stats, SCCEntry::Code + lvl);
3431     }
3432 
3433   } else {
3434     st->print_cr("failed to open SCA at %s", CachedCodeFile);
3435   }
3436 }
3437 
3438 void SCCache::print_on(outputStream* st) {
3439   SCCache* cache = open_for_read();
3440   if (cache != nullptr) {
3441     ReadingMark rdmk;
3442     if (rdmk.failed()) {
3443       // Cache is closed, cannot touch anything.
3444       return;
3445     }
3446 
3447     uint count = cache->_load_header->entries_count();
3448     uint* search_entries = (uint*)cache->addr(cache->_load_header->entries_offset()); // [id, index]
3449     SCCEntry* load_entries = (SCCEntry*)(search_entries + 2 * count);
3450 
3451     for (uint i = 0; i < count; i++) {
3452       int index = search_entries[2*i + 1];
3453       SCCEntry* entry = &(load_entries[index]);
3454 
3455       st->print_cr("%4u: %4u: K%u L%u offset=%u decompile=%u size=%u code_size=%u%s%s%s%s",
3456                 i, index, entry->kind(), entry->comp_level(), entry->offset(),
3457                 entry->decompile(), entry->size(), entry->code_size(),
3458                 entry->has_clinit_barriers() ? " has_clinit_barriers" : "",
3459                 entry->for_preload()         ? " for_preload"         : "",
3460                 entry->is_loaded()           ? " loaded"              : "",
3461                 entry->not_entrant()         ? " not_entrant"         : "");
3462       st->print_raw("         ");
3463       SCCReader reader(cache, entry, nullptr);
3464       reader.print_on(st);
3465     }
3466   } else {
3467     st->print_cr("failed to open SCA at %s", CachedCodeFile);
3468   }
3469 }
3470 
3471 void SCCache::print_unused_entries_on(outputStream* st) {
3472   LogStreamHandle(Info, scc, init) info;
3473   if (info.is_enabled()) {
3474     SCCache::iterate([&](SCCEntry* entry) {
3475       if (!entry->is_loaded()) {
3476         MethodTrainingData* mtd = MethodTrainingData::lookup_for(entry->method());
3477         if (mtd != nullptr) {
3478           if (mtd->has_holder()) {
3479             if (mtd->holder()->method_holder()->is_initialized()) {
3480               ResourceMark rm;
3481               mtd->iterate_all_compiles([&](CompileTrainingData* ctd) {
3482                 if ((uint)ctd->level() == entry->comp_level()) {
3483                   if (ctd->init_deps_left() == 0) {
3484                     nmethod* nm = mtd->holder()->code();
3485                     if (nm == nullptr) {
3486                       if (mtd->holder()->queued_for_compilation()) {
3487                         return; // scheduled for compilation
3488                       }
3489                     } else if ((uint)nm->comp_level() >= entry->comp_level()) {
3490                       return; // already online compiled and superseded by a more optimal method
3491                     }
3492                     info.print("SCC entry not loaded: ");
3493                     ctd->print_on(&info);
3494                     info.cr();
3495                   }
3496                 }
3497               });
3498             } else {
3499               // not yet initialized
3500             }
3501           } else {
3502             info.print("SCC entry doesn't have a holder: ");
3503             mtd->print_on(&info);
3504             info.cr();
3505           }
3506         }
3507       }
3508     });
3509   }
3510 }
3511 
3512 void SCCReader::print_on(outputStream* st) {
3513   uint entry_position = _entry->offset();
3514   set_read_position(entry_position);
3515 
3516   // Read name
3517   uint name_offset = entry_position + _entry->name_offset();
3518   uint name_size = _entry->name_size(); // Includes terminating '\0'
3519   const char* name = addr(name_offset);
3520 
3521   st->print_cr("  name: %s", name);
3522 }
3523 
3524 #define _extrs_max 80
3525 #define _stubs_max 120
3526 #define _blobs_max 100
3527 #define _shared_blobs_max 24
3528 #define _C2_blobs_max 25
3529 #define _C1_blobs_max (_blobs_max - _shared_blobs_max - _C2_blobs_max)
3530 #define _all_max 300
3531 
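     // SET_ADDRESS appends a well-known VM address to one of the table sections (external
     // runtime entries, stubs, or blob entry points). Relocations in cached code refer to
     // these addresses by their stable index in the table rather than by raw address, so
     // they can be re-resolved in a different JVM instance.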
3532 #define SET_ADDRESS(type, addr)                           \
3533   {                                                       \
3534     type##_addr[type##_length++] = (address) (addr);      \
3535     assert(type##_length <= type##_max, "increase size"); \
3536   }
3537 
3538 static bool initializing = false;
3539 void SCAddressTable::init() {
3540   if (_complete || initializing) return; // Done already
3541   initializing = true;
3542   _extrs_addr = NEW_C_HEAP_ARRAY(address, _extrs_max, mtCode);
3543   _stubs_addr = NEW_C_HEAP_ARRAY(address, _stubs_max, mtCode);
3544   _blobs_addr = NEW_C_HEAP_ARRAY(address, _blobs_max, mtCode);
3545 
3546   // Divide the _blobs_addr array into chunks because they could be initialized in parallel
3547   _C2_blobs_addr = _blobs_addr + _shared_blobs_max; // C2 blobs addresses stored after shared blobs
3548   _C1_blobs_addr = _C2_blobs_addr + _C2_blobs_max; // C1 blobs addresses stored after C2 blobs
3549 
3550   _extrs_length = 0;
3551   _stubs_length = 0;
3552   _blobs_length = 0;       // for shared blobs
3553   _C1_blobs_length = 0;
3554   _C2_blobs_length = 0;
3555   _final_blobs_length = 0; // Depends on number of C1 blobs
3556 
3557   // Runtime methods
3558 #ifdef COMPILER2
3559   SET_ADDRESS(_extrs, OptoRuntime::handle_exception_C);
3560 #endif
3561 #ifdef COMPILER1
3562   SET_ADDRESS(_extrs, Runtime1::is_instance_of);
3563   SET_ADDRESS(_extrs, Runtime1::trace_block_entry);
3564 #endif
3565 
3566   SET_ADDRESS(_extrs, CompressedOops::base_addr());
3567 #if INCLUDE_G1GC
3568   SET_ADDRESS(_extrs, G1BarrierSetRuntime::write_ref_field_post_entry);
3569   SET_ADDRESS(_extrs, G1BarrierSetRuntime::write_ref_field_pre_entry);
3570 #endif
3571 
3572 #if INCLUDE_SHENANDOAHGC
3573   SET_ADDRESS(_extrs, ShenandoahRuntime::arraycopy_barrier_oop);
3574   SET_ADDRESS(_extrs, ShenandoahRuntime::arraycopy_barrier_narrow_oop);
3575   SET_ADDRESS(_extrs, ShenandoahRuntime::write_ref_field_pre);
3576   SET_ADDRESS(_extrs, ShenandoahRuntime::clone_barrier);
3577   SET_ADDRESS(_extrs, ShenandoahRuntime::load_reference_barrier_strong);
3578   SET_ADDRESS(_extrs, ShenandoahRuntime::load_reference_barrier_strong_narrow);
3579   SET_ADDRESS(_extrs, ShenandoahRuntime::load_reference_barrier_weak);
3580   SET_ADDRESS(_extrs, ShenandoahRuntime::load_reference_barrier_weak_narrow);
3581   SET_ADDRESS(_extrs, ShenandoahRuntime::load_reference_barrier_phantom);
3582   SET_ADDRESS(_extrs, ShenandoahRuntime::load_reference_barrier_phantom_narrow);
3583 #endif
3584 
3585   SET_ADDRESS(_extrs, SharedRuntime::complete_monitor_unlocking_C);
3586   SET_ADDRESS(_extrs, SharedRuntime::enable_stack_reserved_zone);
3587 #if defined(AMD64) && !defined(ZERO)
3588   SET_ADDRESS(_extrs, SharedRuntime::montgomery_multiply);
3589   SET_ADDRESS(_extrs, SharedRuntime::montgomery_square);
3590 #endif // AMD64
3591   SET_ADDRESS(_extrs, SharedRuntime::d2f);
3592   SET_ADDRESS(_extrs, SharedRuntime::d2i);
3593   SET_ADDRESS(_extrs, SharedRuntime::d2l);
3594   SET_ADDRESS(_extrs, SharedRuntime::dcos);
3595   SET_ADDRESS(_extrs, SharedRuntime::dexp);
3596   SET_ADDRESS(_extrs, SharedRuntime::dlog);
3597   SET_ADDRESS(_extrs, SharedRuntime::dlog10);
3598   SET_ADDRESS(_extrs, SharedRuntime::dpow);
3599   SET_ADDRESS(_extrs, SharedRuntime::dsin);
3600   SET_ADDRESS(_extrs, SharedRuntime::dtan);
3601   SET_ADDRESS(_extrs, SharedRuntime::f2i);
3602   SET_ADDRESS(_extrs, SharedRuntime::f2l);
3603 #ifndef ZERO
3604   SET_ADDRESS(_extrs, SharedRuntime::drem);
3605   SET_ADDRESS(_extrs, SharedRuntime::frem);
3606 #endif
3607   SET_ADDRESS(_extrs, SharedRuntime::l2d);
3608   SET_ADDRESS(_extrs, SharedRuntime::l2f);
3609   SET_ADDRESS(_extrs, SharedRuntime::ldiv);
3610   SET_ADDRESS(_extrs, SharedRuntime::lmul);
3611   SET_ADDRESS(_extrs, SharedRuntime::lrem);
3612 #if INCLUDE_JVMTI
3613   SET_ADDRESS(_extrs, &JvmtiExport::_should_notify_object_alloc);
3614 #endif /* INCLUDE_JVMTI */
3615   BarrierSet* bs = BarrierSet::barrier_set();
3616   if (bs->is_a(BarrierSet::CardTableBarrierSet)) {
3617     SET_ADDRESS(_extrs, ci_card_table_address_as<address>());
3618   }
3619   SET_ADDRESS(_extrs, ThreadIdentifier::unsafe_offset());
3620   SET_ADDRESS(_extrs, Thread::current);
3621 
3622   SET_ADDRESS(_extrs, os::javaTimeMillis);
3623   SET_ADDRESS(_extrs, os::javaTimeNanos);
3624 
3625 #if INCLUDE_JVMTI
3626   SET_ADDRESS(_extrs, &JvmtiVTMSTransitionDisabler::_VTMS_notify_jvmti_events);
3627 #endif /* INCLUDE_JVMTI */
3628   SET_ADDRESS(_extrs, StubRoutines::crc_table_addr());
3629 #ifndef PRODUCT
3630   SET_ADDRESS(_extrs, &SharedRuntime::_partial_subtype_ctr);
3631   SET_ADDRESS(_extrs, JavaThread::verify_cross_modify_fence_failure);
3632 #endif
3633 
3634 #ifndef ZERO
3635 #if defined(AMD64) || defined(AARCH64) || defined(RISCV64)
3636   SET_ADDRESS(_extrs, MacroAssembler::debug64);
3637 #endif
3638 #if defined(AMD64)
3639   SET_ADDRESS(_extrs, StubRoutines::x86::arrays_hashcode_powers_of_31());
3640 #endif
3641 #endif
3642 
3643 #ifdef COMPILER1
3644 #ifdef X86
3645   SET_ADDRESS(_extrs, LIR_Assembler::float_signmask_pool);
3646   SET_ADDRESS(_extrs, LIR_Assembler::double_signmask_pool);
3647   SET_ADDRESS(_extrs, LIR_Assembler::float_signflip_pool);
3648   SET_ADDRESS(_extrs, LIR_Assembler::double_signflip_pool);
3649 #endif
3650 #endif
3651 
3652   // addresses of fields in AOT runtime constants area
3653   address* p = AOTRuntimeConstants::field_addresses_list();
3654   while (*p != nullptr) {
3655     SET_ADDRESS(_extrs, *p++);
3656   }
3657   // Stubs
3658   SET_ADDRESS(_stubs, StubRoutines::method_entry_barrier());
3659   SET_ADDRESS(_stubs, StubRoutines::forward_exception_entry());
3660 /*
3661   SET_ADDRESS(_stubs, StubRoutines::throw_AbstractMethodError_entry());
3662   SET_ADDRESS(_stubs, StubRoutines::throw_IncompatibleClassChangeError_entry());
3663   SET_ADDRESS(_stubs, StubRoutines::throw_NullPointerException_at_call_entry());
3664   SET_ADDRESS(_stubs, StubRoutines::throw_StackOverflowError_entry());
3665   SET_ADDRESS(_stubs, StubRoutines::throw_delayed_StackOverflowError_entry());
3666 */
3667   SET_ADDRESS(_stubs, StubRoutines::atomic_xchg_entry());
3668   SET_ADDRESS(_stubs, StubRoutines::atomic_cmpxchg_entry());
3669   SET_ADDRESS(_stubs, StubRoutines::atomic_cmpxchg_long_entry());
3670   SET_ADDRESS(_stubs, StubRoutines::atomic_add_entry());
3671   SET_ADDRESS(_stubs, StubRoutines::fence_entry());
3672 
3673   SET_ADDRESS(_stubs, StubRoutines::cont_thaw());
3674   SET_ADDRESS(_stubs, StubRoutines::cont_returnBarrier());
3675   SET_ADDRESS(_stubs, StubRoutines::cont_returnBarrierExc());
3676 
3677   JFR_ONLY(SET_ADDRESS(_stubs, SharedRuntime::jfr_write_checkpoint());)
3678 
3679 
3680   SET_ADDRESS(_stubs, StubRoutines::jbyte_arraycopy());
3681   SET_ADDRESS(_stubs, StubRoutines::jshort_arraycopy());
3682   SET_ADDRESS(_stubs, StubRoutines::jint_arraycopy());
3683   SET_ADDRESS(_stubs, StubRoutines::jlong_arraycopy());
3684   SET_ADDRESS(_stubs, StubRoutines::_oop_arraycopy);
3685   SET_ADDRESS(_stubs, StubRoutines::_oop_arraycopy_uninit);
3686 
3687   SET_ADDRESS(_stubs, StubRoutines::jbyte_disjoint_arraycopy());
3688   SET_ADDRESS(_stubs, StubRoutines::jshort_disjoint_arraycopy());
3689   SET_ADDRESS(_stubs, StubRoutines::jint_disjoint_arraycopy());
3690   SET_ADDRESS(_stubs, StubRoutines::jlong_disjoint_arraycopy());
3691   SET_ADDRESS(_stubs, StubRoutines::_oop_disjoint_arraycopy);
3692   SET_ADDRESS(_stubs, StubRoutines::_oop_disjoint_arraycopy_uninit);
3693 
3694   SET_ADDRESS(_stubs, StubRoutines::arrayof_jbyte_arraycopy());
3695   SET_ADDRESS(_stubs, StubRoutines::arrayof_jshort_arraycopy());
3696   SET_ADDRESS(_stubs, StubRoutines::arrayof_jint_arraycopy());
3697   SET_ADDRESS(_stubs, StubRoutines::arrayof_jlong_arraycopy());
3698   SET_ADDRESS(_stubs, StubRoutines::_arrayof_oop_arraycopy);
3699   SET_ADDRESS(_stubs, StubRoutines::_arrayof_oop_arraycopy_uninit);
3700 
3701   SET_ADDRESS(_stubs, StubRoutines::arrayof_jbyte_disjoint_arraycopy());
3702   SET_ADDRESS(_stubs, StubRoutines::arrayof_jshort_disjoint_arraycopy());
3703   SET_ADDRESS(_stubs, StubRoutines::arrayof_jint_disjoint_arraycopy());
3704   SET_ADDRESS(_stubs, StubRoutines::arrayof_jlong_disjoint_arraycopy());
3705   SET_ADDRESS(_stubs, StubRoutines::_arrayof_oop_disjoint_arraycopy);
3706   SET_ADDRESS(_stubs, StubRoutines::_arrayof_oop_disjoint_arraycopy_uninit);
3707 
3708   SET_ADDRESS(_stubs, StubRoutines::_checkcast_arraycopy);
3709   SET_ADDRESS(_stubs, StubRoutines::_checkcast_arraycopy_uninit);
3710 
3711   SET_ADDRESS(_stubs, StubRoutines::unsafe_arraycopy());
3712   SET_ADDRESS(_stubs, StubRoutines::generic_arraycopy());
3713 
3714   SET_ADDRESS(_stubs, StubRoutines::jbyte_fill());
3715   SET_ADDRESS(_stubs, StubRoutines::jshort_fill());
3716   SET_ADDRESS(_stubs, StubRoutines::jint_fill());
3717   SET_ADDRESS(_stubs, StubRoutines::arrayof_jbyte_fill());
3718   SET_ADDRESS(_stubs, StubRoutines::arrayof_jshort_fill());
3719   SET_ADDRESS(_stubs, StubRoutines::arrayof_jint_fill());
3720 
3721   SET_ADDRESS(_stubs, StubRoutines::data_cache_writeback());
3722   SET_ADDRESS(_stubs, StubRoutines::data_cache_writeback_sync());
3723 
3724   SET_ADDRESS(_stubs, StubRoutines::aescrypt_encryptBlock());
3725   SET_ADDRESS(_stubs, StubRoutines::aescrypt_decryptBlock());
3726   SET_ADDRESS(_stubs, StubRoutines::cipherBlockChaining_encryptAESCrypt());
3727   SET_ADDRESS(_stubs, StubRoutines::cipherBlockChaining_decryptAESCrypt());
3728   SET_ADDRESS(_stubs, StubRoutines::electronicCodeBook_encryptAESCrypt());
3729   SET_ADDRESS(_stubs, StubRoutines::electronicCodeBook_decryptAESCrypt());
3730   SET_ADDRESS(_stubs, StubRoutines::poly1305_processBlocks());
3731   SET_ADDRESS(_stubs, StubRoutines::counterMode_AESCrypt());
3732   SET_ADDRESS(_stubs, StubRoutines::ghash_processBlocks());
3733   SET_ADDRESS(_stubs, StubRoutines::chacha20Block());
3734   SET_ADDRESS(_stubs, StubRoutines::base64_encodeBlock());
3735   SET_ADDRESS(_stubs, StubRoutines::base64_decodeBlock());
3736   SET_ADDRESS(_stubs, StubRoutines::md5_implCompress());
3737   SET_ADDRESS(_stubs, StubRoutines::md5_implCompressMB());
3738   SET_ADDRESS(_stubs, StubRoutines::sha1_implCompress());
3739   SET_ADDRESS(_stubs, StubRoutines::sha1_implCompressMB());
3740   SET_ADDRESS(_stubs, StubRoutines::sha256_implCompress());
3741   SET_ADDRESS(_stubs, StubRoutines::sha256_implCompressMB());
3742   SET_ADDRESS(_stubs, StubRoutines::sha512_implCompress());
3743   SET_ADDRESS(_stubs, StubRoutines::sha512_implCompressMB());
3744   SET_ADDRESS(_stubs, StubRoutines::sha3_implCompress());
3745   SET_ADDRESS(_stubs, StubRoutines::sha3_implCompressMB());
3746 
3747   SET_ADDRESS(_stubs, StubRoutines::updateBytesCRC32());
3748 
3749   SET_ADDRESS(_stubs, StubRoutines::crc32c_table_addr());
3750   SET_ADDRESS(_stubs, StubRoutines::updateBytesCRC32C());
3751   SET_ADDRESS(_stubs, StubRoutines::updateBytesAdler32());
3752 
3753   SET_ADDRESS(_stubs, StubRoutines::multiplyToLen());
3754   SET_ADDRESS(_stubs, StubRoutines::squareToLen());
3755   SET_ADDRESS(_stubs, StubRoutines::mulAdd());
3756   SET_ADDRESS(_stubs, StubRoutines::montgomeryMultiply());
3757   SET_ADDRESS(_stubs, StubRoutines::montgomerySquare());
3758   SET_ADDRESS(_stubs, StubRoutines::bigIntegerRightShift());
3759   SET_ADDRESS(_stubs, StubRoutines::bigIntegerLeftShift());
3760   SET_ADDRESS(_stubs, StubRoutines::galoisCounterMode_AESCrypt());
3761 
3762   SET_ADDRESS(_stubs, StubRoutines::vectorizedMismatch());
3763 
3764   SET_ADDRESS(_stubs, StubRoutines::dexp());
3765   SET_ADDRESS(_stubs, StubRoutines::dlog());
3766   SET_ADDRESS(_stubs, StubRoutines::dlog10());
3767   SET_ADDRESS(_stubs, StubRoutines::dpow());
3768   SET_ADDRESS(_stubs, StubRoutines::dsin());
3769   SET_ADDRESS(_stubs, StubRoutines::dcos());
3770   SET_ADDRESS(_stubs, StubRoutines::dlibm_reduce_pi04l());
3771   SET_ADDRESS(_stubs, StubRoutines::dlibm_sin_cos_huge());
3772   SET_ADDRESS(_stubs, StubRoutines::dlibm_tan_cot_huge());
3773   SET_ADDRESS(_stubs, StubRoutines::dtan());
3774 
3775   SET_ADDRESS(_stubs, StubRoutines::f2hf_adr());
3776   SET_ADDRESS(_stubs, StubRoutines::hf2f_adr());
3777 
3778 #if defined(AMD64) && !defined(ZERO)
3779   SET_ADDRESS(_stubs, StubRoutines::x86::d2i_fixup());
3780   SET_ADDRESS(_stubs, StubRoutines::x86::f2i_fixup());
3781   SET_ADDRESS(_stubs, StubRoutines::x86::d2l_fixup());
3782   SET_ADDRESS(_stubs, StubRoutines::x86::f2l_fixup());
3783   SET_ADDRESS(_stubs, StubRoutines::x86::float_sign_mask());
3784   SET_ADDRESS(_stubs, StubRoutines::x86::float_sign_flip());
3785   SET_ADDRESS(_stubs, StubRoutines::x86::double_sign_mask());
3786   SET_ADDRESS(_stubs, StubRoutines::x86::double_sign_flip());
3787   SET_ADDRESS(_stubs, StubRoutines::x86::vector_popcount_lut());
3788   SET_ADDRESS(_stubs, StubRoutines::x86::vector_float_sign_mask());
3789   SET_ADDRESS(_stubs, StubRoutines::x86::vector_float_sign_flip());
3790   SET_ADDRESS(_stubs, StubRoutines::x86::vector_double_sign_mask());
3791   SET_ADDRESS(_stubs, StubRoutines::x86::vector_double_sign_flip());
3792   // The iota indices are ordered by type B/S/I/L/F/D, and the offset between two types is 64.
3793   // See C2_MacroAssembler::load_iota_indices().
3794   for (int i = 0; i < 6; i++) {
3795     SET_ADDRESS(_stubs, StubRoutines::x86::vector_iota_indices() + i * 64);
3796   }
3797 #endif
3798 #if defined(AARCH64) && !defined(ZERO)
3799   SET_ADDRESS(_stubs, StubRoutines::aarch64::d2i_fixup());
3800   SET_ADDRESS(_stubs, StubRoutines::aarch64::f2i_fixup());
3801   SET_ADDRESS(_stubs, StubRoutines::aarch64::d2l_fixup());
3802   SET_ADDRESS(_stubs, StubRoutines::aarch64::f2l_fixup());
3803   SET_ADDRESS(_stubs, StubRoutines::aarch64::float_sign_mask());
3804   SET_ADDRESS(_stubs, StubRoutines::aarch64::float_sign_flip());
3805   SET_ADDRESS(_stubs, StubRoutines::aarch64::double_sign_mask());
3806   SET_ADDRESS(_stubs, StubRoutines::aarch64::double_sign_flip());
3807   SET_ADDRESS(_stubs, StubRoutines::aarch64::zero_blocks());
3808   SET_ADDRESS(_stubs, StubRoutines::aarch64::count_positives());
3809   SET_ADDRESS(_stubs, StubRoutines::aarch64::count_positives_long());
3810   SET_ADDRESS(_stubs, StubRoutines::aarch64::large_array_equals());
3811   SET_ADDRESS(_stubs, StubRoutines::aarch64::compare_long_string_LL());
3812   SET_ADDRESS(_stubs, StubRoutines::aarch64::compare_long_string_UU());
3813   SET_ADDRESS(_stubs, StubRoutines::aarch64::compare_long_string_LU());
3814   SET_ADDRESS(_stubs, StubRoutines::aarch64::compare_long_string_UL());
3815   SET_ADDRESS(_stubs, StubRoutines::aarch64::string_indexof_linear_ul());
3816   SET_ADDRESS(_stubs, StubRoutines::aarch64::string_indexof_linear_ll());
3817   SET_ADDRESS(_stubs, StubRoutines::aarch64::string_indexof_linear_uu());
3818   SET_ADDRESS(_stubs, StubRoutines::aarch64::large_byte_array_inflate());
3819   SET_ADDRESS(_stubs, StubRoutines::aarch64::spin_wait());
3820 #endif
3821 
3822   // Blobs
3823   SET_ADDRESS(_blobs, SharedRuntime::get_handle_wrong_method_stub());
3824   SET_ADDRESS(_blobs, SharedRuntime::get_ic_miss_stub());
3825   SET_ADDRESS(_blobs, SharedRuntime::get_resolve_opt_virtual_call_stub());
3826   SET_ADDRESS(_blobs, SharedRuntime::get_resolve_virtual_call_stub());
3827   SET_ADDRESS(_blobs, SharedRuntime::get_resolve_static_call_stub());
3828   SET_ADDRESS(_blobs, SharedRuntime::deopt_blob()->entry_point());
3829   SET_ADDRESS(_blobs, SharedRuntime::polling_page_safepoint_handler_blob()->entry_point());
3830   SET_ADDRESS(_blobs, SharedRuntime::polling_page_return_handler_blob()->entry_point());
3831 #ifdef COMPILER2
3832   SET_ADDRESS(_blobs, SharedRuntime::polling_page_vectors_safepoint_handler_blob()->entry_point());
3833 #endif
3834 
3835   SET_ADDRESS(_blobs, SharedRuntime::throw_AbstractMethodError_entry());
3836   SET_ADDRESS(_blobs, SharedRuntime::throw_IncompatibleClassChangeError_entry());
3837   SET_ADDRESS(_blobs, SharedRuntime::throw_NullPointerException_at_call_entry());
3838   SET_ADDRESS(_blobs, SharedRuntime::throw_StackOverflowError_entry());
3839   SET_ADDRESS(_blobs, SharedRuntime::throw_delayed_StackOverflowError_entry());
3840 
3841   assert(_blobs_length <= _shared_blobs_max, "increase _shared_blobs_max to %d", _blobs_length);
3842   _final_blobs_length = _blobs_length;
3843   _complete = true;
3844   log_info(scc,init)("External addresses and stubs recorded");
3845 }
3846 
3847 void SCAddressTable::init_opto() {
3848 #ifdef COMPILER2
3849   // OptoRuntime Blobs
3850   SET_ADDRESS(_C2_blobs, OptoRuntime::uncommon_trap_blob()->entry_point());
3851   SET_ADDRESS(_C2_blobs, OptoRuntime::exception_blob()->entry_point());
3852   SET_ADDRESS(_C2_blobs, OptoRuntime::new_instance_Java());
3853   SET_ADDRESS(_C2_blobs, OptoRuntime::new_array_Java());
3854   SET_ADDRESS(_C2_blobs, OptoRuntime::new_array_nozero_Java());
3855   SET_ADDRESS(_C2_blobs, OptoRuntime::multianewarray2_Java());
3856   SET_ADDRESS(_C2_blobs, OptoRuntime::multianewarray3_Java());
3857   SET_ADDRESS(_C2_blobs, OptoRuntime::multianewarray4_Java());
3858   SET_ADDRESS(_C2_blobs, OptoRuntime::multianewarray5_Java());
3859   SET_ADDRESS(_C2_blobs, OptoRuntime::multianewarrayN_Java());
3860   SET_ADDRESS(_C2_blobs, OptoRuntime::vtable_must_compile_stub());
3861   SET_ADDRESS(_C2_blobs, OptoRuntime::complete_monitor_locking_Java());
3862   SET_ADDRESS(_C2_blobs, OptoRuntime::monitor_notify_Java());
3863   SET_ADDRESS(_C2_blobs, OptoRuntime::monitor_notifyAll_Java());
3864   SET_ADDRESS(_C2_blobs, OptoRuntime::rethrow_stub());
3865   SET_ADDRESS(_C2_blobs, OptoRuntime::slow_arraycopy_Java());
3866   SET_ADDRESS(_C2_blobs, OptoRuntime::register_finalizer_Java());
3867   SET_ADDRESS(_C2_blobs, OptoRuntime::class_init_barrier_Java());
3868 #if INCLUDE_JVMTI
3869   SET_ADDRESS(_C2_blobs, OptoRuntime::notify_jvmti_vthread_start());
3870   SET_ADDRESS(_C2_blobs, OptoRuntime::notify_jvmti_vthread_end());
3871   SET_ADDRESS(_C2_blobs, OptoRuntime::notify_jvmti_vthread_mount());
3872   SET_ADDRESS(_C2_blobs, OptoRuntime::notify_jvmti_vthread_unmount());
3873 #endif /* INCLUDE_JVMTI */
3874 #endif
3875 
3876   assert(_C2_blobs_length <= _C2_blobs_max, "increase _C2_blobs_max to %d", _C2_blobs_length);
3877   _final_blobs_length = MAX2(_final_blobs_length, (_shared_blobs_max + _C2_blobs_length));
3878   _opto_complete = true;
3879   log_info(scc,init)("OptoRuntime Blobs recorded");
3880 }
3881 
3882 void SCAddressTable::init_c1() {
3883 #ifdef COMPILER1
3884   // Runtime1 Blobs
3885   for (int i = 0; i < (int)(C1StubId::NUM_STUBIDS); i++) {
3886     C1StubId id = (C1StubId)i;
3887     if (Runtime1::blob_for(id) == nullptr) {
3888       log_info(scc, init)("C1 blob %s is missing", Runtime1::name_for(id));
3889       continue;
3890     }
3891     if (Runtime1::entry_for(id) == nullptr) {
3892       log_info(scc, init)("C1 blob %s is missing entry", Runtime1::name_for(id));
3893       continue;
3894     }
3895     address entry = Runtime1::entry_for(id);
3896     SET_ADDRESS(_C1_blobs, entry);
3897   }
3898 #if INCLUDE_G1GC
3899   if (UseG1GC) {
3900     G1BarrierSetC1* bs = (G1BarrierSetC1*)BarrierSet::barrier_set()->barrier_set_c1();
3901     address entry = bs->pre_barrier_c1_runtime_code_blob()->code_begin();
3902     SET_ADDRESS(_C1_blobs, entry);
3903     entry = bs->post_barrier_c1_runtime_code_blob()->code_begin();
3904     SET_ADDRESS(_C1_blobs, entry);
3905   }
3906 #endif // INCLUDE_G1GC
3907 #if INCLUDE_ZGC
3908   if (UseZGC) {
3909     ZBarrierSetC1* bs = (ZBarrierSetC1*)BarrierSet::barrier_set()->barrier_set_c1();
3910     SET_ADDRESS(_C1_blobs, bs->_load_barrier_on_oop_field_preloaded_runtime_stub);
3911     SET_ADDRESS(_C1_blobs, bs->_load_barrier_on_weak_oop_field_preloaded_runtime_stub);
3912     SET_ADDRESS(_C1_blobs, bs->_store_barrier_on_oop_field_with_healing);
3913     SET_ADDRESS(_C1_blobs, bs->_store_barrier_on_oop_field_without_healing);
3914   }
3915 #endif // INCLUDE_ZGC
3916 #if INCLUDE_SHENANDOAHGC
3917   if (UseShenandoahGC) {
3918     ShenandoahBarrierSetC1* bs = (ShenandoahBarrierSetC1*)BarrierSet::barrier_set()->barrier_set_c1();
3919     SET_ADDRESS(_C1_blobs, bs->pre_barrier_c1_runtime_code_blob()->code_begin());
3920     SET_ADDRESS(_C1_blobs, bs->load_reference_barrier_strong_rt_code_blob()->code_begin());
3921     SET_ADDRESS(_C1_blobs, bs->load_reference_barrier_strong_native_rt_code_blob()->code_begin());
3922     SET_ADDRESS(_C1_blobs, bs->load_reference_barrier_weak_rt_code_blob()->code_begin());
3923     SET_ADDRESS(_C1_blobs, bs->load_reference_barrier_phantom_rt_code_blob()->code_begin());
3924   }
3925 #endif // INCLUDE_SHENANDOAHGC
3926 #endif // COMPILER1
3927 
3928   assert(_C1_blobs_length <= _C1_blobs_max, "increase _C1_blobs_max to %d", _C1_blobs_length);
3929   _final_blobs_length = MAX2(_final_blobs_length, (_shared_blobs_max + _C2_blobs_max + _C1_blobs_length));
3930   _c1_complete = true;
3931   log_info(scc,init)("Runtime1 Blobs recorded");
3932 }
3933 
3934 #undef SET_ADDRESS
3935 #undef _extrs_max
3936 #undef _stubs_max
3937 #undef _blobs_max
3938 #undef _shared_blobs_max
3939 #undef _C1_blobs_max
3940 #undef _C2_blobs_max
3941 
3942 SCAddressTable::~SCAddressTable() {
3943   if (_extrs_addr != nullptr) {
3944     FREE_C_HEAP_ARRAY(address, _extrs_addr);
3945   }
3946   if (_stubs_addr != nullptr) {
3947     FREE_C_HEAP_ARRAY(address, _stubs_addr);
3948   }
3949   if (_blobs_addr != nullptr) {
3950     FREE_C_HEAP_ARRAY(address, _blobs_addr);
3951   }
3952 }
3953 
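     // Table of C string constants referenced from cached code. The parallel arrays keep:
     // the string pointers (_C_strings), the mapping from stored id to original index
     // (_C_strings_s), the mapping from original index to stored id (_C_strings_id), and the
     // length/hash used to match identical string contents (_C_strings_len, _C_strings_hash).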
3954 #define MAX_STR_COUNT 200
3955 static const char* _C_strings[MAX_STR_COUNT] = {nullptr};
3956 static int _C_strings_count = 0;
3957 static int _C_strings_s[MAX_STR_COUNT] = {0};
3958 static int _C_strings_id[MAX_STR_COUNT] = {0};
3959 static int _C_strings_len[MAX_STR_COUNT] = {0};
3960 static int _C_strings_hash[MAX_STR_COUNT] = {0};
3961 static int _C_strings_used = 0;
3962 
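     // On-disk layout of the string table: strings_count lengths (uint), then
     // strings_count hashes (uint), then the concatenated '\0'-terminated string bytes.
     // store_strings() writes the data in the same order.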
3963 void SCCache::load_strings() {
3964   uint strings_count  = _load_header->strings_count();
3965   if (strings_count == 0) {
3966     return;
3967   }
3968   uint strings_offset = _load_header->strings_offset();
3969   uint strings_size   = _load_header->entries_offset() - strings_offset;
3970   uint data_size = (uint)(strings_count * sizeof(uint));
3971   uint* sizes = (uint*)addr(strings_offset);
3972   uint* hashs = (uint*)addr(strings_offset + data_size);
3973   strings_size -= 2 * data_size;
3974   // We have to keep cached strings alive longer than the _cache buffer
3975   // because they are referenced from compiled code which may
3976   // still be executed on VM exit after _cache is freed.
3977   char* p = NEW_C_HEAP_ARRAY(char, strings_size+1, mtCode);
3978   memcpy(p, addr(strings_offset + 2 * data_size), strings_size);
3979   _C_strings_buf = p;
3980   assert(strings_count <= MAX_STR_COUNT, "sanity");
3981   for (uint i = 0; i < strings_count; i++) {
3982     _C_strings[i] = p;
3983     uint len = sizes[i];
3984     _C_strings_s[i] = i;
3985     _C_strings_id[i] = i;
3986     _C_strings_len[i] = len;
3987     _C_strings_hash[i] = hashs[i];
3988     p += len;
3989   }
3990   assert((uint)(p - _C_strings_buf) <= strings_size, "(" INTPTR_FORMAT " - " INTPTR_FORMAT ") = %d > %d ", p2i(p), p2i(_C_strings_buf), (uint)(p - _C_strings_buf), strings_size);
3991   _C_strings_count = strings_count;
3992   _C_strings_used  = strings_count;
3993   log_info(scc, init)("Load %d C strings at offset %d from Startup Code Cache '%s'", _C_strings_count, strings_offset, _cache_path);
3994 }
3995 
3996 int SCCache::store_strings() {
3997   uint offset = _write_position;
3998   uint length = 0;
3999   if (_C_strings_used > 0) {
4000     // Write sizes first
4001     for (int i = 0; i < _C_strings_used; i++) {
4002       uint len = _C_strings_len[i] + 1; // Include terminating '\0'
4003       length += len;
4004       assert(len < 1000, "big string: %s", _C_strings[i]);
4005       uint n = write_bytes(&len, sizeof(uint));
4006       if (n != sizeof(uint)) {
4007         return -1;
4008       }
4009     }
4010     // Write hashes
4011     for (int i = 0; i < _C_strings_used; i++) {
4012       uint n = write_bytes(&(_C_strings_hash[i]), sizeof(uint));
4013       if (n != sizeof(uint)) {
4014         return -1;
4015       }
4016     }
4017     for (int i = 0; i < _C_strings_used; i++) {
4018       uint len = _C_strings_len[i] + 1; // Include terminating '\0'
4019       uint n = write_bytes(_C_strings[_C_strings_s[i]], len);
4020       if (n != len) {
4021         return -1;
4022       }
4023     }
4024     log_info(scc, exit)("Wrote %d C strings of total length %d at offset %d to Startup Code Cache '%s'",
4025                         _C_strings_used, length, offset, _cache_path);
4026   }
4027   return _C_strings_used;
4028 }
4029 
4030 void SCCache::add_new_C_string(const char* str) {
4031   assert(for_write(), "only when storing code");
4032   _table->add_C_string(str);
4033 }
4034 
4035 void SCAddressTable::add_C_string(const char* str) {
4036   if (str != nullptr && _complete && (_opto_complete || _c1_complete)) {
4037     // Check if this string address was already recorded
4038     for (int i = 0; i < _C_strings_count; i++) {
4039       if (_C_strings[i] == str) {
4040         return; // Found existing one
4041       }
4042     }
4043     // Add new one
4044     if (_C_strings_count < MAX_STR_COUNT) {
4045       log_trace(scc)("add_C_string: [%d] " INTPTR_FORMAT " %s", _C_strings_count, p2i(str), str);
4046       _C_strings_id[_C_strings_count] = -1; // Not yet assigned a stored id
4047       _C_strings[_C_strings_count++] = str;
4048     } else {
4049       CompileTask* task = ciEnv::current()->task();
4050       log_info(scc)("%d (L%d): Number of C strings > max %d %s",
4051                        task->compile_id(), task->comp_level(), MAX_STR_COUNT, str);
4052     }
4053   }
4054 }
4055 
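// Map a C string address to its stored id. The first time an address is seen
// it is matched by length and hash against already recorded strings, or a new
// entry is assigned. Returns -1 if the address is not a known C string.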
4056 int SCAddressTable::id_for_C_string(address str) {
4057   for (int i = 0; i < _C_strings_count; i++) {
4058     if (_C_strings[i] == (const char*)str) { // found
4059       int id = _C_strings_id[i];
4060       if (id >= 0) {
4061         assert(id < _C_strings_used, "%d >= %d", id, _C_strings_used);
4062         return id; // Found recorded
4063       }
4064       // Search for the same string content
4065       int len = (int)strlen((const char*)str);
4066       int hash = java_lang_String::hash_code((const jbyte*)str, len);
4067       for (int j = 0; j < _C_strings_used; j++) {
4068         if ((_C_strings_len[j] == len) && (_C_strings_hash[j] == hash)) {
4069           _C_strings_id[i] = j; // Found match
4070           return j;
4071         }
4072       }
4073       // Not found among recorded strings, add a new entry
4074       id = _C_strings_used++;
4075       _C_strings_s[id] = i;
4076       _C_strings_id[i] = id;
4077       _C_strings_len[id] = len;
4078       _C_strings_hash[id] = hash;
4079       return id;
4080     }
4081   }
4082   return -1;
4083 }
4084 
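// Return the address of the cached C string with the given index.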
4085 address SCAddressTable::address_for_C_string(int idx) {
4086   assert(idx < _C_strings_count, "sanity");
4087   return (address)_C_strings[idx];
4088 }
4089 
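// Linear search for an address in a table. Returns its index or -1 if absent.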
4090 int search_address(address addr, address* table, uint length) {
4091   for (int i = 0; i < (int)length; i++) {
4092     if (table[i] == addr) {
4093       return i;
4094     }
4095   }
4096   return -1;
4097 }
4098 
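// Decode a table id back into an address: ids below _all_max index the
// external, stub and blob address tables in that order; the next
// _C_strings_count ids refer to cached C strings; larger ids encode a
// distance from os::init.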
4099 address SCAddressTable::address_for_id(int idx) {
4100   if (!_complete) {
4101     fatal("SCA table is not complete");
4102   }
4103   if (idx == -1) {
4104     return (address)-1;
4105   }
4106   uint id = (uint)idx;
4107   if (id >= _all_max && idx < (_all_max + _C_strings_count)) {
4108     return address_for_C_string(idx - _all_max);
4109   }
4110   if (idx < 0 || id == (_extrs_length + _stubs_length + _final_blobs_length)) {
4111     fatal("Incorrect id %d for SCA table", id);
4112   }
4113   if (idx > (_all_max + _C_strings_count)) {
4114     return (address)os::init + idx;
4115   }
4116   if (id < _extrs_length) {
4117     return _extrs_addr[id];
4118   }
4119   id -= _extrs_length;
4120   if (id < _stubs_length) {
4121     return _stubs_addr[id];
4122   }
4123   id -= _stubs_length;
4124   if (id < _final_blobs_length) {
4125     return _blobs_addr[id];
4126   }
4127   return nullptr;
4128 }
4129 
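// Encode an address referenced from generated code into a table id. C strings,
// stubs, code blobs and external runtime entries are checked in that order.
// An unknown address that resolves to an offset inside a native function is
// assumed to be a C string and encoded as its distance from os::init;
// anything else is a fatal error.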
4130 int SCAddressTable::id_for_address(address addr, RelocIterator reloc, CodeBuffer* buffer) {
4131   int id = -1;
4132   if (addr == (address)-1) { // Static call stub has jump to itself
4133     return id;
4134   }
4135   if (!_complete) {
4136     fatal("SCA table is not complete");
4137   }
4138   // Search for C string
4139   id = id_for_C_string(addr);
4140   if (id >= 0) {
4141     return id + _all_max;
4142   }
4143   if (StubRoutines::contains(addr)) {
4144     // Search in stubs
4145     id = search_address(addr, _stubs_addr, _stubs_length);
4146     if (id < 0) {
4147       StubCodeDesc* desc = StubCodeDesc::desc_for(addr);
4148       if (desc == nullptr) {
4149         desc = StubCodeDesc::desc_for(addr + frame::pc_return_offset);
4150       }
4151       const char* stub_name = (desc != nullptr) ? desc->name() : "<unknown>";
4152       fatal("Address " INTPTR_FORMAT " for Stub:%s is missing in SCA table", p2i(addr), stub_name);
4153     } else {
4154       id += _extrs_length;
4155     }
4156   } else {
4157     CodeBlob* cb = CodeCache::find_blob(addr);
4158     if (cb != nullptr) {
4159       // Search in code blobs
4160       id = search_address(addr, _blobs_addr, _final_blobs_length);
4161       if (id < 0) {
4162         fatal("Address " INTPTR_FORMAT " for Blob:%s is missing in SCA table", p2i(addr), cb->name());
4163       } else {
4164         id += _extrs_length + _stubs_length;
4165       }
4166     } else {
4167       // Search in runtime functions
4168       id = search_address(addr, _extrs_addr, _extrs_length);
4169       if (id < 0) {
4170         ResourceMark rm;
4171         const int buflen = 1024;
4172         char* func_name = NEW_RESOURCE_ARRAY(char, buflen);
4173         int offset = 0;
4174         if (os::dll_address_to_function_name(addr, func_name, buflen, &offset)) {
4175           if (offset > 0) {
4176             // Could be address of C string
4177             uint dist = (uint)pointer_delta(addr, (address)os::init, 1);
4178             CompileTask* task = ciEnv::current()->task();
4179             uint compile_id = 0;
4180             uint comp_level = 0;
4181             if (task != nullptr) { // this could be called from compiler runtime initialization (compiler blobs)
4182               compile_id = task->compile_id();
4183               comp_level = task->comp_level();
4184             }
4185             log_info(scc)("%d (L%d): Address " INTPTR_FORMAT " (offset %d) for runtime target '%s' is missing in SCA table",
4186                           compile_id, comp_level, p2i(addr), dist, (const char*)addr);
4187             assert(dist > (uint)(_all_max + MAX_STR_COUNT), "change encoding of distance");
4188             return dist;
4189           }
4190           fatal("Address " INTPTR_FORMAT " for runtime target '%s+%d' is missing in SCA table", p2i(addr), func_name, offset);
4191         } else {
4192           os::print_location(tty, p2i(addr), true);
4193           reloc.print_current_on(tty);
4194 #ifndef PRODUCT
4195           buffer->print_on(tty);
4196           buffer->decode();
4197 #endif // !PRODUCT
4198           fatal("Address " INTPTR_FORMAT " for <unknown> is missing in SCA table", p2i(addr));
4199         }
4200       }
4201     }
4202   }
4203   return id;
4204 }
4205 
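// Capture card table geometry (grain and card shifts) from the active
// barrier set, if it is a CardTableBarrierSet.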
4206 void AOTRuntimeConstants::initialize_from_runtime() {
4207   BarrierSet* bs = BarrierSet::barrier_set();
4208   if (bs->is_a(BarrierSet::CardTableBarrierSet)) {
4209     CardTableBarrierSet* ctbs = (CardTableBarrierSet*)bs;
4210     _aot_runtime_constants._grain_shift = ctbs->grain_shift();
4211     _aot_runtime_constants._card_shift = ctbs->card_shift();
4212   }
4213 }
4214 
4215 AOTRuntimeConstants AOTRuntimeConstants::_aot_runtime_constants;
4216 
4217 address AOTRuntimeConstants::_field_addresses_list[] = {
4218   grain_shift_address(),
4219   card_shift_address(),
4220   nullptr
4221 };
4222 
4223 
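// Close the cache to nmethod readers: flip the reader counter negative so no
// new readers can enter, then spin until all current readers have left
// (counter reaches -1).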
4224 void SCCache::wait_for_no_nmethod_readers() {
4225   while (true) {
4226     int cur = Atomic::load(&_nmethod_readers);
4227     int upd = -(cur + 1);
4228     if (cur >= 0 && Atomic::cmpxchg(&_nmethod_readers, cur, upd) == cur) {
4229       // Success, no new readers should appear.
4230       break;
4231     }
4232   }
4233 
4234   // Now wait for all readers to leave.
4235   SpinYield w;
4236   while (Atomic::load(&_nmethod_readers) != -1) {
4237     w.wait();
4238   }
4239 }
4240 
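// Enter as an nmethod reader: increment the reader counter, unless the cache
// has already been closed (counter is negative), in which case record failure.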
4241 SCCache::ReadingMark::ReadingMark() {
4242   while (true) {
4243     int cur = Atomic::load(&_nmethod_readers);
4244     if (cur < 0) {
4245       // Cache is already closed, cannot proceed.
4246       _failed = true;
4247       return;
4248     }
4249     if (Atomic::cmpxchg(&_nmethod_readers, cur, cur + 1) == cur) {
4250       // Successfully recorded ourselves as entered.
4251       _failed = false;
4252       return;
4253     }
4254   }
4255 }
4256 
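// Leave as an nmethod reader: count down towards 0 while the cache is open,
// or up towards -1 once it has been closed for readers.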
4257 SCCache::ReadingMark::~ReadingMark() {
4258   if (_failed) {
4259     return;
4260   }
4261   while (true) {
4262     int cur = Atomic::load(&_nmethod_readers);
4263     if (cur > 0) {
4264       // Cache is open, we are counting down towards 0.
4265       if (Atomic::cmpxchg(&_nmethod_readers, cur, cur - 1) == cur) {
4266         return;
4267       }
4268     } else {
4269       // Cache is closed, we are counting up towards -1.
4270       if (Atomic::cmpxchg(&_nmethod_readers, cur, cur + 1) == cur) {
4271         return;
4272       }
4273     }
4274   }
4275 }