1 /*
   2  * Copyright (c) 2023, 2024, Oracle and/or its affiliates. All rights reserved.
   3  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
   4  *
   5  * This code is free software; you can redistribute it and/or modify it
   6  * under the terms of the GNU General Public License version 2 only, as
   7  * published by the Free Software Foundation.
   8  *
   9  * This code is distributed in the hope that it will be useful, but WITHOUT
  10  * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
  11  * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
  12  * version 2 for more details (a copy is included in the LICENSE file that
  13  * accompanied this code).
  14  *
  15  * You should have received a copy of the GNU General Public License version
  16  * 2 along with this work; if not, write to the Free Software Foundation,
  17  * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
  18  *
  19  * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
  20  * or visit www.oracle.com if you need additional information or have any
  21  * questions.
  22  *
  23  */
  24 
  25 #include "precompiled.hpp"
  26 #include "asm/macroAssembler.hpp"
  27 #include "cds/cdsAccess.hpp"
  28 #include "cds/cdsConfig.hpp"
  29 #include "cds/heapShared.hpp"
  30 #include "cds/metaspaceShared.hpp"
  31 #include "ci/ciConstant.hpp"
  32 #include "ci/ciEnv.hpp"
  33 #include "ci/ciField.hpp"
  34 #include "ci/ciMethod.hpp"
  35 #include "ci/ciMethodData.hpp"
  36 #include "ci/ciObject.hpp"
  37 #include "ci/ciUtilities.inline.hpp"
  38 #include "classfile/javaAssertions.hpp"
  39 #include "classfile/stringTable.hpp"
  40 #include "classfile/symbolTable.hpp"
  41 #include "classfile/systemDictionary.hpp"
  42 #include "classfile/vmClasses.hpp"
  43 #include "classfile/vmIntrinsics.hpp"
  44 #include "code/codeBlob.hpp"
  45 #include "code/codeCache.hpp"
  46 #include "code/oopRecorder.inline.hpp"
  47 #include "code/SCCache.hpp"
  48 #include "compiler/abstractCompiler.hpp"
  49 #include "compiler/compilationPolicy.hpp"
  50 #include "compiler/compileBroker.hpp"
  51 #include "compiler/compileTask.hpp"
  52 #include "gc/g1/g1BarrierSetRuntime.hpp"
  53 #include "gc/shared/gcConfig.hpp"
  54 #include "logging/log.hpp"
  55 #include "memory/universe.hpp"
  56 #include "oops/klass.inline.hpp"
  57 #include "oops/method.inline.hpp"
  58 #include "oops/trainingData.hpp"
  59 #include "prims/jvmtiThreadState.hpp"
  60 #include "runtime/atomic.hpp"
  61 #include "runtime/flags/flagSetting.hpp"
  62 #include "runtime/globals_extension.hpp"
  63 #include "runtime/handles.inline.hpp"
  64 #include "runtime/java.hpp"
  65 #include "runtime/jniHandles.inline.hpp"
  66 #include "runtime/os.inline.hpp"
  67 #include "runtime/sharedRuntime.hpp"
  68 #include "runtime/stubCodeGenerator.hpp"
  69 #include "runtime/stubRoutines.hpp"
  70 #include "runtime/timerTrace.hpp"
  71 #include "runtime/threadIdentifier.hpp"
  72 #include "utilities/ostream.hpp"
  73 #include "utilities/spinYield.hpp"
  74 #ifdef COMPILER1
  75 #include "c1/c1_Runtime1.hpp"
  76 #include "c1/c1_LIRAssembler.hpp"
  77 #include "gc/shared/c1/barrierSetC1.hpp"
  78 #include "gc/g1/c1/g1BarrierSetC1.hpp"
  79 #if INCLUDE_SHENANDOAHGC
  80 #include "gc/shenandoah/c1/shenandoahBarrierSetC1.hpp"
  81 #endif
  82 #include "gc/z/c1/zBarrierSetC1.hpp"
  83 #endif
  84 #ifdef COMPILER2
  85 #include "opto/runtime.hpp"
  86 #endif
  87 #if INCLUDE_JVMCI
  88 #include "jvmci/jvmci.hpp"
  89 #endif
  90 #if INCLUDE_SHENANDOAHGC
  91 #include "gc/shenandoah/shenandoahRuntime.hpp"
  92 #endif
  93 
  94 #include <sys/stat.h>
  95 #include <errno.h>
  96 
  97 #ifndef O_BINARY       // if defined (Win32) use binary files.
  98 #define O_BINARY 0     // otherwise do nothing.
  99 #endif
 100 
 101 static elapsedTimer _t_totalLoad;
 102 static elapsedTimer _t_totalRegister;
 103 static elapsedTimer _t_totalFind;
 104 static elapsedTimer _t_totalStore;
 105 
 106 SCCache* SCCache::_cache = nullptr;
 107 
 108 static bool enable_timers() {
 109   return CITime || log_is_enabled(Info, init);
 110 }
 111 
 112 static void exit_vm_on_load_failure() {
  // Treat SCC warnings as errors when RequireSharedSpaces is on.
 114   if (RequireSharedSpaces) {
    vm_exit_during_initialization("Unable to use startup cached code.", nullptr);
 116   }
 117 }
 118 
 119 static void exit_vm_on_store_failure() {
  // Treat SCC warnings as errors when RequireSharedSpaces is on.
 121   if (RequireSharedSpaces) {
 122     tty->print_cr("Unable to create startup cached code.");
 123     // Failure during AOT code caching, we don't want to dump core
 124     vm_abort(false);
 125   }
 126 }
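
// Called during VM startup: sets caching-related flag defaults and opens the cache file
// when LoadCachedCode or StoreCachedCode is enabled.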
 127 void SCCache::initialize() {
 128   if (LoadCachedCode && !UseSharedSpaces) {
 129     return;
 130   }
 131   if (StoreCachedCode || LoadCachedCode) {
 132     if (FLAG_IS_DEFAULT(ClassInitBarrierMode)) {
 133       FLAG_SET_DEFAULT(ClassInitBarrierMode, 1);
 134     }
 135   } else if (ClassInitBarrierMode > 0) {
 136     log_info(scc, init)("Set ClassInitBarrierMode to 0 because StoreCachedCode and LoadCachedCode are false.");
 137     FLAG_SET_DEFAULT(ClassInitBarrierMode, 0);
 138   }
 139   if ((LoadCachedCode || StoreCachedCode) && CachedCodeFile != nullptr) {
 140     const int len = (int)strlen(CachedCodeFile);
 141     // cache file path
 142     char* path  = NEW_C_HEAP_ARRAY(char, len+1, mtCode);
 143     memcpy(path, CachedCodeFile, len);
 144     path[len] = '\0';
 145     if (!open_cache(path)) {
 146       exit_vm_on_load_failure();
 147       return;
 148     }
 149     if (StoreCachedCode) {
 150       FLAG_SET_DEFAULT(FoldStableValues, false);
 151       FLAG_SET_DEFAULT(ForceUnreachable, true);
 152     }
 153     FLAG_SET_DEFAULT(DelayCompilerStubsGeneration, false);
 154   }
 155 }
 156 
 157 void SCCache::init2() {
 158   if (!is_on()) {
 159     return;
 160   }
 161   // After Universe initialized
 162   BarrierSet* bs = BarrierSet::barrier_set();
 163   if (bs->is_a(BarrierSet::CardTableBarrierSet)) {
 164     address byte_map_base = ci_card_table_address_as<address>();
 165     if (is_on_for_write() && !external_word_Relocation::can_be_relocated(byte_map_base)) {
 166       // Bail out since we can't encode card table base address with relocation
 167       log_warning(scc, init)("Can't create Startup Code Cache because card table base address is not relocatable: " INTPTR_FORMAT, p2i(byte_map_base));
 168       close();
 169       exit_vm_on_load_failure();
 170     }
 171   }
  // Initialize AOT runtime constants as appropriate for this runtime.
 173   AOTRuntimeConstants::initialize_from_runtime();
 174 
 175   if (!verify_vm_config()) {
 176     close();
 177     exit_vm_on_load_failure();
 178   }
 179 }
 180 
 181 void SCCache::print_timers_on(outputStream* st) {
 182   if (LoadCachedCode) {
 183     st->print_cr ("    SC Load Time:         %7.3f s", _t_totalLoad.seconds());
 184     st->print_cr ("      nmethod register:     %7.3f s", _t_totalRegister.seconds());
 185     st->print_cr ("      find cached code:     %7.3f s", _t_totalFind.seconds());
 186   }
 187   if (StoreCachedCode) {
 188     st->print_cr ("    SC Store Time:        %7.3f s", _t_totalStore.seconds());
 189   }
 190 }
 191 
 192 bool SCCache::is_C3_on() {
 193 #if INCLUDE_JVMCI
 194   if (UseJVMCICompiler) {
 195     return (StoreCachedCode || LoadCachedCode) && UseC2asC3;
 196   }
 197 #endif
 198   return false;
 199 }
 200 
 201 bool SCCache::is_code_load_thread_on() {
 202   return UseCodeLoadThread && LoadCachedCode;
 203 }
 204 
 205 bool SCCache::gen_preload_code(ciMethod* m, int entry_bci) {
 206   VM_ENTRY_MARK;
 207   return (entry_bci == InvocationEntryBci) && is_on() && _cache->gen_preload_code() &&
 208          CDSAccess::can_generate_cached_code(m->get_Method());
 209 }
 210 
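// Print, for each SCC entry matching the nmethod's method, its compilation level, preload flag,
// decompile count, status flags (L = loaded, F = load failed, I = not entrant) and compilation id.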
 211 static void print_helper(nmethod* nm, outputStream* st) {
 212   SCCache::iterate([&](SCCEntry* e) {
 213     if (e->method() == nm->method()) {
 214       ResourceMark rm;
 215       stringStream ss;
 216       ss.print("A%s%d", (e->for_preload() ? "P" : ""), e->comp_level());
 217       if (e->decompile() > 0) {
 218         ss.print("+D%d", e->decompile());
 219       }
 220       ss.print("[%s%s%s]",
 221                (e->is_loaded()   ? "L" : ""),
 222                (e->load_fail()   ? "F" : ""),
 223                (e->not_entrant() ? "I" : ""));
 224       ss.print("#%d", e->comp_id());
 225 
 226       st->print(" %s", ss.freeze());
 227     }
 228   });
 229 }
 230 
 231 void SCCache::close() {
 232   if (is_on()) {
 233     if (SCCache::is_on_for_read()) {
 234       LogStreamHandle(Info, init) log;
 235       if (log.is_enabled()) {
 236         log.print_cr("Startup Code Cache statistics (when closed): ");
 237         SCCache::print_statistics_on(&log);
 238         log.cr();
 239         SCCache::print_timers_on(&log);
 240 
 241         LogStreamHandle(Info, scc, init) log1;
 242         if (log1.is_enabled()) {
 243           SCCache::print_unused_entries_on(&log1);
 244         }
 245 
 246         LogStreamHandle(Info, scc, codecache) info_scc;
 247         if (info_scc.is_enabled()) {
 248           NMethodIterator iter(NMethodIterator::all);
 249           while (iter.next()) {
 250             nmethod* nm = iter.method();
 251             if (nm->is_in_use() && !nm->is_native_method() && !nm->is_osr_method()) {
 252               info_scc.print("%5d:%c%c%c%d:", nm->compile_id(),
 253                              (nm->method()->is_shared() ? 'S' : ' '),
 254                              (nm->is_scc() ? 'A' : ' '),
 255                              (nm->preloaded() ? 'P' : ' '),
 256                              nm->comp_level());
 257               print_helper(nm, &info_scc);
 258               info_scc.print(": ");
 259               CompileTask::print(&info_scc, nm, nullptr, true /*short_form*/);
 260 
 261               LogStreamHandle(Debug, scc, codecache) debug_scc;
 262               if (debug_scc.is_enabled()) {
 263                 MethodTrainingData* mtd = MethodTrainingData::lookup_for(nm->method());
 264                 if (mtd != nullptr) {
 265                   mtd->iterate_all_compiles([&](CompileTrainingData* ctd) {
 266                     debug_scc.print("     CTD: "); ctd->print_on(&debug_scc); debug_scc.cr();
 267                   });
 268                 }
 269               }
 270             }
 271           }
 272         }
 273       }
 274     }
 275 
 276     delete _cache; // Free memory
 277     _cache = nullptr;
 278   }
 279 }
 280 
 281 void SCCache::invalidate(SCCEntry* entry) {
  // This could be concurrent execution.
 283   if (entry != nullptr && is_on()) { // Request could come after cache is closed.
 284     _cache->invalidate_entry(entry);
 285   }
 286 }
 287 
 288 bool SCCache::is_loaded(SCCEntry* entry) {
 289   if (is_on() && _cache->cache_buffer() != nullptr) {
 290     return (uint)((char*)entry - _cache->cache_buffer()) < _cache->load_size();
 291   }
 292   return false;
 293 }
 294 
 295 void SCCache::preload_code(JavaThread* thread) {
 296   if ((ClassInitBarrierMode == 0) || !is_on_for_read()) {
 297     return;
 298   }
 299   if ((DisableCachedCode & (1 << 3)) != 0) {
 300     return; // no preloaded code (level 5);
 301   }
 302   _cache->preload_startup_code(thread);
 303 }
 304 
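// Look up a cached code entry for the method at the given compilation level. Honors the
// DisableCachedCode bit mask, matches on a hash of the method's name and signature plus its
// decompile count, and respects the IgnorePrecompiled compiler directive.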
 305 SCCEntry* SCCache::find_code_entry(const methodHandle& method, uint comp_level) {
 306   switch (comp_level) {
 307     case CompLevel_simple:
 308       if ((DisableCachedCode & (1 << 0)) != 0) {
 309         return nullptr;
 310       }
 311       break;
 312     case CompLevel_limited_profile:
 313       if ((DisableCachedCode & (1 << 1)) != 0) {
 314         return nullptr;
 315       }
 316       break;
 317     case CompLevel_full_optimization:
 318       if ((DisableCachedCode & (1 << 2)) != 0) {
 319         return nullptr;
 320       }
 321       break;
 322 
 323     default: return nullptr; // Level 1, 2, and 4 only
 324   }
 325   TraceTime t1("SC total find code time", &_t_totalFind, enable_timers(), false);
 326   if (is_on() && _cache->cache_buffer() != nullptr) {
 327     MethodData* md = method->method_data();
 328     uint decomp = (md == nullptr) ? 0 : md->decompile_count();
 329 
 330     ResourceMark rm;
 331     const char* target_name = method->name_and_sig_as_C_string();
 332     uint hash = java_lang_String::hash_code((const jbyte*)target_name, (int)strlen(target_name));
 333     SCCEntry* entry = _cache->find_entry(SCCEntry::Code, hash, comp_level, decomp);
 334     if (entry == nullptr) {
 335       log_info(scc, nmethod)("Missing entry for '%s' (comp_level %d, decomp: %d, hash: " UINT32_FORMAT_X_0 ")", target_name, (uint)comp_level, decomp, hash);
 336 #ifdef ASSERT
 337     } else {
 338       uint name_offset = entry->offset() + entry->name_offset();
      uint name_size   = entry->name_size(); // Includes '\0'
 340       const char* name = _cache->cache_buffer() + name_offset;
 341       if (strncmp(target_name, name, name_size) != 0) {
 342         assert(false, "SCA: saved nmethod's name '%s' is different from '%s', hash: " UINT32_FORMAT_X_0, name, target_name, hash);
 343       }
 344 #endif
 345     }
 346 
 347     DirectiveSet* directives = DirectivesStack::getMatchingDirective(method, nullptr);
 348     if (directives->IgnorePrecompiledOption) {
 349       LogStreamHandle(Info, scc, compilation) log;
 350       if (log.is_enabled()) {
 351         log.print("Ignore cached code entry on level %d for ", comp_level);
 352         method->print_value_on(&log);
 353       }
 354       return nullptr;
 355     }
 356 
 357     return entry;
 358   }
 359   return nullptr;
 360 }
 361 
 362 void SCCache::add_C_string(const char* str) {
 363   if (is_on_for_write()) {
 364     _cache->add_new_C_string(str);
 365   }
 366 }
 367 
 368 bool SCCache::allow_const_field(ciConstant& value) {
 369   return !is_on() || !StoreCachedCode // Restrict only when we generate cache
 370         // Can not trust primitive too   || !is_reference_type(value.basic_type())
 371         // May disable this too for now  || is_reference_type(value.basic_type()) && value.as_object()->should_be_constant()
 372         ;
 373 }
 374 
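// Open the cache file for reading when LoadCachedCode is set and/or create an empty
// in-memory cache for writing when StoreCachedCode is set.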
 375 bool SCCache::open_cache(const char* cache_path) {
 376   if (LoadCachedCode) {
 377     log_info(scc)("Trying to load Startup Code Cache '%s'", cache_path);
 378     struct stat st;
 379     if (os::stat(cache_path, &st) != 0) {
 380       log_warning(scc, init)("Specified Startup Code Cache file not found '%s'", cache_path);
 381       return false;
 382     } else if ((st.st_mode & S_IFMT) != S_IFREG) {
      log_warning(scc, init)("Specified Startup Code Cache is not a file '%s'", cache_path);
 384       return false;
 385     }
 386     int fd = os::open(cache_path, O_RDONLY | O_BINARY, 0);
 387     if (fd < 0) {
 388       if (errno == ENOENT) {
 389         log_warning(scc, init)("Specified Startup Code Cache file not found '%s'", cache_path);
 390       } else {
 391         log_warning(scc, init)("Failed to open Startup Code Cache file '%s': (%s)", cache_path, os::strerror(errno));
 392       }
 393       return false;
 394     } else {
 395       log_info(scc, init)("Opened for read Startup Code Cache '%s'", cache_path);
 396     }
 397     SCCache* cache = new SCCache(cache_path, fd, (uint)st.st_size);
 398     bool failed = cache->failed();
 399     if (::close(fd) < 0) {
 400       log_warning(scc)("Failed to close for read Startup Code Cache file '%s'", cache_path);
 401       failed = true;
 402     }
 403     if (failed) {
 404       delete cache;
 405       _cache = nullptr;
 406       return false;
 407     }
 408     _cache = cache;
 409   }
 410   if (_cache == nullptr && StoreCachedCode) {
 411     SCCache* cache = new SCCache(cache_path, -1 /* fd */, 0 /* size */);
 412     if (cache->failed()) {
 413       delete cache;
 414       _cache = nullptr;
 415       return false;
 416     }
 417     _cache = cache;
 418   }
 419   return true;
 420 }
 421 
 422 class CachedCodeDirectory : public CachedCodeDirectoryInternal {
 423 public:
 424   int _some_number;
 425   InstanceKlass* _some_klass;
 426   size_t _my_data_length;
 427   void* _my_data;
 428 };
 429 
 430 // Skeleton code for including cached code in CDS:
 431 //
// [1] Use CachedCodeDirectory to keep track of all of the data related to cached code.
 433 //     E.g., you can build a hashtable to record what methods have been archived.
 434 //
 435 // [2] Memory for all data for cached code, including CachedCodeDirectory, should be
 436 //     allocated using CDSAccess::allocate_from_code_cache().
 437 //
 438 // [3] CachedCodeDirectory must be the very first allocation.
 439 //
// [4] Two kinds of pointers can be stored:
 441 //     - A pointer p that points to metadata. CDSAccess::can_generate_cached_code(p) must return true.
 442 //     - A pointer to a buffer returned by CDSAccess::allocate_from_code_cache().
 443 //       (It's OK to point to an interior location within this buffer).
 444 //     Such pointers must be stored using CDSAccess::set_pointer()
 445 //
 446 // The buffers allocated by CDSAccess::allocate_from_code_cache() are in a contiguous region. At runtime, this
 447 // region is mapped to the beginning of the CodeCache (see _cds_code_space in codeCache.cpp). All the pointers
 448 // in this buffer are relocated as necessary (e.g., to account for the runtime location of the CodeCache).
 449 //
 450 // Example:
 451 //
 452 // # make sure hw.cds doesn't exist, so that it's regenerated (1.5 step training)
 453 // $ rm -f hw.cds; java -Xlog:cds,scc::uptime,tags,pid -XX:CacheDataStore=hw.cds -cp ~/tmp/HelloWorld.jar HelloWorld
 454 //
// # After training finishes, hw.cds should contain a CachedCodeDirectory. You can see the effect of relocation
 456 // # from the [scc] log.
 457 // $ java -Xlog:cds,scc -XX:CacheDataStore=hw.cds -cp ~/tmp/HelloWorld.jar HelloWorld
 458 // [0.016s][info][scc] new workflow: cached code mapped at 0x7fef97ebc000
 459 // [0.016s][info][scc] _cached_code_directory->_some_klass     = 0x800009ca8 (java.lang.String)
 460 // [0.016s][info][scc] _cached_code_directory->_some_number    = 0
 461 // [0.016s][info][scc] _cached_code_directory->_my_data_length = 0
 462 // [0.016s][info][scc] _cached_code_directory->_my_data        = 0x7fef97ebc020 (32 bytes offset from base)
 463 //
 464 // The 1.5 step training may be hard to debug. If you want to run in a debugger, run the above training step
 465 // with an additional "-XX:+CDSManualFinalImage" command-line argument.
 466 
// This is always at the very beginning of the mmapped CDS "cc" (cached code) region
 468 static CachedCodeDirectory* _cached_code_directory = nullptr;
 469 
 470 #if INCLUDE_CDS_JAVA_HEAP
 471 void SCCache::new_workflow_start_writing_cache() {
 472   CachedCodeDirectory* dir = (CachedCodeDirectory*)CDSAccess::allocate_from_code_cache(sizeof(CachedCodeDirectory));
 473   _cached_code_directory = dir;
 474 
 475   CDSAccess::set_pointer(&dir->_some_klass, vmClasses::String_klass());
 476 
 477   size_t n = 120;
 478   void* d = (void*)CDSAccess::allocate_from_code_cache(n);
 479   CDSAccess::set_pointer(&dir->_my_data, d);
 480 }
 481 
 482 void SCCache::new_workflow_end_writing_cache() {
 483   _cached_code_directory->dumptime_init_internal();
 484 }
 485 
 486 void SCCache::new_workflow_load_cache() {
 487   void* ptr = CodeCache::map_cached_code();
 488   if (ptr != nullptr) {
 489     ResourceMark rm;
 490     _cached_code_directory = (CachedCodeDirectory*)ptr;
 491 
 492     // CDS uses this to implement CDSAccess::get_archived_object(k)
 493     _cached_code_directory->runtime_init_internal();
 494 
 495     // At this point:
 496     // - CodeCache::initialize_heaps() has finished.
 497     // - CDS archive is fully mapped ("metadata", "heap" and "cached_code" regions are mapped)
 498     // - All pointers in the mapped CDS regions are relocated.
 499     // - CDSAccess::get_archived_object() works.
 500 
 501     // Data used by AOT compiler
 502     InstanceKlass* k = _cached_code_directory->_some_klass;
 503     log_info(scc)("new workflow: cached code mapped at %p", ptr);
 504     log_info(scc)("_cached_code_directory->_some_klass     = %p (%s)", k, k->external_name());
 505     log_info(scc)("_cached_code_directory->_some_number    = %d", _cached_code_directory->_some_number);
 506     log_info(scc)("_cached_code_directory->_my_data_length = %zu", _cached_code_directory->_my_data_length);
 507     log_info(scc)("_cached_code_directory->_my_data        = %p (%zu bytes offset from base)", _cached_code_directory->_my_data,
 508                   pointer_delta((address)_cached_code_directory->_my_data, (address)_cached_code_directory, 1));
 509   }
 510 }
 511 #endif // INCLUDE_CDS_JAVA_HEAP
 512 
 513 #define DATA_ALIGNMENT HeapWordSize
 514 
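// Construct the cache: for reading, the whole file is read into a C-heap buffer and the header
// (JVM version, configuration) is verified; for writing, a store buffer of CachedCodeMaxSize
// bytes is allocated with SCCEntry entries growing down from its top.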
 515 SCCache::SCCache(const char* cache_path, int fd, uint load_size) {
 516   _load_header = nullptr;
 517   _cache_path = cache_path;
 518   _for_read  = LoadCachedCode;
 519   _for_write = StoreCachedCode;
 520   _load_size = load_size;
 521   _store_size = 0;
 522   _write_position = 0;
 523   _closing  = false;
 524   _failed = false;
 525   _lookup_failed = false;
 526   _table = nullptr;
 527   _load_entries = nullptr;
 528   _store_entries  = nullptr;
 529   _C_strings_buf  = nullptr;
 530   _load_buffer = nullptr;
 531   _store_buffer = nullptr;
 532   _C_load_buffer = nullptr;
 533   _C_store_buffer = nullptr;
 534   _store_entries_cnt = 0;
 535   _gen_preload_code = false;
 536   _for_preload = false;       // changed while storing entry data
 537   _has_clinit_barriers = false;
 538 
 539   _compile_id = 0;
 540   _comp_level = 0;
 541 
 542   _use_meta_ptrs = UseSharedSpaces ? UseMetadataPointers : false;
 543 
  // Read header at the beginning of the cache
 545   uint header_size = sizeof(SCCHeader);
 546   if (_for_read) {
 547     // Read cache
 548     _C_load_buffer = NEW_C_HEAP_ARRAY(char, load_size + DATA_ALIGNMENT, mtCode);
 549     _load_buffer = align_up(_C_load_buffer, DATA_ALIGNMENT);
 550     uint n = (uint)::read(fd, _load_buffer, load_size);
 551     if (n != load_size) {
 552       log_warning(scc, init)("Failed to read %d bytes at address " INTPTR_FORMAT " from Startup Code Cache file '%s'", load_size, p2i(_load_buffer), _cache_path);
 553       set_failed();
 554       return;
 555     }
 556     log_info(scc, init)("Read %d bytes at address " INTPTR_FORMAT " from Startup Code Cache '%s'", load_size, p2i(_load_buffer), _cache_path);
 557 
 558     _load_header = (SCCHeader*)addr(0);
 559     const char* scc_jvm_version = addr(_load_header->jvm_version_offset());
 560     if (strncmp(scc_jvm_version, VM_Version::internal_vm_info_string(), strlen(scc_jvm_version)) != 0) {
 561       log_warning(scc, init)("Disable Startup Code Cache: JVM version '%s' recorded in '%s' does not match current version '%s'", scc_jvm_version, _cache_path, VM_Version::internal_vm_info_string());
 562       set_failed();
 563       return;
 564     }
 565     if (!_load_header->verify_config(_cache_path, load_size)) {
 566       set_failed();
 567       return;
 568     }
 569     log_info(scc, init)("Read header from Startup Code Cache '%s'", cache_path);
 570     if (_load_header->has_meta_ptrs()) {
 571       assert(UseSharedSpaces, "should be verified already");
      _use_meta_ptrs = true; // Regardless of UseMetadataPointers
 573       UseMetadataPointers = true;
 574     }
 575     // Read strings
 576     load_strings();
 577   }
 578   if (_for_write) {
 579     _gen_preload_code = _use_meta_ptrs && (ClassInitBarrierMode > 0);
 580 
 581     _C_store_buffer = NEW_C_HEAP_ARRAY(char, CachedCodeMaxSize + DATA_ALIGNMENT, mtCode);
 582     _store_buffer = align_up(_C_store_buffer, DATA_ALIGNMENT);
    // Entries are allocated at the end of the buffer in reverse order (as on a stack).
 584     _store_entries = (SCCEntry*)align_up(_C_store_buffer + CachedCodeMaxSize, DATA_ALIGNMENT);
 585     log_info(scc, init)("Allocated store buffer at address " INTPTR_FORMAT " of size %d", p2i(_store_buffer), CachedCodeMaxSize);
 586   }
 587   _table = new SCAddressTable();
 588 }
 589 
 590 void SCCache::init_table() {
 591   SCCache* cache = SCCache::cache();
 592   if (cache != nullptr && cache->_table != nullptr) {
 593     cache->_table->init();
 594   }
 595 }
 596 
 597 void SCCache::init_opto_table() {
 598   SCCache* cache = SCCache::cache();
 599   if (cache != nullptr && cache->_table != nullptr) {
 600     cache->_table->init_opto();
 601   }
 602 }
 603 
 604 void SCCache::init_c1_table() {
 605   SCCache* cache = SCCache::cache();
 606   if (cache != nullptr && cache->_table != nullptr) {
 607     cache->_table->init_c1();
 608   }
 609 }
 610 
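// Record the VM configuration the cache was created with; it is checked against the current
// VM configuration by SCConfig::verify() before the cache is used.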
 611 void SCConfig::record(bool use_meta_ptrs) {
 612   _flags = 0;
 613   if (use_meta_ptrs) {
 614     _flags |= metadataPointers;
 615   }
 616 #ifdef ASSERT
 617   _flags |= debugVM;
 618 #endif
 619   if (UseCompressedOops) {
 620     _flags |= compressedOops;
 621   }
 622   if (UseCompressedClassPointers) {
 623     _flags |= compressedClassPointers;
 624   }
 625   if (UseTLAB) {
 626     _flags |= useTLAB;
 627   }
 628   if (JavaAssertions::systemClassDefault()) {
 629     _flags |= systemClassAssertions;
 630   }
 631   if (JavaAssertions::userClassDefault()) {
 632     _flags |= userClassAssertions;
 633   }
 634   if (EnableContended) {
 635     _flags |= enableContendedPadding;
 636   }
 637   if (RestrictContended) {
 638     _flags |= restrictContendedPadding;
 639   }
 640   _compressedOopShift    = CompressedOops::shift();
 641   _compressedKlassShift  = CompressedKlassPointers::shift();
 642   _contendedPaddingWidth = ContendedPaddingWidth;
 643   _objectAlignment       = ObjectAlignmentInBytes;
 644   _gc                    = (uint)Universe::heap()->kind();
 645 }
 646 
 647 bool SCConfig::verify(const char* cache_path) const {
 648 #ifdef ASSERT
 649   if ((_flags & debugVM) == 0) {
 650     log_warning(scc, init)("Disable Startup Code Cache: '%s' was created by product VM, it can't be used by debug VM", cache_path);
 651     return false;
 652   }
 653 #else
 654   if ((_flags & debugVM) != 0) {
 655     log_warning(scc, init)("Disable Startup Code Cache: '%s' was created by debug VM, it can't be used by product VM", cache_path);
 656     return false;
 657   }
 658 #endif
 659 
 660   CollectedHeap::Name scc_gc = (CollectedHeap::Name)_gc;
 661   if (scc_gc != Universe::heap()->kind()) {
 662     log_warning(scc, init)("Disable Startup Code Cache: '%s' was created with different GC: %s vs current %s", cache_path, GCConfig::hs_err_name(scc_gc), GCConfig::hs_err_name());
 663     return false;
 664   }
 665 
 666   if (((_flags & compressedOops) != 0) != UseCompressedOops) {
 667     log_warning(scc, init)("Disable Startup Code Cache: '%s' was created with UseCompressedOops = %s", cache_path, UseCompressedOops ? "false" : "true");
 668     return false;
 669   }
 670   if (((_flags & compressedClassPointers) != 0) != UseCompressedClassPointers) {
 671     log_warning(scc, init)("Disable Startup Code Cache: '%s' was created with UseCompressedClassPointers = %s", cache_path, UseCompressedClassPointers ? "false" : "true");
 672     return false;
 673   }
 674 
 675   if (((_flags & systemClassAssertions) != 0) != JavaAssertions::systemClassDefault()) {
 676     log_warning(scc, init)("Disable Startup Code Cache: '%s' was created with JavaAssertions::systemClassDefault() = %s", cache_path, JavaAssertions::systemClassDefault() ? "disabled" : "enabled");
 677     return false;
 678   }
 679   if (((_flags & userClassAssertions) != 0) != JavaAssertions::userClassDefault()) {
 680     log_warning(scc, init)("Disable Startup Code Cache: '%s' was created with JavaAssertions::userClassDefault() = %s", cache_path, JavaAssertions::userClassDefault() ? "disabled" : "enabled");
 681     return false;
 682   }
 683 
 684   if (((_flags & enableContendedPadding) != 0) != EnableContended) {
 685     log_warning(scc, init)("Disable Startup Code Cache: '%s' was created with EnableContended = %s", cache_path, EnableContended ? "false" : "true");
 686     return false;
 687   }
 688   if (((_flags & restrictContendedPadding) != 0) != RestrictContended) {
 689     log_warning(scc, init)("Disable Startup Code Cache: '%s' was created with RestrictContended = %s", cache_path, RestrictContended ? "false" : "true");
 690     return false;
 691   }
 692   if (_compressedOopShift != (uint)CompressedOops::shift()) {
 693     log_warning(scc, init)("Disable Startup Code Cache: '%s' was created with CompressedOops::shift() = %d vs current %d", cache_path, _compressedOopShift, CompressedOops::shift());
 694     return false;
 695   }
 696   if (_compressedKlassShift != (uint)CompressedKlassPointers::shift()) {
 697     log_warning(scc, init)("Disable Startup Code Cache: '%s' was created with CompressedKlassPointers::shift() = %d vs current %d", cache_path, _compressedKlassShift, CompressedKlassPointers::shift());
 698     return false;
 699   }
 700   if (_contendedPaddingWidth != (uint)ContendedPaddingWidth) {
 701     log_warning(scc, init)("Disable Startup Code Cache: '%s' was created with ContendedPaddingWidth = %d vs current %d", cache_path, _contendedPaddingWidth, ContendedPaddingWidth);
 702     return false;
 703   }
 704   if (_objectAlignment != (uint)ObjectAlignmentInBytes) {
 705     log_warning(scc, init)("Disable Startup Code Cache: '%s' was created with ObjectAlignmentInBytes = %d vs current %d", cache_path, _objectAlignment, ObjectAlignmentInBytes);
 706     return false;
 707   }
 708   return true;
 709 }
 710 
 711 bool SCCHeader::verify_config(const char* cache_path, uint load_size) const {
 712   if (_version != SCC_VERSION) {
 713     log_warning(scc, init)("Disable Startup Code Cache: different SCC version %d vs %d recorded in '%s'", SCC_VERSION, _version, cache_path);
 714     return false;
 715   }
 716   if (_cache_size != load_size) {
 717     log_warning(scc, init)("Disable Startup Code Cache: different cached code size %d vs %d recorded in '%s'", load_size, _cache_size, cache_path);
 718     return false;
 719   }
 720   if (has_meta_ptrs() && !UseSharedSpaces) {
    log_warning(scc, init)("Disable Startup Code Cache: '%s' contains metadata pointers but CDS is off", cache_path);
 722     return false;
 723   }
 724   return true;
 725 }
 726 
 727 volatile int SCCache::_nmethod_readers = 0;
 728 
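// Closing the cache: stop further access, wait for in-flight readers, write out any collected
// code under Compile_lock, and free the load/store buffers.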
 729 SCCache::~SCCache() {
 730   if (_closing) {
 731     return; // Already closed
 732   }
 733   // Stop any further access to cache.
 734   // Checked on entry to load_nmethod() and store_nmethod().
 735   _closing = true;
 736   if (_for_read) {
    // Wait for all load_nmethod() calls to finish.
 738     wait_for_no_nmethod_readers();
 739   }
  // Prevent writing code into the cache while we are closing it.
  // This lock is held by ciEnv::register_method(), which calls store_nmethod().
 742   MutexLocker ml(Compile_lock);
 743   if (for_write()) { // Finalize cache
 744     finish_write();
 745   }
 746   FREE_C_HEAP_ARRAY(char, _cache_path);
 747   if (_C_load_buffer != nullptr) {
 748     FREE_C_HEAP_ARRAY(char, _C_load_buffer);
 749     _C_load_buffer = nullptr;
 750     _load_buffer = nullptr;
 751   }
 752   if (_C_store_buffer != nullptr) {
 753     FREE_C_HEAP_ARRAY(char, _C_store_buffer);
 754     _C_store_buffer = nullptr;
 755     _store_buffer = nullptr;
 756   }
 757   if (_table != nullptr) {
 758     delete _table;
 759     _table = nullptr;
 760   }
 761 }
 762 
 763 SCCache* SCCache::open_for_read() {
 764   if (SCCache::is_on_for_read()) {
 765     return SCCache::cache();
 766   }
 767   return nullptr;
 768 }
 769 
 770 SCCache* SCCache::open_for_write() {
 771   if (SCCache::is_on_for_write()) {
 772     SCCache* cache = SCCache::cache();
 773     cache->clear_lookup_failed(); // Reset bit
 774     return cache;
 775   }
 776   return nullptr;
 777 }
 778 
 779 void copy_bytes(const char* from, address to, uint size) {
 780   assert(size > 0, "sanity");
 781   bool by_words = true;
 782   if ((size > 2 * HeapWordSize) && (((intptr_t)from | (intptr_t)to) & (HeapWordSize - 1)) == 0) {
 783     // Use wordwise copies if possible:
 784     Copy::disjoint_words((HeapWord*)from,
 785                          (HeapWord*)to,
 786                          ((size_t)size + HeapWordSize-1) / HeapWordSize);
 787   } else {
 788     by_words = false;
 789     Copy::conjoint_jbytes(from, to, (size_t)size);
 790   }
 791   log_trace(scc)("Copied %d bytes as %s from " INTPTR_FORMAT " to " INTPTR_FORMAT, size, (by_words ? "HeapWord" : "bytes"), p2i(from), p2i(to));
 792 }
 793 
 794 void SCCReader::set_read_position(uint pos) {
 795   if (pos == _read_position) {
 796     return;
 797   }
 798   assert(pos < _cache->load_size(), "offset:%d >= file size:%d", pos, _cache->load_size());
 799   _read_position = pos;
 800 }
 801 
 802 bool SCCache::set_write_position(uint pos) {
 803   if (pos == _write_position) {
 804     return true;
 805   }
 806   if (_store_size < _write_position) {
 807     _store_size = _write_position; // Adjust during write
 808   }
 809   assert(pos < _store_size, "offset:%d >= file size:%d", pos, _store_size);
 810   _write_position = pos;
 811   return true;
 812 }
 813 
 814 static char align_buffer[256] = { 0 };
 815 
 816 bool SCCache::align_write() {
  // We are not executing code from the cache - we copy it out byte by byte first,
  // so no big alignment (or any alignment at all) is needed.
 819   uint padding = DATA_ALIGNMENT - (_write_position & (DATA_ALIGNMENT - 1));
 820   if (padding == DATA_ALIGNMENT) {
 821     return true;
 822   }
 823   uint n = write_bytes((const void*)&align_buffer, padding);
 824   if (n != padding) {
 825     return false;
 826   }
 827   log_trace(scc)("Adjust write alignment in Startup Code Cache '%s'", _cache_path);
 828   return true;
 829 }
 830 
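// Append nbytes from buffer to the store buffer at the current write position. Fails (and may
// abort the VM via exit_vm_on_store_failure) if the data would overlap the SCCEntry entries
// growing down from the top of the buffer.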
 831 uint SCCache::write_bytes(const void* buffer, uint nbytes) {
 832   assert(for_write(), "Code Cache file is not created");
 833   if (nbytes == 0) {
 834     return 0;
 835   }
 836   uint new_position = _write_position + nbytes;
 837   if (new_position >= (uint)((char*)_store_entries - _store_buffer)) {
 838     log_warning(scc)("Failed to write %d bytes at offset %d to Startup Code Cache file '%s'. Increase CachedCodeMaxSize.",
 839                      nbytes, _write_position, _cache_path);
 840     set_failed();
 841     exit_vm_on_store_failure();
 842     return 0;
 843   }
  copy_bytes((const char*)buffer, (address)(_store_buffer + _write_position), nbytes);
 845   log_trace(scc)("Wrote %d bytes at offset %d to Startup Code Cache '%s'", nbytes, _write_position, _cache_path);
 846   _write_position += nbytes;
 847   if (_store_size < _write_position) {
 848     _store_size = _write_position;
 849   }
 850   return nbytes;
 851 }
 852 
 853 void SCCEntry::update_method_for_writing() {
 854   if (_method != nullptr) {
 855     _method = CDSAccess::method_in_cached_code(_method);
 856   }
 857 }
 858 
 859 void SCCEntry::print(outputStream* st) const {
 860   st->print_cr(" SCA entry " INTPTR_FORMAT " [kind: %d, id: " UINT32_FORMAT_X_0 ", offset: %d, size: %d, comp_level: %d, comp_id: %d, decompiled: %d, %s%s%s%s%s]",
 861                p2i(this), (int)_kind, _id, _offset, _size, _comp_level, _comp_id, _decompile,
 862                (_not_entrant? "not_entrant" : "entrant"),
 863                (_loaded ? ", loaded" : ""),
 864                (_has_clinit_barriers ? ", has_clinit_barriers" : ""),
 865                (_for_preload ? ", for_preload" : ""),
 866                (_ignore_decompile ? ", ignore_decomp" : ""));
 867 }
 868 
 869 void* SCCEntry::operator new(size_t x, SCCache* cache) {
 870   return (void*)(cache->add_entry());
 871 }
 872 
 873 bool skip_preload(methodHandle mh) {
 874   if (!mh->method_holder()->is_loaded()) {
 875     return true;
 876   }
 877   DirectiveSet* directives = DirectivesStack::getMatchingDirective(mh, nullptr);
 878   if (directives->DontPreloadOption) {
 879     LogStreamHandle(Info, scc, init) log;
 880     if (log.is_enabled()) {
 881       log.print("Exclude preloading code for ");
 882       mh->print_value_on(&log);
 883     }
 884     return true;
 885   }
 886   return false;
 887 }
 888 
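// Schedule CompLevel_full_optimization compilations (CompileTask::Reason_Preload) for all
// entrant preload entries recorded in the cache, linking method holders as needed.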
 889 void SCCache::preload_startup_code(TRAPS) {
 890   if (CompilationPolicy::compiler_count(CompLevel_full_optimization) == 0) {
    // Since we reuse the CompileBroker API to install cached code, we're required to have a JIT compiler for the
    // level we want (that is, CompLevel_full_optimization).
 893     return;
 894   }
 895   assert(_for_read, "sanity");
 896   uint count = _load_header->entries_count();
 897   if (_load_entries == nullptr) {
 898     // Read it
 899     _search_entries = (uint*)addr(_load_header->entries_offset()); // [id, index]
 900     _load_entries = (SCCEntry*)(_search_entries + 2 * count);
 901     log_info(scc, init)("Read %d entries table at offset %d from Startup Code Cache '%s'", count, _load_header->entries_offset(), _cache_path);
 902   }
 903   uint preload_entries_count = _load_header->preload_entries_count();
 904   if (preload_entries_count > 0) {
 905     uint* entries_index = (uint*)addr(_load_header->preload_entries_offset());
 906     log_info(scc, init)("Load %d preload entries from Startup Code Cache '%s'", preload_entries_count, _cache_path);
 907     uint count = MIN2(preload_entries_count, SCLoadStop);
 908     for (uint i = SCLoadStart; i < count; i++) {
 909       uint index = entries_index[i];
 910       SCCEntry* entry = &(_load_entries[index]);
 911       if (entry->not_entrant()) {
 912         continue;
 913       }
 914       methodHandle mh(THREAD, entry->method());
 915       assert((mh.not_null() && MetaspaceShared::is_in_shared_metaspace((address)mh())), "sanity");
 916       if (skip_preload(mh)) {
 917         continue; // Exclude preloading for this method
 918       }
 919       assert(mh->method_holder()->is_loaded(), "");
 920       if (!mh->method_holder()->is_linked()) {
 921         assert(!HAS_PENDING_EXCEPTION, "");
 922         mh->method_holder()->link_class(THREAD);
 923         if (HAS_PENDING_EXCEPTION) {
 924           LogStreamHandle(Info, scc) log;
 925           if (log.is_enabled()) {
 926             ResourceMark rm;
 927             log.print("Linkage failed for %s: ", mh->method_holder()->external_name());
 928             THREAD->pending_exception()->print_value_on(&log);
 929             if (log_is_enabled(Debug, scc)) {
 930               THREAD->pending_exception()->print_on(&log);
 931             }
 932           }
 933           CLEAR_PENDING_EXCEPTION;
 934         }
 935       }
 936       if (mh->scc_entry() != nullptr) {
        // A second C2 compilation of the same method could happen for
        // different reasons without marking the first entry as not entrant.
 939         continue; // Keep old entry to avoid issues
 940       }
 941       mh->set_scc_entry(entry);
 942       CompileBroker::compile_method(mh, InvocationEntryBci, CompLevel_full_optimization, methodHandle(), 0, false, CompileTask::Reason_Preload, CHECK);
 943     }
 944   }
 945 }
 946 
 947 static bool check_entry(SCCEntry::Kind kind, uint id, uint comp_level, uint decomp, SCCEntry* entry) {
 948   if (entry->kind() == kind) {
 949     assert(entry->id() == id, "sanity");
 950     if (kind != SCCEntry::Code || (!entry->not_entrant() && !entry->has_clinit_barriers() &&
 951                                   (entry->comp_level() == comp_level) &&
 952                                   (entry->ignore_decompile() || entry->decompile() == decomp))) {
 953       return true; // Found
 954     }
 955   }
 956   return false;
 957 }
 958 
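// Binary search in the sorted [id, index] table; entries with the same id are then scanned
// linearly in both directions to find one matching the compilation level and decompile count.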
 959 SCCEntry* SCCache::find_entry(SCCEntry::Kind kind, uint id, uint comp_level, uint decomp) {
 960   assert(_for_read, "sanity");
 961   uint count = _load_header->entries_count();
 962   if (_load_entries == nullptr) {
 963     // Read it
 964     _search_entries = (uint*)addr(_load_header->entries_offset()); // [id, index]
 965     _load_entries = (SCCEntry*)(_search_entries + 2 * count);
 966     log_info(scc, init)("Read %d entries table at offset %d from Startup Code Cache '%s'", count, _load_header->entries_offset(), _cache_path);
 967   }
 968   // Binary search
 969   int l = 0;
 970   int h = count - 1;
 971   while (l <= h) {
 972     int mid = (l + h) >> 1;
 973     int ix = mid * 2;
 974     uint is = _search_entries[ix];
 975     if (is == id) {
 976       int index = _search_entries[ix + 1];
 977       SCCEntry* entry = &(_load_entries[index]);
 978       if (check_entry(kind, id, comp_level, decomp, entry)) {
 979         return entry; // Found
 980       }
      // Linear search around (could be the same nmethod with a different decompile count)
 982       for (int i = mid - 1; i >= l; i--) { // search back
 983         ix = i * 2;
 984         is = _search_entries[ix];
 985         if (is != id) {
 986           break;
 987         }
 988         index = _search_entries[ix + 1];
 989         SCCEntry* entry = &(_load_entries[index]);
 990         if (check_entry(kind, id, comp_level, decomp, entry)) {
 991           return entry; // Found
 992         }
 993       }
 994       for (int i = mid + 1; i <= h; i++) { // search forward
 995         ix = i * 2;
 996         is = _search_entries[ix];
 997         if (is != id) {
 998           break;
 999         }
1000         index = _search_entries[ix + 1];
1001         SCCEntry* entry = &(_load_entries[index]);
1002         if (check_entry(kind, id, comp_level, decomp, entry)) {
1003           return entry; // Found
1004         }
1005       }
      break; // No matching entry found (different decompile count or not_entrant state).
1007     } else if (is < id) {
1008       l = mid + 1;
1009     } else {
1010       h = mid - 1;
1011     }
1012   }
1013   return nullptr;
1014 }
1015 
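// Mark the entry as not entrant so it is not used again; entries chained via next() (versions
// with class-init barriers) are invalidated as well.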
1016 void SCCache::invalidate_entry(SCCEntry* entry) {
  assert(entry != nullptr, "all entries should be read already");
1018   if (entry->not_entrant()) {
1019     return; // Someone invalidated it already
1020   }
1021 #ifdef ASSERT
1022   bool found = false;
1023   if (_for_read) {
1024     uint count = _load_header->entries_count();
1025     uint i = 0;
1026     for(; i < count; i++) {
1027       if (entry == &(_load_entries[i])) {
1028         break;
1029       }
1030     }
1031     found = (i < count);
1032   }
1033   if (!found && _for_write) {
1034     uint count = _store_entries_cnt;
1035     uint i = 0;
1036     for(; i < count; i++) {
1037       if (entry == &(_store_entries[i])) {
1038         break;
1039       }
1040     }
1041     found = (i < count);
1042   }
1043   assert(found, "entry should exist");
1044 #endif
1045   entry->set_not_entrant();
1046   {
1047     uint name_offset = entry->offset() + entry->name_offset();
1048     const char* name;
1049     if (SCCache::is_loaded(entry)) {
1050       name = _load_buffer + name_offset;
1051     } else {
1052       name = _store_buffer + name_offset;
1053     }
1054     uint level   = entry->comp_level();
1055     uint comp_id = entry->comp_id();
1056     uint decomp  = entry->decompile();
1057     bool clinit_brs = entry->has_clinit_barriers();
1058     log_info(scc, nmethod)("Invalidated entry for '%s' (comp_id %d, comp_level %d, decomp: %d, hash: " UINT32_FORMAT_X_0 "%s)",
1059                            name, comp_id, level, decomp, entry->id(), (clinit_brs ? ", has clinit barriers" : ""));
1060   }
1061   if (entry->next() != nullptr) {
1062     entry = entry->next();
1063     assert(entry->has_clinit_barriers(), "expecting only such entries here");
1064     invalidate_entry(entry);
1065   }
1066 }
1067 
1068 extern "C" {
1069   static int uint_cmp(const void *i, const void *j) {
1070     uint a = *(uint *)i;
1071     uint b = *(uint *)j;
1072     return a > b ? 1 : a < b ? -1 : 0;
1073   }
1074 }
1075 
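// Assemble the final cache image in a temporary buffer - header, JVM version string, code of
// surviving old and new entries, C strings, preload entry index, sorted [id, index] search
// table and the SCCEntry table - and write it to the cache file.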
1076 bool SCCache::finish_write() {
1077   if (!align_write()) {
1078     return false;
1079   }
1080   uint strings_offset = _write_position;
1081   int strings_count = store_strings();
1082   if (strings_count < 0) {
1083     return false;
1084   }
1085   if (!align_write()) {
1086     return false;
1087   }
1088   uint strings_size = _write_position - strings_offset;
1089 
1090   uint entries_count = 0; // Number of entrant (useful) code entries
1091   uint entries_offset = _write_position;
1092 
1093   uint store_count = _store_entries_cnt;
1094   if (store_count > 0) {
1095     uint header_size = (uint)align_up(sizeof(SCCHeader),  DATA_ALIGNMENT);
1096     const char* vm_version = VM_Version::internal_vm_info_string();
1097     uint vm_version_size = (uint)align_up(strlen(vm_version) + 1, DATA_ALIGNMENT);
1098     uint load_count = (_load_header != nullptr) ? _load_header->entries_count() : 0;
1099     uint code_count = store_count + load_count;
1100     uint search_count = code_count * 2;
1101     uint search_size = search_count * sizeof(uint);
1102     uint entries_size = (uint)align_up(code_count * sizeof(SCCEntry), DATA_ALIGNMENT); // In bytes
1103     uint preload_entries_cnt = 0;
1104     uint* preload_entries = NEW_C_HEAP_ARRAY(uint, code_count, mtCode);
1105     uint preload_entries_size = code_count * sizeof(uint);
1106     // _write_position should include code and strings
1107     uint code_alignment = code_count * DATA_ALIGNMENT; // We align_up code size when storing it.
1108     uint total_size = _write_position + _load_size + header_size + vm_version_size +
1109                      code_alignment + search_size + preload_entries_size + entries_size;
1110 
1111     // Create ordered search table for entries [id, index];
1112     uint* search = NEW_C_HEAP_ARRAY(uint, search_count, mtCode);
1113     char* buffer = NEW_C_HEAP_ARRAY(char, total_size + DATA_ALIGNMENT, mtCode);
1114     char* start = align_up(buffer, DATA_ALIGNMENT);
1115     char* current = start + header_size; // Skip header
1116     uint jvm_version_offset = current - start;
1117     copy_bytes(vm_version, (address)current, (uint)strlen(vm_version) + 1);
1118     current += vm_version_size;
1119 
1120     SCCEntry* entries_address = _store_entries; // Pointer to latest entry
1121     uint not_entrant_nb = 0;
1122     uint max_size = 0;
1123     // Add old entries first
1124     if (_for_read && (_load_header != nullptr)) {
1125       for(uint i = 0; i < load_count; i++) {
1126         if (_load_entries[i].load_fail()) {
1127           continue;
1128         }
1129         if (_load_entries[i].not_entrant()) {
1130           log_info(scc, exit)("Not entrant load entry id: %d, decomp: %d, hash: " UINT32_FORMAT_X_0, i, _load_entries[i].decompile(), _load_entries[i].id());
1131           not_entrant_nb++;
1132           if (_load_entries[i].for_preload()) {
1133             // Skip not entrant preload code:
1134             // we can't pre-load code which may have failing dependencies.
1135             continue;
1136           }
1137           _load_entries[i].set_entrant(); // Reset
1138         } else if (_load_entries[i].for_preload() && _load_entries[i].method() != nullptr) {
          // Record the first entrant version of the code for pre-loading
1140           preload_entries[preload_entries_cnt++] = entries_count;
1141         }
1142         {
1143           uint size = align_up(_load_entries[i].size(), DATA_ALIGNMENT);
1144           if (size > max_size) {
1145             max_size = size;
1146           }
1147           copy_bytes((_load_buffer + _load_entries[i].offset()), (address)current, size);
1148           _load_entries[i].set_offset(current - start); // New offset
1149           current += size;
1150           uint n = write_bytes(&(_load_entries[i]), sizeof(SCCEntry));
1151           if (n != sizeof(SCCEntry)) {
1152             FREE_C_HEAP_ARRAY(char, buffer);
1153             FREE_C_HEAP_ARRAY(uint, search);
1154             return false;
1155           }
1156           search[entries_count*2 + 0] = _load_entries[i].id();
1157           search[entries_count*2 + 1] = entries_count;
1158           entries_count++;
1159         }
1160       }
1161     }
    // SCCEntry entries were allocated in reverse order in the store buffer.
    // Process them in reverse so that the code compiled first is stored first.
1164     for (int i = store_count - 1; i >= 0; i--) {
1165       if (entries_address[i].load_fail()) {
1166         continue;
1167       }
1168       if (entries_address[i].not_entrant()) {
1169         log_info(scc, exit)("Not entrant new entry comp_id: %d, comp_level: %d, decomp: %d, hash: " UINT32_FORMAT_X_0 "%s", entries_address[i].comp_id(), entries_address[i].comp_level(), entries_address[i].decompile(), entries_address[i].id(), (entries_address[i].has_clinit_barriers() ? ", has clinit barriers" : ""));
1170         not_entrant_nb++;
1171         if (entries_address[i].for_preload()) {
1172           // Skip not entrant preload code:
1173           // we can't pre-load code which may have failing dependencies.
1174           continue;
1175         }
1176         entries_address[i].set_entrant(); // Reset
1177       } else if (entries_address[i].for_preload() && entries_address[i].method() != nullptr) {
        // Record the first entrant version of the code for pre-loading
1179         preload_entries[preload_entries_cnt++] = entries_count;
1180       }
1181       {
1182         entries_address[i].set_next(nullptr); // clear pointers before storing data
1183         uint size = align_up(entries_address[i].size(), DATA_ALIGNMENT);
1184         if (size > max_size) {
1185           max_size = size;
1186         }
1187         copy_bytes((_store_buffer + entries_address[i].offset()), (address)current, size);
1188         entries_address[i].set_offset(current - start); // New offset
1189         entries_address[i].update_method_for_writing();
1190         current += size;
1191         uint n = write_bytes(&(entries_address[i]), sizeof(SCCEntry));
1192         if (n != sizeof(SCCEntry)) {
1193           FREE_C_HEAP_ARRAY(char, buffer);
1194           FREE_C_HEAP_ARRAY(uint, search);
1195           return false;
1196         }
1197         search[entries_count*2 + 0] = entries_address[i].id();
1198         search[entries_count*2 + 1] = entries_count;
1199         entries_count++;
1200       }
1201     }
1202     if (entries_count == 0) {
      log_info(scc, exit)("No new entries, cache file %s was not %s", _cache_path, (_for_read ? "updated" : "created"));
1204       FREE_C_HEAP_ARRAY(char, buffer);
1205       FREE_C_HEAP_ARRAY(uint, search);
1206       return true; // Nothing to write
1207     }
1208     assert(entries_count <= (store_count + load_count), "%d > (%d + %d)", entries_count, store_count, load_count);
1209     // Write strings
1210     if (strings_count > 0) {
1211       copy_bytes((_store_buffer + strings_offset), (address)current, strings_size);
1212       strings_offset = (current - start); // New offset
1213       current += strings_size;
1214     }
1215     uint preload_entries_offset = (current - start);
1216     preload_entries_size = preload_entries_cnt * sizeof(uint);
1217     if (preload_entries_size > 0) {
1218       copy_bytes((const char*)preload_entries, (address)current, preload_entries_size);
1219       current += preload_entries_size;
1220       log_info(scc, exit)("Wrote %d preload entries to Startup Code Cache '%s'", preload_entries_cnt, _cache_path);
1221     }
1222     if (preload_entries != nullptr) {
1223       FREE_C_HEAP_ARRAY(uint, preload_entries);
1224     }
1225 
1226     uint new_entries_offset = (current - start); // New offset
1227     // Sort and store search table
1228     qsort(search, entries_count, 2*sizeof(uint), uint_cmp);
1229     search_size = 2 * entries_count * sizeof(uint);
1230     copy_bytes((const char*)search, (address)current, search_size);
1231     FREE_C_HEAP_ARRAY(uint, search);
1232     current += search_size;
1233 
1234     // Write entries
1235     entries_size = entries_count * sizeof(SCCEntry); // New size
1236     copy_bytes((_store_buffer + entries_offset), (address)current, entries_size);
1237     current += entries_size;
1238     log_info(scc, exit)("Wrote %d SCCEntry entries (%d were not entrant, %d max size) to Startup Code Cache '%s'", entries_count, not_entrant_nb, max_size, _cache_path);
1239 
1240     uint size = (current - start);
1241     assert(size <= total_size, "%d > %d", size , total_size);
1242 
1243     // Finalize header
1244     SCCHeader* header = (SCCHeader*)start;
1245     header->init(jvm_version_offset, size,
1246                  (uint)strings_count, strings_offset,
1247                  entries_count, new_entries_offset,
1248                  preload_entries_cnt, preload_entries_offset,
1249                  _use_meta_ptrs);
1250     log_info(scc, init)("Wrote header to Startup Code Cache '%s'", _cache_path);
1251 
1252     // Now store to file
1253 #ifdef _WINDOWS  // On Windows, need WRITE permission to remove the file.
1254     chmod(_cache_path, _S_IREAD | _S_IWRITE);
1255 #endif
1256     // Use remove() to delete the existing file because, on Unix, this will
1257     // allow processes that have it open continued access to the file.
1258     remove(_cache_path);
1259     int fd = os::open(_cache_path, O_RDWR | O_CREAT | O_TRUNC | O_BINARY, 0444);
1260     if (fd < 0) {
1261       log_warning(scc, exit)("Unable to create Startup Code Cache file '%s': (%s)", _cache_path, os::strerror(errno));
1262       FREE_C_HEAP_ARRAY(char, buffer);
1263       exit_vm_on_store_failure();
1264       return false;
1265     } else {
1266       log_info(scc, exit)("Opened for write Startup Code Cache '%s'", _cache_path);
1267     }
1268     bool success = os::write(fd, start, (size_t)size);
1269     if (!success) {
1270       log_warning(scc, exit)("Failed to write %d bytes to Startup Code Cache file '%s': (%s)", size, _cache_path, os::strerror(errno));
1271       FREE_C_HEAP_ARRAY(char, buffer);
1272       exit_vm_on_store_failure();
1273       return false;
1274     }
1275     log_info(scc, exit)("Wrote %d bytes to Startup Code Cache '%s'", size, _cache_path);
1276     if (::close(fd) < 0) {
1277       log_warning(scc, exit)("Failed to close for write Startup Code Cache file '%s'", _cache_path);
1278       exit_vm_on_store_failure();
1279     } else {
1280       log_info(scc, exit)("Closed for write Startup Code Cache '%s'", _cache_path);
1281     }
1282     FREE_C_HEAP_ARRAY(char, buffer);
1283   }
1284   return true;
1285 }
1286 
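// Restore a previously cached stub: the cached bytes are copied to the current assembler
// position and the code section end is advanced accordingly.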
1287 bool SCCache::load_stub(StubCodeGenerator* cgen, vmIntrinsicID id, const char* name, address start) {
1288   assert(start == cgen->assembler()->pc(), "wrong buffer");
1289   SCCache* cache = open_for_read();
1290   if (cache == nullptr) {
1291     return false;
1292   }
1293   SCCEntry* entry = cache->find_entry(SCCEntry::Stub, (uint)id);
1294   if (entry == nullptr) {
1295     return false;
1296   }
1297   uint entry_position = entry->offset();
1298   // Read name
1299   uint name_offset = entry->name_offset() + entry_position;
  uint name_size   = entry->name_size(); // Includes '\0'
1301   const char* saved_name = cache->addr(name_offset);
1302   if (strncmp(name, saved_name, (name_size - 1)) != 0) {
1303     log_warning(scc)("Saved stub's name '%s' is different from '%s' for id:%d", saved_name, name, (int)id);
1304     cache->set_failed();
1305     exit_vm_on_load_failure();
1306     return false;
1307   }
1308   log_info(scc,stubs)("Reading stub '%s' id:%d from Startup Code Cache '%s'", name, (int)id, cache->_cache_path);
1309   // Read code
1310   uint code_offset = entry->code_offset() + entry_position;
1311   uint code_size   = entry->code_size();
1312   copy_bytes(cache->addr(code_offset), start, code_size);
1313   cgen->assembler()->code_section()->set_end(start + code_size);
1314   log_info(scc,stubs)("Read stub '%s' id:%d from Startup Code Cache '%s'", name, (int)id, cache->_cache_path);
1315   return true;
1316 }
1317 
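// Store a freshly generated stub: write its code bytes and name into the store buffer and
// create an SCCEntry of kind Stub keyed by the intrinsic id.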
1318 bool SCCache::store_stub(StubCodeGenerator* cgen, vmIntrinsicID id, const char* name, address start) {
1319   SCCache* cache = open_for_write();
1320   if (cache == nullptr) {
1321     return false;
1322   }
1323   log_info(scc, stubs)("Writing stub '%s' id:%d to Startup Code Cache '%s'", name, (int)id, cache->_cache_path);
1324   if (!cache->align_write()) {
1325     return false;
1326   }
1327 #ifdef ASSERT
1328   CodeSection* cs = cgen->assembler()->code_section();
1329   if (cs->has_locs()) {
1330     uint reloc_count = cs->locs_count();
1331     tty->print_cr("======== write stubs code section relocations [%d]:", reloc_count);
1332     // Collect additional data
1333     RelocIterator iter(cs);
1334     while (iter.next()) {
1335       switch (iter.type()) {
1336         case relocInfo::none:
1337           break;
1338         default: {
1339           iter.print_current_on(tty);
1340           fatal("stub's relocation %d unimplemented", (int)iter.type());
1341           break;
1342         }
1343       }
1344     }
1345   }
1346 #endif
1347   uint entry_position = cache->_write_position;
1348 
1349   // Write code
1350   uint code_offset = 0;
1351   uint code_size = cgen->assembler()->pc() - start;
1352   uint n = cache->write_bytes(start, code_size);
1353   if (n != code_size) {
1354     return false;
1355   }
1356   // Write name
1357   uint name_offset = cache->_write_position - entry_position;
1358   uint name_size = (uint)strlen(name) + 1; // Includes terminating '\0'
1359   n = cache->write_bytes(name, name_size);
1360   if (n != name_size) {
1361     return false;
1362   }
1363   uint entry_size = cache->_write_position - entry_position;
1364   SCCEntry* entry = new(cache) SCCEntry(entry_position, entry_size, name_offset, name_size,
1365                                           code_offset, code_size, 0, 0,
1366                                           SCCEntry::Stub, (uint32_t)id);
1367   log_info(scc, stubs)("Wrote stub '%s' id:%d to Startup Code Cache '%s'", name, (int)id, cache->_cache_path);
1368   return true;
1369 }
1370 
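     // Read a klass reference. The record starts with a state word (bit 0: klass was
     // initialized when cached, remaining bits: array dimension), followed either by an
     // offset from SharedBaseAddress (shared/CDS case) or by a length-prefixed class name
     // that is resolved through SystemDictionary using the compiled method's class loader.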
1371 Klass* SCCReader::read_klass(const methodHandle& comp_method, bool shared) {
1372   uint code_offset = read_position();
1373   uint state = *(uint*)addr(code_offset);
1374   uint init_state = (state  & 1);
1375   uint array_dim  = (state >> 1);
1376   code_offset += sizeof(int);
1377   if (_cache->use_meta_ptrs() && shared) {
1378     uint klass_offset = *(uint*)addr(code_offset);
1379     code_offset += sizeof(uint);
1380     set_read_position(code_offset);
1381     Klass* k = (Klass*)((address)SharedBaseAddress + klass_offset);
1382     if (!MetaspaceShared::is_in_shared_metaspace((address)k)) {
1383       // Something changed in CDS
1384       set_lookup_failed();
1385       log_info(scc)("Lookup failed for shared klass: " INTPTR_FORMAT " is not in CDS ", p2i((address)k));
1386       return nullptr;
1387     }
1388     assert(k->is_klass(), "sanity");
1389     ResourceMark rm;
1390     const char* comp_name = comp_method->name_and_sig_as_C_string();
1391     if (k->is_instance_klass() && !InstanceKlass::cast(k)->is_loaded()) {
1392       set_lookup_failed();
1393       log_info(scc)("%d '%s' (L%d): Lookup failed for klass %s: not loaded",
1394                        compile_id(), comp_name, comp_level(), k->external_name());
1395       return nullptr;
1396     } else
1397     // Allow a not-yet-initialized klass if it was also uninitialized during code caching, or when preloading
1398     if (k->is_instance_klass() && !InstanceKlass::cast(k)->is_initialized() && (init_state == 1) && !_preload) {
1399       set_lookup_failed();
1400       log_info(scc)("%d '%s' (L%d): Lookup failed for klass %s: not initialized",
1401                        compile_id(), comp_name, comp_level(), k->external_name());
1402       return nullptr;
1403     }
1404     if (array_dim > 0) {
1405       assert(k->is_instance_klass() || k->is_typeArray_klass(), "sanity check");
1406       Klass* ak = k->array_klass_or_null(array_dim);
1407       // FIXME: what would it take to create an array class on the fly?
1408       // Klass* ak = k->array_klass(dim, JavaThread::current());
1409       // guarantee(JavaThread::current()->pending_exception() == nullptr, "");
1410       if (ak == nullptr) {
1411         set_lookup_failed();
1412         log_info(scc)("%d (L%d): %d-dimension array klass lookup failed: %s",
1413                          compile_id(), comp_level(), array_dim, k->external_name());
             return nullptr; // Avoid logging a successful lookup below
1414       }
1415       log_info(scc)("%d (L%d): Klass lookup: %s (object array)", compile_id(), comp_level(), k->external_name());
1416       return ak;
1417     } else {
1418       log_info(scc)("%d (L%d): Shared klass lookup: %s",
1419                     compile_id(), comp_level(), k->external_name());
1420       return k;
1421     }
1422   }
1423   int name_length = *(int*)addr(code_offset);
1424   code_offset += sizeof(int);
1425   const char* dest = addr(code_offset);
1426   code_offset += name_length + 1;
1427   set_read_position(code_offset);
1428   TempNewSymbol klass_sym = SymbolTable::probe(&(dest[0]), name_length);
1429   if (klass_sym == nullptr) {
1430     set_lookup_failed();
1431     log_info(scc)("%d (L%d): Probe failed for class %s",
1432                      compile_id(), comp_level(), &(dest[0]));
1433     return nullptr;
1434   }
1435   // Use class loader of compiled method.
1436   Thread* thread = Thread::current();
1437   Handle loader(thread, comp_method->method_holder()->class_loader());
1438   Handle protection_domain(thread, comp_method->method_holder()->protection_domain());
1439   Klass* k = SystemDictionary::find_instance_or_array_klass(thread, klass_sym, loader, protection_domain);
1440   assert(!thread->has_pending_exception(), "should not throw");
1441   if (k == nullptr && !loader.is_null()) {
1442     // Try default loader and domain
1443     k = SystemDictionary::find_instance_or_array_klass(thread, klass_sym, Handle(), Handle());
1444     assert(!thread->has_pending_exception(), "should not throw");
1445   }
1446   if (k != nullptr) {
1447     // Allow a not-yet-initialized klass if it was also uninitialized during code caching
1448     if (k->is_instance_klass() && !InstanceKlass::cast(k)->is_initialized() && (init_state == 1)) {
1449       set_lookup_failed();
1450       log_info(scc)("%d (L%d): Lookup failed for klass %s: not initialized", compile_id(), comp_level(), &(dest[0]));
1451       return nullptr;
1452     }
1453     log_info(scc)("%d (L%d): Klass lookup %s", compile_id(), comp_level(), k->external_name());
1454   } else {
1455     set_lookup_failed();
1456     log_info(scc)("%d (L%d): Lookup failed for class %s", compile_id(), comp_level(), &(dest[0]));
1457     return nullptr;
1458   }
1459   return k;
1460 }
1461 
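     // Read a method reference: either an offset from SharedBaseAddress (shared/CDS case)
     // or length-prefixed holder/name/signature strings resolved against the compiled
     // method's class loader.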
1462 Method* SCCReader::read_method(const methodHandle& comp_method, bool shared) {
1463   uint code_offset = read_position();
1464   if (_cache->use_meta_ptrs() && shared) {
1465     uint method_offset = *(uint*)addr(code_offset);
1466     code_offset += sizeof(uint);
1467     set_read_position(code_offset);
1468     Method* m = (Method*)((address)SharedBaseAddress + method_offset);
1469     if (!MetaspaceShared::is_in_shared_metaspace((address)m)) {
1470       // Something changed in CDS
1471       set_lookup_failed();
1472       log_info(scc)("Lookup failed for shared method: " INTPTR_FORMAT " is not in CDS ", p2i((address)m));
1473       return nullptr;
1474     }
1475     assert(m->is_method(), "sanity");
1476     ResourceMark rm;
1477     const char* comp_name = comp_method->name_and_sig_as_C_string();
1478     Klass* k = m->method_holder();
1479     if (!k->is_instance_klass()) {
1480       set_lookup_failed();
1481       log_info(scc)("%d '%s' (L%d): Lookup failed for holder %s: not instance klass", compile_id(), comp_name, comp_level(), k->external_name());
1482       return nullptr;
1483     } else if (!MetaspaceShared::is_in_shared_metaspace((address)k)) {
1484       set_lookup_failed();
1485       log_info(scc)("%d '%s' (L%d): Lookup failed for holder %s: not in CDS", compile_id(), comp_name, comp_level(), k->external_name());
1486       return nullptr;
1487     } else if (!InstanceKlass::cast(k)->is_loaded()) {
1488       set_lookup_failed();
1489       log_info(scc)("%d '%s' (L%d): Lookup failed for holder %s: not loaded", compile_id(), comp_name, comp_level(), k->external_name());
1490       return nullptr;
1491     } else if (!InstanceKlass::cast(k)->is_linked()) {
1492       set_lookup_failed();
1493       log_info(scc)("%d '%s' (L%d): Lookup failed for holder %s: not linked%s", compile_id(), comp_name, comp_level(), k->external_name(), (_preload ? " for code preload" : ""));
1494       return nullptr;
1495     }
1496     log_info(scc)("%d (L%d): Shared method lookup: %s", compile_id(), comp_level(), m->name_and_sig_as_C_string());
1497     return m;
1498   }
1499   int holder_length = *(int*)addr(code_offset);
1500   code_offset += sizeof(int);
1501   int name_length = *(int*)addr(code_offset);
1502   code_offset += sizeof(int);
1503   int signat_length = *(int*)addr(code_offset);
1504   code_offset += sizeof(int);
1505 
1506   const char* dest = addr(code_offset);
1507   code_offset += holder_length + 1 + name_length + 1 + signat_length + 1;
1508   set_read_position(code_offset);
1509   TempNewSymbol klass_sym = SymbolTable::probe(&(dest[0]), holder_length);
1510   if (klass_sym == nullptr) {
1511     set_lookup_failed();
1512     log_info(scc)("%d (L%d): Probe failed for class %s", compile_id(), comp_level(), &(dest[0]));
1513     return nullptr;
1514   }
1515   // Use class loader of compiled method.
1516   Thread* thread = Thread::current();
1517   Handle loader(thread, comp_method->method_holder()->class_loader());
1518   Handle protection_domain(thread, comp_method->method_holder()->protection_domain());
1519   Klass* k = SystemDictionary::find_instance_or_array_klass(thread, klass_sym, loader, protection_domain);
1520   assert(!thread->has_pending_exception(), "should not throw");
1521   if (k == nullptr && !loader.is_null()) {
1522     // Try default loader and domain
1523     k = SystemDictionary::find_instance_or_array_klass(thread, klass_sym, Handle(), Handle());
1524     assert(!thread->has_pending_exception(), "should not throw");
1525   }
1526   if (k != nullptr) {
1527     if (!k->is_instance_klass()) {
1528       set_lookup_failed();
1529       log_info(scc)("%d (L%d): Lookup failed for holder %s: not instance klass",
1530                        compile_id(), comp_level(), &(dest[0]));
1531       return nullptr;
1532     } else if (!InstanceKlass::cast(k)->is_linked()) {
1533       set_lookup_failed();
1534       log_info(scc)("%d (L%d): Lookup failed for holder %s: not linked",
1535                        compile_id(), comp_level(), &(dest[0]));
1536       return nullptr;
1537     }
1538     log_info(scc)("%d (L%d): Holder lookup: %s", compile_id(), comp_level(), k->external_name());
1539   } else {
1540     set_lookup_failed();
1541     log_info(scc)("%d (L%d): Lookup failed for holder %s",
1542                   compile_id(), comp_level(), &(dest[0]));
1543     return nullptr;
1544   }
1545   TempNewSymbol name_sym = SymbolTable::probe(&(dest[holder_length + 1]), name_length);
1546   int pos = holder_length + 1 + name_length + 1;
1547   TempNewSymbol sign_sym = SymbolTable::probe(&(dest[pos]), signat_length);
1548   if (name_sym == nullptr) {
1549     set_lookup_failed();
1550     log_info(scc)("%d (L%d): Probe failed for method name %s",
1551                      compile_id(), comp_level(), &(dest[holder_length + 1]));
1552     return nullptr;
1553   }
1554   if (sign_sym == nullptr) {
1555     set_lookup_failed();
1556     log_info(scc)("%d (L%d): Probe failed for method signature %s",
1557                      compile_id(), comp_level(), &(dest[pos]));
1558     return nullptr;
1559   }
1560   Method* m = InstanceKlass::cast(k)->find_method(name_sym, sign_sym);
1561   if (m != nullptr) {
1562     ResourceMark rm;
1563     log_info(scc)("%d (L%d): Method lookup: %s", compile_id(), comp_level(), m->name_and_sig_as_C_string());
1564   } else {
1565     set_lookup_failed();
1566     log_info(scc)("%d (L%d): Lookup failed for method %s::%s%s",
1567                      compile_id(), comp_level(), &(dest[0]), &(dest[holder_length + 1]), &(dest[pos]));
1568     return nullptr;
1569   }
1570   return m;
1571 }
1572 
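     // Write a klass reference in the format expected by SCCReader::read_klass():
     // DataKind tag, state word (init bit and array dimension), then either the klass's
     // offset from the shared address base or its length-prefixed name.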
1573 bool SCCache::write_klass(Klass* klass) {
1574   if (klass->is_hidden()) { // Hidden classes cannot be cached; skip this nmethod
1575     set_lookup_failed();
1576     return false;
1577   }
1578   bool can_use_meta_ptrs = _use_meta_ptrs;
1579   uint array_dim = 0;
1580   if (klass->is_objArray_klass()) {
1581     array_dim = ObjArrayKlass::cast(klass)->dimension();
1582     klass     = ObjArrayKlass::cast(klass)->bottom_klass(); // overwrites klass
1583   }
1584   uint init_state = 0;
1585   if (klass->is_instance_klass()) {
1586     InstanceKlass* ik = InstanceKlass::cast(klass);
1587     ClassLoaderData* cld = ik->class_loader_data();
1588     if (!cld->is_builtin_class_loader_data()) {
1589       set_lookup_failed();
1590       return false;
1591     }
1592     if (_for_preload && !CDSAccess::can_generate_cached_code(ik)) {
1593       _for_preload = false;
1594       // Bail out if the code has clinit barriers:
1595       // the method will be recompiled without them in any case
1596       if (_has_clinit_barriers) {
1597         set_lookup_failed();
1598         return false;
1599       }
1600       can_use_meta_ptrs = false;
1601     }
1602     init_state = (ik->is_initialized() ? 1 : 0);
1603   }
1604   ResourceMark rm;
1605   uint state = (array_dim << 1) | (init_state & 1);
1606   if (can_use_meta_ptrs && CDSAccess::can_generate_cached_code(klass)) {
1607     DataKind kind = DataKind::Klass_Shared;
1608     uint n = write_bytes(&kind, sizeof(int));
1609     if (n != sizeof(int)) {
1610       return false;
1611     }
1612     // Record state of instance klass initialization.
1613     n = write_bytes(&state, sizeof(int));
1614     if (n != sizeof(int)) {
1615       return false;
1616     }
1617     uint klass_offset = CDSAccess::delta_from_shared_address_base((address)klass);
1618     n = write_bytes(&klass_offset, sizeof(uint));
1619     if (n != sizeof(uint)) {
1620       return false;
1621     }
1622     log_info(scc)("%d (L%d): Wrote shared klass: %s%s%s @ 0x%08x", compile_id(), comp_level(), klass->external_name(),
1623                   (!klass->is_instance_klass() ? "" : (init_state == 1 ? " (initialized)" : " (not-initialized)")),
1624                   (array_dim > 0 ? " (object array)" : ""),
1625                   klass_offset);
1626     return true;
1627   }
1628   // Bail out if the code has clinit barriers:
1629   // the method will be recompiled without them in any case
1630   if (_for_preload && _has_clinit_barriers) {
1631     set_lookup_failed();
1632     return false;
1633   }
1634   _for_preload = false;
1635   log_info(scc,cds)("%d (L%d): Not shared klass: %s", compile_id(), comp_level(), klass->external_name());
1636   DataKind kind = DataKind::Klass;
1637   uint n = write_bytes(&kind, sizeof(int));
1638   if (n != sizeof(int)) {
1639     return false;
1640   }
1641   // Record state of instance klass initialization.
1642   n = write_bytes(&state, sizeof(int));
1643   if (n != sizeof(int)) {
1644     return false;
1645   }
1646   Symbol* name = klass->name();
1647   int name_length = name->utf8_length();
1648   int total_length = name_length + 1;
1649   char* dest = NEW_RESOURCE_ARRAY(char, total_length);
1650   name->as_C_string(dest, total_length);
1651   dest[total_length - 1] = '\0';
1652   LogTarget(Info, scc, loader) log;
1653   if (log.is_enabled()) {
1654     LogStream ls(log);
1655     oop loader = klass->class_loader();
1656     oop domain = klass->protection_domain();
1657     ls.print("Class %s loader: ", dest);
1658     if (loader == nullptr) {
1659       ls.print("nullptr");
1660     } else {
1661       loader->print_value_on(&ls);
1662     }
1663     ls.print(" domain: ");
1664     if (domain == nullptr) {
1665       ls.print("nullptr");
1666     } else {
1667       domain->print_value_on(&ls);
1668     }
1669     ls.cr();
1670   }
1671   n = write_bytes(&name_length, sizeof(int));
1672   if (n != sizeof(int)) {
1673     return false;
1674   }
1675   n = write_bytes(dest, total_length);
1676   if (n != (uint)total_length) {
1677     return false;
1678   }
1679   log_info(scc)("%d (L%d): Wrote klass: %s%s%s",
1680                 compile_id(), comp_level(),
1681                 dest, (!klass->is_instance_klass() ? "" : (init_state == 1 ? " (initialized)" : " (not-initialized)")),
1682                 (array_dim > 0 ? " (object array)" : ""));
1683   return true;
1684 }
1685 
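     // Write a method reference in the format expected by SCCReader::read_method():
     // DataKind tag, then either the method's offset from the shared address base or the
     // lengths and characters of its holder, name and signature.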
1686 bool SCCache::write_method(Method* method) {
1687   bool can_use_meta_ptrs = _use_meta_ptrs;
1688   Klass* klass = method->method_holder();
1689   if (klass->is_instance_klass()) {
1690     InstanceKlass* ik = InstanceKlass::cast(klass);
1691     ClassLoaderData* cld = ik->class_loader_data();
1692     if (!cld->is_builtin_class_loader_data()) {
1693       set_lookup_failed();
1694       return false;
1695     }
1696     if (_for_preload && !CDSAccess::can_generate_cached_code(ik)) {
1697       _for_preload = false;
1698       // Bail out if the code has clinit barriers:
1699       // the method will be recompiled without them in any case
1700       if (_has_clinit_barriers) {
1701         set_lookup_failed();
1702         return false;
1703       }
1704       can_use_meta_ptrs = false;
1705     }
1706   }
1707   ResourceMark rm;
1708   if (can_use_meta_ptrs && CDSAccess::can_generate_cached_code(method)) {
1709     DataKind kind = DataKind::Method_Shared;
1710     uint n = write_bytes(&kind, sizeof(int));
1711     if (n != sizeof(int)) {
1712       return false;
1713     }
1714     uint method_offset = CDSAccess::delta_from_shared_address_base((address)method);
1715     n = write_bytes(&method_offset, sizeof(uint));
1716     if (n != sizeof(uint)) {
1717       return false;
1718     }
1719     log_info(scc)("%d (L%d): Wrote shared method: %s @ 0x%08x", compile_id(), comp_level(), method->name_and_sig_as_C_string(), method_offset);
1720     return true;
1721   }
1722   // Bail out if the code has clinit barriers:
1723   // the method will be recompiled without them in any case
1724   if (_for_preload && _has_clinit_barriers) {
1725     set_lookup_failed();
1726     return false;
1727   }
1728   _for_preload = false;
1729   log_info(scc,cds)("%d (L%d): Not shared method: %s", compile_id(), comp_level(), method->name_and_sig_as_C_string());
1730   if (method->is_hidden()) { // Hidden methods cannot be cached; skip this nmethod
1731     set_lookup_failed();
1732     return false;
1733   }
1734   DataKind kind = DataKind::Method;
1735   uint n = write_bytes(&kind, sizeof(int));
1736   if (n != sizeof(int)) {
1737     return false;
1738   }
1739   Symbol* name   = method->name();
1740   Symbol* holder = method->klass_name();
1741   Symbol* signat = method->signature();
1742   int name_length   = name->utf8_length();
1743   int holder_length = holder->utf8_length();
1744   int signat_length = signat->utf8_length();
1745 
1746   // Write sizes and strings
1747   int total_length = holder_length + 1 + name_length + 1 + signat_length + 1;
1748   char* dest = NEW_RESOURCE_ARRAY(char, total_length);
1749   holder->as_C_string(dest, total_length);
1750   dest[holder_length] = '\0';
1751   int pos = holder_length + 1;
1752   name->as_C_string(&(dest[pos]), (total_length - pos));
1753   pos += name_length;
1754   dest[pos++] = '\0';
1755   signat->as_C_string(&(dest[pos]), (total_length - pos));
1756   dest[total_length - 1] = '\0';
1757 
1758   LogTarget(Info, scc, loader) log;
1759   if (log.is_enabled()) {
1760     LogStream ls(log);
1761     oop loader = klass->class_loader();
1762     oop domain = klass->protection_domain();
1763     ls.print("Holder %s loader: ", dest);
1764     if (loader == nullptr) {
1765       ls.print("nullptr");
1766     } else {
1767       loader->print_value_on(&ls);
1768     }
1769     ls.print(" domain: ");
1770     if (domain == nullptr) {
1771       ls.print("nullptr");
1772     } else {
1773       domain->print_value_on(&ls);
1774     }
1775     ls.cr();
1776   }
1777 
1778   n = write_bytes(&holder_length, sizeof(int));
1779   if (n != sizeof(int)) {
1780     return false;
1781   }
1782   n = write_bytes(&name_length, sizeof(int));
1783   if (n != sizeof(int)) {
1784     return false;
1785   }
1786   n = write_bytes(&signat_length, sizeof(int));
1787   if (n != sizeof(int)) {
1788     return false;
1789   }
1790   n = write_bytes(dest, total_length);
1791   if (n != (uint)total_length) {
1792     return false;
1793   }
1794   dest[holder_length] = ' ';
1795   dest[holder_length + 1 + name_length] = ' ';
1796   log_info(scc)("%d (L%d): Wrote method: %s", compile_id(), comp_level(), dest);
1797   return true;
1798 }
1799 
1800 // Repair the pc relative information in the code after load
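     // For each code section the cache holds: the relocation count, the _locs_point
     // offset, the raw relocInfo records, and one uint of auxiliary data per relocation
     // (used to restore immediate oops/metadata and call/external addresses).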
1801 bool SCCReader::read_relocations(CodeBuffer* buffer, CodeBuffer* orig_buffer,
1802                                  OopRecorder* oop_recorder, ciMethod* target) {
1803   bool success = true;
1804   for (int i = 0; i < (int)CodeBuffer::SECT_LIMIT; i++) {
1805     uint code_offset = read_position();
1806     int reloc_count = *(int*)addr(code_offset);
1807     code_offset += sizeof(int);
1808     if (reloc_count == 0) {
1809       set_read_position(code_offset);
1810       continue;
1811     }
1812     // Read _locs_point (as offset from start)
1813     int locs_point_off = *(int*)addr(code_offset);
1814     code_offset += sizeof(int);
1815     uint reloc_size = reloc_count * sizeof(relocInfo);
1816     CodeSection* cs  = buffer->code_section(i);
1817     if (cs->locs_capacity() < reloc_count) {
1818       cs->expand_locs(reloc_count);
1819     }
1820     relocInfo* reloc_start = cs->locs_start();
1821     copy_bytes(addr(code_offset), (address)reloc_start, reloc_size);
1822     code_offset += reloc_size;
1823     cs->set_locs_end(reloc_start + reloc_count);
1824     cs->set_locs_point(cs->start() + locs_point_off);
1825 
1826     // Read additional relocation data: uint per relocation
1827     uint  data_size  = reloc_count * sizeof(uint);
1828     uint* reloc_data = (uint*)addr(code_offset);
1829     code_offset += data_size;
1830     set_read_position(code_offset);
1831     LogStreamHandle(Info, scc, reloc) log;
1832     if (log.is_enabled()) {
1833       log.print_cr("======== read code section %d relocations [%d]:", i, reloc_count);
1834     }
1835     RelocIterator iter(cs);
1836     int j = 0;
1837     while (iter.next()) {
1838       switch (iter.type()) {
1839         case relocInfo::none:
1840           break;
1841         case relocInfo::oop_type: {
1842           VM_ENTRY_MARK;
1843           oop_Relocation* r = (oop_Relocation*)iter.reloc();
1844           if (r->oop_is_immediate()) {
1845             assert(reloc_data[j] == (uint)j, "should be");
1846             methodHandle comp_method(THREAD, target->get_Method());
1847             jobject jo = read_oop(THREAD, comp_method);
1848             if (lookup_failed()) {
1849               success = false;
1850               break;
1851             }
1852             r->set_value((address)jo);
1853           } else if (false) {
1854             // Get already updated value from OopRecorder.
1855             assert(oop_recorder != nullptr, "sanity");
1856             int index = r->oop_index();
1857             jobject jo = oop_recorder->oop_at(index);
1858             oop obj = JNIHandles::resolve(jo);
1859             r->set_value(*reinterpret_cast<address*>(&obj));
1860           }
1861           break;
1862         }
1863         case relocInfo::metadata_type: {
1864           VM_ENTRY_MARK;
1865           metadata_Relocation* r = (metadata_Relocation*)iter.reloc();
1866           Metadata* m;
1867           if (r->metadata_is_immediate()) {
1868             assert(reloc_data[j] == (uint)j, "should be");
1869             methodHandle comp_method(THREAD, target->get_Method());
1870             m = read_metadata(comp_method);
1871             if (lookup_failed()) {
1872               success = false;
1873               break;
1874             }
1875           } else {
1876             // Get already updated value from OopRecorder.
1877             assert(oop_recorder != nullptr, "sanity");
1878             int index = r->metadata_index();
1879             m = oop_recorder->metadata_at(index);
1880           }
1881           r->set_value((address)m);
1882           break;
1883         }
1884         case relocInfo::virtual_call_type:   // Fall through. They all call resolve_*_call blobs.
1885         case relocInfo::opt_virtual_call_type:
1886         case relocInfo::static_call_type: {
1887           address dest = _cache->address_for_id(reloc_data[j]);
1888           if (dest != (address)-1) {
1889             ((CallRelocation*)iter.reloc())->set_destination(dest);
1890           }
1891           break;
1892         }
1893         case relocInfo::trampoline_stub_type: {
1894           address dest = _cache->address_for_id(reloc_data[j]);
1895           if (dest != (address)-1) {
1896             ((trampoline_stub_Relocation*)iter.reloc())->set_destination(dest);
1897           }
1898           break;
1899         }
1900         case relocInfo::static_stub_type:
1901           break;
1902         case relocInfo::runtime_call_type: {
1903           address dest = _cache->address_for_id(reloc_data[j]);
1904           if (dest != (address)-1) {
1905             ((CallRelocation*)iter.reloc())->set_destination(dest);
1906           }
1907           break;
1908         }
1909         case relocInfo::runtime_call_w_cp_type:
1910           fatal("runtime_call_w_cp_type unimplemented");
1911           //address destination = iter.reloc()->value();
1912           break;
1913         case relocInfo::external_word_type: {
1914           address target = _cache->address_for_id(reloc_data[j]);
1915           // Add external address to global table
1916           int index = ExternalsRecorder::find_index(target);
1917           // Update index in relocation
1918           Relocation::add_jint(iter.data(), index);
1919           external_word_Relocation* reloc = (external_word_Relocation*)iter.reloc();
1920           assert(reloc->target() == target, "sanity");
1921           reloc->set_value(target); // Patch address in the code
1922           iter.reloc()->fix_relocation_after_move(orig_buffer, buffer);
1923           break;
1924         }
1925         case relocInfo::internal_word_type:
1926           iter.reloc()->fix_relocation_after_move(orig_buffer, buffer);
1927           break;
1928         case relocInfo::section_word_type:
1929           iter.reloc()->fix_relocation_after_move(orig_buffer, buffer);
1930           break;
1931         case relocInfo::poll_type:
1932           break;
1933         case relocInfo::poll_return_type:
1934           break;
1935         case relocInfo::post_call_nop_type:
1936           break;
1937         case relocInfo::entry_guard_type:
1938           break;
1939         default:
1940           fatal("relocation %d unimplemented", (int)iter.type());
1941           break;
1942       }
1943       if (success && log.is_enabled()) {
1944         iter.print_current_on(&log);
1945       }
1946       j++;
1947     }
1948     assert(j <= (int)reloc_count, "sanity");
1949   }
1950   return success;
1951 }
1952 
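     // Copy the cached code sections into the new CodeBuffer and initialize a fake
     // "original" buffer with the sections' saved addresses so relocations can be fixed
     // up relative to their original positions.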
1953 bool SCCReader::read_code(CodeBuffer* buffer, CodeBuffer* orig_buffer, uint code_offset) {
1954   assert(code_offset == align_up(code_offset, DATA_ALIGNMENT), "%d not aligned to %d", code_offset, DATA_ALIGNMENT);
1955   assert(buffer->blob() != nullptr, "sanity");
1956   SCCodeSection* scc_cs = (SCCodeSection*)addr(code_offset);
1957   for (int i = 0; i < (int)CodeBuffer::SECT_LIMIT; i++) {
1958     CodeSection* cs = buffer->code_section(i);
1959     // Read original section size and address.
1960     uint orig_size = scc_cs[i]._size;
1961     log_debug(scc)("======== read code section %d [%d]:", i, orig_size);
1962     uint orig_size_align = align_up(orig_size, DATA_ALIGNMENT);
1963     if (i != (int)CodeBuffer::SECT_INSTS) {
1964       buffer->initialize_section_size(cs, orig_size_align);
1965     }
1966     if (orig_size_align > (uint)cs->capacity()) { // Will not fit
1967       log_info(scc)("%d (L%d): original code section %d size %d > current capacity %d",
1968                        compile_id(), comp_level(), i, orig_size, cs->capacity());
1969       return false;
1970     }
1971     if (orig_size == 0) {
1972       assert(cs->size() == 0, "should match");
1973       continue;  // skip trivial section
1974     }
1975     address orig_start = scc_cs[i]._origin_address;
1976 
1977     // Populate fake original buffer (no code allocation in CodeCache).
1978     // It is used by relocations to compute the delta between section addresses.
1979     CodeSection* orig_cs = orig_buffer->code_section(i);
1980     assert(!orig_cs->is_allocated(), "This %d section should not be set", i);
1981     orig_cs->initialize(orig_start, orig_size);
1982 
1983     // Load code to new buffer.
1984     address code_start = cs->start();
1985     copy_bytes(addr(scc_cs[i]._offset + code_offset), code_start, orig_size_align);
1986     cs->set_end(code_start + orig_size);
1987   }
1988 
1989   return true;
1990 }
1991 
1992 bool SCCache::load_exception_blob(CodeBuffer* buffer, int* pc_offset) {
1993 #ifdef ASSERT
1994   LogStreamHandle(Debug, scc, nmethod) log;
1995   if (log.is_enabled()) {
1996     FlagSetting fs(PrintRelocations, true);
1997     buffer->print_on(&log);
1998   }
1999 #endif
2000   SCCache* cache = open_for_read();
2001   if (cache == nullptr) {
2002     return false;
2003   }
2004   SCCEntry* entry = cache->find_entry(SCCEntry::Blob, 999);
2005   if (entry == nullptr) {
2006     return false;
2007   }
2008   SCCReader reader(cache, entry, nullptr);
2009   return reader.compile_blob(buffer, pc_offset);
2010 }
2011 
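     // Reconstruct a cached blob: read its pc_offset and name, verify the name against
     // the target buffer, then load the code sections and repair their relocations.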
2012 bool SCCReader::compile_blob(CodeBuffer* buffer, int* pc_offset) {
2013   uint entry_position = _entry->offset();
2014 
2015   // Read pc_offset
2016   *pc_offset = *(int*)addr(entry_position);
2017 
2018   // Read name
2019   uint name_offset = entry_position + _entry->name_offset();
2020   uint name_size = _entry->name_size(); // Includes terminating '\0'
2021   const char* name = addr(name_offset);
2022 
2023   log_info(scc, stubs)("%d (L%d): Reading blob '%s' with pc_offset %d from Startup Code Cache '%s'",
2024                        compile_id(), comp_level(), name, *pc_offset, _cache->cache_path());
2025 
2026   if (strncmp(buffer->name(), name, (name_size - 1)) != 0) {
2027     log_warning(scc)("%d (L%d): Saved blob's name '%s' is different from '%s'",
2028                      compile_id(), comp_level(), name, buffer->name());
2029     ((SCCache*)_cache)->set_failed();
2030     exit_vm_on_load_failure();
2031     return false;
2032   }
2033 
2034   // Create fake original CodeBuffer
2035   CodeBuffer orig_buffer(name);
2036 
2037   // Read code
2038   uint code_offset = entry_position + _entry->code_offset();
2039   if (!read_code(buffer, &orig_buffer, code_offset)) {
2040     return false;
2041   }
2042 
2043   // Read relocations
2044   uint reloc_offset = entry_position + _entry->reloc_offset();
2045   set_read_position(reloc_offset);
2046   if (!read_relocations(buffer, &orig_buffer, nullptr, nullptr)) {
2047     return false;
2048   }
2049 
2050   log_info(scc, stubs)("%d (L%d): Read blob '%s' from Startup Code Cache '%s'",
2051                        compile_id(), comp_level(), name, _cache->cache_path());
2052 #ifdef ASSERT
2053   LogStreamHandle(Debug, scc, nmethod) log;
2054   if (log.is_enabled()) {
2055     FlagSetting fs(PrintRelocations, true);
2056     buffer->print_on(&log);
2057     buffer->decode();
2058   }
2059 #endif
2060   return true;
2061 }
2062 
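     // Write relocation info for every code section in the format read back by
     // SCCReader::read_relocations(): count, _locs_point offset, raw relocInfo records,
     // one uint of auxiliary data per relocation, and any immediate oops/metadata.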
2063 bool SCCache::write_relocations(CodeBuffer* buffer, uint& all_reloc_size) {
2064   uint all_reloc_count = 0;
2065   for (int i = 0; i < (int)CodeBuffer::SECT_LIMIT; i++) {
2066     CodeSection* cs = buffer->code_section(i);
2067     uint reloc_count = cs->has_locs() ? cs->locs_count() : 0;
2068     all_reloc_count += reloc_count;
2069   }
2070   all_reloc_size = all_reloc_count * sizeof(relocInfo);
2071   bool success = true;
2072   uint* reloc_data = NEW_C_HEAP_ARRAY(uint, all_reloc_count, mtCode);
2073   for (int i = 0; i < (int)CodeBuffer::SECT_LIMIT; i++) {
2074     CodeSection* cs = buffer->code_section(i);
2075     int reloc_count = cs->has_locs() ? cs->locs_count() : 0;
2076     uint n = write_bytes(&reloc_count, sizeof(int));
2077     if (n != sizeof(int)) {
2078       success = false;
2079       break;
2080     }
2081     if (reloc_count == 0) {
2082       continue;
2083     }
2084     // Write _locs_point (as offset from start)
2085     int locs_point_off = cs->locs_point_off();
2086     n = write_bytes(&locs_point_off, sizeof(int));
2087     if (n != sizeof(int)) {
2088       success = false;
2089       break;
2090     }
2091     relocInfo* reloc_start = cs->locs_start();
2092     uint reloc_size      = reloc_count * sizeof(relocInfo);
2093     n = write_bytes(reloc_start, reloc_size);
2094     if (n != reloc_size) {
2095       success = false;
2096       break;
2097     }
2098     LogStreamHandle(Info, scc, reloc) log;
2099     if (log.is_enabled()) {
2100       log.print_cr("======== write code section %d relocations [%d]:", i, reloc_count);
2101     }
2102     // Collect additional data
2103     RelocIterator iter(cs);
2104     bool has_immediate = false;
2105     int j = 0;
2106     while (iter.next()) {
2107       reloc_data[j] = 0; // initialize
2108       switch (iter.type()) {
2109         case relocInfo::none:
2110           break;
2111         case relocInfo::oop_type: {
2112           oop_Relocation* r = (oop_Relocation*)iter.reloc();
2113           if (r->oop_is_immediate()) {
2114             reloc_data[j] = (uint)j; // Indication that we need to restore immediate
2115             has_immediate = true;
2116           }
2117           break;
2118         }
2119         case relocInfo::metadata_type: {
2120           metadata_Relocation* r = (metadata_Relocation*)iter.reloc();
2121           if (r->metadata_is_immediate()) {
2122             reloc_data[j] = (uint)j; // Indication that we need to restore immediate
2123             has_immediate = true;
2124           }
2125           break;
2126         }
2127         case relocInfo::virtual_call_type:  // Fall through. They all call resolve_*_call blobs.
2128         case relocInfo::opt_virtual_call_type:
2129         case relocInfo::static_call_type: {
2130           CallRelocation* r = (CallRelocation*)iter.reloc();
2131           address dest = r->destination();
2132           if (dest == r->addr()) { // possible call via trampoline on AArch64
2133             dest = (address)-1;    // do nothing in this case when loading this relocation
2134           }
2135           reloc_data[j] = _table->id_for_address(dest, iter, buffer);
2136           break;
2137         }
2138         case relocInfo::trampoline_stub_type: {
2139           address dest = ((trampoline_stub_Relocation*)iter.reloc())->destination();
2140           reloc_data[j] = _table->id_for_address(dest, iter, buffer);
2141           break;
2142         }
2143         case relocInfo::static_stub_type:
2144           break;
2145         case relocInfo::runtime_call_type: {
2146           // Record offset of runtime destination
2147           CallRelocation* r = (CallRelocation*)iter.reloc();
2148           address dest = r->destination();
2149           if (dest == r->addr()) { // possible call via trampoline on AArch64
2150             dest = (address)-1;    // do nothing in this case when loading this relocation
2151           }
2152           reloc_data[j] = _table->id_for_address(dest, iter, buffer);
2153           break;
2154         }
2155         case relocInfo::runtime_call_w_cp_type:
2156           fatal("runtime_call_w_cp_type unimplemented");
2157           break;
2158         case relocInfo::external_word_type: {
2159           // Record offset of runtime target
2160           address target = ((external_word_Relocation*)iter.reloc())->target();
2161           reloc_data[j] = _table->id_for_address(target, iter, buffer);
2162           break;
2163         }
2164         case relocInfo::internal_word_type:
2165           break;
2166         case relocInfo::section_word_type:
2167           break;
2168         case relocInfo::poll_type:
2169           break;
2170         case relocInfo::poll_return_type:
2171           break;
2172         case relocInfo::post_call_nop_type:
2173           break;
2174         case relocInfo::entry_guard_type:
2175           break;
2176         default:
2177           fatal("relocation %d unimplemented", (int)iter.type());
2178           break;
2179       }
2180       if (log.is_enabled()) {
2181         iter.print_current_on(&log);
2182       }
2183       j++;
2184     }
2185     assert(j <= (int)reloc_count, "sanity");
2186     // Write additional relocation data: uint per relocation
2187     uint data_size = reloc_count * sizeof(uint);
2188     n = write_bytes(reloc_data, data_size);
2189     if (n != data_size) {
2190       success = false;
2191       break;
2192     }
2193     if (has_immediate) {
2194       // Save information about immediates in this Code Section
2195       RelocIterator iter_imm(cs);
2196       int j = 0;
2197       while (iter_imm.next()) {
2198         switch (iter_imm.type()) {
2199           case relocInfo::oop_type: {
2200             oop_Relocation* r = (oop_Relocation*)iter_imm.reloc();
2201             if (r->oop_is_immediate()) {
2202               assert(reloc_data[j] == (uint)j, "should be");
2203               jobject jo = *(jobject*)(r->oop_addr()); // The slot currently holds a jobject handle
2204               if (!write_oop(jo)) {
2205                 success = false;
2206               }
2207             }
2208             break;
2209           }
2210           case relocInfo::metadata_type: {
2211             metadata_Relocation* r = (metadata_Relocation*)iter_imm.reloc();
2212             if (r->metadata_is_immediate()) {
2213               assert(reloc_data[j] == (uint)j, "should be");
2214               Metadata* m = r->metadata_value();
2215               if (!write_metadata(m)) {
2216                 success = false;
2217               }
2218             }
2219             break;
2220           }
2221           default:
2222             break;
2223         }
2224         if (!success) {
2225           break;
2226         }
2227         j++;
2228       } // while (iter_imm.next())
2229     } // if (has_immediate)
2230   } // for(i < SECT_LIMIT)
2231   FREE_C_HEAP_ARRAY(uint, reloc_data);
2232   return success;
2233 }
2234 
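     // Write the CodeBuffer's sections: first a table of SCCodeSection descriptors
     // (size, original address, offset), then the contents of each non-empty section,
     // each aligned to DATA_ALIGNMENT.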
2235 bool SCCache::write_code(CodeBuffer* buffer, uint& code_size) {
2236   assert(_write_position == align_up(_write_position, DATA_ALIGNMENT), "%d not aligned to %d", _write_position, DATA_ALIGNMENT);
2237   //assert(buffer->blob() != nullptr, "sanity");
2238   uint code_offset = _write_position;
2239   uint cb_total_size = (uint)buffer->total_content_size();
2240   // Write information about Code sections first.
2241   SCCodeSection scc_cs[CodeBuffer::SECT_LIMIT];
2242   uint scc_cs_size = (uint)(sizeof(SCCodeSection) * CodeBuffer::SECT_LIMIT);
2243   uint offset = align_up(scc_cs_size, DATA_ALIGNMENT);
2244   uint total_size = 0;
2245   for (int i = 0; i < (int)CodeBuffer::SECT_LIMIT; i++) {
2246     const CodeSection* cs = buffer->code_section(i);
2247     assert(cs->mark() == nullptr, "CodeSection::_mark is not implemented");
2248     uint cs_size = (uint)cs->size();
2249     scc_cs[i]._size = cs_size;
2250     scc_cs[i]._origin_address = (cs_size == 0) ? nullptr : cs->start();
2251     scc_cs[i]._offset = (cs_size == 0) ? 0 : (offset + total_size);
2252     assert(cs->mark() == nullptr, "CodeSection::_mark is not implemented");
2253     total_size += align_up(cs_size, DATA_ALIGNMENT);
2254   }
2255   uint n = write_bytes(scc_cs, scc_cs_size);
2256   if (n != scc_cs_size) {
2257     return false;
2258   }
2259   if (!align_write()) {
2260     return false;
2261   }
2262   assert(_write_position == (code_offset + offset), "%d  != (%d + %d)", _write_position, code_offset, offset);
2263   for (int i = 0; i < (int)CodeBuffer::SECT_LIMIT; i++) {
2264     const CodeSection* cs = buffer->code_section(i);
2265     uint cs_size = (uint)cs->size();
2266     if (cs_size == 0) {
2267       continue;  // skip trivial section
2268     }
2269     assert((_write_position - code_offset) == scc_cs[i]._offset, "%d != %d", _write_position, scc_cs[i]._offset);
2270     // Write code
2271     n = write_bytes(cs->start(), cs_size);
2272     if (n != cs_size) {
2273       return false;
2274     }
2275     if (!align_write()) {
2276       return false;
2277     }
2278   }
2279   assert((_write_position - code_offset) == (offset + total_size), "(%d - %d) != (%d + %d)", _write_position, code_offset, offset, total_size);
2280   code_size = total_size;
2281   return true;
2282 }
2283 
2284 bool SCCache::store_exception_blob(CodeBuffer* buffer, int pc_offset) {
2285   SCCache* cache = open_for_write();
2286   if (cache == nullptr) {
2287     return false;
2288   }
2289   log_info(scc, stubs)("Writing blob '%s' to Startup Code Cache '%s'", buffer->name(), cache->_cache_path);
2290 
2291 #ifdef ASSERT
2292   LogStreamHandle(Debug, scc, nmethod) log;
2293   if (log.is_enabled()) {
2294     FlagSetting fs(PrintRelocations, true);
2295     buffer->print_on(&log);
2296     buffer->decode();
2297   }
2298 #endif
2299   if (!cache->align_write()) {
2300     return false;
2301   }
2302   uint entry_position = cache->_write_position;
2303 
2304   // Write pc_offset
2305   uint n = cache->write_bytes(&pc_offset, sizeof(int));
2306   if (n != sizeof(int)) {
2307     return false;
2308   }
2309 
2310   // Write name
2311   const char* name = buffer->name();
2312   uint name_offset = cache->_write_position - entry_position;
2313   uint name_size = (uint)strlen(name) + 1; // Includes terminating '\0'
2314   n = cache->write_bytes(name, name_size);
2315   if (n != name_size) {
2316     return false;
2317   }
2318 
2319   // Write code section
2320   if (!cache->align_write()) {
2321     return false;
2322   }
2323   uint code_offset = cache->_write_position - entry_position;
2324   uint code_size = 0;
2325   if (!cache->write_code(buffer, code_size)) {
2326     return false;
2327   }
2328   // Write relocInfo array
2329   uint reloc_offset = cache->_write_position - entry_position;
2330   uint reloc_size = 0;
2331   if (!cache->write_relocations(buffer, reloc_size)) {
2332     return false;
2333   }
2334 
2335   uint entry_size = cache->_write_position - entry_position;
2336   SCCEntry* entry = new(cache) SCCEntry(entry_position, entry_size, name_offset, name_size,
2337                                           code_offset, code_size, reloc_offset, reloc_size,
2338                                           SCCEntry::Blob, (uint32_t)999);
2339   log_info(scc, stubs)("Wrote blob '%s' to Startup Code Cache '%s'", name, cache->_cache_path);
2340   return true;
2341 }
2342 
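     // Rebuild a DebugInformationRecorder from the cache: read the debug info stream
     // size and the PcDesc count, then copy the stream bytes and the PcDesc array into a
     // new recorder.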
2343 DebugInformationRecorder* SCCReader::read_debug_info(OopRecorder* oop_recorder) {
2344   uint code_offset = align_up(read_position(), DATA_ALIGNMENT);
2345   int data_size  = *(int*)addr(code_offset);
2346   code_offset   += sizeof(int);
2347   int pcs_length = *(int*)addr(code_offset);
2348   code_offset   += sizeof(int);
2349 
2350   log_debug(scc)("======== read DebugInfo [%d, %d]:", data_size, pcs_length);
2351 
2352   // Aligned initial sizes
2353   int data_size_align  = align_up(data_size, DATA_ALIGNMENT);
2354   int pcs_length_align = pcs_length + 1;
2355   assert(sizeof(PcDesc) > DATA_ALIGNMENT, "sanity");
2356   DebugInformationRecorder* recorder = new DebugInformationRecorder(oop_recorder, data_size_align, pcs_length);
2357 
2358   copy_bytes(addr(code_offset), recorder->stream()->buffer(), data_size_align);
2359   recorder->stream()->set_position(data_size);
2360   code_offset += data_size;
2361 
2362   uint pcs_size = pcs_length * sizeof(PcDesc);
2363   copy_bytes(addr(code_offset), (address)recorder->pcs(), pcs_size);
2364   code_offset += pcs_size;
2365   set_read_position(code_offset);
2366   return recorder;
2367 }
2368 
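     // Write the debug info stream and PcDesc array in the format read back by
     // SCCReader::read_debug_info(); sizes are taken without freezing the OopRecorder.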
2369 bool SCCache::write_debug_info(DebugInformationRecorder* recorder) {
2370   if (!align_write()) {
2371     return false;
2372   }
2373   // Don't call data_size() and pcs_size(). They will freeze OopRecorder.
2374   int data_size = recorder->stream()->position(); // In bytes
2375   uint n = write_bytes(&data_size, sizeof(int));
2376   if (n != sizeof(int)) {
2377     return false;
2378   }
2379   int pcs_length = recorder->pcs_length(); // Number of PcDesc entries
2380   n = write_bytes(&pcs_length, sizeof(int));
2381   if (n != sizeof(int)) {
2382     return false;
2383   }
2384   n = write_bytes(recorder->stream()->buffer(), data_size);
2385   if (n != (uint)data_size) {
2386     return false;
2387   }
2388   uint pcs_size = pcs_length * sizeof(PcDesc);
2389   n = write_bytes(recorder->pcs(), pcs_size);
2390   if (n != pcs_size) {
2391     return false;
2392   }
2393   return true;
2394 }
2395 
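     // Rebuild an OopMapSet: for each map read its data size, copy the OopMap header
     // (preserving the freshly allocated write stream), then copy its data bytes.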
2396 OopMapSet* SCCReader::read_oop_maps() {
2397   uint code_offset = read_position();
2398   int om_count = *(int*)addr(code_offset);
2399   code_offset += sizeof(int);
2400 
2401   log_debug(scc)("======== read oop maps [%d]:", om_count);
2402 
2403   OopMapSet* oop_maps = new OopMapSet(om_count);
2404   for (int i = 0; i < (int)om_count; i++) {
2405     int data_size = *(int*)addr(code_offset);
2406     code_offset += sizeof(int);
2407 
2408     OopMap* oop_map = new OopMap(data_size);
2409     // Preserve allocated stream
2410     CompressedWriteStream* stream = oop_map->write_stream();
2411 
2412     // Read data which overwrites default data
2413     copy_bytes(addr(code_offset), (address)oop_map, sizeof(OopMap));
2414     code_offset += sizeof(OopMap);
2415     stream->set_position(data_size);
2416     oop_map->set_write_stream(stream);
2417     if (data_size > 0) {
2418       copy_bytes(addr(code_offset), (address)(oop_map->data()), (uint)data_size);
2419       code_offset += data_size;
2420     }
2421 #ifdef ASSERT
2422     oop_map->_locs_length = 0;
2423     oop_map->_locs_used   = nullptr;
2424 #endif
2425     oop_maps->add(oop_map);
2426   }
2427   set_read_position(code_offset);
2428   return oop_maps;
2429 }
2430 
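     // Write each OopMap as its data size, its OopMap header, and its data bytes.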
2431 bool SCCache::write_oop_maps(OopMapSet* oop_maps) {
2432   uint om_count = oop_maps->size();
2433   uint n = write_bytes(&om_count, sizeof(int));
2434   if (n != sizeof(int)) {
2435     return false;
2436   }
2437   for (int i = 0; i < (int)om_count; i++) {
2438     OopMap* om = oop_maps->at(i);
2439     int data_size = om->data_size();
2440     n = write_bytes(&data_size, sizeof(int));
2441     if (n != sizeof(int)) {
2442       return false;
2443     }
2444     n = write_bytes(om, sizeof(OopMap));
2445     if (n != sizeof(OopMap)) {
2446       return false;
2447     }
2448     n = write_bytes(om->data(), (uint)data_size);
2449     if (n != (uint)data_size) {
2450       return false;
2451     }
2452   }
2453   return true;
2454 }
2455 
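     // Read one oop described by a DataKind tag: null, the non-oop word, a klass mirror,
     // a primitive type mirror, an archived or interned string, a well-known class
     // loader, or an archived (permanent) heap object. Returns a local JNI handle.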
2456 jobject SCCReader::read_oop(JavaThread* thread, const methodHandle& comp_method) {
2457   uint code_offset = read_position();
2458   oop obj = nullptr;
2459   DataKind kind = *(DataKind*)addr(code_offset);
2460   code_offset += sizeof(DataKind);
2461   set_read_position(code_offset);
2462   if (kind == DataKind::Null) {
2463     return nullptr;
2464   } else if (kind == DataKind::No_Data) {
2465     return (jobject)Universe::non_oop_word();
2466   } else if (kind == DataKind::Klass || kind == DataKind::Klass_Shared) {
2467     Klass* k = read_klass(comp_method, (kind == DataKind::Klass_Shared));
2468     if (k == nullptr) {
2469       return nullptr;
2470     }
2471     obj = k->java_mirror();
2472     if (obj == nullptr) {
2473       set_lookup_failed();
2474       log_info(scc)("Lookup failed for java_mirror of klass %s", k->external_name());
2475       return nullptr;
2476     }
2477   } else if (kind == DataKind::Primitive) {
2478     code_offset = read_position();
2479     int t = *(int*)addr(code_offset);
2480     code_offset += sizeof(int);
2481     set_read_position(code_offset);
2482     BasicType bt = (BasicType)t;
2483     obj = java_lang_Class::primitive_mirror(bt);
2484     log_info(scc)("%d (L%d): Read primitive type klass: %s", compile_id(), comp_level(), type2name(bt));
2485   } else if (kind == DataKind::String_Shared) {
2486     code_offset = read_position();
2487     int k = *(int*)addr(code_offset);
2488     code_offset += sizeof(int);
2489     set_read_position(code_offset);
2490     obj = CDSAccess::get_archived_object(k);
2491   } else if (kind == DataKind::String) {
2492     code_offset = read_position();
2493     int length = *(int*)addr(code_offset);
2494     code_offset += sizeof(int);
2495     set_read_position(code_offset);
2496     const char* dest = addr(code_offset);
2497     set_read_position(code_offset + length);
2498     obj = StringTable::intern(&(dest[0]), thread);
2499     if (obj == nullptr) {
2500       set_lookup_failed();
2501       log_info(scc)("%d (L%d): Lookup failed for String %s",
2502                        compile_id(), comp_level(), &(dest[0]));
2503       return nullptr;
2504     }
2505     assert(java_lang_String::is_instance(obj), "must be string");
2506     log_info(scc)("%d (L%d): Read String: %s", compile_id(), comp_level(), dest);
2507   } else if (kind == DataKind::SysLoader) {
2508     obj = SystemDictionary::java_system_loader();
2509     log_info(scc)("%d (L%d): Read java_system_loader", compile_id(), comp_level());
2510   } else if (kind == DataKind::PlaLoader) {
2511     obj = SystemDictionary::java_platform_loader();
2512     log_info(scc)("%d (L%d): Read java_platform_loader", compile_id(), comp_level());
2513   } else if (kind == DataKind::MH_Oop_Shared) {
2514     code_offset = read_position();
2515     int k = *(int*)addr(code_offset);
2516     code_offset += sizeof(int);
2517     set_read_position(code_offset);
2518     obj = CDSAccess::get_archived_object(k);
2519   } else {
2520     set_lookup_failed();
2521     log_info(scc)("%d (L%d): Unknown oop's kind: %d",
2522                      compile_id(), comp_level(), (int)kind);
2523     return nullptr;
2524   }
2525   return JNIHandles::make_local(thread, obj);
2526 }
2527 
2528 bool SCCReader::read_oops(OopRecorder* oop_recorder, ciMethod* target) {
2529   uint code_offset = read_position();
2530   int oop_count = *(int*)addr(code_offset);
2531   code_offset += sizeof(int);
2532   set_read_position(code_offset);
2533   log_debug(scc)("======== read oops [%d]:", oop_count);
2534   if (oop_count == 0) {
2535     return true;
2536   }
2537   {
2538     VM_ENTRY_MARK;
2539     methodHandle comp_method(THREAD, target->get_Method());
2540     for (int i = 1; i < oop_count; i++) {
2541       jobject jo = read_oop(THREAD, comp_method);
2542       if (lookup_failed()) {
2543         return false;
2544       }
2545       if (oop_recorder->is_real(jo)) {
2546         oop_recorder->find_index(jo);
2547       } else {
2548         oop_recorder->allocate_oop_index(jo);
2549       }
2550       LogStreamHandle(Debug, scc, oops) log;
2551       if (log.is_enabled()) {
2552         log.print("%d: " INTPTR_FORMAT " ", i, p2i(jo));
2553         if (jo == (jobject)Universe::non_oop_word()) {
2554           log.print("non-oop word");
2555         } else if (jo == nullptr) {
2556           log.print("nullptr-oop");
2557         } else {
2558           JNIHandles::resolve(jo)->print_value_on(&log);
2559         }
2560         log.cr();
2561       }
2562     }
2563   }
2564   return true;
2565 }
2566 
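     // Read one Metadata* described by a DataKind tag: null, the non-oop word, a klass,
     // a method, or a method's MethodCounters.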
2567 Metadata* SCCReader::read_metadata(const methodHandle& comp_method) {
2568   uint code_offset = read_position();
2569   Metadata* m = nullptr;
2570   DataKind kind = *(DataKind*)addr(code_offset);
2571   code_offset += sizeof(DataKind);
2572   set_read_position(code_offset);
2573   if (kind == DataKind::Null) {
2574     m = (Metadata*)nullptr;
2575   } else if (kind == DataKind::No_Data) {
2576     m = (Metadata*)Universe::non_oop_word();
2577   } else if (kind == DataKind::Klass || kind == DataKind::Klass_Shared) {
2578     m = (Metadata*)read_klass(comp_method, (kind == DataKind::Klass_Shared));
2579   } else if (kind == DataKind::Method || kind == DataKind::Method_Shared) {
2580     m = (Metadata*)read_method(comp_method, (kind == DataKind::Method_Shared));
2581   } else if (kind == DataKind::MethodCnts) {
2582     kind = *(DataKind*)addr(code_offset);
2583     bool shared = (kind == DataKind::Method_Shared);
2584     assert(kind == DataKind::Method || shared, "Sanity");
2585     code_offset += sizeof(DataKind);
2586     set_read_position(code_offset);
2587     m = (Metadata*)read_method(comp_method, shared);
2588     if (m != nullptr) {
2589       Method* method = (Method*)m;
2590       m = method->get_method_counters(Thread::current());
2591       if (m == nullptr) {
2592         set_lookup_failed();
2593         log_info(scc)("%d (L%d): Failed to get MethodCounters", compile_id(), comp_level());
2594       } else {
2595         log_info(scc)("%d (L%d): Read MethodCounters : " INTPTR_FORMAT, compile_id(), comp_level(), p2i(m));
2596       }
2597     }
2598   } else {
2599     set_lookup_failed();
2600     log_info(scc)("%d (L%d): Unknown metadata's kind: %d", compile_id(), comp_level(), (int)kind);
2601   }
2602   return m;
2603 }
2604 
2605 bool SCCReader::read_metadata(OopRecorder* oop_recorder, ciMethod* target) {
2606   uint code_offset = read_position();
2607   int metadata_count = *(int*)addr(code_offset);
2608   code_offset += sizeof(int);
2609   set_read_position(code_offset);
2610 
2611   log_debug(scc)("======== read metadata [%d]:", metadata_count);
2612 
2613   if (metadata_count == 0) {
2614     return true;
2615   }
2616   {
2617     VM_ENTRY_MARK;
2618     methodHandle comp_method(THREAD, target->get_Method());
2619 
2620     for (int i = 1; i < metadata_count; i++) {
2621       Metadata* m = read_metadata(comp_method);
2622       if (lookup_failed()) {
2623         return false;
2624       }
2625       if (oop_recorder->is_real(m)) {
2626         oop_recorder->find_index(m);
2627       } else {
2628         oop_recorder->allocate_metadata_index(m);
2629       }
2630       LogTarget(Debug, scc, metadata) log;
2631       if (log.is_enabled()) {
2632         LogStream ls(log);
2633         ls.print("%d: " INTPTR_FORMAT " ", i, p2i(m));
2634         if (m == (Metadata*)Universe::non_oop_word()) {
2635           ls.print("non-metadata word");
2636         } else if (m == nullptr) {
2637           ls.print("nullptr-oop");
2638         } else {
2639           Metadata::print_value_on_maybe_null(&ls, m);
2640         }
2641         ls.cr();
2642       }
2643     }
2644   }
2645   return true;
2646 }
2647 
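     // Write one oop in the format read back by SCCReader::read_oop(): null, the non-oop
     // word, a klass or primitive mirror, a string, a well-known class loader, or an
     // archived (permanent) heap object. Other oops either fail the lookup or are
     // reported as unimplemented.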
2648 bool SCCache::write_oop(jobject& jo) {
2649   DataKind kind;
2650   uint n = 0;
2651   oop obj = JNIHandles::resolve(jo);
2652   if (jo == nullptr) {
2653     kind = DataKind::Null;
2654     n = write_bytes(&kind, sizeof(int));
2655     if (n != sizeof(int)) {
2656       return false;
2657     }
2658   } else if (jo == (jobject)Universe::non_oop_word()) {
2659     kind = DataKind::No_Data;
2660     n = write_bytes(&kind, sizeof(int));
2661     if (n != sizeof(int)) {
2662       return false;
2663     }
2664   } else if (java_lang_Class::is_instance(obj)) {
2665     if (java_lang_Class::is_primitive(obj)) {
2666       int bt = (int)java_lang_Class::primitive_type(obj);
2667       kind = DataKind::Primitive;
2668       n = write_bytes(&kind, sizeof(int));
2669       if (n != sizeof(int)) {
2670         return false;
2671       }
2672       n = write_bytes(&bt, sizeof(int));
2673       if (n != sizeof(int)) {
2674         return false;
2675       }
2676       log_info(scc)("%d (L%d): Write primitive type klass: %s", compile_id(), comp_level(), type2name((BasicType)bt));
2677     } else {
2678       Klass* klass = java_lang_Class::as_Klass(obj);
2679       if (!write_klass(klass)) {
2680         return false;
2681       }
2682     }
2683   } else if (java_lang_String::is_instance(obj)) {
2684     int k = CDSAccess::get_archived_object_permanent_index(obj);  // k >= 0 means obj is a "permanent heap object"
2685     if (k >= 0) {
2686       kind = DataKind::String_Shared;
2687       n = write_bytes(&kind, sizeof(int));
2688       if (n != sizeof(int)) {
2689         return false;
2690       }
2691       n = write_bytes(&k, sizeof(int));
2692       if (n != sizeof(int)) {
2693         return false;
2694       }
2695       return true;
2696     }
2697     kind = DataKind::String;
2698     n = write_bytes(&kind, sizeof(int));
2699     if (n != sizeof(int)) {
2700       return false;
2701     }
2702     ResourceMark rm;
2703     size_t length_sz = 0;
2704     const char* string = java_lang_String::as_utf8_string(obj, length_sz);
2705     int length = (int)length_sz; // FIXME -- cast
2706     length++; // include trailing '\0'
2707     n = write_bytes(&length, sizeof(int));
2708     if (n != sizeof(int)) {
2709       return false;
2710     }
2711     n = write_bytes(string, (uint)length);
2712     if (n != (uint)length) {
2713       return false;
2714     }
2715     log_info(scc)("%d (L%d): Write String: %s", compile_id(), comp_level(), string);
2716   } else if (java_lang_Module::is_instance(obj)) {
2717     fatal("Module object unimplemented");
2718   } else if (java_lang_ClassLoader::is_instance(obj)) {
2719     if (obj == SystemDictionary::java_system_loader()) {
2720       kind = DataKind::SysLoader;
2721       log_info(scc)("%d (L%d): Write ClassLoader: java_system_loader", compile_id(), comp_level());
2722     } else if (obj == SystemDictionary::java_platform_loader()) {
2723       kind = DataKind::PlaLoader;
2724       log_info(scc)("%d (L%d): Write ClassLoader: java_platform_loader", compile_id(), comp_level());
2725     } else {
2726       fatal("ClassLoader object unimplemented");
2727       return false;
2728     }
2729     n = write_bytes(&kind, sizeof(int));
2730     if (n != sizeof(int)) {
2731       return false;
2732     }
2733   } else {
2734     int k = CDSAccess::get_archived_object_permanent_index(obj);  // k >= 0 means obj is a "permanent heap object"
2735     if (k >= 0) {
2736       kind = DataKind::MH_Oop_Shared;
2737       n = write_bytes(&kind, sizeof(int));
2738       if (n != sizeof(int)) {
2739         return false;
2740       }
2741       n = write_bytes(&k, sizeof(int));
2742       if (n != sizeof(int)) {
2743         return false;
2744       }
2745       return true;
2746     }
2747     // Unhandled oop - bailout
2748     set_lookup_failed();
2749     log_info(scc, nmethod)("%d (L%d): Unhandled obj: " PTR_FORMAT " : %s",
2750                               compile_id(), comp_level(), p2i(obj), obj->klass()->external_name());
2751     return false;
2752   }
2753   return true;
2754 }
2755 
2756 bool SCCache::write_oops(OopRecorder* oop_recorder) {
2757   int oop_count = oop_recorder->oop_count();
2758   uint n = write_bytes(&oop_count, sizeof(int));
2759   if (n != sizeof(int)) {
2760     return false;
2761   }
2762   log_debug(scc)("======== write oops [%d]:", oop_count);
2763 
2764   for (int i = 1; i < oop_count; i++) { // skip first virtual nullptr
2765     jobject jo = oop_recorder->oop_at(i);
2766     LogStreamHandle(Info, scc, oops) log;
2767     if (log.is_enabled()) {
2768       log.print("%d: " INTPTR_FORMAT " ", i, p2i(jo));
2769       if (jo == (jobject)Universe::non_oop_word()) {
2770         log.print("non-oop word");
2771       } else if (jo == nullptr) {
2772         log.print("nullptr-oop");
2773       } else {
2774         JNIHandles::resolve(jo)->print_value_on(&log);
2775       }
2776       log.cr();
2777     }
2778     if (!write_oop(jo)) {
2779       return false;
2780     }
2781   }
2782   return true;
2783 }
2784 
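     // Serialize one metadata reference: nullptr and the non-oop word are a bare
     // DataKind tag; Klass and Method values delegate to write_klass()/write_method();
     // MethodCounters are written as a tag plus their owning Method. Anything else is
     // unsupported and fatal.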
2785 bool SCCache::write_metadata(Metadata* m) {
2786   uint n = 0;
2787   if (m == nullptr) {
2788     DataKind kind = DataKind::Null;
2789     n = write_bytes(&kind, sizeof(int));
2790     if (n != sizeof(int)) {
2791       return false;
2792     }
2793   } else if (m == (Metadata*)Universe::non_oop_word()) {
2794     DataKind kind = DataKind::No_Data;
2795     n = write_bytes(&kind, sizeof(int));
2796     if (n != sizeof(int)) {
2797       return false;
2798     }
2799   } else if (m->is_klass()) {
2800     if (!write_klass((Klass*)m)) {
2801       return false;
2802     }
2803   } else if (m->is_method()) {
2804     if (!write_method((Method*)m)) {
2805       return false;
2806     }
2807   } else if (m->is_methodCounters()) {
2808     DataKind kind = DataKind::MethodCnts;
2809     n = write_bytes(&kind, sizeof(int));
2810     if (n != sizeof(int)) {
2811       return false;
2812     }
2813     if (!write_method(((MethodCounters*)m)->method())) {
2814       return false;
2815     }
2816     log_info(scc)("%d (L%d): Write MethodCounters : " INTPTR_FORMAT, compile_id(), comp_level(), p2i(m));
2817   } else { // Not supported
2818     fatal("metadata : " INTPTR_FORMAT " unimplemented", p2i(m));
2819     return false;
2820   }
2821   return true;
2822 }
2823 
2824 bool SCCache::write_metadata(OopRecorder* oop_recorder) {
2825   int metadata_count = oop_recorder->metadata_count();
2826   uint n = write_bytes(&metadata_count, sizeof(int));
2827   if (n != sizeof(int)) {
2828     return false;
2829   }
2830 
2831   log_debug(scc)("======== write metadata [%d]:", metadata_count);
2832 
2833   for (int i = 1; i < metadata_count; i++) { // skip first virtual nullptr
2834     Metadata* m = oop_recorder->metadata_at(i);
2835     LogStreamHandle(Debug, scc, metadata) log;
2836     if (log.is_enabled()) {
2837       log.print("%d: " INTPTR_FORMAT " ", i, p2i(m));
2838       if (m == (Metadata*)Universe::non_oop_word()) {
2839         log.print("non-metadata word");
2840       } else if (m == nullptr) {
2841         log.print("nullptr-oop");
2842       } else {
2843         Metadata::print_value_on_maybe_null(&log, m);
2844       }
2845       log.cr();
2846     }
2847     if (!write_metadata(m)) {
2848       return false;
2849     }
2850   }
2851   return true;
2852 }
2853 
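     // Dependencies were stored in their already-compressed form: an int size followed
     // by DATA_ALIGNMENT-aligned content. The cached bytes are handed directly to the
     // Dependencies object via set_content() instead of being copied.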
2854 bool SCCReader::read_dependencies(Dependencies* dependencies) {
2855   uint code_offset = read_position();
2856   int dependencies_size = *(int*)addr(code_offset);
2857 
2858   log_debug(scc)("======== read dependencies [%d]:", dependencies_size);
2859 
2860   code_offset += sizeof(int);
2861   code_offset = align_up(code_offset, DATA_ALIGNMENT);
2862   if (dependencies_size > 0) {
2863     dependencies->set_content((u_char*)addr(code_offset), dependencies_size);
2864   }
2865   code_offset += dependencies_size;
2866   set_read_position(code_offset);
2867   return true;
2868 }
2869 
2870 bool SCCache::load_nmethod(ciEnv* env, ciMethod* target, int entry_bci, AbstractCompiler* compiler, CompLevel comp_level) {
2871   TraceTime t1("SC total load time", &_t_totalLoad, enable_timers(), false);
2872   CompileTask* task = env->task();
2873   SCCEntry* entry = task->scc_entry();
2874   bool preload = task->preload();
2875   assert(entry != nullptr, "sanity");
2876   SCCache* cache = open_for_read();
2877   if (cache == nullptr) {
2878     return false;
2879   }
2880   if (log_is_enabled(Info, scc, nmethod)) {
2881     uint decomp = (target->method_data() == nullptr) ? 0 : target->method_data()->decompile_count();
2882     VM_ENTRY_MARK;
2883     ResourceMark rm;
2884     methodHandle method(THREAD, target->get_Method());
2885     const char* target_name = method->name_and_sig_as_C_string();
2886     uint hash = java_lang_String::hash_code((const jbyte*)target_name, (int)strlen(target_name));
2887     bool clinit_brs = entry->has_clinit_barriers();
2888     log_info(scc, nmethod)("%d (L%d): %s nmethod '%s' (decomp: %d, hash: " UINT32_FORMAT_X_0 "%s%s)",
2889                            task->compile_id(), task->comp_level(), (preload ? "Preloading" : "Reading"),
2890                            target_name, decomp, hash, (clinit_brs ? ", has clinit barriers" : ""),
2891                            (entry->ignore_decompile() ? ", ignore_decomp" : ""));
2892   }
2893   ReadingMark rdmk;
2894   if (rdmk.failed()) {
2895     // Cache is closed, cannot touch anything.
2896     return false;
2897   }
2898 
2899   SCCReader reader(cache, entry, task);
2900   bool success = reader.compile(env, target, entry_bci, compiler);
2901   if (success) {
2902     task->set_num_inlined_bytecodes(entry->num_inlined_bytecodes());
2903   } else {
2904     entry->set_load_fail();
2905   }
2906   return success;
2907 }
2908 
2909 SCCReader::SCCReader(SCCache* cache, SCCEntry* entry, CompileTask* task) {
2910   _cache = cache;
2911   _entry   = entry;
2912   _load_buffer = cache->cache_buffer();
2913   _read_position = 0;
2914   if (task != nullptr) {
2915     _compile_id = task->compile_id();
2916     _comp_level = task->comp_level();
2917     _preload    = task->preload();
2918   } else {
2919     _compile_id = 0;
2920     _comp_level = 0;
2921     _preload    = false;
2922   }
2923   _lookup_failed = false;
2924 }
2925 
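     // Re-create a compilation result from a cached entry: read the flags, offsets,
     // oop and metadata tables, debug info, dependencies, oop maps, exception and
     // null-check tables, code and relocations, then register the nmethod with ciEnv.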
2926 bool SCCReader::compile(ciEnv* env, ciMethod* target, int entry_bci, AbstractCompiler* compiler) {
2927   uint entry_position = _entry->offset();
2928   uint code_offset = entry_position + _entry->code_offset();
2929   set_read_position(code_offset);
2930 
2931   // Read flags
2932   int flags = *(int*)addr(code_offset);
2933   code_offset += sizeof(int);
2934   bool has_monitors      = (flags & 0x1) != 0;
2935   bool has_wide_vectors  = (flags & 0x2) != 0;
2936   bool has_unsafe_access = (flags & 0x4) != 0;
2937   bool has_scoped_access = (flags & 0x8) != 0;
2938 
2939   int orig_pc_offset = *(int*)addr(code_offset);
2940   code_offset += sizeof(int);
2941   int frame_size = *(int*)addr(code_offset);
2942   code_offset += sizeof(int);
2943 
2944   // Read offsets
2945   CodeOffsets* offsets = (CodeOffsets*)addr(code_offset);
2946   code_offset += sizeof(CodeOffsets);
2947 
2948   // Create Debug Information Recorder to record scopes, oopmaps, etc.
2949   OopRecorder* oop_recorder = new OopRecorder(env->arena());
2950   env->set_oop_recorder(oop_recorder);
2951 
2952   set_read_position(code_offset);
2953 
2954   // Read OopRecorder data
2955   if (!read_oops(oop_recorder, target)) {
2956     return false;
2957   }
2958   if (!read_metadata(oop_recorder, target)) {
2959     return false;
2960   }
2961 
2962   // Read Debug info
2963   DebugInformationRecorder* recorder = read_debug_info(oop_recorder);
2964   if (recorder == nullptr) {
2965     return false;
2966   }
2967   env->set_debug_info(recorder);
2968 
2969   // Read Dependencies (compressed already)
2970   Dependencies* dependencies = new Dependencies(env);
2971   if (!read_dependencies(dependencies)) {
2972     return false;
2973   }
2974   env->set_dependencies(dependencies);
2975 
2976   // Read oop maps
2977   OopMapSet* oop_maps = read_oop_maps();
2978   if (oop_maps == nullptr) {
2979     return false;
2980   }
2981 
2982   // Read exception handler table
2983   code_offset = read_position();
2984   int exc_table_length = *(int*)addr(code_offset);
2985   code_offset += sizeof(int);
2986   ExceptionHandlerTable handler_table(MAX2(exc_table_length, 4));
2987   if (exc_table_length > 0) {
2988     handler_table.set_length(exc_table_length);
2989     uint exc_table_size = handler_table.size_in_bytes();
2990     copy_bytes(addr(code_offset), (address)handler_table.table(), exc_table_size);
2991     code_offset += exc_table_size;
2992   }
2993 
2994   // Read null check table
2995   int nul_chk_length = *(int*)addr(code_offset);
2996   code_offset += sizeof(int);
2997   ImplicitExceptionTable nul_chk_table;
2998   if (nul_chk_length > 0) {
2999     nul_chk_table.set_size(nul_chk_length);
3000     nul_chk_table.set_len(nul_chk_length);
3001     uint nul_chk_size = nul_chk_table.size_in_bytes();
3002     copy_bytes(addr(code_offset), (address)nul_chk_table.data(), nul_chk_size - sizeof(implicit_null_entry));
3003     code_offset += nul_chk_size;
3004   }
3005 
3006   uint reloc_size = _entry->reloc_size();
3007   CodeBuffer buffer("Compile::Fill_buffer", _entry->code_size(), reloc_size);
3008   buffer.initialize_oop_recorder(oop_recorder);
3009 
3010   const char* name = addr(entry_position + _entry->name_offset());
3011 
3012   // Create fake original CodeBuffer
3013   CodeBuffer orig_buffer(name);
3014 
3015   // Read code
3016   if (!read_code(&buffer, &orig_buffer, align_up(code_offset, DATA_ALIGNMENT))) {
3017     return false;
3018   }
3019 
3020   // Read relocations
3021   uint reloc_offset = entry_position + _entry->reloc_offset();
3022   set_read_position(reloc_offset);
3023   if (!read_relocations(&buffer, &orig_buffer, oop_recorder, target)) {
3024     return false;
3025   }
3026 
3027   log_info(scc, nmethod)("%d (L%d): Read nmethod '%s' from Startup Code Cache '%s'", compile_id(), comp_level(), name, _cache->cache_path());
3028 #ifdef ASSERT
3029   LogStreamHandle(Debug, scc, nmethod) log;
3030   if (log.is_enabled()) {
3031     FlagSetting fs(PrintRelocations, true);
3032     buffer.print_on(&log);
3033     buffer.decode();
3034   }
3035 #endif
3036 
3037   if (VerifyCachedCode) {
3038     return false;
3039   }
3040 
3041   // Register nmethod
3042   TraceTime t1("SC total nmethod register time", &_t_totalRegister, enable_timers(), false);
3043   env->register_method(target, entry_bci,
3044                        offsets, orig_pc_offset,
3045                        &buffer, frame_size,
3046                        oop_maps, &handler_table,
3047                        &nul_chk_table, compiler,
3048                        _entry->has_clinit_barriers(),
3049                        false,
3050                        has_unsafe_access,
3051                        has_wide_vectors,
3052                        has_monitors,
3053                        has_scoped_access,
3054                        0, true /* install_code */,
3055                        (SCCEntry *)_entry);
3056   CompileTask* task = env->task();
3057   bool success = task->is_success();
3058   if (success) {
3059     ((SCCEntry *)_entry)->set_loaded();
3060   }
3061   return success;
3062 }
3063 
3064 // No concurrency for writing to the cache file because this method is called from
3065 // ciEnv::register_method() under MethodCompileQueue_lock and Compile_lock locks.
3066 SCCEntry* SCCache::store_nmethod(const methodHandle& method,
3067                      int comp_id,
3068                      int entry_bci,
3069                      CodeOffsets* offsets,
3070                      int orig_pc_offset,
3071                      DebugInformationRecorder* recorder,
3072                      Dependencies* dependencies,
3073                      CodeBuffer* buffer,
3074                      int frame_size,
3075                      OopMapSet* oop_maps,
3076                      ExceptionHandlerTable* handler_table,
3077                      ImplicitExceptionTable* nul_chk_table,
3078                      AbstractCompiler* compiler,
3079                      CompLevel comp_level,
3080                      bool has_clinit_barriers,
3081                      bool for_preload,
3082                      bool has_unsafe_access,
3083                      bool has_wide_vectors,
3084                      bool has_monitors,
3085                      bool has_scoped_access) {
3086   if (!CDSConfig::is_dumping_cached_code()) {
3087     return nullptr; // The metadata and heap in the CDS image haven't been finalized yet.
3088   }
3089   if (entry_bci != InvocationEntryBci) {
3090     return nullptr; // No OSR
3091   }
3092   if (compiler->is_c1() && (comp_level == CompLevel_simple || comp_level == CompLevel_limited_profile)) {
3093     // Cache tier1 compilations
3094   } else if (!compiler->is_c2()) {
3095     return nullptr; // Only C2 now
3096   }
3097   TraceTime t1("SC total store time", &_t_totalStore, enable_timers(), false);
3098   SCCache* cache = open_for_write();
3099   if (cache == nullptr) {
3100     return nullptr; // Cache file is closed
3101   }
3102   SCCEntry* entry = cache->write_nmethod(method, comp_id, entry_bci, offsets, orig_pc_offset, recorder, dependencies, buffer,
3103                                   frame_size, oop_maps, handler_table, nul_chk_table, compiler, comp_level,
3104                                   has_clinit_barriers, for_preload, has_unsafe_access, has_wide_vectors, has_monitors, has_scoped_access);
3105   if (entry == nullptr) {
3106     log_info(scc, nmethod)("%d (L%d): nmethod store attempt failed", comp_id, (int)comp_level);
3107   }
3108   return entry;
3109 }
3110 
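     // Entry layout written below: method name, flags, orig_pc_offset, frame_size,
     // CodeOffsets, oop and metadata tables, debug info, dependencies, oop maps,
     // exception and null-check tables, code sections, and finally relocations.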
3111 SCCEntry* SCCache::write_nmethod(const methodHandle& method,
3112                                  int comp_id,
3113                                  int entry_bci,
3114                                  CodeOffsets* offsets,
3115                                  int orig_pc_offset,
3116                                  DebugInformationRecorder* recorder,
3117                                  Dependencies* dependencies,
3118                                  CodeBuffer* buffer,
3119                                  int frame_size,
3120                                  OopMapSet* oop_maps,
3121                                  ExceptionHandlerTable* handler_table,
3122                                  ImplicitExceptionTable* nul_chk_table,
3123                                  AbstractCompiler* compiler,
3124                                  CompLevel comp_level,
3125                                  bool has_clinit_barriers,
3126                                  bool for_preload,
3127                                  bool has_unsafe_access,
3128                                  bool has_wide_vectors,
3129                                  bool has_monitors,
3130                                  bool has_scoped_access) {
3131 //  if (method->is_hidden()) {
3132 //    ResourceMark rm;
3133 //    log_info(scc, nmethod)("%d (L%d): Skip hidden method '%s'", comp_id, (int)comp_level, method->name_and_sig_as_C_string());
3134 //    return nullptr;
3135 //  }
3136   if (buffer->before_expand() != nullptr) {
3137     ResourceMark rm;
3138     log_info(scc, nmethod)("%d (L%d): Skip nmethod with expanded buffer '%s'", comp_id, (int)comp_level, method->name_and_sig_as_C_string());
3139     return nullptr;
3140   }
3141 #ifdef ASSERT
3142   LogStreamHandle(Debug, scc, nmethod) log;
3143   if (log.is_enabled()) {
3144     tty->print_cr(" == store_nmethod");
3145     FlagSetting fs(PrintRelocations, true);
3146     buffer->print_on(&log);
3147     buffer->decode();
3148   }
3149 #endif
3150   assert(!has_clinit_barriers || _gen_preload_code, "sanity");
3151   Method* m = method();
3152   bool method_in_cds = MetaspaceShared::is_in_shared_metaspace((address)m);
3153   InstanceKlass* holder = m->method_holder();
3154   bool klass_in_cds = holder->is_shared() && !holder->is_shared_unregistered_class();
3155   bool builtin_loader = holder->class_loader_data()->is_builtin_class_loader_data();
3156   if (!builtin_loader) {
3157     ResourceMark rm;
3158     log_info(scc, nmethod)("%d (L%d): Skip method '%s' loaded by custom class loader %s", comp_id, (int)comp_level, method->name_and_sig_as_C_string(), holder->class_loader_data()->loader_name());
3159     return nullptr;
3160   }
3161   if (for_preload && !(method_in_cds && klass_in_cds)) {
3162     ResourceMark rm;
3163     log_info(scc, nmethod)("%d (L%d): Skip method '%s' for preload: not in CDS", comp_id, (int)comp_level, method->name_and_sig_as_C_string());
3164     return nullptr;
3165   }
3166   assert(!for_preload || method_in_cds, "sanity");
3167   _for_preload = for_preload;
3168   _has_clinit_barriers = has_clinit_barriers;
3169 
3170   if (!align_write()) {
3171     return nullptr;
3172   }
3173   _compile_id = comp_id;
3174   _comp_level = (int)comp_level;
3175 
3176   uint entry_position = _write_position;
3177 
3178   uint decomp = (method->method_data() == nullptr) ? 0 : method->method_data()->decompile_count();
3179 
3180   // Is this the one-step workflow assembly phase?
3181   // In this phase compilation is done based on saved profiling data
3182   // without running the application. Ignore decompilation counters in that case.
3183   // Also ignore them for C1 code because it is decompiled unconditionally
3184   // when the C2 generated code is published.
3185   bool ignore_decompile = (comp_level == CompLevel_limited_profile) ||
3186                           CDSConfig::is_dumping_final_static_archive();
3187 
3188   // Write name
3189   uint name_offset = 0;
3190   uint name_size   = 0;
3191   uint hash = 0;
3192   uint n;
3193   {
3194     ResourceMark rm;
3195     const char* name   = method->name_and_sig_as_C_string();
3196     log_info(scc, nmethod)("%d (L%d): Writing nmethod '%s' (comp level: %d, decomp: %d%s%s) to Startup Code Cache '%s'",
3197                            comp_id, (int)comp_level, name, comp_level, decomp,
3198                            (ignore_decompile ? ", ignore_decomp" : ""),
3199                            (has_clinit_barriers ? ", has clinit barriers" : ""), _cache_path);
3200 
3201     LogStreamHandle(Info, scc, loader) log;
3202     if (log.is_enabled()) {
3203       oop loader = holder->class_loader();
3204       oop domain = holder->protection_domain();
3205       log.print("Holder: ");
3206       holder->print_value_on(&log);
3207       log.print(" loader: ");
3208       if (loader == nullptr) {
3209         log.print("nullptr");
3210       } else {
3211         loader->print_value_on(&log);
3212       }
3213       log.print(" domain: ");
3214       if (domain == nullptr) {
3215         log.print("nullptr");
3216       } else {
3217         domain->print_value_on(&log);
3218       }
3219       log.cr();
3220     }
3221     name_offset = _write_position - entry_position;
3222     name_size   = (uint)strlen(name) + 1; // Includes terminating '\0'
3223     n = write_bytes(name, name_size);
3224     if (n != name_size) {
3225       return nullptr;
3226     }
3227     hash = java_lang_String::hash_code((const jbyte*)name, (int)strlen(name));
3228   }
3229 
3230   if (!align_write()) {
3231     return nullptr;
3232   }
3233 
3234   uint code_offset = _write_position - entry_position;
3235 
3236   int flags = (has_scoped_access ? 0x8 : 0) |
3237               (has_unsafe_access ? 0x4 : 0) |
3238               (has_wide_vectors  ? 0x2 : 0) |
3239               (has_monitors      ? 0x1 : 0);
3240   n = write_bytes(&flags, sizeof(int));
3241   if (n != sizeof(int)) {
3242     return nullptr;
3243   }
3244 
3245   n = write_bytes(&orig_pc_offset, sizeof(int));
3246   if (n != sizeof(int)) {
3247     return nullptr;
3248   }
3249 
3250   n = write_bytes(&frame_size, sizeof(int));
3251   if (n != sizeof(int)) {
3252     return nullptr;
3253   }
3254 
3255   // Write offsets
3256   n = write_bytes(offsets, sizeof(CodeOffsets));
3257   if (n != sizeof(CodeOffsets)) {
3258     return nullptr;
3259   }
3260 
3261   // Write OopRecorder data
3262   if (!write_oops(buffer->oop_recorder())) {
3263     if (lookup_failed() && !failed()) {
3264       // Skip this method and reposition file
3265       set_write_position(entry_position);
3266     }
3267     return nullptr;
3268   }
3269   if (!write_metadata(buffer->oop_recorder())) {
3270     if (lookup_failed() && !failed()) {
3271       // Skip this method and reposition file
3272       set_write_position(entry_position);
3273     }
3274     return nullptr;
3275   }
3276 
3277   // Write Debug info
3278   if (!write_debug_info(recorder)) {
3279     return nullptr;
3280   }
3281   // Write Dependencies
3282   int dependencies_size = (int)dependencies->size_in_bytes();
3283   n = write_bytes(&dependencies_size, sizeof(int));
3284   if (n != sizeof(int)) {
3285     return nullptr;
3286   }
3287   if (!align_write()) {
3288     return nullptr;
3289   }
3290   n = write_bytes(dependencies->content_bytes(), dependencies_size);
3291   if (n != (uint)dependencies_size) {
3292     return nullptr;
3293   }
3294 
3295   // Write oop maps
3296   if (!write_oop_maps(oop_maps)) {
3297     return nullptr;
3298   }
3299 
3300   // Write exception handler table
3301   int exc_table_length = handler_table->length();
3302   n = write_bytes(&exc_table_length, sizeof(int));
3303   if (n != sizeof(int)) {
3304     return nullptr;
3305   }
3306   uint exc_table_size = handler_table->size_in_bytes();
3307   n = write_bytes(handler_table->table(), exc_table_size);
3308   if (n != exc_table_size) {
3309     return nullptr;
3310   }
3311 
3312   // Write null check table
3313   int nul_chk_length = nul_chk_table->len();
3314   n = write_bytes(&nul_chk_length, sizeof(int));
3315   if (n != sizeof(int)) {
3316     return nullptr;
3317   }
3318   uint nul_chk_size = nul_chk_table->size_in_bytes();
3319   n = write_bytes(nul_chk_table->data(), nul_chk_size);
3320   if (n != nul_chk_size) {
3321     return nullptr;
3322   }
3323 
3324   // Write code section
3325   if (!align_write()) {
3326     return nullptr;
3327   }
3328   uint code_size = 0;
3329   if (!write_code(buffer, code_size)) {
3330     return nullptr;
3331   }
3332   // Write relocInfo array
3333   uint reloc_offset = _write_position - entry_position;
3334   uint reloc_size = 0;
3335   if (!write_relocations(buffer, reloc_size)) {
3336     if (lookup_failed() && !failed()) {
3337       // Skip this method and reposition file
3338       set_write_position(entry_position);
3339     }
3340     return nullptr;
3341   }
3342   uint entry_size = _write_position - entry_position;
3343 
3344   SCCEntry* entry = new (this) SCCEntry(entry_position, entry_size, name_offset, name_size,
3345                                         code_offset, code_size, reloc_offset, reloc_size,
3346                                         SCCEntry::Code, hash, (uint)comp_level, (uint)comp_id, decomp,
3347                                         has_clinit_barriers, _for_preload, ignore_decompile);
3348   if (method_in_cds) {
3349     entry->set_method(m);
3350   }
3351 #ifdef ASSERT
3352   if (has_clinit_barriers || _for_preload) {
3353     assert(for_preload, "sanity");
3354     assert(entry->method() != nullptr, "sanity");
3355   }
3356 #endif
3357   {
3358     ResourceMark rm;
3359     const char* name   = method->name_and_sig_as_C_string();
3360     log_info(scc, nmethod)("%d (L%d): Wrote nmethod '%s'%s to Startup Code Cache '%s'",
3361                            comp_id, (int)comp_level, name, (_for_preload ? " (for preload)" : ""), _cache_path);
3362   }
3363   if (VerifyCachedCode) {
3364     return nullptr;
3365   }
3366   return entry;
3367 }
3368 
3369 static void print_helper1(outputStream* st, const char* name, int count) {
3370   if (count > 0) {
3371     st->print(" %s=%d", name, count);
3372   }
3373 }
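     // stats[idx] columns: [0] total, [1] has_clinit_barriers, [2] for_preload,
     // [3] loaded, [4] invalidated (not entrant), [5] failed to load. Rows 0..2 are
     // the None/Stub/Blob kinds; Code entries land at SCCEntry::Code + comp_level
     // (+1 when stored for preload).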
3374 static void print_helper(outputStream* st, const char* name, int stats[6+3][6], int idx) {
3375   int total = stats[idx][0];
3376   if (total > 0) {
3377     st->print("  %s:", name);
3378     print_helper1(st, "total",               stats[idx][0]);
3379     //print_helper1(st, "for_preload",         stats[idx][2]); // implied by Tier5
3380     print_helper1(st, "loaded",              stats[idx][3]);
3381     print_helper1(st, "invalidated",         stats[idx][4]);
3382     print_helper1(st, "failed",              stats[idx][5]);
3383     print_helper1(st, "has_clinit_barriers", stats[idx][1]);
3384     st->cr();
3385   }
3386 }
3387 
3388 void SCCache::print_statistics_on(outputStream* st) {
3389   SCCache* cache = open_for_read();
3390   if (cache != nullptr) {
3391     ReadingMark rdmk;
3392     if (rdmk.failed()) {
3393       // Cache is closed, cannot touch anything.
3394       return;
3395     }
3396 
3397     uint count = cache->_load_header->entries_count();
3398     uint* search_entries = (uint*)cache->addr(cache->_load_header->entries_offset()); // [id, index]
3399     SCCEntry* load_entries = (SCCEntry*)(search_entries + 2 * count);
3400 
3401     int stats[6 + 3][6] = {0};
3402     for (uint i = 0; i < count; i++) {
3403       int index = search_entries[2*i + 1];
3404       SCCEntry* entry = &(load_entries[index]);
3405 
3406       int lvl = entry->kind();
3407       if (entry->kind() == SCCEntry::Code) {
3408         lvl += entry->comp_level() + (entry->for_preload() ? 1 : 0);
3409       }
3410       ++stats[lvl][0]; // total
3411       if (entry->has_clinit_barriers()) {
3412         ++stats[lvl][1];
3413       }
3414       if (entry->for_preload()) {
3415         ++stats[lvl][2];
3416       }
3417       if (entry->is_loaded()) {
3418         ++stats[lvl][3];
3419       }
3420       if (entry->not_entrant()) {
3421         ++stats[lvl][4];
3422       }
3423       if (entry->load_fail()) {
3424         ++stats[lvl][5];
3425       }
3426     }
3427 
3428     print_helper(st, "None", stats, SCCEntry::None);
3429     print_helper(st, "Stub", stats, SCCEntry::Stub);
3430     print_helper(st, "Blob", stats, SCCEntry::Blob);
3431     for (int lvl = 0; lvl <= CompLevel_full_optimization + 1; lvl++) {
3432       ResourceMark rm;
3433       stringStream ss;
3434       ss.print("SC T%d", lvl);
3435       print_helper(st, ss.freeze(), stats, SCCEntry::Code + lvl);
3436     }
3437 
3438   } else {
3439     st->print_cr("failed to open SCA at %s", CachedCodeFile);
3440   }
3441 }
3442 
3443 void SCCache::print_on(outputStream* st) {
3444   SCCache* cache = open_for_read();
3445   if (cache != nullptr) {
3446     ReadingMark rdmk;
3447     if (rdmk.failed()) {
3448       // Cache is closed, cannot touch anything.
3449       return;
3450     }
3451 
3452     uint count = cache->_load_header->entries_count();
3453     uint* search_entries = (uint*)cache->addr(cache->_load_header->entries_offset()); // [id, index]
3454     SCCEntry* load_entries = (SCCEntry*)(search_entries + 2 * count);
3455 
3456     for (uint i = 0; i < count; i++) {
3457       int index = search_entries[2*i + 1];
3458       SCCEntry* entry = &(load_entries[index]);
3459 
3460       st->print_cr("%4u: %4u: K%u L%u offset=%u decompile=%u size=%u code_size=%u%s%s%s%s",
3461                 i, index, entry->kind(), entry->comp_level(), entry->offset(),
3462                 entry->decompile(), entry->size(), entry->code_size(),
3463                 entry->has_clinit_barriers() ? " has_clinit_barriers" : "",
3464                 entry->for_preload()         ? " for_preload"         : "",
3465                 entry->is_loaded()           ? " loaded"              : "",
3466                 entry->not_entrant()         ? " not_entrant"         : "");
3467       st->print_raw("         ");
3468       SCCReader reader(cache, entry, nullptr);
3469       reader.print_on(st);
3470     }
3471   } else {
3472     st->print_cr("failed to open SCA at %s", CachedCodeFile);
3473   }
3474 }
3475 
3476 void SCCache::print_unused_entries_on(outputStream* st) {
3477   LogStreamHandle(Info, scc, init) info;
3478   if (info.is_enabled()) {
3479     SCCache::iterate([&](SCCEntry* entry) {
3480       if (!entry->is_loaded()) {
3481         MethodTrainingData* mtd = MethodTrainingData::lookup_for(entry->method());
3482         if (mtd != nullptr) {
3483           if (mtd->has_holder()) {
3484             if (mtd->holder()->method_holder()->is_initialized()) {
3485               ResourceMark rm;
3486               mtd->iterate_all_compiles([&](CompileTrainingData* ctd) {
3487                 if ((uint)ctd->level() == entry->comp_level()) {
3488                   if (ctd->init_deps_left() == 0) {
3489                     nmethod* nm = mtd->holder()->code();
3490                     if (nm == nullptr) {
3491                       if (mtd->holder()->queued_for_compilation()) {
3492                         return; // scheduled for compilation
3493                       }
3494                     } else if ((uint)nm->comp_level() >= entry->comp_level()) {
3495                       return; // already online compiled and superseded by a more optimal method
3496                     }
3497                     info.print("SCC entry not loaded: ");
3498                     ctd->print_on(&info);
3499                     info.cr();
3500                   }
3501                 }
3502               });
3503             } else {
3504               // not yet initialized
3505             }
3506           } else {
3507             info.print("SCC entry doesn't have a holder: ");
3508             mtd->print_on(&info);
3509             info.cr();
3510           }
3511         }
3512       }
3513     });
3514   }
3515 }
3516 
3517 void SCCReader::print_on(outputStream* st) {
3518   uint entry_position = _entry->offset();
3519   set_read_position(entry_position);
3520 
3521   // Read name
3522   uint name_offset = entry_position + _entry->name_offset();
3523   uint name_size = _entry->name_size(); // Includes terminating '\0'
3524   const char* name = addr(name_offset);
3525 
3526   st->print_cr("  name: %s", name);
3527 }
3528 
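     // The capacity limits below bound the SCAddressTable arrays. The table records
     // well-known VM addresses (external runtime entries, stub routines, runtime blobs);
     // an address's id is its index within the concatenated tables, so cached relocations
     // can refer to it by id (see search_address() and address_for_id()).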
3529 #define _extrs_max 80
3530 #define _stubs_max 120
3531 #define _blobs_max 100
3532 #define _shared_blobs_max 24
3533 #define _C2_blobs_max 25
3534 #define _C1_blobs_max (_blobs_max - _shared_blobs_max - _C2_blobs_max)
3535 #define _all_max 300
3536 
3537 #define SET_ADDRESS(type, addr)                           \
3538   {                                                       \
3539     type##_addr[type##_length++] = (address) (addr);      \
3540     assert(type##_length <= type##_max, "increase size"); \
3541   }
3542 
3543 static bool initializing = false;
3544 void SCAddressTable::init() {
3545   if (_complete || initializing) return; // Done already
3546   initializing = true;
3547   _extrs_addr = NEW_C_HEAP_ARRAY(address, _extrs_max, mtCode);
3548   _stubs_addr = NEW_C_HEAP_ARRAY(address, _stubs_max, mtCode);
3549   _blobs_addr = NEW_C_HEAP_ARRAY(address, _blobs_max, mtCode);
3550 
3551   // Divide the _blobs_addr array into chunks because they could be initialized in parallel
3552   _C2_blobs_addr = _blobs_addr + _shared_blobs_max;// C2 blobs addresses stored after shared blobs
3553   _C1_blobs_addr = _C2_blobs_addr + _C2_blobs_max; // C1 blobs addresses stored after C2 blobs
3554 
3555   _extrs_length = 0;
3556   _stubs_length = 0;
3557   _blobs_length = 0;       // for shared blobs
3558   _C1_blobs_length = 0;
3559   _C2_blobs_length = 0;
3560   _final_blobs_length = 0; // Depends on number of C1 blobs
3561 
3562   // Runtime methods
3563 #ifdef COMPILER2
3564   SET_ADDRESS(_extrs, OptoRuntime::handle_exception_C);
3565 #endif
3566 #ifdef COMPILER1
3567   SET_ADDRESS(_extrs, Runtime1::is_instance_of);
3568   SET_ADDRESS(_extrs, Runtime1::trace_block_entry);
3569 #endif
3570 
3571   SET_ADDRESS(_extrs, CompressedOops::base_addr());
3572 #if INCLUDE_G1GC
3573   SET_ADDRESS(_extrs, G1BarrierSetRuntime::write_ref_field_post_entry);
3574   SET_ADDRESS(_extrs, G1BarrierSetRuntime::write_ref_field_pre_entry);
3575 #endif
3576 
3577 #if INCLUDE_SHENANDOAHGC
3578   SET_ADDRESS(_extrs, ShenandoahRuntime::arraycopy_barrier_oop);
3579   SET_ADDRESS(_extrs, ShenandoahRuntime::arraycopy_barrier_narrow_oop);
3580   SET_ADDRESS(_extrs, ShenandoahRuntime::write_ref_field_pre);
3581   SET_ADDRESS(_extrs, ShenandoahRuntime::clone_barrier);
3582   SET_ADDRESS(_extrs, ShenandoahRuntime::load_reference_barrier_strong);
3583   SET_ADDRESS(_extrs, ShenandoahRuntime::load_reference_barrier_strong_narrow);
3584   SET_ADDRESS(_extrs, ShenandoahRuntime::load_reference_barrier_weak);
3585   SET_ADDRESS(_extrs, ShenandoahRuntime::load_reference_barrier_weak_narrow);
3586   SET_ADDRESS(_extrs, ShenandoahRuntime::load_reference_barrier_phantom);
3587   SET_ADDRESS(_extrs, ShenandoahRuntime::load_reference_barrier_phantom_narrow);
3588 #endif
3589 
3590   SET_ADDRESS(_extrs, SharedRuntime::complete_monitor_unlocking_C);
3591   SET_ADDRESS(_extrs, SharedRuntime::enable_stack_reserved_zone);
3592 #if defined(AMD64) && !defined(ZERO)
3593   SET_ADDRESS(_extrs, SharedRuntime::montgomery_multiply);
3594   SET_ADDRESS(_extrs, SharedRuntime::montgomery_square);
3595 #endif // AMD64
3596   SET_ADDRESS(_extrs, SharedRuntime::d2f);
3597   SET_ADDRESS(_extrs, SharedRuntime::d2i);
3598   SET_ADDRESS(_extrs, SharedRuntime::d2l);
3599   SET_ADDRESS(_extrs, SharedRuntime::dcos);
3600   SET_ADDRESS(_extrs, SharedRuntime::dexp);
3601   SET_ADDRESS(_extrs, SharedRuntime::dlog);
3602   SET_ADDRESS(_extrs, SharedRuntime::dlog10);
3603   SET_ADDRESS(_extrs, SharedRuntime::dpow);
3604   SET_ADDRESS(_extrs, SharedRuntime::dsin);
3605   SET_ADDRESS(_extrs, SharedRuntime::dtan);
3606   SET_ADDRESS(_extrs, SharedRuntime::f2i);
3607   SET_ADDRESS(_extrs, SharedRuntime::f2l);
3608 #ifndef ZERO
3609   SET_ADDRESS(_extrs, SharedRuntime::drem);
3610   SET_ADDRESS(_extrs, SharedRuntime::frem);
3611 #endif
3612   SET_ADDRESS(_extrs, SharedRuntime::l2d);
3613   SET_ADDRESS(_extrs, SharedRuntime::l2f);
3614   SET_ADDRESS(_extrs, SharedRuntime::ldiv);
3615   SET_ADDRESS(_extrs, SharedRuntime::lmul);
3616   SET_ADDRESS(_extrs, SharedRuntime::lrem);
3617 #if INCLUDE_JVMTI
3618   SET_ADDRESS(_extrs, &JvmtiExport::_should_notify_object_alloc);
3619 #endif /* INCLUDE_JVMTI */
3620   BarrierSet* bs = BarrierSet::barrier_set();
3621   if (bs->is_a(BarrierSet::CardTableBarrierSet)) {
3622     SET_ADDRESS(_extrs, ci_card_table_address_as<address>());
3623   }
3624   SET_ADDRESS(_extrs, ThreadIdentifier::unsafe_offset());
3625   SET_ADDRESS(_extrs, Thread::current);
3626 
3627   SET_ADDRESS(_extrs, os::javaTimeMillis);
3628   SET_ADDRESS(_extrs, os::javaTimeNanos);
3629 
3630 #if INCLUDE_JVMTI
3631   SET_ADDRESS(_extrs, &JvmtiVTMSTransitionDisabler::_VTMS_notify_jvmti_events);
3632 #endif /* INCLUDE_JVMTI */
3633   SET_ADDRESS(_extrs, StubRoutines::crc_table_addr());
3634 #ifndef PRODUCT
3635   SET_ADDRESS(_extrs, &SharedRuntime::_partial_subtype_ctr);
3636   SET_ADDRESS(_extrs, JavaThread::verify_cross_modify_fence_failure);
3637 #endif
3638 
3639 #ifndef ZERO
3640 #if defined(AMD64) || defined(AARCH64) || defined(RISCV64)
3641   SET_ADDRESS(_extrs, MacroAssembler::debug64);
3642 #endif
3643 #if defined(AMD64)
3644   SET_ADDRESS(_extrs, StubRoutines::x86::arrays_hashcode_powers_of_31());
3645 #endif
3646 #endif
3647 
3648 #ifdef COMPILER1
3649 #ifdef X86
3650   SET_ADDRESS(_extrs, LIR_Assembler::float_signmask_pool);
3651   SET_ADDRESS(_extrs, LIR_Assembler::double_signmask_pool);
3652   SET_ADDRESS(_extrs, LIR_Assembler::float_signflip_pool);
3653   SET_ADDRESS(_extrs, LIR_Assembler::double_signflip_pool);
3654 #endif
3655 #endif
3656 
3657   // addresses of fields in AOT runtime constants area
3658   address* p = AOTRuntimeConstants::field_addresses_list();
3659   while (*p != nullptr) {
3660     SET_ADDRESS(_extrs, *p++);
3661   }
3662   // Stubs
3663   SET_ADDRESS(_stubs, StubRoutines::method_entry_barrier());
3664   SET_ADDRESS(_stubs, StubRoutines::forward_exception_entry());
3665 /*
3666   SET_ADDRESS(_stubs, StubRoutines::throw_AbstractMethodError_entry());
3667   SET_ADDRESS(_stubs, StubRoutines::throw_IncompatibleClassChangeError_entry());
3668   SET_ADDRESS(_stubs, StubRoutines::throw_NullPointerException_at_call_entry());
3669   SET_ADDRESS(_stubs, StubRoutines::throw_StackOverflowError_entry());
3670   SET_ADDRESS(_stubs, StubRoutines::throw_delayed_StackOverflowError_entry());
3671 */
3672   SET_ADDRESS(_stubs, StubRoutines::atomic_xchg_entry());
3673   SET_ADDRESS(_stubs, StubRoutines::atomic_cmpxchg_entry());
3674   SET_ADDRESS(_stubs, StubRoutines::atomic_cmpxchg_long_entry());
3675   SET_ADDRESS(_stubs, StubRoutines::atomic_add_entry());
3676   SET_ADDRESS(_stubs, StubRoutines::fence_entry());
3677 
3678   SET_ADDRESS(_stubs, StubRoutines::cont_thaw());
3679   SET_ADDRESS(_stubs, StubRoutines::cont_returnBarrier());
3680   SET_ADDRESS(_stubs, StubRoutines::cont_returnBarrierExc());
3681 
3682   JFR_ONLY(SET_ADDRESS(_stubs, SharedRuntime::jfr_write_checkpoint());)
3683 
3684 
3685   SET_ADDRESS(_stubs, StubRoutines::jbyte_arraycopy());
3686   SET_ADDRESS(_stubs, StubRoutines::jshort_arraycopy());
3687   SET_ADDRESS(_stubs, StubRoutines::jint_arraycopy());
3688   SET_ADDRESS(_stubs, StubRoutines::jlong_arraycopy());
3689   SET_ADDRESS(_stubs, StubRoutines::_oop_arraycopy);
3690   SET_ADDRESS(_stubs, StubRoutines::_oop_arraycopy_uninit);
3691 
3692   SET_ADDRESS(_stubs, StubRoutines::jbyte_disjoint_arraycopy());
3693   SET_ADDRESS(_stubs, StubRoutines::jshort_disjoint_arraycopy());
3694   SET_ADDRESS(_stubs, StubRoutines::jint_disjoint_arraycopy());
3695   SET_ADDRESS(_stubs, StubRoutines::jlong_disjoint_arraycopy());
3696   SET_ADDRESS(_stubs, StubRoutines::_oop_disjoint_arraycopy);
3697   SET_ADDRESS(_stubs, StubRoutines::_oop_disjoint_arraycopy_uninit);
3698 
3699   SET_ADDRESS(_stubs, StubRoutines::arrayof_jbyte_arraycopy());
3700   SET_ADDRESS(_stubs, StubRoutines::arrayof_jshort_arraycopy());
3701   SET_ADDRESS(_stubs, StubRoutines::arrayof_jint_arraycopy());
3702   SET_ADDRESS(_stubs, StubRoutines::arrayof_jlong_arraycopy());
3703   SET_ADDRESS(_stubs, StubRoutines::_arrayof_oop_arraycopy);
3704   SET_ADDRESS(_stubs, StubRoutines::_arrayof_oop_arraycopy_uninit);
3705 
3706   SET_ADDRESS(_stubs, StubRoutines::arrayof_jbyte_disjoint_arraycopy());
3707   SET_ADDRESS(_stubs, StubRoutines::arrayof_jshort_disjoint_arraycopy());
3708   SET_ADDRESS(_stubs, StubRoutines::arrayof_jint_disjoint_arraycopy());
3709   SET_ADDRESS(_stubs, StubRoutines::arrayof_jlong_disjoint_arraycopy());
3710   SET_ADDRESS(_stubs, StubRoutines::_arrayof_oop_disjoint_arraycopy);
3711   SET_ADDRESS(_stubs, StubRoutines::_arrayof_oop_disjoint_arraycopy_uninit);
3712 
3713   SET_ADDRESS(_stubs, StubRoutines::_checkcast_arraycopy);
3714   SET_ADDRESS(_stubs, StubRoutines::_checkcast_arraycopy_uninit);
3715 
3716   SET_ADDRESS(_stubs, StubRoutines::unsafe_arraycopy());
3717   SET_ADDRESS(_stubs, StubRoutines::generic_arraycopy());
3718 
3719   SET_ADDRESS(_stubs, StubRoutines::jbyte_fill());
3720   SET_ADDRESS(_stubs, StubRoutines::jshort_fill());
3721   SET_ADDRESS(_stubs, StubRoutines::jint_fill());
3722   SET_ADDRESS(_stubs, StubRoutines::arrayof_jbyte_fill());
3723   SET_ADDRESS(_stubs, StubRoutines::arrayof_jshort_fill());
3724   SET_ADDRESS(_stubs, StubRoutines::arrayof_jint_fill());
3725 
3726   SET_ADDRESS(_stubs, StubRoutines::data_cache_writeback());
3727   SET_ADDRESS(_stubs, StubRoutines::data_cache_writeback_sync());
3728 
3729   SET_ADDRESS(_stubs, StubRoutines::aescrypt_encryptBlock());
3730   SET_ADDRESS(_stubs, StubRoutines::aescrypt_decryptBlock());
3731   SET_ADDRESS(_stubs, StubRoutines::cipherBlockChaining_encryptAESCrypt());
3732   SET_ADDRESS(_stubs, StubRoutines::cipherBlockChaining_decryptAESCrypt());
3733   SET_ADDRESS(_stubs, StubRoutines::electronicCodeBook_encryptAESCrypt());
3734   SET_ADDRESS(_stubs, StubRoutines::electronicCodeBook_decryptAESCrypt());
3735   SET_ADDRESS(_stubs, StubRoutines::poly1305_processBlocks());
3736   SET_ADDRESS(_stubs, StubRoutines::counterMode_AESCrypt());
3737   SET_ADDRESS(_stubs, StubRoutines::ghash_processBlocks());
3738   SET_ADDRESS(_stubs, StubRoutines::chacha20Block());
3739   SET_ADDRESS(_stubs, StubRoutines::base64_encodeBlock());
3740   SET_ADDRESS(_stubs, StubRoutines::base64_decodeBlock());
3741   SET_ADDRESS(_stubs, StubRoutines::md5_implCompress());
3742   SET_ADDRESS(_stubs, StubRoutines::md5_implCompressMB());
3743   SET_ADDRESS(_stubs, StubRoutines::sha1_implCompress());
3744   SET_ADDRESS(_stubs, StubRoutines::sha1_implCompressMB());
3745   SET_ADDRESS(_stubs, StubRoutines::sha256_implCompress());
3746   SET_ADDRESS(_stubs, StubRoutines::sha256_implCompressMB());
3747   SET_ADDRESS(_stubs, StubRoutines::sha512_implCompress());
3748   SET_ADDRESS(_stubs, StubRoutines::sha512_implCompressMB());
3749   SET_ADDRESS(_stubs, StubRoutines::sha3_implCompress());
3750   SET_ADDRESS(_stubs, StubRoutines::sha3_implCompressMB());
3751 
3752   SET_ADDRESS(_stubs, StubRoutines::updateBytesCRC32());
3753 
3754   SET_ADDRESS(_stubs, StubRoutines::crc32c_table_addr());
3755   SET_ADDRESS(_stubs, StubRoutines::updateBytesCRC32C());
3756   SET_ADDRESS(_stubs, StubRoutines::updateBytesAdler32());
3757 
3758   SET_ADDRESS(_stubs, StubRoutines::multiplyToLen());
3759   SET_ADDRESS(_stubs, StubRoutines::squareToLen());
3760   SET_ADDRESS(_stubs, StubRoutines::mulAdd());
3761   SET_ADDRESS(_stubs, StubRoutines::montgomeryMultiply());
3762   SET_ADDRESS(_stubs, StubRoutines::montgomerySquare());
3763   SET_ADDRESS(_stubs, StubRoutines::bigIntegerRightShift());
3764   SET_ADDRESS(_stubs, StubRoutines::bigIntegerLeftShift());
3765   SET_ADDRESS(_stubs, StubRoutines::galoisCounterMode_AESCrypt());
3766 
3767   SET_ADDRESS(_stubs, StubRoutines::vectorizedMismatch());
3768 
3769   SET_ADDRESS(_stubs, StubRoutines::dexp());
3770   SET_ADDRESS(_stubs, StubRoutines::dlog());
3771   SET_ADDRESS(_stubs, StubRoutines::dlog10());
3772   SET_ADDRESS(_stubs, StubRoutines::dpow());
3773   SET_ADDRESS(_stubs, StubRoutines::dsin());
3774   SET_ADDRESS(_stubs, StubRoutines::dcos());
3775   SET_ADDRESS(_stubs, StubRoutines::dlibm_reduce_pi04l());
3776   SET_ADDRESS(_stubs, StubRoutines::dlibm_sin_cos_huge());
3777   SET_ADDRESS(_stubs, StubRoutines::dlibm_tan_cot_huge());
3778   SET_ADDRESS(_stubs, StubRoutines::dtan());
3779 
3780   SET_ADDRESS(_stubs, StubRoutines::f2hf_adr());
3781   SET_ADDRESS(_stubs, StubRoutines::hf2f_adr());
3782 
3783 #if defined(AMD64) && !defined(ZERO)
3784   SET_ADDRESS(_stubs, StubRoutines::x86::d2i_fixup());
3785   SET_ADDRESS(_stubs, StubRoutines::x86::f2i_fixup());
3786   SET_ADDRESS(_stubs, StubRoutines::x86::d2l_fixup());
3787   SET_ADDRESS(_stubs, StubRoutines::x86::f2l_fixup());
3788   SET_ADDRESS(_stubs, StubRoutines::x86::float_sign_mask());
3789   SET_ADDRESS(_stubs, StubRoutines::x86::float_sign_flip());
3790   SET_ADDRESS(_stubs, StubRoutines::x86::double_sign_mask());
3791   SET_ADDRESS(_stubs, StubRoutines::x86::double_sign_flip());
3792   SET_ADDRESS(_stubs, StubRoutines::x86::vector_popcount_lut());
3793   SET_ADDRESS(_stubs, StubRoutines::x86::vector_float_sign_mask());
3794   SET_ADDRESS(_stubs, StubRoutines::x86::vector_float_sign_flip());
3795   SET_ADDRESS(_stubs, StubRoutines::x86::vector_double_sign_mask());
3796   SET_ADDRESS(_stubs, StubRoutines::x86::vector_double_sign_flip());
3797   // The iota indices are ordered by type B/S/I/L/F/D, and the offset between two types is 64.
3798   // See C2_MacroAssembler::load_iota_indices().
3799   for (int i = 0; i < 6; i++) {
3800     SET_ADDRESS(_stubs, StubRoutines::x86::vector_iota_indices() + i * 64);
3801   }
3802 #endif
3803 #if defined(AARCH64) && !defined(ZERO)
3804   SET_ADDRESS(_stubs, StubRoutines::aarch64::d2i_fixup());
3805   SET_ADDRESS(_stubs, StubRoutines::aarch64::f2i_fixup());
3806   SET_ADDRESS(_stubs, StubRoutines::aarch64::d2l_fixup());
3807   SET_ADDRESS(_stubs, StubRoutines::aarch64::f2l_fixup());
3808   SET_ADDRESS(_stubs, StubRoutines::aarch64::float_sign_mask());
3809   SET_ADDRESS(_stubs, StubRoutines::aarch64::float_sign_flip());
3810   SET_ADDRESS(_stubs, StubRoutines::aarch64::double_sign_mask());
3811   SET_ADDRESS(_stubs, StubRoutines::aarch64::double_sign_flip());
3812   SET_ADDRESS(_stubs, StubRoutines::aarch64::zero_blocks());
3813   SET_ADDRESS(_stubs, StubRoutines::aarch64::count_positives());
3814   SET_ADDRESS(_stubs, StubRoutines::aarch64::count_positives_long());
3815   SET_ADDRESS(_stubs, StubRoutines::aarch64::large_array_equals());
3816   SET_ADDRESS(_stubs, StubRoutines::aarch64::compare_long_string_LL());
3817   SET_ADDRESS(_stubs, StubRoutines::aarch64::compare_long_string_UU());
3818   SET_ADDRESS(_stubs, StubRoutines::aarch64::compare_long_string_LU());
3819   SET_ADDRESS(_stubs, StubRoutines::aarch64::compare_long_string_UL());
3820   SET_ADDRESS(_stubs, StubRoutines::aarch64::string_indexof_linear_ul());
3821   SET_ADDRESS(_stubs, StubRoutines::aarch64::string_indexof_linear_ll());
3822   SET_ADDRESS(_stubs, StubRoutines::aarch64::string_indexof_linear_uu());
3823   SET_ADDRESS(_stubs, StubRoutines::aarch64::large_byte_array_inflate());
3824   SET_ADDRESS(_stubs, StubRoutines::aarch64::spin_wait());
3825 
3826   SET_ADDRESS(_stubs, StubRoutines::aarch64::large_arrays_hashcode(T_BOOLEAN));
3827   SET_ADDRESS(_stubs, StubRoutines::aarch64::large_arrays_hashcode(T_BYTE));
3828   SET_ADDRESS(_stubs, StubRoutines::aarch64::large_arrays_hashcode(T_SHORT));
3829   SET_ADDRESS(_stubs, StubRoutines::aarch64::large_arrays_hashcode(T_CHAR));
3830   SET_ADDRESS(_stubs, StubRoutines::aarch64::large_arrays_hashcode(T_INT));
3831 #endif
3832 
3833   // Blobs
3834   SET_ADDRESS(_blobs, SharedRuntime::get_handle_wrong_method_stub());
3835   SET_ADDRESS(_blobs, SharedRuntime::get_ic_miss_stub());
3836   SET_ADDRESS(_blobs, SharedRuntime::get_resolve_opt_virtual_call_stub());
3837   SET_ADDRESS(_blobs, SharedRuntime::get_resolve_virtual_call_stub());
3838   SET_ADDRESS(_blobs, SharedRuntime::get_resolve_static_call_stub());
3839   SET_ADDRESS(_blobs, SharedRuntime::deopt_blob()->entry_point());
3840   SET_ADDRESS(_blobs, SharedRuntime::polling_page_safepoint_handler_blob()->entry_point());
3841   SET_ADDRESS(_blobs, SharedRuntime::polling_page_return_handler_blob()->entry_point());
3842 #ifdef COMPILER2
3843   SET_ADDRESS(_blobs, SharedRuntime::polling_page_vectors_safepoint_handler_blob()->entry_point());
3844 #endif
3845 
3846   SET_ADDRESS(_blobs, SharedRuntime::throw_AbstractMethodError_entry());
3847   SET_ADDRESS(_blobs, SharedRuntime::throw_IncompatibleClassChangeError_entry());
3848   SET_ADDRESS(_blobs, SharedRuntime::throw_NullPointerException_at_call_entry());
3849   SET_ADDRESS(_blobs, SharedRuntime::throw_StackOverflowError_entry());
3850   SET_ADDRESS(_blobs, SharedRuntime::throw_delayed_StackOverflowError_entry());
3851 
3852   assert(_blobs_length <= _shared_blobs_max, "increase _shared_blobs_max to %d", _blobs_length);
3853   _final_blobs_length = _blobs_length;
3854   _complete = true;
3855   log_info(scc,init)("External addresses and stubs recorded");
3856 }
3857 
3858 void SCAddressTable::init_opto() {
3859 #ifdef COMPILER2
3860   // OptoRuntime Blobs
3861   SET_ADDRESS(_C2_blobs, OptoRuntime::uncommon_trap_blob()->entry_point());
3862   SET_ADDRESS(_C2_blobs, OptoRuntime::exception_blob()->entry_point());
3863   SET_ADDRESS(_C2_blobs, OptoRuntime::new_instance_Java());
3864   SET_ADDRESS(_C2_blobs, OptoRuntime::new_array_Java());
3865   SET_ADDRESS(_C2_blobs, OptoRuntime::new_array_nozero_Java());
3866   SET_ADDRESS(_C2_blobs, OptoRuntime::multianewarray2_Java());
3867   SET_ADDRESS(_C2_blobs, OptoRuntime::multianewarray3_Java());
3868   SET_ADDRESS(_C2_blobs, OptoRuntime::multianewarray4_Java());
3869   SET_ADDRESS(_C2_blobs, OptoRuntime::multianewarray5_Java());
3870   SET_ADDRESS(_C2_blobs, OptoRuntime::multianewarrayN_Java());
3871   SET_ADDRESS(_C2_blobs, OptoRuntime::vtable_must_compile_stub());
3872   SET_ADDRESS(_C2_blobs, OptoRuntime::complete_monitor_locking_Java());
3873   SET_ADDRESS(_C2_blobs, OptoRuntime::monitor_notify_Java());
3874   SET_ADDRESS(_C2_blobs, OptoRuntime::monitor_notifyAll_Java());
3875   SET_ADDRESS(_C2_blobs, OptoRuntime::rethrow_stub());
3876   SET_ADDRESS(_C2_blobs, OptoRuntime::slow_arraycopy_Java());
3877   SET_ADDRESS(_C2_blobs, OptoRuntime::register_finalizer_Java());
3878   SET_ADDRESS(_C2_blobs, OptoRuntime::class_init_barrier_Java());
3879 #if INCLUDE_JVMTI
3880   SET_ADDRESS(_C2_blobs, OptoRuntime::notify_jvmti_vthread_start());
3881   SET_ADDRESS(_C2_blobs, OptoRuntime::notify_jvmti_vthread_end());
3882   SET_ADDRESS(_C2_blobs, OptoRuntime::notify_jvmti_vthread_mount());
3883   SET_ADDRESS(_C2_blobs, OptoRuntime::notify_jvmti_vthread_unmount());
3884 #endif /* INCLUDE_JVMTI */
3885 #endif
3886 
3887   assert(_C2_blobs_length <= _C2_blobs_max, "increase _C2_blobs_max to %d", _C2_blobs_length);
3888   _final_blobs_length = MAX2(_final_blobs_length, (_shared_blobs_max + _C2_blobs_length));
3889   _opto_complete = true;
3890   log_info(scc,init)("OptoRuntime Blobs recorded");
3891 }
3892 
3893 void SCAddressTable::init_c1() {
3894 #ifdef COMPILER1
3895   // Runtime1 Blobs
3896   for (int i = 0; i < (int)(C1StubId::NUM_STUBIDS); i++) {
3897     C1StubId id = (C1StubId)i;
3898     if (Runtime1::blob_for(id) == nullptr) {
3899       log_info(scc, init)("C1 blob %s is missing", Runtime1::name_for(id));
3900       continue;
3901     }
3902     if (Runtime1::entry_for(id) == nullptr) {
3903       log_info(scc, init)("C1 blob %s is missing entry", Runtime1::name_for(id));
3904       continue;
3905     }
3906     address entry = Runtime1::entry_for(id);
3907     SET_ADDRESS(_C1_blobs, entry);
3908   }
3909 #if INCLUDE_G1GC
3910   if (UseG1GC) {
3911     G1BarrierSetC1* bs = (G1BarrierSetC1*)BarrierSet::barrier_set()->barrier_set_c1();
3912     address entry = bs->pre_barrier_c1_runtime_code_blob()->code_begin();
3913     SET_ADDRESS(_C1_blobs, entry);
3914     entry = bs->post_barrier_c1_runtime_code_blob()->code_begin();
3915     SET_ADDRESS(_C1_blobs, entry);
3916   }
3917 #endif // INCLUDE_G1GC
3918 #if INCLUDE_ZGC
3919   if (UseZGC) {
3920     ZBarrierSetC1* bs = (ZBarrierSetC1*)BarrierSet::barrier_set()->barrier_set_c1();
3921     SET_ADDRESS(_C1_blobs, bs->_load_barrier_on_oop_field_preloaded_runtime_stub);
3922     SET_ADDRESS(_C1_blobs, bs->_load_barrier_on_weak_oop_field_preloaded_runtime_stub);
3923     SET_ADDRESS(_C1_blobs, bs->_store_barrier_on_oop_field_with_healing);
3924     SET_ADDRESS(_C1_blobs, bs->_store_barrier_on_oop_field_without_healing);
3925   }
3926 #endif // INCLUDE_ZGC
3927 #if INCLUDE_SHENANDOAHGC
3928   if (UseShenandoahGC) {
3929     ShenandoahBarrierSetC1* bs = (ShenandoahBarrierSetC1*)BarrierSet::barrier_set()->barrier_set_c1();
3930     SET_ADDRESS(_C1_blobs, bs->pre_barrier_c1_runtime_code_blob()->code_begin());
3931     SET_ADDRESS(_C1_blobs, bs->load_reference_barrier_strong_rt_code_blob()->code_begin());
3932     SET_ADDRESS(_C1_blobs, bs->load_reference_barrier_strong_native_rt_code_blob()->code_begin());
3933     SET_ADDRESS(_C1_blobs, bs->load_reference_barrier_weak_rt_code_blob()->code_begin());
3934     SET_ADDRESS(_C1_blobs, bs->load_reference_barrier_phantom_rt_code_blob()->code_begin());
3935   }
3936 #endif // INCLUDE_SHENANDOAHGC
3937 #endif // COMPILER1
3938 
3939   assert(_C1_blobs_length <= _C1_blobs_max, "increase _C1_blobs_max to %d", _C1_blobs_length);
3940   _final_blobs_length = MAX2(_final_blobs_length, (_shared_blobs_max + _C2_blobs_max + _C1_blobs_length));
3941   _c1_complete = true;
3942   log_info(scc,init)("Runtime1 Blobs recorded");
3943 }
3944 
3945 #undef SET_ADDRESS
3946 #undef _extrs_max
3947 #undef _stubs_max
3948 #undef _blobs_max
3949 #undef _shared_blobs_max
3950 #undef _C1_blobs_max
3951 #undef _C2_blobs_max
3952 
3953 SCAddressTable::~SCAddressTable() {
3954   if (_extrs_addr != nullptr) {
3955     FREE_C_HEAP_ARRAY(address, _extrs_addr);
3956   }
3957   if (_stubs_addr != nullptr) {
3958     FREE_C_HEAP_ARRAY(address, _stubs_addr);
3959   }
3960   if (_blobs_addr != nullptr) {
3961     FREE_C_HEAP_ARRAY(address, _blobs_addr);
3962   }
3963 }
3964 
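     // Table of C string constants referenced from generated code. Each recorded string
     // keeps its length and hash so identical contents can be mapped to the same id
     // (see id_for_C_string()); the backing arrays are capped at MAX_STR_COUNT.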
3965 #define MAX_STR_COUNT 200
3966 static const char* _C_strings[MAX_STR_COUNT] = {nullptr};
3967 static int _C_strings_count = 0;
3968 static int _C_strings_s[MAX_STR_COUNT] = {0};
3969 static int _C_strings_id[MAX_STR_COUNT] = {0};
3970 static int _C_strings_len[MAX_STR_COUNT] = {0};
3971 static int _C_strings_hash[MAX_STR_COUNT] = {0};
3972 static int _C_strings_used = 0;
3973 
3974 void SCCache::load_strings() {
3975   uint strings_count  = _load_header->strings_count();
3976   if (strings_count == 0) {
3977     return;
3978   }
3979   uint strings_offset = _load_header->strings_offset();
3980   uint strings_size   = _load_header->entries_offset() - strings_offset;
3981   uint data_size = (uint)(strings_count * sizeof(uint));
3982   uint* sizes = (uint*)addr(strings_offset);
3983   uint* hashs = (uint*)addr(strings_offset + data_size);
3984   strings_size -= 2 * data_size;
3985   // We have to keep cached strings longer than the _cache buffer
3986   // because they are referenced from compiled code which may
3987   // still be executed on VM exit after _cache is freed.
3988   char* p = NEW_C_HEAP_ARRAY(char, strings_size+1, mtCode);
3989   memcpy(p, addr(strings_offset + 2 * data_size), strings_size);
3990   _C_strings_buf = p;
3991   assert(strings_count <= MAX_STR_COUNT, "sanity");
3992   for (uint i = 0; i < strings_count; i++) {
3993     _C_strings[i] = p;
3994     uint len = sizes[i];
3995     _C_strings_s[i] = i;
3996     _C_strings_id[i] = i;
3997     _C_strings_len[i] = len;
3998     _C_strings_hash[i] = hashs[i];
3999     p += len;
4000   }
4001   assert((uint)(p - _C_strings_buf) <= strings_size, "(" INTPTR_FORMAT " - " INTPTR_FORMAT ") = %d > %d ", p2i(p), p2i(_C_strings_buf), (uint)(p - _C_strings_buf), strings_size);
4002   _C_strings_count = strings_count;
4003   _C_strings_used  = strings_count;
4004   log_info(scc, init)("Load %d C strings at offset %d from Startup Code Cache '%s'", _C_strings_count, strings_offset, _cache_path);
4005 }
4006 
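     // Layout written here (and consumed by load_strings): all string sizes (each
     // including the terminating '\0'), then all hashes, then the string bytes back to back.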
4007 int SCCache::store_strings() {
4008   uint offset = _write_position;
4009   uint length = 0;
4010   if (_C_strings_used > 0) {
4011     // Write sizes first
4012     for (int i = 0; i < _C_strings_used; i++) {
4013       uint len = _C_strings_len[i] + 1; // Include terminating '\0'
4014       length += len;
4015       assert(len < 1000, "big string: %s", _C_strings[i]);
4016       uint n = write_bytes(&len, sizeof(uint));
4017       if (n != sizeof(uint)) {
4018         return -1;
4019       }
4020     }
4021     // Write hashs
4022     for (int i = 0; i < _C_strings_used; i++) {
4023       uint n = write_bytes(&(_C_strings_hash[i]), sizeof(uint));
4024       if (n != sizeof(uint)) {
4025         return -1;
4026       }
4027     }
4028     for (int i = 0; i < _C_strings_used; i++) {
4029       uint len = _C_strings_len[i] + 1; // Include terminating '\0'
4030       uint n = write_bytes(_C_strings[_C_strings_s[i]], len);
4031       if (n != len) {
4032         return -1;
4033       }
4034     }
4035     log_info(scc, exit)("Wrote %d C strings of total length %d at offset %d to Startup Code Cache '%s'",
4036                         _C_strings_used, length, offset, _cache_path);
4037   }
4038   return _C_strings_used;
4039 }
4040 
4041 void SCCache::add_new_C_string(const char* str) {
4042   assert(for_write(), "only when storing code");
4043   _table->add_C_string(str);
4044 }
4045 
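// Register an external C string referenced from compiled code so it can later
// be assigned an id. Strings are collected only after the base table and at
// least one compiler's blobs have been initialized.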
4046 void SCAddressTable::add_C_string(const char* str) {
4047   if (str != nullptr && _complete && (_opto_complete || _c1_complete)) {
4048     // Check if this string's address was already recorded
4049     for (int i = 0; i < _C_strings_count; i++) {
4050       if (_C_strings[i] == str) {
4051         return; // Found existing one
4052       }
4053     }
4054     // Add new one
4055     if (_C_strings_count < MAX_STR_COUNT) {
4056       log_trace(scc)("add_C_string: [%d] " INTPTR_FORMAT " %s", _C_strings_count, p2i(str), str);
4057       _C_strings_id[_C_strings_count] = -1; // Init
4058       _C_strings[_C_strings_count++] = str;
4059     } else {
4060       if (Thread::current()->is_Compiler_thread()) {
4061         CompileTask* task = ciEnv::current()->task();
4062         log_info(scc)("%d (L%d): Number of C strings > max %d %s",
4063                       task->compile_id(), task->comp_level(), MAX_STR_COUNT, str);
4064       }
4065     }
4066   }
4067 }
4068 
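// Return the recorded id for a registered C string address, assigning one
// lazily on first use. Strings with the same length and hash share a recorded
// id. Returns -1 if the address is not a registered C string.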
4069 int SCAddressTable::id_for_C_string(address str) {
4070   for (int i = 0; i < _C_strings_count; i++) {
4071     if (_C_strings[i] == (const char*)str) { // found
4072       int id = _C_strings_id[i];
4073       if (id >= 0) {
4074         assert(id < _C_strings_used, "%d >= %d", id, _C_strings_used);
4075         return id; // Found recorded
4076       }
4077       // Search for the same string content
4078       int len = (int)strlen((const char*)str);
4079       int hash = java_lang_String::hash_code((const jbyte*)str, len);
4080       for (int j = 0; j < _C_strings_used; j++) {
4081         if ((_C_strings_len[j] == len) && (_C_strings_hash[j] == hash)) {
4082           _C_strings_id[i] = j; // Found match
4083           return j;
4084         }
4085       }
4086       // Not found among recorded strings, add a new entry
4087       id = _C_strings_used++;
4088       _C_strings_s[id] = i;
4089       _C_strings_id[i] = id;
4090       _C_strings_len[id] = len;
4091       _C_strings_hash[id] = hash;
4092       return id;
4093     }
4094   }
4095   return -1;
4096 }
4097 
4098 address SCAddressTable::address_for_C_string(int idx) {
4099   assert(idx < _C_strings_count, "sanity");
4100   return (address)_C_strings[idx];
4101 }
4102 
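// Linear search for an address in a table; returns its index or -1 if absent.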
4103 int search_address(address addr, address* table, uint length) {
4104   for (int i = 0; i < (int)length; i++) {
4105     if (table[i] == addr) {
4106       return i;
4107     }
4108   }
4109   return -1;
4110 }
4111 
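// Map a recorded id back to an address. The id space is laid out as:
//   [0, _extrs_length)                        external runtime entries
//   [_extrs_length, +_stubs_length)           stub routines
//   [.., +_final_blobs_length)                code blobs
//   [_all_max, _all_max + _C_strings_count)   cached C strings
// Ids above that range are decoded as a byte distance from os::init.
// For example (hypothetical sizes), with _extrs_length == 100 and
// _stubs_length == 50, id 120 resolves to _stubs_addr[20].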
4112 address SCAddressTable::address_for_id(int idx) {
4113   if (!_complete) {
4114     fatal("SCA table is not complete");
4115   }
4116   if (idx == -1) {
4117     return (address)-1;
4118   }
4119   uint id = (uint)idx;
4120   if (id >= _all_max && idx < (_all_max + _C_strings_count)) {
4121     return address_for_C_string(idx - _all_max);
4122   }
4123   if (idx < 0 || id == (_extrs_length + _stubs_length + _final_blobs_length)) {
4124     fatal("Incorrect id %d for SCA table", id);
4125   }
4126   if (idx > (_all_max + _C_strings_count)) {
4127     return (address)os::init + idx;
4128   }
4129   if (id < _extrs_length) {
4130     return _extrs_addr[id];
4131   }
4132   id -= _extrs_length;
4133   if (id < _stubs_length) {
4134     return _stubs_addr[id];
4135   }
4136   id -= _stubs_length;
4137   if (id < _final_blobs_length) {
4138     return _blobs_addr[id];
4139   }
4140   return nullptr;
4141 }
4142 
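// Map an address referenced by compiled code to a recorded id: C string ids
// are biased by _all_max, stub and blob ids are biased past the external
// entries, and a runtime target that is not in the table but resolves to a
// symbol+offset is encoded as a byte distance from os::init. Any other
// unknown address is fatal.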
4143 int SCAddressTable::id_for_address(address addr, RelocIterator reloc, CodeBuffer* buffer) {
4144   int id = -1;
4145   if (addr == (address)-1) { // Static call stub has jump to itself
4146     return id;
4147   }
4148   if (!_complete) {
4149     fatal("SCA table is not complete");
4150   }
4151   // Search for C string
4152   id = id_for_C_string(addr);
4153   if (id >= 0) {
4154     return id + _all_max;
4155   }
4156   if (StubRoutines::contains(addr)) {
4157     // Search in stubs
4158     id = search_address(addr, _stubs_addr, _stubs_length);
4159     if (id < 0) {
4160       StubCodeDesc* desc = StubCodeDesc::desc_for(addr);
4161       if (desc == nullptr) {
4162         desc = StubCodeDesc::desc_for(addr + frame::pc_return_offset);
4163       }
4164       const char* stub_name = (desc != nullptr) ? desc->name() : "<unknown>";
4165       fatal("Address " INTPTR_FORMAT " for Stub:%s is missing in SCA table", p2i(addr), stub_name);
4166     } else {
4167       id += _extrs_length;
4168     }
4169   } else {
4170     CodeBlob* cb = CodeCache::find_blob(addr);
4171     if (cb != nullptr) {
4172       // Search in code blobs
4173       id = search_address(addr, _blobs_addr, _final_blobs_length);
4174       if (id < 0) {
4175         fatal("Address " INTPTR_FORMAT " for Blob:%s is missing in SCA table", p2i(addr), cb->name());
4176       } else {
4177         id += _extrs_length + _stubs_length;
4178       }
4179     } else {
4180       // Search in runtime functions
4181       id = search_address(addr, _extrs_addr, _extrs_length);
4182       if (id < 0) {
4183         ResourceMark rm;
4184         const int buflen = 1024;
4185         char* func_name = NEW_RESOURCE_ARRAY(char, buflen);
4186         int offset = 0;
4187         if (os::dll_address_to_function_name(addr, func_name, buflen, &offset)) {
4188           if (offset > 0) {
4189             // Could be address of C string
4190             uint dist = (uint)pointer_delta(addr, (address)os::init, 1);
4191             CompileTask* task = ciEnv::current()->task();
4192             uint compile_id = 0;
4193             uint comp_level = 0;
4194             if (task != nullptr) { // this could be called from compiler runtime initialization (compiler blobs)
4195               compile_id = task->compile_id();
4196               comp_level = task->comp_level();
4197             }
4198             log_info(scc)("%d (L%d): Address " INTPTR_FORMAT " (offset %d) for runtime target '%s' is missing in SCA table",
4199                           compile_id, comp_level, p2i(addr), dist, (const char*)addr);
4200             assert(dist > (uint)(_all_max + MAX_STR_COUNT), "change encoding of distance");
4201             return dist;
4202           }
4203           fatal("Address " INTPTR_FORMAT " for runtime target '%s+%d' is missing in SCA table", p2i(addr), func_name, offset);
4204         } else {
4205           os::print_location(tty, p2i(addr), true);
4206           reloc.print_current_on(tty);
4207 #ifndef PRODUCT
4208           buffer->print_on(tty);
4209           buffer->decode();
4210 #endif // !PRODUCT
4211           fatal("Address " INTPTR_FORMAT " for <unknown> is missing in SCA table", p2i(addr));
4212         }
4213       }
4214     }
4215   }
4216   return id;
4217 }
4218 
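// Initialize the AOT runtime constants from the current runtime configuration:
// when a CardTableBarrierSet is in use, record its grain and card shifts.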
4219 void AOTRuntimeConstants::initialize_from_runtime() {
4220   BarrierSet* bs = BarrierSet::barrier_set();
4221   if (bs->is_a(BarrierSet::CardTableBarrierSet)) {
4222     CardTableBarrierSet* ctbs = (CardTableBarrierSet*)bs;
4223     _aot_runtime_constants._grain_shift = ctbs->grain_shift();
4224     _aot_runtime_constants._card_shift = ctbs->card_shift();
4225   }
4226 }
4227 
4228 AOTRuntimeConstants AOTRuntimeConstants::_aot_runtime_constants;
4229 
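// Null-terminated list of the addresses of the runtime constant fields above.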
4230 address AOTRuntimeConstants::_field_addresses_list[] = {
4231   grain_shift_address(),
4232   card_shift_address(),
4233   nullptr
4234 };
4235 
4236 
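// Close the cache for new nmethod readers and wait for active readers to
// leave. A non-negative _nmethod_readers value means the cache is open with
// that many active readers; flipping it to -(readers + 1) rejects new
// ReadingMarks, and each exiting reader then counts the value up towards -1.
// For example, with 3 active readers the counter goes 3 -> -4 on close, then
// -3, -2, -1 as the readers exit.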
4237 void SCCache::wait_for_no_nmethod_readers() {
4238   while (true) {
4239     int cur = Atomic::load(&_nmethod_readers);
4240     int upd = -(cur + 1);
4241     if (cur >= 0 && Atomic::cmpxchg(&_nmethod_readers, cur, upd) == cur) {
4242       // Success, no new readers should appear.
4243       break;
4244     }
4245   }
4246 
4247   // Now wait for all readers to leave.
4248   SpinYield w;
4249   while (Atomic::load(&_nmethod_readers) != -1) {
4250     w.wait();
4251   }
4252 }
4253 
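// RAII mark for a thread reading nmethods from the cache. Construction bumps
// the reader count if the cache is still open, otherwise sets _failed so the
// caller can skip the read. Destruction undoes the count, honoring the sign
// flip performed by wait_for_no_nmethod_readers().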
4254 SCCache::ReadingMark::ReadingMark() {
4255   while (true) {
4256     int cur = Atomic::load(&_nmethod_readers);
4257     if (cur < 0) {
4258       // Cache is already closed, cannot proceed.
4259       _failed = true;
4260       return;
4261     }
4262     if (Atomic::cmpxchg(&_nmethod_readers, cur, cur + 1) == cur) {
4263       // Successfully recorded ourselves as entered.
4264       _failed = false;
4265       return;
4266     }
4267   }
4268 }
4269 
4270 SCCache::ReadingMark::~ReadingMark() {
4271   if (_failed) {
4272     return;
4273   }
4274   while (true) {
4275     int cur = Atomic::load(&_nmethod_readers);
4276     if (cur > 0) {
4277       // Cache is open, we are counting down towards 0.
4278       if (Atomic::cmpxchg(&_nmethod_readers, cur, cur - 1) == cur) {
4279         return;
4280       }
4281     } else {
4282       // Cache is closed, we are counting up towards -1.
4283       if (Atomic::cmpxchg(&_nmethod_readers, cur, cur + 1) == cur) {
4284         return;
4285       }
4286     }
4287   }
4288 }