1 /*
   2  * Copyright (c) 2023, 2025, Oracle and/or its affiliates. All rights reserved.
   3  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
   4  *
   5  * This code is free software; you can redistribute it and/or modify it
   6  * under the terms of the GNU General Public License version 2 only, as
   7  * published by the Free Software Foundation.
   8  *
   9  * This code is distributed in the hope that it will be useful, but WITHOUT
  10  * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
  11  * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
  12  * version 2 for more details (a copy is included in the LICENSE file that
  13  * accompanied this code).
  14  *
  15  * You should have received a copy of the GNU General Public License version
  16  * 2 along with this work; if not, write to the Free Software Foundation,
  17  * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
  18  *
  19  * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
  20  * or visit www.oracle.com if you need additional information or have any
  21  * questions.
  22  *
  23  */
  24 
  25 #include "precompiled.hpp"
  26 #include "asm/macroAssembler.hpp"
  27 #include "cds/cdsAccess.hpp"
  28 #include "cds/cdsConfig.hpp"
  29 #include "cds/heapShared.hpp"
  30 #include "cds/metaspaceShared.hpp"
  31 #include "ci/ciConstant.hpp"
  32 #include "ci/ciEnv.hpp"
  33 #include "ci/ciField.hpp"
  34 #include "ci/ciMethod.hpp"
  35 #include "ci/ciMethodData.hpp"
  36 #include "ci/ciObject.hpp"
  37 #include "ci/ciUtilities.inline.hpp"
  38 #include "classfile/javaAssertions.hpp"
  39 #include "classfile/stringTable.hpp"
  40 #include "classfile/symbolTable.hpp"
  41 #include "classfile/systemDictionary.hpp"
  42 #include "classfile/vmClasses.hpp"
  43 #include "classfile/vmIntrinsics.hpp"
  44 #include "code/codeBlob.hpp"
  45 #include "code/codeCache.hpp"
  46 #include "code/oopRecorder.inline.hpp"
  47 #include "code/SCCache.hpp"
  48 #include "compiler/abstractCompiler.hpp"
  49 #include "compiler/compilationPolicy.hpp"
  50 #include "compiler/compileBroker.hpp"
  51 #include "compiler/compileTask.hpp"
  52 #include "gc/g1/g1BarrierSetRuntime.hpp"
  53 #include "gc/shared/gcConfig.hpp"
  54 #include "logging/log.hpp"
  55 #include "memory/universe.hpp"
  56 #include "oops/klass.inline.hpp"
  57 #include "oops/method.inline.hpp"
  58 #include "oops/trainingData.hpp"
  59 #include "prims/jvmtiThreadState.hpp"
  60 #include "runtime/atomic.hpp"
  61 #include "runtime/flags/flagSetting.hpp"
  62 #include "runtime/globals_extension.hpp"
  63 #include "runtime/handles.inline.hpp"
  64 #include "runtime/java.hpp"
  65 #include "runtime/jniHandles.inline.hpp"
  66 #include "runtime/os.inline.hpp"
  67 #include "runtime/sharedRuntime.hpp"
  68 #include "runtime/stubCodeGenerator.hpp"
  69 #include "runtime/stubRoutines.hpp"
  70 #include "runtime/timerTrace.hpp"
  71 #include "runtime/threadIdentifier.hpp"
  72 #include "utilities/ostream.hpp"
  73 #include "utilities/spinYield.hpp"
  74 #ifdef COMPILER1
  75 #include "c1/c1_Runtime1.hpp"
  76 #include "c1/c1_LIRAssembler.hpp"
  77 #include "gc/shared/c1/barrierSetC1.hpp"
  78 #include "gc/g1/c1/g1BarrierSetC1.hpp"
  79 #if INCLUDE_SHENANDOAHGC
  80 #include "gc/shenandoah/c1/shenandoahBarrierSetC1.hpp"
  81 #endif
  82 #include "gc/z/c1/zBarrierSetC1.hpp"
  83 #endif
  84 #ifdef COMPILER2
  85 #include "opto/runtime.hpp"
  86 #endif
  87 #if INCLUDE_JVMCI
  88 #include "jvmci/jvmci.hpp"
  89 #endif
  90 #if INCLUDE_SHENANDOAHGC
  91 #include "gc/shenandoah/shenandoahRuntime.hpp"
  92 #endif
  93 
  94 #include <sys/stat.h>
  95 #include <errno.h>
  96 
  97 #ifndef O_BINARY       // if defined (Win32) use binary files.
  98 #define O_BINARY 0     // otherwise do nothing.
  99 #endif
 100 
 101 static elapsedTimer _t_totalLoad;
 102 static elapsedTimer _t_totalRegister;
 103 static elapsedTimer _t_totalFind;
 104 static elapsedTimer _t_totalStore;
 105 
 106 SCCache* SCCache::_cache = nullptr;
 107 
 108 static bool enable_timers() {
 109   return CITime || log_is_enabled(Info, init);
 110 }
 111 
 112 static void exit_vm_on_load_failure() {
 113   // Treat SCC warnings as errors when RequireSharedSpaces is on.
 114   if (RequireSharedSpaces) {
 115     vm_exit_during_initialization("Unable to use startup cached code.", nullptr);
 116   }
 117 }
 118 
 119 static void exit_vm_on_store_failure() {
 120   // Treat SCC warnings as errors when RequireSharedSpaces is on.
 121   if (RequireSharedSpaces) {
 122     tty->print_cr("Unable to create startup cached code.");
 123     // Failure during AOT code caching, we don't want to dump core
 124     vm_abort(false);
 125   }
 126 }
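
// Called during VM startup: bails out if code loading is requested without CDS,
// defaults ClassInitBarrierMode when caching is enabled (and clears it otherwise),
// and opens the cache file named by CachedCodeFile.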
 127 void SCCache::initialize() {
 128   if (LoadCachedCode && !UseSharedSpaces) {
 129     return;
 130   }
 131   if (StoreCachedCode || LoadCachedCode) {
 132     if (FLAG_IS_DEFAULT(ClassInitBarrierMode)) {
 133       FLAG_SET_DEFAULT(ClassInitBarrierMode, 1);
 134     }
 135   } else if (ClassInitBarrierMode > 0) {
 136     log_info(scc, init)("Set ClassInitBarrierMode to 0 because StoreCachedCode and LoadCachedCode are false.");
 137     FLAG_SET_DEFAULT(ClassInitBarrierMode, 0);
 138   }
 139   if ((LoadCachedCode || StoreCachedCode) && CachedCodeFile != nullptr) {
 140     const int len = (int)strlen(CachedCodeFile);
 141     // cache file path
 142     char* path  = NEW_C_HEAP_ARRAY(char, len+1, mtCode);
 143     memcpy(path, CachedCodeFile, len);
 144     path[len] = '\0';
 145     if (!open_cache(path)) {
 146       exit_vm_on_load_failure();
 147       return;
 148     }
 149     if (StoreCachedCode) {
 150       FLAG_SET_DEFAULT(FoldStableValues, false);
 151       FLAG_SET_DEFAULT(ForceUnreachable, true);
 152     }
 153     FLAG_SET_DEFAULT(DelayCompilerStubsGeneration, false);
 154   }
 155 }
 156 
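// Second initialization step, called after the Universe is initialized: checks that
// the card table base can be encoded in relocations when writing, initializes
// AOTRuntimeConstants, and verifies the recorded VM configuration.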
 157 void SCCache::init2() {
 158   if (!is_on()) {
 159     return;
 160   }
 161   // After Universe initialized
 162   BarrierSet* bs = BarrierSet::barrier_set();
 163   if (bs->is_a(BarrierSet::CardTableBarrierSet)) {
 164     address byte_map_base = ci_card_table_address_as<address>();
 165     if (is_on_for_write() && !external_word_Relocation::can_be_relocated(byte_map_base)) {
 166       // Bail out since we can't encode card table base address with relocation
 167       log_warning(scc, init)("Can't create Startup Code Cache because card table base address is not relocatable: " INTPTR_FORMAT, p2i(byte_map_base));
 168       close();
 169       exit_vm_on_load_failure();
 170     }
 171   }
 172   // Initialize AOT runtime constants as appropriate for this runtime
 173   AOTRuntimeConstants::initialize_from_runtime();
 174 
 175   if (!verify_vm_config()) {
 176     close();
 177     exit_vm_on_load_failure();
 178   }
 179 }
 180 
 181 void SCCache::print_timers_on(outputStream* st) {
 182   if (LoadCachedCode) {
 183     st->print_cr ("    SC Load Time:         %7.3f s", _t_totalLoad.seconds());
 184     st->print_cr ("      nmethod register:     %7.3f s", _t_totalRegister.seconds());
 185     st->print_cr ("      find cached code:     %7.3f s", _t_totalFind.seconds());
 186   }
 187   if (StoreCachedCode) {
 188     st->print_cr ("    SC Store Time:        %7.3f s", _t_totalStore.seconds());
 189   }
 190 }
 191 
 192 bool SCCache::is_C3_on() {
 193 #if INCLUDE_JVMCI
 194   if (UseJVMCICompiler) {
 195     return (StoreCachedCode || LoadCachedCode) && UseC2asC3;
 196   }
 197 #endif
 198   return false;
 199 }
 200 
 201 bool SCCache::is_code_load_thread_on() {
 202   return UseCodeLoadThread && LoadCachedCode;
 203 }
 204 
 205 bool SCCache::gen_preload_code(ciMethod* m, int entry_bci) {
 206   VM_ENTRY_MARK;
 207   return (entry_bci == InvocationEntryBci) && is_on() && _cache->gen_preload_code() &&
 208          CDSAccess::can_generate_cached_code(m->get_Method());
 209 }
 210 
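// Prints a short tag (compilation level, decompile count, loaded/failed/not-entrant
// state and compile id) for every cached entry that belongs to nm's method.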
 211 static void print_helper(nmethod* nm, outputStream* st) {
 212   SCCache::iterate([&](SCCEntry* e) {
 213     if (e->method() == nm->method()) {
 214       ResourceMark rm;
 215       stringStream ss;
 216       ss.print("A%s%d", (e->for_preload() ? "P" : ""), e->comp_level());
 217       if (e->decompile() > 0) {
 218         ss.print("+D%d", e->decompile());
 219       }
 220       ss.print("[%s%s%s]",
 221                (e->is_loaded()   ? "L" : ""),
 222                (e->load_fail()   ? "F" : ""),
 223                (e->not_entrant() ? "I" : ""));
 224       ss.print("#%d", e->comp_id());
 225 
 226       st->print(" %s", ss.freeze());
 227     }
 228   });
 229 }
 230 
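// Closes the cache: when it was open for reading, prints statistics, timers and
// per-nmethod details if the corresponding log tags are enabled, then frees the
// SCCache instance.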
 231 void SCCache::close() {
 232   if (is_on()) {
 233     if (SCCache::is_on_for_read()) {
 234       LogStreamHandle(Info, init) log;
 235       if (log.is_enabled()) {
 236         log.print_cr("Startup Code Cache statistics (when closed): ");
 237         SCCache::print_statistics_on(&log);
 238         log.cr();
 239         SCCache::print_timers_on(&log);
 240 
 241         LogStreamHandle(Info, scc, init) log1;
 242         if (log1.is_enabled()) {
 243           SCCache::print_unused_entries_on(&log1);
 244         }
 245 
 246         LogStreamHandle(Info, scc, codecache) info_scc;
 247         if (info_scc.is_enabled()) {
 248           NMethodIterator iter(NMethodIterator::all);
 249           while (iter.next()) {
 250             nmethod* nm = iter.method();
 251             if (nm->is_in_use() && !nm->is_native_method() && !nm->is_osr_method()) {
 252               info_scc.print("%5d:%c%c%c%d:", nm->compile_id(),
 253                              (nm->method()->is_shared() ? 'S' : ' '),
 254                              (nm->is_scc() ? 'A' : ' '),
 255                              (nm->preloaded() ? 'P' : ' '),
 256                              nm->comp_level());
 257               print_helper(nm, &info_scc);
 258               info_scc.print(": ");
 259               CompileTask::print(&info_scc, nm, nullptr, true /*short_form*/);
 260 
 261               LogStreamHandle(Debug, scc, codecache) debug_scc;
 262               if (debug_scc.is_enabled()) {
 263                 MethodTrainingData* mtd = MethodTrainingData::find(methodHandle(Thread::current(), nm->method()));
 264                 if (mtd != nullptr) {
 265                   mtd->iterate_all_compiles([&](CompileTrainingData* ctd) {
 266                     debug_scc.print("     CTD: "); ctd->print_on(&debug_scc); debug_scc.cr();
 267                   });
 268                 }
 269               }
 270             }
 271           }
 272         }
 273       }
 274     }
 275 
 276     delete _cache; // Free memory
 277     _cache = nullptr;
 278   }
 279 }
 280 
 281 void SCCache::invalidate(SCCEntry* entry) {
 282   // This can be called concurrently
 283   if (entry != nullptr && is_on()) { // Request could come after cache is closed.
 284     _cache->invalidate_entry(entry);
 285   }
 286 }
 287 
 288 bool SCCache::is_loaded(SCCEntry* entry) {
 289   if (is_on() && _cache->cache_buffer() != nullptr) {
 290     return (uint)((char*)entry - _cache->cache_buffer()) < _cache->load_size();
 291   }
 292   return false;
 293 }
 294 
 295 void SCCache::preload_code(JavaThread* thread) {
 296   if ((ClassInitBarrierMode == 0) || !is_on_for_read()) {
 297     return;
 298   }
 299   if ((DisableCachedCode & (1 << 3)) != 0) {
 300     return; // no preloaded code (level 5);
 301   }
 302   _cache->preload_startup_code(thread);
 303 }
 304 
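// Looks up a cached code entry for the given method and compilation level.
// Individual tiers can be disabled via bits 0 (level 1), 1 (level 2) and 2 (level 4)
// of DisableCachedCode; the lookup key is the hash of the method's name and signature.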
 305 SCCEntry* SCCache::find_code_entry(const methodHandle& method, uint comp_level) {
 306   switch (comp_level) {
 307     case CompLevel_simple:
 308       if ((DisableCachedCode & (1 << 0)) != 0) {
 309         return nullptr;
 310       }
 311       break;
 312     case CompLevel_limited_profile:
 313       if ((DisableCachedCode & (1 << 1)) != 0) {
 314         return nullptr;
 315       }
 316       break;
 317     case CompLevel_full_optimization:
 318       if ((DisableCachedCode & (1 << 2)) != 0) {
 319         return nullptr;
 320       }
 321       break;
 322 
 323     default: return nullptr; // Level 1, 2, and 4 only
 324   }
 325   TraceTime t1("SC total find code time", &_t_totalFind, enable_timers(), false);
 326   if (is_on() && _cache->cache_buffer() != nullptr) {
 327     MethodData* md = method->method_data();
 328     uint decomp = (md == nullptr) ? 0 : md->decompile_count();
 329 
 330     ResourceMark rm;
 331     const char* target_name = method->name_and_sig_as_C_string();
 332     uint hash = java_lang_String::hash_code((const jbyte*)target_name, (int)strlen(target_name));
 333     SCCEntry* entry = _cache->find_entry(SCCEntry::Code, hash, comp_level, decomp);
 334     if (entry == nullptr) {
 335       log_info(scc, nmethod)("Missing entry for '%s' (comp_level %d, decomp: %d, hash: " UINT32_FORMAT_X_0 ")", target_name, (uint)comp_level, decomp, hash);
 336 #ifdef ASSERT
 337     } else {
 338       uint name_offset = entry->offset() + entry->name_offset();
 339       uint name_size   = entry->name_size(); // Includes '\0'
 340       const char* name = _cache->cache_buffer() + name_offset;
 341       if (strncmp(target_name, name, name_size) != 0) {
 342         assert(false, "SCA: saved nmethod's name '%s' is different from '%s', hash: " UINT32_FORMAT_X_0, name, target_name, hash);
 343       }
 344 #endif
 345     }
 346 
 347     DirectiveSet* directives = DirectivesStack::getMatchingDirective(method, nullptr);
 348     if (directives->IgnorePrecompiledOption) {
 349       LogStreamHandle(Info, scc, compilation) log;
 350       if (log.is_enabled()) {
 351         log.print("Ignore cached code entry on level %d for ", comp_level);
 352         method->print_value_on(&log);
 353       }
 354       return nullptr;
 355     }
 356 
 357     return entry;
 358   }
 359   return nullptr;
 360 }
 361 
 362 void SCCache::add_C_string(const char* str) {
 363   if (is_on_for_write()) {
 364     _cache->add_new_C_string(str);
 365   }
 366 }
 367 
 368 bool SCCache::allow_const_field(ciConstant& value) {
 369   return !is_on() || !StoreCachedCode // Restrict only when we generate cache
 370         // Can not trust primitive too   || !is_reference_type(value.basic_type())
 371         // May disable this too for now  || is_reference_type(value.basic_type()) && value.as_object()->should_be_constant()
 372         ;
 373 }
 374 
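// Opens the cache file for reading when LoadCachedCode is set and/or creates an
// empty in-memory cache for writing when StoreCachedCode is set.
// Returns false if the file is missing, unreadable or fails verification.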
 375 bool SCCache::open_cache(const char* cache_path) {
 376   if (LoadCachedCode) {
 377     log_info(scc)("Trying to load Startup Code Cache '%s'", cache_path);
 378     struct stat st;
 379     if (os::stat(cache_path, &st) != 0) {
 380       log_warning(scc, init)("Specified Startup Code Cache file not found '%s'", cache_path);
 381       return false;
 382     } else if ((st.st_mode & S_IFMT) != S_IFREG) {
 383       log_warning(scc, init)("Specified Startup Code Cache is not a file '%s'", cache_path);
 384       return false;
 385     }
 386     int fd = os::open(cache_path, O_RDONLY | O_BINARY, 0);
 387     if (fd < 0) {
 388       if (errno == ENOENT) {
 389         log_warning(scc, init)("Specified Startup Code Cache file not found '%s'", cache_path);
 390       } else {
 391         log_warning(scc, init)("Failed to open Startup Code Cache file '%s': (%s)", cache_path, os::strerror(errno));
 392       }
 393       return false;
 394     } else {
 395       log_info(scc, init)("Opened for read Startup Code Cache '%s'", cache_path);
 396     }
 397     SCCache* cache = new SCCache(cache_path, fd, (uint)st.st_size);
 398     bool failed = cache->failed();
 399     if (::close(fd) < 0) {
 400       log_warning(scc)("Failed to close for read Startup Code Cache file '%s'", cache_path);
 401       failed = true;
 402     }
 403     if (failed) {
 404       delete cache;
 405       _cache = nullptr;
 406       return false;
 407     }
 408     _cache = cache;
 409   }
 410   if (_cache == nullptr && StoreCachedCode) {
 411     SCCache* cache = new SCCache(cache_path, -1 /* fd */, 0 /* size */);
 412     if (cache->failed()) {
 413       delete cache;
 414       _cache = nullptr;
 415       return false;
 416     }
 417     _cache = cache;
 418   }
 419   return true;
 420 }
 421 
 422 class CachedCodeDirectory : public CachedCodeDirectoryInternal {
 423 public:
 424   int _some_number;
 425   InstanceKlass* _some_klass;
 426   size_t _my_data_length;
 427   void* _my_data;
 428 };
 429 
 430 // Skeleton code for including cached code in CDS:
 431 //
 432 // [1] Use CachedCodeDirectory to keep track of all of data related to cached code.
 433 //     E.g., you can build a hashtable to record what methods have been archived.
 434 //
 435 // [2] Memory for all data for cached code, including CachedCodeDirectory, should be
 436 //     allocated using CDSAccess::allocate_from_code_cache().
 437 //
 438 // [3] CachedCodeDirectory must be the very first allocation.
 439 //
 440 // [4] Two kinds of pointer can be stored:
 441 //     - A pointer p that points to metadata. CDSAccess::can_generate_cached_code(p) must return true.
 442 //     - A pointer to a buffer returned by CDSAccess::allocate_from_code_cache().
 443 //       (It's OK to point to an interior location within this buffer).
 444 //     Such pointers must be stored using CDSAccess::set_pointer()
 445 //
 446 // The buffers allocated by CDSAccess::allocate_from_code_cache() are in a contiguous region. At runtime, this
 447 // region is mapped to the beginning of the CodeCache (see _cds_code_space in codeCache.cpp). All the pointers
 448 // in this buffer are relocated as necessary (e.g., to account for the runtime location of the CodeCache).
 449 //
 450 // Example:
 451 //
 452 // # make sure hw.cds doesn't exist, so that it's regenerated (1.5 step training)
 453 // $ rm -f hw.cds; java -Xlog:cds,scc::uptime,tags,pid -XX:CacheDataStore=hw.cds -cp ~/tmp/HelloWorld.jar HelloWorld
 454 //
 455 // # After training is finished, hw.cds should contain a CachedCodeDirectory. You can see the effect of relocation
 456 // # from the [scc] log.
 457 // $ java -Xlog:cds,scc -XX:CacheDataStore=hw.cds -cp ~/tmp/HelloWorld.jar HelloWorld
 458 // [0.016s][info][scc] new workflow: cached code mapped at 0x7fef97ebc000
 459 // [0.016s][info][scc] _cached_code_directory->_some_klass     = 0x800009ca8 (java.lang.String)
 460 // [0.016s][info][scc] _cached_code_directory->_some_number    = 0
 461 // [0.016s][info][scc] _cached_code_directory->_my_data_length = 0
 462 // [0.016s][info][scc] _cached_code_directory->_my_data        = 0x7fef97ebc020 (32 bytes offset from base)
 463 //
 464 // The 1.5 step training may be hard to debug. If you want to run in a debugger, run the above training step
 465 // with an additional "-XX:+CDSManualFinalImage" command-line argument.
 466 
 467 // This is always at the very beginning of the mmaped CDS "cc" (cached code) region
 468 static CachedCodeDirectory* _cached_code_directory = nullptr;
 469 
 470 #if INCLUDE_CDS_JAVA_HEAP
 471 void SCCache::new_workflow_start_writing_cache() {
 472   CachedCodeDirectory* dir = (CachedCodeDirectory*)CDSAccess::allocate_from_code_cache(sizeof(CachedCodeDirectory));
 473   _cached_code_directory = dir;
 474 
 475   CDSAccess::set_pointer(&dir->_some_klass, vmClasses::String_klass());
 476 
 477   size_t n = 120;
 478   void* d = (void*)CDSAccess::allocate_from_code_cache(n);
 479   CDSAccess::set_pointer(&dir->_my_data, d);
 480 }
 481 
 482 void SCCache::new_workflow_end_writing_cache() {
 483   _cached_code_directory->dumptime_init_internal();
 484 }
 485 
 486 void SCCache::new_workflow_load_cache() {
 487   void* ptr = CodeCache::map_cached_code();
 488   if (ptr != nullptr) {
 489     ResourceMark rm;
 490     _cached_code_directory = (CachedCodeDirectory*)ptr;
 491 
 492     // CDS uses this to implement CDSAccess::get_archived_object(k)
 493     _cached_code_directory->runtime_init_internal();
 494 
 495     // At this point:
 496     // - CodeCache::initialize_heaps() has finished.
 497     // - CDS archive is fully mapped ("metadata", "heap" and "cached_code" regions are mapped)
 498     // - All pointers in the mapped CDS regions are relocated.
 499     // - CDSAccess::get_archived_object() works.
 500 
 501     // Data used by AOT compiler
 502     InstanceKlass* k = _cached_code_directory->_some_klass;
 503     log_info(scc)("new workflow: cached code mapped at %p", ptr);
 504     log_info(scc)("_cached_code_directory->_some_klass     = %p (%s)", k, k->external_name());
 505     log_info(scc)("_cached_code_directory->_some_number    = %d", _cached_code_directory->_some_number);
 506     log_info(scc)("_cached_code_directory->_my_data_length = %zu", _cached_code_directory->_my_data_length);
 507     log_info(scc)("_cached_code_directory->_my_data        = %p (%zu bytes offset from base)", _cached_code_directory->_my_data,
 508                   pointer_delta((address)_cached_code_directory->_my_data, (address)_cached_code_directory, 1));
 509   }
 510 }
 511 #endif // INCLUDE_CDS_JAVA_HEAP
 512 
 513 #define DATA_ALIGNMENT HeapWordSize
 514 
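// When reading, the constructor loads the whole cache file into a C-heap buffer and
// verifies the recorded JVM version and configuration. When writing, it allocates a
// store buffer of CachedCodeMaxSize bytes; SCCEntry records grow down from its end.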
 515 SCCache::SCCache(const char* cache_path, int fd, uint load_size) {
 516   _load_header = nullptr;
 517   _cache_path = cache_path;
 518   _for_read  = LoadCachedCode;
 519   _for_write = StoreCachedCode;
 520   _load_size = load_size;
 521   _store_size = 0;
 522   _write_position = 0;
 523   _closing  = false;
 524   _failed = false;
 525   _lookup_failed = false;
 526   _table = nullptr;
 527   _load_entries = nullptr;
 528   _store_entries  = nullptr;
 529   _C_strings_buf  = nullptr;
 530   _load_buffer = nullptr;
 531   _store_buffer = nullptr;
 532   _C_load_buffer = nullptr;
 533   _C_store_buffer = nullptr;
 534   _store_entries_cnt = 0;
 535   _gen_preload_code = false;
 536   _for_preload = false;       // changed while storing entry data
 537   _has_clinit_barriers = false;
 538 
 539   _compile_id = 0;
 540   _comp_level = 0;
 541 
 542   _use_meta_ptrs = UseSharedSpaces ? UseMetadataPointers : false;
 543 
 544   // Read the header at the beginning of the cache
 545   uint header_size = sizeof(SCCHeader);
 546   if (_for_read) {
 547     // Read cache
 548     _C_load_buffer = NEW_C_HEAP_ARRAY(char, load_size + DATA_ALIGNMENT, mtCode);
 549     _load_buffer = align_up(_C_load_buffer, DATA_ALIGNMENT);
 550     uint n = (uint)::read(fd, _load_buffer, load_size);
 551     if (n != load_size) {
 552       log_warning(scc, init)("Failed to read %d bytes at address " INTPTR_FORMAT " from Startup Code Cache file '%s'", load_size, p2i(_load_buffer), _cache_path);
 553       set_failed();
 554       return;
 555     }
 556     log_info(scc, init)("Read %d bytes at address " INTPTR_FORMAT " from Startup Code Cache '%s'", load_size, p2i(_load_buffer), _cache_path);
 557 
 558     _load_header = (SCCHeader*)addr(0);
 559     const char* scc_jvm_version = addr(_load_header->jvm_version_offset());
 560     if (strncmp(scc_jvm_version, VM_Version::internal_vm_info_string(), strlen(scc_jvm_version)) != 0) {
 561       log_warning(scc, init)("Disable Startup Code Cache: JVM version '%s' recorded in '%s' does not match current version '%s'", scc_jvm_version, _cache_path, VM_Version::internal_vm_info_string());
 562       set_failed();
 563       return;
 564     }
 565     if (!_load_header->verify_config(_cache_path, load_size)) {
 566       set_failed();
 567       return;
 568     }
 569     log_info(scc, init)("Read header from Startup Code Cache '%s'", cache_path);
 570     if (_load_header->has_meta_ptrs()) {
 571       assert(UseSharedSpaces, "should be verified already");
 572       _use_meta_ptrs = true; // Regardless of UseMetadataPointers
 573       UseMetadataPointers = true;
 574     }
 575     // Read strings
 576     load_strings();
 577   }
 578   if (_for_write) {
 579     _gen_preload_code = _use_meta_ptrs && (ClassInitBarrierMode > 0);
 580 
 581     _C_store_buffer = NEW_C_HEAP_ARRAY(char, CachedCodeMaxSize + DATA_ALIGNMENT, mtCode);
 582     _store_buffer = align_up(_C_store_buffer, DATA_ALIGNMENT);
 583     // Entries are allocated at the end of the buffer in reverse order (as on a stack).
 584     _store_entries = (SCCEntry*)align_up(_C_store_buffer + CachedCodeMaxSize, DATA_ALIGNMENT);
 585     log_info(scc, init)("Allocated store buffer at address " INTPTR_FORMAT " of size %d", p2i(_store_buffer), CachedCodeMaxSize);
 586   }
 587   _table = new SCAddressTable();
 588 }
 589 
 590 void SCCache::init_table() {
 591   SCCache* cache = SCCache::cache();
 592   if (cache != nullptr && cache->_table != nullptr) {
 593     cache->_table->init();
 594   }
 595 }
 596 
 597 void SCCache::init_opto_table() {
 598   SCCache* cache = SCCache::cache();
 599   if (cache != nullptr && cache->_table != nullptr) {
 600     cache->_table->init_opto();
 601   }
 602 }
 603 
 604 void SCCache::init_c1_table() {
 605   SCCache* cache = SCCache::cache();
 606   if (cache != nullptr && cache->_table != nullptr) {
 607     cache->_table->init_c1();
 608   }
 609 }
 610 
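// SCConfig::record() captures the VM configuration used at dump time (GC, compressed
// oops/class pointers, assertion and @Contended settings, alignments and shifts);
// SCConfig::verify() rejects a cache created with a configuration that differs from
// the current VM.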
 611 void SCConfig::record(bool use_meta_ptrs) {
 612   _flags = 0;
 613   if (use_meta_ptrs) {
 614     _flags |= metadataPointers;
 615   }
 616 #ifdef ASSERT
 617   _flags |= debugVM;
 618 #endif
 619   if (UseCompressedOops) {
 620     _flags |= compressedOops;
 621   }
 622   if (UseCompressedClassPointers) {
 623     _flags |= compressedClassPointers;
 624   }
 625   if (UseTLAB) {
 626     _flags |= useTLAB;
 627   }
 628   if (JavaAssertions::systemClassDefault()) {
 629     _flags |= systemClassAssertions;
 630   }
 631   if (JavaAssertions::userClassDefault()) {
 632     _flags |= userClassAssertions;
 633   }
 634   if (EnableContended) {
 635     _flags |= enableContendedPadding;
 636   }
 637   if (RestrictContended) {
 638     _flags |= restrictContendedPadding;
 639   }
 640   _compressedOopShift    = CompressedOops::shift();
 641   _compressedKlassShift  = CompressedKlassPointers::shift();
 642   _contendedPaddingWidth = ContendedPaddingWidth;
 643   _objectAlignment       = ObjectAlignmentInBytes;
 644   _gc                    = (uint)Universe::heap()->kind();
 645 }
 646 
 647 bool SCConfig::verify(const char* cache_path) const {
 648 #ifdef ASSERT
 649   if ((_flags & debugVM) == 0) {
 650     log_warning(scc, init)("Disable Startup Code Cache: '%s' was created by product VM, it can't be used by debug VM", cache_path);
 651     return false;
 652   }
 653 #else
 654   if ((_flags & debugVM) != 0) {
 655     log_warning(scc, init)("Disable Startup Code Cache: '%s' was created by debug VM, it can't be used by product VM", cache_path);
 656     return false;
 657   }
 658 #endif
 659 
 660   CollectedHeap::Name scc_gc = (CollectedHeap::Name)_gc;
 661   if (scc_gc != Universe::heap()->kind()) {
 662     log_warning(scc, init)("Disable Startup Code Cache: '%s' was created with different GC: %s vs current %s", cache_path, GCConfig::hs_err_name(scc_gc), GCConfig::hs_err_name());
 663     return false;
 664   }
 665 
 666   if (((_flags & compressedOops) != 0) != UseCompressedOops) {
 667     log_warning(scc, init)("Disable Startup Code Cache: '%s' was created with UseCompressedOops = %s", cache_path, UseCompressedOops ? "false" : "true");
 668     return false;
 669   }
 670   if (((_flags & compressedClassPointers) != 0) != UseCompressedClassPointers) {
 671     log_warning(scc, init)("Disable Startup Code Cache: '%s' was created with UseCompressedClassPointers = %s", cache_path, UseCompressedClassPointers ? "false" : "true");
 672     return false;
 673   }
 674 
 675   if (((_flags & systemClassAssertions) != 0) != JavaAssertions::systemClassDefault()) {
 676     log_warning(scc, init)("Disable Startup Code Cache: '%s' was created with JavaAssertions::systemClassDefault() = %s", cache_path, JavaAssertions::systemClassDefault() ? "disabled" : "enabled");
 677     return false;
 678   }
 679   if (((_flags & userClassAssertions) != 0) != JavaAssertions::userClassDefault()) {
 680     log_warning(scc, init)("Disable Startup Code Cache: '%s' was created with JavaAssertions::userClassDefault() = %s", cache_path, JavaAssertions::userClassDefault() ? "disabled" : "enabled");
 681     return false;
 682   }
 683 
 684   if (((_flags & enableContendedPadding) != 0) != EnableContended) {
 685     log_warning(scc, init)("Disable Startup Code Cache: '%s' was created with EnableContended = %s", cache_path, EnableContended ? "false" : "true");
 686     return false;
 687   }
 688   if (((_flags & restrictContendedPadding) != 0) != RestrictContended) {
 689     log_warning(scc, init)("Disable Startup Code Cache: '%s' was created with RestrictContended = %s", cache_path, RestrictContended ? "false" : "true");
 690     return false;
 691   }
 692   if (_compressedOopShift != (uint)CompressedOops::shift()) {
 693     log_warning(scc, init)("Disable Startup Code Cache: '%s' was created with CompressedOops::shift() = %d vs current %d", cache_path, _compressedOopShift, CompressedOops::shift());
 694     return false;
 695   }
 696   if (_compressedKlassShift != (uint)CompressedKlassPointers::shift()) {
 697     log_warning(scc, init)("Disable Startup Code Cache: '%s' was created with CompressedKlassPointers::shift() = %d vs current %d", cache_path, _compressedKlassShift, CompressedKlassPointers::shift());
 698     return false;
 699   }
 700   if (_contendedPaddingWidth != (uint)ContendedPaddingWidth) {
 701     log_warning(scc, init)("Disable Startup Code Cache: '%s' was created with ContendedPaddingWidth = %d vs current %d", cache_path, _contendedPaddingWidth, ContendedPaddingWidth);
 702     return false;
 703   }
 704   if (_objectAlignment != (uint)ObjectAlignmentInBytes) {
 705     log_warning(scc, init)("Disable Startup Code Cache: '%s' was created with ObjectAlignmentInBytes = %d vs current %d", cache_path, _objectAlignment, ObjectAlignmentInBytes);
 706     return false;
 707   }
 708   return true;
 709 }
 710 
 711 bool SCCHeader::verify_config(const char* cache_path, uint load_size) const {
 712   if (_version != SCC_VERSION) {
 713     log_warning(scc, init)("Disable Startup Code Cache: different SCC version %d vs %d recorded in '%s'", SCC_VERSION, _version, cache_path);
 714     return false;
 715   }
 716   if (_cache_size != load_size) {
 717     log_warning(scc, init)("Disable Startup Code Cache: different cached code size %d vs %d recorded in '%s'", load_size, _cache_size, cache_path);
 718     return false;
 719   }
 720   if (has_meta_ptrs() && !UseSharedSpaces) {
 721     log_warning(scc, init)("Disable Startup Code Cache: '%s' contains metadata pointers but CDS is off", cache_path);
 722     return false;
 723   }
 724   return true;
 725 }
 726 
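// Number of threads currently reading nmethods from the cache; the destructor waits
// for this to drop to zero before releasing the load buffer.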
 727 volatile int SCCache::_nmethod_readers = 0;
 728 
 729 SCCache::~SCCache() {
 730   if (_closing) {
 731     return; // Already closed
 732   }
 733   // Stop any further access to cache.
 734   // Checked on entry to load_nmethod() and store_nmethod().
 735   _closing = true;
 736   if (_for_read) {
 737     // Wait for all load_nmethod() calls to finish.
 738     wait_for_no_nmethod_readers();
 739   }
 740   // Prevent writing code into the cache while we are closing it.
 741   // This lock is held by ciEnv::register_method() which calls store_nmethod().
 742   MutexLocker ml(Compile_lock);
 743   if (for_write()) { // Finalize cache
 744     finish_write();
 745   }
 746   FREE_C_HEAP_ARRAY(char, _cache_path);
 747   if (_C_load_buffer != nullptr) {
 748     FREE_C_HEAP_ARRAY(char, _C_load_buffer);
 749     _C_load_buffer = nullptr;
 750     _load_buffer = nullptr;
 751   }
 752   if (_C_store_buffer != nullptr) {
 753     FREE_C_HEAP_ARRAY(char, _C_store_buffer);
 754     _C_store_buffer = nullptr;
 755     _store_buffer = nullptr;
 756   }
 757   if (_table != nullptr) {
 758     delete _table;
 759     _table = nullptr;
 760   }
 761 }
 762 
 763 SCCache* SCCache::open_for_read() {
 764   if (SCCache::is_on_for_read()) {
 765     return SCCache::cache();
 766   }
 767   return nullptr;
 768 }
 769 
 770 SCCache* SCCache::open_for_write() {
 771   if (SCCache::is_on_for_write()) {
 772     SCCache* cache = SCCache::cache();
 773     cache->clear_lookup_failed(); // Reset bit
 774     return cache;
 775   }
 776   return nullptr;
 777 }
 778 
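// Copies 'size' bytes from 'from' to 'to'. Word-wise disjoint copy is used when both
// addresses are HeapWord-aligned and the chunk is larger than two words; otherwise a
// plain byte copy is done.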
 779 void copy_bytes(const char* from, address to, uint size) {
 780   assert(size > 0, "sanity");
 781   bool by_words = true;
 782   if ((size > 2 * HeapWordSize) && (((intptr_t)from | (intptr_t)to) & (HeapWordSize - 1)) == 0) {
 783     // Use wordwise copies if possible:
 784     Copy::disjoint_words((HeapWord*)from,
 785                          (HeapWord*)to,
 786                          ((size_t)size + HeapWordSize-1) / HeapWordSize);
 787   } else {
 788     by_words = false;
 789     Copy::conjoint_jbytes(from, to, (size_t)size);
 790   }
 791   log_trace(scc)("Copied %d bytes as %s from " INTPTR_FORMAT " to " INTPTR_FORMAT, size, (by_words ? "HeapWord" : "bytes"), p2i(from), p2i(to));
 792 }
 793 
 794 void SCCReader::set_read_position(uint pos) {
 795   if (pos == _read_position) {
 796     return;
 797   }
 798   assert(pos < _cache->load_size(), "offset:%d >= file size:%d", pos, _cache->load_size());
 799   _read_position = pos;
 800 }
 801 
 802 bool SCCache::set_write_position(uint pos) {
 803   if (pos == _write_position) {
 804     return true;
 805   }
 806   if (_store_size < _write_position) {
 807     _store_size = _write_position; // Adjust during write
 808   }
 809   assert(pos < _store_size, "offset:%d >= file size:%d", pos, _store_size);
 810   _write_position = pos;
 811   return true;
 812 }
 813 
 814 static char align_buffer[256] = { 0 };
 815 
 816 bool SCCache::align_write() {
 817   // We are not executing code from the cache - we copy it byte by byte first.
 818   // No need for big alignment (or any alignment at all).
 819   uint padding = DATA_ALIGNMENT - (_write_position & (DATA_ALIGNMENT - 1));
 820   if (padding == DATA_ALIGNMENT) {
 821     return true;
 822   }
 823   uint n = write_bytes((const void*)&align_buffer, padding);
 824   if (n != padding) {
 825     return false;
 826   }
 827   log_trace(scc)("Adjust write alignment in Startup Code Cache '%s'", _cache_path);
 828   return true;
 829 }
 830 
 831 uint SCCache::write_bytes(const void* buffer, uint nbytes) {
 832   assert(for_write(), "Code Cache file is not created");
 833   if (nbytes == 0) {
 834     return 0;
 835   }
 836   uint new_position = _write_position + nbytes;
 837   if (new_position >= (uint)((char*)_store_entries - _store_buffer)) {
 838     log_warning(scc)("Failed to write %d bytes at offset %d to Startup Code Cache file '%s'. Increase CachedCodeMaxSize.",
 839                      nbytes, _write_position, _cache_path);
 840     set_failed();
 841     exit_vm_on_store_failure();
 842     return 0;
 843   }
 844   copy_bytes((const char* )buffer, (address)(_store_buffer + _write_position), nbytes);
 845   log_trace(scc)("Wrote %d bytes at offset %d to Startup Code Cache '%s'", nbytes, _write_position, _cache_path);
 846   _write_position += nbytes;
 847   if (_store_size < _write_position) {
 848     _store_size = _write_position;
 849   }
 850   return nbytes;
 851 }
 852 
 853 void SCCEntry::update_method_for_writing() {
 854   if (_method != nullptr) {
 855     _method = CDSAccess::method_in_cached_code(_method);
 856   }
 857 }
 858 
 859 void SCCEntry::print(outputStream* st) const {
 860   st->print_cr(" SCA entry " INTPTR_FORMAT " [kind: %d, id: " UINT32_FORMAT_X_0 ", offset: %d, size: %d, comp_level: %d, comp_id: %d, decompiled: %d, %s%s%s%s%s]",
 861                p2i(this), (int)_kind, _id, _offset, _size, _comp_level, _comp_id, _decompile,
 862                (_not_entrant? "not_entrant" : "entrant"),
 863                (_loaded ? ", loaded" : ""),
 864                (_has_clinit_barriers ? ", has_clinit_barriers" : ""),
 865                (_for_preload ? ", for_preload" : ""),
 866                (_ignore_decompile ? ", ignore_decomp" : ""));
 867 }
 868 
 869 void* SCCEntry::operator new(size_t x, SCCache* cache) {
 870   return (void*)(cache->add_entry());
 871 }
 872 
 873 bool skip_preload(methodHandle mh) {
 874   if (!mh->method_holder()->is_loaded()) {
 875     return true;
 876   }
 877   DirectiveSet* directives = DirectivesStack::getMatchingDirective(mh, nullptr);
 878   if (directives->DontPreloadOption) {
 879     LogStreamHandle(Info, scc, init) log;
 880     if (log.is_enabled()) {
 881       log.print("Exclude preloading code for ");
 882       mh->print_value_on(&log);
 883     }
 884     return true;
 885   }
 886   return false;
 887 }
 888 
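// Iterates the preload entries recorded in the cache header, links their holder
// classes if necessary, and submits each method to the CompileBroker at
// CompLevel_full_optimization so that its cached code is installed during startup.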
 889 void SCCache::preload_startup_code(TRAPS) {
 890   if (CompilationPolicy::compiler_count(CompLevel_full_optimization) == 0) {
 891     // Since we reuse the CompileBroker API to install cached code, we're required to have a JIT compiler for the
 892     // level we want (that is CompLevel_full_optimization).
 893     return;
 894   }
 895   assert(_for_read, "sanity");
 896   uint count = _load_header->entries_count();
 897   if (_load_entries == nullptr) {
 898     // Read it
 899     _search_entries = (uint*)addr(_load_header->entries_offset()); // [id, index]
 900     _load_entries = (SCCEntry*)(_search_entries + 2 * count);
 901     log_info(scc, init)("Read %d entries table at offset %d from Startup Code Cache '%s'", count, _load_header->entries_offset(), _cache_path);
 902   }
 903   uint preload_entries_count = _load_header->preload_entries_count();
 904   if (preload_entries_count > 0) {
 905     uint* entries_index = (uint*)addr(_load_header->preload_entries_offset());
 906     log_info(scc, init)("Load %d preload entries from Startup Code Cache '%s'", preload_entries_count, _cache_path);
 907     uint count = MIN2(preload_entries_count, SCLoadStop);
 908     for (uint i = SCLoadStart; i < count; i++) {
 909       uint index = entries_index[i];
 910       SCCEntry* entry = &(_load_entries[index]);
 911       if (entry->not_entrant()) {
 912         continue;
 913       }
 914       methodHandle mh(THREAD, entry->method());
 915       assert((mh.not_null() && MetaspaceShared::is_in_shared_metaspace((address)mh())), "sanity");
 916       if (skip_preload(mh)) {
 917         continue; // Exclude preloading for this method
 918       }
 919       assert(mh->method_holder()->is_loaded(), "");
 920       if (!mh->method_holder()->is_linked()) {
 921         assert(!HAS_PENDING_EXCEPTION, "");
 922         mh->method_holder()->link_class(THREAD);
 923         if (HAS_PENDING_EXCEPTION) {
 924           LogStreamHandle(Info, scc) log;
 925           if (log.is_enabled()) {
 926             ResourceMark rm;
 927             log.print("Linkage failed for %s: ", mh->method_holder()->external_name());
 928             THREAD->pending_exception()->print_value_on(&log);
 929             if (log_is_enabled(Debug, scc)) {
 930               THREAD->pending_exception()->print_on(&log);
 931             }
 932           }
 933           CLEAR_PENDING_EXCEPTION;
 934         }
 935       }
 936       if (mh->scc_entry() != nullptr) {
 937         // A second C2 compilation of the same method could happen for
 938         // different reasons without marking the first entry as not entrant.
 939         continue; // Keep old entry to avoid issues
 940       }
 941       mh->set_scc_entry(entry);
 942       CompileBroker::compile_method(mh, InvocationEntryBci, CompLevel_full_optimization, methodHandle(), 0, false, CompileTask::Reason_Preload, CHECK);
 943     }
 944   }
 945 }
 946 
 947 static bool check_entry(SCCEntry::Kind kind, uint id, uint comp_level, uint decomp, SCCEntry* entry) {
 948   if (entry->kind() == kind) {
 949     assert(entry->id() == id, "sanity");
 950     if (kind != SCCEntry::Code || (!entry->not_entrant() && !entry->has_clinit_barriers() &&
 951                                   (entry->comp_level() == comp_level) &&
 952                                   (entry->ignore_decompile() || entry->decompile() == decomp))) {
 953       return true; // Found
 954     }
 955   }
 956   return false;
 957 }
 958 
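// Binary search in the sorted [id, index] table, followed by a linear scan over
// neighboring entries with the same id (the same nmethod can be cached with
// different decompile counts or clinit-barrier variants).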
 959 SCCEntry* SCCache::find_entry(SCCEntry::Kind kind, uint id, uint comp_level, uint decomp) {
 960   assert(_for_read, "sanity");
 961   uint count = _load_header->entries_count();
 962   if (_load_entries == nullptr) {
 963     // Read it
 964     _search_entries = (uint*)addr(_load_header->entries_offset()); // [id, index]
 965     _load_entries = (SCCEntry*)(_search_entries + 2 * count);
 966     log_info(scc, init)("Read %d entries table at offset %d from Startup Code Cache '%s'", count, _load_header->entries_offset(), _cache_path);
 967   }
 968   // Binary search
 969   int l = 0;
 970   int h = count - 1;
 971   while (l <= h) {
 972     int mid = (l + h) >> 1;
 973     int ix = mid * 2;
 974     uint is = _search_entries[ix];
 975     if (is == id) {
 976       int index = _search_entries[ix + 1];
 977       SCCEntry* entry = &(_load_entries[index]);
 978       if (check_entry(kind, id, comp_level, decomp, entry)) {
 979         return entry; // Found
 980       }
 981       // Linear search around the match (could be the same nmethod with a different decompile count)
 982       for (int i = mid - 1; i >= l; i--) { // search back
 983         ix = i * 2;
 984         is = _search_entries[ix];
 985         if (is != id) {
 986           break;
 987         }
 988         index = _search_entries[ix + 1];
 989         SCCEntry* entry = &(_load_entries[index]);
 990         if (check_entry(kind, id, comp_level, decomp, entry)) {
 991           return entry; // Found
 992         }
 993       }
 994       for (int i = mid + 1; i <= h; i++) { // search forward
 995         ix = i * 2;
 996         is = _search_entries[ix];
 997         if (is != id) {
 998           break;
 999         }
1000         index = _search_entries[ix + 1];
1001         SCCEntry* entry = &(_load_entries[index]);
1002         if (check_entry(kind, id, comp_level, decomp, entry)) {
1003           return entry; // Found
1004         }
1005       }
1006       break; // No matching entry found (different decompile count or not_entrant state).
1007     } else if (is < id) {
1008       l = mid + 1;
1009     } else {
1010       h = mid - 1;
1011     }
1012   }
1013   return nullptr;
1014 }
1015 
1016 void SCCache::invalidate_entry(SCCEntry* entry) {
1017   assert(entry != nullptr, "all entries should be read already");
1018   if (entry->not_entrant()) {
1019     return; // Someone invalidated it already
1020   }
1021 #ifdef ASSERT
1022   bool found = false;
1023   if (_for_read) {
1024     uint count = _load_header->entries_count();
1025     uint i = 0;
1026     for(; i < count; i++) {
1027       if (entry == &(_load_entries[i])) {
1028         break;
1029       }
1030     }
1031     found = (i < count);
1032   }
1033   if (!found && _for_write) {
1034     uint count = _store_entries_cnt;
1035     uint i = 0;
1036     for(; i < count; i++) {
1037       if (entry == &(_store_entries[i])) {
1038         break;
1039       }
1040     }
1041     found = (i < count);
1042   }
1043   assert(found, "entry should exist");
1044 #endif
1045   entry->set_not_entrant();
1046   {
1047     uint name_offset = entry->offset() + entry->name_offset();
1048     const char* name;
1049     if (SCCache::is_loaded(entry)) {
1050       name = _load_buffer + name_offset;
1051     } else {
1052       name = _store_buffer + name_offset;
1053     }
1054     uint level   = entry->comp_level();
1055     uint comp_id = entry->comp_id();
1056     uint decomp  = entry->decompile();
1057     bool clinit_brs = entry->has_clinit_barriers();
1058     log_info(scc, nmethod)("Invalidated entry for '%s' (comp_id %d, comp_level %d, decomp: %d, hash: " UINT32_FORMAT_X_0 "%s)",
1059                            name, comp_id, level, decomp, entry->id(), (clinit_brs ? ", has clinit barriers" : ""));
1060   }
1061   if (entry->next() != nullptr) {
1062     entry = entry->next();
1063     assert(entry->has_clinit_barriers(), "expecting only such entries here");
1064     invalidate_entry(entry);
1065   }
1066 }
1067 
1068 extern "C" {
1069   static int uint_cmp(const void *i, const void *j) {
1070     uint a = *(uint *)i;
1071     uint b = *(uint *)j;
1072     return a > b ? 1 : a < b ? -1 : 0;
1073   }
1074 }
1075 
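// Writes the final cache image: header, JVM version string, code blobs (previously
// loaded entries first, then newly stored ones), C strings, the preload index, the
// sorted [id, index] search table and the SCCEntry array, and stores it to the
// cache file.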
1076 bool SCCache::finish_write() {
1077   if (!align_write()) {
1078     return false;
1079   }
1080   uint strings_offset = _write_position;
1081   int strings_count = store_strings();
1082   if (strings_count < 0) {
1083     return false;
1084   }
1085   if (!align_write()) {
1086     return false;
1087   }
1088   uint strings_size = _write_position - strings_offset;
1089 
1090   uint entries_count = 0; // Number of entrant (useful) code entries
1091   uint entries_offset = _write_position;
1092 
1093   uint store_count = _store_entries_cnt;
1094   if (store_count > 0) {
1095     uint header_size = (uint)align_up(sizeof(SCCHeader),  DATA_ALIGNMENT);
1096     const char* vm_version = VM_Version::internal_vm_info_string();
1097     uint vm_version_size = (uint)align_up(strlen(vm_version) + 1, DATA_ALIGNMENT);
1098     uint load_count = (_load_header != nullptr) ? _load_header->entries_count() : 0;
1099     uint code_count = store_count + load_count;
1100     uint search_count = code_count * 2;
1101     uint search_size = search_count * sizeof(uint);
1102     uint entries_size = (uint)align_up(code_count * sizeof(SCCEntry), DATA_ALIGNMENT); // In bytes
1103     uint preload_entries_cnt = 0;
1104     uint* preload_entries = NEW_C_HEAP_ARRAY(uint, code_count, mtCode);
1105     uint preload_entries_size = code_count * sizeof(uint);
1106     // _write_position should include code and strings
1107     uint code_alignment = code_count * DATA_ALIGNMENT; // We align_up code size when storing it.
1108     uint total_size = _write_position + _load_size + header_size + vm_version_size +
1109                      code_alignment + search_size + preload_entries_size + entries_size;
1110 
1111     // Create ordered search table for entries [id, index];
1112     uint* search = NEW_C_HEAP_ARRAY(uint, search_count, mtCode);
1113     char* buffer = NEW_C_HEAP_ARRAY(char, total_size + DATA_ALIGNMENT, mtCode);
1114     char* start = align_up(buffer, DATA_ALIGNMENT);
1115     char* current = start + header_size; // Skip header
1116     uint jvm_version_offset = current - start;
1117     copy_bytes(vm_version, (address)current, (uint)strlen(vm_version) + 1);
1118     current += vm_version_size;
1119 
1120     SCCEntry* entries_address = _store_entries; // Pointer to latest entry
1121     uint not_entrant_nb = 0;
1122     uint max_size = 0;
1123     // Add old entries first
1124     if (_for_read && (_load_header != nullptr)) {
1125       for(uint i = 0; i < load_count; i++) {
1126         if (_load_entries[i].load_fail()) {
1127           continue;
1128         }
1129         if (_load_entries[i].not_entrant()) {
1130           log_info(scc, exit)("Not entrant load entry id: %d, decomp: %d, hash: " UINT32_FORMAT_X_0, i, _load_entries[i].decompile(), _load_entries[i].id());
1131           not_entrant_nb++;
1132           if (_load_entries[i].for_preload()) {
1133             // Skip not entrant preload code:
1134             // we can't pre-load code which may have failing dependencies.
1135             continue;
1136           }
1137           _load_entries[i].set_entrant(); // Reset
1138         } else if (_load_entries[i].for_preload() && _load_entries[i].method() != nullptr) {
1139           // Record the first entrant version of the code for pre-loading
1140           preload_entries[preload_entries_cnt++] = entries_count;
1141         }
1142         {
1143           uint size = align_up(_load_entries[i].size(), DATA_ALIGNMENT);
1144           if (size > max_size) {
1145             max_size = size;
1146           }
1147           copy_bytes((_load_buffer + _load_entries[i].offset()), (address)current, size);
1148           _load_entries[i].set_offset(current - start); // New offset
1149           current += size;
1150           uint n = write_bytes(&(_load_entries[i]), sizeof(SCCEntry));
1151           if (n != sizeof(SCCEntry)) {
1152             FREE_C_HEAP_ARRAY(char, buffer);
1153             FREE_C_HEAP_ARRAY(uint, search);
1154             return false;
1155           }
1156           search[entries_count*2 + 0] = _load_entries[i].id();
1157           search[entries_count*2 + 1] = entries_count;
1158           entries_count++;
1159         }
1160       }
1161     }
1162     // SCCEntry entries were allocated in reverse order in the store buffer.
1163     // Process them in reverse order so that the earliest compiled code is cached first.
1164     for (int i = store_count - 1; i >= 0; i--) {
1165       if (entries_address[i].load_fail()) {
1166         continue;
1167       }
1168       if (entries_address[i].not_entrant()) {
1169         log_info(scc, exit)("Not entrant new entry comp_id: %d, comp_level: %d, decomp: %d, hash: " UINT32_FORMAT_X_0 "%s", entries_address[i].comp_id(), entries_address[i].comp_level(), entries_address[i].decompile(), entries_address[i].id(), (entries_address[i].has_clinit_barriers() ? ", has clinit barriers" : ""));
1170         not_entrant_nb++;
1171         if (entries_address[i].for_preload()) {
1172           // Skip not entrant preload code:
1173           // we can't pre-load code which may have failing dependencies.
1174           continue;
1175         }
1176         entries_address[i].set_entrant(); // Reset
1177       } else if (entries_address[i].for_preload() && entries_address[i].method() != nullptr) {
1178         // Record the first entrant version of the code for pre-loading
1179         preload_entries[preload_entries_cnt++] = entries_count;
1180       }
1181       {
1182         entries_address[i].set_next(nullptr); // clear pointers before storing data
1183         uint size = align_up(entries_address[i].size(), DATA_ALIGNMENT);
1184         if (size > max_size) {
1185           max_size = size;
1186         }
1187         copy_bytes((_store_buffer + entries_address[i].offset()), (address)current, size);
1188         entries_address[i].set_offset(current - start); // New offset
1189         entries_address[i].update_method_for_writing();
1190         current += size;
1191         uint n = write_bytes(&(entries_address[i]), sizeof(SCCEntry));
1192         if (n != sizeof(SCCEntry)) {
1193           FREE_C_HEAP_ARRAY(char, buffer);
1194           FREE_C_HEAP_ARRAY(uint, search);
1195           return false;
1196         }
1197         search[entries_count*2 + 0] = entries_address[i].id();
1198         search[entries_count*2 + 1] = entries_count;
1199         entries_count++;
1200       }
1201     }
1202     if (entries_count == 0) {
1203       log_info(scc, exit)("No new entries, cache file %s was not %s", _cache_path, (_for_read ? "updated" : "created"));
1204       FREE_C_HEAP_ARRAY(char, buffer);
1205       FREE_C_HEAP_ARRAY(uint, search);
1206       return true; // Nothing to write
1207     }
1208     assert(entries_count <= (store_count + load_count), "%d > (%d + %d)", entries_count, store_count, load_count);
1209     // Write strings
1210     if (strings_count > 0) {
1211       copy_bytes((_store_buffer + strings_offset), (address)current, strings_size);
1212       strings_offset = (current - start); // New offset
1213       current += strings_size;
1214     }
1215     uint preload_entries_offset = (current - start);
1216     preload_entries_size = preload_entries_cnt * sizeof(uint);
1217     if (preload_entries_size > 0) {
1218       copy_bytes((const char*)preload_entries, (address)current, preload_entries_size);
1219       current += preload_entries_size;
1220       log_info(scc, exit)("Wrote %d preload entries to Startup Code Cache '%s'", preload_entries_cnt, _cache_path);
1221     }
1222     if (preload_entries != nullptr) {
1223       FREE_C_HEAP_ARRAY(uint, preload_entries);
1224     }
1225 
1226     uint new_entries_offset = (current - start); // New offset
1227     // Sort and store search table
1228     qsort(search, entries_count, 2*sizeof(uint), uint_cmp);
1229     search_size = 2 * entries_count * sizeof(uint);
1230     copy_bytes((const char*)search, (address)current, search_size);
1231     FREE_C_HEAP_ARRAY(uint, search);
1232     current += search_size;
1233 
1234     // Write entries
1235     entries_size = entries_count * sizeof(SCCEntry); // New size
1236     copy_bytes((_store_buffer + entries_offset), (address)current, entries_size);
1237     current += entries_size;
1238     log_info(scc, exit)("Wrote %d SCCEntry entries (%d were not entrant, %d max size) to Startup Code Cache '%s'", entries_count, not_entrant_nb, max_size, _cache_path);
1239 
1240     uint size = (current - start);
1241     assert(size <= total_size, "%d > %d", size , total_size);
1242 
1243     // Finalize header
1244     SCCHeader* header = (SCCHeader*)start;
1245     header->init(jvm_version_offset, size,
1246                  (uint)strings_count, strings_offset,
1247                  entries_count, new_entries_offset,
1248                  preload_entries_cnt, preload_entries_offset,
1249                  _use_meta_ptrs);
1250     log_info(scc, init)("Wrote header to Startup Code Cache '%s'", _cache_path);
1251 
1252     // Now store to file
1253 #ifdef _WINDOWS  // On Windows, need WRITE permission to remove the file.
1254     chmod(_cache_path, _S_IREAD | _S_IWRITE);
1255 #endif
1256     // Use remove() to delete the existing file because, on Unix, this will
1257     // allow processes that have it open continued access to the file.
1258     remove(_cache_path);
1259     int fd = os::open(_cache_path, O_RDWR | O_CREAT | O_TRUNC | O_BINARY, 0444);
1260     if (fd < 0) {
1261       log_warning(scc, exit)("Unable to create Startup Code Cache file '%s': (%s)", _cache_path, os::strerror(errno));
1262       FREE_C_HEAP_ARRAY(char, buffer);
1263       exit_vm_on_store_failure();
1264       return false;
1265     } else {
1266       log_info(scc, exit)("Opened for write Startup Code Cache '%s'", _cache_path);
1267     }
1268     bool success = os::write(fd, start, (size_t)size);
1269     if (!success) {
1270       log_warning(scc, exit)("Failed to write %d bytes to Startup Code Cache file '%s': (%s)", size, _cache_path, os::strerror(errno));
1271       FREE_C_HEAP_ARRAY(char, buffer);
1272       exit_vm_on_store_failure();
1273       return false;
1274     }
1275     log_info(scc, exit)("Wrote %d bytes to Startup Code Cache '%s'", size, _cache_path);
1276     if (::close(fd) < 0) {
1277       log_warning(scc, exit)("Failed to close for write Startup Code Cache file '%s'", _cache_path);
1278       exit_vm_on_store_failure();
1279     } else {
1280       log_info(scc, exit)("Closed for write Startup Code Cache '%s'", _cache_path);
1281     }
1282     FREE_C_HEAP_ARRAY(char, buffer);
1283   }
1284   return true;
1285 }
1286 
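// Loads a generated stub from the cache into the current code buffer: verifies the
// saved stub name, copies the code bytes to 'start' and advances the code section end.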
1287 bool SCCache::load_stub(StubCodeGenerator* cgen, vmIntrinsicID id, const char* name, address start) {
1288   assert(start == cgen->assembler()->pc(), "wrong buffer");
1289   SCCache* cache = open_for_read();
1290   if (cache == nullptr) {
1291     return false;
1292   }
1293   SCCEntry* entry = cache->find_entry(SCCEntry::Stub, (uint)id);
1294   if (entry == nullptr) {
1295     return false;
1296   }
1297   uint entry_position = entry->offset();
1298   // Read name
1299   uint name_offset = entry->name_offset() + entry_position;
1300   uint name_size   = entry->name_size(); // Includes '\0'
1301   const char* saved_name = cache->addr(name_offset);
1302   if (strncmp(name, saved_name, (name_size - 1)) != 0) {
1303     log_warning(scc)("Saved stub's name '%s' is different from '%s' for id:%d", saved_name, name, (int)id);
1304     cache->set_failed();
1305     exit_vm_on_load_failure();
1306     return false;
1307   }
1308   log_info(scc,stubs)("Reading stub '%s' id:%d from Startup Code Cache '%s'", name, (int)id, cache->_cache_path);
1309   // Read code
1310   uint code_offset = entry->code_offset() + entry_position;
1311   uint code_size   = entry->code_size();
1312   copy_bytes(cache->addr(code_offset), start, code_size);
1313   cgen->assembler()->code_section()->set_end(start + code_size);
1314   log_info(scc,stubs)("Read stub '%s' id:%d from Startup Code Cache '%s'", name, (int)id, cache->_cache_path);
1315   return true;
1316 }
1317 
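// Stores a freshly generated stub (code bytes followed by its name) into the cache.
// Stubs are expected to have no relocations; in debug builds any relocation found in
// the stub's code section is a fatal error.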
1318 bool SCCache::store_stub(StubCodeGenerator* cgen, vmIntrinsicID id, const char* name, address start) {
1319   SCCache* cache = open_for_write();
1320   if (cache == nullptr) {
1321     return false;
1322   }
1323   log_info(scc, stubs)("Writing stub '%s' id:%d to Startup Code Cache '%s'", name, (int)id, cache->_cache_path);
1324   if (!cache->align_write()) {
1325     return false;
1326   }
1327 #ifdef ASSERT
1328   CodeSection* cs = cgen->assembler()->code_section();
1329   if (cs->has_locs()) {
1330     uint reloc_count = cs->locs_count();
1331     tty->print_cr("======== write stubs code section relocations [%d]:", reloc_count);
1332     // Collect additional data
1333     RelocIterator iter(cs);
1334     while (iter.next()) {
1335       switch (iter.type()) {
1336         case relocInfo::none:
1337           break;
1338         default: {
1339           iter.print_current_on(tty);
1340           fatal("stub's relocation %d unimplemented", (int)iter.type());
1341           break;
1342         }
1343       }
1344     }
1345   }
1346 #endif
1347   uint entry_position = cache->_write_position;
1348 
1349   // Write code
1350   uint code_offset = 0;
1351   uint code_size = cgen->assembler()->pc() - start;
1352   uint n = cache->write_bytes(start, code_size);
1353   if (n != code_size) {
1354     return false;
1355   }
1356   // Write name
1357   uint name_offset = cache->_write_position - entry_position;
1358   uint name_size = (uint)strlen(name) + 1; // Includes terminating '\0'
1359   n = cache->write_bytes(name, name_size);
1360   if (n != name_size) {
1361     return false;
1362   }
1363   uint entry_size = cache->_write_position - entry_position;
1364   SCCEntry* entry = new(cache) SCCEntry(entry_position, entry_size, name_offset, name_size,
1365                                           code_offset, code_size, 0, 0,
1366                                           SCCEntry::Stub, (uint32_t)id);
1367   log_info(scc, stubs)("Wrote stub '%s' id:%d to Startup Code Cache '%s'", name, (int)id, cache->_cache_path);
1368   return true;
1369 }
1370 
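     // Read a klass reference. The record starts with a state word: bit 0 is
     // the init state captured at caching time, the remaining bits hold the
     // array dimension. A shared klass is then identified by its offset from
     // SharedBaseAddress; otherwise a length-prefixed class name follows and
     // is resolved through the SystemDictionary.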
1371 Klass* SCCReader::read_klass(const methodHandle& comp_method, bool shared) {
1372   uint code_offset = read_position();
1373   uint state = *(uint*)addr(code_offset);
1374   uint init_state = (state  & 1);
1375   uint array_dim  = (state >> 1);
1376   code_offset += sizeof(int);
1377   if (_cache->use_meta_ptrs() && shared) {
1378     uint klass_offset = *(uint*)addr(code_offset);
1379     code_offset += sizeof(uint);
1380     set_read_position(code_offset);
1381     Klass* k = (Klass*)((address)SharedBaseAddress + klass_offset);
1382     if (!MetaspaceShared::is_in_shared_metaspace((address)k)) {
1383       // Something changed in CDS
1384       set_lookup_failed();
1385       log_info(scc)("Lookup failed for shared klass: " INTPTR_FORMAT " is not in CDS ", p2i((address)k));
1386       return nullptr;
1387     }
1388     assert(k->is_klass(), "sanity");
1389     ResourceMark rm;
1390     const char* comp_name = comp_method->name_and_sig_as_C_string();
1391     if (k->is_instance_klass() && !InstanceKlass::cast(k)->is_loaded()) {
1392       set_lookup_failed();
1393       log_info(scc)("%d '%s' (L%d): Lookup failed for klass %s: not loaded",
1394                        compile_id(), comp_name, comp_level(), k->external_name());
1395       return nullptr;
1396     } else
1397     // Allow a not-yet-initialized klass if it was also uninitialized during code caching, or for preload code
1398     if (k->is_instance_klass() && !InstanceKlass::cast(k)->is_initialized() && (init_state == 1) && !_preload) {
1399       set_lookup_failed();
1400       log_info(scc)("%d '%s' (L%d): Lookup failed for klass %s: not initialized",
1401                        compile_id(), comp_name, comp_level(), k->external_name());
1402       return nullptr;
1403     }
1404     if (array_dim > 0) {
1405       assert(k->is_instance_klass() || k->is_typeArray_klass(), "sanity check");
1406       Klass* ak = k->array_klass_or_null(array_dim);
1407       // FIXME: what would it take to create an array class on the fly?
1408       //   Klass* ak = k->array_klass(dim, JavaThread::current());
1409       //   guarantee(JavaThread::current()->pending_exception() == nullptr, "");
1410       if (ak == nullptr) {
1411         set_lookup_failed();
1412         log_info(scc)("%d (L%d): %d-dimension array klass lookup failed: %s",
1413                          compile_id(), comp_level(), array_dim, k->external_name());
             return nullptr; // do not log a successful lookup below
1414       }
1415       log_info(scc)("%d (L%d): Klass lookup: %s (object array)", compile_id(), comp_level(), k->external_name());
1416       return ak;
1417     } else {
1418       log_info(scc)("%d (L%d): Shared klass lookup: %s",
1419                     compile_id(), comp_level(), k->external_name());
1420       return k;
1421     }
1422   }
1423   int name_length = *(int*)addr(code_offset);
1424   code_offset += sizeof(int);
1425   const char* dest = addr(code_offset);
1426   code_offset += name_length + 1;
1427   set_read_position(code_offset);
1428   TempNewSymbol klass_sym = SymbolTable::probe(&(dest[0]), name_length);
1429   if (klass_sym == nullptr) {
1430     set_lookup_failed();
1431     log_info(scc)("%d (L%d): Probe failed for class %s",
1432                      compile_id(), comp_level(), &(dest[0]));
1433     return nullptr;
1434   }
1435   // Use class loader of compiled method.
1436   Thread* thread = Thread::current();
1437   Handle loader(thread, comp_method->method_holder()->class_loader());
1438   Klass* k = SystemDictionary::find_instance_or_array_klass(thread, klass_sym, loader);
1439   assert(!thread->has_pending_exception(), "should not throw");
1440   if (k == nullptr && !loader.is_null()) {
1441     // Try default loader and domain
1442     k = SystemDictionary::find_instance_or_array_klass(thread, klass_sym, Handle());
1443     assert(!thread->has_pending_exception(), "should not throw");
1444   }
1445   if (k != nullptr) {
1446     // Allow a not-yet-initialized klass if it was also uninitialized during code caching
1447     if (k->is_instance_klass() && !InstanceKlass::cast(k)->is_initialized() && (init_state == 1)) {
1448       set_lookup_failed();
1449       log_info(scc)("%d (L%d): Lookup failed for klass %s: not initialized", compile_id(), comp_level(), &(dest[0]));
1450       return nullptr;
1451     }
1452     log_info(scc)("%d (L%d): Klass lookup %s", compile_id(), comp_level(), k->external_name());
1453   } else {
1454     set_lookup_failed();
1455     log_info(scc)("%d (L%d): Lookup failed for class %s", compile_id(), comp_level(), &(dest[0]));
1456     return nullptr;
1457   }
1458   return k;
1459 }
1460 
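     // Read a method reference. A shared method is identified by its offset
     // from SharedBaseAddress; otherwise three string lengths followed by the
     // '\0'-separated holder, name and signature are read and resolved through
     // the SystemDictionary and InstanceKlass::find_method().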
1461 Method* SCCReader::read_method(const methodHandle& comp_method, bool shared) {
1462   uint code_offset = read_position();
1463   if (_cache->use_meta_ptrs() && shared) {
1464     uint method_offset = *(uint*)addr(code_offset);
1465     code_offset += sizeof(uint);
1466     set_read_position(code_offset);
1467     Method* m = (Method*)((address)SharedBaseAddress + method_offset);
1468     if (!MetaspaceShared::is_in_shared_metaspace((address)m)) {
1469       // Something changed in CDS
1470       set_lookup_failed();
1471       log_info(scc)("Lookup failed for shared method: " INTPTR_FORMAT " is not in CDS ", p2i((address)m));
1472       return nullptr;
1473     }
1474     assert(m->is_method(), "sanity");
1475     ResourceMark rm;
1476     const char* comp_name = comp_method->name_and_sig_as_C_string();
1477     Klass* k = m->method_holder();
1478     if (!k->is_instance_klass()) {
1479       set_lookup_failed();
1480       log_info(scc)("%d '%s' (L%d): Lookup failed for holder %s: not instance klass", compile_id(), comp_name, comp_level(), k->external_name());
1481       return nullptr;
1482     } else if (!MetaspaceShared::is_in_shared_metaspace((address)k)) {
1483       set_lookup_failed();
1484       log_info(scc)("%d '%s' (L%d): Lookup failed for holder %s: not in CDS", compile_id(), comp_name, comp_level(), k->external_name());
1485       return nullptr;
1486     } else if (!InstanceKlass::cast(k)->is_loaded()) {
1487       set_lookup_failed();
1488       log_info(scc)("%d '%s' (L%d): Lookup failed for holder %s: not loaded", compile_id(), comp_name, comp_level(), k->external_name());
1489       return nullptr;
1490     } else if (!InstanceKlass::cast(k)->is_linked()) {
1491       set_lookup_failed();
1492       log_info(scc)("%d '%s' (L%d): Lookup failed for holder %s: not linked%s", compile_id(), comp_name, comp_level(), k->external_name(), (_preload ? " for code preload" : ""));
1493       return nullptr;
1494     }
1495     log_info(scc)("%d (L%d): Shared method lookup: %s", compile_id(), comp_level(), m->name_and_sig_as_C_string());
1496     return m;
1497   }
1498   int holder_length = *(int*)addr(code_offset);
1499   code_offset += sizeof(int);
1500   int name_length = *(int*)addr(code_offset);
1501   code_offset += sizeof(int);
1502   int signat_length = *(int*)addr(code_offset);
1503   code_offset += sizeof(int);
1504 
1505   const char* dest = addr(code_offset);
1506   code_offset += holder_length + 1 + name_length + 1 + signat_length + 1;
1507   set_read_position(code_offset);
1508   TempNewSymbol klass_sym = SymbolTable::probe(&(dest[0]), holder_length);
1509   if (klass_sym == nullptr) {
1510     set_lookup_failed();
1511     log_info(scc)("%d (L%d): Probe failed for class %s", compile_id(), comp_level(), &(dest[0]));
1512     return nullptr;
1513   }
1514   // Use class loader of compiled method.
1515   Thread* thread = Thread::current();
1516   Handle loader(thread, comp_method->method_holder()->class_loader());
1517   Klass* k = SystemDictionary::find_instance_or_array_klass(thread, klass_sym, loader);
1518   assert(!thread->has_pending_exception(), "should not throw");
1519   if (k == nullptr && !loader.is_null()) {
1520     // Try default loader and domain
1521     k = SystemDictionary::find_instance_or_array_klass(thread, klass_sym, Handle());
1522     assert(!thread->has_pending_exception(), "should not throw");
1523   }
1524   if (k != nullptr) {
1525     if (!k->is_instance_klass()) {
1526       set_lookup_failed();
1527       log_info(scc)("%d (L%d): Lookup failed for holder %s: not instance klass",
1528                        compile_id(), comp_level(), &(dest[0]));
1529       return nullptr;
1530     } else if (!InstanceKlass::cast(k)->is_linked()) {
1531       set_lookup_failed();
1532       log_info(scc)("%d (L%d): Lookup failed for holder %s: not linked",
1533                        compile_id(), comp_level(), &(dest[0]));
1534       return nullptr;
1535     }
1536     log_info(scc)("%d (L%d): Holder lookup: %s", compile_id(), comp_level(), k->external_name());
1537   } else {
1538     set_lookup_failed();
1539     log_info(scc)("%d (L%d): Lookup failed for holder %s",
1540                   compile_id(), comp_level(), &(dest[0]));
1541     return nullptr;
1542   }
1543   TempNewSymbol name_sym = SymbolTable::probe(&(dest[holder_length + 1]), name_length);
1544   int pos = holder_length + 1 + name_length + 1;
1545   TempNewSymbol sign_sym = SymbolTable::probe(&(dest[pos]), signat_length);
1546   if (name_sym == nullptr) {
1547     set_lookup_failed();
1548     log_info(scc)("%d (L%d): Probe failed for method name %s",
1549                      compile_id(), comp_level(), &(dest[holder_length + 1]));
1550     return nullptr;
1551   }
1552   if (sign_sym == nullptr) {
1553     set_lookup_failed();
1554     log_info(scc)("%d (L%d): Probe failed for method signature %s",
1555                      compile_id(), comp_level(), &(dest[pos]));
1556     return nullptr;
1557   }
1558   Method* m = InstanceKlass::cast(k)->find_method(name_sym, sign_sym);
1559   if (m != nullptr) {
1560     ResourceMark rm;
1561     log_info(scc)("%d (L%d): Method lookup: %s", compile_id(), comp_level(), m->name_and_sig_as_C_string());
1562   } else {
1563     set_lookup_failed();
1564     log_info(scc)("%d (L%d): Lookup failed for method %s::%s%s",
1565                      compile_id(), comp_level(), &(dest[0]), &(dest[holder_length + 1]), &(dest[pos]));
1566     return nullptr;
1567   }
1568   return m;
1569 }
1570 
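     // Write a klass reference. A CDS-shared klass is recorded as
     // DataKind::Klass_Shared with its init/array-dimension state and its
     // offset from the shared address base; otherwise DataKind::Klass is
     // written with the state and the klass name.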
1571 bool SCCache::write_klass(Klass* klass) {
1572   if (klass->is_hidden()) { // Hidden classes cannot be cached - skip such nmethods
1573     set_lookup_failed();
1574     return false;
1575   }
1576   bool can_use_meta_ptrs = _use_meta_ptrs;
1577   uint array_dim = 0;
1578   if (klass->is_objArray_klass()) {
1579     array_dim = ObjArrayKlass::cast(klass)->dimension();
1580     klass     = ObjArrayKlass::cast(klass)->bottom_klass(); // overwrites klass
1581   }
1582   uint init_state = 0;
1583   if (klass->is_instance_klass()) {
1584     InstanceKlass* ik = InstanceKlass::cast(klass);
1585     ClassLoaderData* cld = ik->class_loader_data();
1586     if (!cld->is_builtin_class_loader_data()) {
1587       set_lookup_failed();
1588       return false;
1589     }
1590     if (_for_preload && !CDSAccess::can_generate_cached_code(ik)) {
1591       _for_preload = false;
1592       // Bail out if the code has clinit barriers:
1593       // the method will be recompiled without them in any case
1594       if (_has_clinit_barriers) {
1595         set_lookup_failed();
1596         return false;
1597       }
1598       can_use_meta_ptrs = false;
1599     }
1600     init_state = (ik->is_initialized() ? 1 : 0);
1601   }
1602   ResourceMark rm;
1603   uint state = (array_dim << 1) | (init_state & 1);
1604   if (can_use_meta_ptrs && CDSAccess::can_generate_cached_code(klass)) {
1605     DataKind kind = DataKind::Klass_Shared;
1606     uint n = write_bytes(&kind, sizeof(int));
1607     if (n != sizeof(int)) {
1608       return false;
1609     }
1610     // Record state of instance klass initialization.
1611     n = write_bytes(&state, sizeof(int));
1612     if (n != sizeof(int)) {
1613       return false;
1614     }
1615     uint klass_offset = CDSAccess::delta_from_shared_address_base((address)klass);
1616     n = write_bytes(&klass_offset, sizeof(uint));
1617     if (n != sizeof(uint)) {
1618       return false;
1619     }
1620     log_info(scc)("%d (L%d): Wrote shared klass: %s%s%s @ 0x%08x", compile_id(), comp_level(), klass->external_name(),
1621                   (!klass->is_instance_klass() ? "" : (init_state == 1 ? " (initialized)" : " (not-initialized)")),
1622                   (array_dim > 0 ? " (object array)" : ""),
1623                   klass_offset);
1624     return true;
1625   }
1626   // Bail out if the code has clinit barriers:
1627   // the method will be recompiled without them in any case
1628   if (_for_preload && _has_clinit_barriers) {
1629     set_lookup_failed();
1630     return false;
1631   }
1632   _for_preload = false;
1633   log_info(scc,cds)("%d (L%d): Not shared klass: %s", compile_id(), comp_level(), klass->external_name());
1634   DataKind kind = DataKind::Klass;
1635   uint n = write_bytes(&kind, sizeof(int));
1636   if (n != sizeof(int)) {
1637     return false;
1638   }
1639   // Record state of instance klass initialization.
1640   n = write_bytes(&state, sizeof(int));
1641   if (n != sizeof(int)) {
1642     return false;
1643   }
1644   Symbol* name = klass->name();
1645   int name_length = name->utf8_length();
1646   int total_length = name_length + 1;
1647   char* dest = NEW_RESOURCE_ARRAY(char, total_length);
1648   name->as_C_string(dest, total_length);
1649   dest[total_length - 1] = '\0';
1650   LogTarget(Info, scc, loader) log;
1651   if (log.is_enabled()) {
1652     LogStream ls(log);
1653     oop loader = klass->class_loader();
1654     oop domain = klass->protection_domain();
1655     ls.print("Class %s loader: ", dest);
1656     if (loader == nullptr) {
1657       ls.print("nullptr");
1658     } else {
1659       loader->print_value_on(&ls);
1660     }
1661     ls.print(" domain: ");
1662     if (domain == nullptr) {
1663       ls.print("nullptr");
1664     } else {
1665       domain->print_value_on(&ls);
1666     }
1667     ls.cr();
1668   }
1669   n = write_bytes(&name_length, sizeof(int));
1670   if (n != sizeof(int)) {
1671     return false;
1672   }
1673   n = write_bytes(dest, total_length);
1674   if (n != (uint)total_length) {
1675     return false;
1676   }
1677   log_info(scc)("%d (L%d): Wrote klass: %s%s%s",
1678                 compile_id(), comp_level(),
1679                 dest, (!klass->is_instance_klass() ? "" : (init_state == 1 ? " (initialized)" : " (not-initialized)")),
1680                 (array_dim > 0 ? " (object array)" : ""));
1681   return true;
1682 }
1683 
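     // Write a method reference. A CDS-shared method is recorded as
     // DataKind::Method_Shared with its offset from the shared address base;
     // otherwise DataKind::Method is written with the holder, name and
     // signature strings.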
1684 bool SCCache::write_method(Method* method) {
1685   bool can_use_meta_ptrs = _use_meta_ptrs;
1686   Klass* klass = method->method_holder();
1687   if (klass->is_instance_klass()) {
1688     InstanceKlass* ik = InstanceKlass::cast(klass);
1689     ClassLoaderData* cld = ik->class_loader_data();
1690     if (!cld->is_builtin_class_loader_data()) {
1691       set_lookup_failed();
1692       return false;
1693     }
1694     if (_for_preload && !CDSAccess::can_generate_cached_code(ik)) {
1695       _for_preload = false;
1696       // Bail out if the code has clinit barriers:
1697       // the method will be recompiled without them in any case
1698       if (_has_clinit_barriers) {
1699         set_lookup_failed();
1700         return false;
1701       }
1702       can_use_meta_ptrs = false;
1703     }
1704   }
1705   ResourceMark rm;
1706   if (can_use_meta_ptrs && CDSAccess::can_generate_cached_code(method)) {
1707     DataKind kind = DataKind::Method_Shared;
1708     uint n = write_bytes(&kind, sizeof(int));
1709     if (n != sizeof(int)) {
1710       return false;
1711     }
1712     uint method_offset = CDSAccess::delta_from_shared_address_base((address)method);
1713     n = write_bytes(&method_offset, sizeof(uint));
1714     if (n != sizeof(uint)) {
1715       return false;
1716     }
1717     log_info(scc)("%d (L%d): Wrote shared method: %s @ 0x%08x", compile_id(), comp_level(), method->name_and_sig_as_C_string(), method_offset);
1718     return true;
1719   }
1720   // Bail out if the code has clinit barriers:
1721   // the method will be recompiled without them in any case
1722   if (_for_preload && _has_clinit_barriers) {
1723     set_lookup_failed();
1724     return false;
1725   }
1726   _for_preload = false;
1727   log_info(scc,cds)("%d (L%d): Not shared method: %s", compile_id(), comp_level(), method->name_and_sig_as_C_string());
1728   if (method->is_hidden()) { // Hidden methods cannot be cached - skip such nmethods
1729     set_lookup_failed();
1730     return false;
1731   }
1732   DataKind kind = DataKind::Method;
1733   uint n = write_bytes(&kind, sizeof(int));
1734   if (n != sizeof(int)) {
1735     return false;
1736   }
1737   Symbol* name   = method->name();
1738   Symbol* holder = method->klass_name();
1739   Symbol* signat = method->signature();
1740   int name_length   = name->utf8_length();
1741   int holder_length = holder->utf8_length();
1742   int signat_length = signat->utf8_length();
1743 
1744   // Write sizes and strings
1745   int total_length = holder_length + 1 + name_length + 1 + signat_length + 1;
1746   char* dest = NEW_RESOURCE_ARRAY(char, total_length);
1747   holder->as_C_string(dest, total_length);
1748   dest[holder_length] = '\0';
1749   int pos = holder_length + 1;
1750   name->as_C_string(&(dest[pos]), (total_length - pos));
1751   pos += name_length;
1752   dest[pos++] = '\0';
1753   signat->as_C_string(&(dest[pos]), (total_length - pos));
1754   dest[total_length - 1] = '\0';
1755 
1756   LogTarget(Info, scc, loader) log;
1757   if (log.is_enabled()) {
1758     LogStream ls(log);
1759     oop loader = klass->class_loader();
1760     oop domain = klass->protection_domain();
1761     ls.print("Holder %s loader: ", dest);
1762     if (loader == nullptr) {
1763       ls.print("nullptr");
1764     } else {
1765       loader->print_value_on(&ls);
1766     }
1767     ls.print(" domain: ");
1768     if (domain == nullptr) {
1769       ls.print("nullptr");
1770     } else {
1771       domain->print_value_on(&ls);
1772     }
1773     ls.cr();
1774   }
1775 
1776   n = write_bytes(&holder_length, sizeof(int));
1777   if (n != sizeof(int)) {
1778     return false;
1779   }
1780   n = write_bytes(&name_length, sizeof(int));
1781   if (n != sizeof(int)) {
1782     return false;
1783   }
1784   n = write_bytes(&signat_length, sizeof(int));
1785   if (n != sizeof(int)) {
1786     return false;
1787   }
1788   n = write_bytes(dest, total_length);
1789   if (n != (uint)total_length) {
1790     return false;
1791   }
1792   dest[holder_length] = ' ';
1793   dest[holder_length + 1 + name_length] = ' ';
1794   log_info(scc)("%d (L%d): Wrote method: %s", compile_id(), comp_level(), dest);
1795   return true;
1796 }
1797 
1798 // Repair the pc relative information in the code after load
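     // For each code section the record holds the relocation count, the
     // _locs_point offset, the raw relocInfo array, and one uint of additional
     // data per relocation: an address-table id used to re-resolve call and
     // external-word destinations, or a marker for immediate oops/metadata
     // whose values follow.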
1799 bool SCCReader::read_relocations(CodeBuffer* buffer, CodeBuffer* orig_buffer,
1800                                  OopRecorder* oop_recorder, ciMethod* target) {
1801   bool success = true;
1802   for (int i = 0; i < (int)CodeBuffer::SECT_LIMIT; i++) {
1803     uint code_offset = read_position();
1804     int reloc_count = *(int*)addr(code_offset);
1805     code_offset += sizeof(int);
1806     if (reloc_count == 0) {
1807       set_read_position(code_offset);
1808       continue;
1809     }
1810     // Read _locs_point (as offset from start)
1811     int locs_point_off = *(int*)addr(code_offset);
1812     code_offset += sizeof(int);
1813     uint reloc_size = reloc_count * sizeof(relocInfo);
1814     CodeSection* cs  = buffer->code_section(i);
1815     if (cs->locs_capacity() < reloc_count) {
1816       cs->expand_locs(reloc_count);
1817     }
1818     relocInfo* reloc_start = cs->locs_start();
1819     copy_bytes(addr(code_offset), (address)reloc_start, reloc_size);
1820     code_offset += reloc_size;
1821     cs->set_locs_end(reloc_start + reloc_count);
1822     cs->set_locs_point(cs->start() + locs_point_off);
1823 
1824     // Read additional relocation data: uint per relocation
1825     uint  data_size  = reloc_count * sizeof(uint);
1826     uint* reloc_data = (uint*)addr(code_offset);
1827     code_offset += data_size;
1828     set_read_position(code_offset);
1829     LogStreamHandle(Info, scc, reloc) log;
1830     if (log.is_enabled()) {
1831       log.print_cr("======== read code section %d relocations [%d]:", i, reloc_count);
1832     }
1833     RelocIterator iter(cs);
1834     int j = 0;
1835     while (iter.next()) {
1836       switch (iter.type()) {
1837         case relocInfo::none:
1838           break;
1839         case relocInfo::oop_type: {
1840           VM_ENTRY_MARK;
1841           oop_Relocation* r = (oop_Relocation*)iter.reloc();
1842           if (r->oop_is_immediate()) {
1843             assert(reloc_data[j] == (uint)j, "should be");
1844             methodHandle comp_method(THREAD, target->get_Method());
1845             jobject jo = read_oop(THREAD, comp_method);
1846             if (lookup_failed()) {
1847               success = false;
1848               break;
1849             }
1850             r->set_value((address)jo);
1851           } else if (false) { // Note: this branch is currently disabled
1852             // Get already updated value from OopRecorder.
1853             assert(oop_recorder != nullptr, "sanity");
1854             int index = r->oop_index();
1855             jobject jo = oop_recorder->oop_at(index);
1856             oop obj = JNIHandles::resolve(jo);
1857             r->set_value(*reinterpret_cast<address*>(&obj));
1858           }
1859           break;
1860         }
1861         case relocInfo::metadata_type: {
1862           VM_ENTRY_MARK;
1863           metadata_Relocation* r = (metadata_Relocation*)iter.reloc();
1864           Metadata* m;
1865           if (r->metadata_is_immediate()) {
1866             assert(reloc_data[j] == (uint)j, "should be");
1867             methodHandle comp_method(THREAD, target->get_Method());
1868             m = read_metadata(comp_method);
1869             if (lookup_failed()) {
1870               success = false;
1871               break;
1872             }
1873           } else {
1874             // Get already updated value from OopRecorder.
1875             assert(oop_recorder != nullptr, "sanity");
1876             int index = r->metadata_index();
1877             m = oop_recorder->metadata_at(index);
1878           }
1879           r->set_value((address)m);
1880           break;
1881         }
1882         case relocInfo::virtual_call_type:   // Fall through. They all call resolve_*_call blobs.
1883         case relocInfo::opt_virtual_call_type:
1884         case relocInfo::static_call_type: {
1885           address dest = _cache->address_for_id(reloc_data[j]);
1886           if (dest != (address)-1) {
1887             ((CallRelocation*)iter.reloc())->set_destination(dest);
1888           }
1889           break;
1890         }
1891         case relocInfo::trampoline_stub_type: {
1892           address dest = _cache->address_for_id(reloc_data[j]);
1893           if (dest != (address)-1) {
1894             ((trampoline_stub_Relocation*)iter.reloc())->set_destination(dest);
1895           }
1896           break;
1897         }
1898         case relocInfo::static_stub_type:
1899           break;
1900         case relocInfo::runtime_call_type: {
1901           address dest = _cache->address_for_id(reloc_data[j]);
1902           if (dest != (address)-1) {
1903             ((CallRelocation*)iter.reloc())->set_destination(dest);
1904           }
1905           break;
1906         }
1907         case relocInfo::runtime_call_w_cp_type:
1908           fatal("runtime_call_w_cp_type unimplemented");
1909           //address destination = iter.reloc()->value();
1910           break;
1911         case relocInfo::external_word_type: {
1912           address target = _cache->address_for_id(reloc_data[j]);
1913           // Add external address to global table
1914           int index = ExternalsRecorder::find_index(target);
1915           // Update index in relocation
1916           Relocation::add_jint(iter.data(), index);
1917           external_word_Relocation* reloc = (external_word_Relocation*)iter.reloc();
1918           assert(reloc->target() == target, "sanity");
1919           reloc->set_value(target); // Patch address in the code
1920           iter.reloc()->fix_relocation_after_move(orig_buffer, buffer);
1921           break;
1922         }
1923         case relocInfo::internal_word_type:
1924           iter.reloc()->fix_relocation_after_move(orig_buffer, buffer);
1925           break;
1926         case relocInfo::section_word_type:
1927           iter.reloc()->fix_relocation_after_move(orig_buffer, buffer);
1928           break;
1929         case relocInfo::poll_type:
1930           break;
1931         case relocInfo::poll_return_type:
1932           break;
1933         case relocInfo::post_call_nop_type:
1934           break;
1935         case relocInfo::entry_guard_type:
1936           break;
1937         default:
1938           fatal("relocation %d unimplemented", (int)iter.type());
1939           break;
1940       }
1941       if (success && log.is_enabled()) {
1942         iter.print_current_on(&log);
1943       }
1944       j++;
1945     }
1946     assert(j <= (int)reloc_count, "sanity");
1947   }
1948   return success;
1949 }
1950 
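     // Read code sections. An SCCodeSection table describes each section's
     // original size, start address and data offset; section contents are
     // copied into the new CodeBuffer while a fake "original" buffer keeps the
     // old addresses for relocation fixup.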
1951 bool SCCReader::read_code(CodeBuffer* buffer, CodeBuffer* orig_buffer, uint code_offset) {
1952   assert(code_offset == align_up(code_offset, DATA_ALIGNMENT), "%d not aligned to %d", code_offset, DATA_ALIGNMENT);
1953   assert(buffer->blob() != nullptr, "sanity");
1954   SCCodeSection* scc_cs = (SCCodeSection*)addr(code_offset);
1955   for (int i = 0; i < (int)CodeBuffer::SECT_LIMIT; i++) {
1956     CodeSection* cs = buffer->code_section(i);
1957     // Read original section size and address.
1958     uint orig_size = scc_cs[i]._size;
1959     log_debug(scc)("======== read code section %d [%d]:", i, orig_size);
1960     uint orig_size_align = align_up(orig_size, DATA_ALIGNMENT);
1961     if (i != (int)CodeBuffer::SECT_INSTS) {
1962       buffer->initialize_section_size(cs, orig_size_align);
1963     }
1964     if (orig_size_align > (uint)cs->capacity()) { // Will not fit
1965       log_info(scc)("%d (L%d): original code section %d size %d > current capacity %d",
1966                        compile_id(), comp_level(), i, orig_size, cs->capacity());
1967       return false;
1968     }
1969     if (orig_size == 0) {
1970       assert(cs->size() == 0, "should match");
1971       continue;  // skip trivial section
1972     }
1973     address orig_start = scc_cs[i]._origin_address;
1974 
1975     // Populate fake original buffer (no code allocation in CodeCache).
1976     // It is used by relocations to calculate section address deltas.
1977     CodeSection* orig_cs = orig_buffer->code_section(i);
1978     assert(!orig_cs->is_allocated(), "This %d section should not be set", i);
1979     orig_cs->initialize(orig_start, orig_size);
1980 
1981     // Load code to new buffer.
1982     address code_start = cs->start();
1983     copy_bytes(addr(scc_cs[i]._offset + code_offset), code_start, orig_size_align);
1984     cs->set_end(code_start + orig_size);
1985   }
1986 
1987   return true;
1988 }
1989 
1990 bool SCCache::load_exception_blob(CodeBuffer* buffer, int* pc_offset) {
1991 #ifdef ASSERT
1992   LogStreamHandle(Debug, scc, nmethod) log;
1993   if (log.is_enabled()) {
1994     FlagSetting fs(PrintRelocations, true);
1995     buffer->print_on(&log);
1996   }
1997 #endif
1998   SCCache* cache = open_for_read();
1999   if (cache == nullptr) {
2000     return false;
2001   }
2002   SCCEntry* entry = cache->find_entry(SCCEntry::Blob, 999);
2003   if (entry == nullptr) {
2004     return false;
2005   }
2006   SCCReader reader(cache, entry, nullptr);
2007   return reader.compile_blob(buffer, pc_offset);
2008 }
2009 
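     // Materialize a cached blob into the given CodeBuffer: check the saved
     // name against the buffer's name, read the code sections, repair the
     // relocations, and return the recorded pc_offset.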
2010 bool SCCReader::compile_blob(CodeBuffer* buffer, int* pc_offset) {
2011   uint entry_position = _entry->offset();
2012 
2013   // Read pc_offset
2014   *pc_offset = *(int*)addr(entry_position);
2015 
2016   // Read name
2017   uint name_offset = entry_position + _entry->name_offset();
2018   uint name_size = _entry->name_size(); // Includes terminating '\0'
2019   const char* name = addr(name_offset);
2020 
2021   log_info(scc, stubs)("%d (L%d): Reading blob '%s' with pc_offset %d from Startup Code Cache '%s'",
2022                        compile_id(), comp_level(), name, *pc_offset, _cache->cache_path());
2023 
2024   if (strncmp(buffer->name(), name, (name_size - 1)) != 0) {
2025     log_warning(scc)("%d (L%d): Saved blob's name '%s' is different from '%s'",
2026                      compile_id(), comp_level(), name, buffer->name());
2027     ((SCCache*)_cache)->set_failed();
2028     exit_vm_on_load_failure();
2029     return false;
2030   }
2031 
2032   // Create fake original CodeBuffer
2033   CodeBuffer orig_buffer(name);
2034 
2035   // Read code
2036   uint code_offset = entry_position + _entry->code_offset();
2037   if (!read_code(buffer, &orig_buffer, code_offset)) {
2038     return false;
2039   }
2040 
2041   // Read relocations
2042   uint reloc_offset = entry_position + _entry->reloc_offset();
2043   set_read_position(reloc_offset);
2044   if (!read_relocations(buffer, &orig_buffer, nullptr, nullptr)) {
2045     return false;
2046   }
2047 
2048   log_info(scc, stubs)("%d (L%d): Read blob '%s' from Startup Code Cache '%s'",
2049                        compile_id(), comp_level(), name, _cache->cache_path());
2050 #ifdef ASSERT
2051   LogStreamHandle(Debug, scc, nmethod) log;
2052   if (log.is_enabled()) {
2053     FlagSetting fs(PrintRelocations, true);
2054     buffer->print_on(&log);
2055     buffer->decode();
2056   }
2057 #endif
2058   return true;
2059 }
2060 
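     // Write relocations for all code sections. Each section records its
     // relocation count, _locs_point offset and raw relocInfo array, followed
     // by one uint per relocation: an address-table id for calls and external
     // words, or a marker for immediate oops/metadata whose values are
     // appended afterwards.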
2061 bool SCCache::write_relocations(CodeBuffer* buffer, uint& all_reloc_size) {
2062   uint all_reloc_count = 0;
2063   for (int i = 0; i < (int)CodeBuffer::SECT_LIMIT; i++) {
2064     CodeSection* cs = buffer->code_section(i);
2065     uint reloc_count = cs->has_locs() ? cs->locs_count() : 0;
2066     all_reloc_count += reloc_count;
2067   }
2068   all_reloc_size = all_reloc_count * sizeof(relocInfo);
2069   bool success = true;
2070   uint* reloc_data = NEW_C_HEAP_ARRAY(uint, all_reloc_count, mtCode);
2071   for (int i = 0; i < (int)CodeBuffer::SECT_LIMIT; i++) {
2072     CodeSection* cs = buffer->code_section(i);
2073     int reloc_count = cs->has_locs() ? cs->locs_count() : 0;
2074     uint n = write_bytes(&reloc_count, sizeof(int));
2075     if (n != sizeof(int)) {
2076       success = false;
2077       break;
2078     }
2079     if (reloc_count == 0) {
2080       continue;
2081     }
2082     // Write _locs_point (as offset from start)
2083     int locs_point_off = cs->locs_point_off();
2084     n = write_bytes(&locs_point_off, sizeof(int));
2085     if (n != sizeof(int)) {
2086       success = false;
2087       break;
2088     }
2089     relocInfo* reloc_start = cs->locs_start();
2090     uint reloc_size      = reloc_count * sizeof(relocInfo);
2091     n = write_bytes(reloc_start, reloc_size);
2092     if (n != reloc_size) {
2093       success = false;
2094       break;
2095     }
2096     LogStreamHandle(Info, scc, reloc) log;
2097     if (log.is_enabled()) {
2098       log.print_cr("======== write code section %d relocations [%d]:", i, reloc_count);
2099     }
2100     // Collect additional data
2101     RelocIterator iter(cs);
2102     bool has_immediate = false;
2103     int j = 0;
2104     while (iter.next()) {
2105       reloc_data[j] = 0; // initialize
2106       switch (iter.type()) {
2107         case relocInfo::none:
2108           break;
2109         case relocInfo::oop_type: {
2110           oop_Relocation* r = (oop_Relocation*)iter.reloc();
2111           if (r->oop_is_immediate()) {
2112             reloc_data[j] = (uint)j; // Indication that we need to restore immediate
2113             has_immediate = true;
2114           }
2115           break;
2116         }
2117         case relocInfo::metadata_type: {
2118           metadata_Relocation* r = (metadata_Relocation*)iter.reloc();
2119           if (r->metadata_is_immediate()) {
2120             reloc_data[j] = (uint)j; // Indication that we need to restore immediate
2121             has_immediate = true;
2122           }
2123           break;
2124         }
2125         case relocInfo::virtual_call_type:  // Fall through. They all call resolve_*_call blobs.
2126         case relocInfo::opt_virtual_call_type:
2127         case relocInfo::static_call_type: {
2128           CallRelocation* r = (CallRelocation*)iter.reloc();
2129           address dest = r->destination();
2130           if (dest == r->addr()) { // possible call via trampoline on AArch64
2131             dest = (address)-1;    // do nothing in this case when loading this relocation
2132           }
2133           reloc_data[j] = _table->id_for_address(dest, iter, buffer);
2134           break;
2135         }
2136         case relocInfo::trampoline_stub_type: {
2137           address dest = ((trampoline_stub_Relocation*)iter.reloc())->destination();
2138           reloc_data[j] = _table->id_for_address(dest, iter, buffer);
2139           break;
2140         }
2141         case relocInfo::static_stub_type:
2142           break;
2143         case relocInfo::runtime_call_type: {
2144           // Record offset of runtime destination
2145           CallRelocation* r = (CallRelocation*)iter.reloc();
2146           address dest = r->destination();
2147           if (dest == r->addr()) { // possible call via trampoline on AArch64
2148             dest = (address)-1;    // do nothing in this case when loading this relocation
2149           }
2150           reloc_data[j] = _table->id_for_address(dest, iter, buffer);
2151           break;
2152         }
2153         case relocInfo::runtime_call_w_cp_type:
2154           fatal("runtime_call_w_cp_type unimplemented");
2155           break;
2156         case relocInfo::external_word_type: {
2157           // Record offset of runtime target
2158           address target = ((external_word_Relocation*)iter.reloc())->target();
2159           reloc_data[j] = _table->id_for_address(target, iter, buffer);
2160           break;
2161         }
2162         case relocInfo::internal_word_type:
2163           break;
2164         case relocInfo::section_word_type:
2165           break;
2166         case relocInfo::poll_type:
2167           break;
2168         case relocInfo::poll_return_type:
2169           break;
2170         case relocInfo::post_call_nop_type:
2171           break;
2172         case relocInfo::entry_guard_type:
2173           break;
2174         default:
2175           fatal("relocation %d unimplemented", (int)iter.type());
2176           break;
2177       }
2178       if (log.is_enabled()) {
2179         iter.print_current_on(&log);
2180       }
2181       j++;
2182     }
2183     assert(j <= (int)reloc_count, "sanity");
2184     // Write additional relocation data: uint per relocation
2185     uint data_size = reloc_count * sizeof(uint);
2186     n = write_bytes(reloc_data, data_size);
2187     if (n != data_size) {
2188       success = false;
2189       break;
2190     }
2191     if (has_immediate) {
2192       // Save information about immediates in this Code Section
2193       RelocIterator iter_imm(cs);
2194       int j = 0;
2195       while (iter_imm.next()) {
2196         switch (iter_imm.type()) {
2197           case relocInfo::oop_type: {
2198             oop_Relocation* r = (oop_Relocation*)iter_imm.reloc();
2199             if (r->oop_is_immediate()) {
2200               assert(reloc_data[j] == (uint)j, "should be");
2201               jobject jo = *(jobject*)(r->oop_addr()); // The slot still holds a jobject handle at this point
2202               if (!write_oop(jo)) {
2203                 success = false;
2204               }
2205             }
2206             break;
2207           }
2208           case relocInfo::metadata_type: {
2209             metadata_Relocation* r = (metadata_Relocation*)iter_imm.reloc();
2210             if (r->metadata_is_immediate()) {
2211               assert(reloc_data[j] == (uint)j, "should be");
2212               Metadata* m = r->metadata_value();
2213               if (!write_metadata(m)) {
2214                 success = false;
2215               }
2216             }
2217             break;
2218           }
2219           default:
2220             break;
2221         }
2222         if (!success) {
2223           break;
2224         }
2225         j++;
2226       } // while (iter_imm.next())
2227     } // if (has_immediate)
2228   } // for(i < SECT_LIMIT)
2229   FREE_C_HEAP_ARRAY(uint, reloc_data);
2230   return success;
2231 }
2232 
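     // Write code sections: an SCCodeSection header per section (size,
     // original start address, data offset) followed by the aligned contents
     // of each non-empty section.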
2233 bool SCCache::write_code(CodeBuffer* buffer, uint& code_size) {
2234   assert(_write_position == align_up(_write_position, DATA_ALIGNMENT), "%d not aligned to %d", _write_position, DATA_ALIGNMENT);
2235   //assert(buffer->blob() != nullptr, "sanity");
2236   uint code_offset = _write_position;
2237   uint cb_total_size = (uint)buffer->total_content_size();
2238   // Write information about Code sections first.
2239   SCCodeSection scc_cs[CodeBuffer::SECT_LIMIT];
2240   uint scc_cs_size = (uint)(sizeof(SCCodeSection) * CodeBuffer::SECT_LIMIT);
2241   uint offset = align_up(scc_cs_size, DATA_ALIGNMENT);
2242   uint total_size = 0;
2243   for (int i = 0; i < (int)CodeBuffer::SECT_LIMIT; i++) {
2244     const CodeSection* cs = buffer->code_section(i);
2245     assert(cs->mark() == nullptr, "CodeSection::_mark is not implemented");
2246     uint cs_size = (uint)cs->size();
2247     scc_cs[i]._size = cs_size;
2248     scc_cs[i]._origin_address = (cs_size == 0) ? nullptr : cs->start();
2249     scc_cs[i]._offset = (cs_size == 0) ? 0 : (offset + total_size);
2251     total_size += align_up(cs_size, DATA_ALIGNMENT);
2252   }
2253   uint n = write_bytes(scc_cs, scc_cs_size);
2254   if (n != scc_cs_size) {
2255     return false;
2256   }
2257   if (!align_write()) {
2258     return false;
2259   }
2260   assert(_write_position == (code_offset + offset), "%d  != (%d + %d)", _write_position, code_offset, offset);
2261   for (int i = 0; i < (int)CodeBuffer::SECT_LIMIT; i++) {
2262     const CodeSection* cs = buffer->code_section(i);
2263     uint cs_size = (uint)cs->size();
2264     if (cs_size == 0) {
2265       continue;  // skip trivial section
2266     }
2267     assert((_write_position - code_offset) == scc_cs[i]._offset, "%d != %d", _write_position, scc_cs[i]._offset);
2268     // Write code
2269     n = write_bytes(cs->start(), cs_size);
2270     if (n != cs_size) {
2271       return false;
2272     }
2273     if (!align_write()) {
2274       return false;
2275     }
2276   }
2277   assert((_write_position - code_offset) == (offset + total_size), "(%d - %d) != (%d + %d)", _write_position, code_offset, offset, total_size);
2278   code_size = total_size;
2279   return true;
2280 }
2281 
2282 bool SCCache::store_exception_blob(CodeBuffer* buffer, int pc_offset) {
2283   SCCache* cache = open_for_write();
2284   if (cache == nullptr) {
2285     return false;
2286   }
2287   log_info(scc, stubs)("Writing blob '%s' to Startup Code Cache '%s'", buffer->name(), cache->_cache_path);
2288 
2289 #ifdef ASSERT
2290   LogStreamHandle(Debug, scc, nmethod) log;
2291   if (log.is_enabled()) {
2292     FlagSetting fs(PrintRelocations, true);
2293     buffer->print_on(&log);
2294     buffer->decode();
2295   }
2296 #endif
2297   if (!cache->align_write()) {
2298     return false;
2299   }
2300   uint entry_position = cache->_write_position;
2301 
2302   // Write pc_offset
2303   uint n = cache->write_bytes(&pc_offset, sizeof(int));
2304   if (n != sizeof(int)) {
2305     return false;
2306   }
2307 
2308   // Write name
2309   const char* name = buffer->name();
2310   uint name_offset = cache->_write_position - entry_position;
2311   uint name_size = (uint)strlen(name) + 1; // Includes terminating '\0'
2312   n = cache->write_bytes(name, name_size);
2313   if (n != name_size) {
2314     return false;
2315   }
2316 
2317   // Write code section
2318   if (!cache->align_write()) {
2319     return false;
2320   }
2321   uint code_offset = cache->_write_position - entry_position;
2322   uint code_size = 0;
2323   if (!cache->write_code(buffer, code_size)) {
2324     return false;
2325   }
2326   // Write relocInfo array
2327   uint reloc_offset = cache->_write_position - entry_position;
2328   uint reloc_size = 0;
2329   if (!cache->write_relocations(buffer, reloc_size)) {
2330     return false;
2331   }
2332 
2333   uint entry_size = cache->_write_position - entry_position;
2334   SCCEntry* entry = new(cache) SCCEntry(entry_position, entry_size, name_offset, name_size,
2335                                           code_offset, code_size, reloc_offset, reloc_size,
2336                                           SCCEntry::Blob, (uint32_t)999);
2337   log_info(scc, stubs)("Wrote blob '%s' to Startup Code Cache '%s'", name, cache->_cache_path);
2338   return true;
2339 }
2340 
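     // Read debug information: the scopes data size and the number of PcDesc
     // entries, followed by the scopes data stream and the PcDesc array, all
     // copied into a fresh DebugInformationRecorder.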
2341 DebugInformationRecorder* SCCReader::read_debug_info(OopRecorder* oop_recorder) {
2342   uint code_offset = align_up(read_position(), DATA_ALIGNMENT);
2343   int data_size  = *(int*)addr(code_offset);
2344   code_offset   += sizeof(int);
2345   int pcs_length = *(int*)addr(code_offset);
2346   code_offset   += sizeof(int);
2347 
2348   log_debug(scc)("======== read DebugInfo [%d, %d]:", data_size, pcs_length);
2349 
2350   // Aligned initial sizes
2351   int data_size_align  = align_up(data_size, DATA_ALIGNMENT);
2352   int pcs_length_align = pcs_length + 1;
2353   assert(sizeof(PcDesc) > DATA_ALIGNMENT, "sanity");
2354   DebugInformationRecorder* recorder = new DebugInformationRecorder(oop_recorder, data_size_align, pcs_length);
2355 
2356   copy_bytes(addr(code_offset), recorder->stream()->buffer(), data_size_align);
2357   recorder->stream()->set_position(data_size);
2358   code_offset += data_size;
2359 
2360   uint pcs_size = pcs_length * sizeof(PcDesc);
2361   copy_bytes(addr(code_offset), (address)recorder->pcs(), pcs_size);
2362   code_offset += pcs_size;
2363   set_read_position(code_offset);
2364   return recorder;
2365 }
2366 
2367 bool SCCache::write_debug_info(DebugInformationRecorder* recorder) {
2368   if (!align_write()) {
2369     return false;
2370   }
2371   // Don't call data_size() and pcs_size(). They will freeze OopRecorder.
2372   int data_size = recorder->stream()->position(); // In bytes
2373   uint n = write_bytes(&data_size, sizeof(int));
2374   if (n != sizeof(int)) {
2375     return false;
2376   }
2377   int pcs_length = recorder->pcs_length(); // Number of PcDesc entries
2378   n = write_bytes(&pcs_length, sizeof(int));
2379   if (n != sizeof(int)) {
2380     return false;
2381   }
2382   n = write_bytes(recorder->stream()->buffer(), data_size);
2383   if (n != (uint)data_size) {
2384     return false;
2385   }
2386   uint pcs_size = pcs_length * sizeof(PcDesc);
2387   n = write_bytes(recorder->pcs(), pcs_size);
2388   if (n != pcs_size) {
2389     return false;
2390   }
2391   return true;
2392 }
2393 
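     // Read oop maps: the set size, then for each map its data size, the raw
     // OopMap object and its compressed stream data. The freshly allocated
     // write stream is preserved across the raw copy of the OopMap object.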
2394 OopMapSet* SCCReader::read_oop_maps() {
2395   uint code_offset = read_position();
2396   int om_count = *(int*)addr(code_offset);
2397   code_offset += sizeof(int);
2398 
2399   log_debug(scc)("======== read oop maps [%d]:", om_count);
2400 
2401   OopMapSet* oop_maps = new OopMapSet(om_count);
2402   for (int i = 0; i < (int)om_count; i++) {
2403     int data_size = *(int*)addr(code_offset);
2404     code_offset += sizeof(int);
2405 
2406     OopMap* oop_map = new OopMap(data_size);
2407     // Preserve allocated stream
2408     CompressedWriteStream* stream = oop_map->write_stream();
2409 
2410     // Read data which overwrites default data
2411     copy_bytes(addr(code_offset), (address)oop_map, sizeof(OopMap));
2412     code_offset += sizeof(OopMap);
2413     stream->set_position(data_size);
2414     oop_map->set_write_stream(stream);
2415     if (data_size > 0) {
2416       copy_bytes(addr(code_offset), (address)(oop_map->data()), (uint)data_size);
2417       code_offset += data_size;
2418     }
2419 #ifdef ASSERT
2420     oop_map->_locs_length = 0;
2421     oop_map->_locs_used   = nullptr;
2422 #endif
2423     oop_maps->add(oop_map);
2424   }
2425   set_read_position(code_offset);
2426   return oop_maps;
2427 }
2428 
2429 bool SCCache::write_oop_maps(OopMapSet* oop_maps) {
2430   uint om_count = oop_maps->size();
2431   uint n = write_bytes(&om_count, sizeof(int));
2432   if (n != sizeof(int)) {
2433     return false;
2434   }
2435   for (int i = 0; i < (int)om_count; i++) {
2436     OopMap* om = oop_maps->at(i);
2437     int data_size = om->data_size();
2438     n = write_bytes(&data_size, sizeof(int));
2439     if (n != sizeof(int)) {
2440       return false;
2441     }
2442     n = write_bytes(om, sizeof(OopMap));
2443     if (n != sizeof(OopMap)) {
2444       return false;
2445     }
2446     n = write_bytes(om->data(), (uint)data_size);
2447     if (n != (uint)data_size) {
2448       return false;
2449     }
2450   }
2451   return true;
2452 }
2453 
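     // Read an oop reference. A DataKind tag selects the source: null, the
     // non-oop word, a klass mirror, a primitive type mirror, an archived
     // (shared) string or method-handle oop, an interned string, or one of
     // the well-known class loaders. The result is returned as a local JNI
     // handle.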
2454 jobject SCCReader::read_oop(JavaThread* thread, const methodHandle& comp_method) {
2455   uint code_offset = read_position();
2456   oop obj = nullptr;
2457   DataKind kind = *(DataKind*)addr(code_offset);
2458   code_offset += sizeof(DataKind);
2459   set_read_position(code_offset);
2460   if (kind == DataKind::Null) {
2461     return nullptr;
2462   } else if (kind == DataKind::No_Data) {
2463     return (jobject)Universe::non_oop_word();
2464   } else if (kind == DataKind::Klass || kind == DataKind::Klass_Shared) {
2465     Klass* k = read_klass(comp_method, (kind == DataKind::Klass_Shared));
2466     if (k == nullptr) {
2467       return nullptr;
2468     }
2469     obj = k->java_mirror();
2470     if (obj == nullptr) {
2471       set_lookup_failed();
2472       log_info(scc)("Lookup failed for java_mirror of klass %s", k->external_name());
2473       return nullptr;
2474     }
2475   } else if (kind == DataKind::Primitive) {
2476     code_offset = read_position();
2477     int t = *(int*)addr(code_offset);
2478     code_offset += sizeof(int);
2479     set_read_position(code_offset);
2480     BasicType bt = (BasicType)t;
2481     obj = java_lang_Class::primitive_mirror(bt);
2482     log_info(scc)("%d (L%d): Read primitive type klass: %s", compile_id(), comp_level(), type2name(bt));
2483   } else if (kind == DataKind::String_Shared) {
2484     code_offset = read_position();
2485     int k = *(int*)addr(code_offset);
2486     code_offset += sizeof(int);
2487     set_read_position(code_offset);
2488     obj = CDSAccess::get_archived_object(k);
2489   } else if (kind == DataKind::String) {
2490     code_offset = read_position();
2491     int length = *(int*)addr(code_offset);
2492     code_offset += sizeof(int);
2493     set_read_position(code_offset);
2494     const char* dest = addr(code_offset);
2495     set_read_position(code_offset + length);
2496     obj = StringTable::intern(&(dest[0]), thread);
2497     if (obj == nullptr) {
2498       set_lookup_failed();
2499       log_info(scc)("%d (L%d): Lookup failed for String %s",
2500                        compile_id(), comp_level(), &(dest[0]));
2501       return nullptr;
2502     }
2503     assert(java_lang_String::is_instance(obj), "must be string");
2504     log_info(scc)("%d (L%d): Read String: %s", compile_id(), comp_level(), dest);
2505   } else if (kind == DataKind::SysLoader) {
2506     obj = SystemDictionary::java_system_loader();
2507     log_info(scc)("%d (L%d): Read java_system_loader", compile_id(), comp_level());
2508   } else if (kind == DataKind::PlaLoader) {
2509     obj = SystemDictionary::java_platform_loader();
2510     log_info(scc)("%d (L%d): Read java_platform_loader", compile_id(), comp_level());
2511   } else if (kind == DataKind::MH_Oop_Shared) {
2512     code_offset = read_position();
2513     int k = *(int*)addr(code_offset);
2514     code_offset += sizeof(int);
2515     set_read_position(code_offset);
2516     obj = CDSAccess::get_archived_object(k);
2517   } else {
2518     set_lookup_failed();
2519     log_info(scc)("%d (L%d): Unknown oop's kind: %d",
2520                      compile_id(), comp_level(), (int)kind);
2521     return nullptr;
2522   }
2523   return JNIHandles::make_local(thread, obj);
2524 }
2525 
2526 bool SCCReader::read_oops(OopRecorder* oop_recorder, ciMethod* target) {
2527   uint code_offset = read_position();
2528   int oop_count = *(int*)addr(code_offset);
2529   code_offset += sizeof(int);
2530   set_read_position(code_offset);
2531   log_debug(scc)("======== read oops [%d]:", oop_count);
2532   if (oop_count == 0) {
2533     return true;
2534   }
2535   {
2536     VM_ENTRY_MARK;
2537     methodHandle comp_method(THREAD, target->get_Method());
2538     for (int i = 1; i < oop_count; i++) {
2539       jobject jo = read_oop(THREAD, comp_method);
2540       if (lookup_failed()) {
2541         return false;
2542       }
2543       if (oop_recorder->is_real(jo)) {
2544         oop_recorder->find_index(jo);
2545       } else {
2546         oop_recorder->allocate_oop_index(jo);
2547       }
2548       LogStreamHandle(Debug, scc, oops) log;
2549       if (log.is_enabled()) {
2550         log.print("%d: " INTPTR_FORMAT " ", i, p2i(jo));
2551         if (jo == (jobject)Universe::non_oop_word()) {
2552           log.print("non-oop word");
2553         } else if (jo == nullptr) {
2554           log.print("nullptr-oop");
2555         } else {
2556           JNIHandles::resolve(jo)->print_value_on(&log);
2557         }
2558         log.cr();
2559       }
2560     }
2561   }
2562   return true;
2563 }
2564 
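     // Read a metadata reference: null, the non-oop word, a klass, a method,
     // or a method's MethodCounters (recorded as a nested method reference).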
2565 Metadata* SCCReader::read_metadata(const methodHandle& comp_method) {
2566   uint code_offset = read_position();
2567   Metadata* m = nullptr;
2568   DataKind kind = *(DataKind*)addr(code_offset);
2569   code_offset += sizeof(DataKind);
2570   set_read_position(code_offset);
2571   if (kind == DataKind::Null) {
2572     m = (Metadata*)nullptr;
2573   } else if (kind == DataKind::No_Data) {
2574     m = (Metadata*)Universe::non_oop_word();
2575   } else if (kind == DataKind::Klass || kind == DataKind::Klass_Shared) {
2576     m = (Metadata*)read_klass(comp_method, (kind == DataKind::Klass_Shared));
2577   } else if (kind == DataKind::Method || kind == DataKind::Method_Shared) {
2578     m = (Metadata*)read_method(comp_method, (kind == DataKind::Method_Shared));
2579   } else if (kind == DataKind::MethodCnts) {
2580     kind = *(DataKind*)addr(code_offset);
2581     bool shared = (kind == DataKind::Method_Shared);
2582     assert(kind == DataKind::Method || shared, "Sanity");
2583     code_offset += sizeof(DataKind);
2584     set_read_position(code_offset);
2585     m = (Metadata*)read_method(comp_method, shared);
2586     if (m != nullptr) {
2587       Method* method = (Method*)m;
2588       m = method->get_method_counters(Thread::current());
2589       if (m == nullptr) {
2590         set_lookup_failed();
2591         log_info(scc)("%d (L%d): Failed to get MethodCounters", compile_id(), comp_level());
2592       } else {
2593         log_info(scc)("%d (L%d): Read MethodCounters : " INTPTR_FORMAT, compile_id(), comp_level(), p2i(m));
2594       }
2595     }
2596   } else {
2597     set_lookup_failed();
2598     log_info(scc)("%d (L%d): Unknown metadata's kind: %d", compile_id(), comp_level(), (int)kind);
2599   }
2600   return m;
2601 }
2602 
2603 bool SCCReader::read_metadata(OopRecorder* oop_recorder, ciMethod* target) {
2604   uint code_offset = read_position();
2605   int metadata_count = *(int*)addr(code_offset);
2606   code_offset += sizeof(int);
2607   set_read_position(code_offset);
2608 
2609   log_debug(scc)("======== read metadata [%d]:", metadata_count);
2610 
2611   if (metadata_count == 0) {
2612     return true;
2613   }
2614   {
2615     VM_ENTRY_MARK;
2616     methodHandle comp_method(THREAD, target->get_Method());
2617 
2618     for (int i = 1; i < metadata_count; i++) {
2619       Metadata* m = read_metadata(comp_method);
2620       if (lookup_failed()) {
2621         return false;
2622       }
2623       if (oop_recorder->is_real(m)) {
2624         oop_recorder->find_index(m);
2625       } else {
2626         oop_recorder->allocate_metadata_index(m);
2627       }
2628       LogTarget(Debug, scc, metadata) log;
2629       if (log.is_enabled()) {
2630         LogStream ls(log);
2631         ls.print("%d: " INTPTR_FORMAT " ", i, p2i(m));
2632         if (m == (Metadata*)Universe::non_oop_word()) {
2633           ls.print("non-metadata word");
2634         } else if (m == nullptr) {
2635           ls.print("nullptr-oop");
2636         } else {
2637           Metadata::print_value_on_maybe_null(&ls, m);
2638         }
2639         ls.cr();
2640       }
2641     }
2642   }
2643   return true;
2644 }
2645 
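     // Write an oop reference, choosing the DataKind from the object: class
     // mirrors become klass or primitive records, strings are stored either as
     // an archived-object index or as a UTF-8 string, and the system/platform
     // class loaders are recorded symbolically. Any other oop is stored by its
     // archived-object index if it is a permanent heap object; otherwise the
     // store bails out.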
2646 bool SCCache::write_oop(jobject& jo) {
2647   DataKind kind;
2648   uint n = 0;
2649   oop obj = JNIHandles::resolve(jo);
2650   if (jo == nullptr) {
2651     kind = DataKind::Null;
2652     n = write_bytes(&kind, sizeof(int));
2653     if (n != sizeof(int)) {
2654       return false;
2655     }
2656   } else if (jo == (jobject)Universe::non_oop_word()) {
2657     kind = DataKind::No_Data;
2658     n = write_bytes(&kind, sizeof(int));
2659     if (n != sizeof(int)) {
2660       return false;
2661     }
2662   } else if (java_lang_Class::is_instance(obj)) {
2663     if (java_lang_Class::is_primitive(obj)) {
2664       int bt = (int)java_lang_Class::primitive_type(obj);
2665       kind = DataKind::Primitive;
2666       n = write_bytes(&kind, sizeof(int));
2667       if (n != sizeof(int)) {
2668         return false;
2669       }
2670       n = write_bytes(&bt, sizeof(int));
2671       if (n != sizeof(int)) {
2672         return false;
2673       }
2674       log_info(scc)("%d (L%d): Write primitive type klass: %s", compile_id(), comp_level(), type2name((BasicType)bt));
2675     } else {
2676       Klass* klass = java_lang_Class::as_Klass(obj);
2677       if (!write_klass(klass)) {
2678         return false;
2679       }
2680     }
2681   } else if (java_lang_String::is_instance(obj)) {
2682     int k = CDSAccess::get_archived_object_permanent_index(obj);  // k >= 0 means obj is a "permanent heap object"
2683     if (k >= 0) {
2684       kind = DataKind::String_Shared;
2685       n = write_bytes(&kind, sizeof(int));
2686       if (n != sizeof(int)) {
2687         return false;
2688       }
2689       n = write_bytes(&k, sizeof(int));
2690       if (n != sizeof(int)) {
2691         return false;
2692       }
2693       return true;
2694     }
2695     kind = DataKind::String;
2696     n = write_bytes(&kind, sizeof(int));
2697     if (n != sizeof(int)) {
2698       return false;
2699     }
2700     ResourceMark rm;
2701     size_t length_sz = 0;
2702     const char* string = java_lang_String::as_utf8_string(obj, length_sz);
2703     int length = (int)length_sz; // FIXME -- cast
2704     length++; // include trailing '\0'
2705     n = write_bytes(&length, sizeof(int));
2706     if (n != sizeof(int)) {
2707       return false;
2708     }
2709     n = write_bytes(string, (uint)length);
2710     if (n != (uint)length) {
2711       return false;
2712     }
2713     log_info(scc)("%d (L%d): Write String: %s", compile_id(), comp_level(), string);
2714   } else if (java_lang_Module::is_instance(obj)) {
2715     fatal("Module object unimplemented");
2716   } else if (java_lang_ClassLoader::is_instance(obj)) {
2717     if (obj == SystemDictionary::java_system_loader()) {
2718       kind = DataKind::SysLoader;
2719       log_info(scc)("%d (L%d): Write ClassLoader: java_system_loader", compile_id(), comp_level());
2720     } else if (obj == SystemDictionary::java_platform_loader()) {
2721       kind = DataKind::PlaLoader;
2722       log_info(scc)("%d (L%d): Write ClassLoader: java_platform_loader", compile_id(), comp_level());
2723     } else {
2724       fatal("ClassLoader object unimplemented");
2725       return false;
2726     }
2727     n = write_bytes(&kind, sizeof(int));
2728     if (n != sizeof(int)) {
2729       return false;
2730     }
2731   } else {
2732     int k = CDSAccess::get_archived_object_permanent_index(obj);  // k >= 0 means obj is a "permanent heap object"
2733     if (k >= 0) {
2734       kind = DataKind::MH_Oop_Shared;
2735       n = write_bytes(&kind, sizeof(int));
2736       if (n != sizeof(int)) {
2737         return false;
2738       }
2739       n = write_bytes(&k, sizeof(int));
2740       if (n != sizeof(int)) {
2741         return false;
2742       }
2743       return true;
2744     }
2745     // Unhandled oop - bailout
2746     set_lookup_failed();
2747     log_info(scc, nmethod)("%d (L%d): Unhandled obj: " PTR_FORMAT " : %s",
2748                               compile_id(), comp_level(), p2i(obj), obj->klass()->external_name());
2749     return false;
2750   }
2751   return true;
2752 }
2753 
2754 bool SCCache::write_oops(OopRecorder* oop_recorder) {
2755   int oop_count = oop_recorder->oop_count();
2756   uint n = write_bytes(&oop_count, sizeof(int));
2757   if (n != sizeof(int)) {
2758     return false;
2759   }
2760   log_debug(scc)("======== write oops [%d]:", oop_count);
2761 
2762   for (int i = 1; i < oop_count; i++) { // skip first virtual nullptr
2763     jobject jo = oop_recorder->oop_at(i);
2764     LogStreamHandle(Info, scc, oops) log;
2765     if (log.is_enabled()) {
2766       log.print("%d: " INTPTR_FORMAT " ", i, p2i(jo));
2767       if (jo == (jobject)Universe::non_oop_word()) {
2768         log.print("non-oop word");
2769       } else if (jo == nullptr) {
2770         log.print("nullptr-oop");
2771       } else {
2772         JNIHandles::resolve(jo)->print_value_on(&log);
2773       }
2774       log.cr();
2775     }
2776     if (!write_oop(jo)) {
2777       return false;
2778     }
2779   }
2780   return true;
2781 }
2782 
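     // Write a single Metadata reference in a kind-tagged form. Klass and Method
     // pointers go through write_klass()/write_method(); MethodCounters are
     // written as their owning Method. Any other metadata is unsupported.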
2783 bool SCCache::write_metadata(Metadata* m) {
2784   uint n = 0;
2785   if (m == nullptr) {
2786     DataKind kind = DataKind::Null;
2787     n = write_bytes(&kind, sizeof(int));
2788     if (n != sizeof(int)) {
2789       return false;
2790     }
2791   } else if (m == (Metadata*)Universe::non_oop_word()) {
2792     DataKind kind = DataKind::No_Data;
2793     n = write_bytes(&kind, sizeof(int));
2794     if (n != sizeof(int)) {
2795       return false;
2796     }
2797   } else if (m->is_klass()) {
2798     if (!write_klass((Klass*)m)) {
2799       return false;
2800     }
2801   } else if (m->is_method()) {
2802     if (!write_method((Method*)m)) {
2803       return false;
2804     }
2805   } else if (m->is_methodCounters()) {
2806     DataKind kind = DataKind::MethodCnts;
2807     n = write_bytes(&kind, sizeof(int));
2808     if (n != sizeof(int)) {
2809       return false;
2810     }
2811     if (!write_method(((MethodCounters*)m)->method())) {
2812       return false;
2813     }
2814     log_info(scc)("%d (L%d): Write MethodCounters : " INTPTR_FORMAT, compile_id(), comp_level(), p2i(m));
2815   } else { // Not supported
2816     fatal("metadata : " INTPTR_FORMAT " unimplemented", p2i(m));
2817     return false;
2818   }
2819   return true;
2820 }
2821 
2822 bool SCCache::write_metadata(OopRecorder* oop_recorder) {
2823   int metadata_count = oop_recorder->metadata_count();
2824   uint n = write_bytes(&metadata_count, sizeof(int));
2825   if (n != sizeof(int)) {
2826     return false;
2827   }
2828 
2829   log_debug(scc)("======== write metadata [%d]:", metadata_count);
2830 
2831   for (int i = 1; i < metadata_count; i++) { // skip first virtual nullptr
2832     Metadata* m = oop_recorder->metadata_at(i);
2833     LogStreamHandle(Debug, scc, metadata) log;
2834     if (log.is_enabled()) {
2835       log.print("%d: " INTPTR_FORMAT " ", i, p2i(m));
2836       if (m == (Metadata*)Universe::non_oop_word()) {
2837         log.print("non-metadata word");
2838       } else if (m == nullptr) {
2839         log.print("nullptr-oop");
2840       } else {
2841         Metadata::print_value_on_maybe_null(&log, m);
2842       }
2843       log.cr();
2844     }
2845     if (!write_metadata(m)) {
2846       return false;
2847     }
2848   }
2849   return true;
2850 }
2851 
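     // Read the dependencies blob stored by write_nmethod(): an int size followed
     // by the DATA_ALIGNMENT-aligned content, which is handed to the Dependencies
     // object without copying.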
2852 bool SCCReader::read_dependencies(Dependencies* dependencies) {
2853   uint code_offset = read_position();
2854   int dependencies_size = *(int*)addr(code_offset);
2855 
2856   log_debug(scc)("======== read dependencies [%d]:", dependencies_size);
2857 
2858   code_offset += sizeof(int);
2859   code_offset = align_up(code_offset, DATA_ALIGNMENT);
2860   if (dependencies_size > 0) {
2861     dependencies->set_content((u_char*)addr(code_offset), dependencies_size);
2862   }
2863   code_offset += dependencies_size;
2864   set_read_position(code_offset);
2865   return true;
2866 }
2867 
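     // Load a cached nmethod for the current CompileTask. Returns false if the
     // cache cannot be opened or is being closed, or if reading and registering
     // the nmethod fails; a failed read marks the entry with set_load_fail().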
2868 bool SCCache::load_nmethod(ciEnv* env, ciMethod* target, int entry_bci, AbstractCompiler* compiler, CompLevel comp_level) {
2869   TraceTime t1("SC total load time", &_t_totalLoad, enable_timers(), false);
2870   CompileTask* task = env->task();
2871   SCCEntry* entry = task->scc_entry();
2872   bool preload = task->preload();
2873   assert(entry != nullptr, "sanity");
2874   SCCache* cache = open_for_read();
2875   if (cache == nullptr) {
2876     return false;
2877   }
2878   if (log_is_enabled(Info, scc, nmethod)) {
2879     uint decomp = (target->method_data() == nullptr) ? 0 : target->method_data()->decompile_count();
2880     VM_ENTRY_MARK;
2881     ResourceMark rm;
2882     methodHandle method(THREAD, target->get_Method());
2883     const char* target_name = method->name_and_sig_as_C_string();
2884     uint hash = java_lang_String::hash_code((const jbyte*)target_name, (int)strlen(target_name));
2885     bool clinit_brs = entry->has_clinit_barriers();
2886     log_info(scc, nmethod)("%d (L%d): %s nmethod '%s' (decomp: %d, hash: " UINT32_FORMAT_X_0 "%s%s)",
2887                            task->compile_id(), task->comp_level(), (preload ? "Preloading" : "Reading"),
2888                            target_name, decomp, hash, (clinit_brs ? ", has clinit barriers" : ""),
2889                            (entry->ignore_decompile() ? ", ignore_decomp" : ""));
2890   }
2891   ReadingMark rdmk;
2892   if (rdmk.failed()) {
2893     // Cache is closed, cannot touch anything.
2894     return false;
2895   }
2896 
2897   SCCReader reader(cache, entry, task);
2898   bool success = reader.compile(env, target, entry_bci, compiler);
2899   if (success) {
2900     task->set_num_inlined_bytecodes(entry->num_inlined_bytecodes());
2901   } else {
2902     entry->set_load_fail();
2903   }
2904   return success;
2905 }
2906 
2907 SCCReader::SCCReader(SCCache* cache, SCCEntry* entry, CompileTask* task) {
2908   _cache = cache;
2909   _entry = entry;
2910   _load_buffer = cache->cache_buffer();
2911   _read_position = 0;
2912   if (task != nullptr) {
2913     _compile_id = task->compile_id();
2914     _comp_level = task->comp_level();
2915     _preload    = task->preload();
2916   } else {
2917     _compile_id = 0;
2918     _comp_level = 0;
2919     _preload    = false;
2920   }
2921   _lookup_failed = false;
2922 }
2923 
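     // Read the cached compilation in the same order write_nmethod() stored it:
     // flags, orig_pc_offset, frame size, CodeOffsets, oops, metadata, debug info,
     // dependencies, oop maps, exception and null-check tables, code and
     // relocations. The result is then registered through ciEnv::register_method().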
2924 bool SCCReader::compile(ciEnv* env, ciMethod* target, int entry_bci, AbstractCompiler* compiler) {
2925   uint entry_position = _entry->offset();
2926   uint code_offset = entry_position + _entry->code_offset();
2927   set_read_position(code_offset);
2928 
2929   // Read flags
2930   int flags = *(int*)addr(code_offset);
2931   code_offset += sizeof(int);
2932   bool has_monitors      = (flags & 0x1) != 0;
2933   bool has_wide_vectors  = (flags & 0x2) != 0;
2934   bool has_unsafe_access = (flags & 0x4) != 0;
2935   bool has_scoped_access = (flags & 0x8) != 0;
2936 
2937   int orig_pc_offset = *(int*)addr(code_offset);
2938   code_offset += sizeof(int);
2939   int frame_size = *(int*)addr(code_offset);
2940   code_offset += sizeof(int);
2941 
2942   // Read offsets
2943   CodeOffsets* offsets = (CodeOffsets*)addr(code_offset);
2944   code_offset += sizeof(CodeOffsets);
2945 
2946   // Create Debug Information Recorder to record scopes, oopmaps, etc.
2947   OopRecorder* oop_recorder = new OopRecorder(env->arena());
2948   env->set_oop_recorder(oop_recorder);
2949 
2950   set_read_position(code_offset);
2951 
2952   // Read OopRecorder data
2953   if (!read_oops(oop_recorder, target)) {
2954     return false;
2955   }
2956   if (!read_metadata(oop_recorder, target)) {
2957     return false;
2958   }
2959 
2960   // Read Debug info
2961   DebugInformationRecorder* recorder = read_debug_info(oop_recorder);
2962   if (recorder == nullptr) {
2963     return false;
2964   }
2965   env->set_debug_info(recorder);
2966 
2967   // Read Dependencies (compressed already)
2968   Dependencies* dependencies = new Dependencies(env);
2969   if (!read_dependencies(dependencies)) {
2970     return false;
2971   }
2972   env->set_dependencies(dependencies);
2973 
2974   // Read oop maps
2975   OopMapSet* oop_maps = read_oop_maps();
2976   if (oop_maps == nullptr) {
2977     return false;
2978   }
2979 
2980   // Read exception handler table
2981   code_offset = read_position();
2982   int exc_table_length = *(int*)addr(code_offset);
2983   code_offset += sizeof(int);
2984   ExceptionHandlerTable handler_table(MAX2(exc_table_length, 4));
2985   if (exc_table_length > 0) {
2986     handler_table.set_length(exc_table_length);
2987     uint exc_table_size = handler_table.size_in_bytes();
2988     copy_bytes(addr(code_offset), (address)handler_table.table(), exc_table_size);
2989     code_offset += exc_table_size;
2990   }
2991 
2992   // Read null check table
2993   int nul_chk_length = *(int*)addr(code_offset);
2994   code_offset += sizeof(int);
2995   ImplicitExceptionTable nul_chk_table;
2996   if (nul_chk_length > 0) {
2997     nul_chk_table.set_size(nul_chk_length);
2998     nul_chk_table.set_len(nul_chk_length);
2999     uint nul_chk_size = nul_chk_table.size_in_bytes();
3000     copy_bytes(addr(code_offset), (address)nul_chk_table.data(), nul_chk_size - sizeof(implicit_null_entry));
3001     code_offset += nul_chk_size;
3002   }
3003 
3004   uint reloc_size = _entry->reloc_size();
3005   CodeBuffer buffer("Compile::Fill_buffer", _entry->code_size(), reloc_size);
3006   buffer.initialize_oop_recorder(oop_recorder);
3007 
3008   const char* name = addr(entry_position + _entry->name_offset());
3009 
3010   // Create fake original CodeBuffer
3011   CodeBuffer orig_buffer(name);
3012 
3013   // Read code
3014   if (!read_code(&buffer, &orig_buffer, align_up(code_offset, DATA_ALIGNMENT))) {
3015     return false;
3016   }
3017 
3018   // Read relocations
3019   uint reloc_offset = entry_position + _entry->reloc_offset();
3020   set_read_position(reloc_offset);
3021   if (!read_relocations(&buffer, &orig_buffer, oop_recorder, target)) {
3022     return false;
3023   }
3024 
3025   log_info(scc, nmethod)("%d (L%d): Read nmethod '%s' from Startup Code Cache '%s'", compile_id(), comp_level(), name, _cache->cache_path());
3026 #ifdef ASSERT
3027   LogStreamHandle(Debug, scc, nmethod) log;
3028   if (log.is_enabled()) {
3029     FlagSetting fs(PrintRelocations, true);
3030     buffer.print_on(&log);
3031     buffer.decode();
3032   }
3033 #endif
3034 
3035   if (VerifyCachedCode) {
3036     return false;
3037   }
3038 
3039   // Register nmethod
3040   TraceTime t1("SC total nmethod register time", &_t_totalRegister, enable_timers(), false);
3041   env->register_method(target, entry_bci,
3042                        offsets, orig_pc_offset,
3043                        &buffer, frame_size,
3044                        oop_maps, &handler_table,
3045                        &nul_chk_table, compiler,
3046                        _entry->has_clinit_barriers(),
3047                        false,
3048                        has_unsafe_access,
3049                        has_wide_vectors,
3050                        has_monitors,
3051                        has_scoped_access,
3052                        0, true /* install_code */,
3053                        (SCCEntry *)_entry);
3054   CompileTask* task = env->task();
3055   bool success = task->is_success();
3056   if (success) {
3057     ((SCCEntry *)_entry)->set_loaded();
3058   }
3059   return success;
3060 }
3061 
3062 // There is no concurrent writing to the cache file because this method is called from
3063 // ciEnv::register_method() under the MethodCompileQueue_lock and Compile_lock locks.
3064 SCCEntry* SCCache::store_nmethod(const methodHandle& method,
3065                      int comp_id,
3066                      int entry_bci,
3067                      CodeOffsets* offsets,
3068                      int orig_pc_offset,
3069                      DebugInformationRecorder* recorder,
3070                      Dependencies* dependencies,
3071                      CodeBuffer* buffer,
3072                      int frame_size,
3073                      OopMapSet* oop_maps,
3074                      ExceptionHandlerTable* handler_table,
3075                      ImplicitExceptionTable* nul_chk_table,
3076                      AbstractCompiler* compiler,
3077                      CompLevel comp_level,
3078                      bool has_clinit_barriers,
3079                      bool for_preload,
3080                      bool has_unsafe_access,
3081                      bool has_wide_vectors,
3082                      bool has_monitors,
3083                      bool has_scoped_access) {
3084   if (!CDSConfig::is_dumping_cached_code()) {
3085     return nullptr; // The metadata and heap in the CDS image haven't been finalized yet.
3086   }
3087   if (entry_bci != InvocationEntryBci) {
3088     return nullptr; // No OSR
3089   }
3090   if (compiler->is_c1() && (comp_level == CompLevel_simple || comp_level == CompLevel_limited_profile)) {
3091     // Cache tier1 compilations
3092   } else if (!compiler->is_c2()) {
3093     return nullptr; // Only C2 now
3094   }
3095   TraceTime t1("SC total store time", &_t_totalStore, enable_timers(), false);
3096   SCCache* cache = open_for_write();
3097   if (cache == nullptr) {
3098     return nullptr; // Cache file is closed
3099   }
3100   SCCEntry* entry = cache->write_nmethod(method, comp_id, entry_bci, offsets, orig_pc_offset, recorder, dependencies, buffer,
3101                                   frame_size, oop_maps, handler_table, nul_chk_table, compiler, comp_level,
3102                                   has_clinit_barriers, for_preload, has_unsafe_access, has_wide_vectors, has_monitors, has_scoped_access);
3103   if (entry == nullptr) {
3104     log_info(scc, nmethod)("%d (L%d): nmethod store attempt failed", comp_id, (int)comp_level);
3105   }
3106   return entry;
3107 }
3108 
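     // Write one nmethod entry. The layout must match what SCCReader::compile()
     // expects: method name, flags, orig_pc_offset, frame size, CodeOffsets,
     // OopRecorder oops and metadata, debug info, dependencies, oop maps,
     // exception and null-check tables, then the aligned code and relocations.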
3109 SCCEntry* SCCache::write_nmethod(const methodHandle& method,
3110                                  int comp_id,
3111                                  int entry_bci,
3112                                  CodeOffsets* offsets,
3113                                  int orig_pc_offset,
3114                                  DebugInformationRecorder* recorder,
3115                                  Dependencies* dependencies,
3116                                  CodeBuffer* buffer,
3117                                  int frame_size,
3118                                  OopMapSet* oop_maps,
3119                                  ExceptionHandlerTable* handler_table,
3120                                  ImplicitExceptionTable* nul_chk_table,
3121                                  AbstractCompiler* compiler,
3122                                  CompLevel comp_level,
3123                                  bool has_clinit_barriers,
3124                                  bool for_preload,
3125                                  bool has_unsafe_access,
3126                                  bool has_wide_vectors,
3127                                  bool has_monitors,
3128                                  bool has_scoped_access) {
3129 //  if (method->is_hidden()) {
3130 //    ResourceMark rm;
3131 //    log_info(scc, nmethod)("%d (L%d): Skip hidden method '%s'", comp_id, (int)comp_level, method->name_and_sig_as_C_string());
3132 //    return nullptr;
3133 //  }
3134   if (buffer->before_expand() != nullptr) {
3135     ResourceMark rm;
3136     log_info(scc, nmethod)("%d (L%d): Skip nmethod with expanded buffer '%s'", comp_id, (int)comp_level, method->name_and_sig_as_C_string());
3137     return nullptr;
3138   }
3139 #ifdef ASSERT
3140   LogStreamHandle(Debug, scc, nmethod) log;
3141   if (log.is_enabled()) {
3142     tty->print_cr(" == store_nmethod");
3143     FlagSetting fs(PrintRelocations, true);
3144     buffer->print_on(&log);
3145     buffer->decode();
3146   }
3147 #endif
3148   assert(!has_clinit_barriers || _gen_preload_code, "sanity");
3149   Method* m = method();
3150   bool method_in_cds = MetaspaceShared::is_in_shared_metaspace((address)m);
3151   InstanceKlass* holder = m->method_holder();
3152   bool klass_in_cds = holder->is_shared() && !holder->is_shared_unregistered_class();
3153   bool builtin_loader = holder->class_loader_data()->is_builtin_class_loader_data();
3154   if (!builtin_loader) {
3155     ResourceMark rm;
3156     log_info(scc, nmethod)("%d (L%d): Skip method '%s' loaded by custom class loader %s", comp_id, (int)comp_level, method->name_and_sig_as_C_string(), holder->class_loader_data()->loader_name());
3157     return nullptr;
3158   }
3159   if (for_preload && !(method_in_cds && klass_in_cds)) {
3160     ResourceMark rm;
3161     log_info(scc, nmethod)("%d (L%d): Skip method '%s' for preload: not in CDS", comp_id, (int)comp_level, method->name_and_sig_as_C_string());
3162     return nullptr;
3163   }
3164   assert(!for_preload || method_in_cds, "sanity");
3165   _for_preload = for_preload;
3166   _has_clinit_barriers = has_clinit_barriers;
3167 
3168   if (!align_write()) {
3169     return nullptr;
3170   }
3171   _compile_id = comp_id;
3172   _comp_level = (int)comp_level;
3173 
3174   uint entry_position = _write_position;
3175 
3176   uint decomp = (method->method_data() == nullptr) ? 0 : method->method_data()->decompile_count();
3177 
3178   // Is this the assembly phase of the one-step workflow?
3179   // In that phase compilation is done from saved profiling data
3180   // without running the application, so decompilation counters are ignored.
3181   // They are also ignored for C1 code because it is unconditionally
3182   // decompiled when the corresponding C2 code is published.
3183   bool ignore_decompile = (comp_level == CompLevel_limited_profile) ||
3184                           CDSConfig::is_dumping_final_static_archive();
3185 
3186   // Write name
3187   uint name_offset = 0;
3188   uint name_size   = 0;
3189   uint hash = 0;
3190   uint n;
3191   {
3192     ResourceMark rm;
3193     const char* name   = method->name_and_sig_as_C_string();
3194     log_info(scc, nmethod)("%d (L%d): Writing nmethod '%s' (comp level: %d, decomp: %d%s%s) to Startup Code Cache '%s'",
3195                            comp_id, (int)comp_level, name, comp_level, decomp,
3196                            (ignore_decompile ? ", ignore_decomp" : ""),
3197                            (has_clinit_barriers ? ", has clinit barriers" : ""), _cache_path);
3198 
3199     LogStreamHandle(Info, scc, loader) log;
3200     if (log.is_enabled()) {
3201       oop loader = holder->class_loader();
3202       oop domain = holder->protection_domain();
3203       log.print("Holder: ");
3204       holder->print_value_on(&log);
3205       log.print(" loader: ");
3206       if (loader == nullptr) {
3207         log.print("nullptr");
3208       } else {
3209         loader->print_value_on(&log);
3210       }
3211       log.print(" domain: ");
3212       if (domain == nullptr) {
3213         log.print("nullptr");
3214       } else {
3215         domain->print_value_on(&log);
3216       }
3217       log.cr();
3218     }
3219     name_offset = _write_position - entry_position;
3220     name_size   = (uint)strlen(name) + 1; // Includes terminating '\0'
3221     n = write_bytes(name, name_size);
3222     if (n != name_size) {
3223       return nullptr;
3224     }
3225     hash = java_lang_String::hash_code((const jbyte*)name, (int)strlen(name));
3226   }
3227 
3228   if (!align_write()) {
3229     return nullptr;
3230   }
3231 
3232   uint code_offset = _write_position - entry_position;
3233 
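       // Pack the nmethod flags into one int; the bit assignments must match
       // the decoding in SCCReader::compile().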
3234   int flags = (has_scoped_access ? 0x8 : 0) |
3235               (has_unsafe_access ? 0x4 : 0) |
3236               (has_wide_vectors  ? 0x2 : 0) |
3237               (has_monitors      ? 0x1 : 0);
3238   n = write_bytes(&flags, sizeof(int));
3239   if (n != sizeof(int)) {
3240     return nullptr;
3241   }
3242 
3243   n = write_bytes(&orig_pc_offset, sizeof(int));
3244   if (n != sizeof(int)) {
3245     return nullptr;
3246   }
3247 
3248   n = write_bytes(&frame_size, sizeof(int));
3249   if (n != sizeof(int)) {
3250     return nullptr;
3251   }
3252 
3253   // Write offsets
3254   n = write_bytes(offsets, sizeof(CodeOffsets));
3255   if (n != sizeof(CodeOffsets)) {
3256     return nullptr;
3257   }
3258 
3259   // Write OopRecorder data
3260   if (!write_oops(buffer->oop_recorder())) {
3261     if (lookup_failed() && !failed()) {
3262       // Skip this method and reposition file
3263       set_write_position(entry_position);
3264     }
3265     return nullptr;
3266   }
3267   if (!write_metadata(buffer->oop_recorder())) {
3268     if (lookup_failed() && !failed()) {
3269       // Skip this method and reposition file
3270       set_write_position(entry_position);
3271     }
3272     return nullptr;
3273   }
3274 
3275   // Write Debug info
3276   if (!write_debug_info(recorder)) {
3277     return nullptr;
3278   }
3279   // Write Dependencies
3280   int dependencies_size = (int)dependencies->size_in_bytes();
3281   n = write_bytes(&dependencies_size, sizeof(int));
3282   if (n != sizeof(int)) {
3283     return nullptr;
3284   }
3285   if (!align_write()) {
3286     return nullptr;
3287   }
3288   n = write_bytes(dependencies->content_bytes(), dependencies_size);
3289   if (n != (uint)dependencies_size) {
3290     return nullptr;
3291   }
3292 
3293   // Write oop maps
3294   if (!write_oop_maps(oop_maps)) {
3295     return nullptr;
3296   }
3297 
3298   // Write exception handler table
3299   int exc_table_length = handler_table->length();
3300   n = write_bytes(&exc_table_length, sizeof(int));
3301   if (n != sizeof(int)) {
3302     return nullptr;
3303   }
3304   uint exc_table_size = handler_table->size_in_bytes();
3305   n = write_bytes(handler_table->table(), exc_table_size);
3306   if (n != exc_table_size) {
3307     return nullptr;
3308   }
3309 
3310   // Write null check table
3311   int nul_chk_length = nul_chk_table->len();
3312   n = write_bytes(&nul_chk_length, sizeof(int));
3313   if (n != sizeof(int)) {
3314     return nullptr;
3315   }
3316   uint nul_chk_size = nul_chk_table->size_in_bytes();
3317   n = write_bytes(nul_chk_table->data(), nul_chk_size);
3318   if (n != nul_chk_size) {
3319     return nullptr;
3320   }
3321 
3322   // Write code section
3323   if (!align_write()) {
3324     return nullptr;
3325   }
3326   uint code_size = 0;
3327   if (!write_code(buffer, code_size)) {
3328     return nullptr;
3329   }
3330   // Write relocInfo array
3331   uint reloc_offset = _write_position - entry_position;
3332   uint reloc_size = 0;
3333   if (!write_relocations(buffer, reloc_size)) {
3334     if (lookup_failed() && !failed()) {
3335       // Skip this method and reposition file
3336       set_write_position(entry_position);
3337     }
3338     return nullptr;
3339   }
3340   uint entry_size = _write_position - entry_position;
3341 
3342   SCCEntry* entry = new (this) SCCEntry(entry_position, entry_size, name_offset, name_size,
3343                                         code_offset, code_size, reloc_offset, reloc_size,
3344                                         SCCEntry::Code, hash, (uint)comp_level, (uint)comp_id, decomp,
3345                                         has_clinit_barriers, _for_preload, ignore_decompile);
3346   if (method_in_cds) {
3347     entry->set_method(m);
3348   }
3349 #ifdef ASSERT
3350   if (has_clinit_barriers || _for_preload) {
3351     assert(for_preload, "sanity");
3352     assert(entry->method() != nullptr, "sanity");
3353   }
3354 #endif
3355   {
3356     ResourceMark rm;
3357     const char* name   = method->name_and_sig_as_C_string();
3358     log_info(scc, nmethod)("%d (L%d): Wrote nmethod '%s'%s to Startup Code Cache '%s'",
3359                            comp_id, (int)comp_level, name, (_for_preload ? " (for preload)" : ""), _cache_path);
3360   }
3361   if (VerifyCachedCode) {
3362     return nullptr;
3363   }
3364   return entry;
3365 }
3366 
3367 static void print_helper1(outputStream* st, const char* name, int count) {
3368   if (count > 0) {
3369     st->print(" %s=%d", name, count);
3370   }
3371 }
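     // Print one row of the statistics table. The stats array is indexed by
     // SCCEntry kind, with Code entries further split by compilation level (plus
     // one extra slot for preloaded code); the columns are: 0 total,
     // 1 has_clinit_barriers, 2 for_preload, 3 loaded, 4 invalidated, 5 failed.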
3372 static void print_helper(outputStream* st, const char* name, int stats[6+3][6], int idx) {
3373   int total = stats[idx][0];
3374   if (total > 0) {
3375     st->print("  %s:", name);
3376     print_helper1(st, "total",               stats[idx][0]);
3377     //print_helper1(st, "for_preload",         stats[idx][2]); // implied by Tier5
3378     print_helper1(st, "loaded",              stats[idx][3]);
3379     print_helper1(st, "invalidated",         stats[idx][4]);
3380     print_helper1(st, "failed",              stats[idx][5]);
3381     print_helper1(st, "has_clinit_barriers", stats[idx][1]);
3382     st->cr();
3383   }
3384 }
3385 
3386 void SCCache::print_statistics_on(outputStream* st) {
3387   SCCache* cache = open_for_read();
3388   if (cache != nullptr) {
3389     ReadingMark rdmk;
3390     if (rdmk.failed()) {
3391       // Cache is closed, cannot touch anything.
3392       return;
3393     }
3394 
3395     uint count = cache->_load_header->entries_count();
3396     uint* search_entries = (uint*)cache->addr(cache->_load_header->entries_offset()); // [id, index]
3397     SCCEntry* load_entries = (SCCEntry*)(search_entries + 2 * count);
3398 
3399     int stats[6 + 3][6] = {0};
3400     for (uint i = 0; i < count; i++) {
3401       int index = search_entries[2*i + 1];
3402       SCCEntry* entry = &(load_entries[index]);
3403 
3404       int lvl = entry->kind();
3405       if (entry->kind() == SCCEntry::Code) {
3406         lvl += entry->comp_level() + (entry->for_preload() ? 1 : 0);
3407       }
3408       ++stats[lvl][0]; // total
3409       if (entry->has_clinit_barriers()) {
3410         ++stats[lvl][1];
3411       }
3412       if (entry->for_preload()) {
3413         ++stats[lvl][2];
3414       }
3415       if (entry->is_loaded()) {
3416         ++stats[lvl][3];
3417       }
3418       if (entry->not_entrant()) {
3419         ++stats[lvl][4];
3420       }
3421       if (entry->load_fail()) {
3422         ++stats[lvl][5];
3423       }
3424     }
3425 
3426     print_helper(st, "None", stats, SCCEntry::None);
3427     print_helper(st, "Stub", stats, SCCEntry::Stub);
3428     print_helper(st, "Blob", stats, SCCEntry::Blob);
3429     for (int lvl = 0; lvl <= CompLevel_full_optimization + 1; lvl++) {
3430       ResourceMark rm;
3431       stringStream ss;
3432       ss.print("SC T%d", lvl);
3433       print_helper(st, ss.freeze(), stats, SCCEntry::Code + lvl);
3434     }
3435 
3436   } else {
3437     st->print_cr("failed to open SCA at %s", CachedCodeFile);
3438   }
3439 }
3440 
3441 void SCCache::print_on(outputStream* st) {
3442   SCCache* cache = open_for_read();
3443   if (cache != nullptr) {
3444     ReadingMark rdmk;
3445     if (rdmk.failed()) {
3446       // Cache is closed, cannot touch anything.
3447       return;
3448     }
3449 
3450     uint count = cache->_load_header->entries_count();
3451     uint* search_entries = (uint*)cache->addr(cache->_load_header->entries_offset()); // [id, index]
3452     SCCEntry* load_entries = (SCCEntry*)(search_entries + 2 * count);
3453 
3454     for (uint i = 0; i < count; i++) {
3455       int index = search_entries[2*i + 1];
3456       SCCEntry* entry = &(load_entries[index]);
3457 
3458       st->print_cr("%4u: %4u: K%u L%u offset=%u decompile=%u size=%u code_size=%u%s%s%s%s",
3459                 i, index, entry->kind(), entry->comp_level(), entry->offset(),
3460                 entry->decompile(), entry->size(), entry->code_size(),
3461                 entry->has_clinit_barriers() ? " has_clinit_barriers" : "",
3462                 entry->for_preload()         ? " for_preload"         : "",
3463                 entry->is_loaded()           ? " loaded"              : "",
3464                 entry->not_entrant()         ? " not_entrant"         : "");
3465       st->print_raw("         ");
3466       SCCReader reader(cache, entry, nullptr);
3467       reader.print_on(st);
3468     }
3469   } else {
3470     st->print_cr("failed to open SCA at %s", CachedCodeFile);
3471   }
3472 }
3473 
3474 void SCCache::print_unused_entries_on(outputStream* st) {
3475   LogStreamHandle(Info, scc, init) info;
3476   if (info.is_enabled()) {
3477     SCCache::iterate([&](SCCEntry* entry) {
3478       if (!entry->is_loaded()) {
3479         MethodTrainingData* mtd = MethodTrainingData::find(methodHandle(Thread::current(), entry->method()));
3480         if (mtd != nullptr) {
3481           if (mtd->has_holder()) {
3482             if (mtd->holder()->method_holder()->is_initialized()) {
3483               ResourceMark rm;
3484               mtd->iterate_all_compiles([&](CompileTrainingData* ctd) {
3485                 if ((uint)ctd->level() == entry->comp_level()) {
3486                   if (ctd->init_deps_left() == 0) {
3487                     nmethod* nm = mtd->holder()->code();
3488                     if (nm == nullptr) {
3489                       if (mtd->holder()->queued_for_compilation()) {
3490                         return; // scheduled for compilation
3491                       }
3492                     } else if ((uint)nm->comp_level() >= entry->comp_level()) {
3493                       return; // already online compiled and superseded by a more optimal method
3494                     }
3495                     info.print("SCC entry not loaded: ");
3496                     ctd->print_on(&info);
3497                     info.cr();
3498                   }
3499                 }
3500               });
3501             } else {
3502               // not yet initialized
3503             }
3504           } else {
3505             info.print("SCC entry doesn't have a holder: ");
3506             mtd->print_on(&info);
3507             info.cr();
3508           }
3509         }
3510       }
3511     });
3512   }
3513 }
3514 
3515 void SCCReader::print_on(outputStream* st) {
3516   uint entry_position = _entry->offset();
3517   set_read_position(entry_position);
3518 
3519   // Read name
3520   uint name_offset = entry_position + _entry->name_offset();
3521   uint name_size = _entry->name_size(); // Includes terminating '\0'
3522   const char* name = addr(name_offset);
3523 
3524   st->print_cr("  name: %s", name);
3525 }
3526 
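     // SCAddressTable keeps arrays of well-known VM addresses (external runtime
     // entries, stubs and runtime blobs) so that cached code can refer to them by
     // a stable index instead of an absolute address (see address_for_id() below).
     // The *_max values bound the sizes of the corresponding arrays.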
3527 #define _extrs_max 80
3528 #define _stubs_max 120
3529 #define _blobs_max 100
3530 #define _shared_blobs_max 24
3531 #define _C2_blobs_max 25
3532 #define _C1_blobs_max (_blobs_max - _shared_blobs_max - _C2_blobs_max)
3533 #define _all_max 300
3534 
3535 #define SET_ADDRESS(type, addr)                           \
3536   {                                                       \
3537     type##_addr[type##_length++] = (address) (addr);      \
3538     assert(type##_length <= type##_max, "increase size"); \
3539   }
3540 
3541 static bool initializing = false;
3542 void SCAddressTable::init() {
3543   if (_complete || initializing) return; // Done already
3544   initializing = true;
3545   _extrs_addr = NEW_C_HEAP_ARRAY(address, _extrs_max, mtCode);
3546   _stubs_addr = NEW_C_HEAP_ARRAY(address, _stubs_max, mtCode);
3547   _blobs_addr = NEW_C_HEAP_ARRAY(address, _blobs_max, mtCode);
3548 
3549   // Divide the _blobs_addr array into chunks because they can be initialized in parallel
3550   _C2_blobs_addr = _blobs_addr + _shared_blobs_max;// C2 blobs addresses stored after shared blobs
3551   _C1_blobs_addr = _C2_blobs_addr + _C2_blobs_max; // C1 blobs addresses stored after C2 blobs
3552 
3553   _extrs_length = 0;
3554   _stubs_length = 0;
3555   _blobs_length = 0;       // for shared blobs
3556   _C1_blobs_length = 0;
3557   _C2_blobs_length = 0;
3558   _final_blobs_length = 0; // Depends on the number of C1 blobs
3559 
3560   // Runtime methods
3561 #ifdef COMPILER2
3562   SET_ADDRESS(_extrs, OptoRuntime::handle_exception_C);
3563 #endif
3564 #ifdef COMPILER1
3565   SET_ADDRESS(_extrs, Runtime1::is_instance_of);
3566   SET_ADDRESS(_extrs, Runtime1::trace_block_entry);
3567 #endif
3568 
3569   SET_ADDRESS(_extrs, CompressedOops::base_addr());
3570 #if INCLUDE_G1GC
3571   SET_ADDRESS(_extrs, G1BarrierSetRuntime::write_ref_field_post_entry);
3572   SET_ADDRESS(_extrs, G1BarrierSetRuntime::write_ref_field_pre_entry);
3573 #endif
3574 
3575 #if INCLUDE_SHENANDOAHGC
3576   SET_ADDRESS(_extrs, ShenandoahRuntime::arraycopy_barrier_oop);
3577   SET_ADDRESS(_extrs, ShenandoahRuntime::arraycopy_barrier_narrow_oop);
3578   SET_ADDRESS(_extrs, ShenandoahRuntime::write_ref_field_pre);
3579   SET_ADDRESS(_extrs, ShenandoahRuntime::clone_barrier);
3580   SET_ADDRESS(_extrs, ShenandoahRuntime::load_reference_barrier_strong);
3581   SET_ADDRESS(_extrs, ShenandoahRuntime::load_reference_barrier_strong_narrow);
3582   SET_ADDRESS(_extrs, ShenandoahRuntime::load_reference_barrier_weak);
3583   SET_ADDRESS(_extrs, ShenandoahRuntime::load_reference_barrier_weak_narrow);
3584   SET_ADDRESS(_extrs, ShenandoahRuntime::load_reference_barrier_phantom);
3585   SET_ADDRESS(_extrs, ShenandoahRuntime::load_reference_barrier_phantom_narrow);
3586 #endif
3587 
3588   SET_ADDRESS(_extrs, SharedRuntime::complete_monitor_unlocking_C);
3589   SET_ADDRESS(_extrs, SharedRuntime::enable_stack_reserved_zone);
3590 #if defined(AMD64) && !defined(ZERO)
3591   SET_ADDRESS(_extrs, SharedRuntime::montgomery_multiply);
3592   SET_ADDRESS(_extrs, SharedRuntime::montgomery_square);
3593 #endif // AMD64
3594   SET_ADDRESS(_extrs, SharedRuntime::d2f);
3595   SET_ADDRESS(_extrs, SharedRuntime::d2i);
3596   SET_ADDRESS(_extrs, SharedRuntime::d2l);
3597   SET_ADDRESS(_extrs, SharedRuntime::dcos);
3598   SET_ADDRESS(_extrs, SharedRuntime::dexp);
3599   SET_ADDRESS(_extrs, SharedRuntime::dlog);
3600   SET_ADDRESS(_extrs, SharedRuntime::dlog10);
3601   SET_ADDRESS(_extrs, SharedRuntime::dpow);
3602   SET_ADDRESS(_extrs, SharedRuntime::dsin);
3603   SET_ADDRESS(_extrs, SharedRuntime::dtan);
3604   SET_ADDRESS(_extrs, SharedRuntime::f2i);
3605   SET_ADDRESS(_extrs, SharedRuntime::f2l);
3606 #ifndef ZERO
3607   SET_ADDRESS(_extrs, SharedRuntime::drem);
3608   SET_ADDRESS(_extrs, SharedRuntime::frem);
3609 #endif
3610   SET_ADDRESS(_extrs, SharedRuntime::l2d);
3611   SET_ADDRESS(_extrs, SharedRuntime::l2f);
3612   SET_ADDRESS(_extrs, SharedRuntime::ldiv);
3613   SET_ADDRESS(_extrs, SharedRuntime::lmul);
3614   SET_ADDRESS(_extrs, SharedRuntime::lrem);
3615 #if INCLUDE_JVMTI
3616   SET_ADDRESS(_extrs, &JvmtiExport::_should_notify_object_alloc);
3617 #endif /* INCLUDE_JVMTI */
3618   BarrierSet* bs = BarrierSet::barrier_set();
3619   if (bs->is_a(BarrierSet::CardTableBarrierSet)) {
3620     SET_ADDRESS(_extrs, ci_card_table_address_as<address>());
3621   }
3622   SET_ADDRESS(_extrs, ThreadIdentifier::unsafe_offset());
3623   SET_ADDRESS(_extrs, Thread::current);
3624 
3625   SET_ADDRESS(_extrs, os::javaTimeMillis);
3626   SET_ADDRESS(_extrs, os::javaTimeNanos);
3627 
3628 #if INCLUDE_JVMTI
3629   SET_ADDRESS(_extrs, &JvmtiVTMSTransitionDisabler::_VTMS_notify_jvmti_events);
3630 #endif /* INCLUDE_JVMTI */
3631   SET_ADDRESS(_extrs, StubRoutines::crc_table_addr());
3632 #ifndef PRODUCT
3633   SET_ADDRESS(_extrs, &SharedRuntime::_partial_subtype_ctr);
3634   SET_ADDRESS(_extrs, JavaThread::verify_cross_modify_fence_failure);
3635 #endif
3636 
3637 #ifndef ZERO
3638 #if defined(AMD64) || defined(AARCH64) || defined(RISCV64)
3639   SET_ADDRESS(_extrs, MacroAssembler::debug64);
3640 #endif
3641 #if defined(AMD64)
3642   SET_ADDRESS(_extrs, StubRoutines::x86::arrays_hashcode_powers_of_31());
3643 #endif
3644 #endif
3645 
3646 #ifdef COMPILER1
3647 #ifdef X86
3648   SET_ADDRESS(_extrs, LIR_Assembler::float_signmask_pool);
3649   SET_ADDRESS(_extrs, LIR_Assembler::double_signmask_pool);
3650   SET_ADDRESS(_extrs, LIR_Assembler::float_signflip_pool);
3651   SET_ADDRESS(_extrs, LIR_Assembler::double_signflip_pool);
3652 #endif
3653 #endif
3654 
3655   // addresses of fields in AOT runtime constants area
3656   address* p = AOTRuntimeConstants::field_addresses_list();
3657   while (*p != nullptr) {
3658     SET_ADDRESS(_extrs, *p++);
3659   }
3660   // Stubs
3661   SET_ADDRESS(_stubs, StubRoutines::method_entry_barrier());
3662   SET_ADDRESS(_stubs, StubRoutines::forward_exception_entry());
3663 /*
3664   SET_ADDRESS(_stubs, StubRoutines::throw_AbstractMethodError_entry());
3665   SET_ADDRESS(_stubs, StubRoutines::throw_IncompatibleClassChangeError_entry());
3666   SET_ADDRESS(_stubs, StubRoutines::throw_NullPointerException_at_call_entry());
3667   SET_ADDRESS(_stubs, StubRoutines::throw_StackOverflowError_entry());
3668   SET_ADDRESS(_stubs, StubRoutines::throw_delayed_StackOverflowError_entry());
3669 */
3670   SET_ADDRESS(_stubs, StubRoutines::atomic_xchg_entry());
3671   SET_ADDRESS(_stubs, StubRoutines::atomic_cmpxchg_entry());
3672   SET_ADDRESS(_stubs, StubRoutines::atomic_cmpxchg_long_entry());
3673   SET_ADDRESS(_stubs, StubRoutines::atomic_add_entry());
3674   SET_ADDRESS(_stubs, StubRoutines::fence_entry());
3675 
3676   SET_ADDRESS(_stubs, StubRoutines::cont_thaw());
3677   SET_ADDRESS(_stubs, StubRoutines::cont_returnBarrier());
3678   SET_ADDRESS(_stubs, StubRoutines::cont_returnBarrierExc());
3679 
3680   JFR_ONLY(SET_ADDRESS(_stubs, SharedRuntime::jfr_write_checkpoint());)
3681 
3682 
3683   SET_ADDRESS(_stubs, StubRoutines::jbyte_arraycopy());
3684   SET_ADDRESS(_stubs, StubRoutines::jshort_arraycopy());
3685   SET_ADDRESS(_stubs, StubRoutines::jint_arraycopy());
3686   SET_ADDRESS(_stubs, StubRoutines::jlong_arraycopy());
3687   SET_ADDRESS(_stubs, StubRoutines::_oop_arraycopy);
3688   SET_ADDRESS(_stubs, StubRoutines::_oop_arraycopy_uninit);
3689 
3690   SET_ADDRESS(_stubs, StubRoutines::jbyte_disjoint_arraycopy());
3691   SET_ADDRESS(_stubs, StubRoutines::jshort_disjoint_arraycopy());
3692   SET_ADDRESS(_stubs, StubRoutines::jint_disjoint_arraycopy());
3693   SET_ADDRESS(_stubs, StubRoutines::jlong_disjoint_arraycopy());
3694   SET_ADDRESS(_stubs, StubRoutines::_oop_disjoint_arraycopy);
3695   SET_ADDRESS(_stubs, StubRoutines::_oop_disjoint_arraycopy_uninit);
3696 
3697   SET_ADDRESS(_stubs, StubRoutines::arrayof_jbyte_arraycopy());
3698   SET_ADDRESS(_stubs, StubRoutines::arrayof_jshort_arraycopy());
3699   SET_ADDRESS(_stubs, StubRoutines::arrayof_jint_arraycopy());
3700   SET_ADDRESS(_stubs, StubRoutines::arrayof_jlong_arraycopy());
3701   SET_ADDRESS(_stubs, StubRoutines::_arrayof_oop_arraycopy);
3702   SET_ADDRESS(_stubs, StubRoutines::_arrayof_oop_arraycopy_uninit);
3703 
3704   SET_ADDRESS(_stubs, StubRoutines::arrayof_jbyte_disjoint_arraycopy());
3705   SET_ADDRESS(_stubs, StubRoutines::arrayof_jshort_disjoint_arraycopy());
3706   SET_ADDRESS(_stubs, StubRoutines::arrayof_jint_disjoint_arraycopy());
3707   SET_ADDRESS(_stubs, StubRoutines::arrayof_jlong_disjoint_arraycopy());
3708   SET_ADDRESS(_stubs, StubRoutines::_arrayof_oop_disjoint_arraycopy);
3709   SET_ADDRESS(_stubs, StubRoutines::_arrayof_oop_disjoint_arraycopy_uninit);
3710 
3711   SET_ADDRESS(_stubs, StubRoutines::_checkcast_arraycopy);
3712   SET_ADDRESS(_stubs, StubRoutines::_checkcast_arraycopy_uninit);
3713 
3714   SET_ADDRESS(_stubs, StubRoutines::unsafe_arraycopy());
3715   SET_ADDRESS(_stubs, StubRoutines::generic_arraycopy());
3716 
3717   SET_ADDRESS(_stubs, StubRoutines::jbyte_fill());
3718   SET_ADDRESS(_stubs, StubRoutines::jshort_fill());
3719   SET_ADDRESS(_stubs, StubRoutines::jint_fill());
3720   SET_ADDRESS(_stubs, StubRoutines::arrayof_jbyte_fill());
3721   SET_ADDRESS(_stubs, StubRoutines::arrayof_jshort_fill());
3722   SET_ADDRESS(_stubs, StubRoutines::arrayof_jint_fill());
3723 
3724   SET_ADDRESS(_stubs, StubRoutines::data_cache_writeback());
3725   SET_ADDRESS(_stubs, StubRoutines::data_cache_writeback_sync());
3726 
3727   SET_ADDRESS(_stubs, StubRoutines::aescrypt_encryptBlock());
3728   SET_ADDRESS(_stubs, StubRoutines::aescrypt_decryptBlock());
3729   SET_ADDRESS(_stubs, StubRoutines::cipherBlockChaining_encryptAESCrypt());
3730   SET_ADDRESS(_stubs, StubRoutines::cipherBlockChaining_decryptAESCrypt());
3731   SET_ADDRESS(_stubs, StubRoutines::electronicCodeBook_encryptAESCrypt());
3732   SET_ADDRESS(_stubs, StubRoutines::electronicCodeBook_decryptAESCrypt());
3733   SET_ADDRESS(_stubs, StubRoutines::poly1305_processBlocks());
3734   SET_ADDRESS(_stubs, StubRoutines::counterMode_AESCrypt());
3735   SET_ADDRESS(_stubs, StubRoutines::ghash_processBlocks());
3736   SET_ADDRESS(_stubs, StubRoutines::chacha20Block());
3737   SET_ADDRESS(_stubs, StubRoutines::base64_encodeBlock());
3738   SET_ADDRESS(_stubs, StubRoutines::base64_decodeBlock());
3739   SET_ADDRESS(_stubs, StubRoutines::md5_implCompress());
3740   SET_ADDRESS(_stubs, StubRoutines::md5_implCompressMB());
3741   SET_ADDRESS(_stubs, StubRoutines::sha1_implCompress());
3742   SET_ADDRESS(_stubs, StubRoutines::sha1_implCompressMB());
3743   SET_ADDRESS(_stubs, StubRoutines::sha256_implCompress());
3744   SET_ADDRESS(_stubs, StubRoutines::sha256_implCompressMB());
3745   SET_ADDRESS(_stubs, StubRoutines::sha512_implCompress());
3746   SET_ADDRESS(_stubs, StubRoutines::sha512_implCompressMB());
3747   SET_ADDRESS(_stubs, StubRoutines::sha3_implCompress());
3748   SET_ADDRESS(_stubs, StubRoutines::sha3_implCompressMB());
3749 
3750   SET_ADDRESS(_stubs, StubRoutines::updateBytesCRC32());
3751 
3752   SET_ADDRESS(_stubs, StubRoutines::crc32c_table_addr());
3753   SET_ADDRESS(_stubs, StubRoutines::updateBytesCRC32C());
3754   SET_ADDRESS(_stubs, StubRoutines::updateBytesAdler32());
3755 
3756   SET_ADDRESS(_stubs, StubRoutines::multiplyToLen());
3757   SET_ADDRESS(_stubs, StubRoutines::squareToLen());
3758   SET_ADDRESS(_stubs, StubRoutines::mulAdd());
3759   SET_ADDRESS(_stubs, StubRoutines::montgomeryMultiply());
3760   SET_ADDRESS(_stubs, StubRoutines::montgomerySquare());
3761   SET_ADDRESS(_stubs, StubRoutines::bigIntegerRightShift());
3762   SET_ADDRESS(_stubs, StubRoutines::bigIntegerLeftShift());
3763   SET_ADDRESS(_stubs, StubRoutines::galoisCounterMode_AESCrypt());
3764 
3765   SET_ADDRESS(_stubs, StubRoutines::vectorizedMismatch());
3766 
3767   SET_ADDRESS(_stubs, StubRoutines::dexp());
3768   SET_ADDRESS(_stubs, StubRoutines::dlog());
3769   SET_ADDRESS(_stubs, StubRoutines::dlog10());
3770   SET_ADDRESS(_stubs, StubRoutines::dpow());
3771   SET_ADDRESS(_stubs, StubRoutines::dsin());
3772   SET_ADDRESS(_stubs, StubRoutines::dcos());
3773   SET_ADDRESS(_stubs, StubRoutines::dlibm_reduce_pi04l());
3774   SET_ADDRESS(_stubs, StubRoutines::dlibm_sin_cos_huge());
3775   SET_ADDRESS(_stubs, StubRoutines::dlibm_tan_cot_huge());
3776   SET_ADDRESS(_stubs, StubRoutines::dtan());
3777 
3778   SET_ADDRESS(_stubs, StubRoutines::f2hf_adr());
3779   SET_ADDRESS(_stubs, StubRoutines::hf2f_adr());
3780 
3781 #if defined(AMD64) && !defined(ZERO)
3782   SET_ADDRESS(_stubs, StubRoutines::x86::d2i_fixup());
3783   SET_ADDRESS(_stubs, StubRoutines::x86::f2i_fixup());
3784   SET_ADDRESS(_stubs, StubRoutines::x86::d2l_fixup());
3785   SET_ADDRESS(_stubs, StubRoutines::x86::f2l_fixup());
3786   SET_ADDRESS(_stubs, StubRoutines::x86::float_sign_mask());
3787   SET_ADDRESS(_stubs, StubRoutines::x86::float_sign_flip());
3788   SET_ADDRESS(_stubs, StubRoutines::x86::double_sign_mask());
3789   SET_ADDRESS(_stubs, StubRoutines::x86::double_sign_flip());
3790   SET_ADDRESS(_stubs, StubRoutines::x86::vector_popcount_lut());
3791   SET_ADDRESS(_stubs, StubRoutines::x86::vector_float_sign_mask());
3792   SET_ADDRESS(_stubs, StubRoutines::x86::vector_float_sign_flip());
3793   SET_ADDRESS(_stubs, StubRoutines::x86::vector_double_sign_mask());
3794   SET_ADDRESS(_stubs, StubRoutines::x86::vector_double_sign_flip());
3795   // The iota indices are ordered by type B/S/I/L/F/D, and the offset between two types is 64.
3796   // See C2_MacroAssembler::load_iota_indices().
3797   for (int i = 0; i < 6; i++) {
3798     SET_ADDRESS(_stubs, StubRoutines::x86::vector_iota_indices() + i * 64);
3799   }
3800 #endif
3801 #if defined(AARCH64) && !defined(ZERO)
3802   SET_ADDRESS(_stubs, StubRoutines::aarch64::d2i_fixup());
3803   SET_ADDRESS(_stubs, StubRoutines::aarch64::f2i_fixup());
3804   SET_ADDRESS(_stubs, StubRoutines::aarch64::d2l_fixup());
3805   SET_ADDRESS(_stubs, StubRoutines::aarch64::f2l_fixup());
3806   SET_ADDRESS(_stubs, StubRoutines::aarch64::float_sign_mask());
3807   SET_ADDRESS(_stubs, StubRoutines::aarch64::float_sign_flip());
3808   SET_ADDRESS(_stubs, StubRoutines::aarch64::double_sign_mask());
3809   SET_ADDRESS(_stubs, StubRoutines::aarch64::double_sign_flip());
3810   SET_ADDRESS(_stubs, StubRoutines::aarch64::zero_blocks());
3811   SET_ADDRESS(_stubs, StubRoutines::aarch64::count_positives());
3812   SET_ADDRESS(_stubs, StubRoutines::aarch64::count_positives_long());
3813   SET_ADDRESS(_stubs, StubRoutines::aarch64::large_array_equals());
3814   SET_ADDRESS(_stubs, StubRoutines::aarch64::compare_long_string_LL());
3815   SET_ADDRESS(_stubs, StubRoutines::aarch64::compare_long_string_UU());
3816   SET_ADDRESS(_stubs, StubRoutines::aarch64::compare_long_string_LU());
3817   SET_ADDRESS(_stubs, StubRoutines::aarch64::compare_long_string_UL());
3818   SET_ADDRESS(_stubs, StubRoutines::aarch64::string_indexof_linear_ul());
3819   SET_ADDRESS(_stubs, StubRoutines::aarch64::string_indexof_linear_ll());
3820   SET_ADDRESS(_stubs, StubRoutines::aarch64::string_indexof_linear_uu());
3821   SET_ADDRESS(_stubs, StubRoutines::aarch64::large_byte_array_inflate());
3822   SET_ADDRESS(_stubs, StubRoutines::aarch64::spin_wait());
3823 
3824   SET_ADDRESS(_stubs, StubRoutines::aarch64::large_arrays_hashcode(T_BOOLEAN));
3825   SET_ADDRESS(_stubs, StubRoutines::aarch64::large_arrays_hashcode(T_BYTE));
3826   SET_ADDRESS(_stubs, StubRoutines::aarch64::large_arrays_hashcode(T_SHORT));
3827   SET_ADDRESS(_stubs, StubRoutines::aarch64::large_arrays_hashcode(T_CHAR));
3828   SET_ADDRESS(_stubs, StubRoutines::aarch64::large_arrays_hashcode(T_INT));
3829 #endif
3830 
3831   // Blobs
3832   SET_ADDRESS(_blobs, SharedRuntime::get_handle_wrong_method_stub());
3833   SET_ADDRESS(_blobs, SharedRuntime::get_ic_miss_stub());
3834   SET_ADDRESS(_blobs, SharedRuntime::get_resolve_opt_virtual_call_stub());
3835   SET_ADDRESS(_blobs, SharedRuntime::get_resolve_virtual_call_stub());
3836   SET_ADDRESS(_blobs, SharedRuntime::get_resolve_static_call_stub());
3837   SET_ADDRESS(_blobs, SharedRuntime::deopt_blob()->entry_point());
3838   SET_ADDRESS(_blobs, SharedRuntime::polling_page_safepoint_handler_blob()->entry_point());
3839   SET_ADDRESS(_blobs, SharedRuntime::polling_page_return_handler_blob()->entry_point());
3840 #ifdef COMPILER2
3841   SET_ADDRESS(_blobs, SharedRuntime::polling_page_vectors_safepoint_handler_blob()->entry_point());
3842 #endif
3843 
3844   SET_ADDRESS(_blobs, SharedRuntime::throw_AbstractMethodError_entry());
3845   SET_ADDRESS(_blobs, SharedRuntime::throw_IncompatibleClassChangeError_entry());
3846   SET_ADDRESS(_blobs, SharedRuntime::throw_NullPointerException_at_call_entry());
3847   SET_ADDRESS(_blobs, SharedRuntime::throw_StackOverflowError_entry());
3848   SET_ADDRESS(_blobs, SharedRuntime::throw_delayed_StackOverflowError_entry());
3849 
3850   assert(_blobs_length <= _shared_blobs_max, "increase _shared_blobs_max to %d", _blobs_length);
3851   _final_blobs_length = _blobs_length;
3852   _complete = true;
3853   log_info(scc, init)("External addresses and stubs recorded");
3854 }
3855 
3856 void SCAddressTable::init_opto() {
3857 #ifdef COMPILER2
3858   // OptoRuntime Blobs
3859   SET_ADDRESS(_C2_blobs, OptoRuntime::uncommon_trap_blob()->entry_point());
3860   SET_ADDRESS(_C2_blobs, OptoRuntime::exception_blob()->entry_point());
3861   SET_ADDRESS(_C2_blobs, OptoRuntime::new_instance_Java());
3862   SET_ADDRESS(_C2_blobs, OptoRuntime::new_array_Java());
3863   SET_ADDRESS(_C2_blobs, OptoRuntime::new_array_nozero_Java());
3864   SET_ADDRESS(_C2_blobs, OptoRuntime::multianewarray2_Java());
3865   SET_ADDRESS(_C2_blobs, OptoRuntime::multianewarray3_Java());
3866   SET_ADDRESS(_C2_blobs, OptoRuntime::multianewarray4_Java());
3867   SET_ADDRESS(_C2_blobs, OptoRuntime::multianewarray5_Java());
3868   SET_ADDRESS(_C2_blobs, OptoRuntime::multianewarrayN_Java());
3869   SET_ADDRESS(_C2_blobs, OptoRuntime::vtable_must_compile_stub());
3870   SET_ADDRESS(_C2_blobs, OptoRuntime::complete_monitor_locking_Java());
3871   SET_ADDRESS(_C2_blobs, OptoRuntime::monitor_notify_Java());
3872   SET_ADDRESS(_C2_blobs, OptoRuntime::monitor_notifyAll_Java());
3873   SET_ADDRESS(_C2_blobs, OptoRuntime::rethrow_stub());
3874   SET_ADDRESS(_C2_blobs, OptoRuntime::slow_arraycopy_Java());
3875   SET_ADDRESS(_C2_blobs, OptoRuntime::register_finalizer_Java());
3876   SET_ADDRESS(_C2_blobs, OptoRuntime::class_init_barrier_Java());
3877 #if INCLUDE_JVMTI
3878   SET_ADDRESS(_C2_blobs, OptoRuntime::notify_jvmti_vthread_start());
3879   SET_ADDRESS(_C2_blobs, OptoRuntime::notify_jvmti_vthread_end());
3880   SET_ADDRESS(_C2_blobs, OptoRuntime::notify_jvmti_vthread_mount());
3881   SET_ADDRESS(_C2_blobs, OptoRuntime::notify_jvmti_vthread_unmount());
3882 #endif /* INCLUDE_JVMTI */
3883 #endif
3884 
3885   assert(_C2_blobs_length <= _C2_blobs_max, "increase _C2_blobs_max to %d", _C2_blobs_length);
3886   _final_blobs_length = MAX2(_final_blobs_length, (_shared_blobs_max + _C2_blobs_length));
3887   _opto_complete = true;
3888   log_info(scc, init)("OptoRuntime Blobs recorded");
3889 }
3890 
3891 void SCAddressTable::init_c1() {
3892 #ifdef COMPILER1
3893   // Runtime1 Blobs
3894   for (int i = 0; i < (int)(C1StubId::NUM_STUBIDS); i++) {
3895     C1StubId id = (C1StubId)i;
3896     if (Runtime1::blob_for(id) == nullptr) {
3897       log_info(scc, init)("C1 blob %s is missing", Runtime1::name_for(id));
3898       continue;
3899     }
3900     if (Runtime1::entry_for(id) == nullptr) {
3901       log_info(scc, init)("C1 blob %s is missing entry", Runtime1::name_for(id));
3902       continue;
3903     }
3904     address entry = Runtime1::entry_for(id);
3905     SET_ADDRESS(_C1_blobs, entry);
3906   }
3907 #if INCLUDE_G1GC
3908   if (UseG1GC) {
3909     G1BarrierSetC1* bs = (G1BarrierSetC1*)BarrierSet::barrier_set()->barrier_set_c1();
3910     address entry = bs->pre_barrier_c1_runtime_code_blob()->code_begin();
3911     SET_ADDRESS(_C1_blobs, entry);
3912     entry = bs->post_barrier_c1_runtime_code_blob()->code_begin();
3913     SET_ADDRESS(_C1_blobs, entry);
3914   }
3915 #endif // INCLUDE_G1GC
3916 #if INCLUDE_ZGC
3917   if (UseZGC) {
3918     ZBarrierSetC1* bs = (ZBarrierSetC1*)BarrierSet::barrier_set()->barrier_set_c1();
3919     SET_ADDRESS(_C1_blobs, bs->_load_barrier_on_oop_field_preloaded_runtime_stub);
3920     SET_ADDRESS(_C1_blobs, bs->_load_barrier_on_weak_oop_field_preloaded_runtime_stub);
3921     SET_ADDRESS(_C1_blobs, bs->_store_barrier_on_oop_field_with_healing);
3922     SET_ADDRESS(_C1_blobs, bs->_store_barrier_on_oop_field_without_healing);
3923   }
3924 #endif // INCLUDE_ZGC
3925 #if INCLUDE_SHENANDOAHGC
3926   if (UseShenandoahGC) {
3927     ShenandoahBarrierSetC1* bs = (ShenandoahBarrierSetC1*)BarrierSet::barrier_set()->barrier_set_c1();
3928     SET_ADDRESS(_C1_blobs, bs->pre_barrier_c1_runtime_code_blob()->code_begin());
3929     SET_ADDRESS(_C1_blobs, bs->load_reference_barrier_strong_rt_code_blob()->code_begin());
3930     SET_ADDRESS(_C1_blobs, bs->load_reference_barrier_strong_native_rt_code_blob()->code_begin());
3931     SET_ADDRESS(_C1_blobs, bs->load_reference_barrier_weak_rt_code_blob()->code_begin());
3932     SET_ADDRESS(_C1_blobs, bs->load_reference_barrier_phantom_rt_code_blob()->code_begin());
3933   }
3934 #endif // INCLUDE_SHENANDOAHGC
3935 #endif // COMPILER1
3936 
3937   assert(_C1_blobs_length <= _C1_blobs_max, "increase _C1_blobs_max to %d", _C1_blobs_length);
3938   _final_blobs_length = MAX2(_final_blobs_length, (_shared_blobs_max + _C2_blobs_max + _C1_blobs_length));
3939   _c1_complete = true;
3940   log_info(scc, init)("Runtime1 Blobs recorded");
3941 }
3942 
3943 #undef SET_ADDRESS
3944 #undef _extrs_max
3945 #undef _stubs_max
3946 #undef _blobs_max
3947 #undef _shared_blobs_max
3948 #undef _C1_blobs_max
3949 #undef _C2_blobs_max
3950 
3951 SCAddressTable::~SCAddressTable() {
3952   if (_extrs_addr != nullptr) {
3953     FREE_C_HEAP_ARRAY(address, _extrs_addr);
3954   }
3955   if (_stubs_addr != nullptr) {
3956     FREE_C_HEAP_ARRAY(address, _stubs_addr);
3957   }
3958   if (_blobs_addr != nullptr) {
3959     FREE_C_HEAP_ARRAY(address, _blobs_addr);
3960   }
3961 }
3962 
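     // Table of C string constants referenced from cached code. _C_strings holds
     // the recorded pointers; the parallel arrays map them to deduplicated ids and
     // keep per-id lengths and hashes so that identical contents recorded at
     // different addresses share one id.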
3963 #define MAX_STR_COUNT 200
3964 static const char* _C_strings[MAX_STR_COUNT] = {nullptr};
3965 static int _C_strings_count = 0;
3966 static int _C_strings_s[MAX_STR_COUNT] = {0};
3967 static int _C_strings_id[MAX_STR_COUNT] = {0};
3968 static int _C_strings_len[MAX_STR_COUNT] = {0};
3969 static int _C_strings_hash[MAX_STR_COUNT] = {0};
3970 static int _C_strings_used = 0;
3971 
3972 void SCCache::load_strings() {
3973   uint strings_count  = _load_header->strings_count();
3974   if (strings_count == 0) {
3975     return;
3976   }
3977   uint strings_offset = _load_header->strings_offset();
3978   uint strings_size   = _load_header->entries_offset() - strings_offset;
3979   uint data_size = (uint)(strings_count * sizeof(uint));
3980   uint* sizes = (uint*)addr(strings_offset);
3981   uint* hashs = (uint*)addr(strings_offset + data_size);
3982   strings_size -= 2 * data_size;
3983   // We have to keep the cached strings alive longer than the _cache buffer
3984   // because they are referenced from compiled code which may
3985   // still be executed on VM exit after _cache is freed.
3986   char* p = NEW_C_HEAP_ARRAY(char, strings_size+1, mtCode);
3987   memcpy(p, addr(strings_offset + 2 * data_size), strings_size);
3988   _C_strings_buf = p;
3989   assert(strings_count <= MAX_STR_COUNT, "sanity");
3990   for (uint i = 0; i < strings_count; i++) {
3991     _C_strings[i] = p;
3992     uint len = sizes[i];
3993     _C_strings_s[i] = i;
3994     _C_strings_id[i] = i;
3995     _C_strings_len[i] = len;
3996     _C_strings_hash[i] = hashs[i];
3997     p += len;
3998   }
3999   assert((uint)(p - _C_strings_buf) <= strings_size, "(" INTPTR_FORMAT " - " INTPTR_FORMAT ") = %d > %d ", p2i(p), p2i(_C_strings_buf), (uint)(p - _C_strings_buf), strings_size);
4000   _C_strings_count = strings_count;
4001   _C_strings_used  = strings_count;
4002   log_info(scc, init)("Load %d C strings at offset %d from Startup Code Cache '%s'", _C_strings_count, strings_offset, _cache_path);
4003 }
4004 
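     // Write the recorded C strings out in three runs: the lengths (including the
     // terminating '\0'), the hashes, and then the string bytes themselves.
     // Returns the number of strings written, or -1 on a write failure.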
4005 int SCCache::store_strings() {
4006   uint offset = _write_position;
4007   uint length = 0;
4008   if (_C_strings_used > 0) {
4009     // Write sizes first
4010     for (int i = 0; i < _C_strings_used; i++) {
4011       uint len = _C_strings_len[i] + 1; // Include terminating '\0'
4012       length += len;
4013       assert(len < 1000, "big string: %s", _C_strings[i]);
4014       uint n = write_bytes(&len, sizeof(uint));
4015       if (n != sizeof(uint)) {
4016         return -1;
4017       }
4018     }
4019     // Write hashes
4020     for (int i = 0; i < _C_strings_used; i++) {
4021       uint n = write_bytes(&(_C_strings_hash[i]), sizeof(uint));
4022       if (n != sizeof(uint)) {
4023         return -1;
4024       }
4025     }
4026     for (int i = 0; i < _C_strings_used; i++) {
4027       uint len = _C_strings_len[i] + 1; // Include terminating '\0'
4028       uint n = write_bytes(_C_strings[_C_strings_s[i]], len);
4029       if (n != len) {
4030         return -1;
4031       }
4032     }
4033     log_info(scc, exit)("Wrote %d C strings of total length %d at offset %d to Startup Code Cache '%s'",
4034                         _C_strings_used, length, offset, _cache_path);
4035   }
4036   return _C_strings_used;
4037 }
4038 
4039 void SCCache::add_new_C_string(const char* str) {
4040   assert(for_write(), "only when storing code");
4041   _table->add_C_string(str);
4042 }
4043 
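     // Record the address of a C string referenced from generated code so it
     // can later be encoded by id_for_C_string(). Addresses already present in
     // the table are ignored, and the table is capped at MAX_STR_COUNT entries.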
4044 void SCAddressTable::add_C_string(const char* str) {
4045   if (str != nullptr && _complete && (_opto_complete || _c1_complete)) {
4046     // Check if this string's address was already recorded
4047     for (int i = 0; i < _C_strings_count; i++) {
4048       if (_C_strings[i] == str) {
4049         return; // Found existing one
4050       }
4051     }
4052     // Add new one
4053     if (_C_strings_count < MAX_STR_COUNT) {
4054       log_trace(scc)("add_C_string: [%d] " INTPTR_FORMAT " %s", _C_strings_count, p2i(str), str);
4055       _C_strings_id[_C_strings_count] = -1; // Init
4056       _C_strings[_C_strings_count++] = str;
4057     } else {
4058       if (Thread::current()->is_Compiler_thread()) {
4059         CompileTask* task = ciEnv::current()->task();
4060         log_info(scc)("%d (L%d): Number of C strings > max %d %s",
4061                       task->compile_id(), task->comp_level(), MAX_STR_COUNT, str);
4062       }
4063     }
4064   }
4065 }
4066 
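     // Map a C string address to its id in the stored string table: reuse an
     // already assigned id if present, otherwise try to match an existing entry
     // by length and hash, and only then assign a new id. Returns -1 if the
     // address was never recorded by add_C_string().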
4067 int SCAddressTable::id_for_C_string(address str) {
4068   for (int i = 0; i < _C_strings_count; i++) {
4069     if (_C_strings[i] == (const char*)str) { // found
4070       int id = _C_strings_id[i];
4071       if (id >= 0) {
4072         assert(id < _C_strings_used, "%d >= %d", id, _C_strings_used);
4073         return id; // Found recorded
4074       }
4075       // Search for the same string content
4076       int len = (int)strlen((const char*)str);
4077       int hash = java_lang_String::hash_code((const jbyte*)str, len);
4078       for (int j = 0; j < _C_strings_used; j++) {
4079         if ((_C_strings_len[j] == len) && (_C_strings_hash[j] == hash)) {
4080           _C_strings_id[i] = j; // Found match
4081           return j;
4082         }
4083       }
4084       // Not found in recorded, add new
4085       id = _C_strings_used++;
4086       _C_strings_s[id] = i;
4087       _C_strings_id[i] = id;
4088       _C_strings_len[id] = len;
4089       _C_strings_hash[id] = hash;
4090       return id;
4091     }
4092   }
4093   return -1;
4094 }
4095 
4096 address SCAddressTable::address_for_C_string(int idx) {
4097   assert(idx < _C_strings_count, "sanity");
4098   return (address)_C_strings[idx];
4099 }
4100 
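     // Linear scan of an address table; returns the index of addr or -1 if it is absent.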
4101 int search_address(address addr, address* table, uint length) {
4102   for (int i = 0; i < (int)length; i++) {
4103     if (table[i] == addr) {
4104       return i;
4105     }
4106   }
4107   return -1;
4108 }
4109 
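     // Decode an id produced by id_for_address() back into an address. Ids map,
     // in order, to external runtime entries, stubs and code blobs; ids in
     // [_all_max, _all_max + _C_strings_count) select C strings, and larger
     // values are treated as offsets from os::init.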
4110 address SCAddressTable::address_for_id(int idx) {
4111   if (!_complete) {
4112     fatal("SCA table is not complete");
4113   }
4114   if (idx == -1) {
4115     return (address)-1;
4116   }
4117   uint id = (uint)idx;
4118   if (id >= _all_max && idx < (_all_max + _C_strings_count)) {
4119     return address_for_C_string(idx - _all_max);
4120   }
4121   if (idx < 0 || id == (_extrs_length + _stubs_length + _final_blobs_length)) {
4122     fatal("Incorrect id %d for SCA table", id);
4123   }
4124   if (idx > (_all_max + _C_strings_count)) {
4125     return (address)os::init + idx;
4126   }
4127   if (id < _extrs_length) {
4128     return _extrs_addr[id];
4129   }
4130   id -= _extrs_length;
4131   if (id < _stubs_length) {
4132     return _stubs_addr[id];
4133   }
4134   id -= _stubs_length;
4135   if (id < _final_blobs_length) {
4136     return _blobs_addr[id];
4137   }
4138   return nullptr;
4139 }
4140 
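     // Encode an external address referenced from generated code as a stable id:
     // an index into the stubs, code blobs or runtime entries tables, a C string
     // id biased by _all_max, or, for an address inside a runtime function that
     // is not a known entry point (presumably an embedded C string), its distance
     // from os::init. Fails fatally if the address cannot be classified.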
4141 int SCAddressTable::id_for_address(address addr, RelocIterator reloc, CodeBuffer* buffer) {
4142   int id = -1;
4143   if (addr == (address)-1) { // Static call stub has jump to itself
4144     return id;
4145   }
4146   if (!_complete) {
4147     fatal("SCA table is not complete");
4148   }
4149   // Search for a C string first
4150   id = id_for_C_string(addr);
4151   if (id >= 0) {
4152     return id + _all_max;
4153   }
4154   if (StubRoutines::contains(addr)) {
4155     // Search in stubs
4156     id = search_address(addr, _stubs_addr, _stubs_length);
4157     if (id < 0) {
4158       StubCodeDesc* desc = StubCodeDesc::desc_for(addr);
4159       if (desc == nullptr) {
4160         desc = StubCodeDesc::desc_for(addr + frame::pc_return_offset);
4161       }
4162       const char* stub_name = (desc != nullptr) ? desc->name() : "<unknown>";
4163       fatal("Address " INTPTR_FORMAT " for Stub:%s is missing in SCA table", p2i(addr), stub_name);
4164     } else {
4165       id += _extrs_length;
4166     }
4167   } else {
4168     CodeBlob* cb = CodeCache::find_blob(addr);
4169     if (cb != nullptr) {
4170       // Search in code blobs
4171       id = search_address(addr, _blobs_addr, _final_blobs_length);
4172       if (id < 0) {
4173         fatal("Address " INTPTR_FORMAT " for Blob:%s is missing in SCA table", p2i(addr), cb->name());
4174       } else {
4175         id += _extrs_length + _stubs_length;
4176       }
4177     } else {
4178       // Search in runtime functions
4179       id = search_address(addr, _extrs_addr, _extrs_length);
4180       if (id < 0) {
4181         ResourceMark rm;
4182         const int buflen = 1024;
4183         char* func_name = NEW_RESOURCE_ARRAY(char, buflen);
4184         int offset = 0;
4185         if (os::dll_address_to_function_name(addr, func_name, buflen, &offset)) {
4186           if (offset > 0) {
4187             // Could be address of C string
4188             uint dist = (uint)pointer_delta(addr, (address)os::init, 1);
4189             CompileTask* task = ciEnv::current()->task();
4190             uint compile_id = 0;
4191             uint comp_level = 0;
4192             if (task != nullptr) { // this could be called from compiler runtime initialization (compiler blobs)
4193               compile_id = task->compile_id();
4194               comp_level = task->comp_level();
4195             }
4196             log_info(scc)("%d (L%d): Address " INTPTR_FORMAT " (offset %d) for runtime target '%s' is missing in SCA table",
4197                           compile_id, comp_level, p2i(addr), dist, (const char*)addr);
4198             assert(dist > (uint)(_all_max + MAX_STR_COUNT), "change encoding of distance");
4199             return dist;
4200           }
4201           fatal("Address " INTPTR_FORMAT " for runtime target '%s+%d' is missing in SCA table", p2i(addr), func_name, offset);
4202         } else {
4203           os::print_location(tty, p2i(addr), true);
4204           reloc.print_current_on(tty);
4205 #ifndef PRODUCT
4206           buffer->print_on(tty);
4207           buffer->decode();
4208 #endif // !PRODUCT
4209           fatal("Address " INTPTR_FORMAT " for <unknown> is missing in SCA table", p2i(addr));
4210         }
4211       }
4212     }
4213   }
4214   return id;
4215 }
4216 
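     // Capture GC constants that AOT-compiled code depends on. Currently only
     // the card table grain and card shifts are recorded, and only when a card
     // table based barrier set is in use.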
4217 void AOTRuntimeConstants::initialize_from_runtime() {
4218   BarrierSet* bs = BarrierSet::barrier_set();
4219   if (bs->is_a(BarrierSet::CardTableBarrierSet)) {
4220     CardTableBarrierSet* ctbs = (CardTableBarrierSet*)bs;
4221     _aot_runtime_constants._grain_shift = ctbs->grain_shift();
4222     _aot_runtime_constants._card_shift = ctbs->card_shift();
4223   }
4224 }
4225 
4226 AOTRuntimeConstants AOTRuntimeConstants::_aot_runtime_constants;
4227 
4228 address AOTRuntimeConstants::_field_addresses_list[] = {
4229   grain_shift_address(),
4230   card_shift_address(),
4231   nullptr
4232 };
4233 
4234 
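     // Close the cache for nmethod readers. The _nmethod_readers counter is
     // non-negative while the cache is open (holding the number of active
     // readers) and negative once closing has started. Flip the counter from
     // cur to -(cur + 1) so no new ReadingMark can enter, then spin until the
     // remaining readers have left and the counter settles at -1.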
4235 void SCCache::wait_for_no_nmethod_readers() {
4236   while (true) {
4237     int cur = Atomic::load(&_nmethod_readers);
4238     int upd = -(cur + 1);
4239     if (cur >= 0 && Atomic::cmpxchg(&_nmethod_readers, cur, upd) == cur) {
4240       // Success, no new readers should appear.
4241       break;
4242     }
4243   }
4244 
4245   // Now wait for all readers to leave.
4246   SpinYield w;
4247   while (Atomic::load(&_nmethod_readers) != -1) {
4248     w.wait();
4249   }
4250 }
4251 
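     // RAII guard for code reading cached nmethod data. The constructor
     // registers the current thread as a reader unless the cache is already
     // closing (in which case _failed is set); the destructor deregisters it,
     // counting down towards 0 while the cache is open or up towards -1 while
     // it is closing. A minimal usage sketch, assuming a failed() accessor
     // over _failed (hypothetical name):
     //   SCCache::ReadingMark rm;
     //   if (!rm.failed()) { /* safe to read cached entries */ }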
4252 SCCache::ReadingMark::ReadingMark() {
4253   while (true) {
4254     int cur = Atomic::load(&_nmethod_readers);
4255     if (cur < 0) {
4256       // Cache is already closed, cannot proceed.
4257       _failed = true;
4258       return;
4259     }
4260     if (Atomic::cmpxchg(&_nmethod_readers, cur, cur + 1) == cur) {
4261       // Successfully recorded ourselves as entered.
4262       _failed = false;
4263       return;
4264     }
4265   }
4266 }
4267 
4268 SCCache::ReadingMark::~ReadingMark() {
4269   if (_failed) {
4270     return;
4271   }
4272   while (true) {
4273     int cur = Atomic::load(&_nmethod_readers);
4274     if (cur > 0) {
4275       // Cache is open, we are counting down towards 0.
4276       if (Atomic::cmpxchg(&_nmethod_readers, cur, cur - 1) == cur) {
4277         return;
4278       }
4279     } else {
4280       // Cache is closed, we are counting up towards -1.
4281       if (Atomic::cmpxchg(&_nmethod_readers, cur, cur + 1) == cur) {
4282         return;
4283       }
4284     }
4285   }
4286 }