1 /* 2 * Copyright (c) 2023, 2024, Oracle and/or its affiliates. All rights reserved. 3 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. 4 * 5 * This code is free software; you can redistribute it and/or modify it 6 * under the terms of the GNU General Public License version 2 only, as 7 * published by the Free Software Foundation. 8 * 9 * This code is distributed in the hope that it will be useful, but WITHOUT 10 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or 11 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License 12 * version 2 for more details (a copy is included in the LICENSE file that 13 * accompanied this code). 14 * 15 * You should have received a copy of the GNU General Public License version 16 * 2 along with this work; if not, write to the Free Software Foundation, 17 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. 18 * 19 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA 20 * or visit www.oracle.com if you need additional information or have any 21 * questions. 
22 * 23 */ 24 25 #include "precompiled.hpp" 26 #include "asm/macroAssembler.hpp" 27 #include "cds/cdsAccess.hpp" 28 #include "cds/cdsConfig.hpp" 29 #include "cds/metaspaceShared.hpp" 30 #include "ci/ciConstant.hpp" 31 #include "ci/ciEnv.hpp" 32 #include "ci/ciField.hpp" 33 #include "ci/ciMethod.hpp" 34 #include "ci/ciMethodData.hpp" 35 #include "ci/ciObject.hpp" 36 #include "ci/ciUtilities.inline.hpp" 37 #include "classfile/javaAssertions.hpp" 38 #include "classfile/stringTable.hpp" 39 #include "classfile/symbolTable.hpp" 40 #include "classfile/systemDictionary.hpp" 41 #include "classfile/vmClasses.hpp" 42 #include "classfile/vmIntrinsics.hpp" 43 #include "code/codeBlob.hpp" 44 #include "code/codeCache.hpp" 45 #include "code/oopRecorder.inline.hpp" 46 #include "code/SCCache.hpp" 47 #include "compiler/abstractCompiler.hpp" 48 #include "compiler/compileBroker.hpp" 49 #include "compiler/compileTask.hpp" 50 #include "gc/g1/g1BarrierSetRuntime.hpp" 51 #include "gc/shared/gcConfig.hpp" 52 #include "logging/log.hpp" 53 #include "memory/universe.hpp" 54 #include "oops/klass.inline.hpp" 55 #include "oops/method.inline.hpp" 56 #include "oops/trainingData.hpp" 57 #include "prims/jvmtiThreadState.hpp" 58 #include "runtime/atomic.hpp" 59 #include "runtime/flags/flagSetting.hpp" 60 #include "runtime/globals_extension.hpp" 61 #include "runtime/handles.inline.hpp" 62 #include "runtime/jniHandles.inline.hpp" 63 #include "runtime/os.inline.hpp" 64 #include "runtime/sharedRuntime.hpp" 65 #include "runtime/stubCodeGenerator.hpp" 66 #include "runtime/stubRoutines.hpp" 67 #include "runtime/timerTrace.hpp" 68 #include "runtime/threadIdentifier.hpp" 69 #include "utilities/ostream.hpp" 70 #ifdef COMPILER1 71 #include "c1/c1_Runtime1.hpp" 72 #include "c1/c1_LIRAssembler.hpp" 73 #include "gc/shared/c1/barrierSetC1.hpp" 74 #include "gc/g1/c1/g1BarrierSetC1.hpp" 75 #include "gc/z/c1/zBarrierSetC1.hpp" 76 #endif 77 #ifdef COMPILER2 78 #include "opto/runtime.hpp" 79 #endif 80 #if INCLUDE_JVMCI 
#include "jvmci/jvmci.hpp"
#endif

#include <sys/stat.h>
#include <errno.h>

#ifndef O_BINARY // if defined (Win32) use binary files.
#define O_BINARY 0 // otherwise do nothing.
#endif

// Accumulated wall-clock timers for the major cache operations; reported by print_timers_on().
static elapsedTimer _t_totalLoad;
static elapsedTimer _t_totalRegister;
static elapsedTimer _t_totalFind;
static elapsedTimer _t_totalStore;

// The singleton cache instance; nullptr when the cache is off or closed.
SCCache* SCCache::_cache = nullptr;

// Timers are collected when CITime is set or [init] info logging is enabled.
static bool enable_timers() {
  return CITime || log_is_enabled(Info, init);
}

// Early setup: reconcile cache-related flags and open the cache file named by
// CachedCodeFile for load and/or store.
void SCCache::initialize() {
  if (LoadCachedCode && !UseSharedSpaces) {
    return; // Loading cached code requires CDS (shared spaces).
  }
  if (StoreCachedCode || LoadCachedCode) {
    // Cached code relies on class-init barriers; turn them on unless the user chose a mode.
    if (FLAG_IS_DEFAULT(ClassInitBarrierMode)) {
      FLAG_SET_DEFAULT(ClassInitBarrierMode, 1);
    }
  } else if (ClassInitBarrierMode > 0) {
    log_warning(scc, init)("Set ClassInitBarrierMode to 0 because StoreCachedCode and LoadCachedCode are false.");
    FLAG_SET_DEFAULT(ClassInitBarrierMode, 0);
  }
  if ((LoadCachedCode || StoreCachedCode) && CachedCodeFile != nullptr) {
    const int len = (int)strlen(CachedCodeFile);
    // Durable copy of the cache file path; the SCCache keeps (and later frees) this pointer.
    char* path = NEW_C_HEAP_ARRAY(char, len+1, mtCode);
    memcpy(path, CachedCodeFile, len);
    path[len] = '\0';
    if (!open_cache(path)) {
      return;
    }
    if (StoreCachedCode) {
      // Keep generated code cacheable: no stable-value folding, far branches only.
      FLAG_SET_DEFAULT(FoldStableValues, false);
      FLAG_SET_DEFAULT(ForceUnreachable, true);
    }
    FLAG_SET_DEFAULT(DelayCompilerStubsGeneration, false);
  }
}

// Second initialization phase, run after the Universe (and GC barrier set) is
// set up. Closes the cache if the VM configuration is incompatible with it.
void SCCache::init2() {
  if (!is_on()) {
    return;
  }
  // After Universe initialized
  BarrierSet* bs = BarrierSet::barrier_set();
  if (bs->is_a(BarrierSet::CardTableBarrierSet)) {
    address byte_map_base = ci_card_table_address_as<address>();
    if (is_on_for_write() && !external_word_Relocation::can_be_relocated(byte_map_base)) {
      // Bail out since we can't encode card table base address with relocation
      log_warning(scc, init)("Can't create Startup Code Cache because card table base address is not relocatable: " INTPTR_FORMAT, p2i(byte_map_base));
      close();
    }
  }
  if (!verify_vm_config()) {
    close();
  }
}

// Print the accumulated timers; load-side timers only matter when loading is on.
void SCCache::print_timers_on(outputStream* st) {
  if (LoadCachedCode) {
    st->print_cr ("    SC Load Time: %7.3f s", _t_totalLoad.seconds());
    st->print_cr ("      nmethod register: %7.3f s", _t_totalRegister.seconds());
    st->print_cr ("      find cached code: %7.3f s", _t_totalFind.seconds());
  }
  if (StoreCachedCode) {
    st->print_cr ("    SC Store Time: %7.3f s", _t_totalStore.seconds());
  }
}

// "C3" = running C2 as a third compiler under JVMCI; only meaningful when the cache is in use.
bool SCCache::is_C3_on() {
#if INCLUDE_JVMCI
  if (UseJVMCICompiler) {
    return (StoreCachedCode || LoadCachedCode) && UseC2asC3;
  }
#endif
  return false;
}

bool SCCache::is_code_load_thread_on() {
  return UseCodeLoadThread && LoadCachedCode;
}

// True if preload code should be generated for this compilation: normal entry
// (not OSR), cache open, preload generation enabled, and CDS can cache the method.
bool SCCache::gen_preload_code(ciMethod* m, int entry_bci) {
  VM_ENTRY_MARK;
  return (entry_bci == InvocationEntryBci) && is_on() && _cache->gen_preload_code() &&
         CDSAccess::can_generate_cached_code(m->get_Method());
}

// Print a compact per-entry status tag (e.g. "AP4+D1[LF]#42") for every cache
// entry belonging to nm's method.
static void print_helper(nmethod* nm, outputStream* st) {
  SCCache::iterate([&](SCCEntry* e) {
    if (e->method() == nm->method()) {
      ResourceMark rm;
      stringStream ss;
      ss.print("A%s%d", (e->for_preload() ? "P" : ""), e->comp_level());
      if (e->decompile() > 0) {
        ss.print("+D%d", e->decompile());
      }
      ss.print("[%s%s%s]",
               (e->is_loaded()   ? "L" : ""),
               (e->load_fail()   ? "F" : ""),
               (e->not_entrant() ? "I" : ""));
      ss.print("#%d", e->comp_id());

      st->print(" %s", ss.freeze());
    }
  });
}

// Close the cache: optionally dump statistics/timers and a per-nmethod summary,
// then destroy the singleton (the destructor finalizes any pending writes).
void SCCache::close() {
  if (is_on()) {
    if (SCCache::is_on_for_read()) {
      LogStreamHandle(Info, init) log;
      if (log.is_enabled()) {
        log.print_cr("Startup Code Cache statistics (when closed): ");
        SCCache::print_statistics_on(&log);
        log.cr();
        SCCache::print_timers_on(&log);

        LogStreamHandle(Info, scc, init) log1;
        if (log1.is_enabled()) {
          SCCache::print_unused_entries_on(&log1);
        }

        LogStreamHandle(Info, scc, codecache) info_scc;
        if (info_scc.is_enabled()) {
          // Walk all live nmethods and report their cache status.
          NMethodIterator iter(NMethodIterator::all);
          while (iter.next()) {
            nmethod* nm = iter.method();
            if (nm->is_in_use() && !nm->is_native_method() && !nm->is_osr_method()) {
              // Flags: S = method is in shared metaspace, A = loaded from SCC, P = preloaded.
              info_scc.print("%5d:%c%c%c%d:", nm->compile_id(),
                             (nm->method()->is_shared() ? 'S' : ' '),
                             (nm->is_scc() ? 'A' : ' '),
                             (nm->preloaded() ? 'P' : ' '),
                             nm->comp_level());
              print_helper(nm, &info_scc);
              info_scc.print(": ");
              CompileTask::print(&info_scc, nm, nullptr, true /*short_form*/);

              LogStreamHandle(Debug, scc, codecache) debug_scc;
              if (debug_scc.is_enabled()) {
                MethodTrainingData* mtd = MethodTrainingData::lookup_for(nm->method());
                if (mtd != nullptr) {
                  mtd->iterate_all_compiles([&](CompileTrainingData* ctd) {
                    debug_scc.print("  CTD: "); ctd->print_on(&debug_scc); debug_scc.cr();
                  });
                }
              }
            }
          }
        }
      }
    }

    delete _cache; // Free memory
    _cache = nullptr;
  }
}

// Mark a cache entry invalid (e.g. its nmethod was made not entrant).
void SCCache::invalidate(SCCEntry* entry) {
  // This could be concurrent execution
  if (entry != nullptr && is_on()) { // Request could come after cache is closed.
    _cache->invalidate_entry(entry);
  }
}

// True if 'entry' points into the mapped load buffer, i.e. it came from the
// cache file rather than being created during this run.
bool SCCache::is_loaded(SCCEntry* entry) {
  if (is_on() && _cache->cache_buffer() != nullptr) {
    // NOTE(review): the 64-bit pointer delta is truncated to 32 bits by this
    // cast; a negative delta becomes a large unsigned value and fails the
    // range check, which appears intended — confirm no wrap-around aliasing.
    return (uint)((char*)entry - _cache->cache_buffer()) < _cache->load_size();
  }
  return false;
}

// Kick off preloading of cached startup code on 'thread', unless disabled by
// flags (bit 3 of DisableCachedCode disables preload/"level 5" code).
void SCCache::preload_code(JavaThread* thread) {
  if ((ClassInitBarrierMode == 0) || !is_on_for_read()) {
    return;
  }
  if ((DisableCachedCode & (1 << 3)) != 0) {
    return; // no preloaded code (level 5);
  }
  _cache->preload_startup_code(thread);
}

// Look up a cached code entry for 'method' at 'comp_level'. Bits 0-2 of
// DisableCachedCode selectively disable levels 1, 2 and 4. Returns nullptr when
// not found, when the level is disabled, or when a compiler directive says to
// ignore precompiled code for this method.
SCCEntry* SCCache::find_code_entry(const methodHandle& method, uint comp_level) {
  switch (comp_level) {
    case CompLevel_simple:
      if ((DisableCachedCode & (1 << 0)) != 0) {
        return nullptr;
      }
      break;
    case CompLevel_limited_profile:
      if ((DisableCachedCode & (1 << 1)) != 0) {
        return nullptr;
      }
      break;
    case CompLevel_full_optimization:
      if ((DisableCachedCode & (1 << 2)) != 0) {
        return nullptr;
      }
      break;

    default: return nullptr; // Level 1, 2, and 4 only
  }
  TraceTime t1("SC total find code time", &_t_totalFind, enable_timers(), false);
  if (is_on() && _cache->cache_buffer() != nullptr) {
    MethodData* md = method->method_data();
    uint decomp = (md == nullptr) ? 0 : md->decompile_count();

    ResourceMark rm;
    // Entries are keyed by the hash of the method's full name-and-signature string.
    const char* target_name = method->name_and_sig_as_C_string();
    uint hash = java_lang_String::hash_code((const jbyte*)target_name, (int)strlen(target_name));
    SCCEntry* entry = _cache->find_entry(SCCEntry::Code, hash, comp_level, decomp);
    if (entry == nullptr) {
      log_info(scc, nmethod)("Missing entry for '%s' (comp_level %d, decomp: %d, hash: " UINT32_FORMAT_X_0 ")", target_name, (uint)comp_level, decomp, hash);
#ifdef ASSERT
    } else {
      // Guard against hash collisions: the stored name must match exactly.
      uint name_offset = entry->offset() + entry->name_offset();
      uint name_size   = entry->name_size(); // Includes terminating '\0'
      const char* name = _cache->cache_buffer() + name_offset;
      if (strncmp(target_name, name, name_size) != 0) {
        assert(false, "SCA: saved nmethod's name '%s' is different from '%s', hash: " UINT32_FORMAT_X_0, name, target_name, hash);
      }
#endif
    }

    DirectiveSet* directives = DirectivesStack::getMatchingDirective(method, nullptr);
    if (directives->IgnorePrecompiledOption) {
      LogStreamHandle(Info, scc, compilation) log;
      if (log.is_enabled()) {
        log.print("Ignore cached code entry on level %d for ", comp_level);
        method->print_value_on(&log);
      }
      return nullptr;
    }

    return entry;
  }
  return nullptr;
}

// Record a C string so it can be shared between cached code entries.
void SCCache::add_C_string(const char* str) {
  if (is_on_for_write()) {
    _cache->add_new_C_string(str);
  }
}

// Whether the compiler may constant-fold this field value. Folding is only
// restricted while generating the cache (folded values may not be valid when
// the code is reloaded in another run).
bool SCCache::allow_const_field(ciConstant& value) {
  return !is_on() || !StoreCachedCode // Restrict only when we generate cache
         // Can not trust primitive too   || !is_reference_type(value.basic_type())
         // May disable this too for now  || is_reference_type(value.basic_type()) && value.as_object()->should_be_constant()
         ;
}

// Open the cache file: for load, stat/open/read-verify it and build a read-side
// SCCache; for store (when no load cache was opened), build an empty write-side
// SCCache. Returns false (with _cache == nullptr) on any failure.
bool SCCache::open_cache(const char* cache_path) {
  if (LoadCachedCode) {
    log_info(scc)("Trying to load Startup Code Cache '%s'", cache_path);
    struct stat st;
    if (os::stat(cache_path, &st) != 0) {
      log_warning(scc, init)("Specified Startup Code Cache file not found '%s'", cache_path);
      return false;
    } else if ((st.st_mode & S_IFMT) != S_IFREG) {
      log_warning(scc, init)("Specified Startup Code Cache is not file '%s'", cache_path);
      return false;
    }
    int fd = os::open(cache_path, O_RDONLY | O_BINARY, 0);
    if (fd < 0) {
      if (errno == ENOENT) {
        log_warning(scc, init)("Specified Startup Code Cache file not found '%s'", cache_path);
      } else {
        log_warning(scc, init)("Failed to open Startup Code Cache file '%s': (%s)", cache_path, os::strerror(errno));
      }
      return false;
    } else {
      log_info(scc, init)("Opened for read Startup Code Cache '%s'", cache_path);
    }
    // The constructor reads the whole file into memory; the fd is not needed afterwards.
    SCCache* cache = new SCCache(cache_path, fd, (uint)st.st_size);
    bool failed = cache->failed();
    if (::close(fd) < 0) {
      log_warning(scc)("Failed to close for read Startup Code Cache file '%s'", cache_path);
      failed = true;
    }
    if (failed) {
      delete cache;
      _cache = nullptr;
      return false;
    }
    _cache = cache;
  }
  if (_cache == nullptr && StoreCachedCode) {
    SCCache* cache = new SCCache(cache_path, -1 /* fd */, 0 /* size */);
    if (cache->failed()) {
      delete cache;
      _cache = nullptr;
      return false;
    }
    _cache = cache;
  }
  return true;
}

// Placeholder directory describing cached-code data stored in CDS.
// See the skeleton-code notes below for how it is allocated and used.
class CachedCodeDirectory {
public:
  int _some_number;
  InstanceKlass* _some_klass;
  size_t _my_data_length;
  void* _my_data;
};

// Skeleton code for including cached code in CDS:
//
// [1] Use CachedCodeDirectory to keep track of all of data related to cached code.
//     E.g., you can build a hashtable to record what methods have been archived.
//
// [2] Memory for all data for cached code, including CachedCodeDirectory, should be
//     allocated using CDSAccess::allocate_from_code_cache().
//
// [3] CachedCodeDirectory must be the very first allocation.
//
// [4] Two kinds of pointer can be stored:
//     - A pointer p that points to metadata. CDSAccess::can_generate_cached_code(p) must return true.
//     - A pointer to a buffer returned by CDSAccess::allocate_from_code_cache().
//       (It's OK to point to an interior location within this buffer).
//     Such pointers must be stored using CDSAccess::set_pointer()
//
// The buffers allocated by CDSAccess::allocate_from_code_cache() are in a contiguous region. At runtime, this
// region is mapped to the beginning of the CodeCache (see _cds_code_space in codeCache.cpp). All the pointers
// in this buffer are relocated as necessary (e.g., to account for the runtime location of the CodeCache).
//
// Example:
//
// # make sure hw.cds doesn't exist, so that it's regenerated (1.5 step training)
// $ rm -f hw.cds; java -Xlog:cds,scc::uptime,tags,pid -XX:CacheDataStore=hw.cds -cp ~/tmp/HelloWorld.jar HelloWorld
//
// # After training is finished, hw.cds should contain a CachedCodeDirectory. You can see the effect of relocation
// # from the [scc] log.
// $ java -Xlog:cds,scc -XX:CacheDataStore=hw.cds -cp ~/tmp/HelloWorld.jar HelloWorld
// [0.016s][info][scc] new workflow: cached code mapped at 0x7fef97ebc000
// [0.016s][info][scc] _cached_code_directory->_some_klass     = 0x800009ca8 (java.lang.String)
// [0.016s][info][scc] _cached_code_directory->_some_number    = 0
// [0.016s][info][scc] _cached_code_directory->_my_data_length = 0
// [0.016s][info][scc] _cached_code_directory->_my_data        = 0x7fef97ebc020 (32 bytes offset from base)
//
// The 1.5 step training may be hard to debug. If you want to run in a debugger, run the above training step
// with an additional "-XX:+CDSManualFinalImage" command-line argument.
435 436 // This is always at the very beginning of the mmaped CDS "cc" (cached code) region 437 static CachedCodeDirectory* _cached_code_directory = nullptr; 438 439 #if INCLUDE_CDS_JAVA_HEAP 440 void SCCache::new_workflow_start_writing_cache() { 441 CachedCodeDirectory* dir = (CachedCodeDirectory*)CDSAccess::allocate_from_code_cache(sizeof(CachedCodeDirectory)); 442 _cached_code_directory = dir; 443 444 CDSAccess::set_pointer(&dir->_some_klass, vmClasses::String_klass()); 445 446 size_t n = 120; 447 void* d = (void*)CDSAccess::allocate_from_code_cache(n); 448 CDSAccess::set_pointer(&dir->_my_data, d); 449 } 450 451 void SCCache::new_workflow_end_writing_cache() { 452 453 } 454 455 void SCCache::new_workflow_load_cache() { 456 void* ptr = CodeCache::map_cached_code(); 457 if (ptr != nullptr) { 458 // At this point: 459 // - CodeCache::initialize_heaps() has finished. 460 // - CDS archive is fully mapped ("metadata", "heap" and "cached_code" regions are mapped) 461 // - All pointers in the mapped CDS regions are relocated. 462 // - CDSAccess::get_archived_object() works. 
463 ResourceMark rm; 464 _cached_code_directory = (CachedCodeDirectory*)ptr; 465 InstanceKlass* k = _cached_code_directory->_some_klass; 466 log_info(scc)("new workflow: cached code mapped at %p", ptr); 467 log_info(scc)("_cached_code_directory->_some_klass = %p (%s)", k, k->external_name()); 468 log_info(scc)("_cached_code_directory->_some_number = %d", _cached_code_directory->_some_number); 469 log_info(scc)("_cached_code_directory->_my_data_length = %zu", _cached_code_directory->_my_data_length); 470 log_info(scc)("_cached_code_directory->_my_data = %p (%zu bytes offset from base)", _cached_code_directory->_my_data, 471 pointer_delta((address)_cached_code_directory->_my_data, (address)_cached_code_directory, 1)); 472 } 473 } 474 #endif // INCLUDE_CDS_JAVA_HEAP 475 476 #define DATA_ALIGNMENT HeapWordSize 477 478 SCCache::SCCache(const char* cache_path, int fd, uint load_size) { 479 _load_header = nullptr; 480 _cache_path = cache_path; 481 _for_read = LoadCachedCode; 482 _for_write = StoreCachedCode; 483 _load_size = load_size; 484 _store_size = 0; 485 _write_position = 0; 486 _closing = false; 487 _failed = false; 488 _lookup_failed = false; 489 _table = nullptr; 490 _load_entries = nullptr; 491 _store_entries = nullptr; 492 _C_strings_buf = nullptr; 493 _load_buffer = nullptr; 494 _store_buffer = nullptr; 495 _C_load_buffer = nullptr; 496 _C_store_buffer = nullptr; 497 _store_entries_cnt = 0; 498 _gen_preload_code = false; 499 _for_preload = false; // changed while storing entry data 500 _has_clinit_barriers = false; 501 502 _compile_id = 0; 503 _comp_level = 0; 504 505 _use_meta_ptrs = UseSharedSpaces ? 
UseMetadataPointers : false; 506 507 // Read header at the begining of cache 508 uint header_size = sizeof(SCCHeader); 509 if (_for_read) { 510 // Read cache 511 _C_load_buffer = NEW_C_HEAP_ARRAY(char, load_size + DATA_ALIGNMENT, mtCode); 512 _load_buffer = align_up(_C_load_buffer, DATA_ALIGNMENT); 513 uint n = (uint)::read(fd, _load_buffer, load_size); 514 if (n != load_size) { 515 log_warning(scc, init)("Failed to read %d bytes at address " INTPTR_FORMAT " from Startup Code Cache file '%s'", load_size, p2i(_load_buffer), _cache_path); 516 set_failed(); 517 return; 518 } 519 log_info(scc, init)("Read %d bytes at address " INTPTR_FORMAT " from Startup Code Cache '%s'", load_size, p2i(_load_buffer), _cache_path); 520 521 _load_header = (SCCHeader*)addr(0); 522 const char* scc_jvm_version = addr(_load_header->jvm_version_offset()); 523 if (strncmp(scc_jvm_version, VM_Version::internal_vm_info_string(), strlen(scc_jvm_version)) != 0) { 524 log_warning(scc, init)("Disable Startup Code Cache: JVM version '%s' recorded in '%s' does not match current version '%s'", scc_jvm_version, _cache_path, VM_Version::internal_vm_info_string()); 525 set_failed(); 526 return; 527 } 528 if (!_load_header->verify_config(_cache_path, load_size)) { 529 set_failed(); 530 return; 531 } 532 log_info(scc, init)("Read header from Startup Code Cache '%s'", cache_path); 533 if (_load_header->has_meta_ptrs()) { 534 assert(UseSharedSpaces, "should be verified already"); 535 _use_meta_ptrs = true; // Regardless UseMetadataPointers 536 UseMetadataPointers = true; 537 } 538 // Read strings 539 load_strings(); 540 } 541 if (_for_write) { 542 _gen_preload_code = _use_meta_ptrs && (ClassInitBarrierMode > 0); 543 544 _C_store_buffer = NEW_C_HEAP_ARRAY(char, CachedCodeMaxSize + DATA_ALIGNMENT, mtCode); 545 _store_buffer = align_up(_C_store_buffer, DATA_ALIGNMENT); 546 // Entries allocated at the end of buffer in reverse (as on stack). 
547 _store_entries = (SCCEntry*)align_up(_C_store_buffer + CachedCodeMaxSize, DATA_ALIGNMENT); 548 log_info(scc, init)("Allocated store buffer at address " INTPTR_FORMAT " of size %d", p2i(_store_buffer), CachedCodeMaxSize); 549 } 550 _table = new SCAddressTable(); 551 } 552 553 void SCCache::init_table() { 554 SCCache* cache = SCCache::cache(); 555 if (cache != nullptr && cache->_table != nullptr) { 556 cache->_table->init(); 557 } 558 } 559 560 void SCCache::init_opto_table() { 561 SCCache* cache = SCCache::cache(); 562 if (cache != nullptr && cache->_table != nullptr) { 563 cache->_table->init_opto(); 564 } 565 } 566 567 void SCCache::init_c1_table() { 568 SCCache* cache = SCCache::cache(); 569 if (cache != nullptr && cache->_table != nullptr) { 570 cache->_table->init_c1(); 571 } 572 } 573 574 void SCConfig::record(bool use_meta_ptrs) { 575 _flags = 0; 576 if (use_meta_ptrs) { 577 _flags |= metadataPointers; 578 } 579 #ifdef ASSERT 580 _flags |= debugVM; 581 #endif 582 if (UseCompressedOops) { 583 _flags |= compressedOops; 584 } 585 if (UseCompressedClassPointers) { 586 _flags |= compressedClassPointers; 587 } 588 if (UseTLAB) { 589 _flags |= useTLAB; 590 } 591 if (JavaAssertions::systemClassDefault()) { 592 _flags |= systemClassAssertions; 593 } 594 if (JavaAssertions::userClassDefault()) { 595 _flags |= userClassAssertions; 596 } 597 if (EnableContended) { 598 _flags |= enableContendedPadding; 599 } 600 if (RestrictContended) { 601 _flags |= restrictContendedPadding; 602 } 603 if (UseEmptySlotsInSupers) { 604 _flags |= useEmptySlotsInSupers; 605 } 606 _compressedOopShift = CompressedOops::shift(); 607 _compressedKlassShift = CompressedKlassPointers::shift(); 608 _contendedPaddingWidth = ContendedPaddingWidth; 609 _objectAlignment = ObjectAlignmentInBytes; 610 _gc = (uint)Universe::heap()->kind(); 611 } 612 613 bool SCConfig::verify(const char* cache_path) const { 614 #ifdef ASSERT 615 if ((_flags & debugVM) == 0) { 616 log_warning(scc, init)("Disable Startup 
Code Cache: '%s' was created by product VM, it can't be used by debug VM", cache_path); 617 return false; 618 } 619 #else 620 if ((_flags & debugVM) != 0) { 621 log_warning(scc, init)("Disable Startup Code Cache: '%s' was created by debug VM, it can't be used by product VM", cache_path); 622 return false; 623 } 624 #endif 625 626 CollectedHeap::Name scc_gc = (CollectedHeap::Name)_gc; 627 if (scc_gc != Universe::heap()->kind()) { 628 log_warning(scc, init)("Disable Startup Code Cache: '%s' was created with %s vs current %s", cache_path, GCConfig::hs_err_name(scc_gc), GCConfig::hs_err_name()); 629 return false; 630 } 631 632 if (((_flags & compressedOops) != 0) != UseCompressedOops) { 633 log_warning(scc, init)("Disable Startup Code Cache: '%s' was created with UseCompressedOops = %s", cache_path, UseCompressedOops ? "false" : "true"); 634 return false; 635 } 636 if (((_flags & compressedClassPointers) != 0) != UseCompressedClassPointers) { 637 log_warning(scc, init)("Disable Startup Code Cache: '%s' was created with UseCompressedClassPointers = %s", cache_path, UseCompressedClassPointers ? "false" : "true"); 638 return false; 639 } 640 641 if (((_flags & systemClassAssertions) != 0) != JavaAssertions::systemClassDefault()) { 642 log_warning(scc, init)("Disable Startup Code Cache: '%s' was created with JavaAssertions::systemClassDefault() = %s", cache_path, JavaAssertions::systemClassDefault() ? "disabled" : "enabled"); 643 return false; 644 } 645 if (((_flags & userClassAssertions) != 0) != JavaAssertions::userClassDefault()) { 646 log_warning(scc, init)("Disable Startup Code Cache: '%s' was created with JavaAssertions::userClassDefault() = %s", cache_path, JavaAssertions::userClassDefault() ? "disabled" : "enabled"); 647 return false; 648 } 649 650 if (((_flags & enableContendedPadding) != 0) != EnableContended) { 651 log_warning(scc, init)("Disable Startup Code Cache: '%s' was created with EnableContended = %s", cache_path, EnableContended ? 
"false" : "true"); 652 return false; 653 } 654 if (((_flags & restrictContendedPadding) != 0) != RestrictContended) { 655 log_warning(scc, init)("Disable Startup Code Cache: '%s' was created with RestrictContended = %s", cache_path, RestrictContended ? "false" : "true"); 656 return false; 657 } 658 if (((_flags & useEmptySlotsInSupers) != 0) != UseEmptySlotsInSupers) { 659 log_warning(scc, init)("Disable Startup Code Cache: '%s' was created with UseEmptySlotsInSupers = %s", cache_path, UseEmptySlotsInSupers ? "false" : "true"); 660 return false; 661 } 662 663 if (_compressedOopShift != (uint)CompressedOops::shift()) { 664 log_warning(scc, init)("Disable Startup Code Cache: '%s' was created with CompressedOops::shift() = %d vs current %d", cache_path, _compressedOopShift, CompressedOops::shift()); 665 return false; 666 } 667 if (_compressedKlassShift != (uint)CompressedKlassPointers::shift()) { 668 log_warning(scc, init)("Disable Startup Code Cache: '%s' was created with CompressedKlassPointers::shift() = %d vs current %d", cache_path, _compressedKlassShift, CompressedKlassPointers::shift()); 669 return false; 670 } 671 if (_contendedPaddingWidth != (uint)ContendedPaddingWidth) { 672 log_warning(scc, init)("Disable Startup Code Cache: '%s' was created with ContendedPaddingWidth = %d vs current %d", cache_path, _contendedPaddingWidth, ContendedPaddingWidth); 673 return false; 674 } 675 if (_objectAlignment != (uint)ObjectAlignmentInBytes) { 676 log_warning(scc, init)("Disable Startup Code Cache: '%s' was created with ObjectAlignmentInBytes = %d vs current %d", cache_path, _objectAlignment, ObjectAlignmentInBytes); 677 return false; 678 } 679 return true; 680 } 681 682 bool SCCHeader::verify_config(const char* cache_path, uint load_size) const { 683 if (_version != SCC_VERSION) { 684 log_warning(scc, init)("Disable Startup Code Cache: different SCC version %d vs %d recorded in '%s'", SCC_VERSION, _version, cache_path); 685 return false; 686 } 687 if (_cache_size != 
load_size) { 688 log_warning(scc, init)("Disable Startup Code Cache: different cached code size %d vs %d recorded in '%s'", load_size, _cache_size, cache_path); 689 return false; 690 } 691 if (has_meta_ptrs() && !UseSharedSpaces) { 692 log_warning(scc, init)("Disable Startup Cached Code: '%s' contains metadata pointers but CDS is off", cache_path); 693 return false; 694 } 695 return true; 696 } 697 698 volatile int SCCache::_reading_nmethod = 0; 699 700 SCCache::~SCCache() { 701 if (_closing) { 702 return; // Already closed 703 } 704 // Stop any further access to cache. 705 // Checked on entry to load_nmethod() and store_nmethod(). 706 _closing = true; 707 if (_for_read && _reading_nmethod > 0) { 708 // Wait for all load_nmethod() finish. 709 // TODO: may be have new separate locker for SCA. 710 MonitorLocker locker(Compilation_lock, Mutex::_no_safepoint_check_flag); 711 while (_reading_nmethod > 0) { 712 locker.wait(10); // Wait 10 ms 713 } 714 } 715 // Prevent writing code into cache while we are closing it. 716 // This lock held by ciEnv::register_method() which calls store_nmethod(). 
717 MutexLocker ml(Compile_lock); 718 if (for_write()) { // Finalize cache 719 finish_write(); 720 } 721 FREE_C_HEAP_ARRAY(char, _cache_path); 722 if (_C_load_buffer != nullptr) { 723 FREE_C_HEAP_ARRAY(char, _C_load_buffer); 724 _C_load_buffer = nullptr; 725 _load_buffer = nullptr; 726 } 727 if (_C_store_buffer != nullptr) { 728 FREE_C_HEAP_ARRAY(char, _C_store_buffer); 729 _C_store_buffer = nullptr; 730 _store_buffer = nullptr; 731 } 732 if (_table != nullptr) { 733 delete _table; 734 _table = nullptr; 735 } 736 } 737 738 SCCache* SCCache::open_for_read() { 739 if (SCCache::is_on_for_read()) { 740 return SCCache::cache(); 741 } 742 return nullptr; 743 } 744 745 SCCache* SCCache::open_for_write() { 746 if (SCCache::is_on_for_write()) { 747 SCCache* cache = SCCache::cache(); 748 cache->clear_lookup_failed(); // Reset bit 749 return cache; 750 } 751 return nullptr; 752 } 753 754 void copy_bytes(const char* from, address to, uint size) { 755 assert(size > 0, "sanity"); 756 bool by_words = true; 757 if ((size > 2 * HeapWordSize) && (((intptr_t)from | (intptr_t)to) & (HeapWordSize - 1)) == 0) { 758 // Use wordwise copies if possible: 759 Copy::disjoint_words((HeapWord*)from, 760 (HeapWord*)to, 761 ((size_t)size + HeapWordSize-1) / HeapWordSize); 762 } else { 763 by_words = false; 764 Copy::conjoint_jbytes(from, to, (size_t)size); 765 } 766 log_trace(scc)("Copied %d bytes as %s from " INTPTR_FORMAT " to " INTPTR_FORMAT, size, (by_words ? 
"HeapWord" : "bytes"), p2i(from), p2i(to)); 767 } 768 769 void SCCReader::set_read_position(uint pos) { 770 if (pos == _read_position) { 771 return; 772 } 773 assert(pos < _cache->load_size(), "offset:%d >= file size:%d", pos, _cache->load_size()); 774 _read_position = pos; 775 } 776 777 bool SCCache::set_write_position(uint pos) { 778 if (pos == _write_position) { 779 return true; 780 } 781 if (_store_size < _write_position) { 782 _store_size = _write_position; // Adjust during write 783 } 784 assert(pos < _store_size, "offset:%d >= file size:%d", pos, _store_size); 785 _write_position = pos; 786 return true; 787 } 788 789 static char align_buffer[256] = { 0 }; 790 791 bool SCCache::align_write() { 792 // We are not executing code from cache - we copy it by bytes first. 793 // No need for big alignment (or at all). 794 uint padding = DATA_ALIGNMENT - (_write_position & (DATA_ALIGNMENT - 1)); 795 if (padding == DATA_ALIGNMENT) { 796 return true; 797 } 798 uint n = write_bytes((const void*)&align_buffer, padding); 799 if (n != padding) { 800 return false; 801 } 802 log_trace(scc)("Adjust write alignment in Startup Code Cache '%s'", _cache_path); 803 return true; 804 } 805 806 uint SCCache::write_bytes(const void* buffer, uint nbytes) { 807 assert(for_write(), "Code Cache file is not created"); 808 if (nbytes == 0) { 809 return 0; 810 } 811 uint new_position = _write_position + nbytes; 812 if (new_position >= (uint)((char*)_store_entries - _store_buffer)) { 813 log_warning(scc)("Failed to write %d bytes at offset %d to Startup Code Cache file '%s'. 
Increase CachedCodeMaxSize.", 814 nbytes, _write_position, _cache_path); 815 set_failed(); 816 return 0; 817 } 818 copy_bytes((const char* )buffer, (address)(_store_buffer + _write_position), nbytes); 819 log_trace(scc)("Wrote %d bytes at offset %d to Startup Code Cache '%s'", nbytes, _write_position, _cache_path); 820 _write_position += nbytes; 821 if (_store_size < _write_position) { 822 _store_size = _write_position; 823 } 824 return nbytes; 825 } 826 827 void SCCEntry::update_method_for_writing() { 828 if (_method != nullptr) { 829 _method = CDSAccess::method_in_cached_code(_method); 830 } 831 } 832 833 void SCCEntry::print(outputStream* st) const { 834 st->print_cr(" SCA entry " INTPTR_FORMAT " [kind: %d, id: " UINT32_FORMAT_X_0 ", offset: %d, size: %d, comp_level: %d, comp_id: %d, decompiled: %d, %s%s%s%s]", 835 p2i(this), (int)_kind, _id, _offset, _size, _comp_level, _comp_id, _decompile, 836 (_not_entrant? "not_entrant" : "entrant"), 837 (_loaded ? ", loaded" : ""), 838 (_has_clinit_barriers ? ", has_clinit_barriers" : ""), 839 (_for_preload ? 
", for_preload" : "")); 840 } 841 842 void* SCCEntry::operator new(size_t x, SCCache* cache) { 843 return (void*)(cache->add_entry()); 844 } 845 846 bool skip_preload(methodHandle mh) { 847 if (!mh->method_holder()->is_loaded()) { 848 return true; 849 } 850 DirectiveSet* directives = DirectivesStack::getMatchingDirective(mh, nullptr); 851 if (directives->DontPreloadOption) { 852 LogStreamHandle(Info, scc, init) log; 853 if (log.is_enabled()) { 854 log.print("Exclude preloading code for "); 855 mh->print_value_on(&log); 856 } 857 return true; 858 } 859 return false; 860 } 861 862 void SCCache::preload_startup_code(TRAPS) { 863 assert(_for_read, "sanity"); 864 uint count = _load_header->entries_count(); 865 if (_load_entries == nullptr) { 866 // Read it 867 _search_entries = (uint*)addr(_load_header->entries_offset()); // [id, index] 868 _load_entries = (SCCEntry*)(_search_entries + 2 * count); 869 log_info(scc, init)("Read %d entries table at offset %d from Startup Code Cache '%s'", count, _load_header->entries_offset(), _cache_path); 870 } 871 uint preload_entries_count = _load_header->preload_entries_count(); 872 if (preload_entries_count > 0) { 873 uint* entries_index = (uint*)addr(_load_header->preload_entries_offset()); 874 log_info(scc, init)("Load %d preload entries from Startup Code Cache '%s'", preload_entries_count, _cache_path); 875 uint count = MIN2(preload_entries_count, SCLoadStop); 876 for (uint i = SCLoadStart; i < count; i++) { 877 uint index = entries_index[i]; 878 SCCEntry* entry = &(_load_entries[index]); 879 if (entry->not_entrant()) { 880 continue; 881 } 882 methodHandle mh(THREAD, entry->method()); 883 assert((mh.not_null() && MetaspaceShared::is_in_shared_metaspace((address)mh())), "sanity"); 884 if (skip_preload(mh)) { 885 continue; // Exclude preloading for this method 886 } 887 assert(mh->method_holder()->is_loaded(), ""); 888 if (!mh->method_holder()->is_linked()) { 889 assert(!HAS_PENDING_EXCEPTION, ""); 890 
        // Link on behalf of the preload; a linkage failure is logged and
        // swallowed so the remaining preload entries are still processed.
        mh->method_holder()->link_class(THREAD);
        if (HAS_PENDING_EXCEPTION) {
          LogStreamHandle(Warning, scc) log;
          if (log.is_enabled()) {
            ResourceMark rm;
            log.print("Linkage failed for %s: ", mh->method_holder()->external_name());
            THREAD->pending_exception()->print_value_on(&log);
            if (log_is_enabled(Debug, scc)) {
              THREAD->pending_exception()->print_on(&log);
            }
          }
          CLEAR_PENDING_EXCEPTION;
        }
      }
      if (mh->scc_entry() != nullptr) {
        // Second C2 compilation of the same method could happen for
        // different reasons without marking first entry as not entrant.
        continue; // Keep old entry to avoid issues
      }
      mh->set_scc_entry(entry);
      CompileBroker::compile_method(mh, InvocationEntryBci, CompLevel_full_optimization, methodHandle(), 0, false, CompileTask::Reason_Preload, CHECK);
    }
  }
}

// Returns true if 'entry' matches the requested kind/id and, for Code
// entries, is entrant, has no clinit barriers, and matches the requested
// comp_level (and decompile count, except at CompLevel_limited_profile).
static bool check_entry(SCCEntry::Kind kind, uint id, uint comp_level, uint decomp, SCCEntry* entry) {
  if (entry->kind() == kind) {
    assert(entry->id() == id, "sanity");
    if (kind != SCCEntry::Code || (!entry->not_entrant() && !entry->has_clinit_barriers() &&
        entry->comp_level() == comp_level &&
        (comp_level == CompLevel_limited_profile || entry->decompile() == decomp))) {
      return true; // Found
    }
  }
  return false;
}

// Look up a cache entry by (kind, id, comp_level, decomp) in the search
// table, which is an array of [id, index] uint pairs sorted by id.
// Binary-search for the id, then scan neighboring pairs with the same id
// (duplicates are adjacent) until check_entry() accepts one.
SCCEntry* SCCache::find_entry(SCCEntry::Kind kind, uint id, uint comp_level, uint decomp) {
  assert(_for_read, "sanity");
  uint count = _load_header->entries_count();
  if (_load_entries == nullptr) {
    // Read it
    _search_entries = (uint*)addr(_load_header->entries_offset()); // [id, index]
    _load_entries = (SCCEntry*)(_search_entries + 2 * count);
    log_info(scc, init)("Read %d entries table at offset %d from Startup Code Cache '%s'", count, _load_header->entries_offset(), _cache_path);
  }
  // Binary search
  int l = 0;
  int h = count - 1;
  while (l <= h) {
    int mid = (l + h) >> 1;
    int ix = mid * 2;
    uint is = _search_entries[ix];
    if (is == id) {
      int index = _search_entries[ix + 1];
      SCCEntry* entry = &(_load_entries[index]);
      if (check_entry(kind, id, comp_level, decomp, entry)) {
        return entry; // Found
      }
      // Leaner search around (could be the same nmethod with different decompile count)
      for (int i = mid - 1; i >= l; i--) { // search back
        ix = i * 2;
        is = _search_entries[ix];
        if (is != id) {
          break;
        }
        index = _search_entries[ix + 1];
        SCCEntry* entry = &(_load_entries[index]);
        if (check_entry(kind, id, comp_level, decomp, entry)) {
          return entry; // Found
        }
      }
      for (int i = mid + 1; i <= h; i++) { // search forward
        ix = i * 2;
        is = _search_entries[ix];
        if (is != id) {
          break;
        }
        index = _search_entries[ix + 1];
        SCCEntry* entry = &(_load_entries[index]);
        if (check_entry(kind, id, comp_level, decomp, entry)) {
          return entry; // Found
        }
      }
      break; // Not found match (different decompile count or not_entrant state).
    } else if (is < id) {
      l = mid + 1;
    } else {
      h = mid - 1;
    }
  }
  return nullptr;
}

// Mark 'entry' (and, recursively, any chained clinit-barrier follow-up
// entries reachable via next()) as not entrant, logging the invalidation.
void SCCache::invalidate_entry(SCCEntry* entry) {
  assert(entry!= nullptr, "all entries should be read already");
  if (entry->not_entrant()) {
    return; // Someone invalidated it already
  }
#ifdef ASSERT
  // Verify the pointer really belongs to the load or store entry arrays.
  bool found = false;
  if (_for_read) {
    uint count = _load_header->entries_count();
    uint i = 0;
    for(; i < count; i++) {
      if (entry == &(_load_entries[i])) {
        break;
      }
    }
    found = (i < count);
  }
  if (!found && _for_write) {
    uint count = _store_entries_cnt;
    uint i = 0;
    for(; i < count; i++) {
      if (entry == &(_store_entries[i])) {
        break;
      }
    }
    found = (i < count);
  }
  assert(found, "entry should exist");
#endif
  entry->set_not_entrant();
  {
    // Entry name string lives inside the load or store buffer at
    // entry start + name_offset; pick the buffer the entry came from.
    uint name_offset = entry->offset() + entry->name_offset();
    const char* name;
    if (SCCache::is_loaded(entry)) {
      name = _load_buffer + name_offset;
    } else {
      name = _store_buffer + name_offset;
    }
    uint level = entry->comp_level();
    uint comp_id = entry->comp_id();
    uint decomp = entry->decompile();
    bool clinit_brs = entry->has_clinit_barriers();
    log_info(scc, nmethod)("Invalidated entry for '%s' (comp_id %d, comp_level %d, decomp: %d, hash: " UINT32_FORMAT_X_0 "%s)",
                           name, comp_id, level, decomp, entry->id(), (clinit_brs ? ", has clinit barriers" : ""));
  }
  if (entry->next() != nullptr) {
    entry = entry->next();
    assert(entry->has_clinit_barriers(), "expecting only such entries here");
    invalidate_entry(entry);
  }
}

// qsort comparator over the [id, index] search-table pairs: orders by the
// leading uint (the id) of each pair.
extern "C" {
  static int uint_cmp(const void *i, const void *j) {
    uint a = *(uint *)i;
    uint b = *(uint *)j;
    return a > b ? 1 : a < b ?
-1 : 0; 1041 } 1042 } 1043 1044 bool SCCache::finish_write() { 1045 if (!align_write()) { 1046 return false; 1047 } 1048 uint strings_offset = _write_position; 1049 int strings_count = store_strings(); 1050 if (strings_count < 0) { 1051 return false; 1052 } 1053 if (!align_write()) { 1054 return false; 1055 } 1056 uint strings_size = _write_position - strings_offset; 1057 1058 uint entries_count = 0; // Number of entrant (useful) code entries 1059 uint entries_offset = _write_position; 1060 1061 uint store_count = _store_entries_cnt; 1062 if (store_count > 0) { 1063 uint header_size = (uint)align_up(sizeof(SCCHeader), DATA_ALIGNMENT); 1064 const char* vm_version = VM_Version::internal_vm_info_string(); 1065 uint vm_version_size = (uint)align_up(strlen(vm_version) + 1, DATA_ALIGNMENT); 1066 uint load_count = (_load_header != nullptr) ? _load_header->entries_count() : 0; 1067 uint code_count = store_count + load_count; 1068 uint search_count = code_count * 2; 1069 uint search_size = search_count * sizeof(uint); 1070 uint entries_size = (uint)align_up(code_count * sizeof(SCCEntry), DATA_ALIGNMENT); // In bytes 1071 uint preload_entries_cnt = 0; 1072 uint* preload_entries = NEW_C_HEAP_ARRAY(uint, code_count, mtCode); 1073 uint preload_entries_size = code_count * sizeof(uint); 1074 // _write_position should include code and strings 1075 uint code_alignment = code_count * DATA_ALIGNMENT; // We align_up code size when storing it. 
1076 uint total_size = _write_position + _load_size + header_size + vm_version_size + 1077 code_alignment + search_size + preload_entries_size + entries_size; 1078 1079 // Create ordered search table for entries [id, index]; 1080 uint* search = NEW_C_HEAP_ARRAY(uint, search_count, mtCode); 1081 char* buffer = NEW_C_HEAP_ARRAY(char, total_size + DATA_ALIGNMENT, mtCode); 1082 char* start = align_up(buffer, DATA_ALIGNMENT); 1083 char* current = start + header_size; // Skip header 1084 uint jvm_version_offset = current - start; 1085 copy_bytes(vm_version, (address)current, (uint)strlen(vm_version) + 1); 1086 current += vm_version_size; 1087 1088 SCCEntry* entries_address = _store_entries; // Pointer to latest entry 1089 uint not_entrant_nb = 0; 1090 uint max_size = 0; 1091 // Add old entries first 1092 if (_for_read && (_load_header != nullptr)) { 1093 for(uint i = 0; i < load_count; i++) { 1094 if (_load_entries[i].load_fail()) { 1095 continue; 1096 } 1097 if (_load_entries[i].not_entrant()) { 1098 log_info(scc, exit)("Not entrant load entry id: %d, decomp: %d, hash: " UINT32_FORMAT_X_0, i, _load_entries[i].decompile(), _load_entries[i].id()); 1099 not_entrant_nb++; 1100 if (_load_entries[i].for_preload()) { 1101 // Skip not entrant preload code: 1102 // we can't pre-load code which may have failing dependencies. 
1103 continue; 1104 } 1105 _load_entries[i].set_entrant(); // Reset 1106 } else if (_load_entries[i].for_preload() && _load_entries[i].method() != nullptr) { 1107 // record entrant first version code for pre-loading 1108 preload_entries[preload_entries_cnt++] = entries_count; 1109 } 1110 { 1111 uint size = align_up(_load_entries[i].size(), DATA_ALIGNMENT); 1112 if (size > max_size) { 1113 max_size = size; 1114 } 1115 copy_bytes((_load_buffer + _load_entries[i].offset()), (address)current, size); 1116 _load_entries[i].set_offset(current - start); // New offset 1117 current += size; 1118 uint n = write_bytes(&(_load_entries[i]), sizeof(SCCEntry)); 1119 if (n != sizeof(SCCEntry)) { 1120 FREE_C_HEAP_ARRAY(char, buffer); 1121 FREE_C_HEAP_ARRAY(uint, search); 1122 return false; 1123 } 1124 search[entries_count*2 + 0] = _load_entries[i].id(); 1125 search[entries_count*2 + 1] = entries_count; 1126 entries_count++; 1127 } 1128 } 1129 } 1130 // SCCEntry entries were allocated in reverse in store buffer. 1131 // Process them in reverse order to cache first code first. 1132 for (int i = store_count - 1; i >= 0; i--) { 1133 if (entries_address[i].load_fail()) { 1134 continue; 1135 } 1136 if (entries_address[i].not_entrant()) { 1137 log_info(scc, exit)("Not entrant new entry comp_id: %d, comp_level: %d, decomp: %d, hash: " UINT32_FORMAT_X_0 "%s", entries_address[i].comp_id(), entries_address[i].comp_level(), entries_address[i].decompile(), entries_address[i].id(), (entries_address[i].has_clinit_barriers() ? ", has clinit barriers" : "")); 1138 not_entrant_nb++; 1139 if (entries_address[i].for_preload()) { 1140 // Skip not entrant preload code: 1141 // we can't pre-load code which may have failing dependencies. 
1142 continue; 1143 } 1144 entries_address[i].set_entrant(); // Reset 1145 } else if (entries_address[i].for_preload() && entries_address[i].method() != nullptr) { 1146 // record entrant first version code for pre-loading 1147 preload_entries[preload_entries_cnt++] = entries_count; 1148 } 1149 { 1150 entries_address[i].set_next(nullptr); // clear pointers before storing data 1151 uint size = align_up(entries_address[i].size(), DATA_ALIGNMENT); 1152 if (size > max_size) { 1153 max_size = size; 1154 } 1155 copy_bytes((_store_buffer + entries_address[i].offset()), (address)current, size); 1156 entries_address[i].set_offset(current - start); // New offset 1157 entries_address[i].update_method_for_writing(); 1158 current += size; 1159 uint n = write_bytes(&(entries_address[i]), sizeof(SCCEntry)); 1160 if (n != sizeof(SCCEntry)) { 1161 FREE_C_HEAP_ARRAY(char, buffer); 1162 FREE_C_HEAP_ARRAY(uint, search); 1163 return false; 1164 } 1165 search[entries_count*2 + 0] = entries_address[i].id(); 1166 search[entries_count*2 + 1] = entries_count; 1167 entries_count++; 1168 } 1169 } 1170 if (entries_count == 0) { 1171 log_info(scc, exit)("No new entires, cache files %s was not %s", _cache_path, (_for_read ? 
"updated" : "created")); 1172 FREE_C_HEAP_ARRAY(char, buffer); 1173 FREE_C_HEAP_ARRAY(uint, search); 1174 return true; // Nothing to write 1175 } 1176 assert(entries_count <= (store_count + load_count), "%d > (%d + %d)", entries_count, store_count, load_count); 1177 // Write strings 1178 if (strings_count > 0) { 1179 copy_bytes((_store_buffer + strings_offset), (address)current, strings_size); 1180 strings_offset = (current - start); // New offset 1181 current += strings_size; 1182 } 1183 uint preload_entries_offset = (current - start); 1184 preload_entries_size = preload_entries_cnt * sizeof(uint); 1185 if (preload_entries_size > 0) { 1186 copy_bytes((const char*)preload_entries, (address)current, preload_entries_size); 1187 current += preload_entries_size; 1188 log_info(scc, exit)("Wrote %d preload entries to Startup Code Cache '%s'", preload_entries_cnt, _cache_path); 1189 } 1190 if (preload_entries != nullptr) { 1191 FREE_C_HEAP_ARRAY(uint, preload_entries); 1192 } 1193 1194 uint new_entries_offset = (current - start); // New offset 1195 // Sort and store search table 1196 qsort(search, entries_count, 2*sizeof(uint), uint_cmp); 1197 search_size = 2 * entries_count * sizeof(uint); 1198 copy_bytes((const char*)search, (address)current, search_size); 1199 FREE_C_HEAP_ARRAY(uint, search); 1200 current += search_size; 1201 1202 // Write entries 1203 entries_size = entries_count * sizeof(SCCEntry); // New size 1204 copy_bytes((_store_buffer + entries_offset), (address)current, entries_size); 1205 current += entries_size; 1206 log_info(scc, exit)("Wrote %d SCCEntry entries (%d were not entrant, %d max size) to Startup Code Cache '%s'", entries_count, not_entrant_nb, max_size, _cache_path); 1207 1208 uint size = (current - start); 1209 assert(size <= total_size, "%d > %d", size , total_size); 1210 1211 // Finalize header 1212 SCCHeader* header = (SCCHeader*)start; 1213 header->init(jvm_version_offset, size, 1214 (uint)strings_count, strings_offset, 1215 entries_count, 
new_entries_offset, 1216 preload_entries_cnt, preload_entries_offset, 1217 _use_meta_ptrs); 1218 log_info(scc, init)("Wrote header to Startup Code Cache '%s'", _cache_path); 1219 1220 // Now store to file 1221 #ifdef _WINDOWS // On Windows, need WRITE permission to remove the file. 1222 chmod(_cache_path, _S_IREAD | _S_IWRITE); 1223 #endif 1224 // Use remove() to delete the existing file because, on Unix, this will 1225 // allow processes that have it open continued access to the file. 1226 remove(_cache_path); 1227 int fd = os::open(_cache_path, O_RDWR | O_CREAT | O_TRUNC | O_BINARY, 0444); 1228 if (fd < 0) { 1229 log_warning(scc, exit)("Unable to create Startup Code Cache file '%s': (%s)", _cache_path, os::strerror(errno)); 1230 FREE_C_HEAP_ARRAY(char, buffer); 1231 return false; 1232 } else { 1233 log_info(scc, exit)("Opened for write Startup Code Cache '%s'", _cache_path); 1234 } 1235 bool success = os::write(fd, start, (size_t)size); 1236 if (!success) { 1237 log_warning(scc, exit)("Failed to write %d bytes to Startup Code Cache file '%s': (%s)", size, _cache_path, os::strerror(errno)); 1238 FREE_C_HEAP_ARRAY(char, buffer); 1239 return false; 1240 } 1241 log_info(scc, exit)("Wrote %d bytes to Startup Code Cache '%s'", size, _cache_path); 1242 if (::close(fd) < 0) { 1243 log_warning(scc, exit)("Failed to close for write Startup Code Cache file '%s'", _cache_path); 1244 } else { 1245 log_info(scc, exit)("Closed for write Startup Code Cache '%s'", _cache_path); 1246 } 1247 FREE_C_HEAP_ARRAY(char, buffer); 1248 } 1249 return true; 1250 } 1251 1252 bool SCCache::load_stub(StubCodeGenerator* cgen, vmIntrinsicID id, const char* name, address start) { 1253 assert(start == cgen->assembler()->pc(), "wrong buffer"); 1254 SCCache* cache = open_for_read(); 1255 if (cache == nullptr) { 1256 return false; 1257 } 1258 SCCEntry* entry = cache->find_entry(SCCEntry::Stub, (uint)id); 1259 if (entry == nullptr) { 1260 return false; 1261 } 1262 uint entry_position = 
      entry->offset();
  // Read name
  uint name_offset = entry->name_offset() + entry_position;
  uint name_size = entry->name_size(); // Includes terminating '\0'
  const char* saved_name = cache->addr(name_offset);
  // Saved name must match the requested stub; mismatch marks the whole
  // cache as failed since offsets can no longer be trusted.
  if (strncmp(name, saved_name, (name_size - 1)) != 0) {
    log_warning(scc)("Saved stub's name '%s' is different from '%s' for id:%d", saved_name, name, (int)id);
    cache->set_failed();
    return false;
  }
  log_info(scc,stubs)("Reading stub '%s' id:%d from Startup Code Cache '%s'", name, (int)id, cache->_cache_path);
  // Read code
  uint code_offset = entry->code_offset() + entry_position;
  uint code_size = entry->code_size();
  copy_bytes(cache->addr(code_offset), start, code_size);
  cgen->assembler()->code_section()->set_end(start + code_size);
  log_info(scc,stubs)("Read stub '%s' id:%d from Startup Code Cache '%s'", name, (int)id, cache->_cache_path);
  return true;
}

// Record a freshly generated stub (code bytes + name) in the write cache
// and register a Stub-kind SCCEntry for it. Relocations are not stored:
// the ASSERT block fatals on any relocation type other than 'none'.
bool SCCache::store_stub(StubCodeGenerator* cgen, vmIntrinsicID id, const char* name, address start) {
  SCCache* cache = open_for_write();
  if (cache == nullptr) {
    return false;
  }
  log_info(scc, stubs)("Writing stub '%s' id:%d to Startup Code Cache '%s'", name, (int)id, cache->_cache_path);
  if (!cache->align_write()) {
    return false;
  }
#ifdef ASSERT
  // Stubs are expected to be relocation-free; dump and fatal otherwise.
  CodeSection* cs = cgen->assembler()->code_section();
  if (cs->has_locs()) {
    uint reloc_count = cs->locs_count();
    tty->print_cr("======== write stubs code section relocations [%d]:", reloc_count);
    // Collect additional data
    RelocIterator iter(cs);
    while (iter.next()) {
      switch (iter.type()) {
        case relocInfo::none:
          break;
        default: {
          iter.print_current_on(tty);
          fatal("stub's relocation %d unimplemented", (int)iter.type());
          break;
        }
      }
    }
  }
#endif
  uint entry_position = cache->_write_position;

  // Write code
  uint code_offset = 0;
  uint code_size = cgen->assembler()->pc() - start;
  uint n = cache->write_bytes(start, code_size);
  if (n != code_size) {
    return false;
  }
  // Write name
  uint name_offset = cache->_write_position - entry_position;
  uint name_size = (uint)strlen(name) + 1; // Includes terminating '\0'
  n = cache->write_bytes(name, name_size);
  if (n != name_size) {
    return false;
  }
  uint entry_size = cache->_write_position - entry_position;
  // Placement-new registers the entry inside the cache's entry storage.
  SCCEntry* entry = new(cache) SCCEntry(entry_position, entry_size, name_offset, name_size,
                                        code_offset, code_size, 0, 0,
                                        SCCEntry::Stub, (uint32_t)id);
  log_info(scc, stubs)("Wrote stub '%s' id:%d to Startup Code Cache '%s'", name, (int)id, cache->_cache_path);
  return true;
}

// Decode a klass reference from the cache stream at the current read
// position. Layout: a state uint (bit 0 = was-initialized flag, remaining
// bits = array dimension), then either a CDS offset (shared path) or a
// length-prefixed class name (lookup path). Advances the read position.
Klass* SCCReader::read_klass(const methodHandle& comp_method, bool shared) {
  uint code_offset = read_position();
  uint state = *(uint*)addr(code_offset);
  uint init_state = (state & 1);
  uint array_dim = (state >> 1);
  code_offset += sizeof(int);
  if (_cache->use_meta_ptrs() && shared) {
    // Shared path: klass is identified by its offset from SharedBaseAddress.
    uint klass_offset = *(uint*)addr(code_offset);
    code_offset += sizeof(uint);
    set_read_position(code_offset);
    Klass* k = (Klass*)((address)SharedBaseAddress + klass_offset);
    if (!MetaspaceShared::is_in_shared_metaspace((address)k)) {
      // Something changed in CDS
      set_lookup_failed();
      log_warning(scc)("Lookup failed for shared klass: " INTPTR_FORMAT " is not in CDS ", p2i((address)k));
      return nullptr;
    }
    assert(k->is_klass(), "sanity");
    ResourceMark rm;
    const char* comp_name = comp_method->name_and_sig_as_C_string();
    if (k->is_instance_klass() && !InstanceKlass::cast(k)->is_loaded()) {
      set_lookup_failed();
      log_warning(scc)("%d '%s' (L%d): Lookup failed for klass %s: not loaded",
                       compile_id(), comp_name, comp_level(), k->external_name());
      return nullptr;
    } else
    // Allow not initialized klass which was
  // Use the compiled method's class loader and protection domain for the
  // dictionary lookup; fall back to the default loader/domain if that fails.
  Thread* thread = Thread::current();
  Handle loader(thread, comp_method->method_holder()->class_loader());
  Handle protection_domain(thread, comp_method->method_holder()->protection_domain());
  Klass* k = SystemDictionary::find_instance_or_array_klass(thread, klass_sym, loader, protection_domain);
  assert(!thread->has_pending_exception(), "should not throw");
  if (k == nullptr && !loader.is_null()) {
    // Try default loader and domain
    k = SystemDictionary::find_instance_or_array_klass(thread, klass_sym, Handle(), Handle());
    assert(!thread->has_pending_exception(), "should not throw");
  }
  if (k != nullptr) {
    // Allow not initialized klass which was uninitialized during code caching
    if (k->is_instance_klass() && !InstanceKlass::cast(k)->is_initialized() && (init_state == 1)) {
      set_lookup_failed();
      log_warning(scc)("%d (L%d): Lookup failed for klass %s: not initialized", compile_id(), comp_level(), &(dest[0]));
      return nullptr;
    }
    log_info(scc)("%d (L%d): Klass lookup %s", compile_id(), comp_level(), k->external_name());
  } else {
    set_lookup_failed();
    log_warning(scc)("%d (L%d): Lookup failed for class %s", compile_id(), comp_level(), &(dest[0]));
    return nullptr;
  }
  return k;
}

// Decode a method reference from the cache stream at the current read
// position. Layout: either a CDS offset from SharedBaseAddress (shared
// path, with holder sanity checks), or three length-prefixed strings
// (holder, name, signature) resolved via SystemDictionary + find_method.
// Advances the read position; on any failure sets lookup_failed and
// returns nullptr.
Method* SCCReader::read_method(const methodHandle& comp_method, bool shared) {
  uint code_offset = read_position();
  if (_cache->use_meta_ptrs() && shared) {
    uint method_offset = *(uint*)addr(code_offset);
    code_offset += sizeof(uint);
    set_read_position(code_offset);
    Method* m = (Method*)((address)SharedBaseAddress + method_offset);
    if (!MetaspaceShared::is_in_shared_metaspace((address)m)) {
      // Something changed in CDS
      set_lookup_failed();
      log_warning(scc)("Lookup failed for shared method: " INTPTR_FORMAT " is not in CDS ", p2i((address)m));
      return nullptr;
    }
    assert(m->is_method(), "sanity");
    ResourceMark rm;
    const char* comp_name = comp_method->name_and_sig_as_C_string();
    // The holder must be a loaded, linked instance klass living in CDS.
    Klass* k = m->method_holder();
    if (!k->is_instance_klass()) {
      set_lookup_failed();
      log_warning(scc)("%d '%s' (L%d): Lookup failed for holder %s: not instance klass", compile_id(), comp_name, comp_level(), k->external_name());
      return nullptr;
    } else if (!MetaspaceShared::is_in_shared_metaspace((address)k)) {
      set_lookup_failed();
      log_warning(scc)("%d '%s' (L%d): Lookup failed for holder %s: not in CDS", compile_id(), comp_name, comp_level(), k->external_name());
      return nullptr;
    } else if (!InstanceKlass::cast(k)->is_loaded()) {
      set_lookup_failed();
      log_warning(scc)("%d '%s' (L%d): Lookup failed for holder %s: not loaded", compile_id(), comp_name, comp_level(), k->external_name());
      return nullptr;
    } else if (!InstanceKlass::cast(k)->is_linked()) {
      set_lookup_failed();
      log_warning(scc)("%d '%s' (L%d): Lookup failed for holder %s: not linked%s", compile_id(), comp_name, comp_level(), k->external_name(), (_preload ? " for code preload" : ""));
      return nullptr;
    }
    log_info(scc)("%d (L%d): Shared method lookup: %s", compile_id(), comp_level(), m->name_and_sig_as_C_string());
    return m;
  }
  // Non-shared path: three lengths then the three NUL-separated strings.
  int holder_length = *(int*)addr(code_offset);
  code_offset += sizeof(int);
  int name_length = *(int*)addr(code_offset);
  code_offset += sizeof(int);
  int signat_length = *(int*)addr(code_offset);
  code_offset += sizeof(int);

  const char* dest = addr(code_offset);
  code_offset += holder_length + 1 + name_length + 1 + signat_length + 1;
  set_read_position(code_offset);
  TempNewSymbol klass_sym = SymbolTable::probe(&(dest[0]), holder_length);
  if (klass_sym == nullptr) {
    set_lookup_failed();
    log_warning(scc)("%d (L%d): Probe failed for class %s", compile_id(), comp_level(), &(dest[0]));
    return nullptr;
  }
  // Use class loader of compiled method.
  Thread* thread = Thread::current();
  Handle loader(thread, comp_method->method_holder()->class_loader());
  Handle protection_domain(thread, comp_method->method_holder()->protection_domain());
  Klass* k = SystemDictionary::find_instance_or_array_klass(thread, klass_sym, loader, protection_domain);
  assert(!thread->has_pending_exception(), "should not throw");
  if (k == nullptr && !loader.is_null()) {
    // Try default loader and domain
    k = SystemDictionary::find_instance_or_array_klass(thread, klass_sym, Handle(), Handle());
    assert(!thread->has_pending_exception(), "should not throw");
  }
  if (k != nullptr) {
    if (!k->is_instance_klass()) {
      set_lookup_failed();
      log_warning(scc)("%d (L%d): Lookup failed for holder %s: not instance klass",
                       compile_id(), comp_level(), &(dest[0]));
      return nullptr;
    } else if (!InstanceKlass::cast(k)->is_linked()) {
      set_lookup_failed();
      log_warning(scc)("%d (L%d): Lookup failed for holder %s: not linked",
                       compile_id(), comp_level(), &(dest[0]));
      return nullptr;
    }
    log_info(scc)("%d (L%d): Holder lookup: %s", compile_id(), comp_level(), k->external_name());
  } else {
    set_lookup_failed();
    log_warning(scc)("%d (L%d): Lookup failed for holder %s",
                     compile_id(), comp_level(), &(dest[0]));
    return nullptr;
  }
  TempNewSymbol name_sym = SymbolTable::probe(&(dest[holder_length + 1]), name_length);
  int pos = holder_length + 1 + name_length + 1;
  TempNewSymbol sign_sym = SymbolTable::probe(&(dest[pos]), signat_length);
  if (name_sym == nullptr) {
    set_lookup_failed();
    log_warning(scc)("%d (L%d): Probe failed for method name %s",
                     compile_id(), comp_level(), &(dest[holder_length + 1]));
    return nullptr;
  }
  if (sign_sym == nullptr) {
    set_lookup_failed();
    log_warning(scc)("%d (L%d): Probe failed for method signature %s",
                     compile_id(), comp_level(), &(dest[pos]));
    return nullptr;
  }
  Method* m = InstanceKlass::cast(k)->find_method(name_sym, sign_sym);
  if (m != nullptr) {
    ResourceMark rm;
    log_info(scc)("%d (L%d): Method lookup: %s", compile_id(), comp_level(), m->name_and_sig_as_C_string());
  } else {
    set_lookup_failed();
    log_warning(scc)("%d (L%d): Lookup failed for method %s::%s%s",
                     compile_id(), comp_level(), &(dest[0]), &(dest[holder_length + 1]), &(dest[pos]));
    return nullptr;
  }
  return m;
}

// Encode a klass reference into the cache stream. For object arrays, the
// bottom klass is written together with the array dimension; bit 0 of the
// state word records whether the instance klass was initialized at caching
// time. Shared (CDS) klasses are written as an offset; others by name.
bool SCCache::write_klass(Klass* klass) {
  if (klass->is_hidden()) { // Skip such nmethod
    set_lookup_failed();
    return false;
  }
  bool can_use_meta_ptrs = _use_meta_ptrs;
  uint array_dim = 0;
  if (klass->is_objArray_klass()) {
    array_dim = ObjArrayKlass::cast(klass)->dimension();
    klass = ObjArrayKlass::cast(klass)->bottom_klass(); // overwrites klass
  }
  uint init_state = 0;
  if (klass->is_instance_klass()) {
    InstanceKlass* ik = InstanceKlass::cast(klass);
    ClassLoaderData* cld = ik->class_loader_data();
    if (!cld->is_builtin_class_loader_data()) {
      set_lookup_failed();
      return false;
    }
    if (_for_preload && !CDSAccess::can_generate_cached_code(ik)) {
      _for_preload = false;
      // Bailout if code has clinit barriers:
      // method will be recompiled without them in any case
      if (_has_clinit_barriers) {
        set_lookup_failed();
        return false;
      }
      can_use_meta_ptrs = false;
    }
    init_state = (ik->is_initialized() ? 1 : 0);
  }
  ResourceMark rm;
  uint state = (array_dim << 1) | (init_state & 1);
  if (can_use_meta_ptrs && CDSAccess::can_generate_cached_code(klass)) {
    // Shared path: DataKind tag, state word, then CDS-relative offset.
    DataKind kind = DataKind::Klass_Shared;
    uint n = write_bytes(&kind, sizeof(int));
    if (n != sizeof(int)) {
      return false;
    }
    // Record state of instance klass initialization.
    n = write_bytes(&state, sizeof(int));
    if (n != sizeof(int)) {
      return false;
    }
    uint klass_offset = CDSAccess::delta_from_shared_address_base((address)klass);
    n = write_bytes(&klass_offset, sizeof(uint));
    if (n != sizeof(uint)) {
      return false;
    }
    log_info(scc)("%d (L%d): Wrote shared klass: %s%s%s @ 0x%08x", compile_id(), comp_level(), klass->external_name(),
                  (!klass->is_instance_klass() ? "" : (init_state == 1 ? " (initialized)" : " (not-initialized)")),
                  (array_dim > 0 ? " (object array)" : ""),
                  klass_offset);
    return true;
  }
  // Bailout if code has clinit barriers:
  // method will be recompiled without them in any case
  if (_for_preload && _has_clinit_barriers) {
    set_lookup_failed();
    return false;
  }
  _for_preload = false;
  log_info(scc,cds)("%d (L%d): Not shared klass: %s", compile_id(), comp_level(), klass->external_name());
  // Non-shared path: DataKind tag, state word, then length-prefixed name.
  DataKind kind = DataKind::Klass;
  uint n = write_bytes(&kind, sizeof(int));
  if (n != sizeof(int)) {
    return false;
  }
  // Record state of instance klass initialization.
  n = write_bytes(&state, sizeof(int));
  if (n != sizeof(int)) {
    return false;
  }
  Symbol* name = klass->name();
  int name_length = name->utf8_length();
  int total_length = name_length + 1;
  char* dest = NEW_RESOURCE_ARRAY(char, total_length);
  name->as_C_string(dest, total_length);
  dest[total_length - 1] = '\0';
  // Optional diagnostic: report the klass's loader and protection domain.
  LogTarget(Info, scc, loader) log;
  if (log.is_enabled()) {
    LogStream ls(log);
    oop loader = klass->class_loader();
    oop domain = klass->protection_domain();
    ls.print("Class %s loader: ", dest);
    if (loader == nullptr) {
      ls.print("nullptr");
    } else {
      loader->print_value_on(&ls);
    }
    ls.print(" domain: ");
    if (domain == nullptr) {
      ls.print("nullptr");
    } else {
      domain->print_value_on(&ls);
    }
    ls.cr();
  }
  n = write_bytes(&name_length, sizeof(int));
  if (n != sizeof(int)) {
    return false;
  }
  n = write_bytes(dest, total_length);
  if (n != (uint)total_length) {
    return false;
  }
  log_info(scc)("%d (L%d): Wrote klass: %s%s%s",
                compile_id(), comp_level(),
                dest, (!klass->is_instance_klass() ? "" : (init_state == 1 ? " (initialized)" : " (not-initialized)")),
                (array_dim > 0 ? " (object array)" : ""));
  return true;
}

// Encode a method reference into the cache stream. Shared (CDS) methods
// are written as a DataKind::Method_Shared tag plus an offset from the
// shared address base; others as a DataKind::Method tag followed by three
// lengths and the NUL-separated holder/name/signature strings.
bool SCCache::write_method(Method* method) {
  bool can_use_meta_ptrs = _use_meta_ptrs;
  Klass* klass = method->method_holder();
  if (klass->is_instance_klass()) {
    InstanceKlass* ik = InstanceKlass::cast(klass);
    ClassLoaderData* cld = ik->class_loader_data();
    if (!cld->is_builtin_class_loader_data()) {
      set_lookup_failed();
      return false;
    }
    if (_for_preload && !CDSAccess::can_generate_cached_code(ik)) {
      _for_preload = false;
      // Bailout if code has clinit barriers:
      // method will be recompiled without them in any case
      if (_has_clinit_barriers) {
        set_lookup_failed();
        return false;
      }
      can_use_meta_ptrs = false;
    }
  }
  ResourceMark rm;
  if (can_use_meta_ptrs && CDSAccess::can_generate_cached_code(method)) {
    DataKind kind = DataKind::Method_Shared;
    uint n = write_bytes(&kind, sizeof(int));
    if (n != sizeof(int)) {
      return false;
    }
    uint method_offset = CDSAccess::delta_from_shared_address_base((address)method);
    n = write_bytes(&method_offset, sizeof(uint));
    if (n != sizeof(uint)) {
      return false;
    }
    log_info(scc)("%d (L%d): Wrote shared method: %s @ 0x%08x", compile_id(), comp_level(), method->name_and_sig_as_C_string(), method_offset);
    return true;
  }
  // Bailout if code has clinit barriers:
  // method will be recompiled without them in any case
  if (_for_preload && _has_clinit_barriers) {
    set_lookup_failed();
    return false;
  }
  _for_preload = false;
  log_info(scc,cds)("%d (L%d): Not shared method: %s", compile_id(), comp_level(), method->name_and_sig_as_C_string());
  if (method->is_hidden()) { // Skip such nmethod
    set_lookup_failed();
    return false;
  }
  DataKind kind = DataKind::Method;
  uint n = write_bytes(&kind, sizeof(int));
  if (n != sizeof(int)) {
    return false;
  }
  Symbol* name = method->name();
  Symbol* holder = method->klass_name();
  Symbol* signat = method->signature();
  int name_length = name->utf8_length();
  int holder_length = holder->utf8_length();
  int signat_length = signat->utf8_length();

  // Write sizes and strings
  // Build "holder\0name\0signature\0" in one resource-allocated buffer.
  int total_length = holder_length + 1 + name_length + 1 + signat_length + 1;
  char* dest = NEW_RESOURCE_ARRAY(char, total_length);
  holder->as_C_string(dest, total_length);
  dest[holder_length] = '\0';
  int pos = holder_length + 1;
  name->as_C_string(&(dest[pos]), (total_length - pos));
  pos += name_length;
  dest[pos++] = '\0';
  signat->as_C_string(&(dest[pos]), (total_length - pos));
  dest[total_length - 1] = '\0';

  // Optional diagnostic: report the holder's loader and protection domain.
  LogTarget(Info, scc, loader) log;
  if (log.is_enabled()) {
    LogStream ls(log);
    oop loader = klass->class_loader();
    oop domain = klass->protection_domain();
    ls.print("Holder %s loader: ", dest);
    if (loader == nullptr) {
      ls.print("nullptr");
    } else {
      loader->print_value_on(&ls);
    }
    ls.print(" domain: ");
    if (domain == nullptr) {
      ls.print("nullptr");
    } else {
      domain->print_value_on(&ls);
    }
    ls.cr();
  }

  n = write_bytes(&holder_length, sizeof(int));
  if (n != sizeof(int)) {
    return false;
  }
  n = write_bytes(&name_length, sizeof(int));
  if (n != sizeof(int)) {
    return false;
  }
  n = write_bytes(&signat_length, sizeof(int));
  if (n != sizeof(int)) {
    return false;
  }
  n = write_bytes(dest, total_length);
  if (n != (uint)total_length) {
    return false;
  }
  // Replace the NULs with spaces so the whole triple logs as one string.
  dest[holder_length] = ' ';
  dest[holder_length + 1 + name_length] = ' ';
  log_info(scc)("%d (L%d): Wrote method: %s", compile_id(), comp_level(), dest);
  return true;
}

// Repair the pc relative information in the code after load
bool SCCReader::read_relocations(CodeBuffer* buffer, CodeBuffer* orig_buffer,
                                 OopRecorder* oop_recorder, ciMethod* target) {
  bool success = true;
  // Per code section: relocation count, locs_point offset, the raw
  // relocInfo array, then one uint of extra data per relocation.
  for (int i = 0; i < (int)CodeBuffer::SECT_LIMIT; i++) {
    uint code_offset = read_position();
    int reloc_count = *(int*)addr(code_offset);
    code_offset += sizeof(int);
    if (reloc_count == 0) {
      set_read_position(code_offset);
      continue;
    }
    // Read _locs_point (as offset from start)
    int locs_point_off = *(int*)addr(code_offset);
    code_offset += sizeof(int);
    uint reloc_size = reloc_count * sizeof(relocInfo);
    CodeSection* cs = buffer->code_section(i);
    if (cs->locs_capacity() < reloc_count) {
      cs->expand_locs(reloc_count);
    }
    relocInfo* reloc_start = cs->locs_start();
    copy_bytes(addr(code_offset), (address)reloc_start, reloc_size);
    code_offset += reloc_size;
    cs->set_locs_end(reloc_start + reloc_count);
    cs->set_locs_point(cs->start() + locs_point_off);

    // Read additional relocation data: uint per relocation
    uint data_size = reloc_count * sizeof(uint);
    uint* reloc_data = (uint*)addr(code_offset);
    code_offset += data_size;
    set_read_position(code_offset);
    LogStreamHandle(Info, scc, reloc) log;
    if (log.is_enabled()) {
      log.print_cr("======== read code section %d relocations [%d]:", i, reloc_count);
    }
    RelocIterator iter(cs);
    int j = 0;
    while (iter.next()) {
      switch (iter.type()) {
        case relocInfo::none:
          break;
        case relocInfo::oop_type: {
          VM_ENTRY_MARK;
          oop_Relocation* r = (oop_Relocation*)iter.reloc();
          if (r->oop_is_immediate()) {
            assert(reloc_data[j] == (uint)j, "should be");
            methodHandle comp_method(THREAD, target->get_Method());
            jobject jo = read_oop(THREAD, comp_method);
            if (lookup_failed()) {
              success = false;
              break;
            }
            r->set_value((address)jo);
          } else if (false) {
            // Get already updated value from OopRecorder.
1819 assert(oop_recorder != nullptr, "sanity"); 1820 int index = r->oop_index(); 1821 jobject jo = oop_recorder->oop_at(index); 1822 oop obj = JNIHandles::resolve(jo); 1823 r->set_value(*reinterpret_cast<address*>(&obj)); 1824 } 1825 break; 1826 } 1827 case relocInfo::metadata_type: { 1828 VM_ENTRY_MARK; 1829 metadata_Relocation* r = (metadata_Relocation*)iter.reloc(); 1830 Metadata* m; 1831 if (r->metadata_is_immediate()) { 1832 assert(reloc_data[j] == (uint)j, "should be"); 1833 methodHandle comp_method(THREAD, target->get_Method()); 1834 m = read_metadata(comp_method); 1835 if (lookup_failed()) { 1836 success = false; 1837 break; 1838 } 1839 } else { 1840 // Get already updated value from OopRecorder. 1841 assert(oop_recorder != nullptr, "sanity"); 1842 int index = r->metadata_index(); 1843 m = oop_recorder->metadata_at(index); 1844 } 1845 r->set_value((address)m); 1846 break; 1847 } 1848 case relocInfo::virtual_call_type: // Fall through. They all call resolve_*_call blobs. 1849 case relocInfo::opt_virtual_call_type: 1850 case relocInfo::static_call_type: { 1851 address dest = _cache->address_for_id(reloc_data[j]); 1852 if (dest != (address)-1) { 1853 ((CallRelocation*)iter.reloc())->set_destination(dest); 1854 } 1855 break; 1856 } 1857 case relocInfo::trampoline_stub_type: { 1858 address dest = _cache->address_for_id(reloc_data[j]); 1859 if (dest != (address)-1) { 1860 ((trampoline_stub_Relocation*)iter.reloc())->set_destination(dest); 1861 } 1862 break; 1863 } 1864 case relocInfo::static_stub_type: 1865 break; 1866 case relocInfo::runtime_call_type: { 1867 address dest = _cache->address_for_id(reloc_data[j]); 1868 if (dest != (address)-1) { 1869 ((CallRelocation*)iter.reloc())->set_destination(dest); 1870 } 1871 break; 1872 } 1873 case relocInfo::runtime_call_w_cp_type: 1874 fatal("runtime_call_w_cp_type unimplemented"); 1875 //address destination = iter.reloc()->value(); 1876 break; 1877 case relocInfo::external_word_type: { 1878 address target = 
_cache->address_for_id(reloc_data[j]); 1879 int data_len = iter.datalen(); 1880 if (data_len > 0) { 1881 // Overwrite RelocInfo embedded address 1882 RelocationHolder rh = external_word_Relocation::spec(target); 1883 external_word_Relocation* new_reloc = (external_word_Relocation*)rh.reloc(); 1884 short buf[4] = {0}; // 8 bytes 1885 short* p = new_reloc->pack_data_to(buf); 1886 if ((p - buf) != data_len) { 1887 return false; // New address does not fit into old relocInfo 1888 } 1889 short* data = iter.data(); 1890 for (int i = 0; i < data_len; i++) { 1891 data[i] = buf[i]; 1892 } 1893 } 1894 external_word_Relocation* reloc = (external_word_Relocation*)iter.reloc(); 1895 reloc->set_value(target); // Patch address in the code 1896 iter.reloc()->fix_relocation_after_move(orig_buffer, buffer); 1897 break; 1898 } 1899 case relocInfo::internal_word_type: 1900 iter.reloc()->fix_relocation_after_move(orig_buffer, buffer); 1901 break; 1902 case relocInfo::section_word_type: 1903 iter.reloc()->fix_relocation_after_move(orig_buffer, buffer); 1904 break; 1905 case relocInfo::poll_type: 1906 break; 1907 case relocInfo::poll_return_type: 1908 break; 1909 case relocInfo::post_call_nop_type: 1910 break; 1911 case relocInfo::entry_guard_type: 1912 break; 1913 default: 1914 fatal("relocation %d unimplemented", (int)iter.type()); 1915 break; 1916 } 1917 if (success && log.is_enabled()) { 1918 iter.print_current_on(&log); 1919 } 1920 j++; 1921 } 1922 assert(j <= (int)reloc_count, "sanity"); 1923 } 1924 return success; 1925 } 1926 1927 bool SCCReader::read_code(CodeBuffer* buffer, CodeBuffer* orig_buffer, uint code_offset) { 1928 assert(code_offset == align_up(code_offset, DATA_ALIGNMENT), "%d not aligned to %d", code_offset, DATA_ALIGNMENT); 1929 assert(buffer->blob() != nullptr, "sanity"); 1930 SCCodeSection* scc_cs = (SCCodeSection*)addr(code_offset); 1931 for (int i = 0; i < (int)CodeBuffer::SECT_LIMIT; i++) { 1932 CodeSection* cs = buffer->code_section(i); 1933 // Read original 
section size and address. 1934 uint orig_size = scc_cs[i]._size; 1935 log_debug(scc)("======== read code section %d [%d]:", i, orig_size); 1936 uint orig_size_align = align_up(orig_size, DATA_ALIGNMENT); 1937 if (i != (int)CodeBuffer::SECT_INSTS) { 1938 buffer->initialize_section_size(cs, orig_size_align); 1939 } 1940 if (orig_size_align > (uint)cs->capacity()) { // Will not fit 1941 log_warning(scc)("%d (L%d): original code section %d size %d > current capacity %d", 1942 compile_id(), comp_level(), i, orig_size, cs->capacity()); 1943 return false; 1944 } 1945 if (orig_size == 0) { 1946 assert(cs->size() == 0, "should match"); 1947 continue; // skip trivial section 1948 } 1949 address orig_start = scc_cs[i]._origin_address; 1950 1951 // Populate fake original buffer (no code allocation in CodeCache). 1952 // It is used for relocations to calculate sections addesses delta. 1953 CodeSection* orig_cs = orig_buffer->code_section(i); 1954 assert(!orig_cs->is_allocated(), "This %d section should not be set", i); 1955 orig_cs->initialize(orig_start, orig_size); 1956 1957 // Load code to new buffer. 
1958 address code_start = cs->start(); 1959 copy_bytes(addr(scc_cs[i]._offset + code_offset), code_start, orig_size_align); 1960 cs->set_end(code_start + orig_size); 1961 } 1962 1963 return true; 1964 } 1965 1966 bool SCCache::load_exception_blob(CodeBuffer* buffer, int* pc_offset) { 1967 #ifdef ASSERT 1968 LogStreamHandle(Debug, scc, nmethod) log; 1969 if (log.is_enabled()) { 1970 FlagSetting fs(PrintRelocations, true); 1971 buffer->print_on(&log); 1972 } 1973 #endif 1974 SCCache* cache = open_for_read(); 1975 if (cache == nullptr) { 1976 return false; 1977 } 1978 SCCEntry* entry = cache->find_entry(SCCEntry::Blob, 999); 1979 if (entry == nullptr) { 1980 return false; 1981 } 1982 SCCReader reader(cache, entry, nullptr); 1983 return reader.compile_blob(buffer, pc_offset); 1984 } 1985 1986 bool SCCReader::compile_blob(CodeBuffer* buffer, int* pc_offset) { 1987 uint entry_position = _entry->offset(); 1988 1989 // Read pc_offset 1990 *pc_offset = *(int*)addr(entry_position); 1991 1992 // Read name 1993 uint name_offset = entry_position + _entry->name_offset(); 1994 uint name_size = _entry->name_size(); // Includes '/0' 1995 const char* name = addr(name_offset); 1996 1997 log_info(scc, stubs)("%d (L%d): Reading blob '%s' with pc_offset %d from Startup Code Cache '%s'", 1998 compile_id(), comp_level(), name, *pc_offset, _cache->cache_path()); 1999 2000 if (strncmp(buffer->name(), name, (name_size - 1)) != 0) { 2001 log_warning(scc)("%d (L%d): Saved blob's name '%s' is different from '%s'", 2002 compile_id(), comp_level(), name, buffer->name()); 2003 ((SCCache*)_cache)->set_failed(); 2004 return false; 2005 } 2006 2007 // Create fake original CodeBuffer 2008 CodeBuffer orig_buffer(name); 2009 2010 // Read code 2011 uint code_offset = entry_position + _entry->code_offset(); 2012 if (!read_code(buffer, &orig_buffer, code_offset)) { 2013 return false; 2014 } 2015 2016 // Read relocations 2017 uint reloc_offset = entry_position + _entry->reloc_offset(); 2018 
set_read_position(reloc_offset); 2019 if (!read_relocations(buffer, &orig_buffer, nullptr, nullptr)) { 2020 return false; 2021 } 2022 2023 log_info(scc, stubs)("%d (L%d): Read blob '%s' from Startup Code Cache '%s'", 2024 compile_id(), comp_level(), name, _cache->cache_path()); 2025 #ifdef ASSERT 2026 LogStreamHandle(Debug, scc, nmethod) log; 2027 if (log.is_enabled()) { 2028 FlagSetting fs(PrintRelocations, true); 2029 buffer->print_on(&log); 2030 buffer->decode(); 2031 } 2032 #endif 2033 return true; 2034 } 2035 2036 bool SCCache::write_relocations(CodeBuffer* buffer, uint& all_reloc_size) { 2037 uint all_reloc_count = 0; 2038 for (int i = 0; i < (int)CodeBuffer::SECT_LIMIT; i++) { 2039 CodeSection* cs = buffer->code_section(i); 2040 uint reloc_count = cs->has_locs() ? cs->locs_count() : 0; 2041 all_reloc_count += reloc_count; 2042 } 2043 all_reloc_size = all_reloc_count * sizeof(relocInfo); 2044 bool success = true; 2045 uint* reloc_data = NEW_C_HEAP_ARRAY(uint, all_reloc_count, mtCode); 2046 for (int i = 0; i < (int)CodeBuffer::SECT_LIMIT; i++) { 2047 CodeSection* cs = buffer->code_section(i); 2048 int reloc_count = cs->has_locs() ? 
cs->locs_count() : 0; 2049 uint n = write_bytes(&reloc_count, sizeof(int)); 2050 if (n != sizeof(int)) { 2051 success = false; 2052 break; 2053 } 2054 if (reloc_count == 0) { 2055 continue; 2056 } 2057 // Write _locs_point (as offset from start) 2058 int locs_point_off = cs->locs_point_off(); 2059 n = write_bytes(&locs_point_off, sizeof(int)); 2060 if (n != sizeof(int)) { 2061 success = false; 2062 break; 2063 } 2064 relocInfo* reloc_start = cs->locs_start(); 2065 uint reloc_size = reloc_count * sizeof(relocInfo); 2066 n = write_bytes(reloc_start, reloc_size); 2067 if (n != reloc_size) { 2068 success = false; 2069 break; 2070 } 2071 LogStreamHandle(Info, scc, reloc) log; 2072 if (log.is_enabled()) { 2073 log.print_cr("======== write code section %d relocations [%d]:", i, reloc_count); 2074 } 2075 // Collect additional data 2076 RelocIterator iter(cs); 2077 bool has_immediate = false; 2078 int j = 0; 2079 while (iter.next()) { 2080 reloc_data[j] = 0; // initialize 2081 switch (iter.type()) { 2082 case relocInfo::none: 2083 break; 2084 case relocInfo::oop_type: { 2085 oop_Relocation* r = (oop_Relocation*)iter.reloc(); 2086 if (r->oop_is_immediate()) { 2087 reloc_data[j] = (uint)j; // Indication that we need to restore immediate 2088 has_immediate = true; 2089 } 2090 break; 2091 } 2092 case relocInfo::metadata_type: { 2093 metadata_Relocation* r = (metadata_Relocation*)iter.reloc(); 2094 if (r->metadata_is_immediate()) { 2095 reloc_data[j] = (uint)j; // Indication that we need to restore immediate 2096 has_immediate = true; 2097 } 2098 break; 2099 } 2100 case relocInfo::virtual_call_type: // Fall through. They all call resolve_*_call blobs. 
2101 case relocInfo::opt_virtual_call_type: 2102 case relocInfo::static_call_type: { 2103 CallRelocation* r = (CallRelocation*)iter.reloc(); 2104 address dest = r->destination(); 2105 if (dest == r->addr()) { // possible call via trampoline on Aarch64 2106 dest = (address)-1; // do nothing in this case when loading this relocation 2107 } 2108 reloc_data[j] = _table->id_for_address(dest, iter, buffer); 2109 break; 2110 } 2111 case relocInfo::trampoline_stub_type: { 2112 address dest = ((trampoline_stub_Relocation*)iter.reloc())->destination(); 2113 reloc_data[j] = _table->id_for_address(dest, iter, buffer); 2114 break; 2115 } 2116 case relocInfo::static_stub_type: 2117 break; 2118 case relocInfo::runtime_call_type: { 2119 // Record offset of runtime destination 2120 CallRelocation* r = (CallRelocation*)iter.reloc(); 2121 address dest = r->destination(); 2122 if (dest == r->addr()) { // possible call via trampoline on Aarch64 2123 dest = (address)-1; // do nothing in this case when loading this relocation 2124 } 2125 reloc_data[j] = _table->id_for_address(dest, iter, buffer); 2126 break; 2127 } 2128 case relocInfo::runtime_call_w_cp_type: 2129 fatal("runtime_call_w_cp_type unimplemented"); 2130 break; 2131 case relocInfo::external_word_type: { 2132 // Record offset of runtime target 2133 address target = ((external_word_Relocation*)iter.reloc())->target(); 2134 reloc_data[j] = _table->id_for_address(target, iter, buffer); 2135 break; 2136 } 2137 case relocInfo::internal_word_type: 2138 break; 2139 case relocInfo::section_word_type: 2140 break; 2141 case relocInfo::poll_type: 2142 break; 2143 case relocInfo::poll_return_type: 2144 break; 2145 case relocInfo::post_call_nop_type: 2146 break; 2147 case relocInfo::entry_guard_type: 2148 break; 2149 default: 2150 fatal("relocation %d unimplemented", (int)iter.type()); 2151 break; 2152 } 2153 if (log.is_enabled()) { 2154 iter.print_current_on(&log); 2155 } 2156 j++; 2157 } 2158 assert(j <= (int)reloc_count, "sanity"); 2159 
// Write additional relocation data: uint per relocation 2160 uint data_size = reloc_count * sizeof(uint); 2161 n = write_bytes(reloc_data, data_size); 2162 if (n != data_size) { 2163 success = false; 2164 break; 2165 } 2166 if (has_immediate) { 2167 // Save information about immediates in this Code Section 2168 RelocIterator iter_imm(cs); 2169 int j = 0; 2170 while (iter_imm.next()) { 2171 switch (iter_imm.type()) { 2172 case relocInfo::oop_type: { 2173 oop_Relocation* r = (oop_Relocation*)iter_imm.reloc(); 2174 if (r->oop_is_immediate()) { 2175 assert(reloc_data[j] == (uint)j, "should be"); 2176 jobject jo = *(jobject*)(r->oop_addr()); // Handle currently 2177 if (!write_oop(jo)) { 2178 success = false; 2179 } 2180 } 2181 break; 2182 } 2183 case relocInfo::metadata_type: { 2184 metadata_Relocation* r = (metadata_Relocation*)iter_imm.reloc(); 2185 if (r->metadata_is_immediate()) { 2186 assert(reloc_data[j] == (uint)j, "should be"); 2187 Metadata* m = r->metadata_value(); 2188 if (!write_metadata(m)) { 2189 success = false; 2190 } 2191 } 2192 break; 2193 } 2194 default: 2195 break; 2196 } 2197 if (!success) { 2198 break; 2199 } 2200 j++; 2201 } // while (iter_imm.next()) 2202 } // if (has_immediate) 2203 } // for(i < SECT_LIMIT) 2204 FREE_C_HEAP_ARRAY(uint, reloc_data); 2205 return success; 2206 } 2207 2208 bool SCCache::write_code(CodeBuffer* buffer, uint& code_size) { 2209 assert(_write_position == align_up(_write_position, DATA_ALIGNMENT), "%d not aligned to %d", _write_position, DATA_ALIGNMENT); 2210 //assert(buffer->blob() != nullptr, "sanity"); 2211 uint code_offset = _write_position; 2212 uint cb_total_size = (uint)buffer->total_content_size(); 2213 // Write information about Code sections first. 
2214 SCCodeSection scc_cs[CodeBuffer::SECT_LIMIT]; 2215 uint scc_cs_size = (uint)(sizeof(SCCodeSection) * CodeBuffer::SECT_LIMIT); 2216 uint offset = align_up(scc_cs_size, DATA_ALIGNMENT); 2217 uint total_size = 0; 2218 for (int i = 0; i < (int)CodeBuffer::SECT_LIMIT; i++) { 2219 const CodeSection* cs = buffer->code_section(i); 2220 assert(cs->mark() == nullptr, "CodeSection::_mark is not implemented"); 2221 uint cs_size = (uint)cs->size(); 2222 scc_cs[i]._size = cs_size; 2223 scc_cs[i]._origin_address = (cs_size == 0) ? nullptr : cs->start(); 2224 scc_cs[i]._offset = (cs_size == 0) ? 0 : (offset + total_size); 2225 assert(cs->mark() == nullptr, "CodeSection::_mark is not implemented"); 2226 total_size += align_up(cs_size, DATA_ALIGNMENT); 2227 } 2228 uint n = write_bytes(scc_cs, scc_cs_size); 2229 if (n != scc_cs_size) { 2230 return false; 2231 } 2232 if (!align_write()) { 2233 return false; 2234 } 2235 assert(_write_position == (code_offset + offset), "%d != (%d + %d)", _write_position, code_offset, offset); 2236 for (int i = 0; i < (int)CodeBuffer::SECT_LIMIT; i++) { 2237 const CodeSection* cs = buffer->code_section(i); 2238 uint cs_size = (uint)cs->size(); 2239 if (cs_size == 0) { 2240 continue; // skip trivial section 2241 } 2242 assert((_write_position - code_offset) == scc_cs[i]._offset, "%d != %d", _write_position, scc_cs[i]._offset); 2243 // Write code 2244 n = write_bytes(cs->start(), cs_size); 2245 if (n != cs_size) { 2246 return false; 2247 } 2248 if (!align_write()) { 2249 return false; 2250 } 2251 } 2252 assert((_write_position - code_offset) == (offset + total_size), "(%d - %d) != (%d + %d)", _write_position, code_offset, offset, total_size); 2253 code_size = total_size; 2254 return true; 2255 } 2256 2257 bool SCCache::store_exception_blob(CodeBuffer* buffer, int pc_offset) { 2258 SCCache* cache = open_for_write(); 2259 if (cache == nullptr) { 2260 return false; 2261 } 2262 log_info(scc, stubs)("Writing blob '%s' to Startup Code Cache '%s'", 
buffer->name(), cache->_cache_path); 2263 2264 #ifdef ASSERT 2265 LogStreamHandle(Debug, scc, nmethod) log; 2266 if (log.is_enabled()) { 2267 FlagSetting fs(PrintRelocations, true); 2268 buffer->print_on(&log); 2269 buffer->decode(); 2270 } 2271 #endif 2272 if (!cache->align_write()) { 2273 return false; 2274 } 2275 uint entry_position = cache->_write_position; 2276 2277 // Write pc_offset 2278 uint n = cache->write_bytes(&pc_offset, sizeof(int)); 2279 if (n != sizeof(int)) { 2280 return false; 2281 } 2282 2283 // Write name 2284 const char* name = buffer->name(); 2285 uint name_offset = cache->_write_position - entry_position; 2286 uint name_size = (uint)strlen(name) + 1; // Includes '/0' 2287 n = cache->write_bytes(name, name_size); 2288 if (n != name_size) { 2289 return false; 2290 } 2291 2292 // Write code section 2293 if (!cache->align_write()) { 2294 return false; 2295 } 2296 uint code_offset = cache->_write_position - entry_position; 2297 uint code_size = 0; 2298 if (!cache->write_code(buffer, code_size)) { 2299 return false; 2300 } 2301 // Write relocInfo array 2302 uint reloc_offset = cache->_write_position - entry_position; 2303 uint reloc_size = 0; 2304 if (!cache->write_relocations(buffer, reloc_size)) { 2305 return false; 2306 } 2307 2308 uint entry_size = cache->_write_position - entry_position; 2309 SCCEntry* entry = new(cache) SCCEntry(entry_position, entry_size, name_offset, name_size, 2310 code_offset, code_size, reloc_offset, reloc_size, 2311 SCCEntry::Blob, (uint32_t)999); 2312 log_info(scc, stubs)("Wrote stub '%s' to Startup Code Cache '%s'", name, cache->_cache_path); 2313 return true; 2314 } 2315 2316 DebugInformationRecorder* SCCReader::read_debug_info(OopRecorder* oop_recorder) { 2317 uint code_offset = align_up(read_position(), DATA_ALIGNMENT); 2318 int data_size = *(int*)addr(code_offset); 2319 code_offset += sizeof(int); 2320 int pcs_length = *(int*)addr(code_offset); 2321 code_offset += sizeof(int); 2322 2323 log_debug(scc)("======== 
read DebugInfo [%d, %d]:", data_size, pcs_length); 2324 2325 // Aligned initial sizes 2326 int data_size_align = align_up(data_size, DATA_ALIGNMENT); 2327 int pcs_length_align = pcs_length + 1; 2328 assert(sizeof(PcDesc) > DATA_ALIGNMENT, "sanity"); 2329 DebugInformationRecorder* recorder = new DebugInformationRecorder(oop_recorder, data_size_align, pcs_length); 2330 2331 copy_bytes(addr(code_offset), recorder->stream()->buffer(), data_size_align); 2332 recorder->stream()->set_position(data_size); 2333 code_offset += data_size; 2334 2335 uint pcs_size = pcs_length * sizeof(PcDesc); 2336 copy_bytes(addr(code_offset), (address)recorder->pcs(), pcs_size); 2337 code_offset += pcs_size; 2338 set_read_position(code_offset); 2339 return recorder; 2340 } 2341 2342 bool SCCache::write_debug_info(DebugInformationRecorder* recorder) { 2343 if (!align_write()) { 2344 return false; 2345 } 2346 // Don't call data_size() and pcs_size(). They will freeze OopRecorder. 2347 int data_size = recorder->stream()->position(); // In bytes 2348 uint n = write_bytes(&data_size, sizeof(int)); 2349 if (n != sizeof(int)) { 2350 return false; 2351 } 2352 int pcs_length = recorder->pcs_length(); // In bytes 2353 n = write_bytes(&pcs_length, sizeof(int)); 2354 if (n != sizeof(int)) { 2355 return false; 2356 } 2357 n = write_bytes(recorder->stream()->buffer(), data_size); 2358 if (n != (uint)data_size) { 2359 return false; 2360 } 2361 uint pcs_size = pcs_length * sizeof(PcDesc); 2362 n = write_bytes(recorder->pcs(), pcs_size); 2363 if (n != pcs_size) { 2364 return false; 2365 } 2366 return true; 2367 } 2368 2369 OopMapSet* SCCReader::read_oop_maps() { 2370 uint code_offset = read_position(); 2371 int om_count = *(int*)addr(code_offset); 2372 code_offset += sizeof(int); 2373 2374 log_debug(scc)("======== read oop maps [%d]:", om_count); 2375 2376 OopMapSet* oop_maps = new OopMapSet(om_count); 2377 for (int i = 0; i < (int)om_count; i++) { 2378 int data_size = *(int*)addr(code_offset); 2379 
code_offset += sizeof(int); 2380 2381 OopMap* oop_map = new OopMap(data_size); 2382 // Preserve allocated stream 2383 CompressedWriteStream* stream = oop_map->write_stream(); 2384 2385 // Read data which overwrites default data 2386 copy_bytes(addr(code_offset), (address)oop_map, sizeof(OopMap)); 2387 code_offset += sizeof(OopMap); 2388 stream->set_position(data_size); 2389 oop_map->set_write_stream(stream); 2390 if (data_size > 0) { 2391 copy_bytes(addr(code_offset), (address)(oop_map->data()), (uint)data_size); 2392 code_offset += data_size; 2393 } 2394 #ifdef ASSERT 2395 oop_map->_locs_length = 0; 2396 oop_map->_locs_used = nullptr; 2397 #endif 2398 oop_maps->add(oop_map); 2399 } 2400 set_read_position(code_offset); 2401 return oop_maps; 2402 } 2403 2404 bool SCCache::write_oop_maps(OopMapSet* oop_maps) { 2405 uint om_count = oop_maps->size(); 2406 uint n = write_bytes(&om_count, sizeof(int)); 2407 if (n != sizeof(int)) { 2408 return false; 2409 } 2410 for (int i = 0; i < (int)om_count; i++) { 2411 OopMap* om = oop_maps->at(i); 2412 int data_size = om->data_size(); 2413 n = write_bytes(&data_size, sizeof(int)); 2414 if (n != sizeof(int)) { 2415 return false; 2416 } 2417 n = write_bytes(om, sizeof(OopMap)); 2418 if (n != sizeof(OopMap)) { 2419 return false; 2420 } 2421 n = write_bytes(om->data(), (uint)data_size); 2422 if (n != (uint)data_size) { 2423 return false; 2424 } 2425 } 2426 return true; 2427 } 2428 2429 jobject SCCReader::read_oop(JavaThread* thread, const methodHandle& comp_method) { 2430 uint code_offset = read_position(); 2431 oop obj = nullptr; 2432 DataKind kind = *(DataKind*)addr(code_offset); 2433 code_offset += sizeof(DataKind); 2434 set_read_position(code_offset); 2435 if (kind == DataKind::Null) { 2436 return nullptr; 2437 } else if (kind == DataKind::No_Data) { 2438 return (jobject)Universe::non_oop_word(); 2439 } else if (kind == DataKind::Klass || kind == DataKind::Klass_Shared) { 2440 Klass* k = read_klass(comp_method, (kind == 
DataKind::Klass_Shared)); 2441 if (k == nullptr) { 2442 return nullptr; 2443 } 2444 obj = k->java_mirror(); 2445 if (obj == nullptr) { 2446 set_lookup_failed(); 2447 log_warning(scc)("Lookup failed for java_mirror of klass %s", k->external_name()); 2448 return nullptr; 2449 } 2450 } else if (kind == DataKind::Primitive) { 2451 code_offset = read_position(); 2452 int t = *(int*)addr(code_offset); 2453 code_offset += sizeof(int); 2454 set_read_position(code_offset); 2455 BasicType bt = (BasicType)t; 2456 obj = java_lang_Class::primitive_mirror(bt); 2457 log_info(scc)("%d (L%d): Read primitive type klass: %s", compile_id(), comp_level(), type2name(bt)); 2458 } else if (kind == DataKind::String_Shared) { 2459 code_offset = read_position(); 2460 int k = *(int*)addr(code_offset); 2461 code_offset += sizeof(int); 2462 set_read_position(code_offset); 2463 obj = CDSAccess::get_archived_object(k); 2464 assert(k == CDSAccess::get_archived_object_permanent_index(obj), "sanity"); 2465 } else if (kind == DataKind::String) { 2466 code_offset = read_position(); 2467 int length = *(int*)addr(code_offset); 2468 code_offset += sizeof(int); 2469 set_read_position(code_offset); 2470 const char* dest = addr(code_offset); 2471 set_read_position(code_offset + length); 2472 obj = StringTable::intern(&(dest[0]), thread); 2473 if (obj == nullptr) { 2474 set_lookup_failed(); 2475 log_warning(scc)("%d (L%d): Lookup failed for String %s", 2476 compile_id(), comp_level(), &(dest[0])); 2477 return nullptr; 2478 } 2479 assert(java_lang_String::is_instance(obj), "must be string"); 2480 log_info(scc)("%d (L%d): Read String: %s", compile_id(), comp_level(), dest); 2481 } else if (kind == DataKind::SysLoader) { 2482 obj = SystemDictionary::java_system_loader(); 2483 log_info(scc)("%d (L%d): Read java_system_loader", compile_id(), comp_level()); 2484 } else if (kind == DataKind::PlaLoader) { 2485 obj = SystemDictionary::java_platform_loader(); 2486 log_info(scc)("%d (L%d): Read java_platform_loader", 
compile_id(), comp_level()); 2487 } else if (kind == DataKind::MH_Oop_Shared) { 2488 code_offset = read_position(); 2489 int k = *(int*)addr(code_offset); 2490 code_offset += sizeof(int); 2491 set_read_position(code_offset); 2492 obj = CDSAccess::get_archived_object(k); 2493 assert(k == CDSAccess::get_archived_object_permanent_index(obj), "sanity"); 2494 } else { 2495 set_lookup_failed(); 2496 log_warning(scc)("%d (L%d): Unknown oop's kind: %d", 2497 compile_id(), comp_level(), (int)kind); 2498 return nullptr; 2499 } 2500 return JNIHandles::make_local(thread, obj); 2501 } 2502 2503 bool SCCReader::read_oops(OopRecorder* oop_recorder, ciMethod* target) { 2504 uint code_offset = read_position(); 2505 int oop_count = *(int*)addr(code_offset); 2506 code_offset += sizeof(int); 2507 set_read_position(code_offset); 2508 log_debug(scc)("======== read oops [%d]:", oop_count); 2509 if (oop_count == 0) { 2510 return true; 2511 } 2512 { 2513 VM_ENTRY_MARK; 2514 methodHandle comp_method(THREAD, target->get_Method()); 2515 for (int i = 1; i < oop_count; i++) { 2516 jobject jo = read_oop(THREAD, comp_method); 2517 if (lookup_failed()) { 2518 return false; 2519 } 2520 if (oop_recorder->is_real(jo)) { 2521 oop_recorder->find_index(jo); 2522 } else { 2523 oop_recorder->allocate_oop_index(jo); 2524 } 2525 LogStreamHandle(Debug, scc, oops) log; 2526 if (log.is_enabled()) { 2527 log.print("%d: " INTPTR_FORMAT " ", i, p2i(jo)); 2528 if (jo == (jobject)Universe::non_oop_word()) { 2529 log.print("non-oop word"); 2530 } else if (jo == nullptr) { 2531 log.print("nullptr-oop"); 2532 } else { 2533 JNIHandles::resolve(jo)->print_value_on(&log); 2534 } 2535 log.cr(); 2536 } 2537 } 2538 } 2539 return true; 2540 } 2541 2542 Metadata* SCCReader::read_metadata(const methodHandle& comp_method) { 2543 uint code_offset = read_position(); 2544 Metadata* m = nullptr; 2545 DataKind kind = *(DataKind*)addr(code_offset); 2546 code_offset += sizeof(DataKind); 2547 set_read_position(code_offset); 2548 if 
(kind == DataKind::Null) { 2549 m = (Metadata*)nullptr; 2550 } else if (kind == DataKind::No_Data) { 2551 m = (Metadata*)Universe::non_oop_word(); 2552 } else if (kind == DataKind::Klass || kind == DataKind::Klass_Shared) { 2553 m = (Metadata*)read_klass(comp_method, (kind == DataKind::Klass_Shared)); 2554 } else if (kind == DataKind::Method || kind == DataKind::Method_Shared) { 2555 m = (Metadata*)read_method(comp_method, (kind == DataKind::Method_Shared)); 2556 } else if (kind == DataKind::MethodCnts) { 2557 kind = *(DataKind*)addr(code_offset); 2558 bool shared = (kind == DataKind::Method_Shared); 2559 assert(kind == DataKind::Method || shared, "Sanity"); 2560 code_offset += sizeof(DataKind); 2561 set_read_position(code_offset); 2562 m = (Metadata*)read_method(comp_method, shared); 2563 if (m != nullptr) { 2564 Method* method = (Method*)m; 2565 m = method->get_method_counters(Thread::current()); 2566 if (m == nullptr) { 2567 set_lookup_failed(); 2568 log_warning(scc)("%d (L%d): Failed to get MethodCounters", compile_id(), comp_level()); 2569 } else { 2570 log_info(scc)("%d (L%d): Read MethodCounters : " INTPTR_FORMAT, compile_id(), comp_level(), p2i(m)); 2571 } 2572 } 2573 } else { 2574 set_lookup_failed(); 2575 log_warning(scc)("%d (L%d): Unknown metadata's kind: %d", compile_id(), comp_level(), (int)kind); 2576 } 2577 return m; 2578 } 2579 2580 bool SCCReader::read_metadata(OopRecorder* oop_recorder, ciMethod* target) { 2581 uint code_offset = read_position(); 2582 int metadata_count = *(int*)addr(code_offset); 2583 code_offset += sizeof(int); 2584 set_read_position(code_offset); 2585 2586 log_debug(scc)("======== read metadata [%d]:", metadata_count); 2587 2588 if (metadata_count == 0) { 2589 return true; 2590 } 2591 { 2592 VM_ENTRY_MARK; 2593 methodHandle comp_method(THREAD, target->get_Method()); 2594 2595 for (int i = 1; i < metadata_count; i++) { 2596 Metadata* m = read_metadata(comp_method); 2597 if (lookup_failed()) { 2598 return false; 2599 } 2600 if 
(oop_recorder->is_real(m)) { 2601 oop_recorder->find_index(m); 2602 } else { 2603 oop_recorder->allocate_metadata_index(m); 2604 } 2605 LogTarget(Debug, scc, metadata) log; 2606 if (log.is_enabled()) { 2607 LogStream ls(log); 2608 ls.print("%d: " INTPTR_FORMAT " ", i, p2i(m)); 2609 if (m == (Metadata*)Universe::non_oop_word()) { 2610 ls.print("non-metadata word"); 2611 } else if (m == nullptr) { 2612 ls.print("nullptr-oop"); 2613 } else { 2614 Metadata::print_value_on_maybe_null(&ls, m); 2615 } 2616 ls.cr(); 2617 } 2618 } 2619 } 2620 return true; 2621 } 2622 2623 bool SCCache::write_oop(jobject& jo) { 2624 DataKind kind; 2625 uint n = 0; 2626 oop obj = JNIHandles::resolve(jo); 2627 if (jo == nullptr) { 2628 kind = DataKind::Null; 2629 n = write_bytes(&kind, sizeof(int)); 2630 if (n != sizeof(int)) { 2631 return false; 2632 } 2633 } else if (jo == (jobject)Universe::non_oop_word()) { 2634 kind = DataKind::No_Data; 2635 n = write_bytes(&kind, sizeof(int)); 2636 if (n != sizeof(int)) { 2637 return false; 2638 } 2639 } else if (java_lang_Class::is_instance(obj)) { 2640 if (java_lang_Class::is_primitive(obj)) { 2641 int bt = (int)java_lang_Class::primitive_type(obj); 2642 kind = DataKind::Primitive; 2643 n = write_bytes(&kind, sizeof(int)); 2644 if (n != sizeof(int)) { 2645 return false; 2646 } 2647 n = write_bytes(&bt, sizeof(int)); 2648 if (n != sizeof(int)) { 2649 return false; 2650 } 2651 log_info(scc)("%d (L%d): Write primitive type klass: %s", compile_id(), comp_level(), type2name((BasicType)bt)); 2652 } else { 2653 Klass* klass = java_lang_Class::as_Klass(obj); 2654 if (!write_klass(klass)) { 2655 return false; 2656 } 2657 } 2658 } else if (java_lang_String::is_instance(obj)) { // herere 2659 int k = CDSAccess::get_archived_object_permanent_index(obj); // k >= 1 means obj is a "permanent heap object" 2660 if (k > 0) { 2661 kind = DataKind::String_Shared; 2662 n = write_bytes(&kind, sizeof(int)); 2663 if (n != sizeof(int)) { 2664 return false; 2665 } 2666 n = 
// Serializes a single oop (held via 'jo') into the cache stream as a
// DataKind tag followed by kind-specific payload. Only a closed set of oop
// shapes is supported (null, non-oop word, Class mirrors, Strings,
// well-known class loaders, and CDS-archived "permanent" heap objects);
// anything else sets lookup_failed so the caller can skip the nmethod.
// Returns false on a short write or an unsupported oop.
// NOTE: the kind tag is written with sizeof(int) — assumes DataKind's
// underlying type is int-sized.
bool SCCache::write_oop(jobject& jo) {
  DataKind kind;
  uint n = 0;
  oop obj = JNIHandles::resolve(jo);
  if (jo == nullptr) {
    kind = DataKind::Null;
    n = write_bytes(&kind, sizeof(int));
    if (n != sizeof(int)) {
      return false;
    }
  } else if (jo == (jobject)Universe::non_oop_word()) {
    kind = DataKind::No_Data;
    n = write_bytes(&kind, sizeof(int));
    if (n != sizeof(int)) {
      return false;
    }
  } else if (java_lang_Class::is_instance(obj)) {
    if (java_lang_Class::is_primitive(obj)) {
      // Primitive mirror: store the BasicType id, not the klass.
      int bt = (int)java_lang_Class::primitive_type(obj);
      kind = DataKind::Primitive;
      n = write_bytes(&kind, sizeof(int));
      if (n != sizeof(int)) {
        return false;
      }
      n = write_bytes(&bt, sizeof(int));
      if (n != sizeof(int)) {
        return false;
      }
      log_info(scc)("%d (L%d): Write primitive type klass: %s", compile_id(), comp_level(), type2name((BasicType)bt));
    } else {
      // Regular mirror: delegate to the klass writer (emits its own kind tag).
      Klass* klass = java_lang_Class::as_Klass(obj);
      if (!write_klass(klass)) {
        return false;
      }
    }
  } else if (java_lang_String::is_instance(obj)) {
    // Prefer the CDS permanent-object index when available: cheap and stable.
    int k = CDSAccess::get_archived_object_permanent_index(obj); // k >= 1 means obj is a "permanent heap object"
    if (k > 0) {
      kind = DataKind::String_Shared;
      n = write_bytes(&kind, sizeof(int));
      if (n != sizeof(int)) {
        return false;
      }
      n = write_bytes(&k, sizeof(int));
      if (n != sizeof(int)) {
        return false;
      }
      return true;
    }
    // Not archived: store the string contents inline as UTF-8.
    kind = DataKind::String;
    n = write_bytes(&kind, sizeof(int));
    if (n != sizeof(int)) {
      return false;
    }
    ResourceMark rm;
    int length = 0;
    const char* string = java_lang_String::as_utf8_string(obj, length);
    length++; // +1 to include the trailing '\0' in the stored bytes
    n = write_bytes(&length, sizeof(int));
    if (n != sizeof(int)) {
      return false;
    }
    n = write_bytes(string, (uint)length);
    if (n != (uint)length) {
      return false;
    }
    log_info(scc)("%d (L%d): Write String: %s", compile_id(), comp_level(), string);
  } else if (java_lang_Module::is_instance(obj)) {
    fatal("Module object unimplemented");
  } else if (java_lang_ClassLoader::is_instance(obj)) {
    // Only the two well-known loaders can be stored; they are encoded by
    // identity (kind tag only, no payload).
    if (obj == SystemDictionary::java_system_loader()) {
      kind = DataKind::SysLoader;
      log_info(scc)("%d (L%d): Write ClassLoader: java_system_loader", compile_id(), comp_level());
    } else if (obj == SystemDictionary::java_platform_loader()) {
      kind = DataKind::PlaLoader;
      log_info(scc)("%d (L%d): Write ClassLoader: java_platform_loader", compile_id(), comp_level());
    } else {
      fatal("ClassLoader object unimplemented");
      return false; // unreachable: fatal() does not return
    }
    n = write_bytes(&kind, sizeof(int));
    if (n != sizeof(int)) {
      return false;
    }
  } else {
    // Any other oop is storable only if CDS archived it (e.g. MethodHandle
    // related objects, judging by the kind name — verify against readers).
    int k = CDSAccess::get_archived_object_permanent_index(obj); // k >= 1 means obj is a "permanent heap object"
    if (k > 0) {
      kind = DataKind::MH_Oop_Shared;
      n = write_bytes(&kind, sizeof(int));
      if (n != sizeof(int)) {
        return false;
      }
      n = write_bytes(&k, sizeof(int));
      if (n != sizeof(int)) {
        return false;
      }
      return true;
    }
    // Unhandled oop - bailout
    set_lookup_failed();
    log_warning(scc, nmethod)("%d (L%d): Unhandled obj: " PTR_FORMAT " : %s",
                              compile_id(), comp_level(), p2i(obj), obj->klass()->external_name());
    return false;
  }
  return true;
}
INTPTR_FORMAT " unimplemented", p2i(m)); 2793 return false; 2794 } 2795 return true; 2796 } 2797 2798 bool SCCache::write_metadata(OopRecorder* oop_recorder) { 2799 int metadata_count = oop_recorder->metadata_count(); 2800 uint n = write_bytes(&metadata_count, sizeof(int)); 2801 if (n != sizeof(int)) { 2802 return false; 2803 } 2804 2805 log_debug(scc)("======== write metadata [%d]:", metadata_count); 2806 2807 for (int i = 1; i < metadata_count; i++) { // skip first virtual nullptr 2808 Metadata* m = oop_recorder->metadata_at(i); 2809 LogStreamHandle(Debug, scc, metadata) log; 2810 if (log.is_enabled()) { 2811 log.print("%d: " INTPTR_FORMAT " ", i, p2i(m)); 2812 if (m == (Metadata*)Universe::non_oop_word()) { 2813 log.print("non-metadata word"); 2814 } else if (m == nullptr) { 2815 log.print("nullptr-oop"); 2816 } else { 2817 Metadata::print_value_on_maybe_null(&log, m); 2818 } 2819 log.cr(); 2820 } 2821 if (!write_metadata(m)) { 2822 return false; 2823 } 2824 } 2825 return true; 2826 } 2827 2828 bool SCCReader::read_dependencies(Dependencies* dependencies) { 2829 uint code_offset = read_position(); 2830 int dependencies_size = *(int*)addr(code_offset); 2831 2832 log_debug(scc)("======== read dependencies [%d]:", dependencies_size); 2833 2834 code_offset += sizeof(int); 2835 code_offset = align_up(code_offset, DATA_ALIGNMENT); 2836 if (dependencies_size > 0) { 2837 dependencies->set_content((u_char*)addr(code_offset), dependencies_size); 2838 } 2839 code_offset += dependencies_size; 2840 set_read_position(code_offset); 2841 return true; 2842 } 2843 2844 bool SCCache::load_nmethod(ciEnv* env, ciMethod* target, int entry_bci, AbstractCompiler* compiler, CompLevel comp_level) { 2845 TraceTime t1("SC total load time", &_t_totalLoad, enable_timers(), false); 2846 CompileTask* task = env->task(); 2847 SCCEntry* entry = task->scc_entry(); 2848 bool preload = task->preload(); 2849 assert(entry != nullptr, "sanity"); 2850 SCCache* cache = open_for_read(); 2851 if (cache == 
nullptr) { 2852 return false; 2853 } 2854 if (log_is_enabled(Info, scc, nmethod)) { 2855 uint decomp = (target->method_data() == nullptr) ? 0 : target->method_data()->decompile_count(); 2856 VM_ENTRY_MARK; 2857 ResourceMark rm; 2858 methodHandle method(THREAD, target->get_Method()); 2859 const char* target_name = method->name_and_sig_as_C_string(); 2860 uint hash = java_lang_String::hash_code((const jbyte*)target_name, (int)strlen(target_name)); 2861 bool clinit_brs = entry->has_clinit_barriers(); 2862 log_info(scc, nmethod)("%d (L%d): %s nmethod '%s' (decomp: %d, hash: " UINT32_FORMAT_X_0 "%s)", 2863 task->compile_id(), task->comp_level(), (preload ? "Preloading" : "Reading"), 2864 target_name, decomp, hash, (clinit_brs ? ", has clinit barriers" : "")); 2865 } 2866 ReadingMark rdmk; 2867 SCCReader reader(cache, entry, task); 2868 bool success = reader.compile(env, target, entry_bci, compiler); 2869 if (success) { 2870 task->set_num_inlined_bytecodes(entry->num_inlined_bytecodes()); 2871 } else { 2872 entry->set_load_fail(); 2873 } 2874 return success; 2875 } 2876 2877 SCCReader::SCCReader(SCCache* cache, SCCEntry* entry, CompileTask* task) { 2878 _cache = cache; 2879 _entry = entry; 2880 _load_buffer = cache->cache_buffer(); 2881 _read_position = 0; 2882 if (task != nullptr) { 2883 _compile_id = task->compile_id(); 2884 _comp_level = task->comp_level(); 2885 _preload = task->preload(); 2886 } else { 2887 _compile_id = 0; 2888 _comp_level = 0; 2889 _preload = false; 2890 } 2891 _lookup_failed = false; 2892 } 2893 2894 bool SCCReader::compile(ciEnv* env, ciMethod* target, int entry_bci, AbstractCompiler* compiler) { 2895 uint entry_position = _entry->offset(); 2896 uint code_offset = entry_position + _entry->code_offset(); 2897 set_read_position(code_offset); 2898 2899 // Read flags 2900 int flags = *(int*)addr(code_offset); 2901 code_offset += sizeof(int); 2902 bool has_monitors = (flags & 0xFF) > 0; 2903 bool has_wide_vectors = ((flags >> 8) & 0xFF) > 0; 2904 bool 
// Reconstructs a compiled method from this reader's cache entry and
// registers it with the VM via ciEnv::register_method(). The read order
// must match SCCache::write_nmethod exactly: flags, orig_pc_offset,
// frame size, CodeOffsets, oops, metadata, debug info, dependencies,
// oop maps, exception table, null-check table, code, relocations.
// Returns true only if the nmethod was successfully registered.
bool SCCReader::compile(ciEnv* env, ciMethod* target, int entry_bci, AbstractCompiler* compiler) {
  uint entry_position = _entry->offset();
  uint code_offset = entry_position + _entry->code_offset();
  set_read_position(code_offset);

  // Read flags (packed one flag per byte; see the writer's encoding)
  int flags = *(int*)addr(code_offset);
  code_offset += sizeof(int);
  bool has_monitors = (flags & 0xFF) > 0;
  bool has_wide_vectors = ((flags >> 8) & 0xFF) > 0;
  bool has_unsafe_access = ((flags >> 16) & 0xFF) > 0;

  int orig_pc_offset = *(int*)addr(code_offset);
  code_offset += sizeof(int);
  int frame_size = *(int*)addr(code_offset);
  code_offset += sizeof(int);

  // Read offsets (used in place; points directly into the load buffer)
  CodeOffsets* offsets = (CodeOffsets*)addr(code_offset);
  code_offset += sizeof(CodeOffsets);

  // Create Debug Information Recorder to record scopes, oopmaps, etc.
  OopRecorder* oop_recorder = new OopRecorder(env->arena());
  env->set_oop_recorder(oop_recorder);

  set_read_position(code_offset);

  // Read back the OopRecorder data (oops, then metadata)
  if (!read_oops(oop_recorder, target)) {
    return false;
  }
  if (!read_metadata(oop_recorder, target)) {
    return false;
  }

  // Read Debug info
  DebugInformationRecorder* recorder = read_debug_info(oop_recorder);
  if (recorder == nullptr) {
    return false;
  }
  env->set_debug_info(recorder);

  // Read Dependencies (compressed already)
  Dependencies* dependencies = new Dependencies(env);
  if (!read_dependencies(dependencies)) {
    return false;
  }
  env->set_dependencies(dependencies);

  // Read oop maps
  OopMapSet* oop_maps = read_oop_maps();
  if (oop_maps == nullptr) {
    return false;
  }

  // Read exception handles
  code_offset = read_position();
  int exc_table_length = *(int*)addr(code_offset);
  code_offset += sizeof(int);
  // MAX2(..., 4): keep a minimum table capacity even for empty tables
  ExceptionHandlerTable handler_table(MAX2(exc_table_length, 4));
  if (exc_table_length > 0) {
    handler_table.set_length(exc_table_length);
    uint exc_table_size = handler_table.size_in_bytes();
    copy_bytes(addr(code_offset), (address)handler_table.table(), exc_table_size);
    code_offset += exc_table_size;
  }

  // Read null check table
  int nul_chk_length = *(int*)addr(code_offset);
  code_offset += sizeof(int);
  ImplicitExceptionTable nul_chk_table;
  if (nul_chk_length > 0) {
    nul_chk_table.set_size(nul_chk_length);
    nul_chk_table.set_len(nul_chk_length);
    uint nul_chk_size = nul_chk_table.size_in_bytes();
    // size_in_bytes() includes the implicit length slot, which was already
    // set above; copy only the entry payload — verify against the writer.
    copy_bytes(addr(code_offset), (address)nul_chk_table.data(), nul_chk_size - sizeof(implicit_null_entry));
    code_offset += nul_chk_size;
  }

  uint reloc_size = _entry->reloc_size();
  CodeBuffer buffer("Compile::Fill_buffer", _entry->code_size(), reloc_size);
  buffer.initialize_oop_recorder(oop_recorder);

  const char* name = addr(entry_position + _entry->name_offset());

  // Create fake original CodeBuffer
  CodeBuffer orig_buffer(name);

  // Read code (code section was aligned by the writer)
  if (!read_code(&buffer, &orig_buffer, align_up(code_offset, DATA_ALIGNMENT))) {
    return false;
  }

  // Read relocations
  uint reloc_offset = entry_position + _entry->reloc_offset();
  set_read_position(reloc_offset);
  if (!read_relocations(&buffer, &orig_buffer, oop_recorder, target)) {
    return false;
  }

  log_info(scc, nmethod)("%d (L%d): Read nmethod '%s' from Startup Code Cache '%s'", compile_id(), comp_level(), name, _cache->cache_path());
#ifdef ASSERT
  LogStreamHandle(Debug, scc, nmethod) log;
  if (log.is_enabled()) {
    FlagSetting fs(PrintRelocations, true);
    buffer.print_on(&log);
    buffer.decode();
  }
#endif

  // Verification mode: go through the full read path but never install.
  if (VerifyCachedCode) {
    return false;
  }

  // Register nmethod
  TraceTime t1("SC total nmethod register time", &_t_totalRegister, enable_timers(), false);
  env->register_method(target, entry_bci,
                       offsets, orig_pc_offset,
                       &buffer, frame_size,
                       oop_maps, &handler_table,
                       &nul_chk_table, compiler,
                       _entry->has_clinit_barriers(),
                       false,
                       has_unsafe_access,
                       has_wide_vectors,
                       has_monitors,
                       0, true /* install_code */, NoRTM,
                       (SCCEntry *)_entry);
  CompileTask* task = env->task();
  bool success = task->is_success();
  if (success) {
    // Mark the entry so statistics/reporting can tell it was actually used.
    ((SCCEntry *)_entry)->set_loaded();
  }
  return success;
}
// Entry point for storing a freshly compiled nmethod into the Startup Code
// Cache. Applies eligibility filters (dumping mode, no OSR, tier1/C2 only),
// then delegates the actual serialization to SCCache::write_nmethod.
// Returns the new cache entry, or nullptr if the nmethod was not stored.
// No concurrency control is needed for writing to the cache file because
// this method is called from ciEnv::register_method() under
// MethodCompileQueue_lock and Compile_lock locks.
SCCEntry* SCCache::store_nmethod(const methodHandle& method,
                     int comp_id,
                     int entry_bci,
                     CodeOffsets* offsets,
                     int orig_pc_offset,
                     DebugInformationRecorder* recorder,
                     Dependencies* dependencies,
                     CodeBuffer* buffer,
                     int frame_size,
                     OopMapSet* oop_maps,
                     ExceptionHandlerTable* handler_table,
                     ImplicitExceptionTable* nul_chk_table,
                     AbstractCompiler* compiler,
                     CompLevel comp_level,
                     bool has_clinit_barriers,
                     bool for_preload,
                     bool has_unsafe_access,
                     bool has_wide_vectors,
                     bool has_monitors) {
  CompileTask* task = ciEnv::current()->task();

  if (!CDSConfig::is_dumping_cached_code()) {
    return nullptr; // The metadata and heap in the CDS image haven't been finalized yet.
  }
  if (entry_bci != InvocationEntryBci) {
    return nullptr; // No OSR
  }
  if (compiler->is_c1() && (comp_level == CompLevel_simple || comp_level == CompLevel_limited_profile)) {
    // Cache tier1 compilations
  } else if (!compiler->is_c2()) {
    return nullptr; // Only C2 now
  }
  TraceTime t1("SC total store time", &_t_totalStore, enable_timers(), false);
  SCCache* cache = open_for_write();
  if (cache == nullptr) {
    return nullptr; // Cache file is closed
  }
  SCCEntry* entry = cache->write_nmethod(method, comp_id, entry_bci, offsets, orig_pc_offset, recorder, dependencies, buffer,
                                         frame_size, oop_maps, handler_table, nul_chk_table, compiler, comp_level,
                                         has_clinit_barriers, for_preload, has_unsafe_access, has_wide_vectors, has_monitors);
  if (entry == nullptr) {
    log_warning(scc, nmethod)("%d (L%d): nmethod store attempt failed", task->compile_id(), task->comp_level());
  }
  return entry;
}
// Serializes one nmethod into the cache stream. The write order here
// defines the on-disk format and must be mirrored exactly by
// SCCReader::compile: name, flags, orig_pc_offset, frame size, CodeOffsets,
// oops, metadata, debug info, dependencies, oop maps, exception table,
// null-check table, code, relocations.
// Returns the new SCCEntry, or nullptr if the method is ineligible
// (expanded buffer, custom class loader, preload without CDS) or any
// write/lookup failed. On a lookup failure the write position is rewound
// so the partial entry is discarded.
SCCEntry* SCCache::write_nmethod(const methodHandle& method,
                     int comp_id,
                     int entry_bci,
                     CodeOffsets* offsets,
                     int orig_pc_offset,
                     DebugInformationRecorder* recorder,
                     Dependencies* dependencies,
                     CodeBuffer* buffer,
                     int frame_size,
                     OopMapSet* oop_maps,
                     ExceptionHandlerTable* handler_table,
                     ImplicitExceptionTable* nul_chk_table,
                     AbstractCompiler* compiler,
                     CompLevel comp_level,
                     bool has_clinit_barriers,
                     bool for_preload,
                     bool has_unsafe_access,
                     bool has_wide_vectors,
                     bool has_monitors) {
  CompileTask* task = ciEnv::current()->task();

  // if (method->is_hidden()) {
  //   ResourceMark rm;
  //   log_info(scc, nmethod)("%d (L%d): Skip hidden method '%s'", task->compile_id(), task->comp_level(), method->name_and_sig_as_C_string());
  //   return nullptr;
  // }
  if (buffer->before_expand() != nullptr) {
    // A buffer that was expanded during compilation cannot be cached as-is.
    ResourceMark rm;
    log_info(scc, nmethod)("%d (L%d): Skip nmethod with expanded buffer '%s'", task->compile_id(), task->comp_level(), method->name_and_sig_as_C_string());
    return nullptr;
  }
#ifdef ASSERT
  LogStreamHandle(Debug, scc, nmethod) log;
  if (log.is_enabled()) {
    tty->print_cr(" == store_nmethod");
    FlagSetting fs(PrintRelocations, true);
    buffer->print_on(&log);
    buffer->decode();
  }
#endif
  assert(!has_clinit_barriers || _gen_preload_code, "sanity");
  Method* m = method();
  // Whether the Method* itself lives in the CDS shared metaspace; required
  // for preload entries and for recording the method pointer in the entry.
  bool method_in_cds = MetaspaceShared::is_in_shared_metaspace((address)m);
  InstanceKlass* holder = m->method_holder();
  bool klass_in_cds = holder->is_shared() && !holder->is_shared_unregistered_class();
  bool builtin_loader = holder->class_loader_data()->is_builtin_class_loader_data();
  if (!builtin_loader) {
    // Classes from custom loaders cannot be re-resolved at load time.
    ResourceMark rm;
    log_info(scc, nmethod)("%d (L%d): Skip method '%s' loaded by custom class loader %s", task->compile_id(), task->comp_level(), method->name_and_sig_as_C_string(), holder->class_loader_data()->loader_name());
    return nullptr;
  }
  if (for_preload && !(method_in_cds && klass_in_cds)) {
    // Preload entries are resolved before classes are loaded, so both the
    // method and its holder must be in the CDS archive.
    ResourceMark rm;
    log_info(scc, nmethod)("%d (L%d): Skip method '%s' for preload: not in CDS", task->compile_id(), task->comp_level(), method->name_and_sig_as_C_string());
    return nullptr;
  }
  assert(!for_preload || method_in_cds, "sanity");
  _for_preload = for_preload;
  _has_clinit_barriers = has_clinit_barriers;

  if (!align_write()) {
    return nullptr;
  }
  _compile_id = task->compile_id();
  _comp_level = task->comp_level();

  uint entry_position = _write_position;

  uint decomp = (method->method_data() == nullptr) ? 0 : method->method_data()->decompile_count();
  // Write name
  uint name_offset = 0;
  uint name_size   = 0;
  uint hash = 0;
  uint n;
  {
    ResourceMark rm;
    const char* name = method->name_and_sig_as_C_string();
    log_info(scc, nmethod)("%d (L%d): Writing nmethod '%s' (comp level: %d, decomp: %d%s) to Startup Code Cache '%s'",
                           task->compile_id(), task->comp_level(), name, comp_level, decomp,
                           (has_clinit_barriers ? ", has clinit barriers" : ""), _cache_path);

    LogStreamHandle(Info, scc, loader) log;
    if (log.is_enabled()) {
      oop loader = holder->class_loader();
      oop domain = holder->protection_domain();
      log.print("Holder: ");
      holder->print_value_on(&log);
      log.print(" loader: ");
      if (loader == nullptr) {
        log.print("nullptr");
      } else {
        loader->print_value_on(&log);
      }
      log.print(" domain: ");
      if (domain == nullptr) {
        log.print("nullptr");
      } else {
        domain->print_value_on(&log);
      }
      log.cr();
    }
    name_offset = _write_position - entry_position;
    name_size   = (uint)strlen(name) + 1; // +1 to include the trailing '\0'
    n = write_bytes(name, name_size);
    if (n != name_size) {
      return nullptr;
    }
    // Hash is used by the load-time search table to find this entry by name.
    hash = java_lang_String::hash_code((const jbyte*)name, (int)strlen(name));
  }

  if (!align_write()) {
    return nullptr;
  }

  uint code_offset = _write_position - entry_position;

  // Pack the three boolean flags one per byte (decoded by SCCReader::compile).
  int flags = ((has_unsafe_access ? 1 : 0) << 16) | ((has_wide_vectors ? 1 : 0) << 8) | (has_monitors ? 1 : 0);
  n = write_bytes(&flags, sizeof(int));
  if (n != sizeof(int)) {
    return nullptr;
  }

  n = write_bytes(&orig_pc_offset, sizeof(int));
  if (n != sizeof(int)) {
    return nullptr;
  }

  n = write_bytes(&frame_size, sizeof(int));
  if (n != sizeof(int)) {
    return nullptr;
  }

  // Write offsets
  n = write_bytes(offsets, sizeof(CodeOffsets));
  if (n != sizeof(CodeOffsets)) {
    return nullptr;
  }

  // Write OopRecorder data
  if (!write_oops(buffer->oop_recorder())) {
    if (lookup_failed() && !failed()) {
      // Skip this method and reposition file
      set_write_position(entry_position);
    }
    return nullptr;
  }
  if (!write_metadata(buffer->oop_recorder())) {
    if (lookup_failed() && !failed()) {
      // Skip this method and reposition file
      set_write_position(entry_position);
    }
    return nullptr;
  }

  // Write Debug info
  if (!write_debug_info(recorder)) {
    return nullptr;
  }
  // Write Dependencies
  int dependencies_size = (int)dependencies->size_in_bytes();
  n = write_bytes(&dependencies_size, sizeof(int));
  if (n != sizeof(int)) {
    return nullptr;
  }
  if (!align_write()) {
    return nullptr;
  }
  n = write_bytes(dependencies->content_bytes(), dependencies_size);
  if (n != (uint)dependencies_size) {
    return nullptr;
  }

  // Write oop maps
  if (!write_oop_maps(oop_maps)) {
    return nullptr;
  }

  // Write exception handles
  int exc_table_length = handler_table->length();
  n = write_bytes(&exc_table_length, sizeof(int));
  if (n != sizeof(int)) {
    return nullptr;
  }
  uint exc_table_size = handler_table->size_in_bytes();
  n = write_bytes(handler_table->table(), exc_table_size);
  if (n != exc_table_size) {
    return nullptr;
  }

  // Write null check table
  int nul_chk_length = nul_chk_table->len();
  n = write_bytes(&nul_chk_length, sizeof(int));
  if (n != sizeof(int)) {
    return nullptr;
  }
  uint nul_chk_size = nul_chk_table->size_in_bytes();
  n = write_bytes(nul_chk_table->data(), nul_chk_size);
  if (n != nul_chk_size) {
    return nullptr;
  }

  // Write code section
  if (!align_write()) {
    return nullptr;
  }
  uint code_size = 0;
  if (!write_code(buffer, code_size)) {
    return nullptr;
  }
  // Write relocInfo array
  uint reloc_offset = _write_position - entry_position;
  uint reloc_size = 0;
  if (!write_relocations(buffer, reloc_size)) {
    if (lookup_failed() && !failed()) {
      // Skip this method and reposition file
      set_write_position(entry_position);
    }
    return nullptr;
  }
  uint entry_size = _write_position - entry_position;

  // Placement-new into the cache's entry table (see operator new(size_t, SCCache*)).
  SCCEntry* entry = new (this) SCCEntry(entry_position, entry_size, name_offset, name_size,
                                        code_offset, code_size, reloc_offset, reloc_size,
                                        SCCEntry::Code, hash, (uint)comp_level, (uint)comp_id, decomp,
                                        has_clinit_barriers, _for_preload);
  if (method_in_cds) {
    entry->set_method(m);
  }
#ifdef ASSERT
  if (has_clinit_barriers || _for_preload) {
    assert(for_preload, "sanity");
    assert(entry->method() != nullptr, "sanity");
  }
#endif
  {
    ResourceMark rm;
    const char* name = method->name_and_sig_as_C_string();
    log_info(scc, nmethod)("%d (L%d): Wrote nmethod '%s'%s to Startup Code Cache '%s'",
                           task->compile_id(), task->comp_level(), name, (_for_preload ? " (for preload)" : ""), _cache_path);
  }
  // Verification mode: exercise the write path but publish no entry.
  if (VerifyCachedCode) {
    return nullptr;
  }
  return entry;
}
" (for preload)" : ""), _cache_path); 3317 } 3318 if (VerifyCachedCode) { 3319 return nullptr; 3320 } 3321 return entry; 3322 } 3323 3324 static void print_helper1(outputStream* st, const char* name, int count) { 3325 if (count > 0) { 3326 st->print(" %s=%d", name, count); 3327 } 3328 } 3329 static void print_helper(outputStream* st, const char* name, int stats[6+3][6], int idx) { 3330 int total = stats[idx][0]; 3331 if (total > 0) { 3332 st->print(" %s:", name); 3333 print_helper1(st, "total", stats[idx][0]); 3334 //print_helper1(st, "for_preload", stats[idx][2]); // implied by Tier5 3335 print_helper1(st, "loaded", stats[idx][3]); 3336 print_helper1(st, "invalidated", stats[idx][4]); 3337 print_helper1(st, "failed", stats[idx][5]); 3338 print_helper1(st, "has_clinit_barriers", stats[idx][1]); 3339 st->cr(); 3340 } 3341 } 3342 3343 void SCCache::print_statistics_on(outputStream* st) { 3344 SCCache* cache = open_for_read(); 3345 if (cache != nullptr) { 3346 ReadingMark rdmk; 3347 3348 uint count = cache->_load_header->entries_count(); 3349 uint* search_entries = (uint*)cache->addr(cache->_load_header->entries_offset()); // [id, index] 3350 SCCEntry* load_entries = (SCCEntry*)(search_entries + 2 * count); 3351 3352 int stats[6 + 3][6] = {0}; 3353 for (uint i = 0; i < count; i++) { 3354 int index = search_entries[2*i + 1]; 3355 SCCEntry* entry = &(load_entries[index]); 3356 3357 int lvl = entry->kind(); 3358 if (entry->kind() == SCCEntry::Code) { 3359 lvl += entry->comp_level() + (entry->for_preload() ? 
1 : 0); 3360 } 3361 ++stats[lvl][0]; // total 3362 if (entry->has_clinit_barriers()) { 3363 ++stats[lvl][1]; 3364 } 3365 if (entry->for_preload()) { 3366 ++stats[lvl][2]; 3367 } 3368 if (entry->is_loaded()) { 3369 ++stats[lvl][3]; 3370 } 3371 if (entry->not_entrant()) { 3372 ++stats[lvl][4]; 3373 } 3374 if (entry->load_fail()) { 3375 ++stats[lvl][5]; 3376 } 3377 } 3378 3379 print_helper(st, "None", stats, SCCEntry::None); 3380 print_helper(st, "Stub", stats, SCCEntry::Stub); 3381 print_helper(st, "Blob", stats, SCCEntry::Blob); 3382 for (int lvl = 0; lvl <= CompLevel_full_optimization + 1; lvl++) { 3383 ResourceMark rm; 3384 stringStream ss; 3385 ss.print("SC T%d", lvl); 3386 print_helper(st, ss.freeze(), stats, SCCEntry::Code + lvl); 3387 } 3388 3389 } else { 3390 st->print_cr("failed to open SCA at %s", CachedCodeFile); 3391 } 3392 } 3393 3394 void SCCache::print_on(outputStream* st) { 3395 SCCache* cache = open_for_read(); 3396 if (cache != nullptr) { 3397 ReadingMark rdmk; 3398 3399 uint count = cache->_load_header->entries_count(); 3400 uint* search_entries = (uint*)cache->addr(cache->_load_header->entries_offset()); // [id, index] 3401 SCCEntry* load_entries = (SCCEntry*)(search_entries + 2 * count); 3402 3403 for (uint i = 0; i < count; i++) { 3404 int index = search_entries[2*i + 1]; 3405 SCCEntry* entry = &(load_entries[index]); 3406 3407 st->print_cr("%4u: %4u: K%u L%u offset=%u decompile=%u size=%u code_size=%u%s%s%s%s", 3408 i, index, entry->kind(), entry->comp_level(), entry->offset(), 3409 entry->decompile(), entry->size(), entry->code_size(), 3410 entry->has_clinit_barriers() ? " has_clinit_barriers" : "", 3411 entry->for_preload() ? " for_preload" : "", 3412 entry->is_loaded() ? " loaded" : "", 3413 entry->not_entrant() ? 
" not_entrant" : ""); 3414 st->print_raw(" "); 3415 SCCReader reader(cache, entry, nullptr); 3416 reader.print_on(st); 3417 } 3418 } else { 3419 st->print_cr("failed to open SCA at %s", CachedCodeFile); 3420 } 3421 } 3422 3423 void SCCache::print_unused_entries_on(outputStream* st) { 3424 LogStreamHandle(Info, scc, init) info; 3425 if (info.is_enabled()) { 3426 SCCache::iterate([&](SCCEntry* entry) { 3427 if (!entry->is_loaded()) { 3428 MethodTrainingData* mtd = MethodTrainingData::lookup_for(entry->method()); 3429 if (mtd != nullptr) { 3430 if (mtd->has_holder()) { 3431 if (mtd->holder()->method_holder()->is_initialized()) { 3432 ResourceMark rm; 3433 mtd->iterate_all_compiles([&](CompileTrainingData* ctd) { 3434 if ((uint)ctd->level() == entry->comp_level()) { 3435 if (ctd->init_deps_left() == 0) { 3436 nmethod* nm = mtd->holder()->code(); 3437 if (nm == nullptr) { 3438 if (mtd->holder()->queued_for_compilation()) { 3439 return; // scheduled for compilation 3440 } 3441 } else if ((uint)nm->comp_level() >= entry->comp_level()) { 3442 return; // already online compiled and superseded by a more optimal method 3443 } 3444 info.print("SCC entry not loaded: "); 3445 ctd->print_on(&info); 3446 info.cr(); 3447 } 3448 } 3449 }); 3450 } else { 3451 // not yet initialized 3452 } 3453 } else { 3454 info.print("SCC entry doesn't have a holder: "); 3455 mtd->print_on(&info); 3456 info.cr(); 3457 } 3458 } 3459 } 3460 }); 3461 } 3462 } 3463 3464 void SCCReader::print_on(outputStream* st) { 3465 uint entry_position = _entry->offset(); 3466 set_read_position(entry_position); 3467 3468 // Read name 3469 uint name_offset = entry_position + _entry->name_offset(); 3470 uint name_size = _entry->name_size(); // Includes '/0' 3471 const char* name = addr(name_offset); 3472 3473 st->print_cr(" name: %s", name); 3474 } 3475 3476 #define _extrs_max 80 3477 #define _stubs_max 120 3478 #define _blobs_max 80 3479 #define _shared_blobs_max 16 3480 #define _C2_blobs_max 21 3481 #define 
_C1_blobs_max (_blobs_max - _shared_blobs_max - _C2_blobs_max) 3482 #define _all_max 280 3483 3484 #define SET_ADDRESS(type, addr) \ 3485 { \ 3486 type##_addr[type##_length++] = (address) (addr); \ 3487 assert(type##_length <= type##_max, "increase size"); \ 3488 } 3489 3490 static bool initializing = false; 3491 void SCAddressTable::init() { 3492 if (_complete || initializing) return; // Done already 3493 initializing = true; 3494 _extrs_addr = NEW_C_HEAP_ARRAY(address, _extrs_max, mtCode); 3495 _stubs_addr = NEW_C_HEAP_ARRAY(address, _stubs_max, mtCode); 3496 _blobs_addr = NEW_C_HEAP_ARRAY(address, _blobs_max, mtCode); 3497 3498 // Divide _blobs_addr array to chunks because they could be initialized in parrallel 3499 _C2_blobs_addr = _blobs_addr + _shared_blobs_max;// C2 blobs addresses stored after shared blobs 3500 _C1_blobs_addr = _C2_blobs_addr + _C2_blobs_max; // C1 blobs addresses stored after C2 blobs 3501 3502 _extrs_length = 0; 3503 _stubs_length = 0; 3504 _blobs_length = 0; // for shared blobs 3505 _C1_blobs_length = 0; 3506 _C2_blobs_length = 0; 3507 _final_blobs_length = 0; // Depends on numnber of C1 blobs 3508 3509 // Runtime methods 3510 #ifdef COMPILER2 3511 SET_ADDRESS(_extrs, OptoRuntime::handle_exception_C); 3512 #endif 3513 #ifdef COMPILER1 3514 SET_ADDRESS(_extrs, Runtime1::is_instance_of); 3515 SET_ADDRESS(_extrs, Runtime1::trace_block_entry); 3516 #endif 3517 3518 SET_ADDRESS(_extrs, CompressedOops::ptrs_base_addr()); 3519 SET_ADDRESS(_extrs, G1BarrierSetRuntime::write_ref_field_post_entry); 3520 SET_ADDRESS(_extrs, G1BarrierSetRuntime::write_ref_field_pre_entry); 3521 3522 SET_ADDRESS(_extrs, SharedRuntime::complete_monitor_unlocking_C); 3523 SET_ADDRESS(_extrs, SharedRuntime::enable_stack_reserved_zone); 3524 #ifdef AMD64 3525 SET_ADDRESS(_extrs, SharedRuntime::montgomery_multiply); 3526 SET_ADDRESS(_extrs, SharedRuntime::montgomery_square); 3527 #endif // AMD64 3528 SET_ADDRESS(_extrs, SharedRuntime::d2f); 3529 SET_ADDRESS(_extrs, 
SharedRuntime::d2i);
  // (continuation of the external-address registration, begun above)
  // Record the addresses of shared-runtime math/conversion helpers so that
  // relocations targeting them can be encoded as stable table ids in the
  // startup code cache.
  SET_ADDRESS(_extrs, SharedRuntime::d2l);
  SET_ADDRESS(_extrs, SharedRuntime::dcos);
  SET_ADDRESS(_extrs, SharedRuntime::dexp);
  SET_ADDRESS(_extrs, SharedRuntime::dlog);
  SET_ADDRESS(_extrs, SharedRuntime::dlog10);
  SET_ADDRESS(_extrs, SharedRuntime::dpow);
  SET_ADDRESS(_extrs, SharedRuntime::drem);
  SET_ADDRESS(_extrs, SharedRuntime::dsin);
  SET_ADDRESS(_extrs, SharedRuntime::dtan);
  SET_ADDRESS(_extrs, SharedRuntime::f2i);
  SET_ADDRESS(_extrs, SharedRuntime::f2l);
  SET_ADDRESS(_extrs, SharedRuntime::frem);
  SET_ADDRESS(_extrs, SharedRuntime::l2d);
  SET_ADDRESS(_extrs, SharedRuntime::l2f);
  SET_ADDRESS(_extrs, SharedRuntime::ldiv);
  SET_ADDRESS(_extrs, SharedRuntime::lmul);
  SET_ADDRESS(_extrs, SharedRuntime::lrem);
  SET_ADDRESS(_extrs, &JvmtiExport::_should_notify_object_alloc);

  // Card table base is only meaningful for card-table-based GCs.
  BarrierSet* bs = BarrierSet::barrier_set();
  if (bs->is_a(BarrierSet::CardTableBarrierSet)) {
    SET_ADDRESS(_extrs, ci_card_table_address_as<address>());
  }
  SET_ADDRESS(_extrs, ThreadIdentifier::unsafe_offset());
  SET_ADDRESS(_extrs, Thread::current);

  SET_ADDRESS(_extrs, os::javaTimeMillis);
  SET_ADDRESS(_extrs, os::javaTimeNanos);

  SET_ADDRESS(_extrs, &JvmtiVTMSTransitionDisabler::_VTMS_notify_jvmti_events);
  SET_ADDRESS(_extrs, StubRoutines::crc_table_addr());
#ifndef PRODUCT
  // Debug-build-only runtime symbols referenced from generated code.
  SET_ADDRESS(_extrs, &SharedRuntime::_partial_subtype_ctr);
  SET_ADDRESS(_extrs, JavaThread::verify_cross_modify_fence_failure);
#endif

#if defined(AMD64) || defined(AARCH64) || defined(RISCV64)
  SET_ADDRESS(_extrs, MacroAssembler::debug64);
#endif
#if defined(AMD64)
  SET_ADDRESS(_extrs, StubRoutines::x86::arrays_hashcode_powers_of_31());
#endif

#ifdef X86
  // C1 floating-point constant pools (x86 only).
  SET_ADDRESS(_extrs, LIR_Assembler::float_signmask_pool);
  SET_ADDRESS(_extrs, LIR_Assembler::double_signmask_pool);
  SET_ADDRESS(_extrs, LIR_Assembler::float_signflip_pool);
  SET_ADDRESS(_extrs, LIR_Assembler::double_signflip_pool);
#endif

  // Stubs
  SET_ADDRESS(_stubs, StubRoutines::method_entry_barrier());
  SET_ADDRESS(_stubs, StubRoutines::forward_exception_entry());
  // NOTE: the throw_* entries below are intentionally disabled here;
  // they are registered into _blobs further down instead.
  /*
  SET_ADDRESS(_stubs, StubRoutines::throw_AbstractMethodError_entry());
  SET_ADDRESS(_stubs, StubRoutines::throw_IncompatibleClassChangeError_entry());
  SET_ADDRESS(_stubs, StubRoutines::throw_NullPointerException_at_call_entry());
  SET_ADDRESS(_stubs, StubRoutines::throw_StackOverflowError_entry());
  SET_ADDRESS(_stubs, StubRoutines::throw_delayed_StackOverflowError_entry());
  */
  SET_ADDRESS(_stubs, StubRoutines::atomic_xchg_entry());
  SET_ADDRESS(_stubs, StubRoutines::atomic_cmpxchg_entry());
  SET_ADDRESS(_stubs, StubRoutines::atomic_cmpxchg_long_entry());
  SET_ADDRESS(_stubs, StubRoutines::atomic_add_entry());
  SET_ADDRESS(_stubs, StubRoutines::fence_entry());

  // Continuation (Loom) thaw/return-barrier stubs.
  SET_ADDRESS(_stubs, StubRoutines::cont_thaw());
  SET_ADDRESS(_stubs, StubRoutines::cont_returnBarrier());
  SET_ADDRESS(_stubs, StubRoutines::cont_returnBarrierExc());

  JFR_ONLY(SET_ADDRESS(_stubs, StubRoutines::jfr_write_checkpoint());)


  // Arraycopy stub variants: conjoint, disjoint, arrayof, checkcast, unsafe,
  // generic — each with oop/uninit forms where applicable.
  SET_ADDRESS(_stubs, StubRoutines::jbyte_arraycopy());
  SET_ADDRESS(_stubs, StubRoutines::jshort_arraycopy());
  SET_ADDRESS(_stubs, StubRoutines::jint_arraycopy());
  SET_ADDRESS(_stubs, StubRoutines::jlong_arraycopy());
  SET_ADDRESS(_stubs, StubRoutines::_oop_arraycopy);
  SET_ADDRESS(_stubs, StubRoutines::_oop_arraycopy_uninit);

  SET_ADDRESS(_stubs, StubRoutines::jbyte_disjoint_arraycopy());
  SET_ADDRESS(_stubs, StubRoutines::jshort_disjoint_arraycopy());
  SET_ADDRESS(_stubs, StubRoutines::jint_disjoint_arraycopy());
  SET_ADDRESS(_stubs, StubRoutines::jlong_disjoint_arraycopy());
  SET_ADDRESS(_stubs, StubRoutines::_oop_disjoint_arraycopy);
  SET_ADDRESS(_stubs, StubRoutines::_oop_disjoint_arraycopy_uninit);

  SET_ADDRESS(_stubs, StubRoutines::arrayof_jbyte_arraycopy());
  SET_ADDRESS(_stubs, StubRoutines::arrayof_jshort_arraycopy());
  SET_ADDRESS(_stubs, StubRoutines::arrayof_jint_arraycopy());
  SET_ADDRESS(_stubs, StubRoutines::arrayof_jlong_arraycopy());
  SET_ADDRESS(_stubs, StubRoutines::_arrayof_oop_arraycopy);
  SET_ADDRESS(_stubs, StubRoutines::_arrayof_oop_arraycopy_uninit);

  SET_ADDRESS(_stubs, StubRoutines::arrayof_jbyte_disjoint_arraycopy());
  SET_ADDRESS(_stubs, StubRoutines::arrayof_jshort_disjoint_arraycopy());
  SET_ADDRESS(_stubs, StubRoutines::arrayof_jint_disjoint_arraycopy());
  SET_ADDRESS(_stubs, StubRoutines::arrayof_jlong_disjoint_arraycopy());
  SET_ADDRESS(_stubs, StubRoutines::_arrayof_oop_disjoint_arraycopy);
  SET_ADDRESS(_stubs, StubRoutines::_arrayof_oop_disjoint_arraycopy_uninit);

  SET_ADDRESS(_stubs, StubRoutines::_checkcast_arraycopy);
  SET_ADDRESS(_stubs, StubRoutines::_checkcast_arraycopy_uninit);

  SET_ADDRESS(_stubs, StubRoutines::unsafe_arraycopy());
  SET_ADDRESS(_stubs, StubRoutines::generic_arraycopy());

  SET_ADDRESS(_stubs, StubRoutines::jbyte_fill());
  SET_ADDRESS(_stubs, StubRoutines::jshort_fill());
  SET_ADDRESS(_stubs, StubRoutines::jint_fill());
  SET_ADDRESS(_stubs, StubRoutines::arrayof_jbyte_fill());
  SET_ADDRESS(_stubs, StubRoutines::arrayof_jshort_fill());
  SET_ADDRESS(_stubs, StubRoutines::arrayof_jint_fill());

  SET_ADDRESS(_stubs, StubRoutines::data_cache_writeback());
  SET_ADDRESS(_stubs, StubRoutines::data_cache_writeback_sync());

  // Crypto and digest intrinsic stubs.
  SET_ADDRESS(_stubs, StubRoutines::aescrypt_encryptBlock());
  SET_ADDRESS(_stubs, StubRoutines::aescrypt_decryptBlock());
  SET_ADDRESS(_stubs, StubRoutines::cipherBlockChaining_encryptAESCrypt());
  SET_ADDRESS(_stubs, StubRoutines::cipherBlockChaining_decryptAESCrypt());
  SET_ADDRESS(_stubs, StubRoutines::electronicCodeBook_encryptAESCrypt());
  SET_ADDRESS(_stubs, StubRoutines::electronicCodeBook_decryptAESCrypt());
  SET_ADDRESS(_stubs, StubRoutines::poly1305_processBlocks());
  SET_ADDRESS(_stubs, StubRoutines::counterMode_AESCrypt());
  SET_ADDRESS(_stubs, StubRoutines::ghash_processBlocks());
  SET_ADDRESS(_stubs, StubRoutines::chacha20Block());
  SET_ADDRESS(_stubs, StubRoutines::base64_encodeBlock());
  SET_ADDRESS(_stubs, StubRoutines::base64_decodeBlock());
  SET_ADDRESS(_stubs, StubRoutines::md5_implCompress());
  SET_ADDRESS(_stubs, StubRoutines::md5_implCompressMB());
  SET_ADDRESS(_stubs, StubRoutines::sha1_implCompress());
  SET_ADDRESS(_stubs, StubRoutines::sha1_implCompressMB());
  SET_ADDRESS(_stubs, StubRoutines::sha256_implCompress());
  SET_ADDRESS(_stubs, StubRoutines::sha256_implCompressMB());
  SET_ADDRESS(_stubs, StubRoutines::sha512_implCompress());
  SET_ADDRESS(_stubs, StubRoutines::sha512_implCompressMB());
  SET_ADDRESS(_stubs, StubRoutines::sha3_implCompress());
  SET_ADDRESS(_stubs, StubRoutines::sha3_implCompressMB());

  SET_ADDRESS(_stubs, StubRoutines::updateBytesCRC32());

  SET_ADDRESS(_stubs, StubRoutines::crc32c_table_addr());
  SET_ADDRESS(_stubs, StubRoutines::updateBytesCRC32C());
  SET_ADDRESS(_stubs, StubRoutines::updateBytesAdler32());

  // BigInteger intrinsic stubs.
  SET_ADDRESS(_stubs, StubRoutines::multiplyToLen());
  SET_ADDRESS(_stubs, StubRoutines::squareToLen());
  SET_ADDRESS(_stubs, StubRoutines::mulAdd());
  SET_ADDRESS(_stubs, StubRoutines::montgomeryMultiply());
  SET_ADDRESS(_stubs, StubRoutines::montgomerySquare());
  SET_ADDRESS(_stubs, StubRoutines::bigIntegerRightShift());
  SET_ADDRESS(_stubs, StubRoutines::bigIntegerLeftShift());
  SET_ADDRESS(_stubs, StubRoutines::galoisCounterMode_AESCrypt());

  SET_ADDRESS(_stubs, StubRoutines::vectorizedMismatch());

  // dlibm math stubs.
  SET_ADDRESS(_stubs, StubRoutines::dexp());
  SET_ADDRESS(_stubs, StubRoutines::dlog());
  SET_ADDRESS(_stubs, StubRoutines::dlog10());
  SET_ADDRESS(_stubs, StubRoutines::dpow());
  SET_ADDRESS(_stubs, StubRoutines::dsin());
  SET_ADDRESS(_stubs, StubRoutines::dcos());
  SET_ADDRESS(_stubs, StubRoutines::dlibm_reduce_pi04l());
  SET_ADDRESS(_stubs, StubRoutines::dlibm_sin_cos_huge());
  SET_ADDRESS(_stubs, StubRoutines::dlibm_tan_cot_huge());
  SET_ADDRESS(_stubs, StubRoutines::dtan());

  SET_ADDRESS(_stubs, StubRoutines::f2hf_adr());
  SET_ADDRESS(_stubs, StubRoutines::hf2f_adr());

#if defined(AMD64)
  SET_ADDRESS(_stubs, StubRoutines::x86::d2i_fixup());
  SET_ADDRESS(_stubs, StubRoutines::x86::f2i_fixup());
  SET_ADDRESS(_stubs, StubRoutines::x86::d2l_fixup());
  SET_ADDRESS(_stubs, StubRoutines::x86::f2l_fixup());
  SET_ADDRESS(_stubs, StubRoutines::x86::float_sign_mask());
  SET_ADDRESS(_stubs, StubRoutines::x86::float_sign_flip());
  SET_ADDRESS(_stubs, StubRoutines::x86::double_sign_mask());
  SET_ADDRESS(_stubs, StubRoutines::x86::double_sign_flip());
  SET_ADDRESS(_stubs, StubRoutines::x86::vector_popcount_lut());
  SET_ADDRESS(_stubs, StubRoutines::x86::vector_float_sign_mask());
  SET_ADDRESS(_stubs, StubRoutines::x86::vector_float_sign_flip());
  SET_ADDRESS(_stubs, StubRoutines::x86::vector_double_sign_mask());
  SET_ADDRESS(_stubs, StubRoutines::x86::vector_double_sign_flip());
  // The iota indices are ordered by type B/S/I/L/F/D, and the offset between two types is 64.
  // See C2_MacroAssembler::load_iota_indices().
  for (int i = 0; i < 6; i++) {
    SET_ADDRESS(_stubs, StubRoutines::x86::vector_iota_indices() + i * 64);
  }
#endif
#if defined(AARCH64)
  SET_ADDRESS(_stubs, StubRoutines::aarch64::d2i_fixup());
  SET_ADDRESS(_stubs, StubRoutines::aarch64::f2i_fixup());
  SET_ADDRESS(_stubs, StubRoutines::aarch64::d2l_fixup());
  SET_ADDRESS(_stubs, StubRoutines::aarch64::f2l_fixup());
  SET_ADDRESS(_stubs, StubRoutines::aarch64::float_sign_mask());
  SET_ADDRESS(_stubs, StubRoutines::aarch64::float_sign_flip());
  SET_ADDRESS(_stubs, StubRoutines::aarch64::double_sign_mask());
  SET_ADDRESS(_stubs, StubRoutines::aarch64::double_sign_flip());
  SET_ADDRESS(_stubs, StubRoutines::aarch64::zero_blocks());
  SET_ADDRESS(_stubs, StubRoutines::aarch64::count_positives());
  SET_ADDRESS(_stubs, StubRoutines::aarch64::count_positives_long());
  SET_ADDRESS(_stubs, StubRoutines::aarch64::large_array_equals());
  SET_ADDRESS(_stubs, StubRoutines::aarch64::compare_long_string_LL());
  SET_ADDRESS(_stubs, StubRoutines::aarch64::compare_long_string_UU());
  SET_ADDRESS(_stubs, StubRoutines::aarch64::compare_long_string_LU());
  SET_ADDRESS(_stubs, StubRoutines::aarch64::compare_long_string_UL());
  SET_ADDRESS(_stubs, StubRoutines::aarch64::string_indexof_linear_ul());
  SET_ADDRESS(_stubs, StubRoutines::aarch64::string_indexof_linear_ll());
  SET_ADDRESS(_stubs, StubRoutines::aarch64::string_indexof_linear_uu());
  SET_ADDRESS(_stubs, StubRoutines::aarch64::large_byte_array_inflate());
  SET_ADDRESS(_stubs, StubRoutines::aarch64::spin_wait());
#endif

  // Blobs
  SET_ADDRESS(_blobs, SharedRuntime::get_handle_wrong_method_stub());
  SET_ADDRESS(_blobs, SharedRuntime::get_ic_miss_stub());
  SET_ADDRESS(_blobs, SharedRuntime::get_resolve_opt_virtual_call_stub());
  SET_ADDRESS(_blobs, SharedRuntime::get_resolve_virtual_call_stub());
  SET_ADDRESS(_blobs, SharedRuntime::get_resolve_static_call_stub());
  SET_ADDRESS(_blobs, SharedRuntime::deopt_blob()->entry_point());
  SET_ADDRESS(_blobs, SharedRuntime::polling_page_safepoint_handler_blob()->entry_point());
  SET_ADDRESS(_blobs, SharedRuntime::polling_page_return_handler_blob()->entry_point());
#ifdef COMPILER2
  SET_ADDRESS(_blobs, SharedRuntime::polling_page_vectors_safepoint_handler_blob()->entry_point());
  SET_ADDRESS(_blobs, SharedRuntime::uncommon_trap_blob()->entry_point());
#endif
  SET_ADDRESS(_blobs, StubRoutines::throw_AbstractMethodError_entry());
  SET_ADDRESS(_blobs, StubRoutines::throw_IncompatibleClassChangeError_entry());
  SET_ADDRESS(_blobs, StubRoutines::throw_NullPointerException_at_call_entry());
  SET_ADDRESS(_blobs, StubRoutines::throw_StackOverflowError_entry());
  SET_ADDRESS(_blobs, StubRoutines::throw_delayed_StackOverflowError_entry());

  assert(_blobs_length <= _shared_blobs_max, "increase _shared_blobs_max to %d", _blobs_length);
  _final_blobs_length = _blobs_length;
  _complete = true;
  log_info(scc,init)("External addresses and stubs recorded");
}

// Record entry points of the C2 (Opto) runtime blobs into the _C2_blobs
// section of the address table. Extends the blob table past the shared
// blobs; sets _opto_complete when done.
void SCAddressTable::init_opto() {
#ifdef COMPILER2
  // OptoRuntime Blobs
  SET_ADDRESS(_C2_blobs, OptoRuntime::exception_blob()->entry_point());
  SET_ADDRESS(_C2_blobs, OptoRuntime::new_instance_Java());
  SET_ADDRESS(_C2_blobs, OptoRuntime::new_array_Java());
  SET_ADDRESS(_C2_blobs, OptoRuntime::new_array_nozero_Java());
  SET_ADDRESS(_C2_blobs, OptoRuntime::multianewarray2_Java());
  SET_ADDRESS(_C2_blobs, OptoRuntime::multianewarray3_Java());
  SET_ADDRESS(_C2_blobs, OptoRuntime::multianewarray4_Java());
  SET_ADDRESS(_C2_blobs, OptoRuntime::multianewarray5_Java());
  SET_ADDRESS(_C2_blobs, OptoRuntime::multianewarrayN_Java());
  SET_ADDRESS(_C2_blobs, OptoRuntime::vtable_must_compile_stub());
  SET_ADDRESS(_C2_blobs, OptoRuntime::complete_monitor_locking_Java());
// (continuation of SCAddressTable::init_opto)
  SET_ADDRESS(_C2_blobs, OptoRuntime::monitor_notify_Java());
  SET_ADDRESS(_C2_blobs, OptoRuntime::monitor_notifyAll_Java());
  SET_ADDRESS(_C2_blobs, OptoRuntime::rethrow_stub());
  SET_ADDRESS(_C2_blobs, OptoRuntime::slow_arraycopy_Java());
  SET_ADDRESS(_C2_blobs, OptoRuntime::register_finalizer_Java());
  SET_ADDRESS(_C2_blobs, OptoRuntime::class_init_barrier_Java());
  SET_ADDRESS(_C2_blobs, OptoRuntime::notify_jvmti_vthread_start());
  SET_ADDRESS(_C2_blobs, OptoRuntime::notify_jvmti_vthread_end());
  SET_ADDRESS(_C2_blobs, OptoRuntime::notify_jvmti_vthread_mount());
  SET_ADDRESS(_C2_blobs, OptoRuntime::notify_jvmti_vthread_unmount());
#endif

  assert(_C2_blobs_length <= _C2_blobs_max, "increase _C2_blobs_max to %d", _C2_blobs_length);
  // The C2 blob ids live after the shared blob ids in the combined table.
  _final_blobs_length = MAX2(_final_blobs_length, (_shared_blobs_max + _C2_blobs_length));
  _opto_complete = true;
  log_info(scc,init)("OptoRuntime Blobs recorded");
}

// Record entry points of the C1 (Runtime1) stubs and GC-specific C1 barrier
// blobs into the _C1_blobs section. Missing blobs/entries are logged and
// skipped rather than treated as fatal. Sets _c1_complete when done.
void SCAddressTable::init_c1() {
#ifdef COMPILER1
  // Runtime1 Blobs
  for (int i = 0; i < Runtime1::number_of_ids; i++) {
    Runtime1::StubID id = (Runtime1::StubID)i;
    if (Runtime1::blob_for(id) == nullptr) {
      log_info(scc, init)("C1 blob %s is missing", Runtime1::name_for(id));
      continue;
    }
    if (Runtime1::entry_for(id) == nullptr) {
      log_info(scc, init)("C1 blob %s is missing entry", Runtime1::name_for(id));
      continue;
    }
    address entry = Runtime1::entry_for(id);
    SET_ADDRESS(_C1_blobs, entry);
  }
#if INCLUDE_G1GC
  if (UseG1GC) {
    // G1 pre/post write-barrier slow-path blobs generated for C1.
    G1BarrierSetC1* bs = (G1BarrierSetC1*)BarrierSet::barrier_set()->barrier_set_c1();
    address entry = bs->pre_barrier_c1_runtime_code_blob()->code_begin();
    SET_ADDRESS(_C1_blobs, entry);
    entry = bs->post_barrier_c1_runtime_code_blob()->code_begin();
    SET_ADDRESS(_C1_blobs, entry);
  }
#endif // INCLUDE_G1GC
#if INCLUDE_ZGC
  if (UseZGC) {
    // ZGC load/store barrier runtime stubs generated for C1.
    ZBarrierSetC1* bs = (ZBarrierSetC1*)BarrierSet::barrier_set()->barrier_set_c1();
    SET_ADDRESS(_C1_blobs, bs->_load_barrier_on_oop_field_preloaded_runtime_stub);
    SET_ADDRESS(_C1_blobs, bs->_load_barrier_on_weak_oop_field_preloaded_runtime_stub);
    SET_ADDRESS(_C1_blobs, bs->_store_barrier_on_oop_field_with_healing);
    SET_ADDRESS(_C1_blobs, bs->_store_barrier_on_oop_field_without_healing);
  }
#endif // INCLUDE_ZGC
#endif // COMPILER1

  assert(_C1_blobs_length <= _C1_blobs_max, "increase _C1_blobs_max to %d", _C1_blobs_length);
  // C1 blob ids live after the shared and C2 blob id ranges.
  _final_blobs_length = MAX2(_final_blobs_length, (_shared_blobs_max + _C2_blobs_max + _C1_blobs_length));
  _c1_complete = true;
  log_info(scc,init)("Runtime1 Blobs recorded");
}

#undef SET_ADDRESS
#undef _extrs_max
#undef _stubs_max
#undef _blobs_max
#undef _shared_blobs_max
#undef _C1_blobs_max
#undef _C2_blobs_max

// Release the C-heap arrays backing the address table.
SCAddressTable::~SCAddressTable() {
  if (_extrs_addr != nullptr) {
    FREE_C_HEAP_ARRAY(address, _extrs_addr);
  }
  if (_stubs_addr != nullptr) {
    FREE_C_HEAP_ARRAY(address, _stubs_addr);
  }
  if (_blobs_addr != nullptr) {
    FREE_C_HEAP_ARRAY(address, _blobs_addr);
  }
}

// Per-process registry of C string constants referenced from cached code.
// Parallel arrays indexed 0.._C_strings_count-1; see id_for_C_string() for
// how ids are assigned on first use.
#define MAX_STR_COUNT 200
static const char* _C_strings[MAX_STR_COUNT] = {nullptr}; // string addresses seen at record time
static int _C_strings_count = 0;                          // number of distinct addresses recorded
static int _C_strings_s[MAX_STR_COUNT] = {0};             // id -> index of the string recorded for that id
static int _C_strings_id[MAX_STR_COUNT] = {0};            // index -> assigned id (-1 until assigned)
static int _C_strings_len[MAX_STR_COUNT] = {0};           // string lengths
static int _C_strings_hash[MAX_STR_COUNT] = {0};          // java_lang_String-style hashes
static int _C_strings_used = 0;                           // number of ids handed out / stored

// Populate the C-string tables from the string section of a loaded cache.
// Layout written by store_strings(): counts of sizes, then hashes, then the
// NUL-terminated string bytes back to back.
void SCCache::load_strings() {
  uint strings_count = _load_header->strings_count();
  if (strings_count == 0) {
    return;
  }
  uint strings_offset = _load_header->strings_offset();
  uint strings_size = _load_header->entries_offset() - strings_offset;
  uint data_size = (uint)(strings_count * sizeof(uint));
  uint* sizes =
(uint*)addr(strings_offset);
  uint* hashs = (uint*)addr(strings_offset + data_size);
  strings_size -= 2 * data_size;
  // String bytes start after the two uint arrays (sizes then hashes).
  _C_strings_buf = addr(strings_offset + 2 * data_size);
  const char* p = _C_strings_buf;
  assert(strings_count <= MAX_STR_COUNT, "sanity");
  for (uint i = 0; i < strings_count; i++) {
    _C_strings[i] = p;
    uint len = sizes[i];
    // Loaded strings are already in id order, so index == id.
    _C_strings_s[i] = i;
    _C_strings_id[i] = i;
    _C_strings_len[i] = len;
    _C_strings_hash[i] = hashs[i];
    p += len; // len includes the terminating NUL (see store_strings)
  }
  assert((uint)(p - _C_strings_buf) <= strings_size, "(" INTPTR_FORMAT " - " INTPTR_FORMAT ") = %d > %d ", p2i(p), p2i(_C_strings_buf), (uint)(p - _C_strings_buf), strings_size);
  _C_strings_count = strings_count;
  _C_strings_used = strings_count;
  log_info(scc, init)("Load %d C strings at offset %d from Startup Code Cache '%s'", _C_strings_count, strings_offset, _cache_path);
}

// Write the recorded C strings to the cache at the current write position.
// Layout: per-string sizes (uint, NUL included), then per-string hashes
// (uint), then the string bytes. Returns the number of strings written, or
// -1 on a short write.
int SCCache::store_strings() {
  uint offset = _write_position;
  uint length = 0;
  if (_C_strings_used > 0) {
    // Write sizes first
    for (int i = 0; i < _C_strings_used; i++) {
      uint len = _C_strings_len[i] + 1; // Include 0
      length += len;
      assert(len < 1000, "big string: %s", _C_strings[i]);
      uint n = write_bytes(&len, sizeof(uint));
      if (n != sizeof(uint)) {
        return -1;
      }
    }
    // Write hashs
    for (int i = 0; i < _C_strings_used; i++) {
      uint n = write_bytes(&(_C_strings_hash[i]), sizeof(uint));
      if (n != sizeof(uint)) {
        return -1;
      }
    }
    // Write string bytes in id order (_C_strings_s maps id -> record index).
    for (int i = 0; i < _C_strings_used; i++) {
      uint len = _C_strings_len[i] + 1; // Include 0
      uint n = write_bytes(_C_strings[_C_strings_s[i]], len);
      if (n != len) {
        return -1;
      }
    }
    log_info(scc, exit)("Wrote %d C strings of total length %d at offset %d to Startup Code Cache '%s'",
                        _C_strings_used, length, offset, _cache_path);
  }
  return _C_strings_used;
}

void
SCCache::add_new_C_string(const char* str) {
  // Forward to the address table; only legal while recording (writing) code.
  assert(for_write(), "only when storing code");
  _table->add_C_string(str);
}

// Record the address of a C string constant so references to it from
// generated code can later be encoded by id. Only active once the base
// table and at least one compiler table are initialized; duplicates (by
// address) are ignored.
void SCAddressTable::add_C_string(const char* str) {
  if (str != nullptr && _complete && (_opto_complete || _c1_complete)) {
    // Check previous strings address
    for (int i = 0; i < _C_strings_count; i++) {
      if (_C_strings[i] == str) {
        return; // Found existing one
      }
    }
    // Add new one
    if (_C_strings_count < MAX_STR_COUNT) {
      log_trace(scc)("add_C_string: [%d] " INTPTR_FORMAT " %s", _C_strings_count, p2i(str), str);
      _C_strings_id[_C_strings_count] = -1; // Init: id assigned lazily in id_for_C_string()
      _C_strings[_C_strings_count++] = str;
    } else {
      // Table overflow: warn (with the current compile task for context)
      // rather than assert; the string simply won't be id-encoded.
      CompileTask* task = ciEnv::current()->task();
      log_warning(scc)("%d (L%d): Number of C strings > max %d %s",
                       task->compile_id(), task->comp_level(), MAX_STR_COUNT, str);
    }
  }
}

// Return the table id for a recorded C string address, assigning an id on
// first use. Strings with identical content (same length and hash) share
// one id. Returns -1 if the address was never recorded.
int SCAddressTable::id_for_C_string(address str) {
  for (int i = 0; i < _C_strings_count; i++) {
    if (_C_strings[i] == (const char*)str) { // found
      int id = _C_strings_id[i];
      if (id >= 0) {
        assert(id < _C_strings_used, "%d >= %d", id, _C_strings_used);
        return id; // Found recorded
      }
      // Search for the same string content
      int len = (int)strlen((const char*)str);
      int hash = java_lang_String::hash_code((const jbyte*)str, len);
      for (int j = 0; j < _C_strings_used; j++) {
        if ((_C_strings_len[j] == len) && (_C_strings_hash[j] == hash)) {
          _C_strings_id[i] = j; // Found match
          return j;
        }
      }
      // Not found in recorded, add new
      id = _C_strings_used++;
      _C_strings_s[id] = i;
      _C_strings_id[i] = id;
      _C_strings_len[id] = len;
      _C_strings_hash[id] = hash;
      return id;
    }
  }
  return -1;
}

// Reverse mapping: table index -> recorded string address.
address SCAddressTable::address_for_C_string(int idx) {
  assert(idx < _C_strings_count, "sanity");
  return (address)_C_strings[idx];
}

// Linear search for addr in table; returns its index or -1 if absent.
int search_address(address addr, address* table, uint length) {
  for (int i = 0; i < (int)length; i++) {
    if (table[i] == addr) {
      return i;
    }
  }
  return -1;
}

// Decode a table id back to a runtime address. The id space is layered:
//   [0 .. _extrs_length)                     -> external runtime addresses
//   [.. + _stubs_length)                     -> stub entries
//   [.. + _final_blobs_length)               -> blob entries
//   [_all_max .. _all_max+_C_strings_count)  -> C string addresses
//   ids above that range encode a raw distance from os::init
//   (see the fallback in id_for_address()).
address SCAddressTable::address_for_id(int idx) {
  if (!_complete) {
    fatal("SCA table is not complete");
  }
  if (idx == -1) {
    return (address)-1; // sentinel used for self-referencing static call stubs
  }
  uint id = (uint)idx;
  if (id >= _all_max && idx < (_all_max + _C_strings_count)) {
    return address_for_C_string(idx - _all_max);
  }
  if (idx < 0 || id == (_extrs_length + _stubs_length + _final_blobs_length)) {
    fatal("Incorrect id %d for SCA table", id);
  }
  if (idx > (_all_max + _C_strings_count)) {
    // Distance-encoded id: offset from os::init (see id_for_address()).
    return (address)os::init + idx;
  }
  // Walk the layered ranges: externals, then stubs, then blobs.
  if (id < _extrs_length) {
    return _extrs_addr[id];
  }
  id -= _extrs_length;
  if (id < _stubs_length) {
    return _stubs_addr[id];
  }
  id -= _stubs_length;
  if (id < _final_blobs_length) {
    return _blobs_addr[id];
  }
  return nullptr;
}

// Encode a runtime address as a stable table id (inverse of
// address_for_id()). Unknown addresses are fatal, except for the one
// recoverable case: an address inside a loaded library past a known
// function symbol, which is assumed to be a C string constant and encoded
// as its distance from os::init. reloc/buffer are only used for diagnostics
// on the fatal paths.
int SCAddressTable::id_for_address(address addr, RelocIterator reloc, CodeBuffer* buffer) {
  int id = -1;
  if (addr == (address)-1) { // Static call stub has jump to itself
    return id;
  }
  if (!_complete) {
    fatal("SCA table is not complete");
  }
  // Search for C string
  id = id_for_C_string(addr);
  if (id >= 0) {
    return id + _all_max; // C string ids live above all table ranges
  }
  if (StubRoutines::contains(addr)) {
    // Search in stubs
    id = search_address(addr, _stubs_addr, _stubs_length);
    if (id < 0) {
      StubCodeDesc* desc = StubCodeDesc::desc_for(addr);
      if (desc == nullptr) {
        // Retry with the return-address adjustment, in case addr is a
        // return PC into a stub rather than its entry.
        desc = StubCodeDesc::desc_for(addr + frame::pc_return_offset);
      }
      const char* sub_name = (desc != nullptr) ? desc->name() : "<unknown>";
      fatal("Address " INTPTR_FORMAT " for Stub:%s is missing in SCA table", p2i(addr), sub_name);
    } else {
      id += _extrs_length; // stub ids follow the externals range
    }
  } else {
    CodeBlob* cb = CodeCache::find_blob(addr);
    if (cb != nullptr) {
      // Search in code blobs
      id = search_address(addr, _blobs_addr, _final_blobs_length);
      if (id < 0) {
        fatal("Address " INTPTR_FORMAT " for Blob:%s is missing in SCA table", p2i(addr), cb->name());
      } else {
        id += _extrs_length + _stubs_length; // blob ids follow externals + stubs
      }
    } else {
      // Search in runtime functions
      id = search_address(addr, _extrs_addr, _extrs_length);
      if (id < 0) {
        ResourceMark rm;
        const int buflen = 1024;
        char* func_name = NEW_RESOURCE_ARRAY(char, buflen);
        int offset = 0;
        if (os::dll_address_to_function_name(addr, func_name, buflen, &offset)) {
          if (offset > 0) {
            // Could be address of C string: encode as distance from os::init
            // (decoded by address_for_id()). Must not collide with table ids.
            uint dist = (uint)pointer_delta(addr, (address)os::init, 1);
            CompileTask* task = ciEnv::current()->task();
            uint compile_id = 0;
            uint comp_level = 0;
            if (task != nullptr) { // this could be called from compiler runtime initialization (compiler blobs)
              compile_id = task->compile_id();
              comp_level = task->comp_level();
            }
            log_info(scc)("%d (L%d): Address " INTPTR_FORMAT " (offset %d) for runtime target '%s' is missing in SCA table",
                          compile_id, comp_level, p2i(addr), dist, (const char*)addr);
            assert(dist > (uint)(_all_max + MAX_STR_COUNT), "change encoding of distance");
            return dist;
          }
          fatal("Address " INTPTR_FORMAT " for runtime target '%s+%d' is missing in SCA table", p2i(addr), func_name, offset);
        } else {
          // No symbol at all: dump as much context as possible before dying.
          os::print_location(tty, p2i(addr), true);
          reloc.print_current_on(tty);
#ifndef PRODUCT
          buffer->print_on(tty);
          buffer->decode();
#endif // !PRODUCT
          fatal("Address " INTPTR_FORMAT " for <unknown> is missing in SCA table", p2i(addr));
        }
      }
    }
  }
  return id;
}