1 /* 2 * Copyright (c) 2025, Oracle and/or its affiliates. All rights reserved. 3 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. 4 * 5 * This code is free software; you can redistribute it and/or modify it 6 * under the terms of the GNU General Public License version 2 only, as 7 * published by the Free Software Foundation. 8 * 9 * This code is distributed in the hope that it will be useful, but WITHOUT 10 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or 11 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License 12 * version 2 for more details (a copy is included in the LICENSE file that 13 * accompanied this code). 14 * 15 * You should have received a copy of the GNU General Public License version 16 * 2 along with this work; if not, write to the Free Software Foundation, 17 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. 18 * 19 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA 20 * or visit www.oracle.com if you need additional information or have any 21 * questions. 22 * 23 */ 24 25 26 #include "asm/macroAssembler.hpp" 27 #include "cds/aotCacheAccess.hpp" 28 #include "cds/cds_globals.hpp" 29 #include "cds/cdsConfig.hpp" 30 #include "cds/heapShared.hpp" 31 #include "cds/metaspaceShared.hpp" 32 #include "classfile/javaAssertions.hpp" 33 #include "code/aotCodeCache.hpp" 34 #include "code/codeCache.hpp" 35 #include "gc/shared/gcConfig.hpp" 36 #include "logging/logStream.hpp" 37 #include "memory/memoryReserver.hpp" 38 #include "runtime/deoptimization.hpp" 39 #include "runtime/flags/flagSetting.hpp" 40 #include "runtime/globals_extension.hpp" 41 #include "runtime/java.hpp" 42 #include "runtime/mutexLocker.hpp" 43 #include "runtime/os.inline.hpp" 44 #include "runtime/sharedRuntime.hpp" 45 #include "runtime/stubRoutines.hpp" 46 #include "utilities/copy.hpp" 47 #ifdef COMPILER1 48 #include "c1/c1_Runtime1.hpp" 49 #endif 50 #ifdef COMPILER2 51 #include "opto/runtime.hpp" 52 #endif 53 #if INCLUDE_G1GC 54 #include "gc/g1/g1BarrierSetRuntime.hpp" 55 #endif 56 #if INCLUDE_SHENANDOAHGC 57 #include "gc/shenandoah/shenandoahRuntime.hpp" 58 #endif 59 #if INCLUDE_ZGC 60 #include "gc/z/zBarrierSetRuntime.hpp" 61 #endif 62 63 #include <sys/stat.h> 64 #include <errno.h> 65 66 const char* aot_code_entry_kind_name[] = { 67 #define DECL_KIND_STRING(kind) XSTR(kind), 68 DO_AOTCODEENTRY_KIND(DECL_KIND_STRING) 69 #undef DECL_KIND_STRING 70 }; 71 72 static void report_load_failure() { 73 if (AbortVMOnAOTCodeFailure) { 74 vm_exit_during_initialization("Unable to use AOT Code Cache.", nullptr); 75 } 76 log_info(aot, codecache, init)("Unable to use AOT Code Cache."); 77 AOTAdapterCaching = false; 78 AOTStubCaching = false; 79 } 80 81 static void report_store_failure() { 82 if (AbortVMOnAOTCodeFailure) { 83 tty->print_cr("Unable to create AOT Code Cache."); 84 vm_abort(false); 85 } 86 log_info(aot, codecache, exit)("Unable to create AOT Code Cache."); 87 AOTAdapterCaching = false; 88 AOTStubCaching = false; 89 } 90 91 bool AOTCodeCache::is_dumping_adapter() { 92 return AOTAdapterCaching && is_on_for_dump(); 93 } 94 95 bool AOTCodeCache::is_using_adapter() { 96 return AOTAdapterCaching && is_on_for_use(); 97 } 98 99 bool AOTCodeCache::is_dumping_stub() { 100 return AOTStubCaching && is_on_for_dump(); 101 } 102 103 bool AOTCodeCache::is_using_stub() { 104 return AOTStubCaching && is_on_for_use(); 105 } 106 107 static uint32_t encode_id(AOTCodeEntry::Kind kind, int id) { 108 assert(AOTCodeEntry::is_valid_entry_kind(kind), 
"invalid AOTCodeEntry kind %d", (int)kind); 109 // There can be a conflict of id between an Adapter and *Blob, but that should not cause any functional issue 110 // becasue both id and kind are used to find an entry, and that combination should be unique 111 if (kind == AOTCodeEntry::Adapter) { 112 return id; 113 } else if (kind == AOTCodeEntry::SharedBlob) { 114 return id; 115 } else if (kind == AOTCodeEntry::C1Blob) { 116 return (int)SharedStubId::NUM_STUBIDS + id; 117 } else { 118 // kind must be AOTCodeEntry::C2Blob 119 return (int)SharedStubId::NUM_STUBIDS + COMPILER1_PRESENT((int)C1StubId::NUM_STUBIDS) + id; 120 } 121 } 122 123 static uint _max_aot_code_size = 0; 124 uint AOTCodeCache::max_aot_code_size() { 125 return _max_aot_code_size; 126 } 127 128 void AOTCodeCache::initialize() { 129 #if defined(ZERO) || !(defined(AMD64) || defined(AARCH64)) 130 log_info(aot, codecache, init)("AOT Code Cache is not supported on this platform."); 131 AOTAdapterCaching = false; 132 AOTStubCaching = false; 133 return; 134 #else 135 if (FLAG_IS_DEFAULT(AOTCache)) { 136 log_info(aot, codecache, init)("AOT Code Cache is not used: AOTCache is not specified."); 137 AOTAdapterCaching = false; 138 AOTStubCaching = false; 139 return; // AOTCache must be specified to dump and use AOT code 140 } 141 142 // Disable stubs caching until JDK-8357398 is fixed. 143 FLAG_SET_ERGO(AOTStubCaching, false); 144 145 if (VerifyOops) { 146 // Disable AOT stubs caching when VerifyOops flag is on. 147 // Verify oops code generated a lot of C strings which overflow 148 // AOT C string table (which has fixed size). 149 // AOT C string table will be reworked later to handle such cases. 150 // 151 // Note: AOT adapters are not affected - they don't have oop operations. 152 log_info(aot, codecache, init)("AOT Stubs Caching is not supported with VerifyOops."); 153 FLAG_SET_ERGO(AOTStubCaching, false); 154 } 155 156 bool is_dumping = false; 157 bool is_using = false; 158 if (CDSConfig::is_dumping_final_static_archive() && CDSConfig::is_dumping_aot_linked_classes()) { 159 FLAG_SET_ERGO_IF_DEFAULT(AOTAdapterCaching, true); 160 FLAG_SET_ERGO_IF_DEFAULT(AOTStubCaching, true); 161 is_dumping = true; 162 } else if (CDSConfig::is_using_archive() && CDSConfig::is_using_aot_linked_classes()) { 163 FLAG_SET_ERGO_IF_DEFAULT(AOTAdapterCaching, true); 164 FLAG_SET_ERGO_IF_DEFAULT(AOTStubCaching, true); 165 is_using = true; 166 } else { 167 log_info(aot, codecache, init)("AOT Code Cache is not used: AOT Class Linking is not used."); 168 return; // nothing to do 169 } 170 if (!AOTAdapterCaching && !AOTStubCaching) { 171 return; // AOT code caching disabled on command line 172 } 173 _max_aot_code_size = AOTCodeMaxSize; 174 if (!FLAG_IS_DEFAULT(AOTCodeMaxSize)) { 175 if (!is_aligned(AOTCodeMaxSize, os::vm_allocation_granularity())) { 176 _max_aot_code_size = align_up(AOTCodeMaxSize, os::vm_allocation_granularity()); 177 log_debug(aot,codecache,init)("Max AOT Code Cache size is aligned up to %uK", (int)(max_aot_code_size()/K)); 178 } 179 } 180 size_t aot_code_size = is_using ? 
AOTCacheAccess::get_aot_code_region_size() : 0; 181 if (is_using && aot_code_size == 0) { 182 log_info(aot, codecache, init)("AOT Code Cache is empty"); 183 return; 184 } 185 if (!open_cache(is_dumping, is_using)) { 186 if (is_using) { 187 report_load_failure(); 188 } else { 189 report_store_failure(); 190 } 191 return; 192 } 193 if (is_dumping) { 194 FLAG_SET_DEFAULT(ForceUnreachable, true); 195 } 196 FLAG_SET_DEFAULT(DelayCompilerStubsGeneration, false); 197 #endif // defined(AMD64) || defined(AARCH64) 198 } 199 200 void AOTCodeCache::init2() { 201 if (!is_on()) { 202 return; 203 } 204 if (!verify_vm_config()) { 205 close(); 206 report_load_failure(); 207 } 208 209 // initialize the table of external routines so we can save 210 // generated code blobs that reference them 211 init_extrs_table(); 212 init_early_stubs_table(); 213 } 214 215 AOTCodeCache* AOTCodeCache::_cache = nullptr; 216 217 bool AOTCodeCache::open_cache(bool is_dumping, bool is_using) { 218 AOTCodeCache* cache = new AOTCodeCache(is_dumping, is_using); 219 if (cache->failed()) { 220 delete cache; 221 _cache = nullptr; 222 return false; 223 } 224 _cache = cache; 225 return true; 226 } 227 228 void AOTCodeCache::close() { 229 if (is_on()) { 230 delete _cache; // Free memory 231 _cache = nullptr; 232 } 233 } 234 235 #define DATA_ALIGNMENT HeapWordSize 236 237 AOTCodeCache::AOTCodeCache(bool is_dumping, bool is_using) : 238 _load_header(nullptr), 239 _load_buffer(nullptr), 240 _store_buffer(nullptr), 241 _C_store_buffer(nullptr), 242 _write_position(0), 243 _load_size(0), 244 _store_size(0), 245 _for_use(is_using), 246 _for_dump(is_dumping), 247 _closing(false), 248 _failed(false), 249 _lookup_failed(false), 250 _table(nullptr), 251 _load_entries(nullptr), 252 _search_entries(nullptr), 253 _store_entries(nullptr), 254 _C_strings_buf(nullptr), 255 _store_entries_cnt(0) 256 { 257 // Read header at the begining of cache 258 if (_for_use) { 259 // Read cache 260 size_t load_size = AOTCacheAccess::get_aot_code_region_size(); 261 ReservedSpace rs = MemoryReserver::reserve(load_size, mtCode); 262 if (!rs.is_reserved()) { 263 log_warning(aot, codecache, init)("Failed to reserved %u bytes of memory for mapping AOT code region into AOT Code Cache", (uint)load_size); 264 set_failed(); 265 return; 266 } 267 if (!AOTCacheAccess::map_aot_code_region(rs)) { 268 log_warning(aot, codecache, init)("Failed to read/mmap cached code region into AOT Code Cache"); 269 set_failed(); 270 return; 271 } 272 273 _load_size = (uint)load_size; 274 _load_buffer = (char*)rs.base(); 275 assert(is_aligned(_load_buffer, DATA_ALIGNMENT), "load_buffer is not aligned"); 276 log_debug(aot, codecache, init)("Mapped %u bytes at address " INTPTR_FORMAT " at AOT Code Cache", _load_size, p2i(_load_buffer)); 277 278 _load_header = (Header*)addr(0); 279 if (!_load_header->verify_config(_load_size)) { 280 set_failed(); 281 return; 282 } 283 log_info (aot, codecache, init)("Loaded %u AOT code entries from AOT Code Cache", _load_header->entries_count()); 284 log_debug(aot, codecache, init)(" Adapters: total=%u", _load_header->adapters_count()); 285 log_debug(aot, codecache, init)(" Shared Blobs: total=%u", _load_header->shared_blobs_count()); 286 log_debug(aot, codecache, init)(" C1 Blobs: total=%u", _load_header->C1_blobs_count()); 287 log_debug(aot, codecache, init)(" C2 Blobs: total=%u", _load_header->C2_blobs_count()); 288 log_debug(aot, codecache, init)(" AOT code cache size: %u bytes", _load_header->cache_size()); 289 290 // Read strings 291 load_strings(); 292 } 293 
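
  // Dump-side store buffer layout - a sketch based on the allocation below and
  // on reserve_bytes()/write_bytes(): code and data are written forward from
  // _store_buffer, while AOTCodeEntry records are stacked backward (as on a
  // stack) from the end of the buffer; a write fails with a request to
  // increase AOTCodeMaxSize once the two regions would meet.
  //
  //   _store_buffer --> code/data grows up ...   ... entries grow down <-- buffer end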
if (_for_dump) { 294 _C_store_buffer = NEW_C_HEAP_ARRAY(char, max_aot_code_size() + DATA_ALIGNMENT, mtCode); 295 _store_buffer = align_up(_C_store_buffer, DATA_ALIGNMENT); 296 // Entries allocated at the end of buffer in reverse (as on stack). 297 _store_entries = (AOTCodeEntry*)align_up(_C_store_buffer + max_aot_code_size(), DATA_ALIGNMENT); 298 log_debug(aot, codecache, init)("Allocated store buffer at address " INTPTR_FORMAT " of size %u", p2i(_store_buffer), max_aot_code_size()); 299 } 300 _table = new AOTCodeAddressTable(); 301 } 302 303 void AOTCodeCache::init_extrs_table() { 304 AOTCodeAddressTable* table = addr_table(); 305 if (table != nullptr) { 306 table->init_extrs(); 307 } 308 } 309 310 void AOTCodeCache::init_early_stubs_table() { 311 AOTCodeAddressTable* table = addr_table(); 312 if (table != nullptr) { 313 table->init_early_stubs(); 314 } 315 } 316 317 void AOTCodeCache::init_shared_blobs_table() { 318 AOTCodeAddressTable* table = addr_table(); 319 if (table != nullptr) { 320 table->init_shared_blobs(); 321 } 322 } 323 324 void AOTCodeCache::init_early_c1_table() { 325 AOTCodeAddressTable* table = addr_table(); 326 if (table != nullptr) { 327 table->init_early_c1(); 328 } 329 } 330 331 AOTCodeCache::~AOTCodeCache() { 332 if (_closing) { 333 return; // Already closed 334 } 335 // Stop any further access to cache. 336 _closing = true; 337 338 MutexLocker ml(Compile_lock); 339 if (for_dump()) { // Finalize cache 340 finish_write(); 341 } 342 _load_buffer = nullptr; 343 if (_C_store_buffer != nullptr) { 344 FREE_C_HEAP_ARRAY(char, _C_store_buffer); 345 _C_store_buffer = nullptr; 346 _store_buffer = nullptr; 347 } 348 if (_table != nullptr) { 349 delete _table; 350 _table = nullptr; 351 } 352 } 353 354 void AOTCodeCache::Config::record() { 355 _flags = 0; 356 #ifdef ASSERT 357 _flags |= debugVM; 358 #endif 359 if (UseCompressedOops) { 360 _flags |= compressedOops; 361 } 362 if (UseCompressedClassPointers) { 363 _flags |= compressedClassPointers; 364 } 365 if (UseTLAB) { 366 _flags |= useTLAB; 367 } 368 if (JavaAssertions::systemClassDefault()) { 369 _flags |= systemClassAssertions; 370 } 371 if (JavaAssertions::userClassDefault()) { 372 _flags |= userClassAssertions; 373 } 374 if (EnableContended) { 375 _flags |= enableContendedPadding; 376 } 377 if (RestrictContended) { 378 _flags |= restrictContendedPadding; 379 } 380 _compressedOopShift = CompressedOops::shift(); 381 _compressedOopBase = CompressedOops::base(); 382 _compressedKlassShift = CompressedKlassPointers::shift(); 383 _contendedPaddingWidth = ContendedPaddingWidth; 384 _objectAlignment = ObjectAlignmentInBytes; 385 _gc = (uint)Universe::heap()->kind(); 386 } 387 388 bool AOTCodeCache::Config::verify() const { 389 #ifdef ASSERT 390 if ((_flags & debugVM) == 0) { 391 log_debug(aot, codecache, init)("AOT Code Cache disabled: it was created by product VM, it can't be used by debug VM"); 392 return false; 393 } 394 #else 395 if ((_flags & debugVM) != 0) { 396 log_debug(aot, codecache, init)("AOT Code Cache disabled: it was created by debug VM, it can't be used by product VM"); 397 return false; 398 } 399 #endif 400 401 CollectedHeap::Name aot_gc = (CollectedHeap::Name)_gc; 402 if (aot_gc != Universe::heap()->kind()) { 403 log_debug(aot, codecache, init)("AOT Code Cache disabled: it was created with different GC: %s vs current %s", GCConfig::hs_err_name(aot_gc), GCConfig::hs_err_name()); 404 return false; 405 } 406 407 if (((_flags & compressedOops) != 0) != UseCompressedOops) { 408 log_debug(aot, codecache, init)("AOT Code 
Cache disabled: it was created with UseCompressedOops = %s", UseCompressedOops ? "false" : "true"); 409 return false; 410 } 411 if (((_flags & compressedClassPointers) != 0) != UseCompressedClassPointers) { 412 log_debug(aot, codecache, init)("AOT Code Cache disabled: it was created with UseCompressedClassPointers = %s", UseCompressedClassPointers ? "false" : "true"); 413 return false; 414 } 415 416 if (((_flags & systemClassAssertions) != 0) != JavaAssertions::systemClassDefault()) { 417 log_debug(aot, codecache, init)("AOT Code Cache disabled: it was created with JavaAssertions::systemClassDefault() = %s", JavaAssertions::systemClassDefault() ? "disabled" : "enabled"); 418 return false; 419 } 420 if (((_flags & userClassAssertions) != 0) != JavaAssertions::userClassDefault()) { 421 log_debug(aot, codecache, init)("AOT Code Cache disabled: it was created with JavaAssertions::userClassDefault() = %s", JavaAssertions::userClassDefault() ? "disabled" : "enabled"); 422 return false; 423 } 424 425 if (((_flags & enableContendedPadding) != 0) != EnableContended) { 426 log_debug(aot, codecache, init)("AOT Code Cache disabled: it was created with EnableContended = %s", EnableContended ? "false" : "true"); 427 return false; 428 } 429 if (((_flags & restrictContendedPadding) != 0) != RestrictContended) { 430 log_debug(aot, codecache, init)("AOT Code Cache disabled: it was created with RestrictContended = %s", RestrictContended ? "false" : "true"); 431 return false; 432 } 433 if (_compressedOopShift != (uint)CompressedOops::shift()) { 434 log_debug(aot, codecache, init)("AOT Code Cache disabled: it was created with different CompressedOops::shift(): %d vs current %d", _compressedOopShift, CompressedOops::shift()); 435 return false; 436 } 437 if (_compressedKlassShift != (uint)CompressedKlassPointers::shift()) { 438 log_debug(aot, codecache, init)("AOT Code Cache disabled: it was created with CompressedKlassPointers::shift() = %d vs current %d", _compressedKlassShift, CompressedKlassPointers::shift()); 439 return false; 440 } 441 if (_contendedPaddingWidth != (uint)ContendedPaddingWidth) { 442 log_debug(aot, codecache, init)("AOT Code Cache disabled: it was created with ContendedPaddingWidth = %d vs current %d", _contendedPaddingWidth, ContendedPaddingWidth); 443 return false; 444 } 445 if (_objectAlignment != (uint)ObjectAlignmentInBytes) { 446 log_debug(aot, codecache, init)("AOT Code Cache disabled: it was created with ObjectAlignmentInBytes = %d vs current %d", _objectAlignment, ObjectAlignmentInBytes); 447 return false; 448 } 449 450 // This should be the last check as it only disables AOTStubCaching 451 if ((_compressedOopBase == nullptr || CompressedOops::base() == nullptr) && (_compressedOopBase != CompressedOops::base())) { 452 log_debug(aot, codecache, init)("AOTStubCaching is disabled: incompatible CompressedOops::base(): %p vs current %p", _compressedOopBase, CompressedOops::base()); 453 AOTStubCaching = false; 454 } 455 456 return true; 457 } 458 459 bool AOTCodeCache::Header::verify_config(uint load_size) const { 460 if (_version != AOT_CODE_VERSION) { 461 log_debug(aot, codecache, init)("AOT Code Cache disabled: different AOT Code version %d vs %d recorded in AOT Code header", AOT_CODE_VERSION, _version); 462 return false; 463 } 464 if (load_size < _cache_size) { 465 log_debug(aot, codecache, init)("AOT Code Cache disabled: AOT Code Cache size %d < %d recorded in AOT Code header", load_size, _cache_size); 466 return false; 467 } 468 return true; 469 } 470 471 AOTCodeCache* 
AOTCodeCache::open_for_use() { 472 if (AOTCodeCache::is_on_for_use()) { 473 return AOTCodeCache::cache(); 474 } 475 return nullptr; 476 } 477 478 AOTCodeCache* AOTCodeCache::open_for_dump() { 479 if (AOTCodeCache::is_on_for_dump()) { 480 AOTCodeCache* cache = AOTCodeCache::cache(); 481 cache->clear_lookup_failed(); // Reset bit 482 return cache; 483 } 484 return nullptr; 485 } 486 487 void copy_bytes(const char* from, address to, uint size) { 488 assert(size > 0, "sanity"); 489 bool by_words = true; 490 if ((size > 2 * HeapWordSize) && (((intptr_t)from | (intptr_t)to) & (HeapWordSize - 1)) == 0) { 491 // Use wordwise copies if possible: 492 Copy::disjoint_words((HeapWord*)from, 493 (HeapWord*)to, 494 ((size_t)size + HeapWordSize-1) / HeapWordSize); 495 } else { 496 by_words = false; 497 Copy::conjoint_jbytes(from, to, (size_t)size); 498 } 499 log_trace(aot, codecache)("Copied %d bytes as %s from " INTPTR_FORMAT " to " INTPTR_FORMAT, size, (by_words ? "HeapWord" : "bytes"), p2i(from), p2i(to)); 500 } 501 502 AOTCodeReader::AOTCodeReader(AOTCodeCache* cache, AOTCodeEntry* entry) { 503 _cache = cache; 504 _entry = entry; 505 _load_buffer = cache->cache_buffer(); 506 _read_position = 0; 507 _lookup_failed = false; 508 } 509 510 void AOTCodeReader::set_read_position(uint pos) { 511 if (pos == _read_position) { 512 return; 513 } 514 assert(pos < _cache->load_size(), "offset:%d >= file size:%d", pos, _cache->load_size()); 515 _read_position = pos; 516 } 517 518 bool AOTCodeCache::set_write_position(uint pos) { 519 if (pos == _write_position) { 520 return true; 521 } 522 if (_store_size < _write_position) { 523 _store_size = _write_position; // Adjust during write 524 } 525 assert(pos < _store_size, "offset:%d >= file size:%d", pos, _store_size); 526 _write_position = pos; 527 return true; 528 } 529 530 static char align_buffer[256] = { 0 }; 531 532 bool AOTCodeCache::align_write() { 533 // We are not executing code from cache - we copy it by bytes first. 534 // No need for big alignment (or at all). 535 uint padding = DATA_ALIGNMENT - (_write_position & (DATA_ALIGNMENT - 1)); 536 if (padding == DATA_ALIGNMENT) { 537 return true; 538 } 539 uint n = write_bytes((const void*)&align_buffer, padding); 540 if (n != padding) { 541 return false; 542 } 543 log_trace(aot, codecache)("Adjust write alignment in AOT Code Cache"); 544 return true; 545 } 546 547 // Check to see if AOT code cache has required space to store "nbytes" of data 548 address AOTCodeCache::reserve_bytes(uint nbytes) { 549 assert(for_dump(), "Code Cache file is not created"); 550 uint new_position = _write_position + nbytes; 551 if (new_position >= (uint)((char*)_store_entries - _store_buffer)) { 552 log_warning(aot,codecache)("Failed to ensure %d bytes at offset %d in AOT Code Cache. 
Increase AOTCodeMaxSize.", 553 nbytes, _write_position); 554 set_failed(); 555 report_store_failure(); 556 return nullptr; 557 } 558 address buffer = (address)(_store_buffer + _write_position); 559 log_trace(aot, codecache)("Reserved %d bytes at offset %d in AOT Code Cache", nbytes, _write_position); 560 _write_position += nbytes; 561 if (_store_size < _write_position) { 562 _store_size = _write_position; 563 } 564 return buffer; 565 } 566 567 uint AOTCodeCache::write_bytes(const void* buffer, uint nbytes) { 568 assert(for_dump(), "Code Cache file is not created"); 569 if (nbytes == 0) { 570 return 0; 571 } 572 uint new_position = _write_position + nbytes; 573 if (new_position >= (uint)((char*)_store_entries - _store_buffer)) { 574 log_warning(aot, codecache)("Failed to write %d bytes at offset %d to AOT Code Cache. Increase AOTCodeMaxSize.", 575 nbytes, _write_position); 576 set_failed(); 577 report_store_failure(); 578 return 0; 579 } 580 copy_bytes((const char* )buffer, (address)(_store_buffer + _write_position), nbytes); 581 log_trace(aot, codecache)("Wrote %d bytes at offset %d to AOT Code Cache", nbytes, _write_position); 582 _write_position += nbytes; 583 if (_store_size < _write_position) { 584 _store_size = _write_position; 585 } 586 return nbytes; 587 } 588 589 void* AOTCodeEntry::operator new(size_t x, AOTCodeCache* cache) { 590 return (void*)(cache->add_entry()); 591 } 592 593 static bool check_entry(AOTCodeEntry::Kind kind, uint id, AOTCodeEntry* entry) { 594 if (entry->kind() == kind) { 595 assert(entry->id() == id, "sanity"); 596 return true; // Found 597 } 598 return false; 599 } 600 601 AOTCodeEntry* AOTCodeCache::find_entry(AOTCodeEntry::Kind kind, uint id) { 602 assert(_for_use, "sanity"); 603 uint count = _load_header->entries_count(); 604 if (_load_entries == nullptr) { 605 // Read it 606 _search_entries = (uint*)addr(_load_header->entries_offset()); // [id, index] 607 _load_entries = (AOTCodeEntry*)(_search_entries + 2 * count); 608 log_debug(aot, codecache, init)("Read %d entries table at offset %d from AOT Code Cache", count, _load_header->entries_offset()); 609 } 610 // Binary search 611 int l = 0; 612 int h = count - 1; 613 while (l <= h) { 614 int mid = (l + h) >> 1; 615 int ix = mid * 2; 616 uint is = _search_entries[ix]; 617 if (is == id) { 618 int index = _search_entries[ix + 1]; 619 AOTCodeEntry* entry = &(_load_entries[index]); 620 if (check_entry(kind, id, entry)) { 621 return entry; // Found 622 } 623 // Linear search around to handle id collission 624 for (int i = mid - 1; i >= l; i--) { // search back 625 ix = i * 2; 626 is = _search_entries[ix]; 627 if (is != id) { 628 break; 629 } 630 index = _search_entries[ix + 1]; 631 AOTCodeEntry* entry = &(_load_entries[index]); 632 if (check_entry(kind, id, entry)) { 633 return entry; // Found 634 } 635 } 636 for (int i = mid + 1; i <= h; i++) { // search forward 637 ix = i * 2; 638 is = _search_entries[ix]; 639 if (is != id) { 640 break; 641 } 642 index = _search_entries[ix + 1]; 643 AOTCodeEntry* entry = &(_load_entries[index]); 644 if (check_entry(kind, id, entry)) { 645 return entry; // Found 646 } 647 } 648 break; // Not found match 649 } else if (is < id) { 650 l = mid + 1; 651 } else { 652 h = mid - 1; 653 } 654 } 655 return nullptr; 656 } 657 658 extern "C" { 659 static int uint_cmp(const void *i, const void *j) { 660 uint a = *(uint *)i; 661 uint b = *(uint *)j; 662 return a > b ? 1 : a < b ? 
-1 : 0; 663 } 664 } 665 666 bool AOTCodeCache::finish_write() { 667 if (!align_write()) { 668 return false; 669 } 670 uint strings_offset = _write_position; 671 int strings_count = store_strings(); 672 if (strings_count < 0) { 673 return false; 674 } 675 if (!align_write()) { 676 return false; 677 } 678 uint strings_size = _write_position - strings_offset; 679 680 uint entries_count = 0; // Number of entrant (useful) code entries 681 uint entries_offset = _write_position; 682 683 uint store_count = _store_entries_cnt; 684 if (store_count > 0) { 685 uint header_size = (uint)align_up(sizeof(AOTCodeCache::Header), DATA_ALIGNMENT); 686 uint code_count = store_count; 687 uint search_count = code_count * 2; 688 uint search_size = search_count * sizeof(uint); 689 uint entries_size = (uint)align_up(code_count * sizeof(AOTCodeEntry), DATA_ALIGNMENT); // In bytes 690 // _write_position includes size of code and strings 691 uint code_alignment = code_count * DATA_ALIGNMENT; // We align_up code size when storing it. 692 uint total_size = header_size + _write_position + code_alignment + search_size + entries_size; 693 assert(total_size < max_aot_code_size(), "AOT Code size (" UINT32_FORMAT " bytes) is greater than AOTCodeMaxSize(" UINT32_FORMAT " bytes).", total_size, max_aot_code_size()); 694 695 // Create ordered search table for entries [id, index]; 696 uint* search = NEW_C_HEAP_ARRAY(uint, search_count, mtCode); 697 // Allocate in AOT Cache buffer 698 char* buffer = (char *)AOTCacheAccess::allocate_aot_code_region(total_size + DATA_ALIGNMENT); 699 char* start = align_up(buffer, DATA_ALIGNMENT); 700 char* current = start + header_size; // Skip header 701 702 AOTCodeEntry* entries_address = _store_entries; // Pointer to latest entry 703 uint adapters_count = 0; 704 uint shared_blobs_count = 0; 705 uint C1_blobs_count = 0; 706 uint C2_blobs_count = 0; 707 uint max_size = 0; 708 // AOTCodeEntry entries were allocated in reverse in store buffer. 709 // Process them in reverse order to cache first code first. 
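    // A sketch of the final region layout written below (assuming the
    // copy/write sequence that follows): the header is reserved first, then
    // the code blobs are copied in, followed by the C string table, the
    // sorted [id, index] search table, and finally the AOTCodeEntry array.
    //
    //   [Header | code blobs ... | C strings | search table | entries]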
    for (int i = store_count - 1; i >= 0; i--) {
      entries_address[i].set_next(nullptr); // clear pointers before storing data
      uint size = align_up(entries_address[i].size(), DATA_ALIGNMENT);
      if (size > max_size) {
        max_size = size;
      }
      copy_bytes((_store_buffer + entries_address[i].offset()), (address)current, size);
      entries_address[i].set_offset(current - start); // New offset
      current += size;
      uint n = write_bytes(&(entries_address[i]), sizeof(AOTCodeEntry));
      if (n != sizeof(AOTCodeEntry)) {
        FREE_C_HEAP_ARRAY(uint, search);
        return false;
      }
      search[entries_count*2 + 0] = entries_address[i].id();
      search[entries_count*2 + 1] = entries_count;
      entries_count++;
      AOTCodeEntry::Kind kind = entries_address[i].kind();
      if (kind == AOTCodeEntry::Adapter) {
        adapters_count++;
      } else if (kind == AOTCodeEntry::SharedBlob) {
        shared_blobs_count++;
      } else if (kind == AOTCodeEntry::C1Blob) {
        C1_blobs_count++;
      } else if (kind == AOTCodeEntry::C2Blob) {
        C2_blobs_count++;
      }
    }
    if (entries_count == 0) {
      log_info(aot, codecache, exit)("AOT Code Cache was not created: no entries");
      FREE_C_HEAP_ARRAY(uint, search);
      return true; // Nothing to write
    }
    assert(entries_count <= store_count, "%d > %d", entries_count, store_count);
    // Write strings
    if (strings_count > 0) {
      copy_bytes((_store_buffer + strings_offset), (address)current, strings_size);
      strings_offset = (current - start); // New offset
      current += strings_size;
    }

    uint new_entries_offset = (current - start); // New offset
    // Sort and store search table
    qsort(search, entries_count, 2*sizeof(uint), uint_cmp);
    search_size = 2 * entries_count * sizeof(uint);
    copy_bytes((const char*)search, (address)current, search_size);
    FREE_C_HEAP_ARRAY(uint, search);
    current += search_size;

    // Write entries
    entries_size = entries_count * sizeof(AOTCodeEntry); // New size
    copy_bytes((_store_buffer + entries_offset), (address)current, entries_size);
    current += entries_size;
    uint size = (current - start);
    assert(size <= total_size, "%d > %d", size, total_size);

    log_debug(aot, codecache, exit)("  Adapters: total=%u", adapters_count);
    log_debug(aot, codecache, exit)("  Shared Blobs: total=%d", shared_blobs_count);
    log_debug(aot, codecache, exit)("  C1 Blobs: total=%d", C1_blobs_count);
    log_debug(aot, codecache, exit)("  C2 Blobs: total=%d", C2_blobs_count);
    log_debug(aot, codecache, exit)("  AOT code cache size: %u bytes, max entry's size: %u bytes", size, max_size);

    // Finalize header
    AOTCodeCache::Header* header = (AOTCodeCache::Header*)start;
    header->init(size, (uint)strings_count, strings_offset,
                 entries_count, new_entries_offset,
                 adapters_count, shared_blobs_count,
                 C1_blobs_count, C2_blobs_count);

    log_info(aot, codecache, exit)("Wrote %d AOT code entries to AOT Code Cache", entries_count);
  }
  return true;
}

//------------------Store/Load AOT code ----------------------

bool AOTCodeCache::store_code_blob(CodeBlob& blob, AOTCodeEntry::Kind entry_kind, uint id, const char* name, int entry_offset_count, int* entry_offsets) {
  AOTCodeCache* cache = open_for_dump();
  if (cache == nullptr) {
    return false;
  }
  assert(AOTCodeEntry::is_valid_entry_kind(entry_kind), "invalid entry_kind %d", entry_kind);

  if (AOTCodeEntry::is_adapter(entry_kind) && !is_dumping_adapter()) {
return false; 795 } 796 if (AOTCodeEntry::is_blob(entry_kind) && !is_dumping_stub()) { 797 return false; 798 } 799 log_debug(aot, codecache, stubs)("Writing blob '%s' (id=%u, kind=%s) to AOT Code Cache", name, id, aot_code_entry_kind_name[entry_kind]); 800 801 #ifdef ASSERT 802 LogStreamHandle(Trace, aot, codecache, stubs) log; 803 if (log.is_enabled()) { 804 FlagSetting fs(PrintRelocations, true); 805 blob.print_on(&log); 806 } 807 #endif 808 // we need to take a lock to prevent race between compiler threads generating AOT code 809 // and the main thread generating adapter 810 MutexLocker ml(Compile_lock); 811 if (!cache->align_write()) { 812 return false; 813 } 814 uint entry_position = cache->_write_position; 815 816 // Write name 817 uint name_offset = cache->_write_position - entry_position; 818 uint name_size = (uint)strlen(name) + 1; // Includes '/0' 819 uint n = cache->write_bytes(name, name_size); 820 if (n != name_size) { 821 return false; 822 } 823 824 // Write CodeBlob 825 if (!cache->align_write()) { 826 return false; 827 } 828 uint blob_offset = cache->_write_position - entry_position; 829 address archive_buffer = cache->reserve_bytes(blob.size()); 830 if (archive_buffer == nullptr) { 831 return false; 832 } 833 CodeBlob::archive_blob(&blob, archive_buffer); 834 835 uint reloc_data_size = blob.relocation_size(); 836 n = cache->write_bytes((address)blob.relocation_begin(), reloc_data_size); 837 if (n != reloc_data_size) { 838 return false; 839 } 840 841 bool has_oop_maps = false; 842 if (blob.oop_maps() != nullptr) { 843 if (!cache->write_oop_map_set(blob)) { 844 return false; 845 } 846 has_oop_maps = true; 847 } 848 849 #ifndef PRODUCT 850 // Write asm remarks 851 if (!cache->write_asm_remarks(blob)) { 852 return false; 853 } 854 if (!cache->write_dbg_strings(blob)) { 855 return false; 856 } 857 #endif /* PRODUCT */ 858 859 if (!cache->write_relocations(blob)) { 860 return false; 861 } 862 863 // Write entries offsets 864 n = cache->write_bytes(&entry_offset_count, sizeof(int)); 865 if (n != sizeof(int)) { 866 return false; 867 } 868 for (int i = 0; i < entry_offset_count; i++) { 869 uint32_t off = (uint32_t)entry_offsets[i]; 870 n = cache->write_bytes(&off, sizeof(uint32_t)); 871 if (n != sizeof(uint32_t)) { 872 return false; 873 } 874 } 875 uint entry_size = cache->_write_position - entry_position; 876 AOTCodeEntry* entry = new(cache) AOTCodeEntry(entry_kind, encode_id(entry_kind, id), 877 entry_position, entry_size, name_offset, name_size, 878 blob_offset, has_oop_maps, blob.content_begin()); 879 log_debug(aot, codecache, stubs)("Wrote code blob '%s' (id=%u, kind=%s) to AOT Code Cache", name, id, aot_code_entry_kind_name[entry_kind]); 880 return true; 881 } 882 883 CodeBlob* AOTCodeCache::load_code_blob(AOTCodeEntry::Kind entry_kind, uint id, const char* name, int entry_offset_count, int* entry_offsets) { 884 AOTCodeCache* cache = open_for_use(); 885 if (cache == nullptr) { 886 return nullptr; 887 } 888 assert(AOTCodeEntry::is_valid_entry_kind(entry_kind), "invalid entry_kind %d", entry_kind); 889 890 if (AOTCodeEntry::is_adapter(entry_kind) && !is_using_adapter()) { 891 return nullptr; 892 } 893 if (AOTCodeEntry::is_blob(entry_kind) && !is_using_stub()) { 894 return nullptr; 895 } 896 log_debug(aot, codecache, stubs)("Reading blob '%s' (id=%u, kind=%s) from AOT Code Cache", name, id, aot_code_entry_kind_name[entry_kind]); 897 898 AOTCodeEntry* entry = cache->find_entry(entry_kind, encode_id(entry_kind, id)); 899 if (entry == nullptr) { 900 return nullptr; 901 } 902 
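  // A matching (kind, id) entry exists in the mapped AOT code region;
  // reconstruct the CodeBlob from the archived bytes (see compile_code_blob() below).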
AOTCodeReader reader(cache, entry); 903 CodeBlob* blob = reader.compile_code_blob(name, entry_offset_count, entry_offsets); 904 905 log_debug(aot, codecache, stubs)("%sRead blob '%s' (id=%u, kind=%s) from AOT Code Cache", 906 (blob == nullptr? "Failed to " : ""), name, id, aot_code_entry_kind_name[entry_kind]); 907 return blob; 908 } 909 910 CodeBlob* AOTCodeReader::compile_code_blob(const char* name, int entry_offset_count, int* entry_offsets) { 911 uint entry_position = _entry->offset(); 912 913 // Read name 914 uint name_offset = entry_position + _entry->name_offset(); 915 uint name_size = _entry->name_size(); // Includes '/0' 916 const char* stored_name = addr(name_offset); 917 918 if (strncmp(stored_name, name, (name_size - 1)) != 0) { 919 log_warning(aot, codecache, stubs)("Saved blob's name '%s' is different from the expected name '%s'", 920 stored_name, name); 921 set_lookup_failed(); // Skip this blob 922 return nullptr; 923 } 924 925 // Read archived code blob 926 uint offset = entry_position + _entry->blob_offset(); 927 CodeBlob* archived_blob = (CodeBlob*)addr(offset); 928 offset += archived_blob->size(); 929 930 address reloc_data = (address)addr(offset); 931 offset += archived_blob->relocation_size(); 932 set_read_position(offset); 933 934 ImmutableOopMapSet* oop_maps = nullptr; 935 if (_entry->has_oop_maps()) { 936 oop_maps = read_oop_map_set(); 937 } 938 939 #ifndef PRODUCT 940 AsmRemarks asm_remarks; 941 read_asm_remarks(asm_remarks); 942 DbgStrings dbg_strings; 943 read_dbg_strings(dbg_strings); 944 #endif // PRODUCT 945 946 CodeBlob* code_blob = CodeBlob::create(archived_blob, 947 stored_name, 948 reloc_data, 949 oop_maps 950 #ifndef PRODUCT 951 , asm_remarks 952 , dbg_strings 953 #endif 954 ); 955 if (code_blob == nullptr) { // no space left in CodeCache 956 return nullptr; 957 } 958 959 fix_relocations(code_blob); 960 961 // Read entries offsets 962 offset = read_position(); 963 int stored_count = *(int*)addr(offset); 964 assert(stored_count == entry_offset_count, "entry offset count mismatch, count in AOT code cache=%d, expected=%d", stored_count, entry_offset_count); 965 offset += sizeof(int); 966 set_read_position(offset); 967 for (int i = 0; i < stored_count; i++) { 968 uint32_t off = *(uint32_t*)addr(offset); 969 offset += sizeof(uint32_t); 970 const char* entry_name = (_entry->kind() == AOTCodeEntry::Adapter) ? 
AdapterHandlerEntry::entry_name(i) : ""; 971 log_trace(aot, codecache, stubs)("Reading adapter '%s:%s' (0x%x) offset: 0x%x from AOT Code Cache", 972 stored_name, entry_name, _entry->id(), off); 973 entry_offsets[i] = off; 974 } 975 976 #ifdef ASSERT 977 LogStreamHandle(Trace, aot, codecache, stubs) log; 978 if (log.is_enabled()) { 979 FlagSetting fs(PrintRelocations, true); 980 code_blob->print_on(&log); 981 } 982 #endif 983 return code_blob; 984 } 985 986 // ------------ process code and data -------------- 987 988 bool AOTCodeCache::write_relocations(CodeBlob& code_blob) { 989 GrowableArray<uint> reloc_data; 990 RelocIterator iter(&code_blob); 991 LogStreamHandle(Trace, aot, codecache, reloc) log; 992 while (iter.next()) { 993 int idx = reloc_data.append(0); // default value 994 switch (iter.type()) { 995 case relocInfo::none: 996 break; 997 case relocInfo::runtime_call_type: { 998 // Record offset of runtime destination 999 CallRelocation* r = (CallRelocation*)iter.reloc(); 1000 address dest = r->destination(); 1001 if (dest == r->addr()) { // possible call via trampoline on Aarch64 1002 dest = (address)-1; // do nothing in this case when loading this relocation 1003 } 1004 reloc_data.at_put(idx, _table->id_for_address(dest, iter, &code_blob)); 1005 break; 1006 } 1007 case relocInfo::runtime_call_w_cp_type: 1008 fatal("runtime_call_w_cp_type unimplemented"); 1009 break; 1010 case relocInfo::external_word_type: { 1011 // Record offset of runtime target 1012 address target = ((external_word_Relocation*)iter.reloc())->target(); 1013 reloc_data.at_put(idx, _table->id_for_address(target, iter, &code_blob)); 1014 break; 1015 } 1016 case relocInfo::internal_word_type: 1017 break; 1018 case relocInfo::section_word_type: 1019 break; 1020 case relocInfo::post_call_nop_type: 1021 break; 1022 default: 1023 fatal("relocation %d unimplemented", (int)iter.type()); 1024 break; 1025 } 1026 if (log.is_enabled()) { 1027 iter.print_current_on(&log); 1028 } 1029 } 1030 1031 // Write additional relocation data: uint per relocation 1032 // Write the count first 1033 int count = reloc_data.length(); 1034 write_bytes(&count, sizeof(int)); 1035 for (GrowableArrayIterator<uint> iter = reloc_data.begin(); 1036 iter != reloc_data.end(); ++iter) { 1037 uint value = *iter; 1038 int n = write_bytes(&value, sizeof(uint)); 1039 if (n != sizeof(uint)) { 1040 return false; 1041 } 1042 } 1043 return true; 1044 } 1045 1046 void AOTCodeReader::fix_relocations(CodeBlob* code_blob) { 1047 LogStreamHandle(Trace, aot, reloc) log; 1048 uint offset = read_position(); 1049 int count = *(int*)addr(offset); 1050 offset += sizeof(int); 1051 if (log.is_enabled()) { 1052 log.print_cr("======== extra relocations count=%d", count); 1053 } 1054 uint* reloc_data = (uint*)addr(offset); 1055 offset += (count * sizeof(uint)); 1056 set_read_position(offset); 1057 1058 RelocIterator iter(code_blob); 1059 int j = 0; 1060 while (iter.next()) { 1061 switch (iter.type()) { 1062 case relocInfo::none: 1063 break; 1064 case relocInfo::runtime_call_type: { 1065 address dest = _cache->address_for_id(reloc_data[j]); 1066 if (dest != (address)-1) { 1067 ((CallRelocation*)iter.reloc())->set_destination(dest); 1068 } 1069 break; 1070 } 1071 case relocInfo::runtime_call_w_cp_type: 1072 fatal("runtime_call_w_cp_type unimplemented"); 1073 break; 1074 case relocInfo::external_word_type: { 1075 address target = _cache->address_for_id(reloc_data[j]); 1076 // Add external address to global table 1077 int index = ExternalsRecorder::find_index(target); 1078 // Update 
index in relocation 1079 Relocation::add_jint(iter.data(), index); 1080 external_word_Relocation* reloc = (external_word_Relocation*)iter.reloc(); 1081 assert(reloc->target() == target, "sanity"); 1082 reloc->set_value(target); // Patch address in the code 1083 break; 1084 } 1085 case relocInfo::internal_word_type: { 1086 internal_word_Relocation* r = (internal_word_Relocation*)iter.reloc(); 1087 r->fix_relocation_after_aot_load(aot_code_entry()->dumptime_content_start_addr(), code_blob->content_begin()); 1088 break; 1089 } 1090 case relocInfo::section_word_type: { 1091 section_word_Relocation* r = (section_word_Relocation*)iter.reloc(); 1092 r->fix_relocation_after_aot_load(aot_code_entry()->dumptime_content_start_addr(), code_blob->content_begin()); 1093 break; 1094 } 1095 case relocInfo::post_call_nop_type: 1096 break; 1097 default: 1098 fatal("relocation %d unimplemented", (int)iter.type()); 1099 break; 1100 } 1101 if (log.is_enabled()) { 1102 iter.print_current_on(&log); 1103 } 1104 j++; 1105 } 1106 assert(j == count, "sanity"); 1107 } 1108 1109 bool AOTCodeCache::write_oop_map_set(CodeBlob& cb) { 1110 ImmutableOopMapSet* oopmaps = cb.oop_maps(); 1111 int oopmaps_size = oopmaps->nr_of_bytes(); 1112 if (!write_bytes(&oopmaps_size, sizeof(int))) { 1113 return false; 1114 } 1115 uint n = write_bytes(oopmaps, oopmaps->nr_of_bytes()); 1116 if (n != (uint)oopmaps->nr_of_bytes()) { 1117 return false; 1118 } 1119 return true; 1120 } 1121 1122 ImmutableOopMapSet* AOTCodeReader::read_oop_map_set() { 1123 uint offset = read_position(); 1124 int size = *(int *)addr(offset); 1125 offset += sizeof(int); 1126 ImmutableOopMapSet* oopmaps = (ImmutableOopMapSet *)addr(offset); 1127 offset += size; 1128 set_read_position(offset); 1129 return oopmaps; 1130 } 1131 1132 #ifndef PRODUCT 1133 bool AOTCodeCache::write_asm_remarks(CodeBlob& cb) { 1134 // Write asm remarks 1135 uint* count_ptr = (uint *)reserve_bytes(sizeof(uint)); 1136 if (count_ptr == nullptr) { 1137 return false; 1138 } 1139 uint count = 0; 1140 bool result = cb.asm_remarks().iterate([&] (uint offset, const char* str) -> bool { 1141 log_trace(aot, codecache, stubs)("asm remark offset=%d, str='%s'", offset, str); 1142 uint n = write_bytes(&offset, sizeof(uint)); 1143 if (n != sizeof(uint)) { 1144 return false; 1145 } 1146 const char* cstr = add_C_string(str); 1147 int id = _table->id_for_C_string((address)cstr); 1148 assert(id != -1, "asm remark string '%s' not found in AOTCodeAddressTable", str); 1149 n = write_bytes(&id, sizeof(int)); 1150 if (n != sizeof(int)) { 1151 return false; 1152 } 1153 count += 1; 1154 return true; 1155 }); 1156 *count_ptr = count; 1157 return result; 1158 } 1159 1160 void AOTCodeReader::read_asm_remarks(AsmRemarks& asm_remarks) { 1161 // Read asm remarks 1162 uint offset = read_position(); 1163 uint count = *(uint *)addr(offset); 1164 offset += sizeof(uint); 1165 for (uint i = 0; i < count; i++) { 1166 uint remark_offset = *(uint *)addr(offset); 1167 offset += sizeof(uint); 1168 int remark_string_id = *(uint *)addr(offset); 1169 offset += sizeof(int); 1170 const char* remark = (const char*)_cache->address_for_C_string(remark_string_id); 1171 asm_remarks.insert(remark_offset, remark); 1172 } 1173 set_read_position(offset); 1174 } 1175 1176 bool AOTCodeCache::write_dbg_strings(CodeBlob& cb) { 1177 // Write dbg strings 1178 uint* count_ptr = (uint *)reserve_bytes(sizeof(uint)); 1179 if (count_ptr == nullptr) { 1180 return false; 1181 } 1182 uint count = 0; 1183 bool result = cb.dbg_strings().iterate([&] (const char* 
str) -> bool { 1184 log_trace(aot, codecache, stubs)("dbg string=%s", str); 1185 const char* cstr = add_C_string(str); 1186 int id = _table->id_for_C_string((address)cstr); 1187 assert(id != -1, "db string '%s' not found in AOTCodeAddressTable", str); 1188 uint n = write_bytes(&id, sizeof(int)); 1189 if (n != sizeof(int)) { 1190 return false; 1191 } 1192 count += 1; 1193 return true; 1194 }); 1195 *count_ptr = count; 1196 return result; 1197 } 1198 1199 void AOTCodeReader::read_dbg_strings(DbgStrings& dbg_strings) { 1200 // Read dbg strings 1201 uint offset = read_position(); 1202 uint count = *(uint *)addr(offset); 1203 offset += sizeof(uint); 1204 for (uint i = 0; i < count; i++) { 1205 int string_id = *(uint *)addr(offset); 1206 offset += sizeof(int); 1207 const char* str = (const char*)_cache->address_for_C_string(string_id); 1208 dbg_strings.insert(str); 1209 } 1210 set_read_position(offset); 1211 } 1212 #endif // PRODUCT 1213 1214 //======================= AOTCodeAddressTable =============== 1215 1216 // address table ids for generated routines, external addresses and C 1217 // string addresses are partitioned into positive integer ranges 1218 // defined by the following positive base and max values 1219 // i.e. [_extrs_base, _extrs_base + _extrs_max -1], 1220 // [_blobs_base, _blobs_base + _blobs_max -1], 1221 // ... 1222 // [_c_str_base, _c_str_base + _c_str_max -1], 1223 1224 #define _extrs_max 100 1225 #define _stubs_max 3 1226 1227 #define _shared_blobs_max 20 1228 #define _C1_blobs_max 10 1229 #define _blobs_max (_shared_blobs_max+_C1_blobs_max) 1230 #define _all_max (_extrs_max+_stubs_max+_blobs_max) 1231 1232 #define _extrs_base 0 1233 #define _stubs_base (_extrs_base + _extrs_max) 1234 #define _shared_blobs_base (_stubs_base + _stubs_max) 1235 #define _C1_blobs_base (_shared_blobs_base + _shared_blobs_max) 1236 #define _blobs_end (_shared_blobs_base + _blobs_max) 1237 1238 #define SET_ADDRESS(type, addr) \ 1239 { \ 1240 type##_addr[type##_length++] = (address) (addr); \ 1241 assert(type##_length <= type##_max, "increase size"); \ 1242 } 1243 1244 static bool initializing_extrs = false; 1245 1246 void AOTCodeAddressTable::init_extrs() { 1247 if (_extrs_complete || initializing_extrs) return; // Done already 1248 1249 assert(_blobs_end <= _all_max, "AOTCodeAddress table ranges need adjusting"); 1250 1251 initializing_extrs = true; 1252 _extrs_addr = NEW_C_HEAP_ARRAY(address, _extrs_max, mtCode); 1253 1254 _extrs_length = 0; 1255 1256 // Record addresses of VM runtime methods 1257 SET_ADDRESS(_extrs, SharedRuntime::fixup_callers_callsite); 1258 SET_ADDRESS(_extrs, SharedRuntime::handle_wrong_method); 1259 SET_ADDRESS(_extrs, SharedRuntime::handle_wrong_method_abstract); 1260 SET_ADDRESS(_extrs, SharedRuntime::handle_wrong_method_ic_miss); 1261 #if defined(AARCH64) && !defined(ZERO) 1262 SET_ADDRESS(_extrs, JavaThread::aarch64_get_thread_helper); 1263 #endif 1264 { 1265 // Required by Shared blobs 1266 SET_ADDRESS(_extrs, Deoptimization::fetch_unroll_info); 1267 SET_ADDRESS(_extrs, Deoptimization::unpack_frames); 1268 SET_ADDRESS(_extrs, SafepointSynchronize::handle_polling_page_exception); 1269 SET_ADDRESS(_extrs, SharedRuntime::resolve_opt_virtual_call_C); 1270 SET_ADDRESS(_extrs, SharedRuntime::resolve_virtual_call_C); 1271 SET_ADDRESS(_extrs, SharedRuntime::resolve_static_call_C); 1272 SET_ADDRESS(_extrs, SharedRuntime::throw_delayed_StackOverflowError); 1273 SET_ADDRESS(_extrs, SharedRuntime::throw_AbstractMethodError); 1274 SET_ADDRESS(_extrs, 
SharedRuntime::throw_IncompatibleClassChangeError); 1275 SET_ADDRESS(_extrs, SharedRuntime::throw_NullPointerException_at_call); 1276 } 1277 1278 #ifdef COMPILER1 1279 { 1280 // Required by C1 blobs 1281 SET_ADDRESS(_extrs, static_cast<int (*)(oopDesc*)>(SharedRuntime::dtrace_object_alloc)); 1282 SET_ADDRESS(_extrs, SharedRuntime::exception_handler_for_return_address); 1283 SET_ADDRESS(_extrs, SharedRuntime::register_finalizer); 1284 SET_ADDRESS(_extrs, Runtime1::is_instance_of); 1285 SET_ADDRESS(_extrs, Runtime1::exception_handler_for_pc); 1286 SET_ADDRESS(_extrs, Runtime1::check_abort_on_vm_exception); 1287 SET_ADDRESS(_extrs, Runtime1::new_instance); 1288 SET_ADDRESS(_extrs, Runtime1::counter_overflow); 1289 SET_ADDRESS(_extrs, Runtime1::new_type_array); 1290 SET_ADDRESS(_extrs, Runtime1::new_object_array); 1291 SET_ADDRESS(_extrs, Runtime1::new_multi_array); 1292 SET_ADDRESS(_extrs, Runtime1::throw_range_check_exception); 1293 SET_ADDRESS(_extrs, Runtime1::throw_index_exception); 1294 SET_ADDRESS(_extrs, Runtime1::throw_div0_exception); 1295 SET_ADDRESS(_extrs, Runtime1::throw_null_pointer_exception); 1296 SET_ADDRESS(_extrs, Runtime1::throw_array_store_exception); 1297 SET_ADDRESS(_extrs, Runtime1::throw_class_cast_exception); 1298 SET_ADDRESS(_extrs, Runtime1::throw_incompatible_class_change_error); 1299 SET_ADDRESS(_extrs, Runtime1::is_instance_of); 1300 SET_ADDRESS(_extrs, Runtime1::monitorenter); 1301 SET_ADDRESS(_extrs, Runtime1::monitorexit); 1302 SET_ADDRESS(_extrs, Runtime1::deoptimize); 1303 SET_ADDRESS(_extrs, Runtime1::access_field_patching); 1304 SET_ADDRESS(_extrs, Runtime1::move_klass_patching); 1305 SET_ADDRESS(_extrs, Runtime1::move_mirror_patching); 1306 SET_ADDRESS(_extrs, Runtime1::move_appendix_patching); 1307 SET_ADDRESS(_extrs, Runtime1::predicate_failed_trap); 1308 SET_ADDRESS(_extrs, Runtime1::unimplemented_entry); 1309 SET_ADDRESS(_extrs, Thread::current); 1310 SET_ADDRESS(_extrs, CompressedKlassPointers::base_addr()); 1311 #ifndef PRODUCT 1312 SET_ADDRESS(_extrs, os::breakpoint); 1313 #endif 1314 } 1315 #endif 1316 1317 #ifdef COMPILER2 1318 { 1319 // Required by C2 blobs 1320 SET_ADDRESS(_extrs, Deoptimization::uncommon_trap); 1321 SET_ADDRESS(_extrs, OptoRuntime::handle_exception_C); 1322 SET_ADDRESS(_extrs, OptoRuntime::new_instance_C); 1323 SET_ADDRESS(_extrs, OptoRuntime::new_array_C); 1324 SET_ADDRESS(_extrs, OptoRuntime::new_array_nozero_C); 1325 SET_ADDRESS(_extrs, OptoRuntime::multianewarray2_C); 1326 SET_ADDRESS(_extrs, OptoRuntime::multianewarray3_C); 1327 SET_ADDRESS(_extrs, OptoRuntime::multianewarray4_C); 1328 SET_ADDRESS(_extrs, OptoRuntime::multianewarray5_C); 1329 SET_ADDRESS(_extrs, OptoRuntime::multianewarrayN_C); 1330 #if INCLUDE_JVMTI 1331 SET_ADDRESS(_extrs, SharedRuntime::notify_jvmti_vthread_start); 1332 SET_ADDRESS(_extrs, SharedRuntime::notify_jvmti_vthread_end); 1333 SET_ADDRESS(_extrs, SharedRuntime::notify_jvmti_vthread_mount); 1334 SET_ADDRESS(_extrs, SharedRuntime::notify_jvmti_vthread_unmount); 1335 #endif 1336 SET_ADDRESS(_extrs, OptoRuntime::complete_monitor_locking_C); 1337 SET_ADDRESS(_extrs, OptoRuntime::monitor_notify_C); 1338 SET_ADDRESS(_extrs, OptoRuntime::monitor_notifyAll_C); 1339 SET_ADDRESS(_extrs, OptoRuntime::rethrow_C); 1340 SET_ADDRESS(_extrs, OptoRuntime::slow_arraycopy_C); 1341 SET_ADDRESS(_extrs, OptoRuntime::register_finalizer_C); 1342 #if defined(AARCH64) 1343 SET_ADDRESS(_extrs, JavaThread::verify_cross_modify_fence_failure); 1344 #endif // AARCH64 1345 } 1346 #endif // COMPILER2 1347 1348 #if 
INCLUDE_G1GC 1349 SET_ADDRESS(_extrs, G1BarrierSetRuntime::write_ref_field_post_entry); 1350 SET_ADDRESS(_extrs, G1BarrierSetRuntime::write_ref_field_pre_entry); 1351 #endif 1352 #if INCLUDE_SHENANDOAHGC 1353 SET_ADDRESS(_extrs, ShenandoahRuntime::write_ref_field_pre); 1354 SET_ADDRESS(_extrs, ShenandoahRuntime::load_reference_barrier_phantom); 1355 SET_ADDRESS(_extrs, ShenandoahRuntime::load_reference_barrier_phantom_narrow); 1356 #endif 1357 #if INCLUDE_ZGC 1358 SET_ADDRESS(_extrs, ZBarrierSetRuntime::load_barrier_on_phantom_oop_field_preloaded_addr()); 1359 #if defined(AMD64) 1360 SET_ADDRESS(_extrs, &ZPointerLoadShift); 1361 #endif 1362 #endif 1363 #ifndef ZERO 1364 #if defined(AMD64) || defined(AARCH64) || defined(RISCV64) 1365 SET_ADDRESS(_extrs, MacroAssembler::debug64); 1366 #endif 1367 #endif // ZERO 1368 1369 _extrs_complete = true; 1370 log_debug(aot, codecache, init)("External addresses recorded"); 1371 } 1372 1373 static bool initializing_early_stubs = false; 1374 1375 void AOTCodeAddressTable::init_early_stubs() { 1376 if (_complete || initializing_early_stubs) return; // Done already 1377 initializing_early_stubs = true; 1378 _stubs_addr = NEW_C_HEAP_ARRAY(address, _stubs_max, mtCode); 1379 _stubs_length = 0; 1380 SET_ADDRESS(_stubs, StubRoutines::forward_exception_entry()); 1381 1382 { 1383 // Required by C1 blobs 1384 #if defined(AMD64) && !defined(ZERO) 1385 SET_ADDRESS(_stubs, StubRoutines::x86::double_sign_flip()); 1386 SET_ADDRESS(_stubs, StubRoutines::x86::d2l_fixup()); 1387 #endif // AMD64 1388 } 1389 1390 _early_stubs_complete = true; 1391 log_info(aot, codecache, init)("Early stubs recorded"); 1392 } 1393 1394 static bool initializing_shared_blobs = false; 1395 1396 void AOTCodeAddressTable::init_shared_blobs() { 1397 if (_complete || initializing_shared_blobs) return; // Done already 1398 initializing_shared_blobs = true; 1399 address* blobs_addr = NEW_C_HEAP_ARRAY(address, _blobs_max, mtCode); 1400 _shared_blobs_addr = blobs_addr; 1401 _C1_blobs_addr = _shared_blobs_addr + _shared_blobs_max; 1402 _shared_blobs_length = _C1_blobs_length = 0; 1403 1404 // clear the address table 1405 memset(blobs_addr, 0, sizeof(address)* _blobs_max); 1406 1407 // Record addresses of generated code blobs 1408 SET_ADDRESS(_shared_blobs, SharedRuntime::get_handle_wrong_method_stub()); 1409 SET_ADDRESS(_shared_blobs, SharedRuntime::get_ic_miss_stub()); 1410 SET_ADDRESS(_shared_blobs, SharedRuntime::deopt_blob()->unpack()); 1411 SET_ADDRESS(_shared_blobs, SharedRuntime::deopt_blob()->unpack_with_exception()); 1412 SET_ADDRESS(_shared_blobs, SharedRuntime::deopt_blob()->unpack_with_reexecution()); 1413 SET_ADDRESS(_shared_blobs, SharedRuntime::deopt_blob()->unpack_with_exception_in_tls()); 1414 #if INCLUDE_JVMCI 1415 if (EnableJVMCI) { 1416 SET_ADDRESS(_shared_blobs, SharedRuntime::deopt_blob()->uncommon_trap()); 1417 SET_ADDRESS(_shared_blobs, SharedRuntime::deopt_blob()->implicit_exception_uncommon_trap()); 1418 } 1419 #endif 1420 1421 _shared_blobs_complete = true; 1422 log_debug(aot, codecache, init)("Early shared blobs recorded"); 1423 _complete = true; 1424 } 1425 1426 void AOTCodeAddressTable::init_early_c1() { 1427 #ifdef COMPILER1 1428 // Runtime1 Blobs 1429 for (int i = 0; i <= (int)C1StubId::forward_exception_id; i++) { 1430 C1StubId id = (C1StubId)i; 1431 if (Runtime1::blob_for(id) == nullptr) { 1432 log_info(aot, codecache, init)("C1 blob %s is missing", Runtime1::name_for(id)); 1433 continue; 1434 } 1435 if (Runtime1::entry_for(id) == nullptr) { 1436 log_info(aot, 
codecache, init)("C1 blob %s is missing entry", Runtime1::name_for(id)); 1437 continue; 1438 } 1439 address entry = Runtime1::entry_for(id); 1440 SET_ADDRESS(_C1_blobs, entry); 1441 } 1442 #endif // COMPILER1 1443 assert(_C1_blobs_length <= _C1_blobs_max, "increase _C1_blobs_max to %d", _C1_blobs_length); 1444 _early_c1_complete = true; 1445 } 1446 1447 #undef SET_ADDRESS 1448 1449 AOTCodeAddressTable::~AOTCodeAddressTable() { 1450 if (_extrs_addr != nullptr) { 1451 FREE_C_HEAP_ARRAY(address, _extrs_addr); 1452 } 1453 if (_shared_blobs_addr != nullptr) { 1454 FREE_C_HEAP_ARRAY(address, _shared_blobs_addr); 1455 } 1456 } 1457 1458 #ifdef PRODUCT 1459 #define MAX_STR_COUNT 200 1460 #else 1461 #define MAX_STR_COUNT 500 1462 #endif 1463 #define _c_str_max MAX_STR_COUNT 1464 static const int _c_str_base = _all_max; 1465 1466 static const char* _C_strings_in[MAX_STR_COUNT] = {nullptr}; // Incoming strings 1467 static const char* _C_strings[MAX_STR_COUNT] = {nullptr}; // Our duplicates 1468 static int _C_strings_count = 0; 1469 static int _C_strings_s[MAX_STR_COUNT] = {0}; 1470 static int _C_strings_id[MAX_STR_COUNT] = {0}; 1471 static int _C_strings_used = 0; 1472 1473 void AOTCodeCache::load_strings() { 1474 uint strings_count = _load_header->strings_count(); 1475 if (strings_count == 0) { 1476 return; 1477 } 1478 uint strings_offset = _load_header->strings_offset(); 1479 uint* string_lengths = (uint*)addr(strings_offset); 1480 strings_offset += (strings_count * sizeof(uint)); 1481 uint strings_size = _load_header->entries_offset() - strings_offset; 1482 // We have to keep cached strings longer than _cache buffer 1483 // because they are refernced from compiled code which may 1484 // still be executed on VM exit after _cache is freed. 1485 char* p = NEW_C_HEAP_ARRAY(char, strings_size+1, mtCode); 1486 memcpy(p, addr(strings_offset), strings_size); 1487 _C_strings_buf = p; 1488 assert(strings_count <= MAX_STR_COUNT, "sanity"); 1489 for (uint i = 0; i < strings_count; i++) { 1490 _C_strings[i] = p; 1491 uint len = string_lengths[i]; 1492 _C_strings_s[i] = i; 1493 _C_strings_id[i] = i; 1494 p += len; 1495 } 1496 assert((uint)(p - _C_strings_buf) <= strings_size, "(" INTPTR_FORMAT " - " INTPTR_FORMAT ") = %d > %d ", p2i(p), p2i(_C_strings_buf), (uint)(p - _C_strings_buf), strings_size); 1497 _C_strings_count = strings_count; 1498 _C_strings_used = strings_count; 1499 log_debug(aot, codecache, init)(" Loaded %d C strings of total length %d at offset %d from AOT Code Cache", _C_strings_count, strings_size, strings_offset); 1500 } 1501 1502 int AOTCodeCache::store_strings() { 1503 if (_C_strings_used > 0) { 1504 uint offset = _write_position; 1505 uint length = 0; 1506 uint* lengths = (uint *)reserve_bytes(sizeof(uint) * _C_strings_used); 1507 if (lengths == nullptr) { 1508 return -1; 1509 } 1510 for (int i = 0; i < _C_strings_used; i++) { 1511 const char* str = _C_strings[_C_strings_s[i]]; 1512 uint len = (uint)strlen(str) + 1; 1513 length += len; 1514 assert(len < 1000, "big string: %s", str); 1515 lengths[i] = len; 1516 uint n = write_bytes(str, len); 1517 if (n != len) { 1518 return -1; 1519 } 1520 } 1521 log_debug(aot, codecache, exit)(" Wrote %d C strings of total length %d at offset %d to AOT Code Cache", 1522 _C_strings_used, length, offset); 1523 } 1524 return _C_strings_used; 1525 } 1526 1527 const char* AOTCodeCache::add_C_string(const char* str) { 1528 if (is_on_for_dump() && str != nullptr) { 1529 return _cache->_table->add_C_string(str); 1530 } 1531 return str; 1532 } 1533 1534 const 
char* AOTCodeAddressTable::add_C_string(const char* str) { 1535 if (_extrs_complete) { 1536 LogStreamHandle(Trace, aot, codecache, stringtable) log; // ctor outside lock 1537 MutexLocker ml(AOTCodeCStrings_lock, Mutex::_no_safepoint_check_flag); 1538 // Check previous strings address 1539 for (int i = 0; i < _C_strings_count; i++) { 1540 if (_C_strings_in[i] == str) { 1541 return _C_strings[i]; // Found previous one - return our duplicate 1542 } else if (strcmp(_C_strings[i], str) == 0) { 1543 return _C_strings[i]; 1544 } 1545 } 1546 // Add new one 1547 if (_C_strings_count < MAX_STR_COUNT) { 1548 // Passed in string can be freed and used space become inaccessible. 1549 // Keep original address but duplicate string for future compare. 1550 _C_strings_id[_C_strings_count] = -1; // Init 1551 _C_strings_in[_C_strings_count] = str; 1552 const char* dup = os::strdup(str); 1553 _C_strings[_C_strings_count++] = dup; 1554 if (log.is_enabled()) { 1555 log.print_cr("add_C_string: [%d] " INTPTR_FORMAT " '%s'", _C_strings_count, p2i(dup), dup); 1556 } 1557 return dup; 1558 } else { 1559 fatal("Number of C strings >= MAX_STR_COUNT"); 1560 } 1561 } 1562 return str; 1563 } 1564 1565 int AOTCodeAddressTable::id_for_C_string(address str) { 1566 if (str == nullptr) { 1567 return -1; 1568 } 1569 MutexLocker ml(AOTCodeCStrings_lock, Mutex::_no_safepoint_check_flag); 1570 for (int i = 0; i < _C_strings_count; i++) { 1571 if (_C_strings[i] == (const char*)str) { // found 1572 int id = _C_strings_id[i]; 1573 if (id >= 0) { 1574 assert(id < _C_strings_used, "%d >= %d", id , _C_strings_used); 1575 return id; // Found recorded 1576 } 1577 // Not found in recorded, add new 1578 id = _C_strings_used++; 1579 _C_strings_s[id] = i; 1580 _C_strings_id[i] = id; 1581 return id; 1582 } 1583 } 1584 return -1; 1585 } 1586 1587 address AOTCodeAddressTable::address_for_C_string(int idx) { 1588 assert(idx < _C_strings_count, "sanity"); 1589 return (address)_C_strings[idx]; 1590 } 1591 1592 static int search_address(address addr, address* table, uint length) { 1593 for (int i = 0; i < (int)length; i++) { 1594 if (table[i] == addr) { 1595 return i; 1596 } 1597 } 1598 return -1; 1599 } 1600 1601 address AOTCodeAddressTable::address_for_id(int idx) { 1602 if (!_extrs_complete) { 1603 fatal("AOT Code Cache VM runtime addresses table is not complete"); 1604 } 1605 if (idx == -1) { 1606 return (address)-1; 1607 } 1608 uint id = (uint)idx; 1609 // special case for symbols based relative to os::init 1610 if (id > (_c_str_base + _c_str_max)) { 1611 return (address)os::init + idx; 1612 } 1613 if (idx < 0) { 1614 fatal("Incorrect id %d for AOT Code Cache addresses table", id); 1615 } 1616 // no need to compare unsigned id against 0 1617 if (/* id >= _extrs_base && */ id < _extrs_length) { 1618 return _extrs_addr[id - _extrs_base]; 1619 } 1620 if (id >= _stubs_base && id < _stubs_base + _stubs_length) { 1621 return _stubs_addr[id - _stubs_base]; 1622 } 1623 if (id >= _shared_blobs_base && id < _shared_blobs_base + _shared_blobs_length) { 1624 return _shared_blobs_addr[id - _shared_blobs_base]; 1625 } 1626 if (id >= _C1_blobs_base && id < _C1_blobs_base + _C1_blobs_length) { 1627 return _C1_blobs_addr[id - _C1_blobs_base]; 1628 } 1629 if (id >= _c_str_base && id < (_c_str_base + (uint)_C_strings_count)) { 1630 return address_for_C_string(id - _c_str_base); 1631 } 1632 fatal("Incorrect id %d for AOT Code Cache addresses table", id); 1633 return nullptr; 1634 } 1635 1636 int AOTCodeAddressTable::id_for_address(address addr, RelocIterator reloc, 
                                        CodeBlob* code_blob) {
  if (!_extrs_complete) {
    fatal("AOT Code Cache VM runtime addresses table is not complete");
  }
  int id = -1;
  if (addr == (address)-1) { // Static call stub has jump to itself
    return id;
  }
  // Search for C string
  id = id_for_C_string(addr);
  if (id >= 0) {
    return id + _c_str_base;
  }
  if (StubRoutines::contains(addr)) {
    // Search in stubs
    id = search_address(addr, _stubs_addr, _stubs_length);
    if (id < 0) {
      StubCodeDesc* desc = StubCodeDesc::desc_for(addr);
      if (desc == nullptr) {
        desc = StubCodeDesc::desc_for(addr + frame::pc_return_offset);
      }
      const char* sub_name = (desc != nullptr) ? desc->name() : "<unknown>";
      fatal("Address " INTPTR_FORMAT " for Stub:%s is missing in AOT Code Cache addresses table", p2i(addr), sub_name);
    } else {
      return id + _stubs_base;
    }
  } else {
    CodeBlob* cb = CodeCache::find_blob(addr);
    if (cb != nullptr) {
      // Search in code blobs
      int id_base = _shared_blobs_base;
      id = search_address(addr, _shared_blobs_addr, _blobs_max);
      if (id < 0) {
        fatal("Address " INTPTR_FORMAT " for Blob:%s is missing in AOT Code Cache addresses table", p2i(addr), cb->name());
      } else {
        return id_base + id;
      }
    } else {
      // Search in runtime functions
      id = search_address(addr, _extrs_addr, _extrs_length);
      if (id < 0) {
        ResourceMark rm;
        const int buflen = 1024;
        char* func_name = NEW_RESOURCE_ARRAY(char, buflen);
        int offset = 0;
        if (os::dll_address_to_function_name(addr, func_name, buflen, &offset)) {
          if (offset > 0) {
            // Could be address of C string
            uint dist = (uint)pointer_delta(addr, (address)os::init, 1);
            log_debug(aot, codecache)("Address " INTPTR_FORMAT " (offset %d) for runtime target '%s' is missing in AOT Code Cache addresses table",
                                      p2i(addr), dist, (const char*)addr);
            assert(dist > (uint)(_all_max + MAX_STR_COUNT), "change encoding of distance");
            return dist;
          }
          reloc.print_current_on(tty);
          code_blob->print_on(tty);
          code_blob->print_code_on(tty);
          fatal("Address " INTPTR_FORMAT " for runtime target '%s+%d' is missing in AOT Code Cache addresses table", p2i(addr), func_name, offset);
        } else {
          reloc.print_current_on(tty);
          code_blob->print_on(tty);
          code_blob->print_code_on(tty);
          os::find(addr, tty);
          fatal("Address " INTPTR_FORMAT " for <unknown>/('%s') is missing in AOT Code Cache addresses table", p2i(addr), (const char*)addr);
        }
      } else {
        return _extrs_base + id;
      }
    }
  }
  return id;
}

void AOTCodeCache::print_on(outputStream* st) {
  AOTCodeCache* cache = open_for_use();
  if (cache != nullptr) {
    uint count = cache->_load_header->entries_count();
    uint* search_entries = (uint*)cache->addr(cache->_load_header->entries_offset()); // [id, index]
    AOTCodeEntry* load_entries = (AOTCodeEntry*)(search_entries + 2 * count);

    for (uint i = 0; i < count; i++) {
      // Use search_entries[] to order output
      int index = search_entries[2*i + 1];
      AOTCodeEntry* entry = &(load_entries[index]);

      uint entry_position = entry->offset();
      uint name_offset = entry->name_offset() + entry_position;
      const char* saved_name = cache->addr(name_offset);

      st->print_cr("%4u: entry_idx:%4u Kind:%u Id:%u size=%u '%s'",
                   i, index, entry->kind(), entry->id(), entry->size(), saved_name);
    }
  } else {
    st->print_cr("failed to map code cache");
  }
}