1 /*
2 * Copyright (c) 2023, 2026, Oracle and/or its affiliates. All rights reserved.
3 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
4 *
5 * This code is free software; you can redistribute it and/or modify it
6 * under the terms of the GNU General Public License version 2 only, as
7 * published by the Free Software Foundation.
8 *
9 * This code is distributed in the hope that it will be useful, but WITHOUT
10 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
11 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
12 * version 2 for more details (a copy is included in the LICENSE file that
13 * accompanied this code).
14 *
15 * You should have received a copy of the GNU General Public License version
16 * 2 along with this work; if not, write to the Free Software Foundation,
17 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
18 *
19 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
20 * or visit www.oracle.com if you need additional information or have any
21 * questions.
22 *
23 */
24
25
26 #include "asm/macroAssembler.hpp"
27 #include "cds/aotCacheAccess.hpp"
28 #include "cds/aotMetaspace.hpp"
29 #include "cds/cds_globals.hpp"
30 #include "cds/cdsConfig.hpp"
31 #include "cds/heapShared.hpp"
32 #include "classfile/javaAssertions.hpp"
33 #include "code/aotCodeCache.hpp"
34 #include "code/codeCache.hpp"
35 #include "gc/shared/gcConfig.hpp"
36 #include "logging/logStream.hpp"
37 #include "memory/memoryReserver.hpp"
38 #include "runtime/deoptimization.hpp"
39 #include "runtime/flags/flagSetting.hpp"
40 #include "runtime/globals_extension.hpp"
41 #include "runtime/java.hpp"
42 #include "runtime/mutexLocker.hpp"
43 #include "runtime/os.inline.hpp"
44 #include "runtime/sharedRuntime.hpp"
45 #include "runtime/stubInfo.hpp"
46 #include "runtime/stubRoutines.hpp"
47 #include "utilities/copy.hpp"
48 #ifdef COMPILER1
49 #include "c1/c1_Runtime1.hpp"
50 #endif
51 #ifdef COMPILER2
52 #include "opto/runtime.hpp"
53 #endif
54 #if INCLUDE_G1GC
55 #include "gc/g1/g1BarrierSetRuntime.hpp"
56 #endif
57 #if INCLUDE_SHENANDOAHGC
58 #include "gc/shenandoah/shenandoahRuntime.hpp"
59 #endif
60 #if INCLUDE_ZGC
61 #include "gc/z/zBarrierSetRuntime.hpp"
62 #endif
63
64 #include <errno.h>
65 #include <sys/stat.h>
66
// Human-readable names for AOTCodeEntry kinds, generated from the
// DO_AOTCODEENTRY_KIND x-macro so this table always stays in sync
// with the AOTCodeEntry::Kind enum.
const char* aot_code_entry_kind_name[] = {
#define DECL_KIND_STRING(kind) XSTR(kind),
  DO_AOTCODEENTRY_KIND(DECL_KIND_STRING)
#undef DECL_KIND_STRING
};
72
73 static void report_load_failure() {
74 if (AbortVMOnAOTCodeFailure) {
75 vm_exit_during_initialization("Unable to use AOT Code Cache.", nullptr);
76 }
77 log_info(aot, codecache, init)("Unable to use AOT Code Cache.");
78 AOTCodeCache::disable_caching();
79 }
80
81 static void report_store_failure() {
82 if (AbortVMOnAOTCodeFailure) {
83 tty->print_cr("Unable to create AOT Code Cache.");
84 vm_abort(false);
85 }
86 log_info(aot, codecache, exit)("Unable to create AOT Code Cache.");
87 AOTCodeCache::disable_caching();
88 }
89
// The sequence of AOT code caching flag and parameter settings.
//
// 1. The initial AOT code caching flags setting is done
//    during the call to CDSConfig::check_vm_args_consistency().
//
// 2. The earliest AOT code state check is done in compilationPolicy_init()
//    where we set the number of compiler threads for the AOT assembly phase.
//
// 3. We determine the presence of AOT code in the AOT Cache in
//    AOTMetaspace::open_static_archive() which is called
//    after compilationPolicy_init() but before codeCache_init().
//
// 4. AOTCodeCache::initialize() is called during universe_init()
//    and does the final AOT state and flags settings.
//
// 5. Finally AOTCodeCache::init2() is called after universe_init()
//    when all GC settings are finalized.

// The next methods determine which action we take with AOT code depending
// on the phase of the AOT process: assembly or production.
110
111 bool AOTCodeCache::is_dumping_adapter() {
112 return AOTAdapterCaching && is_on_for_dump();
113 }
114
115 bool AOTCodeCache::is_using_adapter() {
116 return AOTAdapterCaching && is_on_for_use();
117 }
118
119 bool AOTCodeCache::is_dumping_stub() {
120 return AOTStubCaching && is_on_for_dump();
121 }
122
123 bool AOTCodeCache::is_using_stub() {
124 return AOTStubCaching && is_on_for_use();
125 }
126
// The next methods can be called regardless of the AOT code cache status.
// Initially they are called during flags parsing and finalized
// in AOTCodeCache::initialize().
130 void AOTCodeCache::enable_caching() {
131 FLAG_SET_ERGO_IF_DEFAULT(AOTStubCaching, true);
132 FLAG_SET_ERGO_IF_DEFAULT(AOTAdapterCaching, true);
133 }
134
135 void AOTCodeCache::disable_caching() {
136 FLAG_SET_ERGO(AOTStubCaching, false);
137 FLAG_SET_ERGO(AOTAdapterCaching, false);
138 }
139
140 bool AOTCodeCache::is_caching_enabled() {
141 return AOTStubCaching || AOTAdapterCaching;
142 }
143
144 static uint32_t encode_id(AOTCodeEntry::Kind kind, int id) {
145 assert(AOTCodeEntry::is_valid_entry_kind(kind), "invalid AOTCodeEntry kind %d", (int)kind);
146 // There can be a conflict of id between an Adapter and *Blob, but that should not cause any functional issue
147 // becasue both id and kind are used to find an entry, and that combination should be unique
148 if (kind == AOTCodeEntry::Adapter) {
149 return id;
150 } else if (kind == AOTCodeEntry::SharedBlob) {
151 assert(StubInfo::is_shared(static_cast<BlobId>(id)), "not a shared blob id %d", id);
152 return id;
153 } else if (kind == AOTCodeEntry::C1Blob) {
154 assert(StubInfo::is_c1(static_cast<BlobId>(id)), "not a c1 blob id %d", id);
155 return id;
156 } else {
157 // kind must be AOTCodeEntry::C2Blob
158 assert(StubInfo::is_c2(static_cast<BlobId>(id)), "not a c2 blob id %d", id);
159 return id;
160 }
161 }
162
// Effective maximum size of the AOT code store buffer; set from
// AOTCodeMaxSize (aligned up) in AOTCodeCache::initialize().
static uint _max_aot_code_size = 0;
uint AOTCodeCache::max_aot_code_size() {
  return _max_aot_code_size;
}
167
// It is called from AOTMetaspace::initialize_shared_spaces()
// which is called from universe_init().
// At this point all AOT class linking settings are finalized
// and the AOT cache is open so we can map the AOT code region.
172 void AOTCodeCache::initialize() {
173 #if defined(ZERO) || !(defined(AMD64) || defined(AARCH64))
174 log_info(aot, codecache, init)("AOT Code Cache is not supported on this platform.");
175 disable_caching();
176 return;
177 #else
178 if (FLAG_IS_DEFAULT(AOTCache)) {
179 log_info(aot, codecache, init)("AOT Code Cache is not used: AOTCache is not specified.");
180 disable_caching();
181 return; // AOTCache must be specified to dump and use AOT code
182 }
183
184 // Disable stubs caching until JDK-8357398 is fixed.
185 FLAG_SET_ERGO(AOTStubCaching, false);
186
187 if (VerifyOops) {
188 // Disable AOT stubs caching when VerifyOops flag is on.
189 // Verify oops code generated a lot of C strings which overflow
190 // AOT C string table (which has fixed size).
191 // AOT C string table will be reworked later to handle such cases.
192 //
193 // Note: AOT adapters are not affected - they don't have oop operations.
194 log_info(aot, codecache, init)("AOT Stubs Caching is not supported with VerifyOops.");
195 FLAG_SET_ERGO(AOTStubCaching, false);
196 }
197
198 bool is_dumping = false;
199 bool is_using = false;
200 if (CDSConfig::is_dumping_final_static_archive() && CDSConfig::is_dumping_aot_linked_classes()) {
201 is_dumping = true;
202 enable_caching();
203 is_dumping = is_caching_enabled();
204 } else if (CDSConfig::is_using_archive() && CDSConfig::is_using_aot_linked_classes()) {
205 enable_caching();
206 is_using = is_caching_enabled();
207 } else {
208 log_info(aot, codecache, init)("AOT Code Cache is not used: AOT Class Linking is not used.");
209 disable_caching();
210 return; // nothing to do
211 }
212 if (!(is_dumping || is_using)) {
213 disable_caching();
214 return; // AOT code caching disabled on command line
215 }
216 _max_aot_code_size = AOTCodeMaxSize;
217 if (!FLAG_IS_DEFAULT(AOTCodeMaxSize)) {
218 if (!is_aligned(AOTCodeMaxSize, os::vm_allocation_granularity())) {
219 _max_aot_code_size = align_up(AOTCodeMaxSize, os::vm_allocation_granularity());
220 log_debug(aot,codecache,init)("Max AOT Code Cache size is aligned up to %uK", (int)(max_aot_code_size()/K));
221 }
222 }
223 size_t aot_code_size = is_using ? AOTCacheAccess::get_aot_code_region_size() : 0;
224 if (is_using && aot_code_size == 0) {
225 log_info(aot, codecache, init)("AOT Code Cache is empty");
226 disable_caching();
227 return;
228 }
229 if (!open_cache(is_dumping, is_using)) {
230 if (is_using) {
231 report_load_failure();
232 } else {
233 report_store_failure();
234 }
235 return;
236 }
237 if (is_dumping) {
238 FLAG_SET_DEFAULT(ForceUnreachable, true);
239 }
240 FLAG_SET_DEFAULT(DelayCompilerStubsGeneration, false);
241 #endif // defined(AMD64) || defined(AARCH64)
242 }
243
// Cache instance created by open_cache(); it is only published to _cache
// in init2() after its recorded configuration has been verified.
static AOTCodeCache* opened_cache = nullptr; // Use this until we verify the cache
AOTCodeCache* AOTCodeCache::_cache = nullptr;
DEBUG_ONLY( bool AOTCodeCache::_passed_init2 = false; )
247
// It is called after universe_init() when all GC settings are finalized.
// Verifies the opened cache's recorded configuration against the running
// VM and, on success, publishes it as the active cache (_cache).
void AOTCodeCache::init2() {
  DEBUG_ONLY( _passed_init2 = true; )
  if (opened_cache == nullptr) {
    return; // initialize() did not open a cache (disabled or failed)
  }
  // Config mismatch (GC, compressed oops/classes, CPU features, ...):
  // discard the cache and report the load failure.
  if (!opened_cache->verify_config()) {
    delete opened_cache;
    opened_cache = nullptr;
    report_load_failure();
    return;
  }

  // initialize the table of external routines so we can save
  // generated code blobs that reference them
  AOTCodeAddressTable* table = opened_cache->_table;
  assert(table != nullptr, "should be initialized already");
  table->init_extrs();

  // Now cache and address table are ready for AOT code generation
  _cache = opened_cache;
}
270
271 bool AOTCodeCache::open_cache(bool is_dumping, bool is_using) {
272 opened_cache = new AOTCodeCache(is_dumping, is_using);
273 if (opened_cache->failed()) {
274 delete opened_cache;
275 opened_cache = nullptr;
276 return false;
277 }
278 return true;
279 }
280
281 void AOTCodeCache::close() {
282 if (is_on()) {
283 delete _cache; // Free memory
284 _cache = nullptr;
285 opened_cache = nullptr;
286 }
287 }
288
289 #define DATA_ALIGNMENT HeapWordSize
290
// Construct the cache for this phase. For use (production): reserve and
// map the AOT code region, verify its header and load the C string table.
// For dump (assembly): allocate the C-heap store buffer. Failure is
// recorded via set_failed() and checked by open_cache().
AOTCodeCache::AOTCodeCache(bool is_dumping, bool is_using) :
  _load_header(nullptr),
  _load_buffer(nullptr),
  _store_buffer(nullptr),
  _C_store_buffer(nullptr),
  _write_position(0),
  _load_size(0),
  _store_size(0),
  _for_use(is_using),
  _for_dump(is_dumping),
  _closing(false),
  _failed(false),
  _lookup_failed(false),
  _table(nullptr),
  _load_entries(nullptr),
  _search_entries(nullptr),
  _store_entries(nullptr),
  _C_strings_buf(nullptr),
  _store_entries_cnt(0)
{
  // Read header at the beginning of cache
  if (_for_use) {
    // Read cache
    size_t load_size = AOTCacheAccess::get_aot_code_region_size();
    ReservedSpace rs = MemoryReserver::reserve(load_size, mtCode);
    if (!rs.is_reserved()) {
      log_warning(aot, codecache, init)("Failed to reserved %u bytes of memory for mapping AOT code region into AOT Code Cache", (uint)load_size);
      set_failed();
      return;
    }
    if (!AOTCacheAccess::map_aot_code_region(rs)) {
      log_warning(aot, codecache, init)("Failed to read/mmap cached code region into AOT Code Cache");
      set_failed();
      return;
    }

    _load_size = (uint)load_size;
    _load_buffer = (char*)rs.base();
    assert(is_aligned(_load_buffer, DATA_ALIGNMENT), "load_buffer is not aligned");
    log_debug(aot, codecache, init)("Mapped %u bytes at address " INTPTR_FORMAT " at AOT Code Cache", _load_size, p2i(_load_buffer));

    // Header sits at offset 0 of the mapped region; verify version/size.
    _load_header = (Header*)addr(0);
    if (!_load_header->verify(_load_size)) {
      set_failed();
      return;
    }
    log_info (aot, codecache, init)("Loaded %u AOT code entries from AOT Code Cache", _load_header->entries_count());
    log_debug(aot, codecache, init)("  Adapters:  total=%u", _load_header->adapters_count());
    log_debug(aot, codecache, init)("  Shared Blobs: total=%u", _load_header->shared_blobs_count());
    log_debug(aot, codecache, init)("  C1 Blobs: total=%u", _load_header->C1_blobs_count());
    log_debug(aot, codecache, init)("  C2 Blobs: total=%u", _load_header->C2_blobs_count());
    log_debug(aot, codecache, init)("  AOT code cache size: %u bytes", _load_header->cache_size());

    // Read strings
    load_strings();
  }
  if (_for_dump) {
    // Over-allocate by DATA_ALIGNMENT so the usable buffer can be aligned.
    _C_store_buffer = NEW_C_HEAP_ARRAY(char, max_aot_code_size() + DATA_ALIGNMENT, mtCode);
    _store_buffer = align_up(_C_store_buffer, DATA_ALIGNMENT);
    // Entries allocated at the end of buffer in reverse (as on stack).
    _store_entries = (AOTCodeEntry*)align_up(_C_store_buffer + max_aot_code_size(), DATA_ALIGNMENT);
    log_debug(aot, codecache, init)("Allocated store buffer at address " INTPTR_FORMAT " of size %u", p2i(_store_buffer), max_aot_code_size());
  }
  _table = new AOTCodeAddressTable();
}
356
357 void AOTCodeCache::init_early_stubs_table() {
358 AOTCodeAddressTable* table = addr_table();
359 if (table != nullptr) {
360 table->init_early_stubs();
361 }
362 }
363
364 void AOTCodeCache::init_shared_blobs_table() {
365 AOTCodeAddressTable* table = addr_table();
366 if (table != nullptr) {
367 table->init_shared_blobs();
368 }
369 }
370
371 void AOTCodeCache::init_early_c1_table() {
372 AOTCodeAddressTable* table = addr_table();
373 if (table != nullptr) {
374 table->init_early_c1();
375 }
376 }
377
// Close the cache: finalize the write side (if dumping) and release the
// store buffer and address table. Idempotent via the _closing flag.
AOTCodeCache::~AOTCodeCache() {
  if (_closing) {
    return; // Already closed
  }
  // Stop any further access to cache.
  _closing = true;

  // Compile_lock serializes against compiler threads still writing AOT code.
  MutexLocker ml(Compile_lock);
  if (for_dump()) { // Finalize cache
    finish_write();
  }
  _load_buffer = nullptr;
  if (_C_store_buffer != nullptr) {
    FREE_C_HEAP_ARRAY(char, _C_store_buffer);
    _C_store_buffer = nullptr;
    _store_buffer = nullptr;
  }
  if (_table != nullptr) {
    // The C strings table may be accessed concurrently; guard its deletion.
    MutexLocker ml(AOTCodeCStrings_lock, Mutex::_no_safepoint_check_flag);
    delete _table;
    _table = nullptr;
  }
}
401
402 void AOTCodeCache::Config::record(uint cpu_features_offset) {
403 _flags = 0;
404 #ifdef ASSERT
405 _flags |= debugVM;
406 #endif
407 if (UseCompressedOops) {
408 _flags |= compressedOops;
409 }
410 if (UseCompressedClassPointers) {
411 _flags |= compressedClassPointers;
412 }
413 if (UseTLAB) {
414 _flags |= useTLAB;
415 }
416 if (JavaAssertions::systemClassDefault()) {
417 _flags |= systemClassAssertions;
418 }
419 if (JavaAssertions::userClassDefault()) {
420 _flags |= userClassAssertions;
421 }
422 if (EnableContended) {
423 _flags |= enableContendedPadding;
424 }
425 if (RestrictContended) {
426 _flags |= restrictContendedPadding;
427 }
428 _compressedOopShift = CompressedOops::shift();
429 _compressedOopBase = CompressedOops::base();
430 _compressedKlassShift = CompressedKlassPointers::shift();
431 _contendedPaddingWidth = ContendedPaddingWidth;
432 _gc = (uint)Universe::heap()->kind();
433 _cpu_features_offset = cpu_features_offset;
434 }
435
// Check that the CPU features recorded at dump time are all supported by
// the current CPU. Returns false (cache unusable) when required features
// are missing; extra runtime-only features are merely logged.
bool AOTCodeCache::Config::verify_cpu_features(AOTCodeCache* cache) const {
  LogStreamHandle(Debug, aot, codecache, init) log;
  uint offset = _cpu_features_offset;
  // Layout written by store_cpu_features(): uint size, then feature bits.
  uint cpu_features_size = *(uint *)cache->addr(offset);
  assert(cpu_features_size == (uint)VM_Version::cpu_features_size(), "must be");
  offset += sizeof(uint);

  void* cached_cpu_features_buffer = (void *)cache->addr(offset);
  if (log.is_enabled()) {
    ResourceMark rm; // required for stringStream::as_string()
    stringStream ss;
    VM_Version::get_cpu_features_name(cached_cpu_features_buffer, ss);
    log.print_cr("CPU features recorded in AOTCodeCache: %s", ss.as_string());
  }

  if (VM_Version::supports_features(cached_cpu_features_buffer)) {
    // Compatible; log any features the runtime has beyond the recorded set.
    if (log.is_enabled()) {
      ResourceMark rm; // required for stringStream::as_string()
      stringStream ss;
      char* runtime_cpu_features = NEW_RESOURCE_ARRAY(char, VM_Version::cpu_features_size());
      VM_Version::store_cpu_features(runtime_cpu_features);
      VM_Version::get_missing_features_name(runtime_cpu_features, cached_cpu_features_buffer, ss);
      if (!ss.is_empty()) {
        log.print_cr("Additional runtime CPU features: %s", ss.as_string());
      }
    }
  } else {
    // Incompatible; log which recorded features the runtime CPU lacks.
    if (log.is_enabled()) {
      ResourceMark rm; // required for stringStream::as_string()
      stringStream ss;
      char* runtime_cpu_features = NEW_RESOURCE_ARRAY(char, VM_Version::cpu_features_size());
      VM_Version::store_cpu_features(runtime_cpu_features);
      VM_Version::get_missing_features_name(cached_cpu_features_buffer, runtime_cpu_features, ss);
      log.print_cr("AOT Code Cache disabled: required cpu features are missing: %s", ss.as_string());
    }
    return false;
  }
  return true;
}
475
// Verify the configuration recorded at dump time against the running VM.
// Hard mismatches (debug/product, GC, compressed class pointers, CPU
// features) return false and disable the whole cache; oop-compression
// mismatches only disable stub caching (adapters remain usable).
bool AOTCodeCache::Config::verify(AOTCodeCache* cache) const {
  // First checks affect all cached AOT code
#ifdef ASSERT
  if ((_flags & debugVM) == 0) {
    log_debug(aot, codecache, init)("AOT Code Cache disabled: it was created by product VM, it can't be used by debug VM");
    return false;
  }
#else
  if ((_flags & debugVM) != 0) {
    log_debug(aot, codecache, init)("AOT Code Cache disabled: it was created by debug VM, it can't be used by product VM");
    return false;
  }
#endif

  CollectedHeap::Name aot_gc = (CollectedHeap::Name)_gc;
  if (aot_gc != Universe::heap()->kind()) {
    log_debug(aot, codecache, init)("AOT Code Cache disabled: it was created with different GC: %s vs current %s", GCConfig::hs_err_name(aot_gc), GCConfig::hs_err_name());
    return false;
  }

  if (((_flags & compressedClassPointers) != 0) != UseCompressedClassPointers) {
    // The message reports the dump-time value, i.e. the opposite of the
    // current UseCompressedClassPointers setting.
    log_debug(aot, codecache, init)("AOT Code Cache disabled: it was created with UseCompressedClassPointers = %s", UseCompressedClassPointers ? "false" : "true");
    return false;
  }
  if (_compressedKlassShift != (uint)CompressedKlassPointers::shift()) {
    log_debug(aot, codecache, init)("AOT Code Cache disabled: it was created with CompressedKlassPointers::shift() = %d vs current %d", _compressedKlassShift, CompressedKlassPointers::shift());
    return false;
  }

  // The following checks do not affect AOT adapters caching
  // NOTE(review): these write the AOTStubCaching global directly rather
  // than via FLAG_SET_ERGO — confirm this is intentional at this point.

  if (((_flags & compressedOops) != 0) != UseCompressedOops) {
    log_debug(aot, codecache, init)("AOT Code Cache disabled: it was created with UseCompressedOops = %s", UseCompressedOops ? "false" : "true");
    AOTStubCaching = false;
  }
  if (_compressedOopShift != (uint)CompressedOops::shift()) {
    log_debug(aot, codecache, init)("AOT Code Cache disabled: it was created with different CompressedOops::shift(): %d vs current %d", _compressedOopShift, CompressedOops::shift());
    AOTStubCaching = false;
  }

  // This should be the last check as it only disables AOTStubCaching
  if ((_compressedOopBase == nullptr || CompressedOops::base() == nullptr) && (_compressedOopBase != CompressedOops::base())) {
    log_debug(aot, codecache, init)("AOTStubCaching is disabled: incompatible CompressedOops::base(): %p vs current %p", _compressedOopBase, CompressedOops::base());
    AOTStubCaching = false;
  }

  if (!verify_cpu_features(cache)) {
    return false;
  }
  return true;
}
527
// Sanity check the mapped cache header: the stored format version must
// match this VM's AOT_CODE_VERSION and the mapped region must be large
// enough to hold the recorded cache contents.
bool AOTCodeCache::Header::verify(uint load_size) const {
  if (_version != AOT_CODE_VERSION) {
    log_debug(aot, codecache, init)("AOT Code Cache disabled: different AOT Code version %d vs %d recorded in AOT Code header", AOT_CODE_VERSION, _version);
    return false;
  }
  if (load_size < _cache_size) {
    log_debug(aot, codecache, init)("AOT Code Cache disabled: AOT Code Cache size %d < %d recorded in AOT Code header", load_size, _cache_size);
    return false;
  }
  return true;
}
539
540 AOTCodeCache* AOTCodeCache::open_for_use() {
541 if (AOTCodeCache::is_on_for_use()) {
542 return AOTCodeCache::cache();
543 }
544 return nullptr;
545 }
546
547 AOTCodeCache* AOTCodeCache::open_for_dump() {
548 if (AOTCodeCache::is_on_for_dump()) {
549 AOTCodeCache* cache = AOTCodeCache::cache();
550 cache->clear_lookup_failed(); // Reset bit
551 return cache;
552 }
553 return nullptr;
554 }
555
// Copy 'size' bytes from 'from' to 'to' (memcpy — regions must not
// overlap). Asserts that size is non-zero and fits in a positive int.
void copy_bytes(const char* from, address to, uint size) {
  assert((int)size > 0, "sanity");
  memcpy(to, from, size);
  log_trace(aot, codecache)("Copied %d bytes from " INTPTR_FORMAT " to " INTPTR_FORMAT, size, p2i(from), p2i(to));
}
561
562 AOTCodeReader::AOTCodeReader(AOTCodeCache* cache, AOTCodeEntry* entry) {
563 _cache = cache;
564 _entry = entry;
565 _load_buffer = cache->cache_buffer();
566 _read_position = 0;
567 _lookup_failed = false;
568 }
569
570 void AOTCodeReader::set_read_position(uint pos) {
571 if (pos == _read_position) {
572 return;
573 }
574 assert(pos < _cache->load_size(), "offset:%d >= file size:%d", pos, _cache->load_size());
575 _read_position = pos;
576 }
577
// Reposition the write cursor (used to go back and patch earlier data).
// The high-water mark is captured in _store_size before the bounds check
// so that moving backwards never loses the furthest point written.
bool AOTCodeCache::set_write_position(uint pos) {
  if (pos == _write_position) {
    return true;
  }
  if (_store_size < _write_position) {
    _store_size = _write_position; // Adjust during write
  }
  assert(pos < _store_size, "offset:%d >= file size:%d", pos, _store_size);
  _write_position = pos;
  return true;
}
589
static char align_buffer[256] = { 0 }; // zero-filled padding source used by align_write()
591
592 bool AOTCodeCache::align_write() {
593 // We are not executing code from cache - we copy it by bytes first.
594 // No need for big alignment (or at all).
595 uint padding = DATA_ALIGNMENT - (_write_position & (DATA_ALIGNMENT - 1));
596 if (padding == DATA_ALIGNMENT) {
597 return true;
598 }
599 uint n = write_bytes((const void*)&align_buffer, padding);
600 if (n != padding) {
601 return false;
602 }
603 log_trace(aot, codecache)("Adjust write alignment in AOT Code Cache");
604 return true;
605 }
606
607 // Check to see if AOT code cache has required space to store "nbytes" of data
608 address AOTCodeCache::reserve_bytes(uint nbytes) {
609 assert(for_dump(), "Code Cache file is not created");
610 uint new_position = _write_position + nbytes;
611 if (new_position >= (uint)((char*)_store_entries - _store_buffer)) {
612 log_warning(aot,codecache)("Failed to ensure %d bytes at offset %d in AOT Code Cache. Increase AOTCodeMaxSize.",
613 nbytes, _write_position);
614 set_failed();
615 report_store_failure();
616 return nullptr;
617 }
618 address buffer = (address)(_store_buffer + _write_position);
619 log_trace(aot, codecache)("Reserved %d bytes at offset %d in AOT Code Cache", nbytes, _write_position);
620 _write_position += nbytes;
621 if (_store_size < _write_position) {
622 _store_size = _write_position;
623 }
624 return buffer;
625 }
626
627 uint AOTCodeCache::write_bytes(const void* buffer, uint nbytes) {
628 assert(for_dump(), "Code Cache file is not created");
629 if (nbytes == 0) {
630 return 0;
631 }
632 uint new_position = _write_position + nbytes;
633 if (new_position >= (uint)((char*)_store_entries - _store_buffer)) {
634 log_warning(aot, codecache)("Failed to write %d bytes at offset %d to AOT Code Cache. Increase AOTCodeMaxSize.",
635 nbytes, _write_position);
636 set_failed();
637 report_store_failure();
638 return 0;
639 }
640 copy_bytes((const char* )buffer, (address)(_store_buffer + _write_position), nbytes);
641 log_trace(aot, codecache)("Wrote %d bytes at offset %d to AOT Code Cache", nbytes, _write_position);
642 _write_position += nbytes;
643 if (_store_size < _write_position) {
644 _store_size = _write_position;
645 }
646 return nbytes;
647 }
648
// Placement new: AOTCodeEntry objects live inside the cache's store
// buffer (allocated from its end by add_entry()); they are never freed
// individually.
void* AOTCodeEntry::operator new(size_t x, AOTCodeCache* cache) {
  return (void*)(cache->add_entry());
}
652
653 static bool check_entry(AOTCodeEntry::Kind kind, uint id, AOTCodeEntry* entry) {
654 if (entry->kind() == kind) {
655 assert(entry->id() == id, "sanity");
656 return true; // Found
657 }
658 return false;
659 }
660
// Find the cached entry with the given (kind, id). The search table is
// an id-sorted array of [id, index] pairs; binary search locates one
// matching id, then a linear scan in both directions resolves id
// collisions between different kinds. Returns nullptr when not found.
AOTCodeEntry* AOTCodeCache::find_entry(AOTCodeEntry::Kind kind, uint id) {
  assert(_for_use, "sanity");
  uint count = _load_header->entries_count();
  if (_load_entries == nullptr) {
    // Lazily locate the search table and entry array in the mapped cache.
    _search_entries = (uint*)addr(_load_header->entries_offset()); // [id, index]
    _load_entries = (AOTCodeEntry*)(_search_entries + 2 * count);
    log_debug(aot, codecache, init)("Read %d entries table at offset %d from AOT Code Cache", count, _load_header->entries_offset());
  }
  // Binary search
  int l = 0;
  int h = count - 1;
  while (l <= h) {
    int mid = (l + h) >> 1;
    int ix = mid * 2;
    uint is = _search_entries[ix];
    if (is == id) {
      int index = _search_entries[ix + 1];
      AOTCodeEntry* entry = &(_load_entries[index]);
      if (check_entry(kind, id, entry)) {
        return entry; // Found
      }
      // Linear search around to handle id collision
      for (int i = mid - 1; i >= l; i--) { // search back
        ix = i * 2;
        is = _search_entries[ix];
        if (is != id) {
          break;
        }
        index = _search_entries[ix + 1];
        AOTCodeEntry* entry = &(_load_entries[index]);
        if (check_entry(kind, id, entry)) {
          return entry; // Found
        }
      }
      for (int i = mid + 1; i <= h; i++) { // search forward
        ix = i * 2;
        is = _search_entries[ix];
        if (is != id) {
          break;
        }
        index = _search_entries[ix + 1];
        AOTCodeEntry* entry = &(_load_entries[index]);
        if (check_entry(kind, id, entry)) {
          return entry; // Found
        }
      }
      break; // Not found match
    } else if (is < id) {
      l = mid + 1;
    } else {
      h = mid - 1;
    }
  }
  return nullptr;
}
717
extern "C" {
  // qsort comparator for the [id, index] search table: orders elements
  // ascending by their leading uint (the id).
  static int uint_cmp(const void *i, const void *j) {
    uint a = *(uint *)i;
    uint b = *(uint *)j;
    if (a == b) {
      return 0;
    }
    return (a < b) ? -1 : 1;
  }
}
725
// Write the CPU feature record into 'buffer': a uint length followed by
// the feature bits. Advances 'buffer' past the record, aligned up to
// DATA_ALIGNMENT (matching the layout read by verify_cpu_features()).
void AOTCodeCache::store_cpu_features(char*& buffer, uint buffer_size) {
  uint* size_ptr = (uint *)buffer;
  *size_ptr = buffer_size;
  buffer += sizeof(uint);

  VM_Version::store_cpu_features(buffer);
  log_debug(aot, codecache, exit)("CPU features recorded in AOTCodeCache: %s", VM_Version::features_string());
  buffer += buffer_size;
  buffer = align_up(buffer, DATA_ALIGNMENT);
}
736
// Finalize the dump: lay out strings, code entries, the sorted search
// table and the entry array into the AOT cache region, then write the
// header. Returns false on a write failure, true otherwise (including
// the "nothing to write" case).
bool AOTCodeCache::finish_write() {
  if (!align_write()) {
    return false;
  }
  uint strings_offset = _write_position;
  int strings_count = store_strings();
  if (strings_count < 0) {
    return false;
  }
  if (!align_write()) {
    return false;
  }
  uint strings_size = _write_position - strings_offset;

  uint entries_count = 0; // Number of entrant (useful) code entries
  uint entries_offset = _write_position;

  uint store_count = _store_entries_cnt;
  if (store_count > 0) {
    // Compute an upper bound for the final region size.
    uint header_size = (uint)align_up(sizeof(AOTCodeCache::Header), DATA_ALIGNMENT);
    uint code_count = store_count;
    uint search_count = code_count * 2;
    uint search_size = search_count * sizeof(uint);
    uint entries_size = (uint)align_up(code_count * sizeof(AOTCodeEntry), DATA_ALIGNMENT); // In bytes
    // _write_position includes size of code and strings
    uint code_alignment = code_count * DATA_ALIGNMENT; // We align_up code size when storing it.
    uint cpu_features_size = VM_Version::cpu_features_size();
    uint total_cpu_features_size = sizeof(uint) + cpu_features_size; // sizeof(uint) to store cpu_features_size
    uint total_size = header_size + _write_position + code_alignment + search_size + entries_size +
                      align_up(total_cpu_features_size, DATA_ALIGNMENT);
    assert(total_size < max_aot_code_size(), "AOT Code size (" UINT32_FORMAT " bytes) is greater than AOTCodeMaxSize(" UINT32_FORMAT " bytes).", total_size, max_aot_code_size());

    // Allocate in AOT Cache buffer
    // NOTE(review): 'buffer' is not checked for nullptr — confirm
    // allocate_aot_code_region() cannot fail at this point.
    char* buffer = (char *)AOTCacheAccess::allocate_aot_code_region(total_size + DATA_ALIGNMENT);
    char* start = align_up(buffer, DATA_ALIGNMENT);
    char* current = start + header_size; // Skip header

    // CPU features record goes right after the header.
    uint cpu_features_offset = current - start;
    store_cpu_features(current, cpu_features_size);
    assert(is_aligned(current, DATA_ALIGNMENT), "sanity check");
    assert(current < start + total_size, "sanity check");

    // Create ordered search table for entries [id, index];
    uint* search = NEW_C_HEAP_ARRAY(uint, search_count, mtCode);

    AOTCodeEntry* entries_address = _store_entries; // Pointer to latest entry
    uint adapters_count = 0;
    uint shared_blobs_count = 0;
    uint C1_blobs_count = 0;
    uint C2_blobs_count = 0;
    uint max_size = 0;
    // AOTCodeEntry entries were allocated in reverse in store buffer.
    // Process them in reverse order to cache first code first.
    for (int i = store_count - 1; i >= 0; i--) {
      entries_address[i].set_next(nullptr); // clear pointers before storing data
      uint size = align_up(entries_address[i].size(), DATA_ALIGNMENT);
      if (size > max_size) {
        max_size = size;
      }
      // Relocate the entry's code into the final region and record its
      // new offset.
      copy_bytes((_store_buffer + entries_address[i].offset()), (address)current, size);
      entries_address[i].set_offset(current - start); // New offset
      current += size;
      uint n = write_bytes(&(entries_address[i]), sizeof(AOTCodeEntry));
      if (n != sizeof(AOTCodeEntry)) {
        FREE_C_HEAP_ARRAY(uint, search);
        return false;
      }
      search[entries_count*2 + 0] = entries_address[i].id();
      search[entries_count*2 + 1] = entries_count;
      entries_count++;
      AOTCodeEntry::Kind kind = entries_address[i].kind();
      if (kind == AOTCodeEntry::Adapter) {
        adapters_count++;
      } else if (kind == AOTCodeEntry::SharedBlob) {
        shared_blobs_count++;
      } else if (kind == AOTCodeEntry::C1Blob) {
        C1_blobs_count++;
      } else if (kind == AOTCodeEntry::C2Blob) {
        C2_blobs_count++;
      }
    }
    if (entries_count == 0) {
      // NOTE(review): "entires" is a typo in this log message; left as-is
      // here because fixing it changes runtime output.
      log_info(aot, codecache, exit)("AOT Code Cache was not created: no entires");
      FREE_C_HEAP_ARRAY(uint, search);
      return true; // Nothing to write
    }
    assert(entries_count <= store_count, "%d > %d", entries_count, store_count);
    // Write strings
    if (strings_count > 0) {
      copy_bytes((_store_buffer + strings_offset), (address)current, strings_size);
      strings_offset = (current - start); // New offset
      current += strings_size;
    }

    uint new_entries_offset = (current - start); // New offset
    // Sort and store search table
    qsort(search, entries_count, 2*sizeof(uint), uint_cmp);
    search_size = 2 * entries_count * sizeof(uint);
    copy_bytes((const char*)search, (address)current, search_size);
    FREE_C_HEAP_ARRAY(uint, search);
    current += search_size;

    // Write entries
    entries_size = entries_count * sizeof(AOTCodeEntry); // New size
    copy_bytes((_store_buffer + entries_offset), (address)current, entries_size);
    current += entries_size;
    uint size = (current - start);
    assert(size <= total_size, "%d > %d", size , total_size);

    log_debug(aot, codecache, exit)("  Adapters:  total=%u", adapters_count);
    log_debug(aot, codecache, exit)("  Shared Blobs: total=%d", shared_blobs_count);
    log_debug(aot, codecache, exit)("  C1 Blobs: total=%d", C1_blobs_count);
    log_debug(aot, codecache, exit)("  C2 Blobs: total=%d", C2_blobs_count);
    log_debug(aot, codecache, exit)("  AOT code cache size: %u bytes, max entry's size: %u bytes", size, max_size);

    // Finalize header
    AOTCodeCache::Header* header = (AOTCodeCache::Header*)start;
    header->init(size, (uint)strings_count, strings_offset,
                 entries_count, new_entries_offset,
                 adapters_count, shared_blobs_count,
                 C1_blobs_count, C2_blobs_count, cpu_features_offset);

    log_info(aot, codecache, exit)("Wrote %d AOT code entries to AOT Code Cache", entries_count);
  }
  return true;
}
863
864 //------------------Store/Load AOT code ----------------------
865
// Serialize 'blob' into the AOT Code Cache under the given kind/id/name and
// register an AOTCodeEntry describing it. The layout written at the current
// (aligned) write position is: name (NUL-terminated), aligned archived
// CodeBlob image, relocation data, optional oop map set,
// [non-product: asm remarks and dbg strings], extra relocation data.
// Returns false when the cache is unavailable, the kind is not being dumped,
// a write fails, or the blob contains a relocation that cannot be archived
// (in which case the write position is rolled back and the cache stays usable).
bool AOTCodeCache::store_code_blob(CodeBlob& blob, AOTCodeEntry::Kind entry_kind, uint id, const char* name) {
  AOTCodeCache* cache = open_for_dump();
  if (cache == nullptr) {
    return false;
  }
  assert(AOTCodeEntry::is_valid_entry_kind(entry_kind), "invalid entry_kind %d", entry_kind);

  // Respect per-kind enablement of AOT code dumping.
  if (AOTCodeEntry::is_adapter(entry_kind) && !is_dumping_adapter()) {
    return false;
  }
  if (AOTCodeEntry::is_blob(entry_kind) && !is_dumping_stub()) {
    return false;
  }
  log_debug(aot, codecache, stubs)("Writing blob '%s' (id=%u, kind=%s) to AOT Code Cache", name, id, aot_code_entry_kind_name[entry_kind]);

#ifdef ASSERT
  LogStreamHandle(Trace, aot, codecache, stubs) log;
  if (log.is_enabled()) {
    FlagSetting fs(PrintRelocations, true);
    blob.print_on(&log);
  }
#endif
  // we need to take a lock to prevent race between compiler threads generating AOT code
  // and the main thread generating adapter
  MutexLocker ml(Compile_lock);
  if (!is_on()) {
    return false; // AOT code cache was already dumped and closed.
  }
  if (!cache->align_write()) {
    return false;
  }
  // All offsets recorded below are relative to this entry's start position.
  uint entry_position = cache->_write_position;

  // Write name
  uint name_offset = cache->_write_position - entry_position;
  uint name_size = (uint)strlen(name) + 1; // Includes '\0'
  uint n = cache->write_bytes(name, name_size);
  if (n != name_size) {
    return false;
  }

  // Write CodeBlob
  if (!cache->align_write()) {
    return false;
  }
  uint blob_offset = cache->_write_position - entry_position;
  address archive_buffer = cache->reserve_bytes(blob.size());
  if (archive_buffer == nullptr) {
    return false;
  }
  CodeBlob::archive_blob(&blob, archive_buffer);

  // Relocation data follows the archived blob image.
  uint reloc_data_size = blob.relocation_size();
  n = cache->write_bytes((address)blob.relocation_begin(), reloc_data_size);
  if (n != reloc_data_size) {
    return false;
  }

  bool has_oop_maps = false;
  if (blob.oop_maps() != nullptr) {
    if (!cache->write_oop_map_set(blob)) {
      return false;
    }
    has_oop_maps = true;
  }

#ifndef PRODUCT
  // Write asm remarks
  if (!cache->write_asm_remarks(blob)) {
    return false;
  }
  if (!cache->write_dbg_strings(blob)) {
    return false;
  }
#endif /* PRODUCT */

  if (!cache->write_relocations(blob)) {
    if (!cache->failed()) {
      // We may miss an address in AOT table - skip this code blob.
      cache->set_write_position(entry_position);
    }
    return false;
  }

  // Record the entry (allocated via the cache's placement operator new).
  uint entry_size = cache->_write_position - entry_position;
  AOTCodeEntry* entry = new(cache) AOTCodeEntry(entry_kind, encode_id(entry_kind, id),
                                                entry_position, entry_size, name_offset, name_size,
                                                blob_offset, has_oop_maps, blob.content_begin());
  log_debug(aot, codecache, stubs)("Wrote code blob '%s' (id=%u, kind=%s) to AOT Code Cache", name, id, aot_code_entry_kind_name[entry_kind]);
  return true;
}
957
958 bool AOTCodeCache::store_code_blob(CodeBlob& blob, AOTCodeEntry::Kind entry_kind, BlobId id) {
959 assert(AOTCodeEntry::is_blob(entry_kind),
960 "wrong entry kind for blob id %s", StubInfo::name(id));
961 return store_code_blob(blob, entry_kind, (uint)id, StubInfo::name(id));
962 }
963
964 CodeBlob* AOTCodeCache::load_code_blob(AOTCodeEntry::Kind entry_kind, uint id, const char* name) {
965 AOTCodeCache* cache = open_for_use();
966 if (cache == nullptr) {
967 return nullptr;
968 }
969 assert(AOTCodeEntry::is_valid_entry_kind(entry_kind), "invalid entry_kind %d", entry_kind);
970
971 if (AOTCodeEntry::is_adapter(entry_kind) && !is_using_adapter()) {
972 return nullptr;
973 }
974 if (AOTCodeEntry::is_blob(entry_kind) && !is_using_stub()) {
975 return nullptr;
976 }
977 log_debug(aot, codecache, stubs)("Reading blob '%s' (id=%u, kind=%s) from AOT Code Cache", name, id, aot_code_entry_kind_name[entry_kind]);
978
979 AOTCodeEntry* entry = cache->find_entry(entry_kind, encode_id(entry_kind, id));
980 if (entry == nullptr) {
981 return nullptr;
982 }
983 AOTCodeReader reader(cache, entry);
984 CodeBlob* blob = reader.compile_code_blob(name);
985
986 log_debug(aot, codecache, stubs)("%sRead blob '%s' (id=%u, kind=%s) from AOT Code Cache",
987 (blob == nullptr? "Failed to " : ""), name, id, aot_code_entry_kind_name[entry_kind]);
988 return blob;
989 }
990
991 CodeBlob* AOTCodeCache::load_code_blob(AOTCodeEntry::Kind entry_kind, BlobId id) {
992 assert(AOTCodeEntry::is_blob(entry_kind),
993 "wrong entry kind for blob id %s", StubInfo::name(id));
994 return load_code_blob(entry_kind, (uint)id, StubInfo::name(id));
995 }
996
// Materialize a live CodeBlob in the CodeCache from this reader's cache entry.
// Mirrors the layout produced by AOTCodeCache::store_code_blob(): name,
// archived blob image, relocation data, optional oop maps,
// [non-product: asm remarks and dbg strings], extra relocation data.
// Returns nullptr when the stored name does not match or the CodeCache is full.
CodeBlob* AOTCodeReader::compile_code_blob(const char* name) {
  uint entry_position = _entry->offset();

  // Read name
  uint name_offset = entry_position + _entry->name_offset();
  uint name_size = _entry->name_size(); // Includes '\0'
  const char* stored_name = addr(name_offset);

  if (strncmp(stored_name, name, (name_size - 1)) != 0) {
    log_warning(aot, codecache, stubs)("Saved blob's name '%s' is different from the expected name '%s'",
                                       stored_name, name);
    set_lookup_failed(); // Skip this blob
    return nullptr;
  }

  // Read archived code blob
  uint offset = entry_position + _entry->blob_offset();
  CodeBlob* archived_blob = (CodeBlob*)addr(offset);
  offset += archived_blob->size();

  // Relocation data immediately follows the archived blob image.
  address reloc_data = (address)addr(offset);
  offset += archived_blob->relocation_size();
  set_read_position(offset);

  ImmutableOopMapSet* oop_maps = nullptr;
  if (_entry->has_oop_maps()) {
    oop_maps = read_oop_map_set();
  }

  // Instantiate a live blob in the CodeCache from the archived image.
  CodeBlob* code_blob = CodeBlob::create(archived_blob,
                                         stored_name,
                                         reloc_data,
                                         oop_maps
                                         );
  if (code_blob == nullptr) { // no space left in CodeCache
    return nullptr;
  }

#ifndef PRODUCT
  code_blob->asm_remarks().init();
  read_asm_remarks(code_blob->asm_remarks());
  code_blob->dbg_strings().init();
  read_dbg_strings(code_blob->dbg_strings());
#endif // PRODUCT

  // Patch relocations for the current process' address space.
  fix_relocations(code_blob);

#ifdef ASSERT
  LogStreamHandle(Trace, aot, codecache, stubs) log;
  if (log.is_enabled()) {
    FlagSetting fs(PrintRelocations, true);
    code_blob->print_on(&log);
  }
#endif
  return code_blob;
}
1053
1054 // ------------ process code and data --------------
1055
// Can't use -1. It is a valid value for a jump-to-itself destination
// used by the static call stub: see NativeJump::jump_destination().
1058 #define BAD_ADDRESS_ID -2
1059
1060 bool AOTCodeCache::write_relocations(CodeBlob& code_blob) {
1061 GrowableArray<uint> reloc_data;
1062 RelocIterator iter(&code_blob);
1063 LogStreamHandle(Trace, aot, codecache, reloc) log;
1064 while (iter.next()) {
1065 int idx = reloc_data.append(0); // default value
1066 switch (iter.type()) {
1067 case relocInfo::none:
1068 break;
1069 case relocInfo::runtime_call_type: {
1070 // Record offset of runtime destination
1071 CallRelocation* r = (CallRelocation*)iter.reloc();
1072 address dest = r->destination();
1073 if (dest == r->addr()) { // possible call via trampoline on Aarch64
1074 dest = (address)-1; // do nothing in this case when loading this relocation
1075 }
1076 int id = _table->id_for_address(dest, iter, &code_blob);
1077 if (id == BAD_ADDRESS_ID) {
1078 return false;
1079 }
1080 reloc_data.at_put(idx, id);
1081 break;
1082 }
1083 case relocInfo::runtime_call_w_cp_type:
1084 log_debug(aot, codecache, reloc)("runtime_call_w_cp_type relocation is not implemented");
1085 return false;
1086 case relocInfo::external_word_type: {
1087 // Record offset of runtime target
1088 address target = ((external_word_Relocation*)iter.reloc())->target();
1089 int id = _table->id_for_address(target, iter, &code_blob);
1090 if (id == BAD_ADDRESS_ID) {
1091 return false;
1092 }
1093 reloc_data.at_put(idx, id);
1094 break;
1095 }
1096 case relocInfo::internal_word_type:
1097 break;
1098 case relocInfo::section_word_type:
1099 break;
1100 case relocInfo::post_call_nop_type:
1101 break;
1102 default:
1103 log_debug(aot, codecache, reloc)("relocation %d unimplemented", (int)iter.type());
1104 return false;
1105 break;
1106 }
1107 if (log.is_enabled()) {
1108 iter.print_current_on(&log);
1109 }
1110 }
1111
1112 // Write additional relocation data: uint per relocation
1113 // Write the count first
1114 int count = reloc_data.length();
1115 write_bytes(&count, sizeof(int));
1116 for (GrowableArrayIterator<uint> iter = reloc_data.begin();
1117 iter != reloc_data.end(); ++iter) {
1118 uint value = *iter;
1119 int n = write_bytes(&value, sizeof(uint));
1120 if (n != sizeof(uint)) {
1121 return false;
1122 }
1123 }
1124 return true;
1125 }
1126
// Patch the relocations of a freshly loaded code blob for the current
// process, consuming the per-relocation data written by write_relocations():
// a count followed by one uint id per relocation, walked in lock-step with
// a RelocIterator over the blob.
void AOTCodeReader::fix_relocations(CodeBlob* code_blob) {
  LogStreamHandle(Trace, aot, reloc) log;
  uint offset = read_position();
  int count = *(int*)addr(offset);
  offset += sizeof(int);
  if (log.is_enabled()) {
    log.print_cr("======== extra relocations count=%d", count);
  }
  uint* reloc_data = (uint*)addr(offset);
  offset += (count * sizeof(uint));
  set_read_position(offset);

  RelocIterator iter(code_blob);
  int j = 0; // index into reloc_data, advanced once per relocation
  while (iter.next()) {
    switch (iter.type()) {
      case relocInfo::none:
        break;
      case relocInfo::runtime_call_type: {
        // (address)-1 was recorded for a jump-to-self; leave it untouched.
        address dest = _cache->address_for_id(reloc_data[j]);
        if (dest != (address)-1) {
          ((CallRelocation*)iter.reloc())->set_destination(dest);
        }
        break;
      }
      case relocInfo::runtime_call_w_cp_type:
        // this relocation should not be in cache (see write_relocations)
        assert(false, "runtime_call_w_cp_type relocation is not implemented");
        break;
      case relocInfo::external_word_type: {
        address target = _cache->address_for_id(reloc_data[j]);
        // Add external address to global table
        int index = ExternalsRecorder::find_index(target);
        // Update index in relocation
        Relocation::add_jint(iter.data(), index);
        external_word_Relocation* reloc = (external_word_Relocation*)iter.reloc();
        assert(reloc->target() == target, "sanity");
        reloc->set_value(target); // Patch address in the code
        break;
      }
      case relocInfo::internal_word_type: {
        // Rebase against the blob's dump-time content start address.
        internal_word_Relocation* r = (internal_word_Relocation*)iter.reloc();
        r->fix_relocation_after_aot_load(aot_code_entry()->dumptime_content_start_addr(), code_blob->content_begin());
        break;
      }
      case relocInfo::section_word_type: {
        section_word_Relocation* r = (section_word_Relocation*)iter.reloc();
        r->fix_relocation_after_aot_load(aot_code_entry()->dumptime_content_start_addr(), code_blob->content_begin());
        break;
      }
      case relocInfo::post_call_nop_type:
        break;
      default:
        assert(false,"relocation %d unimplemented", (int)iter.type());
        break;
    }
    if (log.is_enabled()) {
      iter.print_current_on(&log);
    }
    j++;
  }
  assert(j == count, "sanity");
}
1190
1191 bool AOTCodeCache::write_oop_map_set(CodeBlob& cb) {
1192 ImmutableOopMapSet* oopmaps = cb.oop_maps();
1193 int oopmaps_size = oopmaps->nr_of_bytes();
1194 if (!write_bytes(&oopmaps_size, sizeof(int))) {
1195 return false;
1196 }
1197 uint n = write_bytes(oopmaps, oopmaps->nr_of_bytes());
1198 if (n != (uint)oopmaps->nr_of_bytes()) {
1199 return false;
1200 }
1201 return true;
1202 }
1203
1204 ImmutableOopMapSet* AOTCodeReader::read_oop_map_set() {
1205 uint offset = read_position();
1206 int size = *(int *)addr(offset);
1207 offset += sizeof(int);
1208 ImmutableOopMapSet* oopmaps = (ImmutableOopMapSet *)addr(offset);
1209 offset += size;
1210 set_read_position(offset);
1211 return oopmaps;
1212 }
1213
1214 #ifndef PRODUCT
// Write the blob's AsmRemarks as a count followed by (code offset,
// C-string id) pairs. The count slot is reserved up front and back-patched
// after the iteration since the number of remarks is not known in advance.
bool AOTCodeCache::write_asm_remarks(CodeBlob& cb) {
  // Write asm remarks
  uint* count_ptr = (uint *)reserve_bytes(sizeof(uint));
  if (count_ptr == nullptr) {
    return false;
  }
  uint count = 0;
  bool result = cb.asm_remarks().iterate([&] (uint offset, const char* str) -> bool {
    log_trace(aot, codecache, stubs)("asm remark offset=%d, str='%s'", offset, str);
    uint n = write_bytes(&offset, sizeof(uint));
    if (n != sizeof(uint)) {
      return false;
    }
    // Intern the remark text so it gets an id in the cache string table.
    const char* cstr = add_C_string(str);
    int id = _table->id_for_C_string((address)cstr);
    assert(id != -1, "asm remark string '%s' not found in AOTCodeAddressTable", str);
    n = write_bytes(&id, sizeof(int));
    if (n != sizeof(int)) {
      return false;
    }
    count += 1;
    return true;
  });
  // Back-patch the number of remarks actually written.
  *count_ptr = count;
  return result;
}
1241
1242 void AOTCodeReader::read_asm_remarks(AsmRemarks& asm_remarks) {
1243 // Read asm remarks
1244 uint offset = read_position();
1245 uint count = *(uint *)addr(offset);
1246 offset += sizeof(uint);
1247 for (uint i = 0; i < count; i++) {
1248 uint remark_offset = *(uint *)addr(offset);
1249 offset += sizeof(uint);
1250 int remark_string_id = *(uint *)addr(offset);
1251 offset += sizeof(int);
1252 const char* remark = (const char*)_cache->address_for_C_string(remark_string_id);
1253 asm_remarks.insert(remark_offset, remark);
1254 }
1255 set_read_position(offset);
1256 }
1257
// Write the blob's DbgStrings as a count followed by one C-string id per
// entry. The count slot is reserved up front and back-patched after the
// iteration since the number of strings is not known in advance.
bool AOTCodeCache::write_dbg_strings(CodeBlob& cb) {
  // Write dbg strings
  uint* count_ptr = (uint *)reserve_bytes(sizeof(uint));
  if (count_ptr == nullptr) {
    return false;
  }
  uint count = 0;
  bool result = cb.dbg_strings().iterate([&] (const char* str) -> bool {
    log_trace(aot, codecache, stubs)("dbg string=%s", str);
    // Intern the string so it gets an id in the cache string table.
    const char* cstr = add_C_string(str);
    int id = _table->id_for_C_string((address)cstr);
    assert(id != -1, "db string '%s' not found in AOTCodeAddressTable", str);
    uint n = write_bytes(&id, sizeof(int));
    if (n != sizeof(int)) {
      return false;
    }
    count += 1;
    return true;
  });
  // Back-patch the number of strings actually written.
  *count_ptr = count;
  return result;
}
1280
1281 void AOTCodeReader::read_dbg_strings(DbgStrings& dbg_strings) {
1282 // Read dbg strings
1283 uint offset = read_position();
1284 uint count = *(uint *)addr(offset);
1285 offset += sizeof(uint);
1286 for (uint i = 0; i < count; i++) {
1287 int string_id = *(uint *)addr(offset);
1288 offset += sizeof(int);
1289 const char* str = (const char*)_cache->address_for_C_string(string_id);
1290 dbg_strings.insert(str);
1291 }
1292 set_read_position(offset);
1293 }
1294 #endif // PRODUCT
1295
1296 //======================= AOTCodeAddressTable ===============
1297
1298 // address table ids for generated routines, external addresses and C
1299 // string addresses are partitioned into positive integer ranges
1300 // defined by the following positive base and max values
1301 // i.e. [_extrs_base, _extrs_base + _extrs_max -1],
1302 // [_blobs_base, _blobs_base + _blobs_max -1],
1303 // ...
1304 // [_c_str_base, _c_str_base + _c_str_max -1],
1305
1306 #define _extrs_max 100
1307 #define _stubs_max 3
1308
1309 #define _shared_blobs_max 20
1310 #define _C1_blobs_max 10
1311 #define _blobs_max (_shared_blobs_max+_C1_blobs_max)
1312 #define _all_max (_extrs_max+_stubs_max+_blobs_max)
1313
1314 #define _extrs_base 0
1315 #define _stubs_base (_extrs_base + _extrs_max)
1316 #define _shared_blobs_base (_stubs_base + _stubs_max)
1317 #define _C1_blobs_base (_shared_blobs_base + _shared_blobs_max)
1318 #define _blobs_end (_shared_blobs_base + _blobs_max)
1319
1320 #define SET_ADDRESS(type, addr) \
1321 { \
1322 type##_addr[type##_length++] = (address) (addr); \
1323 assert(type##_length <= type##_max, "increase size"); \
1324 }
1325
1326 static bool initializing_extrs = false;
1327
1328 void AOTCodeAddressTable::init_extrs() {
1329 if (_extrs_complete || initializing_extrs) return; // Done already
1330
1331 assert(_blobs_end <= _all_max, "AOTCodeAddress table ranges need adjusting");
1332
1333 initializing_extrs = true;
1334 _extrs_addr = NEW_C_HEAP_ARRAY(address, _extrs_max, mtCode);
1335
1336 _extrs_length = 0;
1337
1338 // Record addresses of VM runtime methods
1339 SET_ADDRESS(_extrs, SharedRuntime::fixup_callers_callsite);
1340 SET_ADDRESS(_extrs, SharedRuntime::handle_wrong_method);
1341 SET_ADDRESS(_extrs, SharedRuntime::handle_wrong_method_abstract);
1342 SET_ADDRESS(_extrs, SharedRuntime::handle_wrong_method_ic_miss);
1343 #if defined(AARCH64) && !defined(ZERO)
1344 SET_ADDRESS(_extrs, JavaThread::aarch64_get_thread_helper);
1345 #endif
1346 {
1347 // Required by Shared blobs
1348 SET_ADDRESS(_extrs, Deoptimization::fetch_unroll_info);
1349 SET_ADDRESS(_extrs, Deoptimization::unpack_frames);
1350 SET_ADDRESS(_extrs, SafepointSynchronize::handle_polling_page_exception);
1351 SET_ADDRESS(_extrs, SharedRuntime::resolve_opt_virtual_call_C);
1352 SET_ADDRESS(_extrs, SharedRuntime::resolve_virtual_call_C);
1353 SET_ADDRESS(_extrs, SharedRuntime::resolve_static_call_C);
1354 SET_ADDRESS(_extrs, SharedRuntime::throw_StackOverflowError);
1355 SET_ADDRESS(_extrs, SharedRuntime::throw_delayed_StackOverflowError);
1356 SET_ADDRESS(_extrs, SharedRuntime::throw_AbstractMethodError);
1357 SET_ADDRESS(_extrs, SharedRuntime::throw_IncompatibleClassChangeError);
1358 SET_ADDRESS(_extrs, SharedRuntime::throw_NullPointerException_at_call);
1359 }
1360
1361 #ifdef COMPILER1
1362 {
1363 // Required by C1 blobs
1364 SET_ADDRESS(_extrs, static_cast<int (*)(oopDesc*)>(SharedRuntime::dtrace_object_alloc));
1365 SET_ADDRESS(_extrs, SharedRuntime::exception_handler_for_return_address);
1366 SET_ADDRESS(_extrs, SharedRuntime::register_finalizer);
1367 SET_ADDRESS(_extrs, Runtime1::is_instance_of);
1368 SET_ADDRESS(_extrs, Runtime1::exception_handler_for_pc);
1369 SET_ADDRESS(_extrs, Runtime1::check_abort_on_vm_exception);
1370 SET_ADDRESS(_extrs, Runtime1::new_instance);
1371 SET_ADDRESS(_extrs, Runtime1::counter_overflow);
1372 SET_ADDRESS(_extrs, Runtime1::new_type_array);
1373 SET_ADDRESS(_extrs, Runtime1::new_object_array);
1374 SET_ADDRESS(_extrs, Runtime1::new_multi_array);
1375 SET_ADDRESS(_extrs, Runtime1::throw_range_check_exception);
1376 SET_ADDRESS(_extrs, Runtime1::throw_index_exception);
1377 SET_ADDRESS(_extrs, Runtime1::throw_div0_exception);
1378 SET_ADDRESS(_extrs, Runtime1::throw_null_pointer_exception);
1379 SET_ADDRESS(_extrs, Runtime1::throw_array_store_exception);
1380 SET_ADDRESS(_extrs, Runtime1::throw_class_cast_exception);
1381 SET_ADDRESS(_extrs, Runtime1::throw_incompatible_class_change_error);
1382 SET_ADDRESS(_extrs, Runtime1::is_instance_of);
1383 SET_ADDRESS(_extrs, Runtime1::monitorenter);
1384 SET_ADDRESS(_extrs, Runtime1::monitorexit);
1385 SET_ADDRESS(_extrs, Runtime1::deoptimize);
1386 SET_ADDRESS(_extrs, Runtime1::access_field_patching);
1387 SET_ADDRESS(_extrs, Runtime1::move_klass_patching);
1388 SET_ADDRESS(_extrs, Runtime1::move_mirror_patching);
1389 SET_ADDRESS(_extrs, Runtime1::move_appendix_patching);
1390 SET_ADDRESS(_extrs, Runtime1::predicate_failed_trap);
1391 SET_ADDRESS(_extrs, Runtime1::unimplemented_entry);
1392 SET_ADDRESS(_extrs, Thread::current);
1393 SET_ADDRESS(_extrs, CompressedKlassPointers::base_addr());
1394 #ifndef PRODUCT
1395 SET_ADDRESS(_extrs, os::breakpoint);
1396 #endif
1397 }
1398 #endif
1399
1400 #ifdef COMPILER2
1401 {
1402 // Required by C2 blobs
1403 SET_ADDRESS(_extrs, Deoptimization::uncommon_trap);
1404 SET_ADDRESS(_extrs, OptoRuntime::handle_exception_C);
1405 SET_ADDRESS(_extrs, OptoRuntime::new_instance_C);
1406 SET_ADDRESS(_extrs, OptoRuntime::new_array_C);
1407 SET_ADDRESS(_extrs, OptoRuntime::new_array_nozero_C);
1408 SET_ADDRESS(_extrs, OptoRuntime::multianewarray2_C);
1409 SET_ADDRESS(_extrs, OptoRuntime::multianewarray3_C);
1410 SET_ADDRESS(_extrs, OptoRuntime::multianewarray4_C);
1411 SET_ADDRESS(_extrs, OptoRuntime::multianewarray5_C);
1412 SET_ADDRESS(_extrs, OptoRuntime::multianewarrayN_C);
1413 SET_ADDRESS(_extrs, OptoRuntime::complete_monitor_locking_C);
1414 SET_ADDRESS(_extrs, OptoRuntime::monitor_notify_C);
1415 SET_ADDRESS(_extrs, OptoRuntime::monitor_notifyAll_C);
1416 SET_ADDRESS(_extrs, OptoRuntime::rethrow_C);
1417 SET_ADDRESS(_extrs, OptoRuntime::slow_arraycopy_C);
1418 SET_ADDRESS(_extrs, OptoRuntime::register_finalizer_C);
1419 SET_ADDRESS(_extrs, OptoRuntime::vthread_end_first_transition_C);
1420 SET_ADDRESS(_extrs, OptoRuntime::vthread_start_final_transition_C);
1421 SET_ADDRESS(_extrs, OptoRuntime::vthread_start_transition_C);
1422 SET_ADDRESS(_extrs, OptoRuntime::vthread_end_transition_C);
1423 #if defined(AARCH64)
1424 SET_ADDRESS(_extrs, JavaThread::verify_cross_modify_fence_failure);
1425 #endif // AARCH64
1426 }
1427 #endif // COMPILER2
1428
1429 #if INCLUDE_G1GC
1430 SET_ADDRESS(_extrs, G1BarrierSetRuntime::write_ref_field_pre_entry);
1431 #endif
1432 #if INCLUDE_SHENANDOAHGC
1433 SET_ADDRESS(_extrs, ShenandoahRuntime::write_barrier_pre);
1434 SET_ADDRESS(_extrs, ShenandoahRuntime::load_reference_barrier_phantom);
1435 SET_ADDRESS(_extrs, ShenandoahRuntime::load_reference_barrier_phantom_narrow);
1436 #endif
1437 #if INCLUDE_ZGC
1438 SET_ADDRESS(_extrs, ZBarrierSetRuntime::load_barrier_on_oop_field_preloaded_addr());
1439 SET_ADDRESS(_extrs, ZBarrierSetRuntime::load_barrier_on_phantom_oop_field_preloaded_addr());
1440 #if defined(AMD64)
1441 SET_ADDRESS(_extrs, &ZPointerLoadShift);
1442 #endif
1443 #endif
1444 #ifndef ZERO
1445 #if defined(AMD64) || defined(AARCH64) || defined(RISCV64)
1446 SET_ADDRESS(_extrs, MacroAssembler::debug64);
1447 #endif
1448 #endif // ZERO
1449
1450 _extrs_complete = true;
1451 log_debug(aot, codecache, init)("External addresses recorded");
1452 }
1453
1454 static bool initializing_early_stubs = false;
1455
// Populate the _stubs partition of the address table with early-generated
// StubRoutines entries. Ids are assigned by insertion order (see
// SET_ADDRESS), so the order of registrations must stay identical between
// the dump run and the load run.
void AOTCodeAddressTable::init_early_stubs() {
  if (_complete || initializing_early_stubs) return; // Done already
  initializing_early_stubs = true;
  _stubs_addr = NEW_C_HEAP_ARRAY(address, _stubs_max, mtCode);
  _stubs_length = 0;
  SET_ADDRESS(_stubs, StubRoutines::forward_exception_entry());

  {
    // Required by C1 blobs
#if defined(AMD64) && !defined(ZERO)
    SET_ADDRESS(_stubs, StubRoutines::x86::double_sign_flip());
    SET_ADDRESS(_stubs, StubRoutines::x86::d2l_fixup());
#endif // AMD64
  }

  _early_stubs_complete = true;
  log_info(aot, codecache, init)("Early stubs recorded");
}
1474
1475 static bool initializing_shared_blobs = false;
1476
// Populate the shared-blobs partition of the address table with entry points
// of generated shared runtime blobs. One C-heap array backs both the shared
// and the C1 partitions; _C1_blobs_addr aliases its tail. Ids are assigned
// by insertion order (see SET_ADDRESS), so the order of registrations must
// stay identical between the dump run and the load run.
void AOTCodeAddressTable::init_shared_blobs() {
  if (_complete || initializing_shared_blobs) return; // Done already
  initializing_shared_blobs = true;
  address* blobs_addr = NEW_C_HEAP_ARRAY(address, _blobs_max, mtCode);

  // Divide _shared_blobs_addr array to chunks because they could be initialized in parallel
  _shared_blobs_addr = blobs_addr;
  _C1_blobs_addr = _shared_blobs_addr + _shared_blobs_max;

  _shared_blobs_length = 0;
  _C1_blobs_length = 0;

  // clear the address table
  memset(blobs_addr, 0, sizeof(address)* _blobs_max);

  // Record addresses of generated code blobs
  SET_ADDRESS(_shared_blobs, SharedRuntime::get_handle_wrong_method_stub());
  SET_ADDRESS(_shared_blobs, SharedRuntime::get_ic_miss_stub());
  SET_ADDRESS(_shared_blobs, SharedRuntime::deopt_blob()->unpack());
  SET_ADDRESS(_shared_blobs, SharedRuntime::deopt_blob()->unpack_with_exception());
  SET_ADDRESS(_shared_blobs, SharedRuntime::deopt_blob()->unpack_with_reexecution());
  SET_ADDRESS(_shared_blobs, SharedRuntime::deopt_blob()->unpack_with_exception_in_tls());
#if INCLUDE_JVMCI
  if (EnableJVMCI) {
    SET_ADDRESS(_shared_blobs, SharedRuntime::deopt_blob()->uncommon_trap());
    SET_ADDRESS(_shared_blobs, SharedRuntime::deopt_blob()->implicit_exception_uncommon_trap());
  }
#endif

  _shared_blobs_complete = true;
  log_debug(aot, codecache, init)("Early shared blobs recorded");
  _complete = true;
}
1510
// Populate the C1 partition of the address table with entry points of
// early-generated Runtime1 blobs, iterating the C1 stub-id range up to and
// including c1_forward_exception_id. Blobs that were not generated are
// skipped with a log message. Ids are assigned by insertion order
// (see SET_ADDRESS).
void AOTCodeAddressTable::init_early_c1() {
#ifdef COMPILER1
  // Runtime1 Blobs
  StubId id = StubInfo::stub_base(StubGroup::C1);
  // include forward_exception in range we publish
  StubId limit = StubInfo::next(StubId::c1_forward_exception_id);
  for (; id != limit; id = StubInfo::next(id)) {
    if (Runtime1::blob_for(id) == nullptr) {
      log_info(aot, codecache, init)("C1 blob %s is missing", Runtime1::name_for(id));
      continue;
    }
    if (Runtime1::entry_for(id) == nullptr) {
      log_info(aot, codecache, init)("C1 blob %s is missing entry", Runtime1::name_for(id));
      continue;
    }
    address entry = Runtime1::entry_for(id);
    SET_ADDRESS(_C1_blobs, entry);
  }
#endif // COMPILER1
  assert(_C1_blobs_length <= _C1_blobs_max, "increase _C1_blobs_max to %d", _C1_blobs_length);
  _early_c1_complete = true;
}
1533
1534 #undef SET_ADDRESS
1535
1536 AOTCodeAddressTable::~AOTCodeAddressTable() {
1537 if (_extrs_addr != nullptr) {
1538 FREE_C_HEAP_ARRAY(address, _extrs_addr);
1539 }
1540 if (_stubs_addr != nullptr) {
1541 FREE_C_HEAP_ARRAY(address, _stubs_addr);
1542 }
1543 if (_shared_blobs_addr != nullptr) {
1544 FREE_C_HEAP_ARRAY(address, _shared_blobs_addr);
1545 }
1546 }
1547
1548 #ifdef PRODUCT
1549 #define MAX_STR_COUNT 200
1550 #else
1551 #define MAX_STR_COUNT 500
1552 #endif
1553 #define _c_str_max MAX_STR_COUNT
1554 static const int _c_str_base = _all_max;
1555
1556 static const char* _C_strings_in[MAX_STR_COUNT] = {nullptr}; // Incoming strings
1557 static const char* _C_strings[MAX_STR_COUNT] = {nullptr}; // Our duplicates
1558 static int _C_strings_count = 0;
1559 static int _C_strings_s[MAX_STR_COUNT] = {0};
1560 static int _C_strings_id[MAX_STR_COUNT] = {0};
1561 static int _C_strings_used = 0;
1562
// Load the cached C strings section written by store_strings(): an array of
// per-string lengths followed by the concatenated NUL-terminated strings.
// The strings are copied into a C-heap buffer that outlives the cache mapping.
void AOTCodeCache::load_strings() {
  uint strings_count  = _load_header->strings_count();
  if (strings_count == 0) {
    return;
  }
  uint strings_offset = _load_header->strings_offset();
  uint* string_lengths = (uint*)addr(strings_offset);
  strings_offset += (strings_count * sizeof(uint));
  uint strings_size = _load_header->entries_offset() - strings_offset;
  // We have to keep cached strings longer than _cache buffer
  // because they are referenced from compiled code which may
  // still be executed on VM exit after _cache is freed.
  char* p = NEW_C_HEAP_ARRAY(char, strings_size+1, mtCode);
  memcpy(p, addr(strings_offset), strings_size);
  _C_strings_buf = p;
  assert(strings_count <= MAX_STR_COUNT, "sanity");
  // Set up identity mappings: string i keeps id i as assigned at dump time.
  for (uint i = 0; i < strings_count; i++) {
    _C_strings[i] = p;
    uint len = string_lengths[i];
    _C_strings_s[i] = i;
    _C_strings_id[i] = i;
    p += len; // advance past this string (length includes its NUL)
  }
  assert((uint)(p - _C_strings_buf) <= strings_size, "(" INTPTR_FORMAT " - " INTPTR_FORMAT ") = %d > %d ", p2i(p), p2i(_C_strings_buf), (uint)(p - _C_strings_buf), strings_size);
  _C_strings_count = strings_count;
  _C_strings_used  = strings_count;
  log_debug(aot, codecache, init)("  Loaded %d C strings of total length %d at offset %d from AOT Code Cache", _C_strings_count, strings_size, strings_offset);
}
1591
// Write the C strings referenced by stored code (in id order) to the cache:
// an array of per-string lengths followed by the concatenated NUL-terminated
// strings. Returns the number of strings written, or -1 on write failure.
int AOTCodeCache::store_strings() {
  if (_C_strings_used > 0) {
    MutexLocker ml(AOTCodeCStrings_lock, Mutex::_no_safepoint_check_flag);
    uint offset = _write_position;
    uint length = 0;
    // Reserve the lengths array up front; it is filled in as strings are written.
    uint* lengths = (uint *)reserve_bytes(sizeof(uint) * _C_strings_used);
    if (lengths == nullptr) {
      return -1;
    }
    for (int i = 0; i < _C_strings_used; i++) {
      // _C_strings_s maps id order to table index (see id_for_C_string()).
      const char* str = _C_strings[_C_strings_s[i]];
      uint len = (uint)strlen(str) + 1; // includes the terminating NUL
      length += len;
      assert(len < 1000, "big string: %s", str);
      lengths[i] = len;
      uint n = write_bytes(str, len);
      if (n != len) {
        return -1;
      }
    }
    log_debug(aot, codecache, exit)("  Wrote %d C strings of total length %d at offset %d to AOT Code Cache",
                                    _C_strings_used, length, offset);
  }
  return _C_strings_used;
}
1617
1618 const char* AOTCodeCache::add_C_string(const char* str) {
1619 if (is_on_for_dump() && str != nullptr) {
1620 MutexLocker ml(AOTCodeCStrings_lock, Mutex::_no_safepoint_check_flag);
1621 AOTCodeAddressTable* table = addr_table();
1622 if (table != nullptr) {
1623 return table->add_C_string(str);
1624 }
1625 }
1626 return str;
1627 }
1628
1629 const char* AOTCodeAddressTable::add_C_string(const char* str) {
1630 if (_extrs_complete) {
1631 // Check previous strings address
1632 for (int i = 0; i < _C_strings_count; i++) {
1633 if (_C_strings_in[i] == str) {
1634 return _C_strings[i]; // Found previous one - return our duplicate
1635 } else if (strcmp(_C_strings[i], str) == 0) {
1636 return _C_strings[i];
1637 }
1638 }
1639 // Add new one
1640 if (_C_strings_count < MAX_STR_COUNT) {
1641 // Passed in string can be freed and used space become inaccessible.
1642 // Keep original address but duplicate string for future compare.
1643 _C_strings_id[_C_strings_count] = -1; // Init
1644 _C_strings_in[_C_strings_count] = str;
1645 const char* dup = os::strdup(str);
1646 _C_strings[_C_strings_count++] = dup;
1647 log_trace(aot, codecache, stringtable)("add_C_string: [%d] " INTPTR_FORMAT " '%s'", _C_strings_count, p2i(dup), dup);
1648 return dup;
1649 } else {
1650 assert(false, "Number of C strings >= MAX_STR_COUNT");
1651 }
1652 }
1653 return str;
1654 }
1655
1656 int AOTCodeAddressTable::id_for_C_string(address str) {
1657 if (str == nullptr) {
1658 return -1;
1659 }
1660 MutexLocker ml(AOTCodeCStrings_lock, Mutex::_no_safepoint_check_flag);
1661 for (int i = 0; i < _C_strings_count; i++) {
1662 if (_C_strings[i] == (const char*)str) { // found
1663 int id = _C_strings_id[i];
1664 if (id >= 0) {
1665 assert(id < _C_strings_used, "%d >= %d", id , _C_strings_used);
1666 return id; // Found recorded
1667 }
1668 // Not found in recorded, add new
1669 id = _C_strings_used++;
1670 _C_strings_s[id] = i;
1671 _C_strings_id[i] = id;
1672 return id;
1673 }
1674 }
1675 return -1;
1676 }
1677
1678 address AOTCodeAddressTable::address_for_C_string(int idx) {
1679 assert(idx < _C_strings_count, "sanity");
1680 return (address)_C_strings[idx];
1681 }
1682
1683 static int search_address(address addr, address* table, uint length) {
1684 for (int i = 0; i < (int)length; i++) {
1685 if (table[i] == addr) {
1686 return i;
1687 }
1688 }
1689 return BAD_ADDRESS_ID;
1690 }
1691
1692 address AOTCodeAddressTable::address_for_id(int idx) {
1693 assert(_extrs_complete, "AOT Code Cache VM runtime addresses table is not complete");
1694 if (idx == -1) {
1695 return (address)-1;
1696 }
1697 uint id = (uint)idx;
1698 // special case for symbols based relative to os::init
1699 if (id > (_c_str_base + _c_str_max)) {
1700 return (address)os::init + idx;
1701 }
1702 if (idx < 0) {
1703 fatal("Incorrect id %d for AOT Code Cache addresses table", id);
1704 return nullptr;
1705 }
1706 // no need to compare unsigned id against 0
1707 if (/* id >= _extrs_base && */ id < _extrs_length) {
1708 return _extrs_addr[id - _extrs_base];
1709 }
1710 if (id >= _stubs_base && id < _stubs_base + _stubs_length) {
1711 return _stubs_addr[id - _stubs_base];
1712 }
1713 if (id >= _shared_blobs_base && id < _shared_blobs_base + _shared_blobs_length) {
1714 return _shared_blobs_addr[id - _shared_blobs_base];
1715 }
1716 if (id >= _C1_blobs_base && id < _C1_blobs_base + _C1_blobs_length) {
1717 return _C1_blobs_addr[id - _C1_blobs_base];
1718 }
1719 if (id >= _c_str_base && id < (_c_str_base + (uint)_C_strings_count)) {
1720 return address_for_C_string(id - _c_str_base);
1721 }
1722 fatal("Incorrect id %d for AOT Code Cache addresses table", id);
1723 return nullptr;
1724 }
1725
// Map a runtime address to its id in the AOT Code Cache addresses table.
// The reverse of address_for_id(). 'reloc' and 'code_blob' are used only for
// diagnostic printing in debug builds when an address is missing.
// Returns:
//   -1                      for the (address)-1 sentinel (static call stub
//                           jumping to itself), or when the address is not
//                           found (debug builds assert first);
//   a sub-table id          (C string, stub, blob, or external runtime entry);
//   a distance from os::init when the address looks like a C string in the
//                           VM image that was not recorded in the table.
int AOTCodeAddressTable::id_for_address(address addr, RelocIterator reloc, CodeBlob* code_blob) {
  assert(_extrs_complete, "AOT Code Cache VM runtime addresses table is not complete");
  int id = -1;
  if (addr == (address)-1) { // Static call stub has jump to itself
    return id;
  }
  // Search for a recorded C string first
  id = id_for_C_string(addr);
  if (id >= 0) {
    return id + _c_str_base;
  }
  if (StubRoutines::contains(addr)) {
    // Search in stubs
    id = search_address(addr, _stubs_addr, _stubs_length);
    if (id < 0) {
      // Identify the stub for the assert message; the address may be a
      // return pc, so retry with the pc-return adjustment.
      StubCodeDesc* desc = StubCodeDesc::desc_for(addr);
      if (desc == nullptr) {
        desc = StubCodeDesc::desc_for(addr + frame::pc_return_offset);
      }
      const char* sub_name = (desc != nullptr) ? desc->name() : "<unknown>";
      assert(false, "Address " INTPTR_FORMAT " for Stub:%s is missing in AOT Code Cache addresses table", p2i(addr), sub_name);
    } else {
      return id + _stubs_base;
    }
  } else {
    CodeBlob* cb = CodeCache::find_blob(addr);
    if (cb != nullptr) {
      // Search in code blobs.
      // NOTE(review): searches _blobs_max entries starting at the shared-blobs
      // table — presumably the blob sub-tables are laid out contiguously so
      // this also covers C1 blobs; verify against the table construction.
      int id_base = _shared_blobs_base;
      id = search_address(addr, _shared_blobs_addr, _blobs_max);
      if (id < 0) {
        assert(false, "Address " INTPTR_FORMAT " for Blob:%s is missing in AOT Code Cache addresses table", p2i(addr), cb->name());
      } else {
        return id_base + id;
      }
    } else {
      // Search in runtime functions
      id = search_address(addr, _extrs_addr, _extrs_length);
      if (id < 0) {
        ResourceMark rm;
        const int buflen = 1024;
        char* func_name = NEW_RESOURCE_ARRAY(char, buflen);
        int offset = 0;
        if (os::dll_address_to_function_name(addr, func_name, buflen, &offset)) {
          if (offset > 0) {
            // Could be address of C string: encode it as its byte distance
            // from os::init so address_for_id() can reconstruct the address.
            // NOTE(review): the log's "(offset %d)" is given 'dist', not
            // 'offset' — confirm that is intentional.
            uint dist = (uint)pointer_delta(addr, (address)os::init, 1);
            log_debug(aot, codecache)("Address " INTPTR_FORMAT " (offset %d) for runtime target '%s' is missing in AOT Code Cache addresses table",
                              p2i(addr), dist, (const char*)addr);
            // The distance encoding must not collide with real table ids.
            assert(dist > (uint)(_all_max + MAX_STR_COUNT), "change encoding of distance");
            return dist;
          }
#ifdef ASSERT
          // Dump the relocation and blob before failing, to aid diagnosis.
          reloc.print_current_on(tty);
          code_blob->print_on(tty);
          code_blob->print_code_on(tty);
          assert(false, "Address " INTPTR_FORMAT " for runtime target '%s+%d' is missing in AOT Code Cache addresses table", p2i(addr), func_name, offset);
#endif
        } else {
#ifdef ASSERT
          // Symbol lookup failed entirely; dump everything we know.
          reloc.print_current_on(tty);
          code_blob->print_on(tty);
          code_blob->print_code_on(tty);
          os::find(addr, tty);
          assert(false, "Address " INTPTR_FORMAT " for <unknown>/('%s') is missing in AOT Code Cache addresses table", p2i(addr), (const char*)addr);
#endif
        }
      } else {
        return _extrs_base + id;
      }
    }
  }
  return id;
}
1800
1801 // This is called after initialize() but before init2()
1802 // and _cache is not set yet.
1803 void AOTCodeCache::print_on(outputStream* st) {
1804 if (opened_cache != nullptr && opened_cache->for_use()) {
1805 st->print_cr("\nAOT Code Cache");
1806 uint count = opened_cache->_load_header->entries_count();
1807 uint* search_entries = (uint*)opened_cache->addr(opened_cache->_load_header->entries_offset()); // [id, index]
1808 AOTCodeEntry* load_entries = (AOTCodeEntry*)(search_entries + 2 * count);
1809
1810 for (uint i = 0; i < count; i++) {
1811 // Use search_entries[] to order ouput
1812 int index = search_entries[2*i + 1];
1813 AOTCodeEntry* entry = &(load_entries[index]);
1814
1815 uint entry_position = entry->offset();
1816 uint name_offset = entry->name_offset() + entry_position;
1817 const char* saved_name = opened_cache->addr(name_offset);
1818
1819 st->print_cr("%4u: %10s idx:%4u Id:%u size=%u '%s'",
1820 i, aot_code_entry_kind_name[entry->kind()], index, entry->id(), entry->size(), saved_name);
1821 }
1822 }
1823 }