1 /*
2 * Copyright (c) 2023, 2026, Oracle and/or its affiliates. All rights reserved.
3 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
4 *
5 * This code is free software; you can redistribute it and/or modify it
6 * under the terms of the GNU General Public License version 2 only, as
7 * published by the Free Software Foundation.
8 *
9 * This code is distributed in the hope that it will be useful, but WITHOUT
10 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
11 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
12 * version 2 for more details (a copy is included in the LICENSE file that
13 * accompanied this code).
14 *
15 * You should have received a copy of the GNU General Public License version
16 * 2 along with this work; if not, write to the Free Software Foundation,
17 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
18 *
19 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
20 * or visit www.oracle.com if you need additional information or have any
21 * questions.
22 *
23 */
24
25
26 #include "asm/macroAssembler.hpp"
27 #include "cds/aotCacheAccess.hpp"
28 #include "cds/aotMetaspace.hpp"
29 #include "cds/cds_globals.hpp"
30 #include "cds/cdsConfig.hpp"
31 #include "cds/heapShared.hpp"
32 #include "ci/ciUtilities.hpp"
33 #include "classfile/javaAssertions.hpp"
34 #include "code/aotCodeCache.hpp"
35 #include "code/codeCache.hpp"
36 #include "gc/shared/cardTableBarrierSet.hpp"
37 #include "gc/shared/gcConfig.hpp"
38 #include "logging/logStream.hpp"
39 #include "memory/memoryReserver.hpp"
40 #include "runtime/deoptimization.hpp"
41 #include "runtime/flags/flagSetting.hpp"
42 #include "runtime/globals_extension.hpp"
43 #include "runtime/java.hpp"
44 #include "runtime/mutexLocker.hpp"
45 #include "runtime/os.inline.hpp"
46 #include "runtime/sharedRuntime.hpp"
47 #include "runtime/stubInfo.hpp"
48 #include "runtime/stubRoutines.hpp"
49 #include "utilities/copy.hpp"
50 #ifdef COMPILER1
51 #include "c1/c1_Runtime1.hpp"
52 #endif
53 #ifdef COMPILER2
54 #include "opto/runtime.hpp"
55 #endif
56 #if INCLUDE_G1GC
57 #include "gc/g1/g1BarrierSetRuntime.hpp"
58 #include "gc/g1/g1HeapRegion.hpp"
59 #endif
60 #if INCLUDE_SHENANDOAHGC
61 #include "gc/shenandoah/shenandoahRuntime.hpp"
62 #endif
63 #if INCLUDE_ZGC
64 #include "gc/z/zBarrierSetRuntime.hpp"
65 #endif
66
67 #include <errno.h>
68 #include <sys/stat.h>
69
// Printable names for each AOTCodeEntry::Kind, generated from the
// DO_AOTCODEENTRY_KIND x-macro so the table stays in sync with the enum
// declaration. Indexed by the kind value (see uses in logging below).
const char* aot_code_entry_kind_name[] = {
#define DECL_KIND_STRING(kind) XSTR(kind),
  DO_AOTCODEENTRY_KIND(DECL_KIND_STRING)
#undef DECL_KIND_STRING
};
75
// Report that previously stored AOT code cannot be used.
// With -XX:+AbortVMOnAOTCodeFailure the VM exits during initialization;
// otherwise we log, turn AOT code caching off, and continue without it.
static void report_load_failure() {
  if (AbortVMOnAOTCodeFailure) {
    vm_exit_during_initialization("Unable to use AOT Code Cache.", nullptr);
  }
  log_info(aot, codecache, init)("Unable to use AOT Code Cache.");
  AOTCodeCache::disable_caching();
}
83
// Report that AOT code could not be written to the cache.
// Store failures can happen late (past initialization), so the abort path
// uses vm_abort() rather than vm_exit_during_initialization(). Without
// AbortVMOnAOTCodeFailure we log, disable caching, and continue.
static void report_store_failure() {
  if (AbortVMOnAOTCodeFailure) {
    tty->print_cr("Unable to create AOT Code Cache.");
    vm_abort(false);
  }
  log_info(aot, codecache, exit)("Unable to create AOT Code Cache.");
  AOTCodeCache::disable_caching();
}
92
// The sequence of AOT code caching flag and parameter settings.
94 //
95 // 1. The initial AOT code caching flags setting is done
96 // during call to CDSConfig::check_vm_args_consistency().
97 //
98 // 2. The earliest AOT code state check done in compilationPolicy_init()
99 // where we set number of compiler threads for AOT assembly phase.
100 //
// 3. We determine presence of AOT code in AOT Cache in
//    AOTMetaspace::open_static_archive() which is called
//    after compilationPolicy_init() but before codeCache_init().
104 //
105 // 4. AOTCodeCache::initialize() is called during universe_init()
106 // and does final AOT state and flags settings.
107 //
108 // 5. Finally AOTCodeCache::init2() is called after universe_init()
109 // when all GC settings are finalized.
110
111 // Next methods determine which action we do with AOT code depending
112 // on phase of AOT process: assembly or production.
113
114 bool AOTCodeCache::is_dumping_adapter() {
115 return AOTAdapterCaching && is_on_for_dump();
116 }
117
118 bool AOTCodeCache::is_using_adapter() {
119 return AOTAdapterCaching && is_on_for_use();
120 }
121
122 bool AOTCodeCache::is_dumping_stub() {
123 return AOTStubCaching && is_on_for_dump();
124 }
125
126 bool AOTCodeCache::is_using_stub() {
127 return AOTStubCaching && is_on_for_use();
128 }
129
// Next methods could be called regardless of AOT code cache status.
// Initially they are called during flags parsing and finalized
// in AOTCodeCache::initialize().
133 void AOTCodeCache::enable_caching() {
134 FLAG_SET_ERGO_IF_DEFAULT(AOTStubCaching, true);
135 FLAG_SET_ERGO_IF_DEFAULT(AOTAdapterCaching, true);
136 }
137
138 void AOTCodeCache::disable_caching() {
139 FLAG_SET_ERGO(AOTStubCaching, false);
140 FLAG_SET_ERGO(AOTAdapterCaching, false);
141 }
142
143 bool AOTCodeCache::is_caching_enabled() {
144 return AOTStubCaching || AOTAdapterCaching;
145 }
146
147 static uint32_t encode_id(AOTCodeEntry::Kind kind, int id) {
148 assert(AOTCodeEntry::is_valid_entry_kind(kind), "invalid AOTCodeEntry kind %d", (int)kind);
149 // There can be a conflict of id between an Adapter and *Blob, but that should not cause any functional issue
150 // becasue both id and kind are used to find an entry, and that combination should be unique
151 if (kind == AOTCodeEntry::Adapter) {
152 return id;
153 } else if (kind == AOTCodeEntry::SharedBlob) {
154 assert(StubInfo::is_shared(static_cast<BlobId>(id)), "not a shared blob id %d", id);
155 return id;
156 } else if (kind == AOTCodeEntry::C1Blob) {
157 assert(StubInfo::is_c1(static_cast<BlobId>(id)), "not a c1 blob id %d", id);
158 return id;
159 } else {
160 // kind must be AOTCodeEntry::C2Blob
161 assert(StubInfo::is_c2(static_cast<BlobId>(id)), "not a c2 blob id %d", id);
162 return id;
163 }
164 }
165
// Maximum size of the AOT code cache, derived from AOTCodeMaxSize
// (aligned up to the VM allocation granularity) in initialize().
// Zero until initialize() runs.
static uint _max_aot_code_size = 0;
uint AOTCodeCache::max_aot_code_size() {
  return _max_aot_code_size;
}
170
// It is called from AOTMetaspace::initialize_shared_spaces()
// which is called from universe_init().
// At this point all AOT class linking settings are finalized
// and AOT cache is open so we can map AOT code region.
// Decide whether AOT code caching is possible for this run and, if so,
// open the cache (step 4 of the sequence described above).
// Requires: AOTCache specified, AOT class linking in effect, and a
// supported platform. On any failure the caching flags are turned off.
void AOTCodeCache::initialize() {
#if defined(ZERO) || !(defined(AMD64) || defined(AARCH64))
  // AOT code is only generated for AMD64 and AArch64 ports.
  log_info(aot, codecache, init)("AOT Code Cache is not supported on this platform.");
  disable_caching();
  return;
#else
  if (FLAG_IS_DEFAULT(AOTCache)) {
    log_info(aot, codecache, init)("AOT Code Cache is not used: AOTCache is not specified.");
    disable_caching();
    return; // AOTCache must be specified to dump and use AOT code
  }

  // Disable stubs caching until JDK-8357398 is fixed.
  FLAG_SET_ERGO(AOTStubCaching, false);

  if (VerifyOops) {
    // Disable AOT stubs caching when VerifyOops flag is on.
    // Verify oops code generated a lot of C strings which overflow
    // AOT C string table (which has fixed size).
    // AOT C string table will be reworked later to handle such cases.
    //
    // Note: AOT adapters are not affected - they don't have oop operations.
    log_info(aot, codecache, init)("AOT Stubs Caching is not supported with VerifyOops.");
    FLAG_SET_ERGO(AOTStubCaching, false);
  }

  // Determine the phase: dumping (assembly) vs using (production).
  bool is_dumping = false;
  bool is_using   = false;
  if (CDSConfig::is_dumping_final_static_archive() && CDSConfig::is_dumping_aot_linked_classes()) {
    is_dumping = true;
    enable_caching();
    // Caching may still be off if both flags were explicitly disabled.
    is_dumping = is_caching_enabled();
  } else if (CDSConfig::is_using_archive() && CDSConfig::is_using_aot_linked_classes()) {
    enable_caching();
    is_using = is_caching_enabled();
  } else {
    log_info(aot, codecache, init)("AOT Code Cache is not used: AOT Class Linking is not used.");
    disable_caching();
    return; // nothing to do
  }
  if (!(is_dumping || is_using)) {
    disable_caching();
    return; // AOT code caching disabled on command line
  }
  // Round the configured maximum up to the allocation granularity so the
  // store buffer / mapped region sizes are valid reservation sizes.
  _max_aot_code_size = AOTCodeMaxSize;
  if (!FLAG_IS_DEFAULT(AOTCodeMaxSize)) {
    if (!is_aligned(AOTCodeMaxSize, os::vm_allocation_granularity())) {
      _max_aot_code_size = align_up(AOTCodeMaxSize, os::vm_allocation_granularity());
      log_debug(aot,codecache,init)("Max AOT Code Cache size is aligned up to %uK", (int)(max_aot_code_size()/K));
    }
  }
  size_t aot_code_size = is_using ? AOTCacheAccess::get_aot_code_region_size() : 0;
  if (is_using && aot_code_size == 0) {
    log_info(aot, codecache, init)("AOT Code Cache is empty");
    disable_caching();
    return;
  }
  if (!open_cache(is_dumping, is_using)) {
    if (is_using) {
      report_load_failure();
    } else {
      report_store_failure();
    }
    return;
  }
  if (is_dumping) {
    // Assembly phase: force far branches so stored code is position-agnostic.
    FLAG_SET_DEFAULT(ForceUnreachable, true);
  }
  FLAG_SET_DEFAULT(DelayCompilerStubsGeneration, false);
#endif // defined(ZERO) || !(defined(AMD64) || defined(AARCH64))
}
246
// 'opened_cache' holds the cache between open_cache() and init2(); it is
// published to '_cache' only after verify_config() succeeds in init2().
static AOTCodeCache* opened_cache = nullptr; // Use this until we verify the cache
AOTCodeCache* AOTCodeCache::_cache = nullptr;
DEBUG_ONLY( bool AOTCodeCache::_passed_init2 = false; ) // sanity flag: init2() has run
250
// It is called after universe_init() when all GC settings are finalized.
// Verifies the opened cache's recorded configuration against the current
// runtime, initializes runtime constants and the external-address table,
// and only then publishes the cache via '_cache'. On verification failure
// the cache is discarded and load failure is reported.
void AOTCodeCache::init2() {
  DEBUG_ONLY( _passed_init2 = true; )
  if (opened_cache == nullptr) {
    return; // no cache was opened in initialize()
  }
  if (!opened_cache->verify_config()) {
    delete opened_cache;
    opened_cache = nullptr;
    report_load_failure();
    return;
  }

  // initialize aot runtime constants as appropriate to this runtime
  AOTRuntimeConstants::initialize_from_runtime();

  // initialize the table of external routines so we can save
  // generated code blobs that reference them
  AOTCodeAddressTable* table = opened_cache->_table;
  assert(table != nullptr, "should be initialized already");
  table->init_extrs();

  // Now cache and address table are ready for AOT code generation
  _cache = opened_cache;
}
276
277 bool AOTCodeCache::open_cache(bool is_dumping, bool is_using) {
278 opened_cache = new AOTCodeCache(is_dumping, is_using);
279 if (opened_cache->failed()) {
280 delete opened_cache;
281 opened_cache = nullptr;
282 return false;
283 }
284 return true;
285 }
286
287 void AOTCodeCache::close() {
288 if (is_on()) {
289 delete _cache; // Free memory
290 _cache = nullptr;
291 opened_cache = nullptr;
292 }
293 }
294
295 #define DATA_ALIGNMENT HeapWordSize
296
// Construct the cache for the requested phase(s).
// For use: reserve memory, map the AOT code region into it, and verify
// the header. For dump: allocate an aligned C-heap store buffer; code is
// written from the start while AOTCodeEntry records grow down from the
// end. On any failure _failed is set and the object is unusable.
AOTCodeCache::AOTCodeCache(bool is_dumping, bool is_using) :
  _load_header(nullptr),
  _load_buffer(nullptr),
  _store_buffer(nullptr),
  _C_store_buffer(nullptr),
  _write_position(0),
  _load_size(0),
  _store_size(0),
  _for_use(is_using),
  _for_dump(is_dumping),
  _closing(false),
  _failed(false),
  _lookup_failed(false),
  _table(nullptr),
  _load_entries(nullptr),
  _search_entries(nullptr),
  _store_entries(nullptr),
  _C_strings_buf(nullptr),
  _store_entries_cnt(0)
{
  // Read header at the beginning of cache
  if (_for_use) {
    // Read cache
    size_t load_size = AOTCacheAccess::get_aot_code_region_size();
    ReservedSpace rs = MemoryReserver::reserve(load_size, mtCode);
    if (!rs.is_reserved()) {
      log_warning(aot, codecache, init)("Failed to reserved %u bytes of memory for mapping AOT code region into AOT Code Cache", (uint)load_size);
      set_failed();
      return;
    }
    if (!AOTCacheAccess::map_aot_code_region(rs)) {
      log_warning(aot, codecache, init)("Failed to read/mmap cached code region into AOT Code Cache");
      set_failed();
      return;
    }

    _load_size = (uint)load_size;
    _load_buffer = (char*)rs.base();
    assert(is_aligned(_load_buffer, DATA_ALIGNMENT), "load_buffer is not aligned");
    log_debug(aot, codecache, init)("Mapped %u bytes at address " INTPTR_FORMAT " at AOT Code Cache", _load_size, p2i(_load_buffer));

    // Header lives at offset 0; verify version and recorded size.
    _load_header = (Header*)addr(0);
    if (!_load_header->verify(_load_size)) {
      set_failed();
      return;
    }
    log_info (aot, codecache, init)("Loaded %u AOT code entries from AOT Code Cache", _load_header->entries_count());
    log_debug(aot, codecache, init)("  Adapters:  total=%u", _load_header->adapters_count());
    log_debug(aot, codecache, init)("  Shared Blobs:  total=%u", _load_header->shared_blobs_count());
    log_debug(aot, codecache, init)("  C1 Blobs:      total=%u", _load_header->C1_blobs_count());
    log_debug(aot, codecache, init)("  C2 Blobs:      total=%u", _load_header->C2_blobs_count());
    log_debug(aot, codecache, init)("  AOT code cache size: %u bytes", _load_header->cache_size());

    // Read strings
    load_strings();
  }
  if (_for_dump) {
    // Over-allocate by DATA_ALIGNMENT so the usable buffer can be aligned.
    _C_store_buffer = NEW_C_HEAP_ARRAY(char, max_aot_code_size() + DATA_ALIGNMENT, mtCode);
    _store_buffer = align_up(_C_store_buffer, DATA_ALIGNMENT);
    // Entries allocated at the end of buffer in reverse (as on stack).
    _store_entries = (AOTCodeEntry*)align_up(_C_store_buffer + max_aot_code_size(), DATA_ALIGNMENT);
    log_debug(aot, codecache, init)("Allocated store buffer at address " INTPTR_FORMAT " of size %u", p2i(_store_buffer), max_aot_code_size());
  }
  _table = new AOTCodeAddressTable();
}
362
363 void AOTCodeCache::init_early_stubs_table() {
364 AOTCodeAddressTable* table = addr_table();
365 if (table != nullptr) {
366 table->init_early_stubs();
367 }
368 }
369
370 void AOTCodeCache::init_shared_blobs_table() {
371 AOTCodeAddressTable* table = addr_table();
372 if (table != nullptr) {
373 table->init_shared_blobs();
374 }
375 }
376
377 void AOTCodeCache::init_early_c1_table() {
378 AOTCodeAddressTable* table = addr_table();
379 if (table != nullptr) {
380 table->init_early_c1();
381 }
382 }
383
// Destructor: finalize pending writes (dump phase) and release buffers.
// Sets _closing first so concurrent users back off; takes Compile_lock
// to serialize with threads storing code, and AOTCodeCStrings_lock while
// deleting the address table.
AOTCodeCache::~AOTCodeCache() {
  if (_closing) {
    return; // Already closed
  }
  // Stop any further access to cache.
  _closing = true;

  MutexLocker ml(Compile_lock);
  if (for_dump()) { // Finalize cache
    finish_write();
  }
  // _load_buffer points into the mapped region; it is not freed here.
  _load_buffer = nullptr;
  if (_C_store_buffer != nullptr) {
    FREE_C_HEAP_ARRAY(char, _C_store_buffer);
    _C_store_buffer = nullptr;
    _store_buffer = nullptr;
  }
  if (_table != nullptr) {
    MutexLocker ml(AOTCodeCStrings_lock, Mutex::_no_safepoint_check_flag);
    delete _table;
    _table = nullptr;
  }
}
407
408 void AOTCodeCache::Config::record(uint cpu_features_offset) {
409 _flags = 0;
410 #ifdef ASSERT
411 _flags |= debugVM;
412 #endif
413 if (UseCompressedOops) {
414 _flags |= compressedOops;
415 }
416 if (UseCompressedClassPointers) {
417 _flags |= compressedClassPointers;
418 }
419 if (UseTLAB) {
420 _flags |= useTLAB;
421 }
422 if (JavaAssertions::systemClassDefault()) {
423 _flags |= systemClassAssertions;
424 }
425 if (JavaAssertions::userClassDefault()) {
426 _flags |= userClassAssertions;
427 }
428 if (EnableContended) {
429 _flags |= enableContendedPadding;
430 }
431 if (RestrictContended) {
432 _flags |= restrictContendedPadding;
433 }
434 _compressedOopShift = CompressedOops::shift();
435 _compressedOopBase = CompressedOops::base();
436 _compressedKlassShift = CompressedKlassPointers::shift();
437 _contendedPaddingWidth = ContendedPaddingWidth;
438 _gc = (uint)Universe::heap()->kind();
439 _cpu_features_offset = cpu_features_offset;
440 }
441
// Check that the CPU features recorded at dump time are a subset of the
// features supported by the current CPU. Returns false (cache unusable)
// if any recorded feature is missing at runtime. All logging is at
// debug level and only evaluated when enabled.
bool AOTCodeCache::Config::verify_cpu_features(AOTCodeCache* cache) const {
  LogStreamHandle(Debug, aot, codecache, init) log;
  uint offset = _cpu_features_offset;
  // Layout: uint size followed by the feature blob (see store_cpu_features()).
  uint cpu_features_size = *(uint *)cache->addr(offset);
  assert(cpu_features_size == (uint)VM_Version::cpu_features_size(), "must be");
  offset += sizeof(uint);

  void* cached_cpu_features_buffer = (void *)cache->addr(offset);
  if (log.is_enabled()) {
    ResourceMark rm; // required for stringStream::as_string()
    stringStream ss;
    VM_Version::get_cpu_features_name(cached_cpu_features_buffer, ss);
    log.print_cr("CPU features recorded in AOTCodeCache: %s", ss.as_string());
  }

  if (VM_Version::supports_features(cached_cpu_features_buffer)) {
    // Cache is usable; optionally report features the runtime has beyond
    // what was recorded (informational only).
    if (log.is_enabled()) {
      ResourceMark rm; // required for stringStream::as_string()
      stringStream ss;
      char* runtime_cpu_features = NEW_RESOURCE_ARRAY(char, VM_Version::cpu_features_size());
      VM_Version::store_cpu_features(runtime_cpu_features);
      VM_Version::get_missing_features_name(runtime_cpu_features, cached_cpu_features_buffer, ss);
      if (!ss.is_empty()) {
        log.print_cr("Additional runtime CPU features: %s", ss.as_string());
      }
    }
  } else {
    // Recorded features are not all supported here; report which are missing.
    if (log.is_enabled()) {
      ResourceMark rm; // required for stringStream::as_string()
      stringStream ss;
      char* runtime_cpu_features = NEW_RESOURCE_ARRAY(char, VM_Version::cpu_features_size());
      VM_Version::store_cpu_features(runtime_cpu_features);
      VM_Version::get_missing_features_name(cached_cpu_features_buffer, runtime_cpu_features, ss);
      log.print_cr("AOT Code Cache disabled: required cpu features are missing: %s", ss.as_string());
    }
    return false;
  }
  return true;
}
481
// Compare the configuration recorded at dump time against the current
// runtime. Hard mismatches (VM build kind, GC, compressed class
// pointers, CPU features) make the whole cache unusable and return
// false. Compressed-oops mismatches only disable stub caching, since
// adapters do not embed oop constants.
bool AOTCodeCache::Config::verify(AOTCodeCache* cache) const {
  // First checks affect all cached AOT code
#ifdef ASSERT
  if ((_flags & debugVM) == 0) {
    log_debug(aot, codecache, init)("AOT Code Cache disabled: it was created by product VM, it can't be used by debug VM");
    return false;
  }
#else
  if ((_flags & debugVM) != 0) {
    log_debug(aot, codecache, init)("AOT Code Cache disabled: it was created by debug VM, it can't be used by product VM");
    return false;
  }
#endif

  CollectedHeap::Name aot_gc = (CollectedHeap::Name)_gc;
  if (aot_gc != Universe::heap()->kind()) {
    log_debug(aot, codecache, init)("AOT Code Cache disabled: it was created with different GC: %s vs current %s", GCConfig::hs_err_name(aot_gc), GCConfig::hs_err_name());
    return false;
  }

  if (((_flags & compressedClassPointers) != 0) != UseCompressedClassPointers) {
    // On mismatch the recorded value is the negation of the current one,
    // hence the inverted ternary below.
    log_debug(aot, codecache, init)("AOT Code Cache disabled: it was created with UseCompressedClassPointers = %s", UseCompressedClassPointers ? "false" : "true");
    return false;
  }
  if (_compressedKlassShift != (uint)CompressedKlassPointers::shift()) {
    log_debug(aot, codecache, init)("AOT Code Cache disabled: it was created with CompressedKlassPointers::shift() = %d vs current %d", _compressedKlassShift, CompressedKlassPointers::shift());
    return false;
  }

  // The following checks do not affect AOT adapters caching
  // NOTE(review): these assign AOTStubCaching directly rather than via
  // FLAG_SET_ERGO as elsewhere in this file — confirm this is intended.

  if (((_flags & compressedOops) != 0) != UseCompressedOops) {
    log_debug(aot, codecache, init)("AOT Code Cache disabled: it was created with UseCompressedOops = %s", UseCompressedOops ? "false" : "true");
    AOTStubCaching = false;
  }
  if (_compressedOopShift != (uint)CompressedOops::shift()) {
    log_debug(aot, codecache, init)("AOT Code Cache disabled: it was created with different CompressedOops::shift(): %d vs current %d", _compressedOopShift, CompressedOops::shift());
    AOTStubCaching = false;
  }

  // Oop base compatibility: a null base on one side only is incompatible.
  // Like the two checks above, this only disables AOTStubCaching.
  if ((_compressedOopBase == nullptr || CompressedOops::base() == nullptr) && (_compressedOopBase != CompressedOops::base())) {
    log_debug(aot, codecache, init)("AOTStubCaching is disabled: incompatible CompressedOops::base(): %p vs current %p", _compressedOopBase, CompressedOops::base());
    AOTStubCaching = false;
  }

  if (!verify_cpu_features(cache)) {
    return false;
  }
  return true;
}
533
// Validate the cache header: the format version must match this VM's
// AOT_CODE_VERSION and the mapped region must be at least as large as
// the size recorded at dump time.
bool AOTCodeCache::Header::verify(uint load_size) const {
  if (_version != AOT_CODE_VERSION) {
    log_debug(aot, codecache, init)("AOT Code Cache disabled: different AOT Code version %d vs %d recorded in AOT Code header", AOT_CODE_VERSION, _version);
    return false;
  }
  if (load_size < _cache_size) {
    log_debug(aot, codecache, init)("AOT Code Cache disabled: AOT Code Cache size %d < %d recorded in AOT Code header", load_size, _cache_size);
    return false;
  }
  return true;
}
545
546 AOTCodeCache* AOTCodeCache::open_for_use() {
547 if (AOTCodeCache::is_on_for_use()) {
548 return AOTCodeCache::cache();
549 }
550 return nullptr;
551 }
552
553 AOTCodeCache* AOTCodeCache::open_for_dump() {
554 if (AOTCodeCache::is_on_for_dump()) {
555 AOTCodeCache* cache = AOTCodeCache::cache();
556 cache->clear_lookup_failed(); // Reset bit
557 return cache;
558 }
559 return nullptr;
560 }
561
// memcpy wrapper with trace logging; used for all raw copies into and out
// of the cache buffers. 'size' must be non-zero (and fit in a positive int).
void copy_bytes(const char* from, address to, uint size) {
  assert((int)size > 0, "sanity");
  memcpy(to, from, size);
  log_trace(aot, codecache)("Copied %d bytes from " INTPTR_FORMAT " to " INTPTR_FORMAT, size, p2i(from), p2i(to));
}
567
568 AOTCodeReader::AOTCodeReader(AOTCodeCache* cache, AOTCodeEntry* entry) {
569 _cache = cache;
570 _entry = entry;
571 _load_buffer = cache->cache_buffer();
572 _read_position = 0;
573 _lookup_failed = false;
574 }
575
576 void AOTCodeReader::set_read_position(uint pos) {
577 if (pos == _read_position) {
578 return;
579 }
580 assert(pos < _cache->load_size(), "offset:%d >= file size:%d", pos, _cache->load_size());
581 _read_position = pos;
582 }
583
584 bool AOTCodeCache::set_write_position(uint pos) {
585 if (pos == _write_position) {
586 return true;
587 }
588 if (_store_size < _write_position) {
589 _store_size = _write_position; // Adjust during write
590 }
591 assert(pos < _store_size, "offset:%d >= file size:%d", pos, _store_size);
592 _write_position = pos;
593 return true;
594 }
595
596 static char align_buffer[256] = { 0 };
597
598 bool AOTCodeCache::align_write() {
599 // We are not executing code from cache - we copy it by bytes first.
600 // No need for big alignment (or at all).
601 uint padding = DATA_ALIGNMENT - (_write_position & (DATA_ALIGNMENT - 1));
602 if (padding == DATA_ALIGNMENT) {
603 return true;
604 }
605 uint n = write_bytes((const void*)&align_buffer, padding);
606 if (n != padding) {
607 return false;
608 }
609 log_trace(aot, codecache)("Adjust write alignment in AOT Code Cache");
610 return true;
611 }
612
// Check to see if AOT code cache has required space to store "nbytes" of data.
// On success, advances the write position past the reserved region and
// returns a pointer the caller may fill in directly. On overflow (the
// write region would collide with the entry records growing down from the
// end of the buffer) the cache is marked failed and nullptr is returned.
address AOTCodeCache::reserve_bytes(uint nbytes) {
  assert(for_dump(), "Code Cache file is not created");
  uint new_position = _write_position + nbytes;
  if (new_position >= (uint)((char*)_store_entries - _store_buffer)) {
    log_warning(aot,codecache)("Failed to ensure %d bytes at offset %d in AOT Code Cache. Increase AOTCodeMaxSize.",
                               nbytes, _write_position);
    set_failed();
    report_store_failure();
    return nullptr;
  }
  address buffer = (address)(_store_buffer + _write_position);
  log_trace(aot, codecache)("Reserved %d bytes at offset %d in AOT Code Cache", nbytes, _write_position);
  _write_position += nbytes;
  if (_store_size < _write_position) {
    _store_size = _write_position; // track high-water mark
  }
  return buffer;
}
632
// Copy 'nbytes' from 'buffer' into the store buffer at the current write
// position. Returns the number of bytes written (nbytes on success, 0 on
// overflow, in which case the cache is marked failed). Writing 0 bytes is
// a no-op. Mirrors the overflow check in reserve_bytes().
uint AOTCodeCache::write_bytes(const void* buffer, uint nbytes) {
  assert(for_dump(), "Code Cache file is not created");
  if (nbytes == 0) {
    return 0;
  }
  uint new_position = _write_position + nbytes;
  if (new_position >= (uint)((char*)_store_entries - _store_buffer)) {
    log_warning(aot, codecache)("Failed to write %d bytes at offset %d to AOT Code Cache. Increase AOTCodeMaxSize.",
                                nbytes, _write_position);
    set_failed();
    report_store_failure();
    return 0;
  }
  copy_bytes((const char* )buffer, (address)(_store_buffer + _write_position), nbytes);
  log_trace(aot, codecache)("Wrote %d bytes at offset %d to AOT Code Cache", nbytes, _write_position);
  _write_position += nbytes;
  if (_store_size < _write_position) {
    _store_size = _write_position; // track high-water mark
  }
  return nbytes;
}
654
// Placement new: AOTCodeEntry records are carved out of the cache's store
// buffer (growing down from its end) via add_entry(), not the C heap.
void* AOTCodeEntry::operator new(size_t x, AOTCodeCache* cache) {
  return (void*)(cache->add_entry());
}
658
659 static bool check_entry(AOTCodeEntry::Kind kind, uint id, AOTCodeEntry* entry) {
660 if (entry->kind() == kind) {
661 assert(entry->id() == id, "sanity");
662 return true; // Found
663 }
664 return false;
665 }
666
// Look up a stored entry by (kind, id). Lazily locates the [id, index]
// search table and the entry array in the mapped region on first use,
// then binary-searches by id. Because an adapter and a blob may share an
// id (see encode_id()), a hit is followed by a linear scan over
// neighboring equal-id slots until the kind also matches.
// Returns nullptr if no matching entry exists.
AOTCodeEntry* AOTCodeCache::find_entry(AOTCodeEntry::Kind kind, uint id) {
  assert(_for_use, "sanity");
  uint count = _load_header->entries_count();
  if (_load_entries == nullptr) {
    // Read it
    _search_entries = (uint*)addr(_load_header->entries_offset()); // [id, index]
    _load_entries = (AOTCodeEntry*)(_search_entries + 2 * count);
    log_debug(aot, codecache, init)("Read %d entries table at offset %d from AOT Code Cache", count, _load_header->entries_offset());
  }
  // Binary search
  int l = 0;
  int h = count - 1;
  while (l <= h) {
    int mid = (l + h) >> 1;
    int ix = mid * 2;          // two uints per slot: [id, index]
    uint is = _search_entries[ix];
    if (is == id) {
      int index = _search_entries[ix + 1];
      AOTCodeEntry* entry = &(_load_entries[index]);
      if (check_entry(kind, id, entry)) {
        return entry; // Found
      }
      // Linear search around to handle id collision
      for (int i = mid - 1; i >= l; i--) { // search back
        ix = i * 2;
        is = _search_entries[ix];
        if (is != id) {
          break; // left the run of equal ids
        }
        index = _search_entries[ix + 1];
        AOTCodeEntry* entry = &(_load_entries[index]);
        if (check_entry(kind, id, entry)) {
          return entry; // Found
        }
      }
      for (int i = mid + 1; i <= h; i++) { // search forward
        ix = i * 2;
        is = _search_entries[ix];
        if (is != id) {
          break; // left the run of equal ids
        }
        index = _search_entries[ix + 1];
        AOTCodeEntry* entry = &(_load_entries[index]);
        if (check_entry(kind, id, entry)) {
          return entry; // Found
        }
      }
      break; // Not found match
    } else if (is < id) {
      l = mid + 1;
    } else {
      h = mid - 1;
    }
  }
  return nullptr;
}
723
extern "C" {
  // qsort comparator ordering the [id, index] search table by id
  // (each table slot starts with the uint id).
  static int uint_cmp(const void *i, const void *j) {
    const uint lhs = *(const uint *)i;
    const uint rhs = *(const uint *)j;
    if (lhs == rhs) {
      return 0;
    }
    return (lhs < rhs) ? -1 : 1;
  }
}
731
// Write the current CPU feature set into the cache region at 'buffer':
// a uint size word followed by the feature blob. Advances 'buffer' past
// the data, aligned up to DATA_ALIGNMENT (layout consumed by
// Config::verify_cpu_features()).
void AOTCodeCache::store_cpu_features(char*& buffer, uint buffer_size) {
  uint* size_ptr = (uint *)buffer;
  *size_ptr = buffer_size;
  buffer += sizeof(uint);

  VM_Version::store_cpu_features(buffer);
  log_debug(aot, codecache, exit)("CPU features recorded in AOTCodeCache: %s", VM_Version::features_string());
  buffer += buffer_size;
  buffer = align_up(buffer, DATA_ALIGNMENT);
}
742
743 bool AOTCodeCache::finish_write() {
744 if (!align_write()) {
745 return false;
746 }
747 uint strings_offset = _write_position;
748 int strings_count = store_strings();
749 if (strings_count < 0) {
750 return false;
751 }
752 if (!align_write()) {
753 return false;
754 }
755 uint strings_size = _write_position - strings_offset;
756
757 uint entries_count = 0; // Number of entrant (useful) code entries
758 uint entries_offset = _write_position;
759
760 uint store_count = _store_entries_cnt;
761 if (store_count > 0) {
762 uint header_size = (uint)align_up(sizeof(AOTCodeCache::Header), DATA_ALIGNMENT);
763 uint code_count = store_count;
764 uint search_count = code_count * 2;
765 uint search_size = search_count * sizeof(uint);
766 uint entries_size = (uint)align_up(code_count * sizeof(AOTCodeEntry), DATA_ALIGNMENT); // In bytes
767 // _write_position includes size of code and strings
768 uint code_alignment = code_count * DATA_ALIGNMENT; // We align_up code size when storing it.
769 uint cpu_features_size = VM_Version::cpu_features_size();
770 uint total_cpu_features_size = sizeof(uint) + cpu_features_size; // sizeof(uint) to store cpu_features_size
771 uint total_size = header_size + _write_position + code_alignment + search_size + entries_size +
772 align_up(total_cpu_features_size, DATA_ALIGNMENT);
773 assert(total_size < max_aot_code_size(), "AOT Code size (" UINT32_FORMAT " bytes) is greater than AOTCodeMaxSize(" UINT32_FORMAT " bytes).", total_size, max_aot_code_size());
774
775 // Allocate in AOT Cache buffer
776 char* buffer = (char *)AOTCacheAccess::allocate_aot_code_region(total_size + DATA_ALIGNMENT);
777 char* start = align_up(buffer, DATA_ALIGNMENT);
778 char* current = start + header_size; // Skip header
779
780 uint cpu_features_offset = current - start;
781 store_cpu_features(current, cpu_features_size);
782 assert(is_aligned(current, DATA_ALIGNMENT), "sanity check");
783 assert(current < start + total_size, "sanity check");
784
785 // Create ordered search table for entries [id, index];
786 uint* search = NEW_C_HEAP_ARRAY(uint, search_count, mtCode);
787
788 AOTCodeEntry* entries_address = _store_entries; // Pointer to latest entry
789 uint adapters_count = 0;
790 uint shared_blobs_count = 0;
791 uint C1_blobs_count = 0;
792 uint C2_blobs_count = 0;
793 uint max_size = 0;
794 // AOTCodeEntry entries were allocated in reverse in store buffer.
795 // Process them in reverse order to cache first code first.
796 for (int i = store_count - 1; i >= 0; i--) {
797 entries_address[i].set_next(nullptr); // clear pointers before storing data
798 uint size = align_up(entries_address[i].size(), DATA_ALIGNMENT);
799 if (size > max_size) {
800 max_size = size;
801 }
802 copy_bytes((_store_buffer + entries_address[i].offset()), (address)current, size);
803 entries_address[i].set_offset(current - start); // New offset
804 current += size;
805 uint n = write_bytes(&(entries_address[i]), sizeof(AOTCodeEntry));
806 if (n != sizeof(AOTCodeEntry)) {
807 FREE_C_HEAP_ARRAY(uint, search);
808 return false;
809 }
810 search[entries_count*2 + 0] = entries_address[i].id();
811 search[entries_count*2 + 1] = entries_count;
812 entries_count++;
813 AOTCodeEntry::Kind kind = entries_address[i].kind();
814 if (kind == AOTCodeEntry::Adapter) {
815 adapters_count++;
816 } else if (kind == AOTCodeEntry::SharedBlob) {
817 shared_blobs_count++;
818 } else if (kind == AOTCodeEntry::C1Blob) {
819 C1_blobs_count++;
820 } else if (kind == AOTCodeEntry::C2Blob) {
821 C2_blobs_count++;
822 }
823 }
824 if (entries_count == 0) {
825 log_info(aot, codecache, exit)("AOT Code Cache was not created: no entires");
826 FREE_C_HEAP_ARRAY(uint, search);
827 return true; // Nothing to write
828 }
829 assert(entries_count <= store_count, "%d > %d", entries_count, store_count);
830 // Write strings
831 if (strings_count > 0) {
832 copy_bytes((_store_buffer + strings_offset), (address)current, strings_size);
833 strings_offset = (current - start); // New offset
834 current += strings_size;
835 }
836
837 uint new_entries_offset = (current - start); // New offset
838 // Sort and store search table
839 qsort(search, entries_count, 2*sizeof(uint), uint_cmp);
840 search_size = 2 * entries_count * sizeof(uint);
841 copy_bytes((const char*)search, (address)current, search_size);
842 FREE_C_HEAP_ARRAY(uint, search);
843 current += search_size;
844
845 // Write entries
846 entries_size = entries_count * sizeof(AOTCodeEntry); // New size
847 copy_bytes((_store_buffer + entries_offset), (address)current, entries_size);
848 current += entries_size;
849 uint size = (current - start);
850 assert(size <= total_size, "%d > %d", size , total_size);
851
852 log_debug(aot, codecache, exit)(" Adapters: total=%u", adapters_count);
853 log_debug(aot, codecache, exit)(" Shared Blobs: total=%d", shared_blobs_count);
854 log_debug(aot, codecache, exit)(" C1 Blobs: total=%d", C1_blobs_count);
855 log_debug(aot, codecache, exit)(" C2 Blobs: total=%d", C2_blobs_count);
856 log_debug(aot, codecache, exit)(" AOT code cache size: %u bytes, max entry's size: %u bytes", size, max_size);
857
858 // Finalize header
859 AOTCodeCache::Header* header = (AOTCodeCache::Header*)start;
860 header->init(size, (uint)strings_count, strings_offset,
861 entries_count, new_entries_offset,
862 adapters_count, shared_blobs_count,
863 C1_blobs_count, C2_blobs_count, cpu_features_offset);
864
865 log_info(aot, codecache, exit)("Wrote %d AOT code entries to AOT Code Cache", entries_count);
866 }
867 return true;
868 }
869
870 //------------------Store/Load AOT code ----------------------
871
// Serialize a generated CodeBlob (adapter or stub blob) into the AOT Code Cache
// store buffer and register an AOTCodeEntry describing it.  Layout written at
// entry_position: name, (aligned) archived blob image, relocation data,
// optional oop map set, asm remarks / dbg strings (non-PRODUCT), extra
// relocation ids.  Returns false when the cache is closed, the kind is not
// being dumped, or any write fails; a relocation address-table miss rolls the
// write position back so the blob is simply skipped without failing the cache.
bool AOTCodeCache::store_code_blob(CodeBlob& blob, AOTCodeEntry::Kind entry_kind, uint id, const char* name) {
  AOTCodeCache* cache = open_for_dump();
  if (cache == nullptr) {
    return false; // Not dumping AOT code
  }
  assert(AOTCodeEntry::is_valid_entry_kind(entry_kind), "invalid entry_kind %d", entry_kind);

  // Only store kinds that the current dump configuration requests.
  if (AOTCodeEntry::is_adapter(entry_kind) && !is_dumping_adapter()) {
    return false;
  }
  if (AOTCodeEntry::is_blob(entry_kind) && !is_dumping_stub()) {
    return false;
  }
  log_debug(aot, codecache, stubs)("Writing blob '%s' (id=%u, kind=%s) to AOT Code Cache", name, id, aot_code_entry_kind_name[entry_kind]);

#ifdef ASSERT
  LogStreamHandle(Trace, aot, codecache, stubs) log;
  if (log.is_enabled()) {
    FlagSetting fs(PrintRelocations, true);
    blob.print_on(&log);
  }
#endif
  // we need to take a lock to prevent race between compiler threads generating AOT code
  // and the main thread generating adapter
  MutexLocker ml(Compile_lock);
  if (!is_on()) {
    return false; // AOT code cache was already dumped and closed.
  }
  if (!cache->align_write()) {
    return false;
  }
  // All offsets recorded below are relative to entry_position.
  uint entry_position = cache->_write_position;

  // Write name
  uint name_offset = cache->_write_position - entry_position;
  uint name_size = (uint)strlen(name) + 1; // Includes terminating '\0'
  uint n = cache->write_bytes(name, name_size);
  if (n != name_size) {
    return false;
  }

  // Write CodeBlob
  if (!cache->align_write()) {
    return false;
  }
  uint blob_offset = cache->_write_position - entry_position;
  address archive_buffer = cache->reserve_bytes(blob.size());
  if (archive_buffer == nullptr) {
    return false;
  }
  CodeBlob::archive_blob(&blob, archive_buffer);

  // Raw relocation info follows the archived image.
  uint reloc_data_size = blob.relocation_size();
  n = cache->write_bytes((address)blob.relocation_begin(), reloc_data_size);
  if (n != reloc_data_size) {
    return false;
  }

  bool has_oop_maps = false;
  if (blob.oop_maps() != nullptr) {
    if (!cache->write_oop_map_set(blob)) {
      return false;
    }
    has_oop_maps = true;
  }

#ifndef PRODUCT
  // Write asm remarks
  if (!cache->write_asm_remarks(blob)) {
    return false;
  }
  if (!cache->write_dbg_strings(blob)) {
    return false;
  }
#endif /* PRODUCT */

  if (!cache->write_relocations(blob)) {
    if (!cache->failed()) {
      // We may miss an address in AOT table - skip this code blob.
      cache->set_write_position(entry_position);
    }
    return false;
  }

  // Register the entry; 'new(cache)' places it in the cache's entry table.
  uint entry_size = cache->_write_position - entry_position;
  AOTCodeEntry* entry = new(cache) AOTCodeEntry(entry_kind, encode_id(entry_kind, id),
                                                entry_position, entry_size, name_offset, name_size,
                                                blob_offset, has_oop_maps, blob.content_begin());
  log_debug(aot, codecache, stubs)("Wrote code blob '%s' (id=%u, kind=%s) to AOT Code Cache", name, id, aot_code_entry_kind_name[entry_kind]);
  return true;
}
963
964 bool AOTCodeCache::store_code_blob(CodeBlob& blob, AOTCodeEntry::Kind entry_kind, BlobId id) {
965 assert(AOTCodeEntry::is_blob(entry_kind),
966 "wrong entry kind for blob id %s", StubInfo::name(id));
967 return store_code_blob(blob, entry_kind, (uint)id, StubInfo::name(id));
968 }
969
970 CodeBlob* AOTCodeCache::load_code_blob(AOTCodeEntry::Kind entry_kind, uint id, const char* name) {
971 AOTCodeCache* cache = open_for_use();
972 if (cache == nullptr) {
973 return nullptr;
974 }
975 assert(AOTCodeEntry::is_valid_entry_kind(entry_kind), "invalid entry_kind %d", entry_kind);
976
977 if (AOTCodeEntry::is_adapter(entry_kind) && !is_using_adapter()) {
978 return nullptr;
979 }
980 if (AOTCodeEntry::is_blob(entry_kind) && !is_using_stub()) {
981 return nullptr;
982 }
983 log_debug(aot, codecache, stubs)("Reading blob '%s' (id=%u, kind=%s) from AOT Code Cache", name, id, aot_code_entry_kind_name[entry_kind]);
984
985 AOTCodeEntry* entry = cache->find_entry(entry_kind, encode_id(entry_kind, id));
986 if (entry == nullptr) {
987 return nullptr;
988 }
989 AOTCodeReader reader(cache, entry);
990 CodeBlob* blob = reader.compile_code_blob(name);
991
992 log_debug(aot, codecache, stubs)("%sRead blob '%s' (id=%u, kind=%s) from AOT Code Cache",
993 (blob == nullptr? "Failed to " : ""), name, id, aot_code_entry_kind_name[entry_kind]);
994 return blob;
995 }
996
997 CodeBlob* AOTCodeCache::load_code_blob(AOTCodeEntry::Kind entry_kind, BlobId id) {
998 assert(AOTCodeEntry::is_blob(entry_kind),
999 "wrong entry kind for blob id %s", StubInfo::name(id));
1000 return load_code_blob(entry_kind, (uint)id, StubInfo::name(id));
1001 }
1002
// Reconstruct a CodeBlob from the archived image referenced by _entry.
// Verifies the stored name, creates a live blob in the CodeCache from the
// archived bytes, restores oop maps and (non-PRODUCT) asm remarks / dbg
// strings, then patches relocations for the new load address.
// Returns nullptr on name mismatch or when the CodeCache has no space left.
CodeBlob* AOTCodeReader::compile_code_blob(const char* name) {
  uint entry_position = _entry->offset();

  // Read name
  uint name_offset = entry_position + _entry->name_offset();
  uint name_size = _entry->name_size(); // Includes terminating '\0'
  const char* stored_name = addr(name_offset);

  if (strncmp(stored_name, name, (name_size - 1)) != 0) {
    log_warning(aot, codecache, stubs)("Saved blob's name '%s' is different from the expected name '%s'",
                                       stored_name, name);
    set_lookup_failed(); // Skip this blob
    return nullptr;
  }

  // Read archived code blob
  uint offset = entry_position + _entry->blob_offset();
  CodeBlob* archived_blob = (CodeBlob*)addr(offset);
  offset += archived_blob->size();

  // Relocation data immediately follows the archived blob image.
  address reloc_data = (address)addr(offset);
  offset += archived_blob->relocation_size();
  set_read_position(offset);

  ImmutableOopMapSet* oop_maps = nullptr;
  if (_entry->has_oop_maps()) {
    oop_maps = read_oop_map_set();
  }

  // Allocate the live blob in the CodeCache and copy the archived image in.
  CodeBlob* code_blob = CodeBlob::create(archived_blob,
                                         stored_name,
                                         reloc_data,
                                         oop_maps
                                         );
  if (code_blob == nullptr) { // no space left in CodeCache
    return nullptr;
  }

#ifndef PRODUCT
  code_blob->asm_remarks().init();
  read_asm_remarks(code_blob->asm_remarks());
  code_blob->dbg_strings().init();
  read_dbg_strings(code_blob->dbg_strings());
#endif // PRODUCT

  // Rebind call/external/internal-word relocations to run-time addresses.
  fix_relocations(code_blob);

#ifdef ASSERT
  LogStreamHandle(Trace, aot, codecache, stubs) log;
  if (log.is_enabled()) {
    FlagSetting fs(PrintRelocations, true);
    code_blob->print_on(&log);
  }
#endif
  return code_blob;
}
1059
1060 // ------------ process code and data --------------
1061
// Can't use -1. It is valid value for jump to itself destination
1063 // used by static call stub: see NativeJump::jump_destination().
1064 #define BAD_ADDRESS_ID -2
1065
1066 bool AOTCodeCache::write_relocations(CodeBlob& code_blob) {
1067 GrowableArray<uint> reloc_data;
1068 RelocIterator iter(&code_blob);
1069 LogStreamHandle(Trace, aot, codecache, reloc) log;
1070 while (iter.next()) {
1071 int idx = reloc_data.append(0); // default value
1072 switch (iter.type()) {
1073 case relocInfo::none:
1074 break;
1075 case relocInfo::runtime_call_type: {
1076 // Record offset of runtime destination
1077 CallRelocation* r = (CallRelocation*)iter.reloc();
1078 address dest = r->destination();
1079 if (dest == r->addr()) { // possible call via trampoline on Aarch64
1080 dest = (address)-1; // do nothing in this case when loading this relocation
1081 }
1082 int id = _table->id_for_address(dest, iter, &code_blob);
1083 if (id == BAD_ADDRESS_ID) {
1084 return false;
1085 }
1086 reloc_data.at_put(idx, id);
1087 break;
1088 }
1089 case relocInfo::runtime_call_w_cp_type:
1090 log_debug(aot, codecache, reloc)("runtime_call_w_cp_type relocation is not implemented");
1091 return false;
1092 case relocInfo::external_word_type: {
1093 // Record offset of runtime target
1094 address target = ((external_word_Relocation*)iter.reloc())->target();
1095 int id = _table->id_for_address(target, iter, &code_blob);
1096 if (id == BAD_ADDRESS_ID) {
1097 return false;
1098 }
1099 reloc_data.at_put(idx, id);
1100 break;
1101 }
1102 case relocInfo::internal_word_type:
1103 break;
1104 case relocInfo::section_word_type:
1105 break;
1106 case relocInfo::post_call_nop_type:
1107 break;
1108 default:
1109 log_debug(aot, codecache, reloc)("relocation %d unimplemented", (int)iter.type());
1110 return false;
1111 break;
1112 }
1113 if (log.is_enabled()) {
1114 iter.print_current_on(&log);
1115 }
1116 }
1117
1118 // Write additional relocation data: uint per relocation
1119 // Write the count first
1120 int count = reloc_data.length();
1121 write_bytes(&count, sizeof(int));
1122 for (GrowableArrayIterator<uint> iter = reloc_data.begin();
1123 iter != reloc_data.end(); ++iter) {
1124 uint value = *iter;
1125 int n = write_bytes(&value, sizeof(uint));
1126 if (n != sizeof(uint)) {
1127 return false;
1128 }
1129 }
1130 return true;
1131 }
1132
// Patch the relocations of a freshly loaded code blob using the extra
// relocation data written by write_relocations(): one uint per relocation,
// consumed in the same iteration order (index j).
void AOTCodeReader::fix_relocations(CodeBlob* code_blob) {
  LogStreamHandle(Trace, aot, reloc) log;
  uint offset = read_position();
  int count = *(int*)addr(offset);
  offset += sizeof(int);
  if (log.is_enabled()) {
    log.print_cr("======== extra relocations count=%d", count);
  }
  uint* reloc_data = (uint*)addr(offset);
  offset += (count * sizeof(uint));
  set_read_position(offset);

  RelocIterator iter(code_blob);
  int j = 0; // index into reloc_data, advanced once per relocation
  while (iter.next()) {
    switch (iter.type()) {
      case relocInfo::none:
        break;
      case relocInfo::runtime_call_type: {
        // Rebind the call to the current address of the recorded target.
        address dest = _cache->address_for_id(reloc_data[j]);
        if (dest != (address)-1) { // -1 means "leave as is" (see write_relocations)
          ((CallRelocation*)iter.reloc())->set_destination(dest);
        }
        break;
      }
      case relocInfo::runtime_call_w_cp_type:
        // this relocation should not be in cache (see write_relocations)
        assert(false, "runtime_call_w_cp_type relocation is not implemented");
        break;
      case relocInfo::external_word_type: {
        address target = _cache->address_for_id(reloc_data[j]);
        // Add external address to global table
        int index = ExternalsRecorder::find_index(target);
        // Update index in relocation
        Relocation::add_jint(iter.data(), index);
        external_word_Relocation* reloc = (external_word_Relocation*)iter.reloc();
        assert(reloc->target() == target, "sanity");
        reloc->set_value(target); // Patch address in the code
        break;
      }
      case relocInfo::internal_word_type: {
        // Shift by the delta between dump-time and load-time content start.
        internal_word_Relocation* r = (internal_word_Relocation*)iter.reloc();
        r->fix_relocation_after_aot_load(aot_code_entry()->dumptime_content_start_addr(), code_blob->content_begin());
        break;
      }
      case relocInfo::section_word_type: {
        section_word_Relocation* r = (section_word_Relocation*)iter.reloc();
        r->fix_relocation_after_aot_load(aot_code_entry()->dumptime_content_start_addr(), code_blob->content_begin());
        break;
      }
      case relocInfo::post_call_nop_type:
        break;
      default:
        assert(false,"relocation %d unimplemented", (int)iter.type());
        break;
    }
    if (log.is_enabled()) {
      iter.print_current_on(&log);
    }
    j++;
  }
  assert(j == count, "sanity");
}
1196
1197 bool AOTCodeCache::write_oop_map_set(CodeBlob& cb) {
1198 ImmutableOopMapSet* oopmaps = cb.oop_maps();
1199 int oopmaps_size = oopmaps->nr_of_bytes();
1200 if (!write_bytes(&oopmaps_size, sizeof(int))) {
1201 return false;
1202 }
1203 uint n = write_bytes(oopmaps, oopmaps->nr_of_bytes());
1204 if (n != (uint)oopmaps->nr_of_bytes()) {
1205 return false;
1206 }
1207 return true;
1208 }
1209
1210 ImmutableOopMapSet* AOTCodeReader::read_oop_map_set() {
1211 uint offset = read_position();
1212 int size = *(int *)addr(offset);
1213 offset += sizeof(int);
1214 ImmutableOopMapSet* oopmaps = (ImmutableOopMapSet *)addr(offset);
1215 offset += size;
1216 set_read_position(offset);
1217 return oopmaps;
1218 }
1219
1220 #ifndef PRODUCT
// Write the blob's asm remarks: a uint count (back-patched after iteration),
// then per remark a uint code offset and the int id of the interned remark
// string in the C-string table.  Returns false if any write fails.
bool AOTCodeCache::write_asm_remarks(CodeBlob& cb) {
  // Write asm remarks
  // Reserve the count slot up front; it is patched once the count is known.
  uint* count_ptr = (uint *)reserve_bytes(sizeof(uint));
  if (count_ptr == nullptr) {
    return false;
  }
  uint count = 0;
  bool result = cb.asm_remarks().iterate([&] (uint offset, const char* str) -> bool {
    log_trace(aot, codecache, stubs)("asm remark offset=%d, str='%s'", offset, str);
    uint n = write_bytes(&offset, sizeof(uint));
    if (n != sizeof(uint)) {
      return false; // stop iteration; overall result becomes false
    }
    // Intern the remark text so it survives in the cache's C-string table.
    const char* cstr = add_C_string(str);
    int id = _table->id_for_C_string((address)cstr);
    assert(id != -1, "asm remark string '%s' not found in AOTCodeAddressTable", str);
    n = write_bytes(&id, sizeof(int));
    if (n != sizeof(int)) {
      return false;
    }
    count += 1;
    return true;
  });
  *count_ptr = count; // back-patch the reserved count slot
  return result;
}
1247
1248 void AOTCodeReader::read_asm_remarks(AsmRemarks& asm_remarks) {
1249 // Read asm remarks
1250 uint offset = read_position();
1251 uint count = *(uint *)addr(offset);
1252 offset += sizeof(uint);
1253 for (uint i = 0; i < count; i++) {
1254 uint remark_offset = *(uint *)addr(offset);
1255 offset += sizeof(uint);
1256 int remark_string_id = *(uint *)addr(offset);
1257 offset += sizeof(int);
1258 const char* remark = (const char*)_cache->address_for_C_string(remark_string_id);
1259 asm_remarks.insert(remark_offset, remark);
1260 }
1261 set_read_position(offset);
1262 }
1263
1264 bool AOTCodeCache::write_dbg_strings(CodeBlob& cb) {
1265 // Write dbg strings
1266 uint* count_ptr = (uint *)reserve_bytes(sizeof(uint));
1267 if (count_ptr == nullptr) {
1268 return false;
1269 }
1270 uint count = 0;
1271 bool result = cb.dbg_strings().iterate([&] (const char* str) -> bool {
1272 log_trace(aot, codecache, stubs)("dbg string=%s", str);
1273 const char* cstr = add_C_string(str);
1274 int id = _table->id_for_C_string((address)cstr);
1275 assert(id != -1, "db string '%s' not found in AOTCodeAddressTable", str);
1276 uint n = write_bytes(&id, sizeof(int));
1277 if (n != sizeof(int)) {
1278 return false;
1279 }
1280 count += 1;
1281 return true;
1282 });
1283 *count_ptr = count;
1284 return result;
1285 }
1286
1287 void AOTCodeReader::read_dbg_strings(DbgStrings& dbg_strings) {
1288 // Read dbg strings
1289 uint offset = read_position();
1290 uint count = *(uint *)addr(offset);
1291 offset += sizeof(uint);
1292 for (uint i = 0; i < count; i++) {
1293 int string_id = *(uint *)addr(offset);
1294 offset += sizeof(int);
1295 const char* str = (const char*)_cache->address_for_C_string(string_id);
1296 dbg_strings.insert(str);
1297 }
1298 set_read_position(offset);
1299 }
1300 #endif // PRODUCT
1301
1302 //======================= AOTCodeAddressTable ===============
1303
1304 // address table ids for generated routines, external addresses and C
1305 // string addresses are partitioned into positive integer ranges
1306 // defined by the following positive base and max values
1307 // i.e. [_extrs_base, _extrs_base + _extrs_max -1],
1308 // [_blobs_base, _blobs_base + _blobs_max -1],
1309 // ...
1310 // [_c_str_base, _c_str_base + _c_str_max -1],
1311
// Sizes of the id sub-ranges (see layout comment above).
#define _extrs_max 100
#define _stubs_max 3

#define _shared_blobs_max 20
#define _C1_blobs_max 10
#define _blobs_max (_shared_blobs_max+_C1_blobs_max)
#define _all_max (_extrs_max+_stubs_max+_blobs_max)

// Base id of each sub-range; ranges are laid out back-to-back.
#define _extrs_base 0
#define _stubs_base (_extrs_base + _extrs_max)
#define _shared_blobs_base (_stubs_base + _stubs_max)
#define _C1_blobs_base (_shared_blobs_base + _shared_blobs_max)
#define _blobs_end (_shared_blobs_base + _blobs_max)

// Append 'addr' to the given address table and bump its length counter,
// asserting that the table's capacity is not exceeded.
#define SET_ADDRESS(type, addr) \
  { \
    type##_addr[type##_length++] = (address) (addr); \
    assert(type##_length <= type##_max, "increase size"); \
  }
1331
static bool initializing_extrs = false; // Guards against re-entrant initialization

// Record addresses of external VM runtime entry points that AOT code may
// reference.  NOTE: the insertion order defines the cached address-table ids,
// so it must be identical between the JVM run that dumps the cache and the
// run that uses it (including the conditional, platform-specific sections).
void AOTCodeAddressTable::init_extrs() {
  if (_extrs_complete || initializing_extrs) return; // Done already

  assert(_blobs_end <= _all_max, "AOTCodeAddress table ranges need adjusting");

  initializing_extrs = true;
  _extrs_addr = NEW_C_HEAP_ARRAY(address, _extrs_max, mtCode);

  _extrs_length = 0;

  // Record addresses of VM runtime methods
  SET_ADDRESS(_extrs, SharedRuntime::fixup_callers_callsite);
  SET_ADDRESS(_extrs, SharedRuntime::handle_wrong_method);
  SET_ADDRESS(_extrs, SharedRuntime::handle_wrong_method_abstract);
  SET_ADDRESS(_extrs, SharedRuntime::handle_wrong_method_ic_miss);
#if defined(AARCH64) && !defined(ZERO)
  SET_ADDRESS(_extrs, JavaThread::aarch64_get_thread_helper);
#endif
  {
    // Required by Shared blobs
    SET_ADDRESS(_extrs, Deoptimization::fetch_unroll_info);
    SET_ADDRESS(_extrs, Deoptimization::unpack_frames);
    SET_ADDRESS(_extrs, SafepointSynchronize::handle_polling_page_exception);
    SET_ADDRESS(_extrs, SharedRuntime::resolve_opt_virtual_call_C);
    SET_ADDRESS(_extrs, SharedRuntime::resolve_virtual_call_C);
    SET_ADDRESS(_extrs, SharedRuntime::resolve_static_call_C);
    SET_ADDRESS(_extrs, SharedRuntime::throw_StackOverflowError);
    SET_ADDRESS(_extrs, SharedRuntime::throw_delayed_StackOverflowError);
    SET_ADDRESS(_extrs, SharedRuntime::throw_AbstractMethodError);
    SET_ADDRESS(_extrs, SharedRuntime::throw_IncompatibleClassChangeError);
    SET_ADDRESS(_extrs, SharedRuntime::throw_NullPointerException_at_call);
  }

#ifdef COMPILER1
  {
    // Required by C1 blobs
    // Cast selects the (oopDesc*) overload of dtrace_object_alloc.
    SET_ADDRESS(_extrs, static_cast<int (*)(oopDesc*)>(SharedRuntime::dtrace_object_alloc));
    SET_ADDRESS(_extrs, SharedRuntime::exception_handler_for_return_address);
    SET_ADDRESS(_extrs, SharedRuntime::register_finalizer);
    SET_ADDRESS(_extrs, Runtime1::is_instance_of);
    SET_ADDRESS(_extrs, Runtime1::exception_handler_for_pc);
    SET_ADDRESS(_extrs, Runtime1::check_abort_on_vm_exception);
    SET_ADDRESS(_extrs, Runtime1::new_instance);
    SET_ADDRESS(_extrs, Runtime1::counter_overflow);
    SET_ADDRESS(_extrs, Runtime1::new_type_array);
    SET_ADDRESS(_extrs, Runtime1::new_object_array);
    SET_ADDRESS(_extrs, Runtime1::new_multi_array);
    SET_ADDRESS(_extrs, Runtime1::throw_range_check_exception);
    SET_ADDRESS(_extrs, Runtime1::throw_index_exception);
    SET_ADDRESS(_extrs, Runtime1::throw_div0_exception);
    SET_ADDRESS(_extrs, Runtime1::throw_null_pointer_exception);
    SET_ADDRESS(_extrs, Runtime1::throw_array_store_exception);
    SET_ADDRESS(_extrs, Runtime1::throw_class_cast_exception);
    SET_ADDRESS(_extrs, Runtime1::throw_incompatible_class_change_error);
    SET_ADDRESS(_extrs, Runtime1::is_instance_of);
    SET_ADDRESS(_extrs, Runtime1::monitorenter);
    SET_ADDRESS(_extrs, Runtime1::monitorexit);
    SET_ADDRESS(_extrs, Runtime1::deoptimize);
    SET_ADDRESS(_extrs, Runtime1::access_field_patching);
    SET_ADDRESS(_extrs, Runtime1::move_klass_patching);
    SET_ADDRESS(_extrs, Runtime1::move_mirror_patching);
    SET_ADDRESS(_extrs, Runtime1::move_appendix_patching);
    SET_ADDRESS(_extrs, Runtime1::predicate_failed_trap);
    SET_ADDRESS(_extrs, Runtime1::unimplemented_entry);
    SET_ADDRESS(_extrs, Thread::current);
    SET_ADDRESS(_extrs, CompressedKlassPointers::base_addr());
#ifndef PRODUCT
    SET_ADDRESS(_extrs, os::breakpoint);
#endif
  }
#endif

#ifdef COMPILER2
  {
    // Required by C2 blobs
    SET_ADDRESS(_extrs, Deoptimization::uncommon_trap);
    SET_ADDRESS(_extrs, OptoRuntime::handle_exception_C);
    SET_ADDRESS(_extrs, OptoRuntime::new_instance_C);
    SET_ADDRESS(_extrs, OptoRuntime::new_array_C);
    SET_ADDRESS(_extrs, OptoRuntime::new_array_nozero_C);
    SET_ADDRESS(_extrs, OptoRuntime::multianewarray2_C);
    SET_ADDRESS(_extrs, OptoRuntime::multianewarray3_C);
    SET_ADDRESS(_extrs, OptoRuntime::multianewarray4_C);
    SET_ADDRESS(_extrs, OptoRuntime::multianewarray5_C);
    SET_ADDRESS(_extrs, OptoRuntime::multianewarrayN_C);
    SET_ADDRESS(_extrs, OptoRuntime::complete_monitor_locking_C);
    SET_ADDRESS(_extrs, OptoRuntime::monitor_notify_C);
    SET_ADDRESS(_extrs, OptoRuntime::monitor_notifyAll_C);
    SET_ADDRESS(_extrs, OptoRuntime::rethrow_C);
    SET_ADDRESS(_extrs, OptoRuntime::slow_arraycopy_C);
    SET_ADDRESS(_extrs, OptoRuntime::register_finalizer_C);
    SET_ADDRESS(_extrs, OptoRuntime::vthread_end_first_transition_C);
    SET_ADDRESS(_extrs, OptoRuntime::vthread_start_final_transition_C);
    SET_ADDRESS(_extrs, OptoRuntime::vthread_start_transition_C);
    SET_ADDRESS(_extrs, OptoRuntime::vthread_end_transition_C);
#if defined(AARCH64)
    SET_ADDRESS(_extrs, JavaThread::verify_cross_modify_fence_failure);
#endif // AARCH64
  }
#endif // COMPILER2

  // GC-specific barrier runtime entries (recorded only for the built-in GCs).
#if INCLUDE_G1GC
  SET_ADDRESS(_extrs, G1BarrierSetRuntime::write_ref_field_pre_entry);
#endif
#if INCLUDE_SHENANDOAHGC
  SET_ADDRESS(_extrs, ShenandoahRuntime::write_barrier_pre);
  SET_ADDRESS(_extrs, ShenandoahRuntime::load_reference_barrier_phantom);
  SET_ADDRESS(_extrs, ShenandoahRuntime::load_reference_barrier_phantom_narrow);
#endif
#if INCLUDE_ZGC
  SET_ADDRESS(_extrs, ZBarrierSetRuntime::load_barrier_on_oop_field_preloaded_addr());
  SET_ADDRESS(_extrs, ZBarrierSetRuntime::load_barrier_on_phantom_oop_field_preloaded_addr());
#if defined(AMD64)
  SET_ADDRESS(_extrs, &ZPointerLoadShift);
#endif
#endif
#ifndef ZERO
#if defined(AMD64) || defined(AARCH64) || defined(RISCV64)
  SET_ADDRESS(_extrs, MacroAssembler::debug64);
#endif
#endif // ZERO

  // addresses of fields in AOT runtime constants area
  address* p = AOTRuntimeConstants::field_addresses_list();
  while (*p != nullptr) {
    SET_ADDRESS(_extrs, *p++);
  }

  _extrs_complete = true;
  log_debug(aot, codecache, init)("External addresses recorded");
}
1465
static bool initializing_early_stubs = false; // Guards against re-entrant initialization

// Record addresses of early stub routines referenced by AOT code.
// Insertion order defines the cached ids (must match between dump and run).
void AOTCodeAddressTable::init_early_stubs() {
  if (_complete || initializing_early_stubs) return; // Done already
  initializing_early_stubs = true;
  _stubs_addr = NEW_C_HEAP_ARRAY(address, _stubs_max, mtCode);
  _stubs_length = 0;
  SET_ADDRESS(_stubs, StubRoutines::forward_exception_entry());

  {
    // Required by C1 blobs
#if defined(AMD64) && !defined(ZERO)
    SET_ADDRESS(_stubs, StubRoutines::x86::double_sign_flip());
    SET_ADDRESS(_stubs, StubRoutines::x86::d2l_fixup());
#endif // AMD64
  }

  _early_stubs_complete = true;
  log_info(aot, codecache, init)("Early stubs recorded");
}
1486
static bool initializing_shared_blobs = false; // Guards against re-entrant initialization

// Record entry points of shared runtime blobs (wrong-method, ic-miss, deopt).
// Insertion order defines the cached ids (must match between dump and run).
void AOTCodeAddressTable::init_shared_blobs() {
  if (_complete || initializing_shared_blobs) return; // Done already
  initializing_shared_blobs = true;
  address* blobs_addr = NEW_C_HEAP_ARRAY(address, _blobs_max, mtCode);

  // Divide _shared_blobs_addr array to chunks because they could be initialized in parallel
  // (single allocation; _C1_blobs_addr aliases the tail of it, see destructor).
  _shared_blobs_addr = blobs_addr;
  _C1_blobs_addr = _shared_blobs_addr + _shared_blobs_max;

  _shared_blobs_length = 0;
  _C1_blobs_length = 0;

  // clear the address table
  memset(blobs_addr, 0, sizeof(address)* _blobs_max);

  // Record addresses of generated code blobs
  SET_ADDRESS(_shared_blobs, SharedRuntime::get_handle_wrong_method_stub());
  SET_ADDRESS(_shared_blobs, SharedRuntime::get_ic_miss_stub());
  SET_ADDRESS(_shared_blobs, SharedRuntime::deopt_blob()->unpack());
  SET_ADDRESS(_shared_blobs, SharedRuntime::deopt_blob()->unpack_with_exception());
  SET_ADDRESS(_shared_blobs, SharedRuntime::deopt_blob()->unpack_with_reexecution());
  SET_ADDRESS(_shared_blobs, SharedRuntime::deopt_blob()->unpack_with_exception_in_tls());
#if INCLUDE_JVMCI
  if (EnableJVMCI) {
    // JVMCI-only deopt entries are recorded conditionally on EnableJVMCI.
    SET_ADDRESS(_shared_blobs, SharedRuntime::deopt_blob()->uncommon_trap());
    SET_ADDRESS(_shared_blobs, SharedRuntime::deopt_blob()->implicit_exception_uncommon_trap());
  }
#endif

  _shared_blobs_complete = true;
  log_debug(aot, codecache, init)("Early shared blobs recorded");
  _complete = true;
}
1522
// Record C1 runtime stub entry points, up to and including
// c1_forward_exception_id.  Blobs/entries missing in this configuration are
// logged and skipped.  Insertion order defines the cached ids.
void AOTCodeAddressTable::init_early_c1() {
#ifdef COMPILER1
  // Runtime1 Blobs
  StubId id = StubInfo::stub_base(StubGroup::C1);
  // include forward_exception in range we publish
  StubId limit = StubInfo::next(StubId::c1_forward_exception_id);
  for (; id != limit; id = StubInfo::next(id)) {
    if (Runtime1::blob_for(id) == nullptr) {
      log_info(aot, codecache, init)("C1 blob %s is missing", Runtime1::name_for(id));
      continue;
    }
    if (Runtime1::entry_for(id) == nullptr) {
      log_info(aot, codecache, init)("C1 blob %s is missing entry", Runtime1::name_for(id));
      continue;
    }
    address entry = Runtime1::entry_for(id);
    SET_ADDRESS(_C1_blobs, entry);
  }
#endif // COMPILER1
  assert(_C1_blobs_length <= _C1_blobs_max, "increase _C1_blobs_max to %d", _C1_blobs_length);
  _early_c1_complete = true;
}
1545
1546 #undef SET_ADDRESS
1547
// Free the address tables.  Note: _C1_blobs_addr points into the single
// allocation owned by _shared_blobs_addr (see init_shared_blobs), so freeing
// _shared_blobs_addr releases the C1 chunk as well.
AOTCodeAddressTable::~AOTCodeAddressTable() {
  if (_extrs_addr != nullptr) {
    FREE_C_HEAP_ARRAY(address, _extrs_addr);
  }
  if (_stubs_addr != nullptr) {
    FREE_C_HEAP_ARRAY(address, _stubs_addr);
  }
  if (_shared_blobs_addr != nullptr) {
    FREE_C_HEAP_ARRAY(address, _shared_blobs_addr);
  }
}
1559
// Capacity of the C-string table (smaller in PRODUCT builds, where asm
// remarks and dbg strings are not stored).
#ifdef PRODUCT
#define MAX_STR_COUNT 200
#else
#define MAX_STR_COUNT 500
#endif
#define _c_str_max MAX_STR_COUNT
// C-string ids start right after the address-table id ranges.
static const int _c_str_base = _all_max;

static const char* _C_strings_in[MAX_STR_COUNT] = {nullptr}; // Incoming strings
static const char* _C_strings[MAX_STR_COUNT] = {nullptr};    // Our duplicates
static int _C_strings_count = 0;                             // Number of interned strings
static int _C_strings_s[MAX_STR_COUNT] = {0};                // Store order -> intern index
static int _C_strings_id[MAX_STR_COUNT] = {0};               // Intern index -> store id (-1 until recorded, see add_C_string)
static int _C_strings_used = 0;                              // Number of strings recorded for the store
1574
// Load the interned C strings from the cache: an array of uint lengths at
// strings_offset, followed by the concatenated '\0'-terminated strings
// (lengths include the terminator, see store_strings).
void AOTCodeCache::load_strings() {
  uint strings_count = _load_header->strings_count();
  if (strings_count == 0) {
    return;
  }
  uint strings_offset = _load_header->strings_offset();
  uint* string_lengths = (uint*)addr(strings_offset);
  strings_offset += (strings_count * sizeof(uint));
  uint strings_size = _load_header->entries_offset() - strings_offset;
  // We have to keep cached strings longer than _cache buffer
  // because they are referenced from compiled code which may
  // still be executed on VM exit after _cache is freed.
  char* p = NEW_C_HEAP_ARRAY(char, strings_size+1, mtCode);
  memcpy(p, addr(strings_offset), strings_size);
  _C_strings_buf = p;
  assert(strings_count <= MAX_STR_COUNT, "sanity");
  for (uint i = 0; i < strings_count; i++) {
    _C_strings[i] = p;   // i-th string starts at the current position
    uint len = string_lengths[i];
    _C_strings_s[i] = i; // identity mapping: store order == intern order
    _C_strings_id[i] = i;
    p += len;
  }
  assert((uint)(p - _C_strings_buf) <= strings_size, "(" INTPTR_FORMAT " - " INTPTR_FORMAT ") = %d > %d ", p2i(p), p2i(_C_strings_buf), (uint)(p - _C_strings_buf), strings_size);
  _C_strings_count = strings_count;
  _C_strings_used = strings_count;
  log_debug(aot, codecache, init)(" Loaded %d C strings of total length %d at offset %d from AOT Code Cache", _C_strings_count, strings_size, strings_offset);
}
1603
// Write the interned C strings used by stored code: first an array of uint
// lengths (each including the terminating '\0'), then the string bytes, in
// store order (_C_strings_s).  Returns the number of strings written, or -1
// on write failure.
int AOTCodeCache::store_strings() {
  if (_C_strings_used > 0) {
    MutexLocker ml(AOTCodeCStrings_lock, Mutex::_no_safepoint_check_flag);
    uint offset = _write_position;
    uint length = 0;
    // Reserve the lengths array up front; entries are filled in the loop.
    uint* lengths = (uint *)reserve_bytes(sizeof(uint) * _C_strings_used);
    if (lengths == nullptr) {
      return -1;
    }
    for (int i = 0; i < _C_strings_used; i++) {
      const char* str = _C_strings[_C_strings_s[i]];
      uint len = (uint)strlen(str) + 1;
      length += len;
      assert(len < 1000, "big string: %s", str);
      lengths[i] = len;
      uint n = write_bytes(str, len);
      if (n != len) {
        return -1;
      }
    }
    log_debug(aot, codecache, exit)(" Wrote %d C strings of total length %d at offset %d to AOT Code Cache",
                                    _C_strings_used, length, offset);
  }
  return _C_strings_used;
}
1629
1630 const char* AOTCodeCache::add_C_string(const char* str) {
1631 if (is_on_for_dump() && str != nullptr) {
1632 MutexLocker ml(AOTCodeCStrings_lock, Mutex::_no_safepoint_check_flag);
1633 AOTCodeAddressTable* table = addr_table();
1634 if (table != nullptr) {
1635 return table->add_C_string(str);
1636 }
1637 }
1638 return str;
1639 }
1640
// Intern a C string for dumping.  Returns the cached duplicate when the
// string was seen before (matched by address or by content); otherwise
// duplicates it with os::strdup() so the returned pointer stays valid even
// if the caller frees the original.  No-op until init_extrs() has completed.
// NOTE(review): expected to run under AOTCodeCStrings_lock — the callers in
// this file (AOTCodeCache::add_C_string) take it; confirm for new callers.
const char* AOTCodeAddressTable::add_C_string(const char* str) {
  if (_extrs_complete) {
    // Check previous strings address
    for (int i = 0; i < _C_strings_count; i++) {
      if (_C_strings_in[i] == str) {
        return _C_strings[i]; // Found previous one - return our duplicate
      } else if (strcmp(_C_strings[i], str) == 0) {
        return _C_strings[i]; // Same content under a different address
      }
    }
    // Add new one
    if (_C_strings_count < MAX_STR_COUNT) {
      // Passed in string can be freed and used space become inaccessible.
      // Keep original address but duplicate string for future compare.
      _C_strings_id[_C_strings_count] = -1; // Init: no store id assigned yet
      _C_strings_in[_C_strings_count] = str;
      const char* dup = os::strdup(str);
      _C_strings[_C_strings_count++] = dup;
      log_trace(aot, codecache, stringtable)("add_C_string: [%d] " INTPTR_FORMAT " '%s'", _C_strings_count, p2i(dup), dup);
      return dup;
    } else {
      assert(false, "Number of C strings >= MAX_STR_COUNT");
    }
  }
  return str;
}
1667
1668 int AOTCodeAddressTable::id_for_C_string(address str) {
1669 if (str == nullptr) {
1670 return -1;
1671 }
1672 MutexLocker ml(AOTCodeCStrings_lock, Mutex::_no_safepoint_check_flag);
1673 for (int i = 0; i < _C_strings_count; i++) {
1674 if (_C_strings[i] == (const char*)str) { // found
1675 int id = _C_strings_id[i];
1676 if (id >= 0) {
1677 assert(id < _C_strings_used, "%d >= %d", id , _C_strings_used);
1678 return id; // Found recorded
1679 }
1680 // Not found in recorded, add new
1681 id = _C_strings_used++;
1682 _C_strings_s[id] = i;
1683 _C_strings_id[i] = id;
1684 return id;
1685 }
1686 }
1687 return -1;
1688 }
1689
// Map an interned C-string index back to the string's address.
address AOTCodeAddressTable::address_for_C_string(int idx) {
  assert(idx < _C_strings_count, "sanity");
  return (address)_C_strings[idx];
}
1694
1695 static int search_address(address addr, address* table, uint length) {
1696 for (int i = 0; i < (int)length; i++) {
1697 if (table[i] == addr) {
1698 return i;
1699 }
1700 }
1701 return BAD_ADDRESS_ID;
1702 }
1703
// Map a cached address-table id back to the current run-time address.
// Ids are partitioned as [_extrs | _stubs | _shared_blobs | _C1_blobs |
// C strings]; any id beyond the C-string range encodes an offset relative
// to os::init.  The id -1 is the "do nothing" marker used by relocations.
address AOTCodeAddressTable::address_for_id(int idx) {
  assert(_extrs_complete, "AOT Code Cache VM runtime addresses table is not complete");
  if (idx == -1) {
    return (address)-1; // "leave relocation as is" marker
  }
  uint id = (uint)idx;
  // special case for symbols based relative to os::init
  if (id > (_c_str_base + _c_str_max)) {
    return (address)os::init + idx;
  }
  // NOTE(review): this branch looks unreachable — idx == -1 returned above
  // and any other negative idx is caught by the unsigned comparison above.
  if (idx < 0) {
    fatal("Incorrect id %d for AOT Code Cache addresses table", id);
    return nullptr;
  }
  // no need to compare unsigned id against 0
  if (/* id >= _extrs_base && */ id < _extrs_length) {
    return _extrs_addr[id - _extrs_base];
  }
  if (id >= _stubs_base && id < _stubs_base + _stubs_length) {
    return _stubs_addr[id - _stubs_base];
  }
  if (id >= _shared_blobs_base && id < _shared_blobs_base + _shared_blobs_length) {
    return _shared_blobs_addr[id - _shared_blobs_base];
  }
  if (id >= _C1_blobs_base && id < _C1_blobs_base + _C1_blobs_length) {
    return _C1_blobs_addr[id - _C1_blobs_base];
  }
  if (id >= _c_str_base && id < (_c_str_base + (uint)_C_strings_count)) {
    return address_for_C_string(id - _c_str_base);
  }
  fatal("Incorrect id %d for AOT Code Cache addresses table", id);
  return nullptr;
}
1737
// Map a runtime address referenced from generated code to a stable id that
// can be stored in the AOT code cache and resolved again by address_for_id()
// in a future VM instance.  Lookup order: C strings, stub routines, code
// blobs, external runtime entries; as a last resort an in-image symbol is
// encoded as its byte distance from os::init.  Returns -1 for the
// (address)-1 sentinel (static call stub jump to itself); in debug builds an
// unresolvable address triggers an assert.
int AOTCodeAddressTable::id_for_address(address addr, RelocIterator reloc, CodeBlob* code_blob) {
  assert(_extrs_complete, "AOT Code Cache VM runtime addresses table is not complete");
  int id = -1;
  if (addr == (address)-1) { // Static call stub has jump to itself
    return id;
  }
  // Check card_table_base address first since it can point to any address.
  // A constant card table base must never be recorded through this table
  // (it is captured separately - see AOTRuntimeConstants).
  BarrierSet* bs = BarrierSet::barrier_set();
  bool is_const_card_table_base = !UseG1GC && !UseShenandoahGC && bs->is_a(BarrierSet::CardTableBarrierSet);
  guarantee(!is_const_card_table_base || addr != ci_card_table_address_const(), "sanity");

  // Search for C string
  id = id_for_C_string(addr);
  if (id >= 0) {
    // C-string ids are biased into their own range of the id space
    return id + _c_str_base;
  }
  if (StubRoutines::contains(addr)) {
    // Search in stubs
    id = search_address(addr, _stubs_addr, _stubs_length);
    if (id < 0) {
      // Resolve a stub name for the assert message; 'addr' may be a return
      // pc just past the stub, hence the second, offset probe.
      StubCodeDesc* desc = StubCodeDesc::desc_for(addr);
      if (desc == nullptr) {
        desc = StubCodeDesc::desc_for(addr + frame::pc_return_offset);
      }
      const char* sub_name = (desc != nullptr) ? desc->name() : "<unknown>";
      assert(false, "Address " INTPTR_FORMAT " for Stub:%s is missing in AOT Code Cache addresses table", p2i(addr), sub_name);
    } else {
      return id + _stubs_base;
    }
  } else {
    CodeBlob* cb = CodeCache::find_blob(addr);
    if (cb != nullptr) {
      // Search in code blobs (_blobs_max covers the shared and C1 blob
      // sub-tables, which are contiguous after _shared_blobs_base)
      int id_base = _shared_blobs_base;
      id = search_address(addr, _shared_blobs_addr, _blobs_max);
      if (id < 0) {
        assert(false, "Address " INTPTR_FORMAT " for Blob:%s is missing in AOT Code Cache addresses table", p2i(addr), cb->name());
      } else {
        return id_base + id;
      }
    } else {
      // Search in runtime functions
      id = search_address(addr, _extrs_addr, _extrs_length);
      if (id < 0) {
        ResourceMark rm;
        const int buflen = 1024;
        char* func_name = NEW_RESOURCE_ARRAY(char, buflen);
        int offset = 0;
        if (os::dll_address_to_function_name(addr, func_name, buflen, &offset)) {
          if (offset > 0) {
            // Could be address of C string: encode it as the distance from
            // os::init.  The assert checks the distance is above all id
            // ranges so address_for_id() can distinguish the two encodings.
            uint dist = (uint)pointer_delta(addr, (address)os::init, 1);
            log_debug(aot, codecache)("Address " INTPTR_FORMAT " (offset %d) for runtime target '%s' is missing in AOT Code Cache addresses table",
                                      p2i(addr), dist, (const char*)addr);
            assert(dist > (uint)(_all_max + MAX_STR_COUNT), "change encoding of distance");
            return dist;
          }
#ifdef ASSERT
          // Dump the relocation and enclosing blob to aid diagnosis before
          // asserting on the unknown runtime target.
          reloc.print_current_on(tty);
          code_blob->print_on(tty);
          code_blob->print_code_on(tty);
          assert(false, "Address " INTPTR_FORMAT " for runtime target '%s+%d' is missing in AOT Code Cache addresses table", p2i(addr), func_name, offset);
#endif
        } else {
#ifdef ASSERT
          reloc.print_current_on(tty);
          code_blob->print_on(tty);
          code_blob->print_code_on(tty);
          os::find(addr, tty);
          assert(false, "Address " INTPTR_FORMAT " for <unknown>/('%s') is missing in AOT Code Cache addresses table", p2i(addr), (const char*)addr);
#endif
        }
      } else {
        return _extrs_base + id;
      }
    }
  }
  // Unresolved (product build with asserts compiled out): returns -1
  return id;
}
1817
// Singleton instance populated by initialize_from_runtime().
AOTRuntimeConstants AOTRuntimeConstants::_aot_runtime_constants;
1819
// Capture GC-dependent runtime constants (card table base and grain shift)
// from the active barrier set into _aot_runtime_constants.
void AOTRuntimeConstants::initialize_from_runtime() {
  BarrierSet* bs = BarrierSet::barrier_set();
  address card_table_base = nullptr;
  uint grain_shift = 0;
  // NOTE: the #if-guarded arms below chain into a single if/else-if cascade;
  // each GC-specific arm exists only when that GC is compiled into the build.
#if INCLUDE_G1GC
  if (bs->is_a(BarrierSet::G1BarrierSet)) {
    grain_shift = G1HeapRegion::LogOfHRGrainBytes;
  } else
#endif
#if INCLUDE_SHENANDOAHGC
  if (bs->is_a(BarrierSet::ShenandoahBarrierSet)) {
    grain_shift = 0; // explicitly no grain shift for Shenandoah
  } else
#endif
  if (bs->is_a(BarrierSet::CardTableBarrierSet)) {
    // Card-table GCs: record both the (constant) card table base and the
    // barrier set's grain shift.
    CardTable::CardValue* base = ci_card_table_address_const();
    assert(base != nullptr, "unexpected byte_map_base");
    card_table_base = base;
    CardTableBarrierSet* ctbs = barrier_set_cast<CardTableBarrierSet>(bs);
    grain_shift = ctbs->grain_shift();
  }
  _aot_runtime_constants._card_table_base = card_table_base;
  _aot_runtime_constants._grain_shift = grain_shift;
}
1844
// Null-terminated list of the addresses of the _aot_runtime_constants
// fields, in declaration order (_card_table_base, then _grain_shift).
address AOTRuntimeConstants::_field_addresses_list[] = {
  ((address)&_aot_runtime_constants._card_table_base),
  ((address)&_aot_runtime_constants._grain_shift),
  nullptr
};
1850
1851 address AOTRuntimeConstants::card_table_base_address() {
1852 assert(UseSerialGC || UseParallelGC, "Only these GCs have constant card table base");
1853 return (address)&_aot_runtime_constants._card_table_base;
1854 }
1855
1856 // This is called after initialize() but before init2()
1857 // and _cache is not set yet.
1858 void AOTCodeCache::print_on(outputStream* st) {
1859 if (opened_cache != nullptr && opened_cache->for_use()) {
1860 st->print_cr("\nAOT Code Cache");
1861 uint count = opened_cache->_load_header->entries_count();
1862 uint* search_entries = (uint*)opened_cache->addr(opened_cache->_load_header->entries_offset()); // [id, index]
1863 AOTCodeEntry* load_entries = (AOTCodeEntry*)(search_entries + 2 * count);
1864
1865 for (uint i = 0; i < count; i++) {
1866 // Use search_entries[] to order ouput
1867 int index = search_entries[2*i + 1];
1868 AOTCodeEntry* entry = &(load_entries[index]);
1869
1870 uint entry_position = entry->offset();
1871 uint name_offset = entry->name_offset() + entry_position;
1872 const char* saved_name = opened_cache->addr(name_offset);
1873
1874 st->print_cr("%4u: %10s idx:%4u Id:%u size=%u '%s'",
1875 i, aot_code_entry_kind_name[entry->kind()], index, entry->id(), entry->size(), saved_name);
1876 }
1877 }
1878 }