1 /*
2 * Copyright (c) 2023, 2026, Oracle and/or its affiliates. All rights reserved.
3 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
4 *
5 * This code is free software; you can redistribute it and/or modify it
6 * under the terms of the GNU General Public License version 2 only, as
7 * published by the Free Software Foundation.
8 *
9 * This code is distributed in the hope that it will be useful, but WITHOUT
10 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
11 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
12 * version 2 for more details (a copy is included in the LICENSE file that
13 * accompanied this code).
14 *
15 * You should have received a copy of the GNU General Public License version
16 * 2 along with this work; if not, write to the Free Software Foundation,
17 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
18 *
19 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
20 * or visit www.oracle.com if you need additional information or have any
21 * questions.
22 *
23 */
24
25
26 #include "asm/macroAssembler.hpp"
27 #include "cds/aotCacheAccess.hpp"
28 #include "cds/aotMetaspace.hpp"
29 #include "cds/cds_globals.hpp"
30 #include "cds/cdsConfig.hpp"
31 #include "cds/heapShared.hpp"
32 #include "ci/ciUtilities.hpp"
33 #include "classfile/javaAssertions.hpp"
34 #include "code/aotCodeCache.hpp"
35 #include "code/codeCache.hpp"
36 #include "gc/shared/cardTableBarrierSet.hpp"
37 #include "gc/shared/gcConfig.hpp"
38 #include "logging/logStream.hpp"
39 #include "memory/memoryReserver.hpp"
40 #include "runtime/deoptimization.hpp"
41 #include "runtime/flags/flagSetting.hpp"
42 #include "runtime/globals_extension.hpp"
43 #include "runtime/java.hpp"
44 #include "runtime/mutexLocker.hpp"
45 #include "runtime/os.inline.hpp"
46 #include "runtime/sharedRuntime.hpp"
47 #include "runtime/stubInfo.hpp"
48 #include "runtime/stubRoutines.hpp"
49 #include "utilities/copy.hpp"
50 #ifdef COMPILER1
51 #include "c1/c1_Runtime1.hpp"
52 #endif
53 #ifdef COMPILER2
54 #include "opto/runtime.hpp"
55 #endif
56 #if INCLUDE_G1GC
57 #include "gc/g1/g1BarrierSetRuntime.hpp"
58 #include "gc/g1/g1HeapRegion.hpp"
59 #endif
60 #if INCLUDE_SHENANDOAHGC
61 #include "gc/shenandoah/shenandoahRuntime.hpp"
62 #endif
63 #if INCLUDE_ZGC
64 #include "gc/z/zBarrierSetRuntime.hpp"
65 #endif
66
67 #include <errno.h>
68 #include <sys/stat.h>
69
// Human-readable names for AOTCodeEntry kinds, generated from the
// DO_AOTCODEENTRY_KIND x-macro so the table stays in sync with the enum.
const char* aot_code_entry_kind_name[] = {
#define DECL_KIND_STRING(kind) XSTR(kind),
  DO_AOTCODEENTRY_KIND(DECL_KIND_STRING)
#undef DECL_KIND_STRING
};
75
76 static void report_load_failure() {
77 if (AbortVMOnAOTCodeFailure) {
78 vm_exit_during_initialization("Unable to use AOT Code Cache.", nullptr);
79 }
80 log_info(aot, codecache, init)("Unable to use AOT Code Cache.");
81 AOTCodeCache::disable_caching();
82 }
83
84 static void report_store_failure() {
85 if (AbortVMOnAOTCodeFailure) {
86 tty->print_cr("Unable to create AOT Code Cache.");
87 vm_abort(false);
88 }
89 log_info(aot, codecache, exit)("Unable to create AOT Code Cache.");
90 AOTCodeCache::disable_caching();
91 }
92
// The sequence of AOT code caching flags and parameter settings.
94 //
95 // 1. The initial AOT code caching flags setting is done
96 // during call to CDSConfig::check_vm_args_consistency().
97 //
98 // 2. The earliest AOT code state check done in compilationPolicy_init()
99 // where we set number of compiler threads for AOT assembly phase.
100 //
// 3. We determine presence of AOT code in AOT Cache in
//    AOTMetaspace::open_static_archive() which is called
//    after compilationPolicy_init() but before codeCache_init().
104 //
105 // 4. AOTCodeCache::initialize() is called during universe_init()
106 // and does final AOT state and flags settings.
107 //
108 // 5. Finally AOTCodeCache::init2() is called after universe_init()
109 // when all GC settings are finalized.
110
111 // Next methods determine which action we do with AOT code depending
112 // on phase of AOT process: assembly or production.
113
114 bool AOTCodeCache::is_dumping_adapter() {
115 return AOTAdapterCaching && is_on_for_dump();
116 }
117
118 bool AOTCodeCache::is_using_adapter() {
119 return AOTAdapterCaching && is_on_for_use();
120 }
121
122 bool AOTCodeCache::is_dumping_stub() {
123 return AOTStubCaching && is_on_for_dump();
124 }
125
126 bool AOTCodeCache::is_using_stub() {
127 return AOTStubCaching && is_on_for_use();
128 }
129
// Next methods could be called regardless of AOT code cache status.
// Initially they are called during flags parsing and finalized
// in AOTCodeCache::initialize().
// Turn AOT code caching on ergonomically, but only for flags still at their
// default value — explicit command-line settings are respected.
void AOTCodeCache::enable_caching() {
  FLAG_SET_ERGO_IF_DEFAULT(AOTStubCaching, true);
  FLAG_SET_ERGO_IF_DEFAULT(AOTAdapterCaching, true);
}
137
// Turn AOT code caching off unconditionally (overrides command-line values).
void AOTCodeCache::disable_caching() {
  FLAG_SET_ERGO(AOTStubCaching, false);
  FLAG_SET_ERGO(AOTAdapterCaching, false);
}
142
143 bool AOTCodeCache::is_caching_enabled() {
144 return AOTStubCaching || AOTAdapterCaching;
145 }
146
147 static uint32_t encode_id(AOTCodeEntry::Kind kind, int id) {
148 assert(AOTCodeEntry::is_valid_entry_kind(kind), "invalid AOTCodeEntry kind %d", (int)kind);
149 // There can be a conflict of id between an Adapter and *Blob, but that should not cause any functional issue
150 // becasue both id and kind are used to find an entry, and that combination should be unique
151 if (kind == AOTCodeEntry::Adapter) {
152 return id;
153 } else if (kind == AOTCodeEntry::SharedBlob) {
154 assert(StubInfo::is_shared(static_cast<BlobId>(id)), "not a shared blob id %d", id);
155 return id;
156 } else if (kind == AOTCodeEntry::C1Blob) {
157 assert(StubInfo::is_c1(static_cast<BlobId>(id)), "not a c1 blob id %d", id);
158 return id;
159 } else {
160 // kind must be AOTCodeEntry::C2Blob
161 assert(StubInfo::is_c2(static_cast<BlobId>(id)), "not a c2 blob id %d", id);
162 return id;
163 }
164 }
165
// Upper bound (in bytes) on the AOT code region; set once in initialize()
// from AOTCodeMaxSize (aligned up to allocation granularity if necessary).
static uint _max_aot_code_size = 0;
uint AOTCodeCache::max_aot_code_size() {
  return _max_aot_code_size;
}
170
// It is called from AOTMetaspace::initialize_shared_spaces()
// which is called from universe_init().
// At this point all AOT class linking settings are finalized
// and AOT cache is open so we can map AOT code region.
175 void AOTCodeCache::initialize() {
176 #if defined(ZERO) || !(defined(AMD64) || defined(AARCH64))
177 log_info(aot, codecache, init)("AOT Code Cache is not supported on this platform.");
178 disable_caching();
179 return;
180 #else
181 if (FLAG_IS_DEFAULT(AOTCache)) {
182 log_info(aot, codecache, init)("AOT Code Cache is not used: AOTCache is not specified.");
183 disable_caching();
184 return; // AOTCache must be specified to dump and use AOT code
185 }
186
187 // Disable stubs caching until JDK-8357398 is fixed.
188 FLAG_SET_ERGO(AOTStubCaching, false);
189
190 if (VerifyOops) {
191 // Disable AOT stubs caching when VerifyOops flag is on.
192 // Verify oops code generated a lot of C strings which overflow
193 // AOT C string table (which has fixed size).
194 // AOT C string table will be reworked later to handle such cases.
195 //
196 // Note: AOT adapters are not affected - they don't have oop operations.
197 log_info(aot, codecache, init)("AOT Stubs Caching is not supported with VerifyOops.");
198 FLAG_SET_ERGO(AOTStubCaching, false);
199 }
200
201 bool is_dumping = false;
202 bool is_using = false;
203 if (CDSConfig::is_dumping_final_static_archive() && CDSConfig::is_dumping_aot_linked_classes()) {
204 is_dumping = true;
205 enable_caching();
206 is_dumping = is_caching_enabled();
207 } else if (CDSConfig::is_using_archive() && CDSConfig::is_using_aot_linked_classes()) {
208 enable_caching();
209 is_using = is_caching_enabled();
210 } else {
211 log_info(aot, codecache, init)("AOT Code Cache is not used: AOT Class Linking is not used.");
212 disable_caching();
213 return; // nothing to do
214 }
215 if (!(is_dumping || is_using)) {
216 disable_caching();
217 return; // AOT code caching disabled on command line
218 }
219 _max_aot_code_size = AOTCodeMaxSize;
220 if (!FLAG_IS_DEFAULT(AOTCodeMaxSize)) {
221 if (!is_aligned(AOTCodeMaxSize, os::vm_allocation_granularity())) {
222 _max_aot_code_size = align_up(AOTCodeMaxSize, os::vm_allocation_granularity());
223 log_debug(aot,codecache,init)("Max AOT Code Cache size is aligned up to %uK", (int)(max_aot_code_size()/K));
224 }
225 }
226 size_t aot_code_size = is_using ? AOTCacheAccess::get_aot_code_region_size() : 0;
227 if (is_using && aot_code_size == 0) {
228 log_info(aot, codecache, init)("AOT Code Cache is empty");
229 disable_caching();
230 return;
231 }
232 if (!open_cache(is_dumping, is_using)) {
233 if (is_using) {
234 report_load_failure();
235 } else {
236 report_store_failure();
237 }
238 return;
239 }
240 if (is_dumping) {
241 FLAG_SET_DEFAULT(ForceUnreachable, true);
242 }
243 FLAG_SET_DEFAULT(DelayCompilerStubsGeneration, false);
244 #endif // defined(AMD64) || defined(AARCH64)
245 }
246
// Cache instance created by open_cache(); published to _cache only after
// verify_config() succeeds in init2().
static AOTCodeCache* opened_cache = nullptr; // Use this until we verify the cache
AOTCodeCache* AOTCodeCache::_cache = nullptr;
// Debug-only marker so asserts can check that init2() has already run.
DEBUG_ONLY( bool AOTCodeCache::_passed_init2 = false; )
250
251 // It is called after universe_init() when all GC settings are finalized.
// It is called after universe_init() when all GC settings are finalized.
// Verifies the opened cache's recorded configuration against this runtime
// and, on success, publishes it as the active cache (_cache).
void AOTCodeCache::init2() {
  DEBUG_ONLY( _passed_init2 = true; )
  if (opened_cache == nullptr) {
    return; // initialize() did not open a cache
  }
  if (!opened_cache->verify_config()) {
    // Recorded configuration is incompatible with this runtime; discard.
    delete opened_cache;
    opened_cache = nullptr;
    report_load_failure();
    return;
  }

  // initialize aot runtime constants as appropriate to this runtime
  AOTRuntimeConstants::initialize_from_runtime();

  // initialize the table of external routines so we can save
  // generated code blobs that reference them
  AOTCodeAddressTable* table = opened_cache->_table;
  assert(table != nullptr, "should be initialized already");
  table->init_extrs();

  // Now cache and address table are ready for AOT code generation
  _cache = opened_cache;
}
276
277 bool AOTCodeCache::open_cache(bool is_dumping, bool is_using) {
278 opened_cache = new AOTCodeCache(is_dumping, is_using);
279 if (opened_cache->failed()) {
280 delete opened_cache;
281 opened_cache = nullptr;
282 return false;
283 }
284 return true;
285 }
286
287 void AOTCodeCache::close() {
288 if (is_on()) {
289 delete _cache; // Free memory
290 _cache = nullptr;
291 opened_cache = nullptr;
292 }
293 }
294
295 #define DATA_ALIGNMENT HeapWordSize
296
297 AOTCodeCache::AOTCodeCache(bool is_dumping, bool is_using) :
298 _load_header(nullptr),
299 _load_buffer(nullptr),
300 _store_buffer(nullptr),
301 _C_store_buffer(nullptr),
302 _write_position(0),
303 _load_size(0),
304 _store_size(0),
305 _for_use(is_using),
306 _for_dump(is_dumping),
307 _closing(false),
308 _failed(false),
309 _lookup_failed(false),
310 _table(nullptr),
311 _load_entries(nullptr),
312 _search_entries(nullptr),
313 _store_entries(nullptr),
314 _C_strings_buf(nullptr),
315 _store_entries_cnt(0)
316 {
317 // Read header at the begining of cache
318 if (_for_use) {
319 // Read cache
320 size_t load_size = AOTCacheAccess::get_aot_code_region_size();
321 ReservedSpace rs = MemoryReserver::reserve(load_size, mtCode);
322 if (!rs.is_reserved()) {
323 log_warning(aot, codecache, init)("Failed to reserved %u bytes of memory for mapping AOT code region into AOT Code Cache", (uint)load_size);
324 set_failed();
325 return;
326 }
327 if (!AOTCacheAccess::map_aot_code_region(rs)) {
328 log_warning(aot, codecache, init)("Failed to read/mmap cached code region into AOT Code Cache");
329 set_failed();
330 return;
331 }
332
333 _load_size = (uint)load_size;
334 _load_buffer = (char*)rs.base();
335 assert(is_aligned(_load_buffer, DATA_ALIGNMENT), "load_buffer is not aligned");
336 log_debug(aot, codecache, init)("Mapped %u bytes at address " INTPTR_FORMAT " at AOT Code Cache", _load_size, p2i(_load_buffer));
337
338 _load_header = (Header*)addr(0);
339 if (!_load_header->verify(_load_size)) {
340 set_failed();
341 return;
342 }
343 log_info (aot, codecache, init)("Loaded %u AOT code entries from AOT Code Cache", _load_header->entries_count());
344 log_debug(aot, codecache, init)(" Adapters: total=%u", _load_header->adapters_count());
345 log_debug(aot, codecache, init)(" Shared Blobs: total=%u", _load_header->shared_blobs_count());
346 log_debug(aot, codecache, init)(" C1 Blobs: total=%u", _load_header->C1_blobs_count());
347 log_debug(aot, codecache, init)(" C2 Blobs: total=%u", _load_header->C2_blobs_count());
348 log_debug(aot, codecache, init)(" AOT code cache size: %u bytes", _load_header->cache_size());
349
350 // Read strings
351 load_strings();
352 }
353 if (_for_dump) {
354 _C_store_buffer = NEW_C_HEAP_ARRAY(char, max_aot_code_size() + DATA_ALIGNMENT, mtCode);
355 _store_buffer = align_up(_C_store_buffer, DATA_ALIGNMENT);
356 // Entries allocated at the end of buffer in reverse (as on stack).
357 _store_entries = (AOTCodeEntry*)align_up(_C_store_buffer + max_aot_code_size(), DATA_ALIGNMENT);
358 log_debug(aot, codecache, init)("Allocated store buffer at address " INTPTR_FORMAT " of size %u", p2i(_store_buffer), max_aot_code_size());
359 }
360 _table = new AOTCodeAddressTable();
361 }
362
363 void AOTCodeCache::init_early_stubs_table() {
364 AOTCodeAddressTable* table = addr_table();
365 if (table != nullptr) {
366 table->init_early_stubs();
367 }
368 }
369
370 void AOTCodeCache::init_shared_blobs_table() {
371 AOTCodeAddressTable* table = addr_table();
372 if (table != nullptr) {
373 table->init_shared_blobs();
374 }
375 }
376
377 void AOTCodeCache::init_early_c1_table() {
378 AOTCodeAddressTable* table = addr_table();
379 if (table != nullptr) {
380 table->init_early_c1();
381 }
382 }
383
// Destructor: flushes pending writes in dump mode and frees buffers/table.
// Safe to call more than once via close() because of the _closing guard.
AOTCodeCache::~AOTCodeCache() {
  if (_closing) {
    return; // Already closed
  }
  // Stop any further access to cache.
  _closing = true;

  MutexLocker ml(Compile_lock);
  if (for_dump()) { // Finalize cache
    finish_write();
  }
  // _load_buffer points into the mapped AOT region, not C heap — just drop it.
  _load_buffer = nullptr;
  if (_C_store_buffer != nullptr) {
    FREE_C_HEAP_ARRAY(char, _C_store_buffer);
    _C_store_buffer = nullptr;
    _store_buffer = nullptr;
  }
  if (_table != nullptr) {
    // NOTE(review): AOTCodeCStrings_lock presumably guards concurrent access
    // to the C-string table inside _table — confirm against its readers.
    MutexLocker ml(AOTCodeCStrings_lock, Mutex::_no_safepoint_check_flag);
    delete _table;
    _table = nullptr;
  }
}
407
408 void AOTCodeCache::Config::record(uint cpu_features_offset) {
409 _flags = 0;
410 #ifdef ASSERT
411 _flags |= debugVM;
412 #endif
413 if (UseCompressedOops) {
414 _flags |= compressedOops;
415 }
416 if (UseCompressedClassPointers) {
417 _flags |= compressedClassPointers;
418 }
419 if (UseTLAB) {
420 _flags |= useTLAB;
421 }
422 if (JavaAssertions::systemClassDefault()) {
423 _flags |= systemClassAssertions;
424 }
425 if (JavaAssertions::userClassDefault()) {
426 _flags |= userClassAssertions;
427 }
428 if (EnableContended) {
429 _flags |= enableContendedPadding;
430 }
431 if (RestrictContended) {
432 _flags |= restrictContendedPadding;
433 }
434 _compressedOopShift = CompressedOops::shift();
435 _compressedOopBase = CompressedOops::base();
436 _compressedKlassShift = CompressedKlassPointers::shift();
437 _contendedPaddingWidth = ContendedPaddingWidth;
438 _gc = (uint)Universe::heap()->kind();
439 _cpu_features_offset = cpu_features_offset;
440 }
441
// Check that the CPU features recorded at dump time are a subset of this
// machine's features. Returns false (cache unusable) when any recorded
// feature is missing at runtime; extra runtime features are only logged.
bool AOTCodeCache::Config::verify_cpu_features(AOTCodeCache* cache) const {
  LogStreamHandle(Debug, aot, codecache, init) log;
  // Recorded layout: uint size followed by the feature bits (see
  // store_cpu_features()).
  uint offset = _cpu_features_offset;
  uint cpu_features_size = *(uint *)cache->addr(offset);
  assert(cpu_features_size == (uint)VM_Version::cpu_features_size(), "must be");
  offset += sizeof(uint);

  void* cached_cpu_features_buffer = (void *)cache->addr(offset);
  if (log.is_enabled()) {
    ResourceMark rm; // required for stringStream::as_string()
    stringStream ss;
    VM_Version::get_cpu_features_name(cached_cpu_features_buffer, ss);
    log.print_cr("CPU features recorded in AOTCodeCache: %s", ss.as_string());
  }

  if (VM_Version::supports_features(cached_cpu_features_buffer)) {
    // Compatible; optionally log features this CPU has beyond the recorded set.
    if (log.is_enabled()) {
      ResourceMark rm; // required for stringStream::as_string()
      stringStream ss;
      char* runtime_cpu_features = NEW_RESOURCE_ARRAY(char, VM_Version::cpu_features_size());
      VM_Version::store_cpu_features(runtime_cpu_features);
      VM_Version::get_missing_features_name(runtime_cpu_features, cached_cpu_features_buffer, ss);
      if (!ss.is_empty()) {
        log.print_cr("Additional runtime CPU features: %s", ss.as_string());
      }
    }
  } else {
    // Incompatible; log which recorded features are missing on this CPU.
    if (log.is_enabled()) {
      ResourceMark rm; // required for stringStream::as_string()
      stringStream ss;
      char* runtime_cpu_features = NEW_RESOURCE_ARRAY(char, VM_Version::cpu_features_size());
      VM_Version::store_cpu_features(runtime_cpu_features);
      VM_Version::get_missing_features_name(cached_cpu_features_buffer, runtime_cpu_features, ss);
      log.print_cr("AOT Code Cache disabled: required cpu features are missing: %s", ss.as_string());
    }
    return false;
  }
  return true;
}
481
// Verify the recorded configuration against this runtime. Hard mismatches
// (build kind, GC, compressed class pointers, CPU features) reject the whole
// cache; softer mismatches only disable stub/adapter caching via flags.
bool AOTCodeCache::Config::verify(AOTCodeCache* cache) const {
  // First checks affect all cached AOT code
#ifdef ASSERT
  if ((_flags & debugVM) == 0) {
    log_debug(aot, codecache, init)("AOT Code Cache disabled: it was created by product VM, it can't be used by debug VM");
    return false;
  }
#else
  if ((_flags & debugVM) != 0) {
    log_debug(aot, codecache, init)("AOT Code Cache disabled: it was created by debug VM, it can't be used by product VM");
    return false;
  }
#endif

  CollectedHeap::Name aot_gc = (CollectedHeap::Name)_gc;
  if (aot_gc != Universe::heap()->kind()) {
    log_debug(aot, codecache, init)("AOT Code Cache disabled: it was created with different GC: %s vs current %s", GCConfig::hs_err_name(aot_gc), GCConfig::hs_err_name());
    return false;
  }

  // Note: when the settings differ, the archived value is the negation of the
  // current one — hence the inverted "false"/"true" strings below.
  if (((_flags & compressedClassPointers) != 0) != UseCompressedClassPointers) {
    log_debug(aot, codecache, init)("AOT Code Cache disabled: it was created with UseCompressedClassPointers = %s", UseCompressedClassPointers ? "false" : "true");
    return false;
  }
  if (_compressedKlassShift != (uint)CompressedKlassPointers::shift()) {
    log_debug(aot, codecache, init)("AOT Code Cache disabled: it was created with CompressedKlassPointers::shift() = %d vs current %d", _compressedKlassShift, CompressedKlassPointers::shift());
    return false;
  }

  // The following checks do not affect AOT code, but can disable
  // AOT stub/adapters caching if they are incompatible with runtime settings
  // (adapters too as they access oops when buffering scalarized value objects).

  if (((_flags & compressedOops) != 0) != UseCompressedOops) {
    log_debug(aot, codecache, init)("AOT Stub/Adapter Cache disabled: it was created with UseCompressedOops = %s", UseCompressedOops ? "false" : "true");
    AOTStubCaching = false;
    if (InlineTypePassFieldsAsArgs) {
      AOTAdapterCaching = false;
    }
  }
  if (_compressedOopShift != (uint)CompressedOops::shift()) {
    log_debug(aot, codecache, init)("AOT Stub/Adapter Cache disabled: it was created with different CompressedOops::shift(): %d vs current %d", _compressedOopShift, CompressedOops::shift());
    AOTStubCaching = false;
    if (InlineTypePassFieldsAsArgs) {
      AOTAdapterCaching = false;
    }
  }

  // This should be the last check as it only disables AOTStub/AdapterCaching.
  // Bases are considered compatible when both are non-null (code is
  // base-relative) or both null; a null/non-null mismatch disables caching.
  if ((_compressedOopBase == nullptr || CompressedOops::base() == nullptr) && (_compressedOopBase != CompressedOops::base())) {
    log_debug(aot, codecache, init)("AOT Stub/Adapter Cache disabled: incompatible CompressedOops::base(): %p vs current %p", _compressedOopBase, CompressedOops::base());
    AOTStubCaching = false;
    if (InlineTypePassFieldsAsArgs) {
      AOTAdapterCaching = false;
    }
  }

  if (!verify_cpu_features(cache)) {
    return false;
  }
  return true;
}
544
// Sanity-check a mapped cache header: the recorded format version must match
// this VM's AOT_CODE_VERSION, and the mapped region must be large enough to
// hold the recorded cache contents.
bool AOTCodeCache::Header::verify(uint load_size) const {
  if (_version != AOT_CODE_VERSION) {
    log_debug(aot, codecache, init)("AOT Code Cache disabled: different AOT Code version %d vs %d recorded in AOT Code header", AOT_CODE_VERSION, _version);
    return false;
  }
  if (load_size < _cache_size) {
    log_debug(aot, codecache, init)("AOT Code Cache disabled: AOT Code Cache size %d < %d recorded in AOT Code header", load_size, _cache_size);
    return false;
  }
  return true;
}
556
557 AOTCodeCache* AOTCodeCache::open_for_use() {
558 if (AOTCodeCache::is_on_for_use()) {
559 return AOTCodeCache::cache();
560 }
561 return nullptr;
562 }
563
564 AOTCodeCache* AOTCodeCache::open_for_dump() {
565 if (AOTCodeCache::is_on_for_dump()) {
566 AOTCodeCache* cache = AOTCodeCache::cache();
567 cache->clear_lookup_failed(); // Reset bit
568 return cache;
569 }
570 return nullptr;
571 }
572
// memcpy wrapper with trace logging; callers must pass a non-zero size
// (asserted via the signed cast, which also rejects sizes >= 2GB).
void copy_bytes(const char* from, address to, uint size) {
  assert((int)size > 0, "sanity");
  memcpy(to, from, size);
  log_trace(aot, codecache)("Copied %d bytes from " INTPTR_FORMAT " to " INTPTR_FORMAT, size, p2i(from), p2i(to));
}
578
// Reader over a single cached entry; read positions are byte offsets into
// the cache's mapped load buffer.
AOTCodeReader::AOTCodeReader(AOTCodeCache* cache, AOTCodeEntry* entry) {
  _cache = cache;
  _entry = entry;
  _load_buffer = cache->cache_buffer();
  _read_position = 0;
  _lookup_failed = false;
}
586
587 void AOTCodeReader::set_read_position(uint pos) {
588 if (pos == _read_position) {
589 return;
590 }
591 assert(pos < _cache->load_size(), "offset:%d >= file size:%d", pos, _cache->load_size());
592 _read_position = pos;
593 }
594
// Reposition the write cursor (used to patch previously written data).
// Before moving, the current high-water mark is captured in _store_size so
// the final stored size still covers everything written so far.
bool AOTCodeCache::set_write_position(uint pos) {
  if (pos == _write_position) {
    return true;
  }
  if (_store_size < _write_position) {
    _store_size = _write_position; // Adjust during write
  }
  assert(pos < _store_size, "offset:%d >= file size:%d", pos, _store_size);
  _write_position = pos;
  return true;
}
606
607 static char align_buffer[256] = { 0 };
608
609 bool AOTCodeCache::align_write() {
610 // We are not executing code from cache - we copy it by bytes first.
611 // No need for big alignment (or at all).
612 uint padding = DATA_ALIGNMENT - (_write_position & (DATA_ALIGNMENT - 1));
613 if (padding == DATA_ALIGNMENT) {
614 return true;
615 }
616 uint n = write_bytes((const void*)&align_buffer, padding);
617 if (n != padding) {
618 return false;
619 }
620 log_trace(aot, codecache)("Adjust write alignment in AOT Code Cache");
621 return true;
622 }
623
624 // Check to see if AOT code cache has required space to store "nbytes" of data
625 address AOTCodeCache::reserve_bytes(uint nbytes) {
626 assert(for_dump(), "Code Cache file is not created");
627 uint new_position = _write_position + nbytes;
628 if (new_position >= (uint)((char*)_store_entries - _store_buffer)) {
629 log_warning(aot,codecache)("Failed to ensure %d bytes at offset %d in AOT Code Cache. Increase AOTCodeMaxSize.",
630 nbytes, _write_position);
631 set_failed();
632 report_store_failure();
633 return nullptr;
634 }
635 address buffer = (address)(_store_buffer + _write_position);
636 log_trace(aot, codecache)("Reserved %d bytes at offset %d in AOT Code Cache", nbytes, _write_position);
637 _write_position += nbytes;
638 if (_store_size < _write_position) {
639 _store_size = _write_position;
640 }
641 return buffer;
642 }
643
644 uint AOTCodeCache::write_bytes(const void* buffer, uint nbytes) {
645 assert(for_dump(), "Code Cache file is not created");
646 if (nbytes == 0) {
647 return 0;
648 }
649 uint new_position = _write_position + nbytes;
650 if (new_position >= (uint)((char*)_store_entries - _store_buffer)) {
651 log_warning(aot, codecache)("Failed to write %d bytes at offset %d to AOT Code Cache. Increase AOTCodeMaxSize.",
652 nbytes, _write_position);
653 set_failed();
654 report_store_failure();
655 return 0;
656 }
657 copy_bytes((const char* )buffer, (address)(_store_buffer + _write_position), nbytes);
658 log_trace(aot, codecache)("Wrote %d bytes at offset %d to AOT Code Cache", nbytes, _write_position);
659 _write_position += nbytes;
660 if (_store_size < _write_position) {
661 _store_size = _write_position;
662 }
663 return nbytes;
664 }
665
// Placement new: AOTCodeEntry instances live inside the cache's store
// buffer (allocated from the top, growing down), not on the C heap.
void* AOTCodeEntry::operator new(size_t x, AOTCodeCache* cache) {
  return (void*)(cache->add_entry());
}
669
670 static bool check_entry(AOTCodeEntry::Kind kind, uint id, AOTCodeEntry* entry) {
671 if (entry->kind() == kind) {
672 assert(entry->id() == id, "sanity");
673 return true; // Found
674 }
675 return false;
676 }
677
// Look up a cached entry by (kind, id). The search table is a sorted array
// of [id, index] pairs; binary-search by id, then scan neighbors with equal
// ids to resolve kind collisions (an Adapter id may equal a Blob id).
AOTCodeEntry* AOTCodeCache::find_entry(AOTCodeEntry::Kind kind, uint id) {
  assert(_for_use, "sanity");
  uint count = _load_header->entries_count();
  if (_load_entries == nullptr) {
    // Lazily locate the search table and entry array in the mapped region.
    _search_entries = (uint*)addr(_load_header->entries_offset()); // [id, index]
    _load_entries = (AOTCodeEntry*)(_search_entries + 2 * count);
    log_debug(aot, codecache, init)("Read %d entries table at offset %d from AOT Code Cache", count, _load_header->entries_offset());
  }
  // Binary search
  int l = 0;
  int h = count - 1;
  while (l <= h) {
    int mid = (l + h) >> 1;
    int ix = mid * 2;
    uint is = _search_entries[ix];
    if (is == id) {
      int index = _search_entries[ix + 1];
      AOTCodeEntry* entry = &(_load_entries[index]);
      if (check_entry(kind, id, entry)) {
        return entry; // Found
      }
      // Linear search around to handle id collision
      for (int i = mid - 1; i >= l; i--) { // search back
        ix = i * 2;
        is = _search_entries[ix];
        if (is != id) {
          break;
        }
        index = _search_entries[ix + 1];
        AOTCodeEntry* entry = &(_load_entries[index]);
        if (check_entry(kind, id, entry)) {
          return entry; // Found
        }
      }
      for (int i = mid + 1; i <= h; i++) { // search forward
        ix = i * 2;
        is = _search_entries[ix];
        if (is != id) {
          break;
        }
        index = _search_entries[ix + 1];
        AOTCodeEntry* entry = &(_load_entries[index]);
        if (check_entry(kind, id, entry)) {
          return entry; // Found
        }
      }
      break; // Not found match
    } else if (is < id) {
      l = mid + 1;
    } else {
      h = mid - 1;
    }
  }
  return nullptr;
}
734
extern "C" {
  // qsort comparator for the [id, index] search table: orders elements by
  // the leading uint (the id); the trailing index word is ignored.
  static int uint_cmp(const void *i, const void *j) {
    const uint first  = *(const uint*)i;
    const uint second = *(const uint*)j;
    if (first == second) {
      return 0;
    }
    return (first < second) ? -1 : 1;
  }
}
742
// Write the CPU feature set at 'buffer': a uint length word followed by the
// feature bits. Advances 'buffer' (by reference) past the data, aligned up
// to DATA_ALIGNMENT. Layout is read back by Config::verify_cpu_features().
void AOTCodeCache::store_cpu_features(char*& buffer, uint buffer_size) {
  uint* size_ptr = (uint *)buffer;
  *size_ptr = buffer_size;
  buffer += sizeof(uint);

  VM_Version::store_cpu_features(buffer);
  log_debug(aot, codecache, exit)("CPU features recorded in AOTCodeCache: %s", VM_Version::features_string());
  buffer += buffer_size;
  buffer = align_up(buffer, DATA_ALIGNMENT);
}
753
754 bool AOTCodeCache::finish_write() {
755 if (!align_write()) {
756 return false;
757 }
758 uint strings_offset = _write_position;
759 int strings_count = store_strings();
760 if (strings_count < 0) {
761 return false;
762 }
763 if (!align_write()) {
764 return false;
765 }
766 uint strings_size = _write_position - strings_offset;
767
768 uint entries_count = 0; // Number of entrant (useful) code entries
769 uint entries_offset = _write_position;
770
771 uint store_count = _store_entries_cnt;
772 if (store_count > 0) {
773 uint header_size = (uint)align_up(sizeof(AOTCodeCache::Header), DATA_ALIGNMENT);
774 uint code_count = store_count;
775 uint search_count = code_count * 2;
776 uint search_size = search_count * sizeof(uint);
777 uint entries_size = (uint)align_up(code_count * sizeof(AOTCodeEntry), DATA_ALIGNMENT); // In bytes
778 // _write_position includes size of code and strings
779 uint code_alignment = code_count * DATA_ALIGNMENT; // We align_up code size when storing it.
780 uint cpu_features_size = VM_Version::cpu_features_size();
781 uint total_cpu_features_size = sizeof(uint) + cpu_features_size; // sizeof(uint) to store cpu_features_size
782 uint total_size = header_size + _write_position + code_alignment + search_size + entries_size +
783 align_up(total_cpu_features_size, DATA_ALIGNMENT);
784 assert(total_size < max_aot_code_size(), "AOT Code size (" UINT32_FORMAT " bytes) is greater than AOTCodeMaxSize(" UINT32_FORMAT " bytes).", total_size, max_aot_code_size());
785
786 // Allocate in AOT Cache buffer
787 char* buffer = (char *)AOTCacheAccess::allocate_aot_code_region(total_size + DATA_ALIGNMENT);
788 char* start = align_up(buffer, DATA_ALIGNMENT);
789 char* current = start + header_size; // Skip header
790
791 uint cpu_features_offset = current - start;
792 store_cpu_features(current, cpu_features_size);
793 assert(is_aligned(current, DATA_ALIGNMENT), "sanity check");
794 assert(current < start + total_size, "sanity check");
795
796 // Create ordered search table for entries [id, index];
797 uint* search = NEW_C_HEAP_ARRAY(uint, search_count, mtCode);
798
799 AOTCodeEntry* entries_address = _store_entries; // Pointer to latest entry
800 uint adapters_count = 0;
801 uint shared_blobs_count = 0;
802 uint C1_blobs_count = 0;
803 uint C2_blobs_count = 0;
804 uint max_size = 0;
805 // AOTCodeEntry entries were allocated in reverse in store buffer.
806 // Process them in reverse order to cache first code first.
807 for (int i = store_count - 1; i >= 0; i--) {
808 entries_address[i].set_next(nullptr); // clear pointers before storing data
809 uint size = align_up(entries_address[i].size(), DATA_ALIGNMENT);
810 if (size > max_size) {
811 max_size = size;
812 }
813 copy_bytes((_store_buffer + entries_address[i].offset()), (address)current, size);
814 entries_address[i].set_offset(current - start); // New offset
815 current += size;
816 uint n = write_bytes(&(entries_address[i]), sizeof(AOTCodeEntry));
817 if (n != sizeof(AOTCodeEntry)) {
818 FREE_C_HEAP_ARRAY(uint, search);
819 return false;
820 }
821 search[entries_count*2 + 0] = entries_address[i].id();
822 search[entries_count*2 + 1] = entries_count;
823 entries_count++;
824 AOTCodeEntry::Kind kind = entries_address[i].kind();
825 if (kind == AOTCodeEntry::Adapter) {
826 adapters_count++;
827 } else if (kind == AOTCodeEntry::SharedBlob) {
828 shared_blobs_count++;
829 } else if (kind == AOTCodeEntry::C1Blob) {
830 C1_blobs_count++;
831 } else if (kind == AOTCodeEntry::C2Blob) {
832 C2_blobs_count++;
833 }
834 }
835 if (entries_count == 0) {
836 log_info(aot, codecache, exit)("AOT Code Cache was not created: no entires");
837 FREE_C_HEAP_ARRAY(uint, search);
838 return true; // Nothing to write
839 }
840 assert(entries_count <= store_count, "%d > %d", entries_count, store_count);
841 // Write strings
842 if (strings_count > 0) {
843 copy_bytes((_store_buffer + strings_offset), (address)current, strings_size);
844 strings_offset = (current - start); // New offset
845 current += strings_size;
846 }
847
848 uint new_entries_offset = (current - start); // New offset
849 // Sort and store search table
850 qsort(search, entries_count, 2*sizeof(uint), uint_cmp);
851 search_size = 2 * entries_count * sizeof(uint);
852 copy_bytes((const char*)search, (address)current, search_size);
853 FREE_C_HEAP_ARRAY(uint, search);
854 current += search_size;
855
856 // Write entries
857 entries_size = entries_count * sizeof(AOTCodeEntry); // New size
858 copy_bytes((_store_buffer + entries_offset), (address)current, entries_size);
859 current += entries_size;
860 uint size = (current - start);
861 assert(size <= total_size, "%d > %d", size , total_size);
862
863 log_debug(aot, codecache, exit)(" Adapters: total=%u", adapters_count);
864 log_debug(aot, codecache, exit)(" Shared Blobs: total=%d", shared_blobs_count);
865 log_debug(aot, codecache, exit)(" C1 Blobs: total=%d", C1_blobs_count);
866 log_debug(aot, codecache, exit)(" C2 Blobs: total=%d", C2_blobs_count);
867 log_debug(aot, codecache, exit)(" AOT code cache size: %u bytes, max entry's size: %u bytes", size, max_size);
868
869 // Finalize header
870 AOTCodeCache::Header* header = (AOTCodeCache::Header*)start;
871 header->init(size, (uint)strings_count, strings_offset,
872 entries_count, new_entries_offset,
873 adapters_count, shared_blobs_count,
874 C1_blobs_count, C2_blobs_count, cpu_features_offset);
875
876 log_info(aot, codecache, exit)("Wrote %d AOT code entries to AOT Code Cache", entries_count);
877 }
878 return true;
879 }
880
881 //------------------Store/Load AOT code ----------------------
882
// Store a generated CodeBlob (adapter or VM stub blob) into the AOT Code Cache.
// The record written at an aligned position consists of: the blob's name, the
// archived CodeBlob image, its relocation data, an optional oop map set,
// (non-product) asm remarks and debug strings, and finally the extra
// relocation data produced by write_relocations(). On success an AOTCodeEntry
// describing the record is registered with the cache.
// Returns false when the cache is unavailable or closed, this entry kind is
// not being dumped, or any write fails.
bool AOTCodeCache::store_code_blob(CodeBlob& blob, AOTCodeEntry::Kind entry_kind, uint id, const char* name) {
  AOTCodeCache* cache = open_for_dump();
  if (cache == nullptr) {
    return false;
  }
  assert(AOTCodeEntry::is_valid_entry_kind(entry_kind), "invalid entry_kind %d", entry_kind);

  // Skip kinds that the current dump configuration does not collect.
  if (AOTCodeEntry::is_adapter(entry_kind) && !is_dumping_adapter()) {
    return false;
  }
  if (AOTCodeEntry::is_blob(entry_kind) && !is_dumping_stub()) {
    return false;
  }
  log_debug(aot, codecache, stubs)("Writing blob '%s' (id=%u, kind=%s) to AOT Code Cache", name, id, aot_code_entry_kind_name[entry_kind]);

#ifdef ASSERT
  LogStreamHandle(Trace, aot, codecache, stubs) log;
  if (log.is_enabled()) {
    // Temporarily enable relocation printing for the trace dump.
    FlagSetting fs(PrintRelocations, true);
    blob.print_on(&log);
  }
#endif
  // we need to take a lock to prevent race between compiler threads generating AOT code
  // and the main thread generating adapter
  MutexLocker ml(Compile_lock);
  if (!is_on()) {
    return false; // AOT code cache was already dumped and closed.
  }
  if (!cache->align_write()) {
    return false;
  }
  // All offsets stored in the AOTCodeEntry are relative to this position.
  uint entry_position = cache->_write_position;

  // Write name
  uint name_offset = cache->_write_position - entry_position;
  uint name_size = (uint)strlen(name) + 1; // Includes '\0'
  uint n = cache->write_bytes(name, name_size);
  if (n != name_size) {
    return false;
  }

  // Write CodeBlob
  if (!cache->align_write()) {
    return false;
  }
  uint blob_offset = cache->_write_position - entry_position;
  address archive_buffer = cache->reserve_bytes(blob.size());
  if (archive_buffer == nullptr) {
    return false;
  }
  // Copy the blob into the reserved region in its archived (relocatable) form.
  CodeBlob::archive_blob(&blob, archive_buffer);

  uint reloc_data_size = blob.relocation_size();
  n = cache->write_bytes((address)blob.relocation_begin(), reloc_data_size);
  if (n != reloc_data_size) {
    return false;
  }

  bool has_oop_maps = false;
  if (blob.oop_maps() != nullptr) {
    if (!cache->write_oop_map_set(blob)) {
      return false;
    }
    has_oop_maps = true;
  }

#ifndef PRODUCT
  // Write asm remarks
  if (!cache->write_asm_remarks(blob)) {
    return false;
  }
  if (!cache->write_dbg_strings(blob)) {
    return false;
  }
#endif /* PRODUCT */

  if (!cache->write_relocations(blob)) {
    if (!cache->failed()) {
      // We may miss an address in AOT table - skip this code blob.
      // Rewind the write position so the partial record is discarded.
      cache->set_write_position(entry_position);
    }
    return false;
  }

  // Register the completed record; AOTCodeEntry is allocated inside the cache.
  uint entry_size = cache->_write_position - entry_position;
  AOTCodeEntry* entry = new(cache) AOTCodeEntry(entry_kind, encode_id(entry_kind, id),
                                        entry_position, entry_size, name_offset, name_size,
                                        blob_offset, has_oop_maps, blob.content_begin());
  log_debug(aot, codecache, stubs)("Wrote code blob '%s' (id=%u, kind=%s) to AOT Code Cache", name, id, aot_code_entry_kind_name[entry_kind]);
  return true;
}
974
975 bool AOTCodeCache::store_code_blob(CodeBlob& blob, AOTCodeEntry::Kind entry_kind, BlobId id) {
976 assert(AOTCodeEntry::is_blob(entry_kind),
977 "wrong entry kind for blob id %s", StubInfo::name(id));
978 return store_code_blob(blob, entry_kind, (uint)id, StubInfo::name(id));
979 }
980
981 CodeBlob* AOTCodeCache::load_code_blob(AOTCodeEntry::Kind entry_kind, uint id, const char* name) {
982 AOTCodeCache* cache = open_for_use();
983 if (cache == nullptr) {
984 return nullptr;
985 }
986 assert(AOTCodeEntry::is_valid_entry_kind(entry_kind), "invalid entry_kind %d", entry_kind);
987
988 if (AOTCodeEntry::is_adapter(entry_kind) && !is_using_adapter()) {
989 return nullptr;
990 }
991 if (AOTCodeEntry::is_blob(entry_kind) && !is_using_stub()) {
992 return nullptr;
993 }
994 log_debug(aot, codecache, stubs)("Reading blob '%s' (id=%u, kind=%s) from AOT Code Cache", name, id, aot_code_entry_kind_name[entry_kind]);
995
996 AOTCodeEntry* entry = cache->find_entry(entry_kind, encode_id(entry_kind, id));
997 if (entry == nullptr) {
998 return nullptr;
999 }
1000 AOTCodeReader reader(cache, entry);
1001 CodeBlob* blob = reader.compile_code_blob(name);
1002
1003 log_debug(aot, codecache, stubs)("%sRead blob '%s' (id=%u, kind=%s) from AOT Code Cache",
1004 (blob == nullptr? "Failed to " : ""), name, id, aot_code_entry_kind_name[entry_kind]);
1005 return blob;
1006 }
1007
1008 CodeBlob* AOTCodeCache::load_code_blob(AOTCodeEntry::Kind entry_kind, BlobId id) {
1009 assert(AOTCodeEntry::is_blob(entry_kind),
1010 "wrong entry kind for blob id %s", StubInfo::name(id));
1011 return load_code_blob(entry_kind, (uint)id, StubInfo::name(id));
1012 }
1013
// Reconstruct a CodeBlob from its archived record. Walks the record laid out
// by AOTCodeCache::store_code_blob (name, archived blob, relocation data,
// optional oop maps, non-product remarks/strings, extra relocation data) and
// instantiates a live blob in the CodeCache, patching relocations for the new
// address. Returns nullptr on name mismatch or when the CodeCache is full.
CodeBlob* AOTCodeReader::compile_code_blob(const char* name) {
  uint entry_position = _entry->offset();

  // Read name
  uint name_offset = entry_position + _entry->name_offset();
  uint name_size = _entry->name_size(); // Includes '\0'
  const char* stored_name = addr(name_offset);

  // Sanity check: the id we looked up must map to the blob the caller expects.
  if (strncmp(stored_name, name, (name_size - 1)) != 0) {
    log_warning(aot, codecache, stubs)("Saved blob's name '%s' is different from the expected name '%s'",
                                       stored_name, name);
    set_lookup_failed(); // Skip this blob
    return nullptr;
  }

  // Read archived code blob
  uint offset = entry_position + _entry->blob_offset();
  CodeBlob* archived_blob = (CodeBlob*)addr(offset);
  offset += archived_blob->size();

  // Relocation data immediately follows the archived blob image.
  address reloc_data = (address)addr(offset);
  offset += archived_blob->relocation_size();
  set_read_position(offset);

  ImmutableOopMapSet* oop_maps = nullptr;
  if (_entry->has_oop_maps()) {
    oop_maps = read_oop_map_set();
  }

  // Allocate the live blob in the CodeCache and copy the archived contents in.
  CodeBlob* code_blob = CodeBlob::create(archived_blob,
                                         stored_name,
                                         reloc_data,
                                         oop_maps
                                        );
  if (code_blob == nullptr) { // no space left in CodeCache
    return nullptr;
  }

#ifndef PRODUCT
  code_blob->asm_remarks().init();
  read_asm_remarks(code_blob->asm_remarks());
  code_blob->dbg_strings().init();
  read_dbg_strings(code_blob->dbg_strings());
#endif // PRODUCT

  // Re-resolve call/external targets and rebase internal/section words to the
  // blob's new location.
  fix_relocations(code_blob);

#ifdef ASSERT
  LogStreamHandle(Trace, aot, codecache, stubs) log;
  if (log.is_enabled()) {
    FlagSetting fs(PrintRelocations, true);
    code_blob->print_on(&log);
  }
#endif
  return code_blob;
}
1070
1071 // ------------ process code and data --------------
1072
// Can't use -1. It is a valid value for a jump-to-itself destination
// used by the static call stub: see NativeJump::jump_destination().
1075 #define BAD_ADDRESS_ID -2
1076
1077 bool AOTCodeCache::write_relocations(CodeBlob& code_blob) {
1078 GrowableArray<uint> reloc_data;
1079 RelocIterator iter(&code_blob);
1080 LogStreamHandle(Trace, aot, codecache, reloc) log;
1081 while (iter.next()) {
1082 int idx = reloc_data.append(0); // default value
1083 switch (iter.type()) {
1084 case relocInfo::none:
1085 break;
1086 case relocInfo::runtime_call_type: {
1087 // Record offset of runtime destination
1088 CallRelocation* r = (CallRelocation*)iter.reloc();
1089 address dest = r->destination();
1090 if (dest == r->addr()) { // possible call via trampoline on Aarch64
1091 dest = (address)-1; // do nothing in this case when loading this relocation
1092 }
1093 int id = _table->id_for_address(dest, iter, &code_blob);
1094 if (id == BAD_ADDRESS_ID) {
1095 return false;
1096 }
1097 reloc_data.at_put(idx, id);
1098 break;
1099 }
1100 case relocInfo::runtime_call_w_cp_type:
1101 log_debug(aot, codecache, reloc)("runtime_call_w_cp_type relocation is not implemented");
1102 return false;
1103 case relocInfo::external_word_type: {
1104 // Record offset of runtime target
1105 address target = ((external_word_Relocation*)iter.reloc())->target();
1106 int id = _table->id_for_address(target, iter, &code_blob);
1107 if (id == BAD_ADDRESS_ID) {
1108 return false;
1109 }
1110 reloc_data.at_put(idx, id);
1111 break;
1112 }
1113 case relocInfo::internal_word_type:
1114 break;
1115 case relocInfo::section_word_type:
1116 break;
1117 case relocInfo::post_call_nop_type:
1118 break;
1119 default:
1120 log_debug(aot, codecache, reloc)("relocation %d unimplemented", (int)iter.type());
1121 return false;
1122 break;
1123 }
1124 if (log.is_enabled()) {
1125 iter.print_current_on(&log);
1126 }
1127 }
1128
1129 // Write additional relocation data: uint per relocation
1130 // Write the count first
1131 int count = reloc_data.length();
1132 write_bytes(&count, sizeof(int));
1133 for (GrowableArrayIterator<uint> iter = reloc_data.begin();
1134 iter != reloc_data.end(); ++iter) {
1135 uint value = *iter;
1136 int n = write_bytes(&value, sizeof(uint));
1137 if (n != sizeof(uint)) {
1138 return false;
1139 }
1140 }
1141 return true;
1142 }
1143
// Patch relocations in a freshly loaded blob using the auxiliary data written
// by AOTCodeCache::write_relocations: resolve address-table ids back to live
// runtime addresses and rebase content-relative words from the dump-time
// address to the blob's new location. Must visit relocations in the same
// order as the writer (one auxiliary uint per relocation, indexed by j).
void AOTCodeReader::fix_relocations(CodeBlob* code_blob) {
  LogStreamHandle(Trace, aot, reloc) log;
  uint offset = read_position();
  int count = *(int*)addr(offset);
  offset += sizeof(int);
  if (log.is_enabled()) {
    log.print_cr("======== extra relocations count=%d", count);
  }
  // The per-relocation uints are consumed in place from the cache buffer.
  uint* reloc_data = (uint*)addr(offset);
  offset += (count * sizeof(uint));
  set_read_position(offset);

  RelocIterator iter(code_blob);
  int j = 0; // index into reloc_data, advanced once per relocation
  while (iter.next()) {
    switch (iter.type()) {
      case relocInfo::none:
        break;
      case relocInfo::runtime_call_type: {
        address dest = _cache->address_for_id(reloc_data[j]);
        // (address)-1 marks a dump-time self-call (trampoline); leave as is.
        if (dest != (address)-1) {
          ((CallRelocation*)iter.reloc())->set_destination(dest);
        }
        break;
      }
      case relocInfo::runtime_call_w_cp_type:
        // this relocation should not be in cache (see write_relocations)
        assert(false, "runtime_call_w_cp_type relocation is not implemented");
        break;
      case relocInfo::external_word_type: {
        address target = _cache->address_for_id(reloc_data[j]);
        // Add external address to global table
        int index = ExternalsRecorder::find_index(target);
        // Update index in relocation
        Relocation::add_jint(iter.data(), index);
        external_word_Relocation* reloc = (external_word_Relocation*)iter.reloc();
        assert(reloc->target() == target, "sanity");
        reloc->set_value(target); // Patch address in the code
        break;
      }
      case relocInfo::internal_word_type: {
        // Rebase a pointer into the blob's own content to its new address.
        internal_word_Relocation* r = (internal_word_Relocation*)iter.reloc();
        r->fix_relocation_after_aot_load(aot_code_entry()->dumptime_content_start_addr(), code_blob->content_begin());
        break;
      }
      case relocInfo::section_word_type: {
        // Same rebasing for section-relative words.
        section_word_Relocation* r = (section_word_Relocation*)iter.reloc();
        r->fix_relocation_after_aot_load(aot_code_entry()->dumptime_content_start_addr(), code_blob->content_begin());
        break;
      }
      case relocInfo::post_call_nop_type:
        break;
      default:
        assert(false,"relocation %d unimplemented", (int)iter.type());
        break;
    }
    if (log.is_enabled()) {
      iter.print_current_on(&log);
    }
    j++;
  }
  assert(j == count, "sanity");
}
1207
1208 bool AOTCodeCache::write_oop_map_set(CodeBlob& cb) {
1209 ImmutableOopMapSet* oopmaps = cb.oop_maps();
1210 int oopmaps_size = oopmaps->nr_of_bytes();
1211 if (!write_bytes(&oopmaps_size, sizeof(int))) {
1212 return false;
1213 }
1214 uint n = write_bytes(oopmaps, oopmaps->nr_of_bytes());
1215 if (n != (uint)oopmaps->nr_of_bytes()) {
1216 return false;
1217 }
1218 return true;
1219 }
1220
1221 ImmutableOopMapSet* AOTCodeReader::read_oop_map_set() {
1222 uint offset = read_position();
1223 int size = *(int *)addr(offset);
1224 offset += sizeof(int);
1225 ImmutableOopMapSet* oopmaps = (ImmutableOopMapSet *)addr(offset);
1226 offset += size;
1227 set_read_position(offset);
1228 return oopmaps;
1229 }
1230
1231 #ifndef PRODUCT
1232 bool AOTCodeCache::write_asm_remarks(CodeBlob& cb) {
1233 // Write asm remarks
1234 uint* count_ptr = (uint *)reserve_bytes(sizeof(uint));
1235 if (count_ptr == nullptr) {
1236 return false;
1237 }
1238 uint count = 0;
1239 bool result = cb.asm_remarks().iterate([&] (uint offset, const char* str) -> bool {
1240 log_trace(aot, codecache, stubs)("asm remark offset=%d, str='%s'", offset, str);
1241 uint n = write_bytes(&offset, sizeof(uint));
1242 if (n != sizeof(uint)) {
1243 return false;
1244 }
1245 const char* cstr = add_C_string(str);
1246 int id = _table->id_for_C_string((address)cstr);
1247 assert(id != -1, "asm remark string '%s' not found in AOTCodeAddressTable", str);
1248 n = write_bytes(&id, sizeof(int));
1249 if (n != sizeof(int)) {
1250 return false;
1251 }
1252 count += 1;
1253 return true;
1254 });
1255 *count_ptr = count;
1256 return result;
1257 }
1258
1259 void AOTCodeReader::read_asm_remarks(AsmRemarks& asm_remarks) {
1260 // Read asm remarks
1261 uint offset = read_position();
1262 uint count = *(uint *)addr(offset);
1263 offset += sizeof(uint);
1264 for (uint i = 0; i < count; i++) {
1265 uint remark_offset = *(uint *)addr(offset);
1266 offset += sizeof(uint);
1267 int remark_string_id = *(uint *)addr(offset);
1268 offset += sizeof(int);
1269 const char* remark = (const char*)_cache->address_for_C_string(remark_string_id);
1270 asm_remarks.insert(remark_offset, remark);
1271 }
1272 set_read_position(offset);
1273 }
1274
1275 bool AOTCodeCache::write_dbg_strings(CodeBlob& cb) {
1276 // Write dbg strings
1277 uint* count_ptr = (uint *)reserve_bytes(sizeof(uint));
1278 if (count_ptr == nullptr) {
1279 return false;
1280 }
1281 uint count = 0;
1282 bool result = cb.dbg_strings().iterate([&] (const char* str) -> bool {
1283 log_trace(aot, codecache, stubs)("dbg string=%s", str);
1284 const char* cstr = add_C_string(str);
1285 int id = _table->id_for_C_string((address)cstr);
1286 assert(id != -1, "db string '%s' not found in AOTCodeAddressTable", str);
1287 uint n = write_bytes(&id, sizeof(int));
1288 if (n != sizeof(int)) {
1289 return false;
1290 }
1291 count += 1;
1292 return true;
1293 });
1294 *count_ptr = count;
1295 return result;
1296 }
1297
1298 void AOTCodeReader::read_dbg_strings(DbgStrings& dbg_strings) {
1299 // Read dbg strings
1300 uint offset = read_position();
1301 uint count = *(uint *)addr(offset);
1302 offset += sizeof(uint);
1303 for (uint i = 0; i < count; i++) {
1304 int string_id = *(uint *)addr(offset);
1305 offset += sizeof(int);
1306 const char* str = (const char*)_cache->address_for_C_string(string_id);
1307 dbg_strings.insert(str);
1308 }
1309 set_read_position(offset);
1310 }
1311 #endif // PRODUCT
1312
1313 //======================= AOTCodeAddressTable ===============
1314
1315 // address table ids for generated routines, external addresses and C
1316 // string addresses are partitioned into positive integer ranges
1317 // defined by the following positive base and max values
1318 // i.e. [_extrs_base, _extrs_base + _extrs_max -1],
1319 // [_blobs_base, _blobs_base + _blobs_max -1],
1320 // ...
1321 // [_c_str_base, _c_str_base + _c_str_max -1],
1322
1323 #define _extrs_max 100
1324 #define _stubs_max 3
1325
1326 #define _shared_blobs_max 20
1327 #define _C1_blobs_max 10
1328 #define _blobs_max (_shared_blobs_max+_C1_blobs_max)
1329 #define _all_max (_extrs_max+_stubs_max+_blobs_max)
1330
1331 #define _extrs_base 0
1332 #define _stubs_base (_extrs_base + _extrs_max)
1333 #define _shared_blobs_base (_stubs_base + _stubs_max)
1334 #define _C1_blobs_base (_shared_blobs_base + _shared_blobs_max)
1335 #define _blobs_end (_shared_blobs_base + _blobs_max)
1336
1337 #define SET_ADDRESS(type, addr) \
1338 { \
1339 type##_addr[type##_length++] = (address) (addr); \
1340 assert(type##_length <= type##_max, "increase size"); \
1341 }
1342
1343 static bool initializing_extrs = false;
1344
// Populate the _extrs table with addresses of VM runtime entry points, GC
// barrier routines and other external symbols that archived code may
// reference via external_word/runtime_call relocations. The recorded order
// defines the stable ids used by id_for_address()/address_for_id(), so
// entries must be registered identically at dump time and load time.
// Guarded against re-entry and repeated initialization.
void AOTCodeAddressTable::init_extrs() {
  if (_extrs_complete || initializing_extrs) return; // Done already

  assert(_blobs_end <= _all_max, "AOTCodeAddress table ranges need adjusting");

  initializing_extrs = true;
  _extrs_addr = NEW_C_HEAP_ARRAY(address, _extrs_max, mtCode);

  _extrs_length = 0;

  // Record addresses of VM runtime methods
  SET_ADDRESS(_extrs, SharedRuntime::fixup_callers_callsite);
  SET_ADDRESS(_extrs, SharedRuntime::handle_wrong_method);
  SET_ADDRESS(_extrs, SharedRuntime::handle_wrong_method_abstract);
  SET_ADDRESS(_extrs, SharedRuntime::handle_wrong_method_ic_miss);
  SET_ADDRESS(_extrs, SharedRuntime::allocate_inline_types);
#if defined(AARCH64) && !defined(ZERO)
  SET_ADDRESS(_extrs, JavaThread::aarch64_get_thread_helper);
#endif
  {
    // Required by Shared blobs
    SET_ADDRESS(_extrs, Deoptimization::fetch_unroll_info);
    SET_ADDRESS(_extrs, Deoptimization::unpack_frames);
    SET_ADDRESS(_extrs, SafepointSynchronize::handle_polling_page_exception);
    SET_ADDRESS(_extrs, SharedRuntime::resolve_opt_virtual_call_C);
    SET_ADDRESS(_extrs, SharedRuntime::resolve_virtual_call_C);
    SET_ADDRESS(_extrs, SharedRuntime::resolve_static_call_C);
    SET_ADDRESS(_extrs, SharedRuntime::throw_StackOverflowError);
    SET_ADDRESS(_extrs, SharedRuntime::throw_delayed_StackOverflowError);
    SET_ADDRESS(_extrs, SharedRuntime::throw_AbstractMethodError);
    SET_ADDRESS(_extrs, SharedRuntime::throw_IncompatibleClassChangeError);
    SET_ADDRESS(_extrs, SharedRuntime::throw_NullPointerException_at_call);
  }

#ifdef COMPILER1
  {
    // Required by C1 blobs
    SET_ADDRESS(_extrs, static_cast<int (*)(oopDesc*)>(SharedRuntime::dtrace_object_alloc));
    SET_ADDRESS(_extrs, SharedRuntime::exception_handler_for_return_address);
    SET_ADDRESS(_extrs, SharedRuntime::register_finalizer);
    SET_ADDRESS(_extrs, Runtime1::is_instance_of);
    SET_ADDRESS(_extrs, Runtime1::exception_handler_for_pc);
    SET_ADDRESS(_extrs, Runtime1::check_abort_on_vm_exception);
    SET_ADDRESS(_extrs, Runtime1::new_instance);
    SET_ADDRESS(_extrs, Runtime1::counter_overflow);
    SET_ADDRESS(_extrs, Runtime1::new_type_array);
    SET_ADDRESS(_extrs, Runtime1::new_object_array);
    SET_ADDRESS(_extrs, Runtime1::new_multi_array);
    SET_ADDRESS(_extrs, Runtime1::throw_range_check_exception);
    SET_ADDRESS(_extrs, Runtime1::throw_index_exception);
    SET_ADDRESS(_extrs, Runtime1::throw_div0_exception);
    SET_ADDRESS(_extrs, Runtime1::throw_null_pointer_exception);
    SET_ADDRESS(_extrs, Runtime1::throw_array_store_exception);
    SET_ADDRESS(_extrs, Runtime1::throw_class_cast_exception);
    SET_ADDRESS(_extrs, Runtime1::throw_incompatible_class_change_error);
    SET_ADDRESS(_extrs, Runtime1::is_instance_of);
    SET_ADDRESS(_extrs, Runtime1::monitorenter);
    SET_ADDRESS(_extrs, Runtime1::monitorexit);
    SET_ADDRESS(_extrs, Runtime1::deoptimize);
    SET_ADDRESS(_extrs, Runtime1::access_field_patching);
    SET_ADDRESS(_extrs, Runtime1::move_klass_patching);
    SET_ADDRESS(_extrs, Runtime1::move_mirror_patching);
    SET_ADDRESS(_extrs, Runtime1::move_appendix_patching);
    SET_ADDRESS(_extrs, Runtime1::predicate_failed_trap);
    SET_ADDRESS(_extrs, Runtime1::unimplemented_entry);
    SET_ADDRESS(_extrs, Runtime1::new_null_free_array);
    SET_ADDRESS(_extrs, Runtime1::load_flat_array);
    SET_ADDRESS(_extrs, Runtime1::store_flat_array);
    SET_ADDRESS(_extrs, Runtime1::substitutability_check);
    SET_ADDRESS(_extrs, Runtime1::buffer_inline_args);
    SET_ADDRESS(_extrs, Runtime1::buffer_inline_args_no_receiver);
    SET_ADDRESS(_extrs, Runtime1::throw_identity_exception);
    SET_ADDRESS(_extrs, Runtime1::throw_illegal_monitor_state_exception);
    SET_ADDRESS(_extrs, Thread::current);
    SET_ADDRESS(_extrs, CompressedKlassPointers::base_addr());
#ifndef PRODUCT
    SET_ADDRESS(_extrs, os::breakpoint);
#endif
  }
#endif

#ifdef COMPILER2
  {
    // Required by C2 blobs
    SET_ADDRESS(_extrs, Deoptimization::uncommon_trap);
    SET_ADDRESS(_extrs, OptoRuntime::handle_exception_C);
    SET_ADDRESS(_extrs, OptoRuntime::new_instance_C);
    SET_ADDRESS(_extrs, OptoRuntime::new_array_C);
    SET_ADDRESS(_extrs, OptoRuntime::new_array_nozero_C);
    SET_ADDRESS(_extrs, OptoRuntime::multianewarray2_C);
    SET_ADDRESS(_extrs, OptoRuntime::multianewarray3_C);
    SET_ADDRESS(_extrs, OptoRuntime::multianewarray4_C);
    SET_ADDRESS(_extrs, OptoRuntime::multianewarray5_C);
    SET_ADDRESS(_extrs, OptoRuntime::multianewarrayN_C);
    SET_ADDRESS(_extrs, OptoRuntime::complete_monitor_locking_C);
    SET_ADDRESS(_extrs, OptoRuntime::monitor_notify_C);
    SET_ADDRESS(_extrs, OptoRuntime::monitor_notifyAll_C);
    SET_ADDRESS(_extrs, OptoRuntime::rethrow_C);
    SET_ADDRESS(_extrs, OptoRuntime::slow_arraycopy_C);
    SET_ADDRESS(_extrs, OptoRuntime::register_finalizer_C);
    SET_ADDRESS(_extrs, OptoRuntime::load_unknown_inline_C);
    SET_ADDRESS(_extrs, OptoRuntime::store_unknown_inline_C);
    SET_ADDRESS(_extrs, OptoRuntime::vthread_end_first_transition_C);
    SET_ADDRESS(_extrs, OptoRuntime::vthread_start_final_transition_C);
    SET_ADDRESS(_extrs, OptoRuntime::vthread_start_transition_C);
    SET_ADDRESS(_extrs, OptoRuntime::vthread_end_transition_C);
#if defined(AARCH64)
    SET_ADDRESS(_extrs, JavaThread::verify_cross_modify_fence_failure);
#endif // AARCH64
  }
#endif // COMPILER2

  // GC barrier entry points referenced by generated code.
#if INCLUDE_G1GC
  SET_ADDRESS(_extrs, G1BarrierSetRuntime::write_ref_field_pre_entry);
#endif
#if INCLUDE_SHENANDOAHGC
  SET_ADDRESS(_extrs, ShenandoahRuntime::write_barrier_pre);
  SET_ADDRESS(_extrs, ShenandoahRuntime::load_reference_barrier_phantom);
  SET_ADDRESS(_extrs, ShenandoahRuntime::load_reference_barrier_phantom_narrow);
#endif
#if INCLUDE_ZGC
  SET_ADDRESS(_extrs, ZBarrierSetRuntime::load_barrier_on_oop_field_preloaded_addr());
  SET_ADDRESS(_extrs, ZBarrierSetRuntime::load_barrier_on_phantom_oop_field_preloaded_addr());
#if defined(AMD64)
  SET_ADDRESS(_extrs, &ZPointerLoadShift);
#endif
#endif
#ifndef ZERO
#if defined(AMD64) || defined(AARCH64) || defined(RISCV64)
  SET_ADDRESS(_extrs, MacroAssembler::debug64);
#endif
#endif // ZERO

  if (UseCompressedOops) {
    SET_ADDRESS(_extrs, CompressedOops::base_addr());
  }

  // addresses of fields in AOT runtime constants area
  address* p = AOTRuntimeConstants::field_addresses_list();
  while (*p != nullptr) {
    SET_ADDRESS(_extrs, *p++);
  }

  _extrs_complete = true;
  log_debug(aot, codecache, init)("External addresses recorded");
}
1491
1492 static bool initializing_early_stubs = false;
1493
// Record addresses of stubs generated early in VM startup that archived code
// may reference. Registration order defines the stable _stubs ids, so it must
// match between dump time and load time. Guarded against re-entry and
// repeated initialization.
void AOTCodeAddressTable::init_early_stubs() {
  if (_complete || initializing_early_stubs) return; // Done already
  initializing_early_stubs = true;
  _stubs_addr = NEW_C_HEAP_ARRAY(address, _stubs_max, mtCode);
  _stubs_length = 0;
  SET_ADDRESS(_stubs, StubRoutines::forward_exception_entry());

  {
    // Required by C1 blobs
#if defined(AMD64) && !defined(ZERO)
    SET_ADDRESS(_stubs, StubRoutines::x86::double_sign_flip());
    SET_ADDRESS(_stubs, StubRoutines::x86::d2l_fixup());
#endif // AMD64
  }

  _early_stubs_complete = true;
  log_info(aot, codecache, init)("Early stubs recorded");
}
1512
1513 static bool initializing_shared_blobs = false;
1514
// Record entry points of shared runtime blobs (wrong-method/ic-miss stubs and
// the deopt blob's entry points). One backing array holds both the shared and
// C1 blob chunks; _C1_blobs_addr aliases into it. Registration order defines
// the stable _shared_blobs ids. Guarded against re-entry and repeated
// initialization.
void AOTCodeAddressTable::init_shared_blobs() {
  if (_complete || initializing_shared_blobs) return; // Done already
  initializing_shared_blobs = true;
  address* blobs_addr = NEW_C_HEAP_ARRAY(address, _blobs_max, mtCode);

  // Divide _shared_blobs_addr array to chunks because they could be initialized in parallel
  _shared_blobs_addr = blobs_addr;
  _C1_blobs_addr = _shared_blobs_addr + _shared_blobs_max;

  _shared_blobs_length = 0;
  _C1_blobs_length = 0;

  // clear the address table
  memset(blobs_addr, 0, sizeof(address)* _blobs_max);

  // Record addresses of generated code blobs
  SET_ADDRESS(_shared_blobs, SharedRuntime::get_handle_wrong_method_stub());
  SET_ADDRESS(_shared_blobs, SharedRuntime::get_ic_miss_stub());
  SET_ADDRESS(_shared_blobs, SharedRuntime::deopt_blob()->unpack());
  SET_ADDRESS(_shared_blobs, SharedRuntime::deopt_blob()->unpack_with_exception());
  SET_ADDRESS(_shared_blobs, SharedRuntime::deopt_blob()->unpack_with_reexecution());
  SET_ADDRESS(_shared_blobs, SharedRuntime::deopt_blob()->unpack_with_exception_in_tls());
#if INCLUDE_JVMCI
  // JVMCI-only deopt entry points exist only when JVMCI is enabled at runtime.
  if (EnableJVMCI) {
    SET_ADDRESS(_shared_blobs, SharedRuntime::deopt_blob()->uncommon_trap());
    SET_ADDRESS(_shared_blobs, SharedRuntime::deopt_blob()->implicit_exception_uncommon_trap());
  }
#endif

  _shared_blobs_complete = true;
  log_debug(aot, codecache, init)("Early shared blobs recorded");
  _complete = true;
}
1548
// Record entry points of early C1 runtime blobs (through forward_exception)
// into the _C1_blobs chunk. Blobs that are missing or have no entry are
// skipped with a log message; skipping shifts subsequent ids, so dump and
// load must agree on which blobs exist. No-op without COMPILER1.
void AOTCodeAddressTable::init_early_c1() {
#ifdef COMPILER1
  // Runtime1 Blobs
  StubId id = StubInfo::stub_base(StubGroup::C1);
  // include forward_exception in range we publish
  StubId limit = StubInfo::next(StubId::c1_forward_exception_id);
  for (; id != limit; id = StubInfo::next(id)) {
    if (Runtime1::blob_for(id) == nullptr) {
      log_info(aot, codecache, init)("C1 blob %s is missing", Runtime1::name_for(id));
      continue;
    }
    if (Runtime1::entry_for(id) == nullptr) {
      log_info(aot, codecache, init)("C1 blob %s is missing entry", Runtime1::name_for(id));
      continue;
    }
    address entry = Runtime1::entry_for(id);
    SET_ADDRESS(_C1_blobs, entry);
  }
#endif // COMPILER1
  assert(_C1_blobs_length <= _C1_blobs_max, "increase _C1_blobs_max to %d", _C1_blobs_length);
  _early_c1_complete = true;
}
1571
1572 #undef SET_ADDRESS
1573
// Free the address tables. Note: _C1_blobs_addr is intentionally not freed
// here — it points into the middle of the single allocation owned by
// _shared_blobs_addr (see init_shared_blobs).
AOTCodeAddressTable::~AOTCodeAddressTable() {
  if (_extrs_addr != nullptr) {
    FREE_C_HEAP_ARRAY(address, _extrs_addr);
  }
  if (_stubs_addr != nullptr) {
    FREE_C_HEAP_ARRAY(address, _stubs_addr);
  }
  if (_shared_blobs_addr != nullptr) {
    FREE_C_HEAP_ARRAY(address, _shared_blobs_addr);
  }
}
1585
1586 #ifdef PRODUCT
1587 #define MAX_STR_COUNT 200
1588 #else
1589 #define MAX_STR_COUNT 500
1590 #endif
1591 #define _c_str_max MAX_STR_COUNT
1592 static const int _c_str_base = _all_max;
1593
1594 static const char* _C_strings_in[MAX_STR_COUNT] = {nullptr}; // Incoming strings
1595 static const char* _C_strings[MAX_STR_COUNT] = {nullptr}; // Our duplicates
1596 static int _C_strings_count = 0;
1597 static int _C_strings_s[MAX_STR_COUNT] = {0};
1598 static int _C_strings_id[MAX_STR_COUNT] = {0};
1599 static int _C_strings_used = 0;
1600
// Load the interned C strings from the cache: a table of per-string lengths
// followed by the concatenated NUL-terminated string bodies (the layout
// written by store_strings). The bodies are copied into a C-heap buffer that
// outlives the cache mapping, and the id tables are rebuilt as identity
// mappings since load order equals the stored id order.
void AOTCodeCache::load_strings() {
  uint strings_count  = _load_header->strings_count();
  if (strings_count == 0) {
    return;
  }
  uint strings_offset = _load_header->strings_offset();
  uint* string_lengths = (uint*)addr(strings_offset);
  strings_offset += (strings_count * sizeof(uint));
  uint strings_size = _load_header->entries_offset() - strings_offset;
  // We have to keep cached strings longer than _cache buffer
  // because they are referenced from compiled code which may
  // still be executed on VM exit after _cache is freed.
  char* p = NEW_C_HEAP_ARRAY(char, strings_size+1, mtCode);
  memcpy(p, addr(strings_offset), strings_size);
  _C_strings_buf = p;
  assert(strings_count <= MAX_STR_COUNT, "sanity");
  for (uint i = 0; i < strings_count; i++) {
    _C_strings[i] = p;
    uint len = string_lengths[i];
    // Identity mapping: stored id i corresponds to table slot i.
    _C_strings_s[i] = i;
    _C_strings_id[i] = i;
    p += len;
  }
  assert((uint)(p - _C_strings_buf) <= strings_size, "(" INTPTR_FORMAT " - " INTPTR_FORMAT ") = %d > %d ", p2i(p), p2i(_C_strings_buf), (uint)(p - _C_strings_buf), strings_size);
  _C_strings_count = strings_count;
  _C_strings_used  = strings_count;
  log_debug(aot, codecache, init)("  Loaded %d C strings of total length %d at offset %d from AOT Code Cache", _C_strings_count, strings_size, strings_offset);
}
1629
1630 int AOTCodeCache::store_strings() {
1631 if (_C_strings_used > 0) {
1632 MutexLocker ml(AOTCodeCStrings_lock, Mutex::_no_safepoint_check_flag);
1633 uint offset = _write_position;
1634 uint length = 0;
1635 uint* lengths = (uint *)reserve_bytes(sizeof(uint) * _C_strings_used);
1636 if (lengths == nullptr) {
1637 return -1;
1638 }
1639 for (int i = 0; i < _C_strings_used; i++) {
1640 const char* str = _C_strings[_C_strings_s[i]];
1641 uint len = (uint)strlen(str) + 1;
1642 length += len;
1643 assert(len < 1000, "big string: %s", str);
1644 lengths[i] = len;
1645 uint n = write_bytes(str, len);
1646 if (n != len) {
1647 return -1;
1648 }
1649 }
1650 log_debug(aot, codecache, exit)(" Wrote %d C strings of total length %d at offset %d to AOT Code Cache",
1651 _C_strings_used, length, offset);
1652 }
1653 return _C_strings_used;
1654 }
1655
1656 const char* AOTCodeCache::add_C_string(const char* str) {
1657 if (is_on_for_dump() && str != nullptr) {
1658 MutexLocker ml(AOTCodeCStrings_lock, Mutex::_no_safepoint_check_flag);
1659 AOTCodeAddressTable* table = addr_table();
1660 if (table != nullptr) {
1661 return table->add_C_string(str);
1662 }
1663 }
1664 return str;
1665 }
1666
1667 const char* AOTCodeAddressTable::add_C_string(const char* str) {
1668 if (_extrs_complete) {
1669 // Check previous strings address
1670 for (int i = 0; i < _C_strings_count; i++) {
1671 if (_C_strings_in[i] == str) {
1672 return _C_strings[i]; // Found previous one - return our duplicate
1673 } else if (strcmp(_C_strings[i], str) == 0) {
1674 return _C_strings[i];
1675 }
1676 }
1677 // Add new one
1678 if (_C_strings_count < MAX_STR_COUNT) {
1679 // Passed in string can be freed and used space become inaccessible.
1680 // Keep original address but duplicate string for future compare.
1681 _C_strings_id[_C_strings_count] = -1; // Init
1682 _C_strings_in[_C_strings_count] = str;
1683 const char* dup = os::strdup(str);
1684 _C_strings[_C_strings_count++] = dup;
1685 log_trace(aot, codecache, stringtable)("add_C_string: [%d] " INTPTR_FORMAT " '%s'", _C_strings_count, p2i(dup), dup);
1686 return dup;
1687 } else {
1688 assert(false, "Number of C strings >= MAX_STR_COUNT");
1689 }
1690 }
1691 return str;
1692 }
1693
1694 int AOTCodeAddressTable::id_for_C_string(address str) {
1695 if (str == nullptr) {
1696 return -1;
1697 }
1698 MutexLocker ml(AOTCodeCStrings_lock, Mutex::_no_safepoint_check_flag);
1699 for (int i = 0; i < _C_strings_count; i++) {
1700 if (_C_strings[i] == (const char*)str) { // found
1701 int id = _C_strings_id[i];
1702 if (id >= 0) {
1703 assert(id < _C_strings_used, "%d >= %d", id , _C_strings_used);
1704 return id; // Found recorded
1705 }
1706 // Not found in recorded, add new
1707 id = _C_strings_used++;
1708 _C_strings_s[id] = i;
1709 _C_strings_id[i] = id;
1710 return id;
1711 }
1712 }
1713 return -1;
1714 }
1715
1716 address AOTCodeAddressTable::address_for_C_string(int idx) {
1717 assert(idx < _C_strings_count, "sanity");
1718 return (address)_C_strings[idx];
1719 }
1720
1721 static int search_address(address addr, address* table, uint length) {
1722 for (int i = 0; i < (int)length; i++) {
1723 if (table[i] == addr) {
1724 return i;
1725 }
1726 }
1727 return BAD_ADDRESS_ID;
1728 }
1729
// Map a stored relocation id back to a live runtime address. Ids fall into
// consecutive ranges: externals, stubs, shared blobs, C1 blobs, then interned
// C strings; ids above all ranges encode an offset relative to os::init.
// -1 is passed through as (address)-1 (the "leave unchanged" marker used by
// fix_relocations).
address AOTCodeAddressTable::address_for_id(int idx) {
  assert(_extrs_complete, "AOT Code Cache VM runtime addresses table is not complete");
  if (idx == -1) {
    return (address)-1;
  }
  uint id = (uint)idx;
  // special case for symbols based relative to os::init
  if (id > (_c_str_base + _c_str_max)) {
    return (address)os::init + idx;
  }
  // NOTE(review): any negative idx other than -1 wraps to a huge uint and is
  // taken by the os::init branch above, so this fatal() looks unreachable —
  // confirm the intended id encoding before relying on it.
  if (idx < 0) {
    fatal("Incorrect id %d for AOT Code Cache addresses table", id);
    return nullptr;
  }
  // no need to compare unsigned id against 0
  if (/* id >= _extrs_base && */ id < _extrs_length) {
    return _extrs_addr[id - _extrs_base];
  }
  if (id >= _stubs_base && id < _stubs_base + _stubs_length) {
    return _stubs_addr[id - _stubs_base];
  }
  if (id >= _shared_blobs_base && id < _shared_blobs_base + _shared_blobs_length) {
    return _shared_blobs_addr[id - _shared_blobs_base];
  }
  if (id >= _C1_blobs_base && id < _C1_blobs_base + _C1_blobs_length) {
    return _C1_blobs_addr[id - _C1_blobs_base];
  }
  if (id >= _c_str_base && id < (_c_str_base + (uint)_C_strings_count)) {
    return address_for_C_string(id - _c_str_base);
  }
  fatal("Incorrect id %d for AOT Code Cache addresses table", id);
  return nullptr;
}
1763
// Translate a runtime address referenced from generated code into a stable
// id for storage in the AOT code cache. Searches, in order: the C string
// table, the stub table (when addr is inside StubRoutines), the blob
// tables, and the external runtime-function table; as a last resort a
// dll-symbol offset is encoded as a distance from os::init. Returns -1
// for the (address)-1 self-jump sentinel; asserts (debug builds) when the
// address cannot be classified. reloc/code_blob are used only for
// diagnostics on failure.
int AOTCodeAddressTable::id_for_address(address addr, RelocIterator reloc, CodeBlob* code_blob) {
  assert(_extrs_complete, "AOT Code Cache VM runtime addresses table is not complete");
  int id = -1;
  if (addr == (address)-1) { // Static call stub has jump to itself
    return id;
  }
  // Check card_table_base address first since it can point to any address
  BarrierSet* bs = BarrierSet::barrier_set();
  bool is_const_card_table_base = !UseG1GC && !UseShenandoahGC && bs->is_a(BarrierSet::CardTableBarrierSet);
  // A constant card table base must be referenced through AOTRuntimeConstants,
  // never recorded directly by address.
  guarantee(!is_const_card_table_base || addr != ci_card_table_address_const(), "sanity");

  // Search for C string
  id = id_for_C_string(addr);
  if (id >= 0) {
    return id + _c_str_base;
  }
  if (StubRoutines::contains(addr)) {
    // Search in stubs
    id = search_address(addr, _stubs_addr, _stubs_length);
    if (id < 0) {
      // Not in the table: report which stub the address belongs to.
      StubCodeDesc* desc = StubCodeDesc::desc_for(addr);
      if (desc == nullptr) {
        // The address may be a return pc just past a stub; retry adjusted.
        desc = StubCodeDesc::desc_for(addr + frame::pc_return_offset);
      }
      const char* sub_name = (desc != nullptr) ? desc->name() : "<unknown>";
      assert(false, "Address " INTPTR_FORMAT " for Stub:%s is missing in AOT Code Cache addresses table", p2i(addr), sub_name);
    } else {
      return id + _stubs_base;
    }
  } else {
    CodeBlob* cb = CodeCache::find_blob(addr);
    if (cb != nullptr) {
      // Search in code blobs
      // NOTE(review): _blobs_max appears to cover the shared and C1 blob
      // tables as one contiguous range — confirm against the table layout.
      int id_base = _shared_blobs_base;
      id = search_address(addr, _shared_blobs_addr, _blobs_max);
      if (id < 0) {
        assert(false, "Address " INTPTR_FORMAT " for Blob:%s is missing in AOT Code Cache addresses table", p2i(addr), cb->name());
      } else {
        return id_base + id;
      }
    } else {
      // Search in runtime functions
      id = search_address(addr, _extrs_addr, _extrs_length);
      if (id < 0) {
        ResourceMark rm;
        const int buflen = 1024;
        char* func_name = NEW_RESOURCE_ARRAY(char, buflen);
        int offset = 0;
        if (os::dll_address_to_function_name(addr, func_name, buflen, &offset)) {
          if (offset > 0) {
            // Could be address of C string
            // Encode as distance from os::init; address_for_id() decodes any
            // id above the C string range this way.
            uint dist = (uint)pointer_delta(addr, (address)os::init, 1);
            log_debug(aot, codecache)("Address " INTPTR_FORMAT " (offset %d) for runtime target '%s' is missing in AOT Code Cache addresses table",
                                      p2i(addr), dist, (const char*)addr);
            // Distances must not collide with the table-id ranges.
            assert(dist > (uint)(_all_max + MAX_STR_COUNT), "change encoding of distance");
            return dist;
          }
#ifdef ASSERT
          // Dump relocation and blob context before failing.
          reloc.print_current_on(tty);
          code_blob->print_on(tty);
          code_blob->print_code_on(tty);
          assert(false, "Address " INTPTR_FORMAT " for runtime target '%s+%d' is missing in AOT Code Cache addresses table", p2i(addr), func_name, offset);
#endif
        } else {
#ifdef ASSERT
          // Symbol lookup failed entirely; dump everything we know.
          reloc.print_current_on(tty);
          code_blob->print_on(tty);
          code_blob->print_code_on(tty);
          os::find(addr, tty);
          assert(false, "Address " INTPTR_FORMAT " for <unknown>/('%s') is missing in AOT Code Cache addresses table", p2i(addr), (const char*)addr);
#endif
        }
      } else {
        return _extrs_base + id;
      }
    }
  }
  return id;
}
1843
1844 AOTRuntimeConstants AOTRuntimeConstants::_aot_runtime_constants;
1845
// Capture the GC-dependent runtime constants (card table base and grain
// shift) for the currently selected barrier set into the static instance.
// Note: the #if blocks chain into a single if/else-if cascade.
void AOTRuntimeConstants::initialize_from_runtime() {
  BarrierSet* bs = BarrierSet::barrier_set();
  address card_table_base = nullptr;
  uint grain_shift = 0;
#if INCLUDE_G1GC
  if (bs->is_a(BarrierSet::G1BarrierSet)) {
    // G1 has no constant card table base; only the region grain is needed.
    grain_shift = G1HeapRegion::LogOfHRGrainBytes;
  } else
#endif
#if INCLUDE_SHENANDOAHGC
  if (bs->is_a(BarrierSet::ShenandoahBarrierSet)) {
    grain_shift = 0;
  } else
#endif
  if (bs->is_a(BarrierSet::CardTableBarrierSet)) {
    // Card-table GCs (Serial/Parallel) have a constant byte_map_base.
    CardTable::CardValue* base = ci_card_table_address_const();
    assert(base != nullptr, "unexpected byte_map_base");
    card_table_base = base;
    CardTableBarrierSet* ctbs = barrier_set_cast<CardTableBarrierSet>(bs);
    grain_shift = ctbs->grain_shift();
  }
  _aot_runtime_constants._card_table_base = card_table_base;
  _aot_runtime_constants._grain_shift = grain_shift;
}
1870
// Null-terminated list of the addresses of the runtime-constant fields,
// in declaration order. Presumably consumed by AOT code relocation to
// patch references to these constants — confirm against callers.
address AOTRuntimeConstants::_field_addresses_list[] = {
  ((address)&_aot_runtime_constants._card_table_base),
  ((address)&_aot_runtime_constants._grain_shift),
  nullptr
};
1876
1877 address AOTRuntimeConstants::card_table_base_address() {
1878 assert(UseSerialGC || UseParallelGC, "Only these GCs have constant card table base");
1879 return (address)&_aot_runtime_constants._card_table_base;
1880 }
1881
1882 // This is called after initialize() but before init2()
1883 // and _cache is not set yet.
1884 void AOTCodeCache::print_on(outputStream* st) {
1885 if (opened_cache != nullptr && opened_cache->for_use()) {
1886 st->print_cr("\nAOT Code Cache");
1887 uint count = opened_cache->_load_header->entries_count();
1888 uint* search_entries = (uint*)opened_cache->addr(opened_cache->_load_header->entries_offset()); // [id, index]
1889 AOTCodeEntry* load_entries = (AOTCodeEntry*)(search_entries + 2 * count);
1890
1891 for (uint i = 0; i < count; i++) {
1892 // Use search_entries[] to order ouput
1893 int index = search_entries[2*i + 1];
1894 AOTCodeEntry* entry = &(load_entries[index]);
1895
1896 uint entry_position = entry->offset();
1897 uint name_offset = entry->name_offset() + entry_position;
1898 const char* saved_name = opened_cache->addr(name_offset);
1899
1900 st->print_cr("%4u: %10s idx:%4u Id:%u size=%u '%s'",
1901 i, aot_code_entry_kind_name[entry->kind()], index, entry->id(), entry->size(), saved_name);
1902 }
1903 }
1904 }