1 /*
2 * Copyright (c) 2023, 2026, Oracle and/or its affiliates. All rights reserved.
3 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
4 *
5 * This code is free software; you can redistribute it and/or modify it
6 * under the terms of the GNU General Public License version 2 only, as
7 * published by the Free Software Foundation.
8 *
9 * This code is distributed in the hope that it will be useful, but WITHOUT
10 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
11 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
12 * version 2 for more details (a copy is included in the LICENSE file that
13 * accompanied this code).
14 *
15 * You should have received a copy of the GNU General Public License version
16 * 2 along with this work; if not, write to the Free Software Foundation,
17 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
18 *
19 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
20 * or visit www.oracle.com if you need additional information or have any
21 * questions.
22 *
23 */
24
25
26 #include "asm/macroAssembler.hpp"
27 #include "cds/aotCacheAccess.hpp"
28 #include "cds/aotMetaspace.hpp"
29 #include "cds/cds_globals.hpp"
30 #include "cds/cdsConfig.hpp"
31 #include "cds/heapShared.hpp"
32 #include "ci/ciUtilities.hpp"
33 #include "classfile/javaAssertions.hpp"
34 #include "code/aotCodeCache.hpp"
35 #include "code/codeCache.hpp"
36 #include "gc/shared/barrierSetAssembler.hpp"
37 #include "gc/shared/barrierSetNMethod.hpp"
38 #include "gc/shared/cardTableBarrierSet.hpp"
39 #include "gc/shared/gcConfig.hpp"
40 #include "logging/logStream.hpp"
41 #include "memory/memoryReserver.hpp"
42 #include "prims/jvmtiThreadState.hpp"
43 #include "prims/upcallLinker.hpp"
44 #include "runtime/deoptimization.hpp"
45 #include "runtime/flags/flagSetting.hpp"
46 #include "runtime/globals_extension.hpp"
47 #include "runtime/icache.hpp"
48 #include "runtime/java.hpp"
49 #include "runtime/mutexLocker.hpp"
50 #include "runtime/os.inline.hpp"
51 #include "runtime/sharedRuntime.hpp"
52 #include "runtime/stubInfo.hpp"
53 #include "runtime/stubRoutines.hpp"
54 #include "utilities/copy.hpp"
55 #ifdef COMPILER1
56 #include "c1/c1_Runtime1.hpp"
57 #endif
58 #ifdef COMPILER2
59 #include "opto/runtime.hpp"
60 #endif
61 #if INCLUDE_G1GC
62 #include "gc/g1/g1BarrierSetRuntime.hpp"
63 #include "gc/g1/g1HeapRegion.hpp"
64 #endif
65 #if INCLUDE_SHENANDOAHGC
66 #include "gc/shenandoah/shenandoahRuntime.hpp"
67 #endif
68 #if INCLUDE_ZGC
69 #include "gc/z/zBarrierSetRuntime.hpp"
70 #endif
71
72 #include <errno.h>
73 #include <sys/stat.h>
74
75 const char* aot_code_entry_kind_name[] = {
76 #define DECL_KIND_STRING(kind) XSTR(kind),
77 DO_AOTCODEENTRY_KIND(DECL_KIND_STRING)
78 #undef DECL_KIND_STRING
79 };
80
81 // Stream for printing AOT Code Cache loading failures.
82 // Prints to the error channel when -XX:AOTMode is "on" (RequireSharedSpaces is set), otherwise to the debug channel.
83 static LogStream& load_failure_log() {
84 static LogStream err_stream(LogLevel::Error, LogTagSetMapping<LOG_TAGS(aot, codecache, init)>::tagset());
85 static LogStream dbg_stream(LogLevel::Debug, LogTagSetMapping<LOG_TAGS(aot, codecache, init)>::tagset());
86 if (RequireSharedSpaces) {
87 return err_stream;
88 } else {
89 return dbg_stream;
90 }
91 }
92
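// Report a failure to load AOT code. If -XX:+AbortVMOnAOTCodeFailure is
// set we exit the VM; otherwise we log the failure and disable AOT code
// caching for the rest of this run.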
93 static void report_load_failure() {
94 if (AbortVMOnAOTCodeFailure) {
95 vm_exit_during_initialization("Unable to use AOT Code Cache.", nullptr);
96 }
97 load_failure_log().print_cr("Unable to use AOT Code Cache.");
98 AOTCodeCache::disable_caching();
99 }
100
101 static void report_store_failure() {
102 if (AbortVMOnAOTCodeFailure) {
103 tty->print_cr("Unable to create AOT Code Cache.");
104 vm_abort(false);
105 }
106 log_error(aot, codecache, exit)("Unable to create AOT Code Cache.");
107 AOTCodeCache::disable_caching();
108 }
109
110 // The sequence of AOT code caching flag and parameter settings.
111 //
112 // 1. The initial AOT code caching flags are set during the call
113 // to CDSConfig::check_vm_args_consistency().
114 //
115 // 2. The earliest AOT code state check is done in compilationPolicy_init(),
116 // where we set the number of compiler threads for the AOT assembly phase.
117 //
118 // 3. We determine the presence of AOT code in the AOT cache in
119 // AOTMetaspace::open_static_archive(), which is called
120 // after compilationPolicy_init() but before codeCache_init().
121 //
122 // 4. AOTCodeCache::initialize() is called during universe_init()
123 // and does the final AOT state and flag settings.
124 //
125 // 5. Finally, AOTCodeCache::init2() is called after universe_init(),
126 // when all GC settings are finalized.
127
128 // The following methods determine what we do with AOT code depending
129 // on the phase of the AOT process: assembly or production.
130
131 bool AOTCodeCache::is_dumping_adapter() {
132 return AOTAdapterCaching && is_on_for_dump();
133 }
134
135 bool AOTCodeCache::is_using_adapter() {
136 return AOTAdapterCaching && is_on_for_use();
137 }
138
139 bool AOTCodeCache::is_dumping_stub() {
140 return AOTStubCaching && is_on_for_dump();
141 }
142
143 bool AOTCodeCache::is_using_stub() {
144 return AOTStubCaching && is_on_for_use();
145 }
146
147 // The following methods can be called regardless of the AOT code cache status.
148 // They are initially called during flag parsing and the settings are
149 // finalized in AOTCodeCache::initialize().
150 void AOTCodeCache::enable_caching() {
151 FLAG_SET_ERGO_IF_DEFAULT(AOTStubCaching, true);
152 FLAG_SET_ERGO_IF_DEFAULT(AOTAdapterCaching, true);
153 }
154
155 void AOTCodeCache::disable_caching() {
156 FLAG_SET_ERGO(AOTStubCaching, false);
157 FLAG_SET_ERGO(AOTAdapterCaching, false);
158 }
159
160 bool AOTCodeCache::is_caching_enabled() {
161 return AOTStubCaching || AOTAdapterCaching;
162 }
163
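// Encode a (kind, id) pair into the 32-bit id stored with an entry.
// The id value is kept as-is for every kind; the asserts below check
// that blob ids are consistent with the StubInfo tables.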
164 static uint32_t encode_id(AOTCodeEntry::Kind kind, int id) {
165 assert(AOTCodeEntry::is_valid_entry_kind(kind), "invalid AOTCodeEntry kind %d", (int)kind);
166 // There can be a conflict of id between an Adapter and a *Blob, but that should not cause any functional issue
167 // because both id and kind are used to find an entry, and that combination should be unique
168 if (kind == AOTCodeEntry::Adapter) {
169 return id;
170 } else if (kind == AOTCodeEntry::SharedBlob) {
171 assert(StubInfo::is_shared(static_cast<BlobId>(id)), "not a shared blob id %d", id);
172 return id;
173 } else if (kind == AOTCodeEntry::C1Blob) {
174 assert(StubInfo::is_c1(static_cast<BlobId>(id)), "not a c1 blob id %d", id);
175 return id;
176 } else if (kind == AOTCodeEntry::C2Blob) {
177 assert(StubInfo::is_c2(static_cast<BlobId>(id)), "not a c2 blob id %d", id);
178 return id;
179 } else {
180 // kind must be AOTCodeEntry::StubGenBlob
181 assert(StubInfo::is_stubgen(static_cast<BlobId>(id)), "not a stubgen blob id %d", id);
182 return id;
183 }
184 }
185
186 static uint _max_aot_code_size = 0;
187 uint AOTCodeCache::max_aot_code_size() {
188 return _max_aot_code_size;
189 }
190
191 // This is called from AOTMetaspace::initialize_shared_spaces(),
192 // which is called from universe_init().
193 // At this point all AOT class linking settings are finalized
194 // and the AOT cache is open, so we can map the AOT code region.
195 void AOTCodeCache::initialize() {
196 #if defined(ZERO) || !(defined(AMD64) || defined(AARCH64))
197 log_info(aot, codecache, init)("AOT Code Cache is not supported on this platform.");
198 disable_caching();
199 return;
200 #else
201 if (FLAG_IS_DEFAULT(AOTCache)) {
202 log_info(aot, codecache, init)("AOT Code Cache is not used: AOTCache is not specified.");
203 disable_caching();
204 return; // AOTCache must be specified to dump and use AOT code
205 }
206
207 if (VerifyOops) {
208 // Disable AOT stub caching when the VerifyOops flag is on.
209 // VerifyOops code generates a lot of C strings, which overflows the
210 // AOT C string table (which has a fixed size).
211 // The AOT C string table will be reworked later to handle such cases.
212 //
213 // Note: AOT adapters are not affected - they don't have oop operations.
214 log_info(aot, codecache, init)("AOT Stubs Caching is not supported with VerifyOops.");
215 FLAG_SET_ERGO(AOTStubCaching, false);
216 }
217
218 bool is_dumping = false;
219 bool is_using = false;
220 if (CDSConfig::is_dumping_final_static_archive() && CDSConfig::is_dumping_aot_linked_classes()) {
221 is_dumping = true;
222 enable_caching();
223 is_dumping = is_caching_enabled();
224 } else if (CDSConfig::is_using_archive() && CDSConfig::is_using_aot_linked_classes()) {
225 enable_caching();
226 is_using = is_caching_enabled();
227 } else {
228 log_info(aot, codecache, init)("AOT Code Cache is not used: AOT Class Linking is not used.");
229 disable_caching();
230 return; // nothing to do
231 }
232 if (!(is_dumping || is_using)) {
233 disable_caching();
234 return; // AOT code caching disabled on command line
235 }
236 _max_aot_code_size = AOTCodeMaxSize;
237 if (!FLAG_IS_DEFAULT(AOTCodeMaxSize)) {
238 if (!is_aligned(AOTCodeMaxSize, os::vm_allocation_granularity())) {
239 _max_aot_code_size = align_up(AOTCodeMaxSize, os::vm_allocation_granularity());
240 log_debug(aot, codecache, init)("Max AOT Code Cache size is aligned up to %uK", (uint)(max_aot_code_size()/K));
241 }
242 }
243 size_t aot_code_size = is_using ? AOTCacheAccess::get_aot_code_region_size() : 0;
244 if (is_using && aot_code_size == 0) {
245 log_info(aot, codecache, init)("AOT Code Cache is empty");
246 disable_caching();
247 return;
248 }
249 if (!open_cache(is_dumping, is_using)) {
250 if (is_using) {
251 report_load_failure();
252 } else {
253 report_store_failure();
254 }
255 return;
256 }
257 if (is_dumping) {
258 FLAG_SET_DEFAULT(ForceUnreachable, true);
259 }
260 FLAG_SET_DEFAULT(DelayCompilerStubsGeneration, false);
261 #endif // defined(ZERO) || !(defined(AMD64) || defined(AARCH64))
262 }
263
264 static AOTCodeCache* opened_cache = nullptr; // Use this until we verify the cache
265 AOTCodeCache* AOTCodeCache::_cache = nullptr;
266 DEBUG_ONLY( bool AOTCodeCache::_passed_init2 = false; )
267
268 // It is called after universe_init() when all GC settings are finalized.
269 void AOTCodeCache::init2() {
270 DEBUG_ONLY( _passed_init2 = true; )
271 if (opened_cache == nullptr) {
272 return;
273 }
274 if (!opened_cache->verify_config()) {
275 delete opened_cache;
276 opened_cache = nullptr;
277 report_load_failure();
278 return;
279 }
280
281 // initialize aot runtime constants as appropriate to this runtime
282 AOTRuntimeConstants::initialize_from_runtime();
283
284 // initialize the table of external routines so we can save
285 // generated code blobs that reference them
286 AOTCodeAddressTable* table = opened_cache->_table;
287 assert(table != nullptr, "should be initialized already");
288 table->init_extrs();
289
290 // Now cache and address table are ready for AOT code generation
291 _cache = opened_cache;
292 }
293
294 bool AOTCodeCache::open_cache(bool is_dumping, bool is_using) {
295 opened_cache = new AOTCodeCache(is_dumping, is_using);
296 if (opened_cache->failed()) {
297 delete opened_cache;
298 opened_cache = nullptr;
299 return false;
300 }
301 return true;
302 }
303
304 // Called after continuations_init() when continuation stub callouts
305 // have been initialized
306 void AOTCodeCache::init3() {
307 if (opened_cache == nullptr) {
308 return;
309 }
310 // initialize external routines for continuations so we can save
311 // generated continuation blob that references them
312 AOTCodeAddressTable* table = opened_cache->_table;
313 assert(table != nullptr, "should be initialized already");
314 table->init_extrs2();
315 }
316
317 void AOTCodeCache::dump() {
318 if (is_on()) {
319 assert(is_on_for_dump(), "should be called only when dumping AOT code");
320 MutexLocker ml(Compile_lock);
321 _cache->finish_write();
322 }
323 }
324
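// Alignment used for all variable-sized data written to the cache so
// that structures (entries, relocation data, strings) can be read back
// in place after the region is mapped.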
325 #define DATA_ALIGNMENT HeapWordSize
326
327 AOTCodeCache::AOTCodeCache(bool is_dumping, bool is_using) :
328 _load_header(nullptr),
329 _load_buffer(nullptr),
330 _store_buffer(nullptr),
331 _C_store_buffer(nullptr),
332 _write_position(0),
333 _load_size(0),
334 _store_size(0),
335 _for_use(is_using),
336 _for_dump(is_dumping),
337 _failed(false),
338 _lookup_failed(false),
339 _table(nullptr),
340 _load_entries(nullptr),
341 _search_entries(nullptr),
342 _store_entries(nullptr),
343 _C_strings_buf(nullptr),
344 _store_entries_cnt(0)
345 {
346 // Read the header at the beginning of the cache
347 if (_for_use) {
348 // Read cache
349 size_t load_size = AOTCacheAccess::get_aot_code_region_size();
350 ReservedSpace rs = MemoryReserver::reserve(load_size, mtCode);
351 if (!rs.is_reserved()) {
352 log_warning(aot, codecache, init)("Failed to reserve %u bytes of memory for mapping AOT code region into AOT Code Cache", (uint)load_size);
353 set_failed();
354 return;
355 }
356 if (!AOTCacheAccess::map_aot_code_region(rs)) {
357 log_warning(aot, codecache, init)("Failed to read/mmap cached code region into AOT Code Cache");
358 set_failed();
359 return;
360 }
361
362 _load_size = (uint)load_size;
363 _load_buffer = (char*)rs.base();
364 assert(is_aligned(_load_buffer, DATA_ALIGNMENT), "load_buffer is not aligned");
365 log_debug(aot, codecache, init)("Mapped %u bytes at address " INTPTR_FORMAT " at AOT Code Cache", _load_size, p2i(_load_buffer));
366
367 _load_header = (Header*)addr(0);
368 if (!_load_header->verify(_load_size)) {
369 set_failed();
370 return;
371 }
372 log_info (aot, codecache, init)("Loaded %u AOT code entries from AOT Code Cache", _load_header->entries_count());
373 log_debug(aot, codecache, init)(" Adapters: total=%u", _load_header->adapters_count());
374 log_debug(aot, codecache, init)(" Shared Blobs: total=%u", _load_header->shared_blobs_count());
375 log_debug(aot, codecache, init)(" StubGen Blobs: total=%u", _load_header->stubgen_blobs_count());
376 log_debug(aot, codecache, init)(" C1 Blobs: total=%u", _load_header->C1_blobs_count());
377 log_debug(aot, codecache, init)(" C2 Blobs: total=%u", _load_header->C2_blobs_count());
378 log_debug(aot, codecache, init)(" AOT code cache size: %u bytes", _load_header->cache_size());
379
380 // Read strings
381 load_strings();
382 }
383 if (_for_dump) {
384 _C_store_buffer = NEW_C_HEAP_ARRAY(char, max_aot_code_size() + DATA_ALIGNMENT, mtCode);
385 _store_buffer = align_up(_C_store_buffer, DATA_ALIGNMENT);
386 // Entries are allocated at the end of the buffer in reverse order (as on a stack).
387 _store_entries = (AOTCodeEntry*)align_up(_C_store_buffer + max_aot_code_size(), DATA_ALIGNMENT);
388 log_debug(aot, codecache, init)("Allocated store buffer at address " INTPTR_FORMAT " of size %u", p2i(_store_buffer), max_aot_code_size());
389 }
390 _table = new AOTCodeAddressTable();
391 }
392
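// Publish the start address and any secondary entry addresses of the
// given stub into the AOT code address table. Secondary addresses are
// taken from 'entries' starting at 'begin_idx'; their expected number
// is derived from StubInfo::entry_count().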
393 void AOTCodeCache::add_stub_entries(StubId stub_id, address start, GrowableArray<address> *entries, int begin_idx) {
394 EntryId entry_id = StubInfo::entry_base(stub_id);
395 add_stub_entry(entry_id, start);
396 // skip past first entry
397 entry_id = StubInfo::next_in_stub(stub_id, entry_id);
398 // now check for any more entries
399 int count = StubInfo::entry_count(stub_id) - 1;
400 assert(start != nullptr, "invalid start address for stub %s", StubInfo::name(stub_id));
401 assert(entries == nullptr || begin_idx + count <= entries->length(), "sanity");
402 // write any extra entries
403 for (int i = 0; i < count; i++) {
404 assert(entry_id != EntryId::NO_ENTRYID, "not enough entries for stub %s", StubInfo::name(stub_id));
405 address a = entries->at(begin_idx + i);
406 add_stub_entry(entry_id, a);
407 entry_id = StubInfo::next_in_stub(stub_id, entry_id);
408 }
409 assert(entry_id == EntryId::NO_ENTRYID, "too many entries for stub %s", StubInfo::name(stub_id));
410 }
411
412 void AOTCodeCache::add_stub_entry(EntryId entry_id, address a) {
413 if (a != nullptr) {
414 if (_table != nullptr) {
415 log_trace(aot, codecache, stubs)("Publishing stub entry %s at address " INTPTR_FORMAT, StubInfo::name(entry_id), p2i(a));
416 return _table->add_stub_entry(entry_id, a);
417 }
418 }
419 }
420
421 void AOTCodeCache::set_shared_stubs_complete() {
422 AOTCodeAddressTable* table = addr_table();
423 if (table != nullptr) {
424 table->set_shared_stubs_complete();
425 }
426 }
427
428 void AOTCodeCache::set_c1_stubs_complete() {
429 AOTCodeAddressTable* table = addr_table();
430 if (table != nullptr) {
431 table->set_c1_stubs_complete();
432 }
433 }
434
435 void AOTCodeCache::set_c2_stubs_complete() {
436 AOTCodeAddressTable* table = addr_table();
437 if (table != nullptr) {
438 table->set_c2_stubs_complete();
439 }
440 }
441
442 void AOTCodeCache::set_stubgen_stubs_complete() {
443 AOTCodeAddressTable* table = addr_table();
444 if (table != nullptr) {
445 table->set_stubgen_stubs_complete();
446 }
447 }
448
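// Snapshot the current VM configuration (flags, GC settings and CPU
// features) into the cache so it can be verified against the runtime
// configuration when the cache is loaded (see Config::verify() below).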
449 void AOTCodeCache::Config::record(uint cpu_features_offset) {
450
451 #define AOTCODECACHE_SAVE_VAR(type, name) _saved_ ## name = name;
452 #define AOTCODECACHE_SAVE_FUN(type, name, fun) _saved_ ## name = fun;
453
454 AOTCODECACHE_CONFIGS_DO(AOTCODECACHE_SAVE_VAR, AOTCODECACHE_SAVE_FUN);
455
456 // Special configs that cannot be checked with macros
457 _compressedOopBase = CompressedOops::base();
458
459 #if defined(X86) && !defined(ZERO)
460 _useUnalignedLoadStores = UseUnalignedLoadStores;
461 #endif
462
463 #if defined(AARCH64) && !defined(ZERO)
464 _avoidUnalignedAccesses = AvoidUnalignedAccesses;
465 #endif
466
467 _cpu_features_offset = cpu_features_offset;
468 }
469
470 bool AOTCodeCache::Config::verify_cpu_features(AOTCodeCache* cache) const {
471 LogStreamHandle(Debug, aot, codecache, init) log;
472 uint offset = _cpu_features_offset;
473 uint cpu_features_size = *(uint *)cache->addr(offset);
474 assert(cpu_features_size == (uint)VM_Version::cpu_features_size(), "must be");
475 offset += sizeof(uint);
476
477 void* cached_cpu_features_buffer = (void *)cache->addr(offset);
478 if (log.is_enabled()) {
479 ResourceMark rm; // required for stringStream::as_string()
480 stringStream ss;
481 VM_Version::get_cpu_features_name(cached_cpu_features_buffer, ss);
482 log.print_cr("CPU features recorded in AOTCodeCache: %s", ss.as_string());
483 }
484
485 if (VM_Version::supports_features(cached_cpu_features_buffer)) {
486 if (log.is_enabled()) {
487 ResourceMark rm; // required for stringStream::as_string()
488 stringStream ss;
489 char* runtime_cpu_features = NEW_RESOURCE_ARRAY(char, VM_Version::cpu_features_size());
490 VM_Version::store_cpu_features(runtime_cpu_features);
491 VM_Version::get_missing_features_name(runtime_cpu_features, cached_cpu_features_buffer, ss);
492 if (!ss.is_empty()) {
493 log.print_cr("Additional runtime CPU features: %s", ss.as_string());
494 }
495 }
496 } else {
497 if (load_failure_log().is_enabled()) {
498 ResourceMark rm; // required for stringStream::as_string()
499 stringStream ss;
500 char* runtime_cpu_features = NEW_RESOURCE_ARRAY(char, VM_Version::cpu_features_size());
501 VM_Version::store_cpu_features(runtime_cpu_features);
502 VM_Version::get_missing_features_name(cached_cpu_features_buffer, runtime_cpu_features, ss);
503 load_failure_log().print_cr("AOT Code Cache disabled: required cpu features are missing: %s", ss.as_string());
504 }
505 return false;
506 }
507 return true;
508 }
509
510 #define AOTCODECACHE_DISABLED_MSG "AOT Code Cache disabled: it was created with %s = "
511
512 // Special case, print "GC = ..." to be more understandable.
513 inline void log_config_mismatch(CollectedHeap::Name saved, CollectedHeap::Name current, const char* name/*unused*/) {
514 load_failure_log().print_cr("AOT Code Cache disabled: it was created with GC = \"%s\" vs current \"%s\"",
515 GCConfig::hs_err_name(saved), GCConfig::hs_err_name(current));
516 }
517
518 inline void log_config_mismatch(bool saved, bool current, const char* name) {
519 load_failure_log().print_cr(AOTCODECACHE_DISABLED_MSG "%s vs current %s", name,
520 saved ? "true" : "false", current ? "true" : "false");
521 }
522
523 inline void log_config_mismatch(int saved, int current, const char* name) {
524 load_failure_log().print_cr(AOTCODECACHE_DISABLED_MSG "%d vs current %d", name, saved, current);
525 }
526
527 inline void log_config_mismatch(uint saved, uint current, const char* name) {
528 load_failure_log().print_cr(AOTCODECACHE_DISABLED_MSG "%u vs current %u", name, saved, current);
529 }
530
531 #ifdef _LP64
532 inline void log_config_mismatch(intx saved, intx current, const char* name) {
533 load_failure_log().print_cr(AOTCODECACHE_DISABLED_MSG "%zd vs current %zd", name, saved, current);
534 }
535
536 inline void log_config_mismatch(uintx saved, uintx current, const char* name) {
537 load_failure_log().print_cr(AOTCODECACHE_DISABLED_MSG "%zu vs current %zu", name, saved, current);
538 }
539 #endif
540
541 template <typename T>
542 bool check_config(T saved, T current, const char* name) {
543 if (saved != current) {
544 log_config_mismatch(saved, current, name);
545 return false;
546 } else {
547 return true;
548 }
549 }
550
551 bool AOTCodeCache::Config::verify(AOTCodeCache* cache) const {
552 // check CPU features before checking flags that may be
553 // auto-configured in response to them
554 if (!verify_cpu_features(cache)) {
555 return false;
556 }
557
558 // Tests for config options which might affect the validity of adapters,
559 // stubs or nmethods. Currently we take a pessimistic stand and
560 // drop the whole cache if any of these are changed.
561
562 #define AOTCODECACHE_CHECK_VAR(type, name) \
563 if (!check_config(_saved_ ## name, name, #name)) { return false; }
564 #define AOTCODECACHE_CHECK_FUN(type, name, fun) \
565 if (!check_config(_saved_ ## name, fun, #fun)) { return false; }
566
567 AOTCODECACHE_CONFIGS_DO(AOTCODECACHE_CHECK_VAR, AOTCODECACHE_CHECK_FUN);
568
569 // Special configs that cannot be checked with macros
570
571 if ((_compressedOopBase == nullptr || CompressedOops::base() == nullptr) && (_compressedOopBase != CompressedOops::base())) {
572 load_failure_log().print_cr("AOT Code Cache disabled: incompatible CompressedOops::base(): %p vs current %p",
573 _compressedOopBase, CompressedOops::base());
574 return false;
575 }
576
577 #if defined(X86) && !defined(ZERO)
578 // switching off UseUnalignedLoadStores can affect validity of fill
579 // stubs
580 if (_useUnalignedLoadStores && !UseUnalignedLoadStores) {
581 log_config_mismatch(_useUnalignedLoadStores, UseUnalignedLoadStores, "UseUnalignedLoadStores");
582 return false;
583 }
584 #endif // defined(X86) && !defined(ZERO)
585
586 #if defined(AARCH64) && !defined(ZERO)
587 // switching on AvoidUnalignedAccesses may affect validity of array
588 // copy stubs and nmethods
589 if (!_avoidUnalignedAccesses && AvoidUnalignedAccesses) {
590 log_config_mismatch(_avoidUnalignedAccesses, AvoidUnalignedAccesses, "AvoidUnalignedAccesses");
591 return false;
592 }
593 #endif // defined(AARCH64) && !defined(ZERO)
594
595 return true;
596 }
597
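// Basic sanity checks on the mapped region: the AOT code version must
// match this VM and the cache size recorded in the header must fit
// within the mapped region.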
598 bool AOTCodeCache::Header::verify(uint load_size) const {
599 if (_version != AOT_CODE_VERSION) {
600 load_failure_log().print_cr("AOT Code Cache disabled: different AOT Code version %d vs %d recorded in AOT Code header", AOT_CODE_VERSION, _version);
601 return false;
602 }
603 if (load_size < _cache_size) {
604 load_failure_log().print_cr("AOT Code Cache disabled: AOT Code Cache size %d < %d recorded in AOT Code header", load_size, _cache_size);
605 return false;
606 }
607 return true;
608 }
609
610 AOTCodeCache* AOTCodeCache::open_for_use() {
611 if (AOTCodeCache::is_on_for_use()) {
612 return AOTCodeCache::cache();
613 }
614 return nullptr;
615 }
616
617 AOTCodeCache* AOTCodeCache::open_for_dump() {
618 if (AOTCodeCache::is_on_for_dump()) {
619 AOTCodeCache* cache = AOTCodeCache::cache();
620 cache->clear_lookup_failed(); // Reset bit
621 return cache;
622 }
623 return nullptr;
624 }
625
626 void copy_bytes(const char* from, address to, uint size) {
627 assert((int)size > 0, "sanity");
628 memcpy(to, from, size);
629 log_trace(aot, codecache)("Copied %d bytes from " INTPTR_FORMAT " to " INTPTR_FORMAT, size, p2i(from), p2i(to));
630 }
631
632 AOTCodeReader::AOTCodeReader(AOTCodeCache* cache, AOTCodeEntry* entry) {
633 _cache = cache;
634 _entry = entry;
635 _load_buffer = cache->cache_buffer();
636 _read_position = 0;
637 _lookup_failed = false;
638 _name = nullptr;
639 _reloc_data = nullptr;
640 _reloc_count = 0;
641 _oop_maps = nullptr;
642 _entry_kind = AOTCodeEntry::None;
643 _stub_data = nullptr;
644 _id = -1;
645 }
646
647 void AOTCodeReader::set_read_position(uint pos) {
648 if (pos == _read_position) {
649 return;
650 }
651 assert(pos < _cache->load_size(), "offset:%d >= file size:%d", pos, _cache->load_size());
652 _read_position = pos;
653 }
654
655 uint AOTCodeReader::align_read_int() {
656 return align_up(_read_position, sizeof(int));
657 }
658
659 bool AOTCodeCache::set_write_position(uint pos) {
660 if (pos == _write_position) {
661 return true;
662 }
663 if (_store_size < _write_position) {
664 _store_size = _write_position; // Adjust during write
665 }
666 assert(pos < _store_size, "offset:%d >= file size:%d", pos, _store_size);
667 _write_position = pos;
668 return true;
669 }
670
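// Zero-filled scratch buffer used to pad the store buffer up to a
// requested alignment in align_write_bytes().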
671 static char align_buffer[256] = { 0 };
672
673 bool AOTCodeCache::align_write_bytes(uint alignment) {
674 uint padding = alignment - (_write_position & (alignment - 1));
675 if (padding == alignment) {
676 return true;
677 }
678 uint n = write_bytes((const void*)&align_buffer, padding);
679 if (n != padding) {
680 return false;
681 }
682 log_trace(aot, codecache)("Adjust write alignment to %d bytes in AOT Code Cache", alignment);
683 return true;
684 }
685
686 bool AOTCodeCache::align_write() {
687 // We are not executing code from cache - we copy it by bytes first.
688 // No need for big alignment (or at all).
689 return align_write_bytes(DATA_ALIGNMENT);
690 }
691
692 bool AOTCodeCache::align_write_int() {
693 return align_write_bytes(sizeof(int));
694 }
695
696 // Check to see if AOT code cache has required space to store "nbytes" of data
697 address AOTCodeCache::reserve_bytes(uint nbytes) {
698 assert(for_dump(), "Code Cache file is not created");
699 uint new_position = _write_position + nbytes;
700 if (new_position >= (uint)((char*)_store_entries - _store_buffer)) {
701 log_warning(aot, codecache)("Failed to ensure %d bytes at offset %d in AOT Code Cache. Increase AOTCodeMaxSize.",
702 nbytes, _write_position);
703 set_failed();
704 report_store_failure();
705 return nullptr;
706 }
707 address buffer = (address)(_store_buffer + _write_position);
708 log_trace(aot, codecache)("Reserved %d bytes at offset %d in AOT Code Cache", nbytes, _write_position);
709 _write_position += nbytes;
710 if (_store_size < _write_position) {
711 _store_size = _write_position;
712 }
713 return buffer;
714 }
715
716 uint AOTCodeCache::write_bytes(const void* buffer, uint nbytes) {
717 assert(for_dump(), "Code Cache file is not created");
718 if (nbytes == 0) {
719 return 0;
720 }
721 uint new_position = _write_position + nbytes;
722 if (new_position >= (uint)((char*)_store_entries - _store_buffer)) {
723 log_warning(aot, codecache)("Failed to write %d bytes at offset %d to AOT Code Cache. Increase AOTCodeMaxSize.",
724 nbytes, _write_position);
725 set_failed();
726 report_store_failure();
727 return 0;
728 }
729 copy_bytes((const char* )buffer, (address)(_store_buffer + _write_position), nbytes);
730 log_trace(aot, codecache)("Wrote %d bytes at offset %d to AOT Code Cache", nbytes, _write_position);
731 _write_position += nbytes;
732 if (_store_size < _write_position) {
733 _store_size = _write_position;
734 }
735 return nbytes;
736 }
737
738 void* AOTCodeEntry::operator new(size_t x, AOTCodeCache* cache) {
739 return (void*)(cache->add_entry());
740 }
741
742 static bool check_entry(AOTCodeEntry::Kind kind, uint id, AOTCodeEntry* entry) {
743 if (entry->kind() == kind) {
744 assert(entry->id() == id, "sanity");
745 return true; // Found
746 }
747 return false;
748 }
749
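// Look up an entry by (kind, id). The search table is an array of
// (id, index) pairs sorted by id; since entries of different kinds may
// share an id, we binary-search on the id and then scan the
// neighbouring pairs for one with a matching kind.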
750 AOTCodeEntry* AOTCodeCache::find_entry(AOTCodeEntry::Kind kind, uint id) {
751 assert(_for_use, "sanity");
752 uint count = _load_header->entries_count();
753 if (_load_entries == nullptr) {
754 // Read it
755 _search_entries = (uint*)addr(_load_header->entries_offset()); // [id, index]
756 _load_entries = (AOTCodeEntry*)(_search_entries + 2 * count);
757 log_debug(aot, codecache, init)("Read %d entries table at offset %d from AOT Code Cache", count, _load_header->entries_offset());
758 }
759 // Binary search
760 int l = 0;
761 int h = count - 1;
762 while (l <= h) {
763 int mid = (l + h) >> 1;
764 int ix = mid * 2;
765 uint is = _search_entries[ix];
766 if (is == id) {
767 int index = _search_entries[ix + 1];
768 AOTCodeEntry* entry = &(_load_entries[index]);
769 if (check_entry(kind, id, entry)) {
770 return entry; // Found
771 }
772 // Linear search around the match to handle id collisions
773 for (int i = mid - 1; i >= l; i--) { // search back
774 ix = i * 2;
775 is = _search_entries[ix];
776 if (is != id) {
777 break;
778 }
779 index = _search_entries[ix + 1];
780 AOTCodeEntry* entry = &(_load_entries[index]);
781 if (check_entry(kind, id, entry)) {
782 return entry; // Found
783 }
784 }
785 for (int i = mid + 1; i <= h; i++) { // search forward
786 ix = i * 2;
787 is = _search_entries[ix];
788 if (is != id) {
789 break;
790 }
791 index = _search_entries[ix + 1];
792 AOTCodeEntry* entry = &(_load_entries[index]);
793 if (check_entry(kind, id, entry)) {
794 return entry; // Found
795 }
796 }
797 break; // No matching entry found
798 } else if (is < id) {
799 l = mid + 1;
800 } else {
801 h = mid - 1;
802 }
803 }
804 return nullptr;
805 }
806
807 extern "C" {
808 static int uint_cmp(const void *i, const void *j) {
809 uint a = *(uint *)i;
810 uint b = *(uint *)j;
811 return a > b ? 1 : a < b ? -1 : 0;
812 }
813 }
814
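// Write the CPU feature set recorded by VM_Version into 'buffer',
// prefixed by its size, and advance 'buffer' past the data, padded up
// to DATA_ALIGNMENT.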
815 void AOTCodeCache::store_cpu_features(char*& buffer, uint buffer_size) {
816 uint* size_ptr = (uint *)buffer;
817 *size_ptr = buffer_size;
818 buffer += sizeof(uint);
819
820 VM_Version::store_cpu_features(buffer);
821 log_debug(aot, codecache, exit)("CPU features recorded in AOTCodeCache: %s", VM_Version::features_string());
822 buffer += buffer_size;
823 buffer = align_up(buffer, DATA_ALIGNMENT);
824 }
825
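// Copy everything accumulated in the store buffer into the final AOT
// code region. The resulting layout is:
//
//   [header | cpu features | entries' code | C strings | search table | entry descriptors]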
826 bool AOTCodeCache::finish_write() {
827 if (!align_write()) {
828 return false;
829 }
830 uint strings_offset = _write_position;
831 int strings_count = store_strings();
832 if (strings_count < 0) {
833 return false;
834 }
835 if (!align_write()) {
836 return false;
837 }
838 uint strings_size = _write_position - strings_offset;
839
840 uint entries_count = 0; // Number of entrant (useful) code entries
841 uint entries_offset = _write_position;
842
843 uint store_count = _store_entries_cnt;
844 if (store_count > 0) {
845 uint header_size = (uint)align_up(sizeof(AOTCodeCache::Header), DATA_ALIGNMENT);
846 uint code_count = store_count;
847 uint search_count = code_count * 2;
848 uint search_size = search_count * sizeof(uint);
849 uint entries_size = (uint)align_up(code_count * sizeof(AOTCodeEntry), DATA_ALIGNMENT); // In bytes
850 // _write_position includes size of code and strings
851 uint code_alignment = code_count * DATA_ALIGNMENT; // We align_up code size when storing it.
852 uint cpu_features_size = VM_Version::cpu_features_size();
853 uint total_cpu_features_size = sizeof(uint) + cpu_features_size; // sizeof(uint) to store cpu_features_size
854 uint total_size = header_size + _write_position + code_alignment + search_size + entries_size +
855 align_up(total_cpu_features_size, DATA_ALIGNMENT);
856 assert(total_size < max_aot_code_size(), "AOT Code size (" UINT32_FORMAT " bytes) is greater than AOTCodeMaxSize(" UINT32_FORMAT " bytes).", total_size, max_aot_code_size());
857
858 // Allocate in AOT Cache buffer
859 char* buffer = (char *)AOTCacheAccess::allocate_aot_code_region(total_size + DATA_ALIGNMENT);
860 char* start = align_up(buffer, DATA_ALIGNMENT);
861 char* current = start + header_size; // Skip header
862
863 uint cpu_features_offset = current - start;
864 store_cpu_features(current, cpu_features_size);
865 assert(is_aligned(current, DATA_ALIGNMENT), "sanity check");
866 assert(current < start + total_size, "sanity check");
867
868 // Create ordered search table for entries [id, index];
869 uint* search = NEW_C_HEAP_ARRAY(uint, search_count, mtCode);
870
871 AOTCodeEntry* entries_address = _store_entries; // Pointer to latest entry
872 uint adapters_count = 0;
873 uint shared_blobs_count = 0;
874 uint stubgen_blobs_count = 0;
875 uint C1_blobs_count = 0;
876 uint C2_blobs_count = 0;
877 uint max_size = 0;
878 // AOTCodeEntry entries were allocated in reverse in store buffer.
879 // Process them in reverse order so that the code stored first is cached first.
880 for (int i = store_count - 1; i >= 0; i--) {
881 entries_address[i].set_next(nullptr); // clear pointers before storing data
882 uint size = align_up(entries_address[i].size(), DATA_ALIGNMENT);
883 if (size > max_size) {
884 max_size = size;
885 }
886 copy_bytes((_store_buffer + entries_address[i].offset()), (address)current, size);
887 entries_address[i].set_offset(current - start); // New offset
888 current += size;
889 uint n = write_bytes(&(entries_address[i]), sizeof(AOTCodeEntry));
890 if (n != sizeof(AOTCodeEntry)) {
891 FREE_C_HEAP_ARRAY(search);
892 return false;
893 }
894 search[entries_count*2 + 0] = entries_address[i].id();
895 search[entries_count*2 + 1] = entries_count;
896 entries_count++;
897 AOTCodeEntry::Kind kind = entries_address[i].kind();
898 if (kind == AOTCodeEntry::Adapter) {
899 adapters_count++;
900 } else if (kind == AOTCodeEntry::SharedBlob) {
901 shared_blobs_count++;
902 } else if (kind == AOTCodeEntry::StubGenBlob) {
903 stubgen_blobs_count++;
904 } else if (kind == AOTCodeEntry::C1Blob) {
905 C1_blobs_count++;
906 } else if (kind == AOTCodeEntry::C2Blob) {
907 C2_blobs_count++;
908 }
909 }
910 if (entries_count == 0) {
911 log_info(aot, codecache, exit)("AOT Code Cache was not created: no entries");
912 FREE_C_HEAP_ARRAY(search);
913 return true; // Nothing to write
914 }
915 assert(entries_count <= store_count, "%d > %d", entries_count, store_count);
916 // Write strings
917 if (strings_count > 0) {
918 copy_bytes((_store_buffer + strings_offset), (address)current, strings_size);
919 strings_offset = (current - start); // New offset
920 current += strings_size;
921 }
922
923 uint new_entries_offset = (current - start); // New offset
924 // Sort and store search table
925 qsort(search, entries_count, 2*sizeof(uint), uint_cmp);
926 search_size = 2 * entries_count * sizeof(uint);
927 copy_bytes((const char*)search, (address)current, search_size);
928 FREE_C_HEAP_ARRAY(search);
929 current += search_size;
930
931 // Write entries
932 entries_size = entries_count * sizeof(AOTCodeEntry); // New size
933 copy_bytes((_store_buffer + entries_offset), (address)current, entries_size);
934 current += entries_size;
935 uint size = (current - start);
936 assert(size <= total_size, "%d > %d", size , total_size);
937
938 log_debug(aot, codecache, exit)(" Adapters: total=%u", adapters_count);
939 log_debug(aot, codecache, exit)(" Shared Blobs: total=%u", shared_blobs_count);
940 log_debug(aot, codecache, exit)(" StubGen Blobs: total=%u", stubgen_blobs_count);
941 log_debug(aot, codecache, exit)(" C1 Blobs: total=%u", C1_blobs_count);
942 log_debug(aot, codecache, exit)(" C2 Blobs: total=%u", C2_blobs_count);
943 log_debug(aot, codecache, exit)(" AOT code cache size: %u bytes, max entry's size: %u bytes", size, max_size);
944
945 // Finalize header
946 AOTCodeCache::Header* header = (AOTCodeCache::Header*)start;
947 header->init(size, (uint)strings_count, strings_offset,
948 entries_count, new_entries_offset,
949 adapters_count, shared_blobs_count,
950 stubgen_blobs_count, C1_blobs_count,
951 C2_blobs_count, cpu_features_offset);
952
953 log_info(aot, codecache, exit)("Wrote %d AOT code entries to AOT Code Cache", entries_count);
954 }
955 return true;
956 }
957
958 //------------------Store/Load AOT code ----------------------
959
960 bool AOTCodeCache::store_code_blob(CodeBlob& blob, AOTCodeEntry::Kind entry_kind, uint id, const char* name, AOTStubData* stub_data, CodeBuffer* code_buffer) {
961 assert(AOTCodeEntry::is_valid_entry_kind(entry_kind), "invalid entry_kind %d", entry_kind);
962
963 // we only expect stub data and a code buffer for a multi-stub blob
964 assert(AOTCodeEntry::is_multi_stub_blob(entry_kind) == (stub_data != nullptr),
965 "entry_kind %d does not match stub_data pointer %p",
966 entry_kind, stub_data);
967
968 assert((stub_data == nullptr) == (code_buffer == nullptr),
969 "stub data and code buffer must both be null or both non null");
970
971 // If this is a stub and the cache is on for either load or dump we
972 // need to insert the stub entries into the AOTCacheAddressTable so
973 // that relocs which refer to entries defined by this blob get
974 // translated correctly.
975 //
976 // Entry insertion needs to be done up front before writing the
977 // blob because some blobs rely on internal daisy-chain references
978 // from one entry to another.
979 //
980 // Entry insertion also needs to be done even if the cache is open
981 // for use but not for dump. This may be needed when an archived
982 // blob omits some entries -- either because of a config change or a
983 // load failure -- with the result that the entries end up being
984 // generated. These generated entry addresses may be needed to
985 // resolve references from subsequently loaded blobs (for either
986 // stubs or nmethods).
987
988 if (is_on() && AOTCodeEntry::is_blob(entry_kind)) {
989 publish_stub_addresses(blob, (BlobId)id, stub_data);
990 }
991
992 AOTCodeCache* cache = open_for_dump();
993 if (cache == nullptr) {
994 return false;
995 }
996 if (AOTCodeEntry::is_adapter(entry_kind) && !is_dumping_adapter()) {
997 return false;
998 }
999 if (AOTCodeEntry::is_blob(entry_kind) && !is_dumping_stub()) {
1000 return false;
1001 }
1002 // we do not currently store C2 stubs because we are seeing weird
1003 // memory errors when loading them -- see JDK-8357593
1004 if (entry_kind == AOTCodeEntry::C2Blob) {
1005 return false;
1006 }
1007 log_debug(aot, codecache, stubs)("Writing blob '%s' (id=%u, kind=%s) to AOT Code Cache", name, id, aot_code_entry_kind_name[entry_kind]);
1008
1009 #ifdef ASSERT
1010 LogStreamHandle(Trace, aot, codecache, stubs) log;
1011 if (log.is_enabled()) {
1012 FlagSetting fs(PrintRelocations, true);
1013 blob.print_on(&log);
1014 }
1015 #endif
1016 // we need to take a lock to prevent a race between compiler threads generating AOT code
1017 // and the main thread generating adapters
1018 MutexLocker ml(Compile_lock);
1019 if (!is_on()) {
1020 return false; // AOT code cache was already dumped and closed.
1021 }
1022 if (!cache->align_write()) {
1023 return false;
1024 }
1025 uint entry_position = cache->_write_position;
1026
1027 uint blob_offset = cache->_write_position - entry_position;
1028 // Code blob's size is aligned to oopSize
1029 address archive_buffer = cache->reserve_bytes(blob.size());
1030 if (archive_buffer == nullptr) {
1031 return false;
1032 }
1033 CodeBlob::archive_blob(&blob, archive_buffer);
1034
1035 // For a relocatable code blob its relocations are linked from the
1036 // blob. However, for a non-relocatable (stubgen) blob we only have
1037 // transient relocations attached to the code buffer that are added
1038 // in order to support AOT-load time patching. In either case, we
1039 // need to explicitly save these relocs when storing the blob to the
1040 // archive so we can then reload them and reattach them to either
1041 // the blob or to a code buffer when we reload the blob into a
1042 // production JVM.
1043 //
1044 // Either way we are then in a position to iterate over the relocs
1045 // and AOT patch the ones that refer to code that may move between
1046 // assembly and production time. We also need to save and restore
1047 // AOT address table indexes for the target addresses of affected
1048 // relocs. That happens below.
1049
1050 int reloc_count;
1051 address reloc_data;
1052 if (AOTCodeEntry::is_multi_stub_blob(entry_kind)) {
1053 CodeSection* cs = code_buffer->code_section(CodeBuffer::SECT_INSTS);
1054 reloc_count = (cs->has_locs() ? cs->locs_count() : 0);
1055 reloc_data = (reloc_count > 0 ? (address)cs->locs_start() : nullptr);
1056 } else {
1057 reloc_count = blob.relocation_size() / sizeof(relocInfo);
1058 reloc_data = (address)blob.relocation_begin();
1059 }
1060 uint n = cache->write_bytes(&reloc_count, sizeof(int));
1061 if (n != sizeof(int)) {
1062 return false;
1063 }
1064 if (AOTCodeEntry::is_multi_stub_blob(entry_kind)) {
1065 // align to heap word size before writing the relocs so we can
1066 // install them into a code buffer when they get restored
1067 if (!cache->align_write()) {
1068 return false;
1069 }
1070 }
1071 uint reloc_data_size = (uint)(reloc_count * sizeof(relocInfo));
1072 n = cache->write_bytes(reloc_data, reloc_data_size);
1073 if (n != reloc_data_size) {
1074 return false;
1075 }
1076
1077 bool has_oop_maps = false;
1078 if (blob.oop_maps() != nullptr) {
1079 if (!cache->write_oop_map_set(blob)) {
1080 return false;
1081 }
1082 has_oop_maps = true;
1083 }
1084
1085 // In the case of a multi-stub blob we need to write start, end,
1086 // secondary entries and extras. For any other blob, entry addresses
1087 // beyond the blob start are stored in the blob itself as offsets.
1088 if (stub_data != nullptr) {
1089 if (!cache->write_stub_data(blob, stub_data)) {
1090 return false;
1091 }
1092 }
1093
1094 // Now that we have added all the other data, we can write the details
1095 // of any extra AOT relocations
1096
1097 bool write_ok = true;
1098 if (AOTCodeEntry::is_multi_stub_blob(entry_kind)) {
1099 if (reloc_count > 0) {
1100 CodeSection* cs = code_buffer->code_section(CodeBuffer::SECT_INSTS);
1101 RelocIterator iter(cs);
1102 write_ok = cache->write_relocations(blob, iter);
1103 }
1104 } else {
1105 RelocIterator iter(&blob);
1106 write_ok = cache->write_relocations(blob, iter);
1107 }
1108
1109 if (!write_ok) {
1110 if (!cache->failed()) {
1111 // We may miss an address in AOT table - skip this code blob.
1112 cache->set_write_position(entry_position);
1113 }
1114 return false;
1115 }
1116
1117 #ifndef PRODUCT
1118 // Write asm remarks after relocation info
1119 if (!cache->write_asm_remarks(blob)) {
1120 return false;
1121 }
1122 if (!cache->write_dbg_strings(blob)) {
1123 return false;
1124 }
1125 #endif /* PRODUCT */
1126
1127 // Write name after code comments
1128 uint name_offset = cache->_write_position - entry_position;
1129 uint name_size = (uint)strlen(name) + 1; // Includes '\0'
1130 n = cache->write_bytes(name, name_size);
1131 if (n != name_size) {
1132 return false;
1133 }
1134
1135 uint entry_size = cache->_write_position - entry_position;
1136
1137 AOTCodeEntry* entry = new(cache) AOTCodeEntry(entry_kind, encode_id(entry_kind, id),
1138 entry_position, entry_size, name_offset, name_size,
1139 blob_offset, has_oop_maps, blob.content_begin());
1140 log_debug(aot, codecache, stubs)("Wrote code blob '%s' (id=%u, kind=%s) to AOT Code Cache", name, id, aot_code_entry_kind_name[entry_kind]);
1141 return true;
1142 }
1143
1144 bool AOTCodeCache::store_code_blob(CodeBlob& blob, AOTCodeEntry::Kind entry_kind, uint id, const char* name) {
1145 assert(!AOTCodeEntry::is_blob(entry_kind),
1146 "wrong entry kind for numeric id %d", id);
1147 return store_code_blob(blob, entry_kind, (uint)id, name, nullptr, nullptr);
1148 }
1149
1150 bool AOTCodeCache::store_code_blob(CodeBlob& blob, AOTCodeEntry::Kind entry_kind, BlobId id) {
1151 assert(AOTCodeEntry::is_single_stub_blob(entry_kind),
1152 "wrong entry kind for blob id %s", StubInfo::name(id));
1153 return store_code_blob(blob, entry_kind, (uint)id, StubInfo::name(id), nullptr, nullptr);
1154 }
1155
1156 bool AOTCodeCache::store_code_blob(CodeBlob& blob, AOTCodeEntry::Kind entry_kind, BlobId id, AOTStubData* stub_data, CodeBuffer* code_buffer) {
1157 assert(AOTCodeEntry::is_multi_stub_blob(entry_kind),
1158 "wrong entry kind for multi stub blob id %s", StubInfo::name(id));
1159 return store_code_blob(blob, entry_kind, (uint)id, StubInfo::name(id), stub_data, code_buffer);
1160 }
1161
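// Serialize the per-stub address ranges of a multi-stub (stubgen) blob.
// The record format for each stub is described in the loop below; the
// sequence is terminated by StubId::NO_STUBID.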
1162 bool AOTCodeCache::write_stub_data(CodeBlob &blob, AOTStubData *stub_data) {
1163 if (!align_write_int()) {
1164 return false;
1165 }
1166 BlobId blob_id = stub_data->blob_id();
1167 StubId stub_id = StubInfo::stub_base(blob_id);
1168 address blob_base = blob.code_begin();
1169 int stub_cnt = StubInfo::stub_count(blob_id);
1170 int n;
1171
1172 LogStreamHandle(Trace, aot, codecache, stubs) log;
1173
1174 if (log.is_enabled()) {
1175 log.print_cr("======== Stub data starts at offset %d", _write_position);
1176 }
1177
1178 for (int i = 0; i < stub_cnt; i++, stub_id = StubInfo::next_in_blob(blob_id, stub_id)) {
1179 // For each stub we find in the ranges list we write an int
1180 // sequence <stub_id, start, end, N, offset1, ..., offsetN> where
1181 //
1182 // - start is the stub start address encoded as a code section offset
1183 //
1184 // - end is the stub end address encoded as an offset from start
1185 //
1186 // - N counts the number of stub-local entries/extras
1187 //
1188 // - offsetI is a stub-local entry/extra address, encoded as NULL_ADDRESS_MARKER
1189 // for a null address and otherwise as an offset from start in the range [0, end]
1190
1191 StubAddrRange& range = stub_data->get_range(i);
1192 GrowableArray<address>& addresses = stub_data->address_array();
1193 int base = range.start_index();
1194 if (base >= 0) {
1195 n = write_bytes(&stub_id, sizeof(StubId));
1196 if (n != sizeof(StubId)) {
1197 return false;
1198 }
1199 address start = addresses.at(base);
1200 assert(blob_base <= start, "sanity");
1201 uint offset = (uint)(start - blob_base);
1202 n = write_bytes(&offset, sizeof(uint));
1203 if (n != sizeof(int)) {
1204 return false;
1205 }
1206 address end = addresses.at(base + 1);
1207 assert(start < end, "sanity");
1208 offset = (uint)(end - start);
1209 n = write_bytes(&offset, sizeof(uint));
1210 if (n != sizeof(int)) {
1211 return false;
1212 }
1213 // write number of secondary and extra entries
1214 int count = range.count() - 2;
1215 n = write_bytes(&count, sizeof(int));
1216 if (n != sizeof(int)) {
1217 return false;
1218 }
1219 for (int j = 0; j < count; j++) {
1220 address next = addresses.at(base + 2 + j);
1221 if (next != nullptr) {
1222 // n.b. This maps next == end to the stub length. That
1223 // happens when a handler range covers the end of a stub,
1224 // and it needs to be handled specially by the client
1225 // that restores the extras (see the matching check in
1226 // AOTCodeReader::read_stub_data).
1227 assert(start <= next && next <= end, "sanity");
1228 offset = (uint)(next - start);
1229 } else {
1230 // this can happen when a stub is not generated or an
1231 // extra is the common handler target
1232 offset = NULL_ADDRESS_MARKER;
1233 }
1234 n = write_bytes(&offset, sizeof(uint));
1235 if (n != sizeof(int)) {
1236 return false;
1237 }
1238 }
1239 if (log.is_enabled()) {
1240 log.print_cr("======== wrote stub %s and %d addresses up to offset %d",
1241 StubInfo::name(stub_id), range.count(), _write_position);
1242 }
1243 }
1244 }
1245 // we should have exhausted all stub ids in the blob
1246 assert(stub_id == StubId::NO_STUBID, "sanity");
1247 // write NO_STUBID as an end marker
1248 n = write_bytes(&stub_id, sizeof(StubId));
1249 if (n != sizeof(StubId)) {
1250 return false;
1251 }
1252
1253 if (log.is_enabled()) {
1254 log.print_cr("======== Stub data ends at offset %d", _write_position);
1255 }
1256
1257 return true;
1258 }
1259
1260 CodeBlob* AOTCodeCache::load_code_blob(AOTCodeEntry::Kind entry_kind, uint id, const char* name, AOTStubData* stub_data) {
1261 AOTCodeCache* cache = open_for_use();
1262 if (cache == nullptr) {
1263 return nullptr;
1264 }
1265 assert(AOTCodeEntry::is_valid_entry_kind(entry_kind), "invalid entry_kind %d", entry_kind);
1266
1267 assert(AOTCodeEntry::is_multi_stub_blob(entry_kind) == (stub_data != nullptr),
1268 "entry_kind %d does not match stub_data pointer %p",
1269 entry_kind, stub_data);
1270
1271 if (AOTCodeEntry::is_adapter(entry_kind) && !is_using_adapter()) {
1272 return nullptr;
1273 }
1274 if (AOTCodeEntry::is_blob(entry_kind) && !is_using_stub()) {
1275 return nullptr;
1276 }
1277 // we do not currently load C2 stubs because we are seeing weird
1278 // memory errors when loading them -- see JDK-8357593
1279 if (entry_kind == AOTCodeEntry::C2Blob) {
1280 return nullptr;
1281 }
1282 log_debug(aot, codecache, stubs)("Reading blob '%s' (id=%u, kind=%s) from AOT Code Cache", name, id, aot_code_entry_kind_name[entry_kind]);
1283
1284 AOTCodeEntry* entry = cache->find_entry(entry_kind, encode_id(entry_kind, id));
1285 if (entry == nullptr) {
1286 return nullptr;
1287 }
1288 AOTCodeReader reader(cache, entry);
1289 CodeBlob* blob = reader.compile_code_blob(name, entry_kind, id, stub_data);
1290
1291 log_debug(aot, codecache, stubs)("%sRead blob '%s' (id=%u, kind=%s) from AOT Code Cache",
1292 (blob == nullptr? "Failed to " : ""), name, id, aot_code_entry_kind_name[entry_kind]);
1293 return blob;
1294 }
1295
1296 CodeBlob* AOTCodeCache::load_code_blob(AOTCodeEntry::Kind entry_kind, uint id, const char* name) {
1297 assert(!AOTCodeEntry::is_blob(entry_kind),
1298 "wrong entry kind for numeric id %d", id);
1299 return load_code_blob(entry_kind, (uint)id, name, nullptr);
1300 }
1301
1302 CodeBlob* AOTCodeCache::load_code_blob(AOTCodeEntry::Kind entry_kind, BlobId id) {
1303 assert(AOTCodeEntry::is_single_stub_blob(entry_kind),
1304 "wrong entry kind for blob id %s", StubInfo::name(id));
1305 return load_code_blob(entry_kind, (uint)id, StubInfo::name(id), nullptr);
1306 }
1307
1308 CodeBlob* AOTCodeCache::load_code_blob(AOTCodeEntry::Kind entry_kind, BlobId id, AOTStubData* stub_data) {
1309 assert(AOTCodeEntry::is_multi_stub_blob(entry_kind),
1310 "wrong entry kind for blob id %s", StubInfo::name(id));
1311 return load_code_blob(entry_kind, (uint)id, StubInfo::name(id), stub_data);
1312 }
1313
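// Reconstruct a code blob from its archived form: verify the stored
// name, then locate the archived blob, relocation data and oop maps in
// the same order they were written by store_code_blob().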
1314 CodeBlob* AOTCodeReader::compile_code_blob(const char* name, AOTCodeEntry::Kind entry_kind, int id, AOTStubData* stub_data) {
1315 uint entry_position = _entry->offset();
1316
1317 // Read name
1318 uint name_offset = entry_position + _entry->name_offset();
1319 uint name_size = _entry->name_size(); // Includes '\0'
1320 const char* stored_name = addr(name_offset);
1321
1322 if (strncmp(stored_name, name, (name_size - 1)) != 0) {
1323 log_warning(aot, codecache, stubs)("Saved blob's name '%s' is different from the expected name '%s'",
1324 stored_name, name);
1325 set_lookup_failed(); // Skip this blob
1326 return nullptr;
1327 }
1328 _name = stored_name;
1329
1330 // Read archived code blob and related info
1331 uint offset = entry_position + _entry->blob_offset();
1332 CodeBlob* archived_blob = (CodeBlob*)addr(offset);
1333 offset += archived_blob->size();
1334
1335 _reloc_count = *(int*)addr(offset);
1336 offset += sizeof(int);
1337 if (AOTCodeEntry::is_multi_stub_blob(entry_kind)) {
1338 // position of relocs will have been aligned to heap word size so
1339 // we can install them into a code buffer
1340 offset = align_up(offset, DATA_ALIGNMENT);
1341 }
1342 _reloc_data = (address)addr(offset);
1343 offset += _reloc_count * sizeof(relocInfo);
1344 set_read_position(offset);
1345
1346 if (_entry->has_oop_maps()) {
1347 _oop_maps = read_oop_map_set();
1348 }
1349
1350 // record the current context for use by the restore() callback below
1351 _stub_data = stub_data;
1352 _entry_kind = entry_kind;
1353 _id = id;
1354
1355 // CodeBlob::restore() calls AOTCodeReader::restore()
1356
1357 CodeBlob* code_blob = CodeBlob::create(archived_blob, this);
1358
1359 if (code_blob == nullptr) { // no space left in CodeCache
1360 return nullptr;
1361 }
1362
1363 #ifdef ASSERT
1364 LogStreamHandle(Trace, aot, codecache, stubs) log;
1365 if (log.is_enabled()) {
1366 FlagSetting fs(PrintRelocations, true);
1367 code_blob->print_on(&log);
1368 }
1369 #endif
1370 return code_blob;
1371 }
1372
1373 void AOTCodeReader::restore(CodeBlob* code_blob) {
1374 precond(AOTCodeCache::is_on_for_use());
1375 precond(_name != nullptr);
1376 precond(_reloc_data != nullptr);
1377
1378 code_blob->set_name(_name);
1379 // Saved relocations need restoring except for the case of a
1380 // multi-stub blob which has no runtime relocations. However, we may
1381 // still have saved some (re-)load time relocs that were attached to
1382 // the generator's code buffer. We don't attach them to the blob but
1383 // they get processed below by fix_relocations.
1384 if (!AOTCodeEntry::is_multi_stub_blob(_entry_kind)) {
1385 code_blob->restore_mutable_data(_reloc_data);
1386 }
1387 code_blob->set_oop_maps(_oop_maps);
1388
1389 // if this is a multi-stub blob, load its entries
1390 if (AOTCodeEntry::is_blob(_entry_kind)) {
1391 BlobId blob_id = static_cast<BlobId>(_id);
1392 if (StubInfo::is_stubgen(blob_id)) {
1393 assert(_stub_data != nullptr, "sanity");
1394 read_stub_data(code_blob, _stub_data);
1395 }
1396 // publish entries found either in stub_data or as offsets in blob
1397 AOTCodeCache::publish_stub_addresses(*code_blob, blob_id, _stub_data);
1398 }
1399
1400 // Now that all the entry points are in the address table we can
1401 // read all the extra reloc info and fix up any addresses that need
1402 // patching to adjust for a new location in a new JVM. We can be
1403 // sure to correctly update all runtime references, including
1404 // cross-linked stubs that are internally daisy-chained. If
1405 // relocation fails and we have to re-generate any of the stubs then
1406 // the entry points for newly generated stubs will get updated,
1407 // ensuring that any other stubs or nmethods we need to relocate
1408 // will use the correct address.
1409
1410 // if we have a relocatable code blob then the relocs are already
1411 // attached to the blob and we can iterate over it to find the ones
1412 // we need to patch. With a non-relocatable code blob we need to
1413 // wrap it with a CodeBuffer and then reattach the relocs to the
1414 // code buffer.
1415
1416 if (AOTCodeEntry::is_multi_stub_blob(_entry_kind)) {
1417 // the blob doesn't have any proper runtime relocs but we can
1418 // reinstate the AOT-load time relocs we saved from the code
1419 // buffer that generated this blob in a new code buffer and use
1420 // the latter to iterate over them
1421 if (_reloc_count > 0) {
1422 CodeBuffer code_buffer(code_blob);
1423 relocInfo* locs = (relocInfo*)_reloc_data;
1424 code_buffer.insts()->initialize_shared_locs(locs, _reloc_count);
1425 code_buffer.insts()->set_locs_end(locs + _reloc_count);
1426 CodeSection *cs = code_buffer.code_section(CodeBuffer::SECT_INSTS);
1427 RelocIterator reloc_iter(cs);
1428 fix_relocations(code_blob, reloc_iter);
1429 }
1430 } else {
1431 // the AOT-load time relocs will be in the blob's restored relocs
1432 RelocIterator reloc_iter(code_blob);
1433 fix_relocations(code_blob, reloc_iter);
1434 }
1435
1436 #ifndef PRODUCT
1437 code_blob->asm_remarks().init();
1438 read_asm_remarks(code_blob->asm_remarks());
1439 code_blob->dbg_strings().init();
1440 read_dbg_strings(code_blob->dbg_strings());
1441 #endif // PRODUCT
1442 }
1443
1444 void AOTCodeReader::read_stub_data(CodeBlob* code_blob, AOTStubData* stub_data) {
1445 GrowableArray<address>& addresses = stub_data->address_array();
1446 // Read the list of stub ids and associated start, end, secondary
1447 // and extra addresses and install them in the stub data.
1448 //
1449 // Also insert all start and secondary addresses into the AOTCache
1450 // address table so we correctly relocate this blob and any following
1451 // blobs/nmethods.
1452 //
1453 // n.b. if an error occurs and we need to regenerate any of these
1454 // stubs the address table will be updated as a side-effect of
1455 // regeneration.
1456
1457 address blob_base = code_blob->code_begin();
1458 uint blob_size = (uint)(code_blob->code_end() - blob_base);
1459 uint offset = align_read_int();
1460 LogStreamHandle(Trace, aot, codecache, stubs) log;
1461 if (log.is_enabled()) {
1462 log.print_cr("======== Stub data starts at offset %d", offset);
1463 }
1464 // read stub and entries until we see NO_STUBID
1465 StubId stub_id = *(StubId*)addr(offset); offset += sizeof(StubId);
1466 // we ought to have at least one saved stub in the blob
1467 assert(stub_id != StubId::NO_STUBID, "blob %s contains no stubs!", StubInfo::name(stub_data->blob_id()));
1468 while (stub_id != StubId::NO_STUBID) {
1469 assert(StubInfo::blob(stub_id) == stub_data->blob_id(), "sanity");
1470 int idx = StubInfo::stubgen_offset_in_blob(stub_data->blob_id(), stub_id);
1471 StubAddrRange& range = stub_data->get_range(idx);
1472 // we should only see a stub once
1473 assert(range.start_index() < 0, "repeated entry for stub %s", StubInfo::name(stub_id));
1474 int address_base = addresses.length();
1475 // start is an offset from the blob base
1476 uint start = *(uint*)addr(offset); offset += sizeof(uint);
1477 assert(start < blob_size, "stub %s start offset %d exceeds buffer length %d", StubInfo::name(stub_id), start, blob_size);
1478 address stub_start = blob_base + start;
1479 addresses.append(stub_start);
1480 // end is an offset from the stub start
1481 uint end = *(uint*)addr(offset); offset += sizeof(uint);
1482 assert(start + end <= blob_size, "stub %s end offset %d exceeds remaining buffer length %d", StubInfo::name(stub_id), end, blob_size - start);
1483 addresses.append(stub_start + end);
1484 // read count of secondary entries plus extras
1485 int entries_count = *(int*)addr(offset); offset += sizeof(int);
1486 assert(entries_count >= (StubInfo::entry_count(stub_id) - 1), "not enough entries for %s", StubInfo::name(stub_id));
1487 for (int i = 0; i < entries_count; i++) {
1488 // entry offset is an offset from the stub start less than or
1489 // equal to end
1490 uint entry = *(uint*)addr(offset); offset += sizeof(uint);
1491 if (entry <= end) {
        // secondary entry addresses must lie strictly below end but
        // extra addresses may equal it
1493 assert(entry < end || i >= StubInfo::entry_count(stub_id),
1494 "entry offset 0x%x exceeds stub length 0x%x for stub %s",
1495 entry, end, StubInfo::name(stub_id));
1496 addresses.append(stub_start + entry);
1497 } else {
1498 // special case: entry encodes a nullptr
1499 assert(entry == AOTCodeCache::NULL_ADDRESS_MARKER, "stub %s entry offset %d lies beyond stub end %d and does not equal NULL_ADDRESS_MARKER", StubInfo::name(stub_id), entry, end);
1500 addresses.append(nullptr);
1501 }
1502 }
1503 if (log.is_enabled()) {
1504 log.print_cr("======== read stub %s and %d addresses up to offset %d",
1505 StubInfo::name(stub_id), 2 + entries_count, offset);
1506 }
1507 range.init_entry(address_base, 2 + entries_count);
1508 // move on to next stub or NO_STUBID
1509 stub_id = *(StubId*)addr(offset); offset += sizeof(StubId);
1510 }
1511 if (log.is_enabled()) {
1512 log.print_cr("======== Stub data ends at offset %d", offset);
1513 }
1514
1515 set_read_position(offset);
1516 }
1517
1518 void AOTCodeCache::publish_external_addresses(GrowableArray<address>& addresses) {
1519 DEBUG_ONLY( _passed_init2 = true; )
1520 if (opened_cache == nullptr) {
1521 return;
1522 }
1523
1524 cache()->_table->add_external_addresses(addresses);
1525 }
1526
1527 void AOTCodeCache::publish_stub_addresses(CodeBlob &code_blob, BlobId blob_id, AOTStubData *stub_data) {
1528 if (stub_data != nullptr) {
1529 // register all entries in stub
1530 assert(StubInfo::stub_count(blob_id) > 1,
1531 "multiple stub data provided for single stub blob %s",
1532 StubInfo::name(blob_id));
1533 assert(blob_id == stub_data->blob_id(),
1534 "blob id %s does not match id in stub data %s",
1535 StubInfo::name(blob_id),
1536 StubInfo::name(stub_data->blob_id()));
1537 // iterate over all stubs in the blob
1538 StubId stub_id = StubInfo::stub_base(blob_id);
1539 int stub_cnt = StubInfo::stub_count(blob_id);
1540 GrowableArray<address>& addresses = stub_data->address_array();
1541 for (int i = 0; i < stub_cnt; i++) {
1542 assert(stub_id != StubId::NO_STUBID, "sanity");
1543 StubAddrRange& range = stub_data->get_range(i);
1544 int base = range.start_index();
1545 if (base >= 0) {
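        // addresses.at(base) is the stub start and base + 1 indexes the
        // stub end, so secondary entries and extras begin at base + 2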
1546 cache()->add_stub_entries(stub_id, addresses.at(base), &addresses, base + 2);
1547 }
1548 stub_id = StubInfo::next_in_blob(blob_id, stub_id);
1549 }
1550 // we should have exhausted all stub ids in the blob
1551 assert(stub_id == StubId::NO_STUBID, "sanity");
1552 } else {
1553 // register entry or entries for a single stub blob
1554 StubId stub_id = StubInfo::stub_base(blob_id);
1555 assert(StubInfo::stub_count(blob_id) == 1,
1556 "multiple stub blob %s provided without stub data",
1557 StubInfo::name(blob_id));
1558 address start = code_blob.code_begin();
1559 if (StubInfo::entry_count(stub_id) == 1) {
1560 assert(!code_blob.is_deoptimization_stub(), "expecting multiple entries for stub %s", StubInfo::name(stub_id));
1561 // register the blob base address as the only entry
1562 cache()->add_stub_entries(stub_id, start);
1563 } else {
1564 assert(code_blob.is_deoptimization_stub(), "only expecting one entry for stub %s", StubInfo::name(stub_id));
1565 DeoptimizationBlob *deopt_blob = code_blob.as_deoptimization_blob();
1566 assert(deopt_blob->unpack() == start, "unexpected offset 0x%x for deopt stub entry", (int)(deopt_blob->unpack() - start));
1567 GrowableArray<address> addresses;
1568 addresses.append(deopt_blob->unpack_with_exception());
1569 addresses.append(deopt_blob->unpack_with_reexecution());
1570 addresses.append(deopt_blob->unpack_with_exception_in_tls());
1571 #if INCLUDE_JVMCI
1572 addresses.append(deopt_blob->uncommon_trap());
1573 addresses.append(deopt_blob->implicit_exception_uncommon_trap());
1574 #endif // INCLUDE_JVMCI
1575 cache()->add_stub_entries(stub_id, start, &addresses, 0);
1576 }
1577 }
1578 }
1579
1580 // ------------ process code and data --------------
1581
// Can't use -1. It is a valid value for a jump-to-itself destination
// used by static call stubs: see NativeJump::jump_destination().
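// n.b. id_for_address() returns -1 for the jump-to-itself case and
// address_for_id() decodes -1 back to (address)-1, so such
// relocations are left untouched on load.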
1584 #define BAD_ADDRESS_ID -2
1585
1586 bool AOTCodeCache::write_relocations(CodeBlob& code_blob, RelocIterator& iter) {
1587 if (!align_write_int()) {
1588 return false;
1589 }
1590 GrowableArray<uint> reloc_data;
1591 LogStreamHandle(Trace, aot, codecache, reloc) log;
1592 while (iter.next()) {
1593 int idx = reloc_data.append(0); // default value
1594 switch (iter.type()) {
1595 case relocInfo::none:
1596 break;
1597 case relocInfo::runtime_call_type: {
1598 // Record offset of runtime destination
1599 CallRelocation* r = (CallRelocation*)iter.reloc();
1600 address dest = r->destination();
      if (dest == r->addr()) { // possible call via trampoline on AArch64
1602 dest = (address)-1; // do nothing in this case when loading this relocation
1603 }
1604 int id = _table->id_for_address(dest, iter, &code_blob);
1605 if (id == BAD_ADDRESS_ID) {
1606 return false;
1607 }
1608 reloc_data.at_put(idx, id);
1609 break;
1610 }
1611 case relocInfo::runtime_call_w_cp_type:
1612 log_debug(aot, codecache, reloc)("runtime_call_w_cp_type relocation is not implemented");
1613 return false;
1614 case relocInfo::external_word_type: {
1615 // Record offset of runtime target
1616 address target = ((external_word_Relocation*)iter.reloc())->target();
1617 int id = _table->id_for_address(target, iter, &code_blob);
1618 if (id == BAD_ADDRESS_ID) {
1619 return false;
1620 }
1621 reloc_data.at_put(idx, id);
1622 break;
1623 }
1624 case relocInfo::internal_word_type:
1625 break;
1626 case relocInfo::section_word_type:
1627 break;
1628 case relocInfo::post_call_nop_type:
1629 break;
1630 default:
1631 log_debug(aot, codecache, reloc)("relocation %d unimplemented", (int)iter.type());
      return false;
1634 }
1635 if (log.is_enabled()) {
1636 iter.print_current_on(&log);
1637 }
1638 }
1639
1640 // Write additional relocation data: uint per relocation
1641 // Write the count first
1642 int count = reloc_data.length();
1643 write_bytes(&count, sizeof(int));
1644 if (log.is_enabled()) {
1645 log.print_cr("======== extra relocations count=%d", count);
1646 log.print( " {");
1647 }
1648 bool first = true;
1649 for (GrowableArrayIterator<uint> iter = reloc_data.begin();
1650 iter != reloc_data.end(); ++iter) {
1651 uint value = *iter;
1652 int n = write_bytes(&value, sizeof(uint));
1653 if (n != sizeof(uint)) {
1654 return false;
1655 }
1656 if (log.is_enabled()) {
1657 if (first) {
1658 first = false;
1659 log.print("%d", value);
1660 } else {
1661 log.print(", %d", value);
1662 }
1663 }
1664 }
1665 if (log.is_enabled()) {
1666 log.print_cr("}");
1667 }
1668 return true;
1669 }
1670
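// Fix up relocations using the extra relocation data written by
// write_relocations: an aligned int count followed by one uint per
// relocation in stream order. For runtime_call and external_word
// relocations the uint is an address table id; for other relocation
// types it is unused.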
1671 void AOTCodeReader::fix_relocations(CodeBlob *code_blob, RelocIterator& iter) {
1672 uint offset = align_read_int();
1673 int reloc_count = *(int*)addr(offset);
1674 offset += sizeof(int);
1675 uint* reloc_data = (uint*)addr(offset);
1676 offset += (reloc_count * sizeof(uint));
1677 set_read_position(offset);
1678
1679 LogStreamHandle(Trace, aot, codecache, reloc) log;
1680 if (log.is_enabled()) {
1681 log.print_cr("======== extra relocations count=%d", reloc_count);
1682 log.print(" {");
    for (int i = 0; i < reloc_count; i++) {
1684 if (i == 0) {
1685 log.print("%d", reloc_data[i]);
1686 } else {
1687 log.print(", %d", reloc_data[i]);
1688 }
1689 }
1690 log.print_cr("}");
1691 }
1692
1693 int j = 0;
1694 while (iter.next()) {
1695 switch (iter.type()) {
1696 case relocInfo::none:
1697 break;
1698 case relocInfo::runtime_call_type: {
1699 address dest = _cache->address_for_id(reloc_data[j]);
1700 if (dest != (address)-1) {
1701 ((CallRelocation*)iter.reloc())->set_destination(dest);
1702 }
1703 break;
1704 }
1705 case relocInfo::runtime_call_w_cp_type:
1706 // this relocation should not be in cache (see write_relocations)
1707 assert(false, "runtime_call_w_cp_type relocation is not implemented");
1708 break;
1709 case relocInfo::external_word_type: {
1710 address target = _cache->address_for_id(reloc_data[j]);
1711 // Add external address to global table
1712 int index = ExternalsRecorder::find_index(target);
1713 // Update index in relocation
1714 Relocation::add_jint(iter.data(), index);
1715 external_word_Relocation* reloc = (external_word_Relocation*)iter.reloc();
1716 assert(reloc->target() == target, "sanity");
1717 reloc->set_value(target); // Patch address in the code
1718 break;
1719 }
1720 case relocInfo::internal_word_type: {
1721 internal_word_Relocation* r = (internal_word_Relocation*)iter.reloc();
1722 r->fix_relocation_after_aot_load(aot_code_entry()->dumptime_content_start_addr(), code_blob->content_begin());
1723 break;
1724 }
1725 case relocInfo::section_word_type: {
1726 section_word_Relocation* r = (section_word_Relocation*)iter.reloc();
1727 r->fix_relocation_after_aot_load(aot_code_entry()->dumptime_content_start_addr(), code_blob->content_begin());
1728 break;
1729 }
1730 case relocInfo::post_call_nop_type:
1731 break;
1732 default:
      assert(false, "relocation %d unimplemented", (int)iter.type());
1734 break;
1735 }
1736 if (log.is_enabled()) {
1737 iter.print_current_on(&log);
1738 }
1739 j++;
1740 }
1741 assert(j == reloc_count, "sanity");
1742 }
1743
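// Oop map sets are saved verbatim: an aligned int byte count followed
// by the raw ImmutableOopMapSet bytes, which the reader hands back as
// a pointer into the loaded cache buffer.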
1744 bool AOTCodeCache::write_oop_map_set(CodeBlob& cb) {
1745 if (!align_write_int()) {
1746 return false;
1747 }
1748 ImmutableOopMapSet* oopmaps = cb.oop_maps();
1749 int oopmaps_size = oopmaps->nr_of_bytes();
1750 if (!write_bytes(&oopmaps_size, sizeof(int))) {
1751 return false;
1752 }
1753 uint n = write_bytes(oopmaps, oopmaps->nr_of_bytes());
1754 if (n != (uint)oopmaps->nr_of_bytes()) {
1755 return false;
1756 }
1757 return true;
1758 }
1759
1760 ImmutableOopMapSet* AOTCodeReader::read_oop_map_set() {
1761 uint offset = align_read_int();
1762 int size = *(int *)addr(offset);
1763 offset += sizeof(int);
1764 ImmutableOopMapSet* oopmaps = (ImmutableOopMapSet *)addr(offset);
1765 offset += size;
1766 set_read_position(offset);
1767 return oopmaps;
1768 }
1769
1770 #ifndef PRODUCT
1771 bool AOTCodeCache::write_asm_remarks(CodeBlob& cb) {
1772 if (!align_write_int()) {
1773 return false;
1774 }
1775 // Write asm remarks
1776 uint* count_ptr = (uint *)reserve_bytes(sizeof(uint));
1777 if (count_ptr == nullptr) {
1778 return false;
1779 }
1780 uint count = 0;
1781 bool result = cb.asm_remarks().iterate([&] (uint offset, const char* str) -> bool {
1782 log_trace(aot, codecache, stubs)("asm remark offset=%d, str='%s'", offset, str);
1783 uint n = write_bytes(&offset, sizeof(uint));
1784 if (n != sizeof(uint)) {
1785 return false;
1786 }
1787 const char* cstr = add_C_string(str);
1788 int id = _table->id_for_C_string((address)cstr);
1789 assert(id != -1, "asm remark string '%s' not found in AOTCodeAddressTable", str);
1790 n = write_bytes(&id, sizeof(int));
1791 if (n != sizeof(int)) {
1792 return false;
1793 }
1794 count += 1;
1795 return true;
1796 });
1797 *count_ptr = count;
1798 return result;
1799 }
1800
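// Read asm remarks written by write_asm_remarks: an aligned uint count
// followed by count (uint code offset, int string table id) pairs.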
1801 void AOTCodeReader::read_asm_remarks(AsmRemarks& asm_remarks) {
1802 // Read asm remarks
1803 uint offset = align_read_int();
1804 uint count = *(uint *)addr(offset);
1805 offset += sizeof(uint);
1806 for (uint i = 0; i < count; i++) {
1807 uint remark_offset = *(uint *)addr(offset);
1808 offset += sizeof(uint);
    int remark_string_id = *(int *)addr(offset);
1810 offset += sizeof(int);
1811 const char* remark = (const char*)_cache->address_for_C_string(remark_string_id);
1812 asm_remarks.insert(remark_offset, remark);
1813 }
1814 set_read_position(offset);
1815 }
1816
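// Write dbg strings as an aligned uint count followed by count int
// string table ids; the string bytes themselves are saved via the
// shared C string table.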
1817 bool AOTCodeCache::write_dbg_strings(CodeBlob& cb) {
1818 if (!align_write_int()) {
1819 return false;
1820 }
1821 // Write dbg strings
1822 uint* count_ptr = (uint *)reserve_bytes(sizeof(uint));
1823 if (count_ptr == nullptr) {
1824 return false;
1825 }
1826 uint count = 0;
1827 bool result = cb.dbg_strings().iterate([&] (const char* str) -> bool {
1828 log_trace(aot, codecache, stubs)("dbg string=%s", str);
1829 const char* cstr = add_C_string(str);
1830 int id = _table->id_for_C_string((address)cstr);
    assert(id != -1, "dbg string '%s' not found in AOTCodeAddressTable", str);
1832 uint n = write_bytes(&id, sizeof(int));
1833 if (n != sizeof(int)) {
1834 return false;
1835 }
1836 count += 1;
1837 return true;
1838 });
1839 *count_ptr = count;
1840 return result;
1841 }
1842
1843 void AOTCodeReader::read_dbg_strings(DbgStrings& dbg_strings) {
1844 // Read dbg strings
1845 uint offset = align_read_int();
1846 uint count = *(uint *)addr(offset);
1847 offset += sizeof(uint);
1848 for (uint i = 0; i < count; i++) {
    int string_id = *(int *)addr(offset);
1850 offset += sizeof(int);
1851 const char* str = (const char*)_cache->address_for_C_string(string_id);
1852 dbg_strings.insert(str);
1853 }
1854 set_read_position(offset);
1855 }
1856 #endif // PRODUCT
1857
1858 //======================= AOTCodeAddressTable ===============
1859
// address table ids for generated routine entry addresses, external
// addresses and C string addresses are partitioned into positive
// integer ranges defined by the following positive base and max
// values i.e. [_extrs_base, _extrs_base + _extrs_max - 1],
// [_stubs_base, _stubs_base + _stubs_max - 1] and [_c_str_base,
// _c_str_base + _c_str_max - 1].
1866
1867 #define _extrs_max 380
1868 #define _stubs_max static_cast<int>(EntryId::NUM_ENTRYIDS)
1869
1870 #define _extrs_base 0
1871 #define _stubs_base (_extrs_base + _extrs_max)
1872 #define _all_max (_stubs_base + _stubs_max)
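
// The resulting id space is laid out as follows; ids above
// _c_str_base + _c_str_max encode a distance from os::init (see
// id_for_address and address_for_id):
//
//   [_extrs_base, _stubs_base)                external addresses
//   [_stubs_base, _all_max)                   stub entry addresses
//   [_c_str_base, _c_str_base + _c_str_max)   C strings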
1873
// setters for external addresses and string addresses insert new
// addresses in the order they are encountered, which must remain the
// same across an assembly run and any subsequent production run
1877
1878 #define ADD_EXTERNAL_ADDRESS(addr) \
1879 { \
1880 hash_address((address) addr, _extrs_base + _extrs_length); \
1881 _extrs_addr[_extrs_length++] = (address) (addr); \
1882 assert(_extrs_length <= _extrs_max, "increase size"); \
1883 }
1884
// insert into the address hash table the index of an external
1886 // address or a stub address in the list of external or stub
1887 // addresses, respectively, keyed by the relevant address
1888
1889 void AOTCodeAddressTable::hash_address(address addr, int idx) {
1890 // only do this if we have a non-null address to record and the
1891 // cache is open for dumping
1892 if (addr == nullptr) {
1893 return;
1894 }
1895 // check opened_cache because this can be called before the cache is
1896 // properly initialized and only continue when dumping is enabled
1897 if (opened_cache != nullptr && opened_cache->for_dump()) {
1898 if (_hash_table == nullptr) {
1899 _hash_table = new (mtCode) AOTCodeAddressHashTable();
1900 }
1901 assert(_hash_table->get(addr) == nullptr, "repeated insert of address " INTPTR_FORMAT, p2i(addr));
1902 _hash_table->put(addr, idx);
1903 log_trace(aot, codecache)("Address " INTPTR_FORMAT " inserted into AOT Code Cache address hash table with index '%d'",
1904 p2i(addr), idx);
1905 }
1906 }
1907
1908 static bool initializing_extrs = false;
1909
1910 void AOTCodeAddressTable::init_extrs() {
1911 if (_extrs_complete || initializing_extrs) return; // Done already
1912
1913 initializing_extrs = true;
1914 _extrs_addr = NEW_C_HEAP_ARRAY(address, _extrs_max, mtCode);
1915
1916 _extrs_length = 0;
1917
1918 {
1919 // Required by initial stubs
1920 ADD_EXTERNAL_ADDRESS(SharedRuntime::exception_handler_for_return_address); // used by forward_exception
1921 ADD_EXTERNAL_ADDRESS(CompressedOops::base_addr()); // used by call_stub
1922 ADD_EXTERNAL_ADDRESS(Thread::current); // used by call_stub
1923 ADD_EXTERNAL_ADDRESS(SharedRuntime::throw_StackOverflowError);
1924 ADD_EXTERNAL_ADDRESS(SharedRuntime::throw_delayed_StackOverflowError);
1925 }
1926
1927 // Record addresses of VM runtime methods
1928 ADD_EXTERNAL_ADDRESS(SharedRuntime::fixup_callers_callsite);
1929 ADD_EXTERNAL_ADDRESS(SharedRuntime::handle_wrong_method);
1930 ADD_EXTERNAL_ADDRESS(SharedRuntime::handle_wrong_method_abstract);
1931 ADD_EXTERNAL_ADDRESS(SharedRuntime::handle_wrong_method_ic_miss);
1932 #if defined(AARCH64) && !defined(ZERO)
1933 ADD_EXTERNAL_ADDRESS(JavaThread::aarch64_get_thread_helper);
1934 ADD_EXTERNAL_ADDRESS(BarrierSetAssembler::patching_epoch_addr());
1935 #endif
1936
1937 #ifndef PRODUCT
1938 ADD_EXTERNAL_ADDRESS(&SharedRuntime::_jbyte_array_copy_ctr); // used by arraycopy stub on arm32 and x86_64
1939 ADD_EXTERNAL_ADDRESS(&SharedRuntime::_jshort_array_copy_ctr); // used by arraycopy stub
1940 ADD_EXTERNAL_ADDRESS(&SharedRuntime::_jint_array_copy_ctr); // used by arraycopy stub
1941 ADD_EXTERNAL_ADDRESS(&SharedRuntime::_jlong_array_copy_ctr); // used by arraycopy stub
1942 ADD_EXTERNAL_ADDRESS(&SharedRuntime::_oop_array_copy_ctr); // used by arraycopy stub
1943 ADD_EXTERNAL_ADDRESS(&SharedRuntime::_checkcast_array_copy_ctr); // used by arraycopy stub
1944 ADD_EXTERNAL_ADDRESS(&SharedRuntime::_unsafe_array_copy_ctr); // used by arraycopy stub
1945 ADD_EXTERNAL_ADDRESS(&SharedRuntime::_generic_array_copy_ctr); // used by arraycopy stub
1946 ADD_EXTERNAL_ADDRESS(&SharedRuntime::_unsafe_set_memory_ctr); // used by arraycopy stub
1947 #endif /* PRODUCT */
1948
1949 ADD_EXTERNAL_ADDRESS(SharedRuntime::enable_stack_reserved_zone);
1950
1951 #if defined(AMD64) && !defined(ZERO)
1952 ADD_EXTERNAL_ADDRESS(SharedRuntime::montgomery_multiply);
1953 ADD_EXTERNAL_ADDRESS(SharedRuntime::montgomery_square);
1954 #endif // defined(AMD64) && !defined(ZERO)
1955
1956 ADD_EXTERNAL_ADDRESS(SharedRuntime::d2f);
1957 ADD_EXTERNAL_ADDRESS(SharedRuntime::d2i);
1958 ADD_EXTERNAL_ADDRESS(SharedRuntime::d2l);
1959 ADD_EXTERNAL_ADDRESS(SharedRuntime::dcos);
1960 ADD_EXTERNAL_ADDRESS(SharedRuntime::dexp);
1961 ADD_EXTERNAL_ADDRESS(SharedRuntime::dlog);
1962 ADD_EXTERNAL_ADDRESS(SharedRuntime::dlog10);
1963 ADD_EXTERNAL_ADDRESS(SharedRuntime::dpow);
1964 #ifndef ZERO
1965 ADD_EXTERNAL_ADDRESS(SharedRuntime::drem);
1966 #endif
1967 ADD_EXTERNAL_ADDRESS(SharedRuntime::dsin);
1968 ADD_EXTERNAL_ADDRESS(SharedRuntime::dtan);
1969 ADD_EXTERNAL_ADDRESS(SharedRuntime::f2i);
1970 ADD_EXTERNAL_ADDRESS(SharedRuntime::f2l);
1971 #ifndef ZERO
1972 ADD_EXTERNAL_ADDRESS(SharedRuntime::frem);
1973 #endif
1974 ADD_EXTERNAL_ADDRESS(SharedRuntime::l2d);
1975 ADD_EXTERNAL_ADDRESS(SharedRuntime::l2f);
1976 ADD_EXTERNAL_ADDRESS(SharedRuntime::ldiv);
1977 ADD_EXTERNAL_ADDRESS(SharedRuntime::lmul);
1978 ADD_EXTERNAL_ADDRESS(SharedRuntime::lrem);
1979
1980 #if INCLUDE_JVMTI
1981 ADD_EXTERNAL_ADDRESS(&JvmtiExport::_should_notify_object_alloc);
1982 #endif /* INCLUDE_JVMTI */
1983
1984 ADD_EXTERNAL_ADDRESS(ThreadIdentifier::unsafe_offset());
1985 // already added
1986 // ADD_EXTERNAL_ADDRESS(Thread::current);
1987
1988 ADD_EXTERNAL_ADDRESS(os::javaTimeMillis);
1989 ADD_EXTERNAL_ADDRESS(os::javaTimeNanos);
1990 #ifndef PRODUCT
1991 ADD_EXTERNAL_ADDRESS(os::breakpoint);
1992 #endif
1993
1994 ADD_EXTERNAL_ADDRESS(StubRoutines::crc_table_addr());
1995 #ifndef PRODUCT
1996 ADD_EXTERNAL_ADDRESS(&SharedRuntime::_partial_subtype_ctr);
1997 #endif
1998
1999 #if INCLUDE_JFR
2000 ADD_EXTERNAL_ADDRESS(JfrIntrinsicSupport::write_checkpoint);
2001 ADD_EXTERNAL_ADDRESS(JfrIntrinsicSupport::return_lease);
2002 #endif
2003
2004 ADD_EXTERNAL_ADDRESS(UpcallLinker::handle_uncaught_exception); // used by upcall_stub_exception_handler
2005
2006 {
2007 // Required by Shared blobs
2008 ADD_EXTERNAL_ADDRESS(Deoptimization::fetch_unroll_info);
2009 ADD_EXTERNAL_ADDRESS(Deoptimization::unpack_frames);
2010 ADD_EXTERNAL_ADDRESS(SafepointSynchronize::handle_polling_page_exception);
2011 ADD_EXTERNAL_ADDRESS(SharedRuntime::resolve_opt_virtual_call_C);
2012 ADD_EXTERNAL_ADDRESS(SharedRuntime::resolve_virtual_call_C);
2013 ADD_EXTERNAL_ADDRESS(SharedRuntime::resolve_static_call_C);
2014 // already added
2015 // ADD_EXTERNAL_ADDRESS(SharedRuntime::throw_delayed_StackOverflowError);
2016 ADD_EXTERNAL_ADDRESS(SharedRuntime::throw_AbstractMethodError);
2017 ADD_EXTERNAL_ADDRESS(SharedRuntime::throw_IncompatibleClassChangeError);
2018 ADD_EXTERNAL_ADDRESS(SharedRuntime::throw_NullPointerException_at_call);
2019 }
2020
2021 #ifdef COMPILER1
2022 {
2023 // Required by C1 blobs
2024 ADD_EXTERNAL_ADDRESS(static_cast<int (*)(oopDesc*)>(SharedRuntime::dtrace_object_alloc));
2025 ADD_EXTERNAL_ADDRESS(SharedRuntime::register_finalizer);
2026 ADD_EXTERNAL_ADDRESS(Runtime1::is_instance_of);
2027 ADD_EXTERNAL_ADDRESS(Runtime1::exception_handler_for_pc);
2028 ADD_EXTERNAL_ADDRESS(Runtime1::check_abort_on_vm_exception);
2029 ADD_EXTERNAL_ADDRESS(Runtime1::new_instance);
2030 ADD_EXTERNAL_ADDRESS(Runtime1::counter_overflow);
2031 ADD_EXTERNAL_ADDRESS(Runtime1::new_type_array);
2032 ADD_EXTERNAL_ADDRESS(Runtime1::new_object_array);
2033 ADD_EXTERNAL_ADDRESS(Runtime1::new_multi_array);
2034 ADD_EXTERNAL_ADDRESS(Runtime1::throw_range_check_exception);
2035 ADD_EXTERNAL_ADDRESS(Runtime1::throw_index_exception);
2036 ADD_EXTERNAL_ADDRESS(Runtime1::throw_div0_exception);
2037 ADD_EXTERNAL_ADDRESS(Runtime1::throw_null_pointer_exception);
2038 ADD_EXTERNAL_ADDRESS(Runtime1::throw_array_store_exception);
2039 ADD_EXTERNAL_ADDRESS(Runtime1::throw_class_cast_exception);
2040 ADD_EXTERNAL_ADDRESS(Runtime1::throw_incompatible_class_change_error);
2041 ADD_EXTERNAL_ADDRESS(Runtime1::monitorenter);
2042 ADD_EXTERNAL_ADDRESS(Runtime1::monitorexit);
2043 ADD_EXTERNAL_ADDRESS(Runtime1::deoptimize);
2044 ADD_EXTERNAL_ADDRESS(Runtime1::access_field_patching);
2045 ADD_EXTERNAL_ADDRESS(Runtime1::move_klass_patching);
2046 ADD_EXTERNAL_ADDRESS(Runtime1::move_mirror_patching);
2047 ADD_EXTERNAL_ADDRESS(Runtime1::move_appendix_patching);
2048 ADD_EXTERNAL_ADDRESS(Runtime1::predicate_failed_trap);
2049 ADD_EXTERNAL_ADDRESS(Runtime1::unimplemented_entry);
2050 // already added
2051 // ADD_EXTERNAL_ADDRESS(Thread::current);
2052 ADD_EXTERNAL_ADDRESS(CompressedKlassPointers::base_addr());
2053 }
2054 #endif
2055
2056 #ifdef COMPILER2
2057 {
2058 // Required by C2 blobs
2059 ADD_EXTERNAL_ADDRESS(Deoptimization::uncommon_trap);
2060 ADD_EXTERNAL_ADDRESS(OptoRuntime::handle_exception_C);
2061 ADD_EXTERNAL_ADDRESS(OptoRuntime::new_instance_C);
2062 ADD_EXTERNAL_ADDRESS(OptoRuntime::new_array_C);
2063 ADD_EXTERNAL_ADDRESS(OptoRuntime::new_array_nozero_C);
2064 ADD_EXTERNAL_ADDRESS(OptoRuntime::multianewarray2_C);
2065 ADD_EXTERNAL_ADDRESS(OptoRuntime::multianewarray3_C);
2066 ADD_EXTERNAL_ADDRESS(OptoRuntime::multianewarray4_C);
2067 ADD_EXTERNAL_ADDRESS(OptoRuntime::multianewarray5_C);
2068 ADD_EXTERNAL_ADDRESS(OptoRuntime::multianewarrayN_C);
2069 ADD_EXTERNAL_ADDRESS(OptoRuntime::complete_monitor_locking_C);
2070 ADD_EXTERNAL_ADDRESS(OptoRuntime::monitor_notify_C);
2071 ADD_EXTERNAL_ADDRESS(OptoRuntime::monitor_notifyAll_C);
2072 ADD_EXTERNAL_ADDRESS(OptoRuntime::rethrow_C);
2073 ADD_EXTERNAL_ADDRESS(OptoRuntime::slow_arraycopy_C);
2074 ADD_EXTERNAL_ADDRESS(OptoRuntime::register_finalizer_C);
2075 ADD_EXTERNAL_ADDRESS(OptoRuntime::vthread_end_first_transition_C);
2076 ADD_EXTERNAL_ADDRESS(OptoRuntime::vthread_start_final_transition_C);
2077 ADD_EXTERNAL_ADDRESS(OptoRuntime::vthread_start_transition_C);
2078 ADD_EXTERNAL_ADDRESS(OptoRuntime::vthread_end_transition_C);
2080 #if defined(AARCH64) && ! defined(PRODUCT)
2081 ADD_EXTERNAL_ADDRESS(JavaThread::verify_cross_modify_fence_failure);
2082 #endif // AARCH64 && !PRODUCT
2083 }
2084 #endif // COMPILER2
2085
2086 #if INCLUDE_G1GC
2087 ADD_EXTERNAL_ADDRESS(G1BarrierSetRuntime::write_ref_field_pre_entry);
2088 ADD_EXTERNAL_ADDRESS(G1BarrierSetRuntime::write_ref_array_pre_narrow_oop_entry); // used by arraycopy stubs
2089 ADD_EXTERNAL_ADDRESS(G1BarrierSetRuntime::write_ref_array_pre_oop_entry); // used by arraycopy stubs
2090 ADD_EXTERNAL_ADDRESS(G1BarrierSetRuntime::write_ref_array_post_entry); // used by arraycopy stubs
2091 ADD_EXTERNAL_ADDRESS(BarrierSetNMethod::nmethod_stub_entry_barrier); // used by method_entry_barrier
2092
2093 #endif
2094 #if INCLUDE_SHENANDOAHGC
2095 ADD_EXTERNAL_ADDRESS(ShenandoahRuntime::write_barrier_pre);
2096 ADD_EXTERNAL_ADDRESS(ShenandoahRuntime::load_reference_barrier_strong);
2097 ADD_EXTERNAL_ADDRESS(ShenandoahRuntime::load_reference_barrier_strong_narrow);
2098 ADD_EXTERNAL_ADDRESS(ShenandoahRuntime::load_reference_barrier_weak);
2099 ADD_EXTERNAL_ADDRESS(ShenandoahRuntime::load_reference_barrier_weak_narrow);
2100 ADD_EXTERNAL_ADDRESS(ShenandoahRuntime::load_reference_barrier_phantom);
2101 ADD_EXTERNAL_ADDRESS(ShenandoahRuntime::load_reference_barrier_phantom_narrow);
2102 ADD_EXTERNAL_ADDRESS(ShenandoahRuntime::arraycopy_barrier_oop);
2103 ADD_EXTERNAL_ADDRESS(ShenandoahRuntime::arraycopy_barrier_narrow_oop);
2104 #endif
2105 #if INCLUDE_ZGC
2106 ADD_EXTERNAL_ADDRESS(ZBarrierSetRuntime::load_barrier_on_oop_field_preloaded_addr());
2107 ADD_EXTERNAL_ADDRESS(ZBarrierSetRuntime::load_barrier_on_oop_field_preloaded_store_good_addr());
2108 ADD_EXTERNAL_ADDRESS(ZBarrierSetRuntime::load_barrier_on_weak_oop_field_preloaded_addr());
2109 ADD_EXTERNAL_ADDRESS(ZBarrierSetRuntime::load_barrier_on_phantom_oop_field_preloaded_addr());
2110 ADD_EXTERNAL_ADDRESS(ZBarrierSetRuntime::no_keepalive_load_barrier_on_weak_oop_field_preloaded_addr());
2111 ADD_EXTERNAL_ADDRESS(ZBarrierSetRuntime::no_keepalive_load_barrier_on_phantom_oop_field_preloaded_addr());
2112 ADD_EXTERNAL_ADDRESS(ZBarrierSetRuntime::store_barrier_on_oop_field_with_healing_addr());
2113 ADD_EXTERNAL_ADDRESS(ZBarrierSetRuntime::store_barrier_on_oop_field_without_healing_addr());
2114 ADD_EXTERNAL_ADDRESS(ZBarrierSetRuntime::no_keepalive_store_barrier_on_oop_field_without_healing_addr());
2115 ADD_EXTERNAL_ADDRESS(ZBarrierSetRuntime::store_barrier_on_native_oop_field_without_healing_addr());
2116 ADD_EXTERNAL_ADDRESS(ZBarrierSetRuntime::load_barrier_on_oop_array_addr());
2117
2118 ADD_EXTERNAL_ADDRESS(ZPointerVectorLoadBadMask);
2119 ADD_EXTERNAL_ADDRESS(ZPointerVectorStoreBadMask);
2120 ADD_EXTERNAL_ADDRESS(ZPointerVectorStoreGoodMask);
2121 #if defined(AMD64)
2122 ADD_EXTERNAL_ADDRESS(&ZPointerLoadShift);
2123 ADD_EXTERNAL_ADDRESS(&ZPointerLoadShiftTable);
2124 #endif
2125 #endif
2126 #ifndef ZERO
2127 #if defined(AMD64) || defined(AARCH64) || defined(RISCV64)
2128 ADD_EXTERNAL_ADDRESS(MacroAssembler::debug64);
2129 #endif // defined(AMD64) || defined(AARCH64) || defined(RISCV64)
2130 #if defined(AMD64)
2131 ADD_EXTERNAL_ADDRESS(warning);
2132 #endif // defined(AMD64)
2133 #endif // ZERO
2134
2135 // addresses of fields in AOT runtime constants area
2136 address* p = AOTRuntimeConstants::field_addresses_list();
2137 while (*p != nullptr) {
2138 address to_add = (address)*p++;
2139 ADD_EXTERNAL_ADDRESS(to_add);
2140 }
2141
2142 log_debug(aot, codecache, init)("External addresses opened and recorded");
2143 // allocate storage for stub entries
2144 _stubs_addr = NEW_C_HEAP_ARRAY(address, _stubs_max, mtCode);
2145 log_debug(aot, codecache, init)("Stub addresses opened");
2146 }
2147
2148 void AOTCodeAddressTable::init_extrs2() {
2149 assert(initializing_extrs && !_extrs_complete,
2150 "invalid sequence for init_extrs2");
2151
2152 {
2153 ADD_EXTERNAL_ADDRESS(Continuation::prepare_thaw); // used by cont_thaw
2154 ADD_EXTERNAL_ADDRESS(Continuation::thaw_entry()); // used by cont_thaw
2155 ADD_EXTERNAL_ADDRESS(ContinuationEntry::thaw_call_pc_address()); // used by cont_preempt_stub
2156 }
2157 _extrs_complete = true;
2158 initializing_extrs = false;
2159 log_debug(aot, codecache, init)("External addresses recorded and closed");
2160 }
2161
2162 void AOTCodeAddressTable::add_external_addresses(GrowableArray<address>& addresses) {
2163 assert(initializing_extrs && !_extrs_complete,
2164 "invalid sequence for add_external_addresses");
2165 for (int i = 0; i < addresses.length(); i++) {
2166 ADD_EXTERNAL_ADDRESS(addresses.at(i));
2167 }
2168 log_debug(aot, codecache, init)("Recorded %d additional external addresses",
2169 addresses.length());
2170 }
2171
2172 void AOTCodeAddressTable::add_stub_entry(EntryId entry_id, address a) {
2173 assert(_extrs_complete || initializing_extrs,
2174 "recording stub entry address before external addresses complete");
2175 assert(!(StubInfo::is_shared(StubInfo::stub(entry_id)) && _shared_stubs_complete), "too late to add shared entry");
2176 assert(!(StubInfo::is_stubgen(StubInfo::stub(entry_id)) && _stubgen_stubs_complete), "too late to add stubgen entry");
2177 assert(!(StubInfo::is_c1(StubInfo::stub(entry_id)) && _c1_stubs_complete), "too late to add c1 entry");
2178 assert(!(StubInfo::is_c2(StubInfo::stub(entry_id)) && _c2_stubs_complete), "too late to add c2 entry");
  log_debug(aot, stubs)("Recording address " INTPTR_FORMAT " for %s entry %s", p2i(a), StubInfo::name(StubInfo::stubgroup(entry_id)), StubInfo::name(entry_id));
2180 int idx = static_cast<int>(entry_id);
2181 hash_address(a, _stubs_base + idx);
2182 _stubs_addr[idx] = a;
2183 }
2184
2185 void AOTCodeAddressTable::set_shared_stubs_complete() {
2186 assert(!_shared_stubs_complete, "repeated close for shared stubs!");
2187 _shared_stubs_complete = true;
2188 log_debug(aot, codecache, init)("Shared stubs closed");
2189 }
2190
2191 void AOTCodeAddressTable::set_c1_stubs_complete() {
2192 assert(!_c1_stubs_complete, "repeated close for c1 stubs!");
2193 _c1_stubs_complete = true;
2194 log_debug(aot, codecache, init)("C1 stubs closed");
2195 }
2196
2197 void AOTCodeAddressTable::set_c2_stubs_complete() {
2198 assert(!_c2_stubs_complete, "repeated close for c2 stubs!");
2199 _c2_stubs_complete = true;
2200 log_debug(aot, codecache, init)("C2 stubs closed");
2201 }
2202
2203 void AOTCodeAddressTable::set_stubgen_stubs_complete() {
2204 assert(!_stubgen_stubs_complete, "repeated close for stubgen stubs!");
2205 _stubgen_stubs_complete = true;
2206 log_debug(aot, codecache, init)("StubGen stubs closed");
2207 }
2208
2209 #ifdef PRODUCT
2210 #define MAX_STR_COUNT 200
2211 #else
2212 #define MAX_STR_COUNT 2000
2213 #endif
2214 #define _c_str_max MAX_STR_COUNT
2215 static const int _c_str_base = _all_max;
2216
2217 static const char* _C_strings_in[MAX_STR_COUNT] = {nullptr}; // Incoming strings
2218 static const char* _C_strings[MAX_STR_COUNT] = {nullptr}; // Our duplicates
2219 static int _C_strings_count = 0;
2220 static int _C_strings_s[MAX_STR_COUNT] = {0};
2221 static int _C_strings_id[MAX_STR_COUNT] = {0};
2222 static int _C_strings_used = 0;
2223
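// The cached C string section consists of an array of uint lengths
// (each including the trailing NUL) followed by the concatenated
// string bytes. load_strings copies the bytes into a C heap buffer
// that outlives the cache mapping.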
2224 void AOTCodeCache::load_strings() {
2225 uint strings_count = _load_header->strings_count();
2226 if (strings_count == 0) {
2227 return;
2228 }
2229 if (strings_count > MAX_STR_COUNT) {
2230 fatal("Invalid strings_count loaded from AOT Code Cache: %d > MAX_STR_COUNT [%d]", strings_count, MAX_STR_COUNT);
2231 return;
2232 }
2233 uint strings_offset = _load_header->strings_offset();
2234 uint* string_lengths = (uint*)addr(strings_offset);
2235 strings_offset += (strings_count * sizeof(uint));
2236 uint strings_size = _load_header->entries_offset() - strings_offset;
  // We have to keep cached strings longer than the _cache buffer
  // because they are referenced from compiled code which may
  // still be executed on VM exit after _cache is freed.
2240 char* p = NEW_C_HEAP_ARRAY(char, strings_size+1, mtCode);
2241 memcpy(p, addr(strings_offset), strings_size);
2242 _C_strings_buf = p;
2243 for (uint i = 0; i < strings_count; i++) {
2244 _C_strings[i] = p;
2245 uint len = string_lengths[i];
2246 _C_strings_s[i] = i;
2247 _C_strings_id[i] = i;
2248 log_trace(aot, codecache, stringtable)("load_strings: _C_strings[%d] " INTPTR_FORMAT " '%s'", i, p2i(p), p);
2249 p += len;
2250 }
2251 assert((uint)(p - _C_strings_buf) <= strings_size, "(" INTPTR_FORMAT " - " INTPTR_FORMAT ") = %d > %d ", p2i(p), p2i(_C_strings_buf), (uint)(p - _C_strings_buf), strings_size);
2252 _C_strings_count = strings_count;
2253 _C_strings_used = strings_count;
2254 log_debug(aot, codecache, init)(" Loaded %d C strings of total length %d at offset %d from AOT Code Cache", _C_strings_count, strings_size, strings_offset);
2255 }
2256
2257 int AOTCodeCache::store_strings() {
2258 if (_C_strings_used > 0) {
2259 MutexLocker ml(AOTCodeCStrings_lock, Mutex::_no_safepoint_check_flag);
2260 uint offset = _write_position;
2261 uint length = 0;
2262 uint* lengths = (uint *)reserve_bytes(sizeof(uint) * _C_strings_used);
2263 if (lengths == nullptr) {
2264 return -1;
2265 }
2266 for (int i = 0; i < _C_strings_used; i++) {
2267 const char* str = _C_strings[_C_strings_s[i]];
2268 log_trace(aot, codecache, stringtable)("store_strings: _C_strings[%d] " INTPTR_FORMAT " '%s'", i, p2i(str), str);
2269 uint len = (uint)strlen(str) + 1;
2270 length += len;
2271 assert(len < 1000, "big string: %s", str);
2272 lengths[i] = len;
2273 uint n = write_bytes(str, len);
2274 if (n != len) {
2275 return -1;
2276 }
2277 }
2278 log_debug(aot, codecache, exit)(" Wrote %d C strings of total length %d at offset %d to AOT Code Cache",
2279 _C_strings_used, length, offset);
2280 }
2281 return _C_strings_used;
2282 }
2283
2284 const char* AOTCodeCache::add_C_string(const char* str) {
2285 if (is_on_for_dump() && str != nullptr) {
2286 MutexLocker ml(AOTCodeCStrings_lock, Mutex::_no_safepoint_check_flag);
2287 AOTCodeAddressTable* table = addr_table();
2288 if (table != nullptr) {
2289 return table->add_C_string(str);
2290 }
2291 }
2292 return str;
2293 }
2294
2295 const char* AOTCodeAddressTable::add_C_string(const char* str) {
2296 if (_extrs_complete || initializing_extrs) {
2297 // Check previous strings address
2298 for (int i = 0; i < _C_strings_count; i++) {
2299 if (_C_strings_in[i] == str) {
2300 return _C_strings[i]; // Found previous one - return our duplicate
2301 } else if (strcmp(_C_strings[i], str) == 0) {
2302 return _C_strings[i];
2303 }
2304 }
2305 // Add new one
2306 if (_C_strings_count < MAX_STR_COUNT) {
      // The passed-in string can be freed and its space become inaccessible.
      // Keep the original address but duplicate the string for future compares.
2309 _C_strings_id[_C_strings_count] = -1; // Init
2310 _C_strings_in[_C_strings_count] = str;
2311 const char* dup = os::strdup(str);
2312 _C_strings[_C_strings_count++] = dup;
2313 log_trace(aot, codecache, stringtable)("add_C_string: [%d] " INTPTR_FORMAT " '%s'", _C_strings_count, p2i(dup), dup);
2314 return dup;
2315 } else {
2316 assert(false, "Number of C strings >= MAX_STR_COUNT");
2317 }
2318 }
2319 return str;
2320 }
2321
2322 int AOTCodeAddressTable::id_for_C_string(address str) {
2323 if (str == nullptr) {
2324 return BAD_ADDRESS_ID;
2325 }
2326 MutexLocker ml(AOTCodeCStrings_lock, Mutex::_no_safepoint_check_flag);
2327 for (int i = 0; i < _C_strings_count; i++) {
2328 if (_C_strings[i] == (const char*)str) { // found
2329 int id = _C_strings_id[i];
2330 if (id >= 0) {
2331 assert(id < _C_strings_used, "%d >= %d", id , _C_strings_used);
2332 return id; // Found recorded
2333 }
2334 log_trace(aot, codecache, stringtable)("id_for_C_string: _C_strings[%d ==> %d] " INTPTR_FORMAT " '%s'", i, _C_strings_used, p2i(str), str);
2335 // Not found in recorded, add new
2336 id = _C_strings_used++;
2337 _C_strings_s[id] = i;
2338 _C_strings_id[i] = id;
2339 return id;
2340 }
2341 }
2342 return BAD_ADDRESS_ID;
2343 }
2344
2345 address AOTCodeAddressTable::address_for_C_string(int idx) {
2346 assert(idx < _C_strings_count, "sanity");
2347 return (address)_C_strings[idx];
2348 }
2349
2350 static int search_address(address addr, address* table, uint length) {
2351 for (int i = 0; i < (int)length; i++) {
2352 if (table[i] == addr) {
2353 return i;
2354 }
2355 }
2356 return BAD_ADDRESS_ID;
2357 }
2358
2359 address AOTCodeAddressTable::address_for_id(int idx) {
2360 assert(_extrs_complete || initializing_extrs, "AOT Code Cache VM runtime addresses table is not complete");
2361 if (idx == -1) {
2362 return (address)-1;
2363 }
2364 uint id = (uint)idx;
  // special case for symbols encoded as a distance relative to os::init
2366 if (id > (_c_str_base + _c_str_max)) {
2367 return (address)os::init + idx;
2368 }
2369 if (idx < 0) {
2370 fatal("Incorrect id %d for AOT Code Cache addresses table", id);
2371 return nullptr;
2372 }
2373 // no need to compare unsigned id against 0
2374 if (/* id >= _extrs_base && */ id < _extrs_length) {
2375 return _extrs_addr[id - _extrs_base];
2376 }
2377 if (id >= _stubs_base && id < _c_str_base) {
2378 return _stubs_addr[id - _stubs_base];
2379 }
2380 if (id >= _c_str_base && id < (_c_str_base + (uint)_C_strings_count)) {
2381 return address_for_C_string(id - _c_str_base);
2382 }
2383 fatal("Incorrect id %d for AOT Code Cache addresses table", id);
2384 return nullptr;
2385 }
2386
2387 int AOTCodeAddressTable::id_for_address(address addr, RelocIterator reloc, CodeBlob* code_blob) {
2388 assert(_extrs_complete || initializing_extrs, "AOT Code Cache VM runtime addresses table is not complete");
2389 int id = -1;
2390 if (addr == (address)-1) { // Static call stub has jump to itself
2391 return id;
2392 }
2393 // Check card_table_base address first since it can point to any address
2394 BarrierSet* bs = BarrierSet::barrier_set();
2395 bool is_const_card_table_base = !UseG1GC && !UseShenandoahGC && bs->is_a(BarrierSet::CardTableBarrierSet);
2396 guarantee(!is_const_card_table_base || addr != ci_card_table_address_const(), "sanity");
2397 // fast path for stubs and external addresses
2398 if (_hash_table != nullptr) {
2399 int *result = _hash_table->get(addr);
2400 if (result != nullptr) {
2401 id = *result;
2402 log_trace(aot, codecache)("Address " INTPTR_FORMAT " retrieved from AOT Code Cache address hash table with index '%d'",
2403 p2i(addr), id);
2404 return id;
2405 }
2406 }
  // Search for C string
2408 id = id_for_C_string(addr);
2409 if (id != BAD_ADDRESS_ID) {
2410 return id + _c_str_base;
2411 }
2412 if (StubRoutines::contains(addr) || CodeCache::find_blob(addr) != nullptr) {
2413 // Search for a matching stub entry
2414 id = search_address(addr, _stubs_addr, _stubs_max);
2415 if (id == BAD_ADDRESS_ID) {
2416 StubCodeDesc* desc = StubCodeDesc::desc_for(addr);
2417 if (desc == nullptr) {
2418 desc = StubCodeDesc::desc_for(addr + frame::pc_return_offset);
2419 }
      const char* stub_name = (desc != nullptr) ? desc->name() : "<unknown>";
      assert(false, "Address " INTPTR_FORMAT " for Stub:%s is missing in AOT Code Cache addresses table", p2i(addr), stub_name);
2422 } else {
2423 return id + _stubs_base;
2424 }
2425 } else {
2426 // Search in runtime functions
2427 id = search_address(addr, _extrs_addr, _extrs_length);
2428 if (id == BAD_ADDRESS_ID) {
2429 ResourceMark rm;
2430 const int buflen = 1024;
2431 char* func_name = NEW_RESOURCE_ARRAY(char, buflen);
2432 int offset = 0;
2433 if (os::dll_address_to_function_name(addr, func_name, buflen, &offset)) {
2434 if (offset > 0) {
2435 // Could be address of C string
2436 uint dist = (uint)pointer_delta(addr, (address)os::init, 1);
2437 log_debug(aot, codecache)("Address " INTPTR_FORMAT " (offset %d) for runtime target '%s' is missing in AOT Code Cache addresses table",
2438 p2i(addr), dist, (const char*)addr);
2439 assert(dist > (uint)(_all_max + MAX_STR_COUNT), "change encoding of distance");
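          // encode the address as a distance from os::init; the
          // os::init special case in address_for_id reverses this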
2440 return dist;
2441 }
2442 #ifdef ASSERT
2443 reloc.print_current_on(tty);
2444 code_blob->print_on(tty);
2445 code_blob->print_code_on(tty);
2446 assert(false, "Address " INTPTR_FORMAT " for runtime target '%s+%d' is missing in AOT Code Cache addresses table", p2i(addr), func_name, offset);
2447 #endif
2448 } else {
2449 #ifdef ASSERT
2450 reloc.print_current_on(tty);
2451 code_blob->print_on(tty);
2452 code_blob->print_code_on(tty);
2453 os::find(addr, tty);
2454 assert(false, "Address " INTPTR_FORMAT " for <unknown>/('%s') is missing in AOT Code Cache addresses table", p2i(addr), (const char*)addr);
2455 #endif
2456 }
2457 } else {
2458 return _extrs_base + id;
2459 }
2460 }
2461 return id;
2462 }
2463
2464 AOTRuntimeConstants AOTRuntimeConstants::_aot_runtime_constants;
2465
2466 void AOTRuntimeConstants::initialize_from_runtime() {
2467 BarrierSet* bs = BarrierSet::barrier_set();
2468 address card_table_base = nullptr;
2469 uint grain_shift = 0;
2470 #if INCLUDE_G1GC
2471 if (bs->is_a(BarrierSet::G1BarrierSet)) {
2472 grain_shift = G1HeapRegion::LogOfHRGrainBytes;
2473 } else
2474 #endif
2475 #if INCLUDE_SHENANDOAHGC
2476 if (bs->is_a(BarrierSet::ShenandoahBarrierSet)) {
2477 grain_shift = 0;
2478 } else
2479 #endif
2480 if (bs->is_a(BarrierSet::CardTableBarrierSet)) {
2481 CardTable::CardValue* base = ci_card_table_address_const();
2482 assert(base != nullptr, "unexpected byte_map_base");
2483 card_table_base = base;
2484 CardTableBarrierSet* ctbs = barrier_set_cast<CardTableBarrierSet>(bs);
2485 grain_shift = ctbs->grain_shift();
2486 }
2487 _aot_runtime_constants._card_table_base = card_table_base;
2488 _aot_runtime_constants._grain_shift = grain_shift;
2489 }
2490
2491 address AOTRuntimeConstants::_field_addresses_list[] = {
2492 ((address)&_aot_runtime_constants._card_table_base),
2493 ((address)&_aot_runtime_constants._grain_shift),
2494 nullptr
2495 };
2496
2497 address AOTRuntimeConstants::card_table_base_address() {
2498 assert(UseSerialGC || UseParallelGC, "Only these GCs have constant card table base");
2499 return (address)&_aot_runtime_constants._card_table_base;
2500 }
2501
2502 // This is called after initialize() but before init2()
2503 // and _cache is not set yet.
2504 void AOTCodeCache::print_on(outputStream* st) {
2505 if (opened_cache != nullptr && opened_cache->for_use()) {
2506 st->print_cr("\nAOT Code Cache");
2507 uint count = opened_cache->_load_header->entries_count();
2508 uint* search_entries = (uint*)opened_cache->addr(opened_cache->_load_header->entries_offset()); // [id, index]
2509 AOTCodeEntry* load_entries = (AOTCodeEntry*)(search_entries + 2 * count);
2510
2511 for (uint i = 0; i < count; i++) {
      // Use search_entries[] to order output
2513 int index = search_entries[2*i + 1];
2514 AOTCodeEntry* entry = &(load_entries[index]);
2515
2516 uint entry_position = entry->offset();
2517 uint name_offset = entry->name_offset() + entry_position;
2518 const char* saved_name = opened_cache->addr(name_offset);
2519
2520 st->print_cr("%4u: %10s idx:%4u Id:%u size=%u '%s'",
2521 i, aot_code_entry_kind_name[entry->kind()], index, entry->id(), entry->size(), saved_name);
2522 }
2523 }
2524 }
2525
2526 // methods for managing entries in multi-stub blobs
2527
2528
2529 AOTStubData::AOTStubData(BlobId blob_id) :
2530 _blob_id(blob_id),
2531 _cached_blob(nullptr),
2532 _stub_cnt(0),
2533 _ranges(nullptr),
2534 _flags(0) {
2535 assert(StubInfo::is_stubgen(blob_id),
2536 "AOTStubData expects a multi-stub blob not %s",
2537 StubInfo::name(blob_id));
2538
  // we cannot save or restore pre-universe stubs because the cache
  // cannot be accessed before the universe is initialized
2541 if (blob_id == BlobId::stubgen_preuniverse_id) {
2542 // invalidate any attempt to use this
2543 _flags = INVALID;
2544 return;
2545 }
2546 if (AOTCodeCache::is_on()) {
2547 _flags = OPEN;
2548 // allow update of stub entry addresses
2549 if (AOTCodeCache::is_using_stub()) {
2550 // allow stub loading
2551 _flags |= USING;
2552 }
2553 if (AOTCodeCache::is_dumping_stub()) {
2554 // allow stub saving
2555 _flags |= DUMPING;
2556 }
2557 // we need to track all the blob's entries
2558 _stub_cnt = StubInfo::stub_count(_blob_id);
2559 _ranges = NEW_C_HEAP_ARRAY(StubAddrRange, _stub_cnt, mtCode);
2560 for (int i = 0; i < _stub_cnt; i++) {
2561 _ranges[i].default_init();
2562 }
2563 }
2564 }
2565
2566 bool AOTStubData::load_code_blob() {
2567 assert(is_using(), "should not call");
2568 assert(!is_invalid() && _cached_blob == nullptr, "repeated init");
2569 _cached_blob = AOTCodeCache::load_code_blob(AOTCodeEntry::StubGenBlob,
2570 _blob_id,
2571 this);
2572 if (_cached_blob == nullptr) {
2573 set_invalid();
2574 return false;
2575 } else {
2576 return true;
2577 }
2578 }
2579
2580 bool AOTStubData::store_code_blob(CodeBlob& new_blob, CodeBuffer *code_buffer) {
2581 assert(is_dumping(), "should not call");
2582 assert(_cached_blob == nullptr, "should not be loading and storing!");
2583 if (!AOTCodeCache::store_code_blob(new_blob,
2584 AOTCodeEntry::StubGenBlob,
2585 _blob_id, this, code_buffer)) {
2586 set_invalid();
2587 return false;
2588 } else {
2589 return true;
2590 }
2591 }
2592
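// Per-stub address records saved by store_archive_data and read back
// by load_archive_data have the layout
//
//   [start][end][secondary entries (entry_count - 1)][extras ...]
//
// where non-null secondary entries lie strictly between start and end
// and non-null extras may also equal start or end.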
2593 address AOTStubData::load_archive_data(StubId stub_id, address& end, GrowableArray<address>* entries, GrowableArray<address>* extras) {
2594 assert(StubInfo::blob(stub_id) == _blob_id, "sanity check");
2595 if (is_invalid()) {
2596 return nullptr;
2597 }
2598 int idx = StubInfo::stubgen_offset_in_blob(_blob_id, stub_id);
2599 assert(idx >= 0 && idx < _stub_cnt, "invalid index %d for stub count %d", idx, _stub_cnt);
2600 // ensure we have a valid associated range
2601 StubAddrRange &range = _ranges[idx];
2602 int base = range.start_index();
2603 if (base < 0) {
#ifdef ASSERT
    // reset index so we can identify which ones we failed to find
2606 range.init_entry(-2, 0);
2607 #endif
2608 return nullptr;
2609 }
2610 int count = range.count();
2611 assert(base >= 0, "sanity");
2612 assert(count >= 2, "sanity");
2613 // first two saved addresses are start and end
2614 address start = _address_array.at(base);
2615 end = _address_array.at(base + 1);
2616 assert(start != nullptr, "failed to load start address of stub %s", StubInfo::name(stub_id));
2617 assert(end != nullptr, "failed to load end address of stub %s", StubInfo::name(stub_id));
2618 assert(start < end, "start address %p should be less than end %p address for stub %s", start, end, StubInfo::name(stub_id));
2619
2620 int entry_count = StubInfo::entry_count(stub_id);
2621 // the address count must at least include the stub start, end
2622 // and secondary addresses
2623 assert(count >= entry_count + 1, "stub %s requires %d saved addresses but only has %d", StubInfo::name(stub_id), entry_count + 1, count);
2624
2625 // caller must retrieve secondary entries if and only if they exist
2626 assert((entry_count == 1) == (entries == nullptr), "trying to retrieve wrong number of entries for stub %s", StubInfo::name(stub_id));
2627 int index = 2;
2628 if (entries != nullptr) {
2629 assert(entries->length() == 0, "non-empty array when retrieving entries for stub %s!", StubInfo::name(stub_id));
2630 while (index < entry_count + 1) {
2631 address entry = _address_array.at(base + index++);
2632 assert(entry == nullptr || (start < entry && entry < end), "entry address %p not in range (%p, %p) for stub %s", entry, start, end, StubInfo::name(stub_id));
2633 entries->append(entry);
2634 }
2635 }
2636 // caller must retrieve extras if and only if they exist
2637 assert((index < count) == (extras != nullptr), "trying to retrieve wrong number of extras for stub %s", StubInfo::name(stub_id));
2638 if (extras != nullptr) {
2639 assert(extras->length() == 0, "non-empty array when retrieving extras for stub %s!", StubInfo::name(stub_id));
2640 while (index < count) {
2641 address extra = _address_array.at(base + index++);
2642 assert(extra == nullptr || (start <= extra && extra <= end), "extra address %p not in range (%p, %p) for stub %s", extra, start, end, StubInfo::name(stub_id));
2643 extras->append(extra);
2644 }
2645 }
2646
2647 return start;
2648 }
2649
2650 void AOTStubData::store_archive_data(StubId stub_id, address start, address end, GrowableArray<address>* entries, GrowableArray<address>* extras) {
2651 assert(StubInfo::blob(stub_id) == _blob_id, "sanity check");
2652 assert(start != nullptr, "start address cannot be null");
2653 assert(end != nullptr, "end address cannot be null");
2654 assert(start < end, "start address %p should be less than end %p address for stub %s", start, end, StubInfo::name(stub_id));
2655 int idx = StubInfo::stubgen_offset_in_blob(_blob_id, stub_id);
2656 StubAddrRange& range = _ranges[idx];
2657 assert(range.start_index() == -1, "sanity");
2658 int base = _address_array.length();
2659 assert(base >= 0, "sanity");
2660 // first two saved addresses are start and end
2661 _address_array.append(start);
2662 _address_array.append(end);
2663 // caller must save secondary entries if and only if they exist
2664 assert((StubInfo::entry_count(stub_id) == 1) == (entries == nullptr), "trying to save wrong number of entries for stub %s", StubInfo::name(stub_id));
2665 if (entries != nullptr) {
2666 assert(entries->length() == StubInfo::entry_count(stub_id) - 1, "incorrect entry count %d when saving entries for stub %s!", entries->length(), StubInfo::name(stub_id));
2667 for (int i = 0; i < entries->length(); i++) {
2668 address entry = entries->at(i);
2669 assert(entry == nullptr || (start < entry && entry < end), "entry address %p not in range (%p, %p) for stub %s", entry, start, end, StubInfo::name(stub_id));
2670 _address_array.append(entry);
2671 }
2672 }
2673 // caller may wish to save extra addresses
2674 if (extras != nullptr) {
2675 for (int i = 0; i < extras->length(); i++) {
2676 address extra = extras->at(i);
2677 // handler range end may be end -- it gets restored as nullptr
2678 assert(extra == nullptr || (start <= extra && extra <= end), "extra address %p not in range (%p, %p) for stub %s", extra, start, end, StubInfo::name(stub_id));
2679 _address_array.append(extra);
2680 }
2681 }
2682 range.init_entry(base, _address_array.length() - base);
2683 }