1 /*
2 * Copyright (c) 2023, 2026, Oracle and/or its affiliates. All rights reserved.
3 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
4 *
5 * This code is free software; you can redistribute it and/or modify it
6 * under the terms of the GNU General Public License version 2 only, as
7 * published by the Free Software Foundation.
8 *
9 * This code is distributed in the hope that it will be useful, but WITHOUT
10 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
11 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
12 * version 2 for more details (a copy is included in the LICENSE file that
13 * accompanied this code).
14 *
15 * You should have received a copy of the GNU General Public License version
16 * 2 along with this work; if not, write to the Free Software Foundation,
17 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
18 *
19 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
20 * or visit www.oracle.com if you need additional information or have any
21 * questions.
22 *
23 */
24
25
26 #include "asm/macroAssembler.hpp"
27 #include "cds/aotCacheAccess.hpp"
28 #include "cds/aotMetaspace.hpp"
29 #include "cds/cds_globals.hpp"
30 #include "cds/cdsConfig.hpp"
31 #include "cds/heapShared.hpp"
32 #include "ci/ciUtilities.hpp"
33 #include "classfile/javaAssertions.hpp"
34 #include "code/aotCodeCache.hpp"
35 #include "code/codeCache.hpp"
36 #include "gc/shared/barrierSetAssembler.hpp"
37 #include "gc/shared/barrierSetNMethod.hpp"
38 #include "gc/shared/cardTableBarrierSet.hpp"
39 #include "gc/shared/gcConfig.hpp"
40 #include "logging/logStream.hpp"
41 #include "memory/memoryReserver.hpp"
42 #include "prims/jvmtiThreadState.hpp"
43 #include "prims/upcallLinker.hpp"
44 #include "runtime/deoptimization.hpp"
45 #include "runtime/flags/flagSetting.hpp"
46 #include "runtime/globals_extension.hpp"
47 #include "runtime/icache.hpp"
48 #include "runtime/java.hpp"
49 #include "runtime/mutexLocker.hpp"
50 #include "runtime/os.inline.hpp"
51 #include "runtime/sharedRuntime.hpp"
52 #include "runtime/stubInfo.hpp"
53 #include "runtime/stubRoutines.hpp"
54 #include "utilities/copy.hpp"
55 #ifdef COMPILER1
56 #include "c1/c1_Runtime1.hpp"
57 #endif
58 #ifdef COMPILER2
59 #include "opto/runtime.hpp"
60 #endif
61 #if INCLUDE_G1GC
62 #include "gc/g1/g1BarrierSetRuntime.hpp"
63 #include "gc/g1/g1HeapRegion.hpp"
64 #endif
65 #if INCLUDE_SHENANDOAHGC
66 #include "gc/shenandoah/shenandoahHeapRegion.hpp"
67 #include "gc/shenandoah/shenandoahRuntime.hpp"
68 #endif
69 #if INCLUDE_ZGC
70 #include "gc/z/zBarrierSetRuntime.hpp"
71 #endif
72
73 #include <errno.h>
74 #include <sys/stat.h>
75
76 const char* aot_code_entry_kind_name[] = {
77 #define DECL_KIND_STRING(kind) XSTR(kind),
78 DO_AOTCODEENTRY_KIND(DECL_KIND_STRING)
79 #undef DECL_KIND_STRING
80 };
81
82 // Stream to printing AOTCodeCache loading failure.
83 // Print to error channel when -XX:AOTMode is set to "on"
84 static LogStream& load_failure_log() {
85 static LogStream err_stream(LogLevel::Error, LogTagSetMapping<LOG_TAGS(aot, codecache, init)>::tagset());
86 static LogStream dbg_stream(LogLevel::Debug, LogTagSetMapping<LOG_TAGS(aot, codecache, init)>::tagset());
87 if (RequireSharedSpaces || AbortVMOnAOTCodeFailure) {
88 return err_stream;
89 } else {
90 return dbg_stream;
91 }
92 }
93
94 // Report AOT code cache failure and exit VM
95 // if (AOTMode is `on` and AbortVMOnAOTCodeFailure is default)
96 // or AbortVMOnAOTCodeFailure is `true`.
97 //
98 // Note, specifying -XX:-AbortVMOnAOTCodeFailure on command line
99 // will prevent aborting VM when AOTMode is `on`. It is used for testing.
100
101 static void report_load_failure() {
102 bool abort_vm = AbortVMOnAOTCodeFailure ||
103 (FLAG_IS_DEFAULT(AbortVMOnAOTCodeFailure) && RequireSharedSpaces);
104 if (abort_vm) {
105 vm_exit_during_initialization("Unable to use AOT Code Cache.", nullptr);
106 }
107 load_failure_log().print_cr("Unable to use AOT Code Cache.");
108 AOTCodeCache::disable_caching();
109 }
110
111 static void report_store_failure() {
112 if (AbortVMOnAOTCodeFailure) {
113 tty->print_cr("Unable to create AOT Code Cache.");
114 vm_abort(false);
115 }
116 log_error(aot, codecache, exit)("Unable to create AOT Code Cache.");
117 AOTCodeCache::disable_caching();
118 }
119
120 // The sequence of AOT code caching flags and parametters settings.
121 //
122 // 1. The initial AOT code caching flags setting is done
123 // during call to CDSConfig::check_vm_args_consistency().
124 //
125 // 2. The earliest AOT code state check done in compilationPolicy_init()
126 // where we set number of compiler threads for AOT assembly phase.
127 //
128 // 3. We determine presence of AOT code in AOT Cache in
129 // AOTMetaspace::open_static_archive() which is calles
130 // after compilationPolicy_init() but before codeCache_init().
131 //
132 // 4. AOTCodeCache::initialize() is called during universe_init()
133 // and does final AOT state and flags settings.
134 //
135 // 5. Finally AOTCodeCache::init2() is called after universe_init()
136 // when all GC settings are finalized.
137
138 // Next methods determine which action we do with AOT code depending
139 // on phase of AOT process: assembly or production.
140
141 bool AOTCodeCache::is_dumping_adapter() {
142 return AOTAdapterCaching && is_on_for_dump();
143 }
144
145 bool AOTCodeCache::is_using_adapter() {
146 return AOTAdapterCaching && is_on_for_use();
147 }
148
149 bool AOTCodeCache::is_dumping_stub() {
150 return AOTStubCaching && is_on_for_dump();
151 }
152
153 bool AOTCodeCache::is_using_stub() {
154 return AOTStubCaching && is_on_for_use();
155 }
156
157 // Next methods could be called regardless AOT code cache status.
158 // Initially they are called during flags parsing and finilized
159 // in AOTCodeCache::initialize().
160 void AOTCodeCache::enable_caching() {
161 FLAG_SET_ERGO_IF_DEFAULT(AOTStubCaching, true);
162 FLAG_SET_ERGO_IF_DEFAULT(AOTAdapterCaching, true);
163 }
164
165 void AOTCodeCache::disable_caching() {
166 FLAG_SET_ERGO(AOTStubCaching, false);
167 FLAG_SET_ERGO(AOTAdapterCaching, false);
168 }
169
170 bool AOTCodeCache::is_caching_enabled() {
171 return AOTStubCaching || AOTAdapterCaching;
172 }
173
174 static uint32_t encode_id(AOTCodeEntry::Kind kind, int id) {
175 assert(AOTCodeEntry::is_valid_entry_kind(kind), "invalid AOTCodeEntry kind %d", (int)kind);
176 // There can be a conflict of id between an Adapter and *Blob, but that should not cause any functional issue
177 // becasue both id and kind are used to find an entry, and that combination should be unique
178 if (kind == AOTCodeEntry::Adapter) {
179 return id;
180 } else if (kind == AOTCodeEntry::SharedBlob) {
181 assert(StubInfo::is_shared(static_cast<BlobId>(id)), "not a shared blob id %d", id);
182 return id;
183 } else if (kind == AOTCodeEntry::C1Blob) {
184 assert(StubInfo::is_c1(static_cast<BlobId>(id)), "not a c1 blob id %d", id);
185 return id;
186 } else if (kind == AOTCodeEntry::C2Blob) {
187 assert(StubInfo::is_c2(static_cast<BlobId>(id)), "not a c2 blob id %d", id);
188 return id;
189 } else {
190 // kind must be AOTCodeEntry::StubGenBlob
191 assert(StubInfo::is_stubgen(static_cast<BlobId>(id)), "not a stubgen blob id %d", id);
192 return id;
193 }
194 }
195
196 static uint _max_aot_code_size = 0;
197 uint AOTCodeCache::max_aot_code_size() {
198 return _max_aot_code_size;
199 }
200
201 // It is called from AOTMetaspace::initialize_shared_spaces()
202 // which is called from universe_init().
203 // At this point all AOT class linking seetings are finilized
204 // and AOT cache is open so we can map AOT code region.
205 void AOTCodeCache::initialize() {
206 #if defined(ZERO) || !(defined(AMD64) || defined(AARCH64))
207 log_info(aot, codecache, init)("AOT Code Cache is not supported on this platform.");
208 disable_caching();
209 return;
210 #else
211 if (FLAG_IS_DEFAULT(AOTCache)) {
212 log_info(aot, codecache, init)("AOT Code Cache is not used: AOTCache is not specified.");
213 disable_caching();
214 return; // AOTCache must be specified to dump and use AOT code
215 }
216
217 if (VerifyOops) {
218 // Disable AOT stubs caching when VerifyOops flag is on.
219 // Verify oops code generated a lot of C strings which overflow
220 // AOT C string table (which has fixed size).
221 // AOT C string table will be reworked later to handle such cases.
222 //
223 // Note: AOT adapters are not affected - they don't have oop operations.
224 log_info(aot, codecache, init)("AOT Stubs Caching is not supported with VerifyOops.");
225 FLAG_SET_ERGO(AOTStubCaching, false);
226 }
227
228 bool is_dumping = false;
229 bool is_using = false;
230 if (CDSConfig::is_dumping_final_static_archive() && CDSConfig::is_dumping_aot_linked_classes()) {
231 is_dumping = true;
232 enable_caching();
233 is_dumping = is_caching_enabled();
234 } else if (CDSConfig::is_using_archive() && CDSConfig::is_using_aot_linked_classes()) {
235 enable_caching();
236 is_using = is_caching_enabled();
237 } else {
238 log_info(aot, codecache, init)("AOT Code Cache is not used: AOT Class Linking is not used.");
239 disable_caching();
240 return; // nothing to do
241 }
242 if (!(is_dumping || is_using)) {
243 disable_caching();
244 return; // AOT code caching disabled on command line
245 }
246 _max_aot_code_size = AOTCodeMaxSize;
247 if (!FLAG_IS_DEFAULT(AOTCodeMaxSize)) {
248 if (!is_aligned(AOTCodeMaxSize, os::vm_allocation_granularity())) {
249 _max_aot_code_size = align_up(AOTCodeMaxSize, os::vm_allocation_granularity());
250 log_debug(aot,codecache,init)("Max AOT Code Cache size is aligned up to %uK", (int)(max_aot_code_size()/K));
251 }
252 }
253 size_t aot_code_size = is_using ? AOTCacheAccess::get_aot_code_region_size() : 0;
254 if (is_using && aot_code_size == 0) {
255 log_info(aot, codecache, init)("AOT Code Cache is empty");
256 disable_caching();
257 return;
258 }
259 if (!open_cache(is_dumping, is_using)) {
260 if (is_using) {
261 report_load_failure();
262 } else {
263 report_store_failure();
264 }
265 return;
266 }
267 if (is_dumping) {
268 FLAG_SET_DEFAULT(ForceUnreachable, true);
269 }
270 FLAG_SET_DEFAULT(DelayCompilerStubsGeneration, false);
271 #endif // defined(AMD64) || defined(AARCH64)
272 }
273
274 static AOTCodeCache* opened_cache = nullptr; // Use this until we verify the cache
275 AOTCodeCache* AOTCodeCache::_cache = nullptr;
276 DEBUG_ONLY( bool AOTCodeCache::_passed_init2 = false; )
277
278 // It is called after universe_init() when all GC settings are finalized.
279 void AOTCodeCache::init2() {
280 DEBUG_ONLY( _passed_init2 = true; )
281 if (opened_cache == nullptr) {
282 return;
283 }
284 if (!opened_cache->verify_config()) {
285 delete opened_cache;
286 opened_cache = nullptr;
287 report_load_failure();
288 return;
289 }
290
291 // initialize aot runtime constants as appropriate to this runtime
292 AOTRuntimeConstants::initialize_from_runtime();
293
294 // initialize the table of external routines so we can save
295 // generated code blobs that reference them
296 AOTCodeAddressTable* table = opened_cache->_table;
297 assert(table != nullptr, "should be initialized already");
298 table->init_extrs();
299
300 // Now cache and address table are ready for AOT code generation
301 _cache = opened_cache;
302 }
303
304 bool AOTCodeCache::open_cache(bool is_dumping, bool is_using) {
305 opened_cache = new AOTCodeCache(is_dumping, is_using);
306 if (opened_cache->failed()) {
307 delete opened_cache;
308 opened_cache = nullptr;
309 return false;
310 }
311 return true;
312 }
313
314 // Called after continuations_init() when continuation stub callouts
315 // have been initialized
316 void AOTCodeCache::init3() {
317 if (opened_cache == nullptr) {
318 return;
319 }
320 // initialize external routines for continuations so we can save
321 // generated continuation blob that references them
322 AOTCodeAddressTable* table = opened_cache->_table;
323 assert(table != nullptr, "should be initialized already");
324 table->init_extrs2();
325 }
326
327 void AOTCodeCache::dump() {
328 if (is_on()) {
329 assert(is_on_for_dump(), "should be called only when dumping AOT code");
330 MutexLocker ml(Compile_lock);
331 _cache->finish_write();
332 }
333 }
334
335 #define DATA_ALIGNMENT HeapWordSize
336
337 AOTCodeCache::AOTCodeCache(bool is_dumping, bool is_using) :
338 _load_header(nullptr),
339 _load_buffer(nullptr),
340 _store_buffer(nullptr),
341 _C_store_buffer(nullptr),
342 _write_position(0),
343 _load_size(0),
344 _store_size(0),
345 _for_use(is_using),
346 _for_dump(is_dumping),
347 _failed(false),
348 _lookup_failed(false),
349 _table(nullptr),
350 _load_entries(nullptr),
351 _search_entries(nullptr),
352 _store_entries(nullptr),
353 _C_strings_buf(nullptr),
354 _store_entries_cnt(0)
355 {
356 // Read header at the begining of cache
357 if (_for_use) {
358 // Read cache
359 size_t load_size = AOTCacheAccess::get_aot_code_region_size();
360 ReservedSpace rs = MemoryReserver::reserve(load_size, mtCode);
361 if (!rs.is_reserved()) {
362 log_warning(aot, codecache, init)("Failed to reserved %u bytes of memory for mapping AOT code region into AOT Code Cache", (uint)load_size);
363 set_failed();
364 return;
365 }
366 if (!AOTCacheAccess::map_aot_code_region(rs)) {
367 log_warning(aot, codecache, init)("Failed to read/mmap cached code region into AOT Code Cache");
368 set_failed();
369 return;
370 }
371
372 _load_size = (uint)load_size;
373 _load_buffer = (char*)rs.base();
374 assert(is_aligned(_load_buffer, DATA_ALIGNMENT), "load_buffer is not aligned");
375 log_debug(aot, codecache, init)("Mapped %u bytes at address " INTPTR_FORMAT " at AOT Code Cache", _load_size, p2i(_load_buffer));
376
377 _load_header = (Header*)addr(0);
378 if (!_load_header->verify(_load_size)) {
379 set_failed();
380 return;
381 }
382 log_info (aot, codecache, init)("Loaded %u AOT code entries from AOT Code Cache", _load_header->entries_count());
383 log_debug(aot, codecache, init)(" Adapters: total=%u", _load_header->adapters_count());
384 log_debug(aot, codecache, init)(" Shared Blobs: total=%u", _load_header->shared_blobs_count());
385 log_debug(aot, codecache, init)(" StubGen Blobs: total=%d", _load_header->stubgen_blobs_count());
386 log_debug(aot, codecache, init)(" C1 Blobs: total=%u", _load_header->C1_blobs_count());
387 log_debug(aot, codecache, init)(" C2 Blobs: total=%u", _load_header->C2_blobs_count());
388 log_debug(aot, codecache, init)(" AOT code cache size: %u bytes", _load_header->cache_size());
389
390 // Read strings
391 load_strings();
392 }
393 if (_for_dump) {
394 _C_store_buffer = NEW_C_HEAP_ARRAY(char, max_aot_code_size() + DATA_ALIGNMENT, mtCode);
395 _store_buffer = align_up(_C_store_buffer, DATA_ALIGNMENT);
396 // Entries allocated at the end of buffer in reverse (as on stack).
397 _store_entries = (AOTCodeEntry*)align_up(_C_store_buffer + max_aot_code_size(), DATA_ALIGNMENT);
398 log_debug(aot, codecache, init)("Allocated store buffer at address " INTPTR_FORMAT " of size %u", p2i(_store_buffer), max_aot_code_size());
399 }
400 _table = new AOTCodeAddressTable();
401 }
402
403 void AOTCodeCache::add_stub_entries(StubId stub_id, address start, GrowableArray<address> *entries, int begin_idx) {
404 EntryId entry_id = StubInfo::entry_base(stub_id);
405 add_stub_entry(entry_id, start);
406 // skip past first entry
407 entry_id = StubInfo::next_in_stub(stub_id, entry_id);
408 // now check for any more entries
409 int count = StubInfo::entry_count(stub_id) - 1;
410 assert(start != nullptr, "invalid start address for stub %s", StubInfo::name(stub_id));
411 assert(entries == nullptr || begin_idx + count <= entries->length(), "sanity");
412 // write any extra entries
413 for (int i = 0; i < count; i++) {
414 assert(entry_id != EntryId::NO_ENTRYID, "not enough entries for stub %s", StubInfo::name(stub_id));
415 address a = entries->at(begin_idx + i);
416 add_stub_entry(entry_id, a);
417 entry_id = StubInfo::next_in_stub(stub_id, entry_id);
418 }
419 assert(entry_id == EntryId::NO_ENTRYID, "too many entries for stub %s", StubInfo::name(stub_id));
420 }
421
422 void AOTCodeCache::add_stub_entry(EntryId entry_id, address a) {
423 if (a != nullptr) {
424 if (_table != nullptr) {
425 log_trace(aot, codecache, stubs)("Publishing stub entry %s at address " INTPTR_FORMAT, StubInfo::name(entry_id), p2i(a));
426 return _table->add_stub_entry(entry_id, a);
427 }
428 }
429 }
430
431 void AOTCodeCache::set_shared_stubs_complete() {
432 AOTCodeAddressTable* table = addr_table();
433 if (table != nullptr) {
434 table->set_shared_stubs_complete();
435 }
436 }
437
438 void AOTCodeCache::set_c1_stubs_complete() {
439 AOTCodeAddressTable* table = addr_table();
440 if (table != nullptr) {
441 table->set_c1_stubs_complete();
442 }
443 }
444
445 void AOTCodeCache::set_c2_stubs_complete() {
446 AOTCodeAddressTable* table = addr_table();
447 if (table != nullptr) {
448 table->set_c2_stubs_complete();
449 }
450 }
451
452 void AOTCodeCache::set_stubgen_stubs_complete() {
453 AOTCodeAddressTable* table = addr_table();
454 if (table != nullptr) {
455 table->set_stubgen_stubs_complete();
456 }
457 }
458
459 void AOTCodeCache::Config::record(uint cpu_features_offset) {
460
461 #define AOTCODECACHE_SAVE_VAR(type, name) _saved_ ## name = name;
462 #define AOTCODECACHE_SAVE_FUN(type, name, fun) _saved_ ## name = fun;
463
464 AOTCODECACHE_CONFIGS_DO(AOTCODECACHE_SAVE_VAR, AOTCODECACHE_SAVE_FUN);
465
466 // Special configs that cannot be checked with macros
467 _compressedOopBase = CompressedOops::base();
468
469 #if defined(X86) && !defined(ZERO)
470 _useUnalignedLoadStores = UseUnalignedLoadStores;
471 #endif
472
473 #if defined(AARCH64) && !defined(ZERO)
474 _avoidUnalignedAccesses = AvoidUnalignedAccesses;
475 #endif
476
477 _cpu_features_offset = cpu_features_offset;
478 }
479
480 bool AOTCodeCache::Config::verify_cpu_features(AOTCodeCache* cache) const {
481 LogStreamHandle(Debug, aot, codecache, init) log;
482 uint offset = _cpu_features_offset;
483 uint cpu_features_size = *(uint *)cache->addr(offset);
484 assert(cpu_features_size == (uint)VM_Version::cpu_features_size(), "must be");
485 offset += sizeof(uint);
486
487 void* cached_cpu_features_buffer = (void *)cache->addr(offset);
488 if (log.is_enabled()) {
489 ResourceMark rm; // required for stringStream::as_string()
490 stringStream ss;
491 VM_Version::get_cpu_features_name(cached_cpu_features_buffer, ss);
492 log.print_cr("CPU features recorded in AOTCodeCache: %s", ss.as_string());
493 }
494
495 if (!VM_Version::verify_aot_code_cache_features(cached_cpu_features_buffer)) {
496 if (load_failure_log().is_enabled()) {
497 ResourceMark rm; // required for stringStream::as_string()
498 load_failure_log().print_cr("AOT Code Cache disabled: cpu features are incompatible");
499 char* runtime_cpu_features = NEW_RESOURCE_ARRAY(char, VM_Version::cpu_features_size());
500 VM_Version::store_cpu_features(runtime_cpu_features);
501
502 stringStream missing_features;
503 VM_Version::get_missing_features_name(cached_cpu_features_buffer, runtime_cpu_features, missing_features);
504 if (!missing_features.is_empty()) {
505 load_failure_log().print_cr("cpu features that are required: \"%s\"", missing_features.as_string());
506 }
507
508 stringStream additional_features;
509 VM_Version::get_missing_features_name(runtime_cpu_features, cached_cpu_features_buffer, additional_features);
510 if (!additional_features.is_empty()) {
511 load_failure_log().print("cpu features that are additional: \"%s\"", additional_features.as_string());
512 }
513 load_failure_log().print_cr("");
514 }
515 return false;
516 }
517 return true;
518 }
519
520 #define AOTCODECACHE_DISABLED_MSG "AOT Code Cache disabled: it was created with %s = "
521
522 // Special case, print "GC = ..." to be more understandable.
523 inline void log_config_mismatch(CollectedHeap::Name saved, CollectedHeap::Name current, const char* name/*unused*/) {
524 load_failure_log().print_cr("AOT Code Cache disabled: it was created with GC = \"%s\" vs current \"%s\"",
525 GCConfig::hs_err_name(saved), GCConfig::hs_err_name(current));
526 }
527
528 inline void log_config_mismatch(bool saved, bool current, const char* name) {
529 load_failure_log().print_cr(AOTCODECACHE_DISABLED_MSG "%s vs current %s", name,
530 saved ? "true" : "false", current ? "true" : "false");
531 }
532
533 inline void log_config_mismatch(int saved, int current, const char* name) {
534 load_failure_log().print_cr(AOTCODECACHE_DISABLED_MSG "%d vs current %d", name, saved, current);
535 }
536
537 inline void log_config_mismatch(uint saved, uint current, const char* name) {
538 load_failure_log().print_cr(AOTCODECACHE_DISABLED_MSG "%u vs current %u", name, saved, current);
539 }
540
541 #ifdef _LP64
542 inline void log_config_mismatch(intx saved, intx current, const char* name) {
543 load_failure_log().print_cr(AOTCODECACHE_DISABLED_MSG "%zd vs current %zd", name, saved, current);
544 }
545
546 inline void log_config_mismatch(uintx saved, uintx current, const char* name) {
547 load_failure_log().print_cr(AOTCODECACHE_DISABLED_MSG "%zu vs current %zu", name, saved, current);
548 }
549 #endif
550
551 template <typename T>
552 bool check_config(T saved, T current, const char* name) {
553 if (saved != current) {
554 log_config_mismatch(saved, current, name);
555 return false;
556 } else {
557 return true;
558 }
559 }
560
561 bool AOTCodeCache::Config::verify(AOTCodeCache* cache) const {
562 // check CPU features before checking flags that may be
563 // auto-configured in response to them
564 if (!verify_cpu_features(cache)) {
565 return false;
566 }
567
568 // Tests for config options which might affect validity of adapters,
569 // stubs or nmethods. Currently we take a pessemistic stand and
570 // drop the whole cache if any of these are changed.
571
572 #define AOTCODECACHE_CHECK_VAR(type, name) \
573 if (!check_config(_saved_ ## name, name, #name)) { return false; }
574 #define AOTCODECACHE_CHECK_FUN(type, name, fun) \
575 if (!check_config(_saved_ ## name, fun, #fun)) { return false; }
576
577 AOTCODECACHE_CONFIGS_DO(AOTCODECACHE_CHECK_VAR, AOTCODECACHE_CHECK_FUN);
578
579 // Special configs that cannot be checked with macros
580
581 if ((_compressedOopBase == nullptr || CompressedOops::base() == nullptr) && (_compressedOopBase != CompressedOops::base())) {
582 load_failure_log().print_cr("AOT Code Cache disabled: incompatible CompressedOops::base(): %p vs current %p",
583 _compressedOopBase, CompressedOops::base());
584 return false;
585 }
586
587 #if defined(X86) && !defined(ZERO)
588 // switching off UseUnalignedLoadStores can affect validity of fill
589 // stubs
590 if (_useUnalignedLoadStores && !UseUnalignedLoadStores) {
591 log_config_mismatch(_useUnalignedLoadStores, UseUnalignedLoadStores, "UseUnalignedLoadStores");
592 return false;
593 }
594 #endif // defined(X86) && !defined(ZERO)
595
596 #if defined(AARCH64) && !defined(ZERO)
597 // switching on AvoidUnalignedAccesses may affect validity of array
598 // copy stubs and nmethods
599 if (!_avoidUnalignedAccesses && AvoidUnalignedAccesses) {
600 log_config_mismatch(_avoidUnalignedAccesses, AvoidUnalignedAccesses, "AvoidUnalignedAccesses");
601 return false;
602 }
603 #endif // defined(AARCH64) && !defined(ZERO)
604
605 return true;
606 }
607
608 bool AOTCodeCache::Header::verify(uint load_size) const {
609 if (_version != AOT_CODE_VERSION) {
610 load_failure_log().print_cr("AOT Code Cache disabled: different AOT Code version %d vs %d recorded in AOT Code header", AOT_CODE_VERSION, _version);
611 return false;
612 }
613 if (load_size < _cache_size) {
614 load_failure_log().print_cr("AOT Code Cache disabled: AOT Code Cache size %d < %d recorded in AOT Code header", load_size, _cache_size);
615 return false;
616 }
617 return true;
618 }
619
620 AOTCodeCache* AOTCodeCache::open_for_use() {
621 if (AOTCodeCache::is_on_for_use()) {
622 return AOTCodeCache::cache();
623 }
624 return nullptr;
625 }
626
627 AOTCodeCache* AOTCodeCache::open_for_dump() {
628 if (AOTCodeCache::is_on_for_dump()) {
629 AOTCodeCache* cache = AOTCodeCache::cache();
630 cache->clear_lookup_failed(); // Reset bit
631 return cache;
632 }
633 return nullptr;
634 }
635
636 void copy_bytes(const char* from, address to, uint size) {
637 assert((int)size > 0, "sanity");
638 memcpy(to, from, size);
639 log_trace(aot, codecache)("Copied %d bytes from " INTPTR_FORMAT " to " INTPTR_FORMAT, size, p2i(from), p2i(to));
640 }
641
642 AOTCodeReader::AOTCodeReader(AOTCodeCache* cache, AOTCodeEntry* entry) {
643 _cache = cache;
644 _entry = entry;
645 _load_buffer = cache->cache_buffer();
646 _read_position = 0;
647 _lookup_failed = false;
648 _name = nullptr;
649 _reloc_data = nullptr;
650 _reloc_count = 0;
651 _oop_maps = nullptr;
652 _entry_kind = AOTCodeEntry::None;
653 _stub_data = nullptr;
654 _id = -1;
655 }
656
657 void AOTCodeReader::set_read_position(uint pos) {
658 if (pos == _read_position) {
659 return;
660 }
661 assert(pos < _cache->load_size(), "offset:%d >= file size:%d", pos, _cache->load_size());
662 _read_position = pos;
663 }
664
665 uint AOTCodeReader::align_read_int() {
666 return align_up(_read_position, sizeof(int));
667 }
668
669 bool AOTCodeCache::set_write_position(uint pos) {
670 if (pos == _write_position) {
671 return true;
672 }
673 if (_store_size < _write_position) {
674 _store_size = _write_position; // Adjust during write
675 }
676 assert(pos < _store_size, "offset:%d >= file size:%d", pos, _store_size);
677 _write_position = pos;
678 return true;
679 }
680
681 static char align_buffer[256] = { 0 };
682
683 bool AOTCodeCache::align_write_bytes(uint alignment) {
684 uint padding = alignment - (_write_position & (alignment - 1));
685 if (padding == alignment) {
686 return true;
687 }
688 uint n = write_bytes((const void*)&align_buffer, padding);
689 if (n != padding) {
690 return false;
691 }
692 log_trace(aot, codecache)("Adjust write alignment to %d bytes in AOT Code Cache", alignment);
693 return true;
694 }
695
696 bool AOTCodeCache::align_write() {
697 // We are not executing code from cache - we copy it by bytes first.
698 // No need for big alignment (or at all).
699 return align_write_bytes(DATA_ALIGNMENT);
700 }
701
702 bool AOTCodeCache::align_write_int() {
703 return align_write_bytes(sizeof(int));
704 }
705
706 // Check to see if AOT code cache has required space to store "nbytes" of data
707 address AOTCodeCache::reserve_bytes(uint nbytes) {
708 assert(for_dump(), "Code Cache file is not created");
709 uint new_position = _write_position + nbytes;
710 if (new_position >= (uint)((char*)_store_entries - _store_buffer)) {
711 log_warning(aot,codecache)("Failed to ensure %d bytes at offset %d in AOT Code Cache. Increase AOTCodeMaxSize.",
712 nbytes, _write_position);
713 set_failed();
714 report_store_failure();
715 return nullptr;
716 }
717 address buffer = (address)(_store_buffer + _write_position);
718 log_trace(aot, codecache)("Reserved %d bytes at offset %d in AOT Code Cache", nbytes, _write_position);
719 _write_position += nbytes;
720 if (_store_size < _write_position) {
721 _store_size = _write_position;
722 }
723 return buffer;
724 }
725
726 uint AOTCodeCache::write_bytes(const void* buffer, uint nbytes) {
727 assert(for_dump(), "Code Cache file is not created");
728 if (nbytes == 0) {
729 return 0;
730 }
731 uint new_position = _write_position + nbytes;
732 if (new_position >= (uint)((char*)_store_entries - _store_buffer)) {
733 log_warning(aot, codecache)("Failed to write %d bytes at offset %d to AOT Code Cache. Increase AOTCodeMaxSize.",
734 nbytes, _write_position);
735 set_failed();
736 report_store_failure();
737 return 0;
738 }
739 copy_bytes((const char* )buffer, (address)(_store_buffer + _write_position), nbytes);
740 log_trace(aot, codecache)("Wrote %d bytes at offset %d to AOT Code Cache", nbytes, _write_position);
741 _write_position += nbytes;
742 if (_store_size < _write_position) {
743 _store_size = _write_position;
744 }
745 return nbytes;
746 }
747
748 void* AOTCodeEntry::operator new(size_t x, AOTCodeCache* cache) {
749 return (void*)(cache->add_entry());
750 }
751
752 static bool check_entry(AOTCodeEntry::Kind kind, uint id, AOTCodeEntry* entry) {
753 if (entry->kind() == kind) {
754 assert(entry->id() == id, "sanity");
755 return true; // Found
756 }
757 return false;
758 }
759
760 AOTCodeEntry* AOTCodeCache::find_entry(AOTCodeEntry::Kind kind, uint id) {
761 assert(_for_use, "sanity");
762 uint count = _load_header->entries_count();
763 if (_load_entries == nullptr) {
764 // Read it
765 _search_entries = (uint*)addr(_load_header->entries_offset()); // [id, index]
766 _load_entries = (AOTCodeEntry*)(_search_entries + 2 * count);
767 log_debug(aot, codecache, init)("Read %d entries table at offset %d from AOT Code Cache", count, _load_header->entries_offset());
768 }
769 // Binary search
770 int l = 0;
771 int h = count - 1;
772 while (l <= h) {
773 int mid = (l + h) >> 1;
774 int ix = mid * 2;
775 uint is = _search_entries[ix];
776 if (is == id) {
777 int index = _search_entries[ix + 1];
778 AOTCodeEntry* entry = &(_load_entries[index]);
779 if (check_entry(kind, id, entry)) {
780 return entry; // Found
781 }
782 // Linear search around to handle id collission
783 for (int i = mid - 1; i >= l; i--) { // search back
784 ix = i * 2;
785 is = _search_entries[ix];
786 if (is != id) {
787 break;
788 }
789 index = _search_entries[ix + 1];
790 AOTCodeEntry* entry = &(_load_entries[index]);
791 if (check_entry(kind, id, entry)) {
792 return entry; // Found
793 }
794 }
795 for (int i = mid + 1; i <= h; i++) { // search forward
796 ix = i * 2;
797 is = _search_entries[ix];
798 if (is != id) {
799 break;
800 }
801 index = _search_entries[ix + 1];
802 AOTCodeEntry* entry = &(_load_entries[index]);
803 if (check_entry(kind, id, entry)) {
804 return entry; // Found
805 }
806 }
807 break; // Not found match
808 } else if (is < id) {
809 l = mid + 1;
810 } else {
811 h = mid - 1;
812 }
813 }
814 return nullptr;
815 }
816
817 extern "C" {
818 static int uint_cmp(const void *i, const void *j) {
819 uint a = *(uint *)i;
820 uint b = *(uint *)j;
821 return a > b ? 1 : a < b ? -1 : 0;
822 }
823 }
824
825 void AOTCodeCache::store_cpu_features(char*& buffer, uint buffer_size) {
826 uint* size_ptr = (uint *)buffer;
827 *size_ptr = buffer_size;
828 buffer += sizeof(uint);
829
830 VM_Version::store_cpu_features(buffer);
831 log_debug(aot, codecache, exit)("CPU features recorded in AOTCodeCache: %s", VM_Version::features_string());
832 buffer += buffer_size;
833 buffer = align_up(buffer, DATA_ALIGNMENT);
834 }
835
836 bool AOTCodeCache::finish_write() {
837 if (!align_write()) {
838 return false;
839 }
840 uint strings_offset = _write_position;
841 int strings_count = store_strings();
842 if (strings_count < 0) {
843 return false;
844 }
845 if (!align_write()) {
846 return false;
847 }
848 uint strings_size = _write_position - strings_offset;
849
850 uint entries_count = 0; // Number of entrant (useful) code entries
851 uint entries_offset = _write_position;
852
853 uint store_count = _store_entries_cnt;
854 if (store_count > 0) {
855 uint header_size = (uint)align_up(sizeof(AOTCodeCache::Header), DATA_ALIGNMENT);
856 uint code_count = store_count;
857 uint search_count = code_count * 2;
858 uint search_size = search_count * sizeof(uint);
859 uint entries_size = (uint)align_up(code_count * sizeof(AOTCodeEntry), DATA_ALIGNMENT); // In bytes
860 // _write_position includes size of code and strings
861 uint code_alignment = code_count * DATA_ALIGNMENT; // We align_up code size when storing it.
862 uint cpu_features_size = VM_Version::cpu_features_size();
863 uint total_cpu_features_size = sizeof(uint) + cpu_features_size; // sizeof(uint) to store cpu_features_size
864 uint total_size = header_size + _write_position + code_alignment + search_size + entries_size +
865 align_up(total_cpu_features_size, DATA_ALIGNMENT);
866 assert(total_size < max_aot_code_size(), "AOT Code size (" UINT32_FORMAT " bytes) is greater than AOTCodeMaxSize(" UINT32_FORMAT " bytes).", total_size, max_aot_code_size());
867
868 // Allocate in AOT Cache buffer
869 char* buffer = (char *)AOTCacheAccess::allocate_aot_code_region(total_size + DATA_ALIGNMENT);
870 char* start = align_up(buffer, DATA_ALIGNMENT);
871 char* current = start + header_size; // Skip header
872
873 uint cpu_features_offset = current - start;
874 store_cpu_features(current, cpu_features_size);
875 assert(is_aligned(current, DATA_ALIGNMENT), "sanity check");
876 assert(current < start + total_size, "sanity check");
877
878 // Create ordered search table for entries [id, index];
879 uint* search = NEW_C_HEAP_ARRAY(uint, search_count, mtCode);
880
881 AOTCodeEntry* entries_address = _store_entries; // Pointer to latest entry
882 uint adapters_count = 0;
883 uint shared_blobs_count = 0;
884 uint stubgen_blobs_count = 0;
885 uint C1_blobs_count = 0;
886 uint C2_blobs_count = 0;
887 uint max_size = 0;
888 // AOTCodeEntry entries were allocated in reverse in store buffer.
889 // Process them in reverse order to cache first code first.
890 for (int i = store_count - 1; i >= 0; i--) {
891 entries_address[i].set_next(nullptr); // clear pointers before storing data
892 uint size = align_up(entries_address[i].size(), DATA_ALIGNMENT);
893 if (size > max_size) {
894 max_size = size;
895 }
896 copy_bytes((_store_buffer + entries_address[i].offset()), (address)current, size);
897 entries_address[i].set_offset(current - start); // New offset
898 current += size;
899 uint n = write_bytes(&(entries_address[i]), sizeof(AOTCodeEntry));
900 if (n != sizeof(AOTCodeEntry)) {
901 FREE_C_HEAP_ARRAY(search);
902 return false;
903 }
904 search[entries_count*2 + 0] = entries_address[i].id();
905 search[entries_count*2 + 1] = entries_count;
906 entries_count++;
907 AOTCodeEntry::Kind kind = entries_address[i].kind();
908 if (kind == AOTCodeEntry::Adapter) {
909 adapters_count++;
910 } else if (kind == AOTCodeEntry::SharedBlob) {
911 shared_blobs_count++;
912 } else if (kind == AOTCodeEntry::StubGenBlob) {
913 stubgen_blobs_count++;
914 } else if (kind == AOTCodeEntry::C1Blob) {
915 C1_blobs_count++;
916 } else if (kind == AOTCodeEntry::C2Blob) {
917 C2_blobs_count++;
918 }
919 }
920 if (entries_count == 0) {
921 log_info(aot, codecache, exit)("AOT Code Cache was not created: no entires");
922 FREE_C_HEAP_ARRAY(search);
923 return true; // Nothing to write
924 }
925 assert(entries_count <= store_count, "%d > %d", entries_count, store_count);
926 // Write strings
927 if (strings_count > 0) {
928 copy_bytes((_store_buffer + strings_offset), (address)current, strings_size);
929 strings_offset = (current - start); // New offset
930 current += strings_size;
931 }
932
933 uint new_entries_offset = (current - start); // New offset
934 // Sort and store search table
935 qsort(search, entries_count, 2*sizeof(uint), uint_cmp);
936 search_size = 2 * entries_count * sizeof(uint);
937 copy_bytes((const char*)search, (address)current, search_size);
938 FREE_C_HEAP_ARRAY(search);
939 current += search_size;
940
941 // Write entries
942 entries_size = entries_count * sizeof(AOTCodeEntry); // New size
943 copy_bytes((_store_buffer + entries_offset), (address)current, entries_size);
944 current += entries_size;
945 uint size = (current - start);
946 assert(size <= total_size, "%d > %d", size , total_size);
947
948 log_debug(aot, codecache, exit)(" Adapters: total=%u", adapters_count);
949 log_debug(aot, codecache, exit)(" Shared Blobs: total=%d", shared_blobs_count);
950 log_debug(aot, codecache, exit)(" StubGen Blobs: total=%d", stubgen_blobs_count);
951 log_debug(aot, codecache, exit)(" C1 Blobs: total=%d", C1_blobs_count);
952 log_debug(aot, codecache, exit)(" C2 Blobs: total=%d", C2_blobs_count);
953 log_debug(aot, codecache, exit)(" AOT code cache size: %u bytes, max entry's size: %u bytes", size, max_size);
954
955 // Finalize header
956 AOTCodeCache::Header* header = (AOTCodeCache::Header*)start;
957 header->init(size, (uint)strings_count, strings_offset,
958 entries_count, new_entries_offset,
959 adapters_count, shared_blobs_count,
960 stubgen_blobs_count, C1_blobs_count,
961 C2_blobs_count, cpu_features_offset);
962
963 log_info(aot, codecache, exit)("Wrote %d AOT code entries to AOT Code Cache", entries_count);
964 }
965 return true;
966 }
967
968 //------------------Store/Load AOT code ----------------------
969
970 bool AOTCodeCache::store_code_blob(CodeBlob& blob, AOTCodeEntry::Kind entry_kind, uint id, const char* name, AOTStubData* stub_data, CodeBuffer* code_buffer) {
971 assert(AOTCodeEntry::is_valid_entry_kind(entry_kind), "invalid entry_kind %d", entry_kind);
972
973 // we only expect stub data and a code buffer for a multi stub blob
974 assert(AOTCodeEntry::is_multi_stub_blob(entry_kind) == (stub_data != nullptr),
975 "entry_kind %d does not match stub_data pointer %p",
976 entry_kind, stub_data);
977
978 assert((stub_data == nullptr) == (code_buffer == nullptr),
979 "stub data and code buffer must both be null or both non null");
980
981 // If this is a stub and the cache is on for either load or dump we
982 // need to insert the stub entries into the AOTCacheAddressTable so
983 // that relocs which refer to entries defined by this blob get
984 // translated correctly.
985 //
986 // Entry insertion needs to be be done up front before writing the
987 // blob because some blobs rely on internal daisy-chain references
988 // from one entry to another.
989 //
990 // Entry insertion also needs to be done even if the cache is open
991 // for use but not for dump. This may be needed when an archived
992 // blob omits some entries -- either because of a config change or a
993 // load failure -- with the result that the entries end up being
994 // generated. These generated entry addresses may be needed to
995 // resolve references from subsequently loaded blobs (for either
996 // stubs or nmethods).
997
998 if (is_on() && AOTCodeEntry::is_blob(entry_kind)) {
999 publish_stub_addresses(blob, (BlobId)id, stub_data);
1000 }
1001
1002 AOTCodeCache* cache = open_for_dump();
1003 if (cache == nullptr) {
1004 return false;
1005 }
1006 if (AOTCodeEntry::is_adapter(entry_kind) && !is_dumping_adapter()) {
1007 return false;
1008 }
1009 if (AOTCodeEntry::is_blob(entry_kind) && !is_dumping_stub()) {
1010 return false;
1011 }
1012 // we do not currently store C2 stubs because we are seeing weird
1013 // memory errors when loading them -- see JDK-8357593
1014 if (entry_kind == AOTCodeEntry::C2Blob) {
1015 return false;
1016 }
1017 log_debug(aot, codecache, stubs)("Writing blob '%s' (id=%u, kind=%s) to AOT Code Cache", name, id, aot_code_entry_kind_name[entry_kind]);
1018
1019 #ifdef ASSERT
1020 LogStreamHandle(Trace, aot, codecache, stubs) log;
1021 if (log.is_enabled()) {
1022 FlagSetting fs(PrintRelocations, true);
1023 blob.print_on(&log);
1024 }
1025 #endif
1026 // we need to take a lock to prevent race between compiler threads generating AOT code
1027 // and the main thread generating adapter
1028 MutexLocker ml(Compile_lock);
1029 if (!is_on()) {
1030 return false; // AOT code cache was already dumped and closed.
1031 }
1032 if (!cache->align_write()) {
1033 return false;
1034 }
1035 uint entry_position = cache->_write_position;
1036
1037 uint blob_offset = cache->_write_position - entry_position;
1038 // Code blob's size is aligned to oopSize
1039 address archive_buffer = cache->reserve_bytes(blob.size());
1040 if (archive_buffer == nullptr) {
1041 return false;
1042 }
1043 CodeBlob::archive_blob(&blob, archive_buffer);
1044
1045 // For a relocatable code blob its relocations are linked from the
1046 // blob. However, for a non-relocatable (stubgen) blob we only have
1047 // transient relocations attached to the code buffer that are added
1048 // in order to support AOT-load time patching. in either case, we
1049 // need to explicitly save these relocs when storing the blob to the
1050 // archive so we can then reload them and reattach them to either
1051 // the blob or to a code buffer when we reload the blob into a
1052 // production JVM.
1053 //
1054 // Either way we are then in a position to iterate over the relocs
1055 // and AOT patch the ones that refer to code that may move between
1056 // assembly and production time. We also need to save and restore
1057 // AOT address table indexes for the target addresses of affected
1058 // relocs. That happens below.
1059
1060 int reloc_count;
1061 address reloc_data;
1062 if (AOTCodeEntry::is_multi_stub_blob(entry_kind)) {
1063 CodeSection* cs = code_buffer->code_section(CodeBuffer::SECT_INSTS);
1064 reloc_count = (cs->has_locs() ? cs->locs_count() : 0);
1065 reloc_data = (reloc_count > 0 ? (address)cs->locs_start() : nullptr);
1066 } else {
1067 reloc_count = blob.relocation_size() / sizeof(relocInfo);
1068 reloc_data = (address)blob.relocation_begin();
1069 }
1070 uint n = cache->write_bytes(&reloc_count, sizeof(int));
1071 if (n != sizeof(int)) {
1072 return false;
1073 }
1074 if (AOTCodeEntry::is_multi_stub_blob(entry_kind)) {
1075 // align to heap word size before writing the relocs so we can
1076 // install them into a code buffer when they get restored
1077 if (!cache->align_write()) {
1078 return false;
1079 }
1080 }
1081 uint reloc_data_size = (uint)(reloc_count * sizeof(relocInfo));
1082 n = cache->write_bytes(reloc_data, reloc_data_size);
1083 if (n != reloc_data_size) {
1084 return false;
1085 }
1086
1087 bool has_oop_maps = false;
1088 if (blob.oop_maps() != nullptr) {
1089 if (!cache->write_oop_map_set(blob)) {
1090 return false;
1091 }
1092 has_oop_maps = true;
1093 }
1094
1095 // In the case of a multi-stub blob we need to write start, end,
1096 // secondary entries and extras. For any other blob entry addresses
1097 // beyond the blob start will be stored in the blob as offsets.
1098 if (stub_data != nullptr) {
1099 if (!cache->write_stub_data(blob, stub_data)) {
1100 return false;
1101 }
1102 }
1103
1104 // now we have added all the other data we can write details of any
1105 // extra the AOT relocations
1106
1107 bool write_ok = true;
1108 if (AOTCodeEntry::is_multi_stub_blob(entry_kind)) {
1109 if (reloc_count > 0) {
1110 CodeSection* cs = code_buffer->code_section(CodeBuffer::SECT_INSTS);
1111 RelocIterator iter(cs);
1112 write_ok = cache->write_relocations(blob, iter);
1113 }
1114 } else {
1115 RelocIterator iter(&blob);
1116 write_ok = cache->write_relocations(blob, iter);
1117 }
1118
1119 if (!write_ok) {
1120 if (!cache->failed()) {
1121 // We may miss an address in AOT table - skip this code blob.
1122 cache->set_write_position(entry_position);
1123 }
1124 return false;
1125 }
1126
1127 #ifndef PRODUCT
1128 // Write asm remarks after relocation info
1129 if (!cache->write_asm_remarks(blob)) {
1130 return false;
1131 }
1132 if (!cache->write_dbg_strings(blob)) {
1133 return false;
1134 }
1135 #endif /* PRODUCT */
1136
1137 // Write name after code comments
1138 uint name_offset = cache->_write_position - entry_position;
1139 uint name_size = (uint)strlen(name) + 1; // Includes '/0'
1140 n = cache->write_bytes(name, name_size);
1141 if (n != name_size) {
1142 return false;
1143 }
1144
1145 uint entry_size = cache->_write_position - entry_position;
1146
1147 AOTCodeEntry* entry = new(cache) AOTCodeEntry(entry_kind, encode_id(entry_kind, id),
1148 entry_position, entry_size, name_offset, name_size,
1149 blob_offset, has_oop_maps, blob.content_begin());
1150 log_debug(aot, codecache, stubs)("Wrote code blob '%s' (id=%u, kind=%s) to AOT Code Cache", name, id, aot_code_entry_kind_name[entry_kind]);
1151 return true;
1152 }
1153
1154 bool AOTCodeCache::store_code_blob(CodeBlob& blob, AOTCodeEntry::Kind entry_kind, uint id, const char* name) {
1155 assert(!AOTCodeEntry::is_blob(entry_kind),
1156 "wrong entry kind for numeric id %d", id);
1157 return store_code_blob(blob, entry_kind, (uint)id, name, nullptr, nullptr);
1158 }
1159
1160 bool AOTCodeCache::store_code_blob(CodeBlob& blob, AOTCodeEntry::Kind entry_kind, BlobId id) {
1161 assert(AOTCodeEntry::is_single_stub_blob(entry_kind),
1162 "wrong entry kind for blob id %s", StubInfo::name(id));
1163 return store_code_blob(blob, entry_kind, (uint)id, StubInfo::name(id), nullptr, nullptr);
1164 }
1165
1166 bool AOTCodeCache::store_code_blob(CodeBlob& blob, AOTCodeEntry::Kind entry_kind, BlobId id, AOTStubData* stub_data, CodeBuffer* code_buffer) {
1167 assert(AOTCodeEntry::is_multi_stub_blob(entry_kind),
1168 "wrong entry kind for multi stub blob id %s", StubInfo::name(id));
1169 return store_code_blob(blob, entry_kind, (uint)id, StubInfo::name(id), stub_data, code_buffer);
1170 }
1171
1172 bool AOTCodeCache::write_stub_data(CodeBlob &blob, AOTStubData *stub_data) {
1173 if (!align_write_int()) {
1174 return false;
1175 }
1176 BlobId blob_id = stub_data->blob_id();
1177 StubId stub_id = StubInfo::stub_base(blob_id);
1178 address blob_base = blob.code_begin();
1179 int stub_cnt = StubInfo::stub_count(blob_id);
1180 int n;
1181
1182 LogStreamHandle(Trace, aot, codecache, stubs) log;
1183
1184 if (log.is_enabled()) {
1185 log.print_cr("======== Stub data starts at offset %d", _write_position);
1186 }
1187
1188 for (int i = 0; i < stub_cnt; i++, stub_id = StubInfo::next_in_blob(blob_id, stub_id)) {
1189 // for each stub we find in the ranges list we write an int
1190 // sequence <stubid,start,end,N,offset1, ... offsetN> where
1191 //
1192 // - start_pos is the stub start address encoded as a code section offset
1193 //
1194 // - end is the stub end address encoded as an offset from start
1195 //
1196 // - N counts the number of stub-local entries/extras
1197 //
1198 // - offseti is a stub-local entry/extra address encoded as len for
1199 // a null address otherwise as an offset in range [1,len-1]
1200
1201 StubAddrRange& range = stub_data->get_range(i);
1202 GrowableArray<address>& addresses = stub_data->address_array();
1203 int base = range.start_index();
1204 if (base >= 0) {
1205 n = write_bytes(&stub_id, sizeof(StubId));
1206 if (n != sizeof(StubId)) {
1207 return false;
1208 }
1209 address start = addresses.at(base);
1210 assert (blob_base <= start, "sanity");
1211 uint offset = (uint)(start - blob_base);
1212 n = write_bytes(&offset, sizeof(uint));
1213 if (n != sizeof(int)) {
1214 return false;
1215 }
1216 address end = addresses.at(base + 1);
1217 assert (start < end, "sanity");
1218 offset = (uint)(end - start);
1219 n = write_bytes(&offset, sizeof(uint));
1220 if (n != sizeof(int)) {
1221 return false;
1222 }
1223 // write number of secondary and extra entries
1224 int count = range.count() - 2;
1225 n = write_bytes(&count, sizeof(int));
1226 if (n != sizeof(int)) {
1227 return false;
1228 }
1229 for (int j = 0; j < count; j++) {
1230 address next = addresses.at(base + 2 + j);
1231 if (next != nullptr) {
1232 // n.b. This maps next == end to the stub length which
1233 // means we will reconstitute the address as nullptr. That
1234 // happens when we have a handler range covers the end of
1235 // a stub and needs to be handled specially by the client
1236 // that restores the extras.
1237 assert(start <= next && next <= end, "sanity");
1238 offset = (uint)(next - start);
1239 } else {
1240 // this can happen when a stub is not generated or an
1241 // extra is the common handler target
1242 offset = NULL_ADDRESS_MARKER;
1243 }
1244 n = write_bytes(&offset, sizeof(uint));
1245 if (n != sizeof(int)) {
1246 return false;
1247 }
1248 }
1249 if (log.is_enabled()) {
1250 log.print_cr("======== wrote stub %s and %d addresses up to offset %d",
1251 StubInfo::name(stub_id), range.count(), _write_position);
1252 }
1253 }
1254 }
1255 // we should have exhausted all stub ids in the blob
1256 assert(stub_id == StubId::NO_STUBID, "sanity");
1257 // write NO_STUBID as an end marker
1258 n = write_bytes(&stub_id, sizeof(StubId));
1259 if (n != sizeof(StubId)) {
1260 return false;
1261 }
1262
1263 if (log.is_enabled()) {
1264 log.print_cr("======== Stub data ends at offset %d", _write_position);
1265 }
1266
1267 return true;
1268 }
1269
1270 CodeBlob* AOTCodeCache::load_code_blob(AOTCodeEntry::Kind entry_kind, uint id, const char* name, AOTStubData* stub_data) {
1271 AOTCodeCache* cache = open_for_use();
1272 if (cache == nullptr) {
1273 return nullptr;
1274 }
1275 assert(AOTCodeEntry::is_valid_entry_kind(entry_kind), "invalid entry_kind %d", entry_kind);
1276
1277 assert(AOTCodeEntry::is_multi_stub_blob(entry_kind) == (stub_data != nullptr),
1278 "entry_kind %d does not match stub_data pointer %p",
1279 entry_kind, stub_data);
1280
1281 if (AOTCodeEntry::is_adapter(entry_kind) && !is_using_adapter()) {
1282 return nullptr;
1283 }
1284 if (AOTCodeEntry::is_blob(entry_kind) && !is_using_stub()) {
1285 return nullptr;
1286 }
1287 // we do not currently load C2 stubs because we are seeing weird
1288 // memory errors when loading them -- see JDK-8357593
1289 if (entry_kind == AOTCodeEntry::C2Blob) {
1290 return nullptr;
1291 }
1292 log_debug(aot, codecache, stubs)("Reading blob '%s' (id=%u, kind=%s) from AOT Code Cache", name, id, aot_code_entry_kind_name[entry_kind]);
1293
1294 AOTCodeEntry* entry = cache->find_entry(entry_kind, encode_id(entry_kind, id));
1295 if (entry == nullptr) {
1296 return nullptr;
1297 }
1298 AOTCodeReader reader(cache, entry);
1299 CodeBlob* blob = reader.compile_code_blob(name, entry_kind, id, stub_data);
1300
1301 log_debug(aot, codecache, stubs)("%sRead blob '%s' (id=%u, kind=%s) from AOT Code Cache",
1302 (blob == nullptr? "Failed to " : ""), name, id, aot_code_entry_kind_name[entry_kind]);
1303 return blob;
1304 }
1305
1306 CodeBlob* AOTCodeCache::load_code_blob(AOTCodeEntry::Kind entry_kind, uint id, const char* name) {
1307 assert(!AOTCodeEntry::is_blob(entry_kind),
1308 "wrong entry kind for numeric id %d", id);
1309 return load_code_blob(entry_kind, (uint)id, name, nullptr);
1310 }
1311
1312 CodeBlob* AOTCodeCache::load_code_blob(AOTCodeEntry::Kind entry_kind, BlobId id) {
1313 assert(AOTCodeEntry::is_single_stub_blob(entry_kind),
1314 "wrong entry kind for blob id %s", StubInfo::name(id));
1315 return load_code_blob(entry_kind, (uint)id, StubInfo::name(id), nullptr);
1316 }
1317
1318 CodeBlob* AOTCodeCache::load_code_blob(AOTCodeEntry::Kind entry_kind, BlobId id, AOTStubData* stub_data) {
1319 assert(AOTCodeEntry::is_multi_stub_blob(entry_kind),
1320 "wrong entry kind for blob id %s", StubInfo::name(id));
1321 return load_code_blob(entry_kind, (uint)id, StubInfo::name(id), stub_data);
1322 }
1323
1324 CodeBlob* AOTCodeReader::compile_code_blob(const char* name, AOTCodeEntry::Kind entry_kind, int id, AOTStubData* stub_data) {
1325 uint entry_position = _entry->offset();
1326
1327 // Read name
1328 uint name_offset = entry_position + _entry->name_offset();
1329 uint name_size = _entry->name_size(); // Includes '/0'
1330 const char* stored_name = addr(name_offset);
1331
1332 if (strncmp(stored_name, name, (name_size - 1)) != 0) {
1333 log_warning(aot, codecache, stubs)("Saved blob's name '%s' is different from the expected name '%s'",
1334 stored_name, name);
1335 set_lookup_failed(); // Skip this blob
1336 return nullptr;
1337 }
1338 _name = stored_name;
1339
1340 // Read archived code blob and related info
1341 uint offset = entry_position + _entry->blob_offset();
1342 CodeBlob* archived_blob = (CodeBlob*)addr(offset);
1343 offset += archived_blob->size();
1344
1345 _reloc_count = *(int*)addr(offset);
1346 offset += sizeof(int);
1347 if (AOTCodeEntry::is_multi_stub_blob(entry_kind)) {
1348 // position of relocs will have been aligned to heap word size so
1349 // we can install them into a code buffer
1350 offset = align_up(offset, DATA_ALIGNMENT);
1351 }
1352 _reloc_data = (address)addr(offset);
1353 offset += _reloc_count * sizeof(relocInfo);
1354 set_read_position(offset);
1355
1356 if (_entry->has_oop_maps()) {
1357 _oop_maps = read_oop_map_set();
1358 }
1359
1360 // record current context for use by that callback
1361 _stub_data = stub_data;
1362 _entry_kind = entry_kind;
1363 _id = id;
1364
1365 // CodeBlob::restore() calls AOTCodeReader::restore()
1366
1367 CodeBlob* code_blob = CodeBlob::create(archived_blob, this);
1368
1369 if (code_blob == nullptr) { // no space left in CodeCache
1370 return nullptr;
1371 }
1372
1373 #ifdef ASSERT
1374 LogStreamHandle(Trace, aot, codecache, stubs) log;
1375 if (log.is_enabled()) {
1376 FlagSetting fs(PrintRelocations, true);
1377 code_blob->print_on(&log);
1378 }
1379 #endif
1380 return code_blob;
1381 }
1382
1383 void AOTCodeReader::restore(CodeBlob* code_blob) {
1384 precond(AOTCodeCache::is_on_for_use());
1385 precond(_name != nullptr);
1386 precond(_reloc_data != nullptr);
1387
1388 code_blob->set_name(_name);
1389 // Saved relocations need restoring except for the case of a
1390 // multi-stub blob which has no runtime relocations. However, we may
1391 // still have saved some (re-)load time relocs that were attached to
1392 // the generator's code buffer. We don't attach them to the blob but
1393 // they get processed below by fix_relocations.
1394 if (!AOTCodeEntry::is_multi_stub_blob(_entry_kind)) {
1395 code_blob->restore_mutable_data(_reloc_data);
1396 }
1397 code_blob->set_oop_maps(_oop_maps);
1398
1399 // if this is a multi stub blob load its entries
1400 if (AOTCodeEntry::is_blob(_entry_kind)) {
1401 BlobId blob_id = static_cast<BlobId>(_id);
1402 if (StubInfo::is_stubgen(blob_id)) {
1403 assert(_stub_data != nullptr, "sanity");
1404 read_stub_data(code_blob, _stub_data);
1405 }
1406 // publish entries found either in stub_data or as offsets in blob
1407 AOTCodeCache::publish_stub_addresses(*code_blob, blob_id, _stub_data);
1408 }
1409
1410 // Now that all the entry points are in the address table we can
1411 // read all the extra reloc info and fix up any addresses that need
1412 // patching to adjust for a new location in a new JVM. We can be
1413 // sure to correctly update all runtime references, including
1414 // cross-linked stubs that are internally daisy-chained. If
1415 // relocation fails and we have to re-generate any of the stubs then
1416 // the entry points for newly generated stubs will get updated,
1417 // ensuring that any other stubs or nmethods we need to relocate
1418 // will use the correct address.
1419
1420 // if we have a relocatable code blob then the relocs are already
1421 // attached to the blob and we can iterate over it to find the ones
1422 // we need to patch. With a non-relocatable code blob we need to
1423 // wrap it with a CodeBuffer and then reattach the relocs to the
1424 // code buffer.
1425
1426 if (AOTCodeEntry::is_multi_stub_blob(_entry_kind)) {
1427 // the blob doesn't have any proper runtime relocs but we can
1428 // reinstate the AOT-load time relocs we saved from the code
1429 // buffer that generated this blob in a new code buffer and use
1430 // the latter to iterate over them
1431 if (_reloc_count > 0) {
1432 CodeBuffer code_buffer(code_blob);
1433 relocInfo* locs = (relocInfo*)_reloc_data;
1434 code_buffer.insts()->initialize_shared_locs(locs, _reloc_count);
1435 code_buffer.insts()->set_locs_end(locs + _reloc_count);
1436 CodeSection *cs = code_buffer.code_section(CodeBuffer::SECT_INSTS);
1437 RelocIterator reloc_iter(cs);
1438 fix_relocations(code_blob, reloc_iter);
1439 }
1440 } else {
1441 // the AOT-load time relocs will be in the blob's restored relocs
1442 RelocIterator reloc_iter(code_blob);
1443 fix_relocations(code_blob, reloc_iter);
1444 }
1445
1446 #ifndef PRODUCT
1447 code_blob->asm_remarks().init();
1448 read_asm_remarks(code_blob->asm_remarks());
1449 code_blob->dbg_strings().init();
1450 read_dbg_strings(code_blob->dbg_strings());
1451 #endif // PRODUCT
1452 }
1453
1454 void AOTCodeReader::read_stub_data(CodeBlob* code_blob, AOTStubData* stub_data) {
1455 GrowableArray<address>& addresses = stub_data->address_array();
1456 // Read the list of stub ids and associated start, end, secondary
1457 // and extra addresses and install them in the stub data.
1458 //
1459 // Also insert all start and secondary addresses into the AOTCache
1460 // address table so we correctly relocate this blob and any followng
1461 // blobs/nmethods.
1462 //
1463 // n.b. if an error occurs and we need to regenerate any of these
1464 // stubs the address table will be updated as a side-effect of
1465 // regeneration.
1466
1467 address blob_base = code_blob->code_begin();
1468 uint blob_size = (uint)(code_blob->code_end() - blob_base);
1469 uint offset = align_read_int();
1470 LogStreamHandle(Trace, aot, codecache, stubs) log;
1471 if (log.is_enabled()) {
1472 log.print_cr("======== Stub data starts at offset %d", offset);
1473 }
1474 // read stub and entries until we see NO_STUBID
1475 StubId stub_id = *(StubId*)addr(offset); offset += sizeof(StubId);
1476 // we ought to have at least one saved stub in the blob
1477 assert(stub_id != StubId::NO_STUBID, "blob %s contains no stubs!", StubInfo::name(stub_data->blob_id()));
1478 while (stub_id != StubId::NO_STUBID) {
1479 assert(StubInfo::blob(stub_id) == stub_data->blob_id(), "sanity");
1480 int idx = StubInfo::stubgen_offset_in_blob(stub_data->blob_id(), stub_id);
1481 StubAddrRange& range = stub_data->get_range(idx);
1482 // we should only see a stub once
1483 assert(range.start_index() < 0, "repeated entry for stub %s", StubInfo::name(stub_id));
1484 int address_base = addresses.length();
1485 // start is an offset from the blob base
1486 uint start = *(uint*)addr(offset); offset += sizeof(uint);
1487 assert(start < blob_size, "stub %s start offset %d exceeds buffer length %d", StubInfo::name(stub_id), start, blob_size);
1488 address stub_start = blob_base + start;
1489 addresses.append(stub_start);
1490 // end is an offset from the stub start
1491 uint end = *(uint*)addr(offset); offset += sizeof(uint);
1492 assert(start + end <= blob_size, "stub %s end offset %d exceeds remaining buffer length %d", StubInfo::name(stub_id), end, blob_size - start);
1493 addresses.append(stub_start + end);
1494 // read count of secondary entries plus extras
1495 int entries_count = *(int*)addr(offset); offset += sizeof(int);
1496 assert(entries_count >= (StubInfo::entry_count(stub_id) - 1), "not enough entries for %s", StubInfo::name(stub_id));
1497 for (int i = 0; i < entries_count; i++) {
1498 // entry offset is an offset from the stub start less than or
1499 // equal to end
1500 uint entry = *(uint*)addr(offset); offset += sizeof(uint);
1501 if (entry <= end) {
1502 // entry addresses may not address end but extras can
1503 assert(entry < end || i >= StubInfo::entry_count(stub_id),
1504 "entry offset 0x%x exceeds stub length 0x%x for stub %s",
1505 entry, end, StubInfo::name(stub_id));
1506 addresses.append(stub_start + entry);
1507 } else {
1508 // special case: entry encodes a nullptr
1509 assert(entry == AOTCodeCache::NULL_ADDRESS_MARKER, "stub %s entry offset %d lies beyond stub end %d and does not equal NULL_ADDRESS_MARKER", StubInfo::name(stub_id), entry, end);
1510 addresses.append(nullptr);
1511 }
1512 }
1513 if (log.is_enabled()) {
1514 log.print_cr("======== read stub %s and %d addresses up to offset %d",
1515 StubInfo::name(stub_id), 2 + entries_count, offset);
1516 }
1517 range.init_entry(address_base, 2 + entries_count);
1518 // move on to next stub or NO_STUBID
1519 stub_id = *(StubId*)addr(offset); offset += sizeof(StubId);
1520 }
1521 if (log.is_enabled()) {
1522 log.print_cr("======== Stub data ends at offset %d", offset);
1523 }
1524
1525 set_read_position(offset);
1526 }
1527
1528 void AOTCodeCache::publish_external_addresses(GrowableArray<address>& addresses) {
1529 DEBUG_ONLY( _passed_init2 = true; )
1530 if (opened_cache == nullptr) {
1531 return;
1532 }
1533
1534 cache()->_table->add_external_addresses(addresses);
1535 }
1536
1537 void AOTCodeCache::publish_stub_addresses(CodeBlob &code_blob, BlobId blob_id, AOTStubData *stub_data) {
1538 if (stub_data != nullptr) {
1539 // register all entries in stub
1540 assert(StubInfo::stub_count(blob_id) > 1,
1541 "multiple stub data provided for single stub blob %s",
1542 StubInfo::name(blob_id));
1543 assert(blob_id == stub_data->blob_id(),
1544 "blob id %s does not match id in stub data %s",
1545 StubInfo::name(blob_id),
1546 StubInfo::name(stub_data->blob_id()));
1547 // iterate over all stubs in the blob
1548 StubId stub_id = StubInfo::stub_base(blob_id);
1549 int stub_cnt = StubInfo::stub_count(blob_id);
1550 GrowableArray<address>& addresses = stub_data->address_array();
1551 for (int i = 0; i < stub_cnt; i++) {
1552 assert(stub_id != StubId::NO_STUBID, "sanity");
1553 StubAddrRange& range = stub_data->get_range(i);
1554 int base = range.start_index();
1555 if (base >= 0) {
1556 cache()->add_stub_entries(stub_id, addresses.at(base), &addresses, base + 2);
1557 }
1558 stub_id = StubInfo::next_in_blob(blob_id, stub_id);
1559 }
1560 // we should have exhausted all stub ids in the blob
1561 assert(stub_id == StubId::NO_STUBID, "sanity");
1562 } else {
1563 // register entry or entries for a single stub blob
1564 StubId stub_id = StubInfo::stub_base(blob_id);
1565 assert(StubInfo::stub_count(blob_id) == 1,
1566 "multiple stub blob %s provided without stub data",
1567 StubInfo::name(blob_id));
1568 address start = code_blob.code_begin();
1569 if (StubInfo::entry_count(stub_id) == 1) {
1570 assert(!code_blob.is_deoptimization_stub(), "expecting multiple entries for stub %s", StubInfo::name(stub_id));
1571 // register the blob base address as the only entry
1572 cache()->add_stub_entries(stub_id, start);
1573 } else {
1574 assert(code_blob.is_deoptimization_stub(), "only expecting one entry for stub %s", StubInfo::name(stub_id));
1575 DeoptimizationBlob *deopt_blob = code_blob.as_deoptimization_blob();
1576 assert(deopt_blob->unpack() == start, "unexpected offset 0x%x for deopt stub entry", (int)(deopt_blob->unpack() - start));
1577 GrowableArray<address> addresses;
1578 addresses.append(deopt_blob->unpack_with_exception());
1579 addresses.append(deopt_blob->unpack_with_reexecution());
1580 addresses.append(deopt_blob->unpack_with_exception_in_tls());
1581 #if INCLUDE_JVMCI
1582 addresses.append(deopt_blob->uncommon_trap());
1583 addresses.append(deopt_blob->implicit_exception_uncommon_trap());
1584 #endif // INCLUDE_JVMCI
1585 cache()->add_stub_entries(stub_id, start, &addresses, 0);
1586 }
1587 }
1588 }
1589
1590 // ------------ process code and data --------------
1591
1592 // Can't use -1. It is valid value for jump to iteself destination
1593 // used by static call stub: see NativeJump::jump_destination().
1594 #define BAD_ADDRESS_ID -2
1595
1596 bool AOTCodeCache::write_relocations(CodeBlob& code_blob, RelocIterator& iter) {
1597 if (!align_write_int()) {
1598 return false;
1599 }
1600 GrowableArray<uint> reloc_data;
1601 LogStreamHandle(Trace, aot, codecache, reloc) log;
1602 while (iter.next()) {
1603 int idx = reloc_data.append(0); // default value
1604 switch (iter.type()) {
1605 case relocInfo::none:
1606 break;
1607 case relocInfo::runtime_call_type: {
1608 // Record offset of runtime destination
1609 CallRelocation* r = (CallRelocation*)iter.reloc();
1610 address dest = r->destination();
1611 if (dest == r->addr()) { // possible call via trampoline on Aarch64
1612 dest = (address)-1; // do nothing in this case when loading this relocation
1613 }
1614 int id = _table->id_for_address(dest, iter, &code_blob);
1615 if (id == BAD_ADDRESS_ID) {
1616 return false;
1617 }
1618 reloc_data.at_put(idx, id);
1619 break;
1620 }
1621 case relocInfo::runtime_call_w_cp_type:
1622 log_debug(aot, codecache, reloc)("runtime_call_w_cp_type relocation is not implemented");
1623 return false;
1624 case relocInfo::external_word_type: {
1625 // Record offset of runtime target
1626 address target = ((external_word_Relocation*)iter.reloc())->target();
1627 int id = _table->id_for_address(target, iter, &code_blob);
1628 if (id == BAD_ADDRESS_ID) {
1629 return false;
1630 }
1631 reloc_data.at_put(idx, id);
1632 break;
1633 }
1634 case relocInfo::internal_word_type:
1635 break;
1636 case relocInfo::section_word_type:
1637 break;
1638 case relocInfo::post_call_nop_type:
1639 break;
1640 default:
1641 log_debug(aot, codecache, reloc)("relocation %d unimplemented", (int)iter.type());
1642 return false;
1643 break;
1644 }
1645 if (log.is_enabled()) {
1646 iter.print_current_on(&log);
1647 }
1648 }
1649
1650 // Write additional relocation data: uint per relocation
1651 // Write the count first
1652 int count = reloc_data.length();
1653 write_bytes(&count, sizeof(int));
1654 if (log.is_enabled()) {
1655 log.print_cr("======== extra relocations count=%d", count);
1656 log.print( " {");
1657 }
1658 bool first = true;
1659 for (GrowableArrayIterator<uint> iter = reloc_data.begin();
1660 iter != reloc_data.end(); ++iter) {
1661 uint value = *iter;
1662 int n = write_bytes(&value, sizeof(uint));
1663 if (n != sizeof(uint)) {
1664 return false;
1665 }
1666 if (log.is_enabled()) {
1667 if (first) {
1668 first = false;
1669 log.print("%d", value);
1670 } else {
1671 log.print(", %d", value);
1672 }
1673 }
1674 }
1675 if (log.is_enabled()) {
1676 log.print_cr("}");
1677 }
1678 return true;
1679 }
1680
1681 void AOTCodeReader::fix_relocations(CodeBlob *code_blob, RelocIterator& iter) {
1682 uint offset = align_read_int();
1683 int reloc_count = *(int*)addr(offset);
1684 offset += sizeof(int);
1685 uint* reloc_data = (uint*)addr(offset);
1686 offset += (reloc_count * sizeof(uint));
1687 set_read_position(offset);
1688
1689 LogStreamHandle(Trace, aot, codecache, reloc) log;
1690 if (log.is_enabled()) {
1691 log.print_cr("======== extra relocations count=%d", reloc_count);
1692 log.print(" {");
1693 for(int i = 0; i < reloc_count; i++) {
1694 if (i == 0) {
1695 log.print("%d", reloc_data[i]);
1696 } else {
1697 log.print(", %d", reloc_data[i]);
1698 }
1699 }
1700 log.print_cr("}");
1701 }
1702
1703 int j = 0;
1704 while (iter.next()) {
1705 switch (iter.type()) {
1706 case relocInfo::none:
1707 break;
1708 case relocInfo::runtime_call_type: {
1709 address dest = _cache->address_for_id(reloc_data[j]);
1710 if (dest != (address)-1) {
1711 ((CallRelocation*)iter.reloc())->set_destination(dest);
1712 }
1713 break;
1714 }
1715 case relocInfo::runtime_call_w_cp_type:
1716 // this relocation should not be in cache (see write_relocations)
1717 assert(false, "runtime_call_w_cp_type relocation is not implemented");
1718 break;
1719 case relocInfo::external_word_type: {
1720 address target = _cache->address_for_id(reloc_data[j]);
1721 // Add external address to global table
1722 int index = ExternalsRecorder::find_index(target);
1723 // Update index in relocation
1724 Relocation::add_jint(iter.data(), index);
1725 external_word_Relocation* reloc = (external_word_Relocation*)iter.reloc();
1726 assert(reloc->target() == target, "sanity");
1727 reloc->set_value(target); // Patch address in the code
1728 break;
1729 }
1730 case relocInfo::internal_word_type: {
1731 internal_word_Relocation* r = (internal_word_Relocation*)iter.reloc();
1732 r->fix_relocation_after_aot_load(aot_code_entry()->dumptime_content_start_addr(), code_blob->content_begin());
1733 break;
1734 }
1735 case relocInfo::section_word_type: {
1736 section_word_Relocation* r = (section_word_Relocation*)iter.reloc();
1737 r->fix_relocation_after_aot_load(aot_code_entry()->dumptime_content_start_addr(), code_blob->content_begin());
1738 break;
1739 }
1740 case relocInfo::post_call_nop_type:
1741 break;
1742 default:
1743 assert(false,"relocation %d unimplemented", (int)iter.type());
1744 break;
1745 }
1746 if (log.is_enabled()) {
1747 iter.print_current_on(&log);
1748 }
1749 j++;
1750 }
1751 assert(j == reloc_count, "sanity");
1752 }
1753
1754 bool AOTCodeCache::write_oop_map_set(CodeBlob& cb) {
1755 if (!align_write_int()) {
1756 return false;
1757 }
1758 ImmutableOopMapSet* oopmaps = cb.oop_maps();
1759 int oopmaps_size = oopmaps->nr_of_bytes();
1760 if (!write_bytes(&oopmaps_size, sizeof(int))) {
1761 return false;
1762 }
1763 uint n = write_bytes(oopmaps, oopmaps->nr_of_bytes());
1764 if (n != (uint)oopmaps->nr_of_bytes()) {
1765 return false;
1766 }
1767 return true;
1768 }
1769
1770 ImmutableOopMapSet* AOTCodeReader::read_oop_map_set() {
1771 uint offset = align_read_int();
1772 int size = *(int *)addr(offset);
1773 offset += sizeof(int);
1774 ImmutableOopMapSet* oopmaps = (ImmutableOopMapSet *)addr(offset);
1775 offset += size;
1776 set_read_position(offset);
1777 return oopmaps;
1778 }
1779
1780 #ifndef PRODUCT
1781 bool AOTCodeCache::write_asm_remarks(CodeBlob& cb) {
1782 if (!align_write_int()) {
1783 return false;
1784 }
1785 // Write asm remarks
1786 uint* count_ptr = (uint *)reserve_bytes(sizeof(uint));
1787 if (count_ptr == nullptr) {
1788 return false;
1789 }
1790 uint count = 0;
1791 bool result = cb.asm_remarks().iterate([&] (uint offset, const char* str) -> bool {
1792 log_trace(aot, codecache, stubs)("asm remark offset=%d, str='%s'", offset, str);
1793 uint n = write_bytes(&offset, sizeof(uint));
1794 if (n != sizeof(uint)) {
1795 return false;
1796 }
1797 const char* cstr = add_C_string(str);
1798 int id = _table->id_for_C_string((address)cstr);
1799 assert(id != -1, "asm remark string '%s' not found in AOTCodeAddressTable", str);
1800 n = write_bytes(&id, sizeof(int));
1801 if (n != sizeof(int)) {
1802 return false;
1803 }
1804 count += 1;
1805 return true;
1806 });
1807 *count_ptr = count;
1808 return result;
1809 }
1810
1811 void AOTCodeReader::read_asm_remarks(AsmRemarks& asm_remarks) {
1812 // Read asm remarks
1813 uint offset = align_read_int();
1814 uint count = *(uint *)addr(offset);
1815 offset += sizeof(uint);
1816 for (uint i = 0; i < count; i++) {
1817 uint remark_offset = *(uint *)addr(offset);
1818 offset += sizeof(uint);
1819 int remark_string_id = *(uint *)addr(offset);
1820 offset += sizeof(int);
1821 const char* remark = (const char*)_cache->address_for_C_string(remark_string_id);
1822 asm_remarks.insert(remark_offset, remark);
1823 }
1824 set_read_position(offset);
1825 }
1826
1827 bool AOTCodeCache::write_dbg_strings(CodeBlob& cb) {
1828 if (!align_write_int()) {
1829 return false;
1830 }
1831 // Write dbg strings
1832 uint* count_ptr = (uint *)reserve_bytes(sizeof(uint));
1833 if (count_ptr == nullptr) {
1834 return false;
1835 }
1836 uint count = 0;
1837 bool result = cb.dbg_strings().iterate([&] (const char* str) -> bool {
1838 log_trace(aot, codecache, stubs)("dbg string=%s", str);
1839 const char* cstr = add_C_string(str);
1840 int id = _table->id_for_C_string((address)cstr);
1841 assert(id != -1, "db string '%s' not found in AOTCodeAddressTable", str);
1842 uint n = write_bytes(&id, sizeof(int));
1843 if (n != sizeof(int)) {
1844 return false;
1845 }
1846 count += 1;
1847 return true;
1848 });
1849 *count_ptr = count;
1850 return result;
1851 }
1852
1853 void AOTCodeReader::read_dbg_strings(DbgStrings& dbg_strings) {
1854 // Read dbg strings
1855 uint offset = align_read_int();
1856 uint count = *(uint *)addr(offset);
1857 offset += sizeof(uint);
1858 for (uint i = 0; i < count; i++) {
1859 int string_id = *(uint *)addr(offset);
1860 offset += sizeof(int);
1861 const char* str = (const char*)_cache->address_for_C_string(string_id);
1862 dbg_strings.insert(str);
1863 }
1864 set_read_position(offset);
1865 }
1866 #endif // PRODUCT
1867
1868 //======================= AOTCodeAddressTable ===============
1869
1870 // address table ids for generated routine entry adresses, external
1871 // addresses and C string addresses are partitioned into positive
1872 // integer ranges defined by the following positive base and max
1873 // values i.e. [_extrs_base, _extrs_base + _extrs_max -1],
1874 // [_stubs_base, _stubs_base + _stubs_max -1], [_c_str_base,
1875 // _c_str_base + _c_str_max -1],
1876
1877 #define _extrs_max 380
1878 #define _stubs_max static_cast<int>(EntryId::NUM_ENTRYIDS)
1879
1880 #define _extrs_base 0
1881 #define _stubs_base (_extrs_base + _extrs_max)
1882 #define _all_max (_stubs_base + _stubs_max)
1883
1884 // setter for external addresses and string addresses inserts new
1885 // addresses in the order they are encountered them which must remain
1886 // the same across an assembly run and subsequent production run
1887
1888 #define ADD_EXTERNAL_ADDRESS(addr) \
1889 { \
1890 hash_address((address) addr, _extrs_base + _extrs_length); \
1891 _extrs_addr[_extrs_length++] = (address) (addr); \
1892 assert(_extrs_length <= _extrs_max, "increase size"); \
1893 }
1894
1895 // insert into to the address hash table the index of an external
1896 // address or a stub address in the list of external or stub
1897 // addresses, respectively, keyed by the relevant address
1898
1899 void AOTCodeAddressTable::hash_address(address addr, int idx) {
1900 // only do this if we have a non-null address to record and the
1901 // cache is open for dumping
1902 if (addr == nullptr) {
1903 return;
1904 }
1905 // check opened_cache because this can be called before the cache is
1906 // properly initialized and only continue when dumping is enabled
1907 if (opened_cache != nullptr && opened_cache->for_dump()) {
1908 if (_hash_table == nullptr) {
1909 _hash_table = new (mtCode) AOTCodeAddressHashTable();
1910 }
1911 assert(_hash_table->get(addr) == nullptr, "repeated insert of address " INTPTR_FORMAT, p2i(addr));
1912 _hash_table->put(addr, idx);
1913 log_trace(aot, codecache)("Address " INTPTR_FORMAT " inserted into AOT Code Cache address hash table with index '%d'",
1914 p2i(addr), idx);
1915 }
1916 }
1917
1918 static bool initializing_extrs = false;
1919
1920 void AOTCodeAddressTable::init_extrs() {
1921 if (_extrs_complete || initializing_extrs) return; // Done already
1922
1923 initializing_extrs = true;
1924 _extrs_addr = NEW_C_HEAP_ARRAY(address, _extrs_max, mtCode);
1925
1926 _extrs_length = 0;
1927
1928 {
1929 // Required by initial stubs
1930 ADD_EXTERNAL_ADDRESS(SharedRuntime::exception_handler_for_return_address); // used by forward_exception
1931 ADD_EXTERNAL_ADDRESS(CompressedOops::base_addr()); // used by call_stub
1932 ADD_EXTERNAL_ADDRESS(Thread::current); // used by call_stub
1933 ADD_EXTERNAL_ADDRESS(SharedRuntime::throw_StackOverflowError);
1934 ADD_EXTERNAL_ADDRESS(SharedRuntime::throw_delayed_StackOverflowError);
1935 }
1936
1937 // Record addresses of VM runtime methods
1938 ADD_EXTERNAL_ADDRESS(SharedRuntime::fixup_callers_callsite);
1939 ADD_EXTERNAL_ADDRESS(SharedRuntime::handle_wrong_method);
1940 ADD_EXTERNAL_ADDRESS(SharedRuntime::handle_wrong_method_abstract);
1941 ADD_EXTERNAL_ADDRESS(SharedRuntime::handle_wrong_method_ic_miss);
1942 #if defined(AARCH64) && !defined(ZERO)
1943 ADD_EXTERNAL_ADDRESS(JavaThread::aarch64_get_thread_helper);
1944 ADD_EXTERNAL_ADDRESS(BarrierSetAssembler::patching_epoch_addr());
1945 #endif
1946
1947 #ifndef PRODUCT
1948 ADD_EXTERNAL_ADDRESS(&SharedRuntime::_jbyte_array_copy_ctr); // used by arraycopy stub on arm32 and x86_64
1949 ADD_EXTERNAL_ADDRESS(&SharedRuntime::_jshort_array_copy_ctr); // used by arraycopy stub
1950 ADD_EXTERNAL_ADDRESS(&SharedRuntime::_jint_array_copy_ctr); // used by arraycopy stub
1951 ADD_EXTERNAL_ADDRESS(&SharedRuntime::_jlong_array_copy_ctr); // used by arraycopy stub
1952 ADD_EXTERNAL_ADDRESS(&SharedRuntime::_oop_array_copy_ctr); // used by arraycopy stub
1953 ADD_EXTERNAL_ADDRESS(&SharedRuntime::_checkcast_array_copy_ctr); // used by arraycopy stub
1954 ADD_EXTERNAL_ADDRESS(&SharedRuntime::_unsafe_array_copy_ctr); // used by arraycopy stub
1955 ADD_EXTERNAL_ADDRESS(&SharedRuntime::_generic_array_copy_ctr); // used by arraycopy stub
1956 ADD_EXTERNAL_ADDRESS(&SharedRuntime::_unsafe_set_memory_ctr); // used by arraycopy stub
1957 #endif /* PRODUCT */
1958
1959 ADD_EXTERNAL_ADDRESS(SharedRuntime::enable_stack_reserved_zone);
1960
1961 #if defined(AMD64) && !defined(ZERO)
1962 ADD_EXTERNAL_ADDRESS(SharedRuntime::montgomery_multiply);
1963 ADD_EXTERNAL_ADDRESS(SharedRuntime::montgomery_square);
1964 #endif // defined(AMD64) && !defined(ZERO)
1965
1966 ADD_EXTERNAL_ADDRESS(SharedRuntime::d2f);
1967 ADD_EXTERNAL_ADDRESS(SharedRuntime::d2i);
1968 ADD_EXTERNAL_ADDRESS(SharedRuntime::d2l);
1969 ADD_EXTERNAL_ADDRESS(SharedRuntime::dcos);
1970 ADD_EXTERNAL_ADDRESS(SharedRuntime::dexp);
1971 ADD_EXTERNAL_ADDRESS(SharedRuntime::dlog);
1972 ADD_EXTERNAL_ADDRESS(SharedRuntime::dlog10);
1973 ADD_EXTERNAL_ADDRESS(SharedRuntime::dpow);
1974 #ifndef ZERO
1975 ADD_EXTERNAL_ADDRESS(SharedRuntime::drem);
1976 #endif
1977 ADD_EXTERNAL_ADDRESS(SharedRuntime::dsin);
1978 ADD_EXTERNAL_ADDRESS(SharedRuntime::dtan);
1979 ADD_EXTERNAL_ADDRESS(SharedRuntime::f2i);
1980 ADD_EXTERNAL_ADDRESS(SharedRuntime::f2l);
1981 #ifndef ZERO
1982 ADD_EXTERNAL_ADDRESS(SharedRuntime::frem);
1983 #endif
1984 ADD_EXTERNAL_ADDRESS(SharedRuntime::l2d);
1985 ADD_EXTERNAL_ADDRESS(SharedRuntime::l2f);
1986 ADD_EXTERNAL_ADDRESS(SharedRuntime::ldiv);
1987 ADD_EXTERNAL_ADDRESS(SharedRuntime::lmul);
1988 ADD_EXTERNAL_ADDRESS(SharedRuntime::lrem);
1989
1990 #if INCLUDE_JVMTI
1991 ADD_EXTERNAL_ADDRESS(&JvmtiExport::_should_notify_object_alloc);
1992 #endif /* INCLUDE_JVMTI */
1993
1994 ADD_EXTERNAL_ADDRESS(ThreadIdentifier::unsafe_offset());
1995 // already added
1996 // ADD_EXTERNAL_ADDRESS(Thread::current);
1997
1998 ADD_EXTERNAL_ADDRESS(os::javaTimeMillis);
1999 ADD_EXTERNAL_ADDRESS(os::javaTimeNanos);
2000 #ifndef PRODUCT
2001 ADD_EXTERNAL_ADDRESS(os::breakpoint);
2002 #endif
2003
2004 ADD_EXTERNAL_ADDRESS(StubRoutines::crc_table_addr());
2005 #ifndef PRODUCT
2006 ADD_EXTERNAL_ADDRESS(&SharedRuntime::_partial_subtype_ctr);
2007 #endif
2008
2009 #if INCLUDE_JFR
2010 ADD_EXTERNAL_ADDRESS(JfrIntrinsicSupport::write_checkpoint);
2011 ADD_EXTERNAL_ADDRESS(JfrIntrinsicSupport::return_lease);
2012 #endif
2013
2014 ADD_EXTERNAL_ADDRESS(UpcallLinker::handle_uncaught_exception); // used by upcall_stub_exception_handler
2015
2016 {
2017 // Required by Shared blobs
2018 ADD_EXTERNAL_ADDRESS(Deoptimization::fetch_unroll_info);
2019 ADD_EXTERNAL_ADDRESS(Deoptimization::unpack_frames);
2020 ADD_EXTERNAL_ADDRESS(SafepointSynchronize::handle_polling_page_exception);
2021 ADD_EXTERNAL_ADDRESS(SharedRuntime::resolve_opt_virtual_call_C);
2022 ADD_EXTERNAL_ADDRESS(SharedRuntime::resolve_virtual_call_C);
2023 ADD_EXTERNAL_ADDRESS(SharedRuntime::resolve_static_call_C);
2024 // already added
2025 // ADD_EXTERNAL_ADDRESS(SharedRuntime::throw_delayed_StackOverflowError);
2026 ADD_EXTERNAL_ADDRESS(SharedRuntime::throw_AbstractMethodError);
2027 ADD_EXTERNAL_ADDRESS(SharedRuntime::throw_IncompatibleClassChangeError);
2028 ADD_EXTERNAL_ADDRESS(SharedRuntime::throw_NullPointerException_at_call);
2029 }
2030
2031 #ifdef COMPILER1
2032 {
2033 // Required by C1 blobs
2034 ADD_EXTERNAL_ADDRESS(static_cast<int (*)(oopDesc*)>(SharedRuntime::dtrace_object_alloc));
2035 ADD_EXTERNAL_ADDRESS(SharedRuntime::register_finalizer);
2036 ADD_EXTERNAL_ADDRESS(Runtime1::is_instance_of);
2037 ADD_EXTERNAL_ADDRESS(Runtime1::exception_handler_for_pc);
2038 ADD_EXTERNAL_ADDRESS(Runtime1::check_abort_on_vm_exception);
2039 ADD_EXTERNAL_ADDRESS(Runtime1::new_instance);
2040 ADD_EXTERNAL_ADDRESS(Runtime1::counter_overflow);
2041 ADD_EXTERNAL_ADDRESS(Runtime1::new_type_array);
2042 ADD_EXTERNAL_ADDRESS(Runtime1::new_object_array);
2043 ADD_EXTERNAL_ADDRESS(Runtime1::new_multi_array);
2044 ADD_EXTERNAL_ADDRESS(Runtime1::throw_range_check_exception);
2045 ADD_EXTERNAL_ADDRESS(Runtime1::throw_index_exception);
2046 ADD_EXTERNAL_ADDRESS(Runtime1::throw_div0_exception);
2047 ADD_EXTERNAL_ADDRESS(Runtime1::throw_null_pointer_exception);
2048 ADD_EXTERNAL_ADDRESS(Runtime1::throw_array_store_exception);
2049 ADD_EXTERNAL_ADDRESS(Runtime1::throw_class_cast_exception);
2050 ADD_EXTERNAL_ADDRESS(Runtime1::throw_incompatible_class_change_error);
2051 ADD_EXTERNAL_ADDRESS(Runtime1::monitorenter);
2052 ADD_EXTERNAL_ADDRESS(Runtime1::monitorexit);
2053 ADD_EXTERNAL_ADDRESS(Runtime1::deoptimize);
2054 ADD_EXTERNAL_ADDRESS(Runtime1::access_field_patching);
2055 ADD_EXTERNAL_ADDRESS(Runtime1::move_klass_patching);
2056 ADD_EXTERNAL_ADDRESS(Runtime1::move_mirror_patching);
2057 ADD_EXTERNAL_ADDRESS(Runtime1::move_appendix_patching);
2058 ADD_EXTERNAL_ADDRESS(Runtime1::predicate_failed_trap);
2059 ADD_EXTERNAL_ADDRESS(Runtime1::unimplemented_entry);
2060 // already added
2061 // ADD_EXTERNAL_ADDRESS(Thread::current);
2062 ADD_EXTERNAL_ADDRESS(CompressedKlassPointers::base_addr());
2063 }
2064 #endif
2065
2066 #ifdef COMPILER2
2067 {
2068 // Required by C2 blobs
2069 ADD_EXTERNAL_ADDRESS(Deoptimization::uncommon_trap);
2070 ADD_EXTERNAL_ADDRESS(OptoRuntime::handle_exception_C);
2071 ADD_EXTERNAL_ADDRESS(OptoRuntime::new_instance_C);
2072 ADD_EXTERNAL_ADDRESS(OptoRuntime::new_array_C);
2073 ADD_EXTERNAL_ADDRESS(OptoRuntime::new_array_nozero_C);
2074 ADD_EXTERNAL_ADDRESS(OptoRuntime::multianewarray2_C);
2075 ADD_EXTERNAL_ADDRESS(OptoRuntime::multianewarray3_C);
2076 ADD_EXTERNAL_ADDRESS(OptoRuntime::multianewarray4_C);
2077 ADD_EXTERNAL_ADDRESS(OptoRuntime::multianewarray5_C);
2078 ADD_EXTERNAL_ADDRESS(OptoRuntime::multianewarrayN_C);
2079 ADD_EXTERNAL_ADDRESS(OptoRuntime::complete_monitor_locking_C);
2080 ADD_EXTERNAL_ADDRESS(OptoRuntime::monitor_notify_C);
2081 ADD_EXTERNAL_ADDRESS(OptoRuntime::monitor_notifyAll_C);
2082 ADD_EXTERNAL_ADDRESS(OptoRuntime::rethrow_C);
2083 ADD_EXTERNAL_ADDRESS(OptoRuntime::slow_arraycopy_C);
2084 ADD_EXTERNAL_ADDRESS(OptoRuntime::register_finalizer_C);
2085 ADD_EXTERNAL_ADDRESS(OptoRuntime::vthread_end_first_transition_C);
2086 ADD_EXTERNAL_ADDRESS(OptoRuntime::vthread_start_final_transition_C);
2087 ADD_EXTERNAL_ADDRESS(OptoRuntime::vthread_start_transition_C);
2088 ADD_EXTERNAL_ADDRESS(OptoRuntime::vthread_end_transition_C);
2089 // already added for
2090 #if defined(AARCH64) && ! defined(PRODUCT)
2091 ADD_EXTERNAL_ADDRESS(JavaThread::verify_cross_modify_fence_failure);
2092 #endif // AARCH64 && !PRODUCT
2093 }
2094 #endif // COMPILER2
2095
2096 #if INCLUDE_G1GC
2097 ADD_EXTERNAL_ADDRESS(G1BarrierSetRuntime::write_ref_field_pre_entry);
2098 ADD_EXTERNAL_ADDRESS(G1BarrierSetRuntime::write_ref_array_pre_narrow_oop_entry); // used by arraycopy stubs
2099 ADD_EXTERNAL_ADDRESS(G1BarrierSetRuntime::write_ref_array_pre_oop_entry); // used by arraycopy stubs
2100 ADD_EXTERNAL_ADDRESS(G1BarrierSetRuntime::write_ref_array_post_entry); // used by arraycopy stubs
2101 ADD_EXTERNAL_ADDRESS(BarrierSetNMethod::nmethod_stub_entry_barrier); // used by method_entry_barrier
2102
2103 #endif
2104 #if INCLUDE_SHENANDOAHGC
2105 ADD_EXTERNAL_ADDRESS(ShenandoahRuntime::write_barrier_pre);
2106 ADD_EXTERNAL_ADDRESS(ShenandoahRuntime::write_barrier_pre_narrow);
2107 ADD_EXTERNAL_ADDRESS(ShenandoahRuntime::load_reference_barrier_strong);
2108 ADD_EXTERNAL_ADDRESS(ShenandoahRuntime::load_reference_barrier_strong_narrow);
2109 ADD_EXTERNAL_ADDRESS(ShenandoahRuntime::load_reference_barrier_strong_narrow_narrow);
2110 ADD_EXTERNAL_ADDRESS(ShenandoahRuntime::load_reference_barrier_weak);
2111 ADD_EXTERNAL_ADDRESS(ShenandoahRuntime::load_reference_barrier_weak_narrow);
2112 ADD_EXTERNAL_ADDRESS(ShenandoahRuntime::load_reference_barrier_weak_narrow_narrow);
2113 ADD_EXTERNAL_ADDRESS(ShenandoahRuntime::load_reference_barrier_phantom);
2114 ADD_EXTERNAL_ADDRESS(ShenandoahRuntime::load_reference_barrier_phantom_narrow);
2115 ADD_EXTERNAL_ADDRESS(ShenandoahRuntime::load_reference_barrier_phantom_narrow_narrow);
2116 ADD_EXTERNAL_ADDRESS(ShenandoahRuntime::arraycopy_barrier_oop);
2117 ADD_EXTERNAL_ADDRESS(ShenandoahRuntime::arraycopy_barrier_narrow_oop);
2118 ADD_EXTERNAL_ADDRESS(ShenandoahRuntime::clone);
2119 #endif
2120 #if INCLUDE_ZGC
2121 ADD_EXTERNAL_ADDRESS(ZBarrierSetRuntime::load_barrier_on_oop_field_preloaded_addr());
2122 ADD_EXTERNAL_ADDRESS(ZBarrierSetRuntime::load_barrier_on_oop_field_preloaded_store_good_addr());
2123 ADD_EXTERNAL_ADDRESS(ZBarrierSetRuntime::load_barrier_on_weak_oop_field_preloaded_addr());
2124 ADD_EXTERNAL_ADDRESS(ZBarrierSetRuntime::load_barrier_on_phantom_oop_field_preloaded_addr());
2125 ADD_EXTERNAL_ADDRESS(ZBarrierSetRuntime::no_keepalive_load_barrier_on_weak_oop_field_preloaded_addr());
2126 ADD_EXTERNAL_ADDRESS(ZBarrierSetRuntime::no_keepalive_load_barrier_on_phantom_oop_field_preloaded_addr());
2127 ADD_EXTERNAL_ADDRESS(ZBarrierSetRuntime::store_barrier_on_oop_field_with_healing_addr());
2128 ADD_EXTERNAL_ADDRESS(ZBarrierSetRuntime::store_barrier_on_oop_field_without_healing_addr());
2129 ADD_EXTERNAL_ADDRESS(ZBarrierSetRuntime::no_keepalive_store_barrier_on_oop_field_without_healing_addr());
2130 ADD_EXTERNAL_ADDRESS(ZBarrierSetRuntime::store_barrier_on_native_oop_field_without_healing_addr());
2131 ADD_EXTERNAL_ADDRESS(ZBarrierSetRuntime::load_barrier_on_oop_array_addr());
2132
2133 ADD_EXTERNAL_ADDRESS(ZPointerVectorLoadBadMask);
2134 ADD_EXTERNAL_ADDRESS(ZPointerVectorStoreBadMask);
2135 ADD_EXTERNAL_ADDRESS(ZPointerVectorStoreGoodMask);
2136 #if defined(AMD64)
2137 ADD_EXTERNAL_ADDRESS(&ZPointerLoadShift);
2138 ADD_EXTERNAL_ADDRESS(&ZPointerLoadShiftTable);
2139 #endif
2140 #endif
2141 #ifndef ZERO
2142 #if defined(AMD64) || defined(AARCH64) || defined(RISCV64)
2143 ADD_EXTERNAL_ADDRESS(MacroAssembler::debug64);
2144 #endif // defined(AMD64) || defined(AARCH64) || defined(RISCV64)
2145 #if defined(AMD64)
2146 ADD_EXTERNAL_ADDRESS(warning);
2147 #endif // defined(AMD64)
2148 #endif // ZERO
2149
2150 // addresses of fields in AOT runtime constants area
2151 address* p = AOTRuntimeConstants::field_addresses_list();
2152 while (*p != nullptr) {
2153 address to_add = (address)*p++;
2154 ADD_EXTERNAL_ADDRESS(to_add);
2155 }
2156
2157 log_debug(aot, codecache, init)("External addresses opened and recorded");
2158 // allocate storage for stub entries
2159 _stubs_addr = NEW_C_HEAP_ARRAY(address, _stubs_max, mtCode);
2160 log_debug(aot, codecache, init)("Stub addresses opened");
2161 }
2162
2163 void AOTCodeAddressTable::init_extrs2() {
2164 assert(initializing_extrs && !_extrs_complete,
2165 "invalid sequence for init_extrs2");
2166
2167 {
2168 ADD_EXTERNAL_ADDRESS(Continuation::prepare_thaw); // used by cont_thaw
2169 ADD_EXTERNAL_ADDRESS(Continuation::thaw_entry()); // used by cont_thaw
2170 ADD_EXTERNAL_ADDRESS(ContinuationEntry::thaw_call_pc_address()); // used by cont_preempt_stub
2171 }
2172 _extrs_complete = true;
2173 initializing_extrs = false;
2174 log_debug(aot, codecache, init)("External addresses recorded and closed");
2175 }
2176
2177 void AOTCodeAddressTable::add_external_addresses(GrowableArray<address>& addresses) {
2178 assert(initializing_extrs && !_extrs_complete,
2179 "invalid sequence for add_external_addresses");
2180 for (int i = 0; i < addresses.length(); i++) {
2181 ADD_EXTERNAL_ADDRESS(addresses.at(i));
2182 }
2183 log_debug(aot, codecache, init)("Recorded %d additional external addresses",
2184 addresses.length());
2185 }
2186
2187 void AOTCodeAddressTable::add_stub_entry(EntryId entry_id, address a) {
2188 assert(_extrs_complete || initializing_extrs,
2189 "recording stub entry address before external addresses complete");
2190 assert(!(StubInfo::is_shared(StubInfo::stub(entry_id)) && _shared_stubs_complete), "too late to add shared entry");
2191 assert(!(StubInfo::is_stubgen(StubInfo::stub(entry_id)) && _stubgen_stubs_complete), "too late to add stubgen entry");
2192 assert(!(StubInfo::is_c1(StubInfo::stub(entry_id)) && _c1_stubs_complete), "too late to add c1 entry");
2193 assert(!(StubInfo::is_c2(StubInfo::stub(entry_id)) && _c2_stubs_complete), "too late to add c2 entry");
2194 log_debug(aot, stubs)("Recording address 0x%p for %s entry %s", a, StubInfo::name(StubInfo::stubgroup(entry_id)), StubInfo::name(entry_id));
2195 int idx = static_cast<int>(entry_id);
2196 hash_address(a, _stubs_base + idx);
2197 _stubs_addr[idx] = a;
2198 }
2199
2200 void AOTCodeAddressTable::set_shared_stubs_complete() {
2201 assert(!_shared_stubs_complete, "repeated close for shared stubs!");
2202 _shared_stubs_complete = true;
2203 log_debug(aot, codecache, init)("Shared stubs closed");
2204 }
2205
2206 void AOTCodeAddressTable::set_c1_stubs_complete() {
2207 assert(!_c1_stubs_complete, "repeated close for c1 stubs!");
2208 _c1_stubs_complete = true;
2209 log_debug(aot, codecache, init)("C1 stubs closed");
2210 }
2211
2212 void AOTCodeAddressTable::set_c2_stubs_complete() {
2213 assert(!_c2_stubs_complete, "repeated close for c2 stubs!");
2214 _c2_stubs_complete = true;
2215 log_debug(aot, codecache, init)("C2 stubs closed");
2216 }
2217
2218 void AOTCodeAddressTable::set_stubgen_stubs_complete() {
2219 assert(!_stubgen_stubs_complete, "repeated close for stubgen stubs!");
2220 _stubgen_stubs_complete = true;
2221 log_debug(aot, codecache, init)("StubGen stubs closed");
2222 }
2223
2224 #ifdef PRODUCT
2225 #define MAX_STR_COUNT 200
2226 #else
2227 #define MAX_STR_COUNT 2000
2228 #endif
2229 #define _c_str_max MAX_STR_COUNT
2230 static const int _c_str_base = _all_max;
2231
2232 static const char* _C_strings_in[MAX_STR_COUNT] = {nullptr}; // Incoming strings
2233 static const char* _C_strings[MAX_STR_COUNT] = {nullptr}; // Our duplicates
2234 static int _C_strings_count = 0;
2235 static int _C_strings_s[MAX_STR_COUNT] = {0};
2236 static int _C_strings_id[MAX_STR_COUNT] = {0};
2237 static int _C_strings_used = 0;
2238
2239 void AOTCodeCache::load_strings() {
2240 uint strings_count = _load_header->strings_count();
2241 if (strings_count == 0) {
2242 return;
2243 }
2244 if (strings_count > MAX_STR_COUNT) {
2245 fatal("Invalid strings_count loaded from AOT Code Cache: %d > MAX_STR_COUNT [%d]", strings_count, MAX_STR_COUNT);
2246 return;
2247 }
2248 uint strings_offset = _load_header->strings_offset();
2249 uint* string_lengths = (uint*)addr(strings_offset);
2250 strings_offset += (strings_count * sizeof(uint));
2251 uint strings_size = _load_header->entries_offset() - strings_offset;
2252 // We have to keep cached strings longer than _cache buffer
2253 // because they are refernced from compiled code which may
2254 // still be executed on VM exit after _cache is freed.
2255 char* p = NEW_C_HEAP_ARRAY(char, strings_size+1, mtCode);
2256 memcpy(p, addr(strings_offset), strings_size);
2257 _C_strings_buf = p;
2258 for (uint i = 0; i < strings_count; i++) {
2259 _C_strings[i] = p;
2260 uint len = string_lengths[i];
2261 _C_strings_s[i] = i;
2262 _C_strings_id[i] = i;
2263 log_trace(aot, codecache, stringtable)("load_strings: _C_strings[%d] " INTPTR_FORMAT " '%s'", i, p2i(p), p);
2264 p += len;
2265 }
2266 assert((uint)(p - _C_strings_buf) <= strings_size, "(" INTPTR_FORMAT " - " INTPTR_FORMAT ") = %d > %d ", p2i(p), p2i(_C_strings_buf), (uint)(p - _C_strings_buf), strings_size);
2267 _C_strings_count = strings_count;
2268 _C_strings_used = strings_count;
2269 log_debug(aot, codecache, init)(" Loaded %d C strings of total length %d at offset %d from AOT Code Cache", _C_strings_count, strings_size, strings_offset);
2270 }
2271
2272 int AOTCodeCache::store_strings() {
2273 if (_C_strings_used > 0) {
2274 MutexLocker ml(AOTCodeCStrings_lock, Mutex::_no_safepoint_check_flag);
2275 uint offset = _write_position;
2276 uint length = 0;
2277 uint* lengths = (uint *)reserve_bytes(sizeof(uint) * _C_strings_used);
2278 if (lengths == nullptr) {
2279 return -1;
2280 }
2281 for (int i = 0; i < _C_strings_used; i++) {
2282 const char* str = _C_strings[_C_strings_s[i]];
2283 log_trace(aot, codecache, stringtable)("store_strings: _C_strings[%d] " INTPTR_FORMAT " '%s'", i, p2i(str), str);
2284 uint len = (uint)strlen(str) + 1;
2285 length += len;
2286 assert(len < 1000, "big string: %s", str);
2287 lengths[i] = len;
2288 uint n = write_bytes(str, len);
2289 if (n != len) {
2290 return -1;
2291 }
2292 }
2293 log_debug(aot, codecache, exit)(" Wrote %d C strings of total length %d at offset %d to AOT Code Cache",
2294 _C_strings_used, length, offset);
2295 }
2296 return _C_strings_used;
2297 }
2298
2299 const char* AOTCodeCache::add_C_string(const char* str) {
2300 if (is_on_for_dump() && str != nullptr) {
2301 MutexLocker ml(AOTCodeCStrings_lock, Mutex::_no_safepoint_check_flag);
2302 AOTCodeAddressTable* table = addr_table();
2303 if (table != nullptr) {
2304 return table->add_C_string(str);
2305 }
2306 }
2307 return str;
2308 }
2309
2310 const char* AOTCodeAddressTable::add_C_string(const char* str) {
2311 if (_extrs_complete || initializing_extrs) {
2312 // Check previous strings address
2313 for (int i = 0; i < _C_strings_count; i++) {
2314 if (_C_strings_in[i] == str) {
2315 return _C_strings[i]; // Found previous one - return our duplicate
2316 } else if (strcmp(_C_strings[i], str) == 0) {
2317 return _C_strings[i];
2318 }
2319 }
2320 // Add new one
2321 if (_C_strings_count < MAX_STR_COUNT) {
2322 // Passed in string can be freed and used space become inaccessible.
2323 // Keep original address but duplicate string for future compare.
2324 _C_strings_id[_C_strings_count] = -1; // Init
2325 _C_strings_in[_C_strings_count] = str;
2326 const char* dup = os::strdup(str);
2327 _C_strings[_C_strings_count++] = dup;
2328 log_trace(aot, codecache, stringtable)("add_C_string: [%d] " INTPTR_FORMAT " '%s'", _C_strings_count, p2i(dup), dup);
2329 return dup;
2330 } else {
2331 assert(false, "Number of C strings >= MAX_STR_COUNT");
2332 }
2333 }
2334 return str;
2335 }
2336
2337 int AOTCodeAddressTable::id_for_C_string(address str) {
2338 if (str == nullptr) {
2339 return BAD_ADDRESS_ID;
2340 }
2341 MutexLocker ml(AOTCodeCStrings_lock, Mutex::_no_safepoint_check_flag);
2342 for (int i = 0; i < _C_strings_count; i++) {
2343 if (_C_strings[i] == (const char*)str) { // found
2344 int id = _C_strings_id[i];
2345 if (id >= 0) {
2346 assert(id < _C_strings_used, "%d >= %d", id , _C_strings_used);
2347 return id; // Found recorded
2348 }
2349 log_trace(aot, codecache, stringtable)("id_for_C_string: _C_strings[%d ==> %d] " INTPTR_FORMAT " '%s'", i, _C_strings_used, p2i(str), str);
2350 // Not found in recorded, add new
2351 id = _C_strings_used++;
2352 _C_strings_s[id] = i;
2353 _C_strings_id[i] = id;
2354 return id;
2355 }
2356 }
2357 return BAD_ADDRESS_ID;
2358 }
2359
2360 address AOTCodeAddressTable::address_for_C_string(int idx) {
2361 assert(idx < _C_strings_count, "sanity");
2362 return (address)_C_strings[idx];
2363 }
2364
2365 static int search_address(address addr, address* table, uint length) {
2366 for (int i = 0; i < (int)length; i++) {
2367 if (table[i] == addr) {
2368 return i;
2369 }
2370 }
2371 return BAD_ADDRESS_ID;
2372 }
2373
2374 address AOTCodeAddressTable::address_for_id(int idx) {
2375 assert(_extrs_complete || initializing_extrs, "AOT Code Cache VM runtime addresses table is not complete");
2376 if (idx == -1) {
2377 return (address)-1;
2378 }
2379 uint id = (uint)idx;
2380 // special case for symbols based relative to os::init
2381 if (id > (_c_str_base + _c_str_max)) {
2382 return (address)os::init + idx;
2383 }
2384 if (idx < 0) {
2385 fatal("Incorrect id %d for AOT Code Cache addresses table", id);
2386 return nullptr;
2387 }
2388 // no need to compare unsigned id against 0
2389 if (/* id >= _extrs_base && */ id < _extrs_length) {
2390 return _extrs_addr[id - _extrs_base];
2391 }
2392 if (id >= _stubs_base && id < _c_str_base) {
2393 return _stubs_addr[id - _stubs_base];
2394 }
2395 if (id >= _c_str_base && id < (_c_str_base + (uint)_C_strings_count)) {
2396 return address_for_C_string(id - _c_str_base);
2397 }
2398 fatal("Incorrect id %d for AOT Code Cache addresses table", id);
2399 return nullptr;
2400 }
2401
2402 int AOTCodeAddressTable::id_for_address(address addr, RelocIterator reloc, CodeBlob* code_blob) {
2403 assert(_extrs_complete || initializing_extrs, "AOT Code Cache VM runtime addresses table is not complete");
2404 int id = -1;
2405 if (addr == (address)-1) { // Static call stub has jump to itself
2406 return id;
2407 }
2408 // fast path for stubs and external addresses
2409 if (_hash_table != nullptr) {
2410 int *result = _hash_table->get(addr);
2411 if (result != nullptr) {
2412 id = *result;
2413 log_trace(aot, codecache)("Address " INTPTR_FORMAT " retrieved from AOT Code Cache address hash table with index '%d'",
2414 p2i(addr), id);
2415 return id;
2416 }
2417 }
2418 // Seach for C string
2419 id = id_for_C_string(addr);
2420 if (id != BAD_ADDRESS_ID) {
2421 return id + _c_str_base;
2422 }
2423 if (StubRoutines::contains(addr) || CodeCache::find_blob(addr) != nullptr) {
2424 // Search for a matching stub entry
2425 id = search_address(addr, _stubs_addr, _stubs_max);
2426 if (id == BAD_ADDRESS_ID) {
2427 StubCodeDesc* desc = StubCodeDesc::desc_for(addr);
2428 if (desc == nullptr) {
2429 desc = StubCodeDesc::desc_for(addr + frame::pc_return_offset);
2430 }
2431 const char* sub_name = (desc != nullptr) ? desc->name() : "<unknown>";
2432 assert(false, "Address " INTPTR_FORMAT " for Stub:%s is missing in AOT Code Cache addresses table", p2i(addr), sub_name);
2433 } else {
2434 return id + _stubs_base;
2435 }
2436 } else {
2437 // Search in runtime functions
2438 id = search_address(addr, _extrs_addr, _extrs_length);
2439 if (id == BAD_ADDRESS_ID) {
2440 ResourceMark rm;
2441 const int buflen = 1024;
2442 char* func_name = NEW_RESOURCE_ARRAY(char, buflen);
2443 int offset = 0;
2444 if (os::dll_address_to_function_name(addr, func_name, buflen, &offset)) {
2445 if (offset > 0) {
2446 // Could be address of C string
2447 uint dist = (uint)pointer_delta(addr, (address)os::init, 1);
2448 log_debug(aot, codecache)("Address " INTPTR_FORMAT " (offset %d) for runtime target '%s' is missing in AOT Code Cache addresses table",
2449 p2i(addr), dist, (const char*)addr);
2450 assert(dist > (uint)(_all_max + MAX_STR_COUNT), "change encoding of distance");
2451 return dist;
2452 }
2453 #ifdef ASSERT
2454 reloc.print_current_on(tty);
2455 code_blob->print_on(tty);
2456 code_blob->print_code_on(tty);
2457 assert(false, "Address " INTPTR_FORMAT " for runtime target '%s+%d' is missing in AOT Code Cache addresses table", p2i(addr), func_name, offset);
2458 #endif
2459 } else {
2460 #ifdef ASSERT
2461 reloc.print_current_on(tty);
2462 code_blob->print_on(tty);
2463 code_blob->print_code_on(tty);
2464 os::find(addr, tty);
2465 assert(false, "Address " INTPTR_FORMAT " for <unknown>/('%s') is missing in AOT Code Cache addresses table", p2i(addr), (const char*)addr);
2466 #endif
2467 }
2468 } else {
2469 return _extrs_base + id;
2470 }
2471 }
2472 return id;
2473 }
2474
2475 AOTRuntimeConstants AOTRuntimeConstants::_aot_runtime_constants;
2476
2477 void AOTRuntimeConstants::initialize_from_runtime() {
2478 BarrierSet* bs = BarrierSet::barrier_set();
2479 address card_table_base = nullptr;
2480 uint grain_shift = 0;
2481 address cset_base = nullptr;
2482 #if INCLUDE_G1GC
2483 if (bs->is_a(BarrierSet::G1BarrierSet)) {
2484 grain_shift = G1HeapRegion::LogOfHRGrainBytes;
2485 } else
2486 #endif
2487 #if INCLUDE_SHENANDOAHGC
2488 if (bs->is_a(BarrierSet::ShenandoahBarrierSet)) {
2489 grain_shift = ShenandoahHeapRegion::region_size_bytes_shift_jint();
2490 cset_base = ShenandoahHeap::in_cset_fast_test_addr();
2491 } else
2492 #endif
2493 if (bs->is_a(BarrierSet::CardTableBarrierSet)) {
2494 CardTable::CardValue* base = ci_card_table_address_const();
2495 assert(base != nullptr, "unexpected byte_map_base");
2496 card_table_base = base;
2497 CardTableBarrierSet* ctbs = barrier_set_cast<CardTableBarrierSet>(bs);
2498 grain_shift = ctbs->grain_shift();
2499 }
2500 _aot_runtime_constants._card_table_base = card_table_base;
2501 _aot_runtime_constants._grain_shift = grain_shift;
2502 _aot_runtime_constants._cset_base = cset_base;
2503 }
2504
2505 address AOTRuntimeConstants::_field_addresses_list[] = {
2506 ((address)&_aot_runtime_constants._card_table_base),
2507 ((address)&_aot_runtime_constants._grain_shift),
2508 ((address)&_aot_runtime_constants._cset_base),
2509 nullptr
2510 };
2511
2512 address AOTRuntimeConstants::card_table_base_address() {
2513 assert(UseSerialGC || UseParallelGC, "Only these GCs have constant card table base");
2514 return (address)&_aot_runtime_constants._card_table_base;
2515 }
2516
2517 // This is called after initialize() but before init2()
2518 // and _cache is not set yet.
2519 void AOTCodeCache::print_on(outputStream* st) {
2520 if (opened_cache != nullptr && opened_cache->for_use()) {
2521 st->print_cr("\nAOT Code Cache");
2522 uint count = opened_cache->_load_header->entries_count();
2523 uint* search_entries = (uint*)opened_cache->addr(opened_cache->_load_header->entries_offset()); // [id, index]
2524 AOTCodeEntry* load_entries = (AOTCodeEntry*)(search_entries + 2 * count);
2525
2526 for (uint i = 0; i < count; i++) {
2527 // Use search_entries[] to order ouput
2528 int index = search_entries[2*i + 1];
2529 AOTCodeEntry* entry = &(load_entries[index]);
2530
2531 uint entry_position = entry->offset();
2532 uint name_offset = entry->name_offset() + entry_position;
2533 const char* saved_name = opened_cache->addr(name_offset);
2534
2535 st->print_cr("%4u: %10s idx:%4u Id:%u size=%u '%s'",
2536 i, aot_code_entry_kind_name[entry->kind()], index, entry->id(), entry->size(), saved_name);
2537 }
2538 }
2539 }
2540
2541 // methods for managing entries in multi-stub blobs
2542
2543
2544 AOTStubData::AOTStubData(BlobId blob_id) :
2545 _blob_id(blob_id),
2546 _cached_blob(nullptr),
2547 _stub_cnt(0),
2548 _ranges(nullptr),
2549 _flags(0) {
2550 assert(StubInfo::is_stubgen(blob_id),
2551 "AOTStubData expects a multi-stub blob not %s",
2552 StubInfo::name(blob_id));
2553
2554 // we cannot save or restore preuniversestubs because the cache
2555 // cannot be accessed before initialising the universe
2556 if (blob_id == BlobId::stubgen_preuniverse_id) {
2557 // invalidate any attempt to use this
2558 _flags = INVALID;
2559 return;
2560 }
2561 if (AOTCodeCache::is_on()) {
2562 _flags = OPEN;
2563 // allow update of stub entry addresses
2564 if (AOTCodeCache::is_using_stub()) {
2565 // allow stub loading
2566 _flags |= USING;
2567 }
2568 if (AOTCodeCache::is_dumping_stub()) {
2569 // allow stub saving
2570 _flags |= DUMPING;
2571 }
2572 // we need to track all the blob's entries
2573 _stub_cnt = StubInfo::stub_count(_blob_id);
2574 _ranges = NEW_C_HEAP_ARRAY(StubAddrRange, _stub_cnt, mtCode);
2575 for (int i = 0; i < _stub_cnt; i++) {
2576 _ranges[i].default_init();
2577 }
2578 }
2579 }
2580
2581 bool AOTStubData::load_code_blob() {
2582 assert(is_using(), "should not call");
2583 assert(!is_invalid() && _cached_blob == nullptr, "repeated init");
2584 _cached_blob = AOTCodeCache::load_code_blob(AOTCodeEntry::StubGenBlob,
2585 _blob_id,
2586 this);
2587 if (_cached_blob == nullptr) {
2588 set_invalid();
2589 return false;
2590 } else {
2591 return true;
2592 }
2593 }
2594
2595 bool AOTStubData::store_code_blob(CodeBlob& new_blob, CodeBuffer *code_buffer) {
2596 assert(is_dumping(), "should not call");
2597 assert(_cached_blob == nullptr, "should not be loading and storing!");
2598 if (!AOTCodeCache::store_code_blob(new_blob,
2599 AOTCodeEntry::StubGenBlob,
2600 _blob_id, this, code_buffer)) {
2601 set_invalid();
2602 return false;
2603 } else {
2604 return true;
2605 }
2606 }
2607
2608 address AOTStubData::load_archive_data(StubId stub_id, address& end, GrowableArray<address>* entries, GrowableArray<address>* extras) {
2609 assert(StubInfo::blob(stub_id) == _blob_id, "sanity check");
2610 if (is_invalid()) {
2611 return nullptr;
2612 }
2613 int idx = StubInfo::stubgen_offset_in_blob(_blob_id, stub_id);
2614 assert(idx >= 0 && idx < _stub_cnt, "invalid index %d for stub count %d", idx, _stub_cnt);
2615 // ensure we have a valid associated range
2616 StubAddrRange &range = _ranges[idx];
2617 int base = range.start_index();
2618 if (base < 0) {
2619 return nullptr;
2620 }
2621 int count = range.count();
2622 assert(base >= 0, "sanity");
2623 assert(count >= 2, "sanity");
2624 // first two saved addresses are start and end
2625 address start = _address_array.at(base);
2626 end = _address_array.at(base + 1);
2627 assert(start != nullptr, "failed to load start address of stub %s", StubInfo::name(stub_id));
2628 assert(end != nullptr, "failed to load end address of stub %s", StubInfo::name(stub_id));
2629 assert(start < end, "start address %p should be less than end %p address for stub %s", start, end, StubInfo::name(stub_id));
2630
2631 int entry_count = StubInfo::entry_count(stub_id);
2632 // the address count must at least include the stub start, end
2633 // and secondary addresses
2634 assert(count >= entry_count + 1, "stub %s requires %d saved addresses but only has %d", StubInfo::name(stub_id), entry_count + 1, count);
2635
2636 // caller must retrieve secondary entries if and only if they exist
2637 assert((entry_count == 1) == (entries == nullptr), "trying to retrieve wrong number of entries for stub %s", StubInfo::name(stub_id));
2638 int index = 2;
2639 if (entries != nullptr) {
2640 assert(entries->length() == 0, "non-empty array when retrieving entries for stub %s!", StubInfo::name(stub_id));
2641 while (index < entry_count + 1) {
2642 address entry = _address_array.at(base + index++);
2643 assert(entry == nullptr || (start < entry && entry < end), "entry address %p not in range (%p, %p) for stub %s", entry, start, end, StubInfo::name(stub_id));
2644 entries->append(entry);
2645 }
2646 }
2647 // caller must retrieve extras if and only if they exist
2648 assert((index < count) == (extras != nullptr), "trying to retrieve wrong number of extras for stub %s", StubInfo::name(stub_id));
2649 if (extras != nullptr) {
2650 assert(extras->length() == 0, "non-empty array when retrieving extras for stub %s!", StubInfo::name(stub_id));
2651 while (index < count) {
2652 address extra = _address_array.at(base + index++);
2653 assert(extra == nullptr || (start <= extra && extra <= end), "extra address %p not in range (%p, %p) for stub %s", extra, start, end, StubInfo::name(stub_id));
2654 extras->append(extra);
2655 }
2656 }
2657
2658 return start;
2659 }
2660
2661 void AOTStubData::store_archive_data(StubId stub_id, address start, address end, GrowableArray<address>* entries, GrowableArray<address>* extras) {
2662 assert(StubInfo::blob(stub_id) == _blob_id, "sanity check");
2663 assert(start != nullptr, "start address cannot be null");
2664 assert(end != nullptr, "end address cannot be null");
2665 assert(start < end, "start address %p should be less than end %p address for stub %s", start, end, StubInfo::name(stub_id));
2666 int idx = StubInfo::stubgen_offset_in_blob(_blob_id, stub_id);
2667 StubAddrRange& range = _ranges[idx];
2668 assert(range.start_index() == -1, "sanity");
2669 int base = _address_array.length();
2670 assert(base >= 0, "sanity");
2671 // first two saved addresses are start and end
2672 _address_array.append(start);
2673 _address_array.append(end);
2674 // caller must save secondary entries if and only if they exist
2675 assert((StubInfo::entry_count(stub_id) == 1) == (entries == nullptr), "trying to save wrong number of entries for stub %s", StubInfo::name(stub_id));
2676 if (entries != nullptr) {
2677 assert(entries->length() == StubInfo::entry_count(stub_id) - 1, "incorrect entry count %d when saving entries for stub %s!", entries->length(), StubInfo::name(stub_id));
2678 for (int i = 0; i < entries->length(); i++) {
2679 address entry = entries->at(i);
2680 assert(entry == nullptr || (start < entry && entry < end), "entry address %p not in range (%p, %p) for stub %s", entry, start, end, StubInfo::name(stub_id));
2681 _address_array.append(entry);
2682 }
2683 }
2684 // caller may wish to save extra addresses
2685 if (extras != nullptr) {
2686 for (int i = 0; i < extras->length(); i++) {
2687 address extra = extras->at(i);
2688 // handler range end may be end -- it gets restored as nullptr
2689 assert(extra == nullptr || (start <= extra && extra <= end), "extra address %p not in range (%p, %p) for stub %s", extra, start, end, StubInfo::name(stub_id));
2690 _address_array.append(extra);
2691 }
2692 }
2693 range.init_entry(base, _address_array.length() - base);
2694 }
2695
2696 void AOTStubData::stub_epilog(StubId stub_id) {
2697 DEBUG_ONLY(check_stored(stub_id));
2698 }
2699
2700 #ifdef ASSERT
2701 void AOTStubData::check_stored(StubId stub_id) {
2702 // Only need to check if we are dumping
2703 //
2704 // This excludes cases where the cache got closed because of error
2705 // plus the pre-universe stubs we can never store because they are
2706 // generated prior to cache opening.
2707 if (is_dumping()) {
2708 int idx = StubInfo::stubgen_offset_in_blob(_blob_id, stub_id);
2709 assert(idx >= 0 && idx < _stub_cnt, "invalid index %d for stub count %d", idx, _stub_cnt);
2710 StubAddrRange& range = _ranges[idx];
2711 assert(range.start_index() != -1, "missing store_archive_data for generated stub %s", StubInfo::name(stub_id));
2712 }
2713 }
2714 #endif