1 /*
2 * Copyright (c) 2023, 2026, Oracle and/or its affiliates. All rights reserved.
3 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
4 *
5 * This code is free software; you can redistribute it and/or modify it
6 * under the terms of the GNU General Public License version 2 only, as
7 * published by the Free Software Foundation.
8 *
9 * This code is distributed in the hope that it will be useful, but WITHOUT
10 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
11 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
12 * version 2 for more details (a copy is included in the LICENSE file that
13 * accompanied this code).
14 *
15 * You should have received a copy of the GNU General Public License version
16 * 2 along with this work; if not, write to the Free Software Foundation,
17 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
18 *
19 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
20 * or visit www.oracle.com if you need additional information or have any
21 * questions.
22 *
23 */
24
25
26 #include "asm/macroAssembler.hpp"
27 #include "cds/aotCacheAccess.hpp"
28 #include "cds/aotMetaspace.hpp"
29 #include "cds/cds_globals.hpp"
30 #include "cds/cdsConfig.hpp"
31 #include "cds/heapShared.hpp"
32 #include "ci/ciConstant.hpp"
33 #include "ci/ciEnv.hpp"
34 #include "ci/ciField.hpp"
35 #include "ci/ciMethod.hpp"
36 #include "ci/ciMethodData.hpp"
37 #include "ci/ciObject.hpp"
38 #include "ci/ciUtilities.inline.hpp"
39 #include "classfile/javaAssertions.hpp"
40 #include "classfile/stringTable.hpp"
41 #include "classfile/symbolTable.hpp"
42 #include "classfile/systemDictionary.hpp"
43 #include "classfile/vmClasses.hpp"
44 #include "classfile/vmIntrinsics.hpp"
45 #include "code/aotCodeCache.hpp"
46 #include "code/codeBlob.hpp"
47 #include "code/codeCache.hpp"
48 #include "code/oopRecorder.inline.hpp"
49 #include "compiler/abstractCompiler.hpp"
50 #include "compiler/compilationPolicy.hpp"
51 #include "compiler/compileBroker.hpp"
52 #include "compiler/compileTask.hpp"
53 #include "gc/g1/g1BarrierSetRuntime.hpp"
54 #include "gc/shared/barrierSetAssembler.hpp"
55 #include "gc/shared/cardTableBarrierSet.hpp"
56 #include "gc/shared/gcConfig.hpp"
57 #include "logging/logStream.hpp"
58 #include "memory/memoryReserver.hpp"
59 #include "memory/universe.hpp"
60 #include "oops/klass.inline.hpp"
61 #include "oops/method.inline.hpp"
62 #include "oops/trainingData.hpp"
63 #include "prims/jvmtiThreadState.hpp"
64 #include "runtime/atomicAccess.hpp"
65 #include "runtime/deoptimization.hpp"
66 #include "runtime/flags/flagSetting.hpp"
67 #include "runtime/globals_extension.hpp"
68 #include "runtime/handles.inline.hpp"
69 #include "runtime/java.hpp"
70 #include "runtime/jniHandles.inline.hpp"
71 #include "runtime/mountUnmountDisabler.hpp"
72 #include "runtime/mutexLocker.hpp"
73 #include "runtime/objectMonitorTable.hpp"
74 #include "runtime/os.inline.hpp"
75 #include "runtime/sharedRuntime.hpp"
76 #include "runtime/stubCodeGenerator.hpp"
77 #include "runtime/stubRoutines.hpp"
78 #include "runtime/threadIdentifier.hpp"
79 #include "runtime/timerTrace.hpp"
80 #include "utilities/copy.hpp"
81 #include "utilities/formatBuffer.hpp"
82 #include "utilities/ostream.hpp"
83 #include "utilities/spinYield.hpp"
84 #ifdef COMPILER1
85 #include "c1/c1_LIRAssembler.hpp"
86 #include "c1/c1_Runtime1.hpp"
87 #include "gc/g1/c1/g1BarrierSetC1.hpp"
88 #include "gc/shared/c1/barrierSetC1.hpp"
89 #if INCLUDE_SHENANDOAHGC
90 #include "gc/shenandoah/c1/shenandoahBarrierSetC1.hpp"
91 #endif // INCLUDE_SHENANDOAHGC
92 #include "gc/z/c1/zBarrierSetC1.hpp"
93 #endif // COMPILER1
94 #ifdef COMPILER2
95 #include "opto/runtime.hpp"
96 #endif
97 #if INCLUDE_JVMCI
98 #include "jvmci/jvmci.hpp"
99 #endif
100 #if INCLUDE_G1GC
101 #include "gc/g1/g1BarrierSetRuntime.hpp"
102 #include "gc/g1/g1HeapRegion.hpp"
103 #endif
104 #if INCLUDE_SHENANDOAHGC
105 #include "gc/shenandoah/shenandoahRuntime.hpp"
106 #endif
107 #if INCLUDE_ZGC
108 #include "gc/z/zBarrierSetRuntime.hpp"
109 #endif
110 #if defined(X86) && !defined(ZERO)
111 #include "rdtsc_x86.hpp"
112 #endif
113
114 #include <errno.h>
115 #include <sys/stat.h>
116
// Human-readable names for AOTCodeEntry kinds, generated from the
// DO_AOTCODEENTRY_KIND x-macro so this table stays in sync with the enum.
const char* aot_code_entry_kind_name[] = {
#define DECL_KIND_STRING(kind) XSTR(kind),
  DO_AOTCODEENTRY_KIND(DECL_KIND_STRING)
#undef DECL_KIND_STRING
};

// Per-phase elapsed-time accumulators (named after the phase they time),
// collected when enable_timers() returns true.
static elapsedTimer _t_totalLoad;
static elapsedTimer _t_totalPreload;
static elapsedTimer _t_totalRegister;
static elapsedTimer _t_totalFind;
static elapsedTimer _t_totalStore;
128
129 static bool enable_timers() {
130 return CITime || log_is_enabled(Info, init);
131 }
132
// Report that the AOT code cache could not be used for loading:
// abort VM startup when AbortVMOnAOTCodeFailure is set, otherwise
// log the condition and turn all AOT code caching flags off.
static void report_load_failure() {
  if (AbortVMOnAOTCodeFailure) {
    vm_exit_during_initialization("Unable to use AOT Code Cache.", nullptr);
  }
  log_info(aot, codecache, init)("Unable to use AOT Code Cache.");
  AOTCodeCache::disable_caching();
}
140
// Report that the AOT code cache could not be created for storing:
// abort the VM when AbortVMOnAOTCodeFailure is set (note: uses vm_abort
// and the 'exit' log tag — store failures are reported at dump/exit time,
// unlike load failures which happen during initialization), otherwise
// log the condition and turn all AOT code caching flags off.
static void report_store_failure() {
  if (AbortVMOnAOTCodeFailure) {
    tty->print_cr("Unable to create AOT Code Cache.");
    vm_abort(false);
  }
  log_info(aot, codecache, exit)("Unable to create AOT Code Cache.");
  AOTCodeCache::disable_caching();
}
149
// The sequence of AOT code caching flags and parameter settings.
151 //
152 // 1. The initial AOT code caching flags setting is done
153 // during call to CDSConfig::check_vm_args_consistency().
154 //
155 // 2. The earliest AOT code state check done in compilationPolicy_init()
156 // where we set number of compiler threads for AOT assembly phase.
157 //
158 // 3. We determine presence of AOT code in AOT Cache in
//    AOTMetaspace::open_static_archive() which is called
160 // after compilationPolicy_init() but before codeCache_init().
161 //
162 // 4. AOTCodeCache::initialize() is called during universe_init()
163 // and does final AOT state and flags settings.
164 //
165 // 5. Finally AOTCodeCache::init2() is called after universe_init()
166 // when all GC settings are finalized.
167
168 // Next methods determine which action we do with AOT code depending
169 // on phase of AOT process: assembly or production.
170
171 bool AOTCodeCache::is_dumping_adapter() {
172 return AOTAdapterCaching && is_on_for_dump();
173 }
174
175 bool AOTCodeCache::is_using_adapter() {
176 return AOTAdapterCaching && is_on_for_use();
177 }
178
179 bool AOTCodeCache::is_dumping_stub() {
180 return AOTStubCaching && is_on_for_dump();
181 }
182
183 bool AOTCodeCache::is_using_stub() {
184 return AOTStubCaching && is_on_for_use();
185 }
186
187 bool AOTCodeCache::is_dumping_code() {
188 return AOTCodeCaching && is_on_for_dump();
189 }
190
191 bool AOTCodeCache::is_using_code() {
192 return AOTCodeCaching && is_on_for_use();
193 }
194
195 // This is used before AOTCodeCahe is initialized
196 // but after AOT (CDS) Cache flags consistency is checked.
197 bool AOTCodeCache::maybe_dumping_code() {
198 return AOTCodeCaching && CDSConfig::is_dumping_final_static_archive();
199 }
200
// Next methods could be called regardless of AOT code cache status.
// Initially they are called during AOT flags parsing and finalized
// in AOTCodeCache::initialize().
// Turn all AOT code caching flags on — but only those the user has not
// already set explicitly on the command line (ergonomic default).
void AOTCodeCache::enable_caching() {
  FLAG_SET_ERGO_IF_DEFAULT(AOTCodeCaching, true);
  FLAG_SET_ERGO_IF_DEFAULT(AOTStubCaching, true);
  FLAG_SET_ERGO_IF_DEFAULT(AOTAdapterCaching, true);
}
209
// Turn all AOT code caching flags off unconditionally (overrides even
// command-line settings, recorded as an ergonomic change).
void AOTCodeCache::disable_caching() {
  FLAG_SET_ERGO(AOTCodeCaching, false);
  FLAG_SET_ERGO(AOTStubCaching, false);
  FLAG_SET_ERGO(AOTAdapterCaching, false);
}
215
216 bool AOTCodeCache::is_caching_enabled() {
217 return AOTCodeCaching || AOTStubCaching || AOTAdapterCaching;
218 }
219
220 static uint32_t encode_id(AOTCodeEntry::Kind kind, int id) {
221 assert(AOTCodeEntry::is_valid_entry_kind(kind), "invalid AOTCodeEntry kind %d", (int)kind);
222 // There can be a conflict of id between an Adapter and *Blob, but that should not cause any functional issue
223 // becasue both id and kind are used to find an entry, and that combination should be unique
224 if (kind == AOTCodeEntry::Adapter) {
225 return id;
226 } else if (kind == AOTCodeEntry::SharedBlob) {
227 assert(StubInfo::is_shared(static_cast<BlobId>(id)), "not a shared blob id %d", id);
228 return id;
229 } else if (kind == AOTCodeEntry::C1Blob) {
230 assert(StubInfo::is_c1(static_cast<BlobId>(id)), "not a c1 blob id %d", id);
231 return id;
232 } else {
233 // kind must be AOTCodeEntry::C2Blob
234 assert(StubInfo::is_c2(static_cast<BlobId>(id)), "not a c2 blob id %d", id);
235 return id;
236 }
237 }
238
// Upper bound (in bytes) of the AOT code store buffer. Set once in
// AOTCodeCache::initialize() from AOTCodeMaxSize (aligned up to the VM
// allocation granularity when dumping).
static uint _max_aot_code_size = 0;
uint AOTCodeCache::max_aot_code_size() {
  return _max_aot_code_size;
}
243
244 bool AOTCodeCache::is_code_load_thread_on() {
245 return UseAOTCodeLoadThread && AOTCodeCaching;
246 }
247
// Decide whether a constant field value may be folded into compiled code.
// During AOT precompilation this conservatively answers false for every
// field; in normal JIT compilations everything is allowed. The commented-out
// alternatives below are stricter per-type policies kept for reference.
bool AOTCodeCache::allow_const_field(ciConstant& value) {
  ciEnv* env = CURRENT_ENV;
  precond(env != nullptr);
  assert(!env->is_precompile() || is_dumping_code(), "AOT compilation should be enabled");
  return !env->is_precompile() // Restrict only when we generate AOT code
         // Can not trust primitive too   || !is_reference_type(value.basic_type())
         // May disable this too for now  || is_reference_type(value.basic_type()) && value.as_object()->should_be_constant()
         ;
}
257
// It is called from AOTMetaspace::initialize_shared_spaces()
// which is called from universe_init().
// At this point all AOT class linking settings are finalized
// and AOT cache is open so we can map AOT code region.
void AOTCodeCache::initialize() {
  if (!is_caching_enabled()) {
    log_info(aot, codecache, init)("AOT Code Cache is not used: disabled.");
    return;
  }
#if defined(ZERO) || !(defined(AMD64) || defined(AARCH64))
  // AOT code caching is only implemented for x86_64 and aarch64 (non-Zero).
  log_info(aot, codecache, init)("AOT Code Cache is not supported on this platform.");
  disable_caching();
  return;
#else
  assert(!FLAG_IS_DEFAULT(AOTCache), "AOTCache should be specified");

  // Disable stubs caching until JDK-8357398 is fixed.
  FLAG_SET_ERGO(AOTStubCaching, false);

  if (VerifyOops) {
    // Disable AOT stubs caching when VerifyOops flag is on.
    // Verify oops code generated a lot of C strings which overflow
    // AOT C string table (which has fixed size).
    // AOT C string table will be reworked later to handle such cases.
    //
    // Note: AOT adapters are not affected - they don't have oop operations.
    log_info(aot, codecache, init)("AOT Stubs Caching is not supported with VerifyOops.");
    FLAG_SET_ERGO(AOTStubCaching, false);
  }

  // Dumping requires the final static archive with AOT-linked classes;
  // using requires an archive that was created with AOT-linked classes.
  bool is_dumping = false;
  bool is_using = false;
  if (CDSConfig::is_dumping_final_static_archive() && CDSConfig::is_dumping_aot_linked_classes()) {
    is_dumping = is_caching_enabled();
  } else if (CDSConfig::is_using_archive() && CDSConfig::is_using_aot_linked_classes()) {
    is_using = is_caching_enabled();
  }
  // Class-init barriers are only meaningful while dumping AOT nmethods.
  if (ClassInitBarrierMode > 0 && !(is_dumping && AOTCodeCaching)) {
    log_info(aot, codecache, init)("Set ClassInitBarrierMode to 0 because AOT Code dumping is off.");
    FLAG_SET_ERGO(ClassInitBarrierMode, 0);
  }
  if (!(is_dumping || is_using)) {
    log_info(aot, codecache, init)("AOT Code Cache is not used: AOT Class Linking is not used.");
    disable_caching();
    return; // AOT code caching disabled on command line
  }
  // Reserve AOT Cache region when we are dumping AOT code.
  _max_aot_code_size = AOTCodeMaxSize;
  if (is_dumping && !FLAG_IS_DEFAULT(AOTCodeMaxSize)) {
    if (!is_aligned(AOTCodeMaxSize, os::vm_allocation_granularity())) {
      _max_aot_code_size = align_up(AOTCodeMaxSize, os::vm_allocation_granularity());
      log_debug(aot,codecache,init)("Max AOT Code Cache size is aligned up to %uK", (int)(max_aot_code_size()/K));
    }
  }
  // When using, an empty AOT code region means there is nothing to load.
  size_t aot_code_size = is_using ? AOTCacheAccess::get_aot_code_region_size() : 0;
  if (is_using && aot_code_size == 0) {
    log_info(aot, codecache, init)("AOT Code Cache is empty");
    disable_caching();
    return;
  }
  if (!open_cache(is_dumping, is_using)) {
    if (is_using) {
      report_load_failure();
    } else {
      report_store_failure();
    }
    return;
  }
  if (is_dumping) {
    // NOTE(review): presumably these flags interfere with generating
    // relocatable/reusable AOT code — confirm against the flag definitions.
    FLAG_SET_DEFAULT(FoldStableValues, false);
    FLAG_SET_DEFAULT(ForceUnreachable, true);
  }
  FLAG_SET_DEFAULT(DelayCompilerStubsGeneration, false);
#endif // !(defined(ZERO) || !(defined(AMD64) || defined(AARCH64)))
}
333
// Cache object created by open_cache(); kept here until init2() verifies its
// recorded configuration, at which point it is published through _cache.
static AOTCodeCache* opened_cache = nullptr; // Use this until we verify the cache
AOTCodeCache* AOTCodeCache::_cache = nullptr;
DEBUG_ONLY( bool AOTCodeCache::_passed_init2 = false; )
337
// It is called after universe_init() when all GC settings are finalized.
// Completes cache setup: verifies the recorded configuration against this
// runtime, initializes AOT runtime constants and the external-address table,
// and only then publishes the cache via _cache.
void AOTCodeCache::init2() {
  DEBUG_ONLY( _passed_init2 = true; )
  if (opened_cache == nullptr) {
    // open_cache() failed earlier or caching is disabled.
    return;
  }
  // After Universe initialized
  if (!opened_cache->verify_config_on_use()) { // Check on AOT code loading
    delete opened_cache;
    opened_cache = nullptr;
    report_load_failure();
    return;
  }

  // initialize aot runtime constants as appropriate to this runtime
  AOTRuntimeConstants::initialize_from_runtime();

  // initialize the table of external routines and initial stubs so we can save
  // generated code blobs that reference them
  AOTCodeAddressTable* table = opened_cache->_table;
  assert(table != nullptr, "should be initialized already");
  table->init_extrs();

  // Now cache and address table are ready for AOT code generation
  _cache = opened_cache;

  // Set ClassInitBarrierMode after all checks since it affects code generation
  if (is_dumping_code()) {
    FLAG_SET_ERGO_IF_DEFAULT(ClassInitBarrierMode, 1);
  } else {
    FLAG_SET_ERGO(ClassInitBarrierMode, 0);
  }
}
371
372 bool AOTCodeCache::open_cache(bool is_dumping, bool is_using) {
373 opened_cache = new AOTCodeCache(is_dumping, is_using);
374 if (opened_cache->failed()) {
375 delete opened_cache;
376 opened_cache = nullptr;
377 return false;
378 }
379 return true;
380 }
381
// Print a short status tag for every cache entry whose method matches nm.
// Tag format: "A[P]<level>[<flags>]#<id>" where P marks a preload entry,
// <level> is the compilation level, and <flags> is any of L (loaded),
// F (load failed), I (not entrant).
static void print_helper(nmethod* nm, outputStream* st) {
  AOTCodeCache::iterate([&](AOTCodeEntry* e) {
    if (e->method() == nm->method()) {
      ResourceMark rm;
      stringStream ss;
      ss.print("A%s%d", (e->for_preload() ? "P" : ""), e->comp_level());
      ss.print("[%s%s%s]",
               (e->is_loaded()   ? "L" : ""),
               (e->load_fail()   ? "F" : ""),
               (e->not_entrant() ? "I" : ""));
      ss.print("#%d", e->comp_id());

      st->print(" %s", ss.freeze());
    }
  });
}
398
399 void AOTCodeCache::close() {
400 if (is_on()) {
401 delete _cache; // Free memory
402 _cache = nullptr;
403 opened_cache = nullptr;
404 }
405 }
406
// First object in the AOT code ("ac") region: records where the AOT code
// data lives and how big it is. See the layout description below.
class CachedCodeDirectory {
public:
  uint _aot_code_size;   // size of the AOT code data, in bytes
  char* _aot_code_data;  // start of the AOT code data

  // Record the data location; the pointer is stored via AOTCacheAccess so it
  // can be relocated when the region is mapped at a different address.
  void set_aot_code_data(uint size, char* aot_data) {
    _aot_code_size = size;
    AOTCacheAccess::set_pointer(&_aot_code_data, aot_data);
  }

  // Allocate the directory as the first object of the AOT code region.
  static CachedCodeDirectory* create();
};
419
420 // Storing AOT code in the AOT code region (ac) of AOT Cache:
421 //
422 // [1] Use CachedCodeDirectory to keep track of all of data related to AOT code.
423 // E.g., you can build a hashtable to record what methods have been archived.
424 //
425 // [2] Memory for all data for AOT code, including CachedCodeDirectory, should be
426 // allocated using AOTCacheAccess::allocate_aot_code_region().
427 //
428 // [3] CachedCodeDirectory must be the very first allocation.
429 //
430 // [4] Two kinds of pointer can be stored:
431 // - A pointer p that points to metadata. AOTCacheAccess::can_generate_aot_code(p) must return true.
432 // - A pointer to a buffer returned by AOTCacheAccess::allocate_aot_code_region().
433 // (It's OK to point to an interior location within this buffer).
434 // Such pointers must be stored using AOTCacheAccess::set_pointer()
435 //
436 // The buffers allocated by AOTCacheAccess::allocate_aot_code_region() are in a contiguous region. At runtime, this
437 // region is mapped to the process address space. All the pointers in this buffer are relocated as necessary
438 // (e.g., to account for the runtime location of the CodeCache).
439 //
// This is always at the very beginning of the mmapped CDS "ac" (AOT code) region
static CachedCodeDirectory* _aot_code_directory = nullptr;
442
443 CachedCodeDirectory* CachedCodeDirectory::create() {
444 assert(AOTCacheAccess::is_aot_code_region_empty(), "must be");
445 CachedCodeDirectory* dir = (CachedCodeDirectory*)AOTCacheAccess::allocate_aot_code_region(sizeof(CachedCodeDirectory));
446 return dir;
447 }
448
449 #define DATA_ALIGNMENT HeapWordSize
450
451 AOTCodeCache::AOTCodeCache(bool is_dumping, bool is_using) :
452 _load_header(nullptr),
453 _load_buffer(nullptr),
454 _store_buffer(nullptr),
455 _C_store_buffer(nullptr),
456 _write_position(0),
457 _load_size(0),
458 _store_size(0),
459 _for_use(is_using),
460 _for_dump(is_dumping),
461 _closing(false),
462 _failed(false),
463 _lookup_failed(false),
464 _for_preload(false),
465 _has_clinit_barriers(false),
466 _table(nullptr),
467 _load_entries(nullptr),
468 _search_entries(nullptr),
469 _store_entries(nullptr),
470 _C_strings_buf(nullptr),
471 _store_entries_cnt(0),
472 _compile_id(0),
473 _comp_level(0)
474 {
475 // Read header at the begining of cache
476 if (_for_use) {
477 // Read cache
478 size_t load_size = AOTCacheAccess::get_aot_code_region_size();
479 ReservedSpace rs = MemoryReserver::reserve(load_size, mtCode);
480 if (!rs.is_reserved()) {
481 log_warning(aot, codecache, init)("Failed to reserved %u bytes of memory for mapping AOT code region into AOT Code Cache", (uint)load_size);
482 set_failed();
483 return;
484 }
485 if (!AOTCacheAccess::map_aot_code_region(rs)) {
486 log_warning(aot, codecache, init)("Failed to read/mmap AOT code region (ac) into AOT Code Cache");
487 set_failed();
488 return;
489 }
490 _aot_code_directory = (CachedCodeDirectory*)rs.base();
491
492 _load_size = _aot_code_directory->_aot_code_size;
493 _load_buffer = _aot_code_directory->_aot_code_data;
494 assert(is_aligned(_load_buffer, DATA_ALIGNMENT), "load_buffer is not aligned");
495 log_info(aot, codecache, init)("Mapped %u bytes at address " INTPTR_FORMAT " from AOT Code Cache", _load_size, p2i(_load_buffer));
496
497 _load_header = (Header*)addr(0);
498 if (!_load_header->verify(_load_size)) {
499 set_failed();
500 return;
501 }
502 log_info (aot, codecache, init)("Loaded %u AOT code entries from AOT Code Cache", _load_header->entries_count());
503 log_debug(aot, codecache, init)(" %s: total=%u", aot_code_entry_kind_name[AOTCodeEntry::Adapter], _load_header->adapters_count());
504 log_debug(aot, codecache, init)(" %s: total=%u", aot_code_entry_kind_name[AOTCodeEntry::SharedBlob], _load_header->shared_blobs_count());
505 log_debug(aot, codecache, init)(" %s: total=%u", aot_code_entry_kind_name[AOTCodeEntry::C1Blob], _load_header->C1_blobs_count());
506 log_debug(aot, codecache, init)(" %s: total=%u", aot_code_entry_kind_name[AOTCodeEntry::C2Blob], _load_header->C2_blobs_count());
507 log_debug(aot, codecache, init)(" %s: total=%u", aot_code_entry_kind_name[AOTCodeEntry::Stub], _load_header->stubs_count());
508 log_debug(aot, codecache, init)(" %s: total=%u", aot_code_entry_kind_name[AOTCodeEntry::Nmethod], _load_header->nmethods_count());
509 log_debug(aot, codecache, init)(" AOT code cache size: %u bytes", _load_header->cache_size());
510
511 // Read strings
512 load_strings();
513 }
514 if (_for_dump) {
515 _C_store_buffer = NEW_C_HEAP_ARRAY(char, max_aot_code_size() + DATA_ALIGNMENT, mtCode);
516 _store_buffer = align_up(_C_store_buffer, DATA_ALIGNMENT);
517 // Entries allocated at the end of buffer in reverse (as on stack).
518 _store_entries = (AOTCodeEntry*)align_up(_C_store_buffer + max_aot_code_size(), DATA_ALIGNMENT);
519 log_debug(aot, codecache, init)("Allocated store buffer at address " INTPTR_FORMAT " of size %u", p2i(_store_buffer), max_aot_code_size());
520 }
521 _table = new AOTCodeAddressTable();
522 }
523
524 void AOTCodeCache::invalidate(AOTCodeEntry* entry) {
525 // This could be concurent execution
526 if (entry != nullptr && is_on()) { // Request could come after cache is closed.
527 _cache->invalidate_entry(entry);
528 }
529 }
530
531 void AOTCodeCache::init_early_stubs_table() {
532 AOTCodeAddressTable* table = addr_table();
533 if (table != nullptr) {
534 table->init_early_stubs();
535 }
536 }
537
538 void AOTCodeCache::init_shared_blobs_table() {
539 AOTCodeAddressTable* table = addr_table();
540 if (table != nullptr) {
541 table->init_shared_blobs();
542 }
543 }
544
545 void AOTCodeCache::init_stubs_table() {
546 AOTCodeAddressTable* table = addr_table();
547 if (table != nullptr) {
548 table->init_stubs();
549 }
550 }
551
552 void AOTCodeCache::init_early_c1_table() {
553 AOTCodeAddressTable* table = addr_table();
554 if (table != nullptr) {
555 table->init_early_c1();
556 }
557 }
558
559 void AOTCodeCache::init_c1_table() {
560 AOTCodeAddressTable* table = addr_table();
561 if (table != nullptr) {
562 table->init_c1();
563 }
564 }
565
566 void AOTCodeCache::init_c2_table() {
567 AOTCodeAddressTable* table = addr_table();
568 if (table != nullptr) {
569 table->init_c2();
570 }
571 }
572
// Close the cache: block any further loads/stores, flush the store buffer
// when dumping, and release all cache-owned memory.
AOTCodeCache::~AOTCodeCache() {
  if (_closing) {
    return; // Already closed
  }
  // Stop any further access to cache.
  // Checked on entry to load_nmethod() and store_nmethod().
  _closing = true;
  if (_for_use) {
    // Wait for all load_nmethod() finish.
    wait_for_no_nmethod_readers();
  }
  // Prevent writing code into cache while we are closing it.
  // This lock held by ciEnv::register_method() which calls store_nmethod().
  MutexLocker ml(Compile_lock);
  if (for_dump()) { // Finalize cache
    finish_write();
  }
  _load_buffer = nullptr;
  if (_C_store_buffer != nullptr) {
    FREE_C_HEAP_ARRAY(char, _C_store_buffer);
    _C_store_buffer = nullptr;
    _store_buffer = nullptr;
  }
  if (_table != nullptr) {
    // NOTE(review): the lock name suggests the table also backs AOT C string
    // access; taking it here prevents a reader from seeing a deleted table —
    // confirm against AOTCodeAddressTable users.
    MutexLocker ml(AOTCodeCStrings_lock, Mutex::_no_safepoint_check_flag);
    delete _table;
    _table = nullptr;
  }
}
602
603 void AOTCodeCache::Config::record(uint cpu_features_offset) {
604 _flags = 0;
605 #ifdef ASSERT
606 _flags |= debugVM;
607 #endif
608 if (UseCompressedOops) {
609 _flags |= compressedOops;
610 }
611 if (UseCompressedClassPointers) {
612 _flags |= compressedClassPointers;
613 }
614 if (UseTLAB) {
615 _flags |= useTLAB;
616 }
617 if (JavaAssertions::systemClassDefault()) {
618 _flags |= systemClassAssertions;
619 }
620 if (JavaAssertions::userClassDefault()) {
621 _flags |= userClassAssertions;
622 }
623 if (EnableContended) {
624 _flags |= enableContendedPadding;
625 }
626 if (RestrictContended) {
627 _flags |= restrictContendedPadding;
628 }
629 if (PreserveFramePointer) {
630 _flags |= preserveFramePointer;
631 }
632 _codeCacheSize = pointer_delta(CodeCache::high_bound(), CodeCache::low_bound(), 1);
633 _compressedOopShift = CompressedOops::shift();
634 _compressedOopBase = CompressedOops::base();
635 _compressedKlassShift = CompressedKlassPointers::shift();
636 _compressedKlassBase = CompressedKlassPointers::base();
637 _contendedPaddingWidth = ContendedPaddingWidth;
638 _objectAlignment = ObjectAlignmentInBytes;
639 _gcCardSize = GCCardSizeInBytes;
640 _gc = (uint)Universe::heap()->kind();
641 _cpu_features_offset = cpu_features_offset;
642 }
643
// Check that the CPU features recorded in the cache are supported by the
// current CPU. Logs recorded/extra/missing feature names at Debug level.
// Returns false when a recorded (required) feature is missing at runtime.
bool AOTCodeCache::Config::verify_cpu_features(AOTCodeCache* cache) const {
  LogStreamHandle(Debug, aot, codecache, init) log;
  uint offset = _cpu_features_offset;
  uint cpu_features_size = *(uint *)cache->addr(offset);
  assert(cpu_features_size == (uint)VM_Version::cpu_features_size(), "must be");
  offset += sizeof(uint); // skip the stored size word; the feature data follows

  void* cached_cpu_features_buffer = (void *)cache->addr(offset);
  if (log.is_enabled()) {
    ResourceMark rm; // required for stringStream::as_string()
    stringStream ss;
    VM_Version::get_cpu_features_name(cached_cpu_features_buffer, ss);
    log.print_cr("CPU features recorded in AOTCodeCache: %s", ss.as_string());
  }

  if (VM_Version::supports_features(cached_cpu_features_buffer)) {
    // Compatible: optionally report features this CPU has beyond the recorded set.
    if (log.is_enabled()) {
      ResourceMark rm; // required for stringStream::as_string()
      stringStream ss;
      char* runtime_cpu_features = NEW_RESOURCE_ARRAY(char, VM_Version::cpu_features_size());
      VM_Version::store_cpu_features(runtime_cpu_features);
      VM_Version::get_missing_features_name(runtime_cpu_features, cached_cpu_features_buffer, ss);
      if (!ss.is_empty()) {
        log.print_cr("Additional runtime CPU features: %s", ss.as_string());
      }
    }
  } else {
    // Incompatible: report which recorded features the current CPU lacks.
    if (log.is_enabled()) {
      ResourceMark rm; // required for stringStream::as_string()
      stringStream ss;
      char* runtime_cpu_features = NEW_RESOURCE_ARRAY(char, VM_Version::cpu_features_size());
      VM_Version::store_cpu_features(runtime_cpu_features);
      VM_Version::get_missing_features_name(cached_cpu_features_buffer, runtime_cpu_features, ss);
      log.print_cr("AOT Code Cache disabled: required cpu features are missing: %s", ss.as_string());
    }
    return false;
  }
  return true;
}
683
684 bool AOTCodeCache::Config::verify(AOTCodeCache* cache) const {
685 // First checks affect all cached AOT code
686 #ifdef ASSERT
687 if ((_flags & debugVM) == 0) {
688 log_debug(aot, codecache, init)("AOT Code Cache disabled: it was created by product VM, it can't be used by debug VM");
689 return false;
690 }
691 #else
692 if ((_flags & debugVM) != 0) {
693 log_debug(aot, codecache, init)("AOT Code Cache disabled: it was created by debug VM, it can't be used by product VM");
694 return false;
695 }
696 #endif
697
698 size_t codeCacheSize = pointer_delta(CodeCache::high_bound(), CodeCache::low_bound(), 1);
699 if (codeCacheSize > _codeCacheSize) { // Only allow smaller or equal CodeCache size in production run
700 log_debug(aot, codecache, init)("AOT Code Cache disabled: it was created with CodeCache size = %dKb vs current %dKb", (int)(_codeCacheSize/K), (int)(codeCacheSize/K));
701 return false;
702 }
703
704 CollectedHeap::Name aot_gc = (CollectedHeap::Name)_gc;
705 if (aot_gc != Universe::heap()->kind()) {
706 log_debug(aot, codecache, init)("AOT Code Cache disabled: it was created with different GC: %s vs current %s", GCConfig::hs_err_name(aot_gc), GCConfig::hs_err_name());
707 return false;
708 }
709
710 // We don't need to cache CardTable::card_shift() if GCCardSizeInBytes stay the same
711 if (_gcCardSize != (uint)GCCardSizeInBytes) {
712 log_debug(aot, codecache, init)("AOT Code Cache disabled: it was created with GCCardSizeInBytes = %d vs current %d", _gcCardSize, GCCardSizeInBytes);
713 return false;
714 }
715
716 if (_objectAlignment != (uint)ObjectAlignmentInBytes) {
717 log_debug(aot, codecache, init)("AOT Code Cache disabled: it was created with ObjectAlignmentInBytes = %d vs current %d", _objectAlignment, ObjectAlignmentInBytes);
718 return false;
719 }
720
721 if (((_flags & enableContendedPadding) != 0) != EnableContended) {
722 log_debug(aot, codecache, init)("AOT Code Cache disabled: it was created with EnableContended = %s vs current %s", (EnableContended ? "false" : "true"), (EnableContended ? "true" : "false"));
723 return false;
724 }
725 if (((_flags & restrictContendedPadding) != 0) != RestrictContended) {
726 log_debug(aot, codecache, init)("AOT Code Cache disabled: it was created with RestrictContended = %s vs current %s", (RestrictContended ? "false" : "true"), (RestrictContended ? "true" : "false"));
727 return false;
728 }
729 if (_contendedPaddingWidth != (uint)ContendedPaddingWidth) {
730 log_debug(aot, codecache, init)("AOT Code Cache disabled: it was created with ContendedPaddingWidth = %d vs current %d", _contendedPaddingWidth, ContendedPaddingWidth);
731 return false;
732 }
733
734 if (((_flags & preserveFramePointer) != 0) != PreserveFramePointer) {
735 log_debug(aot, codecache, init)("AOT Code Cache disabled: it was created with PreserveFramePointer = %s vs current %s", (PreserveFramePointer ? "false" : "true"), (PreserveFramePointer ? "true" : "false"));
736 return false;
737 }
738
739 if (((_flags & compressedClassPointers) != 0) != UseCompressedClassPointers) {
740 log_debug(aot, codecache, init)("AOT Code Cache disabled: it was created with UseCompressedClassPointers = %s vs current %s", (UseCompressedClassPointers ? "false" : "true"), (UseCompressedClassPointers ? "true" : "false"));
741 return false;
742 }
743 if (_compressedKlassShift != (uint)CompressedKlassPointers::shift()) {
744 log_debug(aot, codecache, init)("AOT Code Cache disabled: it was created with CompressedKlassPointers::shift() = %d vs current %d", _compressedKlassShift, CompressedKlassPointers::shift());
745 return false;
746 }
747 if ((_compressedKlassBase == nullptr || CompressedKlassPointers::base() == nullptr) && (_compressedKlassBase != CompressedKlassPointers::base())) {
748 log_debug(aot, codecache, init)("AOT Code Cache disabled: incompatible CompressedKlassPointers::base(): %p vs current %p", _compressedKlassBase, CompressedKlassPointers::base());
749 return false;
750 }
751
752 if (((_flags & compressedOops) != 0) != UseCompressedOops) {
753 log_debug(aot, codecache, init)("AOT Code Cache disabled: it was created with UseCompressedOops = %s vs current %s", (UseCompressedOops ? "false" : "true"), (UseCompressedOops ? "true" : "false"));
754 return false;
755 }
756 if (_compressedOopShift != (uint)CompressedOops::shift()) {
757 log_debug(aot, codecache, init)("AOT Code Cache disabled: it was created with different CompressedOops::shift(): %d vs current %d", _compressedOopShift, CompressedOops::shift());
758 return false;
759 }
760 if ((_compressedOopBase == nullptr || CompressedOops::base() == nullptr) && (_compressedOopBase != CompressedOops::base())) {
761 log_debug(aot, codecache, init)("AOTStubCaching is disabled: incompatible CompressedOops::base(): %p vs current %p", _compressedOopBase, CompressedOops::base());
762 return false;
763 }
764
765 // Next affects only AOT nmethod
766 if (((_flags & systemClassAssertions) != 0) != JavaAssertions::systemClassDefault()) {
767 log_debug(aot, codecache, init)("AOT Code Cache disabled: it was created with JavaAssertions::systemClassDefault() = %s vs current %s", (JavaAssertions::systemClassDefault() ? "disabled" : "enabled"), (JavaAssertions::systemClassDefault() ? "enabled" : "disabled"));
768 FLAG_SET_ERGO(AOTCodeCaching, false);
769 }
770 if (((_flags & userClassAssertions) != 0) != JavaAssertions::userClassDefault()) {
771 log_debug(aot, codecache, init)("AOT Code Cache disabled: it was created with JavaAssertions::userClassDefault() = %s vs current %s", (JavaAssertions::userClassDefault() ? "disabled" : "enabled"), (JavaAssertions::userClassDefault() ? "enabled" : "disabled"));
772 FLAG_SET_ERGO(AOTCodeCaching, false);
773 }
774
775 if (!verify_cpu_features(cache)) {
776 return false;
777 }
778 return true;
779 }
780
// Validate the cache header against this VM: the format version must match
// exactly, and the mapped region must be at least as large as the cache
// size recorded in the header.
bool AOTCodeCache::Header::verify(uint load_size) const {
  if (_version != AOT_CODE_VERSION) {
    log_debug(aot, codecache, init)("AOT Code Cache disabled: different AOT Code version %d vs %d recorded in AOT Code header", AOT_CODE_VERSION, _version);
    return false;
  }
  if (load_size < _cache_size) {
    log_debug(aot, codecache, init)("AOT Code Cache disabled: AOT Code Cache size %d < %d recorded in AOT Code header", load_size, _cache_size);
    return false;
  }
  return true;
}
792
// Count of threads currently reading nmethods from the cache; the destructor
// waits for it to drain (wait_for_no_nmethod_readers()) before closing.
volatile int AOTCodeCache::_nmethod_readers = 0;
794
795 AOTCodeCache* AOTCodeCache::open_for_use() {
796 if (AOTCodeCache::is_on_for_use()) {
797 return AOTCodeCache::cache();
798 }
799 return nullptr;
800 }
801
802 AOTCodeCache* AOTCodeCache::open_for_dump() {
803 if (AOTCodeCache::is_on_for_dump()) {
804 AOTCodeCache* cache = AOTCodeCache::cache();
805 cache->clear_lookup_failed(); // Reset bit
806 return cache;
807 }
808 return nullptr;
809 }
810
811 bool AOTCodeCache::is_address_in_aot_cache(address p) {
812 AOTCodeCache* cache = open_for_use();
813 if (cache == nullptr) {
814 return false;
815 }
816 if ((p >= (address)cache->cache_buffer()) &&
817 (p < (address)(cache->cache_buffer() + cache->load_size()))) {
818 return true;
819 }
820 return false;
821 }
822
// Copy 'size' bytes from 'from' to 'to' and trace the transfer.
static void copy_bytes(const char* from, address to, uint size) {
  // (int)size > 0 rejects both zero-length copies and sizes with the top
  // bit set (which would be negative when viewed as int).
  assert((int)size > 0, "sanity");
  memcpy(to, from, size);
  log_trace(aot, codecache)("Copied %d bytes from " INTPTR_FORMAT " to " INTPTR_FORMAT, size, p2i(from), p2i(to));
}
828
829 AOTCodeReader::AOTCodeReader(AOTCodeCache* cache, AOTCodeEntry* entry, CompileTask* task) {
830 _cache = cache;
831 _entry = entry;
832 _load_buffer = cache->cache_buffer();
833 _read_position = 0;
834 if (task != nullptr) {
835 _compile_id = task->compile_id();
836 _comp_level = task->comp_level();
837 _preload = task->preload();
838 } else {
839 _compile_id = 0;
840 _comp_level = 0;
841 _preload = false;
842 }
843 _lookup_failed = false;
844 }
845
846 void AOTCodeReader::set_read_position(uint pos) {
847 if (pos == _read_position) {
848 return;
849 }
850 assert(pos < _cache->load_size(), "offset:%d >= file size:%d", pos, _cache->load_size());
851 _read_position = pos;
852 }
853
854 bool AOTCodeCache::set_write_position(uint pos) {
855 if (pos == _write_position) {
856 return true;
857 }
858 if (_store_size < _write_position) {
859 _store_size = _write_position; // Adjust during write
860 }
861 assert(pos < _store_size, "offset:%d >= file size:%d", pos, _store_size);
862 _write_position = pos;
863 return true;
864 }
865
866 static char align_buffer[256] = { 0 };
867
868 bool AOTCodeCache::align_write() {
869 // We are not executing code from cache - we copy it by bytes first.
870 // No need for big alignment (or at all).
871 uint padding = DATA_ALIGNMENT - (_write_position & (DATA_ALIGNMENT - 1));
872 if (padding == DATA_ALIGNMENT) {
873 return true;
874 }
875 uint n = write_bytes((const void*)&align_buffer, padding);
876 if (n != padding) {
877 return false;
878 }
879 log_trace(aot, codecache)("Adjust write alignment in AOT Code Cache");
880 return true;
881 }
882
883 // Check to see if AOT code cache has required space to store "nbytes" of data
884 address AOTCodeCache::reserve_bytes(uint nbytes) {
885 assert(for_dump(), "Code Cache file is not created");
886 uint new_position = _write_position + nbytes;
887 if (new_position >= (uint)((char*)_store_entries - _store_buffer)) {
888 log_warning(aot,codecache)("Failed to ensure %d bytes at offset %d in AOT Code Cache. Increase AOTCodeMaxSize.",
889 nbytes, _write_position);
890 set_failed();
891 report_store_failure();
892 return nullptr;
893 }
894 address buffer = (address)(_store_buffer + _write_position);
895 log_trace(aot, codecache)("Reserved %d bytes at offset %d in AOT Code Cache", nbytes, _write_position);
896 _write_position += nbytes;
897 if (_store_size < _write_position) {
898 _store_size = _write_position;
899 }
900 return buffer;
901 }
902
903 uint AOTCodeCache::write_bytes(const void* buffer, uint nbytes) {
904 assert(for_dump(), "Code Cache file is not created");
905 if (nbytes == 0) {
906 return 0;
907 }
908 uint new_position = _write_position + nbytes;
909 if (new_position >= (uint)((char*)_store_entries - _store_buffer)) {
910 log_warning(aot, codecache)("Failed to write %d bytes at offset %d to AOT Code Cache. Increase AOTCodeMaxSize.",
911 nbytes, _write_position);
912 set_failed();
913 report_store_failure();
914 return 0;
915 }
916 copy_bytes((const char* )buffer, (address)(_store_buffer + _write_position), nbytes);
917 log_trace(aot, codecache)("Wrote %d bytes at offset %d to AOT Code Cache", nbytes, _write_position);
918 _write_position += nbytes;
919 if (_store_size < _write_position) {
920 _store_size = _write_position;
921 }
922 return nbytes;
923 }
924
// Look up a stored nmethod entry for 'method' at compilation level
// 'comp_level'. Returns nullptr when the method is not in the AOT cache,
// a JIT recompilation was already requested, the level is disabled via
// DisableAOTCode, a matching compiler directive says to ignore it, or no
// matching entry exists.
AOTCodeEntry* AOTCodeCache::find_code_entry(const methodHandle& method, uint comp_level) {
  assert(is_using_code(), "AOT code caching should be enabled");
  if (!method->in_aot_cache()) {
    return nullptr;
  }

  MethodCounters* mc = method->method_counters();
  if (mc != nullptr && (mc->aot_code_recompile_requested() > 0)) {
    return nullptr; // Already requested JIT compilation
  }

  // DisableAOTCode is a bit mask: bit 0 disables level 1, bit 1 level 2,
  // bit 2 level 4.
  switch (comp_level) {
    case CompLevel_simple:
      if ((DisableAOTCode & (1 << 0)) != 0) {
        return nullptr;
      }
      break;
    case CompLevel_limited_profile:
      if ((DisableAOTCode & (1 << 1)) != 0) {
        return nullptr;
      }
      break;
    case CompLevel_full_optimization:
      if ((DisableAOTCode & (1 << 2)) != 0) {
        return nullptr;
      }
      break;

    default: return nullptr; // Level 1, 2, and 4 only
  }
  TraceTime t1("Total time to find AOT code", &_t_totalFind, enable_timers(), false);
  if (is_on() && _cache->cache_buffer() != nullptr) {
    // Entry ids for nmethods are AOT-cache offsets of the Method.
    uint id = AOTCacheAccess::convert_method_to_offset(method());
    AOTCodeEntry* entry = _cache->find_entry(AOTCodeEntry::Nmethod, id, comp_level);
    if (entry == nullptr) {
      LogStreamHandle(Info, aot, codecache, nmethod) log;
      if (log.is_enabled()) {
        ResourceMark rm;
        const char* target_name = method->name_and_sig_as_C_string();
        log.print("Missing entry for '%s' (comp_level %d, id: " UINT32_FORMAT_X_0 ")", target_name, (uint)comp_level, id);
      }
#ifdef ASSERT
    } else {
      // Debug-only cross-check: the entry found by id must refer back to
      // the same Method we looked up.
      assert(!entry->has_clinit_barriers(), "only preload code should have clinit barriers");
      ResourceMark rm;
      assert(method() == entry->method(), "AOTCodeCache: saved nmethod's method %p (name: %s id: " UINT32_FORMAT_X_0
             ") is different from the method %p (name: %s, id: " UINT32_FORMAT_X_0 " being looked up" ,
             entry->method(), entry->method()->name_and_sig_as_C_string(), entry->id(), method(), method()->name_and_sig_as_C_string(), id);
#endif
    }

    // Honor per-method compiler directives even when an entry was found.
    DirectiveSet* directives = DirectivesStack::getMatchingDirective(method, nullptr);
    if (directives->IgnorePrecompiledOption) {
      LogStreamHandle(Info, aot, codecache, compilation) log;
      if (log.is_enabled()) {
        log.print("Ignore AOT code entry on level %d for ", comp_level);
        method->print_value_on(&log);
      }
      return nullptr;
    }

    return entry;
  }
  return nullptr;
}
990
// Resolve the Method* this nmethod entry was stored for. Only valid for
// Nmethod entries while the cache is open for use: _id holds an AOT-cache
// offset which AOTCacheAccess converts back into the Method pointer.
Method* AOTCodeEntry::method() {
  assert(_kind == Nmethod, "invalid kind %d", _kind);
  assert(AOTCodeCache::is_on_for_use(), "must be");
  return AOTCacheAccess::convert_offset_to_method(_id);
}
996
// Placement-style allocation: entries are carved out of the cache's own
// entries area via add_entry(), not allocated on the C heap.
void* AOTCodeEntry::operator new(size_t x, AOTCodeCache* cache) {
  return (void*)(cache->add_entry());
}
1000
1001 static bool check_entry(AOTCodeEntry::Kind kind, uint id, uint comp_level, AOTCodeEntry* entry) {
1002 if (entry->kind() == kind) {
1003 assert(entry->id() == id, "sanity");
1004 if (kind != AOTCodeEntry::Nmethod || // addapters and stubs have only one version
1005 // Look only for normal AOT code entry, preload code is handled separately
1006 (!entry->not_entrant() && (entry->comp_level() == comp_level))) {
1007 return true; // Found
1008 }
1009 }
1010 return false;
1011 }
1012
// Find an entry of the given kind/id (and, for nmethods, comp_level) via
// binary search of the sorted [id, index] search table. Table pointers are
// lazily initialized from the load header on first call. Because several
// entries can share an id (e.g. one method at different comp levels),
// a linear scan of the adjacent same-id slots follows the binary probe.
AOTCodeEntry* AOTCodeCache::find_entry(AOTCodeEntry::Kind kind, uint id, uint comp_level) {
  assert(_for_use, "sanity");
  uint count = _load_header->entries_count();
  if (_load_entries == nullptr) {
    // Read it
    _search_entries = (uint*)addr(_load_header->search_table_offset()); // [id, index]
    _load_entries = (AOTCodeEntry*)addr(_load_header->entries_offset());
    log_debug(aot, codecache, init)("Read %d entries table at offset %d from AOT Code Cache", count, _load_header->entries_offset());
  }
  // Binary search
  int l = 0;
  int h = count - 1;
  while (l <= h) {
    int mid = (l + h) >> 1;
    int ix = mid * 2; // two uints (id, index) per table slot
    uint is = _search_entries[ix];
    if (is == id) {
      int index = _search_entries[ix + 1];
      AOTCodeEntry* entry = &(_load_entries[index]);
      if (check_entry(kind, id, comp_level, entry)) {
        return entry; // Found
      }
      // Duplicate ids are adjacent in the sorted table; scan around the
      // probe point in both directions.
      for (int i = mid - 1; i >= l; i--) { // search back
        ix = i * 2;
        is = _search_entries[ix];
        if (is != id) {
          break;
        }
        index = _search_entries[ix + 1];
        AOTCodeEntry* entry = &(_load_entries[index]);
        if (check_entry(kind, id, comp_level, entry)) {
          return entry; // Found
        }
      }
      for (int i = mid + 1; i <= h; i++) { // search forward
        ix = i * 2;
        is = _search_entries[ix];
        if (is != id) {
          break;
        }
        index = _search_entries[ix + 1];
        AOTCodeEntry* entry = &(_load_entries[index]);
        if (check_entry(kind, id, comp_level, entry)) {
          return entry; // Found
        }
      }
      break; // No match found
    } else if (is < id) {
      l = mid + 1;
    } else {
      h = mid - 1;
    }
  }
  return nullptr;
}
1069
1070 void AOTCodeCache::invalidate_entry(AOTCodeEntry* entry) {
1071 assert(entry!= nullptr, "all entries should be read already");
1072 if (entry->not_entrant()) {
1073 return; // Someone invalidated it already
1074 }
1075 #ifdef ASSERT
1076 assert(_load_entries != nullptr, "sanity");
1077 {
1078 uint name_offset = entry->offset() + entry->name_offset();
1079 const char* name = _load_buffer + name_offset;;
1080 uint level = entry->comp_level();
1081 uint comp_id = entry->comp_id();
1082 bool for_preload = entry->for_preload();
1083 bool clinit_brs = entry->has_clinit_barriers();
1084 log_info(aot, codecache, nmethod)("Invalidating entry for '%s' (comp_id %d, comp_level %d, hash: " UINT32_FORMAT_X_0 "%s%s)",
1085 name, comp_id, level, entry->id(), (for_preload ? "P" : "A"), (clinit_brs ? ", has clinit barriers" : ""));
1086 }
1087 assert(entry->is_loaded() || entry->for_preload(), "invalidate only AOT code in use or a preload code");
1088 bool found = false;
1089 uint i = 0;
1090 uint count = 0;
1091 if (entry->for_preload()) {
1092 count = _load_header->preload_entries_count();
1093 AOTCodeEntry* preload_entry = (AOTCodeEntry*)addr(_load_header->preload_entries_offset());
1094 for (; i < count; i++) {
1095 if (entry == &preload_entry[i]) {
1096 break;
1097 }
1098 }
1099 } else {
1100 count = _load_header->entries_count();
1101 for(; i < count; i++) {
1102 if (entry == &(_load_entries[i])) {
1103 break;
1104 }
1105 }
1106 }
1107 found = (i < count);
1108 assert(found, "entry should exist");
1109 #endif
1110 entry->set_not_entrant();
1111 uint name_offset = entry->offset() + entry->name_offset();
1112 const char* name = _load_buffer + name_offset;;
1113 uint level = entry->comp_level();
1114 uint comp_id = entry->comp_id();
1115 bool for_preload = entry->for_preload();
1116 bool clinit_brs = entry->has_clinit_barriers();
1117 log_info(aot, codecache, nmethod)("Invalidated entry for '%s' (comp_id %d, comp_level %d, hash: " UINT32_FORMAT_X_0 "%s%s)",
1118 name, comp_id, level, entry->id(), (for_preload ? "P" : "A"), (clinit_brs ? ", has clinit barriers" : ""));
1119
1120 if (!for_preload && (entry->comp_level() == CompLevel_full_optimization)) {
1121 // Invalidate preload code if normal AOT C2 code is invalidated,
1122 // most likely because some dependencies changed during run.
1123 // We can still use normal AOT code if preload code is
1124 // invalidated - normal AOT code has less restrictions.
1125 Method* method = entry->method();
1126 AOTCodeEntry* preload_entry = method->aot_code_entry();
1127 if (preload_entry != nullptr) {
1128 assert(preload_entry->for_preload(), "expecting only such entries here");
1129 invalidate_entry(preload_entry);
1130 }
1131 }
1132 }
1133
// qsort comparator for the [id, index] search table: orders records by the
// unsigned id stored in the first slot of each pair.
static int uint_cmp(const void *i, const void *j) {
  uint a = *(uint *)i;
  uint b = *(uint *)j;
  if (a < b) {
    return -1;
  }
  if (a > b) {
    return 1;
  }
  return 0;
}
1139
// Write the recorded CPU feature set into 'buffer': a uint length prefix
// followed by 'buffer_size' bytes of raw feature data from VM_Version.
// 'buffer' is passed by reference and advanced past the data, aligned up
// to DATA_ALIGNMENT so the next writer starts on an aligned boundary.
void AOTCodeCache::store_cpu_features(char*& buffer, uint buffer_size) {
  uint* size_ptr = (uint *)buffer;
  *size_ptr = buffer_size;
  buffer += sizeof(uint);

  VM_Version::store_cpu_features(buffer);
  log_debug(aot, codecache, exit)("CPU features recorded in AOTCodeCache: %s", VM_Version::features_string());
  buffer += buffer_size;
  buffer = align_up(buffer, DATA_ALIGNMENT);
}
1150
// Final phase of dumping: relocate everything accumulated in the temporary
// store buffer (code, C strings, entry descriptors) into one contiguous
// region allocated inside the AOT cache, build the sorted [id, index]
// search table, and write the finalized header at the start. The final
// layout is: header | cpu features | preload entry descriptors | preload
// code | normal entry descriptors | normal code | strings | search table.
// Returns false only on a write failure; "nothing to write" returns true.
bool AOTCodeCache::finish_write() {
  if (!align_write()) {
    return false;
  }
  // End of AOT code
  uint code_size = _write_position;
  uint strings_offset = code_size; // strings follow the code in the store buffer
  int strings_count = store_strings();
  if (strings_count < 0) {
    return false;
  }
  if (!align_write()) {
    return false;
  }
  uint strings_size = _write_position - strings_offset;

  uint code_count = _store_entries_cnt;
  if (code_count > 0) {
    _aot_code_directory = CachedCodeDirectory::create();
    assert(_aot_code_directory != nullptr, "Sanity check");

    // Upper-bound size estimate for the final contiguous region.
    uint header_size = (uint)align_up(sizeof(AOTCodeCache::Header), DATA_ALIGNMENT);
    uint search_count = code_count * 2; // one [id, index] pair per entry
    uint search_size = search_count * sizeof(uint);
    uint entries_size = (uint)align_up(code_count * sizeof(AOTCodeEntry), DATA_ALIGNMENT); // In bytes
    // _write_position should include code and strings
    uint code_alignment = code_count * DATA_ALIGNMENT; // We align_up code size when storing it.
    uint cpu_features_size = VM_Version::cpu_features_size();
    uint total_cpu_features_size = sizeof(uint) + cpu_features_size; // sizeof(uint) to store cpu_features_size
    uint total_size = header_size + _write_position + code_alignment + search_size + entries_size +
                      align_up(total_cpu_features_size, DATA_ALIGNMENT);
    assert(total_size < max_aot_code_size(), "AOT Code size (" UINT32_FORMAT " bytes) is greater than AOTCodeMaxSize(" UINT32_FORMAT " bytes).", total_size, max_aot_code_size());

    // Allocate in AOT Cache buffer
    char* buffer = (char *)AOTCacheAccess::allocate_aot_code_region(total_size + DATA_ALIGNMENT);
    char* start = align_up(buffer, DATA_ALIGNMENT);
    char* current = start + header_size; // Skip header

    uint cpu_features_offset = current - start;
    store_cpu_features(current, cpu_features_size); // advances 'current'
    assert(is_aligned(current, DATA_ALIGNMENT), "sanity check");
    assert(current < start + total_size, "sanity check");

    // Create ordered search table for entries [id, index];
    uint* search = NEW_C_HEAP_ARRAY(uint, search_count, mtCode);

    AOTCodeEntry* entries_address = _store_entries; // Pointer to latest entry
    AOTCodeStats stats;
    uint max_size = 0;
    // AOTCodeEntry entries were allocated in reverse in store buffer.
    // Process them in reverse order to cache first code first.

    // Store AOTCodeEntry-s for preload code
    current = align_up(current, DATA_ALIGNMENT);
    uint preload_entries_cnt = 0;
    uint preload_entries_offset = current - start;
    AOTCodeEntry* preload_entries = (AOTCodeEntry*)current;
    for (int i = code_count - 1; i >= 0; i--) {
      AOTCodeEntry* entry = &entries_address[i];
      if (entry->load_fail()) {
        continue;
      }
      if (entry->for_preload()) {
        if (entry->not_entrant()) {
          // Skip not entrant preload code:
          // we can't pre-load code which may have failing dependencies.
          log_info(aot, codecache, exit)("Skip not entrant preload code comp_id: %d, comp_level: %d, hash: " UINT32_FORMAT_X_0 "%s",
                                         entry->comp_id(), entry->comp_level(), entry->id(), (entry->has_clinit_barriers() ? ", has clinit barriers" : ""));
        } else {
          copy_bytes((const char*)entry, (address)current, sizeof(AOTCodeEntry));
          stats.collect_entry_stats(entry);
          current += sizeof(AOTCodeEntry);
          preload_entries_cnt++;
        }
      }
    }

    // Now write the data for preload AOTCodeEntry
    for (int i = 0; i < (int)preload_entries_cnt; i++) {
      AOTCodeEntry* entry = &preload_entries[i];
      uint size = align_up(entry->size(), DATA_ALIGNMENT);
      if (size > max_size) {
        max_size = size;
      }
      copy_bytes((_store_buffer + entry->offset()), (address)current, size);
      entry->set_offset(current - start); // New offset
      current += size;
    }

    current = align_up(current, DATA_ALIGNMENT);
    uint entries_count = 0;
    uint new_entries_offset = current - start;
    AOTCodeEntry* code_entries = (AOTCodeEntry*)current;
    // Now scan normal entries
    for (int i = code_count - 1; i >= 0; i--) {
      AOTCodeEntry* entry = &entries_address[i];
      if (entry->load_fail() || entry->for_preload()) {
        continue;
      }
      if (entry->not_entrant()) {
        // Unlike preload code, a normal entry that went not-entrant during
        // this run is still stored — its entrant bit is simply reset.
        log_info(aot, codecache, exit)("Not entrant new entry comp_id: %d, comp_level: %d, hash: " UINT32_FORMAT_X_0 "%s",
                                       entry->comp_id(), entry->comp_level(), entry->id(), (entry->has_clinit_barriers() ? ", has clinit barriers" : ""));
        entry->set_entrant(); // Reset
      }
      copy_bytes((const char*)entry, (address)current, sizeof(AOTCodeEntry));
      stats.collect_entry_stats(entry);
      current += sizeof(AOTCodeEntry);
      search[entries_count*2 + 0] = entry->id();
      search[entries_count*2 + 1] = entries_count;
      entries_count++;
    }

    // Now write the data for normal AOTCodeEntry
    for (int i = 0; i < (int)entries_count; i++) {
      AOTCodeEntry* entry = &code_entries[i];
      uint size = align_up(entry->size(), DATA_ALIGNMENT);
      if (size > max_size) {
        max_size = size;
      }
      copy_bytes((_store_buffer + entry->offset()), (address)current, size);
      entry->set_offset(current - start); // New offset
      current += size;
    }

    if (preload_entries_cnt == 0 && entries_count == 0) {
      log_info(aot, codecache, exit)("AOT Code Cache was not created: no entries");
      FREE_C_HEAP_ARRAY(uint, search);
      return true; // Nothing to write
    }
    uint total_entries_cnt = preload_entries_cnt + entries_count;
    assert(total_entries_cnt <= code_count, "%d > %d", total_entries_cnt, code_count);
    // Write strings
    if (strings_count > 0) {
      copy_bytes((_store_buffer + strings_offset), (address)current, strings_size);
      strings_offset = (current - start); // New offset
      current += strings_size;
    }

    uint search_table_offset = current - start;
    // Sort and store search table
    qsort(search, entries_count, 2*sizeof(uint), uint_cmp);
    search_size = 2 * entries_count * sizeof(uint); // only normal entries are searchable
    copy_bytes((const char*)search, (address)current, search_size);
    FREE_C_HEAP_ARRAY(uint, search);
    current += search_size;

    log_stats_on_exit(stats);

    uint size = (current - start);
    assert(size <= total_size, "%d > %d", size , total_size);
    log_debug(aot, codecache, exit)(" AOT code cache size: %u bytes", size);
    log_debug(aot, codecache, exit)(" header size: %u", header_size);
    log_debug(aot, codecache, exit)(" total code size: %u (max code's size: %u)", code_size, max_size);
    log_debug(aot, codecache, exit)(" entries size: %u", entries_size);
    log_debug(aot, codecache, exit)(" entry search table: %u", search_size);
    log_debug(aot, codecache, exit)(" C strings size: %u", strings_size);
    log_debug(aot, codecache, exit)(" CPU features data: %u", total_cpu_features_size);

    // Finalize header
    AOTCodeCache::Header* header = (AOTCodeCache::Header*)start;
    header->init(size, (uint)strings_count, strings_offset,
                 entries_count, search_table_offset, new_entries_offset,
                 preload_entries_cnt, preload_entries_offset,
                 stats.entry_count(AOTCodeEntry::Adapter), stats.entry_count(AOTCodeEntry::SharedBlob),
                 stats.entry_count(AOTCodeEntry::C1Blob), stats.entry_count(AOTCodeEntry::C2Blob),
                 stats.entry_count(AOTCodeEntry::Stub), cpu_features_offset);

    log_info(aot, codecache, exit)("Wrote %d AOT code entries to AOT Code Cache", total_entries_cnt);

    _aot_code_directory->set_aot_code_data(size, start);
  }
  return true;
}
1324
1325 //------------------Store/Load AOT code ----------------------
1326
// Store a (non-nmethod) CodeBlob — adapter or stub blob — into the cache.
// Writes, in order: blob name, archived blob image, relocation data,
// oop maps (if any), debug-only asm remarks/strings, and relocation fixup
// records; then allocates an AOTCodeEntry describing the whole span.
// Returns false when the cache is unavailable, the relevant dump flag is
// off, or any write fails.
bool AOTCodeCache::store_code_blob(CodeBlob& blob, AOTCodeEntry::Kind entry_kind, uint id, const char* name) {
  AOTCodeCache* cache = open_for_dump();
  if (cache == nullptr) {
    return false;
  }
  assert(AOTCodeEntry::is_valid_entry_kind(entry_kind), "invalid entry_kind %d", entry_kind);

  if (AOTCodeEntry::is_adapter(entry_kind) && !is_dumping_adapter()) {
    return false;
  }
  if (AOTCodeEntry::is_blob(entry_kind) && !is_dumping_stub()) {
    return false;
  }
  log_debug(aot, codecache, stubs)("Writing blob '%s' (id=%u, kind=%s) to AOT Code Cache", name, id, aot_code_entry_kind_name[entry_kind]);

#ifdef ASSERT
  LogStreamHandle(Trace, aot, codecache, stubs) log;
  if (log.is_enabled()) {
    FlagSetting fs(PrintRelocations, true);
    blob.print_on(&log);
  }
#endif
  // we need to take a lock to prevent race between compiler threads generating AOT code
  // and the main thread generating adapter
  MutexLocker ml(Compile_lock);
  if (!is_on()) {
    return false; // AOT code cache was already dumped and closed.
  }
  if (!cache->align_write()) {
    return false;
  }
  uint entry_position = cache->_write_position;

  // Write name
  uint name_offset = cache->_write_position - entry_position;
  uint name_size = (uint)strlen(name) + 1; // Includes '\0'
  uint n = cache->write_bytes(name, name_size);
  if (n != name_size) {
    return false;
  }

  // Write CodeBlob
  if (!cache->align_write()) {
    return false;
  }
  uint blob_offset = cache->_write_position - entry_position;
  address archive_buffer = cache->reserve_bytes(blob.size());
  if (archive_buffer == nullptr) {
    return false;
  }
  CodeBlob::archive_blob(&blob, archive_buffer);

  uint reloc_data_size = blob.relocation_size();
  n = cache->write_bytes((address)blob.relocation_begin(), reloc_data_size);
  if (n != reloc_data_size) {
    return false;
  }

  bool has_oop_maps = false;
  if (blob.oop_maps() != nullptr) {
    if (!cache->write_oop_map_set(blob)) {
      return false;
    }
    has_oop_maps = true;
  }

#ifndef PRODUCT
  // Write asm remarks
  if (!cache->write_asm_remarks(blob.asm_remarks(), /* use_string_table */ true)) {
    return false;
  }
  if (!cache->write_dbg_strings(blob.dbg_strings(), /* use_string_table */ true)) {
    return false;
  }
#endif /* PRODUCT */

  if (!cache->write_relocations(blob)) {
    if (!cache->failed()) {
      // We may miss an address in AOT table - skip this code blob.
      // Rewind the write position so the partial data is reclaimed.
      cache->set_write_position(entry_position);
    }
    return false;
  }

  // Allocated in the cache's entries area via placement operator new.
  uint entry_size = cache->_write_position - entry_position;
  AOTCodeEntry* entry = new(cache) AOTCodeEntry(entry_kind, encode_id(entry_kind, id),
                                                entry_position, entry_size, name_offset, name_size,
                                                blob_offset, has_oop_maps);
  log_debug(aot, codecache, stubs)("Wrote code blob '%s' (id=%u, kind=%s) to AOT Code Cache", name, id, aot_code_entry_kind_name[entry_kind]);
  return true;
}
1418
1419 bool AOTCodeCache::store_code_blob(CodeBlob& blob, AOTCodeEntry::Kind entry_kind, BlobId id) {
1420 assert(AOTCodeEntry::is_blob(entry_kind),
1421 "wrong entry kind for blob id %s", StubInfo::name(id));
1422 return store_code_blob(blob, entry_kind, (uint)id, StubInfo::name(id));
1423 }
1424
// Load a previously stored (non-nmethod) CodeBlob from the cache and
// instantiate it in the code cache. Returns nullptr when the AOT cache is
// unavailable, the relevant use flag is off, no entry matches, or
// materialization fails.
CodeBlob* AOTCodeCache::load_code_blob(AOTCodeEntry::Kind entry_kind, uint id, const char* name) {
  AOTCodeCache* cache = open_for_use();
  if (cache == nullptr) {
    return nullptr;
  }
  assert(AOTCodeEntry::is_valid_entry_kind(entry_kind), "invalid entry_kind %d", entry_kind);

  if (AOTCodeEntry::is_adapter(entry_kind) && !is_using_adapter()) {
    return nullptr;
  }
  if (AOTCodeEntry::is_blob(entry_kind) && !is_using_stub()) {
    return nullptr;
  }
  log_debug(aot, codecache, stubs)("Reading blob '%s' (id=%u, kind=%s) from AOT Code Cache", name, id, aot_code_entry_kind_name[entry_kind]);

  AOTCodeEntry* entry = cache->find_entry(entry_kind, encode_id(entry_kind, id));
  if (entry == nullptr) {
    return nullptr;
  }
  // No CompileTask: blobs are loaded outside of any compilation.
  AOTCodeReader reader(cache, entry, nullptr);
  CodeBlob* blob = reader.compile_code_blob(name);

  log_debug(aot, codecache, stubs)("%sRead blob '%s' (id=%u, kind=%s) from AOT Code Cache",
                                   (blob == nullptr? "Failed to " : ""), name, id, aot_code_entry_kind_name[entry_kind]);
  return blob;
}
1451
1452 CodeBlob* AOTCodeCache::load_code_blob(AOTCodeEntry::Kind entry_kind, BlobId id) {
1453 assert(AOTCodeEntry::is_blob(entry_kind),
1454 "wrong entry kind for blob id %s", StubInfo::name(id));
1455 return load_code_blob(entry_kind, (uint)id, StubInfo::name(id));
1456 }
1457
// Materialize the reader's entry as a live CodeBlob: verify the stored
// name matches 'name', then reconstruct the blob from its archived image,
// relocation data and oop maps, and patch its relocations for the current
// process. Returns nullptr on name mismatch or when the code cache is full.
CodeBlob* AOTCodeReader::compile_code_blob(const char* name) {
  uint entry_position = _entry->offset();

  // Read name
  uint name_offset = entry_position + _entry->name_offset();
  uint name_size = _entry->name_size(); // Includes '\0'
  const char* stored_name = addr(name_offset);

  if (strncmp(stored_name, name, (name_size - 1)) != 0) {
    log_warning(aot, codecache, stubs)("Saved blob's name '%s' is different from the expected name '%s'",
                                       stored_name, name);
    set_lookup_failed(); // Skip this blob
    return nullptr;
  }

  // Read archived code blob
  uint offset = entry_position + _entry->code_offset();
  CodeBlob* archived_blob = (CodeBlob*)addr(offset);
  offset += archived_blob->size();

  // Relocation data immediately follows the archived blob image.
  address reloc_data = (address)addr(offset);
  offset += archived_blob->relocation_size();
  set_read_position(offset);

  ImmutableOopMapSet* oop_maps = nullptr;
  if (_entry->has_oop_maps()) {
    oop_maps = read_oop_map_set();
  }

  CodeBlob* code_blob = CodeBlob::create(archived_blob,
                                         stored_name,
                                         reloc_data,
                                         oop_maps
                                        );
  if (code_blob == nullptr) { // no space left in CodeCache
    return nullptr;
  }

#ifndef PRODUCT
  code_blob->asm_remarks().init();
  read_asm_remarks(code_blob->asm_remarks(), /* use_string_table */ true);
  code_blob->dbg_strings().init();
  read_dbg_strings(code_blob->dbg_strings(), /* use_string_table */ true);
#endif // PRODUCT

  // Rebind stored relocations to addresses valid in this process.
  fix_relocations(code_blob);

#ifdef ASSERT
  LogStreamHandle(Trace, aot, codecache, stubs) log;
  if (log.is_enabled()) {
    FlagSetting fs(PrintRelocations, true);
    code_blob->print_on(&log);
  }
#endif
  return code_blob;
}
1514
// Store a simple generated stub (raw code bytes + name, no relocations)
// into the cache, keyed by its intrinsic id. The debug-only block fails
// fast if the stub's code section carries any relocation, since stubs are
// restored by plain byte copy in load_stub(). Returns false if dumping is
// off, the cache is unavailable, or a write fails.
bool AOTCodeCache::store_stub(StubCodeGenerator* cgen, vmIntrinsicID id, const char* name, address start) {
  if (!is_dumping_stub()) {
    return false;
  }
  AOTCodeCache* cache = open_for_dump();
  if (cache == nullptr) {
    return false;
  }
  log_info(aot, codecache, stubs)("Writing stub '%s' id:%d to AOT Code Cache", name, (int)id);
  if (!cache->align_write()) {
    return false;
  }
#ifdef ASSERT
  CodeSection* cs = cgen->assembler()->code_section();
  if (cs->has_locs()) {
    uint reloc_count = cs->locs_count();
    tty->print_cr("======== write stubs code section relocations [%d]:", reloc_count);
    // Collect additional data
    RelocIterator iter(cs);
    while (iter.next()) {
      switch (iter.type()) {
        case relocInfo::none:
          break;
        default: {
          iter.print_current_on(tty);
          fatal("stub's relocation %d unimplemented", (int)iter.type());
          break;
        }
      }
    }
  }
#endif
  uint entry_position = cache->_write_position;

  // Write code
  uint code_offset = 0;
  uint code_size = cgen->assembler()->pc() - start;
  uint n = cache->write_bytes(start, code_size);
  if (n != code_size) {
    return false;
  }
  // Write name
  uint name_offset = cache->_write_position - entry_position;
  uint name_size = (uint)strlen(name) + 1; // Includes '\0'
  n = cache->write_bytes(name, name_size);
  if (n != name_size) {
    return false;
  }
  // Allocated in the cache's entries area via placement operator new.
  uint entry_size = cache->_write_position - entry_position;
  AOTCodeEntry* entry = new(cache) AOTCodeEntry(entry_position, entry_size, name_offset, name_size,
                                                code_offset, code_size,
                                                AOTCodeEntry::Stub, (uint32_t)id);
  log_info(aot, codecache, stubs)("Wrote stub '%s' id:%d to AOT Code Cache", name, (int)id);
  return true;
}
1570
// Restore a stub's code by copying its stored bytes to 'start' (which must
// be the generator's current pc) and advancing the code section end. A name
// mismatch for the found id is treated as cache corruption: the whole cache
// is marked failed. Returns true only when code was copied in.
bool AOTCodeCache::load_stub(StubCodeGenerator* cgen, vmIntrinsicID id, const char* name, address start) {
  if (!is_using_stub()) {
    return false;
  }
  assert(start == cgen->assembler()->pc(), "wrong buffer");
  AOTCodeCache* cache = open_for_use();
  if (cache == nullptr) {
    return false;
  }
  AOTCodeEntry* entry = cache->find_entry(AOTCodeEntry::Stub, (uint)id);
  if (entry == nullptr) {
    return false;
  }
  uint entry_position = entry->offset();
  // Read name
  uint name_offset = entry->name_offset() + entry_position;
  uint name_size = entry->name_size(); // Includes '\0'
  const char* saved_name = cache->addr(name_offset);
  if (strncmp(name, saved_name, (name_size - 1)) != 0) {
    log_warning(aot, codecache)("Saved stub's name '%s' is different from '%s' for id:%d", saved_name, name, (int)id);
    cache->set_failed();
    report_load_failure();
    return false;
  }
  log_info(aot, codecache, stubs)("Reading stub '%s' id:%d from AOT Code Cache", name, (int)id);
  // Read code
  uint code_offset = entry->code_offset() + entry_position;
  uint code_size = entry->code_size();
  copy_bytes(cache->addr(code_offset), start, code_size);
  cgen->assembler()->code_section()->set_end(start + code_size);
  log_info(aot, codecache, stubs)("Read stub '%s' id:%d from AOT Code Cache", name, (int)id);
  return true;
}
1604
// Entry point for storing a compiled nmethod in the cache during the AOT
// assembly phase. Filters out OSR methods (asserted), non-C1/C2 compilers,
// and tier-3 (full-profile C1) compiles, then delegates the actual
// serialization to write_nmethod(). Returns the new entry or nullptr.
AOTCodeEntry* AOTCodeCache::store_nmethod(nmethod* nm, AbstractCompiler* compiler, bool for_preload) {
  if (!is_dumping_code()) {
    return nullptr;
  }
  assert(CDSConfig::is_dumping_aot_code(), "should be called only when allowed");
  AOTCodeCache* cache = open_for_dump();
  precond(cache != nullptr);
  precond(!nm->is_osr_method()); // AOT compilation is requested only during AOT cache assembly phase
  if (!compiler->is_c1() && !compiler->is_c2()) {
    // Only c1 and c2 compilers
    return nullptr;
  }
  int comp_level = nm->comp_level();
  if (comp_level == CompLevel_full_profile) {
    // Do not cache C1 compiles with full profile i.e. tier3
    return nullptr;
  }
  assert(comp_level == CompLevel_simple || comp_level == CompLevel_limited_profile || comp_level == CompLevel_full_optimization, "must be");

  TraceTime t1("Total time to store AOT code", &_t_totalStore, enable_timers(), false);
  AOTCodeEntry* entry = nullptr;
  entry = cache->write_nmethod(nm, for_preload);
  if (entry == nullptr) {
    log_info(aot, codecache, nmethod)("%d (L%d): nmethod store attempt failed", nm->compile_id(), comp_level);
  }
  // Clean up fields which could be set here
  cache->_for_preload = false;
  cache->_has_clinit_barriers = false;
  return entry;
}
1635
1636 AOTCodeEntry* AOTCodeCache::write_nmethod(nmethod* nm, bool for_preload) {
1637 AOTCodeCache* cache = open_for_dump();
1638 assert(cache != nullptr, "sanity check");
1639 assert(!nm->has_clinit_barriers() || (ClassInitBarrierMode > 0), "sanity");
1640 uint comp_id = nm->compile_id();
1641 uint comp_level = nm->comp_level();
1642 Method* method = nm->method();
1643 if (!AOTCacheAccess::can_generate_aot_code(method)) {
1644 ResourceMark rm;
1645 log_info(aot, codecache, nmethod)("%d (L%d): Skip method '%s' for AOT%s compile: not in AOT cache", comp_id, (int)comp_level, method->name_and_sig_as_C_string(), (for_preload ? " preload" : ""));
1646 assert(AOTCacheAccess::can_generate_aot_code(method), "sanity");
1647 return nullptr;
1648 }
1649 InstanceKlass* holder = method->method_holder();
1650 bool builtin_loader = holder->class_loader_data()->is_builtin_class_loader_data();
1651 if (!builtin_loader) {
1652 ResourceMark rm;
1653 log_info(aot, codecache, nmethod)("%d (L%d): Skip method '%s' loaded by custom class loader %s", comp_id, (int)comp_level, method->name_and_sig_as_C_string(), holder->class_loader_data()->loader_name());
1654 assert(builtin_loader, "sanity");
1655 return nullptr;
1656 }
1657
1658 _for_preload = for_preload;
1659 _has_clinit_barriers = nm->has_clinit_barriers();
1660 assert(!_has_clinit_barriers || _for_preload, "only preload code has clinit barriers");
1661
1662 if (!align_write()) {
1663 return nullptr;
1664 }
1665
1666 uint entry_position = _write_position;
1667
1668 // Write name
1669 uint name_offset = 0;
1670 uint name_size = 0;
1671 uint id = 0;
1672 uint n;
1673 {
1674 ResourceMark rm;
1675 const char* name = method->name_and_sig_as_C_string();
1676 log_info(aot, codecache, nmethod)("%d (L%d): Writing nmethod '%s' (comp level: %d, %s) to AOT Code Cache",
1677 comp_id, (int)comp_level, name, comp_level,
1678 (nm->has_clinit_barriers() ? ", has clinit barriers" : ""));
1679
1680 LogStreamHandle(Info, aot, codecache, loader) log;
1681 if (log.is_enabled()) {
1682 oop loader = holder->class_loader();
1683 oop domain = holder->protection_domain();
1684 log.print("Holder: ");
1685 holder->print_value_on(&log);
1686 log.print(" loader: ");
1687 if (loader == nullptr) {
1688 log.print("nullptr");
1689 } else {
1690 loader->print_value_on(&log);
1691 }
1692 log.print(" domain: ");
1693 if (domain == nullptr) {
1694 log.print("nullptr");
1695 } else {
1696 domain->print_value_on(&log);
1697 }
1698 log.cr();
1699 }
1700 name_offset = _write_position - entry_position;
1701 name_size = (uint)strlen(name) + 1; // Includes '/0'
1702 n = write_bytes(name, name_size);
1703 if (n != name_size) {
1704 return nullptr;
1705 }
1706 }
1707 id = AOTCacheAccess::delta_from_base_address((address)nm->method());
1708
1709 // Write CodeBlob
1710 if (!cache->align_write()) {
1711 return nullptr;
1712 }
1713 uint blob_offset = cache->_write_position - entry_position;
1714 address archive_buffer = cache->reserve_bytes(nm->size());
1715 if (archive_buffer == nullptr) {
1716 return nullptr;
1717 }
1718 CodeBlob::archive_blob(nm, archive_buffer);
1719
1720 uint reloc_data_size = nm->relocation_size();
1721 n = write_bytes((address)nm->relocation_begin(), reloc_data_size);
1722 if (n != reloc_data_size) {
1723 return nullptr;
1724 }
1725
1726 // Write oops and metadata present in the nmethod's data region
1727 if (!write_oops(nm)) {
1728 if (lookup_failed() && !failed()) {
1729 // Skip this method and reposition file
1730 set_write_position(entry_position);
1731 }
1732 return nullptr;
1733 }
1734 if (!write_metadata(nm)) {
1735 if (lookup_failed() && !failed()) {
1736 // Skip this method and reposition file
1737 set_write_position(entry_position);
1738 }
1739 return nullptr;
1740 }
1741
1742 bool has_oop_maps = false;
1743 if (nm->oop_maps() != nullptr) {
1744 if (!cache->write_oop_map_set(*nm)) {
1745 return nullptr;
1746 }
1747 has_oop_maps = true;
1748 }
1749
1750 uint immutable_data_size = nm->immutable_data_size();
1751 n = write_bytes(nm->immutable_data_begin(), immutable_data_size);
1752 if (n != immutable_data_size) {
1753 return nullptr;
1754 }
1755
1756 JavaThread* thread = JavaThread::current();
1757 HandleMark hm(thread);
1758 GrowableArray<Handle> oop_list;
1759 GrowableArray<Metadata*> metadata_list;
1760
1761 nm->create_reloc_immediates_list(thread, oop_list, metadata_list);
1762 if (!write_nmethod_reloc_immediates(oop_list, metadata_list)) {
1763 if (lookup_failed() && !failed()) {
1764 // Skip this method and reposition file
1765 set_write_position(entry_position);
1766 }
1767 return nullptr;
1768 }
1769
1770 if (!write_relocations(*nm, &oop_list, &metadata_list)) {
1771 return nullptr;
1772 }
1773
1774 #ifndef PRODUCT
1775 if (!cache->write_asm_remarks(nm->asm_remarks(), /* use_string_table */ false)) {
1776 return nullptr;
1777 }
1778 if (!cache->write_dbg_strings(nm->dbg_strings(), /* use_string_table */ false)) {
1779 return nullptr;
1780 }
1781 #endif /* PRODUCT */
1782
1783 uint entry_size = _write_position - entry_position;
1784 AOTCodeEntry* entry = new (this) AOTCodeEntry(AOTCodeEntry::Nmethod, id,
1785 entry_position, entry_size,
1786 name_offset, name_size,
1787 blob_offset, has_oop_maps,
1788 comp_level, comp_id,
1789 nm->has_clinit_barriers(), for_preload);
1790 {
1791 ResourceMark rm;
1792 const char* name = nm->method()->name_and_sig_as_C_string();
1793 log_info(aot, codecache, nmethod)("%d (L%d): Wrote nmethod '%s'%s to AOT Code Cache",
1794 comp_id, (int)comp_level, name, (for_preload ? " (for preload)" : ""));
1795 }
1796 if (VerifyAOTCode) {
1797 return nullptr;
1798 }
1799 return entry;
1800 }
1801
// Attempt to install the AOT nmethod attached to the compile task for
// 'target'. Returns true on successful registration; on failure the cache
// entry is marked load-failed and not-entrant so it will not be retried.
// Also records AOT-load start/finish timestamps on the task.
bool AOTCodeCache::load_nmethod(ciEnv* env, ciMethod* target, int entry_bci, AbstractCompiler* compiler, CompLevel comp_level) {
  if (!is_using_code()) {
    return false;
  }
  AOTCodeCache* cache = open_for_use();
  if (cache == nullptr) {
    return false;
  }
  // Only normal (non-OSR) entries are stored in the AOT code cache.
  assert(entry_bci == InvocationEntryBci, "unexpected entry_bci=%d", entry_bci);
  TraceTime t1("Total time to load AOT code", &_t_totalLoad, enable_timers(), false);
  CompileTask* task = env->task();
  task->mark_aot_load_start(os::elapsed_counter());
  AOTCodeEntry* entry = task->aot_code_entry();
  bool preload = task->preload();
  assert(entry != nullptr, "sanity");
  if (log_is_enabled(Info, aot, codecache, nmethod)) {
    // Transition into the VM only for logging; the load itself runs below.
    VM_ENTRY_MARK;
    ResourceMark rm;
    methodHandle method(THREAD, target->get_Method());
    const char* target_name = method->name_and_sig_as_C_string();
    uint id = AOTCacheAccess::convert_method_to_offset(method());
    bool clinit_brs = entry->has_clinit_barriers();
    log_info(aot, codecache, nmethod)("%d (L%d): %s nmethod '%s' (id: " UINT32_FORMAT_X_0 "%s)",
                                      task->compile_id(), task->comp_level(), (preload ? "Preloading" : "Reading"),
                                      target_name, id, (clinit_brs ? ", has clinit barriers" : ""));
  }
  // Guards against the cache being closed concurrently while we read.
  ReadingMark rdmk;
  if (rdmk.failed()) {
    // Cache is closed, cannot touch anything.
    return false;
  }

  AOTCodeReader reader(cache, entry, task);
  bool success = reader.compile_nmethod(env, target, compiler);
  if (success) {
    task->set_num_inlined_bytecodes(entry->num_inlined_bytecodes());
  } else {
    // Poison the entry so future lookups skip it.
    entry->set_load_fail();
    entry->set_not_entrant();
  }
  task->mark_aot_load_finish(os::elapsed_counter());
  return success;
}
1845
// Reconstruct and register an nmethod from its archived representation.
// The reads below must mirror the write order in AOTCodeCache::write_nmethod():
// archived CodeBlob, relocation data, data-section oops/metadata, oop maps,
// immutable data, then reloc-immediate oops/metadata.
bool AOTCodeReader::compile_nmethod(ciEnv* env, ciMethod* target, AbstractCompiler* compiler) {
  CompileTask* task = env->task();
  AOTCodeEntry* aot_code_entry = (AOTCodeEntry*)_entry;
  nmethod* nm = nullptr;

  uint entry_position = aot_code_entry->offset();
  uint archived_nm_offset = entry_position + aot_code_entry->code_offset();
  nmethod* archived_nm = (nmethod*)addr(archived_nm_offset);
  // Skip past the archived blob; subsequent sections follow it.
  set_read_position(archived_nm_offset + archived_nm->size());

  OopRecorder* oop_recorder = new OopRecorder(env->arena());
  env->set_oop_recorder(oop_recorder);

  uint offset;

  // Relocation data immediately follows the archived blob.
  offset = read_position();
  address reloc_data = (address)addr(offset);
  offset += archived_nm->relocation_size();
  set_read_position(offset);

  // Read oops and metadata
  VM_ENTRY_MARK
  GrowableArray<Handle> oop_list;
  GrowableArray<Metadata*> metadata_list;

  if (!read_oop_metadata_list(THREAD, target, oop_list, metadata_list, oop_recorder)) {
    return false;
  }

  ImmutableOopMapSet* oopmaps = read_oop_map_set();

  offset = read_position();
  address immutable_data = (address)addr(offset);
  offset += archived_nm->immutable_data_size();
  set_read_position(offset);

  // Oops/metadata referenced directly from relocations. Passing a nullptr
  // recorder: these are patched into the code, not recorded.
  GrowableArray<Handle> reloc_immediate_oop_list;
  GrowableArray<Metadata*> reloc_immediate_metadata_list;
  if (!read_oop_metadata_list(THREAD, target, reloc_immediate_oop_list, reloc_immediate_metadata_list, nullptr)) {
    return false;
  }

  // Read Dependencies (compressed already)
  Dependencies* dependencies = new Dependencies(env);
  dependencies->set_content(immutable_data, archived_nm->dependencies_size());
  env->set_dependencies(dependencies);

  const char* name = addr(entry_position + aot_code_entry->name_offset());

  if (VerifyAOTCode) {
    // NOTE(review): verification mode deliberately skips registration —
    // presumably paired with the same bail-out in write_nmethod(); confirm.
    return false;
  }

  TraceTime t1("Total time to register AOT nmethod", &_t_totalRegister, enable_timers(), false);
  nm = env->register_aot_method(THREAD,
                                target,
                                compiler,
                                archived_nm,
                                reloc_data,
                                oop_list,
                                metadata_list,
                                oopmaps,
                                immutable_data,
                                reloc_immediate_oop_list,
                                reloc_immediate_metadata_list,
                                this);
  // Success is reported via the task, not the returned pointer.
  bool success = task->is_success();
  if (success) {
    log_info(aot, codecache, nmethod)("%d (L%d): Read nmethod '%s' from AOT Code Cache", compile_id(), comp_level(), name);
#ifdef ASSERT
    LogStreamHandle(Debug, aot, codecache, nmethod) log;
    if (log.is_enabled()) {
      FlagSetting fs(PrintRelocations, true);
      nm->print_on(&log);
      nm->decode2(&log);
    }
#endif
  }

  return success;
}
1927
1928 bool skip_preload(methodHandle mh) {
1929 if (!mh->method_holder()->is_loaded()) {
1930 return true;
1931 }
1932 DirectiveSet* directives = DirectivesStack::getMatchingDirective(mh, nullptr);
1933 if (directives->DontPreloadOption) {
1934 LogStreamHandle(Info, aot, codecache, init) log;
1935 if (log.is_enabled()) {
1936 log.print("Exclude preloading code for ");
1937 mh->print_value_on(&log);
1938 }
1939 return true;
1940 }
1941 return false;
1942 }
1943
1944 void AOTCodeCache::preload_code(JavaThread* thread) {
1945 if (!is_using_code()) {
1946 return;
1947 }
1948 if ((DisableAOTCode & (1 << 3)) != 0) {
1949 return; // no preloaded code (level 5);
1950 }
1951 _cache->preload_aot_code(thread);
1952 }
1953
1954 void AOTCodeCache::preload_aot_code(TRAPS) {
1955 if (CompilationPolicy::compiler_count(CompLevel_full_optimization) == 0) {
1956 // Since we reuse the CompilerBroker API to install AOT code, we're required to have a JIT compiler for the
1957 // level we want (that is CompLevel_full_optimization).
1958 return;
1959 }
1960 TraceTime t1("Total time to preload AOT code", &_t_totalPreload, enable_timers(), false);
1961 assert(_for_use, "sanity");
1962 uint count = _load_header->entries_count();
1963 uint preload_entries_count = _load_header->preload_entries_count();
1964 if (preload_entries_count > 0) {
1965 log_info(aot, codecache, init)("Load %d preload entries from AOT Code Cache", preload_entries_count);
1966 AOTCodeEntry* preload_entry = (AOTCodeEntry*)addr(_load_header->preload_entries_offset());
1967 uint count = MIN2(preload_entries_count, AOTCodePreloadStop);
1968 for (uint i = AOTCodePreloadStart; i < count; i++) {
1969 AOTCodeEntry* entry = &preload_entry[i];
1970 if (entry->not_entrant()) {
1971 continue;
1972 }
1973 methodHandle mh(THREAD, entry->method());
1974 assert((mh.not_null() && AOTMetaspace::in_aot_cache((address)mh())), "sanity");
1975 if (skip_preload(mh)) {
1976 continue; // Exclude preloading for this method
1977 }
1978 assert(mh->method_holder()->is_loaded(), "");
1979 if (!mh->method_holder()->is_linked()) {
1980 assert(!HAS_PENDING_EXCEPTION, "");
1981 mh->method_holder()->link_class(THREAD);
1982 if (HAS_PENDING_EXCEPTION) {
1983 LogStreamHandle(Info, aot, codecache) log;
1984 if (log.is_enabled()) {
1985 ResourceMark rm;
1986 log.print("Linkage failed for %s: ", mh->method_holder()->external_name());
1987 THREAD->pending_exception()->print_value_on(&log);
1988 if (log_is_enabled(Debug, aot, codecache)) {
1989 THREAD->pending_exception()->print_on(&log);
1990 }
1991 }
1992 CLEAR_PENDING_EXCEPTION;
1993 }
1994 }
1995 if (mh->aot_code_entry() != nullptr) {
1996 // Second C2 compilation of the same method could happen for
1997 // different reasons without marking first entry as not entrant.
1998 continue; // Keep old entry to avoid issues
1999 }
2000 mh->set_aot_code_entry(entry);
2001 CompileBroker::compile_method(mh, InvocationEntryBci, CompLevel_full_optimization, 0, false, CompileTask::Reason_Preload, CHECK);
2002 }
2003 }
2004 }
2005
2006 // ------------ process code and data --------------
2007
2008 // Can't use -1. It is valid value for jump to iteself destination
2009 // used by static call stub: see NativeJump::jump_destination().
2010 #define BAD_ADDRESS_ID -2
2011
2012 bool AOTCodeCache::write_relocations(CodeBlob& code_blob, GrowableArray<Handle>* oop_list, GrowableArray<Metadata*>* metadata_list) {
2013 GrowableArray<uint> reloc_data;
2014 RelocIterator iter(&code_blob);
2015 LogStreamHandle(Trace, aot, codecache, reloc) log;
2016 while (iter.next()) {
2017 int idx = reloc_data.append(0); // default value
2018 switch (iter.type()) {
2019 case relocInfo::none:
2020 break;
2021 case relocInfo::oop_type: {
2022 oop_Relocation* r = (oop_Relocation*)iter.reloc();
2023 if (r->oop_is_immediate()) {
2024 assert(oop_list != nullptr, "sanity check");
2025 // store index of oop in the reloc immediate oop list
2026 Handle h(JavaThread::current(), r->oop_value());
2027 int oop_idx = oop_list->find(h);
2028 assert(oop_idx != -1, "sanity check");
2029 reloc_data.at_put(idx, (uint)oop_idx);
2030 }
2031 break;
2032 }
2033 case relocInfo::metadata_type: {
2034 metadata_Relocation* r = (metadata_Relocation*)iter.reloc();
2035 if (r->metadata_is_immediate()) {
2036 assert(metadata_list != nullptr, "sanity check");
2037 // store index of metadata in the reloc immediate metadata list
2038 int metadata_idx = metadata_list->find(r->metadata_value());
2039 assert(metadata_idx != -1, "sanity check");
2040 reloc_data.at_put(idx, (uint)metadata_idx);
2041 }
2042 break;
2043 }
2044 case relocInfo::virtual_call_type: // Fall through. They all call resolve_*_call blobs.
2045 case relocInfo::opt_virtual_call_type:
2046 case relocInfo::static_call_type: {
2047 CallRelocation* r = (CallRelocation*)iter.reloc();
2048 address dest = r->destination();
2049 if (dest == r->addr()) { // possible call via trampoline on Aarch64
2050 dest = (address)-1; // do nothing in this case when loading this relocation
2051 }
2052 int id = _table->id_for_address(dest, iter, &code_blob);
2053 if (id == BAD_ADDRESS_ID) {
2054 return false;
2055 }
2056 reloc_data.at_put(idx, id);
2057 break;
2058 }
2059 case relocInfo::trampoline_stub_type: {
2060 address dest = ((trampoline_stub_Relocation*)iter.reloc())->destination();
2061 int id = _table->id_for_address(dest, iter, &code_blob);
2062 if (id == BAD_ADDRESS_ID) {
2063 return false;
2064 }
2065 reloc_data.at_put(idx, id);
2066 break;
2067 }
2068 case relocInfo::static_stub_type:
2069 break;
2070 case relocInfo::runtime_call_type: {
2071 // Record offset of runtime destination
2072 CallRelocation* r = (CallRelocation*)iter.reloc();
2073 address dest = r->destination();
2074 if (dest == r->addr()) { // possible call via trampoline on Aarch64
2075 dest = (address)-1; // do nothing in this case when loading this relocation
2076 }
2077 int id = _table->id_for_address(dest, iter, &code_blob);
2078 if (id == BAD_ADDRESS_ID) {
2079 return false;
2080 }
2081 reloc_data.at_put(idx, id);
2082 break;
2083 }
2084 case relocInfo::runtime_call_w_cp_type:
2085 log_debug(aot, codecache, reloc)("runtime_call_w_cp_type relocation is not implemented");
2086 return false;
2087 case relocInfo::external_word_type: {
2088 // Record offset of runtime target
2089 address target = ((external_word_Relocation*)iter.reloc())->target();
2090 int id = _table->id_for_address(target, iter, &code_blob);
2091 if (id == BAD_ADDRESS_ID) {
2092 return false;
2093 }
2094 reloc_data.at_put(idx, id);
2095 break;
2096 }
2097 case relocInfo::internal_word_type: {
2098 address target = ((internal_word_Relocation*)iter.reloc())->target();
2099 // assert to make sure that delta fits into 32 bits
2100 assert(CodeCache::contains((void *)target), "Wrong internal_word_type relocation");
2101 uint delta = (uint)(target - code_blob.content_begin());
2102 reloc_data.at_put(idx, delta);
2103 break;
2104 }
2105 case relocInfo::section_word_type: {
2106 address target = ((section_word_Relocation*)iter.reloc())->target();
2107 assert(CodeCache::contains((void *)target), "Wrong section_word_type relocation");
2108 uint delta = (uint)(target - code_blob.content_begin());
2109 reloc_data.at_put(idx, delta);
2110 break;
2111 }
2112 case relocInfo::poll_type:
2113 break;
2114 case relocInfo::poll_return_type:
2115 break;
2116 case relocInfo::post_call_nop_type:
2117 break;
2118 case relocInfo::entry_guard_type:
2119 break;
2120 default:
2121 log_debug(aot, codecache, reloc)("relocation %d unimplemented", (int)iter.type());
2122 return false;
2123 break;
2124 }
2125 if (log.is_enabled()) {
2126 iter.print_current_on(&log);
2127 }
2128 }
2129
2130 // Write additional relocation data: uint per relocation
2131 // Write the count first
2132 int count = reloc_data.length();
2133 write_bytes(&count, sizeof(int));
2134 for (GrowableArrayIterator<uint> iter = reloc_data.begin();
2135 iter != reloc_data.end(); ++iter) {
2136 uint value = *iter;
2137 int n = write_bytes(&value, sizeof(uint));
2138 if (n != sizeof(uint)) {
2139 return false;
2140 }
2141 }
2142 return true;
2143 }
2144
// Patch the relocations of a freshly loaded code blob using the per-relocation
// uint data written by AOTCodeCache::write_relocations(). The iteration here
// must visit relocations in exactly the same order as the writer; 'j' indexes
// the parallel reloc_data array, one slot per relocation.
void AOTCodeReader::fix_relocations(CodeBlob* code_blob, GrowableArray<Handle>* oop_list, GrowableArray<Metadata*>* metadata_list) {
  LogStreamHandle(Trace, aot, reloc) log;
  uint offset = read_position();
  int count = *(int*)addr(offset);
  offset += sizeof(int);
  if (log.is_enabled()) {
    log.print_cr("======== extra relocations count=%d", count);
  }
  uint* reloc_data = (uint*)addr(offset);
  offset += (count * sizeof(uint));
  set_read_position(offset);

  RelocIterator iter(code_blob);
  int j = 0;  // index into reloc_data, advanced once per relocation
  while (iter.next()) {
    switch (iter.type()) {
      case relocInfo::none:
        break;
      case relocInfo::oop_type: {
        assert(code_blob->is_nmethod(), "sanity check");
        oop_Relocation* r = (oop_Relocation*)iter.reloc();
        if (r->oop_is_immediate()) {
          // reloc_data[j] is an index into the reloc-immediate oop list.
          assert(oop_list != nullptr, "sanity check");
          Handle h = oop_list->at(reloc_data[j]);
          r->set_value(cast_from_oop<address>(h()));
        } else {
          r->fix_oop_relocation();
        }
        break;
      }
      case relocInfo::metadata_type: {
        assert(code_blob->is_nmethod(), "sanity check");
        metadata_Relocation* r = (metadata_Relocation*)iter.reloc();
        Metadata* m;
        if (r->metadata_is_immediate()) {
          // reloc_data[j] is an index into the reloc-immediate metadata list.
          assert(metadata_list != nullptr, "sanity check");
          m = metadata_list->at(reloc_data[j]);
        } else {
          // Get already updated value from nmethod.
          int index = r->metadata_index();
          m = code_blob->as_nmethod()->metadata_at(index);
        }
        r->set_value((address)m);
        break;
      }
      case relocInfo::virtual_call_type: // Fall through. They all call resolve_*_call blobs.
      case relocInfo::opt_virtual_call_type:
      case relocInfo::static_call_type: {
        // reloc_data[j] is an address-table id; (address)-1 means "leave as is"
        // (e.g. trampoline call recorded by the writer).
        address dest = _cache->address_for_id(reloc_data[j]);
        if (dest != (address)-1) {
          ((CallRelocation*)iter.reloc())->set_destination(dest);
        }
        break;
      }
      case relocInfo::trampoline_stub_type: {
        address dest = _cache->address_for_id(reloc_data[j]);
        if (dest != (address)-1) {
          ((trampoline_stub_Relocation*)iter.reloc())->set_destination(dest);
        }
        break;
      }
      case relocInfo::static_stub_type:
        break;
      case relocInfo::runtime_call_type: {
        address dest = _cache->address_for_id(reloc_data[j]);
        if (dest != (address)-1) {
          ((CallRelocation*)iter.reloc())->set_destination(dest);
        }
        break;
      }
      case relocInfo::runtime_call_w_cp_type:
        // this relocation should not be in cache (see write_relocations)
        assert(false, "runtime_call_w_cp_type relocation is not implemented");
        break;
      case relocInfo::external_word_type: {
        address target = _cache->address_for_id(reloc_data[j]);
        // Add external address to global table
        int index = ExternalsRecorder::find_index(target);
        // Update index in relocation
        Relocation::add_jint(iter.data(), index);
        external_word_Relocation* reloc = (external_word_Relocation*)iter.reloc();
        assert(reloc->target() == target, "sanity");
        reloc->set_value(target); // Patch address in the code
        break;
      }
      case relocInfo::internal_word_type: {
        // reloc_data[j] is a delta from the blob's content_begin().
        uint delta = reloc_data[j];
        internal_word_Relocation* r = (internal_word_Relocation*)iter.reloc();
        r->fix_relocation_after_aot_load(code_blob->content_begin(), delta);
        break;
      }
      case relocInfo::section_word_type: {
        uint delta = reloc_data[j];
        section_word_Relocation* r = (section_word_Relocation*)iter.reloc();
        r->fix_relocation_after_aot_load(code_blob->content_begin(), delta);
        break;
      }
      case relocInfo::poll_type:
        break;
      case relocInfo::poll_return_type:
        break;
      case relocInfo::post_call_nop_type:
        break;
      case relocInfo::entry_guard_type:
        break;
      default:
        assert(false,"relocation %d unimplemented", (int)iter.type());
        break;
    }
    if (log.is_enabled()) {
      iter.print_current_on(&log);
    }
    j++;
  }
  assert(j == count, "sanity");
}
2261
2262 bool AOTCodeCache::write_nmethod_reloc_immediates(GrowableArray<Handle>& oop_list, GrowableArray<Metadata*>& metadata_list) {
2263 int count = oop_list.length();
2264 if (!write_bytes(&count, sizeof(int))) {
2265 return false;
2266 }
2267 for (GrowableArrayIterator<Handle> iter = oop_list.begin();
2268 iter != oop_list.end(); ++iter) {
2269 Handle h = *iter;
2270 if (!write_oop(h())) {
2271 return false;
2272 }
2273 }
2274
2275 count = metadata_list.length();
2276 if (!write_bytes(&count, sizeof(int))) {
2277 return false;
2278 }
2279 for (GrowableArrayIterator<Metadata*> iter = metadata_list.begin();
2280 iter != metadata_list.end(); ++iter) {
2281 Metadata* m = *iter;
2282 if (!write_metadata(m)) {
2283 return false;
2284 }
2285 }
2286 return true;
2287 }
2288
// Write all metadata entries from the nmethod's data section to the cache.
// NOTE(review): the recorded count is metadata_count()-1 while the loop
// writes every slot in [metadata_begin, metadata_end) — presumably
// metadata_count() includes one extra/sentinel slot so the two agree;
// confirm against nmethod::metadata_count() and the reader side.
bool AOTCodeCache::write_metadata(nmethod* nm) {
  int count = nm->metadata_count()-1;
  if (!write_bytes(&count, sizeof(int))) {
    return false;
  }
  for (Metadata** p = nm->metadata_begin(); p < nm->metadata_end(); p++) {
    if (!write_metadata(*p)) {
      return false;
    }
  }
  return true;
}
2301
2302 bool AOTCodeCache::write_metadata(Metadata* m) {
2303 uint n = 0;
2304 if (m == nullptr) {
2305 DataKind kind = DataKind::Null;
2306 n = write_bytes(&kind, sizeof(int));
2307 if (n != sizeof(int)) {
2308 return false;
2309 }
2310 } else if (m == (Metadata*)Universe::non_oop_word()) {
2311 DataKind kind = DataKind::No_Data;
2312 n = write_bytes(&kind, sizeof(int));
2313 if (n != sizeof(int)) {
2314 return false;
2315 }
2316 } else if (m->is_klass()) {
2317 if (!write_klass((Klass*)m)) {
2318 return false;
2319 }
2320 } else if (m->is_method()) {
2321 if (!write_method((Method*)m)) {
2322 return false;
2323 }
2324 } else if (m->is_methodCounters()) {
2325 DataKind kind = DataKind::MethodCnts;
2326 n = write_bytes(&kind, sizeof(int));
2327 if (n != sizeof(int)) {
2328 return false;
2329 }
2330 if (!write_method(((MethodCounters*)m)->method())) {
2331 return false;
2332 }
2333 log_debug(aot, codecache, metadata)("%d (L%d): Write MethodCounters : " INTPTR_FORMAT, compile_id(), comp_level(), p2i(m));
2334 } else { // Not supported
2335 fatal("metadata : " INTPTR_FORMAT " unimplemented", p2i(m));
2336 return false;
2337 }
2338 return true;
2339 }
2340
// Read back one metadata value written by AOTCodeCache::write_metadata().
// Returns nullptr both for a recorded Null kind and on lookup failure;
// the two cases are distinguished via lookup_failed().
Metadata* AOTCodeReader::read_metadata(const methodHandle& comp_method) {
  uint code_offset = read_position();
  Metadata* m = nullptr;
  DataKind kind = *(DataKind*)addr(code_offset);
  code_offset += sizeof(DataKind);
  set_read_position(code_offset);
  if (kind == DataKind::Null) {
    m = (Metadata*)nullptr;
  } else if (kind == DataKind::No_Data) {
    m = (Metadata*)Universe::non_oop_word();
  } else if (kind == DataKind::Klass) {
    m = (Metadata*)read_klass(comp_method);
  } else if (kind == DataKind::Method) {
    m = (Metadata*)read_method(comp_method);
  } else if (kind == DataKind::MethodCnts) {
    // MethodCounters are stored as a MethodCnts marker followed by a method
    // record (see write_metadata); skip the inner DataKind::Method tag, read
    // the method, then materialize its counters.
    kind = *(DataKind*)addr(code_offset);
    code_offset += sizeof(DataKind);
    set_read_position(code_offset);
    m = (Metadata*)read_method(comp_method);
    if (m != nullptr) {
      Method* method = (Method*)m;
      m = method->get_method_counters(Thread::current());
      if (m == nullptr) {
        set_lookup_failed();
        log_debug(aot, codecache, metadata)("%d (L%d): Failed to get MethodCounters", compile_id(), comp_level());
      } else {
        log_debug(aot, codecache, metadata)("%d (L%d): Read MethodCounters : " INTPTR_FORMAT, compile_id(), comp_level(), p2i(m));
      }
    }
  } else {
    set_lookup_failed();
    log_debug(aot, codecache, metadata)("%d (L%d): Unknown metadata's kind: %d", compile_id(), comp_level(), (int)kind);
  }
  return m;
}
2376
2377 bool AOTCodeCache::write_method(Method* method) {
2378 ResourceMark rm; // To method's name printing
2379 if (AOTCacheAccess::can_generate_aot_code(method)) {
2380 DataKind kind = DataKind::Method;
2381 uint n = write_bytes(&kind, sizeof(int));
2382 if (n != sizeof(int)) {
2383 return false;
2384 }
2385 uint method_offset = AOTCacheAccess::delta_from_base_address((address)method);
2386 n = write_bytes(&method_offset, sizeof(uint));
2387 if (n != sizeof(uint)) {
2388 return false;
2389 }
2390 log_debug(aot, codecache, metadata)("%d (L%d): Wrote method: %s @ 0x%08x",
2391 compile_id(), comp_level(), method->name_and_sig_as_C_string(), method_offset);
2392 return true;
2393 }
2394 log_debug(aot, codecache, metadata)("%d (L%d): Method is not archived: %s",
2395 compile_id(), comp_level(), method->name_and_sig_as_C_string());
2396 set_lookup_failed();
2397 return false;
2398 }
2399
// Read back a Method reference written by AOTCodeCache::write_method().
// Validates that the method and its holder are still in the AOT cache and
// that the holder is a loaded, linked instance klass; otherwise marks
// lookup failure and returns nullptr.
Method* AOTCodeReader::read_method(const methodHandle& comp_method) {
  uint code_offset = read_position();
  uint method_offset = *(uint*)addr(code_offset);
  code_offset += sizeof(uint);
  set_read_position(code_offset);
  Method* m = AOTCacheAccess::convert_offset_to_method(method_offset);
  if (!AOTMetaspace::in_aot_cache((address)m)) {
    // Something changed in CDS
    set_lookup_failed();
    log_debug(aot, codecache, metadata)("Lookup failed for shared method: " INTPTR_FORMAT " is not in CDS ", p2i((address)m));
    return nullptr;
  }
  assert(m->is_method(), "sanity");
  ResourceMark rm;
  Klass* k = m->method_holder();
  if (!k->is_instance_klass()) {
    set_lookup_failed();
    log_debug(aot, codecache, metadata)("%d '%s' (L%d): Lookup failed for holder %s: not instance klass",
                                        compile_id(), comp_method->name_and_sig_as_C_string(), comp_level(), k->external_name());
    return nullptr;
  } else if (!AOTMetaspace::in_aot_cache((address)k)) {
    set_lookup_failed();
    log_debug(aot, codecache, metadata)("%d '%s' (L%d): Lookup failed for holder %s: not in CDS",
                                        compile_id(), comp_method->name_and_sig_as_C_string(), comp_level(), k->external_name());
    return nullptr;
  } else if (!InstanceKlass::cast(k)->is_loaded()) {
    set_lookup_failed();
    log_debug(aot, codecache, metadata)("%d '%s' (L%d): Lookup failed for holder %s: not loaded",
                                        compile_id(), comp_method->name_and_sig_as_C_string(), comp_level(), k->external_name());
    return nullptr;
  } else if (!InstanceKlass::cast(k)->is_linked()) {
    set_lookup_failed();
    log_debug(aot, codecache, metadata)("%d '%s' (L%d): Lookup failed for holder %s: not linked%s",
                                        compile_id(), comp_method->name_and_sig_as_C_string(), comp_level(), k->external_name(), (_preload ? " for code preload" : ""));
    return nullptr;
  }
  log_debug(aot, codecache, metadata)("%d (L%d): Shared method lookup: %s",
                                      compile_id(), comp_level(), m->name_and_sig_as_C_string());
  return m;
}
2440
2441 bool AOTCodeCache::write_klass(Klass* klass) {
2442 uint array_dim = 0;
2443 if (klass->is_objArray_klass()) {
2444 array_dim = ObjArrayKlass::cast(klass)->dimension();
2445 klass = ObjArrayKlass::cast(klass)->bottom_klass(); // overwrites klass
2446 }
2447 uint init_state = 0;
2448 bool can_write = true;
2449 if (klass->is_instance_klass()) {
2450 InstanceKlass* ik = InstanceKlass::cast(klass);
2451 init_state = (ik->is_initialized() ? 1 : 0);
2452 can_write = AOTCacheAccess::can_generate_aot_code_for(ik);
2453 } else {
2454 can_write = AOTCacheAccess::can_generate_aot_code(klass);
2455 }
2456 ResourceMark rm;
2457 uint state = (array_dim << 1) | (init_state & 1);
2458 if (can_write) {
2459 DataKind kind = DataKind::Klass;
2460 uint n = write_bytes(&kind, sizeof(int));
2461 if (n != sizeof(int)) {
2462 return false;
2463 }
2464 // Record state of instance klass initialization and array dimentions.
2465 n = write_bytes(&state, sizeof(int));
2466 if (n != sizeof(int)) {
2467 return false;
2468 }
2469 uint klass_offset = AOTCacheAccess::delta_from_base_address((address)klass);
2470 n = write_bytes(&klass_offset, sizeof(uint));
2471 if (n != sizeof(uint)) {
2472 return false;
2473 }
2474 log_debug(aot, codecache, metadata)("%d (L%d): Registered klass: %s%s%s @ 0x%08x",
2475 compile_id(), comp_level(), klass->external_name(),
2476 (!klass->is_instance_klass() ? "" : (init_state == 1 ? " (initialized)" : " (not-initialized)")),
2477 (array_dim > 0 ? " (object array)" : ""), klass_offset);
2478 return true;
2479 }
2480 log_debug(aot, codecache, metadata)("%d (L%d): Klassis not archived: %s%s%s",
2481 compile_id(), comp_level(), klass->external_name(),
2482 (!klass->is_instance_klass() ? "" : (init_state == 1 ? " (initialized)" : " (not-initialized)")),
2483 (array_dim > 0 ? " (object array)" : ""));
2484 set_lookup_failed();
2485 return false;
2486 }
2487
2488 Klass* AOTCodeReader::read_klass(const methodHandle& comp_method) {
2489 uint code_offset = read_position();
2490 uint state = *(uint*)addr(code_offset);
2491 uint init_state = (state & 1);
2492 uint array_dim = (state >> 1);
2493 code_offset += sizeof(int);
2494 uint klass_offset = *(uint*)addr(code_offset);
2495 code_offset += sizeof(uint);
2496 set_read_position(code_offset);
2497 Klass* k = AOTCacheAccess::convert_offset_to_klass(klass_offset);
2498 if (!AOTMetaspace::in_aot_cache((address)k)) {
2499 // Something changed in CDS
2500 set_lookup_failed();
2501 log_debug(aot, codecache, metadata)("Lookup failed for shared klass: " INTPTR_FORMAT " is not in CDS ", p2i((address)k));
2502 return nullptr;
2503 }
2504 assert(k->is_klass(), "sanity");
2505 ResourceMark rm;
2506 if (k->is_instance_klass() && !InstanceKlass::cast(k)->is_loaded()) {
2507 set_lookup_failed();
2508 log_debug(aot, codecache, metadata)("%d '%s' (L%d): Lookup failed for klass %s: not loaded",
2509 compile_id(), comp_method->name_and_sig_as_C_string(), comp_level(), k->external_name());
2510 return nullptr;
2511 } else
2512 // Allow not initialized klass which was uninitialized during code caching or for preload
2513 if (k->is_instance_klass() && !InstanceKlass::cast(k)->is_initialized() && (init_state == 1) && !_preload) {
2514 set_lookup_failed();
2515 log_debug(aot, codecache, metadata)("%d '%s' (L%d): Lookup failed for klass %s: not initialized",
2516 compile_id(), comp_method->name_and_sig_as_C_string(), comp_level(), k->external_name());
2517 return nullptr;
2518 }
2519 if (array_dim > 0) {
2520 assert(k->is_instance_klass() || k->is_typeArray_klass(), "sanity check");
2521 Klass* ak = k->array_klass_or_null(array_dim);
2522 // FIXME: what would it take to create an array class on the fly?
2523 // Klass* ak = k->array_klass(dim, JavaThread::current());
2524 // guarantee(JavaThread::current()->pending_exception() == nullptr, "");
2525 if (ak == nullptr) {
2526 set_lookup_failed();
2527 log_debug(aot, codecache, metadata)("%d (L%d): %d-dimension array klass lookup failed: %s",
2528 compile_id(), comp_level(), array_dim, k->external_name());
2529 }
2530 log_debug(aot, codecache, metadata)("%d (L%d): Klass lookup: %s (object array)", compile_id(), comp_level(), k->external_name());
2531 return ak;
2532 } else {
2533 log_debug(aot, codecache, metadata)("%d (L%d): Shared klass lookup: %s",
2534 compile_id(), comp_level(), k->external_name());
2535 return k;
2536 }
2537 }
2538
2539 bool AOTCodeCache::write_oop(jobject& jo) {
2540 oop obj = JNIHandles::resolve(jo);
2541 return write_oop(obj);
2542 }
2543
bool AOTCodeCache::write_oop(oop obj) {
  // Serialize one oop constant as a (DataKind, payload) record. Only a
  // closed set of oop shapes is supported: null, the non-oop sentinel word,
  // Class mirrors (primitive or klass), archived Strings, the system and
  // platform class loaders, and archived ("permanent") heap objects.
  // Any other oop fails the lookup and aborts caching of the current code.
  DataKind kind;
  uint n = 0;
  if (obj == nullptr) {
    kind = DataKind::Null;
    n = write_bytes(&kind, sizeof(int));
    if (n != sizeof(int)) {
      return false;
    }
  } else if (cast_from_oop<void *>(obj) == Universe::non_oop_word()) {
    // Placeholder entry: the reader reconstitutes the sentinel.
    kind = DataKind::No_Data;
    n = write_bytes(&kind, sizeof(int));
    if (n != sizeof(int)) {
      return false;
    }
  } else if (java_lang_Class::is_instance(obj)) {
    if (java_lang_Class::is_primitive(obj)) {
      // Primitive mirror: record its BasicType; the reader re-derives the mirror.
      int bt = (int)java_lang_Class::primitive_type(obj);
      kind = DataKind::Primitive;
      n = write_bytes(&kind, sizeof(int));
      if (n != sizeof(int)) {
        return false;
      }
      n = write_bytes(&bt, sizeof(int));
      if (n != sizeof(int)) {
        return false;
      }
      log_debug(aot, codecache, oops)("%d (L%d): Write primitive type klass: %s", compile_id(), comp_level(), type2name((BasicType)bt));
    } else {
      // Non-primitive mirror: record the klass itself; write_klass() emits
      // the DataKind::Klass tag and payload.
      Klass* klass = java_lang_Class::as_Klass(obj);
      if (!write_klass(klass)) {
        return false;
      }
    }
  } else if (java_lang_String::is_instance(obj)) { // archived String case
    int k = AOTCacheAccess::get_archived_object_permanent_index(obj); // k >= 0 means obj is a "permanent heap object"
    ResourceMark rm;
    size_t length_sz = 0;
    const char* string = java_lang_String::as_utf8_string(obj, length_sz);
    if (k >= 0) {
      kind = DataKind::String;
      n = write_bytes(&kind, sizeof(int));
      if (n != sizeof(int)) {
        return false;
      }
      n = write_bytes(&k, sizeof(int));
      if (n != sizeof(int)) {
        return false;
      }
      log_debug(aot, codecache, oops)("%d (L%d): Write String object: " PTR_FORMAT " : %s", compile_id(), comp_level(), p2i(obj), string);
      return true;
    }
    // Not archived String object - bailout
    set_lookup_failed();
    log_debug(aot, codecache, oops)("%d (L%d): Not archived String object: " PTR_FORMAT " : %s",
                                    compile_id(), comp_level(), p2i(obj), string);
    return false;
  } else if (java_lang_Module::is_instance(obj)) {
    fatal("Module object unimplemented");
  } else if (java_lang_ClassLoader::is_instance(obj)) {
    // Only the two well-known loaders can be referenced from cached code.
    if (obj == SystemDictionary::java_system_loader()) {
      kind = DataKind::SysLoader;
      log_debug(aot, codecache, oops)("%d (L%d): Write ClassLoader: java_system_loader", compile_id(), comp_level());
    } else if (obj == SystemDictionary::java_platform_loader()) {
      kind = DataKind::PlaLoader;
      log_debug(aot, codecache, oops)("%d (L%d): Write ClassLoader: java_platform_loader", compile_id(), comp_level());
    } else {
      ResourceMark rm;
      set_lookup_failed();
      log_debug(aot, codecache, oops)("%d (L%d): Not supported Class Loader: " PTR_FORMAT " : %s",
                                      compile_id(), comp_level(), p2i(obj), obj->klass()->external_name());
      return false;
    }
    n = write_bytes(&kind, sizeof(int));
    if (n != sizeof(int)) {
      return false;
    }
  } else { // any other heap object: must be archived ("permanent")
    ResourceMark rm;
    int k = AOTCacheAccess::get_archived_object_permanent_index(obj); // k >= 0 means obj is a "permanent heap object"
    if (k >= 0) {
      kind = DataKind::MH_Oop;
      n = write_bytes(&kind, sizeof(int));
      if (n != sizeof(int)) {
        return false;
      }
      n = write_bytes(&k, sizeof(int));
      if (n != sizeof(int)) {
        return false;
      }
      log_debug(aot, codecache, oops)("%d (L%d): Write MH object: " PTR_FORMAT " : %s",
                                      compile_id(), comp_level(), p2i(obj), obj->klass()->external_name());
      return true;
    }
    // Not archived Java object - bailout
    set_lookup_failed();
    log_debug(aot, codecache, oops)("%d (L%d): Not archived Java object: " PTR_FORMAT " : %s",
                                    compile_id(), comp_level(), p2i(obj), obj->klass()->external_name());
    return false;
  }
  return true;
}
2646
oop AOTCodeReader::read_oop(JavaThread* thread, const methodHandle& comp_method) {
  // Deserialize one oop constant written by AOTCodeCache::write_oop():
  // a DataKind tag followed by a kind-specific payload. Returns nullptr
  // and sets lookup_failed() when the oop cannot be re-materialized.
  uint code_offset = read_position();
  oop obj = nullptr;
  DataKind kind = *(DataKind*)addr(code_offset);
  code_offset += sizeof(DataKind);
  set_read_position(code_offset);
  if (kind == DataKind::Null) {
    return nullptr;
  } else if (kind == DataKind::No_Data) {
    // Reconstitute the non-oop sentinel placeholder.
    return cast_to_oop(Universe::non_oop_word());
  } else if (kind == DataKind::Klass) {
    // The mirror of a (possibly array) klass recorded by write_klass().
    Klass* k = read_klass(comp_method);
    if (k == nullptr) {
      return nullptr;
    }
    obj = k->java_mirror();
    if (obj == nullptr) {
      set_lookup_failed();
      log_debug(aot, codecache, oops)("Lookup failed for java_mirror of klass %s", k->external_name());
      return nullptr;
    }
  } else if (kind == DataKind::Primitive) {
    // Payload: BasicType stored as int; the result is the primitive mirror.
    code_offset = read_position();
    int t = *(int*)addr(code_offset);
    code_offset += sizeof(int);
    set_read_position(code_offset);
    BasicType bt = (BasicType)t;
    obj = java_lang_Class::primitive_mirror(bt);
    log_debug(aot, codecache, oops)("%d (L%d): Read primitive type klass: %s", compile_id(), comp_level(), type2name(bt));
  } else if (kind == DataKind::String) {
    // Payload: permanent-heap-object index of the archived String.
    code_offset = read_position();
    int k = *(int*)addr(code_offset);
    code_offset += sizeof(int);
    set_read_position(code_offset);
    obj = AOTCacheAccess::get_archived_object(k);
    if (obj == nullptr) {
      set_lookup_failed();
      log_debug(aot, codecache, oops)("Lookup failed for String object");
      return nullptr;
    }
    assert(java_lang_String::is_instance(obj), "must be string");

    ResourceMark rm;
    size_t length_sz = 0;
    const char* string = java_lang_String::as_utf8_string(obj, length_sz);
    log_debug(aot, codecache, oops)("%d (L%d): Read String object: %s", compile_id(), comp_level(), string);
  } else if (kind == DataKind::SysLoader) {
    obj = SystemDictionary::java_system_loader();
    log_debug(aot, codecache, oops)("%d (L%d): Read java_system_loader", compile_id(), comp_level());
  } else if (kind == DataKind::PlaLoader) {
    obj = SystemDictionary::java_platform_loader();
    log_debug(aot, codecache, oops)("%d (L%d): Read java_platform_loader", compile_id(), comp_level());
  } else if (kind == DataKind::MH_Oop) {
    // Payload: permanent-heap-object index of an archived heap object.
    code_offset = read_position();
    int k = *(int*)addr(code_offset);
    code_offset += sizeof(int);
    set_read_position(code_offset);
    obj = AOTCacheAccess::get_archived_object(k);
    if (obj == nullptr) {
      set_lookup_failed();
      log_debug(aot, codecache, oops)("Lookup failed for MH object");
      return nullptr;
    }
    ResourceMark rm;
    log_debug(aot, codecache, oops)("%d (L%d): Read MH object: " PTR_FORMAT " : %s",
                                    compile_id(), comp_level(), p2i(obj), obj->klass()->external_name());
  } else {
    // Unknown tag: the cache stream is corrupt or from an incompatible writer.
    set_lookup_failed();
    log_debug(aot, codecache, oops)("%d (L%d): Unknown oop's kind: %d",
                                    compile_id(), comp_level(), (int)kind);
    return nullptr;
  }
  return obj;
}
2721
bool AOTCodeReader::read_oop_metadata_list(JavaThread* thread, ciMethod* target, GrowableArray<Handle> &oop_list, GrowableArray<Metadata*> &metadata_list, OopRecorder* oop_recorder) {
  // Read the oop list and then the metadata list for a cached code entry
  // (each list is an int count followed by that many records), appending
  // the results to oop_list/metadata_list and optionally registering each
  // entry with oop_recorder. Returns false on the first failed lookup.
  // NOTE(review): both the `thread` parameter and JavaThread::current() are
  // used below — presumably they are the same thread; confirm with callers.
  methodHandle comp_method(JavaThread::current(), target->get_Method());
  JavaThread* current = JavaThread::current();
  uint offset = read_position();
  int count = *(int *)addr(offset);
  offset += sizeof(int);
  set_read_position(offset);
  for (int i = 0; i < count; i++) {
    oop obj = read_oop(current, comp_method);
    if (lookup_failed()) {
      return false;
    }
    Handle h(thread, obj);
    oop_list.append(h);
    if (oop_recorder != nullptr) {
      jobject jo = JNIHandles::make_local(thread, obj);
      // Register the oop with the recorder: real handles get a found index,
      // placeholders get a freshly allocated one.
      if (oop_recorder->is_real(jo)) {
        oop_recorder->find_index(jo);
      } else {
        oop_recorder->allocate_oop_index(jo);
      }
    }
    LogStreamHandle(Debug, aot, codecache, oops) log;
    if (log.is_enabled()) {
      log.print("%d: " INTPTR_FORMAT " ", i, p2i(obj));
      if (obj == Universe::non_oop_word()) {
        log.print("non-oop word");
      } else if (obj == nullptr) {
        log.print("nullptr-oop");
      } else {
        obj->print_value_on(&log);
      }
      log.cr();
    }
  }

  // Second section: the metadata list, in the same count-prefixed layout.
  offset = read_position();
  count = *(int *)addr(offset);
  offset += sizeof(int);
  set_read_position(offset);
  for (int i = 0; i < count; i++) {
    Metadata* m = read_metadata(comp_method);
    if (lookup_failed()) {
      return false;
    }
    metadata_list.append(m);
    if (oop_recorder != nullptr) {
      if (oop_recorder->is_real(m)) {
        oop_recorder->find_index(m);
      } else {
        oop_recorder->allocate_metadata_index(m);
      }
    }
    LogTarget(Debug, aot, codecache, metadata) log;
    if (log.is_enabled()) {
      LogStream ls(log);
      ls.print("%d: " INTPTR_FORMAT " ", i, p2i(m));
      if (m == (Metadata*)Universe::non_oop_word()) {
        ls.print("non-metadata word");
      } else if (m == nullptr) {
        ls.print("nullptr-oop");
      } else {
        Metadata::print_value_on_maybe_null(&ls, m);
      }
      ls.cr();
    }
  }
  return true;
}
2791
2792 bool AOTCodeCache::write_oop_map_set(CodeBlob& cb) {
2793 ImmutableOopMapSet* oopmaps = cb.oop_maps();
2794 int oopmaps_size = oopmaps->nr_of_bytes();
2795 if (!write_bytes(&oopmaps_size, sizeof(int))) {
2796 return false;
2797 }
2798 uint n = write_bytes(oopmaps, oopmaps->nr_of_bytes());
2799 if (n != (uint)oopmaps->nr_of_bytes()) {
2800 return false;
2801 }
2802 return true;
2803 }
2804
2805 ImmutableOopMapSet* AOTCodeReader::read_oop_map_set() {
2806 uint offset = read_position();
2807 int size = *(int *)addr(offset);
2808 offset += sizeof(int);
2809 ImmutableOopMapSet* oopmaps = (ImmutableOopMapSet *)addr(offset);
2810 offset += size;
2811 set_read_position(offset);
2812 return oopmaps;
2813 }
2814
bool AOTCodeCache::write_oops(nmethod* nm) {
  // Write the nmethod's oop constants: an int count followed by one record
  // per oop (see write_oop(oop)).
  // NOTE(review): the -1 presumably compensates for oops_count() including
  // an extra reserved slot — confirm against nmethod::oops_count(); the loop
  // below iterates exactly oops_end() - oops_begin() entries.
  int count = nm->oops_count()-1;
  if (!write_bytes(&count, sizeof(int))) {
    return false;
  }
  for (oop* p = nm->oops_begin(); p < nm->oops_end(); p++) {
    if (!write_oop(*p)) {
      return false;
    }
  }
  return true;
}
2827
2828 #ifndef PRODUCT
bool AOTCodeCache::write_asm_remarks(AsmRemarks& asm_remarks, bool use_string_table) {
  // Write asm remarks: reserve a uint count slot up front, then emit one
  // (code offset, string) record per remark, and patch the final count into
  // the reserved slot. With use_string_table the string payload is an int id
  // into the AOT C-string table; otherwise the characters are stored inline
  // (including the terminating NUL). Returns false on any write failure.
  uint* count_ptr = (uint *)reserve_bytes(sizeof(uint));
  if (count_ptr == nullptr) {
    return false;
  }
  uint count = 0;
  bool result = asm_remarks.iterate([&] (uint offset, const char* str) -> bool {
    log_trace(aot, codecache, stubs)("asm remark offset=%d, str='%s'", offset, str);
    uint n = write_bytes(&offset, sizeof(uint));
    if (n != sizeof(uint)) {
      return false; // stops the iteration and makes result false
    }
    if (use_string_table) {
      // Intern the string and store its table id instead of the characters.
      const char* cstr = add_C_string(str);
      int id = _table->id_for_C_string((address)cstr);
      assert(id != -1, "asm remark string '%s' not found in AOTCodeAddressTable", str);
      n = write_bytes(&id, sizeof(int));
      if (n != sizeof(int)) {
        return false;
      }
    } else {
      n = write_bytes(str, (uint)strlen(str) + 1);
      if (n != strlen(str) + 1) {
        return false;
      }
    }
    count += 1;
    return true;
  });
  // Patch the number of successfully written records into the reserved slot.
  *count_ptr = count;
  return result;
}
2862
2863 void AOTCodeReader::read_asm_remarks(AsmRemarks& asm_remarks, bool use_string_table) {
2864 // Read asm remarks
2865 uint offset = read_position();
2866 uint count = *(uint *)addr(offset);
2867 offset += sizeof(uint);
2868 for (uint i = 0; i < count; i++) {
2869 uint remark_offset = *(uint *)addr(offset);
2870 offset += sizeof(uint);
2871 const char* remark = nullptr;
2872 if (use_string_table) {
2873 int remark_string_id = *(uint *)addr(offset);
2874 offset += sizeof(int);
2875 remark = (const char*)_cache->address_for_C_string(remark_string_id);
2876 } else {
2877 remark = (const char*)addr(offset);
2878 offset += (uint)strlen(remark)+1;
2879 }
2880 asm_remarks.insert(remark_offset, remark);
2881 }
2882 set_read_position(offset);
2883 }
2884
2885 bool AOTCodeCache::write_dbg_strings(DbgStrings& dbg_strings, bool use_string_table) {
2886 // Write dbg strings
2887 uint* count_ptr = (uint *)reserve_bytes(sizeof(uint));
2888 if (count_ptr == nullptr) {
2889 return false;
2890 }
2891 uint count = 0;
2892 bool result = dbg_strings.iterate([&] (const char* str) -> bool {
2893 log_trace(aot, codecache, stubs)("dbg string=%s", str);
2894 if (use_string_table) {
2895 const char* cstr = add_C_string(str);
2896 int id = _table->id_for_C_string((address)cstr);
2897 assert(id != -1, "db string '%s' not found in AOTCodeAddressTable", str);
2898 uint n = write_bytes(&id, sizeof(int));
2899 if (n != sizeof(int)) {
2900 return false;
2901 }
2902 } else {
2903 uint n = write_bytes(str, (uint)strlen(str) + 1);
2904 if (n != strlen(str) + 1) {
2905 return false;
2906 }
2907 }
2908 count += 1;
2909 return true;
2910 });
2911 *count_ptr = count;
2912 return result;
2913 }
2914
2915 void AOTCodeReader::read_dbg_strings(DbgStrings& dbg_strings, bool use_string_table) {
2916 // Read dbg strings
2917 uint offset = read_position();
2918 uint count = *(uint *)addr(offset);
2919 offset += sizeof(uint);
2920 for (uint i = 0; i < count; i++) {
2921 const char* str = nullptr;
2922 if (use_string_table) {
2923 int string_id = *(uint *)addr(offset);
2924 offset += sizeof(int);
2925 str = (const char*)_cache->address_for_C_string(string_id);
2926 } else {
2927 str = (const char*)addr(offset);
2928 offset += (uint)strlen(str)+1;
2929 }
2930 dbg_strings.insert(str);
2931 }
2932 set_read_position(offset);
2933 }
2934 #endif // PRODUCT
2935
2936 //======================= AOTCodeAddressTable ===============
2937
2938 // address table ids for generated routines, external addresses and C
2939 // string addresses are partitioned into positive integer ranges
2940 // defined by the following positive base and max values
2941 // i.e. [_extrs_base, _extrs_base + _extrs_max -1],
2942 // [_stubs_base, _stubs_base + _stubs_max -1],
2943 // ...
2944 // [_c_str_base, _c_str_base + _c_str_max -1],
2945 #define _extrs_max 140
2946 #define _stubs_max 210
2947 #define _shared_blobs_max 25
2948 #define _C1_blobs_max 50
2949 #define _C2_blobs_max 25
2950 #define _blobs_max (_shared_blobs_max+_C1_blobs_max+_C2_blobs_max)
2951 #define _all_max (_extrs_max+_stubs_max+_blobs_max)
2952
2953 #define _extrs_base 0
2954 #define _stubs_base (_extrs_base + _extrs_max)
2955 #define _shared_blobs_base (_stubs_base + _stubs_max)
2956 #define _C1_blobs_base (_shared_blobs_base + _shared_blobs_max)
2957 #define _C2_blobs_base (_C1_blobs_base + _C1_blobs_max)
2958 #define _blobs_end (_shared_blobs_base + _blobs_max)
2959 #if (_C2_blobs_base >= _all_max)
2960 #error AOTCodeAddressTable ranges need adjusting
2961 #endif
2962
2963 #define SET_ADDRESS(type, addr) \
2964 { \
2965 type##_addr[type##_length++] = (address) (addr); \
2966 assert(type##_length <= type##_max, "increase size"); \
2967 }
2968
2969 static bool initializing_extrs = false;
2970
// Populate the external-address partition of the table with the VM runtime
// entry points that AOT-cached code may reference. The order of SET_ADDRESS
// calls defines the ids, so it must stay identical between the assembly
// (dump) and production (load) runs.
void AOTCodeAddressTable::init_extrs() {
  if (_extrs_complete || initializing_extrs) return; // Done already

  assert(_blobs_end <= _all_max, "AOTCodeAddress table ranges need adjusting");

  initializing_extrs = true;
  _extrs_addr = NEW_C_HEAP_ARRAY(address, _extrs_max, mtCode);

  _extrs_length = 0;

  // Record addresses of VM runtime methods
  SET_ADDRESS(_extrs, SharedRuntime::fixup_callers_callsite);
  SET_ADDRESS(_extrs, SharedRuntime::handle_wrong_method);
  SET_ADDRESS(_extrs, SharedRuntime::handle_wrong_method_abstract);
  SET_ADDRESS(_extrs, SharedRuntime::handle_wrong_method_ic_miss);
  {
    // Required by Shared blobs
    SET_ADDRESS(_extrs, Deoptimization::fetch_unroll_info);
    SET_ADDRESS(_extrs, Deoptimization::unpack_frames);
    SET_ADDRESS(_extrs, SafepointSynchronize::handle_polling_page_exception);
    SET_ADDRESS(_extrs, SharedRuntime::resolve_opt_virtual_call_C);
    SET_ADDRESS(_extrs, SharedRuntime::resolve_virtual_call_C);
    SET_ADDRESS(_extrs, SharedRuntime::resolve_static_call_C);
    SET_ADDRESS(_extrs, SharedRuntime::throw_StackOverflowError);
    SET_ADDRESS(_extrs, SharedRuntime::throw_delayed_StackOverflowError);
    SET_ADDRESS(_extrs, SharedRuntime::throw_AbstractMethodError);
    SET_ADDRESS(_extrs, SharedRuntime::throw_IncompatibleClassChangeError);
    SET_ADDRESS(_extrs, SharedRuntime::throw_NullPointerException_at_call);
    // NOTE(review): throw_StackOverflowError is registered a second time here
    // (also a few lines above) — the duplicate consumes a table slot; confirm
    // whether this is intentional before removing (removal shifts later ids).
    SET_ADDRESS(_extrs, SharedRuntime::throw_StackOverflowError);
    SET_ADDRESS(_extrs, CompressedOops::base_addr());
    SET_ADDRESS(_extrs, CompressedKlassPointers::base_addr());
  }
  {
    // Required by initial stubs
    SET_ADDRESS(_extrs, StubRoutines::crc_table_addr());
#if defined(AMD64)
    SET_ADDRESS(_extrs, StubRoutines::crc32c_table_addr());
#endif
  }

#ifdef COMPILER1
  {
    // Required by C1 blobs
    SET_ADDRESS(_extrs, static_cast<int (*)(oopDesc*)>(SharedRuntime::dtrace_object_alloc));
    SET_ADDRESS(_extrs, SharedRuntime::exception_handler_for_return_address);
    SET_ADDRESS(_extrs, SharedRuntime::register_finalizer);
    SET_ADDRESS(_extrs, Runtime1::is_instance_of);
    SET_ADDRESS(_extrs, Runtime1::exception_handler_for_pc);
    SET_ADDRESS(_extrs, Runtime1::check_abort_on_vm_exception);
    SET_ADDRESS(_extrs, Runtime1::new_instance);
    SET_ADDRESS(_extrs, Runtime1::counter_overflow);
    SET_ADDRESS(_extrs, Runtime1::new_type_array);
    SET_ADDRESS(_extrs, Runtime1::new_object_array);
    SET_ADDRESS(_extrs, Runtime1::new_multi_array);
    SET_ADDRESS(_extrs, Runtime1::throw_range_check_exception);
    SET_ADDRESS(_extrs, Runtime1::throw_index_exception);
    SET_ADDRESS(_extrs, Runtime1::throw_div0_exception);
    SET_ADDRESS(_extrs, Runtime1::throw_null_pointer_exception);
    SET_ADDRESS(_extrs, Runtime1::throw_array_store_exception);
    SET_ADDRESS(_extrs, Runtime1::throw_class_cast_exception);
    SET_ADDRESS(_extrs, Runtime1::throw_incompatible_class_change_error);
    SET_ADDRESS(_extrs, Runtime1::monitorenter);
    SET_ADDRESS(_extrs, Runtime1::monitorexit);
    SET_ADDRESS(_extrs, Runtime1::deoptimize);
    SET_ADDRESS(_extrs, Runtime1::access_field_patching);
    SET_ADDRESS(_extrs, Runtime1::move_klass_patching);
    SET_ADDRESS(_extrs, Runtime1::move_mirror_patching);
    SET_ADDRESS(_extrs, Runtime1::move_appendix_patching);
    SET_ADDRESS(_extrs, Runtime1::predicate_failed_trap);
    SET_ADDRESS(_extrs, Runtime1::unimplemented_entry);
    SET_ADDRESS(_extrs, Runtime1::trace_block_entry);
#ifdef X86
    SET_ADDRESS(_extrs, LIR_Assembler::float_signmask_pool);
    SET_ADDRESS(_extrs, LIR_Assembler::double_signmask_pool);
    SET_ADDRESS(_extrs, LIR_Assembler::float_signflip_pool);
    SET_ADDRESS(_extrs, LIR_Assembler::double_signflip_pool);
#endif
#ifndef PRODUCT
    SET_ADDRESS(_extrs, os::breakpoint);
#endif
  }
#endif // COMPILER1

#ifdef COMPILER2
  {
    // Required by C2 blobs
    SET_ADDRESS(_extrs, Deoptimization::uncommon_trap);
    SET_ADDRESS(_extrs, OptoRuntime::handle_exception_C);
    SET_ADDRESS(_extrs, OptoRuntime::new_instance_C);
    SET_ADDRESS(_extrs, OptoRuntime::new_array_C);
    SET_ADDRESS(_extrs, OptoRuntime::new_array_nozero_C);
    SET_ADDRESS(_extrs, OptoRuntime::multianewarray2_C);
    SET_ADDRESS(_extrs, OptoRuntime::multianewarray3_C);
    SET_ADDRESS(_extrs, OptoRuntime::multianewarray4_C);
    SET_ADDRESS(_extrs, OptoRuntime::multianewarray5_C);
    SET_ADDRESS(_extrs, OptoRuntime::multianewarrayN_C);
    SET_ADDRESS(_extrs, OptoRuntime::complete_monitor_locking_C);
    SET_ADDRESS(_extrs, OptoRuntime::monitor_notify_C);
    SET_ADDRESS(_extrs, OptoRuntime::monitor_notifyAll_C);
    SET_ADDRESS(_extrs, OptoRuntime::rethrow_C);
    SET_ADDRESS(_extrs, OptoRuntime::slow_arraycopy_C);
    SET_ADDRESS(_extrs, OptoRuntime::register_finalizer_C);
    SET_ADDRESS(_extrs, OptoRuntime::class_init_barrier_C);
    SET_ADDRESS(_extrs, OptoRuntime::compile_method_C);
    SET_ADDRESS(_extrs, OptoRuntime::vthread_end_first_transition_C);
    SET_ADDRESS(_extrs, OptoRuntime::vthread_start_final_transition_C);
    SET_ADDRESS(_extrs, OptoRuntime::vthread_start_transition_C);
    SET_ADDRESS(_extrs, OptoRuntime::vthread_end_transition_C);
#if defined(AMD64)
    // Used by C2 intrinsic
    SET_ADDRESS(_extrs, StubRoutines::x86::arrays_hashcode_powers_of_31());
#endif
  }
#endif // COMPILER2
#if INCLUDE_G1GC
  SET_ADDRESS(_extrs, G1BarrierSetRuntime::write_ref_field_pre_entry);
#endif

#if INCLUDE_SHENANDOAHGC
  SET_ADDRESS(_extrs, ShenandoahRuntime::arraycopy_barrier_oop);
  SET_ADDRESS(_extrs, ShenandoahRuntime::arraycopy_barrier_narrow_oop);
  SET_ADDRESS(_extrs, ShenandoahRuntime::clone_barrier);
  SET_ADDRESS(_extrs, ShenandoahRuntime::load_reference_barrier_strong);
  SET_ADDRESS(_extrs, ShenandoahRuntime::load_reference_barrier_strong_narrow);
  SET_ADDRESS(_extrs, ShenandoahRuntime::load_reference_barrier_weak);
  SET_ADDRESS(_extrs, ShenandoahRuntime::load_reference_barrier_weak_narrow);
  SET_ADDRESS(_extrs, ShenandoahRuntime::load_reference_barrier_phantom);
  SET_ADDRESS(_extrs, ShenandoahRuntime::load_reference_barrier_phantom_narrow);
  SET_ADDRESS(_extrs, ShenandoahRuntime::write_barrier_pre);
#endif

#if INCLUDE_ZGC
  SET_ADDRESS(_extrs, ZBarrierSetRuntime::load_barrier_on_oop_field_preloaded_addr());
  SET_ADDRESS(_extrs, ZBarrierSetRuntime::load_barrier_on_phantom_oop_field_preloaded_addr());
#if defined(AMD64)
  SET_ADDRESS(_extrs, &ZPointerLoadShift);
#endif
#if defined(AARCH64)
  BarrierSetAssembler* bs_asm = BarrierSet::barrier_set()->barrier_set_assembler();
  SET_ADDRESS(_extrs, bs_asm->patching_epoch_addr());
#endif
#endif // INCLUDE_ZGC

  SET_ADDRESS(_extrs, SharedRuntime::rc_trace_method_entry);
  SET_ADDRESS(_extrs, SharedRuntime::reguard_yellow_pages);
  SET_ADDRESS(_extrs, SharedRuntime::dtrace_method_exit);

  SET_ADDRESS(_extrs, SharedRuntime::complete_monitor_unlocking_C);
  SET_ADDRESS(_extrs, SharedRuntime::enable_stack_reserved_zone);
#if defined(AMD64) && !defined(ZERO)
  SET_ADDRESS(_extrs, SharedRuntime::montgomery_multiply);
  SET_ADDRESS(_extrs, SharedRuntime::montgomery_square);
#endif // AMD64
  // Arithmetic/math runtime entries used by compiled code.
  SET_ADDRESS(_extrs, SharedRuntime::d2f);
  SET_ADDRESS(_extrs, SharedRuntime::d2i);
  SET_ADDRESS(_extrs, SharedRuntime::d2l);
  SET_ADDRESS(_extrs, SharedRuntime::dcos);
  SET_ADDRESS(_extrs, SharedRuntime::dexp);
  SET_ADDRESS(_extrs, SharedRuntime::dlog);
  SET_ADDRESS(_extrs, SharedRuntime::dlog10);
  SET_ADDRESS(_extrs, SharedRuntime::dpow);
  SET_ADDRESS(_extrs, SharedRuntime::dsin);
  SET_ADDRESS(_extrs, SharedRuntime::dtan);
  SET_ADDRESS(_extrs, SharedRuntime::f2i);
  SET_ADDRESS(_extrs, SharedRuntime::f2l);
#ifndef ZERO
  SET_ADDRESS(_extrs, SharedRuntime::drem);
  SET_ADDRESS(_extrs, SharedRuntime::frem);
#endif
  SET_ADDRESS(_extrs, SharedRuntime::l2d);
  SET_ADDRESS(_extrs, SharedRuntime::l2f);
  SET_ADDRESS(_extrs, SharedRuntime::ldiv);
  SET_ADDRESS(_extrs, SharedRuntime::lmul);
  SET_ADDRESS(_extrs, SharedRuntime::lrem);

  SET_ADDRESS(_extrs, ThreadIdentifier::unsafe_offset());
  SET_ADDRESS(_extrs, Thread::current);
  SET_ADDRESS(_extrs, ObjectMonitorTable::current_table_address());

  SET_ADDRESS(_extrs, os::javaTimeMillis);
  SET_ADDRESS(_extrs, os::javaTimeNanos);
  // For JFR
  SET_ADDRESS(_extrs, os::elapsed_counter);
#if defined(X86) && !defined(ZERO)
  SET_ADDRESS(_extrs, Rdtsc::elapsed_counter);
#endif

#if INCLUDE_JVMTI
  SET_ADDRESS(_extrs, &JvmtiExport::_should_notify_object_alloc);
#endif /* INCLUDE_JVMTI */
  SET_ADDRESS(_extrs, MountUnmountDisabler::notify_jvmti_events_address());
  SET_ADDRESS(_extrs, MountUnmountDisabler::global_vthread_transition_disable_count_address());

#ifndef PRODUCT
  SET_ADDRESS(_extrs, &SharedRuntime::_partial_subtype_ctr);
  SET_ADDRESS(_extrs, JavaThread::verify_cross_modify_fence_failure);
#endif

#ifndef ZERO
#if defined(AMD64) || defined(AARCH64) || defined(RISCV64)
  SET_ADDRESS(_extrs, MacroAssembler::debug64);
#endif
#if defined(AARCH64)
  SET_ADDRESS(_extrs, JavaThread::aarch64_get_thread_helper);
#endif
#endif // ZERO

  // addresses of fields in AOT runtime constants area
  address* p = AOTRuntimeConstants::field_addresses_list();
  while (*p != nullptr) {
    SET_ADDRESS(_extrs, *p++);
  }

  _extrs_complete = true;
  log_info(aot, codecache, init)("External addresses recorded");
}
3187
3188 static bool initializing_early_stubs = false;
3189
// Allocate the stubs partition and record the stub entries that are
// generated early in VM startup (before full stub generation). The
// remaining stub entries are added later by init_stubs(), continuing
// from _stubs_length.
void AOTCodeAddressTable::init_early_stubs() {
  if (_complete || initializing_early_stubs) return; // Done already
  initializing_early_stubs = true;
  _stubs_addr = NEW_C_HEAP_ARRAY(address, _stubs_max, mtCode);
  _stubs_length = 0;
  SET_ADDRESS(_stubs, StubRoutines::forward_exception_entry());

  {
    // Required by C1 blobs
#if defined(AMD64) && !defined(ZERO)
    SET_ADDRESS(_stubs, StubRoutines::x86::double_sign_flip());
    SET_ADDRESS(_stubs, StubRoutines::x86::d2l_fixup());
#endif // AMD64
  }

  _early_stubs_complete = true;
  log_info(aot, codecache, init)("Early stubs recorded");
}
3208
3209 static bool initializing_shared_blobs = false;
3210
// Allocate the blobs partition (shared + C1 + C2 sub-ranges) and record the
// entry points of the shared runtime blobs. The SET_ADDRESS order defines
// the ids, so it must match between assembly and production runs.
void AOTCodeAddressTable::init_shared_blobs() {
  if (_complete || initializing_shared_blobs) return; // Done already
  initializing_shared_blobs = true;
  address* blobs_addr = NEW_C_HEAP_ARRAY(address, _blobs_max, mtCode);

  // Divide _shared_blobs_addr array to chunks because they could be initialized in parallel
  _shared_blobs_addr = blobs_addr;
  _C1_blobs_addr = _shared_blobs_addr + _shared_blobs_max;// C1 blobs addresses stored after shared blobs
  _C2_blobs_addr = _C1_blobs_addr + _C1_blobs_max; // C2 blobs addresses stored after C1 blobs

  _shared_blobs_length = 0;
  _C1_blobs_length = 0;
  _C2_blobs_length = 0;

  // clear the address table
  memset(blobs_addr, 0, sizeof(address)* _blobs_max);

  // Record addresses of generated code blobs
  SET_ADDRESS(_shared_blobs, SharedRuntime::get_handle_wrong_method_stub());
  SET_ADDRESS(_shared_blobs, SharedRuntime::get_ic_miss_stub());
  SET_ADDRESS(_shared_blobs, SharedRuntime::deopt_blob()->unpack());
  SET_ADDRESS(_shared_blobs, SharedRuntime::deopt_blob()->unpack_with_exception());
  SET_ADDRESS(_shared_blobs, SharedRuntime::deopt_blob()->unpack_with_reexecution());
  SET_ADDRESS(_shared_blobs, SharedRuntime::deopt_blob()->unpack_with_exception_in_tls());
  SET_ADDRESS(_shared_blobs, SharedRuntime::get_resolve_opt_virtual_call_stub());
  SET_ADDRESS(_shared_blobs, SharedRuntime::get_resolve_virtual_call_stub());
  SET_ADDRESS(_shared_blobs, SharedRuntime::get_resolve_static_call_stub());
  SET_ADDRESS(_shared_blobs, SharedRuntime::deopt_blob()->entry_point());
  SET_ADDRESS(_shared_blobs, SharedRuntime::polling_page_safepoint_handler_blob()->entry_point());
  SET_ADDRESS(_shared_blobs, SharedRuntime::polling_page_return_handler_blob()->entry_point());
#ifdef COMPILER2
  // polling_page_vectors_safepoint_handler_blob can be nullptr if AVX feature is not present or is disabled
  if (SharedRuntime::polling_page_vectors_safepoint_handler_blob() != nullptr) {
    SET_ADDRESS(_shared_blobs, SharedRuntime::polling_page_vectors_safepoint_handler_blob()->entry_point());
  }
#endif
#if INCLUDE_JVMCI
  if (EnableJVMCI) {
    SET_ADDRESS(_shared_blobs, SharedRuntime::deopt_blob()->uncommon_trap());
    SET_ADDRESS(_shared_blobs, SharedRuntime::deopt_blob()->implicit_exception_uncommon_trap());
  }
#endif
  SET_ADDRESS(_shared_blobs, SharedRuntime::throw_AbstractMethodError_entry());
  SET_ADDRESS(_shared_blobs, SharedRuntime::throw_IncompatibleClassChangeError_entry());
  SET_ADDRESS(_shared_blobs, SharedRuntime::throw_NullPointerException_at_call_entry());
  SET_ADDRESS(_shared_blobs, SharedRuntime::throw_StackOverflowError_entry());
  SET_ADDRESS(_shared_blobs, SharedRuntime::throw_delayed_StackOverflowError_entry());

  assert(_shared_blobs_length <= _shared_blobs_max, "increase _shared_blobs_max to %d", _shared_blobs_length);
  _shared_blobs_complete = true;
  log_info(aot, codecache, init)("All shared blobs recorded");
}
3263
3264 static bool initializing_stubs = false;
3265 void AOTCodeAddressTable::init_stubs() {
3266 if (_complete || initializing_stubs) return; // Done already
3267 assert(_early_stubs_complete, "early stubs whould be initialized");
3268 initializing_stubs = true;
3269
3270 // Stubs
3271 SET_ADDRESS(_stubs, StubRoutines::method_entry_barrier());
3272 SET_ADDRESS(_stubs, StubRoutines::atomic_xchg_entry());
3273 SET_ADDRESS(_stubs, StubRoutines::atomic_cmpxchg_entry());
3274 SET_ADDRESS(_stubs, StubRoutines::atomic_cmpxchg_long_entry());
3275 SET_ADDRESS(_stubs, StubRoutines::atomic_add_entry());
3276 SET_ADDRESS(_stubs, StubRoutines::fence_entry());
3277
3278 SET_ADDRESS(_stubs, StubRoutines::cont_thaw());
3279 SET_ADDRESS(_stubs, StubRoutines::cont_returnBarrier());
3280 SET_ADDRESS(_stubs, StubRoutines::cont_returnBarrierExc());
3281
3282 JFR_ONLY(SET_ADDRESS(_stubs, SharedRuntime::jfr_write_checkpoint());)
3283
3284 SET_ADDRESS(_stubs, StubRoutines::jbyte_arraycopy());
3285 SET_ADDRESS(_stubs, StubRoutines::jshort_arraycopy());
3286 SET_ADDRESS(_stubs, StubRoutines::jint_arraycopy());
3287 SET_ADDRESS(_stubs, StubRoutines::jlong_arraycopy());
3288 SET_ADDRESS(_stubs, StubRoutines::_oop_arraycopy);
3289 SET_ADDRESS(_stubs, StubRoutines::_oop_arraycopy_uninit);
3290
3291 SET_ADDRESS(_stubs, StubRoutines::jbyte_disjoint_arraycopy());
3292 SET_ADDRESS(_stubs, StubRoutines::jshort_disjoint_arraycopy());
3293 SET_ADDRESS(_stubs, StubRoutines::jint_disjoint_arraycopy());
3294 SET_ADDRESS(_stubs, StubRoutines::jlong_disjoint_arraycopy());
3295 SET_ADDRESS(_stubs, StubRoutines::_oop_disjoint_arraycopy);
3296 SET_ADDRESS(_stubs, StubRoutines::_oop_disjoint_arraycopy_uninit);
3297
3298 SET_ADDRESS(_stubs, StubRoutines::arrayof_jbyte_arraycopy());
3299 SET_ADDRESS(_stubs, StubRoutines::arrayof_jshort_arraycopy());
3300 SET_ADDRESS(_stubs, StubRoutines::arrayof_jint_arraycopy());
3301 SET_ADDRESS(_stubs, StubRoutines::arrayof_jlong_arraycopy());
3302 SET_ADDRESS(_stubs, StubRoutines::_arrayof_oop_arraycopy);
3303 SET_ADDRESS(_stubs, StubRoutines::_arrayof_oop_arraycopy_uninit);
3304
3305 SET_ADDRESS(_stubs, StubRoutines::arrayof_jbyte_disjoint_arraycopy());
3306 SET_ADDRESS(_stubs, StubRoutines::arrayof_jshort_disjoint_arraycopy());
3307 SET_ADDRESS(_stubs, StubRoutines::arrayof_jint_disjoint_arraycopy());
3308 SET_ADDRESS(_stubs, StubRoutines::arrayof_jlong_disjoint_arraycopy());
3309 SET_ADDRESS(_stubs, StubRoutines::_arrayof_oop_disjoint_arraycopy);
3310 SET_ADDRESS(_stubs, StubRoutines::_arrayof_oop_disjoint_arraycopy_uninit);
3311
3312 SET_ADDRESS(_stubs, StubRoutines::_checkcast_arraycopy);
3313 SET_ADDRESS(_stubs, StubRoutines::_checkcast_arraycopy_uninit);
3314
3315 SET_ADDRESS(_stubs, StubRoutines::unsafe_arraycopy());
3316 SET_ADDRESS(_stubs, StubRoutines::generic_arraycopy());
3317
3318 SET_ADDRESS(_stubs, StubRoutines::jbyte_fill());
3319 SET_ADDRESS(_stubs, StubRoutines::jshort_fill());
3320 SET_ADDRESS(_stubs, StubRoutines::jint_fill());
3321 SET_ADDRESS(_stubs, StubRoutines::arrayof_jbyte_fill());
3322 SET_ADDRESS(_stubs, StubRoutines::arrayof_jshort_fill());
3323 SET_ADDRESS(_stubs, StubRoutines::arrayof_jint_fill());
3324
3325 SET_ADDRESS(_stubs, StubRoutines::data_cache_writeback());
3326 SET_ADDRESS(_stubs, StubRoutines::data_cache_writeback_sync());
3327
3328 SET_ADDRESS(_stubs, StubRoutines::aescrypt_encryptBlock());
3329 SET_ADDRESS(_stubs, StubRoutines::aescrypt_decryptBlock());
3330 SET_ADDRESS(_stubs, StubRoutines::cipherBlockChaining_encryptAESCrypt());
3331 SET_ADDRESS(_stubs, StubRoutines::cipherBlockChaining_decryptAESCrypt());
3332 SET_ADDRESS(_stubs, StubRoutines::electronicCodeBook_encryptAESCrypt());
3333 SET_ADDRESS(_stubs, StubRoutines::electronicCodeBook_decryptAESCrypt());
3334 SET_ADDRESS(_stubs, StubRoutines::poly1305_processBlocks());
3335 SET_ADDRESS(_stubs, StubRoutines::counterMode_AESCrypt());
3336 SET_ADDRESS(_stubs, StubRoutines::ghash_processBlocks());
3337 SET_ADDRESS(_stubs, StubRoutines::chacha20Block());
3338 SET_ADDRESS(_stubs, StubRoutines::base64_encodeBlock());
3339 SET_ADDRESS(_stubs, StubRoutines::base64_decodeBlock());
3340 SET_ADDRESS(_stubs, StubRoutines::md5_implCompress());
3341 SET_ADDRESS(_stubs, StubRoutines::md5_implCompressMB());
3342 SET_ADDRESS(_stubs, StubRoutines::sha1_implCompress());
3343 SET_ADDRESS(_stubs, StubRoutines::sha1_implCompressMB());
3344 SET_ADDRESS(_stubs, StubRoutines::sha256_implCompress());
3345 SET_ADDRESS(_stubs, StubRoutines::sha256_implCompressMB());
3346 SET_ADDRESS(_stubs, StubRoutines::sha512_implCompress());
3347 SET_ADDRESS(_stubs, StubRoutines::sha512_implCompressMB());
3348 SET_ADDRESS(_stubs, StubRoutines::sha3_implCompress());
3349 SET_ADDRESS(_stubs, StubRoutines::sha3_implCompressMB());
3350 SET_ADDRESS(_stubs, StubRoutines::double_keccak());
3351 SET_ADDRESS(_stubs, StubRoutines::intpoly_assign());
3352 SET_ADDRESS(_stubs, StubRoutines::intpoly_montgomeryMult_P256());
3353 SET_ADDRESS(_stubs, StubRoutines::dilithiumAlmostNtt());
3354 SET_ADDRESS(_stubs, StubRoutines::dilithiumAlmostInverseNtt());
3355 SET_ADDRESS(_stubs, StubRoutines::dilithiumNttMult());
3356 SET_ADDRESS(_stubs, StubRoutines::dilithiumMontMulByConstant());
3357 SET_ADDRESS(_stubs, StubRoutines::dilithiumDecomposePoly());
3358 SET_ADDRESS(_stubs, StubRoutines::kyber12To16());
3359
3360 SET_ADDRESS(_stubs, StubRoutines::updateBytesCRC32());
3361 SET_ADDRESS(_stubs, StubRoutines::updateBytesCRC32C());
3362 SET_ADDRESS(_stubs, StubRoutines::updateBytesAdler32());
3363
3364 SET_ADDRESS(_stubs, StubRoutines::multiplyToLen());
3365 SET_ADDRESS(_stubs, StubRoutines::squareToLen());
3366 SET_ADDRESS(_stubs, StubRoutines::mulAdd());
3367 SET_ADDRESS(_stubs, StubRoutines::montgomeryMultiply());
3368 SET_ADDRESS(_stubs, StubRoutines::montgomerySquare());
3369 SET_ADDRESS(_stubs, StubRoutines::bigIntegerRightShift());
3370 SET_ADDRESS(_stubs, StubRoutines::bigIntegerLeftShift());
3371 SET_ADDRESS(_stubs, StubRoutines::galoisCounterMode_AESCrypt());
3372
3373 SET_ADDRESS(_stubs, StubRoutines::vectorizedMismatch());
3374
3375 SET_ADDRESS(_stubs, StubRoutines::unsafe_setmemory());
3376
3377 SET_ADDRESS(_stubs, StubRoutines::dexp());
3378 SET_ADDRESS(_stubs, StubRoutines::dlog());
3379 SET_ADDRESS(_stubs, StubRoutines::dlog10());
3380 SET_ADDRESS(_stubs, StubRoutines::dpow());
3381 SET_ADDRESS(_stubs, StubRoutines::dsin());
3382 SET_ADDRESS(_stubs, StubRoutines::dcos());
3383 SET_ADDRESS(_stubs, StubRoutines::dlibm_reduce_pi04l());
3384 SET_ADDRESS(_stubs, StubRoutines::dlibm_sin_cos_huge());
3385 SET_ADDRESS(_stubs, StubRoutines::dlibm_tan_cot_huge());
3386 SET_ADDRESS(_stubs, StubRoutines::dtan());
3387
3388 SET_ADDRESS(_stubs, StubRoutines::f2hf_adr());
3389 SET_ADDRESS(_stubs, StubRoutines::hf2f_adr());
3390
3391 for (int slot = 0; slot < Klass::SECONDARY_SUPERS_TABLE_SIZE; slot++) {
3392 SET_ADDRESS(_stubs, StubRoutines::lookup_secondary_supers_table_stub(slot));
3393 }
3394 SET_ADDRESS(_stubs, StubRoutines::lookup_secondary_supers_table_slow_path_stub());
3395
3396 #if defined(AMD64) && !defined(ZERO)
3397 SET_ADDRESS(_stubs, StubRoutines::x86::d2i_fixup());
3398 SET_ADDRESS(_stubs, StubRoutines::x86::f2i_fixup());
3399 SET_ADDRESS(_stubs, StubRoutines::x86::f2l_fixup());
3400 SET_ADDRESS(_stubs, StubRoutines::x86::float_sign_mask());
3401 SET_ADDRESS(_stubs, StubRoutines::x86::float_sign_flip());
3402 SET_ADDRESS(_stubs, StubRoutines::x86::double_sign_mask());
3403 SET_ADDRESS(_stubs, StubRoutines::x86::vector_popcount_lut());
3404 SET_ADDRESS(_stubs, StubRoutines::x86::vector_float_sign_mask());
3405 SET_ADDRESS(_stubs, StubRoutines::x86::vector_float_sign_flip());
3406 SET_ADDRESS(_stubs, StubRoutines::x86::vector_double_sign_mask());
3407 SET_ADDRESS(_stubs, StubRoutines::x86::vector_double_sign_flip());
3408 SET_ADDRESS(_stubs, StubRoutines::x86::vector_int_shuffle_mask());
3409 SET_ADDRESS(_stubs, StubRoutines::x86::vector_byte_shuffle_mask());
3410 SET_ADDRESS(_stubs, StubRoutines::x86::vector_short_shuffle_mask());
3411 SET_ADDRESS(_stubs, StubRoutines::x86::vector_long_shuffle_mask());
3412 SET_ADDRESS(_stubs, StubRoutines::x86::vector_long_sign_mask());
3413 SET_ADDRESS(_stubs, StubRoutines::x86::vector_int_to_byte_mask());
3414 SET_ADDRESS(_stubs, StubRoutines::x86::vector_reverse_byte_perm_mask_int());
3415 SET_ADDRESS(_stubs, StubRoutines::x86::vector_reverse_byte_perm_mask_short());
3416 SET_ADDRESS(_stubs, StubRoutines::x86::vector_reverse_byte_perm_mask_long());
3417 // The iota indices are ordered by type B/S/I/L/F/D, and the offset between two types is 64.
3418 // See C2_MacroAssembler::load_iota_indices().
3419 for (int i = 0; i < 6; i++) {
3420 SET_ADDRESS(_stubs, StubRoutines::x86::vector_iota_indices() + i * 64);
3421 }
3422 #ifdef COMPILER2
3423 for (int i = 0; i < 4; i++) {
3424 SET_ADDRESS(_stubs, StubRoutines::_string_indexof_array[i]);
3425 }
3426 #endif
3427 #endif
3428 #if defined(AARCH64) && !defined(ZERO)
3429 SET_ADDRESS(_stubs, StubRoutines::aarch64::zero_blocks());
3430 SET_ADDRESS(_stubs, StubRoutines::aarch64::count_positives());
3431 SET_ADDRESS(_stubs, StubRoutines::aarch64::count_positives_long());
3432 SET_ADDRESS(_stubs, StubRoutines::aarch64::large_array_equals());
3433 SET_ADDRESS(_stubs, StubRoutines::aarch64::compare_long_string_LL());
3434 SET_ADDRESS(_stubs, StubRoutines::aarch64::compare_long_string_UU());
3435 SET_ADDRESS(_stubs, StubRoutines::aarch64::compare_long_string_LU());
3436 SET_ADDRESS(_stubs, StubRoutines::aarch64::compare_long_string_UL());
3437 SET_ADDRESS(_stubs, StubRoutines::aarch64::string_indexof_linear_ul());
3438 SET_ADDRESS(_stubs, StubRoutines::aarch64::string_indexof_linear_ll());
3439 SET_ADDRESS(_stubs, StubRoutines::aarch64::string_indexof_linear_uu());
3440 SET_ADDRESS(_stubs, StubRoutines::aarch64::large_byte_array_inflate());
3441 SET_ADDRESS(_stubs, StubRoutines::aarch64::spin_wait());
3442
3443 SET_ADDRESS(_stubs, StubRoutines::aarch64::large_arrays_hashcode(T_BOOLEAN));
3444 SET_ADDRESS(_stubs, StubRoutines::aarch64::large_arrays_hashcode(T_BYTE));
3445 SET_ADDRESS(_stubs, StubRoutines::aarch64::large_arrays_hashcode(T_SHORT));
3446 SET_ADDRESS(_stubs, StubRoutines::aarch64::large_arrays_hashcode(T_CHAR));
3447 SET_ADDRESS(_stubs, StubRoutines::aarch64::large_arrays_hashcode(T_INT));
3448 #endif
3449
3450 _complete = true;
3451 log_info(aot, codecache, init)("Stubs recorded");
3452 }
3453
// Record entry points for the "early" C1 (Runtime1) blobs — the StubId range
// from the first C1 stub up to and including c1_forward_exception_id — into
// the _C1_blobs section of the address table. Must run before init_c1(),
// which asserts on _early_c1_complete and records the remaining C1 ids.
void AOTCodeAddressTable::init_early_c1() {
#ifdef COMPILER1
  // Runtime1 Blobs
  StubId id = StubInfo::stub_base(StubGroup::C1);
  // include forward_exception in range we publish
  StubId limit = StubInfo::next(StubId::c1_forward_exception_id);
  for (; id != limit; id = StubInfo::next(id)) {
    // Some blobs may not be generated in this configuration: log and skip
    // rather than recording a null entry.
    if (Runtime1::blob_for(id) == nullptr) {
      log_info(aot, codecache, init)("C1 blob %s is missing", Runtime1::name_for(id));
      continue;
    }
    if (Runtime1::entry_for(id) == nullptr) {
      log_info(aot, codecache, init)("C1 blob %s is missing entry", Runtime1::name_for(id));
      continue;
    }
    address entry = Runtime1::entry_for(id);
    SET_ADDRESS(_C1_blobs, entry);
  }
#endif // COMPILER1
  assert(_C1_blobs_length <= _C1_blobs_max, "increase _C1_blobs_max to %d", _C1_blobs_length);
  _early_c1_complete = true;
}
3476
// Record entry points for the remaining C1 (Runtime1) blobs — the StubId
// range after c1_forward_exception_id — plus any GC-specific C1 runtime
// stubs (G1/Z/Shenandoah), into the _C1_blobs section of the address table.
// Requires init_early_c1() to have run first so that table ids stay stable.
void AOTCodeAddressTable::init_c1() {
#ifdef COMPILER1
  // Runtime1 Blobs
  assert(_early_c1_complete, "early C1 blobs should be initialized");
  StubId id = StubInfo::next(StubId::c1_forward_exception_id);
  StubId limit = StubInfo::next(StubInfo::stub_max(StubGroup::C1));
  for (; id != limit; id = StubInfo::next(id)) {
    // Missing blobs/entries are logged and skipped rather than recorded.
    if (Runtime1::blob_for(id) == nullptr) {
      log_info(aot, codecache, init)("C1 blob %s is missing", Runtime1::name_for(id));
      continue;
    }
    if (Runtime1::entry_for(id) == nullptr) {
      log_info(aot, codecache, init)("C1 blob %s is missing entry", Runtime1::name_for(id));
      continue;
    }
    address entry = Runtime1::entry_for(id);
    SET_ADDRESS(_C1_blobs, entry);
  }
#if INCLUDE_G1GC
  if (UseG1GC) {
    // G1 has a C1 pre-barrier runtime stub generated by the barrier set.
    G1BarrierSetC1* bs = (G1BarrierSetC1*)BarrierSet::barrier_set()->barrier_set_c1();
    address entry = bs->pre_barrier_c1_runtime_code_blob()->code_begin();
    SET_ADDRESS(_C1_blobs, entry);
  }
#endif // INCLUDE_G1GC
#if INCLUDE_ZGC
  if (UseZGC) {
    // ZGC load/store barrier runtime stubs used by C1-compiled code.
    ZBarrierSetC1* bs = (ZBarrierSetC1*)BarrierSet::barrier_set()->barrier_set_c1();
    SET_ADDRESS(_C1_blobs, bs->_load_barrier_on_oop_field_preloaded_runtime_stub);
    SET_ADDRESS(_C1_blobs, bs->_load_barrier_on_weak_oop_field_preloaded_runtime_stub);
    SET_ADDRESS(_C1_blobs, bs->_store_barrier_on_oop_field_with_healing);
    SET_ADDRESS(_C1_blobs, bs->_store_barrier_on_oop_field_without_healing);
  }
#endif // INCLUDE_ZGC
#if INCLUDE_SHENANDOAHGC
  if (UseShenandoahGC) {
    // Shenandoah pre-barrier and load-reference-barrier runtime stubs.
    ShenandoahBarrierSetC1* bs = (ShenandoahBarrierSetC1*)BarrierSet::barrier_set()->barrier_set_c1();
    SET_ADDRESS(_C1_blobs, bs->pre_barrier_c1_runtime_code_blob()->code_begin());
    SET_ADDRESS(_C1_blobs, bs->load_reference_barrier_strong_rt_code_blob()->code_begin());
    SET_ADDRESS(_C1_blobs, bs->load_reference_barrier_strong_native_rt_code_blob()->code_begin());
    SET_ADDRESS(_C1_blobs, bs->load_reference_barrier_weak_rt_code_blob()->code_begin());
    SET_ADDRESS(_C1_blobs, bs->load_reference_barrier_phantom_rt_code_blob()->code_begin());
  }
#endif // INCLUDE_SHENANDOAHGC
#endif // COMPILER1

  assert(_C1_blobs_length <= _C1_blobs_max, "increase _C1_blobs_max to %d", _C1_blobs_length);
  _c1_complete = true;
  log_info(aot, codecache, init)("Runtime1 Blobs recorded");
}
3527
// Record entry points for the C2 (OptoRuntime) blobs into the _C2_blobs
// section of the address table. The order of SET_ADDRESS calls defines the
// stable ids stored in the cache, so entries must not be reordered.
void AOTCodeAddressTable::init_c2() {
#ifdef COMPILER2
  // OptoRuntime Blobs
  SET_ADDRESS(_C2_blobs, OptoRuntime::uncommon_trap_blob()->entry_point());
  SET_ADDRESS(_C2_blobs, OptoRuntime::exception_blob()->entry_point());
  SET_ADDRESS(_C2_blobs, OptoRuntime::new_instance_Java());
  SET_ADDRESS(_C2_blobs, OptoRuntime::new_array_Java());
  SET_ADDRESS(_C2_blobs, OptoRuntime::new_array_nozero_Java());
  SET_ADDRESS(_C2_blobs, OptoRuntime::multianewarray2_Java());
  SET_ADDRESS(_C2_blobs, OptoRuntime::multianewarray3_Java());
  SET_ADDRESS(_C2_blobs, OptoRuntime::multianewarray4_Java());
  SET_ADDRESS(_C2_blobs, OptoRuntime::multianewarray5_Java());
  SET_ADDRESS(_C2_blobs, OptoRuntime::multianewarrayN_Java());
  SET_ADDRESS(_C2_blobs, OptoRuntime::vtable_must_compile_stub());
  SET_ADDRESS(_C2_blobs, OptoRuntime::complete_monitor_locking_Java());
  SET_ADDRESS(_C2_blobs, OptoRuntime::monitor_notify_Java());
  SET_ADDRESS(_C2_blobs, OptoRuntime::monitor_notifyAll_Java());
  SET_ADDRESS(_C2_blobs, OptoRuntime::rethrow_stub());
  SET_ADDRESS(_C2_blobs, OptoRuntime::slow_arraycopy_Java());
  SET_ADDRESS(_C2_blobs, OptoRuntime::register_finalizer_Java());
  SET_ADDRESS(_C2_blobs, OptoRuntime::class_init_barrier_Java());
  SET_ADDRESS(_C2_blobs, OptoRuntime::compile_method_Java());
#if INCLUDE_JVMTI
  // Virtual-thread JVMTI transition stubs.
  SET_ADDRESS(_C2_blobs, OptoRuntime::vthread_end_first_transition_Java());
  SET_ADDRESS(_C2_blobs, OptoRuntime::vthread_start_final_transition_Java());
  SET_ADDRESS(_C2_blobs, OptoRuntime::vthread_start_transition_Java());
  SET_ADDRESS(_C2_blobs, OptoRuntime::vthread_end_transition_Java());
#endif /* INCLUDE_JVMTI */
#endif

  assert(_C2_blobs_length <= _C2_blobs_max, "increase _C2_blobs_max to %d", _C2_blobs_length);
  _c2_complete = true;
  log_info(aot, codecache, init)("OptoRuntime Blobs recorded");
}
3562 #undef SET_ADDRESS
3563
3564 AOTCodeAddressTable::~AOTCodeAddressTable() {
3565 if (_extrs_addr != nullptr) {
3566 FREE_C_HEAP_ARRAY(address, _extrs_addr);
3567 }
3568 if (_stubs_addr != nullptr) {
3569 FREE_C_HEAP_ARRAY(address, _stubs_addr);
3570 }
3571 if (_shared_blobs_addr != nullptr) {
3572 FREE_C_HEAP_ARRAY(address, _shared_blobs_addr);
3573 }
3574 }
3575
// Cap on the number of C strings that can be interned in one run.
#ifdef PRODUCT
#define MAX_STR_COUNT 200
#else
#define MAX_STR_COUNT 500
#endif
// The C-string id range sits directly above all other address-table ranges.
#define _c_str_max MAX_STR_COUNT
static const int _c_str_base = _all_max;

static const char* _C_strings_in[MAX_STR_COUNT] = {nullptr}; // Incoming strings
static const char* _C_strings[MAX_STR_COUNT] = {nullptr}; // Our duplicates
static int _C_strings_count = 0; // Number of interned (duplicated) strings
static int _C_strings_s[MAX_STR_COUNT] = {0}; // Maps stored-string id -> _C_strings slot
static int _C_strings_id[MAX_STR_COUNT] = {0}; // Maps _C_strings slot -> stored-string id (-1 until assigned)
static int _C_strings_used = 0; // Number of strings actually referenced from stored code
3590
// Materialize the interned C strings from the loaded cache image: the image
// stores an array of per-string lengths followed by the concatenated
// NUL-terminated string bytes. Populates _C_strings and the id/slot maps
// with identity mappings.
void AOTCodeCache::load_strings() {
  uint strings_count = _load_header->strings_count();
  if (strings_count == 0) {
    return;
  }
  uint strings_offset = _load_header->strings_offset();
  // Length table precedes the string bytes in the image.
  uint* string_lengths = (uint*)addr(strings_offset);
  strings_offset += (strings_count * sizeof(uint));
  uint strings_size = _load_header->search_table_offset() - strings_offset;
  // We have to keep cached strings longer than _cache buffer
  // because they are referenced from compiled code which may
  // still be executed on VM exit after _cache is freed.
  char* p = NEW_C_HEAP_ARRAY(char, strings_size+1, mtCode);
  memcpy(p, addr(strings_offset), strings_size);
  _C_strings_buf = p;
  assert(strings_count <= MAX_STR_COUNT, "sanity");
  for (uint i = 0; i < strings_count; i++) {
    _C_strings[i] = p;
    uint len = string_lengths[i];
    // Identity mappings: loaded string i is both stored-id i and slot i.
    _C_strings_s[i] = i;
    _C_strings_id[i] = i;
    p += len;
  }
  assert((uint)(p - _C_strings_buf) <= strings_size, "(" INTPTR_FORMAT " - " INTPTR_FORMAT ") = %d > %d ", p2i(p), p2i(_C_strings_buf), (uint)(p - _C_strings_buf), strings_size);
  _C_strings_count = strings_count;
  _C_strings_used = strings_count;
  log_debug(aot, codecache, init)(" Loaded %d C strings of total length %d at offset %d from AOT Code Cache", _C_strings_count, strings_size, strings_offset);
}
3619
3620 int AOTCodeCache::store_strings() {
3621 if (_C_strings_used > 0) {
3622 MutexLocker ml(AOTCodeCStrings_lock, Mutex::_no_safepoint_check_flag);
3623 uint offset = _write_position;
3624 uint length = 0;
3625 uint* lengths = (uint *)reserve_bytes(sizeof(uint) * _C_strings_used);
3626 if (lengths == nullptr) {
3627 return -1;
3628 }
3629 for (int i = 0; i < _C_strings_used; i++) {
3630 const char* str = _C_strings[_C_strings_s[i]];
3631 uint len = (uint)strlen(str) + 1;
3632 length += len;
3633 assert(len < 1000, "big string: %s", str);
3634 lengths[i] = len;
3635 uint n = write_bytes(str, len);
3636 if (n != len) {
3637 return -1;
3638 }
3639 }
3640 log_debug(aot, codecache, exit)(" Wrote %d C strings of total length %d at offset %d to AOT Code Cache",
3641 _C_strings_used, length, offset);
3642 }
3643 return _C_strings_used;
3644 }
3645
3646 const char* AOTCodeCache::add_C_string(const char* str) {
3647 if (is_on_for_dump() && str != nullptr) {
3648 MutexLocker ml(AOTCodeCStrings_lock, Mutex::_no_safepoint_check_flag);
3649 AOTCodeAddressTable* table = addr_table();
3650 if (table != nullptr) {
3651 return table->add_C_string(str);
3652 }
3653 }
3654 return str;
3655 }
3656
3657 const char* AOTCodeAddressTable::add_C_string(const char* str) {
3658 if (_extrs_complete) {
3659 // Check previous strings address
3660 for (int i = 0; i < _C_strings_count; i++) {
3661 if (_C_strings_in[i] == str) {
3662 return _C_strings[i]; // Found previous one - return our duplicate
3663 } else if (strcmp(_C_strings[i], str) == 0) {
3664 return _C_strings[i];
3665 }
3666 }
3667 // Add new one
3668 if (_C_strings_count < MAX_STR_COUNT) {
3669 // Passed in string can be freed and used space become inaccessible.
3670 // Keep original address but duplicate string for future compare.
3671 _C_strings_id[_C_strings_count] = -1; // Init
3672 _C_strings_in[_C_strings_count] = str;
3673 const char* dup = os::strdup(str);
3674 _C_strings[_C_strings_count++] = dup;
3675 log_trace(aot, codecache, stringtable)("add_C_string: [%d] " INTPTR_FORMAT " '%s'", _C_strings_count, p2i(dup), dup);
3676 return dup;
3677 } else {
3678 assert(false, "Number of C strings >= MAX_STR_COUNT");
3679 }
3680 }
3681 return str;
3682 }
3683
3684 int AOTCodeAddressTable::id_for_C_string(address str) {
3685 if (str == nullptr) {
3686 return -1;
3687 }
3688 MutexLocker ml(AOTCodeCStrings_lock, Mutex::_no_safepoint_check_flag);
3689 for (int i = 0; i < _C_strings_count; i++) {
3690 if (_C_strings[i] == (const char*)str) { // found
3691 int id = _C_strings_id[i];
3692 if (id >= 0) {
3693 assert(id < _C_strings_used, "%d >= %d", id , _C_strings_used);
3694 return id; // Found recorded
3695 }
3696 // Not found in recorded, add new
3697 id = _C_strings_used++;
3698 _C_strings_s[id] = i;
3699 _C_strings_id[i] = id;
3700 return id;
3701 }
3702 }
3703 return -1;
3704 }
3705
3706 address AOTCodeAddressTable::address_for_C_string(int idx) {
3707 assert(idx < _C_strings_count, "sanity");
3708 return (address)_C_strings[idx];
3709 }
3710
3711 static int search_address(address addr, address* table, uint length) {
3712 for (int i = 0; i < (int)length; i++) {
3713 if (table[i] == addr) {
3714 return i;
3715 }
3716 }
3717 return BAD_ADDRESS_ID;
3718 }
3719
3720 address AOTCodeAddressTable::address_for_id(int idx) {
3721 assert(_extrs_complete, "AOT Code Cache VM runtime addresses table is not complete");
3722 if (idx == -1) {
3723 return (address)-1;
3724 }
3725 uint id = (uint)idx;
3726 // special case for symbols based relative to os::init
3727 if (id > (_c_str_base + _c_str_max)) {
3728 return (address)os::init + idx;
3729 }
3730 if (idx < 0) {
3731 fatal("Incorrect id %d for AOT Code Cache addresses table", id);
3732 return nullptr;
3733 }
3734 // no need to compare unsigned id against 0
3735 if (/* id >= _extrs_base && */ id < _extrs_length) {
3736 return _extrs_addr[id - _extrs_base];
3737 }
3738 if (id >= _stubs_base && id < _stubs_base + _stubs_length) {
3739 return _stubs_addr[id - _stubs_base];
3740 }
3741 if (id >= _stubs_base && id < _stubs_base + _stubs_length) {
3742 return _stubs_addr[id - _stubs_base];
3743 }
3744 if (id >= _shared_blobs_base && id < _shared_blobs_base + _shared_blobs_length) {
3745 return _shared_blobs_addr[id - _shared_blobs_base];
3746 }
3747 if (id >= _C1_blobs_base && id < _C1_blobs_base + _C1_blobs_length) {
3748 return _C1_blobs_addr[id - _C1_blobs_base];
3749 }
3750 if (id >= _C1_blobs_base && id < _C1_blobs_base + _C1_blobs_length) {
3751 return _C1_blobs_addr[id - _C1_blobs_base];
3752 }
3753 if (id >= _C2_blobs_base && id < _C2_blobs_base + _C2_blobs_length) {
3754 return _C2_blobs_addr[id - _C2_blobs_base];
3755 }
3756 if (id >= _c_str_base && id < (_c_str_base + (uint)_C_strings_count)) {
3757 return address_for_C_string(id - _c_str_base);
3758 }
3759 fatal("Incorrect id %d for AOT Code Cache addresses table", id);
3760 return nullptr;
3761 }
3762
// Map a runtime address referenced from generated code to the stable id
// recorded in the AOT code cache (inverse of address_for_id). Search order:
// interned C strings, stubs, code blobs (shared, then C1, then C2), then
// external runtime entries. As a last resort, an address inside a known
// dynamic-library function is encoded as its distance from os::init.
// Unknown addresses trigger an assert in debug builds and return a negative
// id in product builds.
int AOTCodeAddressTable::id_for_address(address addr, RelocIterator reloc, CodeBlob* blob) {
  assert(_extrs_complete, "AOT Code Cache VM runtime addresses table is not complete");
  int id = -1;
  if (addr == (address)-1) { // Static call stub has jump to itself
    return id;
  }
  // Check card_table_base address first since it can point to any address
  BarrierSet* bs = BarrierSet::barrier_set();
  bool is_const_card_table_base = !UseG1GC && !UseShenandoahGC && bs->is_a(BarrierSet::CardTableBarrierSet);
  guarantee(!is_const_card_table_base || addr != ci_card_table_address_const(), "sanity");

  // Search for C string
  id = id_for_C_string(addr);
  if (id >= 0) {
    return id + _c_str_base;
  }
  if (StubRoutines::contains(addr)) {
    // Search in stubs
    id = search_address(addr, _stubs_addr, _stubs_length);
    if (id == BAD_ADDRESS_ID) {
      // Diagnose which stub the address belongs to before asserting;
      // retry with pc_return_offset in case addr is a return address.
      StubCodeDesc* desc = StubCodeDesc::desc_for(addr);
      if (desc == nullptr) {
        desc = StubCodeDesc::desc_for(addr + frame::pc_return_offset);
      }
      const char* sub_name = (desc != nullptr) ? desc->name() : "<unknown>";
      assert(false, "Address " INTPTR_FORMAT " for Stub:%s is missing in AOT Code Cache addresses table", p2i(addr), sub_name);
    } else {
      return _stubs_base + id;
    }
  } else {
    CodeBlob* cb = CodeCache::find_blob(addr);
    if (cb != nullptr) {
      int id_base = _shared_blobs_base;
      // Search in code blobs
      id = search_address(addr, _shared_blobs_addr, _shared_blobs_length);
      if (id == BAD_ADDRESS_ID) {
        id_base = _C1_blobs_base;
        // search C1 blobs
        id = search_address(addr, _C1_blobs_addr, _C1_blobs_length);
      }
      if (id == BAD_ADDRESS_ID) {
        id_base = _C2_blobs_base;
        // search C2 blobs
        id = search_address(addr, _C2_blobs_addr, _C2_blobs_length);
      }
      if (id == BAD_ADDRESS_ID) {
        assert(false, "Address " INTPTR_FORMAT " for Blob:%s is missing in AOT Code Cache addresses table", p2i(addr), cb->name());
      } else {
        return id_base + id;
      }
    } else {
      // Search in runtime functions
      id = search_address(addr, _extrs_addr, _extrs_length);
      if (id == BAD_ADDRESS_ID) {
        ResourceMark rm;
        const int buflen = 1024;
        char* func_name = NEW_RESOURCE_ARRAY(char, buflen);
        int offset = 0;
        if (os::dll_address_to_function_name(addr, func_name, buflen, &offset)) {
          if (offset > 0) {
            // Could be address of C string
            // Encode as distance from os::init; address_for_id decodes any
            // id above the C-string range this way.
            uint dist = (uint)pointer_delta(addr, (address)os::init, 1);
            CompileTask* task = ciEnv::current()->task();
            uint compile_id = 0;
            uint comp_level =0;
            if (task != nullptr) { // this could be called from compiler runtime initialization (compiler blobs)
              compile_id = task->compile_id();
              comp_level = task->comp_level();
            }
            log_debug(aot, codecache)("%d (L%d): Address " INTPTR_FORMAT " (offset %d) for runtime target '%s' is missing in AOT Code Cache addresses table",
                               compile_id, comp_level, p2i(addr), dist, (const char*)addr);
            assert(dist > (uint)(_all_max + MAX_STR_COUNT), "change encoding of distance");
            return dist;
          }
          // Dump diagnostic context before asserting on the unknown target.
          reloc.print_current_on(tty);
          blob->print_on(tty);
          blob->print_code_on(tty);
          assert(false, "Address " INTPTR_FORMAT " for runtime target '%s+%d' is missing in AOT Code Cache addresses table", p2i(addr), func_name, offset);
        } else {
          reloc.print_current_on(tty);
          blob->print_on(tty);
          blob->print_code_on(tty);
          os::find(addr, tty);
          assert(false, "Address " INTPTR_FORMAT " for <unknown>/('%s') is missing in AOT Code Cache addresses table", p2i(addr), (const char*)addr);
        }
      } else {
        return _extrs_base + id;
      }
    }
  }
  return id;
}
3855
3856 #undef _extrs_max
3857 #undef _stubs_max
3858 #undef _shared_blobs_max
3859 #undef _C1_blobs_max
3860 #undef _C2_blobs_max
3861 #undef _blobs_max
3862 #undef _extrs_base
3863 #undef _stubs_base
3864 #undef _shared_blobs_base
3865 #undef _C1_blobs_base
3866 #undef _C2_blobs_base
3867 #undef _blobs_end
3868
// Singleton holding GC-dependent constants captured from the running VM.
AOTRuntimeConstants AOTRuntimeConstants::_aot_runtime_constants;
3870
// Capture GC-dependent constants from the running VM into the singleton:
// the card table base (only for plain card-table GCs) and the grain shift
// appropriate for the active barrier set.
void AOTRuntimeConstants::initialize_from_runtime() {
  BarrierSet* bs = BarrierSet::barrier_set();
  address card_table_base = nullptr;
  uint grain_shift = 0;
  // Note: the #if blocks below chain into one if/else-if sequence.
#if INCLUDE_G1GC
  if (bs->is_a(BarrierSet::G1BarrierSet)) {
    grain_shift = G1HeapRegion::LogOfHRGrainBytes;
  } else
#endif
#if INCLUDE_SHENANDOAHGC
  if (bs->is_a(BarrierSet::ShenandoahBarrierSet)) {
    grain_shift = 0;
  } else
#endif
  if (bs->is_a(BarrierSet::CardTableBarrierSet)) {
    // Only plain card-table GCs publish a constant card table base.
    CardTable::CardValue* base = ci_card_table_address_const();
    assert(base != nullptr, "unexpected byte_map_base");
    card_table_base = base;
    CardTableBarrierSet* ctbs = barrier_set_cast<CardTableBarrierSet>(bs);
    grain_shift = ctbs->grain_shift();
  }
  _aot_runtime_constants._card_table_base = card_table_base;
  _aot_runtime_constants._grain_shift = grain_shift;
}
3895
// Null-terminated list of the addresses of the runtime-constant fields,
// in declaration order.
address AOTRuntimeConstants::_field_addresses_list[] = {
  ((address)&_aot_runtime_constants._card_table_base),
  ((address)&_aot_runtime_constants._grain_shift),
  nullptr
};
3901
3902 address AOTRuntimeConstants::card_table_base_address() {
3903 assert(UseSerialGC || UseParallelGC, "Only these GCs have constant card table base");
3904 return (address)&_aot_runtime_constants._card_table_base;
3905 }
3906
// Close the cache to nmethod readers and wait until all current readers
// leave. The reader counter protocol: >= 0 means open with that many
// readers; negative means closed with -(count + 1) readers still inside
// (so -1 means closed and empty).
void AOTCodeCache::wait_for_no_nmethod_readers() {
  while (true) {
    int cur = AtomicAccess::load(&_nmethod_readers);
    // Flip cur -> -(cur + 1): 0 becomes -1 (closed, drained),
    // k > 0 becomes -(k + 1) (closed, k readers still inside).
    int upd = -(cur + 1);
    if (cur >= 0 && AtomicAccess::cmpxchg(&_nmethod_readers, cur, upd) == cur) {
      // Success, no new readers should appear.
      break;
    }
  }

  // Now wait for all readers to leave. Exiting readers count the negative
  // value up towards -1 (see ~ReadingMark).
  SpinYield w;
  while (AtomicAccess::load(&_nmethod_readers) != -1) {
    w.wait();
  }
}
3923
// Register this thread as an nmethod reader via a CAS increment of the
// reader counter. Sets _failed when the cache has already been closed
// (counter is negative — see wait_for_no_nmethod_readers).
AOTCodeCache::ReadingMark::ReadingMark() {
  while (true) {
    int cur = AtomicAccess::load(&_nmethod_readers);
    if (cur < 0) {
      // Cache is already closed, cannot proceed.
      _failed = true;
      return;
    }
    if (AtomicAccess::cmpxchg(&_nmethod_readers, cur, cur + 1) == cur) {
      // Successfully recorded ourselves as entered.
      _failed = false;
      return;
    }
  }
}
3939
// Deregister this reader. Direction of the counter update depends on
// whether the cache was closed while we were inside: positive counts step
// down towards 0, negative (closed) counts step up towards -1 so the
// closer in wait_for_no_nmethod_readers() can finish.
AOTCodeCache::ReadingMark::~ReadingMark() {
  if (_failed) {
    // We never entered; nothing to undo.
    return;
  }
  while (true) {
    int cur = AtomicAccess::load(&_nmethod_readers);
    if (cur > 0) {
      // Cache is open, we are counting down towards 0.
      if (AtomicAccess::cmpxchg(&_nmethod_readers, cur, cur - 1) == cur) {
        return;
      }
    } else {
      // Cache is closed, we are counting up towards -1.
      if (AtomicAccess::cmpxchg(&_nmethod_readers, cur, cur + 1) == cur) {
        return;
      }
    }
  }
}
3959
// Print accumulated AOT code cache timers: load-side timers when the cache
// is being used, store-side timer when it is being dumped.
void AOTCodeCache::print_timers_on(outputStream* st) {
  if (is_using_code()) {
    st->print_cr ("    AOT Code Preload Time: %7.3f s", _t_totalPreload.seconds());
    st->print_cr ("    AOT Code Load Time: %7.3f s", _t_totalLoad.seconds());
    st->print_cr ("      nmethod register: %7.3f s", _t_totalRegister.seconds());
    st->print_cr ("      find AOT code entry: %7.3f s", _t_totalFind.seconds());
  }
  if (is_dumping_code()) {
    st->print_cr ("    AOT Code Store Time: %7.3f s", _t_totalStore.seconds());
  }
}
3971
3972 AOTCodeStats AOTCodeStats::add_aot_code_stats(AOTCodeStats stats1, AOTCodeStats stats2) {
3973 AOTCodeStats result;
3974 for (int kind = AOTCodeEntry::None; kind < AOTCodeEntry::Kind_count; kind++) {
3975 result.ccstats._kind_cnt[kind] = stats1.entry_count(kind) + stats2.entry_count(kind);
3976 }
3977
3978 for (int lvl = CompLevel_none; lvl < AOTCompLevel_count; lvl++) {
3979 result.ccstats._nmethod_cnt[lvl] = stats1.nmethod_count(lvl) + stats2.nmethod_count(lvl);
3980 }
3981 result.ccstats._clinit_barriers_cnt = stats1.clinit_barriers_count() + stats2.clinit_barriers_count();
3982 return result;
3983 }
3984
// Log per-kind entry totals at Debug level on VM exit, with a per-tier
// breakdown for nmethod entries.
void AOTCodeCache::log_stats_on_exit(AOTCodeStats& stats) {
  LogStreamHandle(Debug, aot, codecache, exit) log;
  if (log.is_enabled()) {
    for (uint kind = AOTCodeEntry::None; kind < AOTCodeEntry::Kind_count; kind++) {
      log.print_cr("  %s: total=%u", aot_code_entry_kind_name[kind], stats.entry_count(kind));
      if (kind == AOTCodeEntry::Nmethod) {
        for (uint lvl = CompLevel_none; lvl < AOTCompLevel_count; lvl++) {
          log.print_cr("    Tier %d: total=%u", lvl, stats.nmethod_count(lvl));
        }
      }
    }
  }
}
3998
3999 static void print_helper1(outputStream* st, const char* name, int count) {
4000 if (count > 0) {
4001 st->print(" %s=%d", name, count);
4002 }
4003 }
4004
// Print aggregate AOT code cache statistics: entry counts per kind and per
// compilation tier (collected over both preload and regular entries), plus
// optional Debug/Trace-level dumps of unused entries and per-nmethod detail.
void AOTCodeCache::print_statistics_on(outputStream* st) {
  AOTCodeCache* cache = open_for_use();
  if (cache != nullptr) {
    // Hold a ReadingMark so the cache cannot be closed underneath us.
    ReadingMark rdmk;
    if (rdmk.failed()) {
      // Cache is closed, cannot touch anything.
      return;
    }
    AOTCodeStats stats;

    // Accumulate stats over the preload entries ...
    uint preload_count = cache->_load_header->preload_entries_count();
    AOTCodeEntry* preload_entries = (AOTCodeEntry*)cache->addr(cache->_load_header->preload_entries_offset());
    for (uint i = 0; i < preload_count; i++) {
      stats.collect_all_stats(&preload_entries[i]);
    }

    // ... and over the regular entries.
    uint count = cache->_load_header->entries_count();
    AOTCodeEntry* load_entries = (AOTCodeEntry*)cache->addr(cache->_load_header->entries_offset());
    for (uint i = 0; i < count; i++) {
      stats.collect_all_stats(&load_entries[i]);
    }

    for (uint kind = AOTCodeEntry::None; kind < AOTCodeEntry::Kind_count; kind++) {
      if (stats.entry_count(kind) > 0) {
        st->print("  %s:", aot_code_entry_kind_name[kind]);
        print_helper1(st, "total", stats.entry_count(kind));
        print_helper1(st, "loaded", stats.entry_loaded_count(kind));
        print_helper1(st, "invalidated", stats.entry_invalidated_count(kind));
        print_helper1(st, "failed", stats.entry_load_failed_count(kind));
        st->cr();
      }
      if (kind == AOTCodeEntry::Nmethod) {
        // Per-tier breakdown for nmethod entries.
        for (uint lvl = CompLevel_none; lvl < AOTCompLevel_count; lvl++) {
          if (stats.nmethod_count(lvl) > 0) {
            st->print("    AOT Code T%d", lvl);
            print_helper1(st, "total", stats.nmethod_count(lvl));
            print_helper1(st, "loaded", stats.nmethod_loaded_count(lvl));
            print_helper1(st, "invalidated", stats.nmethod_invalidated_count(lvl));
            print_helper1(st, "failed", stats.nmethod_load_failed_count(lvl));
            if (lvl == AOTCompLevel_count-1) {
              // Clinit barriers only apply to the highest AOT level.
              print_helper1(st, "has_clinit_barriers", stats.clinit_barriers_count());
            }
            st->cr();
          }
        }
      }
    }
    LogStreamHandle(Debug, aot, codecache, init) log;
    if (log.is_enabled()) {
      AOTCodeCache::print_unused_entries_on(&log);
    }
    LogStreamHandle(Trace, aot, codecache) aot_info;
    // need a lock to traverse the code cache
    if (aot_info.is_enabled()) {
      MutexLocker locker(CodeCache_lock, Mutex::_no_safepoint_check_flag);
      NMethodIterator iter(NMethodIterator::all);
      while (iter.next()) {
        nmethod* nm = iter.method();
        if (nm->is_in_use() && !nm->is_native_method() && !nm->is_osr_method()) {
          // Flags: S = method in AOT cache, A = AOT-compiled, P = preloaded.
          aot_info.print("%5d:%c%c%c%d:", nm->compile_id(),
                         (nm->method()->in_aot_cache() ? 'S' : ' '),
                         (nm->is_aot() ? 'A' : ' '),
                         (nm->preloaded() ? 'P' : ' '),
                         nm->comp_level());
          print_helper(nm, &aot_info);
          aot_info.print(": ");
          CompileTask::print(&aot_info, nm, nullptr, true /*short_form*/);
          LogStreamHandle(Trace, aot, codecache) aot_debug;
          if (aot_debug.is_enabled()) {
            // Also dump training data compile records for this method.
            MethodTrainingData* mtd = MethodTrainingData::find(methodHandle(Thread::current(), nm->method()));
            if (mtd != nullptr) {
              mtd->iterate_compiles([&](CompileTrainingData* ctd) {
                aot_debug.print("  CTD: "); ctd->print_on(&aot_debug); aot_debug.cr();
              });
            }
          }
        }
      }
    }
  }
}
4086
4087 void AOTCodeEntry::print(outputStream* st) const {
4088 st->print_cr(" AOT Code Cache entry " INTPTR_FORMAT " [kind: %d, id: " UINT32_FORMAT_X_0 ", offset: %d, size: %d, comp_level: %d, comp_id: %d, %s%s%s%s]",
4089 p2i(this), (int)_kind, _id, _offset, _size, _comp_level, _comp_id,
4090 (_not_entrant? "not_entrant" : "entrant"),
4091 (_loaded ? ", loaded" : ""),
4092 (_has_clinit_barriers ? ", has_clinit_barriers" : ""),
4093 (_for_preload ? ", for_preload" : ""));
4094 }
4095
4096 // This is called after initialize() but before init2()
4097 // and _cache is not set yet.
4098 void AOTCodeCache::print_on(outputStream* st) {
4099 if (opened_cache != nullptr && opened_cache->for_use()) {
4100 ReadingMark rdmk;
4101 if (rdmk.failed()) {
4102 // Cache is closed, cannot touch anything.
4103 return;
4104 }
4105
4106 st->print_cr("\nAOT Code Cache Preload entries");
4107
4108 uint preload_count = opened_cache->_load_header->preload_entries_count();
4109 AOTCodeEntry* preload_entries = (AOTCodeEntry*)opened_cache->addr(opened_cache->_load_header->preload_entries_offset());
4110 for (uint i = 0; i < preload_count; i++) {
4111 AOTCodeEntry* entry = &preload_entries[i];
4112
4113 uint entry_position = entry->offset();
4114 uint name_offset = entry->name_offset() + entry_position;
4115 const char* saved_name = opened_cache->addr(name_offset);
4116
4117 st->print_cr("%4u: %10s Id:%u L%u size=%u '%s' %s%s%s",
4118 i, aot_code_entry_kind_name[entry->kind()], entry->id(), entry->comp_level(),
4119 entry->size(), saved_name,
4120 entry->has_clinit_barriers() ? " has_clinit_barriers" : "",
4121 entry->is_loaded() ? " loaded" : "",
4122 entry->not_entrant() ? " not_entrant" : "");
4123
4124 st->print_raw(" ");
4125 AOTCodeReader reader(opened_cache, entry, nullptr);
4126 reader.print_on(st);
4127 }
4128
4129 st->print_cr("\nAOT Code Cache entries");
4130
4131 uint count = opened_cache->_load_header->entries_count();
4132 uint* search_entries = (uint*)opened_cache->addr(opened_cache->_load_header->search_table_offset()); // [id, index]
4133 AOTCodeEntry* load_entries = (AOTCodeEntry*)opened_cache->addr(opened_cache->_load_header->entries_offset());
4134
4135 for (uint i = 0; i < count; i++) {
4136 int index = search_entries[2*i + 1];
4137 AOTCodeEntry* entry = &(load_entries[index]);
4138
4139 uint entry_position = entry->offset();
4140 uint name_offset = entry->name_offset() + entry_position;
4141 const char* saved_name = opened_cache->addr(name_offset);
4142
4143 st->print_cr("%4u: %10s idx:%4u Id:%u L%u size=%u '%s' %s%s%s%s",
4144 i, aot_code_entry_kind_name[entry->kind()], index, entry->id(), entry->comp_level(),
4145 entry->size(), saved_name,
4146 entry->has_clinit_barriers() ? " has_clinit_barriers" : "",
4147 entry->for_preload() ? " for_preload" : "",
4148 entry->is_loaded() ? " loaded" : "",
4149 entry->not_entrant() ? " not_entrant" : "");
4150
4151 st->print_raw(" ");
4152 AOTCodeReader reader(opened_cache, entry, nullptr);
4153 reader.print_on(st);
4154 }
4155 }
4156 }
4157
// Report cached nmethod entries that were never loaded, together with a
// best-effort diagnosis (via training data) of why each one was skipped.
// NOTE(review): the 'st' parameter is never used -- all output goes to a
// freshly created Info-level (aot+codecache+init) log stream; confirm
// whether callers passing their own stream (see print_statistics) expect
// the output there instead.
void AOTCodeCache::print_unused_entries_on(outputStream* st) {
  LogStreamHandle(Info, aot, codecache, init) info;
  if (info.is_enabled()) {
    AOTCodeCache::iterate([&](AOTCodeEntry* entry) {
      // Only nmethod entries that were never loaded are of interest.
      if (entry->is_nmethod() && !entry->is_loaded()) {
        MethodTrainingData* mtd = MethodTrainingData::find(methodHandle(Thread::current(), entry->method()));
        if (mtd != nullptr) {
          if (mtd->has_holder()) {
            if (mtd->holder()->method_holder()->is_initialized()) {
              ResourceMark rm;
              mtd->iterate_compiles([&](CompileTrainingData* ctd) {
                // Match the training record at the same compilation level
                // as the unused cache entry.
                if ((uint)ctd->level() == entry->comp_level()) {
                  if (ctd->init_deps_left_acquire() == 0) {
                    nmethod* nm = mtd->holder()->code();
                    if (nm == nullptr) {
                      if (mtd->holder()->queued_for_compilation()) {
                        // Early return leaves the inner lambda only: this
                        // particular compile record is not reported.
                        return; // scheduled for compilation
                      }
                    } else if ((uint)nm->comp_level() >= entry->comp_level()) {
                      return; // already online compiled and superseded by a more optimal method
                    }
                    // No better explanation found: the entry should have
                    // been usable but was never loaded.
                    info.print("AOT Code Cache entry not loaded: ");
                    ctd->print_on(&info);
                    info.cr();
                  }
                }
              });
            } else {
              // not yet initialized
            }
          } else {
            // Training data exists but the method itself was never
            // resolved in this run.
            info.print("AOT Code Cache entry doesn't have a holder: ");
            mtd->print_on(&info);
            info.cr();
          }
        }
      }
    });
  }
}
4198
4199 void AOTCodeReader::print_on(outputStream* st) {
4200 uint entry_position = _entry->offset();
4201 set_read_position(entry_position);
4202
4203 // Read name
4204 uint name_offset = entry_position + _entry->name_offset();
4205 uint name_size = _entry->name_size(); // Includes '/0'
4206 const char* name = addr(name_offset);
4207
4208 st->print_cr(" name: %s", name);
4209 }
4210