1 /*
2 * Copyright (c) 2023, 2026, Oracle and/or its affiliates. All rights reserved.
3 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
4 *
5 * This code is free software; you can redistribute it and/or modify it
6 * under the terms of the GNU General Public License version 2 only, as
7 * published by the Free Software Foundation.
8 *
9 * This code is distributed in the hope that it will be useful, but WITHOUT
10 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
11 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
12 * version 2 for more details (a copy is included in the LICENSE file that
13 * accompanied this code).
14 *
15 * You should have received a copy of the GNU General Public License version
16 * 2 along with this work; if not, write to the Free Software Foundation,
17 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
18 *
19 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
20 * or visit www.oracle.com if you need additional information or have any
21 * questions.
22 *
23 */
24
25
26 #include "asm/macroAssembler.hpp"
27 #include "cds/aotCacheAccess.hpp"
28 #include "cds/aotMetaspace.hpp"
29 #include "cds/cds_globals.hpp"
30 #include "cds/cdsConfig.hpp"
31 #include "cds/heapShared.hpp"
32 #include "ci/ciConstant.hpp"
33 #include "ci/ciEnv.hpp"
34 #include "ci/ciField.hpp"
35 #include "ci/ciMethod.hpp"
36 #include "ci/ciMethodData.hpp"
37 #include "ci/ciObject.hpp"
38 #include "ci/ciUtilities.inline.hpp"
39 #include "classfile/javaAssertions.hpp"
40 #include "classfile/stringTable.hpp"
41 #include "classfile/symbolTable.hpp"
42 #include "classfile/systemDictionary.hpp"
43 #include "classfile/vmClasses.hpp"
44 #include "classfile/vmIntrinsics.hpp"
45 #include "code/aotCodeCache.hpp"
46 #include "code/codeBlob.hpp"
47 #include "code/codeCache.hpp"
48 #include "code/oopRecorder.inline.hpp"
49 #include "compiler/abstractCompiler.hpp"
50 #include "compiler/compilationPolicy.hpp"
51 #include "compiler/compileBroker.hpp"
52 #include "compiler/compileTask.hpp"
53 #include "gc/g1/g1BarrierSetRuntime.hpp"
54 #include "gc/shared/barrierSetAssembler.hpp"
55 #include "gc/shared/cardTableBarrierSet.hpp"
56 #include "gc/shared/gcConfig.hpp"
57 #include "logging/logStream.hpp"
58 #include "memory/memoryReserver.hpp"
59 #include "memory/universe.hpp"
60 #include "oops/klass.inline.hpp"
61 #include "oops/method.inline.hpp"
62 #include "oops/trainingData.hpp"
63 #include "prims/jvmtiThreadState.hpp"
64 #include "runtime/atomicAccess.hpp"
65 #include "runtime/deoptimization.hpp"
66 #include "runtime/flags/flagSetting.hpp"
67 #include "runtime/globals_extension.hpp"
68 #include "runtime/handles.inline.hpp"
69 #include "runtime/java.hpp"
70 #include "runtime/jniHandles.inline.hpp"
71 #include "runtime/mountUnmountDisabler.hpp"
72 #include "runtime/mutexLocker.hpp"
73 #include "runtime/objectMonitorTable.hpp"
74 #include "runtime/os.inline.hpp"
75 #include "runtime/sharedRuntime.hpp"
76 #include "runtime/stubCodeGenerator.hpp"
77 #include "runtime/stubRoutines.hpp"
78 #include "runtime/threadIdentifier.hpp"
79 #include "runtime/timerTrace.hpp"
80 #include "utilities/copy.hpp"
81 #include "utilities/formatBuffer.hpp"
82 #include "utilities/ostream.hpp"
83 #include "utilities/spinYield.hpp"
84 #ifdef COMPILER1
85 #include "c1/c1_LIRAssembler.hpp"
86 #include "c1/c1_Runtime1.hpp"
87 #include "gc/g1/c1/g1BarrierSetC1.hpp"
88 #include "gc/shared/c1/barrierSetC1.hpp"
89 #if INCLUDE_SHENANDOAHGC
90 #include "gc/shenandoah/c1/shenandoahBarrierSetC1.hpp"
91 #endif // INCLUDE_SHENANDOAHGC
92 #include "gc/z/c1/zBarrierSetC1.hpp"
93 #endif // COMPILER1
94 #ifdef COMPILER2
95 #include "opto/runtime.hpp"
96 #endif
97 #if INCLUDE_JVMCI
98 #include "jvmci/jvmci.hpp"
99 #endif
100 #if INCLUDE_G1GC
101 #include "gc/g1/g1BarrierSetRuntime.hpp"
102 #include "gc/g1/g1HeapRegion.hpp"
103 #endif
104 #if INCLUDE_SHENANDOAHGC
105 #include "gc/shenandoah/shenandoahRuntime.hpp"
106 #endif
107 #if INCLUDE_ZGC
108 #include "gc/z/zBarrierSetRuntime.hpp"
109 #endif
110 #if defined(X86) && !defined(ZERO)
111 #include "rdtsc_x86.hpp"
112 #endif
113
114 #include <errno.h>
115 #include <sys/stat.h>
116
// Human-readable names for AOTCodeEntry::Kind values, generated from the
// DO_AOTCODEENTRY_KIND x-macro so the table always stays in sync with the enum.
const char* aot_code_entry_kind_name[] = {
#define DECL_KIND_STRING(kind) XSTR(kind),
  DO_AOTCODEENTRY_KIND(DECL_KIND_STRING)
#undef DECL_KIND_STRING
};
122
// Cumulative elapsed-time counters for AOT code cache phases (load, preload,
// register, find, store); collection is presumably gated by enable_timers().
static elapsedTimer _t_totalLoad;
static elapsedTimer _t_totalPreload;
static elapsedTimer _t_totalRegister;
static elapsedTimer _t_totalFind;
static elapsedTimer _t_totalStore;
128
129 static bool enable_timers() {
130 return CITime || log_is_enabled(Info, init);
131 }
132
// Report that the AOT code cache cannot be used for loading: either abort the
// VM (when AbortVMOnAOTCodeFailure is set; vm_exit_during_initialization does
// not return) or log the condition and turn all AOT code caching flags off.
static void report_load_failure() {
  if (AbortVMOnAOTCodeFailure) {
    vm_exit_during_initialization("Unable to use AOT Code Cache.", nullptr);
  }
  log_info(aot, codecache, init)("Unable to use AOT Code Cache.");
  AOTCodeCache::disable_caching();
}
140
// Report that the AOT code cache cannot be created during the assembly phase:
// either abort the VM (when AbortVMOnAOTCodeFailure is set; vm_abort does not
// return) or log the condition and turn all AOT code caching flags off.
static void report_store_failure() {
  if (AbortVMOnAOTCodeFailure) {
    tty->print_cr("Unable to create AOT Code Cache.");
    vm_abort(false);
  }
  log_info(aot, codecache, exit)("Unable to create AOT Code Cache.");
  AOTCodeCache::disable_caching();
}
149
// The sequence of AOT code caching flags and parameters settings.
151 //
152 // 1. The initial AOT code caching flags setting is done
153 // during call to CDSConfig::check_vm_args_consistency().
154 //
155 // 2. The earliest AOT code state check done in compilationPolicy_init()
156 // where we set number of compiler threads for AOT assembly phase.
157 //
158 // 3. We determine presence of AOT code in AOT Cache in
// AOTMetaspace::open_static_archive() which is called
160 // after compilationPolicy_init() but before codeCache_init().
161 //
162 // 4. AOTCodeCache::initialize() is called during universe_init()
163 // and does final AOT state and flags settings.
164 //
165 // 5. Finally AOTCodeCache::init2() is called after universe_init()
166 // when all GC settings are finalized.
167
168 // Next methods determine which action we do with AOT code depending
169 // on phase of AOT process: assembly or production.
170
171 bool AOTCodeCache::is_dumping_adapter() {
172 return AOTAdapterCaching && is_on_for_dump();
173 }
174
175 bool AOTCodeCache::is_using_adapter() {
176 return AOTAdapterCaching && is_on_for_use();
177 }
178
179 bool AOTCodeCache::is_dumping_stub() {
180 return AOTStubCaching && is_on_for_dump();
181 }
182
183 bool AOTCodeCache::is_using_stub() {
184 return AOTStubCaching && is_on_for_use();
185 }
186
187 bool AOTCodeCache::is_dumping_code() {
188 return AOTCodeCaching && is_on_for_dump();
189 }
190
191 bool AOTCodeCache::is_using_code() {
192 return AOTCodeCaching && is_on_for_use();
193 }
194
195 // This is used before AOTCodeCahe is initialized
196 // but after AOT (CDS) Cache flags consistency is checked.
197 bool AOTCodeCache::maybe_dumping_code() {
198 return AOTCodeCaching && CDSConfig::is_dumping_final_static_archive();
199 }
200
// Next methods could be called regardless of AOT code cache status.
// Initially they are called during AOT flags parsing and finalized
// in AOTCodeCache::initialize().

// Turn on all AOT code caching flags that are still at their default values
// (does not override flags set explicitly on the command line).
void AOTCodeCache::enable_caching() {
  FLAG_SET_ERGO_IF_DEFAULT(AOTCodeCaching, true);
  FLAG_SET_ERGO_IF_DEFAULT(AOTStubCaching, true);
  FLAG_SET_ERGO_IF_DEFAULT(AOTAdapterCaching, true);
}
209
// Force all AOT code caching flags off, overriding any previous setting.
void AOTCodeCache::disable_caching() {
  FLAG_SET_ERGO(AOTCodeCaching, false);
  FLAG_SET_ERGO(AOTStubCaching, false);
  FLAG_SET_ERGO(AOTAdapterCaching, false);
}
215
216 bool AOTCodeCache::is_caching_enabled() {
217 return AOTCodeCaching || AOTStubCaching || AOTAdapterCaching;
218 }
219
220 static uint32_t encode_id(AOTCodeEntry::Kind kind, int id) {
221 assert(AOTCodeEntry::is_valid_entry_kind(kind), "invalid AOTCodeEntry kind %d", (int)kind);
222 // There can be a conflict of id between an Adapter and *Blob, but that should not cause any functional issue
223 // becasue both id and kind are used to find an entry, and that combination should be unique
224 if (kind == AOTCodeEntry::Adapter) {
225 return id;
226 } else if (kind == AOTCodeEntry::SharedBlob) {
227 assert(StubInfo::is_shared(static_cast<BlobId>(id)), "not a shared blob id %d", id);
228 return id;
229 } else if (kind == AOTCodeEntry::C1Blob) {
230 assert(StubInfo::is_c1(static_cast<BlobId>(id)), "not a c1 blob id %d", id);
231 return id;
232 } else {
233 // kind must be AOTCodeEntry::C2Blob
234 assert(StubInfo::is_c2(static_cast<BlobId>(id)), "not a c2 blob id %d", id);
235 return id;
236 }
237 }
238
// Upper bound (in bytes) on the AOT code store buffer; finalized in
// AOTCodeCache::initialize() from AOTCodeMaxSize (aligned up as needed).
static uint _max_aot_code_size = 0;
uint AOTCodeCache::max_aot_code_size() {
  return _max_aot_code_size;
}
243
244 bool AOTCodeCache::is_code_load_thread_on() {
245 return UseAOTCodeLoadThread && AOTCodeCaching;
246 }
247
// Decide whether a constant field value may be folded into compiled code.
// Folding is disallowed for AOT compilations: a value observed at assembly
// time may not be valid in the runtime that loads the code.
bool AOTCodeCache::allow_const_field(ciConstant& value) {
  ciEnv* env = CURRENT_ENV;
  precond(env != nullptr);
  assert(!env->is_aot_compile() || is_dumping_code(), "AOT compilation should be enabled");
  return !env->is_aot_compile() // Restrict only when we generate AOT code
         // Can not trust primitive too || !is_reference_type(value.basic_type())
         // May disable this too for now || is_reference_type(value.basic_type()) && value.as_object()->should_be_constant()
         ;
}
257
// It is called from AOTMetaspace::initialize_shared_spaces()
// which is called from universe_init().
// At this point all AOT class linking settings are finalized
// and AOT cache is open so we can map AOT code region.
void AOTCodeCache::initialize() {
  if (!is_caching_enabled()) {
    log_info(aot, codecache, init)("AOT Code Cache is not used: disabled.");
    return;
  }
#if defined(ZERO) || !(defined(AMD64) || defined(AARCH64))
  // AOT code is only supported on AMD64/AARCH64 with a real (non-Zero) VM.
  log_info(aot, codecache, init)("AOT Code Cache is not supported on this platform.");
  disable_caching();
  return;
#else
  assert(!FLAG_IS_DEFAULT(AOTCache), "AOTCache should be specified");

  // Disable stubs caching until JDK-8357398 is fixed.
  FLAG_SET_ERGO(AOTStubCaching, false);

  if (VerifyOops) {
    // Disable AOT stubs caching when VerifyOops flag is on.
    // Verify oops code generated a lot of C strings which overflow
    // AOT C string table (which has fixed size).
    // AOT C string table will be reworked later to handle such cases.
    //
    // Note: AOT adapters are not affected - they don't have oop operations.
    log_info(aot, codecache, init)("AOT Stubs Caching is not supported with VerifyOops.");
    FLAG_SET_ERGO(AOTStubCaching, false);
  }

  // Dumping requires the final static archive with AOT-linked classes;
  // using requires an archive created with AOT-linked classes.
  bool is_dumping = false;
  bool is_using = false;
  if (CDSConfig::is_dumping_final_static_archive() && CDSConfig::is_dumping_aot_linked_classes()) {
    is_dumping = is_caching_enabled();
  } else if (CDSConfig::is_using_archive() && CDSConfig::is_using_aot_linked_classes()) {
    is_using = is_caching_enabled();
  }
  if (ClassInitBarrierMode > 0 && !(is_dumping && AOTCodeCaching)) {
    log_info(aot, codecache, init)("Set ClassInitBarrierMode to 0 because AOT Code dumping is off.");
    FLAG_SET_ERGO(ClassInitBarrierMode, 0);
  }
  if (!(is_dumping || is_using)) {
    log_info(aot, codecache, init)("AOT Code Cache is not used: AOT Class Linking is not used.");
    disable_caching();
    return; // AOT code caching disabled on command line
  }
  // Reserve AOT Cache region when we dumping AOT code.
  _max_aot_code_size = AOTCodeMaxSize;
  if (is_dumping && !FLAG_IS_DEFAULT(AOTCodeMaxSize)) {
    if (!is_aligned(AOTCodeMaxSize, os::vm_allocation_granularity())) {
      _max_aot_code_size = align_up(AOTCodeMaxSize, os::vm_allocation_granularity());
      log_debug(aot,codecache,init)("Max AOT Code Cache size is aligned up to %uK", (int)(max_aot_code_size()/K));
    }
  }
  // An empty region means the archive carries no AOT code to load.
  size_t aot_code_size = is_using ? AOTCacheAccess::get_aot_code_region_size() : 0;
  if (is_using && aot_code_size == 0) {
    log_info(aot, codecache, init)("AOT Code Cache is empty");
    disable_caching();
    return;
  }
  if (!open_cache(is_dumping, is_using)) {
    if (is_using) {
      report_load_failure();
    } else {
      report_store_failure();
    }
    return;
  }
  if (is_dumping) {
    // These affect code generation determinism; see their flag descriptions.
    FLAG_SET_DEFAULT(FoldStableValues, false);
    FLAG_SET_DEFAULT(ForceUnreachable, true);
  }
  FLAG_SET_DEFAULT(DelayCompilerStubsGeneration, false);
#endif // defined(ZERO) || !(defined(AMD64) || defined(AARCH64))
}
333
// The cache lives in 'opened_cache' between open_cache() and init2(); it is
// published via _cache only after verify_config_on_use() passes.
static AOTCodeCache* opened_cache = nullptr; // Use this until we verify the cache
AOTCodeCache* AOTCodeCache::_cache = nullptr;
DEBUG_ONLY( bool AOTCodeCache::_passed_init2 = false; )
337
// It is called after universe_init() when all GC settings are finalized.
// Verifies the opened cache's recorded configuration against this runtime,
// initializes runtime constants and the external-address table, and only
// then publishes the cache through _cache.
void AOTCodeCache::init2() {
  DEBUG_ONLY( _passed_init2 = true; )
  if (opened_cache == nullptr) {
    return; // No cache was opened (or opening failed) in initialize()
  }
  // After Universe initialized
  if (!opened_cache->verify_config_on_use()) { // Check on AOT code loading
    delete opened_cache;
    opened_cache = nullptr;
    report_load_failure();
    return;
  }

  // initialize aot runtime constants as appropriate to this runtime
  AOTRuntimeConstants::initialize_from_runtime();

  // initialize the table of external routines and initial stubs so we can save
  // generated code blobs that reference them
  AOTCodeAddressTable* table = opened_cache->_table;
  assert(table != nullptr, "should be initialized already");
  table->init_extrs();

  // Now cache and address table are ready for AOT code generation
  _cache = opened_cache;

  // Set ClassInitBarrierMode after all checks since it affects code generation
  if (is_dumping_code()) {
    FLAG_SET_ERGO_IF_DEFAULT(ClassInitBarrierMode, 1);
  } else {
    FLAG_SET_ERGO(ClassInitBarrierMode, 0);
  }
}
371
372 bool AOTCodeCache::open_cache(bool is_dumping, bool is_using) {
373 opened_cache = new AOTCodeCache(is_dumping, is_using);
374 if (opened_cache->failed()) {
375 delete opened_cache;
376 opened_cache = nullptr;
377 return false;
378 }
379 return true;
380 }
381
// Print on 'st' a compact status tag for every cached AOT code entry that
// belongs to nm's method. Tag format: "A[P]<comp_level>[LFI]#<comp_id>"
// where P = for preload, L = loaded, F = load failed, I = not entrant.
static void print_helper(nmethod* nm, outputStream* st) {
  AOTCodeCache::iterate([&](AOTCodeEntry* e) {
    if (e->method() == nm->method()) {
      ResourceMark rm;
      stringStream ss;
      ss.print("A%s%d", (e->for_preload() ? "P" : ""), e->comp_level());
      ss.print("[%s%s%s]",
               (e->is_loaded() ? "L" : ""),
               (e->load_fail() ? "F" : ""),
               (e->not_entrant() ? "I" : ""));
      ss.print("#%d", e->comp_id());

      st->print(" %s", ss.freeze());
    }
  });
}
398
399 void AOTCodeCache::close() {
400 if (is_on()) {
401 delete _cache; // Free memory
402 _cache = nullptr;
403 opened_cache = nullptr;
404 }
405 }
406
// Root data structure stored at the very beginning of the AOT code ("ac")
// region; see the storage rules in the block comment below.
class CachedCodeDirectory {
public:
  uint _aot_code_size;   // Size in bytes of the archived AOT code data
  char* _aot_code_data;  // Start of the archived AOT code data

  // Record the AOT code data; the pointer is written via AOTCacheAccess so it
  // can be relocated when the region is mapped at runtime.
  void set_aot_code_data(uint size, char* aot_data) {
    _aot_code_size = size;
    AOTCacheAccess::set_pointer(&_aot_code_data, aot_data);
  }

  static CachedCodeDirectory* create();
};
419
420 // Storing AOT code in the AOT code region (ac) of AOT Cache:
421 //
// [1] Use CachedCodeDirectory to keep track of all of the data related to AOT code.
423 // E.g., you can build a hashtable to record what methods have been archived.
424 //
425 // [2] Memory for all data for AOT code, including CachedCodeDirectory, should be
426 // allocated using AOTCacheAccess::allocate_aot_code_region().
427 //
428 // [3] CachedCodeDirectory must be the very first allocation.
429 //
430 // [4] Two kinds of pointer can be stored:
431 // - A pointer p that points to metadata. AOTCacheAccess::can_generate_aot_code(p) must return true.
432 // - A pointer to a buffer returned by AOTCacheAccess::allocate_aot_code_region().
433 // (It's OK to point to an interior location within this buffer).
434 // Such pointers must be stored using AOTCacheAccess::set_pointer()
435 //
436 // The buffers allocated by AOTCacheAccess::allocate_aot_code_region() are in a contiguous region. At runtime, this
437 // region is mapped to the process address space. All the pointers in this buffer are relocated as necessary
438 // (e.g., to account for the runtime location of the CodeCache).
439 //
// This is always at the very beginning of the mmapped CDS "ac" (AOT code) region
static CachedCodeDirectory* _aot_code_directory = nullptr;

// Allocate the directory. It must be the very first allocation in the
// (still empty) region so it ends up at the region's base address.
CachedCodeDirectory* CachedCodeDirectory::create() {
  assert(AOTCacheAccess::is_aot_code_region_empty(), "must be");
  CachedCodeDirectory* dir = (CachedCodeDirectory*)AOTCacheAccess::allocate_aot_code_region(sizeof(CachedCodeDirectory));
  return dir;
}
448
449 #define DATA_ALIGNMENT HeapWordSize
450
451 AOTCodeCache::AOTCodeCache(bool is_dumping, bool is_using) :
452 _load_header(nullptr),
453 _load_buffer(nullptr),
454 _store_buffer(nullptr),
455 _C_store_buffer(nullptr),
456 _write_position(0),
457 _load_size(0),
458 _store_size(0),
459 _for_use(is_using),
460 _for_dump(is_dumping),
461 _closing(false),
462 _failed(false),
463 _lookup_failed(false),
464 _for_preload(false),
465 _has_clinit_barriers(false),
466 _table(nullptr),
467 _load_entries(nullptr),
468 _search_entries(nullptr),
469 _store_entries(nullptr),
470 _C_strings_buf(nullptr),
471 _store_entries_cnt(0),
472 _compile_id(0),
473 _comp_level(0)
474 {
475 // Read header at the begining of cache
476 if (_for_use) {
477 // Read cache
478 size_t load_size = AOTCacheAccess::get_aot_code_region_size();
479 ReservedSpace rs = MemoryReserver::reserve(load_size, mtCode);
480 if (!rs.is_reserved()) {
481 log_warning(aot, codecache, init)("Failed to reserved %u bytes of memory for mapping AOT code region into AOT Code Cache", (uint)load_size);
482 set_failed();
483 return;
484 }
485 if (!AOTCacheAccess::map_aot_code_region(rs)) {
486 log_warning(aot, codecache, init)("Failed to read/mmap AOT code region (ac) into AOT Code Cache");
487 set_failed();
488 return;
489 }
490 _aot_code_directory = (CachedCodeDirectory*)rs.base();
491
492 _load_size = _aot_code_directory->_aot_code_size;
493 _load_buffer = _aot_code_directory->_aot_code_data;
494 assert(is_aligned(_load_buffer, DATA_ALIGNMENT), "load_buffer is not aligned");
495 log_info(aot, codecache, init)("Mapped %u bytes at address " INTPTR_FORMAT " from AOT Code Cache", _load_size, p2i(_load_buffer));
496
497 _load_header = (Header*)addr(0);
498 if (!_load_header->verify(_load_size)) {
499 set_failed();
500 return;
501 }
502 log_info (aot, codecache, init)("Loaded %u AOT code entries from AOT Code Cache", _load_header->entries_count());
503 log_debug(aot, codecache, init)(" %s: total=%u", aot_code_entry_kind_name[AOTCodeEntry::Adapter], _load_header->adapters_count());
504 log_debug(aot, codecache, init)(" %s: total=%u", aot_code_entry_kind_name[AOTCodeEntry::SharedBlob], _load_header->shared_blobs_count());
505 log_debug(aot, codecache, init)(" %s: total=%u", aot_code_entry_kind_name[AOTCodeEntry::C1Blob], _load_header->C1_blobs_count());
506 log_debug(aot, codecache, init)(" %s: total=%u", aot_code_entry_kind_name[AOTCodeEntry::C2Blob], _load_header->C2_blobs_count());
507 log_debug(aot, codecache, init)(" %s: total=%u", aot_code_entry_kind_name[AOTCodeEntry::Stub], _load_header->stubs_count());
508 log_debug(aot, codecache, init)(" %s: total=%u", aot_code_entry_kind_name[AOTCodeEntry::Nmethod], _load_header->nmethods_count());
509 log_debug(aot, codecache, init)(" AOT code cache size: %u bytes", _load_header->cache_size());
510
511 // Read strings
512 load_strings();
513 }
514 if (_for_dump) {
515 _C_store_buffer = NEW_C_HEAP_ARRAY(char, max_aot_code_size() + DATA_ALIGNMENT, mtCode);
516 _store_buffer = align_up(_C_store_buffer, DATA_ALIGNMENT);
517 // Entries allocated at the end of buffer in reverse (as on stack).
518 _store_entries = (AOTCodeEntry*)align_up(_C_store_buffer + max_aot_code_size(), DATA_ALIGNMENT);
519 log_debug(aot, codecache, init)("Allocated store buffer at address " INTPTR_FORMAT " of size %u", p2i(_store_buffer), max_aot_code_size());
520 }
521 _table = new AOTCodeAddressTable();
522 }
523
524 void AOTCodeCache::invalidate(AOTCodeEntry* entry) {
525 // This could be concurent execution
526 if (entry != nullptr && is_on()) { // Request could come after cache is closed.
527 _cache->invalidate_entry(entry);
528 }
529 }
530
531 void AOTCodeCache::init_early_stubs_table() {
532 AOTCodeAddressTable* table = addr_table();
533 if (table != nullptr) {
534 table->init_early_stubs();
535 }
536 }
537
538 void AOTCodeCache::init_shared_blobs_table() {
539 AOTCodeAddressTable* table = addr_table();
540 if (table != nullptr) {
541 table->init_shared_blobs();
542 }
543 }
544
545 void AOTCodeCache::init_stubs_table() {
546 AOTCodeAddressTable* table = addr_table();
547 if (table != nullptr) {
548 table->init_stubs();
549 }
550 }
551
552 void AOTCodeCache::init_early_c1_table() {
553 AOTCodeAddressTable* table = addr_table();
554 if (table != nullptr) {
555 table->init_early_c1();
556 }
557 }
558
559 void AOTCodeCache::init_c1_table() {
560 AOTCodeAddressTable* table = addr_table();
561 if (table != nullptr) {
562 table->init_c1();
563 }
564 }
565
566 void AOTCodeCache::init_c2_table() {
567 AOTCodeAddressTable* table = addr_table();
568 if (table != nullptr) {
569 table->init_c2();
570 }
571 }
572
// Close the cache: stop new readers/writers, wait out in-flight readers,
// flush the store buffer (when dumping), and free all C-heap resources.
AOTCodeCache::~AOTCodeCache() {
  if (_closing) {
    return; // Already closed
  }
  // Stop any further access to cache.
  // Checked on entry to load_nmethod() and store_nmethod().
  _closing = true;
  if (_for_use) {
    // Wait for all load_nmethod() finish.
    wait_for_no_nmethod_readers();
  }
  // Prevent writing code into cache while we are closing it.
  // This lock held by ciEnv::register_method() which calls store_nmethod().
  MutexLocker ml(Compile_lock);
  if (for_dump()) { // Finalize cache
    finish_write();
  }
  _load_buffer = nullptr;
  if (_C_store_buffer != nullptr) {
    FREE_C_HEAP_ARRAY(char, _C_store_buffer);
    _C_store_buffer = nullptr;
    _store_buffer = nullptr;
  }
  if (_table != nullptr) {
    // The lock name suggests the table's C-strings may be accessed
    // concurrently; serialize deletion with those users.
    MutexLocker ml(AOTCodeCStrings_lock, Mutex::_no_safepoint_check_flag);
    delete _table;
    _table = nullptr;
  }
}
602
603 void AOTCodeCache::Config::record(uint cpu_features_offset) {
604 _flags = 0;
605 #ifdef ASSERT
606 _flags |= debugVM;
607 #endif
608 if (UseCompressedOops) {
609 _flags |= compressedOops;
610 }
611 if (UseCompressedClassPointers) {
612 _flags |= compressedClassPointers;
613 }
614 if (UseTLAB) {
615 _flags |= useTLAB;
616 }
617 if (JavaAssertions::systemClassDefault()) {
618 _flags |= systemClassAssertions;
619 }
620 if (JavaAssertions::userClassDefault()) {
621 _flags |= userClassAssertions;
622 }
623 if (EnableContended) {
624 _flags |= enableContendedPadding;
625 }
626 if (RestrictContended) {
627 _flags |= restrictContendedPadding;
628 }
629 if (PreserveFramePointer) {
630 _flags |= preserveFramePointer;
631 }
632 _codeCacheSize = pointer_delta(CodeCache::high_bound(), CodeCache::low_bound(), 1);
633 _compressedOopShift = CompressedOops::shift();
634 _compressedOopBase = CompressedOops::base();
635 _compressedKlassShift = CompressedKlassPointers::shift();
636 _compressedKlassBase = CompressedKlassPointers::base();
637 _contendedPaddingWidth = ContendedPaddingWidth;
638 _objectAlignment = ObjectAlignmentInBytes;
639 _gcCardSize = GCCardSizeInBytes;
640 _gc = (uint)Universe::heap()->kind();
641 _maxVectorSize = MaxVectorSize;
642 _arrayOperationPartialInlineSize = ArrayOperationPartialInlineSize;
643 _allocatePrefetchLines = AllocatePrefetchLines;
644 _allocateInstancePrefetchLines = AllocateInstancePrefetchLines;
645 _allocatePrefetchDistance = AllocatePrefetchDistance;
646 _allocatePrefetchStepSize = AllocatePrefetchStepSize;
647 _cpu_features_offset = cpu_features_offset;
648 }
649
// Check the CPU features recorded at dump time against the current CPU.
// Returns false (disabling the cache) when the current CPU lacks features
// the cached code requires; extra runtime-only features are fine and are
// merely logged at debug level.
bool AOTCodeCache::Config::verify_cpu_features(AOTCodeCache* cache) const {
  LogStreamHandle(Debug, aot, codecache, init) log;
  // The recorded buffer layout is: uint size followed by the features data.
  uint offset = _cpu_features_offset;
  uint cpu_features_size = *(uint *)cache->addr(offset);
  assert(cpu_features_size == (uint)VM_Version::cpu_features_size(), "must be");
  offset += sizeof(uint);

  void* cached_cpu_features_buffer = (void *)cache->addr(offset);
  if (log.is_enabled()) {
    ResourceMark rm; // required for stringStream::as_string()
    stringStream ss;
    VM_Version::get_cpu_features_name(cached_cpu_features_buffer, ss);
    log.print_cr("CPU features recorded in AOTCodeCache: %s", ss.as_string());
  }

  if (VM_Version::supports_features(cached_cpu_features_buffer)) {
    // Compatible; optionally report features this CPU has beyond the cache's.
    if (log.is_enabled()) {
      ResourceMark rm; // required for stringStream::as_string()
      stringStream ss;
      char* runtime_cpu_features = NEW_RESOURCE_ARRAY(char, VM_Version::cpu_features_size());
      VM_Version::store_cpu_features(runtime_cpu_features);
      VM_Version::get_missing_features_name(runtime_cpu_features, cached_cpu_features_buffer, ss);
      if (!ss.is_empty()) {
        log.print_cr("Additional runtime CPU features: %s", ss.as_string());
      }
    }
  } else {
    // Incompatible; report which required features are missing.
    if (log.is_enabled()) {
      ResourceMark rm; // required for stringStream::as_string()
      stringStream ss;
      char* runtime_cpu_features = NEW_RESOURCE_ARRAY(char, VM_Version::cpu_features_size());
      VM_Version::store_cpu_features(runtime_cpu_features);
      VM_Version::get_missing_features_name(cached_cpu_features_buffer, runtime_cpu_features, ss);
      log.print_cr("AOT Code Cache disabled: required cpu features are missing: %s", ss.as_string());
    }
    return false;
  }
  return true;
}
689
690 bool AOTCodeCache::Config::verify(AOTCodeCache* cache) const {
691 // First checks affect all cached AOT code
692 #ifdef ASSERT
693 if ((_flags & debugVM) == 0) {
694 log_debug(aot, codecache, init)("AOT Code Cache disabled: it was created by product VM, it can't be used by debug VM");
695 return false;
696 }
697 #else
698 if ((_flags & debugVM) != 0) {
699 log_debug(aot, codecache, init)("AOT Code Cache disabled: it was created by debug VM, it can't be used by product VM");
700 return false;
701 }
702 #endif
703
704 size_t codeCacheSize = pointer_delta(CodeCache::high_bound(), CodeCache::low_bound(), 1);
705 if (codeCacheSize > _codeCacheSize) { // Only allow smaller or equal CodeCache size in production run
706 log_debug(aot, codecache, init)("AOT Code Cache disabled: it was created with CodeCache size = %dKb vs current %dKb", (int)(_codeCacheSize/K), (int)(codeCacheSize/K));
707 return false;
708 }
709
710 CollectedHeap::Name aot_gc = (CollectedHeap::Name)_gc;
711 if (aot_gc != Universe::heap()->kind()) {
712 log_debug(aot, codecache, init)("AOT Code Cache disabled: it was created with different GC: %s vs current %s", GCConfig::hs_err_name(aot_gc), GCConfig::hs_err_name());
713 return false;
714 }
715
716 // We don't need to cache CardTable::card_shift() if GCCardSizeInBytes stay the same
717 if (_gcCardSize != (uint)GCCardSizeInBytes) {
718 log_debug(aot, codecache, init)("AOT Code Cache disabled: it was created with GCCardSizeInBytes = %d vs current %d", _gcCardSize, GCCardSizeInBytes);
719 return false;
720 }
721
722 if (_objectAlignment != (uint)ObjectAlignmentInBytes) {
723 log_debug(aot, codecache, init)("AOT Code Cache disabled: it was created with ObjectAlignmentInBytes = %d vs current %d", _objectAlignment, ObjectAlignmentInBytes);
724 return false;
725 }
726
727 if (((_flags & enableContendedPadding) != 0) != EnableContended) {
728 log_debug(aot, codecache, init)("AOT Code Cache disabled: it was created with EnableContended = %s vs current %s", (enableContendedPadding ? "false" : "true"), (EnableContended ? "true" : "false"));
729 return false;
730 }
731 if (((_flags & restrictContendedPadding) != 0) != RestrictContended) {
732 log_debug(aot, codecache, init)("AOT Code Cache disabled: it was created with RestrictContended = %s vs current %s", (restrictContendedPadding ? "false" : "true"), (RestrictContended ? "true" : "false"));
733 return false;
734 }
735 if (_contendedPaddingWidth != (uint)ContendedPaddingWidth) {
736 log_debug(aot, codecache, init)("AOT Code Cache disabled: it was created with ContendedPaddingWidth = %d vs current %d", _contendedPaddingWidth, ContendedPaddingWidth);
737 return false;
738 }
739
740 if (((_flags & preserveFramePointer) != 0) != PreserveFramePointer) {
741 log_debug(aot, codecache, init)("AOT Code Cache disabled: it was created with PreserveFramePointer = %s vs current %s", (preserveFramePointer ? "false" : "true"), (PreserveFramePointer ? "true" : "false"));
742 return false;
743 }
744
745 if (((_flags & compressedClassPointers) != 0) != UseCompressedClassPointers) {
746 log_debug(aot, codecache, init)("AOT Code Cache disabled: it was created with UseCompressedClassPointers = %s vs current %s", (compressedClassPointers ? "false" : "true"), (UseCompressedClassPointers ? "true" : "false"));
747 return false;
748 }
749 if (_compressedKlassShift != (uint)CompressedKlassPointers::shift()) {
750 log_debug(aot, codecache, init)("AOT Code Cache disabled: it was created with CompressedKlassPointers::shift() = %d vs current %d", _compressedKlassShift, CompressedKlassPointers::shift());
751 return false;
752 }
753 if ((_compressedKlassBase == nullptr || CompressedKlassPointers::base() == nullptr) && (_compressedKlassBase != CompressedKlassPointers::base())) {
754 log_debug(aot, codecache, init)("AOT Code Cache disabled: incompatible CompressedKlassPointers::base(): %p vs current %p", _compressedKlassBase, CompressedKlassPointers::base());
755 return false;
756 }
757
758 if (((_flags & compressedOops) != 0) != UseCompressedOops) {
759 log_debug(aot, codecache, init)("AOT Code Cache disabled: it was created with UseCompressedOops = %s vs current %s", (compressedOops ? "false" : "true"), (UseCompressedOops ? "true" : "false"));
760 return false;
761 }
762 if (_compressedOopShift != (uint)CompressedOops::shift()) {
763 log_debug(aot, codecache, init)("AOT Code Cache disabled: it was created with different CompressedOops::shift(): %d vs current %d", _compressedOopShift, CompressedOops::shift());
764 return false;
765 }
766 if ((_compressedOopBase == nullptr || CompressedOops::base() == nullptr) && (_compressedOopBase != CompressedOops::base())) {
767 log_debug(aot, codecache, init)("AOTStubCaching is disabled: incompatible CompressedOops::base(): %p vs current %p", _compressedOopBase, CompressedOops::base());
768 return false;
769 }
770
771 // Some of the following checked flags depend on CPU features. Check CPU first.
772 if (!verify_cpu_features(cache)) {
773 return false;
774 }
775
776 // TLAB related flags
777 if (((_flags & useTLAB) != 0) != UseTLAB) {
778 log_debug(aot, codecache, init)("AOT Code Cache disabled: it was created with UseTLAB = %s vs current %s", (useTLAB ? "false" : "true"), (UseTLAB ? "true" : "false"));
779 return false;
780 }
781 if (_allocatePrefetchLines != (uint)AllocatePrefetchLines) {
782 log_debug(aot, codecache, init)("AOT Code Cache disabled: it was created with AllocatePrefetchLines = %d vs current %d", _allocatePrefetchLines, AllocatePrefetchLines);
783 return false;
784 }
785 if (_allocateInstancePrefetchLines != (uint)AllocateInstancePrefetchLines) {
786 log_debug(aot, codecache, init)("AOT Code Cache disabled: it was created with AllocateInstancePrefetchLines = %d vs current %d", _allocateInstancePrefetchLines, AllocateInstancePrefetchLines);
787 return false;
788 }
789 if (_allocatePrefetchDistance != (uint)AllocatePrefetchDistance) {
790 log_debug(aot, codecache, init)("AOT Code Cache disabled: it was created with AllocatePrefetchDistance = %d vs current %d", _allocatePrefetchDistance, AllocatePrefetchDistance);
791 return false;
792 }
793 if (_allocatePrefetchStepSize != (uint)AllocatePrefetchStepSize) {
794 log_debug(aot, codecache, init)("AOT Code Cache disabled: it was created with AllocatePrefetchStepSize = %d vs current %d", _allocatePrefetchStepSize, AllocatePrefetchStepSize);
795 return false;
796 }
797
798 // Vectorization and intrinsics related flags
799 if (_maxVectorSize != (uint)MaxVectorSize) {
800 log_debug(aot, codecache, init)("AOT Code Cache disabled: it was created with MaxVectorSize = %d vs current %d", _maxVectorSize, (uint)MaxVectorSize);
801 return false;
802 }
803 if (_arrayOperationPartialInlineSize != (uint)ArrayOperationPartialInlineSize) {
804 log_debug(aot, codecache, init)("AOT Code Cache disabled: it was created with ArrayOperationPartialInlineSize = %d vs current %d", _arrayOperationPartialInlineSize, (uint)ArrayOperationPartialInlineSize);
805 return false;
806 }
807
808 // Next affects only AOT nmethod
809 if (((_flags & systemClassAssertions) != 0) != JavaAssertions::systemClassDefault()) {
810 log_debug(aot, codecache, init)("AOT Code Cache disabled: it was created with JavaAssertions::systemClassDefault() = %s vs current %s", (systemClassAssertions ? "disabled" : "enabled"), (JavaAssertions::systemClassDefault() ? "enabled" : "disabled"));
811 FLAG_SET_ERGO(AOTCodeCaching, false);
812 }
813 if (((_flags & userClassAssertions) != 0) != JavaAssertions::userClassDefault()) {
814 log_debug(aot, codecache, init)("AOT Code Cache disabled: it was created with JavaAssertions::userClassDefault() = %s vs current %s", (userClassAssertions ? "disabled" : "enabled"), (JavaAssertions::userClassDefault() ? "enabled" : "disabled"));
815 FLAG_SET_ERGO(AOTCodeCaching, false);
816 }
817 return true;
818 }
819
820 bool AOTCodeCache::Header::verify(uint load_size) const {
821 if (_version != AOT_CODE_VERSION) {
822 log_debug(aot, codecache, init)("AOT Code Cache disabled: different AOT Code version %d vs %d recorded in AOT Code header", AOT_CODE_VERSION, _version);
823 return false;
824 }
825 if (load_size < _cache_size) {
826 log_debug(aot, codecache, init)("AOT Code Cache disabled: AOT Code Cache size %d < %d recorded in AOT Code header", load_size, _cache_size);
827 return false;
828 }
829 return true;
830 }
831
832 volatile int AOTCodeCache::_nmethod_readers = 0;
833
834 AOTCodeCache* AOTCodeCache::open_for_use() {
835 if (AOTCodeCache::is_on_for_use()) {
836 return AOTCodeCache::cache();
837 }
838 return nullptr;
839 }
840
841 AOTCodeCache* AOTCodeCache::open_for_dump() {
842 if (AOTCodeCache::is_on_for_dump()) {
843 AOTCodeCache* cache = AOTCodeCache::cache();
844 cache->clear_lookup_failed(); // Reset bit
845 return cache;
846 }
847 return nullptr;
848 }
849
850 bool AOTCodeCache::is_address_in_aot_cache(address p) {
851 AOTCodeCache* cache = open_for_use();
852 if (cache == nullptr) {
853 return false;
854 }
855 if ((p >= (address)cache->cache_buffer()) &&
856 (p < (address)(cache->cache_buffer() + cache->load_size()))) {
857 return true;
858 }
859 return false;
860 }
861
// Raw byte copy between cache buffers, with trace logging.
// The assert rejects size == 0 (and any size whose int cast is non-positive):
// callers are expected to skip empty copies themselves.
static void copy_bytes(const char* from, address to, uint size) {
  assert((int)size > 0, "sanity");
  memcpy(to, from, size);
  log_trace(aot, codecache)("Copied %d bytes from " INTPTR_FORMAT " to " INTPTR_FORMAT, size, p2i(from), p2i(to));
}
867
868 AOTCodeReader::AOTCodeReader(AOTCodeCache* cache, AOTCodeEntry* entry, CompileTask* task) {
869 _cache = cache;
870 _entry = entry;
871 _load_buffer = cache->cache_buffer();
872 _read_position = 0;
873 if (task != nullptr) {
874 _compile_id = task->compile_id();
875 _comp_level = task->comp_level();
876 _preload = task->preload();
877 } else {
878 _compile_id = 0;
879 _comp_level = 0;
880 _preload = false;
881 }
882 _lookup_failed = false;
883 }
884
// Move the read cursor to 'pos' within the loaded cache image.
// 'pos' must be inside the mapped cache data.
void AOTCodeReader::set_read_position(uint pos) {
  if (pos == _read_position) {
    return; // Already there
  }
  assert(pos < _cache->load_size(), "offset:%d >= file size:%d", pos, _cache->load_size());
  _read_position = pos;
}
892
// Move the write cursor to 'pos' (used to rewind after a partially written,
// abandoned entry). The current high-water mark is captured in _store_size
// BEFORE rewinding, so already-written data is still accounted for.
bool AOTCodeCache::set_write_position(uint pos) {
  if (pos == _write_position) {
    return true;
  }
  if (_store_size < _write_position) {
    _store_size = _write_position; // Adjust during write
  }
  assert(pos < _store_size, "offset:%d >= file size:%d", pos, _store_size);
  _write_position = pos;
  return true;
}
904
905 static char align_buffer[256] = { 0 };
906
907 bool AOTCodeCache::align_write() {
908 // We are not executing code from cache - we copy it by bytes first.
909 // No need for big alignment (or at all).
910 uint padding = DATA_ALIGNMENT - (_write_position & (DATA_ALIGNMENT - 1));
911 if (padding == DATA_ALIGNMENT) {
912 return true;
913 }
914 uint n = write_bytes((const void*)&align_buffer, padding);
915 if (n != padding) {
916 return false;
917 }
918 log_trace(aot, codecache)("Adjust write alignment in AOT Code Cache");
919 return true;
920 }
921
922 // Check to see if AOT code cache has required space to store "nbytes" of data
923 address AOTCodeCache::reserve_bytes(uint nbytes) {
924 assert(for_dump(), "Code Cache file is not created");
925 uint new_position = _write_position + nbytes;
926 if (new_position >= (uint)((char*)_store_entries - _store_buffer)) {
927 log_warning(aot,codecache)("Failed to ensure %d bytes at offset %d in AOT Code Cache. Increase AOTCodeMaxSize.",
928 nbytes, _write_position);
929 set_failed();
930 report_store_failure();
931 return nullptr;
932 }
933 address buffer = (address)(_store_buffer + _write_position);
934 log_trace(aot, codecache)("Reserved %d bytes at offset %d in AOT Code Cache", nbytes, _write_position);
935 _write_position += nbytes;
936 if (_store_size < _write_position) {
937 _store_size = _write_position;
938 }
939 return buffer;
940 }
941
942 uint AOTCodeCache::write_bytes(const void* buffer, uint nbytes) {
943 assert(for_dump(), "Code Cache file is not created");
944 if (nbytes == 0) {
945 return 0;
946 }
947 uint new_position = _write_position + nbytes;
948 if (new_position >= (uint)((char*)_store_entries - _store_buffer)) {
949 log_warning(aot, codecache)("Failed to write %d bytes at offset %d to AOT Code Cache. Increase AOTCodeMaxSize.",
950 nbytes, _write_position);
951 set_failed();
952 report_store_failure();
953 return 0;
954 }
955 copy_bytes((const char* )buffer, (address)(_store_buffer + _write_position), nbytes);
956 log_trace(aot, codecache)("Wrote %d bytes at offset %d to AOT Code Cache", nbytes, _write_position);
957 _write_position += nbytes;
958 if (_store_size < _write_position) {
959 _store_size = _write_position;
960 }
961 return nbytes;
962 }
963
// Look up the AOT code entry holding compiled code for 'method' at 'comp_level'.
// Returns nullptr when AOT code cannot or should not be used: method not in the
// AOT cache, recompilation already requested, level disabled via DisableAOTCode,
// no matching entry, or compiler directives exclude the method.
AOTCodeEntry* AOTCodeCache::find_code_entry(const methodHandle& method, uint comp_level) {
  assert(is_using_code(), "AOT code caching should be enabled");
  if (!method->in_aot_cache()) {
    return nullptr; // Only methods stored in the AOT cache can have AOT code
  }

  MethodCounters* mc = method->method_counters();
  if (mc != nullptr && mc->aot_code_recompile_requested()) {
    return nullptr; // Already requested JIT compilation
  }

  // DisableAOTCode is a bit mask: bit 0 disables level 1, bit 1 level 2,
  // bit 2 level 4 AOT code.
  switch (comp_level) {
    case CompLevel_simple:
      if ((DisableAOTCode & (1 << 0)) != 0) {
        return nullptr;
      }
      break;
    case CompLevel_limited_profile:
      if ((DisableAOTCode & (1 << 1)) != 0) {
        return nullptr;
      }
      break;
    case CompLevel_full_optimization:
      if ((DisableAOTCode & (1 << 2)) != 0) {
        return nullptr;
      }
      break;

    default: return nullptr; // Level 1, 2, and 4 only
  }
  TraceTime t1("Total time to find AOT code", &_t_totalFind, enable_timers(), false);
  if (is_on() && _cache->cache_buffer() != nullptr) {
    // The nmethod entry id is the method's offset within the AOT cache.
    uint id = AOTCacheAccess::convert_method_to_offset(method());
    AOTCodeEntry* entry = _cache->find_entry(AOTCodeEntry::Nmethod, id, comp_level);
    if (entry == nullptr) {
      LogStreamHandle(Info, aot, codecache, nmethod) log;
      if (log.is_enabled()) {
        ResourceMark rm;
        const char* target_name = method->name_and_sig_as_C_string();
        log.print("Missing entry for '%s' (comp_level %d, id: " UINT32_FORMAT_X_0 ")", target_name, (uint)comp_level, id);
      }
#ifdef ASSERT
    } else {
      // Cross-check that the stored entry really refers to the method looked up.
      assert(!entry->has_clinit_barriers(), "only preload code should have clinit barriers");
      ResourceMark rm;
      assert(method() == entry->method(), "AOTCodeCache: saved nmethod's method %p (name: %s id: " UINT32_FORMAT_X_0
             ") is different from the method %p (name: %s, id: " UINT32_FORMAT_X_0 " being looked up" ,
             entry->method(), entry->method()->name_and_sig_as_C_string(), entry->id(), method(), method()->name_and_sig_as_C_string(), id);
#endif
    }

    // Compiler directives can veto the use of AOT code for this method.
    DirectiveSet* directives = DirectivesStack::getMatchingDirective(method, nullptr);
    if (directives->IgnoreAOTCompiledOption || directives->ExcludeOption) {
      LogStreamHandle(Info, aot, codecache, compilation) log;
      if (log.is_enabled()) {
        log.print("Ignore AOT code entry on level %d for ", comp_level);
        method->print_value_on(&log);
      }
      return nullptr;
    }

    return entry;
  }
  return nullptr;
}
1029
// Decode the Method* for an nmethod entry. The entry's _id is the AOT cache
// offset produced by AOTCacheAccess::convert_method_to_offset() at dump time.
Method* AOTCodeEntry::method() {
  assert(_kind == Nmethod, "invalid kind %d", _kind);
  assert(AOTCodeCache::is_on_for_use(), "must be");
  return AOTCacheAccess::convert_offset_to_method(_id);
}
1035
// Placement allocation: AOTCodeEntry objects live in the cache's entries
// table (cache->add_entry()), not on the C heap. They are never deleted
// individually.
void* AOTCodeEntry::operator new(size_t x, AOTCodeCache* cache) {
  return (void*)(cache->add_entry());
}
1039
1040 static bool check_entry(AOTCodeEntry::Kind kind, uint id, uint comp_level, AOTCodeEntry* entry) {
1041 if (entry->kind() == kind) {
1042 assert(entry->id() == id, "sanity");
1043 if (kind != AOTCodeEntry::Nmethod || // addapters and stubs have only one version
1044 // Look only for normal AOT code entry, preload code is handled separately
1045 (!entry->not_entrant() && (entry->comp_level() == comp_level))) {
1046 return true; // Found
1047 }
1048 }
1049 return false;
1050 }
1051
// Find an entry of the given kind/id (and, for nmethods, comp_level) using the
// sorted [id, index] search table. Several entries may share one id (e.g.
// nmethods at different comp levels), so after a binary-search hit the
// neighboring equal-id slots are scanned linearly in both directions.
AOTCodeEntry* AOTCodeCache::find_entry(AOTCodeEntry::Kind kind, uint id, uint comp_level) {
  assert(_for_use, "sanity");
  uint count = _load_header->entries_count();
  if (_load_entries == nullptr) {
    // Lazily locate the search table and entries array in the mapped image.
    _search_entries = (uint*)addr(_load_header->search_table_offset()); // [id, index]
    _load_entries = (AOTCodeEntry*)addr(_load_header->entries_offset());
    log_debug(aot, codecache, init)("Read %d entries table at offset %d from AOT Code Cache", count, _load_header->entries_offset());
  }
  // Binary search
  int l = 0;
  int h = count - 1;
  while (l <= h) {
    int mid = (l + h) >> 1;
    int ix = mid * 2; // Two uints per slot: [id, index]
    uint is = _search_entries[ix];
    if (is == id) {
      int index = _search_entries[ix + 1];
      AOTCodeEntry* entry = &(_load_entries[index]);
      if (check_entry(kind, id, comp_level, entry)) {
        return entry; // Found
      }
      // Linear search around the hit over slots with the same id
      for (int i = mid - 1; i >= l; i--) { // search back
        ix = i * 2;
        is = _search_entries[ix];
        if (is != id) {
          break;
        }
        index = _search_entries[ix + 1];
        AOTCodeEntry* entry = &(_load_entries[index]);
        if (check_entry(kind, id, comp_level, entry)) {
          return entry; // Found
        }
      }
      for (int i = mid + 1; i <= h; i++) { // search forward
        ix = i * 2;
        is = _search_entries[ix];
        if (is != id) {
          break;
        }
        index = _search_entries[ix + 1];
        AOTCodeEntry* entry = &(_load_entries[index]);
        if (check_entry(kind, id, comp_level, entry)) {
          return entry; // Found
        }
      }
      break; // No match found
    } else if (is < id) {
      l = mid + 1;
    } else {
      h = mid - 1;
    }
  }
  return nullptr;
}
1108
1109 void AOTCodeCache::invalidate_entry(AOTCodeEntry* entry) {
1110 assert(entry!= nullptr, "all entries should be read already");
1111 if (entry->not_entrant()) {
1112 return; // Someone invalidated it already
1113 }
1114 #ifdef ASSERT
1115 assert(_load_entries != nullptr, "sanity");
1116 {
1117 uint name_offset = entry->offset() + entry->name_offset();
1118 const char* name = _load_buffer + name_offset;;
1119 uint level = entry->comp_level();
1120 uint comp_id = entry->comp_id();
1121 bool for_preload = entry->for_preload();
1122 bool clinit_brs = entry->has_clinit_barriers();
1123 log_info(aot, codecache, nmethod)("Invalidating entry for '%s' (comp_id %d, comp_level %d, hash: " UINT32_FORMAT_X_0 "%s%s)",
1124 name, comp_id, level, entry->id(), (for_preload ? "P" : "A"), (clinit_brs ? ", has clinit barriers" : ""));
1125 }
1126 assert(entry->is_loaded() || entry->for_preload(), "invalidate only AOT code in use or a preload code");
1127 bool found = false;
1128 uint i = 0;
1129 uint count = 0;
1130 if (entry->for_preload()) {
1131 count = _load_header->preload_entries_count();
1132 AOTCodeEntry* preload_entry = (AOTCodeEntry*)addr(_load_header->preload_entries_offset());
1133 for (; i < count; i++) {
1134 if (entry == &preload_entry[i]) {
1135 break;
1136 }
1137 }
1138 } else {
1139 count = _load_header->entries_count();
1140 for(; i < count; i++) {
1141 if (entry == &(_load_entries[i])) {
1142 break;
1143 }
1144 }
1145 }
1146 found = (i < count);
1147 assert(found, "entry should exist");
1148 #endif
1149 entry->set_not_entrant();
1150 uint name_offset = entry->offset() + entry->name_offset();
1151 const char* name = _load_buffer + name_offset;;
1152 uint level = entry->comp_level();
1153 uint comp_id = entry->comp_id();
1154 bool for_preload = entry->for_preload();
1155 bool clinit_brs = entry->has_clinit_barriers();
1156 log_info(aot, codecache, nmethod)("Invalidated entry for '%s' (comp_id %d, comp_level %d, hash: " UINT32_FORMAT_X_0 "%s%s)",
1157 name, comp_id, level, entry->id(), (for_preload ? "P" : "A"), (clinit_brs ? ", has clinit barriers" : ""));
1158
1159 if (!for_preload && (entry->comp_level() == CompLevel_full_optimization)) {
1160 // Invalidate preload code if normal AOT C2 code is invalidated,
1161 // most likely because some dependencies changed during run.
1162 // We can still use normal AOT code if preload code is
1163 // invalidated - normal AOT code has less restrictions.
1164 Method* method = entry->method();
1165 MethodCounters* mc = entry->method()->method_counters();
1166 if (mc != nullptr && mc->aot_preload_code_entry() != nullptr) {
1167 AOTCodeEntry* preload_entry = mc->aot_preload_code_entry();
1168 if (preload_entry != nullptr) {
1169 assert(preload_entry->for_preload(), "expecting only such entries here");
1170 invalidate_entry(preload_entry);
1171 }
1172 }
1173 }
1174 }
1175
// qsort() comparator ordering unsigned ints ascending (the search-table
// elements are [id, index] pairs; only the leading id is compared).
static int uint_cmp(const void *i, const void *j) {
  const uint lhs = *(const uint*)i;
  const uint rhs = *(const uint*)j;
  if (lhs > rhs) {
    return 1;
  }
  if (lhs < rhs) {
    return -1;
  }
  return 0;
}
1181
1182 void AOTCodeCache::store_cpu_features(char*& buffer, uint buffer_size) {
1183 uint* size_ptr = (uint *)buffer;
1184 *size_ptr = buffer_size;
1185 buffer += sizeof(uint);
1186
1187 VM_Version::store_cpu_features(buffer);
1188 log_debug(aot, codecache, exit)("CPU features recorded in AOTCodeCache: %s", VM_Version::features_string());
1189 buffer += buffer_size;
1190 buffer = align_up(buffer, DATA_ALIGNMENT);
1191 }
1192
// Finalize the AOT code cache: lay out, in the AOT cache region, the header,
// CPU features, preload entries + their code, normal entries + their code,
// the C-string table and the sorted search table, then publish the result
// through the cached-code directory. Returns false on write failure; returns
// true (doing nothing) when there are no usable entries.
bool AOTCodeCache::finish_write() {
  if (!align_write()) {
    return false;
  }
  // End of AOT code
  uint code_size = _write_position;
  uint strings_offset = code_size;
  int strings_count = store_strings();
  if (strings_count < 0) {
    return false;
  }
  if (!align_write()) {
    return false;
  }
  uint strings_size = _write_position - strings_offset;

  uint code_count = _store_entries_cnt;
  if (code_count > 0) {
    _aot_code_directory = CachedCodeDirectory::create();
    assert(_aot_code_directory != nullptr, "Sanity check");

    // Conservative upper bound for the final region size.
    uint header_size = (uint)align_up(sizeof(AOTCodeCache::Header), DATA_ALIGNMENT);
    uint search_count = code_count * 2;
    uint search_size = search_count * sizeof(uint);
    uint entries_size = (uint)align_up(code_count * sizeof(AOTCodeEntry), DATA_ALIGNMENT); // In bytes
    // _write_position should include code and strings
    uint code_alignment = code_count * DATA_ALIGNMENT; // We align_up code size when storing it.
    uint cpu_features_size = VM_Version::cpu_features_size();
    uint total_cpu_features_size = sizeof(uint) + cpu_features_size; // sizeof(uint) to store cpu_features_size
    uint total_size = header_size + _write_position + code_alignment + search_size + entries_size +
                      align_up(total_cpu_features_size, DATA_ALIGNMENT);
    assert(total_size < max_aot_code_size(), "AOT Code size (" UINT32_FORMAT " bytes) is greater than AOTCodeMaxSize(" UINT32_FORMAT " bytes).", total_size, max_aot_code_size());

    // Allocate in AOT Cache buffer
    char* buffer = (char *)AOTCacheAccess::allocate_aot_code_region(total_size + DATA_ALIGNMENT);
    char* start = align_up(buffer, DATA_ALIGNMENT);
    char* current = start + header_size; // Skip header

    uint cpu_features_offset = current - start;
    store_cpu_features(current, cpu_features_size);
    assert(is_aligned(current, DATA_ALIGNMENT), "sanity check");
    assert(current < start + total_size, "sanity check");

    // Create ordered search table for entries [id, index];
    uint* search = NEW_C_HEAP_ARRAY(uint, search_count, mtCode);

    AOTCodeEntry* entries_address = _store_entries; // Pointer to latest entry
    AOTCodeStats stats;
    uint max_size = 0;
    // AOTCodeEntry entries were allocated in reverse in store buffer.
    // Process them in reverse order to cache first code first.

    // Store AOTCodeEntry-s for preload code
    current = align_up(current, DATA_ALIGNMENT);
    uint preload_entries_cnt = 0;
    uint preload_entries_offset = current - start;
    AOTCodeEntry* preload_entries = (AOTCodeEntry*)current;
    for (int i = code_count - 1; i >= 0; i--) {
      AOTCodeEntry* entry = &entries_address[i];
      if (entry->load_fail()) {
        continue;
      }
      if (entry->for_preload()) {
        if (entry->not_entrant()) {
          // Skip not entrant preload code:
          // we can't pre-load code which may have failing dependencies.
          log_info(aot, codecache, exit)("Skip not entrant preload code comp_id: %d, comp_level: %d, hash: " UINT32_FORMAT_X_0 "%s",
                                         entry->comp_id(), entry->comp_level(), entry->id(), (entry->has_clinit_barriers() ? ", has clinit barriers" : ""));
        } else {
          copy_bytes((const char*)entry, (address)current, sizeof(AOTCodeEntry));
          stats.collect_entry_stats(entry);
          current += sizeof(AOTCodeEntry);
          preload_entries_cnt++;
        }
      }
    }

    // Now write the data for preload AOTCodeEntry
    for (int i = 0; i < (int)preload_entries_cnt; i++) {
      AOTCodeEntry* entry = &preload_entries[i];
      uint size = align_up(entry->size(), DATA_ALIGNMENT);
      if (size > max_size) {
        max_size = size;
      }
      copy_bytes((_store_buffer + entry->offset()), (address)current, size);
      entry->set_offset(current - start); // New offset
      current += size;
    }

    current = align_up(current, DATA_ALIGNMENT);
    uint entries_count = 0;
    uint new_entries_offset = current - start;
    AOTCodeEntry* code_entries = (AOTCodeEntry*)current;
    // Now scan normal entries
    for (int i = code_count - 1; i >= 0; i--) {
      AOTCodeEntry* entry = &entries_address[i];
      if (entry->load_fail() || entry->for_preload()) {
        continue;
      }
      if (entry->not_entrant()) {
        // Normal entries are stored even if invalidated during assembly;
        // reset the bit so they are usable in the next run.
        log_info(aot, codecache, exit)("Not entrant new entry comp_id: %d, comp_level: %d, hash: " UINT32_FORMAT_X_0 "%s",
                                       entry->comp_id(), entry->comp_level(), entry->id(), (entry->has_clinit_barriers() ? ", has clinit barriers" : ""));
        entry->set_entrant(); // Reset
      }
      copy_bytes((const char*)entry, (address)current, sizeof(AOTCodeEntry));
      stats.collect_entry_stats(entry);
      current += sizeof(AOTCodeEntry);
      // Record [id, index] pair for the search table (preload entries are
      // looked up separately and are not in the search table).
      search[entries_count*2 + 0] = entry->id();
      search[entries_count*2 + 1] = entries_count;
      entries_count++;
    }

    // Now write the data for normal AOTCodeEntry
    for (int i = 0; i < (int)entries_count; i++) {
      AOTCodeEntry* entry = &code_entries[i];
      uint size = align_up(entry->size(), DATA_ALIGNMENT);
      if (size > max_size) {
        max_size = size;
      }
      copy_bytes((_store_buffer + entry->offset()), (address)current, size);
      entry->set_offset(current - start); // New offset
      current += size;
    }

    if (preload_entries_cnt == 0 && entries_count == 0) {
      log_info(aot, codecache, exit)("AOT Code Cache was not created: no entries");
      FREE_C_HEAP_ARRAY(uint, search);
      return true; // Nothing to write
    }
    uint total_entries_cnt = preload_entries_cnt + entries_count;
    assert(total_entries_cnt <= code_count, "%d > %d", total_entries_cnt, code_count);
    // Write strings
    if (strings_count > 0) {
      copy_bytes((_store_buffer + strings_offset), (address)current, strings_size);
      strings_offset = (current - start); // New offset
      current += strings_size;
    }

    uint search_table_offset = current - start;
    // Sort and store search table (elements are [id, index] pairs;
    // uint_cmp compares the leading id)
    qsort(search, entries_count, 2*sizeof(uint), uint_cmp);
    search_size = 2 * entries_count * sizeof(uint);
    copy_bytes((const char*)search, (address)current, search_size);
    FREE_C_HEAP_ARRAY(uint, search);
    current += search_size;

    log_stats_on_exit(stats);

    uint size = (current - start);
    assert(size <= total_size, "%d > %d", size , total_size);
    log_debug(aot, codecache, exit)("  AOT code cache size: %u bytes", size);
    log_debug(aot, codecache, exit)("  header size:         %u", header_size);
    log_debug(aot, codecache, exit)("  total code size:     %u (max code's size: %u)", code_size, max_size);
    log_debug(aot, codecache, exit)("  entries size:        %u", entries_size);
    log_debug(aot, codecache, exit)("  entry search table:  %u", search_size);
    log_debug(aot, codecache, exit)("  C strings size:      %u", strings_size);
    log_debug(aot, codecache, exit)("  CPU features data:   %u", total_cpu_features_size);

    // Finalize header
    AOTCodeCache::Header* header = (AOTCodeCache::Header*)start;
    header->init(size, (uint)strings_count, strings_offset,
                 entries_count, search_table_offset, new_entries_offset,
                 preload_entries_cnt, preload_entries_offset,
                 stats.entry_count(AOTCodeEntry::Adapter), stats.entry_count(AOTCodeEntry::SharedBlob),
                 stats.entry_count(AOTCodeEntry::C1Blob), stats.entry_count(AOTCodeEntry::C2Blob),
                 stats.entry_count(AOTCodeEntry::Stub), cpu_features_offset);

    log_info(aot, codecache, exit)("Wrote %d AOT code entries to AOT Code Cache", total_entries_cnt);

    _aot_code_directory->set_aot_code_data(size, start);
  }
  return true;
}
1366
1367 //------------------Store/Load AOT code ----------------------
1368
1369 bool AOTCodeCache::store_code_blob(CodeBlob& blob, AOTCodeEntry::Kind entry_kind, uint id, const char* name) {
1370 AOTCodeCache* cache = open_for_dump();
1371 if (cache == nullptr) {
1372 return false;
1373 }
1374 assert(AOTCodeEntry::is_valid_entry_kind(entry_kind), "invalid entry_kind %d", entry_kind);
1375
1376 if (AOTCodeEntry::is_adapter(entry_kind) && !is_dumping_adapter()) {
1377 return false;
1378 }
1379 if (AOTCodeEntry::is_blob(entry_kind) && !is_dumping_stub()) {
1380 return false;
1381 }
1382 log_debug(aot, codecache, stubs)("Writing blob '%s' (id=%u, kind=%s) to AOT Code Cache", name, id, aot_code_entry_kind_name[entry_kind]);
1383
1384 #ifdef ASSERT
1385 LogStreamHandle(Trace, aot, codecache, stubs) log;
1386 if (log.is_enabled()) {
1387 FlagSetting fs(PrintRelocations, true);
1388 blob.print_on(&log);
1389 }
1390 #endif
1391 // we need to take a lock to prevent race between compiler threads generating AOT code
1392 // and the main thread generating adapter
1393 MutexLocker ml(Compile_lock);
1394 if (!is_on()) {
1395 return false; // AOT code cache was already dumped and closed.
1396 }
1397 if (!cache->align_write()) {
1398 return false;
1399 }
1400 uint entry_position = cache->_write_position;
1401
1402 // Write name
1403 uint name_offset = cache->_write_position - entry_position;
1404 uint name_size = (uint)strlen(name) + 1; // Includes '/0'
1405 uint n = cache->write_bytes(name, name_size);
1406 if (n != name_size) {
1407 return false;
1408 }
1409
1410 // Write CodeBlob
1411 if (!cache->align_write()) {
1412 return false;
1413 }
1414 uint blob_offset = cache->_write_position - entry_position;
1415 address archive_buffer = cache->reserve_bytes(blob.size());
1416 if (archive_buffer == nullptr) {
1417 return false;
1418 }
1419 CodeBlob::archive_blob(&blob, archive_buffer);
1420
1421 uint reloc_data_size = blob.relocation_size();
1422 n = cache->write_bytes((address)blob.relocation_begin(), reloc_data_size);
1423 if (n != reloc_data_size) {
1424 return false;
1425 }
1426
1427 bool has_oop_maps = false;
1428 if (blob.oop_maps() != nullptr) {
1429 if (!cache->write_oop_map_set(blob)) {
1430 return false;
1431 }
1432 has_oop_maps = true;
1433 }
1434
1435 #ifndef PRODUCT
1436 // Write asm remarks
1437 if (!cache->write_asm_remarks(blob.asm_remarks(), /* use_string_table */ true)) {
1438 return false;
1439 }
1440 if (!cache->write_dbg_strings(blob.dbg_strings(), /* use_string_table */ true)) {
1441 return false;
1442 }
1443 #endif /* PRODUCT */
1444
1445 if (!cache->write_relocations(blob)) {
1446 if (!cache->failed()) {
1447 // We may miss an address in AOT table - skip this code blob.
1448 cache->set_write_position(entry_position);
1449 }
1450 return false;
1451 }
1452
1453 uint entry_size = cache->_write_position - entry_position;
1454 AOTCodeEntry* entry = new(cache) AOTCodeEntry(entry_kind, encode_id(entry_kind, id),
1455 entry_position, entry_size, name_offset, name_size,
1456 blob_offset, has_oop_maps);
1457 log_debug(aot, codecache, stubs)("Wrote code blob '%s' (id=%u, kind=%s) to AOT Code Cache", name, id, aot_code_entry_kind_name[entry_kind]);
1458 return true;
1459 }
1460
1461 bool AOTCodeCache::store_code_blob(CodeBlob& blob, AOTCodeEntry::Kind entry_kind, BlobId id) {
1462 assert(AOTCodeEntry::is_blob(entry_kind),
1463 "wrong entry kind for blob id %s", StubInfo::name(id));
1464 return store_code_blob(blob, entry_kind, (uint)id, StubInfo::name(id));
1465 }
1466
1467 CodeBlob* AOTCodeCache::load_code_blob(AOTCodeEntry::Kind entry_kind, uint id, const char* name) {
1468 AOTCodeCache* cache = open_for_use();
1469 if (cache == nullptr) {
1470 return nullptr;
1471 }
1472 assert(AOTCodeEntry::is_valid_entry_kind(entry_kind), "invalid entry_kind %d", entry_kind);
1473
1474 if (AOTCodeEntry::is_adapter(entry_kind) && !is_using_adapter()) {
1475 return nullptr;
1476 }
1477 if (AOTCodeEntry::is_blob(entry_kind) && !is_using_stub()) {
1478 return nullptr;
1479 }
1480 log_debug(aot, codecache, stubs)("Reading blob '%s' (id=%u, kind=%s) from AOT Code Cache", name, id, aot_code_entry_kind_name[entry_kind]);
1481
1482 AOTCodeEntry* entry = cache->find_entry(entry_kind, encode_id(entry_kind, id));
1483 if (entry == nullptr) {
1484 return nullptr;
1485 }
1486 AOTCodeReader reader(cache, entry, nullptr);
1487 CodeBlob* blob = reader.compile_code_blob(name);
1488
1489 log_debug(aot, codecache, stubs)("%sRead blob '%s' (id=%u, kind=%s) from AOT Code Cache",
1490 (blob == nullptr? "Failed to " : ""), name, id, aot_code_entry_kind_name[entry_kind]);
1491 return blob;
1492 }
1493
1494 CodeBlob* AOTCodeCache::load_code_blob(AOTCodeEntry::Kind entry_kind, BlobId id) {
1495 assert(AOTCodeEntry::is_blob(entry_kind),
1496 "wrong entry kind for blob id %s", StubInfo::name(id));
1497 return load_code_blob(entry_kind, (uint)id, StubInfo::name(id));
1498 }
1499
// Reconstruct a CodeBlob in the CodeCache from this reader's entry.
// The stored layout is: name, archived CodeBlob image, relocation data,
// optional oop maps, then (non-product) asm remarks and debug strings.
// Returns nullptr on name mismatch (marks lookup failed) or when the
// CodeCache has no space left.
CodeBlob* AOTCodeReader::compile_code_blob(const char* name) {
  uint entry_position = _entry->offset();

  // Read name
  uint name_offset = entry_position + _entry->name_offset();
  uint name_size = _entry->name_size(); // Includes '\0'
  const char* stored_name = addr(name_offset);

  if (strncmp(stored_name, name, (name_size - 1)) != 0) {
    log_warning(aot, codecache, stubs)("Saved blob's name '%s' is different from the expected name '%s'",
                                       stored_name, name);
    set_lookup_failed(); // Skip this blob
    return nullptr;
  }

  // Read archived code blob
  uint offset = entry_position + _entry->code_offset();
  CodeBlob* archived_blob = (CodeBlob*)addr(offset);
  offset += archived_blob->size();

  // Relocation data immediately follows the archived blob image.
  address reloc_data = (address)addr(offset);
  offset += archived_blob->relocation_size();
  set_read_position(offset);

  ImmutableOopMapSet* oop_maps = nullptr;
  if (_entry->has_oop_maps()) {
    oop_maps = read_oop_map_set();
  }

  CodeBlob* code_blob = CodeBlob::create(archived_blob,
                                         stored_name,
                                         reloc_data,
                                         oop_maps
                                         );
  if (code_blob == nullptr) { // no space left in CodeCache
    return nullptr;
  }

#ifndef PRODUCT
  code_blob->asm_remarks().init();
  read_asm_remarks(code_blob->asm_remarks(), /* use_string_table */ true);
  code_blob->dbg_strings().init();
  read_dbg_strings(code_blob->dbg_strings(), /* use_string_table */ true);
#endif // PRODUCT

  // Patch addresses in the copied code so they are valid in this process.
  fix_relocations(code_blob);

#ifdef ASSERT
  LogStreamHandle(Trace, aot, codecache, stubs) log;
  if (log.is_enabled()) {
    FlagSetting fs(PrintRelocations, true);
    code_blob->print_on(&log);
  }
#endif
  return code_blob;
}
1556
// Store a StubCodeGenerator stub (code bytes + name) in the AOT code cache.
// Stubs are stored as raw code with no relocations; a stub that carries any
// relocation is unsupported and trips the fatal() in the ASSERT block below.
bool AOTCodeCache::store_stub(StubCodeGenerator* cgen, vmIntrinsicID id, const char* name, address start) {
  if (!is_dumping_stub()) {
    return false;
  }
  AOTCodeCache* cache = open_for_dump();
  if (cache == nullptr) {
    return false;
  }
  log_info(aot, codecache, stubs)("Writing stub '%s' id:%d to AOT Code Cache", name, (int)id);
  if (!cache->align_write()) {
    return false;
  }
#ifdef ASSERT
  // Verify the stub's code section has no relocations we would need to store.
  CodeSection* cs = cgen->assembler()->code_section();
  if (cs->has_locs()) {
    uint reloc_count = cs->locs_count();
    tty->print_cr("======== write stubs code section relocations [%d]:", reloc_count);
    // Collect additional data
    RelocIterator iter(cs);
    while (iter.next()) {
      switch (iter.type()) {
        case relocInfo::none:
          break;
        default: {
          iter.print_current_on(tty);
          fatal("stub's relocation %d unimplemented", (int)iter.type());
          break;
        }
      }
    }
  }
#endif
  uint entry_position = cache->_write_position;

  // Write code
  uint code_offset = 0;
  uint code_size = cgen->assembler()->pc() - start;
  uint n = cache->write_bytes(start, code_size);
  if (n != code_size) {
    return false;
  }
  // Write name
  uint name_offset = cache->_write_position - entry_position;
  uint name_size = (uint)strlen(name) + 1; // Includes '\0'
  n = cache->write_bytes(name, name_size);
  if (n != name_size) {
    return false;
  }
  uint entry_size = cache->_write_position - entry_position;
  // The entry is placement-allocated inside the cache's entries table.
  AOTCodeEntry* entry = new(cache) AOTCodeEntry(entry_position, entry_size, name_offset, name_size,
                                                code_offset, code_size,
                                                AOTCodeEntry::Stub, (uint32_t)id);
  log_info(aot, codecache, stubs)("Wrote stub '%s' id:%d to AOT Code Cache", name, (int)id);
  return true;
}
1612
// Copy a previously stored stub's code bytes into the generator's buffer at
// 'start' and advance the code section end past them. Returns false (without
// failing the cache) when the stub is simply absent; a stored-name mismatch,
// however, marks the whole cache as failed.
bool AOTCodeCache::load_stub(StubCodeGenerator* cgen, vmIntrinsicID id, const char* name, address start) {
  if (!is_using_stub()) {
    return false;
  }
  assert(start == cgen->assembler()->pc(), "wrong buffer");
  AOTCodeCache* cache = open_for_use();
  if (cache == nullptr) {
    return false;
  }
  AOTCodeEntry* entry = cache->find_entry(AOTCodeEntry::Stub, (uint)id);
  if (entry == nullptr) {
    return false;
  }
  uint entry_position = entry->offset();
  // Read name
  uint name_offset = entry->name_offset() + entry_position;
  uint name_size = entry->name_size(); // Includes '\0'
  const char* saved_name = cache->addr(name_offset);
  if (strncmp(name, saved_name, (name_size - 1)) != 0) {
    // Name mismatch means the cache is inconsistent with this VM; stop using it.
    log_warning(aot, codecache)("Saved stub's name '%s' is different from '%s' for id:%d", saved_name, name, (int)id);
    cache->set_failed();
    report_load_failure();
    return false;
  }
  log_info(aot, codecache, stubs)("Reading stub '%s' id:%d from AOT Code Cache", name, (int)id);
  // Read code
  uint code_offset = entry->code_offset() + entry_position;
  uint code_size = entry->code_size();
  copy_bytes(cache->addr(code_offset), start, code_size);
  cgen->assembler()->code_section()->set_end(start + code_size);
  log_info(aot, codecache, stubs)("Read stub '%s' id:%d from AOT Code Cache", name, (int)id);
  return true;
}
1646
// Store an nmethod produced by C1 or C2 into the AOT code cache during the
// AOT cache assembly phase. Tier-3 (C1 with full profile) compilations are
// never cached. Returns the new cache entry, or nullptr when the nmethod
// was not stored.
AOTCodeEntry* AOTCodeCache::store_nmethod(nmethod* nm, AbstractCompiler* compiler, bool for_preload) {
  if (!is_dumping_code()) {
    return nullptr;
  }
  assert(CDSConfig::is_dumping_aot_code(), "should be called only when allowed");
  AOTCodeCache* cache = open_for_dump();
  precond(cache != nullptr);
  precond(!nm->is_osr_method()); // AOT compilation is requested only during AOT cache assembly phase
  if (!compiler->is_c1() && !compiler->is_c2()) {
    // Only c1 and c2 compilers
    return nullptr;
  }
  int comp_level = nm->comp_level();
  if (comp_level == CompLevel_full_profile) {
    // Do not cache C1 compiles with full profile i.e. tier3
    return nullptr;
  }
  assert(comp_level == CompLevel_simple || comp_level == CompLevel_limited_profile || comp_level == CompLevel_full_optimization, "must be");

  TraceTime t1("Total time to store AOT code", &_t_totalStore, enable_timers(), false);
  AOTCodeEntry* entry = nullptr;
  entry = cache->write_nmethod(nm, for_preload);
  if (entry == nullptr) {
    log_info(aot, codecache, nmethod)("%d (L%d): nmethod store attempt failed", nm->compile_id(), comp_level);
  }
  // Clean up fields which could be set here (by write_nmethod) so the next
  // store starts from a known state.
  cache->_for_preload = false;
  cache->_has_clinit_barriers = false;
  return entry;
}
1677
1678 AOTCodeEntry* AOTCodeCache::write_nmethod(nmethod* nm, bool for_preload) {
1679 AOTCodeCache* cache = open_for_dump();
1680 assert(cache != nullptr, "sanity check");
1681 assert(!nm->has_clinit_barriers() || (ClassInitBarrierMode > 0), "sanity");
1682 uint comp_id = nm->compile_id();
1683 uint comp_level = nm->comp_level();
1684 Method* method = nm->method();
1685 if (!AOTCacheAccess::can_generate_aot_code(method)) {
1686 ResourceMark rm;
1687 log_info(aot, codecache, nmethod)("%d (L%d): Skip method '%s' for AOT%s compile: not in AOT cache", comp_id, (int)comp_level, method->name_and_sig_as_C_string(), (for_preload ? " preload" : ""));
1688 assert(AOTCacheAccess::can_generate_aot_code(method), "sanity");
1689 return nullptr;
1690 }
1691 InstanceKlass* holder = method->method_holder();
1692 bool builtin_loader = holder->class_loader_data()->is_builtin_class_loader_data();
1693 if (!builtin_loader) {
1694 ResourceMark rm;
1695 log_info(aot, codecache, nmethod)("%d (L%d): Skip method '%s' loaded by custom class loader %s", comp_id, (int)comp_level, method->name_and_sig_as_C_string(), holder->class_loader_data()->loader_name());
1696 assert(builtin_loader, "sanity");
1697 return nullptr;
1698 }
1699
1700 _for_preload = for_preload;
1701 _has_clinit_barriers = nm->has_clinit_barriers();
1702 assert(!_has_clinit_barriers || _for_preload, "only preload code has clinit barriers");
1703
1704 if (!align_write()) {
1705 return nullptr;
1706 }
1707
1708 uint entry_position = _write_position;
1709
1710 // Write name
1711 uint name_offset = 0;
1712 uint name_size = 0;
1713 uint id = 0;
1714 uint n;
1715 {
1716 ResourceMark rm;
1717 const char* name = method->name_and_sig_as_C_string();
1718 log_info(aot, codecache, nmethod)("%d (L%d): Writing nmethod '%s' (comp level: %d, %s) to AOT Code Cache",
1719 comp_id, (int)comp_level, name, comp_level,
1720 (nm->has_clinit_barriers() ? ", has clinit barriers" : ""));
1721
1722 LogStreamHandle(Info, aot, codecache, loader) log;
1723 if (log.is_enabled()) {
1724 oop loader = holder->class_loader();
1725 oop domain = holder->protection_domain();
1726 log.print("Holder: ");
1727 holder->print_value_on(&log);
1728 log.print(" loader: ");
1729 if (loader == nullptr) {
1730 log.print("nullptr");
1731 } else {
1732 loader->print_value_on(&log);
1733 }
1734 log.print(" domain: ");
1735 if (domain == nullptr) {
1736 log.print("nullptr");
1737 } else {
1738 domain->print_value_on(&log);
1739 }
1740 log.cr();
1741 }
1742 name_offset = _write_position - entry_position;
1743 name_size = (uint)strlen(name) + 1; // Includes '/0'
1744 n = write_bytes(name, name_size);
1745 if (n != name_size) {
1746 return nullptr;
1747 }
1748 }
1749 id = AOTCacheAccess::delta_from_base_address((address)nm->method());
1750
1751 // Write CodeBlob
1752 if (!cache->align_write()) {
1753 return nullptr;
1754 }
1755 uint blob_offset = cache->_write_position - entry_position;
1756 address archive_buffer = cache->reserve_bytes(nm->size());
1757 if (archive_buffer == nullptr) {
1758 return nullptr;
1759 }
1760 CodeBlob::archive_blob(nm, archive_buffer);
1761
1762 uint reloc_data_size = nm->relocation_size();
1763 n = write_bytes((address)nm->relocation_begin(), reloc_data_size);
1764 if (n != reloc_data_size) {
1765 return nullptr;
1766 }
1767
1768 // Write oops and metadata present in the nmethod's data region
1769 if (!write_oops(nm)) {
1770 if (lookup_failed() && !failed()) {
1771 // Skip this method and reposition file
1772 set_write_position(entry_position);
1773 }
1774 return nullptr;
1775 }
1776 if (!write_metadata(nm)) {
1777 if (lookup_failed() && !failed()) {
1778 // Skip this method and reposition file
1779 set_write_position(entry_position);
1780 }
1781 return nullptr;
1782 }
1783
1784 bool has_oop_maps = false;
1785 if (nm->oop_maps() != nullptr) {
1786 if (!cache->write_oop_map_set(*nm)) {
1787 return nullptr;
1788 }
1789 has_oop_maps = true;
1790 }
1791
1792 uint immutable_data_size = nm->immutable_data_size();
1793 n = write_bytes(nm->immutable_data_begin(), immutable_data_size);
1794 if (n != immutable_data_size) {
1795 return nullptr;
1796 }
1797
1798 JavaThread* thread = JavaThread::current();
1799 HandleMark hm(thread);
1800 GrowableArray<Handle> oop_list;
1801 GrowableArray<Metadata*> metadata_list;
1802
1803 nm->create_reloc_immediates_list(thread, oop_list, metadata_list);
1804 if (!write_nmethod_reloc_immediates(oop_list, metadata_list)) {
1805 if (lookup_failed() && !failed()) {
1806 // Skip this method and reposition file
1807 set_write_position(entry_position);
1808 }
1809 return nullptr;
1810 }
1811
1812 if (!write_relocations(*nm, &oop_list, &metadata_list)) {
1813 return nullptr;
1814 }
1815
1816 #ifndef PRODUCT
1817 if (!cache->write_asm_remarks(nm->asm_remarks(), /* use_string_table */ false)) {
1818 return nullptr;
1819 }
1820 if (!cache->write_dbg_strings(nm->dbg_strings(), /* use_string_table */ false)) {
1821 return nullptr;
1822 }
1823 #endif /* PRODUCT */
1824
1825 uint entry_size = _write_position - entry_position;
1826 AOTCodeEntry* entry = new (this) AOTCodeEntry(AOTCodeEntry::Nmethod, id,
1827 entry_position, entry_size,
1828 name_offset, name_size,
1829 blob_offset, has_oop_maps,
1830 comp_level, comp_id,
1831 nm->has_clinit_barriers(), for_preload);
1832 {
1833 ResourceMark rm;
1834 const char* name = nm->method()->name_and_sig_as_C_string();
1835 log_info(aot, codecache, nmethod)("%d (L%d): Wrote nmethod '%s'%s to AOT Code Cache",
1836 comp_id, (int)comp_level, name, (for_preload ? " (for preload)" : ""));
1837 }
1838 if (VerifyAOTCode) {
1839 return nullptr;
1840 }
1841 return entry;
1842 }
1843
// Install an AOT-compiled nmethod for the compile task attached to 'env'.
// The task's AOTCodeEntry must already be selected. On failure the entry is
// marked load-failed and not-entrant so it is not attempted again.
bool AOTCodeCache::load_nmethod(ciEnv* env, ciMethod* target, int entry_bci, AbstractCompiler* compiler, CompLevel comp_level) {
  if (!is_using_code()) {
    return false;
  }
  AOTCodeCache* cache = open_for_use();
  if (cache == nullptr) {
    return false;
  }
  // Only normal (non-OSR) entries can be AOT-loaded.
  assert(entry_bci == InvocationEntryBci, "unexpected entry_bci=%d", entry_bci);
  TraceTime t1("Total time to load AOT code", &_t_totalLoad, enable_timers(), false);
  CompileTask* task = env->task();
  task->mark_aot_load_start(os::elapsed_counter());
  AOTCodeEntry* entry = task->aot_code_entry();
  bool preload = task->preload();
  assert(entry != nullptr, "sanity");
  if (log_is_enabled(Info, aot, codecache, nmethod)) {
    VM_ENTRY_MARK;
    ResourceMark rm;
    methodHandle method(THREAD, target->get_Method());
    const char* target_name = method->name_and_sig_as_C_string();
    uint id = AOTCacheAccess::convert_method_to_offset(method());
    bool clinit_brs = entry->has_clinit_barriers();
    log_info(aot, codecache, nmethod)("%d (L%d): %s nmethod '%s' (id: " UINT32_FORMAT_X_0 "%s)",
                                      task->compile_id(), task->comp_level(), (preload ? "Preloading" : "Reading"),
                                      target_name, id, (clinit_brs ? ", has clinit barriers" : ""));
  }
  // ReadingMark guards the read against the cache being closed concurrently
  // (see the failed() check below).
  ReadingMark rdmk;
  if (rdmk.failed()) {
    // Cache is closed, cannot touch anything.
    return false;
  }

  AOTCodeReader reader(cache, entry, task);
  bool success = reader.compile_nmethod(env, target, compiler);
  if (success) {
    task->set_num_inlined_bytecodes(entry->num_inlined_bytecodes());
  } else {
    // Mark the entry so this nmethod is never retried from the cache.
    entry->set_load_fail();
    entry->set_not_entrant();
  }
  task->mark_aot_load_finish(os::elapsed_counter());
  return success;
}
1887
// Reconstruct an nmethod from its archived image: reads the archived blob,
// relocation data, oop/metadata lists, oop maps, immutable data and
// reloc-immediate lists in the exact order AOTCodeCache::write_nmethod()
// stored them, then lets ciEnv::register_aot_method() create and install
// the real nmethod. Returns the task's success status.
bool AOTCodeReader::compile_nmethod(ciEnv* env, ciMethod* target, AbstractCompiler* compiler) {
  CompileTask* task = env->task();
  AOTCodeEntry* aot_code_entry = (AOTCodeEntry*)_entry;
  nmethod* nm = nullptr;

  uint entry_position = aot_code_entry->offset();
  uint archived_nm_offset = entry_position + aot_code_entry->code_offset();
  nmethod* archived_nm = (nmethod*)addr(archived_nm_offset);
  // Skip past the archived blob itself; subsequent reads follow it.
  set_read_position(archived_nm_offset + archived_nm->size());

  OopRecorder* oop_recorder = new OopRecorder(env->arena());
  env->set_oop_recorder(oop_recorder);

  uint offset;

  // Relocation data immediately follows the archived blob.
  offset = read_position();
  address reloc_data = (address)addr(offset);
  offset += archived_nm->relocation_size();
  set_read_position(offset);

  // Read oops and metadata
  VM_ENTRY_MARK
  GrowableArray<Handle> oop_list;
  GrowableArray<Metadata*> metadata_list;

  if (!read_oop_metadata_list(THREAD, target, oop_list, metadata_list, oop_recorder)) {
    return false;
  }

  ImmutableOopMapSet* oopmaps = read_oop_map_set();

  offset = read_position();
  address immutable_data = (address)addr(offset);
  offset += archived_nm->immutable_data_size();
  set_read_position(offset);

  // Oops/metadata referenced as immediates from the code; these are read
  // without registering them in the oop recorder (nullptr recorder).
  GrowableArray<Handle> reloc_immediate_oop_list;
  GrowableArray<Metadata*> reloc_immediate_metadata_list;
  if (!read_oop_metadata_list(THREAD, target, reloc_immediate_oop_list, reloc_immediate_metadata_list, nullptr)) {
    return false;
  }

  // Read Dependencies (compressed already)
  Dependencies* dependencies = new Dependencies(env);
  dependencies->set_content(immutable_data, archived_nm->dependencies_size());
  env->set_dependencies(dependencies);

  const char* name = addr(entry_position + aot_code_entry->name_offset());

  if (VerifyAOTCode) {
    // NOTE(review): in verification mode the cached nmethod is not
    // installed -- presumably so a fresh compile can be compared against
    // the cached version; confirm intent.
    return false;
  }

  TraceTime t1("Total time to register AOT nmethod", &_t_totalRegister, enable_timers(), false);
  nm = env->register_aot_method(THREAD,
                                target,
                                compiler,
                                archived_nm,
                                reloc_data,
                                oop_list,
                                metadata_list,
                                oopmaps,
                                immutable_data,
                                reloc_immediate_oop_list,
                                reloc_immediate_metadata_list,
                                this);
  bool success = task->is_success();
  if (success) {
    log_info(aot, codecache, nmethod)("%d (L%d): Read nmethod '%s' from AOT Code Cache", compile_id(), comp_level(), name);
#ifdef ASSERT
    LogStreamHandle(Debug, aot, codecache, nmethod) log;
    if (log.is_enabled()) {
      FlagSetting fs(PrintRelocations, true);
      nm->print_on(&log);
      nm->decode2(&log);
    }
#endif
  }

  return success;
}
1969
1970 bool skip_preload(methodHandle mh) {
1971 if (!mh->method_holder()->is_loaded()) {
1972 return true;
1973 }
1974 DirectiveSet* directives = DirectivesStack::getMatchingDirective(mh, nullptr);
1975 if (directives->DontPreloadOption || directives->ExcludeOption) {
1976 LogStreamHandle(Info, aot, codecache, init) log;
1977 if (log.is_enabled()) {
1978 log.print("Exclude preloading code for ");
1979 mh->print_value_on(&log);
1980 }
1981 return true;
1982 }
1983 return false;
1984 }
1985
1986 void AOTCodeCache::preload_code(JavaThread* thread) {
1987 if (!is_using_code()) {
1988 return;
1989 }
1990 AbstractCompiler* comp = CompileBroker::compiler(CompLevel_full_optimization);
1991 if (comp == nullptr) {
1992 log_debug(aot, codecache, init)("AOT preload code skipped: C2 compiler disabled");
1993 return;
1994 }
1995
1996 if ((DisableAOTCode & (1 << 3)) != 0) {
1997 return; // no preloaded code (level 5);
1998 }
1999 _cache->preload_aot_code(thread);
2000 }
2001
2002 void AOTCodeCache::preload_aot_code(TRAPS) {
2003 if (CompilationPolicy::compiler_count(CompLevel_full_optimization) == 0) {
2004 // Since we reuse the CompilerBroker API to install AOT code, we're required to have a JIT compiler for the
2005 // level we want (that is CompLevel_full_optimization).
2006 return;
2007 }
2008 TraceTime t1("Total time to preload AOT code", &_t_totalPreload, enable_timers(), false);
2009 assert(_for_use, "sanity");
2010 uint count = _load_header->entries_count();
2011 uint preload_entries_count = _load_header->preload_entries_count();
2012 if (preload_entries_count > 0) {
2013 log_info(aot, codecache, init)("Load %d preload entries from AOT Code Cache", preload_entries_count);
2014 AOTCodeEntry* preload_entry = (AOTCodeEntry*)addr(_load_header->preload_entries_offset());
2015 uint count = MIN2(preload_entries_count, AOTCodePreloadStop);
2016 for (uint i = AOTCodePreloadStart; i < count; i++) {
2017 AOTCodeEntry* entry = &preload_entry[i];
2018 if (entry->not_entrant()) {
2019 continue;
2020 }
2021 methodHandle mh(THREAD, entry->method());
2022 assert((mh.not_null() && AOTMetaspace::in_aot_cache((address)mh())), "sanity");
2023 if (skip_preload(mh)) {
2024 continue; // Exclude preloading for this method
2025 }
2026 assert(mh->method_holder()->is_loaded(), "");
2027 if (!mh->method_holder()->is_linked()) {
2028 ResourceMark rm;
2029 log_debug(aot, codecache, init)("Preload AOT code for %s skipped: method holder is not linked",
2030 mh->name_and_sig_as_C_string());
2031 continue; // skip
2032 }
2033 CompileBroker::preload_aot_method(mh, entry, CHECK);
2034 }
2035 }
2036 }
2037
2038 // ------------ process code and data --------------
2039
// Cannot use -1: it is a valid value, denoting a jump to its own address
// as used by the static call stub; see NativeJump::jump_destination().
2042 #define BAD_ADDRESS_ID -2
2043
2044 bool AOTCodeCache::write_relocations(CodeBlob& code_blob, GrowableArray<Handle>* oop_list, GrowableArray<Metadata*>* metadata_list) {
2045 GrowableArray<uint> reloc_data;
2046 RelocIterator iter(&code_blob);
2047 LogStreamHandle(Trace, aot, codecache, reloc) log;
2048 while (iter.next()) {
2049 int idx = reloc_data.append(0); // default value
2050 switch (iter.type()) {
2051 case relocInfo::none:
2052 break;
2053 case relocInfo::oop_type: {
2054 oop_Relocation* r = (oop_Relocation*)iter.reloc();
2055 if (r->oop_is_immediate()) {
2056 assert(oop_list != nullptr, "sanity check");
2057 // store index of oop in the reloc immediate oop list
2058 Handle h(JavaThread::current(), r->oop_value());
2059 int oop_idx = oop_list->find(h);
2060 assert(oop_idx != -1, "sanity check");
2061 reloc_data.at_put(idx, (uint)oop_idx);
2062 }
2063 break;
2064 }
2065 case relocInfo::metadata_type: {
2066 metadata_Relocation* r = (metadata_Relocation*)iter.reloc();
2067 if (r->metadata_is_immediate()) {
2068 assert(metadata_list != nullptr, "sanity check");
2069 // store index of metadata in the reloc immediate metadata list
2070 int metadata_idx = metadata_list->find(r->metadata_value());
2071 assert(metadata_idx != -1, "sanity check");
2072 reloc_data.at_put(idx, (uint)metadata_idx);
2073 }
2074 break;
2075 }
2076 case relocInfo::virtual_call_type: // Fall through. They all call resolve_*_call blobs.
2077 case relocInfo::opt_virtual_call_type:
2078 case relocInfo::static_call_type: {
2079 CallRelocation* r = (CallRelocation*)iter.reloc();
2080 address dest = r->destination();
2081 if (dest == r->addr()) { // possible call via trampoline on Aarch64
2082 dest = (address)-1; // do nothing in this case when loading this relocation
2083 }
2084 int id = _table->id_for_address(dest, iter, &code_blob);
2085 if (id == BAD_ADDRESS_ID) {
2086 return false;
2087 }
2088 reloc_data.at_put(idx, id);
2089 break;
2090 }
2091 case relocInfo::trampoline_stub_type: {
2092 address dest = ((trampoline_stub_Relocation*)iter.reloc())->destination();
2093 int id = _table->id_for_address(dest, iter, &code_blob);
2094 if (id == BAD_ADDRESS_ID) {
2095 return false;
2096 }
2097 reloc_data.at_put(idx, id);
2098 break;
2099 }
2100 case relocInfo::static_stub_type:
2101 break;
2102 case relocInfo::runtime_call_type: {
2103 // Record offset of runtime destination
2104 CallRelocation* r = (CallRelocation*)iter.reloc();
2105 address dest = r->destination();
2106 if (dest == r->addr()) { // possible call via trampoline on Aarch64
2107 dest = (address)-1; // do nothing in this case when loading this relocation
2108 }
2109 int id = _table->id_for_address(dest, iter, &code_blob);
2110 if (id == BAD_ADDRESS_ID) {
2111 return false;
2112 }
2113 reloc_data.at_put(idx, id);
2114 break;
2115 }
2116 case relocInfo::runtime_call_w_cp_type:
2117 log_debug(aot, codecache, reloc)("runtime_call_w_cp_type relocation is not implemented");
2118 return false;
2119 case relocInfo::external_word_type: {
2120 // Record offset of runtime target
2121 address target = ((external_word_Relocation*)iter.reloc())->target();
2122 int id = _table->id_for_address(target, iter, &code_blob);
2123 if (id == BAD_ADDRESS_ID) {
2124 return false;
2125 }
2126 reloc_data.at_put(idx, id);
2127 break;
2128 }
2129 case relocInfo::internal_word_type: {
2130 address target = ((internal_word_Relocation*)iter.reloc())->target();
2131 // assert to make sure that delta fits into 32 bits
2132 assert(CodeCache::contains((void *)target), "Wrong internal_word_type relocation");
2133 uint delta = (uint)(target - code_blob.content_begin());
2134 reloc_data.at_put(idx, delta);
2135 break;
2136 }
2137 case relocInfo::section_word_type: {
2138 address target = ((section_word_Relocation*)iter.reloc())->target();
2139 assert(CodeCache::contains((void *)target), "Wrong section_word_type relocation");
2140 uint delta = (uint)(target - code_blob.content_begin());
2141 reloc_data.at_put(idx, delta);
2142 break;
2143 }
2144 case relocInfo::poll_type:
2145 break;
2146 case relocInfo::poll_return_type:
2147 break;
2148 case relocInfo::post_call_nop_type:
2149 break;
2150 case relocInfo::entry_guard_type:
2151 break;
2152 default:
2153 log_debug(aot, codecache, reloc)("relocation %d unimplemented", (int)iter.type());
2154 return false;
2155 break;
2156 }
2157 if (log.is_enabled()) {
2158 iter.print_current_on(&log);
2159 }
2160 }
2161
2162 // Write additional relocation data: uint per relocation
2163 // Write the count first
2164 int count = reloc_data.length();
2165 write_bytes(&count, sizeof(int));
2166 for (GrowableArrayIterator<uint> iter = reloc_data.begin();
2167 iter != reloc_data.end(); ++iter) {
2168 uint value = *iter;
2169 int n = write_bytes(&value, sizeof(uint));
2170 if (n != sizeof(uint)) {
2171 return false;
2172 }
2173 }
2174 return true;
2175 }
2176
// Patch a freshly loaded code blob's relocations using the per-relocation
// fix-up data written by AOTCodeCache::write_relocations(). Reads the count
// and the uint array from the cache, then walks the blob's relocations in
// the same order the writer did, consuming one fix-up word per relocation.
void AOTCodeReader::fix_relocations(CodeBlob* code_blob, GrowableArray<Handle>* oop_list, GrowableArray<Metadata*>* metadata_list) {
  // NOTE(review): the writer logs under 'aot, codecache, reloc'; this uses
  // 'aot, reloc' -- confirm whether the tag sets are meant to match.
  LogStreamHandle(Trace, aot, reloc) log;
  uint offset = read_position();
  int count = *(int*)addr(offset);
  offset += sizeof(int);
  if (log.is_enabled()) {
    log.print_cr("======== extra relocations count=%d", count);
  }
  uint* reloc_data = (uint*)addr(offset);
  offset += (count * sizeof(uint));
  set_read_position(offset);

  RelocIterator iter(code_blob);
  int j = 0; // index into reloc_data, advanced once per relocation
  while (iter.next()) {
    switch (iter.type()) {
      case relocInfo::none:
        break;
      case relocInfo::oop_type: {
        assert(code_blob->is_nmethod(), "sanity check");
        oop_Relocation* r = (oop_Relocation*)iter.reloc();
        if (r->oop_is_immediate()) {
          // Immediate oop: restore it from the reloc-immediate oop list.
          assert(oop_list != nullptr, "sanity check");
          Handle h = oop_list->at(reloc_data[j]);
          r->set_value(cast_from_oop<address>(h()));
        } else {
          r->fix_oop_relocation();
        }
        break;
      }
      case relocInfo::metadata_type: {
        assert(code_blob->is_nmethod(), "sanity check");
        metadata_Relocation* r = (metadata_Relocation*)iter.reloc();
        Metadata* m;
        if (r->metadata_is_immediate()) {
          assert(metadata_list != nullptr, "sanity check");
          m = metadata_list->at(reloc_data[j]);
        } else {
          // Get already updated value from nmethod.
          int index = r->metadata_index();
          m = code_blob->as_nmethod()->metadata_at(index);
        }
        r->set_value((address)m);
        break;
      }
      case relocInfo::virtual_call_type: // Fall through. They all call resolve_*_call blobs.
      case relocInfo::opt_virtual_call_type:
      case relocInfo::static_call_type: {
        // -1 means "leave as archived" (writer recorded it for calls via
        // trampoline, see write_relocations).
        address dest = _cache->address_for_id(reloc_data[j]);
        if (dest != (address)-1) {
          ((CallRelocation*)iter.reloc())->set_destination(dest);
        }
        break;
      }
      case relocInfo::trampoline_stub_type: {
        address dest = _cache->address_for_id(reloc_data[j]);
        if (dest != (address)-1) {
          ((trampoline_stub_Relocation*)iter.reloc())->set_destination(dest);
        }
        break;
      }
      case relocInfo::static_stub_type:
        break;
      case relocInfo::runtime_call_type: {
        address dest = _cache->address_for_id(reloc_data[j]);
        if (dest != (address)-1) {
          ((CallRelocation*)iter.reloc())->set_destination(dest);
        }
        break;
      }
      case relocInfo::runtime_call_w_cp_type:
        // this relocation should not be in cache (see write_relocations)
        assert(false, "runtime_call_w_cp_type relocation is not implemented");
        break;
      case relocInfo::external_word_type: {
        address target = _cache->address_for_id(reloc_data[j]);
        // Add external address to global table
        int index = ExternalsRecorder::find_index(target);
        // Update index in relocation
        Relocation::add_jint(iter.data(), index);
        external_word_Relocation* reloc = (external_word_Relocation*)iter.reloc();
        assert(reloc->target() == target, "sanity");
        reloc->set_value(target); // Patch address in the code
        break;
      }
      case relocInfo::internal_word_type: {
        // Archived as a delta from the blob's content start; rebase here.
        uint delta = reloc_data[j];
        internal_word_Relocation* r = (internal_word_Relocation*)iter.reloc();
        r->fix_relocation_after_aot_load(code_blob->content_begin(), delta);
        break;
      }
      case relocInfo::section_word_type: {
        uint delta = reloc_data[j];
        section_word_Relocation* r = (section_word_Relocation*)iter.reloc();
        r->fix_relocation_after_aot_load(code_blob->content_begin(), delta);
        break;
      }
      case relocInfo::poll_type:
        break;
      case relocInfo::poll_return_type:
        break;
      case relocInfo::post_call_nop_type:
        break;
      case relocInfo::entry_guard_type:
        break;
      default:
        assert(false,"relocation %d unimplemented", (int)iter.type());
        break;
    }
    if (log.is_enabled()) {
      iter.print_current_on(&log);
    }
    j++;
  }
  assert(j == count, "sanity");
}
2293
2294 bool AOTCodeCache::write_nmethod_reloc_immediates(GrowableArray<Handle>& oop_list, GrowableArray<Metadata*>& metadata_list) {
2295 int count = oop_list.length();
2296 if (!write_bytes(&count, sizeof(int))) {
2297 return false;
2298 }
2299 for (GrowableArrayIterator<Handle> iter = oop_list.begin();
2300 iter != oop_list.end(); ++iter) {
2301 Handle h = *iter;
2302 if (!write_oop(h())) {
2303 return false;
2304 }
2305 }
2306
2307 count = metadata_list.length();
2308 if (!write_bytes(&count, sizeof(int))) {
2309 return false;
2310 }
2311 for (GrowableArrayIterator<Metadata*> iter = metadata_list.begin();
2312 iter != metadata_list.end(); ++iter) {
2313 Metadata* m = *iter;
2314 if (!write_metadata(m)) {
2315 return false;
2316 }
2317 }
2318 return true;
2319 }
2320
2321 bool AOTCodeCache::write_metadata(nmethod* nm) {
2322 int count = nm->metadata_count()-1;
2323 if (!write_bytes(&count, sizeof(int))) {
2324 return false;
2325 }
2326 for (Metadata** p = nm->metadata_begin(); p < nm->metadata_end(); p++) {
2327 if (!write_metadata(*p)) {
2328 return false;
2329 }
2330 }
2331 return true;
2332 }
2333
// Serialize a single metadata reference. A DataKind tag is written first;
// klasses and methods are written via write_klass()/write_method(), and
// MethodCounters are written as their owning Method (the reader re-creates
// the counters, see AOTCodeReader::read_metadata).
bool AOTCodeCache::write_metadata(Metadata* m) {
  uint n = 0;
  if (m == nullptr) {
    DataKind kind = DataKind::Null;
    n = write_bytes(&kind, sizeof(int));
    if (n != sizeof(int)) {
      return false;
    }
  } else if (m == (Metadata*)Universe::non_oop_word()) {
    // Placeholder (non-oop word) value; archived as No_Data.
    DataKind kind = DataKind::No_Data;
    n = write_bytes(&kind, sizeof(int));
    if (n != sizeof(int)) {
      return false;
    }
  } else if (m->is_klass()) {
    if (!write_klass((Klass*)m)) {
      return false;
    }
  } else if (m->is_method()) {
    if (!write_method((Method*)m)) {
      return false;
    }
  } else if (m->is_methodCounters()) {
    DataKind kind = DataKind::MethodCnts;
    n = write_bytes(&kind, sizeof(int));
    if (n != sizeof(int)) {
      return false;
    }
    // The counters themselves are not archived; only the owning method is.
    if (!write_method(((MethodCounters*)m)->method())) {
      return false;
    }
    log_debug(aot, codecache, metadata)("%d (L%d): Write MethodCounters : " INTPTR_FORMAT, compile_id(), comp_level(), p2i(m));
  } else { // Not supported
    fatal("metadata : " INTPTR_FORMAT " unimplemented", p2i(m));
    return false; // Unreachable: fatal() does not return.
  }
  return true;
}
2372
// Deserialize a single metadata reference written by
// AOTCodeCache::write_metadata(). Returns nullptr when the value cannot be
// restored (lookup-failed is set, except for an archived DataKind::Null).
Metadata* AOTCodeReader::read_metadata(const methodHandle& comp_method) {
  uint code_offset = read_position();
  Metadata* m = nullptr;
  DataKind kind = *(DataKind*)addr(code_offset);
  code_offset += sizeof(DataKind);
  set_read_position(code_offset);
  if (kind == DataKind::Null) {
    m = (Metadata*)nullptr;
  } else if (kind == DataKind::No_Data) {
    m = (Metadata*)Universe::non_oop_word();
  } else if (kind == DataKind::Klass) {
    m = (Metadata*)read_klass(comp_method);
  } else if (kind == DataKind::Method) {
    m = (Metadata*)read_method(comp_method);
  } else if (kind == DataKind::MethodCnts) {
    // Consume the inner DataKind tag that write_method() emitted before the
    // method offset.
    kind = *(DataKind*)addr(code_offset);
    code_offset += sizeof(DataKind);
    set_read_position(code_offset);
    m = (Metadata*)read_method(comp_method);
    if (m != nullptr) {
      Method* method = (Method*)m;
      // Re-create (or fetch) the counters for the restored method.
      m = method->get_method_counters(Thread::current());
      if (m == nullptr) {
        set_lookup_failed();
        log_debug(aot, codecache, metadata)("%d (L%d): Failed to get MethodCounters", compile_id(), comp_level());
      } else {
        log_debug(aot, codecache, metadata)("%d (L%d): Read MethodCounters : " INTPTR_FORMAT, compile_id(), comp_level(), p2i(m));
      }
    }
  } else {
    set_lookup_failed();
    log_debug(aot, codecache, metadata)("%d (L%d): Unknown metadata's kind: %d", compile_id(), comp_level(), (int)kind);
  }
  return m;
}
2408
// Archive a method reference as DataKind::Method followed by the method's
// offset from the AOT cache base address. Fails (and sets lookup-failed)
// for methods that are not in the AOT cache.
bool AOTCodeCache::write_method(Method* method) {
  ResourceMark rm; // For the name_and_sig_as_C_string() calls below
  if (AOTCacheAccess::can_generate_aot_code(method)) {
    DataKind kind = DataKind::Method;
    uint n = write_bytes(&kind, sizeof(int));
    if (n != sizeof(int)) {
      return false;
    }
    uint method_offset = AOTCacheAccess::delta_from_base_address((address)method);
    n = write_bytes(&method_offset, sizeof(uint));
    if (n != sizeof(uint)) {
      return false;
    }
    log_debug(aot, codecache, metadata)("%d (L%d): Wrote method: %s @ 0x%08x",
                                        compile_id(), comp_level(), method->name_and_sig_as_C_string(), method_offset);
    return true;
  }
  log_debug(aot, codecache, metadata)("%d (L%d): Method is not archived: %s",
                                      compile_id(), comp_level(), method->name_and_sig_as_C_string());
  set_lookup_failed();
  return false;
}
2431
// Restore a Method* from its archived offset. Validates that the method and
// its holder are still in the AOT cache and that the holder is a loaded,
// linked instance klass; otherwise sets lookup-failed and returns nullptr.
Method* AOTCodeReader::read_method(const methodHandle& comp_method) {
  uint code_offset = read_position();
  uint method_offset = *(uint*)addr(code_offset);
  code_offset += sizeof(uint);
  set_read_position(code_offset);
  Method* m = AOTCacheAccess::convert_offset_to_method(method_offset);
  if (!AOTMetaspace::in_aot_cache((address)m)) {
    // Something changed in CDS
    set_lookup_failed();
    log_debug(aot, codecache, metadata)("Lookup failed for shared method: " INTPTR_FORMAT " is not in CDS ", p2i((address)m));
    return nullptr;
  }
  assert(m->is_method(), "sanity");
  ResourceMark rm;
  Klass* k = m->method_holder();
  if (!k->is_instance_klass()) {
    set_lookup_failed();
    log_debug(aot, codecache, metadata)("%d '%s' (L%d): Lookup failed for holder %s: not instance klass",
                                        compile_id(), comp_method->name_and_sig_as_C_string(), comp_level(), k->external_name());
    return nullptr;
  } else if (!AOTMetaspace::in_aot_cache((address)k)) {
    set_lookup_failed();
    log_debug(aot, codecache, metadata)("%d '%s' (L%d): Lookup failed for holder %s: not in CDS",
                                        compile_id(), comp_method->name_and_sig_as_C_string(), comp_level(), k->external_name());
    return nullptr;
  } else if (!InstanceKlass::cast(k)->is_loaded()) {
    set_lookup_failed();
    log_debug(aot, codecache, metadata)("%d '%s' (L%d): Lookup failed for holder %s: not loaded",
                                        compile_id(), comp_method->name_and_sig_as_C_string(), comp_level(), k->external_name());
    return nullptr;
  } else if (!InstanceKlass::cast(k)->is_linked()) {
    set_lookup_failed();
    log_debug(aot, codecache, metadata)("%d '%s' (L%d): Lookup failed for holder %s: not linked%s",
                                        compile_id(), comp_method->name_and_sig_as_C_string(), comp_level(), k->external_name(), (_preload ? " for code preload" : ""));
    return nullptr;
  }
  log_debug(aot, codecache, metadata)("%d (L%d): Shared method lookup: %s",
                                      compile_id(), comp_level(), m->name_and_sig_as_C_string());
  return m;
}
2472
2473 bool AOTCodeCache::write_klass(Klass* klass) {
2474 uint array_dim = 0;
2475 if (klass->is_objArray_klass()) {
2476 array_dim = ObjArrayKlass::cast(klass)->dimension();
2477 klass = ObjArrayKlass::cast(klass)->bottom_klass(); // overwrites klass
2478 }
2479 uint init_state = 0;
2480 bool can_write = true;
2481 if (klass->is_instance_klass()) {
2482 InstanceKlass* ik = InstanceKlass::cast(klass);
2483 init_state = (ik->is_initialized() ? 1 : 0);
2484 can_write = AOTCacheAccess::can_generate_aot_code_for(ik);
2485 } else {
2486 can_write = AOTCacheAccess::can_generate_aot_code(klass);
2487 }
2488 ResourceMark rm;
2489 uint state = (array_dim << 1) | (init_state & 1);
2490 if (can_write) {
2491 DataKind kind = DataKind::Klass;
2492 uint n = write_bytes(&kind, sizeof(int));
2493 if (n != sizeof(int)) {
2494 return false;
2495 }
2496 // Record state of instance klass initialization and array dimentions.
2497 n = write_bytes(&state, sizeof(int));
2498 if (n != sizeof(int)) {
2499 return false;
2500 }
2501 uint klass_offset = AOTCacheAccess::delta_from_base_address((address)klass);
2502 n = write_bytes(&klass_offset, sizeof(uint));
2503 if (n != sizeof(uint)) {
2504 return false;
2505 }
2506 log_debug(aot, codecache, metadata)("%d (L%d): Registered klass: %s%s%s @ 0x%08x",
2507 compile_id(), comp_level(), klass->external_name(),
2508 (!klass->is_instance_klass() ? "" : (init_state == 1 ? " (initialized)" : " (not-initialized)")),
2509 (array_dim > 0 ? " (object array)" : ""), klass_offset);
2510 return true;
2511 }
2512 log_debug(aot, codecache, metadata)("%d (L%d): Klassis not archived: %s%s%s",
2513 compile_id(), comp_level(), klass->external_name(),
2514 (!klass->is_instance_klass() ? "" : (init_state == 1 ? " (initialized)" : " (not-initialized)")),
2515 (array_dim > 0 ? " (object array)" : ""));
2516 set_lookup_failed();
2517 return false;
2518 }
2519
2520 Klass* AOTCodeReader::read_klass(const methodHandle& comp_method) {
2521 uint code_offset = read_position();
2522 uint state = *(uint*)addr(code_offset);
2523 uint init_state = (state & 1);
2524 uint array_dim = (state >> 1);
2525 code_offset += sizeof(int);
2526 uint klass_offset = *(uint*)addr(code_offset);
2527 code_offset += sizeof(uint);
2528 set_read_position(code_offset);
2529 Klass* k = AOTCacheAccess::convert_offset_to_klass(klass_offset);
2530 if (!AOTMetaspace::in_aot_cache((address)k)) {
2531 // Something changed in CDS
2532 set_lookup_failed();
2533 log_debug(aot, codecache, metadata)("Lookup failed for shared klass: " INTPTR_FORMAT " is not in CDS ", p2i((address)k));
2534 return nullptr;
2535 }
2536 assert(k->is_klass(), "sanity");
2537 ResourceMark rm;
2538 if (k->is_instance_klass() && !InstanceKlass::cast(k)->is_loaded()) {
2539 set_lookup_failed();
2540 log_debug(aot, codecache, metadata)("%d '%s' (L%d): Lookup failed for klass %s: not loaded",
2541 compile_id(), comp_method->name_and_sig_as_C_string(), comp_level(), k->external_name());
2542 return nullptr;
2543 } else
2544 // Allow not initialized klass which was uninitialized during code caching or for preload
2545 if (k->is_instance_klass() && !InstanceKlass::cast(k)->is_initialized() && (init_state == 1) && !_preload) {
2546 set_lookup_failed();
2547 log_debug(aot, codecache, metadata)("%d '%s' (L%d): Lookup failed for klass %s: not initialized",
2548 compile_id(), comp_method->name_and_sig_as_C_string(), comp_level(), k->external_name());
2549 return nullptr;
2550 }
2551 if (array_dim > 0) {
2552 assert(k->is_instance_klass() || k->is_typeArray_klass(), "sanity check");
2553 Klass* ak = k->array_klass_or_null(array_dim);
2554 // FIXME: what would it take to create an array class on the fly?
2555 // Klass* ak = k->array_klass(dim, JavaThread::current());
2556 // guarantee(JavaThread::current()->pending_exception() == nullptr, "");
2557 if (ak == nullptr) {
2558 set_lookup_failed();
2559 log_debug(aot, codecache, metadata)("%d (L%d): %d-dimension array klass lookup failed: %s",
2560 compile_id(), comp_level(), array_dim, k->external_name());
2561 }
2562 log_debug(aot, codecache, metadata)("%d (L%d): Klass lookup: %s (object array)", compile_id(), comp_level(), k->external_name());
2563 return ak;
2564 } else {
2565 log_debug(aot, codecache, metadata)("%d (L%d): Shared klass lookup: %s",
2566 compile_id(), comp_level(), k->external_name());
2567 return k;
2568 }
2569 }
2570
2571 bool AOTCodeCache::write_oop(jobject& jo) {
2572 oop obj = JNIHandles::resolve(jo);
2573 return write_oop(obj);
2574 }
2575
// Write one oop into the cache stream, tagged with a DataKind describing how
// it must be reconstructed at load time. Supported oops: nullptr, the
// non-oop sentinel word, class mirrors (primitive or Klass-backed), archived
// Strings, the system/platform class loaders, and archived "permanent" heap
// objects (recorded by their permanent index). Anything else fails the
// lookup and returns false.
bool AOTCodeCache::write_oop(oop obj) {
  DataKind kind;
  uint n = 0;
  if (obj == nullptr) {
    kind = DataKind::Null;
    n = write_bytes(&kind, sizeof(int));
    if (n != sizeof(int)) {
      return false;
    }
  } else if (cast_from_oop<void *>(obj) == Universe::non_oop_word()) {
    kind = DataKind::No_Data;
    n = write_bytes(&kind, sizeof(int));
    if (n != sizeof(int)) {
      return false;
    }
  } else if (java_lang_Class::is_instance(obj)) {
    if (java_lang_Class::is_primitive(obj)) {
      // Primitive mirror: record just the BasicType; the reader re-derives
      // the mirror from it.
      int bt = (int)java_lang_Class::primitive_type(obj);
      kind = DataKind::Primitive;
      n = write_bytes(&kind, sizeof(int));
      if (n != sizeof(int)) {
        return false;
      }
      n = write_bytes(&bt, sizeof(int));
      if (n != sizeof(int)) {
        return false;
      }
      log_debug(aot, codecache, oops)("%d (L%d): Write primitive type klass: %s", compile_id(), comp_level(), type2name((BasicType)bt));
    } else {
      // Non-primitive mirror: record the Klass; the reader uses its java_mirror().
      Klass* klass = java_lang_Class::as_Klass(obj);
      if (!write_klass(klass)) {
        return false;
      }
    }
  } else if (java_lang_String::is_instance(obj)) {
    int k = AOTCacheAccess::get_archived_object_permanent_index(obj); // k >= 0 means obj is a "permanent heap object"
    ResourceMark rm;
    size_t length_sz = 0;
    const char* string = java_lang_String::as_utf8_string(obj, length_sz);
    if (k >= 0) {
      kind = DataKind::String;
      n = write_bytes(&kind, sizeof(int));
      if (n != sizeof(int)) {
        return false;
      }
      n = write_bytes(&k, sizeof(int));
      if (n != sizeof(int)) {
        return false;
      }
      log_debug(aot, codecache, oops)("%d (L%d): Write String object: " PTR_FORMAT " : %s", compile_id(), comp_level(), p2i(obj), string);
      return true;
    }
    // Not archived String object - bailout
    set_lookup_failed();
    log_debug(aot, codecache, oops)("%d (L%d): Not archived String object: " PTR_FORMAT " : %s",
                                    compile_id(), comp_level(), p2i(obj), string);
    return false;
  } else if (java_lang_Module::is_instance(obj)) {
    fatal("Module object unimplemented");
  } else if (java_lang_ClassLoader::is_instance(obj)) {
    // Only the two well-known loaders can be recorded; they are re-queried
    // from SystemDictionary at load time.
    if (obj == SystemDictionary::java_system_loader()) {
      kind = DataKind::SysLoader;
      log_debug(aot, codecache, oops)("%d (L%d): Write ClassLoader: java_system_loader", compile_id(), comp_level());
    } else if (obj == SystemDictionary::java_platform_loader()) {
      kind = DataKind::PlaLoader;
      log_debug(aot, codecache, oops)("%d (L%d): Write ClassLoader: java_platform_loader", compile_id(), comp_level());
    } else {
      ResourceMark rm;
      set_lookup_failed();
      log_debug(aot, codecache, oops)("%d (L%d): Not supported Class Loader: " PTR_FORMAT " : %s",
                                      compile_id(), comp_level(), p2i(obj), obj->klass()->external_name());
      return false;
    }
    n = write_bytes(&kind, sizeof(int));
    if (n != sizeof(int)) {
      return false;
    }
  } else {
    // Any other oop must be an archived "permanent" heap object, recorded by
    // its permanent index.
    ResourceMark rm;
    int k = AOTCacheAccess::get_archived_object_permanent_index(obj); // k >= 0 means obj is a "permanent heap object"
    if (k >= 0) {
      kind = DataKind::MH_Oop;
      n = write_bytes(&kind, sizeof(int));
      if (n != sizeof(int)) {
        return false;
      }
      n = write_bytes(&k, sizeof(int));
      if (n != sizeof(int)) {
        return false;
      }
      log_debug(aot, codecache, oops)("%d (L%d): Write MH object: " PTR_FORMAT " : %s",
                                      compile_id(), comp_level(), p2i(obj), obj->klass()->external_name());
      return true;
    }
    // Not archived Java object - bailout
    set_lookup_failed();
    log_debug(aot, codecache, oops)("%d (L%d): Not archived Java object: " PTR_FORMAT " : %s",
                                    compile_id(), comp_level(), p2i(obj), obj->klass()->external_name());
    return false;
  }
  return true;
}
2678
// Read one oop written by AOTCodeCache::write_oop, dispatching on the stored
// DataKind tag. Returns nullptr (with the lookup marked failed) if the oop
// cannot be reconstructed; note that DataKind::Null also returns nullptr but
// without failing the lookup.
oop AOTCodeReader::read_oop(JavaThread* thread, const methodHandle& comp_method) {
  uint code_offset = read_position();
  oop obj = nullptr;
  DataKind kind = *(DataKind*)addr(code_offset);
  code_offset += sizeof(DataKind);
  set_read_position(code_offset);
  if (kind == DataKind::Null) {
    return nullptr;
  } else if (kind == DataKind::No_Data) {
    // The sentinel "non-oop" word used for uninitialized oop slots.
    return cast_to_oop(Universe::non_oop_word());
  } else if (kind == DataKind::Klass) {
    // A class mirror recorded via write_klass: re-read the Klass and use its mirror.
    Klass* k = read_klass(comp_method);
    if (k == nullptr) {
      return nullptr;
    }
    obj = k->java_mirror();
    if (obj == nullptr) {
      set_lookup_failed();
      log_debug(aot, codecache, oops)("Lookup failed for java_mirror of klass %s", k->external_name());
      return nullptr;
    }
  } else if (kind == DataKind::Primitive) {
    // A primitive class mirror, stored as its BasicType.
    code_offset = read_position();
    int t = *(int*)addr(code_offset);
    code_offset += sizeof(int);
    set_read_position(code_offset);
    BasicType bt = (BasicType)t;
    obj = java_lang_Class::primitive_mirror(bt);
    log_debug(aot, codecache, oops)("%d (L%d): Read primitive type klass: %s", compile_id(), comp_level(), type2name(bt));
  } else if (kind == DataKind::String) {
    // An archived String, stored as its permanent-object index.
    code_offset = read_position();
    int k = *(int*)addr(code_offset);
    code_offset += sizeof(int);
    set_read_position(code_offset);
    obj = AOTCacheAccess::get_archived_object(k);
    if (obj == nullptr) {
      set_lookup_failed();
      log_debug(aot, codecache, oops)("Lookup failed for String object");
      return nullptr;
    }
    assert(java_lang_String::is_instance(obj), "must be string");

    ResourceMark rm;
    size_t length_sz = 0;
    const char* string = java_lang_String::as_utf8_string(obj, length_sz);
    log_debug(aot, codecache, oops)("%d (L%d): Read String object: %s", compile_id(), comp_level(), string);
  } else if (kind == DataKind::SysLoader) {
    obj = SystemDictionary::java_system_loader();
    log_debug(aot, codecache, oops)("%d (L%d): Read java_system_loader", compile_id(), comp_level());
  } else if (kind == DataKind::PlaLoader) {
    obj = SystemDictionary::java_platform_loader();
    log_debug(aot, codecache, oops)("%d (L%d): Read java_platform_loader", compile_id(), comp_level());
  } else if (kind == DataKind::MH_Oop) {
    // Any other archived permanent heap object, stored as its permanent index.
    code_offset = read_position();
    int k = *(int*)addr(code_offset);
    code_offset += sizeof(int);
    set_read_position(code_offset);
    obj = AOTCacheAccess::get_archived_object(k);
    if (obj == nullptr) {
      set_lookup_failed();
      log_debug(aot, codecache, oops)("Lookup failed for MH object");
      return nullptr;
    }
    ResourceMark rm;
    log_debug(aot, codecache, oops)("%d (L%d): Read MH object: " PTR_FORMAT " : %s",
                                    compile_id(), comp_level(), p2i(obj), obj->klass()->external_name());
  } else {
    set_lookup_failed();
    log_debug(aot, codecache, oops)("%d (L%d): Unknown oop's kind: %d",
                                    compile_id(), comp_level(), (int)kind);
    return nullptr;
  }
  return obj;
}
2753
// Read the serialized oop list followed by the metadata list (each prefixed
// with an int count), appending results to oop_list/metadata_list and,
// when an OopRecorder is supplied, registering each entry with it.
// Returns false as soon as any individual lookup fails.
bool AOTCodeReader::read_oop_metadata_list(JavaThread* thread, ciMethod* target, GrowableArray<Handle> &oop_list, GrowableArray<Metadata*> &metadata_list, OopRecorder* oop_recorder) {
  methodHandle comp_method(JavaThread::current(), target->get_Method());
  // NOTE(review): `current` duplicates the `thread` parameter if callers
  // always pass the current thread — confirm, since Handle creation below
  // uses `thread` while read_oop receives `current`.
  JavaThread* current = JavaThread::current();
  uint offset = read_position();
  int count = *(int *)addr(offset);
  offset += sizeof(int);
  set_read_position(offset);
  for (int i = 0; i < count; i++) {
    oop obj = read_oop(current, comp_method);
    if (lookup_failed()) {
      return false;
    }
    Handle h(thread, obj);
    oop_list.append(h);
    if (oop_recorder != nullptr) {
      jobject jo = JNIHandles::make_local(thread, obj);
      // Real oops get their existing index; sentinels get a fresh slot.
      if (oop_recorder->is_real(jo)) {
        oop_recorder->find_index(jo);
      } else {
        oop_recorder->allocate_oop_index(jo);
      }
    }
    LogStreamHandle(Debug, aot, codecache, oops) log;
    if (log.is_enabled()) {
      log.print("%d: " INTPTR_FORMAT " ", i, p2i(obj));
      if (obj == Universe::non_oop_word()) {
        log.print("non-oop word");
      } else if (obj == nullptr) {
        log.print("nullptr-oop");
      } else {
        obj->print_value_on(&log);
      }
      log.cr();
    }
  }

  // Second section: the metadata entries, with their own count prefix.
  offset = read_position();
  count = *(int *)addr(offset);
  offset += sizeof(int);
  set_read_position(offset);
  for (int i = 0; i < count; i++) {
    Metadata* m = read_metadata(comp_method);
    if (lookup_failed()) {
      return false;
    }
    metadata_list.append(m);
    if (oop_recorder != nullptr) {
      if (oop_recorder->is_real(m)) {
        oop_recorder->find_index(m);
      } else {
        oop_recorder->allocate_metadata_index(m);
      }
    }
    LogTarget(Debug, aot, codecache, metadata) log;
    if (log.is_enabled()) {
      LogStream ls(log);
      ls.print("%d: " INTPTR_FORMAT " ", i, p2i(m));
      if (m == (Metadata*)Universe::non_oop_word()) {
        ls.print("non-metadata word");
      } else if (m == nullptr) {
        ls.print("nullptr-oop");
      } else {
        Metadata::print_value_on_maybe_null(&ls, m);
      }
      ls.cr();
    }
  }
  return true;
}
2823
2824 bool AOTCodeCache::write_oop_map_set(CodeBlob& cb) {
2825 ImmutableOopMapSet* oopmaps = cb.oop_maps();
2826 int oopmaps_size = oopmaps->nr_of_bytes();
2827 if (!write_bytes(&oopmaps_size, sizeof(int))) {
2828 return false;
2829 }
2830 uint n = write_bytes(oopmaps, oopmaps->nr_of_bytes());
2831 if (n != (uint)oopmaps->nr_of_bytes()) {
2832 return false;
2833 }
2834 return true;
2835 }
2836
2837 ImmutableOopMapSet* AOTCodeReader::read_oop_map_set() {
2838 uint offset = read_position();
2839 int size = *(int *)addr(offset);
2840 offset += sizeof(int);
2841 ImmutableOopMapSet* oopmaps = (ImmutableOopMapSet *)addr(offset);
2842 offset += size;
2843 set_read_position(offset);
2844 return oopmaps;
2845 }
2846
// Serialize the nmethod's oop table: a count prefix followed by each oop
// (via write_oop). Returns false on the first failed write.
bool AOTCodeCache::write_oops(nmethod* nm) {
  // NOTE(review): the recorded count is oops_count()-1 while the loop writes
  // every slot in [oops_begin, oops_end) — presumably the last slot is
  // excluded from the reader-visible count by convention; confirm against
  // the matching reader and nmethod layout.
  int count = nm->oops_count()-1;
  if (!write_bytes(&count, sizeof(int))) {
    return false;
  }
  for (oop* p = nm->oops_begin(); p < nm->oops_end(); p++) {
    if (!write_oop(*p)) {
      return false;
    }
  }
  return true;
}
2859
2860 #ifndef PRODUCT
2861 bool AOTCodeCache::write_asm_remarks(AsmRemarks& asm_remarks, bool use_string_table) {
2862 // Write asm remarks
2863 uint* count_ptr = (uint *)reserve_bytes(sizeof(uint));
2864 if (count_ptr == nullptr) {
2865 return false;
2866 }
2867 uint count = 0;
2868 bool result = asm_remarks.iterate([&] (uint offset, const char* str) -> bool {
2869 log_trace(aot, codecache, stubs)("asm remark offset=%d, str='%s'", offset, str);
2870 uint n = write_bytes(&offset, sizeof(uint));
2871 if (n != sizeof(uint)) {
2872 return false;
2873 }
2874 if (use_string_table) {
2875 const char* cstr = add_C_string(str);
2876 int id = _table->id_for_C_string((address)cstr);
2877 assert(id != -1, "asm remark string '%s' not found in AOTCodeAddressTable", str);
2878 n = write_bytes(&id, sizeof(int));
2879 if (n != sizeof(int)) {
2880 return false;
2881 }
2882 } else {
2883 n = write_bytes(str, (uint)strlen(str) + 1);
2884 if (n != strlen(str) + 1) {
2885 return false;
2886 }
2887 }
2888 count += 1;
2889 return true;
2890 });
2891 *count_ptr = count;
2892 return result;
2893 }
2894
2895 void AOTCodeReader::read_asm_remarks(AsmRemarks& asm_remarks, bool use_string_table) {
2896 // Read asm remarks
2897 uint offset = read_position();
2898 uint count = *(uint *)addr(offset);
2899 offset += sizeof(uint);
2900 for (uint i = 0; i < count; i++) {
2901 uint remark_offset = *(uint *)addr(offset);
2902 offset += sizeof(uint);
2903 const char* remark = nullptr;
2904 if (use_string_table) {
2905 int remark_string_id = *(uint *)addr(offset);
2906 offset += sizeof(int);
2907 remark = (const char*)_cache->address_for_C_string(remark_string_id);
2908 } else {
2909 remark = (const char*)addr(offset);
2910 offset += (uint)strlen(remark)+1;
2911 }
2912 asm_remarks.insert(remark_offset, remark);
2913 }
2914 set_read_position(offset);
2915 }
2916
2917 bool AOTCodeCache::write_dbg_strings(DbgStrings& dbg_strings, bool use_string_table) {
2918 // Write dbg strings
2919 uint* count_ptr = (uint *)reserve_bytes(sizeof(uint));
2920 if (count_ptr == nullptr) {
2921 return false;
2922 }
2923 uint count = 0;
2924 bool result = dbg_strings.iterate([&] (const char* str) -> bool {
2925 log_trace(aot, codecache, stubs)("dbg string=%s", str);
2926 if (use_string_table) {
2927 const char* cstr = add_C_string(str);
2928 int id = _table->id_for_C_string((address)cstr);
2929 assert(id != -1, "db string '%s' not found in AOTCodeAddressTable", str);
2930 uint n = write_bytes(&id, sizeof(int));
2931 if (n != sizeof(int)) {
2932 return false;
2933 }
2934 } else {
2935 uint n = write_bytes(str, (uint)strlen(str) + 1);
2936 if (n != strlen(str) + 1) {
2937 return false;
2938 }
2939 }
2940 count += 1;
2941 return true;
2942 });
2943 *count_ptr = count;
2944 return result;
2945 }
2946
2947 void AOTCodeReader::read_dbg_strings(DbgStrings& dbg_strings, bool use_string_table) {
2948 // Read dbg strings
2949 uint offset = read_position();
2950 uint count = *(uint *)addr(offset);
2951 offset += sizeof(uint);
2952 for (uint i = 0; i < count; i++) {
2953 const char* str = nullptr;
2954 if (use_string_table) {
2955 int string_id = *(uint *)addr(offset);
2956 offset += sizeof(int);
2957 str = (const char*)_cache->address_for_C_string(string_id);
2958 } else {
2959 str = (const char*)addr(offset);
2960 offset += (uint)strlen(str)+1;
2961 }
2962 dbg_strings.insert(str);
2963 }
2964 set_read_position(offset);
2965 }
2966 #endif // PRODUCT
2967
2968 //======================= AOTCodeAddressTable ===============
2969
2970 // address table ids for generated routines, external addresses and C
2971 // string addresses are partitioned into positive integer ranges
2972 // defined by the following positive base and max values
2973 // i.e. [_extrs_base, _extrs_base + _extrs_max -1],
2974 // [_stubs_base, _stubs_base + _stubs_max -1],
2975 // ...
2976 // [_c_str_base, _c_str_base + _c_str_max -1],
// Capacity of each id partition (see the layout comment above).
#define _extrs_max 140
#define _stubs_max 210
#define _shared_blobs_max 25
#define _C1_blobs_max 50
#define _C2_blobs_max 25
#define _blobs_max (_shared_blobs_max+_C1_blobs_max+_C2_blobs_max)
#define _all_max (_extrs_max+_stubs_max+_blobs_max)

// Starting id of each partition; partitions are laid out back-to-back.
#define _extrs_base 0
#define _stubs_base (_extrs_base + _extrs_max)
#define _shared_blobs_base (_stubs_base + _stubs_max)
#define _C1_blobs_base (_shared_blobs_base + _shared_blobs_max)
#define _C2_blobs_base (_C1_blobs_base + _C1_blobs_max)
#define _blobs_end (_shared_blobs_base + _blobs_max)
#if (_C2_blobs_base >= _all_max)
#error AOTCodeAddressTable ranges need adjusting
#endif

// Append `addr` to the given partition's array, bumping its length and
// checking it stays within the partition's capacity.
#define SET_ADDRESS(type, addr)                           \
  {                                                       \
    type##_addr[type##_length++] = (address) (addr);      \
    assert(type##_length <= type##_max, "increase size"); \
  }
3000
// Guards against re-entrant initialization of the external-address partition.
static bool initializing_extrs = false;

// Populate the _extrs partition with addresses of VM runtime entry points,
// GC barrier routines, shared math helpers and other external symbols that
// AOT code may reference. The recording order defines the table ids, so it
// must be identical between the cache-assembly and production runs.
void AOTCodeAddressTable::init_extrs() {
  if (_extrs_complete || initializing_extrs) return; // Done already

  assert(_blobs_end <= _all_max, "AOTCodeAddress table ranges need adjusting");

  initializing_extrs = true;
  _extrs_addr = NEW_C_HEAP_ARRAY(address, _extrs_max, mtCode);

  _extrs_length = 0;

  // Record addresses of VM runtime methods
  SET_ADDRESS(_extrs, SharedRuntime::fixup_callers_callsite);
  SET_ADDRESS(_extrs, SharedRuntime::handle_wrong_method);
  SET_ADDRESS(_extrs, SharedRuntime::handle_wrong_method_abstract);
  SET_ADDRESS(_extrs, SharedRuntime::handle_wrong_method_ic_miss);
  {
    // Required by Shared blobs
    SET_ADDRESS(_extrs, Deoptimization::fetch_unroll_info);
    SET_ADDRESS(_extrs, Deoptimization::unpack_frames);
    SET_ADDRESS(_extrs, SafepointSynchronize::handle_polling_page_exception);
    SET_ADDRESS(_extrs, SharedRuntime::resolve_opt_virtual_call_C);
    SET_ADDRESS(_extrs, SharedRuntime::resolve_virtual_call_C);
    SET_ADDRESS(_extrs, SharedRuntime::resolve_static_call_C);
    SET_ADDRESS(_extrs, SharedRuntime::throw_StackOverflowError);
    SET_ADDRESS(_extrs, SharedRuntime::throw_delayed_StackOverflowError);
    SET_ADDRESS(_extrs, SharedRuntime::throw_AbstractMethodError);
    SET_ADDRESS(_extrs, SharedRuntime::throw_IncompatibleClassChangeError);
    SET_ADDRESS(_extrs, SharedRuntime::throw_NullPointerException_at_call);
    // NOTE(review): throw_StackOverflowError is already registered above —
    // confirm this duplicate entry is intentional (ids are positional, so
    // removing it would shift all subsequent ids).
    SET_ADDRESS(_extrs, SharedRuntime::throw_StackOverflowError);
    SET_ADDRESS(_extrs, CompressedOops::base_addr());
    SET_ADDRESS(_extrs, CompressedKlassPointers::base_addr());
  }
  {
    // Required by initial stubs
    SET_ADDRESS(_extrs, StubRoutines::crc_table_addr());
#if defined(AMD64)
    SET_ADDRESS(_extrs, StubRoutines::crc32c_table_addr());
#endif
  }

#ifdef COMPILER1
  {
    // Required by C1 blobs
    SET_ADDRESS(_extrs, static_cast<int (*)(oopDesc*)>(SharedRuntime::dtrace_object_alloc));
    SET_ADDRESS(_extrs, SharedRuntime::exception_handler_for_return_address);
    SET_ADDRESS(_extrs, SharedRuntime::register_finalizer);
    SET_ADDRESS(_extrs, Runtime1::is_instance_of);
    SET_ADDRESS(_extrs, Runtime1::exception_handler_for_pc);
    SET_ADDRESS(_extrs, Runtime1::check_abort_on_vm_exception);
    SET_ADDRESS(_extrs, Runtime1::new_instance);
    SET_ADDRESS(_extrs, Runtime1::counter_overflow);
    SET_ADDRESS(_extrs, Runtime1::new_type_array);
    SET_ADDRESS(_extrs, Runtime1::new_object_array);
    SET_ADDRESS(_extrs, Runtime1::new_multi_array);
    SET_ADDRESS(_extrs, Runtime1::throw_range_check_exception);
    SET_ADDRESS(_extrs, Runtime1::throw_index_exception);
    SET_ADDRESS(_extrs, Runtime1::throw_div0_exception);
    SET_ADDRESS(_extrs, Runtime1::throw_null_pointer_exception);
    SET_ADDRESS(_extrs, Runtime1::throw_array_store_exception);
    SET_ADDRESS(_extrs, Runtime1::throw_class_cast_exception);
    SET_ADDRESS(_extrs, Runtime1::throw_incompatible_class_change_error);
    SET_ADDRESS(_extrs, Runtime1::monitorenter);
    SET_ADDRESS(_extrs, Runtime1::monitorexit);
    SET_ADDRESS(_extrs, Runtime1::deoptimize);
    SET_ADDRESS(_extrs, Runtime1::access_field_patching);
    SET_ADDRESS(_extrs, Runtime1::move_klass_patching);
    SET_ADDRESS(_extrs, Runtime1::move_mirror_patching);
    SET_ADDRESS(_extrs, Runtime1::move_appendix_patching);
    SET_ADDRESS(_extrs, Runtime1::predicate_failed_trap);
    SET_ADDRESS(_extrs, Runtime1::unimplemented_entry);
    SET_ADDRESS(_extrs, Runtime1::trace_block_entry);
#ifdef X86
    SET_ADDRESS(_extrs, LIR_Assembler::float_signmask_pool);
    SET_ADDRESS(_extrs, LIR_Assembler::double_signmask_pool);
    SET_ADDRESS(_extrs, LIR_Assembler::float_signflip_pool);
    SET_ADDRESS(_extrs, LIR_Assembler::double_signflip_pool);
#endif
#ifndef PRODUCT
    SET_ADDRESS(_extrs, os::breakpoint);
#endif
  }
#endif // COMPILER1

#ifdef COMPILER2
  {
    // Required by C2 blobs
    SET_ADDRESS(_extrs, Deoptimization::uncommon_trap);
    SET_ADDRESS(_extrs, OptoRuntime::handle_exception_C);
    SET_ADDRESS(_extrs, OptoRuntime::new_instance_C);
    SET_ADDRESS(_extrs, OptoRuntime::new_array_C);
    SET_ADDRESS(_extrs, OptoRuntime::new_array_nozero_C);
    SET_ADDRESS(_extrs, OptoRuntime::multianewarray2_C);
    SET_ADDRESS(_extrs, OptoRuntime::multianewarray3_C);
    SET_ADDRESS(_extrs, OptoRuntime::multianewarray4_C);
    SET_ADDRESS(_extrs, OptoRuntime::multianewarray5_C);
    SET_ADDRESS(_extrs, OptoRuntime::multianewarrayN_C);
    SET_ADDRESS(_extrs, OptoRuntime::complete_monitor_locking_C);
    SET_ADDRESS(_extrs, OptoRuntime::monitor_notify_C);
    SET_ADDRESS(_extrs, OptoRuntime::monitor_notifyAll_C);
    SET_ADDRESS(_extrs, OptoRuntime::rethrow_C);
    SET_ADDRESS(_extrs, OptoRuntime::slow_arraycopy_C);
    SET_ADDRESS(_extrs, OptoRuntime::register_finalizer_C);
    SET_ADDRESS(_extrs, OptoRuntime::class_init_barrier_C);
    SET_ADDRESS(_extrs, OptoRuntime::compile_method_C);
    SET_ADDRESS(_extrs, OptoRuntime::vthread_end_first_transition_C);
    SET_ADDRESS(_extrs, OptoRuntime::vthread_start_final_transition_C);
    SET_ADDRESS(_extrs, OptoRuntime::vthread_start_transition_C);
    SET_ADDRESS(_extrs, OptoRuntime::vthread_end_transition_C);
#if defined(AMD64)
    // Used by C2 intrinsic
    SET_ADDRESS(_extrs, StubRoutines::x86::arrays_hashcode_powers_of_31());
#endif
  }
#endif // COMPILER2
#if INCLUDE_G1GC
  SET_ADDRESS(_extrs, G1BarrierSetRuntime::write_ref_field_pre_entry);
#endif

#if INCLUDE_SHENANDOAHGC
  SET_ADDRESS(_extrs, ShenandoahRuntime::arraycopy_barrier_oop);
  SET_ADDRESS(_extrs, ShenandoahRuntime::arraycopy_barrier_narrow_oop);
  SET_ADDRESS(_extrs, ShenandoahRuntime::clone_barrier);
  SET_ADDRESS(_extrs, ShenandoahRuntime::load_reference_barrier_strong);
  SET_ADDRESS(_extrs, ShenandoahRuntime::load_reference_barrier_strong_narrow);
  SET_ADDRESS(_extrs, ShenandoahRuntime::load_reference_barrier_weak);
  SET_ADDRESS(_extrs, ShenandoahRuntime::load_reference_barrier_weak_narrow);
  SET_ADDRESS(_extrs, ShenandoahRuntime::load_reference_barrier_phantom);
  SET_ADDRESS(_extrs, ShenandoahRuntime::load_reference_barrier_phantom_narrow);
  SET_ADDRESS(_extrs, ShenandoahRuntime::write_barrier_pre);
#endif

#if INCLUDE_ZGC
  SET_ADDRESS(_extrs, ZBarrierSetRuntime::load_barrier_on_oop_field_preloaded_addr());
  SET_ADDRESS(_extrs, ZBarrierSetRuntime::load_barrier_on_phantom_oop_field_preloaded_addr());
#if defined(AMD64)
  SET_ADDRESS(_extrs, &ZPointerLoadShift);
#endif
#if defined(AARCH64)
  BarrierSetAssembler* bs_asm = BarrierSet::barrier_set()->barrier_set_assembler();
  SET_ADDRESS(_extrs, bs_asm->patching_epoch_addr());
#endif
#endif // INCLUDE_ZGC

  SET_ADDRESS(_extrs, SharedRuntime::rc_trace_method_entry);
  SET_ADDRESS(_extrs, SharedRuntime::reguard_yellow_pages);
  SET_ADDRESS(_extrs, SharedRuntime::dtrace_method_exit);

  SET_ADDRESS(_extrs, SharedRuntime::complete_monitor_unlocking_C);
  SET_ADDRESS(_extrs, SharedRuntime::enable_stack_reserved_zone);
#if defined(AMD64) && !defined(ZERO)
  SET_ADDRESS(_extrs, SharedRuntime::montgomery_multiply);
  SET_ADDRESS(_extrs, SharedRuntime::montgomery_square);
#endif // AMD64
  // Shared math/conversion runtime helpers.
  SET_ADDRESS(_extrs, SharedRuntime::d2f);
  SET_ADDRESS(_extrs, SharedRuntime::d2i);
  SET_ADDRESS(_extrs, SharedRuntime::d2l);
  SET_ADDRESS(_extrs, SharedRuntime::dcos);
  SET_ADDRESS(_extrs, SharedRuntime::dexp);
  SET_ADDRESS(_extrs, SharedRuntime::dlog);
  SET_ADDRESS(_extrs, SharedRuntime::dlog10);
  SET_ADDRESS(_extrs, SharedRuntime::dpow);
  SET_ADDRESS(_extrs, SharedRuntime::dsin);
  SET_ADDRESS(_extrs, SharedRuntime::dtan);
  SET_ADDRESS(_extrs, SharedRuntime::f2i);
  SET_ADDRESS(_extrs, SharedRuntime::f2l);
#ifndef ZERO
  SET_ADDRESS(_extrs, SharedRuntime::drem);
  SET_ADDRESS(_extrs, SharedRuntime::frem);
#endif
  SET_ADDRESS(_extrs, SharedRuntime::l2d);
  SET_ADDRESS(_extrs, SharedRuntime::l2f);
  SET_ADDRESS(_extrs, SharedRuntime::ldiv);
  SET_ADDRESS(_extrs, SharedRuntime::lmul);
  SET_ADDRESS(_extrs, SharedRuntime::lrem);

  SET_ADDRESS(_extrs, ThreadIdentifier::unsafe_offset());
  SET_ADDRESS(_extrs, Thread::current);
  SET_ADDRESS(_extrs, ObjectMonitorTable::current_table_address());

  SET_ADDRESS(_extrs, os::javaTimeMillis);
  SET_ADDRESS(_extrs, os::javaTimeNanos);
  // For JFR
  SET_ADDRESS(_extrs, os::elapsed_counter);
#if defined(X86) && !defined(ZERO)
  SET_ADDRESS(_extrs, Rdtsc::elapsed_counter);
#endif

#if INCLUDE_JVMTI
  SET_ADDRESS(_extrs, &JvmtiExport::_should_notify_object_alloc);
#endif /* INCLUDE_JVMTI */
  SET_ADDRESS(_extrs, MountUnmountDisabler::notify_jvmti_events_address());
  SET_ADDRESS(_extrs, MountUnmountDisabler::global_vthread_transition_disable_count_address());

#ifndef PRODUCT
  SET_ADDRESS(_extrs, &SharedRuntime::_partial_subtype_ctr);
  SET_ADDRESS(_extrs, JavaThread::verify_cross_modify_fence_failure);
#endif

#ifndef ZERO
#if defined(AMD64) || defined(AARCH64) || defined(RISCV64)
  SET_ADDRESS(_extrs, MacroAssembler::debug64);
#endif
#if defined(AARCH64)
  SET_ADDRESS(_extrs, JavaThread::aarch64_get_thread_helper);
#endif
#endif // ZERO

  // addresses of fields in AOT runtime constants area
  address* p = AOTRuntimeConstants::field_addresses_list();
  while (*p != nullptr) {
    SET_ADDRESS(_extrs, *p++);
  }

  _extrs_complete = true;
  log_info(aot, codecache, init)("External addresses recorded");
}
3219
// Guards against re-entrant initialization of the early-stubs entries.
static bool initializing_early_stubs = false;

// Allocate the _stubs partition and record the stub addresses that are
// available early in VM startup (before full stub generation). As with the
// other partitions, the recording order defines the ids.
void AOTCodeAddressTable::init_early_stubs() {
  if (_complete || initializing_early_stubs) return; // Done already
  initializing_early_stubs = true;
  _stubs_addr = NEW_C_HEAP_ARRAY(address, _stubs_max, mtCode);
  _stubs_length = 0;
  SET_ADDRESS(_stubs, StubRoutines::forward_exception_entry());

  {
    // Required by C1 blobs
#if defined(AMD64) && !defined(ZERO)
    SET_ADDRESS(_stubs, StubRoutines::x86::double_sign_flip());
    SET_ADDRESS(_stubs, StubRoutines::x86::d2l_fixup());
#endif // AMD64
  }

  _early_stubs_complete = true;
  log_info(aot, codecache, init)("Early stubs recorded");
}
3240
// Guards against re-entrant initialization of the blob-address partitions.
static bool initializing_shared_blobs = false;

// Allocate one array backing the shared/C1/C2 blob partitions and record the
// entry points of the shared runtime blobs. The C1/C2 sub-arrays are filled
// later by their own initializers. Recording order defines the ids.
void AOTCodeAddressTable::init_shared_blobs() {
  if (_complete || initializing_shared_blobs) return; // Done already
  initializing_shared_blobs = true;
  address* blobs_addr = NEW_C_HEAP_ARRAY(address, _blobs_max, mtCode);

  // Divide _shared_blobs_addr array to chunks because they could be initialized in parallel
  _shared_blobs_addr = blobs_addr;
  _C1_blobs_addr = _shared_blobs_addr + _shared_blobs_max;// C1 blobs addresses stored after shared blobs
  _C2_blobs_addr = _C1_blobs_addr + _C1_blobs_max; // C2 blobs addresses stored after C1 blobs

  _shared_blobs_length = 0;
  _C1_blobs_length = 0;
  _C2_blobs_length = 0;

  // clear the address table
  memset(blobs_addr, 0, sizeof(address)* _blobs_max);

  // Record addresses of generated code blobs
  SET_ADDRESS(_shared_blobs, SharedRuntime::get_handle_wrong_method_stub());
  SET_ADDRESS(_shared_blobs, SharedRuntime::get_ic_miss_stub());
  SET_ADDRESS(_shared_blobs, SharedRuntime::deopt_blob()->unpack());
  SET_ADDRESS(_shared_blobs, SharedRuntime::deopt_blob()->unpack_with_exception());
  SET_ADDRESS(_shared_blobs, SharedRuntime::deopt_blob()->unpack_with_reexecution());
  SET_ADDRESS(_shared_blobs, SharedRuntime::deopt_blob()->unpack_with_exception_in_tls());
  SET_ADDRESS(_shared_blobs, SharedRuntime::get_resolve_opt_virtual_call_stub());
  SET_ADDRESS(_shared_blobs, SharedRuntime::get_resolve_virtual_call_stub());
  SET_ADDRESS(_shared_blobs, SharedRuntime::get_resolve_static_call_stub());
  SET_ADDRESS(_shared_blobs, SharedRuntime::deopt_blob()->entry_point());
  SET_ADDRESS(_shared_blobs, SharedRuntime::polling_page_safepoint_handler_blob()->entry_point());
  SET_ADDRESS(_shared_blobs, SharedRuntime::polling_page_return_handler_blob()->entry_point());
#ifdef COMPILER2
  // polling_page_vectors_safepoint_handler_blob can be nullptr if AVX feature is not present or is disabled
  if (SharedRuntime::polling_page_vectors_safepoint_handler_blob() != nullptr) {
    SET_ADDRESS(_shared_blobs, SharedRuntime::polling_page_vectors_safepoint_handler_blob()->entry_point());
  }
#endif
#if INCLUDE_JVMCI
  if (EnableJVMCI) {
    SET_ADDRESS(_shared_blobs, SharedRuntime::deopt_blob()->uncommon_trap());
    SET_ADDRESS(_shared_blobs, SharedRuntime::deopt_blob()->implicit_exception_uncommon_trap());
  }
#endif
  SET_ADDRESS(_shared_blobs, SharedRuntime::throw_AbstractMethodError_entry());
  SET_ADDRESS(_shared_blobs, SharedRuntime::throw_IncompatibleClassChangeError_entry());
  SET_ADDRESS(_shared_blobs, SharedRuntime::throw_NullPointerException_at_call_entry());
  SET_ADDRESS(_shared_blobs, SharedRuntime::throw_StackOverflowError_entry());
  SET_ADDRESS(_shared_blobs, SharedRuntime::throw_delayed_StackOverflowError_entry());

  assert(_shared_blobs_length <= _shared_blobs_max, "increase _shared_blobs_max to %d", _shared_blobs_length);
  _shared_blobs_complete = true;
  log_info(aot, codecache, init)("All shared blobs recorded");
}
3295
3296 static bool initializing_stubs = false;
3297 void AOTCodeAddressTable::init_stubs() {
3298 if (_complete || initializing_stubs) return; // Done already
3299 assert(_early_stubs_complete, "early stubs whould be initialized");
3300 initializing_stubs = true;
3301
3302 // Stubs
3303 SET_ADDRESS(_stubs, StubRoutines::method_entry_barrier());
3304 SET_ADDRESS(_stubs, StubRoutines::atomic_xchg_entry());
3305 SET_ADDRESS(_stubs, StubRoutines::atomic_cmpxchg_entry());
3306 SET_ADDRESS(_stubs, StubRoutines::atomic_cmpxchg_long_entry());
3307 SET_ADDRESS(_stubs, StubRoutines::atomic_add_entry());
3308 SET_ADDRESS(_stubs, StubRoutines::fence_entry());
3309
3310 SET_ADDRESS(_stubs, StubRoutines::cont_thaw());
3311 SET_ADDRESS(_stubs, StubRoutines::cont_returnBarrier());
3312 SET_ADDRESS(_stubs, StubRoutines::cont_returnBarrierExc());
3313
3314 JFR_ONLY(SET_ADDRESS(_stubs, SharedRuntime::jfr_write_checkpoint());)
3315
3316 SET_ADDRESS(_stubs, StubRoutines::jbyte_arraycopy());
3317 SET_ADDRESS(_stubs, StubRoutines::jshort_arraycopy());
3318 SET_ADDRESS(_stubs, StubRoutines::jint_arraycopy());
3319 SET_ADDRESS(_stubs, StubRoutines::jlong_arraycopy());
3320 SET_ADDRESS(_stubs, StubRoutines::_oop_arraycopy);
3321 SET_ADDRESS(_stubs, StubRoutines::_oop_arraycopy_uninit);
3322
3323 SET_ADDRESS(_stubs, StubRoutines::jbyte_disjoint_arraycopy());
3324 SET_ADDRESS(_stubs, StubRoutines::jshort_disjoint_arraycopy());
3325 SET_ADDRESS(_stubs, StubRoutines::jint_disjoint_arraycopy());
3326 SET_ADDRESS(_stubs, StubRoutines::jlong_disjoint_arraycopy());
3327 SET_ADDRESS(_stubs, StubRoutines::_oop_disjoint_arraycopy);
3328 SET_ADDRESS(_stubs, StubRoutines::_oop_disjoint_arraycopy_uninit);
3329
3330 SET_ADDRESS(_stubs, StubRoutines::arrayof_jbyte_arraycopy());
3331 SET_ADDRESS(_stubs, StubRoutines::arrayof_jshort_arraycopy());
3332 SET_ADDRESS(_stubs, StubRoutines::arrayof_jint_arraycopy());
3333 SET_ADDRESS(_stubs, StubRoutines::arrayof_jlong_arraycopy());
3334 SET_ADDRESS(_stubs, StubRoutines::_arrayof_oop_arraycopy);
3335 SET_ADDRESS(_stubs, StubRoutines::_arrayof_oop_arraycopy_uninit);
3336
3337 SET_ADDRESS(_stubs, StubRoutines::arrayof_jbyte_disjoint_arraycopy());
3338 SET_ADDRESS(_stubs, StubRoutines::arrayof_jshort_disjoint_arraycopy());
3339 SET_ADDRESS(_stubs, StubRoutines::arrayof_jint_disjoint_arraycopy());
3340 SET_ADDRESS(_stubs, StubRoutines::arrayof_jlong_disjoint_arraycopy());
3341 SET_ADDRESS(_stubs, StubRoutines::_arrayof_oop_disjoint_arraycopy);
3342 SET_ADDRESS(_stubs, StubRoutines::_arrayof_oop_disjoint_arraycopy_uninit);
3343
3344 SET_ADDRESS(_stubs, StubRoutines::_checkcast_arraycopy);
3345 SET_ADDRESS(_stubs, StubRoutines::_checkcast_arraycopy_uninit);
3346
3347 SET_ADDRESS(_stubs, StubRoutines::unsafe_arraycopy());
3348 SET_ADDRESS(_stubs, StubRoutines::generic_arraycopy());
3349
3350 SET_ADDRESS(_stubs, StubRoutines::jbyte_fill());
3351 SET_ADDRESS(_stubs, StubRoutines::jshort_fill());
3352 SET_ADDRESS(_stubs, StubRoutines::jint_fill());
3353 SET_ADDRESS(_stubs, StubRoutines::arrayof_jbyte_fill());
3354 SET_ADDRESS(_stubs, StubRoutines::arrayof_jshort_fill());
3355 SET_ADDRESS(_stubs, StubRoutines::arrayof_jint_fill());
3356
3357 SET_ADDRESS(_stubs, StubRoutines::data_cache_writeback());
3358 SET_ADDRESS(_stubs, StubRoutines::data_cache_writeback_sync());
3359
3360 SET_ADDRESS(_stubs, StubRoutines::aescrypt_encryptBlock());
3361 SET_ADDRESS(_stubs, StubRoutines::aescrypt_decryptBlock());
3362 SET_ADDRESS(_stubs, StubRoutines::cipherBlockChaining_encryptAESCrypt());
3363 SET_ADDRESS(_stubs, StubRoutines::cipherBlockChaining_decryptAESCrypt());
3364 SET_ADDRESS(_stubs, StubRoutines::electronicCodeBook_encryptAESCrypt());
3365 SET_ADDRESS(_stubs, StubRoutines::electronicCodeBook_decryptAESCrypt());
3366 SET_ADDRESS(_stubs, StubRoutines::poly1305_processBlocks());
3367 SET_ADDRESS(_stubs, StubRoutines::counterMode_AESCrypt());
3368 SET_ADDRESS(_stubs, StubRoutines::ghash_processBlocks());
3369 SET_ADDRESS(_stubs, StubRoutines::chacha20Block());
3370 SET_ADDRESS(_stubs, StubRoutines::base64_encodeBlock());
3371 SET_ADDRESS(_stubs, StubRoutines::base64_decodeBlock());
3372 SET_ADDRESS(_stubs, StubRoutines::md5_implCompress());
3373 SET_ADDRESS(_stubs, StubRoutines::md5_implCompressMB());
3374 SET_ADDRESS(_stubs, StubRoutines::sha1_implCompress());
3375 SET_ADDRESS(_stubs, StubRoutines::sha1_implCompressMB());
3376 SET_ADDRESS(_stubs, StubRoutines::sha256_implCompress());
3377 SET_ADDRESS(_stubs, StubRoutines::sha256_implCompressMB());
3378 SET_ADDRESS(_stubs, StubRoutines::sha512_implCompress());
3379 SET_ADDRESS(_stubs, StubRoutines::sha512_implCompressMB());
3380 SET_ADDRESS(_stubs, StubRoutines::sha3_implCompress());
3381 SET_ADDRESS(_stubs, StubRoutines::sha3_implCompressMB());
3382 SET_ADDRESS(_stubs, StubRoutines::double_keccak());
3383 SET_ADDRESS(_stubs, StubRoutines::intpoly_assign());
3384 SET_ADDRESS(_stubs, StubRoutines::intpoly_montgomeryMult_P256());
3385 SET_ADDRESS(_stubs, StubRoutines::dilithiumAlmostNtt());
3386 SET_ADDRESS(_stubs, StubRoutines::dilithiumAlmostInverseNtt());
3387 SET_ADDRESS(_stubs, StubRoutines::dilithiumNttMult());
3388 SET_ADDRESS(_stubs, StubRoutines::dilithiumMontMulByConstant());
3389 SET_ADDRESS(_stubs, StubRoutines::dilithiumDecomposePoly());
3390 SET_ADDRESS(_stubs, StubRoutines::kyber12To16());
3391
3392 SET_ADDRESS(_stubs, StubRoutines::updateBytesCRC32());
3393 SET_ADDRESS(_stubs, StubRoutines::updateBytesCRC32C());
3394 SET_ADDRESS(_stubs, StubRoutines::updateBytesAdler32());
3395
3396 SET_ADDRESS(_stubs, StubRoutines::multiplyToLen());
3397 SET_ADDRESS(_stubs, StubRoutines::squareToLen());
3398 SET_ADDRESS(_stubs, StubRoutines::mulAdd());
3399 SET_ADDRESS(_stubs, StubRoutines::montgomeryMultiply());
3400 SET_ADDRESS(_stubs, StubRoutines::montgomerySquare());
3401 SET_ADDRESS(_stubs, StubRoutines::bigIntegerRightShift());
3402 SET_ADDRESS(_stubs, StubRoutines::bigIntegerLeftShift());
3403 SET_ADDRESS(_stubs, StubRoutines::galoisCounterMode_AESCrypt());
3404
3405 SET_ADDRESS(_stubs, StubRoutines::vectorizedMismatch());
3406
3407 SET_ADDRESS(_stubs, StubRoutines::unsafe_setmemory());
3408
3409 SET_ADDRESS(_stubs, StubRoutines::dexp());
3410 SET_ADDRESS(_stubs, StubRoutines::dlog());
3411 SET_ADDRESS(_stubs, StubRoutines::dlog10());
3412 SET_ADDRESS(_stubs, StubRoutines::dpow());
3413 SET_ADDRESS(_stubs, StubRoutines::dsin());
3414 SET_ADDRESS(_stubs, StubRoutines::dcos());
3415 SET_ADDRESS(_stubs, StubRoutines::dlibm_reduce_pi04l());
3416 SET_ADDRESS(_stubs, StubRoutines::dlibm_sin_cos_huge());
3417 SET_ADDRESS(_stubs, StubRoutines::dlibm_tan_cot_huge());
3418 SET_ADDRESS(_stubs, StubRoutines::dtan());
3419
3420 SET_ADDRESS(_stubs, StubRoutines::f2hf_adr());
3421 SET_ADDRESS(_stubs, StubRoutines::hf2f_adr());
3422
3423 for (int slot = 0; slot < Klass::SECONDARY_SUPERS_TABLE_SIZE; slot++) {
3424 SET_ADDRESS(_stubs, StubRoutines::lookup_secondary_supers_table_stub(slot));
3425 }
3426 SET_ADDRESS(_stubs, StubRoutines::lookup_secondary_supers_table_slow_path_stub());
3427
3428 #if defined(AMD64) && !defined(ZERO)
3429 SET_ADDRESS(_stubs, StubRoutines::x86::d2i_fixup());
3430 SET_ADDRESS(_stubs, StubRoutines::x86::f2i_fixup());
3431 SET_ADDRESS(_stubs, StubRoutines::x86::f2l_fixup());
3432 SET_ADDRESS(_stubs, StubRoutines::x86::float_sign_mask());
3433 SET_ADDRESS(_stubs, StubRoutines::x86::float_sign_flip());
3434 SET_ADDRESS(_stubs, StubRoutines::x86::double_sign_mask());
3435 SET_ADDRESS(_stubs, StubRoutines::x86::vector_popcount_lut());
3436 SET_ADDRESS(_stubs, StubRoutines::x86::vector_float_sign_mask());
3437 SET_ADDRESS(_stubs, StubRoutines::x86::vector_float_sign_flip());
3438 SET_ADDRESS(_stubs, StubRoutines::x86::vector_double_sign_mask());
3439 SET_ADDRESS(_stubs, StubRoutines::x86::vector_double_sign_flip());
3440 SET_ADDRESS(_stubs, StubRoutines::x86::vector_int_shuffle_mask());
3441 SET_ADDRESS(_stubs, StubRoutines::x86::vector_byte_shuffle_mask());
3442 SET_ADDRESS(_stubs, StubRoutines::x86::vector_short_shuffle_mask());
3443 SET_ADDRESS(_stubs, StubRoutines::x86::vector_long_shuffle_mask());
3444 SET_ADDRESS(_stubs, StubRoutines::x86::vector_long_sign_mask());
3445 SET_ADDRESS(_stubs, StubRoutines::x86::vector_int_to_byte_mask());
3446 SET_ADDRESS(_stubs, StubRoutines::x86::vector_int_to_short_mask());
3447 SET_ADDRESS(_stubs, StubRoutines::x86::vector_reverse_byte_perm_mask_int());
3448 SET_ADDRESS(_stubs, StubRoutines::x86::vector_reverse_byte_perm_mask_short());
3449 SET_ADDRESS(_stubs, StubRoutines::x86::vector_reverse_byte_perm_mask_long());
3450 // The iota indices are ordered by type B/S/I/L/F/D, and the offset between two types is 64.
3451 // See C2_MacroAssembler::load_iota_indices().
3452 for (int i = 0; i < 6; i++) {
3453 SET_ADDRESS(_stubs, StubRoutines::x86::vector_iota_indices() + i * 64);
3454 }
3455 #ifdef COMPILER2
3456 for (int i = 0; i < 4; i++) {
3457 SET_ADDRESS(_stubs, StubRoutines::_string_indexof_array[i]);
3458 }
3459 #endif
3460 #endif
3461 #if defined(AARCH64) && !defined(ZERO)
3462 SET_ADDRESS(_stubs, StubRoutines::aarch64::zero_blocks());
3463 SET_ADDRESS(_stubs, StubRoutines::aarch64::count_positives());
3464 SET_ADDRESS(_stubs, StubRoutines::aarch64::count_positives_long());
3465 SET_ADDRESS(_stubs, StubRoutines::aarch64::large_array_equals());
3466 SET_ADDRESS(_stubs, StubRoutines::aarch64::compare_long_string_LL());
3467 SET_ADDRESS(_stubs, StubRoutines::aarch64::compare_long_string_UU());
3468 SET_ADDRESS(_stubs, StubRoutines::aarch64::compare_long_string_LU());
3469 SET_ADDRESS(_stubs, StubRoutines::aarch64::compare_long_string_UL());
3470 SET_ADDRESS(_stubs, StubRoutines::aarch64::string_indexof_linear_ul());
3471 SET_ADDRESS(_stubs, StubRoutines::aarch64::string_indexof_linear_ll());
3472 SET_ADDRESS(_stubs, StubRoutines::aarch64::string_indexof_linear_uu());
3473 SET_ADDRESS(_stubs, StubRoutines::aarch64::large_byte_array_inflate());
3474 SET_ADDRESS(_stubs, StubRoutines::aarch64::spin_wait());
3475
3476 SET_ADDRESS(_stubs, StubRoutines::aarch64::large_arrays_hashcode(T_BOOLEAN));
3477 SET_ADDRESS(_stubs, StubRoutines::aarch64::large_arrays_hashcode(T_BYTE));
3478 SET_ADDRESS(_stubs, StubRoutines::aarch64::large_arrays_hashcode(T_SHORT));
3479 SET_ADDRESS(_stubs, StubRoutines::aarch64::large_arrays_hashcode(T_CHAR));
3480 SET_ADDRESS(_stubs, StubRoutines::aarch64::large_arrays_hashcode(T_INT));
3481 #endif
3482
3483 _complete = true;
3484 log_info(aot, codecache, init)("Stubs recorded");
3485 }
3486
3487 void AOTCodeAddressTable::init_early_c1() {
3488 #ifdef COMPILER1
3489 // Runtime1 Blobs
3490 StubId id = StubInfo::stub_base(StubGroup::C1);
3491 // include forward_exception in range we publish
3492 StubId limit = StubInfo::next(StubId::c1_forward_exception_id);
3493 for (; id != limit; id = StubInfo::next(id)) {
3494 if (Runtime1::blob_for(id) == nullptr) {
3495 log_info(aot, codecache, init)("C1 blob %s is missing", Runtime1::name_for(id));
3496 continue;
3497 }
3498 if (Runtime1::entry_for(id) == nullptr) {
3499 log_info(aot, codecache, init)("C1 blob %s is missing entry", Runtime1::name_for(id));
3500 continue;
3501 }
3502 address entry = Runtime1::entry_for(id);
3503 SET_ADDRESS(_C1_blobs, entry);
3504 }
3505 #endif // COMPILER1
3506 assert(_C1_blobs_length <= _C1_blobs_max, "increase _C1_blobs_max to %d", _C1_blobs_length);
3507 _early_c1_complete = true;
3508 }
3509
3510 void AOTCodeAddressTable::init_c1() {
3511 #ifdef COMPILER1
3512 // Runtime1 Blobs
3513 assert(_early_c1_complete, "early C1 blobs should be initialized");
3514 StubId id = StubInfo::next(StubId::c1_forward_exception_id);
3515 StubId limit = StubInfo::next(StubInfo::stub_max(StubGroup::C1));
3516 for (; id != limit; id = StubInfo::next(id)) {
3517 if (Runtime1::blob_for(id) == nullptr) {
3518 log_info(aot, codecache, init)("C1 blob %s is missing", Runtime1::name_for(id));
3519 continue;
3520 }
3521 if (Runtime1::entry_for(id) == nullptr) {
3522 log_info(aot, codecache, init)("C1 blob %s is missing entry", Runtime1::name_for(id));
3523 continue;
3524 }
3525 address entry = Runtime1::entry_for(id);
3526 SET_ADDRESS(_C1_blobs, entry);
3527 }
3528 #if INCLUDE_G1GC
3529 if (UseG1GC) {
3530 G1BarrierSetC1* bs = (G1BarrierSetC1*)BarrierSet::barrier_set()->barrier_set_c1();
3531 address entry = bs->pre_barrier_c1_runtime_code_blob()->code_begin();
3532 SET_ADDRESS(_C1_blobs, entry);
3533 }
3534 #endif // INCLUDE_G1GC
3535 #if INCLUDE_ZGC
3536 if (UseZGC) {
3537 ZBarrierSetC1* bs = (ZBarrierSetC1*)BarrierSet::barrier_set()->barrier_set_c1();
3538 SET_ADDRESS(_C1_blobs, bs->_load_barrier_on_oop_field_preloaded_runtime_stub);
3539 SET_ADDRESS(_C1_blobs, bs->_load_barrier_on_weak_oop_field_preloaded_runtime_stub);
3540 SET_ADDRESS(_C1_blobs, bs->_store_barrier_on_oop_field_with_healing);
3541 SET_ADDRESS(_C1_blobs, bs->_store_barrier_on_oop_field_without_healing);
3542 }
3543 #endif // INCLUDE_ZGC
3544 #if INCLUDE_SHENANDOAHGC
3545 if (UseShenandoahGC) {
3546 ShenandoahBarrierSetC1* bs = (ShenandoahBarrierSetC1*)BarrierSet::barrier_set()->barrier_set_c1();
3547 SET_ADDRESS(_C1_blobs, bs->pre_barrier_c1_runtime_code_blob()->code_begin());
3548 SET_ADDRESS(_C1_blobs, bs->load_reference_barrier_strong_rt_code_blob()->code_begin());
3549 SET_ADDRESS(_C1_blobs, bs->load_reference_barrier_strong_native_rt_code_blob()->code_begin());
3550 SET_ADDRESS(_C1_blobs, bs->load_reference_barrier_weak_rt_code_blob()->code_begin());
3551 SET_ADDRESS(_C1_blobs, bs->load_reference_barrier_phantom_rt_code_blob()->code_begin());
3552 }
3553 #endif // INCLUDE_SHENANDOAHGC
3554 #endif // COMPILER1
3555
3556 assert(_C1_blobs_length <= _C1_blobs_max, "increase _C1_blobs_max to %d", _C1_blobs_length);
3557 _c1_complete = true;
3558 log_info(aot, codecache, init)("Runtime1 Blobs recorded");
3559 }
3560
// Record entry points of the C2 OptoRuntime blobs in the _C2_blobs section
// of the address table, so C2-compiled AOT code can reference them by id.
void AOTCodeAddressTable::init_c2() {
#ifdef COMPILER2
  // OptoRuntime Blobs
  SET_ADDRESS(_C2_blobs, OptoRuntime::uncommon_trap_blob()->entry_point());
  SET_ADDRESS(_C2_blobs, OptoRuntime::exception_blob()->entry_point());
  // Allocation entry points
  SET_ADDRESS(_C2_blobs, OptoRuntime::new_instance_Java());
  SET_ADDRESS(_C2_blobs, OptoRuntime::new_array_Java());
  SET_ADDRESS(_C2_blobs, OptoRuntime::new_array_nozero_Java());
  SET_ADDRESS(_C2_blobs, OptoRuntime::multianewarray2_Java());
  SET_ADDRESS(_C2_blobs, OptoRuntime::multianewarray3_Java());
  SET_ADDRESS(_C2_blobs, OptoRuntime::multianewarray4_Java());
  SET_ADDRESS(_C2_blobs, OptoRuntime::multianewarray5_Java());
  SET_ADDRESS(_C2_blobs, OptoRuntime::multianewarrayN_Java());
  SET_ADDRESS(_C2_blobs, OptoRuntime::vtable_must_compile_stub());
  // Monitor and miscellaneous runtime entry points
  SET_ADDRESS(_C2_blobs, OptoRuntime::complete_monitor_locking_Java());
  SET_ADDRESS(_C2_blobs, OptoRuntime::monitor_notify_Java());
  SET_ADDRESS(_C2_blobs, OptoRuntime::monitor_notifyAll_Java());
  SET_ADDRESS(_C2_blobs, OptoRuntime::rethrow_stub());
  SET_ADDRESS(_C2_blobs, OptoRuntime::slow_arraycopy_Java());
  SET_ADDRESS(_C2_blobs, OptoRuntime::register_finalizer_Java());
  SET_ADDRESS(_C2_blobs, OptoRuntime::class_init_barrier_Java());
  SET_ADDRESS(_C2_blobs, OptoRuntime::compile_method_Java());
#if INCLUDE_JVMTI
  // Virtual-thread transition notification entry points
  SET_ADDRESS(_C2_blobs, OptoRuntime::vthread_end_first_transition_Java());
  SET_ADDRESS(_C2_blobs, OptoRuntime::vthread_start_final_transition_Java());
  SET_ADDRESS(_C2_blobs, OptoRuntime::vthread_start_transition_Java());
  SET_ADDRESS(_C2_blobs, OptoRuntime::vthread_end_transition_Java());
#endif /* INCLUDE_JVMTI */
#endif

  assert(_C2_blobs_length <= _C2_blobs_max, "increase _C2_blobs_max to %d", _C2_blobs_length);
  _c2_complete = true;
  log_info(aot, codecache, init)("OptoRuntime Blobs recorded");
}
3595 #undef SET_ADDRESS
3596
// Release the C-heap arrays backing the address table sections.
// NOTE(review): only _extrs_addr, _stubs_addr and _shared_blobs_addr are
// freed here; presumably the C1/C2 blob pointers alias these allocations or
// are owned elsewhere — confirm against the allocation site before changing.
AOTCodeAddressTable::~AOTCodeAddressTable() {
  if (_extrs_addr != nullptr) {
    FREE_C_HEAP_ARRAY(address, _extrs_addr);
  }
  if (_stubs_addr != nullptr) {
    FREE_C_HEAP_ARRAY(address, _stubs_addr);
  }
  if (_shared_blobs_addr != nullptr) {
    FREE_C_HEAP_ARRAY(address, _shared_blobs_addr);
  }
}
3608
// Capacity of the C-string side table (smaller in PRODUCT builds).
#ifdef PRODUCT
#define MAX_STR_COUNT 200
#else
#define MAX_STR_COUNT 500
#endif
// C-string ids occupy the id range [_c_str_base, _c_str_base + _c_str_max)
// directly after all other address-table sections (_all_max).
#define _c_str_max MAX_STR_COUNT
static const int _c_str_base = _all_max;

static const char* _C_strings_in[MAX_STR_COUNT] = {nullptr}; // Incoming strings
static const char* _C_strings[MAX_STR_COUNT] = {nullptr}; // Our duplicates
static int _C_strings_count = 0;  // Number of strings registered via add_C_string()
static int _C_strings_s[MAX_STR_COUNT] = {0};  // store id -> index into _C_strings
static int _C_strings_id[MAX_STR_COUNT] = {0}; // index into _C_strings -> store id (-1 if unused)
static int _C_strings_used = 0;   // Number of strings actually referenced from code
3623
// Load the C-string table written by store_strings() from the AOT code cache:
// a lengths array followed by the NUL-terminated strings laid out back to back.
// Populates _C_strings and the identity id/slot mappings.
void AOTCodeCache::load_strings() {
  uint strings_count  = _load_header->strings_count();
  if (strings_count == 0) {
    return;
  }
  // Layout at strings_offset: uint lengths[strings_count], then string bytes.
  uint strings_offset = _load_header->strings_offset();
  uint* string_lengths = (uint*)addr(strings_offset);
  strings_offset += (strings_count * sizeof(uint));
  uint strings_size = _load_header->search_table_offset() - strings_offset;
  // We have to keep cached strings longer than _cache buffer
  // because they are referenced from compiled code which may
  // still be executed on VM exit after _cache is freed.
  char* p = NEW_C_HEAP_ARRAY(char, strings_size+1, mtCode);
  memcpy(p, addr(strings_offset), strings_size);
  _C_strings_buf = p;
  assert(strings_count <= MAX_STR_COUNT, "sanity");
  for (uint i = 0; i < strings_count; i++) {
    _C_strings[i] = p;
    uint len = string_lengths[i];  // length includes the terminating NUL
    // After a load, ids and slots are the identity mapping.
    _C_strings_s[i] = i;
    _C_strings_id[i] = i;
    p += len;
  }
  assert((uint)(p - _C_strings_buf) <= strings_size, "(" INTPTR_FORMAT " - " INTPTR_FORMAT ") = %d > %d ", p2i(p), p2i(_C_strings_buf), (uint)(p - _C_strings_buf), strings_size);
  _C_strings_count = strings_count;
  _C_strings_used  = strings_count;
  log_debug(aot, codecache, init)("  Loaded %d C strings of total length %d at offset %d from AOT Code Cache", _C_strings_count, strings_size, strings_offset);
}
3652
// Write the referenced C strings to the cache buffer in store-id order:
// first an array of lengths (each including the NUL), then the string bytes.
// Returns the number of strings written, or -1 on a buffer write failure.
int AOTCodeCache::store_strings() {
  if (_C_strings_used > 0) {
    MutexLocker ml(AOTCodeCStrings_lock, Mutex::_no_safepoint_check_flag);
    uint offset = _write_position;
    uint length = 0;
    // Reserve the lengths array up front; it is filled in as strings are written.
    uint* lengths = (uint *)reserve_bytes(sizeof(uint) * _C_strings_used);
    if (lengths == nullptr) {
      return -1;
    }
    for (int i = 0; i < _C_strings_used; i++) {
      // _C_strings_s maps store id -> slot in _C_strings (see id_for_C_string)
      const char* str = _C_strings[_C_strings_s[i]];
      uint len = (uint)strlen(str) + 1;
      length += len;
      assert(len < 1000, "big string: %s", str);
      lengths[i] = len;
      uint n = write_bytes(str, len);
      if (n != len) {
        return -1;
      }
    }
    log_debug(aot, codecache, exit)("  Wrote %d C strings of total length %d at offset %d to AOT Code Cache",
                                    _C_strings_used, length, offset);
  }
  return _C_strings_used;
}
3678
3679 const char* AOTCodeCache::add_C_string(const char* str) {
3680 if (is_on_for_dump() && str != nullptr) {
3681 MutexLocker ml(AOTCodeCStrings_lock, Mutex::_no_safepoint_check_flag);
3682 AOTCodeAddressTable* table = addr_table();
3683 if (table != nullptr) {
3684 return table->add_C_string(str);
3685 }
3686 }
3687 return str;
3688 }
3689
3690 const char* AOTCodeAddressTable::add_C_string(const char* str) {
3691 if (_extrs_complete) {
3692 // Check previous strings address
3693 for (int i = 0; i < _C_strings_count; i++) {
3694 if (_C_strings_in[i] == str) {
3695 return _C_strings[i]; // Found previous one - return our duplicate
3696 } else if (strcmp(_C_strings[i], str) == 0) {
3697 return _C_strings[i];
3698 }
3699 }
3700 // Add new one
3701 if (_C_strings_count < MAX_STR_COUNT) {
3702 // Passed in string can be freed and used space become inaccessible.
3703 // Keep original address but duplicate string for future compare.
3704 _C_strings_id[_C_strings_count] = -1; // Init
3705 _C_strings_in[_C_strings_count] = str;
3706 const char* dup = os::strdup(str);
3707 _C_strings[_C_strings_count++] = dup;
3708 log_trace(aot, codecache, stringtable)("add_C_string: [%d] " INTPTR_FORMAT " '%s'", _C_strings_count, p2i(dup), dup);
3709 return dup;
3710 } else {
3711 assert(false, "Number of C strings >= MAX_STR_COUNT");
3712 }
3713 }
3714 return str;
3715 }
3716
3717 int AOTCodeAddressTable::id_for_C_string(address str) {
3718 if (str == nullptr) {
3719 return -1;
3720 }
3721 MutexLocker ml(AOTCodeCStrings_lock, Mutex::_no_safepoint_check_flag);
3722 for (int i = 0; i < _C_strings_count; i++) {
3723 if (_C_strings[i] == (const char*)str) { // found
3724 int id = _C_strings_id[i];
3725 if (id >= 0) {
3726 assert(id < _C_strings_used, "%d >= %d", id , _C_strings_used);
3727 return id; // Found recorded
3728 }
3729 // Not found in recorded, add new
3730 id = _C_strings_used++;
3731 _C_strings_s[id] = i;
3732 _C_strings_id[i] = id;
3733 return id;
3734 }
3735 }
3736 return -1;
3737 }
3738
3739 address AOTCodeAddressTable::address_for_C_string(int idx) {
3740 assert(idx < _C_strings_count, "sanity");
3741 return (address)_C_strings[idx];
3742 }
3743
3744 static int search_address(address addr, address* table, uint length) {
3745 for (int i = 0; i < (int)length; i++) {
3746 if (table[i] == addr) {
3747 return i;
3748 }
3749 }
3750 return BAD_ADDRESS_ID;
3751 }
3752
3753 address AOTCodeAddressTable::address_for_id(int idx) {
3754 assert(_extrs_complete, "AOT Code Cache VM runtime addresses table is not complete");
3755 if (idx == -1) {
3756 return (address)-1;
3757 }
3758 uint id = (uint)idx;
3759 // special case for symbols based relative to os::init
3760 if (id > (_c_str_base + _c_str_max)) {
3761 return (address)os::init + idx;
3762 }
3763 if (idx < 0) {
3764 fatal("Incorrect id %d for AOT Code Cache addresses table", id);
3765 return nullptr;
3766 }
3767 // no need to compare unsigned id against 0
3768 if (/* id >= _extrs_base && */ id < _extrs_length) {
3769 return _extrs_addr[id - _extrs_base];
3770 }
3771 if (id >= _stubs_base && id < _stubs_base + _stubs_length) {
3772 return _stubs_addr[id - _stubs_base];
3773 }
3774 if (id >= _stubs_base && id < _stubs_base + _stubs_length) {
3775 return _stubs_addr[id - _stubs_base];
3776 }
3777 if (id >= _shared_blobs_base && id < _shared_blobs_base + _shared_blobs_length) {
3778 return _shared_blobs_addr[id - _shared_blobs_base];
3779 }
3780 if (id >= _C1_blobs_base && id < _C1_blobs_base + _C1_blobs_length) {
3781 return _C1_blobs_addr[id - _C1_blobs_base];
3782 }
3783 if (id >= _C1_blobs_base && id < _C1_blobs_base + _C1_blobs_length) {
3784 return _C1_blobs_addr[id - _C1_blobs_base];
3785 }
3786 if (id >= _C2_blobs_base && id < _C2_blobs_base + _C2_blobs_length) {
3787 return _C2_blobs_addr[id - _C2_blobs_base];
3788 }
3789 if (id >= _c_str_base && id < (_c_str_base + (uint)_C_strings_count)) {
3790 return address_for_C_string(id - _c_str_base);
3791 }
3792 fatal("Incorrect id %d for AOT Code Cache addresses table", id);
3793 return nullptr;
3794 }
3795
// Translate a runtime address referenced by a relocation into a stable id
// that can be stored in the AOT code cache. Searches, in order: the
// C-string table, StubRoutines, code blobs (shared/C1/C2), and external
// runtime functions; as a last resort encodes the address as an offset
// relative to os::init. Returns -1 for the static-call-stub self-jump
// sentinel. Asserts (debug builds) when the address cannot be encoded.
int AOTCodeAddressTable::id_for_address(address addr, RelocIterator reloc, CodeBlob* blob) {
  assert(_extrs_complete, "AOT Code Cache VM runtime addresses table is not complete");
  int id = -1;
  if (addr == (address)-1) { // Static call stub has jump to itself
    return id;
  }
  // Check card_table_base address first since it can point to any address
  BarrierSet* bs = BarrierSet::barrier_set();
  bool is_const_card_table_base = !UseG1GC && !UseShenandoahGC && bs->is_a(BarrierSet::CardTableBarrierSet);
  guarantee(!is_const_card_table_base || addr != ci_card_table_address_const(), "sanity");

  // Search for C string
  id = id_for_C_string(addr);
  if (id >= 0) {
    return id + _c_str_base;
  }
  if (StubRoutines::contains(addr)) {
    // Search in stubs
    id = search_address(addr, _stubs_addr, _stubs_length);
    if (id == BAD_ADDRESS_ID) {
      // Look up the stub's name for the failure message; retry with the
      // return-pc adjustment in case addr is a return address into the stub.
      StubCodeDesc* desc = StubCodeDesc::desc_for(addr);
      if (desc == nullptr) {
        desc = StubCodeDesc::desc_for(addr + frame::pc_return_offset);
      }
      const char* sub_name = (desc != nullptr) ? desc->name() : "<unknown>";
      assert(false, "Address " INTPTR_FORMAT " for Stub:%s is missing in AOT Code Cache addresses table", p2i(addr), sub_name);
    } else {
      return _stubs_base + id;
    }
  } else {
    CodeBlob* cb = CodeCache::find_blob(addr);
    if (cb != nullptr) {
      int id_base = _shared_blobs_base;
      // Search in code blobs
      id = search_address(addr, _shared_blobs_addr, _shared_blobs_length);
      if (id == BAD_ADDRESS_ID) {
        id_base = _C1_blobs_base;
        // search C1 blobs
        id = search_address(addr, _C1_blobs_addr, _C1_blobs_length);
      }
      if (id == BAD_ADDRESS_ID) {
        id_base = _C2_blobs_base;
        // search C2 blobs
        id = search_address(addr, _C2_blobs_addr, _C2_blobs_length);
      }
      if (id == BAD_ADDRESS_ID) {
        assert(false, "Address " INTPTR_FORMAT " for Blob:%s is missing in AOT Code Cache addresses table", p2i(addr), cb->name());
      } else {
        return id_base + id;
      }
    } else {
      // Search in runtime functions
      id = search_address(addr, _extrs_addr, _extrs_length);
      if (id == BAD_ADDRESS_ID) {
        ResourceMark rm;
        const int buflen = 1024;
        char* func_name = NEW_RESOURCE_ARRAY(char, buflen);
        int offset = 0;
        if (os::dll_address_to_function_name(addr, func_name, buflen, &offset)) {
          if (offset > 0) {
            // Could be address of C string
            uint dist = (uint)pointer_delta(addr, (address)os::init, 1);
            CompileTask* task = ciEnv::current()->task();
            uint compile_id = 0;
            uint comp_level =0;
            if (task != nullptr) { // this could be called from compiler runtime initialization (compiler blobs)
              compile_id = task->compile_id();
              comp_level = task->comp_level();
            }
            log_debug(aot, codecache)("%d (L%d): Address " INTPTR_FORMAT " (offset %d) for runtime target '%s' is missing in AOT Code Cache addresses table",
                          compile_id, comp_level, p2i(addr), dist, (const char*)addr);
            // Encode as os::init-relative distance; must not collide with
            // the regular id ranges decoded by address_for_id().
            assert(dist > (uint)(_all_max + MAX_STR_COUNT), "change encoding of distance");
            return dist;
          }
          reloc.print_current_on(tty);
          blob->print_on(tty);
          blob->print_code_on(tty);
          assert(false, "Address " INTPTR_FORMAT " for runtime target '%s+%d' is missing in AOT Code Cache addresses table", p2i(addr), func_name, offset);
        } else {
          reloc.print_current_on(tty);
          blob->print_on(tty);
          blob->print_code_on(tty);
          os::find(addr, tty);
          assert(false, "Address " INTPTR_FORMAT " for <unknown>/('%s') is missing in AOT Code Cache addresses table", p2i(addr), (const char*)addr);
        }
      } else {
        return _extrs_base + id;
      }
    }
  }
  return id;
}
3888
3889 #undef _extrs_max
3890 #undef _stubs_max
3891 #undef _shared_blobs_max
3892 #undef _C1_blobs_max
3893 #undef _C2_blobs_max
3894 #undef _blobs_max
3895 #undef _extrs_base
3896 #undef _stubs_base
3897 #undef _shared_blobs_base
3898 #undef _C1_blobs_base
3899 #undef _C2_blobs_base
3900 #undef _blobs_end
3901
// Singleton holding GC-dependent constants that AOT code patches in at load time.
AOTRuntimeConstants AOTRuntimeConstants::_aot_runtime_constants;

// Populate the runtime constants from the active GC configuration:
// the card table base (only for card-table GCs without a moving base)
// and the heap region grain shift.
void AOTRuntimeConstants::initialize_from_runtime() {
  BarrierSet* bs = BarrierSet::barrier_set();
  address card_table_base = nullptr;
  uint grain_shift = 0;
#if INCLUDE_G1GC
  if (bs->is_a(BarrierSet::G1BarrierSet)) {
    grain_shift = G1HeapRegion::LogOfHRGrainBytes;
  } else
#endif
#if INCLUDE_SHENANDOAHGC
  if (bs->is_a(BarrierSet::ShenandoahBarrierSet)) {
    grain_shift = 0;
  } else
#endif
  if (bs->is_a(BarrierSet::CardTableBarrierSet)) {
    CardTable::CardValue* base = ci_card_table_address_const();
    assert(base != nullptr, "unexpected byte_map_base");
    card_table_base = base;
    CardTableBarrierSet* ctbs = barrier_set_cast<CardTableBarrierSet>(bs);
    grain_shift = ctbs->grain_shift();
  }
  _aot_runtime_constants._card_table_base = card_table_base;
  _aot_runtime_constants._grain_shift = grain_shift;
}
3928
// Null-terminated list of the addresses of the patchable constant fields,
// in declaration order.
address AOTRuntimeConstants::_field_addresses_list[] = {
  ((address)&_aot_runtime_constants._card_table_base),
  ((address)&_aot_runtime_constants._grain_shift),
  nullptr
};
3934
3935 address AOTRuntimeConstants::card_table_base_address() {
3936 assert(UseSerialGC || UseParallelGC, "Only these GCs have constant card table base");
3937 return (address)&_aot_runtime_constants._card_table_base;
3938 }
3939
// Close the nmethod-reader gate and wait for in-flight readers to drain.
// Protocol: _nmethod_readers >= 0 means "open, value = active readers";
// negative means "closed, -(value+1) readers still inside". A closed
// counter of -1 therefore means "closed, no readers left".
void AOTCodeCache::wait_for_no_nmethod_readers() {
  while (true) {
    int cur = AtomicAccess::load(&_nmethod_readers);
    // Flip the count into the negative "closed" encoding.
    int upd = -(cur + 1);
    if (cur >= 0 && AtomicAccess::cmpxchg(&_nmethod_readers, cur, upd) == cur) {
      // Success, no new readers should appear.
      break;
    }
  }

  // Now wait for all readers to leave.
  SpinYield w;
  while (AtomicAccess::load(&_nmethod_readers) != -1) {
    w.wait();
  }
}
3956
// Try to enter the nmethod-reader critical section by incrementing the
// reader counter. Sets _failed when the cache has already been closed
// (counter is negative — see wait_for_no_nmethod_readers()).
AOTCodeCache::ReadingMark::ReadingMark() {
  while (true) {
    int cur = AtomicAccess::load(&_nmethod_readers);
    if (cur < 0) {
      // Cache is already closed, cannot proceed.
      _failed = true;
      return;
    }
    if (AtomicAccess::cmpxchg(&_nmethod_readers, cur, cur + 1) == cur) {
      // Successfully recorded ourselves as entered.
      _failed = false;
      return;
    }
  }
}
3972
// Leave the nmethod-reader critical section. The counter direction depends
// on whether the cache was closed while we were inside: open counts down
// toward 0, closed counts up toward the -1 "drained" sentinel.
AOTCodeCache::ReadingMark::~ReadingMark() {
  if (_failed) {
    // Never entered — nothing to undo.
    return;
  }
  while (true) {
    int cur = AtomicAccess::load(&_nmethod_readers);
    if (cur > 0) {
      // Cache is open, we are counting down towards 0.
      if (AtomicAccess::cmpxchg(&_nmethod_readers, cur, cur - 1) == cur) {
        return;
      }
    } else {
      // Cache is closed, we are counting up towards -1.
      if (AtomicAccess::cmpxchg(&_nmethod_readers, cur, cur + 1) == cur) {
        return;
      }
    }
  }
}
3992
// Print the accumulated AOT code cache timers: load-side timers when the
// cache is being used, store-side timer when it is being dumped.
void AOTCodeCache::print_timers_on(outputStream* st) {
  if (is_using_code()) {
    st->print_cr ("    AOT Code Preload Time:  %7.3f s", _t_totalPreload.seconds());
    st->print_cr ("    AOT Code Load Time:     %7.3f s", _t_totalLoad.seconds());
    st->print_cr ("      nmethod register:     %7.3f s", _t_totalRegister.seconds());
    st->print_cr ("      find AOT code entry:  %7.3f s", _t_totalFind.seconds());
  }
  if (is_dumping_code()) {
    st->print_cr ("    AOT Code Store Time:    %7.3f s", _t_totalStore.seconds());
  }
}
4004
4005 AOTCodeStats AOTCodeStats::add_aot_code_stats(AOTCodeStats stats1, AOTCodeStats stats2) {
4006 AOTCodeStats result;
4007 for (int kind = AOTCodeEntry::None; kind < AOTCodeEntry::Kind_count; kind++) {
4008 result.ccstats._kind_cnt[kind] = stats1.entry_count(kind) + stats2.entry_count(kind);
4009 }
4010
4011 for (int lvl = CompLevel_none; lvl < AOTCompLevel_count; lvl++) {
4012 result.ccstats._nmethod_cnt[lvl] = stats1.nmethod_count(lvl) + stats2.nmethod_count(lvl);
4013 }
4014 result.ccstats._clinit_barriers_cnt = stats1.clinit_barriers_count() + stats2.clinit_barriers_count();
4015 return result;
4016 }
4017
// Emit per-kind totals (and per-tier nmethod totals) to the debug log on
// VM exit; no-op when the log stream is disabled.
void AOTCodeCache::log_stats_on_exit(AOTCodeStats& stats) {
  LogStreamHandle(Debug, aot, codecache, exit) log;
  if (log.is_enabled()) {
    for (uint kind = AOTCodeEntry::None; kind < AOTCodeEntry::Kind_count; kind++) {
      log.print_cr("%s: total=%u", aot_code_entry_kind_name[kind], stats.entry_count(kind));
      if (kind == AOTCodeEntry::Nmethod) {
        // Break down nmethods by compilation tier.
        for (uint lvl = CompLevel_simple; lvl < AOTCompLevel_count; lvl++) {
          log.print("  Tier %d: total=%u", lvl, stats.nmethod_count(lvl));
          if (lvl == AOTCompLevel_count-1) { // AOT Preload
            log.print(", has_clinit_barriers=%u", stats.clinit_barriers_count());
          }
          log.cr();
        }
      }
    }
  }
}
4035
4036 static void print_helper1(outputStream* st, const char* name, int count) {
4037 if (count > 0) {
4038 st->print(" %s=%d", name, count);
4039 }
4040 }
4041
// Print AOT code cache usage statistics to 'st': entry counts per kind
// (total/loaded/invalidated/failed) and, for nmethods, the same breakdown
// per compilation tier. Additionally dumps unused-entry diagnostics and a
// per-nmethod listing of the code cache when the corresponding unified
// logging tag sets are enabled. Safe no-op when the cache is not open for
// use or is concurrently being closed.
void AOTCodeCache::print_statistics_on(outputStream* st) {
  AOTCodeCache* cache = open_for_use();
  if (cache != nullptr) {
    ReadingMark rdmk;
    if (rdmk.failed()) {
      // Cache is closed, cannot touch anything.
      return;
    }
    AOTCodeStats stats;

    // Accumulate statistics over the preload entries table...
    uint preload_count = cache->_load_header->preload_entries_count();
    AOTCodeEntry* preload_entries = (AOTCodeEntry*)cache->addr(cache->_load_header->preload_entries_offset());
    for (uint i = 0; i < preload_count; i++) {
      stats.collect_all_stats(&preload_entries[i]);
    }

    // ...and over the regular entries table.
    uint count = cache->_load_header->entries_count();
    AOTCodeEntry* load_entries = (AOTCodeEntry*)cache->addr(cache->_load_header->entries_offset());
    for (uint i = 0; i < count; i++) {
      stats.collect_all_stats(&load_entries[i]);
    }

    // One summary line per entry kind that has any entries; nmethods get an
    // extra per-tier breakdown.
    for (uint kind = AOTCodeEntry::None; kind < AOTCodeEntry::Kind_count; kind++) {
      if (stats.entry_count(kind) > 0) {
        st->print(" %s:", aot_code_entry_kind_name[kind]);
        print_helper1(st, "total", stats.entry_count(kind));
        print_helper1(st, "loaded", stats.entry_loaded_count(kind));
        print_helper1(st, "invalidated", stats.entry_invalidated_count(kind));
        print_helper1(st, "failed", stats.entry_load_failed_count(kind));
        st->cr();
      }
      if (kind == AOTCodeEntry::Nmethod) {
        for (uint lvl = CompLevel_simple; lvl < AOTCompLevel_count; lvl++) {
          if (stats.nmethod_count(lvl) > 0) {
            st->print(" AOT Code T%d", lvl);
            print_helper1(st, "total", stats.nmethod_count(lvl));
            print_helper1(st, "loaded", stats.nmethod_loaded_count(lvl));
            print_helper1(st, "invalidated", stats.nmethod_invalidated_count(lvl));
            print_helper1(st, "failed", stats.nmethod_load_failed_count(lvl));
            if (lvl == AOTCompLevel_count-1) {
              // Top (preload) tier: also report clinit-barrier count.
              print_helper1(st, "has_clinit_barriers", stats.clinit_barriers_count());
            }
            st->cr();
          }
        }
      }
    }
    // Optional diagnostics: entries that were never loaded.
    LogStreamHandle(Debug, aot, codecache, init) log;
    if (log.is_enabled()) {
      AOTCodeCache::print_unused_entries_on(&log);
    }
    LogStreamHandle(Trace, aot, codecache) aot_info;
    // need a lock to traverse the code cache
    if (aot_info.is_enabled()) {
      MutexLocker locker(CodeCache_lock, Mutex::_no_safepoint_check_flag);
      NMethodIterator iter(NMethodIterator::all);
      while (iter.next()) {
        nmethod* nm = iter.method();
        if (nm->is_in_use() && !nm->is_native_method() && !nm->is_osr_method()) {
          // Flag column: S = method in AOT cache, A = AOT-compiled nmethod,
          // P = preloaded, followed by the compilation level.
          aot_info.print("%5d:%c%c%c%d:", nm->compile_id(),
                         (nm->method()->in_aot_cache() ? 'S' : ' '),
                         (nm->is_aot() ? 'A' : ' '),
                         (nm->preloaded() ? 'P' : ' '),
                         nm->comp_level());
          print_helper(nm, &aot_info);
          aot_info.print(": ");
          CompileTask::print(&aot_info, nm, nullptr, true /*short_form*/);
          // NOTE(review): 'aot_debug' uses the same (Trace, aot, codecache)
          // tag set as the enclosing 'aot_info' guard, so this check is
          // always true here — confirm whether Debug level was intended.
          LogStreamHandle(Trace, aot, codecache) aot_debug;
          if (aot_debug.is_enabled()) {
            MethodTrainingData* mtd = MethodTrainingData::find(methodHandle(Thread::current(), nm->method()));
            if (mtd != nullptr) {
              mtd->iterate_compiles([&](CompileTrainingData* ctd) {
                aot_debug.print(" CTD: "); ctd->print_on(&aot_debug); aot_debug.cr();
              });
            }
          }
        }
      }
    }
  }
}
4123
4124 void AOTCodeEntry::print(outputStream* st) const {
4125 st->print_cr(" AOT Code Cache entry " INTPTR_FORMAT " [kind: %d, id: " UINT32_FORMAT_X_0 ", offset: %d, size: %d, comp_level: %d, comp_id: %d, %s%s%s%s]",
4126 p2i(this), (int)_kind, _id, _offset, _size, _comp_level, _comp_id,
4127 (_not_entrant? "not_entrant" : "entrant"),
4128 (_loaded ? ", loaded" : ""),
4129 (_has_clinit_barriers ? ", has_clinit_barriers" : ""),
4130 (_for_preload ? ", for_preload" : ""));
4131 }
4132
// This is called after initialize() but before init2()
// and _cache is not set yet, so we go through 'opened_cache' directly.
// Dumps every preload entry, then every regular entry (walked via the
// search table, which stores [id, index] pairs), each with its name and
// state flags. No-op unless the cache was opened for use.
void AOTCodeCache::print_on(outputStream* st) {
  if (opened_cache != nullptr && opened_cache->for_use()) {
    ReadingMark rdmk;
    if (rdmk.failed()) {
      // Cache is closed, cannot touch anything.
      return;
    }

    st->print_cr("\nAOT Code Cache Preload entries");

    uint preload_count = opened_cache->_load_header->preload_entries_count();
    AOTCodeEntry* preload_entries = (AOTCodeEntry*)opened_cache->addr(opened_cache->_load_header->preload_entries_offset());
    for (uint i = 0; i < preload_count; i++) {
      AOTCodeEntry* entry = &preload_entries[i];

      // The entry's name is stored at name_offset() relative to the
      // entry's data blob at offset().
      uint entry_position = entry->offset();
      uint name_offset = entry->name_offset() + entry_position;
      const char* saved_name = opened_cache->addr(name_offset);

      st->print_cr("%4u: %10s Id:%u AP%u size=%u '%s' %s%s%s",
                   i, aot_code_entry_kind_name[entry->kind()], entry->id(), entry->comp_level(),
                   entry->size(), saved_name,
                   entry->has_clinit_barriers() ? " has_clinit_barriers" : "",
                   entry->is_loaded() ? " loaded" : "",
                   entry->not_entrant() ? " not_entrant" : "");

      st->print_raw(" ");
      AOTCodeReader reader(opened_cache, entry, nullptr);
      reader.print_on(st);
    }

    st->print_cr("\nAOT Code Cache entries");

    uint count = opened_cache->_load_header->entries_count();
    uint* search_entries = (uint*)opened_cache->addr(opened_cache->_load_header->search_table_offset()); // [id, index]
    AOTCodeEntry* load_entries = (AOTCodeEntry*)opened_cache->addr(opened_cache->_load_header->entries_offset());

    for (uint i = 0; i < count; i++) {
      // Each search-table slot is a pair; the second element is the
      // index into the entries table.
      int index = search_entries[2*i + 1];
      AOTCodeEntry* entry = &(load_entries[index]);

      uint entry_position = entry->offset();
      uint name_offset = entry->name_offset() + entry_position;
      const char* saved_name = opened_cache->addr(name_offset);

      st->print_cr("%4u: %10s idx:%4u Id:%u A%u size=%u '%s' %s%s",
                   i, aot_code_entry_kind_name[entry->kind()], index, entry->id(), entry->comp_level(),
                   entry->size(), saved_name,
                   entry->is_loaded() ? " loaded" : "",
                   entry->not_entrant() ? " not_entrant" : "");

      st->print_raw(" ");
      AOTCodeReader reader(opened_cache, entry, nullptr);
      reader.print_on(st);
    }
  }
}
4192
// Report AOT nmethod entries that were never loaded, using training data to
// skip entries that are legitimately unused (holder class not initialized,
// method queued for compilation, or already superseded by an online compile).
// NOTE(review): output goes to a dedicated (Info, aot, codecache, init) log
// stream and the 'st' parameter is unused — confirm whether printing to 'st'
// was intended.
void AOTCodeCache::print_unused_entries_on(outputStream* st) {
  LogStreamHandle(Info, aot, codecache, init) info;
  if (info.is_enabled()) {
    AOTCodeCache::iterate([&](AOTCodeEntry* entry) {
      if (entry->is_nmethod() && !entry->is_loaded()) {
        MethodTrainingData* mtd = MethodTrainingData::find(methodHandle(Thread::current(), entry->method()));
        if (mtd != nullptr) {
          if (mtd->has_holder()) {
            if (mtd->holder()->method_holder()->is_initialized()) {
              ResourceMark rm;
              // Only report the compile record matching this entry's level.
              mtd->iterate_compiles([&](CompileTrainingData* ctd) {
                if ((uint)ctd->level() == entry->comp_level()) {
                  if (ctd->init_deps_left_acquire() == 0) {
                    nmethod* nm = mtd->holder()->code();
                    if (nm == nullptr) {
                      if (mtd->holder()->queued_for_compilation()) {
                        return; // scheduled for compilation
                      }
                    } else if ((uint)nm->comp_level() >= entry->comp_level()) {
                      return; // already online compiled and superseded by a more optimal method
                    }
                    info.print("AOT Code Cache entry not loaded: ");
                    ctd->print_on(&info);
                    info.cr();
                  }
                }
              });
            } else {
              // not yet initialized
            }
          } else {
            // Training data exists but has no resolved holder method.
            info.print("AOT Code Cache entry doesn't have a holder: ");
            mtd->print_on(&info);
            info.cr();
          }
        }
      }
    });
  }
}
4233
4234 void AOTCodeReader::print_on(outputStream* st) {
4235 uint entry_position = _entry->offset();
4236 set_read_position(entry_position);
4237
4238 // Read name
4239 uint name_offset = entry_position + _entry->name_offset();
4240 uint name_size = _entry->name_size(); // Includes '/0'
4241 const char* name = addr(name_offset);
4242
4243 st->print_cr(" name: %s", name);
4244 }
4245