1 /*
2 * Copyright (c) 2023, 2026, Oracle and/or its affiliates. All rights reserved.
3 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
4 *
5 * This code is free software; you can redistribute it and/or modify it
6 * under the terms of the GNU General Public License version 2 only, as
7 * published by the Free Software Foundation.
8 *
9 * This code is distributed in the hope that it will be useful, but WITHOUT
10 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
11 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
12 * version 2 for more details (a copy is included in the LICENSE file that
13 * accompanied this code).
14 *
15 * You should have received a copy of the GNU General Public License version
16 * 2 along with this work; if not, write to the Free Software Foundation,
17 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
18 *
19 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
20 * or visit www.oracle.com if you need additional information or have any
21 * questions.
22 *
23 */
24
25
26 #include "asm/macroAssembler.hpp"
27 #include "cds/aotCacheAccess.hpp"
28 #include "cds/aotMetaspace.hpp"
29 #include "cds/cds_globals.hpp"
30 #include "cds/cdsConfig.hpp"
31 #include "cds/heapShared.hpp"
32 #include "ci/ciConstant.hpp"
33 #include "ci/ciEnv.hpp"
34 #include "ci/ciField.hpp"
35 #include "ci/ciMethod.hpp"
36 #include "ci/ciMethodData.hpp"
37 #include "ci/ciObject.hpp"
38 #include "ci/ciUtilities.inline.hpp"
39 #include "classfile/javaAssertions.hpp"
40 #include "classfile/stringTable.hpp"
41 #include "classfile/symbolTable.hpp"
42 #include "classfile/systemDictionary.hpp"
43 #include "classfile/vmClasses.hpp"
44 #include "classfile/vmIntrinsics.hpp"
45 #include "code/aotCodeCache.hpp"
46 #include "code/codeBlob.hpp"
47 #include "code/codeCache.hpp"
48 #include "code/oopRecorder.inline.hpp"
49 #include "compiler/abstractCompiler.hpp"
50 #include "compiler/compilationPolicy.hpp"
51 #include "compiler/compileBroker.hpp"
52 #include "compiler/compileTask.hpp"
53 #include "gc/g1/g1BarrierSetRuntime.hpp"
54 #include "gc/shared/barrierSetAssembler.hpp"
55 #include "gc/shared/cardTableBarrierSet.hpp"
56 #include "gc/shared/gcConfig.hpp"
57 #include "logging/logStream.hpp"
58 #include "memory/memoryReserver.hpp"
59 #include "memory/universe.hpp"
60 #include "oops/klass.inline.hpp"
61 #include "oops/method.inline.hpp"
62 #include "oops/trainingData.hpp"
63 #include "prims/jvmtiThreadState.hpp"
64 #include "runtime/atomicAccess.hpp"
65 #include "runtime/deoptimization.hpp"
66 #include "runtime/flags/flagSetting.hpp"
67 #include "runtime/globals_extension.hpp"
68 #include "runtime/handles.inline.hpp"
69 #include "runtime/java.hpp"
70 #include "runtime/jniHandles.inline.hpp"
71 #include "runtime/mountUnmountDisabler.hpp"
72 #include "runtime/mutexLocker.hpp"
73 #include "runtime/objectMonitorTable.hpp"
74 #include "runtime/os.inline.hpp"
75 #include "runtime/sharedRuntime.hpp"
76 #include "runtime/stubCodeGenerator.hpp"
77 #include "runtime/stubRoutines.hpp"
78 #include "runtime/threadIdentifier.hpp"
79 #include "runtime/timerTrace.hpp"
80 #include "utilities/copy.hpp"
81 #include "utilities/formatBuffer.hpp"
82 #include "utilities/ostream.hpp"
83 #include "utilities/spinYield.hpp"
84 #ifdef COMPILER1
85 #include "c1/c1_LIRAssembler.hpp"
86 #include "c1/c1_Runtime1.hpp"
87 #include "gc/g1/c1/g1BarrierSetC1.hpp"
88 #include "gc/shared/c1/barrierSetC1.hpp"
89 #if INCLUDE_SHENANDOAHGC
90 #include "gc/shenandoah/c1/shenandoahBarrierSetC1.hpp"
91 #endif // INCLUDE_SHENANDOAHGC
92 #include "gc/z/c1/zBarrierSetC1.hpp"
93 #endif // COMPILER1
94 #ifdef COMPILER2
95 #include "opto/runtime.hpp"
96 #include "opto/parse.hpp"
97 #endif
98 #if INCLUDE_JVMCI
99 #include "jvmci/jvmci.hpp"
100 #endif
101 #if INCLUDE_G1GC
102 #include "gc/g1/g1BarrierSetRuntime.hpp"
103 #include "gc/g1/g1HeapRegion.hpp"
104 #endif
105 #if INCLUDE_SHENANDOAHGC
106 #include "gc/shenandoah/shenandoahRuntime.hpp"
107 #endif
108 #if INCLUDE_ZGC
109 #include "gc/z/zBarrierSetRuntime.hpp"
110 #endif
111 #if defined(X86) && !defined(ZERO)
112 #include "rdtsc_x86.hpp"
113 #endif
114
115 #include <errno.h>
116 #include <sys/stat.h>
117
// Human-readable names for AOTCodeEntry kinds. Generated from the
// DO_AOTCODEENTRY_KIND x-macro so the table automatically stays in sync
// with the AOTCodeEntry::Kind enum.
const char* aot_code_entry_kind_name[] = {
#define DECL_KIND_STRING(kind) XSTR(kind),
  DO_AOTCODEENTRY_KIND(DECL_KIND_STRING)
#undef DECL_KIND_STRING
};

// Accumulated timers for the major AOT code cache phases; they are only
// meaningful when enable_timers() returns true.
static elapsedTimer _t_totalLoad;     // loading AOT code entries
static elapsedTimer _t_totalPreload;  // preloading AOT code
static elapsedTimer _t_totalRegister; // registering loaded code
static elapsedTimer _t_totalFind;     // looking up cache entries
static elapsedTimer _t_totalStore;    // writing code into the cache
129
130 static bool enable_timers() {
131 return CITime || log_is_enabled(Info, init);
132 }
133
134 static void report_load_failure() {
135 if (AbortVMOnAOTCodeFailure) {
136 vm_exit_during_initialization("Unable to use AOT Code Cache.", nullptr);
137 }
138 log_info(aot, codecache, init)("Unable to use AOT Code Cache.");
139 AOTCodeCache::disable_caching();
140 }
141
142 static void report_store_failure() {
143 if (AbortVMOnAOTCodeFailure) {
144 tty->print_cr("Unable to create AOT Code Cache.");
145 vm_abort(false);
146 }
147 log_info(aot, codecache, exit)("Unable to create AOT Code Cache.");
148 AOTCodeCache::disable_caching();
149 }
150
// The sequence of AOT code caching flags and parameters settings.
//
// 1. The initial AOT code caching flags setting is done
//    during call to CDSConfig::check_vm_args_consistency().
//
// 2. The earliest AOT code state check is done in compilationPolicy_init()
//    where we set the number of compiler threads for the AOT assembly phase.
//
// 3. We determine presence of AOT code in the AOT Cache in
//    AOTMetaspace::open_static_archive() which is called
//    after compilationPolicy_init() but before codeCache_init().
//
// 4. AOTCodeCache::initialize() is called during universe_init()
//    and does final AOT state and flags settings.
//
// 5. Finally AOTCodeCache::init2() is called after universe_init()
//    when all GC settings are finalized.

// The following methods determine which action we take with AOT code depending
// on the phase of the AOT process: assembly or production.
171
// True during the AOT assembly phase when adapters are written into the cache.
bool AOTCodeCache::is_dumping_adapter() {
  return AOTAdapterCaching && is_on_for_dump();
}

// True during production runs when adapters are read from the cache.
bool AOTCodeCache::is_using_adapter() {
  return AOTAdapterCaching && is_on_for_use();
}

// True during the AOT assembly phase when stubs are written into the cache.
bool AOTCodeCache::is_dumping_stub() {
  return AOTStubCaching && is_on_for_dump();
}

// True during production runs when stubs are read from the cache.
bool AOTCodeCache::is_using_stub() {
  return AOTStubCaching && is_on_for_use();
}

// True during the AOT assembly phase when nmethods are written into the cache.
bool AOTCodeCache::is_dumping_code() {
  return AOTCodeCaching && is_on_for_dump();
}

// True during production runs when nmethods are read from the cache.
bool AOTCodeCache::is_using_code() {
  return AOTCodeCaching && is_on_for_use();
}

// This is used before AOTCodeCache is initialized
// but after AOT (CDS) Cache flags consistency is checked.
bool AOTCodeCache::maybe_dumping_code() {
  return AOTCodeCaching && CDSConfig::is_dumping_final_static_archive();
}
201
// Next methods could be called regardless of AOT code cache status.
// Initially they are called during AOT flags parsing and finalized
// in AOTCodeCache::initialize().

// Turn on every AOT code caching flag that is still at its default value;
// flags explicitly set on the command line are left alone.
void AOTCodeCache::enable_caching() {
  FLAG_SET_ERGO_IF_DEFAULT(AOTCodeCaching, true);
  FLAG_SET_ERGO_IF_DEFAULT(AOTStubCaching, true);
  FLAG_SET_ERGO_IF_DEFAULT(AOTAdapterCaching, true);
}

// Turn off all AOT code caching flags unconditionally.
void AOTCodeCache::disable_caching() {
  FLAG_SET_ERGO(AOTCodeCaching, false);
  FLAG_SET_ERGO(AOTStubCaching, false);
  FLAG_SET_ERGO(AOTAdapterCaching, false);
}

// True if any kind of AOT code caching (nmethods, stubs or adapters) is on.
bool AOTCodeCache::is_caching_enabled() {
  return AOTCodeCaching || AOTStubCaching || AOTAdapterCaching;
}
220
221 static uint32_t encode_id(AOTCodeEntry::Kind kind, int id) {
222 assert(AOTCodeEntry::is_valid_entry_kind(kind), "invalid AOTCodeEntry kind %d", (int)kind);
223 // There can be a conflict of id between an Adapter and *Blob, but that should not cause any functional issue
224 // becasue both id and kind are used to find an entry, and that combination should be unique
225 if (kind == AOTCodeEntry::Adapter) {
226 return id;
227 } else if (kind == AOTCodeEntry::SharedBlob) {
228 assert(StubInfo::is_shared(static_cast<BlobId>(id)), "not a shared blob id %d", id);
229 return id;
230 } else if (kind == AOTCodeEntry::C1Blob) {
231 assert(StubInfo::is_c1(static_cast<BlobId>(id)), "not a c1 blob id %d", id);
232 return id;
233 } else {
234 // kind must be AOTCodeEntry::C2Blob
235 assert(StubInfo::is_c2(static_cast<BlobId>(id)), "not a c2 blob id %d", id);
236 return id;
237 }
238 }
239
// Upper bound (in bytes) for the AOT code store buffer. Set in initialize()
// from AOTCodeMaxSize, aligned up to the VM allocation granularity.
static uint _max_aot_code_size = 0;
uint AOTCodeCache::max_aot_code_size() {
  return _max_aot_code_size;
}

// True when a dedicated thread should be used to load AOT nmethods.
bool AOTCodeCache::is_code_load_thread_on() {
  return UseAOTCodeLoadThread && AOTCodeCaching;
}
248
// Decide whether a constant field value may be folded into compiled code.
// For AOT compilations we are conservative: a value observed during the
// assembly phase may differ in the production run, so folding is restricted.
bool AOTCodeCache::allow_const_field(ciConstant& value) {
  ciEnv* env = CURRENT_ENV;
  precond(env != nullptr);
  assert(!env->is_aot_compile() || is_dumping_code(), "AOT compilation should be enabled");
  return !env->is_aot_compile() // Restrict only when we generate AOT code
  // Can not trust primitive too || !is_reference_type(value.basic_type())
  // May disable this too for now || is_reference_type(value.basic_type()) && value.as_object()->should_be_constant()
  ;
}
258
// It is called from AOTMetaspace::initialize_shared_spaces()
// which is called from universe_init().
// At this point all AOT class linking settings are finalized
// and AOT cache is open so we can map AOT code region.
void AOTCodeCache::initialize() {
  if (!is_caching_enabled()) {
    log_info(aot, codecache, init)("AOT Code Cache is not used: disabled.");
    return;
  }
#if defined(ZERO) || !(defined(AMD64) || defined(AARCH64))
  // AOT code caching is only implemented for the x86_64 and aarch64 ports.
  log_info(aot, codecache, init)("AOT Code Cache is not supported on this platform.");
  disable_caching();
  return;
#else
  assert(!FLAG_IS_DEFAULT(AOTCache), "AOTCache should be specified");

  // Disable stubs caching until JDK-8357398 is fixed.
  FLAG_SET_ERGO(AOTStubCaching, false);

  if (VerifyOops) {
    // Disable AOT stubs caching when VerifyOops flag is on.
    // Verify oops code generated a lot of C strings which overflow
    // AOT C string table (which has fixed size).
    // AOT C string table will be reworked later to handle such cases.
    //
    // Note: AOT adapters are not affected - they don't have oop operations.
    log_info(aot, codecache, init)("AOT Stubs Caching is not supported with VerifyOops.");
    FLAG_SET_ERGO(AOTStubCaching, false);
  }

  // Determine the AOT phase: dumping (assembly) or using (production).
  // AOT code requires AOT-linked classes in both directions.
  bool is_dumping = false;
  bool is_using = false;
  if (CDSConfig::is_dumping_final_static_archive() && CDSConfig::is_dumping_aot_linked_classes()) {
    is_dumping = is_caching_enabled();
  } else if (CDSConfig::is_using_archive() && CDSConfig::is_using_aot_linked_classes()) {
    is_using = is_caching_enabled();
  }
  if (ClassInitBarrierMode > 0 && !(is_dumping && AOTCodeCaching)) {
    log_info(aot, codecache, init)("Set ClassInitBarrierMode to 0 because AOT Code dumping is off.");
    FLAG_SET_ERGO(ClassInitBarrierMode, 0);
  }
  if (!(is_dumping || is_using)) {
    log_info(aot, codecache, init)("AOT Code Cache is not used: AOT Class Linking is not used.");
    disable_caching();
    return; // AOT code caching disabled on command line
  }
  // Reserve AOT Cache region when we are dumping AOT code.
  _max_aot_code_size = AOTCodeMaxSize;
  if (is_dumping && !FLAG_IS_DEFAULT(AOTCodeMaxSize)) {
    if (!is_aligned(AOTCodeMaxSize, os::vm_allocation_granularity())) {
      _max_aot_code_size = align_up(AOTCodeMaxSize, os::vm_allocation_granularity());
      log_debug(aot,codecache,init)("Max AOT Code Cache size is aligned up to %uK", (int)(max_aot_code_size()/K));
    }
  }
  size_t aot_code_size = is_using ? AOTCacheAccess::get_aot_code_region_size() : 0;
  if (is_using && aot_code_size == 0) {
    log_info(aot, codecache, init)("AOT Code Cache is empty");
    disable_caching();
    return;
  }
  if (!open_cache(is_dumping, is_using)) {
    if (is_using) {
      report_load_failure();
    } else {
      report_store_failure();
    }
    return;
  }
  if (is_dumping) {
    // These flags affect generated code and must be pinned while dumping.
    FLAG_SET_DEFAULT(FoldStableValues, false);
    FLAG_SET_DEFAULT(ForceUnreachable, true);
  }
  FLAG_SET_DEFAULT(DelayCompilerStubsGeneration, false);
#endif // defined(ZERO) || !(defined(AMD64) || defined(AARCH64))
}
334
// Cache instance under construction/verification; it is promoted to _cache
// only after verify_config_on_use() succeeds in init2().
static AOTCodeCache* opened_cache = nullptr; // Use this until we verify the cache
AOTCodeCache* AOTCodeCache::_cache = nullptr;
// Debug-only flag asserting that init2() ran before cache use.
DEBUG_ONLY( bool AOTCodeCache::_passed_init2 = false; )
338
// It is called after universe_init() when all GC settings are finalized.
// Verifies the recorded configuration, initializes the address table and
// promotes opened_cache to the publicly visible _cache.
void AOTCodeCache::init2() {
  DEBUG_ONLY( _passed_init2 = true; )
  if (opened_cache == nullptr) {
    return;
  }
  // After Universe initialized
  if (!opened_cache->verify_config_on_use()) { // Check on AOT code loading
    delete opened_cache;
    opened_cache = nullptr;
    report_load_failure();
    return;
  }

  // initialize aot runtime constants as appropriate to this runtime
  AOTRuntimeConstants::initialize_from_runtime();

  // initialize the table of external routines and initial stubs so we can save
  // generated code blobs that reference them
  AOTCodeAddressTable* table = opened_cache->_table;
  assert(table != nullptr, "should be initialized already");
  table->init_extrs();

  // Now cache and address table are ready for AOT code generation
  _cache = opened_cache;

  // Set ClassInitBarrierMode after all checks since it affects code generation
  if (is_dumping_code()) {
    FLAG_SET_ERGO_IF_DEFAULT(ClassInitBarrierMode, 1);
  } else {
    FLAG_SET_ERGO(ClassInitBarrierMode, 0);
  }
}
372
373 bool AOTCodeCache::open_cache(bool is_dumping, bool is_using) {
374 opened_cache = new AOTCodeCache(is_dumping, is_using);
375 if (opened_cache->failed()) {
376 delete opened_cache;
377 opened_cache = nullptr;
378 return false;
379 }
380 return true;
381 }
382
383 static void print_helper(nmethod* nm, outputStream* st) {
384 AOTCodeCache::iterate([&](AOTCodeEntry* e) {
385 if (e->method() == nm->method()) {
386 ResourceMark rm;
387 stringStream ss;
388 ss.print("A%s%d", (e->for_preload() ? "P" : ""), e->comp_level());
389 ss.print("[%s%s%s]",
390 (e->is_loaded() ? "L" : ""),
391 (e->load_fail() ? "F" : ""),
392 (e->not_entrant() ? "I" : ""));
393 ss.print("#%d", e->comp_id());
394
395 st->print(" %s", ss.freeze());
396 }
397 });
398 }
399
400 void AOTCodeCache::close() {
401 if (is_on()) {
402 delete _cache; // Free memory
403 _cache = nullptr;
404 opened_cache = nullptr;
405 }
406 }
407
// Root directory stored at the very beginning of the AOT code ("ac") region.
// Records the size and location of the AOT code data within the region.
class CachedCodeDirectory {
public:
  uint _aot_code_size;  // size of the AOT code data blob in bytes
  char* _aot_code_data; // start of the AOT code data blob

  // Record the AOT code blob. The pointer is stored via AOTCacheAccess so it
  // can be relocated when the region is mapped at a different address.
  void set_aot_code_data(uint size, char* aot_data) {
    _aot_code_size = size;
    AOTCacheAccess::set_pointer(&_aot_code_data, aot_data);
  }

  static CachedCodeDirectory* create();
};
420
421 // Storing AOT code in the AOT code region (ac) of AOT Cache:
422 //
423 // [1] Use CachedCodeDirectory to keep track of all of data related to AOT code.
424 // E.g., you can build a hashtable to record what methods have been archived.
425 //
426 // [2] Memory for all data for AOT code, including CachedCodeDirectory, should be
427 // allocated using AOTCacheAccess::allocate_aot_code_region().
428 //
429 // [3] CachedCodeDirectory must be the very first allocation.
430 //
431 // [4] Two kinds of pointer can be stored:
432 // - A pointer p that points to metadata. AOTCacheAccess::can_generate_aot_code(p) must return true.
433 // - A pointer to a buffer returned by AOTCacheAccess::allocate_aot_code_region().
434 // (It's OK to point to an interior location within this buffer).
435 // Such pointers must be stored using AOTCacheAccess::set_pointer()
436 //
437 // The buffers allocated by AOTCacheAccess::allocate_aot_code_region() are in a contiguous region. At runtime, this
438 // region is mapped to the process address space. All the pointers in this buffer are relocated as necessary
439 // (e.g., to account for the runtime location of the CodeCache).
440 //
// This is always at the very beginning of the mmapped CDS "ac" (AOT code) region
static CachedCodeDirectory* _aot_code_directory = nullptr;

// Allocate the directory; it must be the very first allocation in the
// AOT code region (asserted below).
CachedCodeDirectory* CachedCodeDirectory::create() {
  assert(AOTCacheAccess::is_aot_code_region_empty(), "must be");
  CachedCodeDirectory* dir = (CachedCodeDirectory*)AOTCacheAccess::allocate_aot_code_region(sizeof(CachedCodeDirectory));
  return dir;
}
449
#define DATA_ALIGNMENT HeapWordSize

// Construct the cache for a dumping (assembly) and/or using (production) run.
// When using: maps the AOT code region, verifies its header and loads the
// C-string table. When dumping: allocates an in-memory store buffer of
// max_aot_code_size() bytes. On any failure set_failed() is called and the
// caller (open_cache()) destroys the instance.
AOTCodeCache::AOTCodeCache(bool is_dumping, bool is_using) :
  _load_header(nullptr),
  _load_buffer(nullptr),
  _store_buffer(nullptr),
  _C_store_buffer(nullptr),
  _write_position(0),
  _load_size(0),
  _store_size(0),
  _for_use(is_using),
  _for_dump(is_dumping),
  _closing(false),
  _failed(false),
  _lookup_failed(false),
  _for_preload(false),
  _has_clinit_barriers(false),
  _table(nullptr),
  _load_entries(nullptr),
  _search_entries(nullptr),
  _store_entries(nullptr),
  _C_strings_buf(nullptr),
  _store_entries_cnt(0),
  _compile_id(0),
  _comp_level(0)
{
  // Read header at the beginning of cache
  if (_for_use) {
    // Read cache
    size_t load_size = AOTCacheAccess::get_aot_code_region_size();
    ReservedSpace rs = MemoryReserver::reserve(load_size, mtCode);
    if (!rs.is_reserved()) {
      log_warning(aot, codecache, init)("Failed to reserved %u bytes of memory for mapping AOT code region into AOT Code Cache", (uint)load_size);
      set_failed();
      return;
    }
    if (!AOTCacheAccess::map_aot_code_region(rs)) {
      log_warning(aot, codecache, init)("Failed to read/mmap AOT code region (ac) into AOT Code Cache");
      set_failed();
      return;
    }
    // The directory is always the first thing in the mapped region.
    _aot_code_directory = (CachedCodeDirectory*)rs.base();

    _load_size = _aot_code_directory->_aot_code_size;
    _load_buffer = _aot_code_directory->_aot_code_data;
    assert(is_aligned(_load_buffer, DATA_ALIGNMENT), "load_buffer is not aligned");
    log_info(aot, codecache, init)("Mapped %u bytes at address " INTPTR_FORMAT " from AOT Code Cache", _load_size, p2i(_load_buffer));

    // Header sits at offset 0 of the loaded data; reject incompatible caches.
    _load_header = (Header*)addr(0);
    if (!_load_header->verify(_load_size)) {
      set_failed();
      return;
    }
    log_info (aot, codecache, init)("Loaded %u AOT code entries from AOT Code Cache", _load_header->entries_count());
    log_debug(aot, codecache, init)(" %s: total=%u", aot_code_entry_kind_name[AOTCodeEntry::Adapter], _load_header->adapters_count());
    log_debug(aot, codecache, init)(" %s: total=%u", aot_code_entry_kind_name[AOTCodeEntry::SharedBlob], _load_header->shared_blobs_count());
    log_debug(aot, codecache, init)(" %s: total=%u", aot_code_entry_kind_name[AOTCodeEntry::C1Blob], _load_header->C1_blobs_count());
    log_debug(aot, codecache, init)(" %s: total=%u", aot_code_entry_kind_name[AOTCodeEntry::C2Blob], _load_header->C2_blobs_count());
    log_debug(aot, codecache, init)(" %s: total=%u", aot_code_entry_kind_name[AOTCodeEntry::Stub], _load_header->stubs_count());
    log_debug(aot, codecache, init)(" %s: total=%u", aot_code_entry_kind_name[AOTCodeEntry::Nmethod], _load_header->nmethods_count());
    log_debug(aot, codecache, init)(" AOT code cache size: %u bytes", _load_header->cache_size());

    // Read strings
    load_strings();
  }
  if (_for_dump) {
    // Over-allocate by DATA_ALIGNMENT so the aligned start still leaves
    // max_aot_code_size() usable bytes.
    _C_store_buffer = NEW_C_HEAP_ARRAY(char, max_aot_code_size() + DATA_ALIGNMENT, mtCode);
    _store_buffer = align_up(_C_store_buffer, DATA_ALIGNMENT);
    // Entries allocated at the end of buffer in reverse (as on stack).
    _store_entries = (AOTCodeEntry*)align_up(_C_store_buffer + max_aot_code_size(), DATA_ALIGNMENT);
    log_debug(aot, codecache, init)("Allocated store buffer at address " INTPTR_FORMAT " of size %u", p2i(_store_buffer), max_aot_code_size());
  }
  _table = new AOTCodeAddressTable();
}
524
525 void AOTCodeCache::invalidate(AOTCodeEntry* entry) {
526 // This could be concurent execution
527 if (entry != nullptr && is_on()) { // Request could come after cache is closed.
528 _cache->invalidate_entry(entry);
529 }
530 }
531
532 void AOTCodeCache::init_early_stubs_table() {
533 AOTCodeAddressTable* table = addr_table();
534 if (table != nullptr) {
535 table->init_early_stubs();
536 }
537 }
538
539 void AOTCodeCache::init_shared_blobs_table() {
540 AOTCodeAddressTable* table = addr_table();
541 if (table != nullptr) {
542 table->init_shared_blobs();
543 }
544 }
545
546 void AOTCodeCache::init_stubs_table() {
547 AOTCodeAddressTable* table = addr_table();
548 if (table != nullptr) {
549 table->init_stubs();
550 }
551 }
552
553 void AOTCodeCache::init_early_c1_table() {
554 AOTCodeAddressTable* table = addr_table();
555 if (table != nullptr) {
556 table->init_early_c1();
557 }
558 }
559
560 void AOTCodeCache::init_c1_table() {
561 AOTCodeAddressTable* table = addr_table();
562 if (table != nullptr) {
563 table->init_c1();
564 }
565 }
566
567 void AOTCodeCache::init_c2_table() {
568 AOTCodeAddressTable* table = addr_table();
569 if (table != nullptr) {
570 table->init_c2();
571 }
572 }
573
// Close the cache: stop readers and writers, flush the store buffer to the
// AOT cache when dumping, and release all buffers and the address table.
AOTCodeCache::~AOTCodeCache() {
  if (_closing) {
    return; // Already closed
  }
  // Stop any further access to cache.
  // Checked on entry to load_nmethod() and store_nmethod().
  _closing = true;
  if (_for_use) {
    // Wait for all load_nmethod() finish.
    wait_for_no_nmethod_readers();
  }
  // Prevent writing code into cache while we are closing it.
  // This lock held by ciEnv::register_method() which calls store_nmethod().
  MutexLocker ml(Compile_lock);
  if (for_dump()) { // Finalize cache
    finish_write();
  }
  _load_buffer = nullptr;
  if (_C_store_buffer != nullptr) {
    FREE_C_HEAP_ARRAY(char, _C_store_buffer);
    _C_store_buffer = nullptr;
    _store_buffer = nullptr;
  }
  if (_table != nullptr) {
    // The C-strings lock guards concurrent address-table lookups.
    MutexLocker ml(AOTCodeCStrings_lock, Mutex::_no_safepoint_check_flag);
    delete _table;
    _table = nullptr;
  }
}
603
// Record the current VM configuration into the cache so a production run can
// check compatibility (see Config::verify()). cpu_features_offset is the
// offset of the recorded CPU feature set within the cache data.
void AOTCodeCache::Config::record(uint cpu_features_offset) {
  _flags = 0;
#ifdef ASSERT
  // Distinguish debug from product VMs; caches are not interchangeable.
  _flags |= debugVM;
#endif
  if (UseCompressedOops) {
    _flags |= compressedOops;
  }
  if (UseCompressedClassPointers) {
    _flags |= compressedClassPointers;
  }
  if (UseTLAB) {
    _flags |= useTLAB;
  }
  if (JavaAssertions::systemClassDefault()) {
    _flags |= systemClassAssertions;
  }
  if (JavaAssertions::userClassDefault()) {
    _flags |= userClassAssertions;
  }
  if (EnableContended) {
    _flags |= enableContendedPadding;
  }
  if (RestrictContended) {
    _flags |= restrictContendedPadding;
  }
  if (PreserveFramePointer) {
    _flags |= preserveFramePointer;
  }
  // Numeric settings that influence generated code.
  _codeCacheSize = pointer_delta(CodeCache::high_bound(), CodeCache::low_bound(), 1);
  _compressedOopShift = CompressedOops::shift();
  _compressedOopBase = CompressedOops::base();
  _compressedKlassShift = CompressedKlassPointers::shift();
  _compressedKlassBase = CompressedKlassPointers::base();
  _contendedPaddingWidth = ContendedPaddingWidth;
  _objectAlignment = ObjectAlignmentInBytes;
  _gcCardSize = GCCardSizeInBytes;
  _gc = (uint)Universe::heap()->kind();
  _maxVectorSize = MaxVectorSize;
  _arrayOperationPartialInlineSize = ArrayOperationPartialInlineSize;
  _allocatePrefetchLines = AllocatePrefetchLines;
  _allocateInstancePrefetchLines = AllocateInstancePrefetchLines;
  _allocatePrefetchDistance = AllocatePrefetchDistance;
  _allocatePrefetchStepSize = AllocatePrefetchStepSize;
  _cpu_features_offset = cpu_features_offset;
}
650
// Check that every CPU feature required by the cached code is present on the
// current CPU. Extra runtime-only features are fine (logged at Debug level);
// missing required features reject the cache.
bool AOTCodeCache::Config::verify_cpu_features(AOTCodeCache* cache) const {
  LogStreamHandle(Debug, aot, codecache, init) log;
  uint offset = _cpu_features_offset;
  // Layout at the recorded offset: uint size followed by the feature buffer.
  uint cpu_features_size = *(uint *)cache->addr(offset);
  assert(cpu_features_size == (uint)VM_Version::cpu_features_size(), "must be");
  offset += sizeof(uint);

  void* cached_cpu_features_buffer = (void *)cache->addr(offset);
  if (log.is_enabled()) {
    ResourceMark rm; // required for stringStream::as_string()
    stringStream ss;
    VM_Version::get_cpu_features_name(cached_cpu_features_buffer, ss);
    log.print_cr("CPU features recorded in AOTCodeCache: %s", ss.as_string());
  }

  if (VM_Version::supports_features(cached_cpu_features_buffer)) {
    if (log.is_enabled()) {
      // Report runtime features that were NOT present at dump time.
      ResourceMark rm; // required for stringStream::as_string()
      stringStream ss;
      char* runtime_cpu_features = NEW_RESOURCE_ARRAY(char, VM_Version::cpu_features_size());
      VM_Version::store_cpu_features(runtime_cpu_features);
      VM_Version::get_missing_features_name(runtime_cpu_features, cached_cpu_features_buffer, ss);
      if (!ss.is_empty()) {
        log.print_cr("Additional runtime CPU features: %s", ss.as_string());
      }
    }
  } else {
    if (log.is_enabled()) {
      // Report required (recorded) features missing from the runtime CPU.
      // Note the swapped argument order relative to the branch above.
      ResourceMark rm; // required for stringStream::as_string()
      stringStream ss;
      char* runtime_cpu_features = NEW_RESOURCE_ARRAY(char, VM_Version::cpu_features_size());
      VM_Version::store_cpu_features(runtime_cpu_features);
      VM_Version::get_missing_features_name(cached_cpu_features_buffer, runtime_cpu_features, ss);
      log.print_cr("AOT Code Cache disabled: required cpu features are missing: %s", ss.as_string());
    }
    return false;
  }
  return true;
}
690
691 bool AOTCodeCache::Config::verify(AOTCodeCache* cache) const {
692 // First checks affect all cached AOT code
693 #ifdef ASSERT
694 if ((_flags & debugVM) == 0) {
695 log_debug(aot, codecache, init)("AOT Code Cache disabled: it was created by product VM, it can't be used by debug VM");
696 return false;
697 }
698 #else
699 if ((_flags & debugVM) != 0) {
700 log_debug(aot, codecache, init)("AOT Code Cache disabled: it was created by debug VM, it can't be used by product VM");
701 return false;
702 }
703 #endif
704
705 size_t codeCacheSize = pointer_delta(CodeCache::high_bound(), CodeCache::low_bound(), 1);
706 if (codeCacheSize > _codeCacheSize) { // Only allow smaller or equal CodeCache size in production run
707 log_debug(aot, codecache, init)("AOT Code Cache disabled: it was created with CodeCache size = %dKb vs current %dKb", (int)(_codeCacheSize/K), (int)(codeCacheSize/K));
708 return false;
709 }
710
711 CollectedHeap::Name aot_gc = (CollectedHeap::Name)_gc;
712 if (aot_gc != Universe::heap()->kind()) {
713 log_debug(aot, codecache, init)("AOT Code Cache disabled: it was created with different GC: %s vs current %s", GCConfig::hs_err_name(aot_gc), GCConfig::hs_err_name());
714 return false;
715 }
716
717 // We don't need to cache CardTable::card_shift() if GCCardSizeInBytes stay the same
718 if (_gcCardSize != (uint)GCCardSizeInBytes) {
719 log_debug(aot, codecache, init)("AOT Code Cache disabled: it was created with GCCardSizeInBytes = %d vs current %d", _gcCardSize, GCCardSizeInBytes);
720 return false;
721 }
722
723 if (_objectAlignment != (uint)ObjectAlignmentInBytes) {
724 log_debug(aot, codecache, init)("AOT Code Cache disabled: it was created with ObjectAlignmentInBytes = %d vs current %d", _objectAlignment, ObjectAlignmentInBytes);
725 return false;
726 }
727
728 if (((_flags & enableContendedPadding) != 0) != EnableContended) {
729 log_debug(aot, codecache, init)("AOT Code Cache disabled: it was created with EnableContended = %s vs current %s", (enableContendedPadding ? "false" : "true"), (EnableContended ? "true" : "false"));
730 return false;
731 }
732 if (((_flags & restrictContendedPadding) != 0) != RestrictContended) {
733 log_debug(aot, codecache, init)("AOT Code Cache disabled: it was created with RestrictContended = %s vs current %s", (restrictContendedPadding ? "false" : "true"), (RestrictContended ? "true" : "false"));
734 return false;
735 }
736 if (_contendedPaddingWidth != (uint)ContendedPaddingWidth) {
737 log_debug(aot, codecache, init)("AOT Code Cache disabled: it was created with ContendedPaddingWidth = %d vs current %d", _contendedPaddingWidth, ContendedPaddingWidth);
738 return false;
739 }
740
741 if (((_flags & preserveFramePointer) != 0) != PreserveFramePointer) {
742 log_debug(aot, codecache, init)("AOT Code Cache disabled: it was created with PreserveFramePointer = %s vs current %s", (preserveFramePointer ? "false" : "true"), (PreserveFramePointer ? "true" : "false"));
743 return false;
744 }
745
746 if (((_flags & compressedClassPointers) != 0) != UseCompressedClassPointers) {
747 log_debug(aot, codecache, init)("AOT Code Cache disabled: it was created with UseCompressedClassPointers = %s vs current %s", (compressedClassPointers ? "false" : "true"), (UseCompressedClassPointers ? "true" : "false"));
748 return false;
749 }
750 if (_compressedKlassShift != (uint)CompressedKlassPointers::shift()) {
751 log_debug(aot, codecache, init)("AOT Code Cache disabled: it was created with CompressedKlassPointers::shift() = %d vs current %d", _compressedKlassShift, CompressedKlassPointers::shift());
752 return false;
753 }
754 if ((_compressedKlassBase == nullptr || CompressedKlassPointers::base() == nullptr) && (_compressedKlassBase != CompressedKlassPointers::base())) {
755 log_debug(aot, codecache, init)("AOT Code Cache disabled: incompatible CompressedKlassPointers::base(): %p vs current %p", _compressedKlassBase, CompressedKlassPointers::base());
756 return false;
757 }
758
759 if (((_flags & compressedOops) != 0) != UseCompressedOops) {
760 log_debug(aot, codecache, init)("AOT Code Cache disabled: it was created with UseCompressedOops = %s vs current %s", (compressedOops ? "false" : "true"), (UseCompressedOops ? "true" : "false"));
761 return false;
762 }
763 if (_compressedOopShift != (uint)CompressedOops::shift()) {
764 log_debug(aot, codecache, init)("AOT Code Cache disabled: it was created with different CompressedOops::shift(): %d vs current %d", _compressedOopShift, CompressedOops::shift());
765 return false;
766 }
767 if ((_compressedOopBase == nullptr || CompressedOops::base() == nullptr) && (_compressedOopBase != CompressedOops::base())) {
768 log_debug(aot, codecache, init)("AOTStubCaching is disabled: incompatible CompressedOops::base(): %p vs current %p", _compressedOopBase, CompressedOops::base());
769 return false;
770 }
771
772 // Some of the following checked flags depend on CPU features. Check CPU first.
773 if (!verify_cpu_features(cache)) {
774 return false;
775 }
776
777 // TLAB related flags
778 if (((_flags & useTLAB) != 0) != UseTLAB) {
779 log_debug(aot, codecache, init)("AOT Code Cache disabled: it was created with UseTLAB = %s vs current %s", (useTLAB ? "false" : "true"), (UseTLAB ? "true" : "false"));
780 return false;
781 }
782 if (_allocatePrefetchLines != (uint)AllocatePrefetchLines) {
783 log_debug(aot, codecache, init)("AOT Code Cache disabled: it was created with AllocatePrefetchLines = %d vs current %d", _allocatePrefetchLines, AllocatePrefetchLines);
784 return false;
785 }
786 if (_allocateInstancePrefetchLines != (uint)AllocateInstancePrefetchLines) {
787 log_debug(aot, codecache, init)("AOT Code Cache disabled: it was created with AllocateInstancePrefetchLines = %d vs current %d", _allocateInstancePrefetchLines, AllocateInstancePrefetchLines);
788 return false;
789 }
790 if (_allocatePrefetchDistance != (uint)AllocatePrefetchDistance) {
791 log_debug(aot, codecache, init)("AOT Code Cache disabled: it was created with AllocatePrefetchDistance = %d vs current %d", _allocatePrefetchDistance, AllocatePrefetchDistance);
792 return false;
793 }
794 if (_allocatePrefetchStepSize != (uint)AllocatePrefetchStepSize) {
795 log_debug(aot, codecache, init)("AOT Code Cache disabled: it was created with AllocatePrefetchStepSize = %d vs current %d", _allocatePrefetchStepSize, AllocatePrefetchStepSize);
796 return false;
797 }
798
799 // Vectorization and intrinsics related flags
800 if (_maxVectorSize != (uint)MaxVectorSize) {
801 log_debug(aot, codecache, init)("AOT Code Cache disabled: it was created with MaxVectorSize = %d vs current %d", _maxVectorSize, (uint)MaxVectorSize);
802 return false;
803 }
804 if (_arrayOperationPartialInlineSize != (uint)ArrayOperationPartialInlineSize) {
805 log_debug(aot, codecache, init)("AOT Code Cache disabled: it was created with ArrayOperationPartialInlineSize = %d vs current %d", _arrayOperationPartialInlineSize, (uint)ArrayOperationPartialInlineSize);
806 return false;
807 }
808
809 // Next affects only AOT nmethod
810 if (((_flags & systemClassAssertions) != 0) != JavaAssertions::systemClassDefault()) {
811 log_debug(aot, codecache, init)("AOT Code Cache disabled: it was created with JavaAssertions::systemClassDefault() = %s vs current %s", (systemClassAssertions ? "disabled" : "enabled"), (JavaAssertions::systemClassDefault() ? "enabled" : "disabled"));
812 FLAG_SET_ERGO(AOTCodeCaching, false);
813 }
814 if (((_flags & userClassAssertions) != 0) != JavaAssertions::userClassDefault()) {
815 log_debug(aot, codecache, init)("AOT Code Cache disabled: it was created with JavaAssertions::userClassDefault() = %s vs current %s", (userClassAssertions ? "disabled" : "enabled"), (JavaAssertions::userClassDefault() ? "enabled" : "disabled"));
816 FLAG_SET_ERGO(AOTCodeCaching, false);
817 }
818 return true;
819 }
820
821 bool AOTCodeCache::Header::verify(uint load_size) const {
822 if (_version != AOT_CODE_VERSION) {
823 log_debug(aot, codecache, init)("AOT Code Cache disabled: different AOT Code version %d vs %d recorded in AOT Code header", AOT_CODE_VERSION, _version);
824 return false;
825 }
826 if (load_size < _cache_size) {
827 log_debug(aot, codecache, init)("AOT Code Cache disabled: AOT Code Cache size %d < %d recorded in AOT Code header", load_size, _cache_size);
828 return false;
829 }
830 return true;
831 }
832
// Count of threads currently reading AOT nmethod data — presumably used to
// coordinate concurrent readers with cache shutdown; verify against the
// reader enter/exit code elsewhere in this file.
volatile int AOTCodeCache::_nmethod_readers = 0;
834
835 AOTCodeCache* AOTCodeCache::open_for_use() {
836 if (AOTCodeCache::is_on_for_use()) {
837 return AOTCodeCache::cache();
838 }
839 return nullptr;
840 }
841
842 AOTCodeCache* AOTCodeCache::open_for_dump() {
843 if (AOTCodeCache::is_on_for_dump()) {
844 AOTCodeCache* cache = AOTCodeCache::cache();
845 cache->clear_lookup_failed(); // Reset bit
846 return cache;
847 }
848 return nullptr;
849 }
850
851 bool AOTCodeCache::is_address_in_aot_cache(address p) {
852 AOTCodeCache* cache = open_for_use();
853 if (cache == nullptr) {
854 return false;
855 }
856 if ((p >= (address)cache->cache_buffer()) &&
857 (p < (address)(cache->cache_buffer() + cache->load_size()))) {
858 return true;
859 }
860 return false;
861 }
862
// Copy 'size' bytes from 'from' to 'to' with trace logging.
// Callers must pass a positive size (checked in debug builds).
static void copy_bytes(const char* from, address to, uint size) {
  assert((int)size > 0, "sanity");
  memcpy(to, from, size);
  log_trace(aot, codecache)("Copied %d bytes from " INTPTR_FORMAT " to " INTPTR_FORMAT, size, p2i(from), p2i(to));
}
868
869 AOTCodeReader::AOTCodeReader(AOTCodeCache* cache, AOTCodeEntry* entry, CompileTask* task) {
870 _cache = cache;
871 _entry = entry;
872 _load_buffer = cache->cache_buffer();
873 _read_position = 0;
874 if (task != nullptr) {
875 _compile_id = task->compile_id();
876 _comp_level = task->comp_level();
877 _preload = task->preload();
878 } else {
879 _compile_id = 0;
880 _comp_level = 0;
881 _preload = false;
882 }
883 _lookup_failed = false;
884 }
885
886 void AOTCodeReader::set_read_position(uint pos) {
887 if (pos == _read_position) {
888 return;
889 }
890 assert(pos < _cache->load_size(), "offset:%d >= file size:%d", pos, _cache->load_size());
891 _read_position = pos;
892 }
893
894 bool AOTCodeCache::set_write_position(uint pos) {
895 if (pos == _write_position) {
896 return true;
897 }
898 if (_store_size < _write_position) {
899 _store_size = _write_position; // Adjust during write
900 }
901 assert(pos < _store_size, "offset:%d >= file size:%d", pos, _store_size);
902 _write_position = pos;
903 return true;
904 }
905
// Zero-filled scratch buffer used as the source for alignment padding writes.
static char align_buffer[256] = { 0 };
907
908 bool AOTCodeCache::align_write() {
909 // We are not executing code from cache - we copy it by bytes first.
910 // No need for big alignment (or at all).
911 uint padding = DATA_ALIGNMENT - (_write_position & (DATA_ALIGNMENT - 1));
912 if (padding == DATA_ALIGNMENT) {
913 return true;
914 }
915 uint n = write_bytes((const void*)&align_buffer, padding);
916 if (n != padding) {
917 return false;
918 }
919 log_trace(aot, codecache)("Adjust write alignment in AOT Code Cache");
920 return true;
921 }
922
923 // Check to see if AOT code cache has required space to store "nbytes" of data
924 address AOTCodeCache::reserve_bytes(uint nbytes) {
925 assert(for_dump(), "Code Cache file is not created");
926 uint new_position = _write_position + nbytes;
927 if (new_position >= (uint)((char*)_store_entries - _store_buffer)) {
928 log_warning(aot,codecache)("Failed to ensure %d bytes at offset %d in AOT Code Cache. Increase AOTCodeMaxSize.",
929 nbytes, _write_position);
930 set_failed();
931 report_store_failure();
932 return nullptr;
933 }
934 address buffer = (address)(_store_buffer + _write_position);
935 log_trace(aot, codecache)("Reserved %d bytes at offset %d in AOT Code Cache", nbytes, _write_position);
936 _write_position += nbytes;
937 if (_store_size < _write_position) {
938 _store_size = _write_position;
939 }
940 return buffer;
941 }
942
943 uint AOTCodeCache::write_bytes(const void* buffer, uint nbytes) {
944 assert(for_dump(), "Code Cache file is not created");
945 if (nbytes == 0) {
946 return 0;
947 }
948 uint new_position = _write_position + nbytes;
949 if (new_position >= (uint)((char*)_store_entries - _store_buffer)) {
950 log_warning(aot, codecache)("Failed to write %d bytes at offset %d to AOT Code Cache. Increase AOTCodeMaxSize.",
951 nbytes, _write_position);
952 set_failed();
953 report_store_failure();
954 return 0;
955 }
956 copy_bytes((const char* )buffer, (address)(_store_buffer + _write_position), nbytes);
957 log_trace(aot, codecache)("Wrote %d bytes at offset %d to AOT Code Cache", nbytes, _write_position);
958 _write_position += nbytes;
959 if (_store_size < _write_position) {
960 _store_size = _write_position;
961 }
962 return nbytes;
963 }
964
// Look up the AOT nmethod entry for 'method' at 'comp_level'.
// Returns nullptr when the method has no archived code, JIT recompilation
// was already requested, the level is disabled via DisableAOTCode, or a
// compiler directive excludes the method.
AOTCodeEntry* AOTCodeCache::find_code_entry(const methodHandle& method, uint comp_level) {
  assert(is_using_code(), "AOT code caching should be enabled");
  if (!method->in_aot_cache()) {
    return nullptr;
  }

  MethodCounters* mc = method->method_counters();
  if (mc != nullptr && mc->aot_code_recompile_requested()) {
    return nullptr; // Already requested JIT compilation
  }

  // DisableAOTCode bits 0, 1, 2 switch off AOT code for tiers 1, 2, 4.
  switch (comp_level) {
    case CompLevel_simple:
      if ((DisableAOTCode & (1 << 0)) != 0) {
        return nullptr;
      }
      break;
    case CompLevel_limited_profile:
      if ((DisableAOTCode & (1 << 1)) != 0) {
        return nullptr;
      }
      break;
    case CompLevel_full_optimization:
      if ((DisableAOTCode & (1 << 2)) != 0) {
        return nullptr;
      }
      break;

    default: return nullptr; // Level 1, 2, and 4 only
  }
  TraceTime t1("Total time to find AOT code", &_t_totalFind, enable_timers(), false);
  if (is_on() && _cache->cache_buffer() != nullptr) {
    // The method's id in the cache is its offset in the AOT metaspace.
    uint id = AOTCacheAccess::convert_method_to_offset(method());
    AOTCodeEntry* entry = _cache->find_entry(AOTCodeEntry::Nmethod, id, comp_level);
    if (entry == nullptr) {
      LogStreamHandle(Info, aot, codecache, nmethod) log;
      if (log.is_enabled()) {
        ResourceMark rm;
        const char* target_name = method->name_and_sig_as_C_string();
        log.print("Missing entry for '%s' (comp_level %d, id: " UINT32_FORMAT_X_0 ")", target_name, (uint)comp_level, id);
      }
#ifdef ASSERT
    } else {
      // Cross-check that the stored entry really belongs to this method.
      assert(!entry->has_clinit_barriers(), "only preload code should have clinit barriers");
      ResourceMark rm;
      assert(method() == entry->method(), "AOTCodeCache: saved nmethod's method %p (name: %s id: " UINT32_FORMAT_X_0
             ") is different from the method %p (name: %s, id: " UINT32_FORMAT_X_0 " being looked up" ,
             entry->method(), entry->method()->name_and_sig_as_C_string(), entry->id(), method(), method()->name_and_sig_as_C_string(), id);
#endif
    }

    // Compiler directives may still veto the use of AOT code for this method.
    DirectiveSet* directives = DirectivesStack::getMatchingDirective(method, nullptr);
    if (directives->IgnoreAOTCompiledOption || directives->ExcludeOption) {
      LogStreamHandle(Info, aot, codecache, compilation) log;
      if (log.is_enabled()) {
        log.print("Ignore AOT code entry on level %d for ", comp_level);
        method->print_value_on(&log);
      }
      return nullptr;
    }

    return entry;
  }
  return nullptr;
}
1030
// Map this nmethod entry's id back to the Method* it was recorded for.
// Only valid for Nmethod-kind entries while the cache is open for use.
Method* AOTCodeEntry::method() {
  assert(_kind == Nmethod, "invalid kind %d", _kind);
  assert(AOTCodeCache::is_on_for_use(), "must be");
  return AOTCacheAccess::convert_offset_to_method(_id);
}
1036
// Placement allocation: entries live inside the cache's entries table
// (via add_entry()) rather than on the C heap.
void* AOTCodeEntry::operator new(size_t x, AOTCodeCache* cache) {
  return (void*)(cache->add_entry());
}
1040
1041 static bool check_entry(AOTCodeEntry::Kind kind, uint id, uint comp_level, AOTCodeEntry* entry) {
1042 if (entry->kind() == kind) {
1043 assert(entry->id() == id, "sanity");
1044 if (kind != AOTCodeEntry::Nmethod || // addapters and stubs have only one version
1045 // Look only for normal AOT code entry, preload code is handled separately
1046 (!entry->not_entrant() && (entry->comp_level() == comp_level))) {
1047 return true; // Found
1048 }
1049 }
1050 return false;
1051 }
1052
// Find an entry by kind/id/comp_level using the sorted [id, index] search
// table. Ids can repeat (e.g. one method compiled at several levels), so a
// binary-search hit is followed by a linear scan of neighboring slots with
// the same id.
AOTCodeEntry* AOTCodeCache::find_entry(AOTCodeEntry::Kind kind, uint id, uint comp_level) {
  assert(_for_use, "sanity");
  uint count = _load_header->entries_count();
  if (_load_entries == nullptr) {
    // Lazily locate the search table and entries array in the mapped cache.
    _search_entries = (uint*)addr(_load_header->search_table_offset()); // [id, index]
    _load_entries = (AOTCodeEntry*)addr(_load_header->entries_offset());
    log_debug(aot, codecache, init)("Read %d entries table at offset %d from AOT Code Cache", count, _load_header->entries_offset());
  }
  // Binary search
  int l = 0;
  int h = count - 1;
  while (l <= h) {
    int mid = (l + h) >> 1;
    int ix = mid * 2; // two uints per slot: [id, index]
    uint is = _search_entries[ix];
    if (is == id) {
      int index = _search_entries[ix + 1];
      AOTCodeEntry* entry = &(_load_entries[index]);
      if (check_entry(kind, id, comp_level, entry)) {
        return entry; // Found
      }
      // Linear search around the hit: duplicate ids are adjacent.
      for (int i = mid - 1; i >= l; i--) { // search back
        ix = i * 2;
        is = _search_entries[ix];
        if (is != id) {
          break;
        }
        index = _search_entries[ix + 1];
        AOTCodeEntry* entry = &(_load_entries[index]);
        if (check_entry(kind, id, comp_level, entry)) {
          return entry; // Found
        }
      }
      for (int i = mid + 1; i <= h; i++) { // search forward
        ix = i * 2;
        is = _search_entries[ix];
        if (is != id) {
          break;
        }
        index = _search_entries[ix + 1];
        AOTCodeEntry* entry = &(_load_entries[index]);
        if (check_entry(kind, id, comp_level, entry)) {
          return entry; // Found
        }
      }
      break; // No match found
    } else if (is < id) {
      l = mid + 1;
    } else {
      h = mid - 1;
    }
  }
  return nullptr;
}
1109
1110 void AOTCodeCache::invalidate_entry(AOTCodeEntry* entry) {
1111 assert(entry!= nullptr, "all entries should be read already");
1112 if (entry->not_entrant()) {
1113 return; // Someone invalidated it already
1114 }
1115 #ifdef ASSERT
1116 assert(_load_entries != nullptr, "sanity");
1117 {
1118 uint name_offset = entry->offset() + entry->name_offset();
1119 const char* name = _load_buffer + name_offset;;
1120 uint level = entry->comp_level();
1121 uint comp_id = entry->comp_id();
1122 bool for_preload = entry->for_preload();
1123 bool clinit_brs = entry->has_clinit_barriers();
1124 log_info(aot, codecache, nmethod)("Invalidating entry for '%s' (comp_id %d, comp_level %d, hash: " UINT32_FORMAT_X_0 "%s%s)",
1125 name, comp_id, level, entry->id(), (for_preload ? "P" : "A"), (clinit_brs ? ", has clinit barriers" : ""));
1126 }
1127 assert(entry->is_loaded() || entry->for_preload(), "invalidate only AOT code in use or a preload code");
1128 bool found = false;
1129 uint i = 0;
1130 uint count = 0;
1131 if (entry->for_preload()) {
1132 count = _load_header->preload_entries_count();
1133 AOTCodeEntry* preload_entry = (AOTCodeEntry*)addr(_load_header->preload_entries_offset());
1134 for (; i < count; i++) {
1135 if (entry == &preload_entry[i]) {
1136 break;
1137 }
1138 }
1139 } else {
1140 count = _load_header->entries_count();
1141 for(; i < count; i++) {
1142 if (entry == &(_load_entries[i])) {
1143 break;
1144 }
1145 }
1146 }
1147 found = (i < count);
1148 assert(found, "entry should exist");
1149 #endif
1150 entry->set_not_entrant();
1151 uint name_offset = entry->offset() + entry->name_offset();
1152 const char* name = _load_buffer + name_offset;;
1153 uint level = entry->comp_level();
1154 uint comp_id = entry->comp_id();
1155 bool for_preload = entry->for_preload();
1156 bool clinit_brs = entry->has_clinit_barriers();
1157 log_info(aot, codecache, nmethod)("Invalidated entry for '%s' (comp_id %d, comp_level %d, hash: " UINT32_FORMAT_X_0 "%s%s)",
1158 name, comp_id, level, entry->id(), (for_preload ? "P" : "A"), (clinit_brs ? ", has clinit barriers" : ""));
1159
1160 if (!for_preload && (entry->comp_level() == CompLevel_full_optimization)) {
1161 // Invalidate preload code if normal AOT C2 code is invalidated,
1162 // most likely because some dependencies changed during run.
1163 // We can still use normal AOT code if preload code is
1164 // invalidated - normal AOT code has less restrictions.
1165 Method* method = entry->method();
1166 MethodCounters* mc = entry->method()->method_counters();
1167 if (mc != nullptr && mc->aot_preload_code_entry() != nullptr) {
1168 AOTCodeEntry* preload_entry = mc->aot_preload_code_entry();
1169 if (preload_entry != nullptr) {
1170 assert(preload_entry->for_preload(), "expecting only such entries here");
1171 invalidate_entry(preload_entry);
1172 }
1173 }
1174 }
1175 }
1176
// qsort comparator producing ascending order of uint keys.
static int uint_cmp(const void *i, const void *j) {
  const uint a = *(const uint*)i;
  const uint b = *(const uint*)j;
  if (a == b) {
    return 0;
  }
  return (a < b) ? -1 : 1;
}
1182
// Serialize the CPU features blob into 'buffer': a uint size prefix
// followed by the feature data. Advances 'buffer' past the data and aligns
// it for whatever is stored next.
void AOTCodeCache::store_cpu_features(char*& buffer, uint buffer_size) {
  // Record the blob size first so a reader can locate/skip the data.
  uint* size_ptr = (uint *)buffer;
  *size_ptr = buffer_size;
  buffer += sizeof(uint);

  VM_Version::store_cpu_features(buffer);
  log_debug(aot, codecache, exit)("CPU features recorded in AOTCodeCache: %s", VM_Version::features_string());
  buffer += buffer_size;
  buffer = align_up(buffer, DATA_ALIGNMENT);
}
1193
// Finalize the AOT code cache. Lays out, in order: header, CPU features,
// preload entries + their code, normal entries + their code, C strings,
// and the sorted [id, index] search table, all inside a freshly allocated
// AOT cache region, then registers the region with the code directory.
// Returns false only on a write error; an empty cache still returns true.
bool AOTCodeCache::finish_write() {
  if (!align_write()) {
    return false;
  }
  // End of AOT code
  uint code_size = _write_position;
  uint strings_offset = code_size;
  int strings_count = store_strings();
  if (strings_count < 0) {
    return false;
  }
  if (!align_write()) {
    return false;
  }
  uint strings_size = _write_position - strings_offset;

  uint code_count = _store_entries_cnt;
  if (code_count > 0) {
    _aot_code_directory = CachedCodeDirectory::create();
    assert(_aot_code_directory != nullptr, "Sanity check");

    // Compute a conservative upper bound for the final region size.
    uint header_size = (uint)align_up(sizeof(AOTCodeCache::Header), DATA_ALIGNMENT);
    uint search_count = code_count * 2;
    uint search_size = search_count * sizeof(uint);
    uint entries_size = (uint)align_up(code_count * sizeof(AOTCodeEntry), DATA_ALIGNMENT); // In bytes
    // _write_position should include code and strings
    uint code_alignment = code_count * DATA_ALIGNMENT; // We align_up code size when storing it.
    uint cpu_features_size = VM_Version::cpu_features_size();
    uint total_cpu_features_size = sizeof(uint) + cpu_features_size; // sizeof(uint) to store cpu_features_size
    uint total_size = header_size + _write_position + code_alignment + search_size + entries_size +
                      align_up(total_cpu_features_size, DATA_ALIGNMENT);
    assert(total_size < max_aot_code_size(), "AOT Code size (" UINT32_FORMAT " bytes) is greater than AOTCodeMaxSize(" UINT32_FORMAT " bytes).", total_size, max_aot_code_size());

    // Allocate in AOT Cache buffer
    char* buffer = (char *)AOTCacheAccess::allocate_aot_code_region(total_size + DATA_ALIGNMENT);
    char* start = align_up(buffer, DATA_ALIGNMENT);
    char* current = start + header_size; // Skip header

    uint cpu_features_offset = current - start;
    store_cpu_features(current, cpu_features_size);
    assert(is_aligned(current, DATA_ALIGNMENT), "sanity check");
    assert(current < start + total_size, "sanity check");

    // Create ordered search table for entries [id, index];
    uint* search = NEW_C_HEAP_ARRAY(uint, search_count, mtCode);

    AOTCodeEntry* entries_address = _store_entries; // Pointer to latest entry
    AOTCodeStats stats;
    uint max_size = 0;
    // AOTCodeEntry entries were allocated in reverse in store buffer.
    // Process them in reverse order to cache first code first.

    // Store AOTCodeEntry-s for preload code
    current = align_up(current, DATA_ALIGNMENT);
    uint preload_entries_cnt = 0;
    uint preload_entries_offset = current - start;
    AOTCodeEntry* preload_entries = (AOTCodeEntry*)current;
    for (int i = code_count - 1; i >= 0; i--) {
      AOTCodeEntry* entry = &entries_address[i];
      if (entry->load_fail()) {
        continue;
      }
      if (entry->for_preload()) {
        if (entry->not_entrant()) {
          // Skip not entrant preload code:
          // we can't pre-load code which may have failing dependencies.
          log_info(aot, codecache, exit)("Skip not entrant preload code comp_id: %d, comp_level: %d, hash: " UINT32_FORMAT_X_0 "%s",
                                         entry->comp_id(), entry->comp_level(), entry->id(), (entry->has_clinit_barriers() ? ", has clinit barriers" : ""));
        } else {
          copy_bytes((const char*)entry, (address)current, sizeof(AOTCodeEntry));
          stats.collect_entry_stats(entry);
          current += sizeof(AOTCodeEntry);
          preload_entries_cnt++;
        }
      }
    }

    // Now write the data for preload AOTCodeEntry
    for (int i = 0; i < (int)preload_entries_cnt; i++) {
      AOTCodeEntry* entry = &preload_entries[i];
      uint size = align_up(entry->size(), DATA_ALIGNMENT);
      if (size > max_size) {
        max_size = size;
      }
      copy_bytes((_store_buffer + entry->offset()), (address)current, size);
      entry->set_offset(current - start); // New offset
      current += size;
    }

    current = align_up(current, DATA_ALIGNMENT);
    uint entries_count = 0;
    uint new_entries_offset = current - start;
    AOTCodeEntry* code_entries = (AOTCodeEntry*)current;
    // Now scan normal entries
    for (int i = code_count - 1; i >= 0; i--) {
      AOTCodeEntry* entry = &entries_address[i];
      if (entry->load_fail() || entry->for_preload()) {
        continue;
      }
      if (entry->not_entrant()) {
        log_info(aot, codecache, exit)("Not entrant new entry comp_id: %d, comp_level: %d, hash: " UINT32_FORMAT_X_0 "%s",
                                       entry->comp_id(), entry->comp_level(), entry->id(), (entry->has_clinit_barriers() ? ", has clinit barriers" : ""));
        entry->set_entrant(); // Reset
      }
      copy_bytes((const char*)entry, (address)current, sizeof(AOTCodeEntry));
      stats.collect_entry_stats(entry);
      current += sizeof(AOTCodeEntry);
      // Record [id, index] pair for the search table (sorted by id below).
      search[entries_count*2 + 0] = entry->id();
      search[entries_count*2 + 1] = entries_count;
      entries_count++;
    }

    // Now write the data for normal AOTCodeEntry
    for (int i = 0; i < (int)entries_count; i++) {
      AOTCodeEntry* entry = &code_entries[i];
      uint size = align_up(entry->size(), DATA_ALIGNMENT);
      if (size > max_size) {
        max_size = size;
      }
      copy_bytes((_store_buffer + entry->offset()), (address)current, size);
      entry->set_offset(current - start); // New offset
      current += size;
    }

    if (preload_entries_cnt == 0 && entries_count == 0) {
      log_info(aot, codecache, exit)("AOT Code Cache was not created: no entries");
      FREE_C_HEAP_ARRAY(uint, search);
      return true; // Nothing to write
    }
    uint total_entries_cnt = preload_entries_cnt + entries_count;
    assert(total_entries_cnt <= code_count, "%d > %d", total_entries_cnt, code_count);
    // Write strings
    if (strings_count > 0) {
      copy_bytes((_store_buffer + strings_offset), (address)current, strings_size);
      strings_offset = (current - start); // New offset
      current += strings_size;
    }

    uint search_table_offset = current - start;
    // Sort and store search table
    qsort(search, entries_count, 2*sizeof(uint), uint_cmp);
    search_size = 2 * entries_count * sizeof(uint);
    copy_bytes((const char*)search, (address)current, search_size);
    FREE_C_HEAP_ARRAY(uint, search);
    current += search_size;

    log_stats_on_exit(stats);

    uint size = (current - start);
    assert(size <= total_size, "%d > %d", size , total_size);
    log_debug(aot, codecache, exit)("  AOT code cache size: %u bytes", size);
    log_debug(aot, codecache, exit)("   header size: %u", header_size);
    log_debug(aot, codecache, exit)("   total code size: %u (max code's size: %u)", code_size, max_size);
    log_debug(aot, codecache, exit)("   entries size: %u", entries_size);
    log_debug(aot, codecache, exit)("   entry search table: %u", search_size);
    log_debug(aot, codecache, exit)("   C strings size: %u", strings_size);
    log_debug(aot, codecache, exit)("   CPU features data: %u", total_cpu_features_size);

    // Finalize header
    AOTCodeCache::Header* header = (AOTCodeCache::Header*)start;
    header->init(size, (uint)strings_count, strings_offset,
                 entries_count, search_table_offset, new_entries_offset,
                 preload_entries_cnt, preload_entries_offset,
                 stats.entry_count(AOTCodeEntry::Adapter), stats.entry_count(AOTCodeEntry::SharedBlob),
                 stats.entry_count(AOTCodeEntry::C1Blob), stats.entry_count(AOTCodeEntry::C2Blob),
                 stats.entry_count(AOTCodeEntry::Stub), cpu_features_offset);

    log_info(aot, codecache, exit)("Wrote %d AOT code entries to AOT Code Cache", total_entries_cnt);

    _aot_code_directory->set_aot_code_data(size, start);
  }
  return true;
}
1367
1368 //------------------Store/Load AOT code ----------------------
1369
1370 bool AOTCodeCache::store_code_blob(CodeBlob& blob, AOTCodeEntry::Kind entry_kind, uint id, const char* name) {
1371 AOTCodeCache* cache = open_for_dump();
1372 if (cache == nullptr) {
1373 return false;
1374 }
1375 assert(AOTCodeEntry::is_valid_entry_kind(entry_kind), "invalid entry_kind %d", entry_kind);
1376
1377 if (AOTCodeEntry::is_adapter(entry_kind) && !is_dumping_adapter()) {
1378 return false;
1379 }
1380 if (AOTCodeEntry::is_blob(entry_kind) && !is_dumping_stub()) {
1381 return false;
1382 }
1383 log_debug(aot, codecache, stubs)("Writing blob '%s' (id=%u, kind=%s) to AOT Code Cache", name, id, aot_code_entry_kind_name[entry_kind]);
1384
1385 #ifdef ASSERT
1386 LogStreamHandle(Trace, aot, codecache, stubs) log;
1387 if (log.is_enabled()) {
1388 FlagSetting fs(PrintRelocations, true);
1389 blob.print_on(&log);
1390 }
1391 #endif
1392 // we need to take a lock to prevent race between compiler threads generating AOT code
1393 // and the main thread generating adapter
1394 MutexLocker ml(Compile_lock);
1395 if (!is_on()) {
1396 return false; // AOT code cache was already dumped and closed.
1397 }
1398 if (!cache->align_write()) {
1399 return false;
1400 }
1401 uint entry_position = cache->_write_position;
1402
1403 // Write name
1404 uint name_offset = cache->_write_position - entry_position;
1405 uint name_size = (uint)strlen(name) + 1; // Includes '/0'
1406 uint n = cache->write_bytes(name, name_size);
1407 if (n != name_size) {
1408 return false;
1409 }
1410
1411 // Write CodeBlob
1412 if (!cache->align_write()) {
1413 return false;
1414 }
1415 uint blob_offset = cache->_write_position - entry_position;
1416 address archive_buffer = cache->reserve_bytes(blob.size());
1417 if (archive_buffer == nullptr) {
1418 return false;
1419 }
1420 CodeBlob::archive_blob(&blob, archive_buffer);
1421
1422 uint reloc_data_size = blob.relocation_size();
1423 n = cache->write_bytes((address)blob.relocation_begin(), reloc_data_size);
1424 if (n != reloc_data_size) {
1425 return false;
1426 }
1427
1428 bool has_oop_maps = false;
1429 if (blob.oop_maps() != nullptr) {
1430 if (!cache->write_oop_map_set(blob)) {
1431 return false;
1432 }
1433 has_oop_maps = true;
1434 }
1435
1436 #ifndef PRODUCT
1437 // Write asm remarks
1438 if (!cache->write_asm_remarks(blob.asm_remarks(), /* use_string_table */ true)) {
1439 return false;
1440 }
1441 if (!cache->write_dbg_strings(blob.dbg_strings(), /* use_string_table */ true)) {
1442 return false;
1443 }
1444 #endif /* PRODUCT */
1445
1446 if (!cache->write_relocations(blob)) {
1447 if (!cache->failed()) {
1448 // We may miss an address in AOT table - skip this code blob.
1449 cache->set_write_position(entry_position);
1450 }
1451 return false;
1452 }
1453
1454 uint entry_size = cache->_write_position - entry_position;
1455 AOTCodeEntry* entry = new(cache) AOTCodeEntry(entry_kind, encode_id(entry_kind, id),
1456 entry_position, entry_size, name_offset, name_size,
1457 blob_offset, has_oop_maps);
1458 log_debug(aot, codecache, stubs)("Wrote code blob '%s' (id=%u, kind=%s) to AOT Code Cache", name, id, aot_code_entry_kind_name[entry_kind]);
1459 return true;
1460 }
1461
1462 bool AOTCodeCache::store_code_blob(CodeBlob& blob, AOTCodeEntry::Kind entry_kind, BlobId id) {
1463 assert(AOTCodeEntry::is_blob(entry_kind),
1464 "wrong entry kind for blob id %s", StubInfo::name(id));
1465 return store_code_blob(blob, entry_kind, (uint)id, StubInfo::name(id));
1466 }
1467
1468 CodeBlob* AOTCodeCache::load_code_blob(AOTCodeEntry::Kind entry_kind, uint id, const char* name) {
1469 AOTCodeCache* cache = open_for_use();
1470 if (cache == nullptr) {
1471 return nullptr;
1472 }
1473 assert(AOTCodeEntry::is_valid_entry_kind(entry_kind), "invalid entry_kind %d", entry_kind);
1474
1475 if (AOTCodeEntry::is_adapter(entry_kind) && !is_using_adapter()) {
1476 return nullptr;
1477 }
1478 if (AOTCodeEntry::is_blob(entry_kind) && !is_using_stub()) {
1479 return nullptr;
1480 }
1481 log_debug(aot, codecache, stubs)("Reading blob '%s' (id=%u, kind=%s) from AOT Code Cache", name, id, aot_code_entry_kind_name[entry_kind]);
1482
1483 AOTCodeEntry* entry = cache->find_entry(entry_kind, encode_id(entry_kind, id));
1484 if (entry == nullptr) {
1485 return nullptr;
1486 }
1487 AOTCodeReader reader(cache, entry, nullptr);
1488 CodeBlob* blob = reader.compile_code_blob(name);
1489
1490 log_debug(aot, codecache, stubs)("%sRead blob '%s' (id=%u, kind=%s) from AOT Code Cache",
1491 (blob == nullptr? "Failed to " : ""), name, id, aot_code_entry_kind_name[entry_kind]);
1492 return blob;
1493 }
1494
1495 CodeBlob* AOTCodeCache::load_code_blob(AOTCodeEntry::Kind entry_kind, BlobId id) {
1496 assert(AOTCodeEntry::is_blob(entry_kind),
1497 "wrong entry kind for blob id %s", StubInfo::name(id));
1498 return load_code_blob(entry_kind, (uint)id, StubInfo::name(id));
1499 }
1500
// Materialize a CodeBlob from this reader's entry: verify the stored name,
// then rebuild the blob in the CodeCache from the archived blob image,
// relocation data, and optional oop maps, and fix up its relocations.
// Returns nullptr on name mismatch or when the CodeCache is full.
CodeBlob* AOTCodeReader::compile_code_blob(const char* name) {
  uint entry_position = _entry->offset();

  // Read name
  uint name_offset = entry_position + _entry->name_offset();
  uint name_size = _entry->name_size(); // Includes '\0'
  const char* stored_name = addr(name_offset);

  if (strncmp(stored_name, name, (name_size - 1)) != 0) {
    log_warning(aot, codecache, stubs)("Saved blob's name '%s' is different from the expected name '%s'",
                                       stored_name, name);
    set_lookup_failed(); // Skip this blob
    return nullptr;
  }

  // Read archived code blob
  uint offset = entry_position + _entry->code_offset();
  CodeBlob* archived_blob = (CodeBlob*)addr(offset);
  offset += archived_blob->size();

  // Relocation data immediately follows the archived blob image.
  address reloc_data = (address)addr(offset);
  offset += archived_blob->relocation_size();
  set_read_position(offset);

  ImmutableOopMapSet* oop_maps = nullptr;
  if (_entry->has_oop_maps()) {
    oop_maps = read_oop_map_set();
  }

  CodeBlob* code_blob = CodeBlob::create(archived_blob,
                                         stored_name,
                                         reloc_data,
                                         oop_maps
                                        );
  if (code_blob == nullptr) { // no space left in CodeCache
    return nullptr;
  }

#ifndef PRODUCT
  code_blob->asm_remarks().init();
  read_asm_remarks(code_blob->asm_remarks(), /* use_string_table */ true);
  code_blob->dbg_strings().init();
  read_dbg_strings(code_blob->dbg_strings(), /* use_string_table */ true);
#endif // PRODUCT

  // Patch the blob's relocations for its new address in the CodeCache.
  fix_relocations(code_blob);

#ifdef ASSERT
  LogStreamHandle(Trace, aot, codecache, stubs) log;
  if (log.is_enabled()) {
    FlagSetting fs(PrintRelocations, true);
    code_blob->print_on(&log);
  }
#endif
  return code_blob;
}
1557
1558 bool AOTCodeCache::store_stub(StubCodeGenerator* cgen, vmIntrinsicID id, const char* name, address start) {
1559 if (!is_dumping_stub()) {
1560 return false;
1561 }
1562 AOTCodeCache* cache = open_for_dump();
1563 if (cache == nullptr) {
1564 return false;
1565 }
1566 log_info(aot, codecache, stubs)("Writing stub '%s' id:%d to AOT Code Cache", name, (int)id);
1567 if (!cache->align_write()) {
1568 return false;
1569 }
1570 #ifdef ASSERT
1571 CodeSection* cs = cgen->assembler()->code_section();
1572 if (cs->has_locs()) {
1573 uint reloc_count = cs->locs_count();
1574 tty->print_cr("======== write stubs code section relocations [%d]:", reloc_count);
1575 // Collect additional data
1576 RelocIterator iter(cs);
1577 while (iter.next()) {
1578 switch (iter.type()) {
1579 case relocInfo::none:
1580 break;
1581 default: {
1582 iter.print_current_on(tty);
1583 fatal("stub's relocation %d unimplemented", (int)iter.type());
1584 break;
1585 }
1586 }
1587 }
1588 }
1589 #endif
1590 uint entry_position = cache->_write_position;
1591
1592 // Write code
1593 uint code_offset = 0;
1594 uint code_size = cgen->assembler()->pc() - start;
1595 uint n = cache->write_bytes(start, code_size);
1596 if (n != code_size) {
1597 return false;
1598 }
1599 // Write name
1600 uint name_offset = cache->_write_position - entry_position;
1601 uint name_size = (uint)strlen(name) + 1; // Includes '/0'
1602 n = cache->write_bytes(name, name_size);
1603 if (n != name_size) {
1604 return false;
1605 }
1606 uint entry_size = cache->_write_position - entry_position;
1607 AOTCodeEntry* entry = new(cache) AOTCodeEntry(entry_position, entry_size, name_offset, name_size,
1608 code_offset, code_size,
1609 AOTCodeEntry::Stub, (uint32_t)id);
1610 log_info(aot, codecache, stubs)("Wrote stub '%s' id:%d to AOT Code Cache", name, (int)id);
1611 return true;
1612 }
1613
// Load a stub's code bytes from the AOT code cache into the generator's
// buffer at 'start'. Verifies the stored name against the expected one;
// a mismatch marks the whole cache as failed. Returns true on success.
bool AOTCodeCache::load_stub(StubCodeGenerator* cgen, vmIntrinsicID id, const char* name, address start) {
  if (!is_using_stub()) {
    return false;
  }
  assert(start == cgen->assembler()->pc(), "wrong buffer");
  AOTCodeCache* cache = open_for_use();
  if (cache == nullptr) {
    return false;
  }
  AOTCodeEntry* entry = cache->find_entry(AOTCodeEntry::Stub, (uint)id);
  if (entry == nullptr) {
    return false;
  }
  uint entry_position = entry->offset();
  // Read name
  uint name_offset = entry->name_offset() + entry_position;
  uint name_size = entry->name_size(); // Includes '\0'
  const char* saved_name = cache->addr(name_offset);
  if (strncmp(name, saved_name, (name_size - 1)) != 0) {
    log_warning(aot, codecache)("Saved stub's name '%s' is different from '%s' for id:%d", saved_name, name, (int)id);
    cache->set_failed();
    report_load_failure();
    return false;
  }
  log_info(aot, codecache, stubs)("Reading stub '%s' id:%d from AOT Code Cache", name, (int)id);
  // Read code
  uint code_offset = entry->code_offset() + entry_position;
  uint code_size = entry->code_size();
  copy_bytes(cache->addr(code_offset), start, code_size);
  // Advance the code section end past the copied-in stub.
  cgen->assembler()->code_section()->set_end(start + code_size);
  log_info(aot, codecache, stubs)("Read stub '%s' id:%d from AOT Code Cache", name, (int)id);
  return true;
}
1647
1648 AOTCodeEntry* AOTCodeCache::store_nmethod(nmethod* nm, AbstractCompiler* compiler, bool for_preload) {
1649 if (!is_dumping_code()) {
1650 return nullptr;
1651 }
1652 assert(CDSConfig::is_dumping_aot_code(), "should be called only when allowed");
1653 AOTCodeCache* cache = open_for_dump();
1654 precond(cache != nullptr);
1655 precond(!nm->is_osr_method()); // AOT compilation is requested only during AOT cache assembly phase
1656 if (!compiler->is_c1() && !compiler->is_c2()) {
1657 // Only c1 and c2 compilers
1658 return nullptr;
1659 }
1660 int comp_level = nm->comp_level();
1661 if (comp_level == CompLevel_full_profile) {
1662 // Do not cache C1 compiles with full profile i.e. tier3
1663 return nullptr;
1664 }
1665 assert(comp_level == CompLevel_simple || comp_level == CompLevel_limited_profile || comp_level == CompLevel_full_optimization, "must be");
1666
1667 TraceTime t1("Total time to store AOT code", &_t_totalStore, enable_timers(), false);
1668 AOTCodeEntry* entry = nullptr;
1669 entry = cache->write_nmethod(nm, for_preload);
1670 if (entry == nullptr) {
1671 log_info(aot, codecache, nmethod)("%d (L%d): nmethod store attempt failed", nm->compile_id(), comp_level);
1672 }
1673 // Clean up fields which could be set here
1674 cache->_for_preload = false;
1675 cache->_has_clinit_barriers = false;
1676 return entry;
1677 }
1678
// Serialize one nmethod into the AOT code cache.
// The entry is laid out in write order as: method name, archived CodeBlob,
// relocation data, oops/metadata from the nmethod's data region, oop map
// set, immutable data, reloc-immediate oop/metadata lists, relocation
// fix-up data, and (non-product) asm remarks and debug strings. The reader
// (AOTCodeReader::compile_nmethod) must consume these in the same order.
// Returns the created AOTCodeEntry, or nullptr on any failure; for lookup
// failures the write position is rewound so the space can be reused.
AOTCodeEntry* AOTCodeCache::write_nmethod(nmethod* nm, bool for_preload) {
  AOTCodeCache* cache = open_for_dump();
  assert(cache != nullptr, "sanity check");
  assert(!nm->has_clinit_barriers() || (ClassInitBarrierMode > 0), "sanity");
  uint comp_id = nm->compile_id();
  uint comp_level = nm->comp_level();
  Method* method = nm->method();
  // Only methods present in the AOT cache can have their code cached.
  if (!AOTCacheAccess::can_generate_aot_code(method)) {
    ResourceMark rm;
    log_info(aot, codecache, nmethod)("%d (L%d): Skip method '%s' for AOT%s compile: not in AOT cache", comp_id, (int)comp_level, method->name_and_sig_as_C_string(), (for_preload ? " preload" : ""));
    assert(AOTCacheAccess::can_generate_aot_code(method), "sanity");
    return nullptr;
  }
  // Methods from custom class loaders are not supported.
  InstanceKlass* holder = method->method_holder();
  bool builtin_loader = holder->class_loader_data()->is_builtin_class_loader_data();
  if (!builtin_loader) {
    ResourceMark rm;
    log_info(aot, codecache, nmethod)("%d (L%d): Skip method '%s' loaded by custom class loader %s", comp_id, (int)comp_level, method->name_and_sig_as_C_string(), holder->class_loader_data()->loader_name());
    assert(builtin_loader, "sanity");
    return nullptr;
  }

  // These flags influence nested write_* calls; store_nmethod() resets them.
  _for_preload = for_preload;
  _has_clinit_barriers = nm->has_clinit_barriers();
  assert(!_has_clinit_barriers || _for_preload, "only preload code has clinit barriers");

  if (!align_write()) {
    return nullptr;
  }

  uint entry_position = _write_position;

  // Write name
  uint name_offset = 0;
  uint name_size = 0;
  uint id = 0;
  uint n;
  {
    ResourceMark rm;
    const char* name = method->name_and_sig_as_C_string();
    log_info(aot, codecache, nmethod)("%d (L%d): Writing nmethod '%s' (comp level: %d, %s) to AOT Code Cache",
                                      comp_id, (int)comp_level, name, comp_level,
                                      (nm->has_clinit_barriers() ? ", has clinit barriers" : ""));

    LogStreamHandle(Info, aot, codecache, loader) log;
    if (log.is_enabled()) {
      oop loader = holder->class_loader();
      oop domain = holder->protection_domain();
      log.print("Holder: ");
      holder->print_value_on(&log);
      log.print(" loader: ");
      if (loader == nullptr) {
        log.print("nullptr");
      } else {
        loader->print_value_on(&log);
      }
      log.print(" domain: ");
      if (domain == nullptr) {
        log.print("nullptr");
      } else {
        domain->print_value_on(&log);
      }
      log.cr();
    }
    name_offset = _write_position - entry_position;
    name_size = (uint)strlen(name) + 1; // Includes '\0'
    n = write_bytes(name, name_size);
    if (n != name_size) {
      return nullptr;
    }
  }
  // The entry id is the method's offset from the AOT cache base address.
  id = AOTCacheAccess::delta_from_base_address((address)nm->method());

  // Write CodeBlob
  if (!cache->align_write()) {
    return nullptr;
  }
  uint blob_offset = cache->_write_position - entry_position;
  address archive_buffer = cache->reserve_bytes(nm->size());
  if (archive_buffer == nullptr) {
    return nullptr;
  }
  CodeBlob::archive_blob(nm, archive_buffer);

  uint reloc_data_size = nm->relocation_size();
  n = write_bytes((address)nm->relocation_begin(), reloc_data_size);
  if (n != reloc_data_size) {
    return nullptr;
  }

  // Write oops and metadata present in the nmethod's data region
  if (!write_oops(nm)) {
    if (lookup_failed() && !failed()) {
      // Skip this method and reposition file
      set_write_position(entry_position);
    }
    return nullptr;
  }
  if (!write_metadata(nm)) {
    if (lookup_failed() && !failed()) {
      // Skip this method and reposition file
      set_write_position(entry_position);
    }
    return nullptr;
  }

  bool has_oop_maps = false;
  if (nm->oop_maps() != nullptr) {
    if (!cache->write_oop_map_set(*nm)) {
      return nullptr;
    }
    has_oop_maps = true;
  }

  uint immutable_data_size = nm->immutable_data_size();
  n = write_bytes(nm->immutable_data_begin(), immutable_data_size);
  if (n != immutable_data_size) {
    return nullptr;
  }

  // Collect oops/metadata referenced as immediates from relocations; these
  // are stored separately so the reader can re-resolve them on load.
  JavaThread* thread = JavaThread::current();
  HandleMark hm(thread);
  GrowableArray<Handle> oop_list;
  GrowableArray<Metadata*> metadata_list;

  nm->create_reloc_immediates_list(thread, oop_list, metadata_list);
  if (!write_nmethod_reloc_immediates(oop_list, metadata_list)) {
    if (lookup_failed() && !failed()) {
      // Skip this method and reposition file
      set_write_position(entry_position);
    }
    return nullptr;
  }

  if (!write_relocations(*nm, &oop_list, &metadata_list)) {
    return nullptr;
  }

#ifndef PRODUCT
  if (!cache->write_asm_remarks(nm->asm_remarks(), /* use_string_table */ false)) {
    return nullptr;
  }
  if (!cache->write_dbg_strings(nm->dbg_strings(), /* use_string_table */ false)) {
    return nullptr;
  }
#endif /* PRODUCT */

  uint entry_size = _write_position - entry_position;
  AOTCodeEntry* entry = new (this) AOTCodeEntry(AOTCodeEntry::Nmethod, id,
                                                entry_position, entry_size,
                                                name_offset, name_size,
                                                blob_offset, has_oop_maps,
                                                comp_level, comp_id,
                                                nm->has_clinit_barriers(), for_preload);
  {
    ResourceMark rm;
    const char* name = nm->method()->name_and_sig_as_C_string();
    log_info(aot, codecache, nmethod)("%d (L%d): Wrote nmethod '%s'%s to AOT Code Cache",
                                      comp_id, (int)comp_level, name, (for_preload ? " (for preload)" : ""));
  }
  // NOTE(review): under VerifyAOTCode the entry is written but not returned,
  // so it is not used further by the caller — presumably only the write path
  // itself is being exercised; confirm against the verification design.
  if (VerifyAOTCode) {
    return nullptr;
  }
  return entry;
}
1844
// Load and register an AOT-compiled nmethod for 'target' instead of JIT
// compiling it. The AOTCodeEntry was attached to the CompileTask earlier.
// Returns true when the cached nmethod was successfully installed; on
// failure the entry is marked load-failed and not-entrant so it is not
// retried. Load start/finish timestamps are recorded on the task.
bool AOTCodeCache::load_nmethod(ciEnv* env, ciMethod* target, int entry_bci, AbstractCompiler* compiler, CompLevel comp_level) {
  if (!is_using_code()) {
    return false;
  }
  AOTCodeCache* cache = open_for_use();
  if (cache == nullptr) {
    return false;
  }
  // Only normal (non-OSR) entries are cached.
  assert(entry_bci == InvocationEntryBci, "unexpected entry_bci=%d", entry_bci);
  TraceTime t1("Total time to load AOT code", &_t_totalLoad, enable_timers(), false);
  CompileTask* task = env->task();
  task->mark_aot_load_start(os::elapsed_counter());
  AOTCodeEntry* entry = task->aot_code_entry();
  bool preload = task->preload();
  assert(entry != nullptr, "sanity");
  if (log_is_enabled(Info, aot, codecache, nmethod)) {
    VM_ENTRY_MARK;
    ResourceMark rm;
    methodHandle method(THREAD, target->get_Method());
    const char* target_name = method->name_and_sig_as_C_string();
    uint id = AOTCacheAccess::convert_method_to_offset(method());
    bool clinit_brs = entry->has_clinit_barriers();
    log_info(aot, codecache, nmethod)("%d (L%d): %s nmethod '%s' (id: " UINT32_FORMAT_X_0 "%s)",
                                      task->compile_id(), task->comp_level(), (preload ? "Preloading" : "Reading"),
                                      target_name, id, (clinit_brs ? ", has clinit barriers" : ""));
  }
  // Guards concurrent cache close; reads below are only safe while it holds.
  ReadingMark rdmk;
  if (rdmk.failed()) {
    // Cache is closed, cannot touch anything.
    return false;
  }

  AOTCodeReader reader(cache, entry, task);
  bool success = reader.compile_nmethod(env, target, compiler);
  if (success) {
    task->set_num_inlined_bytecodes(entry->num_inlined_bytecodes());
  } else {
    // Make sure this entry is never picked again.
    entry->set_load_fail();
    entry->set_not_entrant();
  }
  task->mark_aot_load_finish(os::elapsed_counter());
  return success;
}
1888
// Reconstruct an nmethod from its cache entry and register it with the VM.
// Reads the serialized sections in the exact order write_nmethod() emitted
// them: archived blob, relocation data, oops/metadata, oop maps, immutable
// data, reloc-immediate oops/metadata — then hands everything to
// ciEnv::register_aot_method. Returns true when the task succeeded.
bool AOTCodeReader::compile_nmethod(ciEnv* env, ciMethod* target, AbstractCompiler* compiler) {
  CompileTask* task = env->task();
  AOTCodeEntry* aot_code_entry = (AOTCodeEntry*)_entry;
  nmethod* nm = nullptr;

  uint entry_position = aot_code_entry->offset();
  uint archived_nm_offset = entry_position + aot_code_entry->code_offset();
  nmethod* archived_nm = (nmethod*)addr(archived_nm_offset);
  set_read_position(archived_nm_offset + archived_nm->size());

  OopRecorder* oop_recorder = new OopRecorder(env->arena());
  env->set_oop_recorder(oop_recorder);

  uint offset;

  // Relocation data immediately follows the archived blob.
  offset = read_position();
  address reloc_data = (address)addr(offset);
  offset += archived_nm->relocation_size();
  set_read_position(offset);

  // Read oops and metadata
  VM_ENTRY_MARK
  GrowableArray<Handle> oop_list;
  GrowableArray<Metadata*> metadata_list;

  if (!read_oop_metadata_list(THREAD, target, oop_list, metadata_list, oop_recorder)) {
    return false;
  }

  ImmutableOopMapSet* oopmaps = read_oop_map_set();

  offset = read_position();
  address immutable_data = (address)addr(offset);
  offset += archived_nm->immutable_data_size();
  set_read_position(offset);

  // Oops/metadata referenced as relocation immediates; not recorded in the
  // OopRecorder (nullptr) — they are patched directly into the code later.
  GrowableArray<Handle> reloc_immediate_oop_list;
  GrowableArray<Metadata*> reloc_immediate_metadata_list;
  if (!read_oop_metadata_list(THREAD, target, reloc_immediate_oop_list, reloc_immediate_metadata_list, nullptr)) {
    return false;
  }

  // Read Dependencies (compressed already)
  Dependencies* dependencies = new Dependencies(env);
  dependencies->set_content(immutable_data, archived_nm->dependencies_size());
  env->set_dependencies(dependencies);

  const char* name = addr(entry_position + aot_code_entry->name_offset());

  // NOTE(review): under VerifyAOTCode the load is aborted before
  // registration — presumably verification happens elsewhere; confirm.
  if (VerifyAOTCode) {
    return false;
  }

  TraceTime t1("Total time to register AOT nmethod", &_t_totalRegister, enable_timers(), false);
  nm = env->register_aot_method(THREAD,
                                target,
                                compiler,
                                archived_nm,
                                reloc_data,
                                oop_list,
                                metadata_list,
                                oopmaps,
                                immutable_data,
                                reloc_immediate_oop_list,
                                reloc_immediate_metadata_list,
                                this);
  bool success = task->is_success();
  if (success) {
    log_info(aot, codecache, nmethod)("%d (L%d): Read nmethod '%s' from AOT Code Cache", compile_id(), comp_level(), name);
#ifdef ASSERT
    LogStreamHandle(Debug, aot, codecache, nmethod) log;
    if (log.is_enabled()) {
      FlagSetting fs(PrintRelocations, true);
      nm->print_on(&log);
      nm->decode2(&log);
    }
#endif
  }

  return success;
}
1970
1971 bool skip_preload(methodHandle mh) {
1972 if (!mh->method_holder()->is_loaded()) {
1973 return true;
1974 }
1975 DirectiveSet* directives = DirectivesStack::getMatchingDirective(mh, nullptr);
1976 if (directives->DontPreloadOption || directives->ExcludeOption) {
1977 LogStreamHandle(Info, aot, codecache, init) log;
1978 if (log.is_enabled()) {
1979 log.print("Exclude preloading code for ");
1980 mh->print_value_on(&log);
1981 }
1982 return true;
1983 }
1984 return false;
1985 }
1986
1987 void AOTCodeCache::preload_code(JavaThread* thread) {
1988 if (!is_using_code()) {
1989 return;
1990 }
1991 AbstractCompiler* comp = CompileBroker::compiler(CompLevel_full_optimization);
1992 if (comp == nullptr) {
1993 log_debug(aot, codecache, init)("AOT preload code skipped: C2 compiler disabled");
1994 return;
1995 }
1996
1997 if ((DisableAOTCode & (1 << 3)) != 0) {
1998 return; // no preloaded code (level 5);
1999 }
2000 _cache->preload_aot_code(thread);
2001 }
2002
2003 void AOTCodeCache::preload_aot_code(TRAPS) {
2004 if (CompilationPolicy::compiler_count(CompLevel_full_optimization) == 0) {
2005 // Since we reuse the CompilerBroker API to install AOT code, we're required to have a JIT compiler for the
2006 // level we want (that is CompLevel_full_optimization).
2007 return;
2008 }
2009 TraceTime t1("Total time to preload AOT code", &_t_totalPreload, enable_timers(), false);
2010 assert(_for_use, "sanity");
2011 uint count = _load_header->entries_count();
2012 uint preload_entries_count = _load_header->preload_entries_count();
2013 if (preload_entries_count > 0) {
2014 log_info(aot, codecache, init)("Load %d preload entries from AOT Code Cache", preload_entries_count);
2015 AOTCodeEntry* preload_entry = (AOTCodeEntry*)addr(_load_header->preload_entries_offset());
2016 uint count = MIN2(preload_entries_count, AOTCodePreloadStop);
2017 for (uint i = AOTCodePreloadStart; i < count; i++) {
2018 AOTCodeEntry* entry = &preload_entry[i];
2019 if (entry->not_entrant()) {
2020 continue;
2021 }
2022 methodHandle mh(THREAD, entry->method());
2023 assert((mh.not_null() && AOTMetaspace::in_aot_cache((address)mh())), "sanity");
2024 if (skip_preload(mh)) {
2025 continue; // Exclude preloading for this method
2026 }
2027 assert(mh->method_holder()->is_loaded(), "");
2028 if (!mh->method_holder()->is_linked()) {
2029 ResourceMark rm;
2030 log_debug(aot, codecache, init)("Preload AOT code for %s skipped: method holder is not linked",
2031 mh->name_and_sig_as_C_string());
2032 continue; // skip
2033 }
2034 CompileBroker::preload_aot_method(mh, entry, CHECK);
2035 }
2036 }
2037 }
2038
2039 // ------------ process code and data --------------
2040
// Can't use -1. It is a valid value for a jump-to-itself destination
// used by the static call stub: see NativeJump::jump_destination().
2043 #define BAD_ADDRESS_ID -2
2044
// Walk all relocations of 'code_blob' and emit one uint of auxiliary data
// per relocation, enabling fix_relocations() to re-resolve addresses after
// load. The encoding depends on the relocation type: an index into the
// reloc-immediate oop/metadata lists, an address-table id, or a delta from
// the blob's content begin. Returns false for relocation types that cannot
// be serialized (the blob is then not cached).
bool AOTCodeCache::write_relocations(CodeBlob& code_blob, GrowableArray<Handle>* oop_list, GrowableArray<Metadata*>* metadata_list) {
  GrowableArray<uint> reloc_data;
  RelocIterator iter(&code_blob);
  LogStreamHandle(Trace, aot, codecache, reloc) log;
  while (iter.next()) {
    int idx = reloc_data.append(0); // default value
    switch (iter.type()) {
      case relocInfo::none:
        break;
      case relocInfo::oop_type: {
        oop_Relocation* r = (oop_Relocation*)iter.reloc();
        if (r->oop_is_immediate()) {
          assert(oop_list != nullptr, "sanity check");
          // store index of oop in the reloc immediate oop list
          Handle h(JavaThread::current(), r->oop_value());
          int oop_idx = oop_list->find(h);
          assert(oop_idx != -1, "sanity check");
          reloc_data.at_put(idx, (uint)oop_idx);
        }
        break;
      }
      case relocInfo::metadata_type: {
        metadata_Relocation* r = (metadata_Relocation*)iter.reloc();
        if (r->metadata_is_immediate()) {
          assert(metadata_list != nullptr, "sanity check");
          // store index of metadata in the reloc immediate metadata list
          int metadata_idx = metadata_list->find(r->metadata_value());
          assert(metadata_idx != -1, "sanity check");
          reloc_data.at_put(idx, (uint)metadata_idx);
        }
        break;
      }
      case relocInfo::virtual_call_type:  // Fall through. They all call resolve_*_call blobs.
      case relocInfo::opt_virtual_call_type:
      case relocInfo::static_call_type: {
        CallRelocation* r = (CallRelocation*)iter.reloc();
        address dest = r->destination();
        if (dest == r->addr()) { // possible call via trampoline on Aarch64
          dest = (address)-1;    // do nothing in this case when loading this relocation
        }
        // Store a stable id for the call destination instead of the address.
        int id = _table->id_for_address(dest, iter, &code_blob);
        if (id == BAD_ADDRESS_ID) {
          return false;
        }
        reloc_data.at_put(idx, id);
        break;
      }
      case relocInfo::trampoline_stub_type: {
        address dest = ((trampoline_stub_Relocation*)iter.reloc())->destination();
        int id = _table->id_for_address(dest, iter, &code_blob);
        if (id == BAD_ADDRESS_ID) {
          return false;
        }
        reloc_data.at_put(idx, id);
        break;
      }
      case relocInfo::static_stub_type:
        break;
      case relocInfo::runtime_call_type: {
        // Record offset of runtime destination
        CallRelocation* r = (CallRelocation*)iter.reloc();
        address dest = r->destination();
        if (dest == r->addr()) { // possible call via trampoline on Aarch64
          dest = (address)-1;    // do nothing in this case when loading this relocation
        }
        int id = _table->id_for_address(dest, iter, &code_blob);
        if (id == BAD_ADDRESS_ID) {
          return false;
        }
        reloc_data.at_put(idx, id);
        break;
      }
      case relocInfo::runtime_call_w_cp_type:
        log_debug(aot, codecache, reloc)("runtime_call_w_cp_type relocation is not implemented");
        return false;
      case relocInfo::external_word_type: {
        // Record offset of runtime target
        address target = ((external_word_Relocation*)iter.reloc())->target();
        int id = _table->id_for_address(target, iter, &code_blob);
        if (id == BAD_ADDRESS_ID) {
          return false;
        }
        reloc_data.at_put(idx, id);
        break;
      }
      case relocInfo::internal_word_type: {
        // Targets inside the blob are stored as a delta from content_begin.
        address target = ((internal_word_Relocation*)iter.reloc())->target();
        // assert to make sure that delta fits into 32 bits
        assert(CodeCache::contains((void *)target), "Wrong internal_word_type relocation");
        uint delta = (uint)(target - code_blob.content_begin());
        reloc_data.at_put(idx, delta);
        break;
      }
      case relocInfo::section_word_type: {
        address target = ((section_word_Relocation*)iter.reloc())->target();
        assert(CodeCache::contains((void *)target), "Wrong section_word_type relocation");
        uint delta = (uint)(target - code_blob.content_begin());
        reloc_data.at_put(idx, delta);
        break;
      }
      // The following types need no auxiliary data.
      case relocInfo::poll_type:
        break;
      case relocInfo::poll_return_type:
        break;
      case relocInfo::post_call_nop_type:
        break;
      case relocInfo::entry_guard_type:
        break;
      default:
        log_debug(aot, codecache, reloc)("relocation %d unimplemented", (int)iter.type());
        return false;
        break;
    }
    if (log.is_enabled()) {
      iter.print_current_on(&log);
    }
  }

  // Write additional relocation data: uint per relocation
  // Write the count first
  int count = reloc_data.length();
  write_bytes(&count, sizeof(int));
  for (GrowableArrayIterator<uint> iter = reloc_data.begin();
       iter != reloc_data.end(); ++iter) {
    uint value = *iter;
    int n = write_bytes(&value, sizeof(uint));
    if (n != sizeof(uint)) {
      return false;
    }
  }
  return true;
}
2177
// Re-resolve the relocations of a loaded code blob using the per-relocation
// auxiliary data written by write_relocations(). The switch below must stay
// in lockstep with the writer: the j-th uint corresponds to the j-th
// relocation visited by RelocIterator.
void AOTCodeReader::fix_relocations(CodeBlob* code_blob, GrowableArray<Handle>* oop_list, GrowableArray<Metadata*>* metadata_list) {
  LogStreamHandle(Trace, aot, reloc) log;
  uint offset = read_position();
  int count = *(int*)addr(offset);
  offset += sizeof(int);
  if (log.is_enabled()) {
    log.print_cr("======== extra relocations count=%d", count);
  }
  uint* reloc_data = (uint*)addr(offset);
  offset += (count * sizeof(uint));
  set_read_position(offset);

  RelocIterator iter(code_blob);
  int j = 0; // index into reloc_data, advanced once per relocation
  while (iter.next()) {
    switch (iter.type()) {
      case relocInfo::none:
        break;
      case relocInfo::oop_type: {
        assert(code_blob->is_nmethod(), "sanity check");
        oop_Relocation* r = (oop_Relocation*)iter.reloc();
        if (r->oop_is_immediate()) {
          // Immediate oop: reloc_data[j] indexes the reloc-immediate list.
          assert(oop_list != nullptr, "sanity check");
          Handle h = oop_list->at(reloc_data[j]);
          r->set_value(cast_from_oop<address>(h()));
        } else {
          r->fix_oop_relocation();
        }
        break;
      }
      case relocInfo::metadata_type: {
        assert(code_blob->is_nmethod(), "sanity check");
        metadata_Relocation* r = (metadata_Relocation*)iter.reloc();
        Metadata* m;
        if (r->metadata_is_immediate()) {
          assert(metadata_list != nullptr, "sanity check");
          m = metadata_list->at(reloc_data[j]);
        } else {
          // Get already updated value from nmethod.
          int index = r->metadata_index();
          m = code_blob->as_nmethod()->metadata_at(index);
        }
        r->set_value((address)m);
        break;
      }
      case relocInfo::virtual_call_type:  // Fall through. They all call resolve_*_call blobs.
      case relocInfo::opt_virtual_call_type:
      case relocInfo::static_call_type: {
        // (address)-1 was stored when the destination should be left as-is
        // (e.g. call via trampoline on Aarch64).
        address dest = _cache->address_for_id(reloc_data[j]);
        if (dest != (address)-1) {
          ((CallRelocation*)iter.reloc())->set_destination(dest);
        }
        break;
      }
      case relocInfo::trampoline_stub_type: {
        address dest = _cache->address_for_id(reloc_data[j]);
        if (dest != (address)-1) {
          ((trampoline_stub_Relocation*)iter.reloc())->set_destination(dest);
        }
        break;
      }
      case relocInfo::static_stub_type:
        break;
      case relocInfo::runtime_call_type: {
        address dest = _cache->address_for_id(reloc_data[j]);
        if (dest != (address)-1) {
          ((CallRelocation*)iter.reloc())->set_destination(dest);
        }
        break;
      }
      case relocInfo::runtime_call_w_cp_type:
        // this relocation should not be in cache (see write_relocations)
        assert(false, "runtime_call_w_cp_type relocation is not implemented");
        break;
      case relocInfo::external_word_type: {
        address target = _cache->address_for_id(reloc_data[j]);
        // Add external address to global table
        int index = ExternalsRecorder::find_index(target);
        // Update index in relocation
        Relocation::add_jint(iter.data(), index);
        external_word_Relocation* reloc = (external_word_Relocation*)iter.reloc();
        assert(reloc->target() == target, "sanity");
        reloc->set_value(target); // Patch address in the code
        break;
      }
      case relocInfo::internal_word_type: {
        // reloc_data[j] is the target's delta from content_begin (see writer).
        uint delta = reloc_data[j];
        internal_word_Relocation* r = (internal_word_Relocation*)iter.reloc();
        r->fix_relocation_after_aot_load(code_blob->content_begin(), delta);
        break;
      }
      case relocInfo::section_word_type: {
        uint delta = reloc_data[j];
        section_word_Relocation* r = (section_word_Relocation*)iter.reloc();
        r->fix_relocation_after_aot_load(code_blob->content_begin(), delta);
        break;
      }
      // The following types need no fixing.
      case relocInfo::poll_type:
        break;
      case relocInfo::poll_return_type:
        break;
      case relocInfo::post_call_nop_type:
        break;
      case relocInfo::entry_guard_type:
        break;
      default:
        assert(false,"relocation %d unimplemented", (int)iter.type());
        break;
    }
    if (log.is_enabled()) {
      iter.print_current_on(&log);
    }
    j++;
  }
  assert(j == count, "sanity");
}
2294
2295 bool AOTCodeCache::write_nmethod_reloc_immediates(GrowableArray<Handle>& oop_list, GrowableArray<Metadata*>& metadata_list) {
2296 int count = oop_list.length();
2297 if (!write_bytes(&count, sizeof(int))) {
2298 return false;
2299 }
2300 for (GrowableArrayIterator<Handle> iter = oop_list.begin();
2301 iter != oop_list.end(); ++iter) {
2302 Handle h = *iter;
2303 if (!write_oop(h())) {
2304 return false;
2305 }
2306 }
2307
2308 count = metadata_list.length();
2309 if (!write_bytes(&count, sizeof(int))) {
2310 return false;
2311 }
2312 for (GrowableArrayIterator<Metadata*> iter = metadata_list.begin();
2313 iter != metadata_list.end(); ++iter) {
2314 Metadata* m = *iter;
2315 if (!write_metadata(m)) {
2316 return false;
2317 }
2318 }
2319 return true;
2320 }
2321
2322 bool AOTCodeCache::write_metadata(nmethod* nm) {
2323 int count = nm->metadata_count()-1;
2324 if (!write_bytes(&count, sizeof(int))) {
2325 return false;
2326 }
2327 for (Metadata** p = nm->metadata_begin(); p < nm->metadata_end(); p++) {
2328 if (!write_metadata(*p)) {
2329 return false;
2330 }
2331 }
2332 return true;
2333 }
2334
// Write a single metadata reference, tagged with a DataKind so the reader
// can decode it: Null for nullptr, No_Data for Universe::non_oop_word(),
// Klass/Method via the dedicated writers, and MethodCnts as a tag followed
// by the owning Method. Unsupported metadata kinds are fatal.
bool AOTCodeCache::write_metadata(Metadata* m) {
  uint n = 0;
  if (m == nullptr) {
    DataKind kind = DataKind::Null;
    n = write_bytes(&kind, sizeof(int));
    if (n != sizeof(int)) {
      return false;
    }
  } else if (m == (Metadata*)Universe::non_oop_word()) {
    DataKind kind = DataKind::No_Data;
    n = write_bytes(&kind, sizeof(int));
    if (n != sizeof(int)) {
      return false;
    }
  } else if (m->is_klass()) {
    // write_klass() emits its own DataKind::Klass tag.
    if (!write_klass((Klass*)m)) {
      return false;
    }
  } else if (m->is_method()) {
    // write_method() emits its own DataKind::Method tag.
    if (!write_method((Method*)m)) {
      return false;
    }
  } else if (m->is_methodCounters()) {
    DataKind kind = DataKind::MethodCnts;
    n = write_bytes(&kind, sizeof(int));
    if (n != sizeof(int)) {
      return false;
    }
    // Stored as the owning Method; the reader re-creates the counters.
    if (!write_method(((MethodCounters*)m)->method())) {
      return false;
    }
    log_debug(aot, codecache, metadata)("%d (L%d): Write MethodCounters : " INTPTR_FORMAT, compile_id(), comp_level(), p2i(m));
  } else { // Not supported
    fatal("metadata : " INTPTR_FORMAT " unimplemented", p2i(m));
    return false;
  }
  return true;
}
2373
// Decode one metadata reference written by AOTCodeCache::write_metadata().
// Reads a DataKind tag and dispatches; for MethodCnts the nested
// DataKind::Method tag is skipped first, then the Method is read and its
// MethodCounters are (re)created. Sets lookup-failed state and returns
// nullptr on unknown kinds or when counters cannot be obtained.
Metadata* AOTCodeReader::read_metadata(const methodHandle& comp_method) {
  uint code_offset = read_position();
  Metadata* m = nullptr;
  DataKind kind = *(DataKind*)addr(code_offset);
  code_offset += sizeof(DataKind);
  set_read_position(code_offset);
  if (kind == DataKind::Null) {
    m = (Metadata*)nullptr;
  } else if (kind == DataKind::No_Data) {
    m = (Metadata*)Universe::non_oop_word();
  } else if (kind == DataKind::Klass) {
    m = (Metadata*)read_klass(comp_method);
  } else if (kind == DataKind::Method) {
    m = (Metadata*)read_method(comp_method);
  } else if (kind == DataKind::MethodCnts) {
    // Skip the nested DataKind::Method tag written by write_method().
    kind = *(DataKind*)addr(code_offset);
    code_offset += sizeof(DataKind);
    set_read_position(code_offset);
    m = (Metadata*)read_method(comp_method);
    if (m != nullptr) {
      Method* method = (Method*)m;
      m = method->get_method_counters(Thread::current());
      if (m == nullptr) {
        set_lookup_failed();
        log_debug(aot, codecache, metadata)("%d (L%d): Failed to get MethodCounters", compile_id(), comp_level());
      } else {
        log_debug(aot, codecache, metadata)("%d (L%d): Read MethodCounters : " INTPTR_FORMAT, compile_id(), comp_level(), p2i(m));
      }
    }
  } else {
    set_lookup_failed();
    log_debug(aot, codecache, metadata)("%d (L%d): Unknown metadata's kind: %d", compile_id(), comp_level(), (int)kind);
  }
  return m;
}
2409
// Write a Method reference as DataKind::Method plus the method's offset
// from the AOT cache base address. Fails (and sets lookup-failed state)
// when the method is not archived in the AOT cache.
bool AOTCodeCache::write_method(Method* method) {
  ResourceMark rm; // For method's name printing
  if (AOTCacheAccess::can_generate_aot_code(method)) {
    DataKind kind = DataKind::Method;
    uint n = write_bytes(&kind, sizeof(int));
    if (n != sizeof(int)) {
      return false;
    }
    // Store a position-independent reference: delta from the cache base.
    uint method_offset = AOTCacheAccess::delta_from_base_address((address)method);
    n = write_bytes(&method_offset, sizeof(uint));
    if (n != sizeof(uint)) {
      return false;
    }
    log_debug(aot, codecache, metadata)("%d (L%d): Wrote method: %s @ 0x%08x",
                                        compile_id(), comp_level(), method->name_and_sig_as_C_string(), method_offset);
    return true;
  }
  log_debug(aot, codecache, metadata)("%d (L%d): Method is not archived: %s",
                                      compile_id(), comp_level(), method->name_and_sig_as_C_string());
  set_lookup_failed();
  return false;
}
2432
// Decode a Method reference written by write_method(): read the offset,
// convert it back to a Method*, and validate that the method and its
// holder are usable (in the AOT cache, an instance klass, loaded and
// linked). Any validation failure sets lookup-failed state and returns
// nullptr so the enclosing load is abandoned.
Method* AOTCodeReader::read_method(const methodHandle& comp_method) {
  uint code_offset = read_position();
  uint method_offset = *(uint*)addr(code_offset);
  code_offset += sizeof(uint);
  set_read_position(code_offset);
  Method* m = AOTCacheAccess::convert_offset_to_method(method_offset);
  if (!AOTMetaspace::in_aot_cache((address)m)) {
    // Something changed in CDS
    set_lookup_failed();
    log_debug(aot, codecache, metadata)("Lookup failed for shared method: " INTPTR_FORMAT " is not in CDS ", p2i((address)m));
    return nullptr;
  }
  assert(m->is_method(), "sanity");
  ResourceMark rm;
  Klass* k = m->method_holder();
  if (!k->is_instance_klass()) {
    set_lookup_failed();
    log_debug(aot, codecache, metadata)("%d '%s' (L%d): Lookup failed for holder %s: not instance klass",
                                        compile_id(), comp_method->name_and_sig_as_C_string(), comp_level(), k->external_name());
    return nullptr;
  } else if (!AOTMetaspace::in_aot_cache((address)k)) {
    set_lookup_failed();
    log_debug(aot, codecache, metadata)("%d '%s' (L%d): Lookup failed for holder %s: not in CDS",
                                        compile_id(), comp_method->name_and_sig_as_C_string(), comp_level(), k->external_name());
    return nullptr;
  } else if (!InstanceKlass::cast(k)->is_loaded()) {
    set_lookup_failed();
    log_debug(aot, codecache, metadata)("%d '%s' (L%d): Lookup failed for holder %s: not loaded",
                                        compile_id(), comp_method->name_and_sig_as_C_string(), comp_level(), k->external_name());
    return nullptr;
  } else if (!InstanceKlass::cast(k)->is_linked()) {
    set_lookup_failed();
    log_debug(aot, codecache, metadata)("%d '%s' (L%d): Lookup failed for holder %s: not linked%s",
                                        compile_id(), comp_method->name_and_sig_as_C_string(), comp_level(), k->external_name(), (_preload ? " for code preload" : ""));
    return nullptr;
  }
  log_debug(aot, codecache, metadata)("%d (L%d): Shared method lookup: %s",
                                      compile_id(), comp_level(), m->name_and_sig_as_C_string());
  return m;
}
2473
2474 bool AOTCodeCache::write_klass(Klass* klass) {
2475 uint array_dim = 0;
2476 if (klass->is_objArray_klass()) {
2477 array_dim = ObjArrayKlass::cast(klass)->dimension();
2478 klass = ObjArrayKlass::cast(klass)->bottom_klass(); // overwrites klass
2479 }
2480 uint init_state = 0;
2481 bool can_write = true;
2482 if (klass->is_instance_klass()) {
2483 InstanceKlass* ik = InstanceKlass::cast(klass);
2484 init_state = (ik->is_initialized() ? 1 : 0);
2485 can_write = AOTCacheAccess::can_generate_aot_code_for(ik);
2486 } else {
2487 can_write = AOTCacheAccess::can_generate_aot_code(klass);
2488 }
2489 ResourceMark rm;
2490 uint state = (array_dim << 1) | (init_state & 1);
2491 if (can_write) {
2492 DataKind kind = DataKind::Klass;
2493 uint n = write_bytes(&kind, sizeof(int));
2494 if (n != sizeof(int)) {
2495 return false;
2496 }
2497 // Record state of instance klass initialization and array dimentions.
2498 n = write_bytes(&state, sizeof(int));
2499 if (n != sizeof(int)) {
2500 return false;
2501 }
2502 uint klass_offset = AOTCacheAccess::delta_from_base_address((address)klass);
2503 n = write_bytes(&klass_offset, sizeof(uint));
2504 if (n != sizeof(uint)) {
2505 return false;
2506 }
2507 log_debug(aot, codecache, metadata)("%d (L%d): Registered klass: %s%s%s @ 0x%08x",
2508 compile_id(), comp_level(), klass->external_name(),
2509 (!klass->is_instance_klass() ? "" : (init_state == 1 ? " (initialized)" : " (not-initialized)")),
2510 (array_dim > 0 ? " (object array)" : ""), klass_offset);
2511 return true;
2512 }
2513 log_debug(aot, codecache, metadata)("%d (L%d): Klassis not archived: %s%s%s",
2514 compile_id(), comp_level(), klass->external_name(),
2515 (!klass->is_instance_klass() ? "" : (init_state == 1 ? " (initialized)" : " (not-initialized)")),
2516 (array_dim > 0 ? " (object array)" : ""));
2517 set_lookup_failed();
2518 return false;
2519 }
2520
2521 Klass* AOTCodeReader::read_klass(const methodHandle& comp_method) {
2522 uint code_offset = read_position();
2523 uint state = *(uint*)addr(code_offset);
2524 uint init_state = (state & 1);
2525 uint array_dim = (state >> 1);
2526 code_offset += sizeof(int);
2527 uint klass_offset = *(uint*)addr(code_offset);
2528 code_offset += sizeof(uint);
2529 set_read_position(code_offset);
2530 Klass* k = AOTCacheAccess::convert_offset_to_klass(klass_offset);
2531 if (!AOTMetaspace::in_aot_cache((address)k)) {
2532 // Something changed in CDS
2533 set_lookup_failed();
2534 log_debug(aot, codecache, metadata)("Lookup failed for shared klass: " INTPTR_FORMAT " is not in CDS ", p2i((address)k));
2535 return nullptr;
2536 }
2537 assert(k->is_klass(), "sanity");
2538 ResourceMark rm;
2539 if (k->is_instance_klass() && !InstanceKlass::cast(k)->is_loaded()) {
2540 set_lookup_failed();
2541 log_debug(aot, codecache, metadata)("%d '%s' (L%d): Lookup failed for klass %s: not loaded",
2542 compile_id(), comp_method->name_and_sig_as_C_string(), comp_level(), k->external_name());
2543 return nullptr;
2544 } else
2545 // Allow not initialized klass which was uninitialized during code caching or for preload
2546 if (k->is_instance_klass() && !InstanceKlass::cast(k)->is_initialized() && (init_state == 1) && !_preload) {
2547 set_lookup_failed();
2548 log_debug(aot, codecache, metadata)("%d '%s' (L%d): Lookup failed for klass %s: not initialized",
2549 compile_id(), comp_method->name_and_sig_as_C_string(), comp_level(), k->external_name());
2550 return nullptr;
2551 }
2552 if (array_dim > 0) {
2553 assert(k->is_instance_klass() || k->is_typeArray_klass(), "sanity check");
2554 Klass* ak = k->array_klass_or_null(array_dim);
2555 // FIXME: what would it take to create an array class on the fly?
2556 // Klass* ak = k->array_klass(dim, JavaThread::current());
2557 // guarantee(JavaThread::current()->pending_exception() == nullptr, "");
2558 if (ak == nullptr) {
2559 set_lookup_failed();
2560 log_debug(aot, codecache, metadata)("%d (L%d): %d-dimension array klass lookup failed: %s",
2561 compile_id(), comp_level(), array_dim, k->external_name());
2562 }
2563 log_debug(aot, codecache, metadata)("%d (L%d): Klass lookup: %s (object array)", compile_id(), comp_level(), k->external_name());
2564 return ak;
2565 } else {
2566 log_debug(aot, codecache, metadata)("%d (L%d): Shared klass lookup: %s",
2567 compile_id(), comp_level(), k->external_name());
2568 return k;
2569 }
2570 }
2571
2572 bool AOTCodeCache::write_oop(jobject& jo) {
2573 oop obj = JNIHandles::resolve(jo);
2574 return write_oop(obj);
2575 }
2576
// Write one oop constant into the cache stream, tagged with a DataKind that
// discriminates the supported categories: null, the non-oop placeholder word,
// class mirrors (primitive or klass), archived Strings, the two well-known
// class loaders, and other archived "permanent" heap objects.
// Returns false on a short write, or false with lookup_failed() set for oops
// that cannot be archived (non-permanent Strings/objects, custom loaders).
bool AOTCodeCache::write_oop(oop obj) {
  DataKind kind;
  uint n = 0;
  if (obj == nullptr) {
    kind = DataKind::Null;
    n = write_bytes(&kind, sizeof(int));
    if (n != sizeof(int)) {
      return false;
    }
  } else if (cast_from_oop<void *>(obj) == Universe::non_oop_word()) {
    // Placeholder value used by compiled code; stored as a kind with no payload.
    kind = DataKind::No_Data;
    n = write_bytes(&kind, sizeof(int));
    if (n != sizeof(int)) {
      return false;
    }
  } else if (java_lang_Class::is_instance(obj)) {
    if (java_lang_Class::is_primitive(obj)) {
      // Primitive mirrors are identified by their BasicType only.
      int bt = (int)java_lang_Class::primitive_type(obj);
      kind = DataKind::Primitive;
      n = write_bytes(&kind, sizeof(int));
      if (n != sizeof(int)) {
        return false;
      }
      n = write_bytes(&bt, sizeof(int));
      if (n != sizeof(int)) {
        return false;
      }
      log_debug(aot, codecache, oops)("%d (L%d): Write primitive type klass: %s", compile_id(), comp_level(), type2name((BasicType)bt));
    } else {
      // A klass mirror is stored as a reference to the klass itself
      // (write_klass emits DataKind::Klass); the reader recovers the
      // mirror via Klass::java_mirror().
      Klass* klass = java_lang_Class::as_Klass(obj);
      if (!write_klass(klass)) {
        return false;
      }
    }
  } else if (java_lang_String::is_instance(obj)) {
    int k = AOTCacheAccess::get_archived_object_permanent_index(obj); // k >= 0 means obj is a "permanent heap object"
    ResourceMark rm;
    size_t length_sz = 0;
    const char* string = java_lang_String::as_utf8_string(obj, length_sz);
    if (k >= 0) {
      // Archived String: store its permanent-object index.
      kind = DataKind::String;
      n = write_bytes(&kind, sizeof(int));
      if (n != sizeof(int)) {
        return false;
      }
      n = write_bytes(&k, sizeof(int));
      if (n != sizeof(int)) {
        return false;
      }
      log_debug(aot, codecache, oops)("%d (L%d): Write String object: " PTR_FORMAT " : %s", compile_id(), comp_level(), p2i(obj), string);
      return true;
    }
    // Not archived String object - bailout
    set_lookup_failed();
    log_debug(aot, codecache, oops)("%d (L%d): Not archived String object: " PTR_FORMAT " : %s",
                                    compile_id(), comp_level(), p2i(obj), string);
    return false;
  } else if (java_lang_Module::is_instance(obj)) {
    fatal("Module object unimplemented");
  } else if (java_lang_ClassLoader::is_instance(obj)) {
    // Only the two well-known loaders are supported; they are stored as
    // bare kinds and re-resolved from SystemDictionary on read.
    if (obj == SystemDictionary::java_system_loader()) {
      kind = DataKind::SysLoader;
      log_debug(aot, codecache, oops)("%d (L%d): Write ClassLoader: java_system_loader", compile_id(), comp_level());
    } else if (obj == SystemDictionary::java_platform_loader()) {
      kind = DataKind::PlaLoader;
      log_debug(aot, codecache, oops)("%d (L%d): Write ClassLoader: java_platform_loader", compile_id(), comp_level());
    } else {
      ResourceMark rm;
      set_lookup_failed();
      log_debug(aot, codecache, oops)("%d (L%d): Not supported Class Loader: " PTR_FORMAT " : %s",
                                      compile_id(), comp_level(), p2i(obj), obj->klass()->external_name());
      return false;
    }
    n = write_bytes(&kind, sizeof(int));
    if (n != sizeof(int)) {
      return false;
    }
  } else {
    // Any other heap object: only archivable if it is a permanent
    // archived object (e.g. a MethodHandle-related oop).
    ResourceMark rm;
    int k = AOTCacheAccess::get_archived_object_permanent_index(obj); // k >= 0 means obj is a "permanent heap object"
    if (k >= 0) {
      kind = DataKind::MH_Oop;
      n = write_bytes(&kind, sizeof(int));
      if (n != sizeof(int)) {
        return false;
      }
      n = write_bytes(&k, sizeof(int));
      if (n != sizeof(int)) {
        return false;
      }
      log_debug(aot, codecache, oops)("%d (L%d): Write MH object: " PTR_FORMAT " : %s",
                                      compile_id(), comp_level(), p2i(obj), obj->klass()->external_name());
      return true;
    }
    // Not archived Java object - bailout
    set_lookup_failed();
    log_debug(aot, codecache, oops)("%d (L%d): Not archived Java object: " PTR_FORMAT " : %s",
                                    compile_id(), comp_level(), p2i(obj), obj->klass()->external_name());
    return false;
  }
  return true;
}
2679
// Read back one oop constant written by AOTCodeCache::write_oop(), dispatching
// on the leading DataKind tag. Permanent heap objects (Strings, MH oops) are
// re-resolved by their archived index; mirrors via the klass; loaders from
// SystemDictionary.
// Returns nullptr with lookup_failed() set if the object can no longer be
// resolved in this VM instance (note: a DataKind::Null entry also returns
// nullptr, but without setting lookup_failed()).
oop AOTCodeReader::read_oop(JavaThread* thread, const methodHandle& comp_method) {
  uint code_offset = read_position();
  oop obj = nullptr;
  DataKind kind = *(DataKind*)addr(code_offset);
  code_offset += sizeof(DataKind);
  set_read_position(code_offset);
  if (kind == DataKind::Null) {
    return nullptr;
  } else if (kind == DataKind::No_Data) {
    // The non-oop placeholder word used by compiled code.
    return cast_to_oop(Universe::non_oop_word());
  } else if (kind == DataKind::Klass) {
    Klass* k = read_klass(comp_method);
    if (k == nullptr) {
      return nullptr;
    }
    obj = k->java_mirror();
    if (obj == nullptr) {
      set_lookup_failed();
      log_debug(aot, codecache, oops)("Lookup failed for java_mirror of klass %s", k->external_name());
      return nullptr;
    }
  } else if (kind == DataKind::Primitive) {
    // Payload is the BasicType; the mirror is recovered from Universe.
    code_offset = read_position();
    int t = *(int*)addr(code_offset);
    code_offset += sizeof(int);
    set_read_position(code_offset);
    BasicType bt = (BasicType)t;
    obj = java_lang_Class::primitive_mirror(bt);
    log_debug(aot, codecache, oops)("%d (L%d): Read primitive type klass: %s", compile_id(), comp_level(), type2name(bt));
  } else if (kind == DataKind::String) {
    // Payload is the permanent-object index of the archived String.
    code_offset = read_position();
    int k = *(int*)addr(code_offset);
    code_offset += sizeof(int);
    set_read_position(code_offset);
    obj = AOTCacheAccess::get_archived_object(k);
    if (obj == nullptr) {
      set_lookup_failed();
      log_debug(aot, codecache, oops)("Lookup failed for String object");
      return nullptr;
    }
    assert(java_lang_String::is_instance(obj), "must be string");

    ResourceMark rm;
    size_t length_sz = 0;
    const char* string = java_lang_String::as_utf8_string(obj, length_sz);
    log_debug(aot, codecache, oops)("%d (L%d): Read String object: %s", compile_id(), comp_level(), string);
  } else if (kind == DataKind::SysLoader) {
    obj = SystemDictionary::java_system_loader();
    log_debug(aot, codecache, oops)("%d (L%d): Read java_system_loader", compile_id(), comp_level());
  } else if (kind == DataKind::PlaLoader) {
    obj = SystemDictionary::java_platform_loader();
    log_debug(aot, codecache, oops)("%d (L%d): Read java_platform_loader", compile_id(), comp_level());
  } else if (kind == DataKind::MH_Oop) {
    // Payload is the permanent-object index of the archived object.
    code_offset = read_position();
    int k = *(int*)addr(code_offset);
    code_offset += sizeof(int);
    set_read_position(code_offset);
    obj = AOTCacheAccess::get_archived_object(k);
    if (obj == nullptr) {
      set_lookup_failed();
      log_debug(aot, codecache, oops)("Lookup failed for MH object");
      return nullptr;
    }
    ResourceMark rm;
    log_debug(aot, codecache, oops)("%d (L%d): Read MH object: " PTR_FORMAT " : %s",
                                    compile_id(), comp_level(), p2i(obj), obj->klass()->external_name());
  } else {
    set_lookup_failed();
    log_debug(aot, codecache, oops)("%d (L%d): Unknown oop's kind: %d",
                                    compile_id(), comp_level(), (int)kind);
    return nullptr;
  }
  return obj;
}
2754
// Read the oop list followed by the metadata list recorded for an AOT code
// entry, appending the results to 'oop_list'/'metadata_list' and, when an
// 'oop_recorder' is supplied, pre-registering every value in it (real values
// get a found index, sentinels get an allocated index).
// Returns false as soon as any individual oop/metadata lookup fails.
bool AOTCodeReader::read_oop_metadata_list(JavaThread* thread, ciMethod* target, GrowableArray<Handle> &oop_list, GrowableArray<Metadata*> &metadata_list, OopRecorder* oop_recorder) {
  // NOTE(review): 'thread' is passed in, yet JavaThread::current() is used
  // for comp_method and 'current' — presumably they are always the same
  // thread here; confirm against callers.
  methodHandle comp_method(JavaThread::current(), target->get_Method());
  JavaThread* current = JavaThread::current();
  uint offset = read_position();
  int count = *(int *)addr(offset);  // number of oops that follow
  offset += sizeof(int);
  set_read_position(offset);
  for (int i = 0; i < count; i++) {
    oop obj = read_oop(current, comp_method);
    if (lookup_failed()) {
      return false;
    }
    Handle h(thread, obj);
    oop_list.append(h);
    if (oop_recorder != nullptr) {
      jobject jo = JNIHandles::make_local(thread, obj);
      if (oop_recorder->is_real(jo)) {
        oop_recorder->find_index(jo);
      } else {
        oop_recorder->allocate_oop_index(jo);
      }
    }
    LogStreamHandle(Debug, aot, codecache, oops) log;
    if (log.is_enabled()) {
      log.print("%d: " INTPTR_FORMAT " ", i, p2i(obj));
      if (obj == Universe::non_oop_word()) {
        log.print("non-oop word");
      } else if (obj == nullptr) {
        log.print("nullptr-oop");
      } else {
        obj->print_value_on(&log);
      }
      log.cr();
    }
  }

  // The metadata list follows the oop list in the stream, with the same
  // count-prefixed layout.
  offset = read_position();
  count = *(int *)addr(offset);
  offset += sizeof(int);
  set_read_position(offset);
  for (int i = 0; i < count; i++) {
    Metadata* m = read_metadata(comp_method);
    if (lookup_failed()) {
      return false;
    }
    metadata_list.append(m);
    if (oop_recorder != nullptr) {
      if (oop_recorder->is_real(m)) {
        oop_recorder->find_index(m);
      } else {
        oop_recorder->allocate_metadata_index(m);
      }
    }
    LogTarget(Debug, aot, codecache, metadata) log;
    if (log.is_enabled()) {
      LogStream ls(log);
      ls.print("%d: " INTPTR_FORMAT " ", i, p2i(m));
      if (m == (Metadata*)Universe::non_oop_word()) {
        ls.print("non-metadata word");
      } else if (m == nullptr) {
        ls.print("nullptr-oop");
      } else {
        Metadata::print_value_on_maybe_null(&ls, m);
      }
      ls.cr();
    }
  }
  return true;
}
2824
2825 bool AOTCodeCache::write_oop_map_set(CodeBlob& cb) {
2826 ImmutableOopMapSet* oopmaps = cb.oop_maps();
2827 int oopmaps_size = oopmaps->nr_of_bytes();
2828 if (!write_bytes(&oopmaps_size, sizeof(int))) {
2829 return false;
2830 }
2831 uint n = write_bytes(oopmaps, oopmaps->nr_of_bytes());
2832 if (n != (uint)oopmaps->nr_of_bytes()) {
2833 return false;
2834 }
2835 return true;
2836 }
2837
2838 ImmutableOopMapSet* AOTCodeReader::read_oop_map_set() {
2839 uint offset = read_position();
2840 int size = *(int *)addr(offset);
2841 offset += sizeof(int);
2842 ImmutableOopMapSet* oopmaps = (ImmutableOopMapSet *)addr(offset);
2843 offset += size;
2844 set_read_position(offset);
2845 return oopmaps;
2846 }
2847
// Write all oop constants of the nmethod into the cache stream, preceded by
// their count, using write_oop() for each slot.
// Returns false as soon as any write fails.
bool AOTCodeCache::write_oops(nmethod* nm) {
  // NOTE(review): the count written is oops_count()-1 while the loop below
  // writes every slot in [oops_begin(), oops_end()) — presumably
  // oops_count() is one greater than the number of slots (1-based oop
  // indices); confirm against nmethod before changing.
  int count = nm->oops_count()-1;
  if (!write_bytes(&count, sizeof(int))) {
    return false;
  }
  for (oop* p = nm->oops_begin(); p < nm->oops_end(); p++) {
    if (!write_oop(*p)) {
      return false;
    }
  }
  return true;
}
2860
2861 #ifndef PRODUCT
// Write all assembler remarks of a code blob into the cache stream: a count
// (reserved up-front and patched in after iteration) followed by
// (code offset, string) pairs. With 'use_string_table' the string is stored
// as an id into the shared AOTCodeAddressTable C-string table, otherwise
// inline as a NUL-terminated string.
// Returns false if reservation or any write fails.
bool AOTCodeCache::write_asm_remarks(AsmRemarks& asm_remarks, bool use_string_table) {
  // Write asm remarks
  uint* count_ptr = (uint *)reserve_bytes(sizeof(uint));
  if (count_ptr == nullptr) {
    return false;
  }
  uint count = 0;
  bool result = asm_remarks.iterate([&] (uint offset, const char* str) -> bool {
    log_trace(aot, codecache, stubs)("asm remark offset=%d, str='%s'", offset, str);
    uint n = write_bytes(&offset, sizeof(uint));
    if (n != sizeof(uint)) {
      return false;  // stops the iteration and makes 'result' false
    }
    if (use_string_table) {
      const char* cstr = add_C_string(str);
      int id = _table->id_for_C_string((address)cstr);
      assert(id != -1, "asm remark string '%s' not found in AOTCodeAddressTable", str);
      n = write_bytes(&id, sizeof(int));
      if (n != sizeof(int)) {
        return false;
      }
    } else {
      n = write_bytes(str, (uint)strlen(str) + 1);
      if (n != strlen(str) + 1) {
        return false;
      }
    }
    count += 1;
    return true;
  });
  // Patch the actual number of remarks into the reserved slot.
  *count_ptr = count;
  return result;
}
2895
2896 void AOTCodeReader::read_asm_remarks(AsmRemarks& asm_remarks, bool use_string_table) {
2897 // Read asm remarks
2898 uint offset = read_position();
2899 uint count = *(uint *)addr(offset);
2900 offset += sizeof(uint);
2901 for (uint i = 0; i < count; i++) {
2902 uint remark_offset = *(uint *)addr(offset);
2903 offset += sizeof(uint);
2904 const char* remark = nullptr;
2905 if (use_string_table) {
2906 int remark_string_id = *(uint *)addr(offset);
2907 offset += sizeof(int);
2908 remark = (const char*)_cache->address_for_C_string(remark_string_id);
2909 } else {
2910 remark = (const char*)addr(offset);
2911 offset += (uint)strlen(remark)+1;
2912 }
2913 asm_remarks.insert(remark_offset, remark);
2914 }
2915 set_read_position(offset);
2916 }
2917
2918 bool AOTCodeCache::write_dbg_strings(DbgStrings& dbg_strings, bool use_string_table) {
2919 // Write dbg strings
2920 uint* count_ptr = (uint *)reserve_bytes(sizeof(uint));
2921 if (count_ptr == nullptr) {
2922 return false;
2923 }
2924 uint count = 0;
2925 bool result = dbg_strings.iterate([&] (const char* str) -> bool {
2926 log_trace(aot, codecache, stubs)("dbg string=%s", str);
2927 if (use_string_table) {
2928 const char* cstr = add_C_string(str);
2929 int id = _table->id_for_C_string((address)cstr);
2930 assert(id != -1, "db string '%s' not found in AOTCodeAddressTable", str);
2931 uint n = write_bytes(&id, sizeof(int));
2932 if (n != sizeof(int)) {
2933 return false;
2934 }
2935 } else {
2936 uint n = write_bytes(str, (uint)strlen(str) + 1);
2937 if (n != strlen(str) + 1) {
2938 return false;
2939 }
2940 }
2941 count += 1;
2942 return true;
2943 });
2944 *count_ptr = count;
2945 return result;
2946 }
2947
2948 void AOTCodeReader::read_dbg_strings(DbgStrings& dbg_strings, bool use_string_table) {
2949 // Read dbg strings
2950 uint offset = read_position();
2951 uint count = *(uint *)addr(offset);
2952 offset += sizeof(uint);
2953 for (uint i = 0; i < count; i++) {
2954 const char* str = nullptr;
2955 if (use_string_table) {
2956 int string_id = *(uint *)addr(offset);
2957 offset += sizeof(int);
2958 str = (const char*)_cache->address_for_C_string(string_id);
2959 } else {
2960 str = (const char*)addr(offset);
2961 offset += (uint)strlen(str)+1;
2962 }
2963 dbg_strings.insert(str);
2964 }
2965 set_read_position(offset);
2966 }
2967 #endif // PRODUCT
2968
2969 //======================= AOTCodeAddressTable ===============
2970
2971 // address table ids for generated routines, external addresses and C
2972 // string addresses are partitioned into positive integer ranges
2973 // defined by the following positive base and max values
2974 // i.e. [_extrs_base, _extrs_base + _extrs_max -1],
2975 // [_stubs_base, _stubs_base + _stubs_max -1],
2976 // ...
2977 // [_c_str_base, _c_str_base + _c_str_max -1],
// Capacities of each id range (see the partitioning comment above).
#define _extrs_max 140
#define _stubs_max 210
#define _shared_blobs_max 25
#define _C1_blobs_max 50
#define _C2_blobs_max 25
#define _blobs_max (_shared_blobs_max+_C1_blobs_max+_C2_blobs_max)
#define _all_max (_extrs_max+_stubs_max+_blobs_max)

// Base ids of each range; each range starts where the previous one ends.
#define _extrs_base 0
#define _stubs_base (_extrs_base + _extrs_max)
#define _shared_blobs_base (_stubs_base + _stubs_max)
#define _C1_blobs_base (_shared_blobs_base + _shared_blobs_max)
#define _C2_blobs_base (_C1_blobs_base + _C1_blobs_max)
#define _blobs_end (_shared_blobs_base + _blobs_max)
#if (_C2_blobs_base >= _all_max)
#error AOTCodeAddressTable ranges need adjusting
#endif

// Append 'addr' to the given range's address array and bump its length,
// asserting the range capacity is not exceeded.
#define SET_ADDRESS(type, addr) \
  { \
    type##_addr[type##_length++] = (address) (addr); \
    assert(type##_length <= type##_max, "increase size"); \
  }

// Guards against re-entrant initialization of the external-address range.
static bool initializing_extrs = false;
3003
// Populate the external-address range (_extrs) of the table with the VM
// runtime entry points, GC barrier routines and well-known data addresses
// that AOT-compiled code may reference. The order of SET_ADDRESS calls
// defines the stable id of each entry, so producer and consumer must run
// the same code. Idempotent and guarded against re-entrant calls.
void AOTCodeAddressTable::init_extrs() {
  if (_extrs_complete || initializing_extrs) return; // Done already

  assert(_blobs_end <= _all_max, "AOTCodeAddress table ranges need adjusting");

  initializing_extrs = true;
  _extrs_addr = NEW_C_HEAP_ARRAY(address, _extrs_max, mtCode);

  _extrs_length = 0;

  // Record addresses of VM runtime methods
  SET_ADDRESS(_extrs, SharedRuntime::fixup_callers_callsite);
  SET_ADDRESS(_extrs, SharedRuntime::handle_wrong_method);
  SET_ADDRESS(_extrs, SharedRuntime::handle_wrong_method_abstract);
  SET_ADDRESS(_extrs, SharedRuntime::handle_wrong_method_ic_miss);
  {
    // Required by Shared blobs
    SET_ADDRESS(_extrs, Deoptimization::fetch_unroll_info);
    SET_ADDRESS(_extrs, Deoptimization::unpack_frames);
    SET_ADDRESS(_extrs, SafepointSynchronize::handle_polling_page_exception);
    SET_ADDRESS(_extrs, SharedRuntime::resolve_opt_virtual_call_C);
    SET_ADDRESS(_extrs, SharedRuntime::resolve_virtual_call_C);
    SET_ADDRESS(_extrs, SharedRuntime::resolve_static_call_C);
    SET_ADDRESS(_extrs, SharedRuntime::throw_StackOverflowError);
    SET_ADDRESS(_extrs, SharedRuntime::throw_delayed_StackOverflowError);
    SET_ADDRESS(_extrs, SharedRuntime::throw_AbstractMethodError);
    SET_ADDRESS(_extrs, SharedRuntime::throw_IncompatibleClassChangeError);
    SET_ADDRESS(_extrs, SharedRuntime::throw_NullPointerException_at_call);
    // NOTE(review): duplicate of the throw_StackOverflowError entry a few
    // lines above — it consumes a second table slot; confirm intentional.
    SET_ADDRESS(_extrs, SharedRuntime::throw_StackOverflowError);
    SET_ADDRESS(_extrs, CompressedOops::base_addr());
    SET_ADDRESS(_extrs, CompressedKlassPointers::base_addr());
  }
  {
    // Required by initial stubs
    SET_ADDRESS(_extrs, StubRoutines::crc_table_addr());
#if defined(AMD64)
    SET_ADDRESS(_extrs, StubRoutines::crc32c_table_addr());
#endif
  }

#ifdef COMPILER1
  {
    // Required by C1 blobs
    SET_ADDRESS(_extrs, static_cast<int (*)(oopDesc*)>(SharedRuntime::dtrace_object_alloc));
    SET_ADDRESS(_extrs, SharedRuntime::exception_handler_for_return_address);
    SET_ADDRESS(_extrs, SharedRuntime::register_finalizer);
    SET_ADDRESS(_extrs, Runtime1::is_instance_of);
    SET_ADDRESS(_extrs, Runtime1::exception_handler_for_pc);
    SET_ADDRESS(_extrs, Runtime1::check_abort_on_vm_exception);
    SET_ADDRESS(_extrs, Runtime1::new_instance);
    SET_ADDRESS(_extrs, Runtime1::counter_overflow);
    SET_ADDRESS(_extrs, Runtime1::new_type_array);
    SET_ADDRESS(_extrs, Runtime1::new_object_array);
    SET_ADDRESS(_extrs, Runtime1::new_multi_array);
    SET_ADDRESS(_extrs, Runtime1::throw_range_check_exception);
    SET_ADDRESS(_extrs, Runtime1::throw_index_exception);
    SET_ADDRESS(_extrs, Runtime1::throw_div0_exception);
    SET_ADDRESS(_extrs, Runtime1::throw_null_pointer_exception);
    SET_ADDRESS(_extrs, Runtime1::throw_array_store_exception);
    SET_ADDRESS(_extrs, Runtime1::throw_class_cast_exception);
    SET_ADDRESS(_extrs, Runtime1::throw_incompatible_class_change_error);
    SET_ADDRESS(_extrs, Runtime1::monitorenter);
    SET_ADDRESS(_extrs, Runtime1::monitorexit);
    SET_ADDRESS(_extrs, Runtime1::deoptimize);
    SET_ADDRESS(_extrs, Runtime1::access_field_patching);
    SET_ADDRESS(_extrs, Runtime1::move_klass_patching);
    SET_ADDRESS(_extrs, Runtime1::move_mirror_patching);
    SET_ADDRESS(_extrs, Runtime1::move_appendix_patching);
    SET_ADDRESS(_extrs, Runtime1::predicate_failed_trap);
    SET_ADDRESS(_extrs, Runtime1::unimplemented_entry);
    SET_ADDRESS(_extrs, Runtime1::trace_block_entry);
#ifdef X86
    SET_ADDRESS(_extrs, LIR_Assembler::float_signmask_pool);
    SET_ADDRESS(_extrs, LIR_Assembler::double_signmask_pool);
    SET_ADDRESS(_extrs, LIR_Assembler::float_signflip_pool);
    SET_ADDRESS(_extrs, LIR_Assembler::double_signflip_pool);
#endif
#ifndef PRODUCT
    SET_ADDRESS(_extrs, os::breakpoint);
#endif
  }
#endif // COMPILER1

#ifdef COMPILER2
  {
    // Required by C2 blobs
    SET_ADDRESS(_extrs, Deoptimization::uncommon_trap);
    SET_ADDRESS(_extrs, OptoRuntime::handle_exception_C);
    SET_ADDRESS(_extrs, OptoRuntime::new_instance_C);
    SET_ADDRESS(_extrs, OptoRuntime::new_array_C);
    SET_ADDRESS(_extrs, OptoRuntime::new_array_nozero_C);
    SET_ADDRESS(_extrs, OptoRuntime::multianewarray2_C);
    SET_ADDRESS(_extrs, OptoRuntime::multianewarray3_C);
    SET_ADDRESS(_extrs, OptoRuntime::multianewarray4_C);
    SET_ADDRESS(_extrs, OptoRuntime::multianewarray5_C);
    SET_ADDRESS(_extrs, OptoRuntime::multianewarrayN_C);
    SET_ADDRESS(_extrs, OptoRuntime::complete_monitor_locking_C);
    SET_ADDRESS(_extrs, OptoRuntime::monitor_notify_C);
    SET_ADDRESS(_extrs, OptoRuntime::monitor_notifyAll_C);
    SET_ADDRESS(_extrs, OptoRuntime::rethrow_C);
    SET_ADDRESS(_extrs, OptoRuntime::slow_arraycopy_C);
    SET_ADDRESS(_extrs, OptoRuntime::register_finalizer_C);
    SET_ADDRESS(_extrs, OptoRuntime::class_init_barrier_C);
    SET_ADDRESS(_extrs, OptoRuntime::compile_method_C);
    SET_ADDRESS(_extrs, OptoRuntime::vthread_end_first_transition_C);
    SET_ADDRESS(_extrs, OptoRuntime::vthread_start_final_transition_C);
    SET_ADDRESS(_extrs, OptoRuntime::vthread_start_transition_C);
    SET_ADDRESS(_extrs, OptoRuntime::vthread_end_transition_C);
    SET_ADDRESS(_extrs, Parse::trap_stress_counter_address());
#if defined(AMD64)
    // Used by C2 intrinsic
    SET_ADDRESS(_extrs, StubRoutines::x86::arrays_hashcode_powers_of_31());
#endif
  }
#endif // COMPILER2
#if INCLUDE_G1GC
  SET_ADDRESS(_extrs, G1BarrierSetRuntime::write_ref_field_pre_entry);
#endif

#if INCLUDE_SHENANDOAHGC
  SET_ADDRESS(_extrs, ShenandoahRuntime::arraycopy_barrier_oop);
  SET_ADDRESS(_extrs, ShenandoahRuntime::arraycopy_barrier_narrow_oop);
  SET_ADDRESS(_extrs, ShenandoahRuntime::clone_barrier);
  SET_ADDRESS(_extrs, ShenandoahRuntime::load_reference_barrier_strong);
  SET_ADDRESS(_extrs, ShenandoahRuntime::load_reference_barrier_strong_narrow);
  SET_ADDRESS(_extrs, ShenandoahRuntime::load_reference_barrier_weak);
  SET_ADDRESS(_extrs, ShenandoahRuntime::load_reference_barrier_weak_narrow);
  SET_ADDRESS(_extrs, ShenandoahRuntime::load_reference_barrier_phantom);
  SET_ADDRESS(_extrs, ShenandoahRuntime::load_reference_barrier_phantom_narrow);
  SET_ADDRESS(_extrs, ShenandoahRuntime::write_barrier_pre);
#endif

#if INCLUDE_ZGC
  SET_ADDRESS(_extrs, ZBarrierSetRuntime::load_barrier_on_oop_field_preloaded_addr());
  SET_ADDRESS(_extrs, ZBarrierSetRuntime::load_barrier_on_phantom_oop_field_preloaded_addr());
#if defined(AMD64)
  SET_ADDRESS(_extrs, &ZPointerLoadShift);
#endif
#if defined(AARCH64)
  BarrierSetAssembler* bs_asm = BarrierSet::barrier_set()->barrier_set_assembler();
  SET_ADDRESS(_extrs, bs_asm->patching_epoch_addr());
#endif
#endif // INCLUDE_ZGC

  SET_ADDRESS(_extrs, SharedRuntime::rc_trace_method_entry);
  SET_ADDRESS(_extrs, SharedRuntime::reguard_yellow_pages);
  SET_ADDRESS(_extrs, SharedRuntime::dtrace_method_exit);

  SET_ADDRESS(_extrs, SharedRuntime::complete_monitor_unlocking_C);
  SET_ADDRESS(_extrs, SharedRuntime::enable_stack_reserved_zone);
#if defined(AMD64) && !defined(ZERO)
  SET_ADDRESS(_extrs, SharedRuntime::montgomery_multiply);
  SET_ADDRESS(_extrs, SharedRuntime::montgomery_square);
#endif // AMD64
  SET_ADDRESS(_extrs, SharedRuntime::d2f);
  SET_ADDRESS(_extrs, SharedRuntime::d2i);
  SET_ADDRESS(_extrs, SharedRuntime::d2l);
  SET_ADDRESS(_extrs, SharedRuntime::dcos);
  SET_ADDRESS(_extrs, SharedRuntime::dexp);
  SET_ADDRESS(_extrs, SharedRuntime::dlog);
  SET_ADDRESS(_extrs, SharedRuntime::dlog10);
  SET_ADDRESS(_extrs, SharedRuntime::dpow);
  SET_ADDRESS(_extrs, SharedRuntime::dsin);
  SET_ADDRESS(_extrs, SharedRuntime::dtan);
  SET_ADDRESS(_extrs, SharedRuntime::f2i);
  SET_ADDRESS(_extrs, SharedRuntime::f2l);
#ifndef ZERO
  SET_ADDRESS(_extrs, SharedRuntime::drem);
  SET_ADDRESS(_extrs, SharedRuntime::frem);
#endif
  SET_ADDRESS(_extrs, SharedRuntime::l2d);
  SET_ADDRESS(_extrs, SharedRuntime::l2f);
  SET_ADDRESS(_extrs, SharedRuntime::ldiv);
  SET_ADDRESS(_extrs, SharedRuntime::lmul);
  SET_ADDRESS(_extrs, SharedRuntime::lrem);

  SET_ADDRESS(_extrs, ThreadIdentifier::unsafe_offset());
  SET_ADDRESS(_extrs, Thread::current);
  SET_ADDRESS(_extrs, ObjectMonitorTable::current_table_address());

  SET_ADDRESS(_extrs, os::javaTimeMillis);
  SET_ADDRESS(_extrs, os::javaTimeNanos);
  // For JFR
  SET_ADDRESS(_extrs, os::elapsed_counter);
#if defined(X86) && !defined(ZERO)
  SET_ADDRESS(_extrs, Rdtsc::elapsed_counter);
#endif

#if INCLUDE_JVMTI
  SET_ADDRESS(_extrs, &JvmtiExport::_should_notify_object_alloc);
#endif /* INCLUDE_JVMTI */
  SET_ADDRESS(_extrs, MountUnmountDisabler::notify_jvmti_events_address());
  SET_ADDRESS(_extrs, MountUnmountDisabler::global_vthread_transition_disable_count_address());

#ifndef PRODUCT
  SET_ADDRESS(_extrs, &SharedRuntime::_partial_subtype_ctr);
  SET_ADDRESS(_extrs, JavaThread::verify_cross_modify_fence_failure);
#endif

#ifndef ZERO
#if defined(AMD64) || defined(AARCH64) || defined(RISCV64)
  SET_ADDRESS(_extrs, MacroAssembler::debug64);
#endif
#if defined(AARCH64)
  SET_ADDRESS(_extrs, JavaThread::aarch64_get_thread_helper);
#endif
#endif // ZERO

  // addresses of fields in AOT runtime constants area
  address* p = AOTRuntimeConstants::field_addresses_list();
  while (*p != nullptr) {
    SET_ADDRESS(_extrs, *p++);
  }

  _extrs_complete = true;
  log_info(aot, codecache, init)("External addresses recorded");
}
3221
// Guards against re-entrant initialization of the early-stubs portion.
static bool initializing_early_stubs = false;

// Allocate the stubs address range (_stubs) and record the few stub entry
// points that already exist early in VM startup; init_stubs() appends the
// rest later. Guarded against re-entrant calls.
void AOTCodeAddressTable::init_early_stubs() {
  // NOTE(review): this guard tests _complete, not _early_stubs_complete —
  // unlike init_extrs() which tests its own range flag; presumably
  // intentional since init_stubs() continues filling the same array, but
  // confirm.
  if (_complete || initializing_early_stubs) return; // Done already
  initializing_early_stubs = true;
  _stubs_addr = NEW_C_HEAP_ARRAY(address, _stubs_max, mtCode);
  _stubs_length = 0;
  SET_ADDRESS(_stubs, StubRoutines::forward_exception_entry());

  {
    // Required by C1 blobs
#if defined(AMD64) && !defined(ZERO)
    SET_ADDRESS(_stubs, StubRoutines::x86::double_sign_flip());
    SET_ADDRESS(_stubs, StubRoutines::x86::d2l_fixup());
#endif // AMD64
  }

  _early_stubs_complete = true;
  log_info(aot, codecache, init)("Early stubs recorded");
}
3242
// Guards against re-entrant initialization of the shared-blobs range.
static bool initializing_shared_blobs = false;

// Allocate the combined blobs address array (shared + C1 + C2 chunks) and
// record the entry points of the shared runtime blobs. The C1/C2 chunks are
// filled in separately by their compilers' init code.
void AOTCodeAddressTable::init_shared_blobs() {
  if (_complete || initializing_shared_blobs) return; // Done already
  initializing_shared_blobs = true;
  address* blobs_addr = NEW_C_HEAP_ARRAY(address, _blobs_max, mtCode);

  // Divide _shared_blobs_addr array to chunks because they could be initialized in parallel
  _shared_blobs_addr = blobs_addr;
  _C1_blobs_addr = _shared_blobs_addr + _shared_blobs_max;// C1 blobs addresses stored after shared blobs
  _C2_blobs_addr = _C1_blobs_addr + _C1_blobs_max; // C2 blobs addresses stored after C1 blobs

  _shared_blobs_length = 0;
  _C1_blobs_length = 0;
  _C2_blobs_length = 0;

  // clear the address table
  memset(blobs_addr, 0, sizeof(address)* _blobs_max);

  // Record addresses of generated code blobs
  SET_ADDRESS(_shared_blobs, SharedRuntime::get_handle_wrong_method_stub());
  SET_ADDRESS(_shared_blobs, SharedRuntime::get_ic_miss_stub());
  SET_ADDRESS(_shared_blobs, SharedRuntime::deopt_blob()->unpack());
  SET_ADDRESS(_shared_blobs, SharedRuntime::deopt_blob()->unpack_with_exception());
  SET_ADDRESS(_shared_blobs, SharedRuntime::deopt_blob()->unpack_with_reexecution());
  SET_ADDRESS(_shared_blobs, SharedRuntime::deopt_blob()->unpack_with_exception_in_tls());
  SET_ADDRESS(_shared_blobs, SharedRuntime::get_resolve_opt_virtual_call_stub());
  SET_ADDRESS(_shared_blobs, SharedRuntime::get_resolve_virtual_call_stub());
  SET_ADDRESS(_shared_blobs, SharedRuntime::get_resolve_static_call_stub());
  SET_ADDRESS(_shared_blobs, SharedRuntime::deopt_blob()->entry_point());
  SET_ADDRESS(_shared_blobs, SharedRuntime::polling_page_safepoint_handler_blob()->entry_point());
  SET_ADDRESS(_shared_blobs, SharedRuntime::polling_page_return_handler_blob()->entry_point());
#ifdef COMPILER2
  // polling_page_vectors_safepoint_handler_blob can be nullptr if AVX feature is not present or is disabled
  if (SharedRuntime::polling_page_vectors_safepoint_handler_blob() != nullptr) {
    SET_ADDRESS(_shared_blobs, SharedRuntime::polling_page_vectors_safepoint_handler_blob()->entry_point());
  }
#endif
#if INCLUDE_JVMCI
  if (EnableJVMCI) {
    SET_ADDRESS(_shared_blobs, SharedRuntime::deopt_blob()->uncommon_trap());
    SET_ADDRESS(_shared_blobs, SharedRuntime::deopt_blob()->implicit_exception_uncommon_trap());
  }
#endif
  SET_ADDRESS(_shared_blobs, SharedRuntime::throw_AbstractMethodError_entry());
  SET_ADDRESS(_shared_blobs, SharedRuntime::throw_IncompatibleClassChangeError_entry());
  SET_ADDRESS(_shared_blobs, SharedRuntime::throw_NullPointerException_at_call_entry());
  SET_ADDRESS(_shared_blobs, SharedRuntime::throw_StackOverflowError_entry());
  SET_ADDRESS(_shared_blobs, SharedRuntime::throw_delayed_StackOverflowError_entry());

  assert(_shared_blobs_length <= _shared_blobs_max, "increase _shared_blobs_max to %d", _shared_blobs_length);
  _shared_blobs_complete = true;
  log_info(aot, codecache, init)("All shared blobs recorded");
}
3297
3298 static bool initializing_stubs = false;
3299 void AOTCodeAddressTable::init_stubs() {
3300 if (_complete || initializing_stubs) return; // Done already
3301 assert(_early_stubs_complete, "early stubs whould be initialized");
3302 initializing_stubs = true;
3303
3304 // Stubs
3305 SET_ADDRESS(_stubs, StubRoutines::method_entry_barrier());
3306 SET_ADDRESS(_stubs, StubRoutines::atomic_xchg_entry());
3307 SET_ADDRESS(_stubs, StubRoutines::atomic_cmpxchg_entry());
3308 SET_ADDRESS(_stubs, StubRoutines::atomic_cmpxchg_long_entry());
3309 SET_ADDRESS(_stubs, StubRoutines::atomic_add_entry());
3310 SET_ADDRESS(_stubs, StubRoutines::fence_entry());
3311
3312 SET_ADDRESS(_stubs, StubRoutines::cont_thaw());
3313 SET_ADDRESS(_stubs, StubRoutines::cont_returnBarrier());
3314 SET_ADDRESS(_stubs, StubRoutines::cont_returnBarrierExc());
3315
3316 JFR_ONLY(SET_ADDRESS(_stubs, SharedRuntime::jfr_write_checkpoint());)
3317
3318 SET_ADDRESS(_stubs, StubRoutines::jbyte_arraycopy());
3319 SET_ADDRESS(_stubs, StubRoutines::jshort_arraycopy());
3320 SET_ADDRESS(_stubs, StubRoutines::jint_arraycopy());
3321 SET_ADDRESS(_stubs, StubRoutines::jlong_arraycopy());
3322 SET_ADDRESS(_stubs, StubRoutines::_oop_arraycopy);
3323 SET_ADDRESS(_stubs, StubRoutines::_oop_arraycopy_uninit);
3324
3325 SET_ADDRESS(_stubs, StubRoutines::jbyte_disjoint_arraycopy());
3326 SET_ADDRESS(_stubs, StubRoutines::jshort_disjoint_arraycopy());
3327 SET_ADDRESS(_stubs, StubRoutines::jint_disjoint_arraycopy());
3328 SET_ADDRESS(_stubs, StubRoutines::jlong_disjoint_arraycopy());
3329 SET_ADDRESS(_stubs, StubRoutines::_oop_disjoint_arraycopy);
3330 SET_ADDRESS(_stubs, StubRoutines::_oop_disjoint_arraycopy_uninit);
3331
3332 SET_ADDRESS(_stubs, StubRoutines::arrayof_jbyte_arraycopy());
3333 SET_ADDRESS(_stubs, StubRoutines::arrayof_jshort_arraycopy());
3334 SET_ADDRESS(_stubs, StubRoutines::arrayof_jint_arraycopy());
3335 SET_ADDRESS(_stubs, StubRoutines::arrayof_jlong_arraycopy());
3336 SET_ADDRESS(_stubs, StubRoutines::_arrayof_oop_arraycopy);
3337 SET_ADDRESS(_stubs, StubRoutines::_arrayof_oop_arraycopy_uninit);
3338
3339 SET_ADDRESS(_stubs, StubRoutines::arrayof_jbyte_disjoint_arraycopy());
3340 SET_ADDRESS(_stubs, StubRoutines::arrayof_jshort_disjoint_arraycopy());
3341 SET_ADDRESS(_stubs, StubRoutines::arrayof_jint_disjoint_arraycopy());
3342 SET_ADDRESS(_stubs, StubRoutines::arrayof_jlong_disjoint_arraycopy());
3343 SET_ADDRESS(_stubs, StubRoutines::_arrayof_oop_disjoint_arraycopy);
3344 SET_ADDRESS(_stubs, StubRoutines::_arrayof_oop_disjoint_arraycopy_uninit);
3345
3346 SET_ADDRESS(_stubs, StubRoutines::_checkcast_arraycopy);
3347 SET_ADDRESS(_stubs, StubRoutines::_checkcast_arraycopy_uninit);
3348
3349 SET_ADDRESS(_stubs, StubRoutines::unsafe_arraycopy());
3350 SET_ADDRESS(_stubs, StubRoutines::generic_arraycopy());
3351
3352 SET_ADDRESS(_stubs, StubRoutines::jbyte_fill());
3353 SET_ADDRESS(_stubs, StubRoutines::jshort_fill());
3354 SET_ADDRESS(_stubs, StubRoutines::jint_fill());
3355 SET_ADDRESS(_stubs, StubRoutines::arrayof_jbyte_fill());
3356 SET_ADDRESS(_stubs, StubRoutines::arrayof_jshort_fill());
3357 SET_ADDRESS(_stubs, StubRoutines::arrayof_jint_fill());
3358
3359 SET_ADDRESS(_stubs, StubRoutines::data_cache_writeback());
3360 SET_ADDRESS(_stubs, StubRoutines::data_cache_writeback_sync());
3361
3362 SET_ADDRESS(_stubs, StubRoutines::aescrypt_encryptBlock());
3363 SET_ADDRESS(_stubs, StubRoutines::aescrypt_decryptBlock());
3364 SET_ADDRESS(_stubs, StubRoutines::cipherBlockChaining_encryptAESCrypt());
3365 SET_ADDRESS(_stubs, StubRoutines::cipherBlockChaining_decryptAESCrypt());
3366 SET_ADDRESS(_stubs, StubRoutines::electronicCodeBook_encryptAESCrypt());
3367 SET_ADDRESS(_stubs, StubRoutines::electronicCodeBook_decryptAESCrypt());
3368 SET_ADDRESS(_stubs, StubRoutines::poly1305_processBlocks());
3369 SET_ADDRESS(_stubs, StubRoutines::counterMode_AESCrypt());
3370 SET_ADDRESS(_stubs, StubRoutines::ghash_processBlocks());
3371 SET_ADDRESS(_stubs, StubRoutines::chacha20Block());
3372 SET_ADDRESS(_stubs, StubRoutines::base64_encodeBlock());
3373 SET_ADDRESS(_stubs, StubRoutines::base64_decodeBlock());
3374 SET_ADDRESS(_stubs, StubRoutines::md5_implCompress());
3375 SET_ADDRESS(_stubs, StubRoutines::md5_implCompressMB());
3376 SET_ADDRESS(_stubs, StubRoutines::sha1_implCompress());
3377 SET_ADDRESS(_stubs, StubRoutines::sha1_implCompressMB());
3378 SET_ADDRESS(_stubs, StubRoutines::sha256_implCompress());
3379 SET_ADDRESS(_stubs, StubRoutines::sha256_implCompressMB());
3380 SET_ADDRESS(_stubs, StubRoutines::sha512_implCompress());
3381 SET_ADDRESS(_stubs, StubRoutines::sha512_implCompressMB());
3382 SET_ADDRESS(_stubs, StubRoutines::sha3_implCompress());
3383 SET_ADDRESS(_stubs, StubRoutines::sha3_implCompressMB());
3384 SET_ADDRESS(_stubs, StubRoutines::double_keccak());
3385 SET_ADDRESS(_stubs, StubRoutines::intpoly_assign());
3386 SET_ADDRESS(_stubs, StubRoutines::intpoly_montgomeryMult_P256());
3387 SET_ADDRESS(_stubs, StubRoutines::dilithiumAlmostNtt());
3388 SET_ADDRESS(_stubs, StubRoutines::dilithiumAlmostInverseNtt());
3389 SET_ADDRESS(_stubs, StubRoutines::dilithiumNttMult());
3390 SET_ADDRESS(_stubs, StubRoutines::dilithiumMontMulByConstant());
3391 SET_ADDRESS(_stubs, StubRoutines::dilithiumDecomposePoly());
3392 SET_ADDRESS(_stubs, StubRoutines::kyber12To16());
3393
3394 SET_ADDRESS(_stubs, StubRoutines::updateBytesCRC32());
3395 SET_ADDRESS(_stubs, StubRoutines::updateBytesCRC32C());
3396 SET_ADDRESS(_stubs, StubRoutines::updateBytesAdler32());
3397
3398 SET_ADDRESS(_stubs, StubRoutines::multiplyToLen());
3399 SET_ADDRESS(_stubs, StubRoutines::squareToLen());
3400 SET_ADDRESS(_stubs, StubRoutines::mulAdd());
3401 SET_ADDRESS(_stubs, StubRoutines::montgomeryMultiply());
3402 SET_ADDRESS(_stubs, StubRoutines::montgomerySquare());
3403 SET_ADDRESS(_stubs, StubRoutines::bigIntegerRightShift());
3404 SET_ADDRESS(_stubs, StubRoutines::bigIntegerLeftShift());
3405 SET_ADDRESS(_stubs, StubRoutines::galoisCounterMode_AESCrypt());
3406
3407 SET_ADDRESS(_stubs, StubRoutines::vectorizedMismatch());
3408
3409 SET_ADDRESS(_stubs, StubRoutines::unsafe_setmemory());
3410
3411 SET_ADDRESS(_stubs, StubRoutines::dexp());
3412 SET_ADDRESS(_stubs, StubRoutines::dlog());
3413 SET_ADDRESS(_stubs, StubRoutines::dlog10());
3414 SET_ADDRESS(_stubs, StubRoutines::dpow());
3415 SET_ADDRESS(_stubs, StubRoutines::dsin());
3416 SET_ADDRESS(_stubs, StubRoutines::dcos());
3417 SET_ADDRESS(_stubs, StubRoutines::dlibm_reduce_pi04l());
3418 SET_ADDRESS(_stubs, StubRoutines::dlibm_sin_cos_huge());
3419 SET_ADDRESS(_stubs, StubRoutines::dlibm_tan_cot_huge());
3420 SET_ADDRESS(_stubs, StubRoutines::dtan());
3421
3422 SET_ADDRESS(_stubs, StubRoutines::f2hf_adr());
3423 SET_ADDRESS(_stubs, StubRoutines::hf2f_adr());
3424
3425 for (int slot = 0; slot < Klass::SECONDARY_SUPERS_TABLE_SIZE; slot++) {
3426 SET_ADDRESS(_stubs, StubRoutines::lookup_secondary_supers_table_stub(slot));
3427 }
3428 SET_ADDRESS(_stubs, StubRoutines::lookup_secondary_supers_table_slow_path_stub());
3429
3430 #if defined(AMD64) && !defined(ZERO)
3431 SET_ADDRESS(_stubs, StubRoutines::x86::d2i_fixup());
3432 SET_ADDRESS(_stubs, StubRoutines::x86::f2i_fixup());
3433 SET_ADDRESS(_stubs, StubRoutines::x86::f2l_fixup());
3434 SET_ADDRESS(_stubs, StubRoutines::x86::float_sign_mask());
3435 SET_ADDRESS(_stubs, StubRoutines::x86::float_sign_flip());
3436 SET_ADDRESS(_stubs, StubRoutines::x86::double_sign_mask());
3437 SET_ADDRESS(_stubs, StubRoutines::x86::vector_popcount_lut());
3438 SET_ADDRESS(_stubs, StubRoutines::x86::vector_float_sign_mask());
3439 SET_ADDRESS(_stubs, StubRoutines::x86::vector_float_sign_flip());
3440 SET_ADDRESS(_stubs, StubRoutines::x86::vector_double_sign_mask());
3441 SET_ADDRESS(_stubs, StubRoutines::x86::vector_double_sign_flip());
3442 SET_ADDRESS(_stubs, StubRoutines::x86::vector_int_shuffle_mask());
3443 SET_ADDRESS(_stubs, StubRoutines::x86::vector_byte_shuffle_mask());
3444 SET_ADDRESS(_stubs, StubRoutines::x86::vector_short_shuffle_mask());
3445 SET_ADDRESS(_stubs, StubRoutines::x86::vector_long_shuffle_mask());
3446 SET_ADDRESS(_stubs, StubRoutines::x86::vector_long_sign_mask());
3447 SET_ADDRESS(_stubs, StubRoutines::x86::vector_int_to_byte_mask());
3448 SET_ADDRESS(_stubs, StubRoutines::x86::vector_int_to_short_mask());
3449 SET_ADDRESS(_stubs, StubRoutines::x86::vector_reverse_byte_perm_mask_int());
3450 SET_ADDRESS(_stubs, StubRoutines::x86::vector_reverse_byte_perm_mask_short());
3451 SET_ADDRESS(_stubs, StubRoutines::x86::vector_reverse_byte_perm_mask_long());
3452 // The iota indices are ordered by type B/S/I/L/F/D, and the offset between two types is 64.
3453 // See C2_MacroAssembler::load_iota_indices().
3454 for (int i = 0; i < 6; i++) {
3455 SET_ADDRESS(_stubs, StubRoutines::x86::vector_iota_indices() + i * 64);
3456 }
3457 #ifdef COMPILER2
3458 for (int i = 0; i < 4; i++) {
3459 SET_ADDRESS(_stubs, StubRoutines::_string_indexof_array[i]);
3460 }
3461 #endif
3462 #endif
3463 #if defined(AARCH64) && !defined(ZERO)
3464 SET_ADDRESS(_stubs, StubRoutines::aarch64::zero_blocks());
3465 SET_ADDRESS(_stubs, StubRoutines::aarch64::count_positives());
3466 SET_ADDRESS(_stubs, StubRoutines::aarch64::count_positives_long());
3467 SET_ADDRESS(_stubs, StubRoutines::aarch64::large_array_equals());
3468 SET_ADDRESS(_stubs, StubRoutines::aarch64::compare_long_string_LL());
3469 SET_ADDRESS(_stubs, StubRoutines::aarch64::compare_long_string_UU());
3470 SET_ADDRESS(_stubs, StubRoutines::aarch64::compare_long_string_LU());
3471 SET_ADDRESS(_stubs, StubRoutines::aarch64::compare_long_string_UL());
3472 SET_ADDRESS(_stubs, StubRoutines::aarch64::string_indexof_linear_ul());
3473 SET_ADDRESS(_stubs, StubRoutines::aarch64::string_indexof_linear_ll());
3474 SET_ADDRESS(_stubs, StubRoutines::aarch64::string_indexof_linear_uu());
3475 SET_ADDRESS(_stubs, StubRoutines::aarch64::large_byte_array_inflate());
3476 SET_ADDRESS(_stubs, StubRoutines::aarch64::spin_wait());
3477
3478 SET_ADDRESS(_stubs, StubRoutines::aarch64::large_arrays_hashcode(T_BOOLEAN));
3479 SET_ADDRESS(_stubs, StubRoutines::aarch64::large_arrays_hashcode(T_BYTE));
3480 SET_ADDRESS(_stubs, StubRoutines::aarch64::large_arrays_hashcode(T_SHORT));
3481 SET_ADDRESS(_stubs, StubRoutines::aarch64::large_arrays_hashcode(T_CHAR));
3482 SET_ADDRESS(_stubs, StubRoutines::aarch64::large_arrays_hashcode(T_INT));
3483 #endif
3484
3485 _complete = true;
3486 log_info(aot, codecache, init)("Stubs recorded");
3487 }
3488
// Record entry points of the C1 runtime blobs from the start of the C1 stub
// group up to (and including) forward_exception. These are the blobs needed
// earliest; init_c1() records the rest. Ids are assigned by iteration order,
// so the published range must match between dump and load.
void AOTCodeAddressTable::init_early_c1() {
#ifdef COMPILER1
  // Runtime1 Blobs
  StubId id = StubInfo::stub_base(StubGroup::C1);
  // include forward_exception in range we publish
  StubId limit = StubInfo::next(StubId::c1_forward_exception_id);
  for (; id != limit; id = StubInfo::next(id)) {
    // A blob may not exist in the current configuration; log and skip it
    // so the id<->address mapping stays aligned with what was generated.
    if (Runtime1::blob_for(id) == nullptr) {
      log_info(aot, codecache, init)("C1 blob %s is missing", Runtime1::name_for(id));
      continue;
    }
    if (Runtime1::entry_for(id) == nullptr) {
      log_info(aot, codecache, init)("C1 blob %s is missing entry", Runtime1::name_for(id));
      continue;
    }
    address entry = Runtime1::entry_for(id);
    SET_ADDRESS(_C1_blobs, entry);
  }
#endif // COMPILER1
  assert(_C1_blobs_length <= _C1_blobs_max, "increase _C1_blobs_max to %d", _C1_blobs_length);
  _early_c1_complete = true;
}
3511
// Record the remaining C1 runtime blob entry points (everything after
// forward_exception, which init_early_c1() already published), plus the
// GC-specific C1 barrier runtime stubs for the active collector.
void AOTCodeAddressTable::init_c1() {
#ifdef COMPILER1
  // Runtime1 Blobs
  assert(_early_c1_complete, "early C1 blobs should be initialized");
  // Resume iteration exactly where init_early_c1() stopped.
  StubId id = StubInfo::next(StubId::c1_forward_exception_id);
  StubId limit = StubInfo::next(StubInfo::stub_max(StubGroup::C1));
  for (; id != limit; id = StubInfo::next(id)) {
    // Missing blobs/entries are logged and skipped (see init_early_c1).
    if (Runtime1::blob_for(id) == nullptr) {
      log_info(aot, codecache, init)("C1 blob %s is missing", Runtime1::name_for(id));
      continue;
    }
    if (Runtime1::entry_for(id) == nullptr) {
      log_info(aot, codecache, init)("C1 blob %s is missing entry", Runtime1::name_for(id));
      continue;
    }
    address entry = Runtime1::entry_for(id);
    SET_ADDRESS(_C1_blobs, entry);
  }
  // GC barrier stubs: only the stubs of the GC selected at runtime exist,
  // so registration is conditional on the active collector.
#if INCLUDE_G1GC
  if (UseG1GC) {
    G1BarrierSetC1* bs = (G1BarrierSetC1*)BarrierSet::barrier_set()->barrier_set_c1();
    address entry = bs->pre_barrier_c1_runtime_code_blob()->code_begin();
    SET_ADDRESS(_C1_blobs, entry);
  }
#endif // INCLUDE_G1GC
#if INCLUDE_ZGC
  if (UseZGC) {
    ZBarrierSetC1* bs = (ZBarrierSetC1*)BarrierSet::barrier_set()->barrier_set_c1();
    SET_ADDRESS(_C1_blobs, bs->_load_barrier_on_oop_field_preloaded_runtime_stub);
    SET_ADDRESS(_C1_blobs, bs->_load_barrier_on_weak_oop_field_preloaded_runtime_stub);
    SET_ADDRESS(_C1_blobs, bs->_store_barrier_on_oop_field_with_healing);
    SET_ADDRESS(_C1_blobs, bs->_store_barrier_on_oop_field_without_healing);
  }
#endif // INCLUDE_ZGC
#if INCLUDE_SHENANDOAHGC
  if (UseShenandoahGC) {
    ShenandoahBarrierSetC1* bs = (ShenandoahBarrierSetC1*)BarrierSet::barrier_set()->barrier_set_c1();
    SET_ADDRESS(_C1_blobs, bs->pre_barrier_c1_runtime_code_blob()->code_begin());
    SET_ADDRESS(_C1_blobs, bs->load_reference_barrier_strong_rt_code_blob()->code_begin());
    SET_ADDRESS(_C1_blobs, bs->load_reference_barrier_strong_native_rt_code_blob()->code_begin());
    SET_ADDRESS(_C1_blobs, bs->load_reference_barrier_weak_rt_code_blob()->code_begin());
    SET_ADDRESS(_C1_blobs, bs->load_reference_barrier_phantom_rt_code_blob()->code_begin());
  }
#endif // INCLUDE_SHENANDOAHGC
#endif // COMPILER1

  assert(_C1_blobs_length <= _C1_blobs_max, "increase _C1_blobs_max to %d", _C1_blobs_length);
  _c1_complete = true;
  log_info(aot, codecache, init)("Runtime1 Blobs recorded");
}
3562
// Record the entry points of the C2 (OptoRuntime) blobs in the address
// table. Ids are assigned by call order of SET_ADDRESS, so this sequence
// must never change between dump and load of an AOT code cache.
void AOTCodeAddressTable::init_c2() {
#ifdef COMPILER2
  // OptoRuntime Blobs
  SET_ADDRESS(_C2_blobs, OptoRuntime::uncommon_trap_blob()->entry_point());
  SET_ADDRESS(_C2_blobs, OptoRuntime::exception_blob()->entry_point());
  SET_ADDRESS(_C2_blobs, OptoRuntime::new_instance_Java());
  SET_ADDRESS(_C2_blobs, OptoRuntime::new_array_Java());
  SET_ADDRESS(_C2_blobs, OptoRuntime::new_array_nozero_Java());
  SET_ADDRESS(_C2_blobs, OptoRuntime::multianewarray2_Java());
  SET_ADDRESS(_C2_blobs, OptoRuntime::multianewarray3_Java());
  SET_ADDRESS(_C2_blobs, OptoRuntime::multianewarray4_Java());
  SET_ADDRESS(_C2_blobs, OptoRuntime::multianewarray5_Java());
  SET_ADDRESS(_C2_blobs, OptoRuntime::multianewarrayN_Java());
  SET_ADDRESS(_C2_blobs, OptoRuntime::vtable_must_compile_stub());
  SET_ADDRESS(_C2_blobs, OptoRuntime::complete_monitor_locking_Java());
  SET_ADDRESS(_C2_blobs, OptoRuntime::monitor_notify_Java());
  SET_ADDRESS(_C2_blobs, OptoRuntime::monitor_notifyAll_Java());
  SET_ADDRESS(_C2_blobs, OptoRuntime::rethrow_stub());
  SET_ADDRESS(_C2_blobs, OptoRuntime::slow_arraycopy_Java());
  SET_ADDRESS(_C2_blobs, OptoRuntime::register_finalizer_Java());
  SET_ADDRESS(_C2_blobs, OptoRuntime::class_init_barrier_Java());
  SET_ADDRESS(_C2_blobs, OptoRuntime::compile_method_Java());
#if INCLUDE_JVMTI
  // Virtual-thread JVMTI transition entry points.
  SET_ADDRESS(_C2_blobs, OptoRuntime::vthread_end_first_transition_Java());
  SET_ADDRESS(_C2_blobs, OptoRuntime::vthread_start_final_transition_Java());
  SET_ADDRESS(_C2_blobs, OptoRuntime::vthread_start_transition_Java());
  SET_ADDRESS(_C2_blobs, OptoRuntime::vthread_end_transition_Java());
#endif /* INCLUDE_JVMTI */
#endif

  assert(_C2_blobs_length <= _C2_blobs_max, "increase _C2_blobs_max to %d", _C2_blobs_length);
  _c2_complete = true;
  log_info(aot, codecache, init)("OptoRuntime Blobs recorded");
}
3597 #undef SET_ADDRESS
3598
// Free the C-heap address arrays owned by this table.
// NOTE(review): _C1_blobs_addr and _C2_blobs_addr are not freed here --
// presumably they point into the _shared_blobs_addr allocation (or are
// owned elsewhere); confirm against the table's constructor.
AOTCodeAddressTable::~AOTCodeAddressTable() {
  if (_extrs_addr != nullptr) {
    FREE_C_HEAP_ARRAY(address, _extrs_addr);
  }
  if (_stubs_addr != nullptr) {
    FREE_C_HEAP_ARRAY(address, _stubs_addr);
  }
  if (_shared_blobs_addr != nullptr) {
    FREE_C_HEAP_ARRAY(address, _shared_blobs_addr);
  }
}
3610
// Capacity of the C-string tables below. Debug builds allow more strings
// (presumably for extra diagnostic names -- TODO confirm); the asserts in
// add_C_string() catch overflow.
#ifdef PRODUCT
#define MAX_STR_COUNT 200
#else
#define MAX_STR_COUNT 500
#endif
#define _c_str_max MAX_STR_COUNT
// C-string ids start right after all the address-table id ranges.
static const int _c_str_base = _all_max;

static const char* _C_strings_in[MAX_STR_COUNT] = {nullptr}; // Incoming strings
static const char* _C_strings[MAX_STR_COUNT] = {nullptr};    // Our duplicates
static int _C_strings_count = 0;                             // Number of recorded (deduplicated) strings
static int _C_strings_s[MAX_STR_COUNT] = {0};                // Store order -> index into _C_strings
static int _C_strings_id[MAX_STR_COUNT] = {0};               // Index into _C_strings -> store id (-1 if unused)
static int _C_strings_used = 0;                              // Number of strings actually referenced/stored
3625
// Load the cached C strings from the AOT code cache into a C-heap buffer
// and rebuild the in-memory string tables. Cache layout: an array of
// per-string lengths (uint each) followed by the concatenated
// NUL-terminated strings.
void AOTCodeCache::load_strings() {
  uint strings_count = _load_header->strings_count();
  if (strings_count == 0) {
    return;
  }
  uint strings_offset = _load_header->strings_offset();
  uint* string_lengths = (uint*)addr(strings_offset);
  // Skip past the length table to the string data itself.
  strings_offset += (strings_count * sizeof(uint));
  uint strings_size = _load_header->search_table_offset() - strings_offset;
  // We have to keep cached strings longer than _cache buffer
  // because they are referenced from compiled code which may
  // still be executed on VM exit after _cache is freed.
  char* p = NEW_C_HEAP_ARRAY(char, strings_size+1, mtCode);
  memcpy(p, addr(strings_offset), strings_size);
  _C_strings_buf = p;
  assert(strings_count <= MAX_STR_COUNT, "sanity");
  // Walk the buffer, recording the start of each string; with loaded
  // strings the store order and id are identity mappings.
  for (uint i = 0; i < strings_count; i++) {
    _C_strings[i] = p;
    uint len = string_lengths[i];
    _C_strings_s[i] = i;
    _C_strings_id[i] = i;
    p += len;
  }
  assert((uint)(p - _C_strings_buf) <= strings_size, "(" INTPTR_FORMAT " - " INTPTR_FORMAT ") = %d > %d ", p2i(p), p2i(_C_strings_buf), (uint)(p - _C_strings_buf), strings_size);
  _C_strings_count = strings_count;
  _C_strings_used = strings_count;
  log_debug(aot, codecache, init)(" Loaded %d C strings of total length %d at offset %d from AOT Code Cache", _C_strings_count, strings_size, strings_offset);
}
3654
3655 int AOTCodeCache::store_strings() {
3656 if (_C_strings_used > 0) {
3657 MutexLocker ml(AOTCodeCStrings_lock, Mutex::_no_safepoint_check_flag);
3658 uint offset = _write_position;
3659 uint length = 0;
3660 uint* lengths = (uint *)reserve_bytes(sizeof(uint) * _C_strings_used);
3661 if (lengths == nullptr) {
3662 return -1;
3663 }
3664 for (int i = 0; i < _C_strings_used; i++) {
3665 const char* str = _C_strings[_C_strings_s[i]];
3666 uint len = (uint)strlen(str) + 1;
3667 length += len;
3668 assert(len < 1000, "big string: %s", str);
3669 lengths[i] = len;
3670 uint n = write_bytes(str, len);
3671 if (n != len) {
3672 return -1;
3673 }
3674 }
3675 log_debug(aot, codecache, exit)(" Wrote %d C strings of total length %d at offset %d to AOT Code Cache",
3676 _C_strings_used, length, offset);
3677 }
3678 return _C_strings_used;
3679 }
3680
3681 const char* AOTCodeCache::add_C_string(const char* str) {
3682 if (is_on_for_dump() && str != nullptr) {
3683 MutexLocker ml(AOTCodeCStrings_lock, Mutex::_no_safepoint_check_flag);
3684 AOTCodeAddressTable* table = addr_table();
3685 if (table != nullptr) {
3686 return table->add_C_string(str);
3687 }
3688 }
3689 return str;
3690 }
3691
3692 const char* AOTCodeAddressTable::add_C_string(const char* str) {
3693 if (_extrs_complete) {
3694 // Check previous strings address
3695 for (int i = 0; i < _C_strings_count; i++) {
3696 if (_C_strings_in[i] == str) {
3697 return _C_strings[i]; // Found previous one - return our duplicate
3698 } else if (strcmp(_C_strings[i], str) == 0) {
3699 return _C_strings[i];
3700 }
3701 }
3702 // Add new one
3703 if (_C_strings_count < MAX_STR_COUNT) {
3704 // Passed in string can be freed and used space become inaccessible.
3705 // Keep original address but duplicate string for future compare.
3706 _C_strings_id[_C_strings_count] = -1; // Init
3707 _C_strings_in[_C_strings_count] = str;
3708 const char* dup = os::strdup(str);
3709 _C_strings[_C_strings_count++] = dup;
3710 log_trace(aot, codecache, stringtable)("add_C_string: [%d] " INTPTR_FORMAT " '%s'", _C_strings_count, p2i(dup), dup);
3711 return dup;
3712 } else {
3713 assert(false, "Number of C strings >= MAX_STR_COUNT");
3714 }
3715 }
3716 return str;
3717 }
3718
3719 int AOTCodeAddressTable::id_for_C_string(address str) {
3720 if (str == nullptr) {
3721 return -1;
3722 }
3723 MutexLocker ml(AOTCodeCStrings_lock, Mutex::_no_safepoint_check_flag);
3724 for (int i = 0; i < _C_strings_count; i++) {
3725 if (_C_strings[i] == (const char*)str) { // found
3726 int id = _C_strings_id[i];
3727 if (id >= 0) {
3728 assert(id < _C_strings_used, "%d >= %d", id , _C_strings_used);
3729 return id; // Found recorded
3730 }
3731 // Not found in recorded, add new
3732 id = _C_strings_used++;
3733 _C_strings_s[id] = i;
3734 _C_strings_id[i] = id;
3735 return id;
3736 }
3737 }
3738 return -1;
3739 }
3740
3741 address AOTCodeAddressTable::address_for_C_string(int idx) {
3742 assert(idx < _C_strings_count, "sanity");
3743 return (address)_C_strings[idx];
3744 }
3745
3746 static int search_address(address addr, address* table, uint length) {
3747 for (int i = 0; i < (int)length; i++) {
3748 if (table[i] == addr) {
3749 return i;
3750 }
3751 }
3752 return BAD_ADDRESS_ID;
3753 }
3754
3755 address AOTCodeAddressTable::address_for_id(int idx) {
3756 assert(_extrs_complete, "AOT Code Cache VM runtime addresses table is not complete");
3757 if (idx == -1) {
3758 return (address)-1;
3759 }
3760 uint id = (uint)idx;
3761 // special case for symbols based relative to os::init
3762 if (id > (_c_str_base + _c_str_max)) {
3763 return (address)os::init + idx;
3764 }
3765 if (idx < 0) {
3766 fatal("Incorrect id %d for AOT Code Cache addresses table", id);
3767 return nullptr;
3768 }
3769 // no need to compare unsigned id against 0
3770 if (/* id >= _extrs_base && */ id < _extrs_length) {
3771 return _extrs_addr[id - _extrs_base];
3772 }
3773 if (id >= _stubs_base && id < _stubs_base + _stubs_length) {
3774 return _stubs_addr[id - _stubs_base];
3775 }
3776 if (id >= _stubs_base && id < _stubs_base + _stubs_length) {
3777 return _stubs_addr[id - _stubs_base];
3778 }
3779 if (id >= _shared_blobs_base && id < _shared_blobs_base + _shared_blobs_length) {
3780 return _shared_blobs_addr[id - _shared_blobs_base];
3781 }
3782 if (id >= _C1_blobs_base && id < _C1_blobs_base + _C1_blobs_length) {
3783 return _C1_blobs_addr[id - _C1_blobs_base];
3784 }
3785 if (id >= _C1_blobs_base && id < _C1_blobs_base + _C1_blobs_length) {
3786 return _C1_blobs_addr[id - _C1_blobs_base];
3787 }
3788 if (id >= _C2_blobs_base && id < _C2_blobs_base + _C2_blobs_length) {
3789 return _C2_blobs_addr[id - _C2_blobs_base];
3790 }
3791 if (id >= _c_str_base && id < (_c_str_base + (uint)_C_strings_count)) {
3792 return address_for_C_string(id - _c_str_base);
3793 }
3794 fatal("Incorrect id %d for AOT Code Cache addresses table", id);
3795 return nullptr;
3796 }
3797
// Translate a runtime address referenced from code being dumped into a
// stable id for the AOT cache. Search order: C strings, then stubs (when
// addr is inside StubRoutines), then shared/C1/C2 blobs, then external
// runtime symbols; a symbol found only via dlsym at a nonzero offset is
// encoded as its distance from os::init. Returns -1 for the static call
// stub's self-jump sentinel; asserts (debug) when an address is missing
// from the table.
int AOTCodeAddressTable::id_for_address(address addr, RelocIterator reloc, CodeBlob* blob) {
  assert(_extrs_complete, "AOT Code Cache VM runtime addresses table is not complete");
  int id = -1;
  if (addr == (address)-1) { // Static call stub has jump to itself
    return id;
  }
  // Check card_table_base address first since it can point to any address
  BarrierSet* bs = BarrierSet::barrier_set();
  bool is_const_card_table_base = !UseG1GC && !UseShenandoahGC && bs->is_a(BarrierSet::CardTableBarrierSet);
  guarantee(!is_const_card_table_base || addr != ci_card_table_address_const(), "sanity");

  // Search for C string
  id = id_for_C_string(addr);
  if (id >= 0) {
    return id + _c_str_base;
  }
  if (StubRoutines::contains(addr)) {
    // Search in stubs
    id = search_address(addr, _stubs_addr, _stubs_length);
    if (id == BAD_ADDRESS_ID) {
      // Produce a helpful diagnostic name before asserting.
      StubCodeDesc* desc = StubCodeDesc::desc_for(addr);
      if (desc == nullptr) {
        desc = StubCodeDesc::desc_for(addr + frame::pc_return_offset);
      }
      const char* sub_name = (desc != nullptr) ? desc->name() : "<unknown>";
      assert(false, "Address " INTPTR_FORMAT " for Stub:%s is missing in AOT Code Cache addresses table", p2i(addr), sub_name);
    } else {
      return _stubs_base + id;
    }
  } else {
    CodeBlob* cb = CodeCache::find_blob(addr);
    if (cb != nullptr) {
      int id_base = _shared_blobs_base;
      // Search in code blobs
      id = search_address(addr, _shared_blobs_addr, _shared_blobs_length);
      if (id == BAD_ADDRESS_ID) {
        id_base = _C1_blobs_base;
        // search C1 blobs
        id = search_address(addr, _C1_blobs_addr, _C1_blobs_length);
      }
      if (id == BAD_ADDRESS_ID) {
        id_base = _C2_blobs_base;
        // search C2 blobs
        id = search_address(addr, _C2_blobs_addr, _C2_blobs_length);
      }
      if (id == BAD_ADDRESS_ID) {
        assert(false, "Address " INTPTR_FORMAT " for Blob:%s is missing in AOT Code Cache addresses table", p2i(addr), cb->name());
      } else {
        return id_base + id;
      }
    } else {
      // Search in runtime functions
      id = search_address(addr, _extrs_addr, _extrs_length);
      if (id == BAD_ADDRESS_ID) {
        ResourceMark rm;
        const int buflen = 1024;
        char* func_name = NEW_RESOURCE_ARRAY(char, buflen);
        int offset = 0;
        if (os::dll_address_to_function_name(addr, func_name, buflen, &offset)) {
          if (offset > 0) {
            // Could be address of C string
            uint dist = (uint)pointer_delta(addr, (address)os::init, 1);
            CompileTask* task = ciEnv::current()->task();
            uint compile_id = 0;
            uint comp_level =0;
            if (task != nullptr) { // this could be called from compiler runtime initialization (compiler blobs)
              compile_id = task->compile_id();
              comp_level = task->comp_level();
            }
            log_debug(aot, codecache)("%d (L%d): Address " INTPTR_FORMAT " (offset %d) for runtime target '%s' is missing in AOT Code Cache addresses table",
                      compile_id, comp_level, p2i(addr), dist, (const char*)addr);
            // The distance encoding must not collide with the id ranges.
            assert(dist > (uint)(_all_max + MAX_STR_COUNT), "change encoding of distance");
            return dist;
          }
          reloc.print_current_on(tty);
          blob->print_on(tty);
          blob->print_code_on(tty);
          assert(false, "Address " INTPTR_FORMAT " for runtime target '%s+%d' is missing in AOT Code Cache addresses table", p2i(addr), func_name, offset);
        } else {
          reloc.print_current_on(tty);
          blob->print_on(tty);
          blob->print_code_on(tty);
          os::find(addr, tty);
          assert(false, "Address " INTPTR_FORMAT " for <unknown>/('%s') is missing in AOT Code Cache addresses table", p2i(addr), (const char*)addr);
        }
      } else {
        return _extrs_base + id;
      }
    }
  }
  return id;
}
3890
// Tear down the helper macros that laid out the address-table id ranges.
#undef _extrs_max
#undef _stubs_max
#undef _shared_blobs_max
#undef _C1_blobs_max
#undef _C2_blobs_max
#undef _blobs_max
#undef _extrs_base
#undef _stubs_base
#undef _shared_blobs_base
#undef _C1_blobs_base
#undef _C2_blobs_base
#undef _blobs_end

// Single global instance of the GC-related constants recorded for AOT code.
AOTRuntimeConstants AOTRuntimeConstants::_aot_runtime_constants;
3905
// Capture the GC-dependent constants (card table base and grain shift)
// from the currently selected barrier set into the global instance.
// Exactly one branch of the conditionally-compiled else-if chain runs,
// depending on the active GC.
void AOTRuntimeConstants::initialize_from_runtime() {
  BarrierSet* bs = BarrierSet::barrier_set();
  address card_table_base = nullptr;
  uint grain_shift = 0;
#if INCLUDE_G1GC
  if (bs->is_a(BarrierSet::G1BarrierSet)) {
    grain_shift = G1HeapRegion::LogOfHRGrainBytes;
  } else
#endif
#if INCLUDE_SHENANDOAHGC
  if (bs->is_a(BarrierSet::ShenandoahBarrierSet)) {
    grain_shift = 0;
  } else
#endif
  if (bs->is_a(BarrierSet::CardTableBarrierSet)) {
    // Only card-table collectors record a card table base address.
    CardTable::CardValue* base = ci_card_table_address_const();
    assert(base != nullptr, "unexpected byte_map_base");
    card_table_base = base;
    CardTableBarrierSet* ctbs = barrier_set_cast<CardTableBarrierSet>(bs);
    grain_shift = ctbs->grain_shift();
  }
  _aot_runtime_constants._card_table_base = card_table_base;
  _aot_runtime_constants._grain_shift = grain_shift;
}
3930
// Null-terminated list of the addresses of the runtime-constant fields
// (presumably used to recognize/patch references to them in AOT code --
// confirm against the users of this list).
address AOTRuntimeConstants::_field_addresses_list[] = {
  ((address)&_aot_runtime_constants._card_table_base),
  ((address)&_aot_runtime_constants._grain_shift),
  nullptr
};
3936
3937 address AOTRuntimeConstants::card_table_base_address() {
3938 assert(UseSerialGC || UseParallelGC, "Only these GCs have constant card table base");
3939 return (address)&_aot_runtime_constants._card_table_base;
3940 }
3941
// Close the cache to new nmethod readers and wait for existing ones to
// leave. The "closed" state is encoded as a negative reader count:
// with N active readers the count becomes -(N + 1), and exiting readers
// count up towards -1 (see ~ReadingMark).
void AOTCodeCache::wait_for_no_nmethod_readers() {
  while (true) {
    int cur = AtomicAccess::load(&_nmethod_readers);
    int upd = -(cur + 1);
    // The CAS only fires while the count is still non-negative (open);
    // a failed CAS means a reader raced us, so retry with the new count.
    if (cur >= 0 && AtomicAccess::cmpxchg(&_nmethod_readers, cur, upd) == cur) {
      // Success, no new readers should appear.
      break;
    }
  }

  // Now wait for all readers to leave.
  SpinYield w;
  while (AtomicAccess::load(&_nmethod_readers) != -1) {
    w.wait();
  }
}
3958
// Register this thread as an nmethod reader via CAS increment.
// Sets _failed when the cache has already been closed (negative count,
// see wait_for_no_nmethod_readers); callers must check failed().
AOTCodeCache::ReadingMark::ReadingMark() {
  while (true) {
    int cur = AtomicAccess::load(&_nmethod_readers);
    if (cur < 0) {
      // Cache is already closed, cannot proceed.
      _failed = true;
      return;
    }
    if (AtomicAccess::cmpxchg(&_nmethod_readers, cur, cur + 1) == cur) {
      // Successfully recorded ourselves as entered.
      _failed = false;
      return;
    }
  }
}
3974
// Deregister this thread as an nmethod reader. The direction of the
// counter update depends on whether the cache was closed while we were
// reading (negative encoding counts up towards -1, positive counts down
// towards 0); a failed construction did not register, so nothing to undo.
AOTCodeCache::ReadingMark::~ReadingMark() {
  if (_failed) {
    return;
  }
  while (true) {
    int cur = AtomicAccess::load(&_nmethod_readers);
    if (cur > 0) {
      // Cache is open, we are counting down towards 0.
      if (AtomicAccess::cmpxchg(&_nmethod_readers, cur, cur - 1) == cur) {
        return;
      }
    } else {
      // Cache is closed, we are counting up towards -1.
      if (AtomicAccess::cmpxchg(&_nmethod_readers, cur, cur + 1) == cur) {
        return;
      }
    }
  }
}
3994
// Print the accumulated AOT code cache timers: load-side timers when the
// cache is being used, store-side timer when it is being dumped.
void AOTCodeCache::print_timers_on(outputStream* st) {
  if (is_using_code()) {
    st->print_cr (" AOT Code Preload Time: %7.3f s", _t_totalPreload.seconds());
    st->print_cr (" AOT Code Load Time: %7.3f s", _t_totalLoad.seconds());
    st->print_cr (" nmethod register: %7.3f s", _t_totalRegister.seconds());
    st->print_cr (" find AOT code entry: %7.3f s", _t_totalFind.seconds());
  }
  if (is_dumping_code()) {
    st->print_cr (" AOT Code Store Time: %7.3f s", _t_totalStore.seconds());
  }
}
4006
4007 AOTCodeStats AOTCodeStats::add_aot_code_stats(AOTCodeStats stats1, AOTCodeStats stats2) {
4008 AOTCodeStats result;
4009 for (int kind = AOTCodeEntry::None; kind < AOTCodeEntry::Kind_count; kind++) {
4010 result.ccstats._kind_cnt[kind] = stats1.entry_count(kind) + stats2.entry_count(kind);
4011 }
4012
4013 for (int lvl = CompLevel_none; lvl < AOTCompLevel_count; lvl++) {
4014 result.ccstats._nmethod_cnt[lvl] = stats1.nmethod_count(lvl) + stats2.nmethod_count(lvl);
4015 }
4016 result.ccstats._clinit_barriers_cnt = stats1.clinit_barriers_count() + stats2.clinit_barriers_count();
4017 return result;
4018 }
4019
// Log per-kind entry totals at Debug level; for nmethod entries also log
// per-tier totals, and the clinit-barrier count on the top (preload) tier.
void AOTCodeCache::log_stats_on_exit(AOTCodeStats& stats) {
  LogStreamHandle(Debug, aot, codecache, exit) log;
  if (log.is_enabled()) {
    for (uint kind = AOTCodeEntry::None; kind < AOTCodeEntry::Kind_count; kind++) {
      log.print_cr(" %s: total=%u", aot_code_entry_kind_name[kind], stats.entry_count(kind));
      if (kind == AOTCodeEntry::Nmethod) {
        for (uint lvl = CompLevel_simple; lvl < AOTCompLevel_count; lvl++) {
          log.print(" Tier %d: total=%u", lvl, stats.nmethod_count(lvl));
          if (lvl == AOTCompLevel_count-1) { // AOT Preload
            log.print(", has_clinit_barriers=%u", stats.clinit_barriers_count());
          }
          log.cr();
        }
      }
    }
  }
}
4037
4038 static void print_helper1(outputStream* st, const char* name, int count) {
4039 if (count > 0) {
4040 st->print(" %s=%d", name, count);
4041 }
4042 }
4043
// Print a summary of the AOT code cache contents on 'st': per-kind entry
// counters, per-tier nmethod counters, and — at higher log levels — the
// unused entries plus a listing of in-use nmethods in the code cache.
// Reads the load-time header, so a ReadingMark guards against the cache
// being closed concurrently.
void AOTCodeCache::print_statistics_on(outputStream* st) {
  AOTCodeCache* cache = open_for_use();
  if (cache != nullptr) {
    ReadingMark rdmk;
    if (rdmk.failed()) {
      // Cache is closed, cannot touch anything.
      return;
    }
    AOTCodeStats stats;

    // Accumulate stats over the preload entries first ...
    uint preload_count = cache->_load_header->preload_entries_count();
    AOTCodeEntry* preload_entries = (AOTCodeEntry*)cache->addr(cache->_load_header->preload_entries_offset());
    for (uint i = 0; i < preload_count; i++) {
      stats.collect_all_stats(&preload_entries[i]);
    }

    // ... then over the regular entries.
    uint count = cache->_load_header->entries_count();
    AOTCodeEntry* load_entries = (AOTCodeEntry*)cache->addr(cache->_load_header->entries_offset());
    for (uint i = 0; i < count; i++) {
      stats.collect_all_stats(&load_entries[i]);
    }

    // One summary line per entry kind; zero counters are suppressed by
    // print_helper1.
    for (uint kind = AOTCodeEntry::None; kind < AOTCodeEntry::Kind_count; kind++) {
      if (stats.entry_count(kind) > 0) {
        st->print(" %s:", aot_code_entry_kind_name[kind]);
        print_helper1(st, "total", stats.entry_count(kind));
        print_helper1(st, "loaded", stats.entry_loaded_count(kind));
        print_helper1(st, "invalidated", stats.entry_invalidated_count(kind));
        print_helper1(st, "failed", stats.entry_load_failed_count(kind));
        st->cr();
      }
      if (kind == AOTCodeEntry::Nmethod) {
        // Break nmethods down by compilation tier; the highest tier
        // (AOT preload) additionally reports clinit-barrier entries.
        for (uint lvl = CompLevel_simple; lvl < AOTCompLevel_count; lvl++) {
          if (stats.nmethod_count(lvl) > 0) {
            st->print(" AOT Code T%d", lvl);
            print_helper1(st, "total", stats.nmethod_count(lvl));
            print_helper1(st, "loaded", stats.nmethod_loaded_count(lvl));
            print_helper1(st, "invalidated", stats.nmethod_invalidated_count(lvl));
            print_helper1(st, "failed", stats.nmethod_load_failed_count(lvl));
            if (lvl == AOTCompLevel_count-1) {
              print_helper1(st, "has_clinit_barriers", stats.clinit_barriers_count());
            }
            st->cr();
          }
        }
      }
    }
    // Entries that were never loaded go to the init Debug log, not 'st'.
    LogStreamHandle(Debug, aot, codecache, init) log;
    if (log.is_enabled()) {
      AOTCodeCache::print_unused_entries_on(&log);
    }
    LogStreamHandle(Trace, aot, codecache) aot_info;
    // need a lock to traverse the code cache
    if (aot_info.is_enabled()) {
      MutexLocker locker(CodeCache_lock, Mutex::_no_safepoint_check_flag);
      NMethodIterator iter(NMethodIterator::all);
      while (iter.next()) {
        nmethod* nm = iter.method();
        if (nm->is_in_use() && !nm->is_native_method() && !nm->is_osr_method()) {
          // Flags: S = method stored in AOT cache, A = nmethod is AOT code,
          // P = preloaded; followed by the compilation level.
          aot_info.print("%5d:%c%c%c%d:", nm->compile_id(),
                         (nm->method()->in_aot_cache() ? 'S' : ' '),
                         (nm->is_aot() ? 'A' : ' '),
                         (nm->preloaded() ? 'P' : ' '),
                         nm->comp_level());
          print_helper(nm, &aot_info);
          aot_info.print(": ");
          CompileTask::print(&aot_info, nm, nullptr, true /*short_form*/);
          // NOTE(review): this handle uses the same Trace level/tags as
          // 'aot_info' above, so its is_enabled() check is always true
          // here — possibly intended to be a different (Debug) level;
          // confirm against the logging design.
          LogStreamHandle(Trace, aot, codecache) aot_debug;
          if (aot_debug.is_enabled()) {
            MethodTrainingData* mtd = MethodTrainingData::find(methodHandle(Thread::current(), nm->method()));
            if (mtd != nullptr) {
              mtd->iterate_compiles([&](CompileTrainingData* ctd) {
                aot_debug.print(" CTD: "); ctd->print_on(&aot_debug); aot_debug.cr();
              });
            }
          }
        }
      }
    }
  }
}
4125
4126 void AOTCodeEntry::print(outputStream* st) const {
4127 st->print_cr(" AOT Code Cache entry " INTPTR_FORMAT " [kind: %d, id: " UINT32_FORMAT_X_0 ", offset: %d, size: %d, comp_level: %d, comp_id: %d, %s%s%s%s]",
4128 p2i(this), (int)_kind, _id, _offset, _size, _comp_level, _comp_id,
4129 (_not_entrant? "not_entrant" : "entrant"),
4130 (_loaded ? ", loaded" : ""),
4131 (_has_clinit_barriers ? ", has_clinit_barriers" : ""),
4132 (_for_preload ? ", for_preload" : ""));
4133 }
4134
// This is called after initialize() but before init2()
// and _cache is not set yet.
// Lists every preload entry and every regular entry of the opened cache on
// 'st': one summary line per entry, followed by the name line printed via
// AOTCodeReader::print_on.
void AOTCodeCache::print_on(outputStream* st) {
  if (opened_cache != nullptr && opened_cache->for_use()) {
    ReadingMark rdmk;
    if (rdmk.failed()) {
      // Cache is closed, cannot touch anything.
      return;
    }

    st->print_cr("\nAOT Code Cache Preload entries");

    uint preload_count = opened_cache->_load_header->preload_entries_count();
    AOTCodeEntry* preload_entries = (AOTCodeEntry*)opened_cache->addr(opened_cache->_load_header->preload_entries_offset());
    for (uint i = 0; i < preload_count; i++) {
      AOTCodeEntry* entry = &preload_entries[i];

      // The saved name lives at name_offset() relative to the entry's own
      // position in the cache.
      uint entry_position = entry->offset();
      uint name_offset = entry->name_offset() + entry_position;
      const char* saved_name = opened_cache->addr(name_offset);

      st->print_cr("%4u: %10s Id:%u AP%u size=%u '%s' %s%s%s",
                   i, aot_code_entry_kind_name[entry->kind()], entry->id(), entry->comp_level(),
                   entry->size(), saved_name,
                   entry->has_clinit_barriers() ? " has_clinit_barriers" : "",
                   entry->is_loaded() ? " loaded" : "",
                   entry->not_entrant() ? " not_entrant" : "");

      st->print_raw(" ");
      AOTCodeReader reader(opened_cache, entry, nullptr);
      reader.print_on(st);
    }

    st->print_cr("\nAOT Code Cache entries");

    // Regular entries are visited in search-table order: the table is an
    // array of [id, index] pairs, where 'index' selects into load_entries.
    uint count = opened_cache->_load_header->entries_count();
    uint* search_entries = (uint*)opened_cache->addr(opened_cache->_load_header->search_table_offset()); // [id, index]
    AOTCodeEntry* load_entries = (AOTCodeEntry*)opened_cache->addr(opened_cache->_load_header->entries_offset());

    for (uint i = 0; i < count; i++) {
      int index = search_entries[2*i + 1];
      AOTCodeEntry* entry = &(load_entries[index]);

      uint entry_position = entry->offset();
      uint name_offset = entry->name_offset() + entry_position;
      const char* saved_name = opened_cache->addr(name_offset);

      st->print_cr("%4u: %10s idx:%4u Id:%u A%u size=%u '%s' %s%s",
                   i, aot_code_entry_kind_name[entry->kind()], index, entry->id(), entry->comp_level(),
                   entry->size(), saved_name,
                   entry->is_loaded() ? " loaded" : "",
                   entry->not_entrant() ? " not_entrant" : "");

      st->print_raw(" ");
      AOTCodeReader reader(opened_cache, entry, nullptr);
      reader.print_on(st);
    }
  }
}
4194
// Log every cached nmethod entry that was never loaded in this run,
// together with the matching training-data record that may explain why it
// was skipped. Entries superseded by an online compilation (or queued for
// one) are silently skipped.
// NOTE(review): the 'st' parameter is unused — output goes to a local
// aot+codecache+init Info log stream instead, while the caller in
// print_statistics_on passes a Debug-level stream; confirm this is intended.
void AOTCodeCache::print_unused_entries_on(outputStream* st) {
  LogStreamHandle(Info, aot, codecache, init) info;
  if (info.is_enabled()) {
    AOTCodeCache::iterate([&](AOTCodeEntry* entry) {
      if (entry->is_nmethod() && !entry->is_loaded()) {
        MethodTrainingData* mtd = MethodTrainingData::find(methodHandle(Thread::current(), entry->method()));
        if (mtd != nullptr) {
          if (mtd->has_holder()) {
            if (mtd->holder()->method_holder()->is_initialized()) {
              ResourceMark rm;
              mtd->iterate_compiles([&](CompileTrainingData* ctd) {
                // Only consider the compile record for this entry's tier.
                if ((uint)ctd->level() == entry->comp_level()) {
                  if (ctd->init_deps_left_acquire() == 0) {
                    nmethod* nm = mtd->holder()->code();
                    if (nm == nullptr) {
                      if (mtd->holder()->queued_for_compilation()) {
                        return; // scheduled for compilation
                      }
                    } else if ((uint)nm->comp_level() >= entry->comp_level()) {
                      return; // already online compiled and superseded by a more optimal method
                    }
                    info.print("AOT Code Cache entry not loaded: ");
                    ctd->print_on(&info);
                    info.cr();
                  }
                }
              });
            } else {
              // not yet initialized
            }
          } else {
            info.print("AOT Code Cache entry doesn't have a holder: ");
            mtd->print_on(&info);
            info.cr();
          }
        }
      }
    });
  }
}
4235
4236 void AOTCodeReader::print_on(outputStream* st) {
4237 uint entry_position = _entry->offset();
4238 set_read_position(entry_position);
4239
4240 // Read name
4241 uint name_offset = entry_position + _entry->name_offset();
4242 uint name_size = _entry->name_size(); // Includes '/0'
4243 const char* name = addr(name_offset);
4244
4245 st->print_cr(" name: %s", name);
4246 }
4247