1 /*
2 * Copyright (c) 2025, Oracle and/or its affiliates. All rights reserved.
3 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
4 *
5 * This code is free software; you can redistribute it and/or modify it
6 * under the terms of the GNU General Public License version 2 only, as
7 * published by the Free Software Foundation.
8 *
9 * This code is distributed in the hope that it will be useful, but WITHOUT
10 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
11 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
12 * version 2 for more details (a copy is included in the LICENSE file that
13 * accompanied this code).
14 *
15 * You should have received a copy of the GNU General Public License version
16 * 2 along with this work; if not, write to the Free Software Foundation,
17 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
18 *
19 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
20 * or visit www.oracle.com if you need additional information or have any
21 * questions.
22 *
23 */
24
25
26 #include "asm/macroAssembler.hpp"
27 #include "cds/aotCacheAccess.hpp"
28 #include "cds/cds_globals.hpp"
29 #include "cds/cdsConfig.hpp"
30 #include "cds/heapShared.hpp"
31 #include "cds/metaspaceShared.hpp"
32 #include "classfile/javaAssertions.hpp"
33 #include "code/aotCodeCache.hpp"
34 #include "code/codeCache.hpp"
35 #include "gc/shared/gcConfig.hpp"
36 #include "logging/logStream.hpp"
37 #include "memory/memoryReserver.hpp"
38 #include "runtime/deoptimization.hpp"
39 #include "runtime/flags/flagSetting.hpp"
40 #include "runtime/globals_extension.hpp"
41 #include "runtime/java.hpp"
42 #include "runtime/mutexLocker.hpp"
43 #include "runtime/os.inline.hpp"
44 #include "runtime/sharedRuntime.hpp"
45 #include "runtime/stubRoutines.hpp"
46 #include "utilities/copy.hpp"
47 #ifdef COMPILER1
48 #include "c1/c1_Runtime1.hpp"
49 #endif
50 #ifdef COMPILER2
51 #include "opto/runtime.hpp"
52 #endif
53 #if INCLUDE_G1GC
54 #include "gc/g1/g1BarrierSetRuntime.hpp"
55 #endif
56 #if INCLUDE_SHENANDOAHGC
57 #include "gc/shenandoah/shenandoahRuntime.hpp"
58 #endif
59 #if INCLUDE_ZGC
60 #include "gc/z/zBarrierSetRuntime.hpp"
61 #endif
62
63 #include <sys/stat.h>
64 #include <errno.h>
65
// Human-readable names for AOTCodeEntry::Kind values, generated from the
// DO_AOTCODEENTRY_KIND x-macro so the strings stay in sync with the enum.
const char* aot_code_entry_kind_name[] = {
#define DECL_KIND_STRING(kind) XSTR(kind),
  DO_AOTCODEENTRY_KIND(DECL_KIND_STRING)
#undef DECL_KIND_STRING
};
71
// Report that an existing AOT Code Cache cannot be used (load/use path).
// When AbortVMOnAOTCodeFailure is set the VM exits during initialization;
// otherwise we log and disable both adapter and stub caching for this run.
static void report_load_failure() {
  if (AbortVMOnAOTCodeFailure) {
    vm_exit_during_initialization("Unable to use AOT Code Cache.", nullptr);
  }
  log_info(aot, codecache, init)("Unable to use AOT Code Cache.");
  AOTAdapterCaching = false;
  AOTStubCaching = false;
}
80
// Report that the AOT Code Cache cannot be created (store/dump path).
// When AbortVMOnAOTCodeFailure is set the VM aborts (vm_abort, since this can
// happen after initialization); otherwise we log and disable both adapter and
// stub caching for this run.
static void report_store_failure() {
  if (AbortVMOnAOTCodeFailure) {
    tty->print_cr("Unable to create AOT Code Cache.");
    vm_abort(false);
  }
  log_info(aot, codecache, exit)("Unable to create AOT Code Cache.");
  AOTAdapterCaching = false;
  AOTStubCaching = false;
}
90
91 bool AOTCodeCache::is_dumping_adapter() {
92 return AOTAdapterCaching && is_on_for_dump();
93 }
94
95 bool AOTCodeCache::is_using_adapter() {
96 return AOTAdapterCaching && is_on_for_use();
97 }
98
99 bool AOTCodeCache::is_dumping_stub() {
100 return AOTStubCaching && is_on_for_dump();
101 }
102
103 bool AOTCodeCache::is_using_stub() {
104 return AOTStubCaching && is_on_for_use();
105 }
106
// Pack (kind, id) into a single uint32_t lookup key. Blob ids of different
// kinds are shifted into disjoint ranges by biasing them with the stub-id
// counts of the preceding kinds; Adapter ids are left unbiased.
static uint32_t encode_id(AOTCodeEntry::Kind kind, int id) {
  assert(AOTCodeEntry::is_valid_entry_kind(kind), "invalid AOTCodeEntry kind %d", (int)kind);
  // There can be a conflict of id between an Adapter and *Blob, but that should not cause any functional issue
  // because both id and kind are used to find an entry, and that combination should be unique
  if (kind == AOTCodeEntry::Adapter) {
    return id;
  } else if (kind == AOTCodeEntry::SharedBlob) {
    return id;
  } else if (kind == AOTCodeEntry::C1Blob) {
    return (int)SharedStubId::NUM_STUBIDS + id;
  } else {
    // kind must be AOTCodeEntry::C2Blob
    return (int)SharedStubId::NUM_STUBIDS + COMPILER1_PRESENT((int)C1StubId::NUM_STUBIDS) + id;
  }
}
122
// Upper bound (in bytes) of the AOT code store buffer; set from AOTCodeMaxSize
// (possibly aligned up) in AOTCodeCache::initialize(). 0 until then.
static uint _max_aot_code_size = 0;
uint AOTCodeCache::max_aot_code_size() {
  return _max_aot_code_size;
}
127
128 void AOTCodeCache::initialize() {
129 #if defined(ZERO) || !(defined(AMD64) || defined(AARCH64))
130 log_info(aot, codecache, init)("AOT Code Cache is not supported on this platform.");
131 AOTAdapterCaching = false;
132 AOTStubCaching = false;
133 return;
134 #else
135 if (FLAG_IS_DEFAULT(AOTCache)) {
136 log_info(aot, codecache, init)("AOT Code Cache is not used: AOTCache is not specified.");
137 AOTAdapterCaching = false;
138 AOTStubCaching = false;
139 return; // AOTCache must be specified to dump and use AOT code
140 }
141
142 // Disable stubs caching until JDK-8357398 is fixed.
143 FLAG_SET_ERGO(AOTStubCaching, false);
144
145 if (VerifyOops) {
146 // Disable AOT stubs caching when VerifyOops flag is on.
147 // Verify oops code generated a lot of C strings which overflow
148 // AOT C string table (which has fixed size).
149 // AOT C string table will be reworked later to handle such cases.
150 //
151 // Note: AOT adapters are not affected - they don't have oop operations.
152 log_info(aot, codecache, init)("AOT Stubs Caching is not supported with VerifyOops.");
153 FLAG_SET_ERGO(AOTStubCaching, false);
154 }
155
156 bool is_dumping = false;
157 bool is_using = false;
158 if (CDSConfig::is_dumping_final_static_archive() && CDSConfig::is_dumping_aot_linked_classes()) {
159 FLAG_SET_ERGO_IF_DEFAULT(AOTAdapterCaching, true);
160 FLAG_SET_ERGO_IF_DEFAULT(AOTStubCaching, true);
161 is_dumping = true;
162 } else if (CDSConfig::is_using_archive() && CDSConfig::is_using_aot_linked_classes()) {
163 FLAG_SET_ERGO_IF_DEFAULT(AOTAdapterCaching, true);
164 FLAG_SET_ERGO_IF_DEFAULT(AOTStubCaching, true);
165 is_using = true;
166 } else {
167 log_info(aot, codecache, init)("AOT Code Cache is not used: AOT Class Linking is not used.");
168 return; // nothing to do
169 }
170 if (!AOTAdapterCaching && !AOTStubCaching) {
171 return; // AOT code caching disabled on command line
172 }
173 _max_aot_code_size = AOTCodeMaxSize;
174 if (!FLAG_IS_DEFAULT(AOTCodeMaxSize)) {
175 if (!is_aligned(AOTCodeMaxSize, os::vm_allocation_granularity())) {
176 _max_aot_code_size = align_up(AOTCodeMaxSize, os::vm_allocation_granularity());
177 log_debug(aot,codecache,init)("Max AOT Code Cache size is aligned up to %uK", (int)(max_aot_code_size()/K));
178 }
179 }
180 size_t aot_code_size = is_using ? AOTCacheAccess::get_aot_code_region_size() : 0;
181 if (is_using && aot_code_size == 0) {
182 log_info(aot, codecache, init)("AOT Code Cache is empty");
183 return;
184 }
185 if (!open_cache(is_dumping, is_using)) {
186 if (is_using) {
187 report_load_failure();
188 } else {
189 report_store_failure();
190 }
191 return;
192 }
193 if (is_dumping) {
194 FLAG_SET_DEFAULT(ForceUnreachable, true);
195 }
196 FLAG_SET_DEFAULT(DelayCompilerStubsGeneration, false);
197 #endif // defined(AMD64) || defined(AARCH64)
198 }
199
200 void AOTCodeCache::init2() {
201 if (!is_on()) {
202 return;
203 }
204 if (!verify_vm_config()) {
205 close();
206 report_load_failure();
207 }
208
209 // initialize the table of external routines so we can save
210 // generated code blobs that reference them
211 init_extrs_table();
212 init_early_stubs_table();
213 }
214
// Singleton cache instance; nullptr whenever the AOT Code Cache is not open.
AOTCodeCache* AOTCodeCache::_cache = nullptr;
216
217 bool AOTCodeCache::open_cache(bool is_dumping, bool is_using) {
218 AOTCodeCache* cache = new AOTCodeCache(is_dumping, is_using);
219 if (cache->failed()) {
220 delete cache;
221 _cache = nullptr;
222 return false;
223 }
224 _cache = cache;
225 return true;
226 }
227
228 void AOTCodeCache::close() {
229 if (is_on()) {
230 delete _cache; // Free memory
231 _cache = nullptr;
232 }
233 }
234
235 #define DATA_ALIGNMENT HeapWordSize
236
// Construct the cache in use and/or dump mode.
//  - use mode: reserve memory, map the archived AOT code region into it,
//    verify the header, and load the C-string table.
//  - dump mode: allocate an aligned C-heap store buffer; code grows from the
//    front while AOTCodeEntry records grow from the back (stack-like).
// On any failure set_failed() is called; the caller (open_cache) checks it.
AOTCodeCache::AOTCodeCache(bool is_dumping, bool is_using) :
  _load_header(nullptr),
  _load_buffer(nullptr),
  _store_buffer(nullptr),
  _C_store_buffer(nullptr),
  _write_position(0),
  _load_size(0),
  _store_size(0),
  _for_use(is_using),
  _for_dump(is_dumping),
  _closing(false),
  _failed(false),
  _lookup_failed(false),
  _table(nullptr),
  _load_entries(nullptr),
  _search_entries(nullptr),
  _store_entries(nullptr),
  _C_strings_buf(nullptr),
  _store_entries_cnt(0)
{
  // Read header at the beginning of cache
  if (_for_use) {
    // Read cache
    size_t load_size = AOTCacheAccess::get_aot_code_region_size();
    ReservedSpace rs = MemoryReserver::reserve(load_size, mtCode);
    if (!rs.is_reserved()) {
      log_warning(aot, codecache, init)("Failed to reserved %u bytes of memory for mapping AOT code region into AOT Code Cache", (uint)load_size);
      set_failed();
      return;
    }
    if (!AOTCacheAccess::map_aot_code_region(rs)) {
      log_warning(aot, codecache, init)("Failed to read/mmap cached code region into AOT Code Cache");
      set_failed();
      return;
    }

    _load_size = (uint)load_size;
    _load_buffer = (char*)rs.base();
    // Reservation is page-granular, so the buffer satisfies DATA_ALIGNMENT.
    assert(is_aligned(_load_buffer, DATA_ALIGNMENT), "load_buffer is not aligned");
    log_debug(aot, codecache, init)("Mapped %u bytes at address " INTPTR_FORMAT " at AOT Code Cache", _load_size, p2i(_load_buffer));

    // Header sits at offset 0; verify version and recorded size before use.
    _load_header = (Header*)addr(0);
    if (!_load_header->verify_config(_load_size)) {
      set_failed();
      return;
    }
    log_info (aot, codecache, init)("Loaded %u AOT code entries from AOT Code Cache", _load_header->entries_count());
    log_debug(aot, codecache, init)("  Adapters:  total=%u", _load_header->adapters_count());
    log_debug(aot, codecache, init)("  Shared Blobs: total=%u", _load_header->shared_blobs_count());
    log_debug(aot, codecache, init)("  C1 Blobs: total=%u", _load_header->C1_blobs_count());
    log_debug(aot, codecache, init)("  C2 Blobs: total=%u", _load_header->C2_blobs_count());
    log_debug(aot, codecache, init)("  AOT code cache size: %u bytes", _load_header->cache_size());

    // Read strings
    load_strings();
  }
  if (_for_dump) {
    // Over-allocate by DATA_ALIGNMENT so the usable buffer can be aligned up.
    _C_store_buffer = NEW_C_HEAP_ARRAY(char, max_aot_code_size() + DATA_ALIGNMENT, mtCode);
    _store_buffer = align_up(_C_store_buffer, DATA_ALIGNMENT);
    // Entries allocated at the end of buffer in reverse (as on stack).
    _store_entries = (AOTCodeEntry*)align_up(_C_store_buffer + max_aot_code_size(), DATA_ALIGNMENT);
    log_debug(aot, codecache, init)("Allocated store buffer at address " INTPTR_FORMAT " of size %u", p2i(_store_buffer), max_aot_code_size());
  }
  _table = new AOTCodeAddressTable();
}
302
303 void AOTCodeCache::init_extrs_table() {
304 AOTCodeAddressTable* table = addr_table();
305 if (table != nullptr) {
306 table->init_extrs();
307 }
308 }
309
310 void AOTCodeCache::init_early_stubs_table() {
311 AOTCodeAddressTable* table = addr_table();
312 if (table != nullptr) {
313 table->init_early_stubs();
314 }
315 }
316
317 void AOTCodeCache::init_shared_blobs_table() {
318 AOTCodeAddressTable* table = addr_table();
319 if (table != nullptr) {
320 table->init_shared_blobs();
321 }
322 }
323
324 void AOTCodeCache::init_early_c1_table() {
325 AOTCodeAddressTable* table = addr_table();
326 if (table != nullptr) {
327 table->init_early_c1();
328 }
329 }
330
// Close the cache: in dump mode, finalize and emit the AOT code region
// (finish_write), then release the store buffer and address table.
// Idempotent via the _closing flag, which also stops further cache access.
AOTCodeCache::~AOTCodeCache() {
  if (_closing) {
    return; // Already closed
  }
  // Stop any further access to cache.
  _closing = true;

  // NOTE(review): presumably serializes with compiler threads that may still
  // be touching the cache — confirm Compile_lock is the intended lock here.
  MutexLocker ml(Compile_lock);
  if (for_dump()) { // Finalize cache
    finish_write();
  }
  // The load buffer is a mapped region owned elsewhere; just drop the pointer.
  _load_buffer = nullptr;
  if (_C_store_buffer != nullptr) {
    FREE_C_HEAP_ARRAY(char, _C_store_buffer);
    _C_store_buffer = nullptr;
    _store_buffer = nullptr;
  }
  if (_table != nullptr) {
    delete _table;
    _table = nullptr;
  }
}
353
354 void AOTCodeCache::Config::record() {
355 _flags = 0;
356 #ifdef ASSERT
357 _flags |= debugVM;
358 #endif
359 if (UseCompressedOops) {
360 _flags |= compressedOops;
361 }
362 if (UseCompressedClassPointers) {
363 _flags |= compressedClassPointers;
364 }
365 if (UseTLAB) {
366 _flags |= useTLAB;
367 }
368 if (JavaAssertions::systemClassDefault()) {
369 _flags |= systemClassAssertions;
370 }
371 if (JavaAssertions::userClassDefault()) {
372 _flags |= userClassAssertions;
373 }
374 if (EnableContended) {
375 _flags |= enableContendedPadding;
376 }
377 if (RestrictContended) {
378 _flags |= restrictContendedPadding;
379 }
380 _compressedOopShift = CompressedOops::shift();
381 _compressedOopBase = CompressedOops::base();
382 _compressedKlassShift = CompressedKlassPointers::shift();
383 _contendedPaddingWidth = ContendedPaddingWidth;
384 _objectAlignment = ObjectAlignmentInBytes;
385 _gc = (uint)Universe::heap()->kind();
386 }
387
388 bool AOTCodeCache::Config::verify() const {
389 #ifdef ASSERT
390 if ((_flags & debugVM) == 0) {
391 log_debug(aot, codecache, init)("AOT Code Cache disabled: it was created by product VM, it can't be used by debug VM");
392 return false;
393 }
394 #else
395 if ((_flags & debugVM) != 0) {
396 log_debug(aot, codecache, init)("AOT Code Cache disabled: it was created by debug VM, it can't be used by product VM");
397 return false;
398 }
399 #endif
400
401 CollectedHeap::Name aot_gc = (CollectedHeap::Name)_gc;
402 if (aot_gc != Universe::heap()->kind()) {
430 log_debug(aot, codecache, init)("AOT Code Cache disabled: it was created with RestrictContended = %s", RestrictContended ? "false" : "true");
431 return false;
432 }
433 if (_compressedOopShift != (uint)CompressedOops::shift()) {
434 log_debug(aot, codecache, init)("AOT Code Cache disabled: it was created with different CompressedOops::shift(): %d vs current %d", _compressedOopShift, CompressedOops::shift());
435 return false;
436 }
437 if (_compressedKlassShift != (uint)CompressedKlassPointers::shift()) {
438 log_debug(aot, codecache, init)("AOT Code Cache disabled: it was created with CompressedKlassPointers::shift() = %d vs current %d", _compressedKlassShift, CompressedKlassPointers::shift());
439 return false;
440 }
441 if (_contendedPaddingWidth != (uint)ContendedPaddingWidth) {
442 log_debug(aot, codecache, init)("AOT Code Cache disabled: it was created with ContendedPaddingWidth = %d vs current %d", _contendedPaddingWidth, ContendedPaddingWidth);
443 return false;
444 }
445 if (_objectAlignment != (uint)ObjectAlignmentInBytes) {
446 log_debug(aot, codecache, init)("AOT Code Cache disabled: it was created with ObjectAlignmentInBytes = %d vs current %d", _objectAlignment, ObjectAlignmentInBytes);
447 return false;
448 }
449
450 // This should be the last check as it only disables AOTStubCaching
451 if ((_compressedOopBase == nullptr || CompressedOops::base() == nullptr) && (_compressedOopBase != CompressedOops::base())) {
452 log_debug(aot, codecache, init)("AOTStubCaching is disabled: incompatible CompressedOops::base(): %p vs current %p", _compressedOopBase, CompressedOops::base());
453 AOTStubCaching = false;
454 }
455
456 return true;
457 }
458
// Validate the stored header against this VM: the AOT code version must match
// exactly, and the mapped region must be at least as large as the cache size
// recorded at dump time.
bool AOTCodeCache::Header::verify_config(uint load_size) const {
  if (_version != AOT_CODE_VERSION) {
    log_debug(aot, codecache, init)("AOT Code Cache disabled: different AOT Code version %d vs %d recorded in AOT Code header", AOT_CODE_VERSION, _version);
    return false;
  }
  if (load_size < _cache_size) {
    log_debug(aot, codecache, init)("AOT Code Cache disabled: AOT Code Cache size %d < %d recorded in AOT Code header", load_size, _cache_size);
    return false;
  }
  return true;
}
470
471 AOTCodeCache* AOTCodeCache::open_for_use() {
472 if (AOTCodeCache::is_on_for_use()) {
473 return AOTCodeCache::cache();
474 }
475 return nullptr;
476 }
477
478 AOTCodeCache* AOTCodeCache::open_for_dump() {
479 if (AOTCodeCache::is_on_for_dump()) {
480 AOTCodeCache* cache = AOTCodeCache::cache();
481 cache->clear_lookup_failed(); // Reset bit
482 return cache;
483 }
484 return nullptr;
485 }
486
// Copy 'size' bytes from 'from' to 'to'. Uses word-wise copy when both
// pointers are HeapWord-aligned and the copy is big enough to benefit.
// NOTE(review): the word-wise path rounds the word count UP, so it may write
// up to HeapWordSize-1 bytes past 'to + size'; callers appear to rely on
// DATA_ALIGNMENT (== HeapWordSize) sized destinations — confirm.
void copy_bytes(const char* from, address to, uint size) {
  assert(size > 0, "sanity");
  bool by_words = true;
  if ((size > 2 * HeapWordSize) && (((intptr_t)from | (intptr_t)to) & (HeapWordSize - 1)) == 0) {
    // Use wordwise copies if possible:
    Copy::disjoint_words((HeapWord*)from,
                         (HeapWord*)to,
                         ((size_t)size + HeapWordSize-1) / HeapWordSize);
  } else {
    by_words = false;
    Copy::conjoint_jbytes(from, to, (size_t)size);
  }
  log_trace(aot, codecache)("Copied %d bytes as %s from " INTPTR_FORMAT " to " INTPTR_FORMAT, size, (by_words ? "HeapWord" : "bytes"), p2i(from), p2i(to));
}
501
502 AOTCodeReader::AOTCodeReader(AOTCodeCache* cache, AOTCodeEntry* entry) {
503 _cache = cache;
504 _entry = entry;
505 _load_buffer = cache->cache_buffer();
506 _read_position = 0;
507 _lookup_failed = false;
508 }
509
510 void AOTCodeReader::set_read_position(uint pos) {
511 if (pos == _read_position) {
512 return;
513 }
514 assert(pos < _cache->load_size(), "offset:%d >= file size:%d", pos, _cache->load_size());
515 _read_position = pos;
516 }
517
518 bool AOTCodeCache::set_write_position(uint pos) {
519 if (pos == _write_position) {
520 return true;
521 }
522 if (_store_size < _write_position) {
523 _store_size = _write_position; // Adjust during write
524 }
525 assert(pos < _store_size, "offset:%d >= file size:%d", pos, _store_size);
526 _write_position = pos;
569 if (nbytes == 0) {
570 return 0;
571 }
572 uint new_position = _write_position + nbytes;
573 if (new_position >= (uint)((char*)_store_entries - _store_buffer)) {
574 log_warning(aot, codecache)("Failed to write %d bytes at offset %d to AOT Code Cache. Increase AOTCodeMaxSize.",
575 nbytes, _write_position);
576 set_failed();
577 report_store_failure();
578 return 0;
579 }
580 copy_bytes((const char* )buffer, (address)(_store_buffer + _write_position), nbytes);
581 log_trace(aot, codecache)("Wrote %d bytes at offset %d to AOT Code Cache", nbytes, _write_position);
582 _write_position += nbytes;
583 if (_store_size < _write_position) {
584 _store_size = _write_position;
585 }
586 return nbytes;
587 }
588
589 void* AOTCodeEntry::operator new(size_t x, AOTCodeCache* cache) {
590 return (void*)(cache->add_entry());
591 }
592
593 static bool check_entry(AOTCodeEntry::Kind kind, uint id, AOTCodeEntry* entry) {
594 if (entry->kind() == kind) {
595 assert(entry->id() == id, "sanity");
596 return true; // Found
597 }
598 return false;
599 }
600
// Look up a cached entry by (kind, id). The search table is an id-sorted
// array of [id, index] pairs written by finish_write(); a binary search finds
// one occurrence of 'id', then a linear scan in both directions resolves id
// collisions between kinds (see encode_id()). Lazily initializes the
// _search_entries/_load_entries pointers on first call. Returns nullptr when
// no entry with this (kind, id) exists.
AOTCodeEntry* AOTCodeCache::find_entry(AOTCodeEntry::Kind kind, uint id) {
  assert(_for_use, "sanity");
  uint count = _load_header->entries_count();
  if (_load_entries == nullptr) {
    // Read it
    _search_entries = (uint*)addr(_load_header->entries_offset()); // [id, index]
    _load_entries = (AOTCodeEntry*)(_search_entries + 2 * count);
    log_debug(aot, codecache, init)("Read %d entries table at offset %d from AOT Code Cache", count, _load_header->entries_offset());
  }
  // Binary search
  int l = 0;
  int h = count - 1;
  while (l <= h) {
    int mid = (l + h) >> 1;
    int ix = mid * 2;
    uint is = _search_entries[ix];
    if (is == id) {
      int index = _search_entries[ix + 1];
      AOTCodeEntry* entry = &(_load_entries[index]);
      if (check_entry(kind, id, entry)) {
        return entry; // Found
      }
      // Linear search around to handle id collision
      for (int i = mid - 1; i >= l; i--) { // search back
        ix = i * 2;
        is = _search_entries[ix];
        if (is != id) {
          break;
        }
        index = _search_entries[ix + 1];
        AOTCodeEntry* entry = &(_load_entries[index]);
        if (check_entry(kind, id, entry)) {
          return entry; // Found
        }
      }
      for (int i = mid + 1; i <= h; i++) { // search forward
        ix = i * 2;
        is = _search_entries[ix];
        if (is != id) {
          break;
        }
        index = _search_entries[ix + 1];
        AOTCodeEntry* entry = &(_load_entries[index]);
        if (check_entry(kind, id, entry)) {
          return entry; // Found
        }
      }
      break; // Not found match
    } else if (is < id) {
      l = mid + 1;
    } else {
      h = mid - 1;
    }
  }
  return nullptr;
}
657
extern "C" {
  // qsort() comparator over uints: three-way compare by value. Explicit
  // comparisons (not subtraction) avoid any wrap-around on large values.
  static int uint_cmp(const void *i, const void *j) {
    uint lhs = *(uint *)i;
    uint rhs = *(uint *)j;
    if (lhs < rhs) {
      return -1;
    }
    if (rhs < lhs) {
      return 1;
    }
    return 0;
  }
}
665
666 bool AOTCodeCache::finish_write() {
667 if (!align_write()) {
668 return false;
669 }
670 uint strings_offset = _write_position;
671 int strings_count = store_strings();
672 if (strings_count < 0) {
673 return false;
674 }
675 if (!align_write()) {
676 return false;
677 }
678 uint strings_size = _write_position - strings_offset;
679
680 uint entries_count = 0; // Number of entrant (useful) code entries
681 uint entries_offset = _write_position;
682
683 uint store_count = _store_entries_cnt;
684 if (store_count > 0) {
685 uint header_size = (uint)align_up(sizeof(AOTCodeCache::Header), DATA_ALIGNMENT);
686 uint code_count = store_count;
687 uint search_count = code_count * 2;
688 uint search_size = search_count * sizeof(uint);
689 uint entries_size = (uint)align_up(code_count * sizeof(AOTCodeEntry), DATA_ALIGNMENT); // In bytes
690 // _write_position includes size of code and strings
691 uint code_alignment = code_count * DATA_ALIGNMENT; // We align_up code size when storing it.
692 uint total_size = header_size + _write_position + code_alignment + search_size + entries_size;
693 assert(total_size < max_aot_code_size(), "AOT Code size (" UINT32_FORMAT " bytes) is greater than AOTCodeMaxSize(" UINT32_FORMAT " bytes).", total_size, max_aot_code_size());
694
695 // Create ordered search table for entries [id, index];
696 uint* search = NEW_C_HEAP_ARRAY(uint, search_count, mtCode);
697 // Allocate in AOT Cache buffer
698 char* buffer = (char *)AOTCacheAccess::allocate_aot_code_region(total_size + DATA_ALIGNMENT);
699 char* start = align_up(buffer, DATA_ALIGNMENT);
700 char* current = start + header_size; // Skip header
701
702 AOTCodeEntry* entries_address = _store_entries; // Pointer to latest entry
703 uint adapters_count = 0;
704 uint shared_blobs_count = 0;
705 uint C1_blobs_count = 0;
706 uint C2_blobs_count = 0;
707 uint max_size = 0;
708 // AOTCodeEntry entries were allocated in reverse in store buffer.
709 // Process them in reverse order to cache first code first.
710 for (int i = store_count - 1; i >= 0; i--) {
711 entries_address[i].set_next(nullptr); // clear pointers before storing data
712 uint size = align_up(entries_address[i].size(), DATA_ALIGNMENT);
713 if (size > max_size) {
714 max_size = size;
715 }
716 copy_bytes((_store_buffer + entries_address[i].offset()), (address)current, size);
717 entries_address[i].set_offset(current - start); // New offset
718 current += size;
719 uint n = write_bytes(&(entries_address[i]), sizeof(AOTCodeEntry));
720 if (n != sizeof(AOTCodeEntry)) {
721 FREE_C_HEAP_ARRAY(uint, search);
722 return false;
723 }
724 search[entries_count*2 + 0] = entries_address[i].id();
725 search[entries_count*2 + 1] = entries_count;
726 entries_count++;
727 AOTCodeEntry::Kind kind = entries_address[i].kind();
728 if (kind == AOTCodeEntry::Adapter) {
729 adapters_count++;
730 } else if (kind == AOTCodeEntry::SharedBlob) {
731 shared_blobs_count++;
732 } else if (kind == AOTCodeEntry::C1Blob) {
733 C1_blobs_count++;
734 } else if (kind == AOTCodeEntry::C2Blob) {
735 C2_blobs_count++;
736 }
737 }
738 if (entries_count == 0) {
739 log_info(aot, codecache, exit)("AOT Code Cache was not created: no entires");
740 FREE_C_HEAP_ARRAY(uint, search);
741 return true; // Nothing to write
742 }
743 assert(entries_count <= store_count, "%d > %d", entries_count, store_count);
744 // Write strings
745 if (strings_count > 0) {
746 copy_bytes((_store_buffer + strings_offset), (address)current, strings_size);
747 strings_offset = (current - start); // New offset
748 current += strings_size;
749 }
750
751 uint new_entries_offset = (current - start); // New offset
752 // Sort and store search table
753 qsort(search, entries_count, 2*sizeof(uint), uint_cmp);
754 search_size = 2 * entries_count * sizeof(uint);
755 copy_bytes((const char*)search, (address)current, search_size);
756 FREE_C_HEAP_ARRAY(uint, search);
757 current += search_size;
758
759 // Write entries
760 entries_size = entries_count * sizeof(AOTCodeEntry); // New size
761 copy_bytes((_store_buffer + entries_offset), (address)current, entries_size);
762 current += entries_size;
763 uint size = (current - start);
764 assert(size <= total_size, "%d > %d", size , total_size);
765
766 log_debug(aot, codecache, exit)(" Adapters: total=%u", adapters_count);
767 log_debug(aot, codecache, exit)(" Shared Blobs: total=%d", shared_blobs_count);
768 log_debug(aot, codecache, exit)(" C1 Blobs: total=%d", C1_blobs_count);
769 log_debug(aot, codecache, exit)(" C2 Blobs: total=%d", C2_blobs_count);
770 log_debug(aot, codecache, exit)(" AOT code cache size: %u bytes, max entry's size: %u bytes", size, max_size);
771
772 // Finalize header
773 AOTCodeCache::Header* header = (AOTCodeCache::Header*)start;
774 header->init(size, (uint)strings_count, strings_offset,
775 entries_count, new_entries_offset,
776 adapters_count, shared_blobs_count,
777 C1_blobs_count, C2_blobs_count);
778
779 log_info(aot, codecache, exit)("Wrote %d AOT code entries to AOT Code Cache", entries_count);
780 }
781 return true;
782 }
783
784 //------------------Store/Load AOT code ----------------------
785
786 bool AOTCodeCache::store_code_blob(CodeBlob& blob, AOTCodeEntry::Kind entry_kind, uint id, const char* name, int entry_offset_count, int* entry_offsets) {
787 AOTCodeCache* cache = open_for_dump();
788 if (cache == nullptr) {
789 return false;
790 }
791 assert(AOTCodeEntry::is_valid_entry_kind(entry_kind), "invalid entry_kind %d", entry_kind);
792
793 if (AOTCodeEntry::is_adapter(entry_kind) && !is_dumping_adapter()) {
794 return false;
795 }
796 if (AOTCodeEntry::is_blob(entry_kind) && !is_dumping_stub()) {
797 return false;
798 }
799 log_debug(aot, codecache, stubs)("Writing blob '%s' (id=%u, kind=%s) to AOT Code Cache", name, id, aot_code_entry_kind_name[entry_kind]);
882
// Load a previously cached code blob (adapter or runtime stub blob) from the
// AOT Code Cache. Returns nullptr when the cache is not in use, the kind's
// caching is disabled, no matching entry exists, or materialization fails.
// On success, up to entry_offset_count entry-point offsets are written into
// entry_offsets by the reader.
CodeBlob* AOTCodeCache::load_code_blob(AOTCodeEntry::Kind entry_kind, uint id, const char* name, int entry_offset_count, int* entry_offsets) {
  AOTCodeCache* cache = open_for_use();
  if (cache == nullptr) {
    return nullptr;
  }
  assert(AOTCodeEntry::is_valid_entry_kind(entry_kind), "invalid entry_kind %d", entry_kind);

  if (AOTCodeEntry::is_adapter(entry_kind) && !is_using_adapter()) {
    return nullptr;
  }
  if (AOTCodeEntry::is_blob(entry_kind) && !is_using_stub()) {
    return nullptr;
  }
  log_debug(aot, codecache, stubs)("Reading blob '%s' (id=%u, kind=%s) from AOT Code Cache", name, id, aot_code_entry_kind_name[entry_kind]);

  // Ids are biased per kind (encode_id) to match what was stored at dump time.
  AOTCodeEntry* entry = cache->find_entry(entry_kind, encode_id(entry_kind, id));
  if (entry == nullptr) {
    return nullptr;
  }
  AOTCodeReader reader(cache, entry);
  CodeBlob* blob = reader.compile_code_blob(name, entry_offset_count, entry_offsets);

  log_debug(aot, codecache, stubs)("%sRead blob '%s' (id=%u, kind=%s) from AOT Code Cache",
                                   (blob == nullptr? "Failed to " : ""), name, id, aot_code_entry_kind_name[entry_kind]);
  return blob;
}
909
910 CodeBlob* AOTCodeReader::compile_code_blob(const char* name, int entry_offset_count, int* entry_offsets) {
911 uint entry_position = _entry->offset();
912
913 // Read name
914 uint name_offset = entry_position + _entry->name_offset();
915 uint name_size = _entry->name_size(); // Includes '/0'
916 const char* stored_name = addr(name_offset);
917
918 if (strncmp(stored_name, name, (name_size - 1)) != 0) {
919 log_warning(aot, codecache, stubs)("Saved blob's name '%s' is different from the expected name '%s'",
920 stored_name, name);
921 set_lookup_failed(); // Skip this blob
922 return nullptr;
966 set_read_position(offset);
967 for (int i = 0; i < stored_count; i++) {
968 uint32_t off = *(uint32_t*)addr(offset);
969 offset += sizeof(uint32_t);
970 const char* entry_name = (_entry->kind() == AOTCodeEntry::Adapter) ? AdapterHandlerEntry::entry_name(i) : "";
971 log_trace(aot, codecache, stubs)("Reading adapter '%s:%s' (0x%x) offset: 0x%x from AOT Code Cache",
972 stored_name, entry_name, _entry->id(), off);
973 entry_offsets[i] = off;
974 }
975
976 #ifdef ASSERT
977 LogStreamHandle(Trace, aot, codecache, stubs) log;
978 if (log.is_enabled()) {
979 FlagSetting fs(PrintRelocations, true);
980 code_blob->print_on(&log);
981 }
982 #endif
983 return code_blob;
984 }
985
986 // ------------ process code and data --------------
987
// Serialize per-relocation auxiliary data for a blob: one uint per relocation
// (an address-table id for call/external-word targets, 0 otherwise), preceded
// by an int count. Relocation kinds that need no extra data keep the default
// 0. Unsupported kinds are fatal — the dump must not silently drop them.
// Returns false if writing the data fails.
bool AOTCodeCache::write_relocations(CodeBlob& code_blob) {
  GrowableArray<uint> reloc_data;
  RelocIterator iter(&code_blob);
  LogStreamHandle(Trace, aot, codecache, reloc) log;
  while (iter.next()) {
    int idx = reloc_data.append(0); // default value
    switch (iter.type()) {
      case relocInfo::none:
        break;
      case relocInfo::runtime_call_type: {
        // Record offset of runtime destination
        CallRelocation* r = (CallRelocation*)iter.reloc();
        address dest = r->destination();
        if (dest == r->addr()) { // possible call via trampoline on Aarch64
          dest = (address)-1;    // do nothing in this case when loading this relocation
        }
        reloc_data.at_put(idx, _table->id_for_address(dest, iter, &code_blob));
        break;
      }
      case relocInfo::runtime_call_w_cp_type:
        fatal("runtime_call_w_cp_type unimplemented");
        break;
      case relocInfo::external_word_type: {
        // Record offset of runtime target
        address target = ((external_word_Relocation*)iter.reloc())->target();
        reloc_data.at_put(idx, _table->id_for_address(target, iter, &code_blob));
        break;
      }
      case relocInfo::internal_word_type:
        break;
      case relocInfo::section_word_type:
        break;
      case relocInfo::post_call_nop_type:
        break;
      default:
        fatal("relocation %d unimplemented", (int)iter.type());
        break;
    }
    if (log.is_enabled()) {
      iter.print_current_on(&log);
    }
  }

  // Write additional relocation data: uint per relocation
  // Write the count first
  int count = reloc_data.length();
  write_bytes(&count, sizeof(int));
  for (GrowableArrayIterator<uint> iter = reloc_data.begin();
       iter != reloc_data.end(); ++iter) {
    uint value = *iter;
    int n = write_bytes(&value, sizeof(uint));
    if (n != sizeof(uint)) {
      return false;
    }
  }
  return true;
}
1045
// Patch the relocations of a freshly materialized blob using the auxiliary
// data written by write_relocations(): resolve address-table ids back to
// runtime addresses for calls and external words, and rebase internal/section
// words from the dump-time content start to the new blob's content start.
// The iteration order must match write_relocations() exactly (index j).
void AOTCodeReader::fix_relocations(CodeBlob* code_blob) {
  LogStreamHandle(Trace, aot, reloc) log;
  uint offset = read_position();
  int count = *(int*)addr(offset);
  offset += sizeof(int);
  if (log.is_enabled()) {
    log.print_cr("======== extra relocations count=%d", count);
  }
  uint* reloc_data = (uint*)addr(offset);
  offset += (count * sizeof(uint));
  set_read_position(offset);

  RelocIterator iter(code_blob);
  int j = 0; // index into reloc_data, advanced once per relocation
  while (iter.next()) {
    switch (iter.type()) {
      case relocInfo::none:
        break;
      case relocInfo::runtime_call_type: {
        address dest = _cache->address_for_id(reloc_data[j]);
        // (address)-1 was stored for trampoline self-calls: leave untouched.
        if (dest != (address)-1) {
          ((CallRelocation*)iter.reloc())->set_destination(dest);
        }
        break;
      }
      case relocInfo::runtime_call_w_cp_type:
        fatal("runtime_call_w_cp_type unimplemented");
        break;
      case relocInfo::external_word_type: {
        address target = _cache->address_for_id(reloc_data[j]);
        // Add external address to global table
        int index = ExternalsRecorder::find_index(target);
        // Update index in relocation
        Relocation::add_jint(iter.data(), index);
        external_word_Relocation* reloc = (external_word_Relocation*)iter.reloc();
        assert(reloc->target() == target, "sanity");
        reloc->set_value(target); // Patch address in the code
        break;
      }
      case relocInfo::internal_word_type: {
        internal_word_Relocation* r = (internal_word_Relocation*)iter.reloc();
        r->fix_relocation_after_aot_load(aot_code_entry()->dumptime_content_start_addr(), code_blob->content_begin());
        break;
      }
      case relocInfo::section_word_type: {
        section_word_Relocation* r = (section_word_Relocation*)iter.reloc();
        r->fix_relocation_after_aot_load(aot_code_entry()->dumptime_content_start_addr(), code_blob->content_begin());
        break;
      }
      case relocInfo::post_call_nop_type:
        break;
      default:
        fatal("relocation %d unimplemented", (int)iter.type());
        break;
    }
    if (log.is_enabled()) {
      iter.print_current_on(&log);
    }
    j++;
  }
  assert(j == count, "sanity");
}
1108
1109 bool AOTCodeCache::write_oop_map_set(CodeBlob& cb) {
1110 ImmutableOopMapSet* oopmaps = cb.oop_maps();
1111 int oopmaps_size = oopmaps->nr_of_bytes();
1112 if (!write_bytes(&oopmaps_size, sizeof(int))) {
1113 return false;
1114 }
1115 uint n = write_bytes(oopmaps, oopmaps->nr_of_bytes());
1116 if (n != (uint)oopmaps->nr_of_bytes()) {
1117 return false;
1118 }
1119 return true;
1120 }
1121
1122 ImmutableOopMapSet* AOTCodeReader::read_oop_map_set() {
1123 uint offset = read_position();
1124 int size = *(int *)addr(offset);
1125 offset += sizeof(int);
1126 ImmutableOopMapSet* oopmaps = (ImmutableOopMapSet *)addr(offset);
1127 offset += size;
1128 set_read_position(offset);
1129 return oopmaps;
1130 }
1131
1132 #ifndef PRODUCT
1133 bool AOTCodeCache::write_asm_remarks(CodeBlob& cb) {
1134 // Write asm remarks
1135 uint* count_ptr = (uint *)reserve_bytes(sizeof(uint));
1136 if (count_ptr == nullptr) {
1137 return false;
1138 }
1139 uint count = 0;
1140 bool result = cb.asm_remarks().iterate([&] (uint offset, const char* str) -> bool {
1141 log_trace(aot, codecache, stubs)("asm remark offset=%d, str='%s'", offset, str);
1142 uint n = write_bytes(&offset, sizeof(uint));
1143 if (n != sizeof(uint)) {
1144 return false;
1145 }
1146 const char* cstr = add_C_string(str);
1147 int id = _table->id_for_C_string((address)cstr);
1148 assert(id != -1, "asm remark string '%s' not found in AOTCodeAddressTable", str);
1149 n = write_bytes(&id, sizeof(int));
1150 if (n != sizeof(int)) {
1151 return false;
1200 // Read dbg strings
1201 uint offset = read_position();
1202 uint count = *(uint *)addr(offset);
1203 offset += sizeof(uint);
1204 for (uint i = 0; i < count; i++) {
1205 int string_id = *(uint *)addr(offset);
1206 offset += sizeof(int);
1207 const char* str = (const char*)_cache->address_for_C_string(string_id);
1208 dbg_strings.insert(str);
1209 }
1210 set_read_position(offset);
1211 }
1212 #endif // PRODUCT
1213
1214 //======================= AOTCodeAddressTable ===============
1215
1216 // address table ids for generated routines, external addresses and C
1217 // string addresses are partitioned into positive integer ranges
1218 // defined by the following positive base and max values
1219 // i.e. [_extrs_base, _extrs_base + _extrs_max -1],
1220 // [_blobs_base, _blobs_base + _blobs_max -1],
1221 // ...
1222 // [_c_str_base, _c_str_base + _c_str_max -1],
1223
// Capacity of each id partition (see range layout comment above).
#define _extrs_max 100
#define _stubs_max 3

#define _shared_blobs_max 20
#define _C1_blobs_max 10
// Shared and C1 blobs live in one contiguous allocation (see init_shared_blobs()).
#define _blobs_max (_shared_blobs_max+_C1_blobs_max)
#define _all_max (_extrs_max+_stubs_max+_blobs_max)

// Base id of each partition: partitions are laid out back-to-back.
#define _extrs_base 0
#define _stubs_base (_extrs_base + _extrs_max)
#define _shared_blobs_base (_stubs_base + _stubs_max)
#define _C1_blobs_base (_shared_blobs_base + _shared_blobs_max)
#define _blobs_end (_shared_blobs_base + _blobs_max)

// Appends 'addr' to the given partition's table and bumps its length,
// asserting the partition's configured capacity is not exceeded.
#define SET_ADDRESS(type, addr) \
  { \
    type##_addr[type##_length++] = (address) (addr); \
    assert(type##_length <= type##_max, "increase size"); \
  }
1243
1244 static bool initializing_extrs = false;
1245
1246 void AOTCodeAddressTable::init_extrs() {
1247 if (_extrs_complete || initializing_extrs) return; // Done already
1248
1249 assert(_blobs_end <= _all_max, "AOTCodeAddress table ranges need adjusting");
1250
1251 initializing_extrs = true;
1252 _extrs_addr = NEW_C_HEAP_ARRAY(address, _extrs_max, mtCode);
1253
1254 _extrs_length = 0;
1255
1256 // Record addresses of VM runtime methods
1257 SET_ADDRESS(_extrs, SharedRuntime::fixup_callers_callsite);
1258 SET_ADDRESS(_extrs, SharedRuntime::handle_wrong_method);
1259 SET_ADDRESS(_extrs, SharedRuntime::handle_wrong_method_abstract);
1260 SET_ADDRESS(_extrs, SharedRuntime::handle_wrong_method_ic_miss);
1261 #if defined(AARCH64) && !defined(ZERO)
1262 SET_ADDRESS(_extrs, JavaThread::aarch64_get_thread_helper);
1263 #endif
1264 {
1265 // Required by Shared blobs
1266 SET_ADDRESS(_extrs, Deoptimization::fetch_unroll_info);
1267 SET_ADDRESS(_extrs, Deoptimization::unpack_frames);
1268 SET_ADDRESS(_extrs, SafepointSynchronize::handle_polling_page_exception);
1269 SET_ADDRESS(_extrs, SharedRuntime::resolve_opt_virtual_call_C);
1270 SET_ADDRESS(_extrs, SharedRuntime::resolve_virtual_call_C);
1271 SET_ADDRESS(_extrs, SharedRuntime::resolve_static_call_C);
1272 SET_ADDRESS(_extrs, SharedRuntime::throw_delayed_StackOverflowError);
1273 SET_ADDRESS(_extrs, SharedRuntime::throw_AbstractMethodError);
1274 SET_ADDRESS(_extrs, SharedRuntime::throw_IncompatibleClassChangeError);
1275 SET_ADDRESS(_extrs, SharedRuntime::throw_NullPointerException_at_call);
1276 }
1277
1278 #ifdef COMPILER1
1279 {
1280 // Required by C1 blobs
1281 SET_ADDRESS(_extrs, static_cast<int (*)(oopDesc*)>(SharedRuntime::dtrace_object_alloc));
1282 SET_ADDRESS(_extrs, SharedRuntime::exception_handler_for_return_address);
1283 SET_ADDRESS(_extrs, SharedRuntime::register_finalizer);
1284 SET_ADDRESS(_extrs, Runtime1::is_instance_of);
1285 SET_ADDRESS(_extrs, Runtime1::exception_handler_for_pc);
1286 SET_ADDRESS(_extrs, Runtime1::check_abort_on_vm_exception);
1287 SET_ADDRESS(_extrs, Runtime1::new_instance);
1288 SET_ADDRESS(_extrs, Runtime1::counter_overflow);
1289 SET_ADDRESS(_extrs, Runtime1::new_type_array);
1290 SET_ADDRESS(_extrs, Runtime1::new_object_array);
1291 SET_ADDRESS(_extrs, Runtime1::new_multi_array);
1292 SET_ADDRESS(_extrs, Runtime1::throw_range_check_exception);
1293 SET_ADDRESS(_extrs, Runtime1::throw_index_exception);
1294 SET_ADDRESS(_extrs, Runtime1::throw_div0_exception);
1295 SET_ADDRESS(_extrs, Runtime1::throw_null_pointer_exception);
1296 SET_ADDRESS(_extrs, Runtime1::throw_array_store_exception);
1297 SET_ADDRESS(_extrs, Runtime1::throw_class_cast_exception);
1298 SET_ADDRESS(_extrs, Runtime1::throw_incompatible_class_change_error);
1299 SET_ADDRESS(_extrs, Runtime1::is_instance_of);
1300 SET_ADDRESS(_extrs, Runtime1::monitorenter);
1301 SET_ADDRESS(_extrs, Runtime1::monitorexit);
1302 SET_ADDRESS(_extrs, Runtime1::deoptimize);
1303 SET_ADDRESS(_extrs, Runtime1::access_field_patching);
1304 SET_ADDRESS(_extrs, Runtime1::move_klass_patching);
1305 SET_ADDRESS(_extrs, Runtime1::move_mirror_patching);
1306 SET_ADDRESS(_extrs, Runtime1::move_appendix_patching);
1307 SET_ADDRESS(_extrs, Runtime1::predicate_failed_trap);
1308 SET_ADDRESS(_extrs, Runtime1::unimplemented_entry);
1309 SET_ADDRESS(_extrs, Thread::current);
1310 SET_ADDRESS(_extrs, CompressedKlassPointers::base_addr());
1311 #ifndef PRODUCT
1312 SET_ADDRESS(_extrs, os::breakpoint);
1313 #endif
1314 }
1315 #endif
1316
1317 #ifdef COMPILER2
1318 {
1319 // Required by C2 blobs
1320 SET_ADDRESS(_extrs, Deoptimization::uncommon_trap);
1321 SET_ADDRESS(_extrs, OptoRuntime::handle_exception_C);
1322 SET_ADDRESS(_extrs, OptoRuntime::new_instance_C);
1323 SET_ADDRESS(_extrs, OptoRuntime::new_array_C);
1324 SET_ADDRESS(_extrs, OptoRuntime::new_array_nozero_C);
1325 SET_ADDRESS(_extrs, OptoRuntime::multianewarray2_C);
1326 SET_ADDRESS(_extrs, OptoRuntime::multianewarray3_C);
1327 SET_ADDRESS(_extrs, OptoRuntime::multianewarray4_C);
1328 SET_ADDRESS(_extrs, OptoRuntime::multianewarray5_C);
1329 SET_ADDRESS(_extrs, OptoRuntime::multianewarrayN_C);
1330 #if INCLUDE_JVMTI
1331 SET_ADDRESS(_extrs, SharedRuntime::notify_jvmti_vthread_start);
1332 SET_ADDRESS(_extrs, SharedRuntime::notify_jvmti_vthread_end);
1333 SET_ADDRESS(_extrs, SharedRuntime::notify_jvmti_vthread_mount);
1334 SET_ADDRESS(_extrs, SharedRuntime::notify_jvmti_vthread_unmount);
1335 #endif
1336 SET_ADDRESS(_extrs, OptoRuntime::complete_monitor_locking_C);
1337 SET_ADDRESS(_extrs, OptoRuntime::monitor_notify_C);
1338 SET_ADDRESS(_extrs, OptoRuntime::monitor_notifyAll_C);
1339 SET_ADDRESS(_extrs, OptoRuntime::rethrow_C);
1340 SET_ADDRESS(_extrs, OptoRuntime::slow_arraycopy_C);
1341 SET_ADDRESS(_extrs, OptoRuntime::register_finalizer_C);
1342 #if defined(AARCH64)
1343 SET_ADDRESS(_extrs, JavaThread::verify_cross_modify_fence_failure);
1344 #endif // AARCH64
1345 }
1346 #endif // COMPILER2
1347
1348 #if INCLUDE_G1GC
1349 SET_ADDRESS(_extrs, G1BarrierSetRuntime::write_ref_field_post_entry);
1350 SET_ADDRESS(_extrs, G1BarrierSetRuntime::write_ref_field_pre_entry);
1351 #endif
1352 #if INCLUDE_SHENANDOAHGC
1353 SET_ADDRESS(_extrs, ShenandoahRuntime::write_ref_field_pre);
1354 SET_ADDRESS(_extrs, ShenandoahRuntime::load_reference_barrier_phantom);
1355 SET_ADDRESS(_extrs, ShenandoahRuntime::load_reference_barrier_phantom_narrow);
1356 #endif
1357 #if INCLUDE_ZGC
1358 SET_ADDRESS(_extrs, ZBarrierSetRuntime::load_barrier_on_phantom_oop_field_preloaded_addr());
1359 #if defined(AMD64)
1360 SET_ADDRESS(_extrs, &ZPointerLoadShift);
1361 #endif
1362 #endif
1363 #ifndef ZERO
1364 #if defined(AMD64) || defined(AARCH64) || defined(RISCV64)
1365 SET_ADDRESS(_extrs, MacroAssembler::debug64);
1366 #endif
1367 #endif // ZERO
1368
1369 _extrs_complete = true;
1370 log_debug(aot, codecache, init)("External addresses recorded");
1371 }
1372
static bool initializing_early_stubs = false;  // re-entrancy guard for init_early_stubs()

// Records addresses of the early (pre-universe) stub routines that AOT code
// may reference. Ids are indices into _stubs_addr, so the SET_ADDRESS order
// below must be identical at dump time and at load time.
void AOTCodeAddressTable::init_early_stubs() {
  if (_complete || initializing_early_stubs) return; // Done already
  initializing_early_stubs = true;
  _stubs_addr = NEW_C_HEAP_ARRAY(address, _stubs_max, mtCode);
  _stubs_length = 0;
  SET_ADDRESS(_stubs, StubRoutines::forward_exception_entry());

  {
    // Required by C1 blobs
#if defined(AMD64) && !defined(ZERO)
    SET_ADDRESS(_stubs, StubRoutines::x86::double_sign_flip());
    SET_ADDRESS(_stubs, StubRoutines::x86::d2l_fixup());
#endif // AMD64
  }

  _early_stubs_complete = true;
  log_info(aot, codecache, init)("Early stubs recorded");
}
1393
static bool initializing_shared_blobs = false;  // re-entrancy guard for init_shared_blobs()

// Records entry points of the shared runtime blobs, and reserves space for the
// C1 blob addresses directly behind them in the same allocation.
void AOTCodeAddressTable::init_shared_blobs() {
  if (_complete || initializing_shared_blobs) return; // Done already
  initializing_shared_blobs = true;
  // One allocation backs both the shared-blob and C1-blob partitions;
  // _C1_blobs_addr is an interior pointer and must not be freed separately.
  address* blobs_addr = NEW_C_HEAP_ARRAY(address, _blobs_max, mtCode);
  _shared_blobs_addr = blobs_addr;
  _C1_blobs_addr = _shared_blobs_addr + _shared_blobs_max;
  _shared_blobs_length = _C1_blobs_length = 0;

  // clear the address table
  memset(blobs_addr, 0, sizeof(address)* _blobs_max);

  // Record addresses of generated code blobs.
  // NOTE: ids are indices into this table, so the insertion order below must
  // match exactly between the dump and use phases.
  SET_ADDRESS(_shared_blobs, SharedRuntime::get_handle_wrong_method_stub());
  SET_ADDRESS(_shared_blobs, SharedRuntime::get_ic_miss_stub());
  SET_ADDRESS(_shared_blobs, SharedRuntime::deopt_blob()->unpack());
  SET_ADDRESS(_shared_blobs, SharedRuntime::deopt_blob()->unpack_with_exception());
  SET_ADDRESS(_shared_blobs, SharedRuntime::deopt_blob()->unpack_with_reexecution());
  SET_ADDRESS(_shared_blobs, SharedRuntime::deopt_blob()->unpack_with_exception_in_tls());
#if INCLUDE_JVMCI
  if (EnableJVMCI) {
    SET_ADDRESS(_shared_blobs, SharedRuntime::deopt_blob()->uncommon_trap());
    SET_ADDRESS(_shared_blobs, SharedRuntime::deopt_blob()->implicit_exception_uncommon_trap());
  }
#endif

  _shared_blobs_complete = true;
  log_debug(aot, codecache, init)("Early shared blobs recorded");
  _complete = true;
}
1425
// Records C1 runtime blob entry points up to and including
// C1StubId::forward_exception_id. Blobs not generated yet are skipped with a
// log message rather than treated as errors.
// NOTE(review): a skipped blob shifts the ids of the entries that follow it;
// dump and load must skip the same set for ids to line up -- TODO confirm.
void AOTCodeAddressTable::init_early_c1() {
#ifdef COMPILER1
  // Runtime1 Blobs
  for (int i = 0; i <= (int)C1StubId::forward_exception_id; i++) {
    C1StubId id = (C1StubId)i;
    if (Runtime1::blob_for(id) == nullptr) {
      log_info(aot, codecache, init)("C1 blob %s is missing", Runtime1::name_for(id));
      continue;
    }
    if (Runtime1::entry_for(id) == nullptr) {
      log_info(aot, codecache, init)("C1 blob %s is missing entry", Runtime1::name_for(id));
      continue;
    }
    address entry = Runtime1::entry_for(id);
    SET_ADDRESS(_C1_blobs, entry);
  }
#endif // COMPILER1
  assert(_C1_blobs_length <= _C1_blobs_max, "increase _C1_blobs_max to %d", _C1_blobs_length);
  _early_c1_complete = true;
}
1446
1447 #undef SET_ADDRESS
1448
1449 AOTCodeAddressTable::~AOTCodeAddressTable() {
1450 if (_extrs_addr != nullptr) {
1451 FREE_C_HEAP_ARRAY(address, _extrs_addr);
1452 }
1453 if (_shared_blobs_addr != nullptr) {
1454 FREE_C_HEAP_ARRAY(address, _shared_blobs_addr);
1455 }
1456 }
1457
#ifdef PRODUCT
#define MAX_STR_COUNT 200
#else
#define MAX_STR_COUNT 500
#endif
// C string ids occupy the range [_c_str_base, _c_str_base + _c_str_max - 1],
// placed immediately after all other partitions.
#define _c_str_max  MAX_STR_COUNT
static const int _c_str_base = _all_max;

static const char* _C_strings_in[MAX_STR_COUNT] = {nullptr}; // Incoming strings
static const char* _C_strings[MAX_STR_COUNT] = {nullptr}; // Our duplicates
static int _C_strings_count = 0;  // number of strings recorded so far
static int _C_strings_s[MAX_STR_COUNT] = {0};   // presumably per-string metadata -- TODO confirm meaning
static int _C_strings_id[MAX_STR_COUNT] = {0};  // presumably assigned string ids -- TODO confirm meaning
static int _C_strings_used = 0;   // presumably count of strings actually referenced -- TODO confirm
1472
1603 fatal("AOT Code Cache VM runtime addresses table is not complete");
1604 }
1605 if (idx == -1) {
1606 return (address)-1;
1607 }
1608 uint id = (uint)idx;
1609 // special case for symbols based relative to os::init
1610 if (id > (_c_str_base + _c_str_max)) {
1611 return (address)os::init + idx;
1612 }
1613 if (idx < 0) {
1614 fatal("Incorrect id %d for AOT Code Cache addresses table", id);
1615 }
1616 // no need to compare unsigned id against 0
1617 if (/* id >= _extrs_base && */ id < _extrs_length) {
1618 return _extrs_addr[id - _extrs_base];
1619 }
1620 if (id >= _stubs_base && id < _stubs_base + _stubs_length) {
1621 return _stubs_addr[id - _stubs_base];
1622 }
1623 if (id >= _shared_blobs_base && id < _shared_blobs_base + _shared_blobs_length) {
1624 return _shared_blobs_addr[id - _shared_blobs_base];
1625 }
1626 if (id >= _C1_blobs_base && id < _C1_blobs_base + _C1_blobs_length) {
1627 return _C1_blobs_addr[id - _C1_blobs_base];
1628 }
1629 if (id >= _c_str_base && id < (_c_str_base + (uint)_C_strings_count)) {
1630 return address_for_C_string(id - _c_str_base);
1631 }
1632 fatal("Incorrect id %d for AOT Code Cache addresses table", id);
1633 return nullptr;
1634 }
1635
// Maps a runtime address to its stable AOT address-table id, or -1 for the
// self-referencing sentinel (address)-1. The returned id encodes which
// partition the address belongs to: C strings, stubs, blobs, or external
// runtime entries. An address that should be in the table but is not found is
// a fatal error; as a special case, a string-like address inside the VM image
// is encoded as its byte distance from os::init.
// NOTE(review): 'reloc' is taken by value and used only for diagnostics.
int AOTCodeAddressTable::id_for_address(address addr, RelocIterator reloc, CodeBlob* code_blob) {
  if (!_extrs_complete) {
    fatal("AOT Code Cache VM runtime addresses table is not complete");
  }
  int id = -1;
  if (addr == (address)-1) { // Static call stub has jump to itself
    return id;
  }
  // Search for C string
  id = id_for_C_string(addr);
  if (id >= 0) {
    return id + _c_str_base;
  }
  if (StubRoutines::contains(addr)) {
    // Search in stubs
    id = search_address(addr, _stubs_addr, _stubs_length);
    if (id < 0) {
      StubCodeDesc* desc = StubCodeDesc::desc_for(addr);
      if (desc == nullptr) {
        // The address may be a return address; retry with the call-site pc.
        desc = StubCodeDesc::desc_for(addr + frame::pc_return_offset);
      }
      const char* sub_name = (desc != nullptr) ? desc->name() : "<unknown>";
      fatal("Address " INTPTR_FORMAT " for Stub:%s is missing in AOT Code Cache addresses table", p2i(addr), sub_name);
    } else {
      return id + _stubs_base;
    }
  } else {
    CodeBlob* cb = CodeCache::find_blob(addr);
    if (cb != nullptr) {
      // Search in code blobs. The shared-blob and C1-blob partitions share one
      // array, so scan the whole _blobs_max range relative to _shared_blobs_base.
      int id_base = _shared_blobs_base;
      id = search_address(addr, _shared_blobs_addr, _blobs_max);
      if (id < 0) {
        fatal("Address " INTPTR_FORMAT " for Blob:%s is missing in AOT Code Cache addresses table", p2i(addr), cb->name());
      } else {
        return id_base + id;
      }
    } else {
      // Search in runtime functions
      id = search_address(addr, _extrs_addr, _extrs_length);
      if (id < 0) {
        ResourceMark rm;
        const int buflen = 1024;
        char* func_name = NEW_RESOURCE_ARRAY(char, buflen);
        int offset = 0;
        if (os::dll_address_to_function_name(addr, func_name, buflen, &offset)) {
          if (offset > 0) {
            // Could be address of C string: encode it as the distance from
            // os::init (must be distinguishable from real table ids).
            uint dist = (uint)pointer_delta(addr, (address)os::init, 1);
            log_debug(aot, codecache)("Address " INTPTR_FORMAT " (offset %d) for runtime target '%s' is missing in AOT Code Cache addresses table",
                                      p2i(addr), dist, (const char*)addr);
            assert(dist > (uint)(_all_max + MAX_STR_COUNT), "change encoding of distance");
            return dist;
          }
          reloc.print_current_on(tty);
          code_blob->print_on(tty);
          code_blob->print_code_on(tty);
          fatal("Address " INTPTR_FORMAT " for runtime target '%s+%d' is missing in AOT Code Cache addresses table", p2i(addr), func_name, offset);
        } else {
          reloc.print_current_on(tty);
          code_blob->print_on(tty);
          code_blob->print_code_on(tty);
          os::find(addr, tty);
          fatal("Address " INTPTR_FORMAT " for <unknown>/('%s') is missing in AOT Code Cache addresses table", p2i(addr), (const char*)addr);
        }
      } else {
        return _extrs_base + id;
      }
    }
  }
  return id;
}
1708
1709 void AOTCodeCache::print_on(outputStream* st) {
1710 AOTCodeCache* cache = open_for_use();
1711 if (cache != nullptr) {
1712 uint count = cache->_load_header->entries_count();
1713 uint* search_entries = (uint*)cache->addr(cache->_load_header->entries_offset()); // [id, index]
1714 AOTCodeEntry* load_entries = (AOTCodeEntry*)(search_entries + 2 * count);
1715
1716 for (uint i = 0; i < count; i++) {
1717 // Use search_entries[] to order ouput
1718 int index = search_entries[2*i + 1];
1719 AOTCodeEntry* entry = &(load_entries[index]);
1720
1721 uint entry_position = entry->offset();
1722 uint name_offset = entry->name_offset() + entry_position;
1723 const char* saved_name = cache->addr(name_offset);
1724
1725 st->print_cr("%4u: entry_idx:%4u Kind:%u Id:%u size=%u '%s'",
1726 i, index, entry->kind(), entry->id(), entry->size(), saved_name);
1727 }
1728 } else {
1729 st->print_cr("failed to map code cache");
1730 }
1731 }
|
1 /*
2 * Copyright (c) 2023, 2025, Oracle and/or its affiliates. All rights reserved.
3 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
4 *
5 * This code is free software; you can redistribute it and/or modify it
6 * under the terms of the GNU General Public License version 2 only, as
7 * published by the Free Software Foundation.
8 *
9 * This code is distributed in the hope that it will be useful, but WITHOUT
10 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
11 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
12 * version 2 for more details (a copy is included in the LICENSE file that
13 * accompanied this code).
14 *
15 * You should have received a copy of the GNU General Public License version
16 * 2 along with this work; if not, write to the Free Software Foundation,
17 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
18 *
19 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
20 * or visit www.oracle.com if you need additional information or have any
21 * questions.
22 *
23 */
24
25
26 #include "asm/macroAssembler.hpp"
27 #include "asm/codeBuffer.hpp"
28 #include "cds/aotCacheAccess.hpp"
29 #include "cds/cds_globals.hpp"
30 #include "cds/cdsConfig.hpp"
31 #include "cds/heapShared.hpp"
32 #include "cds/metaspaceShared.hpp"
33 #include "ci/ciConstant.hpp"
34 #include "ci/ciEnv.hpp"
35 #include "ci/ciField.hpp"
36 #include "ci/ciMethod.hpp"
37 #include "ci/ciMethodData.hpp"
38 #include "ci/ciObject.hpp"
39 #include "ci/ciUtilities.inline.hpp"
40 #include "classfile/javaAssertions.hpp"
41 #include "classfile/stringTable.hpp"
42 #include "classfile/symbolTable.hpp"
43 #include "classfile/systemDictionary.hpp"
44 #include "classfile/vmClasses.hpp"
45 #include "classfile/vmIntrinsics.hpp"
46 #include "code/aotCodeCache.hpp"
47 #include "code/codeBlob.hpp"
48 #include "code/codeCache.hpp"
49 #include "code/oopRecorder.inline.hpp"
50 #include "compiler/abstractCompiler.hpp"
51 #include "compiler/compilationPolicy.hpp"
52 #include "compiler/compileBroker.hpp"
53 #include "compiler/compileTask.hpp"
54 #include "gc/g1/g1BarrierSetRuntime.hpp"
55 #include "gc/shared/gcConfig.hpp"
56 #include "logging/logStream.hpp"
57 #include "memory/memoryReserver.hpp"
58 #include "memory/universe.hpp"
59 #include "oops/klass.inline.hpp"
60 #include "oops/method.inline.hpp"
61 #include "oops/trainingData.hpp"
62 #include "prims/jvmtiThreadState.hpp"
63 #include "runtime/atomic.hpp"
64 #include "runtime/deoptimization.hpp"
65 #include "runtime/flags/flagSetting.hpp"
66 #include "runtime/globals_extension.hpp"
67 #include "runtime/handles.inline.hpp"
68 #include "runtime/java.hpp"
69 #include "runtime/jniHandles.inline.hpp"
70 #include "runtime/mutexLocker.hpp"
71 #include "runtime/os.inline.hpp"
72 #include "runtime/sharedRuntime.hpp"
73 #include "runtime/stubCodeGenerator.hpp"
74 #include "runtime/stubRoutines.hpp"
75 #include "runtime/timerTrace.hpp"
76 #include "runtime/threadIdentifier.hpp"
77 #include "utilities/copy.hpp"
78 #include "utilities/ostream.hpp"
79 #include "utilities/spinYield.hpp"
80 #ifdef COMPILER1
81 #include "c1/c1_Runtime1.hpp"
82 #include "c1/c1_LIRAssembler.hpp"
83 #include "gc/shared/c1/barrierSetC1.hpp"
84 #include "gc/g1/c1/g1BarrierSetC1.hpp"
85 #if INCLUDE_SHENANDOAHGC
86 #include "gc/shenandoah/c1/shenandoahBarrierSetC1.hpp"
87 #endif // INCLUDE_SHENANDOAHGC
88 #include "gc/z/c1/zBarrierSetC1.hpp"
89 #endif // COMPILER1
90 #ifdef COMPILER2
91 #include "opto/runtime.hpp"
92 #endif
93 #if INCLUDE_JVMCI
94 #include "jvmci/jvmci.hpp"
95 #endif
96 #if INCLUDE_G1GC
97 #include "gc/g1/g1BarrierSetRuntime.hpp"
98 #endif
99 #if INCLUDE_SHENANDOAHGC
100 #include "gc/shenandoah/shenandoahRuntime.hpp"
101 #endif
102 #if INCLUDE_ZGC
103 #include "gc/z/zBarrierSetRuntime.hpp"
104 #endif
105
106 #include <sys/stat.h>
107 #include <errno.h>
108
// Human-readable names for AOTCodeEntry kinds, generated from the
// DO_AOTCODEENTRY_KIND x-macro so the list stays in sync with the enum.
const char* aot_code_entry_kind_name[] = {
#define DECL_KIND_STRING(kind) XSTR(kind),
  DO_AOTCODEENTRY_KIND(DECL_KIND_STRING)
#undef DECL_KIND_STRING
};
114
// Accumulated wall-clock timers for the major AOT code cache operations.
static elapsedTimer _t_totalLoad;
static elapsedTimer _t_totalRegister;
static elapsedTimer _t_totalFind;
static elapsedTimer _t_totalStore;

// Timers are maintained when CITime is on or Info-level init logging is enabled.
static bool enable_timers() {
  return CITime || log_is_enabled(Info, init);
}
123
// Reacts to a failure to use an existing AOT code cache: exits the VM when
// AbortVMOnAOTCodeFailure is set, otherwise logs and turns all AOT code
// caching flags off.
static void report_load_failure() {
  if (AbortVMOnAOTCodeFailure) {
    vm_exit_during_initialization("Unable to use AOT Code Cache.", nullptr);
  }
  log_info(aot, codecache, init)("Unable to use AOT Code Cache.");
  AOTCodeCache::disable_caching();
}
131
// Reacts to a failure to create the AOT code cache: aborts the VM when
// AbortVMOnAOTCodeFailure is set, otherwise logs and turns all AOT code
// caching flags off.
static void report_store_failure() {
  if (AbortVMOnAOTCodeFailure) {
    tty->print_cr("Unable to create AOT Code Cache.");
    vm_abort(false);
  }
  log_info(aot, codecache, exit)("Unable to create AOT Code Cache.");
  AOTCodeCache::disable_caching();
}
140
// The sequence of AOT code caching flag and parameter settings.
142 //
143 // 1. The initial AOT code caching flags setting is done
144 // during call to CDSConfig::check_vm_args_consistency().
145 //
146 // 2. The earliest AOT code state check done in compilationPolicy_init()
147 // where we set number of compiler threads for AOT assembly phase.
148 //
149 // 3. We determine presence of AOT code in AOT Cache in
//    MetaspaceShared::open_static_archive() which is called
151 // after compilationPolicy_init() but before codeCache_init().
152 //
153 // 4. AOTCodeCache::initialize() is called during universe_init()
154 // and does final AOT state and flags settings.
155 //
156 // 5. Finally AOTCodeCache::init2() is called after universe_init()
157 // when all GC settings are finalized.
158
159 // Next methods determine which action we do with AOT code depending
160 // on phase of AOT process: assembly or production.
161
// Adapters are written to the cache only during the AOT assembly (dump) phase.
bool AOTCodeCache::is_dumping_adapter() {
  return AOTAdapterCaching && is_on_for_dump();
}

// Adapters are read from the cache only during the production (use) phase.
bool AOTCodeCache::is_using_adapter() {
  return AOTAdapterCaching && is_on_for_use();
}

// Stubs are written to the cache only during the AOT assembly (dump) phase.
bool AOTCodeCache::is_dumping_stub() {
  return AOTStubCaching && is_on_for_dump();
}

// Stubs are read from the cache only during the production (use) phase.
bool AOTCodeCache::is_using_stub() {
  return AOTStubCaching && is_on_for_use();
}

// Compiled code is written to the cache only during the AOT assembly phase.
bool AOTCodeCache::is_dumping_code() {
  return AOTCodeCaching && is_on_for_dump();
}

// Compiled code is read from the cache only during the production phase.
bool AOTCodeCache::is_using_code() {
  return AOTCodeCaching && is_on_for_use();
}
185
// Turns all three AOT code caching flags on ergonomically, but only where the
// user has not set them explicitly on the command line.
void AOTCodeCache::enable_caching() {
  FLAG_SET_ERGO_IF_DEFAULT(AOTCodeCaching, true);
  FLAG_SET_ERGO_IF_DEFAULT(AOTStubCaching, true);
  FLAG_SET_ERGO_IF_DEFAULT(AOTAdapterCaching, true);
}

// Forces all three AOT code caching flags off, overriding any user settings.
void AOTCodeCache::disable_caching() {
  FLAG_SET_ERGO(AOTCodeCaching, false);
  FLAG_SET_ERGO(AOTStubCaching, false);
  FLAG_SET_ERGO(AOTAdapterCaching, false);
}

// True if any flavor of AOT code caching is still enabled.
bool AOTCodeCache::is_caching_enabled() {
  return AOTCodeCaching || AOTStubCaching || AOTAdapterCaching;
}
201
202 static uint32_t encode_id(AOTCodeEntry::Kind kind, int id) {
203 assert(AOTCodeEntry::is_valid_entry_kind(kind), "invalid AOTCodeEntry kind %d", (int)kind);
204 // There can be a conflict of id between an Adapter and *Blob, but that should not cause any functional issue
205 // becasue both id and kind are used to find an entry, and that combination should be unique
206 if (kind == AOTCodeEntry::Adapter) {
207 return id;
208 } else if (kind == AOTCodeEntry::SharedBlob) {
209 return id;
210 } else if (kind == AOTCodeEntry::C1Blob) {
211 return (int)SharedStubId::NUM_STUBIDS + id;
212 } else {
213 // kind must be AOTCodeEntry::C2Blob
214 return (int)SharedStubId::NUM_STUBIDS + COMPILER1_PRESENT((int)C1StubId::NUM_STUBIDS) + id;
215 }
216 }
217
// Effective, allocation-granularity-aligned cache size limit; computed from
// AOTCodeMaxSize in AOTCodeCache::initialize().
static uint _max_aot_code_size = 0;
uint AOTCodeCache::max_aot_code_size() {
  return _max_aot_code_size;
}
222
// True when C2 acts as a third-tier compiler ("C3") under JVMCI: requires the
// JVMCI compiler, AOT code caching, and the UseC2asC3 flag.
bool AOTCodeCache::is_C3_on() {
#if INCLUDE_JVMCI
  if (UseJVMCICompiler) {
    return (AOTCodeCaching) && UseC2asC3;
  }
#endif
  return false;
}
231
// True when a dedicated thread should be used to load AOT code.
bool AOTCodeCache::is_code_load_thread_on() {
  // We cannot trust AOTCodeCache status here, due to bootstrapping circularity.
  // Compilation policy init runs before AOT cache is fully initialized, so the
  // normal AOT cache status check would always fail.
  // See: https://bugs.openjdk.org/browse/JDK-8358690
  // return UseCodeLoadThread && is_using_code();
  return UseCodeLoadThread && AOTCodeCaching && CDSConfig::is_using_archive();
}
240
// Returns true if a constant field value may be embedded in compiled code.
// Embedding is restricted only while the cache is being generated; the
// commented-out clauses record stricter policies considered but not enabled.
bool AOTCodeCache::allow_const_field(ciConstant& value) {
  return !is_on() || !is_dumping_code() // Restrict only when we generate cache
        // Can not trust primitive too   || !is_reference_type(value.basic_type())
        // May disable this too for now  || is_reference_type(value.basic_type()) && value.as_object()->should_be_constant()
        ;
}
247
// It is called from MetaspaceShared::initialize_shared_spaces()
// which is called from universe_init().
// At this point all AOT class linking settings are finalized
// and AOT cache is open so we can map AOT code region.
void AOTCodeCache::initialize() {
#if defined(ZERO) || !(defined(AMD64) || defined(AARCH64))
  log_info(aot, codecache, init)("AOT Code Cache is not supported on this platform.");
  disable_caching();
  return;
#else
  if (FLAG_IS_DEFAULT(AOTCache)) {
    log_info(aot, codecache, init)("AOT Code Cache is not used: AOTCache is not specified.");
    disable_caching();
    return; // AOTCache must be specified to dump and use AOT code
  }

  // Disable stubs caching until JDK-8357398 is fixed.
  FLAG_SET_ERGO(AOTStubCaching, false);

  if (VerifyOops) {
    // Disable AOT stubs caching when VerifyOops flag is on.
    // Verify oops code generated a lot of C strings which overflow
    // AOT C string table (which has fixed size).
    // AOT C string table will be reworked later to handle such cases.
    //
    // Note: AOT adapters are not affected - they don't have oop operations.
    log_info(aot, codecache, init)("AOT Stubs Caching is not supported with VerifyOops.");
    FLAG_SET_ERGO(AOTStubCaching, false);
  }

  // Decide the direction (dump vs. use) from the CDS configuration.
  bool is_dumping = false;
  bool is_using = false;
  if (CDSConfig::is_dumping_final_static_archive() && CDSConfig::is_dumping_aot_linked_classes()) {
    enable_caching();
    is_dumping = is_caching_enabled();
  } else if (CDSConfig::is_using_archive() && CDSConfig::is_using_aot_linked_classes()) {
    enable_caching();
    is_using = is_caching_enabled();
  } else {
    log_info(aot, codecache, init)("AOT Code Cache is not used: AOT Class Linking is not used.");
    disable_caching();
    return; // nothing to do
  }
  if (!(is_dumping || is_using)) {
    disable_caching();
    return; // AOT code caching disabled on command line
  }
  // Class-init barriers are only meaningful when AOT code caching is on.
  if (AOTCodeCaching) {
    if (FLAG_IS_DEFAULT(ClassInitBarrierMode)) {
      FLAG_SET_ERGO(ClassInitBarrierMode, 1);
    }
  } else if (ClassInitBarrierMode > 0) {
    log_info(aot, codecache, init)("Set ClassInitBarrierMode to 0 because AOTCodeCaching is false.");
    FLAG_SET_ERGO(ClassInitBarrierMode, 0);
  }
  // Reserve AOT Cache region when we dumping AOT code.
  _max_aot_code_size = AOTCodeMaxSize;
  if (is_dumping && !FLAG_IS_DEFAULT(AOTCodeMaxSize)) {
    if (!is_aligned(AOTCodeMaxSize, os::vm_allocation_granularity())) {
      // Round a user-supplied size up to the allocation granularity.
      _max_aot_code_size = align_up(AOTCodeMaxSize, os::vm_allocation_granularity());
      log_debug(aot,codecache,init)("Max AOT Code Cache size is aligned up to %uK", (int)(max_aot_code_size()/K));
    }
  }
  // When using, an empty AOT code region means there is nothing to load.
  size_t aot_code_size = is_using ? AOTCacheAccess::get_aot_code_region_size() : 0;
  if (is_using && aot_code_size == 0) {
    log_info(aot, codecache, init)("AOT Code Cache is empty");
    disable_caching();
    return;
  }
  if (!open_cache(is_dumping, is_using)) {
    if (is_using) {
      report_load_failure();
    } else {
      report_store_failure();
    }
    return;
  }
  if (is_dumping) {
    FLAG_SET_DEFAULT(FoldStableValues, false);
    FLAG_SET_DEFAULT(ForceUnreachable, true);
  }
  FLAG_SET_DEFAULT(DelayCompilerStubsGeneration, false);
#endif // supported platform (!ZERO && (AMD64 || AARCH64))
}
332
// It is called after universe_init() when all GC settings are finalized.
// Verifies that the VM configuration permits AOT code, then builds the
// external-address and early-stub tables needed to save/load code blobs.
void AOTCodeCache::init2() {
  if (!is_on()) {
    return;
  }
  // After Universe initialized
  BarrierSet* bs = BarrierSet::barrier_set();
  if (bs->is_a(BarrierSet::CardTableBarrierSet)) {
    address byte_map_base = ci_card_table_address_as<address>();
    if (is_on_for_dump() && !external_word_Relocation::can_be_relocated(byte_map_base)) {
      // Bail out since we can't encode card table base address with relocation
      log_warning(aot, codecache, init)("Can't create AOT Code Cache because card table base address is not relocatable: " INTPTR_FORMAT, p2i(byte_map_base));
      close();
      // NOTE(review): this is a dump-time bail-out but it reports a *load*
      // failure; confirm whether report_store_failure() was intended here.
      report_load_failure();
      return;
    }
  }
  if (!verify_vm_config()) {
    close();
    report_load_failure();
    return;
  }

  // initialize aot runtime constants as appropriate to this runtime
  AOTRuntimeConstants::initialize_from_runtime();
  // initialize the table of external routines so we can save
  // generated code blobs that reference them
  init_extrs_table();
  // initialize the table of initial stubs so we can save
  // generated code blobs that reference them
  init_early_stubs_table();
}
365
366 AOTCodeCache* AOTCodeCache::_cache = nullptr;
367
368 bool AOTCodeCache::open_cache(bool is_dumping, bool is_using) {
369 AOTCodeCache* cache = new AOTCodeCache(is_dumping, is_using);
370 if (cache->failed()) {
371 delete cache;
372 _cache = nullptr;
373 return false;
374 }
375 _cache = cache;
376 return true;
377 }
378
379 static void print_helper(nmethod* nm, outputStream* st) {
380 AOTCodeCache::iterate([&](AOTCodeEntry* e) {
381 if (e->method() == nm->method()) {
382 ResourceMark rm;
383 stringStream ss;
384 ss.print("A%s%d", (e->for_preload() ? "P" : ""), e->comp_level());
385 if (e->decompile() > 0) {
386 ss.print("+D%d", e->decompile());
387 }
388 ss.print("[%s%s%s]",
389 (e->is_loaded() ? "L" : ""),
390 (e->load_fail() ? "F" : ""),
391 (e->not_entrant() ? "I" : ""));
392 ss.print("#%d", e->comp_id());
393
394 st->print(" %s", ss.freeze());
395 }
396 });
397 }
398
// Close the cache: emit end-of-run statistics and per-nmethod summaries (when
// the corresponding log tags are enabled), then free the cache instance.
// Safe to call when the cache is already off (no-op).
void AOTCodeCache::close() {
  if (is_on()) {
    if (AOTCodeCache::is_on_for_use()) {
      LogStreamHandle(Info, init) log;
      if (log.is_enabled()) {
        log.print_cr("AOT Code Cache statistics (when closed): ");
        AOTCodeCache::print_statistics_on(&log);
        log.cr();
        AOTCodeCache::print_timers_on(&log);

        LogStreamHandle(Info, aot, codecache, init) log1;
        if (log1.is_enabled()) {
          AOTCodeCache::print_unused_entries_on(&log1);
        }

        // NOTE(review): the per-nmethod listing below is nested inside the
        // Info(init) check above, so it is skipped when only aot+codecache
        // logging is enabled — confirm this nesting is intentional.
        LogStreamHandle(Info, aot, codecache) aot_info;
        // need a lock to traverse the code cache
        MutexLocker locker(CodeCache_lock, Mutex::_no_safepoint_check_flag);
        if (aot_info.is_enabled()) {
          NMethodIterator iter(NMethodIterator::all);
          while (iter.next()) {
            nmethod* nm = iter.method();
            // Only live, normal (non-native, non-OSR) compilations are listed.
            if (nm->is_in_use() && !nm->is_native_method() && !nm->is_osr_method()) {
              // Flags: S = shared method, A = AOT code, P = preloaded.
              aot_info.print("%5d:%c%c%c%d:", nm->compile_id(),
                             (nm->method()->is_shared() ? 'S' : ' '),
                             (nm->is_aot() ? 'A' : ' '),
                             (nm->preloaded() ? 'P' : ' '),
                             nm->comp_level());
              print_helper(nm, &aot_info);
              aot_info.print(": ");
              CompileTask::print(&aot_info, nm, nullptr, true /*short_form*/);

              LogStreamHandle(Debug, aot, codecache) aot_debug;
              if (aot_debug.is_enabled()) {
                // At Debug level also dump the training data for this method.
                MethodTrainingData* mtd = MethodTrainingData::find(methodHandle(Thread::current(), nm->method()));
                if (mtd != nullptr) {
                  mtd->iterate_compiles([&](CompileTrainingData* ctd) {
                    aot_debug.print(" CTD: "); ctd->print_on(&aot_debug); aot_debug.cr();
                  });
                }
              }
            }
          }
        }
      }
    }
    delete _cache; // Free memory
    _cache = nullptr;
  }
}
449
// Directory describing the AOT code data stored in the AOT cache's cached
// code region. Allocated as the very first object of that region (see
// CachedCodeDirectory::create below).
class CachedCodeDirectory : public CachedCodeDirectoryInternal {
public:
  uint _aot_code_size;   // size in bytes of the stored AOT code data
  char* _aot_code_data;  // start of the data; written via AOTCacheAccess::set_pointer so it can be relocated at load time

  // Record the location and size of the AOT code data in this directory.
  void set_aot_code_data(uint size, char* aot_data) {
    _aot_code_size = size;
    AOTCacheAccess::set_pointer(&_aot_code_data, aot_data);
  }

  // Allocate the directory at the start of the (empty) AOT code region.
  static CachedCodeDirectory* create();
};
462
463 // Storing AOT code in the cached code region of AOT Cache:
464 //
465 // [1] Use CachedCodeDirectory to keep track of all of data related to cached code.
466 // E.g., you can build a hashtable to record what methods have been archived.
467 //
468 // [2] Memory for all data for cached code, including CachedCodeDirectory, should be
469 // allocated using AOTCacheAccess::allocate_aot_code_region().
470 //
471 // [3] CachedCodeDirectory must be the very first allocation.
472 //
473 // [4] Two kinds of pointer can be stored:
474 // - A pointer p that points to metadata. AOTCacheAccess::can_generate_aot_code(p) must return true.
475 // - A pointer to a buffer returned by AOTCacheAccess::allocate_aot_code_region().
476 // (It's OK to point to an interior location within this buffer).
477 // Such pointers must be stored using AOTCacheAccess::set_pointer()
478 //
479 // The buffers allocated by AOTCacheAccess::allocate_aot_code_region() are in a contiguous region. At runtime, this
480 // region is mapped to the process address space. All the pointers in this buffer are relocated as necessary
481 // (e.g., to account for the runtime location of the CodeCache).
482 //
// This is always at the very beginning of the mmapped CDS "cc" (cached code) region
484 static CachedCodeDirectory* _aot_code_directory = nullptr;
485
486 CachedCodeDirectory* CachedCodeDirectory::create() {
487 assert(AOTCacheAccess::is_aot_code_region_empty(), "must be");
488 CachedCodeDirectory* dir = (CachedCodeDirectory*)AOTCacheAccess::allocate_aot_code_region(sizeof(CachedCodeDirectory));
489 dir->dumptime_init_internal();
490 return dir;
491 }
492
493 #define DATA_ALIGNMENT HeapWordSize
494
495 AOTCodeCache::AOTCodeCache(bool is_dumping, bool is_using) :
496 _load_header(nullptr),
497 _load_buffer(nullptr),
498 _store_buffer(nullptr),
499 _C_store_buffer(nullptr),
500 _write_position(0),
501 _load_size(0),
502 _store_size(0),
503 _for_use(is_using),
504 _for_dump(is_dumping),
505 _closing(false),
506 _failed(false),
507 _lookup_failed(false),
508 _for_preload(false),
509 _gen_preload_code(false),
510 _has_clinit_barriers(false),
511 _table(nullptr),
512 _load_entries(nullptr),
513 _search_entries(nullptr),
514 _store_entries(nullptr),
515 _C_strings_buf(nullptr),
516 _store_entries_cnt(0),
517 _compile_id(0),
518 _comp_level(0)
519 {
520 _use_meta_ptrs = UseSharedSpaces ? UseMetadataPointers : false;
521
522 // Read header at the begining of cache
523 if (_for_use) {
524 // Read cache
525 size_t load_size = AOTCacheAccess::get_aot_code_region_size();
526 ReservedSpace rs = MemoryReserver::reserve(load_size, mtCode);
527 if (!rs.is_reserved()) {
528 log_warning(aot, codecache, init)("Failed to reserved %u bytes of memory for mapping AOT code region into AOT Code Cache", (uint)load_size);
529 set_failed();
530 return;
531 }
532 if (!AOTCacheAccess::map_aot_code_region(rs)) {
533 log_warning(aot, codecache, init)("Failed to read/mmap cached code region into AOT Code Cache");
534 set_failed();
535 return;
536 }
537 _aot_code_directory = (CachedCodeDirectory*)rs.base();
538 _aot_code_directory->runtime_init_internal();
539
540 _load_size = _aot_code_directory->_aot_code_size;
541 _load_buffer = _aot_code_directory->_aot_code_data;
542 assert(is_aligned(_load_buffer, DATA_ALIGNMENT), "load_buffer is not aligned");
543 log_info(aot, codecache, init)("Mapped %u bytes at address " INTPTR_FORMAT " from AOT Code Cache", _load_size, p2i(_load_buffer));
544
545 _load_header = (AOTCodeCache::Header*)addr(0);
546 if (!_load_header->verify_config(_load_size)) {
547 set_failed();
548 return;
549 }
550 log_info (aot, codecache, init)("Loaded %u AOT code entries from AOT Code Cache", _load_header->entries_count());
551 log_debug(aot, codecache, init)(" Adapters: total=%u", _load_header->adapters_count());
552 log_debug(aot, codecache, init)(" Shared Blobs: total=%u", _load_header->shared_blobs_count());
553 log_debug(aot, codecache, init)(" C1 Blobs: total=%u", _load_header->C1_blobs_count());
554 log_debug(aot, codecache, init)(" C2 Blobs: total=%u", _load_header->C2_blobs_count());
555 log_debug(aot, codecache, init)(" Stubs: total=%u", _load_header->stubs_count());
556 log_debug(aot, codecache, init)(" Nmethods: total=%u", _load_header->nmethods_count());
557 log_debug(aot, codecache, init)(" AOT code cache size: %u bytes", _load_header->cache_size());
558
559 if (_load_header->has_meta_ptrs()) {
560 assert(UseSharedSpaces, "should be verified already");
561 _use_meta_ptrs = true; // Regardless UseMetadataPointers
562 UseMetadataPointers = true;
563 }
564 // Read strings
565 load_strings();
566 }
567 if (_for_dump) {
568 _gen_preload_code = _use_meta_ptrs && (ClassInitBarrierMode > 0);
569
570 _C_store_buffer = NEW_C_HEAP_ARRAY(char, max_aot_code_size() + DATA_ALIGNMENT, mtCode);
571 _store_buffer = align_up(_C_store_buffer, DATA_ALIGNMENT);
572 // Entries allocated at the end of buffer in reverse (as on stack).
573 _store_entries = (AOTCodeEntry*)align_up(_C_store_buffer + max_aot_code_size(), DATA_ALIGNMENT);
574 log_debug(aot, codecache, init)("Allocated store buffer at address " INTPTR_FORMAT " of size %u", p2i(_store_buffer), max_aot_code_size());
575 }
576 _table = new AOTCodeAddressTable();
577 }
578
579 void AOTCodeCache::invalidate(AOTCodeEntry* entry) {
580 // This could be concurent execution
581 if (entry != nullptr && is_on()) { // Request could come after cache is closed.
582 _cache->invalidate_entry(entry);
583 }
584 }
585
586 bool AOTCodeCache::is_loaded(AOTCodeEntry* entry) {
587 if (is_on() && _cache->cache_buffer() != nullptr) {
588 return (uint)((char*)entry - _cache->cache_buffer()) < _cache->load_size();
589 }
590 return false;
591 }
592
593 void AOTCodeCache::init_extrs_table() {
594 AOTCodeAddressTable* table = addr_table();
595 if (table != nullptr) {
596 table->init_extrs();
597 }
598 }
599
600 void AOTCodeCache::init_early_stubs_table() {
601 AOTCodeAddressTable* table = addr_table();
602 if (table != nullptr) {
603 table->init_early_stubs();
604 }
605 }
606
607 void AOTCodeCache::init_shared_blobs_table() {
608 AOTCodeAddressTable* table = addr_table();
609 if (table != nullptr) {
610 table->init_shared_blobs();
611 }
612 }
613
614 void AOTCodeCache::init_stubs_table() {
615 AOTCodeAddressTable* table = addr_table();
616 if (table != nullptr) {
617 table->init_stubs();
618 }
619 }
620
621 void AOTCodeCache::init_early_c1_table() {
622 AOTCodeAddressTable* table = addr_table();
623 if (table != nullptr) {
624 table->init_early_c1();
625 }
626 }
627
628 void AOTCodeCache::init_c1_table() {
629 AOTCodeAddressTable* table = addr_table();
630 if (table != nullptr) {
631 table->init_c1();
632 }
633 }
634
635 void AOTCodeCache::init_c2_table() {
636 AOTCodeAddressTable* table = addr_table();
637 if (table != nullptr) {
638 table->init_c2();
639 }
640 }
641
// Destructor: stop new readers/writers, flush pending output when dumping,
// and release the store buffer and address table. Idempotent via _closing.
AOTCodeCache::~AOTCodeCache() {
  if (_closing) {
    return; // Already closed
  }
  // Stop any further access to cache.
  // Checked on entry to load_nmethod() and store_nmethod().
  _closing = true;
  if (_for_use) {
    // Wait for all load_nmethod() finish.
    wait_for_no_nmethod_readers();
  }
  // Prevent writing code into cache while we are closing it.
  // This lock held by ciEnv::register_method() which calls store_nmethod().
  MutexLocker ml(Compile_lock);
  if (for_dump()) { // Finalize cache
    finish_write();
  }
  // The load buffer points into the mapped AOT code region; it is only
  // forgotten here, not freed.
  _load_buffer = nullptr;
  if (_C_store_buffer != nullptr) {
    FREE_C_HEAP_ARRAY(char, _C_store_buffer);
    _C_store_buffer = nullptr;
    _store_buffer = nullptr;
  }
  if (_table != nullptr) {
    delete _table;
    _table = nullptr;
  }
}
670
671 void AOTCodeCache::Config::record(bool use_meta_ptrs) {
672 _flags = 0;
673 if (use_meta_ptrs) {
674 _flags |= metadataPointers;
675 }
676 #ifdef ASSERT
677 _flags |= debugVM;
678 #endif
679 if (UseCompressedOops) {
680 _flags |= compressedOops;
681 }
682 if (UseCompressedClassPointers) {
683 _flags |= compressedClassPointers;
684 }
685 if (UseTLAB) {
686 _flags |= useTLAB;
687 }
688 if (JavaAssertions::systemClassDefault()) {
689 _flags |= systemClassAssertions;
690 }
691 if (JavaAssertions::userClassDefault()) {
692 _flags |= userClassAssertions;
693 }
694 if (EnableContended) {
695 _flags |= enableContendedPadding;
696 }
697 if (RestrictContended) {
698 _flags |= restrictContendedPadding;
699 }
700 _compressedOopShift = CompressedOops::shift();
701 _compressedOopBase = CompressedOops::base();
702 _compressedKlassShift = CompressedKlassPointers::shift();
703 _compressedKlassBase = CompressedKlassPointers::base();
704 _contendedPaddingWidth = ContendedPaddingWidth;
705 _objectAlignment = ObjectAlignmentInBytes;
706 _gc = (uint)Universe::heap()->kind();
707 }
708
709 bool AOTCodeCache::Config::verify() const {
710 #ifdef ASSERT
711 if ((_flags & debugVM) == 0) {
712 log_debug(aot, codecache, init)("AOT Code Cache disabled: it was created by product VM, it can't be used by debug VM");
713 return false;
714 }
715 #else
716 if ((_flags & debugVM) != 0) {
717 log_debug(aot, codecache, init)("AOT Code Cache disabled: it was created by debug VM, it can't be used by product VM");
718 return false;
719 }
720 #endif
721
722 CollectedHeap::Name aot_gc = (CollectedHeap::Name)_gc;
723 if (aot_gc != Universe::heap()->kind()) {
751 log_debug(aot, codecache, init)("AOT Code Cache disabled: it was created with RestrictContended = %s", RestrictContended ? "false" : "true");
752 return false;
753 }
754 if (_compressedOopShift != (uint)CompressedOops::shift()) {
755 log_debug(aot, codecache, init)("AOT Code Cache disabled: it was created with different CompressedOops::shift(): %d vs current %d", _compressedOopShift, CompressedOops::shift());
756 return false;
757 }
758 if (_compressedKlassShift != (uint)CompressedKlassPointers::shift()) {
759 log_debug(aot, codecache, init)("AOT Code Cache disabled: it was created with CompressedKlassPointers::shift() = %d vs current %d", _compressedKlassShift, CompressedKlassPointers::shift());
760 return false;
761 }
762 if (_contendedPaddingWidth != (uint)ContendedPaddingWidth) {
763 log_debug(aot, codecache, init)("AOT Code Cache disabled: it was created with ContendedPaddingWidth = %d vs current %d", _contendedPaddingWidth, ContendedPaddingWidth);
764 return false;
765 }
766 if (_objectAlignment != (uint)ObjectAlignmentInBytes) {
767 log_debug(aot, codecache, init)("AOT Code Cache disabled: it was created with ObjectAlignmentInBytes = %d vs current %d", _objectAlignment, ObjectAlignmentInBytes);
768 return false;
769 }
770
771 if ((_compressedKlassBase == nullptr || CompressedKlassPointers::base() == nullptr) && (_compressedKlassBase != CompressedKlassPointers::base())) {
772 log_debug(aot, codecache, init)("AOT Code Cache disabled: incompatible CompressedKlassPointers::base(): %p vs current %p", _compressedKlassBase, CompressedKlassPointers::base());
773 return false;
774 }
775
776 // This should be the last check as it only disables AOTStubCaching
777 if ((_compressedOopBase == nullptr || CompressedOops::base() == nullptr) && (_compressedOopBase != CompressedOops::base())) {
778 log_debug(aot, codecache, init)("AOTStubCaching is disabled: incompatible CompressedOops::base(): %p vs current %p", _compressedOopBase, CompressedOops::base());
779 return false;
780 }
781
782 return true;
783 }
784
// Validate the mapped header against this VM build: the cache format version
// must match exactly and the mapped region must be large enough to hold the
// cache contents recorded in the header.
bool AOTCodeCache::Header::verify_config(uint load_size) const {
  if (_version != AOT_CODE_VERSION) {
    log_debug(aot, codecache, init)("AOT Code Cache disabled: different AOT Code version %d vs %d recorded in AOT Code header", AOT_CODE_VERSION, _version);
    return false;
  }
  if (load_size < _cache_size) {
    log_debug(aot, codecache, init)("AOT Code Cache disabled: AOT Code Cache size %d < %d recorded in AOT Code header", load_size, _cache_size);
    return false;
  }
  return true;
}
796
797 volatile int AOTCodeCache::_nmethod_readers = 0;
798
799 AOTCodeCache* AOTCodeCache::open_for_use() {
800 if (AOTCodeCache::is_on_for_use()) {
801 return AOTCodeCache::cache();
802 }
803 return nullptr;
804 }
805
806 AOTCodeCache* AOTCodeCache::open_for_dump() {
807 if (AOTCodeCache::is_on_for_dump()) {
808 AOTCodeCache* cache = AOTCodeCache::cache();
809 cache->clear_lookup_failed(); // Reset bit
810 return cache;
811 }
812 return nullptr;
813 }
814
815 bool AOTCodeCache::is_address_in_aot_cache(address p) {
816 AOTCodeCache* cache = open_for_use();
817 if (cache == nullptr) {
818 return false;
819 }
820 if ((p >= (address)cache->cache_buffer()) &&
821 (p < (address)(cache->cache_buffer() + cache->load_size()))) {
822 return true;
823 }
824 return false;
825 }
826
827 static void copy_bytes(const char* from, address to, uint size) {
828 assert(size > 0, "sanity");
829 bool by_words = true;
830 if ((size > 2 * HeapWordSize) && (((intptr_t)from | (intptr_t)to) & (HeapWordSize - 1)) == 0) {
831 // Use wordwise copies if possible:
832 Copy::disjoint_words((HeapWord*)from,
833 (HeapWord*)to,
834 ((size_t)size + HeapWordSize-1) / HeapWordSize);
835 } else {
836 by_words = false;
837 Copy::conjoint_jbytes(from, to, (size_t)size);
838 }
839 log_trace(aot, codecache)("Copied %d bytes as %s from " INTPTR_FORMAT " to " INTPTR_FORMAT, size, (by_words ? "HeapWord" : "bytes"), p2i(from), p2i(to));
840 }
841
842 AOTCodeReader::AOTCodeReader(AOTCodeCache* cache, AOTCodeEntry* entry, CompileTask* task) {
843 _cache = cache;
844 _entry = entry;
845 _load_buffer = cache->cache_buffer();
846 _read_position = 0;
847 if (task != nullptr) {
848 _compile_id = task->compile_id();
849 _comp_level = task->comp_level();
850 _preload = task->preload();
851 } else {
852 _compile_id = 0;
853 _comp_level = 0;
854 _preload = false;
855 }
856 _lookup_failed = false;
857 }
858
859 void AOTCodeReader::set_read_position(uint pos) {
860 if (pos == _read_position) {
861 return;
862 }
863 assert(pos < _cache->load_size(), "offset:%d >= file size:%d", pos, _cache->load_size());
864 _read_position = pos;
865 }
866
867 bool AOTCodeCache::set_write_position(uint pos) {
868 if (pos == _write_position) {
869 return true;
870 }
871 if (_store_size < _write_position) {
872 _store_size = _write_position; // Adjust during write
873 }
874 assert(pos < _store_size, "offset:%d >= file size:%d", pos, _store_size);
875 _write_position = pos;
918 if (nbytes == 0) {
919 return 0;
920 }
921 uint new_position = _write_position + nbytes;
922 if (new_position >= (uint)((char*)_store_entries - _store_buffer)) {
923 log_warning(aot, codecache)("Failed to write %d bytes at offset %d to AOT Code Cache. Increase AOTCodeMaxSize.",
924 nbytes, _write_position);
925 set_failed();
926 report_store_failure();
927 return 0;
928 }
929 copy_bytes((const char* )buffer, (address)(_store_buffer + _write_position), nbytes);
930 log_trace(aot, codecache)("Wrote %d bytes at offset %d to AOT Code Cache", nbytes, _write_position);
931 _write_position += nbytes;
932 if (_store_size < _write_position) {
933 _store_size = _write_position;
934 }
935 return nbytes;
936 }
937
// Look up a cached nmethod entry for 'method' compiled at 'comp_level'.
// Returns null when the level is masked out by DisableCachedCode, the cache is
// off or empty, no matching entry exists, or a compiler directive requests
// that precompiled code be ignored for this method.
AOTCodeEntry* AOTCodeCache::find_code_entry(const methodHandle& method, uint comp_level) {
  // DisableCachedCode is a bit mask: bit 0 = level 1, bit 1 = level 2, bit 2 = level 4.
  switch (comp_level) {
    case CompLevel_simple:
      if ((DisableCachedCode & (1 << 0)) != 0) {
        return nullptr;
      }
      break;
    case CompLevel_limited_profile:
      if ((DisableCachedCode & (1 << 1)) != 0) {
        return nullptr;
      }
      break;
    case CompLevel_full_optimization:
      if ((DisableCachedCode & (1 << 2)) != 0) {
        return nullptr;
      }
      break;

    default: return nullptr; // Level 1, 2, and 4 only
  }
  TraceTime t1("Total time to find AOT code", &_t_totalFind, enable_timers(), false);
  if (is_on() && _cache->cache_buffer() != nullptr) {
    // Entries are keyed by (name hash, comp_level, decompile count).
    MethodData* md = method->method_data();
    uint decomp = (md == nullptr) ? 0 : md->decompile_count();

    ResourceMark rm;
    const char* target_name = method->name_and_sig_as_C_string();
    uint hash = java_lang_String::hash_code((const jbyte*)target_name, (int)strlen(target_name));
    AOTCodeEntry* entry = _cache->find_entry(AOTCodeEntry::Code, hash, comp_level, decomp);
    if (entry == nullptr) {
      log_info(aot, codecache, nmethod)("Missing entry for '%s' (comp_level %d, decomp: %d, hash: " UINT32_FORMAT_X_0 ")", target_name, (uint)comp_level, decomp, hash);
#ifdef ASSERT
    } else {
      // Debug-only: guard against a hash collision returning the wrong method.
      uint name_offset = entry->offset() + entry->name_offset();
      uint name_size = entry->name_size(); // Includes terminating '\0'
      const char* name = _cache->cache_buffer() + name_offset;
      if (strncmp(target_name, name, name_size) != 0) {
        assert(false, "AOTCodeCache: saved nmethod's name '%s' is different from '%s', hash: " UINT32_FORMAT_X_0, name, target_name, hash);
      }
#endif
    }

    // A per-method directive can veto use of precompiled code even when found.
    DirectiveSet* directives = DirectivesStack::getMatchingDirective(method, nullptr);
    if (directives->IgnorePrecompiledOption) {
      LogStreamHandle(Info, aot, codecache, compilation) log;
      if (log.is_enabled()) {
        log.print("Ignore cached code entry on level %d for ", comp_level);
        method->print_value_on(&log);
      }
      return nullptr;
    }

    return entry;
  }
  return nullptr;
}
994
// Placement-style allocation: entries live inside the cache's own storage
// (obtained from AOTCodeCache::add_entry()), not on the C heap.
void* AOTCodeEntry::operator new(size_t x, AOTCodeCache* cache) {
  return (void*)(cache->add_entry());
}
998
999 static bool check_entry(AOTCodeEntry::Kind kind, uint id, uint comp_level, uint decomp, AOTCodeEntry* entry) {
1000 if (entry->kind() == kind) {
1001 assert(entry->id() == id, "sanity");
1002 if (kind != AOTCodeEntry::Code || (!entry->not_entrant() && !entry->has_clinit_barriers() &&
1003 (entry->comp_level() == comp_level) &&
1004 (entry->ignore_decompile() || entry->decompile() == decomp))) {
1005 return true; // Found
1006 }
1007 }
1008 return false;
1009 }
1010
// Find an entry with the given kind/id in the loaded entries table.
// The search table is an array of (id, index) pairs sorted by id; binary
// search locates one occurrence of the id, then neighboring duplicates (the
// same id can appear several times, e.g. the same nmethod with different
// decompile counts) are scanned in both directions.
AOTCodeEntry* AOTCodeCache::find_entry(AOTCodeEntry::Kind kind, uint id, uint comp_level, uint decomp) {
  assert(_for_use, "sanity");
  uint count = _load_header->entries_count();
  if (_load_entries == nullptr) {
    // Lazily locate the tables inside the mapped buffer on first lookup.
    _search_entries = (uint*)addr(_load_header->entries_offset()); // [id, index]
    _load_entries = (AOTCodeEntry*)(_search_entries + 2 * count);
    log_debug(aot, codecache, init)("Read %d entries table at offset %d from AOT Code Cache", count, _load_header->entries_offset());
  }
  // Binary search
  int l = 0;
  int h = count - 1;
  while (l <= h) {
    int mid = (l + h) >> 1;
    int ix = mid * 2;
    uint is = _search_entries[ix];
    if (is == id) {
      int index = _search_entries[ix + 1];
      AOTCodeEntry* entry = &(_load_entries[index]);
      if (check_entry(kind, id, comp_level, decomp, entry)) {
        return entry; // Found
      }
      // Leaner search around (could be the same nmethod with different decompile count)
      for (int i = mid - 1; i >= l; i--) { // search back
        ix = i * 2;
        is = _search_entries[ix];
        if (is != id) {
          break; // Ran past the run of duplicates.
        }
        index = _search_entries[ix + 1];
        AOTCodeEntry* entry = &(_load_entries[index]);
        if (check_entry(kind, id, comp_level, decomp, entry)) {
          return entry; // Found
        }
      }
      for (int i = mid + 1; i <= h; i++) { // search forward
        ix = i * 2;
        is = _search_entries[ix];
        if (is != id) {
          break; // Ran past the run of duplicates.
        }
        index = _search_entries[ix + 1];
        AOTCodeEntry* entry = &(_load_entries[index]);
        if (check_entry(kind, id, comp_level, decomp, entry)) {
          return entry; // Found
        }
      }
      break; // Not found match (different decompile count or not_entrant state).
    } else if (is < id) {
      l = mid + 1;
    } else {
      h = mid - 1;
    }
  }
  return nullptr;
}
1067
// Mark 'entry' — and, recursively, any chained clinit-barrier variants linked
// via next() — as not entrant so it is no longer handed out. No-op when the
// entry is already invalidated; logs each invalidation.
void AOTCodeCache::invalidate_entry(AOTCodeEntry* entry) {
  assert(entry!= nullptr, "all entries should be read already");
  if (entry->not_entrant()) {
    return; // Someone invalidated it already
  }
#ifdef ASSERT
  // Debug-only: verify the entry really belongs to one of our entry tables.
  bool found = false;
  if (_for_use) {
    uint count = _load_header->entries_count();
    uint i = 0;
    for(; i < count; i++) {
      if (entry == &(_load_entries[i])) {
        break;
      }
    }
    found = (i < count);
  }
  if (!found && _for_dump) {
    uint count = _store_entries_cnt;
    uint i = 0;
    for(; i < count; i++) {
      if (entry == &(_store_entries[i])) {
        break;
      }
    }
    found = (i < count);
  }
  assert(found, "entry should exist");
#endif
  entry->set_not_entrant();
  {
    // The entry's name lives in whichever buffer (load or store) holds it.
    uint name_offset = entry->offset() + entry->name_offset();
    const char* name;
    if (AOTCodeCache::is_loaded(entry)) {
      name = _load_buffer + name_offset;
    } else {
      name = _store_buffer + name_offset;
    }
    uint level = entry->comp_level();
    uint comp_id = entry->comp_id();
    uint decomp = entry->decompile();
    bool clinit_brs = entry->has_clinit_barriers();
    log_info(aot, codecache, nmethod)("Invalidated entry for '%s' (comp_id %d, comp_level %d, decomp: %d, hash: " UINT32_FORMAT_X_0 "%s)",
                                      name, comp_id, level, decomp, entry->id(), (clinit_brs ? ", has clinit barriers" : ""));
  }
  if (entry->next() != nullptr) {
    entry = entry->next();
    assert(entry->has_clinit_barriers(), "expecting only such entries here");
    invalidate_entry(entry);
  }
}
1119
1120 void AOTCodeEntry::update_method_for_writing() {
1121 if (_method != nullptr) {
1122 _method_offset = AOTCacheAccess::delta_from_base_address((address)_method);
1123 _method = nullptr;
1124 }
1125 }
1126
// qsort() comparator for arrays of uint keys: three-way compare of the
// pointed-to values (-1, 0, or 1).
static int uint_cmp(const void *i, const void *j) {
  const uint a = *(const uint*)i;
  const uint b = *(const uint*)j;
  if (a == b) {
    return 0;
  }
  return (a < b) ? -1 : 1;
}
1132
1133 bool AOTCodeCache::finish_write() {
1134 if (!align_write()) {
1135 return false;
1136 }
1137 uint strings_offset = _write_position;
1138 int strings_count = store_strings();
1139 if (strings_count < 0) {
1140 return false;
1141 }
1142 if (!align_write()) {
1143 return false;
1144 }
1145 uint strings_size = _write_position - strings_offset;
1146
1147 uint entries_count = 0; // Number of entrant (useful) code entries
1148 uint entries_offset = _write_position;
1149
1150 uint store_count = _store_entries_cnt;
1151 if (store_count > 0) {
1152 _aot_code_directory = CachedCodeDirectory::create();
1153 assert(_aot_code_directory != nullptr, "Sanity check");
1154
1155 uint header_size = (uint)align_up(sizeof(AOTCodeCache::Header), DATA_ALIGNMENT);
1156 uint load_count = (_load_header != nullptr) ? _load_header->entries_count() : 0;
1157 uint code_count = store_count + load_count;
1158 uint search_count = code_count * 2;
1159 uint search_size = search_count * sizeof(uint);
1160 uint entries_size = (uint)align_up(code_count * sizeof(AOTCodeEntry), DATA_ALIGNMENT); // In bytes
1161 uint preload_entries_cnt = 0;
1162 uint* preload_entries = NEW_C_HEAP_ARRAY(uint, code_count, mtCode);
1163 uint preload_entries_size = code_count * sizeof(uint);
1164 // _write_position should include code and strings
1165 uint code_alignment = code_count * DATA_ALIGNMENT; // We align_up code size when storing it.
1166 uint total_size = _write_position + _load_size + header_size +
1167 code_alignment + search_size + preload_entries_size + entries_size;
1168 assert(total_size < max_aot_code_size(), "AOT Code size (" UINT32_FORMAT " bytes) is greater than AOTCodeMaxSize(" UINT32_FORMAT " bytes).", total_size, max_aot_code_size());
1169
1170
1171 // Create ordered search table for entries [id, index];
1172 uint* search = NEW_C_HEAP_ARRAY(uint, search_count, mtCode);
1173 // Allocate in AOT Cache buffer
1174 char* buffer = (char *)AOTCacheAccess::allocate_aot_code_region(total_size + DATA_ALIGNMENT);
1175 char* start = align_up(buffer, DATA_ALIGNMENT);
1176 char* current = start + header_size; // Skip header
1177
1178 AOTCodeEntry* entries_address = _store_entries; // Pointer to latest entry
1179 uint adapters_count = 0;
1180 uint shared_blobs_count = 0;
1181 uint C1_blobs_count = 0;
1182 uint C2_blobs_count = 0;
1183 uint stubs_count = 0;
1184 uint nmethods_count = 0;
1185 uint max_size = 0;
1186 // Add old entries first
1187 if (_for_use && (_load_header != nullptr)) {
1188 for(uint i = 0; i < load_count; i++) {
1189 if (_load_entries[i].load_fail()) {
1190 continue;
1191 }
1192 if (_load_entries[i].not_entrant()) {
1193 log_info(aot, codecache, exit)("Not entrant load entry id: %d, decomp: %d, hash: " UINT32_FORMAT_X_0, i, _load_entries[i].decompile(), _load_entries[i].id());
1194 if (_load_entries[i].for_preload()) {
1195 // Skip not entrant preload code:
1196 // we can't pre-load code which may have failing dependencies.
1197 continue;
1198 }
1199 _load_entries[i].set_entrant(); // Reset
1200 } else if (_load_entries[i].for_preload() && _load_entries[i].method() != nullptr) {
1201 // record entrant first version code for pre-loading
1202 preload_entries[preload_entries_cnt++] = entries_count;
1203 }
1204 {
1205 uint size = align_up(_load_entries[i].size(), DATA_ALIGNMENT);
1206 if (size > max_size) {
1207 max_size = size;
1208 }
1209 copy_bytes((_load_buffer + _load_entries[i].offset()), (address)current, size);
1210 _load_entries[i].set_offset(current - start); // New offset
1211 current += size;
1212 uint n = write_bytes(&(_load_entries[i]), sizeof(AOTCodeEntry));
1213 if (n != sizeof(AOTCodeEntry)) {
1214 FREE_C_HEAP_ARRAY(uint, search);
1215 return false;
1216 }
1217 search[entries_count*2 + 0] = _load_entries[i].id();
1218 search[entries_count*2 + 1] = entries_count;
1219 entries_count++;
1220 AOTCodeEntry::Kind kind = _load_entries[i].kind();
1221 if (kind == AOTCodeEntry::Adapter) {
1222 adapters_count++;
1223 } else if (kind == AOTCodeEntry::SharedBlob) {
1224 shared_blobs_count++;
1225 } else if (kind == AOTCodeEntry::C1Blob) {
1226 C1_blobs_count++;
1227 } else if (kind == AOTCodeEntry::C2Blob) {
1228 C2_blobs_count++;
1229 } else if (kind == AOTCodeEntry::Stub) {
1230 stubs_count++;
1231 } else {
1232 assert(kind == AOTCodeEntry::Code, "sanity");
1233 nmethods_count++;
1234 }
1235 }
1236 }
1237 }
1238 // AOTCodeEntry entries were allocated in reverse in store buffer.
1239 // Process them in reverse order to cache first code first.
1240 for (int i = store_count - 1; i >= 0; i--) {
1241 if (entries_address[i].load_fail()) {
1242 continue;
1243 }
1244 if (entries_address[i].not_entrant()) {
1245 log_info(aot, codecache, exit)("Not entrant new entry comp_id: %d, comp_level: %d, decomp: %d, hash: " UINT32_FORMAT_X_0 "%s", entries_address[i].comp_id(), entries_address[i].comp_level(), entries_address[i].decompile(), entries_address[i].id(), (entries_address[i].has_clinit_barriers() ? ", has clinit barriers" : ""));
1246 if (entries_address[i].for_preload()) {
1247 // Skip not entrant preload code:
1248 // we can't pre-load code which may have failing dependencies.
1249 continue;
1250 }
1251 entries_address[i].set_entrant(); // Reset
1252 } else if (entries_address[i].for_preload() && entries_address[i].method() != nullptr) {
1253 // record entrant first version code for pre-loading
1254 preload_entries[preload_entries_cnt++] = entries_count;
1255 }
1256 {
1257 entries_address[i].set_next(nullptr); // clear pointers before storing data
1258 uint size = align_up(entries_address[i].size(), DATA_ALIGNMENT);
1259 if (size > max_size) {
1260 max_size = size;
1261 }
1262 copy_bytes((_store_buffer + entries_address[i].offset()), (address)current, size);
1263 entries_address[i].set_offset(current - start); // New offset
1264 entries_address[i].update_method_for_writing();
1265 current += size;
1266 uint n = write_bytes(&(entries_address[i]), sizeof(AOTCodeEntry));
1267 if (n != sizeof(AOTCodeEntry)) {
1268 FREE_C_HEAP_ARRAY(uint, search);
1269 return false;
1270 }
1271 search[entries_count*2 + 0] = entries_address[i].id();
1272 search[entries_count*2 + 1] = entries_count;
1273 entries_count++;
1274 AOTCodeEntry::Kind kind = entries_address[i].kind();
1275 if (kind == AOTCodeEntry::Adapter) {
1276 adapters_count++;
1277 } else if (kind == AOTCodeEntry::SharedBlob) {
1278 shared_blobs_count++;
1279 } else if (kind == AOTCodeEntry::C1Blob) {
1280 C1_blobs_count++;
1281 } else if (kind == AOTCodeEntry::C2Blob) {
1282 C2_blobs_count++;
1283 } else if (kind == AOTCodeEntry::Stub) {
1284 stubs_count++;
1285 } else {
1286 assert(kind == AOTCodeEntry::Code, "sanity");
1287 nmethods_count++;
1288 }
1289 }
1290 }
1291
1292 if (entries_count == 0) {
1293 log_info(aot, codecache, exit)("AOT Code Cache was not created: no entires");
1294 FREE_C_HEAP_ARRAY(uint, search);
1295 return true; // Nothing to write
1296 }
1297 assert(entries_count <= (store_count + load_count), "%d > (%d + %d)", entries_count, store_count, load_count);
1298 // Write strings
1299 if (strings_count > 0) {
1300 copy_bytes((_store_buffer + strings_offset), (address)current, strings_size);
1301 strings_offset = (current - start); // New offset
1302 current += strings_size;
1303 }
1304 uint preload_entries_offset = (current - start);
1305 preload_entries_size = preload_entries_cnt * sizeof(uint);
1306 if (preload_entries_size > 0) {
1307 copy_bytes((const char*)preload_entries, (address)current, preload_entries_size);
1308 current += preload_entries_size;
1309 log_info(aot, codecache, exit)("Wrote %d preload entries to AOT Code Cache", preload_entries_cnt);
1310 }
1311 if (preload_entries != nullptr) {
1312 FREE_C_HEAP_ARRAY(uint, preload_entries);
1313 }
1314
1315 uint new_entries_offset = (current - start); // New offset
1316 // Sort and store search table
1317 qsort(search, entries_count, 2*sizeof(uint), uint_cmp);
1318 search_size = 2 * entries_count * sizeof(uint);
1319 copy_bytes((const char*)search, (address)current, search_size);
1320 FREE_C_HEAP_ARRAY(uint, search);
1321 current += search_size;
1322
1323 // Write entries
1324 entries_size = entries_count * sizeof(AOTCodeEntry); // New size
1325 copy_bytes((_store_buffer + entries_offset), (address)current, entries_size);
1326 current += entries_size;
1327
1328 log_stats_on_exit();
1329
1330 uint size = (current - start);
1331 assert(size <= total_size, "%d > %d", size , total_size);
1332 uint blobs_count = shared_blobs_count + C1_blobs_count + C2_blobs_count;
1333 assert(nmethods_count == (entries_count - (stubs_count + blobs_count + adapters_count)), "sanity");
1334 log_debug(aot, codecache, exit)(" Adapters: total=%u", adapters_count);
1335 log_debug(aot, codecache, exit)(" Shared Blobs: total=%u", shared_blobs_count);
1336 log_debug(aot, codecache, exit)(" C1 Blobs: total=%u", C1_blobs_count);
1337 log_debug(aot, codecache, exit)(" C2 Blobs: total=%u", C2_blobs_count);
1338 log_debug(aot, codecache, exit)(" Stubs: total=%u", stubs_count);
1339 log_debug(aot, codecache, exit)(" Nmethods: total=%u", nmethods_count);
1340 log_debug(aot, codecache, exit)(" AOT code cache size: %u bytes, max entry's size: %u bytes", size, max_size);
1341
1342 // Finalize header
1343 AOTCodeCache::Header* header = (AOTCodeCache::Header*)start;
1344 header->init(size, (uint)strings_count, strings_offset,
1345 entries_count, new_entries_offset,
1346 preload_entries_cnt, preload_entries_offset,
1347 adapters_count, shared_blobs_count,
1348 C1_blobs_count, C2_blobs_count,
1349 stubs_count, _use_meta_ptrs);
1350
1351 log_info(aot, codecache, exit)("Wrote %d AOT code entries to AOT Code Cache", entries_count);
1352
1353 _aot_code_directory->set_aot_code_data(size, start);
1354 }
1355 return true;
1356 }
1357
1358 //------------------Store/Load AOT code ----------------------
1359
1360 bool AOTCodeCache::store_code_blob(CodeBlob& blob, AOTCodeEntry::Kind entry_kind, uint id, const char* name, int entry_offset_count, int* entry_offsets) {
1361 AOTCodeCache* cache = open_for_dump();
1362 if (cache == nullptr) {
1363 return false;
1364 }
1365 assert(AOTCodeEntry::is_valid_entry_kind(entry_kind), "invalid entry_kind %d", entry_kind);
1366
1367 if (AOTCodeEntry::is_adapter(entry_kind) && !is_dumping_adapter()) {
1368 return false;
1369 }
1370 if (AOTCodeEntry::is_blob(entry_kind) && !is_dumping_stub()) {
1371 return false;
1372 }
1373 log_debug(aot, codecache, stubs)("Writing blob '%s' (id=%u, kind=%s) to AOT Code Cache", name, id, aot_code_entry_kind_name[entry_kind]);
1456
1457 CodeBlob* AOTCodeCache::load_code_blob(AOTCodeEntry::Kind entry_kind, uint id, const char* name, int entry_offset_count, int* entry_offsets) {
1458 AOTCodeCache* cache = open_for_use();
1459 if (cache == nullptr) {
1460 return nullptr;
1461 }
1462 assert(AOTCodeEntry::is_valid_entry_kind(entry_kind), "invalid entry_kind %d", entry_kind);
1463
1464 if (AOTCodeEntry::is_adapter(entry_kind) && !is_using_adapter()) {
1465 return nullptr;
1466 }
1467 if (AOTCodeEntry::is_blob(entry_kind) && !is_using_stub()) {
1468 return nullptr;
1469 }
1470 log_debug(aot, codecache, stubs)("Reading blob '%s' (id=%u, kind=%s) from AOT Code Cache", name, id, aot_code_entry_kind_name[entry_kind]);
1471
1472 AOTCodeEntry* entry = cache->find_entry(entry_kind, encode_id(entry_kind, id));
1473 if (entry == nullptr) {
1474 return nullptr;
1475 }
1476 AOTCodeReader reader(cache, entry, nullptr);
1477 CodeBlob* blob = reader.compile_code_blob(name, entry_offset_count, entry_offsets);
1478
1479 log_debug(aot, codecache, stubs)("%sRead blob '%s' (id=%u, kind=%s) from AOT Code Cache",
1480 (blob == nullptr? "Failed to " : ""), name, id, aot_code_entry_kind_name[entry_kind]);
1481 return blob;
1482 }
1483
1484 CodeBlob* AOTCodeReader::compile_code_blob(const char* name, int entry_offset_count, int* entry_offsets) {
1485 uint entry_position = _entry->offset();
1486
1487 // Read name
1488 uint name_offset = entry_position + _entry->name_offset();
1489 uint name_size = _entry->name_size(); // Includes '/0'
1490 const char* stored_name = addr(name_offset);
1491
1492 if (strncmp(stored_name, name, (name_size - 1)) != 0) {
1493 log_warning(aot, codecache, stubs)("Saved blob's name '%s' is different from the expected name '%s'",
1494 stored_name, name);
1495 set_lookup_failed(); // Skip this blob
1496 return nullptr;
1540 set_read_position(offset);
1541 for (int i = 0; i < stored_count; i++) {
1542 uint32_t off = *(uint32_t*)addr(offset);
1543 offset += sizeof(uint32_t);
1544 const char* entry_name = (_entry->kind() == AOTCodeEntry::Adapter) ? AdapterHandlerEntry::entry_name(i) : "";
1545 log_trace(aot, codecache, stubs)("Reading adapter '%s:%s' (0x%x) offset: 0x%x from AOT Code Cache",
1546 stored_name, entry_name, _entry->id(), off);
1547 entry_offsets[i] = off;
1548 }
1549
1550 #ifdef ASSERT
1551 LogStreamHandle(Trace, aot, codecache, stubs) log;
1552 if (log.is_enabled()) {
1553 FlagSetting fs(PrintRelocations, true);
1554 code_blob->print_on(&log);
1555 }
1556 #endif
1557 return code_blob;
1558 }
1559
// Serialize a generated stub's machine code into the AOT Code Cache.
// Layout written (offsets relative to the entry start): [code bytes][name].
// An AOTCodeEntry describing the layout is then allocated inside the cache.
// Returns false if stub dumping is disabled, the cache is closed, alignment
// fails, or any write comes up short.
bool AOTCodeCache::store_stub(StubCodeGenerator* cgen, vmIntrinsicID id, const char* name, address start) {
  if (!is_dumping_stub()) {
    return false;
  }
  AOTCodeCache* cache = open_for_dump();
  if (cache == nullptr) {
    return false;
  }
  log_info(aot, codecache, stubs)("Writing stub '%s' id:%d to AOT Code Cache", name, (int)id);
  if (!cache->align_write()) {
    return false;
  }
#ifdef ASSERT
  // Stubs are stored as raw code bytes with no relocation records; if the
  // code section carries any relocation other than 'none' we cannot replay
  // it at load time, so fail loudly in debug builds.
  CodeSection* cs = cgen->assembler()->code_section();
  if (cs->has_locs()) {
    uint reloc_count = cs->locs_count();
    tty->print_cr("======== write stubs code section relocations [%d]:", reloc_count);
    // Collect additional data
    RelocIterator iter(cs);
    while (iter.next()) {
      switch (iter.type()) {
        case relocInfo::none:
          break;
        default: {
          iter.print_current_on(tty);
          fatal("stub's relocation %d unimplemented", (int)iter.type());
          break;
        }
      }
    }
  }
#endif
  uint entry_position = cache->_write_position;

  // Write code (code starts at the very beginning of the entry, so offset 0)
  uint code_offset = 0;
  uint code_size = cgen->assembler()->pc() - start;
  uint n = cache->write_bytes(start, code_size);
  if (n != code_size) {
    return false;
  }
  // Write name
  uint name_offset = cache->_write_position - entry_position;
  uint name_size = (uint)strlen(name) + 1; // Includes '\0'
  n = cache->write_bytes(name, name_size);
  if (n != name_size) {
    return false;
  }
  uint entry_size = cache->_write_position - entry_position;
  // The entry is allocated inside the cache via placement new; presumably it
  // is registered there as a side effect — the local itself is not used again.
  AOTCodeEntry* entry = new(cache) AOTCodeEntry(entry_position, entry_size, name_offset, name_size,
                                                code_offset, code_size, 0, 0,
                                                AOTCodeEntry::Stub, (uint32_t)id);
  log_info(aot, codecache, stubs)("Wrote stub '%s' id:%d to AOT Code Cache", name, (int)id);
  return true;
}
1615
1616 bool AOTCodeCache::load_stub(StubCodeGenerator* cgen, vmIntrinsicID id, const char* name, address start) {
1617 if (!is_using_stub()) {
1618 return false;
1619 }
1620 assert(start == cgen->assembler()->pc(), "wrong buffer");
1621 AOTCodeCache* cache = open_for_use();
1622 if (cache == nullptr) {
1623 return false;
1624 }
1625 AOTCodeEntry* entry = cache->find_entry(AOTCodeEntry::Stub, (uint)id);
1626 if (entry == nullptr) {
1627 return false;
1628 }
1629 uint entry_position = entry->offset();
1630 // Read name
1631 uint name_offset = entry->name_offset() + entry_position;
1632 uint name_size = entry->name_size(); // Includes '/0'
1633 const char* saved_name = cache->addr(name_offset);
1634 if (strncmp(name, saved_name, (name_size - 1)) != 0) {
1635 log_warning(aot, codecache)("Saved stub's name '%s' is different from '%s' for id:%d", saved_name, name, (int)id);
1636 cache->set_failed();
1637 report_load_failure();
1638 return false;
1639 }
1640 log_info(aot, codecache,stubs)("Reading stub '%s' id:%d from AOT Code Cache", name, (int)id);
1641 // Read code
1642 uint code_offset = entry->code_offset() + entry_position;
1643 uint code_size = entry->code_size();
1644 copy_bytes(cache->addr(code_offset), start, code_size);
1645 cgen->assembler()->code_section()->set_end(start + code_size);
1646 log_info(aot, codecache,stubs)("Read stub '%s' id:%d from AOT Code Cache", name, (int)id);
1647 return true;
1648 }
1649
1650 AOTCodeEntry* AOTCodeCache::store_nmethod(nmethod* nm, AbstractCompiler* compiler, bool for_preload) {
1651 if (!is_dumping_code()) {
1652 return nullptr;
1653 }
1654 if (!CDSConfig::is_dumping_aot_code()) {
1655 return nullptr; // The metadata and heap in the CDS image haven't been finalized yet.
1656 }
1657 AOTCodeCache* cache = open_for_dump();
1658 if (cache == nullptr) {
1659 return nullptr; // Cache file is closed
1660 }
1661 if (nm->is_osr_method()) {
1662 return nullptr; // No OSR
1663 }
1664 if (!compiler->is_c1() && !compiler->is_c2()) {
1665 // Only c1 and c2 compilers
1666 return nullptr;
1667 }
1668 int comp_level = nm->comp_level();
1669 if (comp_level == CompLevel_full_profile) {
1670 // Do not cache C1 compiles with full profile i.e. tier3
1671 return nullptr;
1672 }
1673 assert(comp_level == CompLevel_simple || comp_level == CompLevel_limited_profile || comp_level == CompLevel_full_optimization, "must be");
1674
1675 TraceTime t1("Total time to store AOT code", &_t_totalStore, enable_timers(), false);
1676 AOTCodeEntry* entry = nullptr;
1677 entry = cache->write_nmethod(nm, for_preload);
1678 if (entry == nullptr) {
1679 log_info(aot, codecache, nmethod)("%d (L%d): nmethod store attempt failed", nm->compile_id(), comp_level);
1680 }
1681 return entry;
1682 }
1683
// Serialize an nmethod into the AOT Code Cache stream.
// Layout written, in order (offsets relative to entry_position):
//   [method name][archived nmethod copy]
//   NOT_PRODUCT only: [asm-remark count + records][dbg-string count + strings]
//   [relocation data][oops][metadata][oop map set][immutable data]
//   [reloc immediate oops/metadata][load-time relocations]
// compile_nmethod() reads these sections back in exactly this order, so the
// write order here must not change independently.
// Returns the created AOTCodeEntry, or nullptr on failure. On a lookup
// failure the write position is rewound to drop the partially written entry.
AOTCodeEntry* AOTCodeCache::write_nmethod(nmethod* nm, bool for_preload) {
  assert(!nm->has_clinit_barriers() || _gen_preload_code, "sanity");
  uint comp_id = nm->compile_id();
  uint comp_level = nm->comp_level();
  Method* method = nm->method();
  bool method_in_cds = MetaspaceShared::is_in_shared_metaspace((address)method);
  InstanceKlass* holder = method->method_holder();
  bool klass_in_cds = holder->is_shared() && !holder->defined_by_other_loaders();
  bool builtin_loader = holder->class_loader_data()->is_builtin_class_loader_data();
  // Methods from custom class loaders are never cached.
  if (!builtin_loader) {
    ResourceMark rm;
    log_info(aot, codecache, nmethod)("%d (L%d): Skip method '%s' loaded by custom class loader %s", comp_id, (int)comp_level, method->name_and_sig_as_C_string(), holder->class_loader_data()->loader_name());
    return nullptr;
  }
  // Preload entries require both the method and its holder to live in CDS.
  if (for_preload && !(method_in_cds && klass_in_cds)) {
    ResourceMark rm;
    log_info(aot, codecache, nmethod)("%d (L%d): Skip method '%s' for preload: not in CDS", comp_id, (int)comp_level, method->name_and_sig_as_C_string());
    return nullptr;
  }
  assert(!for_preload || method_in_cds, "sanity");
  _for_preload = for_preload;
  _has_clinit_barriers = nm->has_clinit_barriers();

  if (!align_write()) {
    return nullptr;
  }

  uint entry_position = _write_position;

  uint decomp = (method->method_data() == nullptr) ? 0 : method->method_data()->decompile_count();

  // Is this one-step workflow assembly phase?
  // In this phase compilation is done based on saved profiling data
  // without application run. Ignore decompilation counters in such case.
  // Also ignore it for C1 code because it is decompiled unconditionally
  // when C2 generated code is published.
  bool ignore_decompile = (comp_level == CompLevel_limited_profile) ||
                          CDSConfig::is_dumping_final_static_archive();

  // Write name
  uint name_offset = 0;
  uint name_size = 0;
  uint hash = 0;
  uint n;
  {
    ResourceMark rm;
    const char* name = method->name_and_sig_as_C_string();
    log_info(aot, codecache, nmethod)("%d (L%d): Writing nmethod '%s' (comp level: %d, decomp: %d%s%s) to AOT Code Cache",
                                      comp_id, (int)comp_level, name, comp_level, decomp,
                                      (ignore_decompile ? ", ignore_decomp" : ""),
                                      (nm->has_clinit_barriers() ? ", has clinit barriers" : ""));

    // Diagnostic logging of the holder's loader and protection domain.
    LogStreamHandle(Info, aot, codecache, loader) log;
    if (log.is_enabled()) {
      oop loader = holder->class_loader();
      oop domain = holder->protection_domain();
      log.print("Holder: ");
      holder->print_value_on(&log);
      log.print(" loader: ");
      if (loader == nullptr) {
        log.print("nullptr");
      } else {
        loader->print_value_on(&log);
      }
      log.print(" domain: ");
      if (domain == nullptr) {
        log.print("nullptr");
      } else {
        domain->print_value_on(&log);
      }
      log.cr();
    }
    name_offset = _write_position - entry_position;
    name_size = (uint)strlen(name) + 1; // Includes '\0'
    n = write_bytes(name, name_size);
    if (n != name_size) {
      return nullptr;
    }
    // Name hash is the entry's lookup key for nmethods.
    hash = java_lang_String::hash_code((const jbyte*)name, (int)strlen(name));
  }
  // Reserve space in the stream and copy the whole nmethod into it.
  uint archived_nm_offset = _write_position - entry_position;
  nmethod* archived_nm = (nmethod*)reserve_bytes(nm->size());
  if (archived_nm == nullptr) {
    return nullptr;
  }
  nm->copy_to((address)archived_nm);

  archived_nm->prepare_for_archiving();

#ifndef PRODUCT
  // Write asm remarks: a count slot (back-patched after iteration), then
  // (offset, nul-terminated string) pairs.
  uint* count_ptr = (uint *)reserve_bytes(sizeof(uint));
  if (count_ptr == nullptr) {
    return nullptr;
  }
  uint count = 0;
  bool result = nm->asm_remarks().iterate([&] (uint offset, const char* str) -> bool {
    log_info(aot, codecache, nmethod)("asm remark offset=%d, str=%s", offset, str);
    n = write_bytes(&offset, sizeof(uint));
    if (n != sizeof(uint)) {
      return false;
    }
    n = write_bytes(str, (uint)strlen(str) + 1);
    if (n != strlen(str) + 1) {
      return false;
    }
    count += 1;
    return true;
  });
  if (!result) {
    return nullptr;
  }
  *count_ptr = count;

  // Write dbg strings: same scheme, count slot then nul-terminated strings.
  count_ptr = (uint *)reserve_bytes(sizeof(uint));
  if (count_ptr == nullptr) {
    return nullptr;
  }
  count = 0;
  result = nm->dbg_strings().iterate([&] (const char* str) -> bool {
    log_info(aot, codecache, nmethod)("dbg string[" INTPTR_FORMAT "]=%s", p2i(str), str);
    n = write_bytes(str, (uint)strlen(str) + 1);
    if (n != strlen(str) + 1) {
      return false;
    }
    count += 1;
    return true;
  });
  if (!result) {
    return nullptr;
  }
  *count_ptr = count;
#endif /* PRODUCT */

  // Write the raw relocation data of the nmethod.
  uint reloc_data_size = nm->relocation_size();
  n = write_bytes((address)nm->relocation_begin(), reloc_data_size);
  if (n != reloc_data_size) {
    return nullptr;
  }

  // Write oops and metadata present in the nmethod's data region
  if (!write_oops(nm)) {
    if (lookup_failed() && !failed()) {
      // Skip this method and reposition file
      set_write_position(entry_position);
    }
    return nullptr;
  }
  if (!write_metadata(nm)) {
    if (lookup_failed() && !failed()) {
      // Skip this method and reposition file
      set_write_position(entry_position);
    }
    return nullptr;
  }

  if (!write_oop_map_set(*nm)) {
    return nullptr;
  }

  uint immutable_data_size = nm->immutable_data_size();
  n = write_bytes(nm->immutable_data_begin(), immutable_data_size);
  if (n != immutable_data_size) {
    return nullptr;
  }

  // Collect and write the oops/metadata referenced as relocation immediates.
  JavaThread* thread = JavaThread::current();
  HandleMark hm(thread);
  GrowableArray<Handle> oop_list;
  GrowableArray<Metadata*> metadata_list;

  nm->create_reloc_immediates_list(thread, oop_list, metadata_list);
  if (!write_nmethod_reloc_immediates(oop_list, metadata_list)) {
    if (lookup_failed() && !failed()) {
      // Skip this method and reposition file
      set_write_position(entry_position);
    }
    return nullptr;
  }

  if (!write_nmethod_loadtime_relocations(thread, nm, oop_list, metadata_list)) {
    return nullptr;
  }

  uint entry_size = _write_position - entry_position;
  AOTCodeEntry* entry = new (this) AOTCodeEntry(entry_position, entry_size, name_offset, name_size,
                                                archived_nm_offset, 0, 0, 0,
                                                AOTCodeEntry::Code, hash, nm->content_begin(), comp_level, comp_id, decomp,
                                                nm->has_clinit_barriers(), for_preload, ignore_decompile);
  if (method_in_cds) {
    entry->set_method(method);
  }
#ifdef ASSERT
  if (nm->has_clinit_barriers() || for_preload) {
    assert(for_preload, "sanity");
    assert(entry->method() != nullptr, "sanity");
  }
#endif
  {
    ResourceMark rm;
    const char* name = nm->method()->name_and_sig_as_C_string();
    log_info(aot, codecache, nmethod)("%d (L%d): Wrote nmethod '%s'%s to AOT Code Cache",
                                      comp_id, (int)comp_level, name, (for_preload ? " (for preload)" : ""));
  }
  // NOTE(review): under VerifyCachedCode the entry is written but nullptr is
  // returned — presumably so the normal compilation path still runs for
  // comparison; confirm against the flag's definition.
  if (VerifyCachedCode) {
    return nullptr;
  }
  return entry;
}
1894
1895 bool AOTCodeCache::load_nmethod(ciEnv* env, ciMethod* target, int entry_bci, AbstractCompiler* compiler, CompLevel comp_level) {
1896 if (!is_using_code()) {
1897 return false;
1898 }
1899 AOTCodeCache* cache = open_for_use();
1900 if (cache == nullptr) {
1901 return false;
1902 }
1903 assert(entry_bci == InvocationEntryBci, "unexpected entry_bci=%d", entry_bci);
1904 TraceTime t1("Total time to load AOT code", &_t_totalLoad, enable_timers(), false);
1905 CompileTask* task = env->task();
1906 task->mark_aot_load_start(os::elapsed_counter());
1907 AOTCodeEntry* entry = task->aot_code_entry();
1908 bool preload = task->preload();
1909 assert(entry != nullptr, "sanity");
1910 if (log_is_enabled(Info, aot, codecache, nmethod)) {
1911 uint decomp = (target->method_data() == nullptr) ? 0 : target->method_data()->decompile_count();
1912 VM_ENTRY_MARK;
1913 ResourceMark rm;
1914 methodHandle method(THREAD, target->get_Method());
1915 const char* target_name = method->name_and_sig_as_C_string();
1916 uint hash = java_lang_String::hash_code((const jbyte*)target_name, (int)strlen(target_name));
1917 bool clinit_brs = entry->has_clinit_barriers();
1918 log_info(aot, codecache, nmethod)("%d (L%d): %s nmethod '%s' (decomp: %d, hash: " UINT32_FORMAT_X_0 "%s%s)",
1919 task->compile_id(), task->comp_level(), (preload ? "Preloading" : "Reading"),
1920 target_name, decomp, hash, (clinit_brs ? ", has clinit barriers" : ""),
1921 (entry->ignore_decompile() ? ", ignore_decomp" : ""));
1922 }
1923 ReadingMark rdmk;
1924 if (rdmk.failed()) {
1925 // Cache is closed, cannot touch anything.
1926 return false;
1927 }
1928
1929 AOTCodeReader reader(cache, entry, task);
1930 bool success = reader.compile_nmethod(env, target, compiler);
1931 if (success) {
1932 task->set_num_inlined_bytecodes(entry->num_inlined_bytecodes());
1933 } else {
1934 entry->set_load_fail();
1935 }
1936 task->mark_aot_load_finish(os::elapsed_counter());
1937 return success;
1938 }
1939
// Deserialize an nmethod from the AOT Code Cache and register it with the VM.
// Reads back, in the exact order write_nmethod() produced them:
//   [archived nmethod][asm remarks + dbg strings (non-product)]
//   [relocation data][oops/metadata][oop map set][immutable data]
//   [reloc immediate oops/metadata]
// Returns true only if registration succeeded (task->is_success()).
bool AOTCodeReader::compile_nmethod(ciEnv* env, ciMethod* target, AbstractCompiler* compiler) {
  CompileTask* task = env->task();
  AOTCodeEntry* aot_code_entry = (AOTCodeEntry*)_entry;
  nmethod* nm = nullptr; // NOTE(review): not used in this function body; shadowed in the ASSERT log block below

  // The archived nmethod image sits at code_offset within the entry.
  uint entry_position = aot_code_entry->offset();
  uint archived_nm_offset = entry_position + aot_code_entry->code_offset();
  nmethod* archived_nm = (nmethod*)addr(archived_nm_offset);
  set_read_position(archived_nm_offset + archived_nm->size());

  OopRecorder* oop_recorder = new OopRecorder(env->arena());
  env->set_oop_recorder(oop_recorder);

  uint offset;

#ifndef PRODUCT
  // Read asm remarks: count followed by (offset, nul-terminated string) pairs.
  offset = read_position();
  uint count = *(uint *)addr(offset);
  offset += sizeof(uint);
  AsmRemarks asm_remarks;
  for (uint i = 0; i < count; i++) {
    uint remark_offset = *(uint *)addr(offset);
    offset += sizeof(uint);
    const char* remark = (const char*)addr(offset);
    offset += (uint)strlen(remark)+1;
    asm_remarks.insert(remark_offset, remark);
  }
  set_read_position(offset);

  // Read dbg strings: count followed by nul-terminated strings.
  count = *(uint *)addr(offset);
  offset += sizeof(uint);
  DbgStrings dbg_strings;
  for (uint i = 0; i < count; i++) {
    const char* str = (const char*)addr(offset);
    offset += (uint)strlen(str)+1;
    dbg_strings.insert(str);
  }
  set_read_position(offset);
#endif /* PRODUCT */

  // Relocation data is consumed in place (no copy); just record its address
  // and skip past it.
  offset = read_position();
  address reloc_data = (address)addr(offset);
  offset += archived_nm->relocation_size();
  set_read_position(offset);

  // Read oops and metadata
  VM_ENTRY_MARK
  GrowableArray<Handle> oop_list;
  GrowableArray<Metadata*> metadata_list;

  if (!read_oop_metadata_list(THREAD, target, oop_list, metadata_list, oop_recorder)) {
    return false;
  }

  ImmutableOopMapSet* oopmaps = read_oop_map_set();

  // Immutable data is also consumed in place.
  offset = read_position();
  address immutable_data = (address)addr(offset);
  offset += archived_nm->immutable_data_size();
  set_read_position(offset);

  // Oops/metadata referenced as relocation immediates (no recorder).
  GrowableArray<Handle> reloc_immediate_oop_list;
  GrowableArray<Metadata*> reloc_immediate_metadata_list;
  if (!read_oop_metadata_list(THREAD, target, reloc_immediate_oop_list, reloc_immediate_metadata_list, nullptr)) {
    return false;
  }

  // Read Dependencies (compressed already)
  Dependencies* dependencies = new Dependencies(env);
  dependencies->set_content(immutable_data, archived_nm->dependencies_size());
  env->set_dependencies(dependencies);

  const char* name = addr(entry_position + aot_code_entry->name_offset());

  // NOTE(review): under VerifyCachedCode loading is aborted here —
  // presumably to force a fresh compile for comparison; confirm against
  // the flag's definition.
  if (VerifyCachedCode) {
    return false;
  }

  TraceTime t1("Total time to register AOT nmethod", &_t_totalRegister, enable_timers(), false);
  env->register_aot_method(THREAD,
                           target,
                           compiler,
                           archived_nm,
                           reloc_data,
                           oop_list,
                           metadata_list,
                           oopmaps,
                           immutable_data,
                           reloc_immediate_oop_list,
                           reloc_immediate_metadata_list,
                           NOT_PRODUCT_ARG(asm_remarks)
                           NOT_PRODUCT_ARG(dbg_strings)
                           this);
  bool success = task->is_success();
  if (success) {
    aot_code_entry->set_loaded();
    log_info(aot, codecache, nmethod)("%d (L%d): Read nmethod '%s' from AOT Code Cache", compile_id(), comp_level(), name);
#ifdef ASSERT
    LogStreamHandle(Debug, aot, codecache, nmethod) log;
    if (log.is_enabled()) {
      nmethod* nm = target->get_Method()->code();
      FlagSetting fs(PrintRelocations, true);
      nm->print_on(&log);
      nm->decode2(&log);
    }
#endif
  }

  return success;
}
2052
2053 bool skip_preload(methodHandle mh) {
2054 if (!mh->method_holder()->is_loaded()) {
2055 return true;
2056 }
2057 DirectiveSet* directives = DirectivesStack::getMatchingDirective(mh, nullptr);
2058 if (directives->DontPreloadOption) {
2059 LogStreamHandle(Info, aot, codecache, init) log;
2060 if (log.is_enabled()) {
2061 log.print("Exclude preloading code for ");
2062 mh->print_value_on(&log);
2063 }
2064 return true;
2065 }
2066 return false;
2067 }
2068
2069 bool AOTCodeCache::gen_preload_code(ciMethod* m, int entry_bci) {
2070 VM_ENTRY_MARK;
2071 return (entry_bci == InvocationEntryBci) && is_on() && _cache->gen_preload_code() &&
2072 AOTCacheAccess::can_generate_aot_code(m->get_Method());
2073 }
2074
2075 void AOTCodeCache::preload_code(JavaThread* thread) {
2076 if ((ClassInitBarrierMode == 0) || !is_on_for_use()) {
2077 return;
2078 }
2079 if ((DisableCachedCode & (1 << 3)) != 0) {
2080 return; // no preloaded code (level 5);
2081 }
2082 _cache->preload_startup_code(thread);
2083 }
2084
2085 void AOTCodeCache::preload_startup_code(TRAPS) {
2086 if (CompilationPolicy::compiler_count(CompLevel_full_optimization) == 0) {
2087 // Since we reuse the CompilerBroker API to install cached code, we're required to have a JIT compiler for the
2088 // level we want (that is CompLevel_full_optimization).
2089 return;
2090 }
2091 assert(_for_use, "sanity");
2092 uint count = _load_header->entries_count();
2093 if (_load_entries == nullptr) {
2094 // Read it
2095 _search_entries = (uint*)addr(_load_header->entries_offset()); // [id, index]
2096 _load_entries = (AOTCodeEntry*)(_search_entries + 2 * count);
2097 log_info(aot, codecache, init)("Read %d entries table at offset %d from AOT Code Cache", count, _load_header->entries_offset());
2098 }
2099 uint preload_entries_count = _load_header->preload_entries_count();
2100 if (preload_entries_count > 0) {
2101 uint* entries_index = (uint*)addr(_load_header->preload_entries_offset());
2102 log_info(aot, codecache, init)("Load %d preload entries from AOT Code Cache", preload_entries_count);
2103 uint count = MIN2(preload_entries_count, SCLoadStop);
2104 for (uint i = SCLoadStart; i < count; i++) {
2105 uint index = entries_index[i];
2106 AOTCodeEntry* entry = &(_load_entries[index]);
2107 if (entry->not_entrant()) {
2108 continue;
2109 }
2110 Method* m = AOTCacheAccess::convert_offset_to_method(entry->method_offset());
2111 entry->set_method(m);
2112 methodHandle mh(THREAD, entry->method());
2113 assert((mh.not_null() && MetaspaceShared::is_in_shared_metaspace((address)mh())), "sanity");
2114 if (skip_preload(mh)) {
2115 continue; // Exclude preloading for this method
2116 }
2117 assert(mh->method_holder()->is_loaded(), "");
2118 if (!mh->method_holder()->is_linked()) {
2119 assert(!HAS_PENDING_EXCEPTION, "");
2120 mh->method_holder()->link_class(THREAD);
2121 if (HAS_PENDING_EXCEPTION) {
2122 LogStreamHandle(Info, aot, codecache) log;
2123 if (log.is_enabled()) {
2124 ResourceMark rm;
2125 log.print("Linkage failed for %s: ", mh->method_holder()->external_name());
2126 THREAD->pending_exception()->print_value_on(&log);
2127 if (log_is_enabled(Debug, aot, codecache)) {
2128 THREAD->pending_exception()->print_on(&log);
2129 }
2130 }
2131 CLEAR_PENDING_EXCEPTION;
2132 }
2133 }
2134 if (mh->aot_code_entry() != nullptr) {
2135 // Second C2 compilation of the same method could happen for
2136 // different reasons without marking first entry as not entrant.
2137 continue; // Keep old entry to avoid issues
2138 }
2139 mh->set_aot_code_entry(entry);
2140 CompileBroker::compile_method(mh, InvocationEntryBci, CompLevel_full_optimization, 0, false, CompileTask::Reason_Preload, CHECK);
2141 }
2142 }
2143 }
2144
2145 // ------------ process code and data --------------
2146
2147 bool AOTCodeCache::write_relocations(CodeBlob& code_blob) {
2148 GrowableArray<uint> reloc_data;
2149 RelocIterator iter(&code_blob);
2150 LogStreamHandle(Trace, aot, codecache, reloc) log;
2151 while (iter.next()) {
2152 int idx = reloc_data.append(0); // default value
2153 switch (iter.type()) {
2154 case relocInfo::none:
2155 break;
2156 case relocInfo::runtime_call_type: {
2157 // Record offset of runtime destination
2158 CallRelocation* r = (CallRelocation*)iter.reloc();
2159 address dest = r->destination();
2160 if (dest == r->addr()) { // possible call via trampoline on Aarch64
2161 dest = (address)-1; // do nothing in this case when loading this relocation
2162 }
2163 reloc_data.at_put(idx, _table->id_for_address(dest, iter, nullptr, &code_blob));
2164 break;
2165 }
2166 case relocInfo::runtime_call_w_cp_type:
2167 fatal("runtime_call_w_cp_type unimplemented");
2168 break;
2169 case relocInfo::external_word_type: {
2170 // Record offset of runtime target
2171 address target = ((external_word_Relocation*)iter.reloc())->target();
2172 reloc_data.at_put(idx, _table->id_for_address(target, iter, nullptr, &code_blob));
2173 break;
2174 }
2175 case relocInfo::internal_word_type:
2176 break;
2177 case relocInfo::section_word_type:
2178 break;
2179 case relocInfo::post_call_nop_type:
2180 break;
2181 default:
2182 fatal("relocation %d unimplemented", (int)iter.type());
2183 break;
2184 }
2185 if (log.is_enabled()) {
2186 iter.print_current_on(&log);
2187 }
2188 }
2189
2190 // Write additional relocation data: uint per relocation
2191 // Write the count first
2192 int count = reloc_data.length();
2193 write_bytes(&count, sizeof(int));
2194 for (GrowableArrayIterator<uint> iter = reloc_data.begin();
2195 iter != reloc_data.end(); ++iter) {
2196 uint value = *iter;
2197 int n = write_bytes(&value, sizeof(uint));
2198 if (n != sizeof(uint)) {
2199 return false;
2200 }
2201 }
2202 return true;
2203 }
2204
// Patch an AOT-loaded nmethod's relocations for the current process.
// reloc_data holds one uint per relocation, recorded at dump time (an index
// into oop_list/metadata_list or an address-table id, depending on the
// relocation type; 0 for types that need no extra data). The counter 'j'
// advances once per relocation, in lock-step with the dump-time stream.
void AOTCodeReader::apply_relocations(nmethod* nm, GrowableArray<Handle> &oop_list, GrowableArray<Metadata*> &metadata_list) {
  LogStreamHandle(Info, aot, codecache, reloc) log;
  uint buffer_offset = read_position();
  int count = *(int*)addr(buffer_offset);
  buffer_offset += sizeof(int);
  if (log.is_enabled()) {
    log.print_cr("======== extra relocations count=%d", count);
  }
  // The per-relocation data is consumed in place from the cache buffer.
  uint* reloc_data = (uint*)addr(buffer_offset);
  buffer_offset += (count * sizeof(uint));
  set_read_position(buffer_offset);

  RelocIterator iter(nm);
  int j = 0;

  while (iter.next()) {
    switch (iter.type()) {
      case relocInfo::none:
        break;
      case relocInfo::oop_type: {
        // Immediate oops were saved by index into oop_list; pool oops are
        // re-fixed from the nmethod's own oop table.
        oop_Relocation* r = (oop_Relocation*)iter.reloc();
        if (r->oop_is_immediate()) {
          Handle h = oop_list.at(reloc_data[j]);
          r->set_value(cast_from_oop<address>(h()));
        } else {
          r->fix_oop_relocation();
        }
        break;
      }
      case relocInfo::metadata_type: {
        metadata_Relocation* r = (metadata_Relocation*)iter.reloc();
        Metadata* m;
        if (r->metadata_is_immediate()) {
          m = metadata_list.at(reloc_data[j]);
        } else {
          // Get already updated value from nmethod.
          int index = r->metadata_index();
          m = nm->metadata_at(index);
        }
        r->set_value((address)m);
        break;
      }
      case relocInfo::virtual_call_type:   // Fall through. They all call resolve_*_call blobs.
      case relocInfo::opt_virtual_call_type:
      case relocInfo::static_call_type: {
        // (address)-1 was recorded at dump time to mean "leave unpatched".
        address dest = _cache->address_for_id(reloc_data[j]);
        if (dest != (address)-1) {
          ((CallRelocation*)iter.reloc())->set_destination(dest);
        }
        break;
      }
      case relocInfo::trampoline_stub_type: {
        address dest = _cache->address_for_id(reloc_data[j]);
        if (dest != (address)-1) {
          ((trampoline_stub_Relocation*)iter.reloc())->set_destination(dest);
        }
        break;
      }
      case relocInfo::static_stub_type:
        break;
      case relocInfo::runtime_call_type: {
        address dest = _cache->address_for_id(reloc_data[j]);
        if (dest != (address)-1) {
          ((CallRelocation*)iter.reloc())->set_destination(dest);
        }
        break;
      }
      case relocInfo::runtime_call_w_cp_type:
        fatal("runtime_call_w_cp_type unimplemented");
        //address destination = iter.reloc()->value();
        break;
      case relocInfo::external_word_type: {
        address target = _cache->address_for_id(reloc_data[j]);
        // Add external address to global table
        int index = ExternalsRecorder::find_index(target);
        // Update index in relocation
        Relocation::add_jint(iter.data(), index);
        external_word_Relocation* reloc = (external_word_Relocation*)iter.reloc();
        assert(reloc->target() == target, "sanity");
        reloc->set_value(target); // Patch address in the code
        break;
      }
      case relocInfo::internal_word_type: {
        // Rebase a pointer into the code using the dump-time content start.
        internal_word_Relocation* r = (internal_word_Relocation*)iter.reloc();
        r->fix_relocation_after_aot_load(aot_code_entry()->dumptime_content_start_addr(), nm->content_begin());
        break;
      }
      case relocInfo::section_word_type: {
        section_word_Relocation* r = (section_word_Relocation*)iter.reloc();
        r->fix_relocation_after_aot_load(aot_code_entry()->dumptime_content_start_addr(), nm->content_begin());
        break;
      }
      case relocInfo::poll_type:
        break;
      case relocInfo::poll_return_type:
        break;
      case relocInfo::post_call_nop_type:
        break;
      case relocInfo::entry_guard_type:
        break;
      default:
        fatal("relocation %d unimplemented", (int)iter.type());
        break;
    }
    if (log.is_enabled()) {
      iter.print_current_on(&log);
    }
    j++;
  }
  // Every recorded uint must have been consumed exactly once.
  assert(j == count, "must be");
}
2316
2317
// Patch the relocations of a code blob just materialized from the AOT code
// cache. The read stream holds a count followed by one uint of auxiliary
// data per relocation (recorded at dump time); reloc_data[j] pairs with the
// j-th relocation visited by RelocIterator, in the same order.
void AOTCodeReader::fix_relocations(CodeBlob* code_blob) {
  LogStreamHandle(Trace, aot, reloc) log;
  uint offset = read_position();
  int count = *(int*)addr(offset); // number of auxiliary uint entries
  offset += sizeof(int);
  if (log.is_enabled()) {
    log.print_cr("======== extra relocations count=%d", count);
  }
  uint* reloc_data = (uint*)addr(offset);
  offset += (count * sizeof(uint));
  set_read_position(offset);

  RelocIterator iter(code_blob);
  int j = 0; // index into reloc_data, advanced once per relocation
  while (iter.next()) {
    switch (iter.type()) {
      case relocInfo::none:
        break;
      case relocInfo::runtime_call_type: {
        // reloc_data[j] is an address-table id; (address)-1 means "leave as is".
        address dest = _cache->address_for_id(reloc_data[j]);
        if (dest != (address)-1) {
          ((CallRelocation*)iter.reloc())->set_destination(dest);
        }
        break;
      }
      case relocInfo::runtime_call_w_cp_type:
        fatal("runtime_call_w_cp_type unimplemented");
        break;
      case relocInfo::external_word_type: {
        address target = _cache->address_for_id(reloc_data[j]);
        // Add external address to global table
        int index = ExternalsRecorder::find_index(target);
        // Update index in relocation
        Relocation::add_jint(iter.data(), index);
        external_word_Relocation* reloc = (external_word_Relocation*)iter.reloc();
        assert(reloc->target() == target, "sanity");
        reloc->set_value(target); // Patch address in the code
        break;
      }
      case relocInfo::internal_word_type: {
        // Rebase a pointer into the blob from its dump-time address to
        // its run-time address.
        internal_word_Relocation* r = (internal_word_Relocation*)iter.reloc();
        r->fix_relocation_after_aot_load(aot_code_entry()->dumptime_content_start_addr(), code_blob->content_begin());
        break;
      }
      case relocInfo::section_word_type: {
        section_word_Relocation* r = (section_word_Relocation*)iter.reloc();
        r->fix_relocation_after_aot_load(aot_code_entry()->dumptime_content_start_addr(), code_blob->content_begin());
        break;
      }
      case relocInfo::post_call_nop_type:
        break;
      default:
        fatal("relocation %d unimplemented", (int)iter.type());
        break;
    }
    if (log.is_enabled()) {
      iter.print_current_on(&log);
    }
    j++;
  }
  assert(j == count, "sanity");
}
2380
2381 bool AOTCodeCache::write_nmethod_loadtime_relocations(JavaThread* thread, nmethod* nm, GrowableArray<Handle>& oop_list, GrowableArray<Metadata*>& metadata_list) {
2382 LogStreamHandle(Info, aot, codecache, reloc) log;
2383 GrowableArray<uint> reloc_data;
2384 // Collect additional data
2385 RelocIterator iter(nm);
2386 bool has_immediate = false;
2387 while (iter.next()) {
2388 int idx = reloc_data.append(0); // default value
2389 switch (iter.type()) {
2390 case relocInfo::none:
2391 break;
2392 case relocInfo::oop_type: {
2393 oop_Relocation* r = (oop_Relocation*)iter.reloc();
2394 if (r->oop_is_immediate()) {
2395 // store index of oop in the reloc immediate oop list
2396 Handle h(thread, r->oop_value());
2397 int oop_idx = oop_list.find(h);
2398 assert(oop_idx != -1, "sanity check");
2399 reloc_data.at_put(idx, (uint)oop_idx);
2400 has_immediate = true;
2401 }
2402 break;
2403 }
2404 case relocInfo::metadata_type: {
2405 metadata_Relocation* r = (metadata_Relocation*)iter.reloc();
2406 if (r->metadata_is_immediate()) {
2407 // store index of metadata in the reloc immediate metadata list
2408 int metadata_idx = metadata_list.find(r->metadata_value());
2409 assert(metadata_idx != -1, "sanity check");
2410 reloc_data.at_put(idx, (uint)metadata_idx);
2411 has_immediate = true;
2412 }
2413 break;
2414 }
2415 case relocInfo::virtual_call_type: // Fall through. They all call resolve_*_call blobs.
2416 case relocInfo::opt_virtual_call_type:
2417 case relocInfo::static_call_type: {
2418 CallRelocation* r = (CallRelocation*)iter.reloc();
2419 address dest = r->destination();
2420 if (dest == r->addr()) { // possible call via trampoline on Aarch64
2421 dest = (address)-1; // do nothing in this case when loading this relocation
2422 }
2423 reloc_data.at_put(idx, _table->id_for_address(dest, iter, nullptr, nm));
2424 break;
2425 }
2426 case relocInfo::trampoline_stub_type: {
2427 address dest = ((trampoline_stub_Relocation*)iter.reloc())->destination();
2428 reloc_data.at_put(idx, _table->id_for_address(dest, iter, nullptr, nm));
2429 break;
2430 }
2431 case relocInfo::static_stub_type:
2432 break;
2433 case relocInfo::runtime_call_type: {
2434 // Record offset of runtime destination
2435 CallRelocation* r = (CallRelocation*)iter.reloc();
2436 address dest = r->destination();
2437 if (dest == r->addr()) { // possible call via trampoline on Aarch64
2438 dest = (address)-1; // do nothing in this case when loading this relocation
2439 }
2440 reloc_data.at_put(idx, _table->id_for_address(dest, iter, nullptr, nm));
2441 break;
2442 }
2443 case relocInfo::runtime_call_w_cp_type:
2444 fatal("runtime_call_w_cp_type unimplemented");
2445 break;
2446 case relocInfo::external_word_type: {
2447 // Record offset of runtime target
2448 address target = ((external_word_Relocation*)iter.reloc())->target();
2449 reloc_data.at_put(idx, _table->id_for_address(target, iter, nullptr, nm));
2450 break;
2451 }
2452 case relocInfo::internal_word_type:
2453 break;
2454 case relocInfo::section_word_type:
2455 break;
2456 case relocInfo::poll_type:
2457 break;
2458 case relocInfo::poll_return_type:
2459 break;
2460 case relocInfo::post_call_nop_type:
2461 break;
2462 case relocInfo::entry_guard_type:
2463 break;
2464 default:
2465 fatal("relocation %d unimplemented", (int)iter.type());
2466 break;
2467 }
2468 if (log.is_enabled()) {
2469 iter.print_current_on(&log);
2470 }
2471 }
2472
2473 // Write additional relocation data: uint per relocation
2474 // Write the count first
2475 int count = reloc_data.length();
2476 write_bytes(&count, sizeof(int));
2477 uint data_size = count * sizeof(uint);
2478 for (GrowableArrayIterator<uint> iter = reloc_data.begin();
2479 iter != reloc_data.end(); ++iter) {
2480 uint value = *iter;
2481 int n = write_bytes(&value, sizeof(uint));
2482 if (n != sizeof(uint)) {
2483 return false;
2484 break;
2485 }
2486 }
2487
2488 if (!align_write()) {
2489 return false;
2490 }
2491 return true; //success;
2492 }
2493
2494 bool AOTCodeCache::write_nmethod_reloc_immediates(GrowableArray<Handle>& oop_list, GrowableArray<Metadata*>& metadata_list) {
2495 int count = oop_list.length();
2496 if (!write_bytes(&count, sizeof(int))) {
2497 return false;
2498 }
2499 for (GrowableArrayIterator<Handle> iter = oop_list.begin();
2500 iter != oop_list.end(); ++iter) {
2501 Handle h = *iter;
2502 if (!write_oop(h())) {
2503 return false;
2504 }
2505 }
2506
2507 count = metadata_list.length();
2508 if (!write_bytes(&count, sizeof(int))) {
2509 return false;
2510 }
2511 for (GrowableArrayIterator<Metadata*> iter = metadata_list.begin();
2512 iter != metadata_list.end(); ++iter) {
2513 Metadata* m = *iter;
2514 if (!write_metadata(m)) {
2515 return false;
2516 }
2517 }
2518 return true;
2519 }
2520
// Write an nmethod's debug information to the cache: the scopes-data
// stream size, the PcDesc entry count, then the raw stream bytes and the
// raw PcDesc array. Returns false on any short write.
bool AOTCodeCache::write_debug_info(DebugInformationRecorder* recorder) {
  if (!align_write()) {
    return false;
  }
  // Don't call data_size() and pcs_size(). They will freeze OopRecorder.
  int data_size = recorder->stream()->position(); // In bytes
  uint n = write_bytes(&data_size, sizeof(int));
  if (n != sizeof(int)) {
    return false;
  }
  int pcs_length = recorder->pcs_length(); // Number of PcDesc entries (converted to bytes below)
  n = write_bytes(&pcs_length, sizeof(int));
  if (n != sizeof(int)) {
    return false;
  }
  n = write_bytes(recorder->stream()->buffer(), data_size);
  if (n != (uint)data_size) {
    return false;
  }
  uint pcs_size = pcs_length * sizeof(PcDesc);
  n = write_bytes(recorder->pcs(), pcs_size);
  if (n != pcs_size) {
    return false;
  }
  return true;
}
2547
2548 DebugInformationRecorder* AOTCodeReader::read_debug_info(OopRecorder* oop_recorder) {
2549 uint code_offset = align_up(read_position(), DATA_ALIGNMENT);
2550 int data_size = *(int*)addr(code_offset);
2551 code_offset += sizeof(int);
2552 int pcs_length = *(int*)addr(code_offset);
2553 code_offset += sizeof(int);
2554
2555 log_debug(aot, codecache)("======== read DebugInfo [%d, %d]:", data_size, pcs_length);
2556
2557 // Aligned initial sizes
2558 int data_size_align = align_up(data_size, DATA_ALIGNMENT);
2559 int pcs_length_align = pcs_length + 1;
2560 assert(sizeof(PcDesc) > DATA_ALIGNMENT, "sanity");
2561 DebugInformationRecorder* recorder = new DebugInformationRecorder(oop_recorder, data_size_align, pcs_length);
2562
2563 copy_bytes(addr(code_offset), recorder->stream()->buffer(), data_size_align);
2564 recorder->stream()->set_position(data_size);
2565 code_offset += data_size;
2566
2567 uint pcs_size = pcs_length * sizeof(PcDesc);
2568 copy_bytes(addr(code_offset), (address)recorder->pcs(), pcs_size);
2569 code_offset += pcs_size;
2570 set_read_position(code_offset);
2571 return recorder;
2572 }
2573
2574 bool AOTCodeCache::write_metadata(nmethod* nm) {
2575 int count = nm->metadata_count()-1;
2576 if (!write_bytes(&count, sizeof(int))) {
2577 return false;
2578 }
2579 for (Metadata** p = nm->metadata_begin(); p < nm->metadata_end(); p++) {
2580 if (!write_metadata(*p)) {
2581 return false;
2582 }
2583 }
2584 return true;
2585 }
2586
// Write all metadata recorded for a compiled method: the total count, then
// each entry starting at index 1 (slot 0 is the recorder's virtual nullptr
// and is not written). Returns false on any failed write.
bool AOTCodeCache::write_metadata(OopRecorder* oop_recorder) {
  int metadata_count = oop_recorder->metadata_count();
  uint n = write_bytes(&metadata_count, sizeof(int));
  if (n != sizeof(int)) {
    return false;
  }

  log_debug(aot, codecache)("======== write metadata [%d]:", metadata_count);

  for (int i = 1; i < metadata_count; i++) { // skip first virtual nullptr
    Metadata* m = oop_recorder->metadata_at(i);
    // Optional per-entry trace of what is being written.
    LogStreamHandle(Debug, aot, codecache, metadata) log;
    if (log.is_enabled()) {
      log.print("%d: " INTPTR_FORMAT " ", i, p2i(m));
      if (m == (Metadata*)Universe::non_oop_word()) {
        log.print("non-metadata word");
      } else if (m == nullptr) {
        log.print("nullptr-oop");
      } else {
        Metadata::print_value_on_maybe_null(&log, m);
      }
      log.cr();
    }
    if (!write_metadata(m)) {
      return false;
    }
  }
  return true;
}
2616
// Write one Metadata* entry, tagged with a DataKind:
//   nullptr          -> DataKind::Null
//   non-oop sentinel -> DataKind::No_Data
//   Klass            -> delegated to write_klass()
//   Method           -> delegated to write_method()
//   MethodCounters   -> DataKind::MethodCnts followed by the owning Method
// Any other metadata kind is fatal. NOTE(review): the kind tag is written
// with sizeof(int) bytes while the reader consumes sizeof(DataKind);
// presumably these are equal -- verify if DataKind's underlying type changes.
bool AOTCodeCache::write_metadata(Metadata* m) {
  uint n = 0;
  if (m == nullptr) {
    DataKind kind = DataKind::Null;
    n = write_bytes(&kind, sizeof(int));
    if (n != sizeof(int)) {
      return false;
    }
  } else if (m == (Metadata*)Universe::non_oop_word()) {
    DataKind kind = DataKind::No_Data;
    n = write_bytes(&kind, sizeof(int));
    if (n != sizeof(int)) {
      return false;
    }
  } else if (m->is_klass()) {
    if (!write_klass((Klass*)m)) {
      return false;
    }
  } else if (m->is_method()) {
    if (!write_method((Method*)m)) {
      return false;
    }
  } else if (m->is_methodCounters()) {
    DataKind kind = DataKind::MethodCnts;
    n = write_bytes(&kind, sizeof(int));
    if (n != sizeof(int)) {
      return false;
    }
    // MethodCounters are not serialized directly; record the owning Method
    // so the reader can re-materialize the counters from it.
    if (!write_method(((MethodCounters*)m)->method())) {
      return false;
    }
    log_info(aot, codecache)("%d (L%d): Write MethodCounters : " INTPTR_FORMAT, compile_id(), comp_level(), p2i(m));
  } else { // Not supported
    fatal("metadata : " INTPTR_FORMAT " unimplemented", p2i(m));
    return false; // unreachable after fatal(); kept for the compiler
  }
  return true;
}
2655
// Read back the metadata entries for the method being loaded and register
// each one with 'oop_recorder'. Entry 0 (the recorder's virtual nullptr)
// was not stored, so reading starts at index 1. Returns false if any
// per-entry lookup fails.
bool AOTCodeReader::read_metadata(OopRecorder* oop_recorder, ciMethod* target) {
  uint code_offset = read_position();
  int metadata_count = *(int*)addr(code_offset);
  code_offset += sizeof(int);
  set_read_position(code_offset);

  log_debug(aot, codecache)("======== read metadata [%d]:", metadata_count);

  if (metadata_count == 0) {
    return true;
  }
  {
    VM_ENTRY_MARK; // metadata lookups below must run in VM state
    methodHandle comp_method(THREAD, target->get_Method());

    for (int i = 1; i < metadata_count; i++) {
      Metadata* m = read_metadata(comp_method);
      if (lookup_failed()) {
        return false;
      }
      // Register in the recorder: real metadata gets (or reuses) an index,
      // sentinels (nullptr / non-oop word) get an allocated slot.
      if (oop_recorder->is_real(m)) {
        oop_recorder->find_index(m);
      } else {
        oop_recorder->allocate_metadata_index(m);
      }
      LogTarget(Debug, aot, codecache, metadata) log;
      if (log.is_enabled()) {
        LogStream ls(log);
        ls.print("%d: " INTPTR_FORMAT " ", i, p2i(m));
        if (m == (Metadata*)Universe::non_oop_word()) {
          ls.print("non-metadata word");
        } else if (m == nullptr) {
          ls.print("nullptr-oop");
        } else {
          Metadata::print_value_on_maybe_null(&ls, m);
        }
        ls.cr();
      }
    }
  }
  return true;
}
2698
// Read a single Metadata* entry written by AOTCodeCache::write_metadata().
// Dispatches on the DataKind tag. For MethodCnts a nested Method entry
// follows and its MethodCounters are fetched (possibly allocating them).
// Sets lookup_failed on unknown kinds or failed lookups; may return
// nullptr or the non-oop-word sentinel as legitimate values.
Metadata* AOTCodeReader::read_metadata(const methodHandle& comp_method) {
  uint code_offset = read_position();
  Metadata* m = nullptr;
  DataKind kind = *(DataKind*)addr(code_offset);
  code_offset += sizeof(DataKind);
  set_read_position(code_offset);
  if (kind == DataKind::Null) {
    m = (Metadata*)nullptr;
  } else if (kind == DataKind::No_Data) {
    m = (Metadata*)Universe::non_oop_word(); // "non-metadata word" sentinel
  } else if (kind == DataKind::Klass || kind == DataKind::Klass_Shared) {
    m = (Metadata*)read_klass(comp_method, (kind == DataKind::Klass_Shared));
  } else if (kind == DataKind::Method || kind == DataKind::Method_Shared) {
    m = (Metadata*)read_method(comp_method, (kind == DataKind::Method_Shared));
  } else if (kind == DataKind::MethodCnts) {
    // The writer stored the owning Method after the MethodCnts tag;
    // resolve it, then obtain its MethodCounters.
    kind = *(DataKind*)addr(code_offset);
    bool shared = (kind == DataKind::Method_Shared);
    assert(kind == DataKind::Method || shared, "Sanity");
    code_offset += sizeof(DataKind);
    set_read_position(code_offset);
    m = (Metadata*)read_method(comp_method, shared);
    if (m != nullptr) {
      Method* method = (Method*)m;
      m = method->get_method_counters(Thread::current());
      if (m == nullptr) {
        set_lookup_failed();
        log_info(aot, codecache)("%d (L%d): Failed to get MethodCounters", compile_id(), comp_level());
      } else {
        log_info(aot, codecache)("%d (L%d): Read MethodCounters : " INTPTR_FORMAT, compile_id(), comp_level(), p2i(m));
      }
    }
  } else {
    set_lookup_failed();
    log_info(aot, codecache)("%d (L%d): Unknown metadata's kind: %d", compile_id(), comp_level(), (int)kind);
  }
  return m;
}
2736
// Write a reference to 'method' into the cache.
// Preferred ("shared") form: DataKind::Method_Shared plus the method's
// offset from the AOT base address -- used when meta pointers are usable
// and the method is AOT-addressable. Fallback form: DataKind::Method plus
// the holder/name/signature strings, re-resolved at load time (this clears
// _for_preload). Fails with lookup_failed set for methods of non-builtin
// class loaders, hidden methods, or when clinit barriers make the
// fallback unusable.
bool AOTCodeCache::write_method(Method* method) {
  bool can_use_meta_ptrs = _use_meta_ptrs;
  Klass* klass = method->method_holder();
  if (klass->is_instance_klass()) {
    InstanceKlass* ik = InstanceKlass::cast(klass);
    ClassLoaderData* cld = ik->class_loader_data();
    if (!cld->is_builtin_class_loader_data()) {
      set_lookup_failed();
      return false;
    }
    if (_for_preload && !AOTCacheAccess::can_generate_aot_code(ik)) {
      _for_preload = false;
      // Bailout if code has clinit barriers:
      // method will be recompiled without them in any case
      if (_has_clinit_barriers) {
        set_lookup_failed();
        return false;
      }
      can_use_meta_ptrs = false;
    }
  }
  ResourceMark rm;
  if (can_use_meta_ptrs && AOTCacheAccess::can_generate_aot_code(method)) {
    DataKind kind = DataKind::Method_Shared;
    uint n = write_bytes(&kind, sizeof(int));
    if (n != sizeof(int)) {
      return false;
    }
    uint method_offset = AOTCacheAccess::delta_from_base_address((address)method);
    n = write_bytes(&method_offset, sizeof(uint));
    if (n != sizeof(uint)) {
      return false;
    }
    log_info(aot, codecache)("%d (L%d): Wrote shared method: %s @ 0x%08x", compile_id(), comp_level(), method->name_and_sig_as_C_string(), method_offset);
    return true;
  }
  // Bailout if code has clinit barriers:
  // method will be recompiled without them in any case
  if (_for_preload && _has_clinit_barriers) {
    set_lookup_failed();
    return false;
  }
  _for_preload = false;
  log_info(aot, codecache,cds)("%d (L%d): Not shared method: %s", compile_id(), comp_level(), method->name_and_sig_as_C_string());
  if (method->is_hidden()) { // Skip such nmethod
    set_lookup_failed();
    return false;
  }
  DataKind kind = DataKind::Method;
  uint n = write_bytes(&kind, sizeof(int));
  if (n != sizeof(int)) {
    return false;
  }
  Symbol* name = method->name();
  Symbol* holder = method->klass_name();
  Symbol* signat = method->signature();
  int name_length = name->utf8_length();
  int holder_length = holder->utf8_length();
  int signat_length = signat->utf8_length();

  // Write sizes and strings
  // Buffer layout: "<holder>\0<name>\0<signature>\0".
  int total_length = holder_length + 1 + name_length + 1 + signat_length + 1;
  char* dest = NEW_RESOURCE_ARRAY(char, total_length);
  holder->as_C_string(dest, total_length);
  dest[holder_length] = '\0';
  int pos = holder_length + 1;
  name->as_C_string(&(dest[pos]), (total_length - pos));
  pos += name_length;
  dest[pos++] = '\0';
  signat->as_C_string(&(dest[pos]), (total_length - pos));
  dest[total_length - 1] = '\0';

  LogTarget(Info, aot, codecache, loader) log;
  if (log.is_enabled()) {
    LogStream ls(log);
    oop loader = klass->class_loader();
    oop domain = klass->protection_domain();
    ls.print("Holder %s loader: ", dest);
    if (loader == nullptr) {
      ls.print("nullptr");
    } else {
      loader->print_value_on(&ls);
    }
    ls.print(" domain: ");
    if (domain == nullptr) {
      ls.print("nullptr");
    } else {
      domain->print_value_on(&ls);
    }
    ls.cr();
  }

  n = write_bytes(&holder_length, sizeof(int));
  if (n != sizeof(int)) {
    return false;
  }
  n = write_bytes(&name_length, sizeof(int));
  if (n != sizeof(int)) {
    return false;
  }
  n = write_bytes(&signat_length, sizeof(int));
  if (n != sizeof(int)) {
    return false;
  }
  n = write_bytes(dest, total_length);
  if (n != (uint)total_length) {
    return false;
  }
  // Replace the NUL separators with spaces for a readable log line.
  dest[holder_length] = ' ';
  dest[holder_length + 1 + name_length] = ' ';
  log_info(aot, codecache)("%d (L%d): Wrote method: %s", compile_id(), comp_level(), dest);
  return true;
}
2850
// Resolve a Method* reference written by AOTCodeCache::write_method().
// Shared form: reads a metaspace offset and validates that the Method and
// its holder are in CDS, loaded, and linked. String form: reads holder/
// name/signature lengths and strings, then looks the method up via the
// SymbolTable and SystemDictionary using the compiled method's loader
// (falling back to the default loader). Sets lookup_failed and returns
// nullptr on any failure.
Method* AOTCodeReader::read_method(const methodHandle& comp_method, bool shared) {
  uint code_offset = read_position();
  if (_cache->use_meta_ptrs() && shared) {
    uint method_offset = *(uint*)addr(code_offset);
    code_offset += sizeof(uint);
    set_read_position(code_offset);
    Method* m = AOTCacheAccess::convert_offset_to_method(method_offset);
    if (!MetaspaceShared::is_in_shared_metaspace((address)m)) {
      // Something changed in CDS
      set_lookup_failed();
      log_info(aot, codecache)("Lookup failed for shared method: " INTPTR_FORMAT " is not in CDS ", p2i((address)m));
      return nullptr;
    }
    assert(m->is_method(), "sanity");
    ResourceMark rm;
    Klass* k = m->method_holder();
    if (!k->is_instance_klass()) {
      set_lookup_failed();
      log_info(aot, codecache)("%d '%s' (L%d): Lookup failed for holder %s: not instance klass",
                               compile_id(), comp_method->name_and_sig_as_C_string(), comp_level(), k->external_name());
      return nullptr;
    } else if (!MetaspaceShared::is_in_shared_metaspace((address)k)) {
      set_lookup_failed();
      log_info(aot, codecache)("%d '%s' (L%d): Lookup failed for holder %s: not in CDS",
                               compile_id(), comp_method->name_and_sig_as_C_string(), comp_level(), k->external_name());
      return nullptr;
    } else if (!InstanceKlass::cast(k)->is_loaded()) {
      set_lookup_failed();
      log_info(aot, codecache)("%d '%s' (L%d): Lookup failed for holder %s: not loaded",
                               compile_id(), comp_method->name_and_sig_as_C_string(), comp_level(), k->external_name());
      return nullptr;
    } else if (!InstanceKlass::cast(k)->is_linked()) {
      set_lookup_failed();
      log_info(aot, codecache)("%d '%s' (L%d): Lookup failed for holder %s: not linked%s",
                               compile_id(), comp_method->name_and_sig_as_C_string(), comp_level(), k->external_name(), (_preload ? " for code preload" : ""));
      return nullptr;
    }
    log_info(aot, codecache)("%d (L%d): Shared method lookup: %s",
                             compile_id(), comp_level(), m->name_and_sig_as_C_string());
    return m;
  }
  // String form: three lengths, then "<holder>\0<name>\0<signature>\0".
  int holder_length = *(int*)addr(code_offset);
  code_offset += sizeof(int);
  int name_length = *(int*)addr(code_offset);
  code_offset += sizeof(int);
  int signat_length = *(int*)addr(code_offset);
  code_offset += sizeof(int);

  const char* dest = addr(code_offset);
  code_offset += holder_length + 1 + name_length + 1 + signat_length + 1;
  set_read_position(code_offset);
  TempNewSymbol klass_sym = SymbolTable::probe(&(dest[0]), holder_length);
  if (klass_sym == nullptr) {
    set_lookup_failed();
    log_info(aot, codecache)("%d (L%d): Probe failed for class %s", compile_id(), comp_level(), &(dest[0]));
    return nullptr;
  }
  // Use class loader of compiled method.
  Thread* thread = Thread::current();
  Handle loader(thread, comp_method->method_holder()->class_loader());
  Klass* k = SystemDictionary::find_instance_or_array_klass(thread, klass_sym, loader);
  assert(!thread->has_pending_exception(), "should not throw");
  if (k == nullptr && !loader.is_null()) {
    // Try default loader and domain
    k = SystemDictionary::find_instance_or_array_klass(thread, klass_sym, Handle());
    assert(!thread->has_pending_exception(), "should not throw");
  }
  if (k != nullptr) {
    if (!k->is_instance_klass()) {
      set_lookup_failed();
      log_info(aot, codecache)("%d (L%d): Lookup failed for holder %s: not instance klass",
                               compile_id(), comp_level(), &(dest[0]));
      return nullptr;
    } else if (!InstanceKlass::cast(k)->is_linked()) {
      set_lookup_failed();
      log_info(aot, codecache)("%d (L%d): Lookup failed for holder %s: not linked",
                               compile_id(), comp_level(), &(dest[0]));
      return nullptr;
    }
    log_info(aot, codecache)("%d (L%d): Holder lookup: %s", compile_id(), comp_level(), k->external_name());
  } else {
    set_lookup_failed();
    log_info(aot, codecache)("%d (L%d): Lookup failed for holder %s",
                             compile_id(), comp_level(), &(dest[0]));
    return nullptr;
  }
  TempNewSymbol name_sym = SymbolTable::probe(&(dest[holder_length + 1]), name_length);
  int pos = holder_length + 1 + name_length + 1;
  TempNewSymbol sign_sym = SymbolTable::probe(&(dest[pos]), signat_length);
  if (name_sym == nullptr) {
    set_lookup_failed();
    log_info(aot, codecache)("%d (L%d): Probe failed for method name %s",
                             compile_id(), comp_level(), &(dest[holder_length + 1]));
    return nullptr;
  }
  if (sign_sym == nullptr) {
    set_lookup_failed();
    log_info(aot, codecache)("%d (L%d): Probe failed for method signature %s",
                             compile_id(), comp_level(), &(dest[pos]));
    return nullptr;
  }
  Method* m = InstanceKlass::cast(k)->find_method(name_sym, sign_sym);
  if (m != nullptr) {
    ResourceMark rm;
    log_info(aot, codecache)("%d (L%d): Method lookup: %s", compile_id(), comp_level(), m->name_and_sig_as_C_string());
  } else {
    set_lookup_failed();
    log_info(aot, codecache)("%d (L%d): Lookup failed for method %s::%s%s",
                             compile_id(), comp_level(), &(dest[0]), &(dest[holder_length + 1]), &(dest[pos]));
    return nullptr;
  }
  return m;
}
2964
// Write a reference to 'klass' into the cache. Object-array klasses are
// written as their bottom klass plus a dimension count. A packed 'state'
// word carries (array_dim << 1) | initialized-bit. Preferred ("shared")
// form: DataKind::Klass_Shared + state + metaspace offset. Fallback form:
// DataKind::Klass + state + the class name string (clears _for_preload).
// Fails with lookup_failed set for non-builtin loaders, hidden classes,
// or when clinit barriers make the fallback unusable.
bool AOTCodeCache::write_klass(Klass* klass) {
  bool can_use_meta_ptrs = _use_meta_ptrs;
  uint array_dim = 0;
  if (klass->is_objArray_klass()) {
    array_dim = ObjArrayKlass::cast(klass)->dimension();
    klass = ObjArrayKlass::cast(klass)->bottom_klass(); // overwrites klass
  }
  uint init_state = 0;
  if (klass->is_instance_klass()) {
    InstanceKlass* ik = InstanceKlass::cast(klass);
    ClassLoaderData* cld = ik->class_loader_data();
    if (!cld->is_builtin_class_loader_data()) {
      set_lookup_failed();
      return false;
    }
    if (_for_preload && !AOTCacheAccess::can_generate_aot_code(ik)) {
      _for_preload = false;
      // Bailout if code has clinit barriers:
      // method will be recompiled without them in any case
      if (_has_clinit_barriers) {
        set_lookup_failed();
        return false;
      }
      can_use_meta_ptrs = false;
    }
    init_state = (ik->is_initialized() ? 1 : 0);
  }
  ResourceMark rm;
  uint state = (array_dim << 1) | (init_state & 1); // packed: dim + init bit
  if (can_use_meta_ptrs && AOTCacheAccess::can_generate_aot_code(klass)) {
    DataKind kind = DataKind::Klass_Shared;
    uint n = write_bytes(&kind, sizeof(int));
    if (n != sizeof(int)) {
      return false;
    }
    // Record state of instance klass initialization.
    n = write_bytes(&state, sizeof(int));
    if (n != sizeof(int)) {
      return false;
    }
    uint klass_offset = AOTCacheAccess::delta_from_base_address((address)klass);
    n = write_bytes(&klass_offset, sizeof(uint));
    if (n != sizeof(uint)) {
      return false;
    }
    log_info(aot, codecache)("%d (L%d): Wrote shared klass: %s%s%s @ 0x%08x", compile_id(), comp_level(), klass->external_name(),
                             (!klass->is_instance_klass() ? "" : (init_state == 1 ? " (initialized)" : " (not-initialized)")),
                             (array_dim > 0 ? " (object array)" : ""),
                             klass_offset);
    return true;
  }
  // Bailout if code has clinit barriers:
  // method will be recompiled without them in any case
  if (_for_preload && _has_clinit_barriers) {
    set_lookup_failed();
    return false;
  }
  _for_preload = false;
  log_info(aot, codecache,cds)("%d (L%d): Not shared klass: %s", compile_id(), comp_level(), klass->external_name());
  if (klass->is_hidden()) { // Skip such nmethod
    set_lookup_failed();
    return false;
  }
  DataKind kind = DataKind::Klass;
  uint n = write_bytes(&kind, sizeof(int));
  if (n != sizeof(int)) {
    return false;
  }
  // Record state of instance klass initialization.
  n = write_bytes(&state, sizeof(int));
  if (n != sizeof(int)) {
    return false;
  }
  Symbol* name = klass->name();
  int name_length = name->utf8_length();
  int total_length = name_length + 1;
  char* dest = NEW_RESOURCE_ARRAY(char, total_length);
  name->as_C_string(dest, total_length);
  dest[total_length - 1] = '\0';
  LogTarget(Info, aot, codecache, loader) log;
  if (log.is_enabled()) {
    LogStream ls(log);
    oop loader = klass->class_loader();
    oop domain = klass->protection_domain();
    ls.print("Class %s loader: ", dest);
    if (loader == nullptr) {
      ls.print("nullptr");
    } else {
      loader->print_value_on(&ls);
    }
    ls.print(" domain: ");
    if (domain == nullptr) {
      ls.print("nullptr");
    } else {
      domain->print_value_on(&ls);
    }
    ls.cr();
  }
  n = write_bytes(&name_length, sizeof(int));
  if (n != sizeof(int)) {
    return false;
  }
  n = write_bytes(dest, total_length);
  if (n != (uint)total_length) {
    return false;
  }
  log_info(aot, codecache)("%d (L%d): Wrote klass: %s%s%s",
                           compile_id(), comp_level(),
                           dest, (!klass->is_instance_klass() ? "" : (init_state == 1 ? " (initialized)" : " (not-initialized)")),
                           (array_dim > 0 ? " (object array)" : ""));
  return true;
}
3077
3078 Klass* AOTCodeReader::read_klass(const methodHandle& comp_method, bool shared) {
3079 uint code_offset = read_position();
3080 uint state = *(uint*)addr(code_offset);
3081 uint init_state = (state & 1);
3082 uint array_dim = (state >> 1);
3083 code_offset += sizeof(int);
3084 if (_cache->use_meta_ptrs() && shared) {
3085 uint klass_offset = *(uint*)addr(code_offset);
3086 code_offset += sizeof(uint);
3087 set_read_position(code_offset);
3088 Klass* k = AOTCacheAccess::convert_offset_to_klass(klass_offset);
3089 if (!MetaspaceShared::is_in_shared_metaspace((address)k)) {
3090 // Something changed in CDS
3091 set_lookup_failed();
3092 log_info(aot, codecache)("Lookup failed for shared klass: " INTPTR_FORMAT " is not in CDS ", p2i((address)k));
3093 return nullptr;
3094 }
3095 assert(k->is_klass(), "sanity");
3096 ResourceMark rm;
3097 if (k->is_instance_klass() && !InstanceKlass::cast(k)->is_loaded()) {
3098 set_lookup_failed();
3099 log_info(aot, codecache)("%d '%s' (L%d): Lookup failed for klass %s: not loaded",
3100 compile_id(), comp_method->name_and_sig_as_C_string(), comp_level(), k->external_name());
3101 return nullptr;
3102 } else
3103 // Allow not initialized klass which was uninitialized during code caching or for preload
3104 if (k->is_instance_klass() && !InstanceKlass::cast(k)->is_initialized() && (init_state == 1) && !_preload) {
3105 set_lookup_failed();
3106 log_info(aot, codecache)("%d '%s' (L%d): Lookup failed for klass %s: not initialized",
3107 compile_id(), comp_method->name_and_sig_as_C_string(), comp_level(), k->external_name());
3108 return nullptr;
3109 }
3110 if (array_dim > 0) {
3111 assert(k->is_instance_klass() || k->is_typeArray_klass(), "sanity check");
3112 Klass* ak = k->array_klass_or_null(array_dim);
3113 // FIXME: what would it take to create an array class on the fly?
3114 // Klass* ak = k->array_klass(dim, JavaThread::current());
3115 // guarantee(JavaThread::current()->pending_exception() == nullptr, "");
3116 if (ak == nullptr) {
3117 set_lookup_failed();
3118 log_info(aot, codecache)("%d (L%d): %d-dimension array klass lookup failed: %s",
3119 compile_id(), comp_level(), array_dim, k->external_name());
3120 }
3121 log_info(aot, codecache)("%d (L%d): Klass lookup: %s (object array)", compile_id(), comp_level(), k->external_name());
3122 return ak;
3123 } else {
3124 log_info(aot, codecache)("%d (L%d): Shared klass lookup: %s",
3125 compile_id(), comp_level(), k->external_name());
3126 return k;
3127 }
3128 }
3129 int name_length = *(int*)addr(code_offset);
3130 code_offset += sizeof(int);
3131 const char* dest = addr(code_offset);
3132 code_offset += name_length + 1;
3133 set_read_position(code_offset);
3134 TempNewSymbol klass_sym = SymbolTable::probe(&(dest[0]), name_length);
3135 if (klass_sym == nullptr) {
3136 set_lookup_failed();
3137 log_info(aot, codecache)("%d (L%d): Probe failed for class %s",
3138 compile_id(), comp_level(), &(dest[0]));
3139 return nullptr;
3140 }
3141 // Use class loader of compiled method.
3142 Thread* thread = Thread::current();
3143 Handle loader(thread, comp_method->method_holder()->class_loader());
3144 Klass* k = SystemDictionary::find_instance_or_array_klass(thread, klass_sym, loader);
3145 assert(!thread->has_pending_exception(), "should not throw");
3146 if (k == nullptr && !loader.is_null()) {
3147 // Try default loader and domain
3148 k = SystemDictionary::find_instance_or_array_klass(thread, klass_sym, Handle());
3149 assert(!thread->has_pending_exception(), "should not throw");
3150 }
3151 if (k != nullptr) {
3152 // Allow not initialized klass which was uninitialized during code caching
3153 if (k->is_instance_klass() && !InstanceKlass::cast(k)->is_initialized() && (init_state == 1)) {
3154 set_lookup_failed();
3155 log_info(aot, codecache)("%d (L%d): Lookup failed for klass %s: not initialized", compile_id(), comp_level(), &(dest[0]));
3156 return nullptr;
3157 }
3158 log_info(aot, codecache)("%d (L%d): Klass lookup %s", compile_id(), comp_level(), k->external_name());
3159 } else {
3160 set_lookup_failed();
3161 log_info(aot, codecache)("%d (L%d): Lookup failed for class %s", compile_id(), comp_level(), &(dest[0]));
3162 return nullptr;
3163 }
3164 return k;
3165 }
3166
3167 bool AOTCodeCache::write_oops(OopRecorder* oop_recorder) {
3168 int oop_count = oop_recorder->oop_count();
3169 uint n = write_bytes(&oop_count, sizeof(int));
3170 if (n != sizeof(int)) {
3171 return false;
3172 }
3173 log_debug(aot, codecache)("======== write oops [%d]:", oop_count);
3174
3175 for (int i = 1; i < oop_count; i++) { // skip first virtual nullptr
3176 jobject jo = oop_recorder->oop_at(i);
3177 LogStreamHandle(Info, aot, codecache, oops) log;
3178 if (log.is_enabled()) {
3179 log.print("%d: " INTPTR_FORMAT " ", i, p2i(jo));
3180 if (jo == (jobject)Universe::non_oop_word()) {
3181 log.print("non-oop word");
3182 } else if (jo == nullptr) {
3183 log.print("nullptr-oop");
3184 } else {
3185 JNIHandles::resolve(jo)->print_value_on(&log);
3186 }
3187 log.cr();
3188 }
3189 if (!write_oop(jo)) {
3190 return false;
3191 }
3192 }
3193 return true;
3194 }
3195
3196 bool AOTCodeCache::write_oop(jobject& jo) {
3197 oop obj = JNIHandles::resolve(jo);
3198 return write_oop(obj);
3199 }
3200
// Serialize a single oop into the cache stream as a DataKind tag followed
// by kind-specific payload. Supported shapes: nullptr, the non-oop
// sentinel word, Class mirrors (primitive or Klass-backed), Strings
// (archived index or UTF-8 name), the well-known system/platform class
// loaders, and otherwise only archived "permanent heap objects".
// Any other oop marks the lookup as failed and aborts the write.
// Returns false on a short write or an unsupported oop.
bool AOTCodeCache::write_oop(oop obj) {
  DataKind kind;
  uint n = 0;
  if (obj == nullptr) {
    kind = DataKind::Null;
    n = write_bytes(&kind, sizeof(int));
    if (n != sizeof(int)) {
      return false;
    }
  } else if (cast_from_oop<void *>(obj) == Universe::non_oop_word()) {
    kind = DataKind::No_Data;
    n = write_bytes(&kind, sizeof(int));
    if (n != sizeof(int)) {
      return false;
    }
  } else if (java_lang_Class::is_instance(obj)) {
    if (java_lang_Class::is_primitive(obj)) {
      // Primitive mirror: the BasicType alone is enough for the reader
      // to recreate it via java_lang_Class::primitive_mirror().
      int bt = (int)java_lang_Class::primitive_type(obj);
      kind = DataKind::Primitive;
      n = write_bytes(&kind, sizeof(int));
      if (n != sizeof(int)) {
        return false;
      }
      n = write_bytes(&bt, sizeof(int));
      if (n != sizeof(int)) {
        return false;
      }
      log_info(aot, codecache)("%d (L%d): Write primitive type klass: %s", compile_id(), comp_level(), type2name((BasicType)bt));
    } else {
      // Non-primitive mirror: serialize the Klass; the reader recovers
      // the Class object from the Klass's java_mirror().
      Klass* klass = java_lang_Class::as_Klass(obj);
      if (!write_klass(klass)) {
        return false;
      }
    }
  } else if (java_lang_String::is_instance(obj)) {
    // Prefer the archived-object index when the String lives in the AOT heap.
    int k = AOTCacheAccess::get_archived_object_permanent_index(obj); // k >= 0 means obj is a "permanent heap object"
    if (k >= 0) {
      kind = DataKind::String_Shared;
      n = write_bytes(&kind, sizeof(int));
      if (n != sizeof(int)) {
        return false;
      }
      n = write_bytes(&k, sizeof(int));
      if (n != sizeof(int)) {
        return false;
      }
      return true;
    }
    // Otherwise store the UTF-8 characters; the reader re-interns the String.
    kind = DataKind::String;
    n = write_bytes(&kind, sizeof(int));
    if (n != sizeof(int)) {
      return false;
    }
    ResourceMark rm;
    size_t length_sz = 0;
    const char* string = java_lang_String::as_utf8_string(obj, length_sz);
    int length = (int)length_sz; // FIXME: narrowing cast size_t -> int
    length++; // include the trailing '\0'
    n = write_bytes(&length, sizeof(int));
    if (n != sizeof(int)) {
      return false;
    }
    n = write_bytes(string, (uint)length);
    if (n != (uint)length) {
      return false;
    }
    log_info(aot, codecache)("%d (L%d): Write String: %s", compile_id(), comp_level(), string);
  } else if (java_lang_Module::is_instance(obj)) {
    fatal("Module object unimplemented");
  } else if (java_lang_ClassLoader::is_instance(obj)) {
    // Only the two well-known loaders can be recorded; they are looked up
    // again by identity on the read side.
    if (obj == SystemDictionary::java_system_loader()) {
      kind = DataKind::SysLoader;
      log_info(aot, codecache)("%d (L%d): Write ClassLoader: java_system_loader", compile_id(), comp_level());
    } else if (obj == SystemDictionary::java_platform_loader()) {
      kind = DataKind::PlaLoader;
      log_info(aot, codecache)("%d (L%d): Write ClassLoader: java_platform_loader", compile_id(), comp_level());
    } else {
      fatal("ClassLoader object unimplemented");
      return false;
    }
    n = write_bytes(&kind, sizeof(int));
    if (n != sizeof(int)) {
      return false;
    }
  } else {
    // Any other object shape is only supported if it is archived.
    int k = AOTCacheAccess::get_archived_object_permanent_index(obj); // k >= 0 means obj is a "permanent heap object"
    if (k >= 0) {
      kind = DataKind::MH_Oop_Shared;
      n = write_bytes(&kind, sizeof(int));
      if (n != sizeof(int)) {
        return false;
      }
      n = write_bytes(&k, sizeof(int));
      if (n != sizeof(int)) {
        return false;
      }
      return true;
    }
    // Unhandled oop - bailout
    set_lookup_failed();
    log_info(aot, codecache, nmethod)("%d (L%d): Unhandled obj: " PTR_FORMAT " : %s",
                                      compile_id(), comp_level(), p2i(obj), obj->klass()->external_name());
    return false;
  }
  return true;
}
3307
// Deserialize the oop table written by AOTCodeCache::write_oops(OopRecorder*).
// Reads the count prefix, then re-creates each oop (index 0 is the
// recorder's implicit nullptr entry and was never written) and registers
// it with the new compilation's OopRecorder.
// Returns false if any oop lookup failed.
bool AOTCodeReader::read_oops(OopRecorder* oop_recorder, ciMethod* target) {
  uint code_offset = read_position();
  int oop_count = *(int*)addr(code_offset);
  code_offset += sizeof(int);
  set_read_position(code_offset);
  log_debug(aot, codecache)("======== read oops [%d]:", oop_count);
  if (oop_count == 0) {
    return true;
  }
  {
    // Transition into the VM so raw oops can be handled safely below.
    VM_ENTRY_MARK;
    methodHandle comp_method(THREAD, target->get_Method());
    for (int i = 1; i < oop_count; i++) {
      oop obj = read_oop(THREAD, comp_method);
      if (lookup_failed()) {
        return false;
      }
      // Wrap in a JNI local handle and register with the recorder,
      // reusing an existing index when the recorder already knows it.
      jobject jo = JNIHandles::make_local(THREAD, obj);
      if (oop_recorder->is_real(jo)) {
        oop_recorder->find_index(jo);
      } else {
        oop_recorder->allocate_oop_index(jo);
      }
      LogStreamHandle(Debug, aot, codecache, oops) log;
      if (log.is_enabled()) {
        log.print("%d: " INTPTR_FORMAT " ", i, p2i(jo));
        if (jo == (jobject)Universe::non_oop_word()) {
          log.print("non-oop word");
        } else if (jo == nullptr) {
          log.print("nullptr-oop");
        } else {
          JNIHandles::resolve(jo)->print_value_on(&log);
        }
        log.cr();
      }
    }
  }
  return true;
}
3347
// Deserialize one oop previously written by AOTCodeCache::write_oop(oop):
// a DataKind tag followed by kind-specific payload. On failure, marks the
// lookup as failed and returns nullptr (note: DataKind::Null also yields
// nullptr, but without setting the failure flag).
oop AOTCodeReader::read_oop(JavaThread* thread, const methodHandle& comp_method) {
  uint code_offset = read_position();
  oop obj = nullptr;
  DataKind kind = *(DataKind*)addr(code_offset);
  code_offset += sizeof(DataKind);
  set_read_position(code_offset);
  if (kind == DataKind::Null) {
    return nullptr;
  } else if (kind == DataKind::No_Data) {
    return cast_to_oop(Universe::non_oop_word());
  } else if (kind == DataKind::Klass || kind == DataKind::Klass_Shared) {
    // A Class mirror was written as its Klass; recover the mirror from it.
    Klass* k = read_klass(comp_method, (kind == DataKind::Klass_Shared));
    if (k == nullptr) {
      return nullptr;
    }
    obj = k->java_mirror();
    if (obj == nullptr) {
      set_lookup_failed();
      log_info(aot, codecache)("Lookup failed for java_mirror of klass %s", k->external_name());
      return nullptr;
    }
  } else if (kind == DataKind::Primitive) {
    // Primitive mirror: payload is the BasicType as an int.
    code_offset = read_position();
    int t = *(int*)addr(code_offset);
    code_offset += sizeof(int);
    set_read_position(code_offset);
    BasicType bt = (BasicType)t;
    obj = java_lang_Class::primitive_mirror(bt);
    log_info(aot, codecache)("%d (L%d): Read primitive type klass: %s", compile_id(), comp_level(), type2name(bt));
  } else if (kind == DataKind::String_Shared) {
    // Archived String: payload is its permanent-object index.
    code_offset = read_position();
    int k = *(int*)addr(code_offset);
    code_offset += sizeof(int);
    set_read_position(code_offset);
    obj = AOTCacheAccess::get_archived_object(k);
  } else if (kind == DataKind::String) {
    // Non-archived String: payload is length (including trailing '\0')
    // followed by the UTF-8 characters; re-intern it.
    code_offset = read_position();
    int length = *(int*)addr(code_offset);
    code_offset += sizeof(int);
    set_read_position(code_offset);
    const char* dest = addr(code_offset);
    set_read_position(code_offset + length);
    obj = StringTable::intern(&(dest[0]), thread);
    if (obj == nullptr) {
      set_lookup_failed();
      log_info(aot, codecache)("%d (L%d): Lookup failed for String %s",
                               compile_id(), comp_level(), &(dest[0]));
      return nullptr;
    }
    assert(java_lang_String::is_instance(obj), "must be string");
    log_info(aot, codecache)("%d (L%d): Read String: %s", compile_id(), comp_level(), dest);
  } else if (kind == DataKind::SysLoader) {
    obj = SystemDictionary::java_system_loader();
    log_info(aot, codecache)("%d (L%d): Read java_system_loader", compile_id(), comp_level());
  } else if (kind == DataKind::PlaLoader) {
    obj = SystemDictionary::java_platform_loader();
    log_info(aot, codecache)("%d (L%d): Read java_platform_loader", compile_id(), comp_level());
  } else if (kind == DataKind::MH_Oop_Shared) {
    // Other archived object: payload is its permanent-object index.
    code_offset = read_position();
    int k = *(int*)addr(code_offset);
    code_offset += sizeof(int);
    set_read_position(code_offset);
    obj = AOTCacheAccess::get_archived_object(k);
  } else {
    set_lookup_failed();
    log_info(aot, codecache)("%d (L%d): Unknown oop's kind: %d",
                             compile_id(), comp_level(), (int)kind);
    return nullptr;
  }
  return obj;
}
3419
3420 bool AOTCodeReader::read_oop_metadata_list(JavaThread* thread, ciMethod* target, GrowableArray<Handle> &oop_list, GrowableArray<Metadata*> &metadata_list, OopRecorder* oop_recorder) {
3421 methodHandle comp_method(JavaThread::current(), target->get_Method());
3422 JavaThread* current = JavaThread::current();
3423 uint offset = read_position();
3424 int count = *(int *)addr(offset);
3425 offset += sizeof(int);
3426 set_read_position(offset);
3427 for (int i = 0; i < count; i++) {
3428 oop obj = read_oop(current, comp_method);
3429 if (lookup_failed()) {
3430 return false;
3431 }
3432 Handle h(thread, obj);
3433 oop_list.append(h);
3434 if (oop_recorder != nullptr) {
3435 jobject jo = JNIHandles::make_local(thread, obj);
3436 if (oop_recorder->is_real(jo)) {
3437 oop_recorder->find_index(jo);
3438 } else {
3439 oop_recorder->allocate_oop_index(jo);
3440 }
3441 }
3442 LogStreamHandle(Debug, aot, codecache, oops) log;
3443 if (log.is_enabled()) {
3444 log.print("%d: " INTPTR_FORMAT " ", i, p2i(obj));
3445 if (obj == Universe::non_oop_word()) {
3446 log.print("non-oop word");
3447 } else if (obj == nullptr) {
3448 log.print("nullptr-oop");
3449 } else {
3450 obj->print_value_on(&log);
3451 }
3452 log.cr();
3453 }
3454 }
3455
3456 offset = read_position();
3457 count = *(int *)addr(offset);
3458 offset += sizeof(int);
3459 set_read_position(offset);
3460 for (int i = 0; i < count; i++) {
3461 Metadata* m = read_metadata(comp_method);
3462 if (lookup_failed()) {
3463 return false;
3464 }
3465 metadata_list.append(m);
3466 if (oop_recorder != nullptr) {
3467 if (oop_recorder->is_real(m)) {
3468 oop_recorder->find_index(m);
3469 } else {
3470 oop_recorder->allocate_metadata_index(m);
3471 }
3472 }
3473 LogTarget(Debug, aot, codecache, metadata) log;
3474 if (log.is_enabled()) {
3475 LogStream ls(log);
3476 ls.print("%d: " INTPTR_FORMAT " ", i, p2i(m));
3477 if (m == (Metadata*)Universe::non_oop_word()) {
3478 ls.print("non-metadata word");
3479 } else if (m == nullptr) {
3480 ls.print("nullptr-oop");
3481 } else {
3482 Metadata::print_value_on_maybe_null(&ls, m);
3483 }
3484 ls.cr();
3485 }
3486 }
3487 return true;
3488 }
3489
3490 bool AOTCodeCache::write_oop_map_set(CodeBlob& cb) {
3491 ImmutableOopMapSet* oopmaps = cb.oop_maps();
3492 int oopmaps_size = oopmaps->nr_of_bytes();
3493 if (!write_bytes(&oopmaps_size, sizeof(int))) {
3494 return false;
3495 }
3496 uint n = write_bytes(oopmaps, oopmaps->nr_of_bytes());
3497 if (n != (uint)oopmaps->nr_of_bytes()) {
3498 return false;
3499 }
3500 return true;
3501 }
3502
3503 ImmutableOopMapSet* AOTCodeReader::read_oop_map_set() {
3504 uint offset = read_position();
3505 int size = *(int *)addr(offset);
3506 offset += sizeof(int);
3507 ImmutableOopMapSet* oopmaps = (ImmutableOopMapSet *)addr(offset);
3508 offset += size;
3509 set_read_position(offset);
3510 return oopmaps;
3511 }
3512
3513 bool AOTCodeCache::write_oops(nmethod* nm) {
3514 int count = nm->oops_count()-1;
3515 if (!write_bytes(&count, sizeof(int))) {
3516 return false;
3517 }
3518 for (oop* p = nm->oops_begin(); p < nm->oops_end(); p++) {
3519 if (!write_oop(*p)) {
3520 return false;
3521 }
3522 }
3523 return true;
3524 }
3525
3526 #ifndef PRODUCT
3527 bool AOTCodeCache::write_asm_remarks(CodeBlob& cb) {
3528 // Write asm remarks
3529 uint* count_ptr = (uint *)reserve_bytes(sizeof(uint));
3530 if (count_ptr == nullptr) {
3531 return false;
3532 }
3533 uint count = 0;
3534 bool result = cb.asm_remarks().iterate([&] (uint offset, const char* str) -> bool {
3535 log_trace(aot, codecache, stubs)("asm remark offset=%d, str='%s'", offset, str);
3536 uint n = write_bytes(&offset, sizeof(uint));
3537 if (n != sizeof(uint)) {
3538 return false;
3539 }
3540 const char* cstr = add_C_string(str);
3541 int id = _table->id_for_C_string((address)cstr);
3542 assert(id != -1, "asm remark string '%s' not found in AOTCodeAddressTable", str);
3543 n = write_bytes(&id, sizeof(int));
3544 if (n != sizeof(int)) {
3545 return false;
3594 // Read dbg strings
3595 uint offset = read_position();
3596 uint count = *(uint *)addr(offset);
3597 offset += sizeof(uint);
3598 for (uint i = 0; i < count; i++) {
3599 int string_id = *(uint *)addr(offset);
3600 offset += sizeof(int);
3601 const char* str = (const char*)_cache->address_for_C_string(string_id);
3602 dbg_strings.insert(str);
3603 }
3604 set_read_position(offset);
3605 }
3606 #endif // PRODUCT
3607
3608 //======================= AOTCodeAddressTable ===============
3609
3610 // address table ids for generated routines, external addresses and C
3611 // string addresses are partitioned into positive integer ranges
3612 // defined by the following positive base and max values
3613 // i.e. [_extrs_base, _extrs_base + _extrs_max -1],
3614 // [_stubs_base, _stubs_base + _stubs_max -1],
3615 // ...
3616 // [_c_str_base, _c_str_base + _c_str_max -1],
3617 #define _extrs_max 140
3618 #define _stubs_max 210
3619 #define _all_blobs_max 100
3620 #define _shared_blobs_max 25
3621 #define _C2_blobs_max 25
3622 #define _C1_blobs_max (_all_blobs_max - _shared_blobs_max - _C2_blobs_max)
3623 #define _all_max 450
3624
3625 #define _extrs_base 0
3626 #define _stubs_base (_extrs_base + _extrs_max)
3627 #define _shared_blobs_base (_stubs_base + _stubs_max)
3628 #define _C1_blobs_base (_shared_blobs_base + _shared_blobs_max)
3629 #define _C2_blobs_base (_C1_blobs_base + _C1_blobs_max)
3630 #if (_C2_blobs_base >= _all_max)
3631 #error AOTCodeAddressTable ranges need adjusting
3632 #endif
3633
3634 #define SET_ADDRESS(type, addr) \
3635 { \
3636 type##_addr[type##_length++] = (address) (addr); \
3637 assert(type##_length <= type##_max, "increase size"); \
3638 }
3639
// Guards against re-entrant/duplicate initialization of the external
// address partition (set once, never cleared).
static bool initializing_extrs = false;

// Populate the _extrs partition of the address table with the VM runtime
// entry points, runtime constants and field addresses that AOT-compiled
// code may reference. The recording ORDER defines the ids stored in the
// cache, so entries must not be reordered or conditionally skipped in a
// way that differs between assembly and production runs.
void AOTCodeAddressTable::init_extrs() {
  if (_extrs_complete || initializing_extrs) return; // Done already
  initializing_extrs = true;
  _extrs_addr = NEW_C_HEAP_ARRAY(address, _extrs_max, mtCode);

  _extrs_length = 0;
  _stubs_length = 0;

  // Record addresses of VM runtime methods
  SET_ADDRESS(_extrs, SharedRuntime::fixup_callers_callsite);
  SET_ADDRESS(_extrs, SharedRuntime::handle_wrong_method);
  SET_ADDRESS(_extrs, SharedRuntime::handle_wrong_method_abstract);
  SET_ADDRESS(_extrs, SharedRuntime::handle_wrong_method_ic_miss);
#if defined(AARCH64) && !defined(ZERO)
  SET_ADDRESS(_extrs, JavaThread::aarch64_get_thread_helper);
#endif
  {
    // Required by Shared blobs
    SET_ADDRESS(_extrs, Deoptimization::fetch_unroll_info);
    SET_ADDRESS(_extrs, Deoptimization::unpack_frames);
    SET_ADDRESS(_extrs, SafepointSynchronize::handle_polling_page_exception);
    SET_ADDRESS(_extrs, SharedRuntime::resolve_opt_virtual_call_C);
    SET_ADDRESS(_extrs, SharedRuntime::resolve_virtual_call_C);
    SET_ADDRESS(_extrs, SharedRuntime::resolve_static_call_C);
    SET_ADDRESS(_extrs, SharedRuntime::throw_delayed_StackOverflowError);
    SET_ADDRESS(_extrs, SharedRuntime::throw_AbstractMethodError);
    SET_ADDRESS(_extrs, SharedRuntime::throw_IncompatibleClassChangeError);
    SET_ADDRESS(_extrs, SharedRuntime::throw_NullPointerException_at_call);
    SET_ADDRESS(_extrs, CompressedOops::base_addr());
    SET_ADDRESS(_extrs, CompressedKlassPointers::base_addr());

  }

#ifdef COMPILER1
  {
    // Required by C1 blobs
    SET_ADDRESS(_extrs, static_cast<int (*)(oopDesc*)>(SharedRuntime::dtrace_object_alloc));
    SET_ADDRESS(_extrs, SharedRuntime::exception_handler_for_return_address);
    SET_ADDRESS(_extrs, SharedRuntime::register_finalizer);
    SET_ADDRESS(_extrs, Runtime1::is_instance_of);
    SET_ADDRESS(_extrs, Runtime1::exception_handler_for_pc);
    SET_ADDRESS(_extrs, Runtime1::check_abort_on_vm_exception);
    SET_ADDRESS(_extrs, Runtime1::new_instance);
    SET_ADDRESS(_extrs, Runtime1::counter_overflow);
    SET_ADDRESS(_extrs, Runtime1::new_type_array);
    SET_ADDRESS(_extrs, Runtime1::new_object_array);
    SET_ADDRESS(_extrs, Runtime1::new_multi_array);
    SET_ADDRESS(_extrs, Runtime1::throw_range_check_exception);
    SET_ADDRESS(_extrs, Runtime1::throw_index_exception);
    SET_ADDRESS(_extrs, Runtime1::throw_div0_exception);
    SET_ADDRESS(_extrs, Runtime1::throw_null_pointer_exception);
    SET_ADDRESS(_extrs, Runtime1::throw_array_store_exception);
    SET_ADDRESS(_extrs, Runtime1::throw_class_cast_exception);
    SET_ADDRESS(_extrs, Runtime1::throw_incompatible_class_change_error);
    SET_ADDRESS(_extrs, Runtime1::monitorenter);
    SET_ADDRESS(_extrs, Runtime1::monitorexit);
    SET_ADDRESS(_extrs, Runtime1::deoptimize);
    SET_ADDRESS(_extrs, Runtime1::access_field_patching);
    SET_ADDRESS(_extrs, Runtime1::move_klass_patching);
    SET_ADDRESS(_extrs, Runtime1::move_mirror_patching);
    SET_ADDRESS(_extrs, Runtime1::move_appendix_patching);
    SET_ADDRESS(_extrs, Runtime1::predicate_failed_trap);
    SET_ADDRESS(_extrs, Runtime1::unimplemented_entry);
    SET_ADDRESS(_extrs, Runtime1::trace_block_entry);
#ifndef PRODUCT
    SET_ADDRESS(_extrs, os::breakpoint);
#endif
  }
#endif // COMPILER1

#ifdef COMPILER2
  {
    // Required by C2 blobs
    SET_ADDRESS(_extrs, Deoptimization::uncommon_trap);
    SET_ADDRESS(_extrs, OptoRuntime::handle_exception_C);
    SET_ADDRESS(_extrs, OptoRuntime::new_instance_C);
    SET_ADDRESS(_extrs, OptoRuntime::new_array_C);
    SET_ADDRESS(_extrs, OptoRuntime::new_array_nozero_C);
    SET_ADDRESS(_extrs, OptoRuntime::multianewarray2_C);
    SET_ADDRESS(_extrs, OptoRuntime::multianewarray3_C);
    SET_ADDRESS(_extrs, OptoRuntime::multianewarray4_C);
    SET_ADDRESS(_extrs, OptoRuntime::multianewarray5_C);
    SET_ADDRESS(_extrs, OptoRuntime::multianewarrayN_C);
#if INCLUDE_JVMTI
    SET_ADDRESS(_extrs, SharedRuntime::notify_jvmti_vthread_start);
    SET_ADDRESS(_extrs, SharedRuntime::notify_jvmti_vthread_end);
    SET_ADDRESS(_extrs, SharedRuntime::notify_jvmti_vthread_mount);
    SET_ADDRESS(_extrs, SharedRuntime::notify_jvmti_vthread_unmount);
#endif
    SET_ADDRESS(_extrs, OptoRuntime::complete_monitor_locking_C);
    SET_ADDRESS(_extrs, OptoRuntime::monitor_notify_C);
    SET_ADDRESS(_extrs, OptoRuntime::monitor_notifyAll_C);
    SET_ADDRESS(_extrs, OptoRuntime::rethrow_C);
    SET_ADDRESS(_extrs, OptoRuntime::slow_arraycopy_C);
    SET_ADDRESS(_extrs, OptoRuntime::register_finalizer_C);
    SET_ADDRESS(_extrs, OptoRuntime::class_init_barrier_C);
  }
#endif // COMPILER2

  // GC barrier entry points (only those compiled into this VM).
#if INCLUDE_G1GC
  SET_ADDRESS(_extrs, G1BarrierSetRuntime::write_ref_field_post_entry);
  SET_ADDRESS(_extrs, G1BarrierSetRuntime::write_ref_field_pre_entry);
#endif

#if INCLUDE_SHENANDOAHGC
  SET_ADDRESS(_extrs, ShenandoahRuntime::arraycopy_barrier_oop);
  SET_ADDRESS(_extrs, ShenandoahRuntime::arraycopy_barrier_narrow_oop);
  SET_ADDRESS(_extrs, ShenandoahRuntime::write_ref_field_pre);
  SET_ADDRESS(_extrs, ShenandoahRuntime::clone_barrier);
  SET_ADDRESS(_extrs, ShenandoahRuntime::load_reference_barrier_strong);
  SET_ADDRESS(_extrs, ShenandoahRuntime::load_reference_barrier_strong_narrow);
  SET_ADDRESS(_extrs, ShenandoahRuntime::load_reference_barrier_weak);
  SET_ADDRESS(_extrs, ShenandoahRuntime::load_reference_barrier_weak_narrow);
  SET_ADDRESS(_extrs, ShenandoahRuntime::load_reference_barrier_phantom);
  SET_ADDRESS(_extrs, ShenandoahRuntime::load_reference_barrier_phantom_narrow);
#endif

#if INCLUDE_ZGC
  SET_ADDRESS(_extrs, ZBarrierSetRuntime::load_barrier_on_phantom_oop_field_preloaded_addr());
#if defined(AMD64)
  SET_ADDRESS(_extrs, &ZPointerLoadShift);
#endif
#endif // INCLUDE_ZGC

  SET_ADDRESS(_extrs, SharedRuntime::log_jni_monitor_still_held);
  SET_ADDRESS(_extrs, SharedRuntime::rc_trace_method_entry);
  SET_ADDRESS(_extrs, SharedRuntime::reguard_yellow_pages);
  SET_ADDRESS(_extrs, SharedRuntime::dtrace_method_exit);

  SET_ADDRESS(_extrs, SharedRuntime::complete_monitor_unlocking_C);
  SET_ADDRESS(_extrs, SharedRuntime::enable_stack_reserved_zone);
#if defined(AMD64) && !defined(ZERO)
  SET_ADDRESS(_extrs, SharedRuntime::montgomery_multiply);
  SET_ADDRESS(_extrs, SharedRuntime::montgomery_square);
#endif // AMD64
  // Arithmetic/math runtime helpers referenced by compiled code.
  SET_ADDRESS(_extrs, SharedRuntime::d2f);
  SET_ADDRESS(_extrs, SharedRuntime::d2i);
  SET_ADDRESS(_extrs, SharedRuntime::d2l);
  SET_ADDRESS(_extrs, SharedRuntime::dcos);
  SET_ADDRESS(_extrs, SharedRuntime::dexp);
  SET_ADDRESS(_extrs, SharedRuntime::dlog);
  SET_ADDRESS(_extrs, SharedRuntime::dlog10);
  SET_ADDRESS(_extrs, SharedRuntime::dpow);
  SET_ADDRESS(_extrs, SharedRuntime::dsin);
  SET_ADDRESS(_extrs, SharedRuntime::dtan);
  SET_ADDRESS(_extrs, SharedRuntime::f2i);
  SET_ADDRESS(_extrs, SharedRuntime::f2l);
#ifndef ZERO
  SET_ADDRESS(_extrs, SharedRuntime::drem);
  SET_ADDRESS(_extrs, SharedRuntime::frem);
#endif
  SET_ADDRESS(_extrs, SharedRuntime::l2d);
  SET_ADDRESS(_extrs, SharedRuntime::l2f);
  SET_ADDRESS(_extrs, SharedRuntime::ldiv);
  SET_ADDRESS(_extrs, SharedRuntime::lmul);
  SET_ADDRESS(_extrs, SharedRuntime::lrem);
#if INCLUDE_JVMTI
  SET_ADDRESS(_extrs, &JvmtiExport::_should_notify_object_alloc);
#endif /* INCLUDE_JVMTI */
  BarrierSet* bs = BarrierSet::barrier_set();
  if (bs->is_a(BarrierSet::CardTableBarrierSet)) {
    SET_ADDRESS(_extrs, ci_card_table_address_as<address>());
  }
  SET_ADDRESS(_extrs, ThreadIdentifier::unsafe_offset());
  SET_ADDRESS(_extrs, Thread::current);

  SET_ADDRESS(_extrs, os::javaTimeMillis);
  SET_ADDRESS(_extrs, os::javaTimeNanos);

#if INCLUDE_JVMTI
  SET_ADDRESS(_extrs, &JvmtiVTMSTransitionDisabler::_VTMS_notify_jvmti_events);
#endif /* INCLUDE_JVMTI */
  SET_ADDRESS(_extrs, StubRoutines::crc_table_addr());
#ifndef PRODUCT
  SET_ADDRESS(_extrs, &SharedRuntime::_partial_subtype_ctr);
  SET_ADDRESS(_extrs, JavaThread::verify_cross_modify_fence_failure);
#endif

#ifndef ZERO
#if defined(AMD64) || defined(AARCH64) || defined(RISCV64)
  SET_ADDRESS(_extrs, MacroAssembler::debug64);
#endif
#if defined(AMD64)
  SET_ADDRESS(_extrs, StubRoutines::x86::arrays_hashcode_powers_of_31());
#endif
#endif // ZERO

#ifdef COMPILER1
#ifdef X86
  SET_ADDRESS(_extrs, LIR_Assembler::float_signmask_pool);
  SET_ADDRESS(_extrs, LIR_Assembler::double_signmask_pool);
  SET_ADDRESS(_extrs, LIR_Assembler::float_signflip_pool);
  SET_ADDRESS(_extrs, LIR_Assembler::double_signflip_pool);
#endif
#endif

  // addresses of fields in AOT runtime constants area
  address* p = AOTRuntimeConstants::field_addresses_list();
  while (*p != nullptr) {
    SET_ADDRESS(_extrs, *p++);
  }

  _extrs_complete = true;
  log_info(aot, codecache,init)("External addresses recorded");
}
3847
// Guards against re-entrant/duplicate initialization of the early-stub
// entries (set once, never cleared).
static bool initializing_early_stubs = false;

// Allocate the _stubs partition and record the few stub addresses that
// exist early in VM startup. init_stubs() appends the remaining stubs
// later; recording ORDER defines the cached ids, so do not reorder.
void AOTCodeAddressTable::init_early_stubs() {
  if (_complete || initializing_early_stubs) return; // Done already
  initializing_early_stubs = true;
  _stubs_addr = NEW_C_HEAP_ARRAY(address, _stubs_max, mtCode);
  _stubs_length = 0;
  SET_ADDRESS(_stubs, StubRoutines::forward_exception_entry());

  {
    // Required by C1 blobs
#if defined(AMD64) && !defined(ZERO)
    SET_ADDRESS(_stubs, StubRoutines::x86::double_sign_flip());
    SET_ADDRESS(_stubs, StubRoutines::x86::d2l_fixup());
#endif // AMD64
  }

  _early_stubs_complete = true;
  log_info(aot, codecache, init)("early stubs recorded");
}
3868
// Guards against re-entrant/duplicate initialization of the blob address
// partitions (set once, never cleared).
static bool initializing_shared_blobs = false;

// Allocate one array for all blob addresses and carve it into the shared,
// C1 and C2 partitions, then record the early shared runtime blobs.
// Recording ORDER defines the cached ids, so entries must not be reordered.
void AOTCodeAddressTable::init_shared_blobs() {
  if (_complete || initializing_shared_blobs) return; // Done already
  initializing_shared_blobs = true;
  address* blobs_addr = NEW_C_HEAP_ARRAY(address, _all_blobs_max, mtCode);

  // Divide _shared_blobs_addr array to chunks because they could be initialized in parallel
  _shared_blobs_addr = blobs_addr;
  _C1_blobs_addr = _shared_blobs_addr + _shared_blobs_max;// C1 blobs addresses stored after shared blobs
  _C2_blobs_addr = _C1_blobs_addr + _C1_blobs_max; // C2 blobs addresses stored after C1 blobs

  _shared_blobs_length = 0; // for shared blobs
  _C1_blobs_length = 0;
  _C2_blobs_length = 0;

  // clear the address table
  memset(blobs_addr, 0, sizeof(address)* _all_blobs_max);

  // Record addresses of generated code blobs
  SET_ADDRESS(_shared_blobs, SharedRuntime::get_handle_wrong_method_stub());
  SET_ADDRESS(_shared_blobs, SharedRuntime::get_ic_miss_stub());
  SET_ADDRESS(_shared_blobs, SharedRuntime::deopt_blob()->unpack());
  SET_ADDRESS(_shared_blobs, SharedRuntime::deopt_blob()->unpack_with_exception());
  SET_ADDRESS(_shared_blobs, SharedRuntime::deopt_blob()->unpack_with_reexecution());
  SET_ADDRESS(_shared_blobs, SharedRuntime::deopt_blob()->unpack_with_exception_in_tls());
  SET_ADDRESS(_shared_blobs, SharedRuntime::get_resolve_opt_virtual_call_stub());
  SET_ADDRESS(_shared_blobs, SharedRuntime::get_resolve_virtual_call_stub());
  SET_ADDRESS(_shared_blobs, SharedRuntime::get_resolve_static_call_stub());
  SET_ADDRESS(_shared_blobs, SharedRuntime::deopt_blob()->entry_point());
  SET_ADDRESS(_shared_blobs, SharedRuntime::polling_page_safepoint_handler_blob()->entry_point());
  SET_ADDRESS(_shared_blobs, SharedRuntime::polling_page_return_handler_blob()->entry_point());
#ifdef COMPILER2
  SET_ADDRESS(_shared_blobs, SharedRuntime::polling_page_vectors_safepoint_handler_blob()->entry_point());
#endif
#if INCLUDE_JVMCI
  if (EnableJVMCI) {
    SET_ADDRESS(_shared_blobs, SharedRuntime::deopt_blob()->uncommon_trap());
    SET_ADDRESS(_shared_blobs, SharedRuntime::deopt_blob()->implicit_exception_uncommon_trap());
  }
#endif

  assert(_shared_blobs_length <= _shared_blobs_max, "increase _shared_blobs_max to %d", _shared_blobs_length);
  log_info(aot, codecache,init)("Early shared blobs recorded");
}
3914
3915 static bool initializing_stubs = false;
3916 void AOTCodeAddressTable::init_stubs() {
3917 if (_complete || initializing_stubs) return; // Done already
3918 assert(_early_stubs_complete, "early stubs whould be initialized");
3919 initializing_stubs = true;
3920 // final blobs
3921 SET_ADDRESS(_shared_blobs, SharedRuntime::throw_AbstractMethodError_entry());
3922 SET_ADDRESS(_shared_blobs, SharedRuntime::throw_IncompatibleClassChangeError_entry());
3923 SET_ADDRESS(_shared_blobs, SharedRuntime::throw_NullPointerException_at_call_entry());
3924 SET_ADDRESS(_shared_blobs, SharedRuntime::throw_StackOverflowError_entry());
3925 SET_ADDRESS(_shared_blobs, SharedRuntime::throw_delayed_StackOverflowError_entry());
3926
3927 assert(_shared_blobs_length <= _all_blobs_max, "increase _all_blobs_max to %d", _shared_blobs_length);
3928
3929 _shared_blobs_complete = true;
3930 log_info(aot, codecache,init)("All shared blobs recorded");
3931
3932 // Stubs
3933 SET_ADDRESS(_stubs, StubRoutines::method_entry_barrier());
3934 SET_ADDRESS(_stubs, StubRoutines::atomic_xchg_entry());
3935 SET_ADDRESS(_stubs, StubRoutines::atomic_cmpxchg_entry());
3936 SET_ADDRESS(_stubs, StubRoutines::atomic_cmpxchg_long_entry());
3937 SET_ADDRESS(_stubs, StubRoutines::atomic_add_entry());
3938 SET_ADDRESS(_stubs, StubRoutines::fence_entry());
3939
3940 SET_ADDRESS(_stubs, StubRoutines::cont_thaw());
3941 SET_ADDRESS(_stubs, StubRoutines::cont_returnBarrier());
3942 SET_ADDRESS(_stubs, StubRoutines::cont_returnBarrierExc());
3943
3944 JFR_ONLY(SET_ADDRESS(_stubs, SharedRuntime::jfr_write_checkpoint());)
3945
3946
3947 SET_ADDRESS(_stubs, StubRoutines::jbyte_arraycopy());
3948 SET_ADDRESS(_stubs, StubRoutines::jshort_arraycopy());
3949 SET_ADDRESS(_stubs, StubRoutines::jint_arraycopy());
3950 SET_ADDRESS(_stubs, StubRoutines::jlong_arraycopy());
3951 SET_ADDRESS(_stubs, StubRoutines::_oop_arraycopy);
3952 SET_ADDRESS(_stubs, StubRoutines::_oop_arraycopy_uninit);
3953
3954 SET_ADDRESS(_stubs, StubRoutines::jbyte_disjoint_arraycopy());
3955 SET_ADDRESS(_stubs, StubRoutines::jshort_disjoint_arraycopy());
3956 SET_ADDRESS(_stubs, StubRoutines::jint_disjoint_arraycopy());
3957 SET_ADDRESS(_stubs, StubRoutines::jlong_disjoint_arraycopy());
3958 SET_ADDRESS(_stubs, StubRoutines::_oop_disjoint_arraycopy);
3959 SET_ADDRESS(_stubs, StubRoutines::_oop_disjoint_arraycopy_uninit);
3960
3961 SET_ADDRESS(_stubs, StubRoutines::arrayof_jbyte_arraycopy());
3962 SET_ADDRESS(_stubs, StubRoutines::arrayof_jshort_arraycopy());
3963 SET_ADDRESS(_stubs, StubRoutines::arrayof_jint_arraycopy());
3964 SET_ADDRESS(_stubs, StubRoutines::arrayof_jlong_arraycopy());
3965 SET_ADDRESS(_stubs, StubRoutines::_arrayof_oop_arraycopy);
3966 SET_ADDRESS(_stubs, StubRoutines::_arrayof_oop_arraycopy_uninit);
3967
3968 SET_ADDRESS(_stubs, StubRoutines::arrayof_jbyte_disjoint_arraycopy());
3969 SET_ADDRESS(_stubs, StubRoutines::arrayof_jshort_disjoint_arraycopy());
3970 SET_ADDRESS(_stubs, StubRoutines::arrayof_jint_disjoint_arraycopy());
3971 SET_ADDRESS(_stubs, StubRoutines::arrayof_jlong_disjoint_arraycopy());
3972 SET_ADDRESS(_stubs, StubRoutines::_arrayof_oop_disjoint_arraycopy);
3973 SET_ADDRESS(_stubs, StubRoutines::_arrayof_oop_disjoint_arraycopy_uninit);
3974
3975 SET_ADDRESS(_stubs, StubRoutines::_checkcast_arraycopy);
3976 SET_ADDRESS(_stubs, StubRoutines::_checkcast_arraycopy_uninit);
3977
3978 SET_ADDRESS(_stubs, StubRoutines::unsafe_arraycopy());
3979 SET_ADDRESS(_stubs, StubRoutines::generic_arraycopy());
3980
3981 SET_ADDRESS(_stubs, StubRoutines::jbyte_fill());
3982 SET_ADDRESS(_stubs, StubRoutines::jshort_fill());
3983 SET_ADDRESS(_stubs, StubRoutines::jint_fill());
3984 SET_ADDRESS(_stubs, StubRoutines::arrayof_jbyte_fill());
3985 SET_ADDRESS(_stubs, StubRoutines::arrayof_jshort_fill());
3986 SET_ADDRESS(_stubs, StubRoutines::arrayof_jint_fill());
3987
3988 SET_ADDRESS(_stubs, StubRoutines::data_cache_writeback());
3989 SET_ADDRESS(_stubs, StubRoutines::data_cache_writeback_sync());
3990
3991 SET_ADDRESS(_stubs, StubRoutines::aescrypt_encryptBlock());
3992 SET_ADDRESS(_stubs, StubRoutines::aescrypt_decryptBlock());
3993 SET_ADDRESS(_stubs, StubRoutines::cipherBlockChaining_encryptAESCrypt());
3994 SET_ADDRESS(_stubs, StubRoutines::cipherBlockChaining_decryptAESCrypt());
3995 SET_ADDRESS(_stubs, StubRoutines::electronicCodeBook_encryptAESCrypt());
3996 SET_ADDRESS(_stubs, StubRoutines::electronicCodeBook_decryptAESCrypt());
3997 SET_ADDRESS(_stubs, StubRoutines::poly1305_processBlocks());
3998 SET_ADDRESS(_stubs, StubRoutines::counterMode_AESCrypt());
3999 SET_ADDRESS(_stubs, StubRoutines::ghash_processBlocks());
4000 SET_ADDRESS(_stubs, StubRoutines::chacha20Block());
4001 SET_ADDRESS(_stubs, StubRoutines::base64_encodeBlock());
4002 SET_ADDRESS(_stubs, StubRoutines::base64_decodeBlock());
4003 SET_ADDRESS(_stubs, StubRoutines::md5_implCompress());
4004 SET_ADDRESS(_stubs, StubRoutines::md5_implCompressMB());
4005 SET_ADDRESS(_stubs, StubRoutines::sha1_implCompress());
4006 SET_ADDRESS(_stubs, StubRoutines::sha1_implCompressMB());
4007 SET_ADDRESS(_stubs, StubRoutines::sha256_implCompress());
4008 SET_ADDRESS(_stubs, StubRoutines::sha256_implCompressMB());
4009 SET_ADDRESS(_stubs, StubRoutines::sha512_implCompress());
4010 SET_ADDRESS(_stubs, StubRoutines::sha512_implCompressMB());
4011 SET_ADDRESS(_stubs, StubRoutines::sha3_implCompress());
4012 SET_ADDRESS(_stubs, StubRoutines::sha3_implCompressMB());
4013
4014 SET_ADDRESS(_stubs, StubRoutines::updateBytesCRC32());
4015
4016 SET_ADDRESS(_stubs, StubRoutines::crc32c_table_addr());
4017 SET_ADDRESS(_stubs, StubRoutines::updateBytesCRC32C());
4018 SET_ADDRESS(_stubs, StubRoutines::updateBytesAdler32());
4019
4020 SET_ADDRESS(_stubs, StubRoutines::multiplyToLen());
4021 SET_ADDRESS(_stubs, StubRoutines::squareToLen());
4022 SET_ADDRESS(_stubs, StubRoutines::mulAdd());
4023 SET_ADDRESS(_stubs, StubRoutines::montgomeryMultiply());
4024 SET_ADDRESS(_stubs, StubRoutines::montgomerySquare());
4025 SET_ADDRESS(_stubs, StubRoutines::bigIntegerRightShift());
4026 SET_ADDRESS(_stubs, StubRoutines::bigIntegerLeftShift());
4027 SET_ADDRESS(_stubs, StubRoutines::galoisCounterMode_AESCrypt());
4028
4029 SET_ADDRESS(_stubs, StubRoutines::vectorizedMismatch());
4030
4031 SET_ADDRESS(_stubs, StubRoutines::dexp());
4032 SET_ADDRESS(_stubs, StubRoutines::dlog());
4033 SET_ADDRESS(_stubs, StubRoutines::dlog10());
4034 SET_ADDRESS(_stubs, StubRoutines::dpow());
4035 SET_ADDRESS(_stubs, StubRoutines::dsin());
4036 SET_ADDRESS(_stubs, StubRoutines::dcos());
4037 SET_ADDRESS(_stubs, StubRoutines::dlibm_reduce_pi04l());
4038 SET_ADDRESS(_stubs, StubRoutines::dlibm_sin_cos_huge());
4039 SET_ADDRESS(_stubs, StubRoutines::dlibm_tan_cot_huge());
4040 SET_ADDRESS(_stubs, StubRoutines::dtan());
4041
4042 SET_ADDRESS(_stubs, StubRoutines::f2hf_adr());
4043 SET_ADDRESS(_stubs, StubRoutines::hf2f_adr());
4044
4045 for (int slot = 0; slot < Klass::SECONDARY_SUPERS_TABLE_SIZE; slot++) {
4046 SET_ADDRESS(_stubs, StubRoutines::lookup_secondary_supers_table_stub(slot));
4047 }
4048 SET_ADDRESS(_stubs, StubRoutines::lookup_secondary_supers_table_slow_path_stub());
4049
4050 #if defined(AMD64) && !defined(ZERO)
4051 SET_ADDRESS(_stubs, StubRoutines::x86::d2i_fixup());
4052 SET_ADDRESS(_stubs, StubRoutines::x86::f2i_fixup());
4053 SET_ADDRESS(_stubs, StubRoutines::x86::f2l_fixup());
4054 SET_ADDRESS(_stubs, StubRoutines::x86::float_sign_mask());
4055 SET_ADDRESS(_stubs, StubRoutines::x86::float_sign_flip());
4056 SET_ADDRESS(_stubs, StubRoutines::x86::double_sign_mask());
4057 SET_ADDRESS(_stubs, StubRoutines::x86::vector_popcount_lut());
4058 SET_ADDRESS(_stubs, StubRoutines::x86::vector_float_sign_mask());
4059 SET_ADDRESS(_stubs, StubRoutines::x86::vector_float_sign_flip());
4060 SET_ADDRESS(_stubs, StubRoutines::x86::vector_double_sign_mask());
4061 SET_ADDRESS(_stubs, StubRoutines::x86::vector_double_sign_flip());
4062 SET_ADDRESS(_stubs, StubRoutines::x86::vector_reverse_byte_perm_mask_int());
4063 SET_ADDRESS(_stubs, StubRoutines::x86::vector_reverse_byte_perm_mask_short());
4064 SET_ADDRESS(_stubs, StubRoutines::x86::vector_reverse_byte_perm_mask_long());
4065 // The iota indices are ordered by type B/S/I/L/F/D, and the offset between two types is 64.
4066 // See C2_MacroAssembler::load_iota_indices().
4067 for (int i = 0; i < 6; i++) {
4068 SET_ADDRESS(_stubs, StubRoutines::x86::vector_iota_indices() + i * 64);
4069 }
4070 #endif
4071 #if defined(AARCH64) && !defined(ZERO)
4072 SET_ADDRESS(_stubs, StubRoutines::aarch64::zero_blocks());
4073 SET_ADDRESS(_stubs, StubRoutines::aarch64::count_positives());
4074 SET_ADDRESS(_stubs, StubRoutines::aarch64::count_positives_long());
4075 SET_ADDRESS(_stubs, StubRoutines::aarch64::large_array_equals());
4076 SET_ADDRESS(_stubs, StubRoutines::aarch64::compare_long_string_LL());
4077 SET_ADDRESS(_stubs, StubRoutines::aarch64::compare_long_string_UU());
4078 SET_ADDRESS(_stubs, StubRoutines::aarch64::compare_long_string_LU());
4079 SET_ADDRESS(_stubs, StubRoutines::aarch64::compare_long_string_UL());
4080 SET_ADDRESS(_stubs, StubRoutines::aarch64::string_indexof_linear_ul());
4081 SET_ADDRESS(_stubs, StubRoutines::aarch64::string_indexof_linear_ll());
4082 SET_ADDRESS(_stubs, StubRoutines::aarch64::string_indexof_linear_uu());
4083 SET_ADDRESS(_stubs, StubRoutines::aarch64::large_byte_array_inflate());
4084 SET_ADDRESS(_stubs, StubRoutines::aarch64::spin_wait());
4085
4086 SET_ADDRESS(_stubs, StubRoutines::aarch64::large_arrays_hashcode(T_BOOLEAN));
4087 SET_ADDRESS(_stubs, StubRoutines::aarch64::large_arrays_hashcode(T_BYTE));
4088 SET_ADDRESS(_stubs, StubRoutines::aarch64::large_arrays_hashcode(T_SHORT));
4089 SET_ADDRESS(_stubs, StubRoutines::aarch64::large_arrays_hashcode(T_CHAR));
4090 SET_ADDRESS(_stubs, StubRoutines::aarch64::large_arrays_hashcode(T_INT));
4091 #endif
4092
4093 _complete = true;
4094 log_info(aot, codecache,init)("Stubs recorded");
4095 }
4096
4097 void AOTCodeAddressTable::init_early_c1() {
4098 #ifdef COMPILER1
4099 // Runtime1 Blobs
4100 for (int i = 0; i <= (int)C1StubId::forward_exception_id; i++) {
4101 C1StubId id = (C1StubId)i;
4102 if (Runtime1::blob_for(id) == nullptr) {
4103 log_info(aot, codecache, init)("C1 blob %s is missing", Runtime1::name_for(id));
4104 continue;
4105 }
4106 if (Runtime1::entry_for(id) == nullptr) {
4107 log_info(aot, codecache, init)("C1 blob %s is missing entry", Runtime1::name_for(id));
4108 continue;
4109 }
4110 address entry = Runtime1::entry_for(id);
4111 SET_ADDRESS(_C1_blobs, entry);
4112 }
4113 #endif // COMPILER1
4114 assert(_C1_blobs_length <= _C1_blobs_max, "increase _C1_blobs_max to %d", _C1_blobs_length);
4115 _early_c1_complete = true;
4116 }
4117
void AOTCodeAddressTable::init_c1() {
#ifdef COMPILER1
  // Runtime1 Blobs
  // Register entry points for the remaining Runtime1 stubs — everything
  // after forward_exception_id (the earlier ids were recorded by
  // init_early_c1()). NOTE: registration order defines the ids stored in
  // the AOT cache, so the order of SET_ADDRESS calls must stay stable.
  assert(_early_c1_complete, "early C1 blobs should be initialized");
  for (int i = (int)C1StubId::forward_exception_id + 1; i < (int)(C1StubId::NUM_STUBIDS); i++) {
    C1StubId id = (C1StubId)i;
    // A stub can be absent in this configuration; log and skip, not fatal.
    if (Runtime1::blob_for(id) == nullptr) {
      log_info(aot, codecache, init)("C1 blob %s is missing", Runtime1::name_for(id));
      continue;
    }
    if (Runtime1::entry_for(id) == nullptr) {
      log_info(aot, codecache, init)("C1 blob %s is missing entry", Runtime1::name_for(id));
      continue;
    }
    address entry = Runtime1::entry_for(id);
    SET_ADDRESS(_C1_blobs, entry);
  }
  // GC-specific C1 barrier stubs, recorded only for the active collector.
#if INCLUDE_G1GC
  if (UseG1GC) {
    G1BarrierSetC1* bs = (G1BarrierSetC1*)BarrierSet::barrier_set()->barrier_set_c1();
    address entry = bs->pre_barrier_c1_runtime_code_blob()->code_begin();
    SET_ADDRESS(_C1_blobs, entry);
    entry = bs->post_barrier_c1_runtime_code_blob()->code_begin();
    SET_ADDRESS(_C1_blobs, entry);
  }
#endif // INCLUDE_G1GC
#if INCLUDE_ZGC
  if (UseZGC) {
    ZBarrierSetC1* bs = (ZBarrierSetC1*)BarrierSet::barrier_set()->barrier_set_c1();
    SET_ADDRESS(_C1_blobs, bs->_load_barrier_on_oop_field_preloaded_runtime_stub);
    SET_ADDRESS(_C1_blobs, bs->_load_barrier_on_weak_oop_field_preloaded_runtime_stub);
    SET_ADDRESS(_C1_blobs, bs->_store_barrier_on_oop_field_with_healing);
    SET_ADDRESS(_C1_blobs, bs->_store_barrier_on_oop_field_without_healing);
  }
#endif // INCLUDE_ZGC
#if INCLUDE_SHENANDOAHGC
  if (UseShenandoahGC) {
    ShenandoahBarrierSetC1* bs = (ShenandoahBarrierSetC1*)BarrierSet::barrier_set()->barrier_set_c1();
    SET_ADDRESS(_C1_blobs, bs->pre_barrier_c1_runtime_code_blob()->code_begin());
    SET_ADDRESS(_C1_blobs, bs->load_reference_barrier_strong_rt_code_blob()->code_begin());
    SET_ADDRESS(_C1_blobs, bs->load_reference_barrier_strong_native_rt_code_blob()->code_begin());
    SET_ADDRESS(_C1_blobs, bs->load_reference_barrier_weak_rt_code_blob()->code_begin());
    SET_ADDRESS(_C1_blobs, bs->load_reference_barrier_phantom_rt_code_blob()->code_begin());
  }
#endif // INCLUDE_SHENANDOAHGC
#endif // COMPILER1

  assert(_C1_blobs_length <= _C1_blobs_max, "increase _C1_blobs_max to %d", _C1_blobs_length);
  _c1_complete = true;
  log_info(aot, codecache,init)("Runtime1 Blobs recorded");
}
4169
void AOTCodeAddressTable::init_c2() {
#ifdef COMPILER2
  // OptoRuntime Blobs
  // Register the C2 runtime blob entry points. NOTE: registration order
  // defines the ids stored in the AOT cache; do not reorder these calls.
  SET_ADDRESS(_C2_blobs, OptoRuntime::uncommon_trap_blob()->entry_point());
  SET_ADDRESS(_C2_blobs, OptoRuntime::exception_blob()->entry_point());
  SET_ADDRESS(_C2_blobs, OptoRuntime::new_instance_Java());
  SET_ADDRESS(_C2_blobs, OptoRuntime::new_array_Java());
  SET_ADDRESS(_C2_blobs, OptoRuntime::new_array_nozero_Java());
  SET_ADDRESS(_C2_blobs, OptoRuntime::multianewarray2_Java());
  SET_ADDRESS(_C2_blobs, OptoRuntime::multianewarray3_Java());
  SET_ADDRESS(_C2_blobs, OptoRuntime::multianewarray4_Java());
  SET_ADDRESS(_C2_blobs, OptoRuntime::multianewarray5_Java());
  SET_ADDRESS(_C2_blobs, OptoRuntime::multianewarrayN_Java());
  SET_ADDRESS(_C2_blobs, OptoRuntime::vtable_must_compile_stub());
  SET_ADDRESS(_C2_blobs, OptoRuntime::complete_monitor_locking_Java());
  SET_ADDRESS(_C2_blobs, OptoRuntime::monitor_notify_Java());
  SET_ADDRESS(_C2_blobs, OptoRuntime::monitor_notifyAll_Java());
  SET_ADDRESS(_C2_blobs, OptoRuntime::rethrow_stub());
  SET_ADDRESS(_C2_blobs, OptoRuntime::slow_arraycopy_Java());
  SET_ADDRESS(_C2_blobs, OptoRuntime::register_finalizer_Java());
  SET_ADDRESS(_C2_blobs, OptoRuntime::class_init_barrier_Java());
#if INCLUDE_JVMTI
  // Virtual-thread JVMTI notification entries (only when JVMTI is built in).
  SET_ADDRESS(_C2_blobs, OptoRuntime::notify_jvmti_vthread_start());
  SET_ADDRESS(_C2_blobs, OptoRuntime::notify_jvmti_vthread_end());
  SET_ADDRESS(_C2_blobs, OptoRuntime::notify_jvmti_vthread_mount());
  SET_ADDRESS(_C2_blobs, OptoRuntime::notify_jvmti_vthread_unmount());
#endif /* INCLUDE_JVMTI */
#endif

  assert(_C2_blobs_length <= _C2_blobs_max, "increase _C2_blobs_max to %d", _C2_blobs_length);
  _c2_complete = true;
  log_info(aot, codecache,init)("OptoRuntime Blobs recorded");
}
4203 #undef SET_ADDRESS
4204
AOTCodeAddressTable::~AOTCodeAddressTable() {
  // Release the C-heap address arrays owned by this table.
  // NOTE(review): _C1_blobs_addr and _C2_blobs_addr are not freed here —
  // presumably they point into one of the arrays below (or into static
  // storage); confirm against the allocation sites before adding frees.
  if (_extrs_addr != nullptr) {
    FREE_C_HEAP_ARRAY(address, _extrs_addr);
  }
  if (_stubs_addr != nullptr) {
    FREE_C_HEAP_ARRAY(address, _stubs_addr);
  }
  if (_shared_blobs_addr != nullptr) {
    FREE_C_HEAP_ARRAY(address, _shared_blobs_addr);
  }
}
4216
// Side table of C strings referenced from generated code. Product builds
// keep a smaller table than debug builds (debug code embeds more strings).
#ifdef PRODUCT
#define MAX_STR_COUNT 200
#else
#define MAX_STR_COUNT 500
#endif
#define _c_str_max MAX_STR_COUNT
// C-string ids are allocated directly after all other address-table ranges.
static const int _c_str_base = _all_max;

static const char* _C_strings_in[MAX_STR_COUNT] = {nullptr}; // Incoming strings
static const char* _C_strings[MAX_STR_COUNT] = {nullptr};    // Our duplicates
static int _C_strings_count = 0;
// Per-string bookkeeping arrays; presumably size and id of each recorded
// string — TODO confirm against the code that fills them (not in view here).
static int _C_strings_s[MAX_STR_COUNT] = {0};
static int _C_strings_id[MAX_STR_COUNT] = {0};
static int _C_strings_used = 0;
4231
4362 fatal("AOT Code Cache VM runtime addresses table is not complete");
4363 }
4364 if (idx == -1) {
4365 return (address)-1;
4366 }
4367 uint id = (uint)idx;
4368 // special case for symbols based relative to os::init
4369 if (id > (_c_str_base + _c_str_max)) {
4370 return (address)os::init + idx;
4371 }
4372 if (idx < 0) {
4373 fatal("Incorrect id %d for AOT Code Cache addresses table", id);
4374 }
4375 // no need to compare unsigned id against 0
4376 if (/* id >= _extrs_base && */ id < _extrs_length) {
4377 return _extrs_addr[id - _extrs_base];
4378 }
4379 if (id >= _stubs_base && id < _stubs_base + _stubs_length) {
4380 return _stubs_addr[id - _stubs_base];
4381 }
4382 if (id >= _stubs_base && id < _stubs_base + _stubs_length) {
4383 return _stubs_addr[id - _stubs_base];
4384 }
4385 if (id >= _shared_blobs_base && id < _shared_blobs_base + _shared_blobs_length) {
4386 return _shared_blobs_addr[id - _shared_blobs_base];
4387 }
4388 if (id >= _C1_blobs_base && id < _C1_blobs_base + _C1_blobs_length) {
4389 return _C1_blobs_addr[id - _C1_blobs_base];
4390 }
4391 if (id >= _C1_blobs_base && id < _C1_blobs_base + _C1_blobs_length) {
4392 return _C1_blobs_addr[id - _C1_blobs_base];
4393 }
4394 if (id >= _C2_blobs_base && id < _C2_blobs_base + _C2_blobs_length) {
4395 return _C2_blobs_addr[id - _C2_blobs_base];
4396 }
4397 if (id >= _c_str_base && id < (_c_str_base + (uint)_C_strings_count)) {
4398 return address_for_C_string(id - _c_str_base);
4399 }
4400 fatal("Incorrect id %d for AOT Code Cache addresses table", id);
4401 return nullptr;
4402 }
4403
int AOTCodeAddressTable::id_for_address(address addr, RelocIterator reloc, CodeBuffer* buffer, CodeBlob* blob) {
  // Map a runtime address referenced by a relocation to a stable id that can
  // be stored in the AOT code cache (the reverse direction of the
  // address_for_id() lookup). Lookup order: card table base, C strings,
  // stub routines, code blobs (shared/C1/C2), then external runtime entries.
  // An address the table does not know is fatal, except for an address at a
  // positive offset from a VM symbol, which is encoded as its distance from
  // os::init (presumably an embedded C string constant).
  if (!_extrs_complete) {
    fatal("AOT Code Cache VM runtime addresses table is not complete");
  }
  int id = -1;
  if (addr == (address)-1) { // Static call stub has jump to itself
    return id;
  }
  // Check card_table_base address first since it can point to any address
  BarrierSet* bs = BarrierSet::barrier_set();
  if (bs->is_a(BarrierSet::CardTableBarrierSet)) {
    if (addr == ci_card_table_address_as<address>()) {
      id = search_address(addr, _extrs_addr, _extrs_length);
      assert(id > 0 && _extrs_addr[id - _extrs_base] == addr, "sanity");
      return id;
    }
  }

  // Search for C string
  id = id_for_C_string(addr);
  if (id >= 0) {
    return id + _c_str_base;
  }
  if (StubRoutines::contains(addr)) {
    // Search in stubs
    id = search_address(addr, _stubs_addr, _stubs_length);
    if (id < 0) {
      // Name the stub for the fatal message; the address may be a return
      // pc, hence the second lookup shifted by pc_return_offset.
      StubCodeDesc* desc = StubCodeDesc::desc_for(addr);
      if (desc == nullptr) {
        desc = StubCodeDesc::desc_for(addr + frame::pc_return_offset);
      }
      const char* sub_name = (desc != nullptr) ? desc->name() : "<unknown>";
      fatal("Address " INTPTR_FORMAT " for Stub:%s is missing in AOT Code Cache addresses table", p2i(addr), sub_name);
    } else {
      return _stubs_base + id;
    }
  } else {
    CodeBlob* cb = CodeCache::find_blob(addr);
    if (cb != nullptr) {
      // Try the blob tables in order: shared, C1, C2. id_base tracks which
      // table produced the hit so the returned id lands in its range.
      int id_base = _shared_blobs_base;
      // Search in code blobs
      id = search_address(addr, _shared_blobs_addr, _shared_blobs_length);
      if (id == -1) {
        id_base = _C1_blobs_base;
        // search C1 blobs
        id = search_address(addr, _C1_blobs_addr, _C1_blobs_length);
      }
      if (id == -1) {
        id_base = _C2_blobs_base;
        // search C2 blobs
        id = search_address(addr, _C2_blobs_addr, _C2_blobs_length);
      }
      if (id < 0) {
        fatal("Address " INTPTR_FORMAT " for Blob:%s is missing in AOT Code Cache addresses table", p2i(addr), cb->name());
      } else {
        return id_base + id;
      }
    } else {
      // Search in runtime functions
      id = search_address(addr, _extrs_addr, _extrs_length);
      if (id < 0) {
        ResourceMark rm;
        const int buflen = 1024;
        char* func_name = NEW_RESOURCE_ARRAY(char, buflen);
        int offset = 0;
        if (os::dll_address_to_function_name(addr, func_name, buflen, &offset)) {
          if (offset > 0) {
            // Could be address of C string
            // Encode as a distance from os::init; address_for_id() decodes
            // any id above the table ranges the same way.
            uint dist = (uint)pointer_delta(addr, (address)os::init, 1);
            // Compile id/level are logged for diagnosis only; this can also
            // run during compiler runtime initialization with no task.
            CompileTask* task = ciEnv::current()->task();
            uint compile_id = 0;
            uint comp_level =0;
            if (task != nullptr) { // this could be called from compiler runtime initialization (compiler blobs)
              compile_id = task->compile_id();
              comp_level = task->comp_level();
            }
            log_debug(aot, codecache)("%d (L%d): Address " INTPTR_FORMAT " (offset %d) for runtime target '%s' is missing in AOT Code Cache addresses table",
                                      compile_id, comp_level, p2i(addr), dist, (const char*)addr);
            // The distance encoding must not collide with table ids.
            assert(dist > (uint)(_all_max + MAX_STR_COUNT), "change encoding of distance");
            return dist;
          }
          reloc.print_current_on(tty);
          blob->print_on(tty);
          blob->print_code_on(tty);
          fatal("Address " INTPTR_FORMAT " for runtime target '%s+%d' is missing in AOT Code Cache addresses table", p2i(addr), func_name, offset);
        } else {
          // Unknown address: dump as much diagnostic context as we have
          // before aborting.
          reloc.print_current_on(tty);
#ifndef PRODUCT
          if (buffer != nullptr) {
            buffer->print_on(tty);
            buffer->decode();
          }
          if (blob != nullptr) {
            blob->print_on(tty);
            blob->print_code_on(tty);
          }
#endif // !PRODUCT
          os::find(addr, tty);
          fatal("Address " INTPTR_FORMAT " for <unknown>/('%s') is missing in AOT Code Cache addresses table", p2i(addr), (const char*)addr);
        }
      } else {
        return _extrs_base + id;
      }
    }
  }
  return id;
}
4511
4512 #undef _extrs_max
4513 #undef _stubs_max
4514 #undef _all_blobs_max
4515 #undef _blobs_max
4516 #undef _C1_blobs_max
4517 #undef _C2_blobs_max
4518 #undef _extrs_base
4519 #undef _stubs_base
4520 #undef _blobs_base
4521 #undef _C1_blobs_base
4522 #undef _C2_blobs_base
4523 #undef _c_str_base
4524
4525 void AOTRuntimeConstants::initialize_from_runtime() {
4526 BarrierSet* bs = BarrierSet::barrier_set();
4527 if (bs->is_a(BarrierSet::CardTableBarrierSet)) {
4528 CardTableBarrierSet* ctbs = ((CardTableBarrierSet*)bs);
4529 _aot_runtime_constants._grain_shift = ctbs->grain_shift();
4530 _aot_runtime_constants._card_shift = ctbs->card_shift();
4531 }
4532 }
4533
// The single process-wide instance holding the captured runtime constants.
AOTRuntimeConstants AOTRuntimeConstants::_aot_runtime_constants;

// Null-terminated list of the addresses of the constant fields above;
// presumably used to recognize references to these fields when processing
// AOT code — confirm at the use sites (not in view here).
address AOTRuntimeConstants::_field_addresses_list[] = {
  grain_shift_address(),
  card_shift_address(),
  nullptr
};
4541
4542
void AOTCodeCache::wait_for_no_nmethod_readers() {
  // Close the cache to new nmethod readers, then wait for the current ones
  // to leave. The reader count is flipped to a negative value: upd =
  // -(cur + 1) encodes "closed, with cur readers still inside". Exiting
  // readers then count up towards -1 (see ReadingMark::~ReadingMark), so
  // -1 means "closed and empty".
  while (true) {
    int cur = Atomic::load(&_nmethod_readers);
    int upd = -(cur + 1);
    if (cur >= 0 && Atomic::cmpxchg(&_nmethod_readers, cur, upd) == cur) {
      // Success, no new readers should appear.
      break;
    }
  }

  // Now wait for all readers to leave.
  SpinYield w;
  while (Atomic::load(&_nmethod_readers) != -1) {
    w.wait();
  }
}
4559
AOTCodeCache::ReadingMark::ReadingMark() {
  // Register this thread as a reader by atomically incrementing the reader
  // count. A negative count means the cache has been closed by
  // wait_for_no_nmethod_readers(); in that case record failure instead of
  // entering. Callers must check failed() before touching the cache.
  while (true) {
    int cur = Atomic::load(&_nmethod_readers);
    if (cur < 0) {
      // Cache is already closed, cannot proceed.
      _failed = true;
      return;
    }
    if (Atomic::cmpxchg(&_nmethod_readers, cur, cur + 1) == cur) {
      // Successfully recorded ourselves as entered.
      _failed = false;
      return;
    }
  }
}
4575
AOTCodeCache::ReadingMark::~ReadingMark() {
  // Deregister this thread as a reader. The direction of the update depends
  // on whether the cache was closed while we were inside: positive counts
  // step down towards 0; negative counts (cache closed, encoded as
  // -(readers + 1)) step up towards -1 so the closer can finish waiting.
  if (_failed) {
    return;
  }
  while (true) {
    int cur = Atomic::load(&_nmethod_readers);
    if (cur > 0) {
      // Cache is open, we are counting down towards 0.
      if (Atomic::cmpxchg(&_nmethod_readers, cur, cur - 1) == cur) {
        return;
      }
    } else {
      // Cache is closed, we are counting up towards -1.
      if (Atomic::cmpxchg(&_nmethod_readers, cur, cur + 1) == cur) {
        return;
      }
    }
  }
}
4595
4596 void AOTCodeCache::print_timers_on(outputStream* st) {
4597 if (is_using_code()) {
4598 st->print_cr (" AOT Code Load Time: %7.3f s", _t_totalLoad.seconds());
4599 st->print_cr (" nmethod register: %7.3f s", _t_totalRegister.seconds());
4600 st->print_cr (" find cached code: %7.3f s", _t_totalFind.seconds());
4601 }
4602 if (is_dumping_code()) {
4603 st->print_cr (" AOT Code Store Time: %7.3f s", _t_totalStore.seconds());
4604 }
4605 }
4606
4607 AOTCodeStats AOTCodeStats::add_aot_code_stats(AOTCodeStats stats1, AOTCodeStats stats2) {
4608 AOTCodeStats result;
4609 for (int kind = AOTCodeEntry::None; kind < AOTCodeEntry::Kind_count; kind++) {
4610 result.ccstats._kind_cnt[kind] = stats1.entry_count(kind) + stats2.entry_count(kind);
4611 }
4612
4613 for (int lvl = CompLevel_none; lvl < AOTCompLevel_count; lvl++) {
4614 result.ccstats._nmethod_cnt[lvl] = stats1.nmethod_count(lvl) + stats2.nmethod_count(lvl);
4615 }
4616 result.ccstats._clinit_barriers_cnt = stats1.clinit_barriers_count() + stats2.clinit_barriers_count();
4617 return result;
4618 }
4619
4620 void AOTCodeCache::log_stats_on_exit() {
4621 LogStreamHandle(Info, aot, codecache, exit) log;
4622 if (log.is_enabled()) {
4623 AOTCodeStats prev_stats;
4624 AOTCodeStats current_stats;
4625 AOTCodeStats total_stats;
4626 uint max_size = 0;
4627
4628 uint load_count = (_load_header != nullptr) ? _load_header->entries_count() : 0;
4629
4630 for (uint i = 0; i < load_count; i++) {
4631 prev_stats.collect_entry_stats(&_load_entries[i]);
4632 if (max_size < _load_entries[i].size()) {
4633 max_size = _load_entries[i].size();
4634 }
4635 }
4636 for (uint i = 0; i < _store_entries_cnt; i++) {
4637 current_stats.collect_entry_stats(&_store_entries[i]);
4638 if (max_size < _store_entries[i].size()) {
4639 max_size = _store_entries[i].size();
4640 }
4641 }
4642 total_stats = AOTCodeStats::add_aot_code_stats(prev_stats, current_stats);
4643
4644 log.print_cr("Wrote %d AOTCodeEntry entries(%u max size) to AOT Code Cache",
4645 total_stats.total_count(), max_size);
4646 for (uint kind = AOTCodeEntry::None; kind < AOTCodeEntry::Kind_count; kind++) {
4647 if (total_stats.entry_count(kind) > 0) {
4648 log.print_cr(" %s: total=%u(old=%u+new=%u)",
4649 aot_code_entry_kind_name[kind], total_stats.entry_count(kind), prev_stats.entry_count(kind), current_stats.entry_count(kind));
4650 if (kind == AOTCodeEntry::Code) {
4651 for (uint lvl = CompLevel_none; lvl < AOTCompLevel_count; lvl++) {
4652 if (total_stats.nmethod_count(lvl) > 0) {
4653 log.print_cr(" Tier %d: total=%u(old=%u+new=%u)",
4654 lvl, total_stats.nmethod_count(lvl), prev_stats.nmethod_count(lvl), current_stats.nmethod_count(lvl));
4655 }
4656 }
4657 }
4658 }
4659 }
4660 log.print_cr("Total=%u(old=%u+new=%u)", total_stats.total_count(), prev_stats.total_count(), current_stats.total_count());
4661 }
4662 }
4663
4664 static void print_helper1(outputStream* st, const char* name, int count) {
4665 if (count > 0) {
4666 st->print(" %s=%d", name, count);
4667 }
4668 }
4669
void AOTCodeCache::print_statistics_on(outputStream* st) {
  // Print per-kind (and, for nmethods, per-tier) load/invalidation/failure
  // statistics gathered from the mapped cache's entry table.
  AOTCodeCache* cache = open_for_use();
  if (cache != nullptr) {
    // Take a reader mark so the cache cannot be closed underneath us.
    ReadingMark rdmk;
    if (rdmk.failed()) {
      // Cache is closed, cannot touch anything.
      return;
    }

    // The entry table layout: a [id, index] pair per entry, followed by the
    // AOTCodeEntry records themselves.
    uint count = cache->_load_header->entries_count();
    uint* search_entries = (uint*)cache->addr(cache->_load_header->entries_offset()); // [id, index]
    AOTCodeEntry* load_entries = (AOTCodeEntry*)(search_entries + 2 * count);

    AOTCodeStats stats;
    for (uint i = 0; i < count; i++) {
      stats.collect_all_stats(&load_entries[i]);
    }

    for (uint kind = AOTCodeEntry::None; kind < AOTCodeEntry::Kind_count; kind++) {
      if (stats.entry_count(kind) > 0) {
        st->print(" %s:", aot_code_entry_kind_name[kind]);
        print_helper1(st, "total", stats.entry_count(kind));
        print_helper1(st, "loaded", stats.entry_loaded_count(kind));
        print_helper1(st, "invalidated", stats.entry_invalidated_count(kind));
        print_helper1(st, "failed", stats.entry_load_failed_count(kind));
        st->cr();
      }
      // nmethod entries additionally get a per-compilation-tier breakdown.
      if (kind == AOTCodeEntry::Code) {
        for (uint lvl = CompLevel_none; lvl < AOTCompLevel_count; lvl++) {
          if (stats.nmethod_count(lvl) > 0) {
            st->print(" AOT Code T%d", lvl);
            print_helper1(st, "total", stats.nmethod_count(lvl));
            print_helper1(st, "loaded", stats.nmethod_loaded_count(lvl));
            print_helper1(st, "invalidated", stats.nmethod_invalidated_count(lvl));
            print_helper1(st, "failed", stats.nmethod_load_failed_count(lvl));
            // Clinit barriers only apply to the highest AOT level.
            if (lvl == AOTCompLevel_count-1) {
              print_helper1(st, "has_clinit_barriers", stats.clinit_barriers_count());
            }
            st->cr();
          }
        }
      }
    }
  } else {
    st->print_cr("failed to map code cache");
  }
}
4717
// Print a one-line human-readable dump of this entry's fields and flags.
void AOTCodeEntry::print(outputStream* st) const {
  st->print_cr(" AOT Code Cache entry " INTPTR_FORMAT " [kind: %d, id: " UINT32_FORMAT_X_0 ", offset: %d, size: %d, comp_level: %d, comp_id: %d, decompiled: %d, %s%s%s%s%s]",
               p2i(this), (int)_kind, _id, _offset, _size, _comp_level, _comp_id, _decompile,
               (_not_entrant? "not_entrant" : "entrant"),
               (_loaded ? ", loaded" : ""),
               (_has_clinit_barriers ? ", has_clinit_barriers" : ""),
               (_for_preload ? ", for_preload" : ""),
               (_ignore_decompile ? ", ignore_decomp" : ""));
}
4727
void AOTCodeCache::print_on(outputStream* st) {
  // Dump every entry in the mapped cache, in search-table order, with its
  // stored name and flags.
  AOTCodeCache* cache = open_for_use();
  if (cache != nullptr) {
    // Take a reader mark so the cache cannot be closed underneath us.
    ReadingMark rdmk;
    if (rdmk.failed()) {
      // Cache is closed, cannot touch anything.
      return;
    }

    // The entry table layout: a [id, index] pair per entry, followed by the
    // AOTCodeEntry records themselves.
    uint count = cache->_load_header->entries_count();
    uint* search_entries = (uint*)cache->addr(cache->_load_header->entries_offset()); // [id, index]
    AOTCodeEntry* load_entries = (AOTCodeEntry*)(search_entries + 2 * count);

    for (uint i = 0; i < count; i++) {
      // The odd slot of each search pair holds the index into load_entries.
      int index = search_entries[2*i + 1];
      AOTCodeEntry* entry = &(load_entries[index]);

      // The entry's name is stored in cache data at name_offset() relative
      // to the entry's own position.
      uint entry_position = entry->offset();
      uint name_offset = entry->name_offset() + entry_position;
      const char* saved_name = cache->addr(name_offset);

      st->print_cr("%4u: entry_idx:%4u Kind:%u Id:%u L%u offset:%u size=%u '%s' %s%s%s%s",
                   i, index, entry->kind(), entry->id(), entry->comp_level(), entry->offset(),
                   entry->size(), saved_name,
                   entry->has_clinit_barriers() ? " has_clinit_barriers" : "",
                   entry->for_preload() ? " for_preload" : "",
                   entry->is_loaded() ? " loaded" : "",
                   entry->not_entrant() ? " not_entrant" : "");

      st->print_raw(" ");
      AOTCodeReader reader(cache, entry, nullptr);
      reader.print_on(st);
    }
  } else {
    st->print_cr("failed to map code cache");
  }
}
4765
void AOTCodeCache::print_unused_entries_on(outputStream* st) {
  // Report cached code entries that were never loaded this run, using
  // training data to filter out entries that are legitimately unused
  // (holder class not initialized, compilation still pending/queued, or
  // superseded by an online compilation at the same or higher tier).
  // NOTE(review): output goes to the log stream, not to 'st' — confirm
  // whether the 'st' parameter is intentionally unused here.
  LogStreamHandle(Info, aot, codecache, init) info;
  if (info.is_enabled()) {
    AOTCodeCache::iterate([&](AOTCodeEntry* entry) {
      if (entry->is_code() && !entry->is_loaded()) {
        MethodTrainingData* mtd = MethodTrainingData::find(methodHandle(Thread::current(), entry->method()));
        if (mtd != nullptr) {
          if (mtd->has_holder()) {
            if (mtd->holder()->method_holder()->is_initialized()) {
              ResourceMark rm;
              mtd->iterate_compiles([&](CompileTrainingData* ctd) {
                // Only consider the compile record matching this entry's tier.
                if ((uint)ctd->level() == entry->comp_level()) {
                  if (ctd->init_deps_left() == 0) {
                    nmethod* nm = mtd->holder()->code();
                    if (nm == nullptr) {
                      if (mtd->holder()->queued_for_compilation()) {
                        return; // scheduled for compilation
                      }
                    } else if ((uint)nm->comp_level() >= entry->comp_level()) {
                      return; // already online compiled and superseded by a more optimal method
                    }
                    info.print("AOT Code Cache entry not loaded: ");
                    ctd->print_on(&info);
                    info.cr();
                  }
                }
              });
            } else {
              // not yet initialized
            }
          } else {
            info.print("AOT Code Cache entry doesn't have a holder: ");
            mtd->print_on(&info);
            info.cr();
          }
        }
      }
    });
  }
}
4806
4807 void AOTCodeReader::print_on(outputStream* st) {
4808 uint entry_position = _entry->offset();
4809 set_read_position(entry_position);
4810
4811 // Read name
4812 uint name_offset = entry_position + _entry->name_offset();
4813 uint name_size = _entry->name_size(); // Includes '/0'
4814 const char* name = addr(name_offset);
4815
4816 st->print_cr(" name: %s", name);
4817 }
4818
|