1 /*
2 * Copyright (c) 2025, Oracle and/or its affiliates. All rights reserved.
3 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
4 *
5 * This code is free software; you can redistribute it and/or modify it
6 * under the terms of the GNU General Public License version 2 only, as
7 * published by the Free Software Foundation.
8 *
9 * This code is distributed in the hope that it will be useful, but WITHOUT
10 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
11 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
12 * version 2 for more details (a copy is included in the LICENSE file that
13 * accompanied this code).
14 *
15 * You should have received a copy of the GNU General Public License version
16 * 2 along with this work; if not, write to the Free Software Foundation,
17 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
18 *
19 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
20 * or visit www.oracle.com if you need additional information or have any
21 * questions.
22 *
23 */
24
25
26 #include "asm/macroAssembler.hpp"
27 #include "cds/aotCacheAccess.hpp"
28 #include "cds/cds_globals.hpp"
29 #include "cds/cdsConfig.hpp"
30 #include "cds/heapShared.hpp"
31 #include "cds/metaspaceShared.hpp"
32 #include "classfile/javaAssertions.hpp"
33 #include "code/aotCodeCache.hpp"
34 #include "code/codeCache.hpp"
35 #include "gc/shared/gcConfig.hpp"
36 #include "logging/logStream.hpp"
37 #include "memory/memoryReserver.hpp"
38 #include "runtime/deoptimization.hpp"
39 #include "runtime/flags/flagSetting.hpp"
40 #include "runtime/globals_extension.hpp"
41 #include "runtime/java.hpp"
42 #include "runtime/mutexLocker.hpp"
43 #include "runtime/os.inline.hpp"
44 #include "runtime/sharedRuntime.hpp"
45 #include "runtime/stubRoutines.hpp"
46 #include "utilities/copy.hpp"
47 #ifdef COMPILER1
48 #include "c1/c1_Runtime1.hpp"
49 #endif
50 #ifdef COMPILER2
51 #include "opto/runtime.hpp"
52 #endif
53 #if INCLUDE_G1GC
54 #include "gc/g1/g1BarrierSetRuntime.hpp"
55 #endif
56 #if INCLUDE_SHENANDOAHGC
57 #include "gc/shenandoah/shenandoahRuntime.hpp"
58 #endif
59 #if INCLUDE_ZGC
60 #include "gc/z/zBarrierSetRuntime.hpp"
61 #endif
62
63 #include <sys/stat.h>
64 #include <errno.h>
65
66 const char* aot_code_entry_kind_name[] = {
67 #define DECL_KIND_STRING(kind) XSTR(kind),
68 DO_AOTCODEENTRY_KIND(DECL_KIND_STRING)
69 #undef DECL_KIND_STRING
70 };
71
72 static void report_load_failure() {
73 if (AbortVMOnAOTCodeFailure) {
74 vm_exit_during_initialization("Unable to use AOT Code Cache.", nullptr);
75 }
76 log_info(aot, codecache, init)("Unable to use AOT Code Cache.");
77 AOTAdapterCaching = false;
78 AOTStubCaching = false;
79 }
80
81 static void report_store_failure() {
82 if (AbortVMOnAOTCodeFailure) {
83 tty->print_cr("Unable to create AOT Code Cache.");
84 vm_abort(false);
85 }
86 log_info(aot, codecache, exit)("Unable to create AOT Code Cache.");
87 AOTAdapterCaching = false;
88 AOTStubCaching = false;
89 }
90
91 bool AOTCodeCache::is_dumping_adapter() {
92 return AOTAdapterCaching && is_on_for_dump();
93 }
94
95 bool AOTCodeCache::is_using_adapter() {
96 return AOTAdapterCaching && is_on_for_use();
97 }
98
99 bool AOTCodeCache::is_dumping_stub() {
100 return AOTStubCaching && is_on_for_dump();
101 }
102
103 bool AOTCodeCache::is_using_stub() {
104 return AOTStubCaching && is_on_for_use();
105 }
106
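// Entries are looked up by (kind, id). The encoded id below only needs to be
// unique within a kind; Adapter and SharedBlob ids may overlap, while C1/C2
// blob ids are shifted past the shared (and C1) stub id ranges. Roughly:
//   Adapter/SharedBlob: id
//   C1Blob:             (int)SharedStubId::NUM_STUBIDS + id
//   C2Blob:             (int)SharedStubId::NUM_STUBIDS + (int)C1StubId::NUM_STUBIDS + id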
107 static uint32_t encode_id(AOTCodeEntry::Kind kind, int id) {
108 assert(AOTCodeEntry::is_valid_entry_kind(kind), "invalid AOTCodeEntry kind %d", (int)kind);
109 // There can be a conflict of id between an Adapter and *Blob, but that should not cause any functional issue
110 // because both id and kind are used to find an entry, and that combination should be unique
111 if (kind == AOTCodeEntry::Adapter) {
112 return id;
113 } else if (kind == AOTCodeEntry::SharedBlob) {
114 return id;
115 } else if (kind == AOTCodeEntry::C1Blob) {
116 return (int)SharedStubId::NUM_STUBIDS + id;
117 } else {
118 // kind must be AOTCodeEntry::C2Blob
119 return (int)SharedStubId::NUM_STUBIDS + COMPILER1_PRESENT((int)C1StubId::NUM_STUBIDS) + id;
120 }
121 }
122
123 static uint _max_aot_code_size = 0;
124 uint AOTCodeCache::max_aot_code_size() {
125 return _max_aot_code_size;
126 }
127
128 void AOTCodeCache::initialize() {
129 #if defined(ZERO) || !(defined(AMD64) || defined(AARCH64))
130 log_info(aot, codecache, init)("AOT Code Cache is not supported on this platform.");
131 AOTAdapterCaching = false;
132 AOTStubCaching = false;
133 return;
134 #else
135 if (FLAG_IS_DEFAULT(AOTCache)) {
136 log_info(aot, codecache, init)("AOT Code Cache is not used: AOTCache is not specified.");
137 AOTAdapterCaching = false;
138 AOTStubCaching = false;
139 return; // AOTCache must be specified to dump and use AOT code
140 }
141
142 // Disable stubs caching until JDK-8357398 is fixed.
143 FLAG_SET_ERGO(AOTStubCaching, false);
144
145 if (VerifyOops) {
146 // Disable AOT stub caching when the VerifyOops flag is on.
147 // VerifyOops code generates a lot of C strings which overflow
148 // the AOT C string table (which has a fixed size).
149 // The AOT C string table will be reworked later to handle such cases.
150 //
151 // Note: AOT adapters are not affected - they don't have oop operations.
152 log_info(aot, codecache, init)("AOT Stubs Caching is not supported with VerifyOops.");
153 FLAG_SET_ERGO(AOTStubCaching, false);
154 }
155
156 bool is_dumping = false;
157 bool is_using = false;
158 if (CDSConfig::is_dumping_final_static_archive() && CDSConfig::is_dumping_aot_linked_classes()) {
159 FLAG_SET_ERGO_IF_DEFAULT(AOTAdapterCaching, true);
160 FLAG_SET_ERGO_IF_DEFAULT(AOTStubCaching, true);
161 is_dumping = true;
162 } else if (CDSConfig::is_using_archive() && CDSConfig::is_using_aot_linked_classes()) {
163 FLAG_SET_ERGO_IF_DEFAULT(AOTAdapterCaching, true);
164 FLAG_SET_ERGO_IF_DEFAULT(AOTStubCaching, true);
165 is_using = true;
166 } else {
167 log_info(aot, codecache, init)("AOT Code Cache is not used: AOT Class Linking is not used.");
168 return; // nothing to do
169 }
170 if (!AOTAdapterCaching && !AOTStubCaching) {
171 return; // AOT code caching disabled on command line
172 }
173 _max_aot_code_size = AOTCodeMaxSize;
174 if (!FLAG_IS_DEFAULT(AOTCodeMaxSize)) {
175 if (!is_aligned(AOTCodeMaxSize, os::vm_allocation_granularity())) {
176 _max_aot_code_size = align_up(AOTCodeMaxSize, os::vm_allocation_granularity());
177 log_debug(aot, codecache, init)("Max AOT Code Cache size is aligned up to %uK", (uint)(max_aot_code_size()/K));
178 }
179 }
180 size_t aot_code_size = is_using ? AOTCacheAccess::get_aot_code_region_size() : 0;
181 if (is_using && aot_code_size == 0) {
182 log_info(aot, codecache, init)("AOT Code Cache is empty");
183 return;
184 }
185 if (!open_cache(is_dumping, is_using)) {
186 if (is_using) {
187 report_load_failure();
188 } else {
189 report_store_failure();
190 }
191 return;
192 }
193 if (is_dumping) {
194 FLAG_SET_DEFAULT(ForceUnreachable, true);
195 }
196 FLAG_SET_DEFAULT(DelayCompilerStubsGeneration, false);
197 #endif // defined(ZERO) || !(defined(AMD64) || defined(AARCH64))
198 }
199
200 void AOTCodeCache::init2() {
201 if (!is_on()) {
202 return;
203 }
204 if (!verify_vm_config()) {
205 close();
206 report_load_failure();
207 }
208
209 // initialize the table of external routines so we can save
210 // generated code blobs that reference them
211 init_extrs_table();
212 init_early_stubs_table();
213 }
214
215 AOTCodeCache* AOTCodeCache::_cache = nullptr;
216
217 bool AOTCodeCache::open_cache(bool is_dumping, bool is_using) {
218 AOTCodeCache* cache = new AOTCodeCache(is_dumping, is_using);
219 if (cache->failed()) {
220 delete cache;
221 _cache = nullptr;
222 return false;
223 }
224 _cache = cache;
225 return true;
226 }
227
228 void AOTCodeCache::close() {
229 if (is_on()) {
230 delete _cache; // Free memory
231 _cache = nullptr;
232 }
233 }
234
235 #define DATA_ALIGNMENT HeapWordSize
236
237 AOTCodeCache::AOTCodeCache(bool is_dumping, bool is_using) :
238 _load_header(nullptr),
239 _load_buffer(nullptr),
240 _store_buffer(nullptr),
241 _C_store_buffer(nullptr),
242 _write_position(0),
243 _load_size(0),
244 _store_size(0),
245 _for_use(is_using),
246 _for_dump(is_dumping),
247 _closing(false),
248 _failed(false),
249 _lookup_failed(false),
250 _table(nullptr),
251 _load_entries(nullptr),
252 _search_entries(nullptr),
253 _store_entries(nullptr),
254 _C_strings_buf(nullptr),
255 _store_entries_cnt(0)
256 {
257 // Read the header at the beginning of the cache
258 if (_for_use) {
259 // Read cache
260 size_t load_size = AOTCacheAccess::get_aot_code_region_size();
261 ReservedSpace rs = MemoryReserver::reserve(load_size, mtCode);
262 if (!rs.is_reserved()) {
263 log_warning(aot, codecache, init)("Failed to reserve %u bytes of memory for mapping AOT code region into AOT Code Cache", (uint)load_size);
264 set_failed();
265 return;
266 }
267 if (!AOTCacheAccess::map_aot_code_region(rs)) {
268 log_warning(aot, codecache, init)("Failed to read/mmap cached code region into AOT Code Cache");
269 set_failed();
270 return;
271 }
272
273 _load_size = (uint)load_size;
274 _load_buffer = (char*)rs.base();
275 assert(is_aligned(_load_buffer, DATA_ALIGNMENT), "load_buffer is not aligned");
276 log_debug(aot, codecache, init)("Mapped %u bytes at address " INTPTR_FORMAT " at AOT Code Cache", _load_size, p2i(_load_buffer));
277
278 _load_header = (Header*)addr(0);
279 if (!_load_header->verify_config(_load_size)) {
280 set_failed();
281 return;
282 }
283 log_info (aot, codecache, init)("Loaded %u AOT code entries from AOT Code Cache", _load_header->entries_count());
284 log_debug(aot, codecache, init)(" Adapters: total=%u", _load_header->adapters_count());
285 log_debug(aot, codecache, init)(" Shared Blobs: total=%u", _load_header->shared_blobs_count());
286 log_debug(aot, codecache, init)(" C1 Blobs: total=%u", _load_header->C1_blobs_count());
287 log_debug(aot, codecache, init)(" C2 Blobs: total=%u", _load_header->C2_blobs_count());
288 log_debug(aot, codecache, init)(" AOT code cache size: %u bytes", _load_header->cache_size());
289
290 // Read strings
291 load_strings();
292 }
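// Dump path: a single temporary buffer holds both code and entry metadata.
// Code and data are written from the start of the buffer while AOTCodeEntry
// records grow down from the end; an overflow of the code region into the
// entry area is caught in write_bytes().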
293 if (_for_dump) {
294 _C_store_buffer = NEW_C_HEAP_ARRAY(char, max_aot_code_size() + DATA_ALIGNMENT, mtCode);
295 _store_buffer = align_up(_C_store_buffer, DATA_ALIGNMENT);
296 // Entries are allocated at the end of the buffer in reverse order (as on a stack).
297 _store_entries = (AOTCodeEntry*)align_up(_C_store_buffer + max_aot_code_size(), DATA_ALIGNMENT);
298 log_debug(aot, codecache, init)("Allocated store buffer at address " INTPTR_FORMAT " of size %u", p2i(_store_buffer), max_aot_code_size());
299 }
300 _table = new AOTCodeAddressTable();
301 }
302
303 void AOTCodeCache::init_extrs_table() {
304 AOTCodeAddressTable* table = addr_table();
305 if (table != nullptr) {
306 table->init_extrs();
307 }
308 }
309
310 void AOTCodeCache::init_early_stubs_table() {
311 AOTCodeAddressTable* table = addr_table();
312 if (table != nullptr) {
313 table->init_early_stubs();
314 }
315 }
316
317 void AOTCodeCache::init_shared_blobs_table() {
318 AOTCodeAddressTable* table = addr_table();
319 if (table != nullptr) {
320 table->init_shared_blobs();
321 }
322 }
323
324 void AOTCodeCache::init_early_c1_table() {
325 AOTCodeAddressTable* table = addr_table();
326 if (table != nullptr) {
327 table->init_early_c1();
328 }
329 }
330
331 AOTCodeCache::~AOTCodeCache() {
332 if (_closing) {
333 return; // Already closed
334 }
335 // Stop any further access to cache.
336 _closing = true;
337
338 MutexLocker ml(Compile_lock);
339 if (for_dump()) { // Finalize cache
340 finish_write();
341 }
342 _load_buffer = nullptr;
343 if (_C_store_buffer != nullptr) {
344 FREE_C_HEAP_ARRAY(char, _C_store_buffer);
345 _C_store_buffer = nullptr;
346 _store_buffer = nullptr;
347 }
348 if (_table != nullptr) {
349 delete _table;
350 _table = nullptr;
351 }
352 }
353
354 void AOTCodeCache::Config::record() {
355 _flags = 0;
356 #ifdef ASSERT
357 _flags |= debugVM;
358 #endif
359 if (UseCompressedOops) {
360 _flags |= compressedOops;
361 }
362 if (UseCompressedClassPointers) {
363 _flags |= compressedClassPointers;
364 }
365 if (UseTLAB) {
366 _flags |= useTLAB;
367 }
368 if (JavaAssertions::systemClassDefault()) {
369 _flags |= systemClassAssertions;
370 }
371 if (JavaAssertions::userClassDefault()) {
372 _flags |= userClassAssertions;
373 }
374 if (EnableContended) {
375 _flags |= enableContendedPadding;
376 }
377 if (RestrictContended) {
378 _flags |= restrictContendedPadding;
379 }
380 _compressedOopShift = CompressedOops::shift();
381 _compressedOopBase = CompressedOops::base();
382 _compressedKlassShift = CompressedKlassPointers::shift();
383 _contendedPaddingWidth = ContendedPaddingWidth;
384 _objectAlignment = ObjectAlignmentInBytes;
385 _gc = (uint)Universe::heap()->kind();
386 }
387
388 bool AOTCodeCache::Config::verify() const {
389 #ifdef ASSERT
390 if ((_flags & debugVM) == 0) {
391 log_debug(aot, codecache, init)("AOT Code Cache disabled: it was created by product VM, it can't be used by debug VM");
392 return false;
393 }
394 #else
395 if ((_flags & debugVM) != 0) {
396 log_debug(aot, codecache, init)("AOT Code Cache disabled: it was created by debug VM, it can't be used by product VM");
397 return false;
398 }
399 #endif
400
401 CollectedHeap::Name aot_gc = (CollectedHeap::Name)_gc;
402 if (aot_gc != Universe::heap()->kind()) {
430 log_debug(aot, codecache, init)("AOT Code Cache disabled: it was created with RestrictContended = %s", RestrictContended ? "false" : "true");
431 return false;
432 }
433 if (_compressedOopShift != (uint)CompressedOops::shift()) {
434 log_debug(aot, codecache, init)("AOT Code Cache disabled: it was created with different CompressedOops::shift(): %d vs current %d", _compressedOopShift, CompressedOops::shift());
435 return false;
436 }
437 if (_compressedKlassShift != (uint)CompressedKlassPointers::shift()) {
438 log_debug(aot, codecache, init)("AOT Code Cache disabled: it was created with CompressedKlassPointers::shift() = %d vs current %d", _compressedKlassShift, CompressedKlassPointers::shift());
439 return false;
440 }
441 if (_contendedPaddingWidth != (uint)ContendedPaddingWidth) {
442 log_debug(aot, codecache, init)("AOT Code Cache disabled: it was created with ContendedPaddingWidth = %d vs current %d", _contendedPaddingWidth, ContendedPaddingWidth);
443 return false;
444 }
445 if (_objectAlignment != (uint)ObjectAlignmentInBytes) {
446 log_debug(aot, codecache, init)("AOT Code Cache disabled: it was created with ObjectAlignmentInBytes = %d vs current %d", _objectAlignment, ObjectAlignmentInBytes);
447 return false;
448 }
449
450 // This should be the last check as it only disables AOTStubCaching
451 if ((_compressedOopBase == nullptr || CompressedOops::base() == nullptr) && (_compressedOopBase != CompressedOops::base())) {
452 log_debug(aot, codecache, init)("AOTStubCaching is disabled: incompatible CompressedOops::base(): %p vs current %p", _compressedOopBase, CompressedOops::base());
453 AOTStubCaching = false;
454 }
455
456 return true;
457 }
458
459 bool AOTCodeCache::Header::verify_config(uint load_size) const {
460 if (_version != AOT_CODE_VERSION) {
461 log_debug(aot, codecache, init)("AOT Code Cache disabled: different AOT Code version %d vs %d recorded in AOT Code header", AOT_CODE_VERSION, _version);
462 return false;
463 }
464 if (load_size < _cache_size) {
465 log_debug(aot, codecache, init)("AOT Code Cache disabled: AOT Code Cache size %d < %d recorded in AOT Code header", load_size, _cache_size);
466 return false;
467 }
468 return true;
469 }
470
471 AOTCodeCache* AOTCodeCache::open_for_use() {
472 if (AOTCodeCache::is_on_for_use()) {
473 return AOTCodeCache::cache();
474 }
475 return nullptr;
476 }
477
478 AOTCodeCache* AOTCodeCache::open_for_dump() {
479 if (AOTCodeCache::is_on_for_dump()) {
480 AOTCodeCache* cache = AOTCodeCache::cache();
481 cache->clear_lookup_failed(); // Reset bit
482 return cache;
483 }
484 return nullptr;
485 }
486
487 void copy_bytes(const char* from, address to, uint size) {
488 assert(size > 0, "sanity");
489 bool by_words = true;
490 if ((size > 2 * HeapWordSize) && (((intptr_t)from | (intptr_t)to) & (HeapWordSize - 1)) == 0) {
491 // Use wordwise copies if possible:
492 Copy::disjoint_words((HeapWord*)from,
493 (HeapWord*)to,
494 ((size_t)size + HeapWordSize-1) / HeapWordSize);
495 } else {
496 by_words = false;
497 Copy::conjoint_jbytes(from, to, (size_t)size);
498 }
499 log_trace(aot, codecache)("Copied %d bytes as %s from " INTPTR_FORMAT " to " INTPTR_FORMAT, size, (by_words ? "HeapWord" : "bytes"), p2i(from), p2i(to));
500 }
501
502 AOTCodeReader::AOTCodeReader(AOTCodeCache* cache, AOTCodeEntry* entry) {
503 _cache = cache;
504 _entry = entry;
505 _load_buffer = cache->cache_buffer();
506 _read_position = 0;
507 _lookup_failed = false;
508 }
509
510 void AOTCodeReader::set_read_position(uint pos) {
511 if (pos == _read_position) {
512 return;
513 }
514 assert(pos < _cache->load_size(), "offset:%d >= file size:%d", pos, _cache->load_size());
515 _read_position = pos;
516 }
517
518 bool AOTCodeCache::set_write_position(uint pos) {
519 if (pos == _write_position) {
520 return true;
521 }
522 if (_store_size < _write_position) {
523 _store_size = _write_position; // Adjust during write
524 }
525 assert(pos < _store_size, "offset:%d >= file size:%d", pos, _store_size);
526 _write_position = pos;
569 if (nbytes == 0) {
570 return 0;
571 }
572 uint new_position = _write_position + nbytes;
573 if (new_position >= (uint)((char*)_store_entries - _store_buffer)) {
574 log_warning(aot, codecache)("Failed to write %d bytes at offset %d to AOT Code Cache. Increase AOTCodeMaxSize.",
575 nbytes, _write_position);
576 set_failed();
577 report_store_failure();
578 return 0;
579 }
580 copy_bytes((const char* )buffer, (address)(_store_buffer + _write_position), nbytes);
581 log_trace(aot, codecache)("Wrote %d bytes at offset %d to AOT Code Cache", nbytes, _write_position);
582 _write_position += nbytes;
583 if (_store_size < _write_position) {
584 _store_size = _write_position;
585 }
586 return nbytes;
587 }
588
589 void* AOTCodeEntry::operator new(size_t x, AOTCodeCache* cache) {
590 return (void*)(cache->add_entry());
591 }
592
593 static bool check_entry(AOTCodeEntry::Kind kind, uint id, AOTCodeEntry* entry) {
594 if (entry->kind() == kind) {
595 assert(entry->id() == id, "sanity");
596 return true; // Found
597 }
598 return false;
599 }
600
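// The load-time lookup table consists of (id, index) pairs sorted by id,
// followed by the AOTCodeEntry array itself. Because ids are only unique
// per kind, a binary search on id is followed by a short linear scan to
// match the requested kind.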
601 AOTCodeEntry* AOTCodeCache::find_entry(AOTCodeEntry::Kind kind, uint id) {
602 assert(_for_use, "sanity");
603 uint count = _load_header->entries_count();
604 if (_load_entries == nullptr) {
605 // Read it
606 _search_entries = (uint*)addr(_load_header->entries_offset()); // [id, index]
607 _load_entries = (AOTCodeEntry*)(_search_entries + 2 * count);
608 log_debug(aot, codecache, init)("Read %d entries table at offset %d from AOT Code Cache", count, _load_header->entries_offset());
609 }
610 // Binary search
611 int l = 0;
612 int h = count - 1;
613 while (l <= h) {
614 int mid = (l + h) >> 1;
615 int ix = mid * 2;
616 uint is = _search_entries[ix];
617 if (is == id) {
618 int index = _search_entries[ix + 1];
619 AOTCodeEntry* entry = &(_load_entries[index]);
620 if (check_entry(kind, id, entry)) {
621 return entry; // Found
622 }
623 // Linear search around the match to handle id collisions
624 for (int i = mid - 1; i >= l; i--) { // search back
625 ix = i * 2;
626 is = _search_entries[ix];
627 if (is != id) {
628 break;
629 }
630 index = _search_entries[ix + 1];
631 AOTCodeEntry* entry = &(_load_entries[index]);
632 if (check_entry(kind, id, entry)) {
633 return entry; // Found
634 }
635 }
636 for (int i = mid + 1; i <= h; i++) { // search forward
637 ix = i * 2;
638 is = _search_entries[ix];
639 if (is != id) {
640 break;
641 }
642 index = _search_entries[ix + 1];
643 AOTCodeEntry* entry = &(_load_entries[index]);
644 if (check_entry(kind, id, entry)) {
645 return entry; // Found
646 }
647 }
648 break; // No matching entry found
649 } else if (is < id) {
650 l = mid + 1;
651 } else {
652 h = mid - 1;
653 }
654 }
655 return nullptr;
656 }
657
658 extern "C" {
659 static int uint_cmp(const void *i, const void *j) {
660 uint a = *(uint *)i;
661 uint b = *(uint *)j;
662 return a > b ? 1 : a < b ? -1 : 0;
663 }
664 }
665
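// Final layout of the AOT code region produced by finish_write():
//   [ Header | copied code blobs | C strings | sorted (id, index) search table | AOTCodeEntry array ]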
666 bool AOTCodeCache::finish_write() {
667 if (!align_write()) {
668 return false;
669 }
670 uint strings_offset = _write_position;
671 int strings_count = store_strings();
672 if (strings_count < 0) {
673 return false;
674 }
675 if (!align_write()) {
676 return false;
677 }
678 uint strings_size = _write_position - strings_offset;
679
680 uint entries_count = 0; // Number of entrant (useful) code entries
681 uint entries_offset = _write_position;
682
683 uint store_count = _store_entries_cnt;
684 if (store_count > 0) {
685 uint header_size = (uint)align_up(sizeof(AOTCodeCache::Header), DATA_ALIGNMENT);
686 uint code_count = store_count;
687 uint search_count = code_count * 2;
688 uint search_size = search_count * sizeof(uint);
689 uint entries_size = (uint)align_up(code_count * sizeof(AOTCodeEntry), DATA_ALIGNMENT); // In bytes
690 // _write_position includes size of code and strings
691 uint code_alignment = code_count * DATA_ALIGNMENT; // We align_up code size when storing it.
692 uint total_size = header_size + _write_position + code_alignment + search_size + entries_size;
693 assert(total_size < max_aot_code_size(), "AOT Code size (" UINT32_FORMAT " bytes) is greater than AOTCodeMaxSize(" UINT32_FORMAT " bytes).", total_size, max_aot_code_size());
694
695 // Create ordered search table for entries [id, index];
696 uint* search = NEW_C_HEAP_ARRAY(uint, search_count, mtCode);
697 // Allocate in AOT Cache buffer
698 char* buffer = (char *)AOTCacheAccess::allocate_aot_code_region(total_size + DATA_ALIGNMENT);
699 char* start = align_up(buffer, DATA_ALIGNMENT);
700 char* current = start + header_size; // Skip header
701
702 AOTCodeEntry* entries_address = _store_entries; // Pointer to latest entry
703 uint adapters_count = 0;
704 uint shared_blobs_count = 0;
705 uint C1_blobs_count = 0;
706 uint C2_blobs_count = 0;
707 uint max_size = 0;
708 // AOTCodeEntry entries were allocated in reverse order in the store buffer.
709 // Process them in reverse order so the code stored first is cached first.
710 for (int i = store_count - 1; i >= 0; i--) {
711 entries_address[i].set_next(nullptr); // clear pointers before storing data
712 uint size = align_up(entries_address[i].size(), DATA_ALIGNMENT);
713 if (size > max_size) {
714 max_size = size;
715 }
716 copy_bytes((_store_buffer + entries_address[i].offset()), (address)current, size);
717 entries_address[i].set_offset(current - start); // New offset
718 current += size;
719 uint n = write_bytes(&(entries_address[i]), sizeof(AOTCodeEntry));
720 if (n != sizeof(AOTCodeEntry)) {
721 FREE_C_HEAP_ARRAY(uint, search);
722 return false;
723 }
724 search[entries_count*2 + 0] = entries_address[i].id();
725 search[entries_count*2 + 1] = entries_count;
726 entries_count++;
727 AOTCodeEntry::Kind kind = entries_address[i].kind();
728 if (kind == AOTCodeEntry::Adapter) {
729 adapters_count++;
730 } else if (kind == AOTCodeEntry::SharedBlob) {
731 shared_blobs_count++;
732 } else if (kind == AOTCodeEntry::C1Blob) {
733 C1_blobs_count++;
734 } else if (kind == AOTCodeEntry::C2Blob) {
735 C2_blobs_count++;
736 }
737 }
738 if (entries_count == 0) {
739 log_info(aot, codecache, exit)("AOT Code Cache was not created: no entries");
740 FREE_C_HEAP_ARRAY(uint, search);
741 return true; // Nothing to write
742 }
743 assert(entries_count <= store_count, "%d > %d", entries_count, store_count);
744 // Write strings
745 if (strings_count > 0) {
746 copy_bytes((_store_buffer + strings_offset), (address)current, strings_size);
747 strings_offset = (current - start); // New offset
748 current += strings_size;
749 }
750
751 uint new_entries_offset = (current - start); // New offset
752 // Sort and store search table
753 qsort(search, entries_count, 2*sizeof(uint), uint_cmp);
754 search_size = 2 * entries_count * sizeof(uint);
755 copy_bytes((const char*)search, (address)current, search_size);
756 FREE_C_HEAP_ARRAY(uint, search);
757 current += search_size;
758
759 // Write entries
760 entries_size = entries_count * sizeof(AOTCodeEntry); // New size
761 copy_bytes((_store_buffer + entries_offset), (address)current, entries_size);
762 current += entries_size;
763 uint size = (current - start);
764 assert(size <= total_size, "%d > %d", size , total_size);
765
766 log_debug(aot, codecache, exit)(" Adapters: total=%u", adapters_count);
767 log_debug(aot, codecache, exit)(" Shared Blobs: total=%u", shared_blobs_count);
768 log_debug(aot, codecache, exit)(" C1 Blobs: total=%u", C1_blobs_count);
769 log_debug(aot, codecache, exit)(" C2 Blobs: total=%u", C2_blobs_count);
770 log_debug(aot, codecache, exit)(" AOT code cache size: %u bytes, max entry's size: %u bytes", size, max_size);
771
772 // Finalize header
773 AOTCodeCache::Header* header = (AOTCodeCache::Header*)start;
774 header->init(size, (uint)strings_count, strings_offset,
775 entries_count, new_entries_offset,
776 adapters_count, shared_blobs_count,
777 C1_blobs_count, C2_blobs_count);
778
779 log_info(aot, codecache, exit)("Wrote %d AOT code entries to AOT Code Cache", entries_count);
780 }
781 return true;
782 }
783
784 //------------------Store/Load AOT code ----------------------
785
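// Per-blob record format (written by store_code_blob() and consumed by
// AOTCodeReader::compile_code_blob()):
//   name, archived CodeBlob, relocation data, oop maps (optional),
//   asm remarks and debug strings (non-product only), relocation id table,
//   entry offset count followed by the offsets.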
786 bool AOTCodeCache::store_code_blob(CodeBlob& blob, AOTCodeEntry::Kind entry_kind, uint id, const char* name, int entry_offset_count, int* entry_offsets) {
787 AOTCodeCache* cache = open_for_dump();
788 if (cache == nullptr) {
789 return false;
790 }
791 assert(AOTCodeEntry::is_valid_entry_kind(entry_kind), "invalid entry_kind %d", entry_kind);
792
793 if (AOTCodeEntry::is_adapter(entry_kind) && !is_dumping_adapter()) {
794 return false;
795 }
796 if (AOTCodeEntry::is_blob(entry_kind) && !is_dumping_stub()) {
797 return false;
798 }
799 log_debug(aot, codecache, stubs)("Writing blob '%s' (id=%u, kind=%s) to AOT Code Cache", name, id, aot_code_entry_kind_name[entry_kind]);
831 return false;
832 }
833 CodeBlob::archive_blob(&blob, archive_buffer);
834
835 uint reloc_data_size = blob.relocation_size();
836 n = cache->write_bytes((address)blob.relocation_begin(), reloc_data_size);
837 if (n != reloc_data_size) {
838 return false;
839 }
840
841 bool has_oop_maps = false;
842 if (blob.oop_maps() != nullptr) {
843 if (!cache->write_oop_map_set(blob)) {
844 return false;
845 }
846 has_oop_maps = true;
847 }
848
849 #ifndef PRODUCT
850 // Write asm remarks
851 if (!cache->write_asm_remarks(blob)) {
852 return false;
853 }
854 if (!cache->write_dbg_strings(blob)) {
855 return false;
856 }
857 #endif /* PRODUCT */
858
859 if (!cache->write_relocations(blob)) {
860 return false;
861 }
862
863 // Write entry offsets
864 n = cache->write_bytes(&entry_offset_count, sizeof(int));
865 if (n != sizeof(int)) {
866 return false;
867 }
868 for (int i = 0; i < entry_offset_count; i++) {
869 uint32_t off = (uint32_t)entry_offsets[i];
870 n = cache->write_bytes(&off, sizeof(uint32_t));
871 if (n != sizeof(uint32_t)) {
872 return false;
873 }
874 }
882
883 CodeBlob* AOTCodeCache::load_code_blob(AOTCodeEntry::Kind entry_kind, uint id, const char* name, int entry_offset_count, int* entry_offsets) {
884 AOTCodeCache* cache = open_for_use();
885 if (cache == nullptr) {
886 return nullptr;
887 }
888 assert(AOTCodeEntry::is_valid_entry_kind(entry_kind), "invalid entry_kind %d", entry_kind);
889
890 if (AOTCodeEntry::is_adapter(entry_kind) && !is_using_adapter()) {
891 return nullptr;
892 }
893 if (AOTCodeEntry::is_blob(entry_kind) && !is_using_stub()) {
894 return nullptr;
895 }
896 log_debug(aot, codecache, stubs)("Reading blob '%s' (id=%u, kind=%s) from AOT Code Cache", name, id, aot_code_entry_kind_name[entry_kind]);
897
898 AOTCodeEntry* entry = cache->find_entry(entry_kind, encode_id(entry_kind, id));
899 if (entry == nullptr) {
900 return nullptr;
901 }
902 AOTCodeReader reader(cache, entry);
903 CodeBlob* blob = reader.compile_code_blob(name, entry_offset_count, entry_offsets);
904
905 log_debug(aot, codecache, stubs)("%sRead blob '%s' (id=%u, kind=%s) from AOT Code Cache",
906 (blob == nullptr? "Failed to " : ""), name, id, aot_code_entry_kind_name[entry_kind]);
907 return blob;
908 }
909
910 CodeBlob* AOTCodeReader::compile_code_blob(const char* name, int entry_offset_count, int* entry_offsets) {
911 uint entry_position = _entry->offset();
912
913 // Read name
914 uint name_offset = entry_position + _entry->name_offset();
915 uint name_size = _entry->name_size(); // Includes terminating '\0'
916 const char* stored_name = addr(name_offset);
917
918 if (strncmp(stored_name, name, (name_size - 1)) != 0) {
919 log_warning(aot, codecache, stubs)("Saved blob's name '%s' is different from the expected name '%s'",
920 stored_name, name);
921 set_lookup_failed(); // Skip this blob
922 return nullptr;
923 }
924
925 // Read archived code blob
926 uint offset = entry_position + _entry->blob_offset();
927 CodeBlob* archived_blob = (CodeBlob*)addr(offset);
928 offset += archived_blob->size();
929
930 address reloc_data = (address)addr(offset);
931 offset += archived_blob->relocation_size();
932 set_read_position(offset);
933
934 ImmutableOopMapSet* oop_maps = nullptr;
935 if (_entry->has_oop_maps()) {
936 oop_maps = read_oop_map_set();
937 }
938
939 #ifndef PRODUCT
940 AsmRemarks asm_remarks;
941 read_asm_remarks(asm_remarks);
942 DbgStrings dbg_strings;
943 read_dbg_strings(dbg_strings);
944 #endif // PRODUCT
945
946 CodeBlob* code_blob = CodeBlob::create(archived_blob,
947 stored_name,
948 reloc_data,
949 oop_maps
950 #ifndef PRODUCT
951 , asm_remarks
952 , dbg_strings
953 #endif
954 );
955 if (code_blob == nullptr) { // no space left in CodeCache
956 return nullptr;
957 }
958
959 fix_relocations(code_blob);
960
961 // Read entry offsets
962 offset = read_position();
963 int stored_count = *(int*)addr(offset);
964 assert(stored_count == entry_offset_count, "entry offset count mismatch, count in AOT code cache=%d, expected=%d", stored_count, entry_offset_count);
965 offset += sizeof(int);
966 set_read_position(offset);
967 for (int i = 0; i < stored_count; i++) {
968 uint32_t off = *(uint32_t*)addr(offset);
969 offset += sizeof(uint32_t);
970 const char* entry_name = (_entry->kind() == AOTCodeEntry::Adapter) ? AdapterHandlerEntry::entry_name(i) : "";
971 log_trace(aot, codecache, stubs)("Reading adapter '%s:%s' (0x%x) offset: 0x%x from AOT Code Cache",
972 stored_name, entry_name, _entry->id(), off);
973 entry_offsets[i] = off;
974 }
975
976 #ifdef ASSERT
977 LogStreamHandle(Trace, aot, codecache, stubs) log;
978 if (log.is_enabled()) {
979 FlagSetting fs(PrintRelocations, true);
980 code_blob->print_on(&log);
981 }
982 #endif
983 return code_blob;
984 }
985
986 // ------------ process code and data --------------
987
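// For every relocation in the blob one extra uint of side data is stored:
// an AOTCodeAddressTable id for runtime call and external word targets,
// 0 (unused) for the other relocation types. Internal and section word
// relocations are rebased at load time using the recorded dump-time
// content start address.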
988 bool AOTCodeCache::write_relocations(CodeBlob& code_blob) {
989 GrowableArray<uint> reloc_data;
990 RelocIterator iter(&code_blob);
991 LogStreamHandle(Trace, aot, codecache, reloc) log;
992 while (iter.next()) {
993 int idx = reloc_data.append(0); // default value
994 switch (iter.type()) {
995 case relocInfo::none:
996 break;
997 case relocInfo::runtime_call_type: {
998 // Record offset of runtime destination
999 CallRelocation* r = (CallRelocation*)iter.reloc();
1000 address dest = r->destination();
1001 if (dest == r->addr()) { // possible call via trampoline on Aarch64
1002 dest = (address)-1; // do nothing in this case when loading this relocation
1003 }
1004 reloc_data.at_put(idx, _table->id_for_address(dest, iter, &code_blob));
1005 break;
1006 }
1007 case relocInfo::runtime_call_w_cp_type:
1008 fatal("runtime_call_w_cp_type unimplemented");
1009 break;
1010 case relocInfo::external_word_type: {
1011 // Record offset of runtime target
1012 address target = ((external_word_Relocation*)iter.reloc())->target();
1013 reloc_data.at_put(idx, _table->id_for_address(target, iter, &code_blob));
1014 break;
1015 }
1016 case relocInfo::internal_word_type:
1017 break;
1018 case relocInfo::section_word_type:
1019 break;
1020 case relocInfo::post_call_nop_type:
1021 break;
1022 default:
1023 fatal("relocation %d unimplemented", (int)iter.type());
1024 break;
1025 }
1026 if (log.is_enabled()) {
1027 iter.print_current_on(&log);
1028 }
1029 }
1030
1031 // Write additional relocation data: uint per relocation
1032 // Write the count first
1033 int count = reloc_data.length();
1034 write_bytes(&count, sizeof(int));
1035 for (GrowableArrayIterator<uint> iter = reloc_data.begin();
1036 iter != reloc_data.end(); ++iter) {
1037 uint value = *iter;
1038 int n = write_bytes(&value, sizeof(uint));
1039 if (n != sizeof(uint)) {
1040 return false;
1041 }
1042 }
1043 return true;
1044 }
1045
1046 void AOTCodeReader::fix_relocations(CodeBlob* code_blob) {
1047 LogStreamHandle(Trace, aot, reloc) log;
1048 uint offset = read_position();
1049 int count = *(int*)addr(offset);
1050 offset += sizeof(int);
1051 if (log.is_enabled()) {
1052 log.print_cr("======== extra relocations count=%d", count);
1053 }
1054 uint* reloc_data = (uint*)addr(offset);
1055 offset += (count * sizeof(uint));
1056 set_read_position(offset);
1057
1058 RelocIterator iter(code_blob);
1059 int j = 0;
1060 while (iter.next()) {
1061 switch (iter.type()) {
1062 case relocInfo::none:
1063 break;
1064 case relocInfo::runtime_call_type: {
1065 address dest = _cache->address_for_id(reloc_data[j]);
1066 if (dest != (address)-1) {
1067 ((CallRelocation*)iter.reloc())->set_destination(dest);
1068 }
1069 break;
1070 }
1071 case relocInfo::runtime_call_w_cp_type:
1072 fatal("runtime_call_w_cp_type unimplemented");
1073 break;
1074 case relocInfo::external_word_type: {
1075 address target = _cache->address_for_id(reloc_data[j]);
1076 // Add external address to global table
1077 int index = ExternalsRecorder::find_index(target);
1078 // Update index in relocation
1079 Relocation::add_jint(iter.data(), index);
1080 external_word_Relocation* reloc = (external_word_Relocation*)iter.reloc();
1081 assert(reloc->target() == target, "sanity");
1082 reloc->set_value(target); // Patch address in the code
1083 break;
1084 }
1085 case relocInfo::internal_word_type: {
1086 internal_word_Relocation* r = (internal_word_Relocation*)iter.reloc();
1087 r->fix_relocation_after_aot_load(aot_code_entry()->dumptime_content_start_addr(), code_blob->content_begin());
1088 break;
1089 }
1090 case relocInfo::section_word_type: {
1091 section_word_Relocation* r = (section_word_Relocation*)iter.reloc();
1092 r->fix_relocation_after_aot_load(aot_code_entry()->dumptime_content_start_addr(), code_blob->content_begin());
1093 break;
1094 }
1095 case relocInfo::post_call_nop_type:
1096 break;
1097 default:
1098 fatal("relocation %d unimplemented", (int)iter.type());
1099 break;
1100 }
1101 if (log.is_enabled()) {
1102 iter.print_current_on(&log);
1103 }
1104 j++;
1105 }
1106 assert(j == count, "sanity");
1107 }
1108
1109 bool AOTCodeCache::write_oop_map_set(CodeBlob& cb) {
1110 ImmutableOopMapSet* oopmaps = cb.oop_maps();
1111 int oopmaps_size = oopmaps->nr_of_bytes();
1112 if (!write_bytes(&oopmaps_size, sizeof(int))) {
1113 return false;
1114 }
1115 uint n = write_bytes(oopmaps, oopmaps->nr_of_bytes());
1116 if (n != (uint)oopmaps->nr_of_bytes()) {
1117 return false;
1118 }
1119 return true;
1120 }
1121
1122 ImmutableOopMapSet* AOTCodeReader::read_oop_map_set() {
1123 uint offset = read_position();
1124 int size = *(int *)addr(offset);
1125 offset += sizeof(int);
1126 ImmutableOopMapSet* oopmaps = (ImmutableOopMapSet *)addr(offset);
1127 offset += size;
1128 set_read_position(offset);
1129 return oopmaps;
1130 }
1131
1132 #ifndef PRODUCT
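// Non-product only: asm remarks are stored as a count followed by
// (code offset, C string id) pairs; debug strings as a count followed by
// C string ids. The strings themselves live in the shared C string table.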
1133 bool AOTCodeCache::write_asm_remarks(CodeBlob& cb) {
1134 // Write asm remarks
1135 uint* count_ptr = (uint *)reserve_bytes(sizeof(uint));
1136 if (count_ptr == nullptr) {
1137 return false;
1138 }
1139 uint count = 0;
1140 bool result = cb.asm_remarks().iterate([&] (uint offset, const char* str) -> bool {
1141 log_trace(aot, codecache, stubs)("asm remark offset=%d, str='%s'", offset, str);
1142 uint n = write_bytes(&offset, sizeof(uint));
1143 if (n != sizeof(uint)) {
1144 return false;
1145 }
1146 const char* cstr = add_C_string(str);
1147 int id = _table->id_for_C_string((address)cstr);
1148 assert(id != -1, "asm remark string '%s' not found in AOTCodeAddressTable", str);
1149 n = write_bytes(&id, sizeof(int));
1150 if (n != sizeof(int)) {
1151 return false;
1152 }
1153 count += 1;
1154 return true;
1155 });
1156 *count_ptr = count;
1157 return result;
1158 }
1159
1160 void AOTCodeReader::read_asm_remarks(AsmRemarks& asm_remarks) {
1161 // Read asm remarks
1162 uint offset = read_position();
1163 uint count = *(uint *)addr(offset);
1164 offset += sizeof(uint);
1165 for (uint i = 0; i < count; i++) {
1166 uint remark_offset = *(uint *)addr(offset);
1167 offset += sizeof(uint);
1168 int remark_string_id = *(uint *)addr(offset);
1169 offset += sizeof(int);
1170 const char* remark = (const char*)_cache->address_for_C_string(remark_string_id);
1171 asm_remarks.insert(remark_offset, remark);
1172 }
1173 set_read_position(offset);
1174 }
1175
1176 bool AOTCodeCache::write_dbg_strings(CodeBlob& cb) {
1177 // Write dbg strings
1178 uint* count_ptr = (uint *)reserve_bytes(sizeof(uint));
1179 if (count_ptr == nullptr) {
1180 return false;
1181 }
1182 uint count = 0;
1183 bool result = cb.dbg_strings().iterate([&] (const char* str) -> bool {
1184 log_trace(aot, codecache, stubs)("dbg string=%s", str);
1185 const char* cstr = add_C_string(str);
1186 int id = _table->id_for_C_string((address)cstr);
1187 assert(id != -1, "dbg string '%s' not found in AOTCodeAddressTable", str);
1188 uint n = write_bytes(&id, sizeof(int));
1189 if (n != sizeof(int)) {
1190 return false;
1191 }
1192 count += 1;
1193 return true;
1194 });
1195 *count_ptr = count;
1196 return result;
1197 }
1198
1199 void AOTCodeReader::read_dbg_strings(DbgStrings& dbg_strings) {
1200 // Read dbg strings
1201 uint offset = read_position();
1202 uint count = *(uint *)addr(offset);
1203 offset += sizeof(uint);
1204 for (uint i = 0; i < count; i++) {
1205 int string_id = *(uint *)addr(offset);
1206 offset += sizeof(int);
1207 const char* str = (const char*)_cache->address_for_C_string(string_id);
1208 dbg_strings.insert(str);
1209 }
1210 set_read_position(offset);
1211 }
1212 #endif // PRODUCT
1213
1214 //======================= AOTCodeAddressTable ===============
1215
1216 // address table ids for generated routines, external addresses and C
1217 // string addresses are partitioned into positive integer ranges
1218 // defined by the following positive base and max values
1219 // i.e. [_extrs_base, _extrs_base + _extrs_max -1],
1220 // [_blobs_base, _blobs_base + _blobs_max -1],
1221 // ...
1222 // [_c_str_base, _c_str_base + _c_str_max -1],
1223
1224 #define _extrs_max 100
1225 #define _stubs_max 3
1226
1227 #define _shared_blobs_max 20
1228 #define _C1_blobs_max 10
1229 #define _blobs_max (_shared_blobs_max+_C1_blobs_max)
1230 #define _all_max (_extrs_max+_stubs_max+_blobs_max)
1231
1232 #define _extrs_base 0
1233 #define _stubs_base (_extrs_base + _extrs_max)
1234 #define _shared_blobs_base (_stubs_base + _stubs_max)
1235 #define _C1_blobs_base (_shared_blobs_base + _shared_blobs_max)
1236 #define _blobs_end (_shared_blobs_base + _blobs_max)
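// With the values above the id ranges work out to:
//   _extrs        [  0,  99]
//   _stubs        [100, 102]
//   shared blobs  [103, 122]
//   C1 blobs      [123, 132]
// C strings start at _c_str_base == _all_max == 133 (see below).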
1237
1238 #define SET_ADDRESS(type, addr) \
1239 { \
1240 type##_addr[type##_length++] = (address) (addr); \
1241 assert(type##_length <= type##_max, "increase size"); \
1242 }
1243
1244 static bool initializing_extrs = false;
1245
1246 void AOTCodeAddressTable::init_extrs() {
1247 if (_extrs_complete || initializing_extrs) return; // Done already
1248
1249 assert(_blobs_end <= _all_max, "AOTCodeAddress table ranges need adjusting");
1250
1251 initializing_extrs = true;
1252 _extrs_addr = NEW_C_HEAP_ARRAY(address, _extrs_max, mtCode);
1253
1254 _extrs_length = 0;
1255
1256 // Record addresses of VM runtime methods
1257 SET_ADDRESS(_extrs, SharedRuntime::fixup_callers_callsite);
1258 SET_ADDRESS(_extrs, SharedRuntime::handle_wrong_method);
1259 SET_ADDRESS(_extrs, SharedRuntime::handle_wrong_method_abstract);
1260 SET_ADDRESS(_extrs, SharedRuntime::handle_wrong_method_ic_miss);
1261 #if defined(AARCH64) && !defined(ZERO)
1262 SET_ADDRESS(_extrs, JavaThread::aarch64_get_thread_helper);
1263 #endif
1264 {
1265 // Required by Shared blobs
1266 SET_ADDRESS(_extrs, Deoptimization::fetch_unroll_info);
1267 SET_ADDRESS(_extrs, Deoptimization::unpack_frames);
1268 SET_ADDRESS(_extrs, SafepointSynchronize::handle_polling_page_exception);
1269 SET_ADDRESS(_extrs, SharedRuntime::resolve_opt_virtual_call_C);
1270 SET_ADDRESS(_extrs, SharedRuntime::resolve_virtual_call_C);
1271 SET_ADDRESS(_extrs, SharedRuntime::resolve_static_call_C);
1272 SET_ADDRESS(_extrs, SharedRuntime::throw_delayed_StackOverflowError);
1273 SET_ADDRESS(_extrs, SharedRuntime::throw_AbstractMethodError);
1274 SET_ADDRESS(_extrs, SharedRuntime::throw_IncompatibleClassChangeError);
1275 SET_ADDRESS(_extrs, SharedRuntime::throw_NullPointerException_at_call);
1276 }
1277
1278 #ifdef COMPILER1
1279 {
1280 // Required by C1 blobs
1281 SET_ADDRESS(_extrs, static_cast<int (*)(oopDesc*)>(SharedRuntime::dtrace_object_alloc));
1282 SET_ADDRESS(_extrs, SharedRuntime::exception_handler_for_return_address);
1283 SET_ADDRESS(_extrs, SharedRuntime::register_finalizer);
1284 SET_ADDRESS(_extrs, Runtime1::is_instance_of);
1285 SET_ADDRESS(_extrs, Runtime1::exception_handler_for_pc);
1286 SET_ADDRESS(_extrs, Runtime1::check_abort_on_vm_exception);
1287 SET_ADDRESS(_extrs, Runtime1::new_instance);
1288 SET_ADDRESS(_extrs, Runtime1::counter_overflow);
1289 SET_ADDRESS(_extrs, Runtime1::new_type_array);
1290 SET_ADDRESS(_extrs, Runtime1::new_object_array);
1291 SET_ADDRESS(_extrs, Runtime1::new_multi_array);
1292 SET_ADDRESS(_extrs, Runtime1::throw_range_check_exception);
1293 SET_ADDRESS(_extrs, Runtime1::throw_index_exception);
1294 SET_ADDRESS(_extrs, Runtime1::throw_div0_exception);
1295 SET_ADDRESS(_extrs, Runtime1::throw_null_pointer_exception);
1296 SET_ADDRESS(_extrs, Runtime1::throw_array_store_exception);
1297 SET_ADDRESS(_extrs, Runtime1::throw_class_cast_exception);
1298 SET_ADDRESS(_extrs, Runtime1::throw_incompatible_class_change_error);
1299 SET_ADDRESS(_extrs, Runtime1::is_instance_of);
1300 SET_ADDRESS(_extrs, Runtime1::monitorenter);
1301 SET_ADDRESS(_extrs, Runtime1::monitorexit);
1302 SET_ADDRESS(_extrs, Runtime1::deoptimize);
1303 SET_ADDRESS(_extrs, Runtime1::access_field_patching);
1304 SET_ADDRESS(_extrs, Runtime1::move_klass_patching);
1305 SET_ADDRESS(_extrs, Runtime1::move_mirror_patching);
1306 SET_ADDRESS(_extrs, Runtime1::move_appendix_patching);
1307 SET_ADDRESS(_extrs, Runtime1::predicate_failed_trap);
1308 SET_ADDRESS(_extrs, Runtime1::unimplemented_entry);
1309 SET_ADDRESS(_extrs, Thread::current);
1310 SET_ADDRESS(_extrs, CompressedKlassPointers::base_addr());
1311 #ifndef PRODUCT
1312 SET_ADDRESS(_extrs, os::breakpoint);
1313 #endif
1314 }
1315 #endif
1316
1317 #ifdef COMPILER2
1318 {
1319 // Required by C2 blobs
1320 SET_ADDRESS(_extrs, Deoptimization::uncommon_trap);
1321 SET_ADDRESS(_extrs, OptoRuntime::handle_exception_C);
1322 SET_ADDRESS(_extrs, OptoRuntime::new_instance_C);
1323 SET_ADDRESS(_extrs, OptoRuntime::new_array_C);
1324 SET_ADDRESS(_extrs, OptoRuntime::new_array_nozero_C);
1325 SET_ADDRESS(_extrs, OptoRuntime::multianewarray2_C);
1326 SET_ADDRESS(_extrs, OptoRuntime::multianewarray3_C);
1327 SET_ADDRESS(_extrs, OptoRuntime::multianewarray4_C);
1328 SET_ADDRESS(_extrs, OptoRuntime::multianewarray5_C);
1329 SET_ADDRESS(_extrs, OptoRuntime::multianewarrayN_C);
1330 #if INCLUDE_JVMTI
1331 SET_ADDRESS(_extrs, SharedRuntime::notify_jvmti_vthread_start);
1332 SET_ADDRESS(_extrs, SharedRuntime::notify_jvmti_vthread_end);
1333 SET_ADDRESS(_extrs, SharedRuntime::notify_jvmti_vthread_mount);
1334 SET_ADDRESS(_extrs, SharedRuntime::notify_jvmti_vthread_unmount);
1335 #endif
1336 SET_ADDRESS(_extrs, OptoRuntime::complete_monitor_locking_C);
1337 SET_ADDRESS(_extrs, OptoRuntime::monitor_notify_C);
1338 SET_ADDRESS(_extrs, OptoRuntime::monitor_notifyAll_C);
1339 SET_ADDRESS(_extrs, OptoRuntime::rethrow_C);
1340 SET_ADDRESS(_extrs, OptoRuntime::slow_arraycopy_C);
1341 SET_ADDRESS(_extrs, OptoRuntime::register_finalizer_C);
1342 #if defined(AARCH64)
1343 SET_ADDRESS(_extrs, JavaThread::verify_cross_modify_fence_failure);
1344 #endif // AARCH64
1345 }
1346 #endif // COMPILER2
1347
1348 #if INCLUDE_G1GC
1349 SET_ADDRESS(_extrs, G1BarrierSetRuntime::write_ref_field_post_entry);
1350 SET_ADDRESS(_extrs, G1BarrierSetRuntime::write_ref_field_pre_entry);
1351 #endif
1352 #if INCLUDE_SHENANDOAHGC
1353 SET_ADDRESS(_extrs, ShenandoahRuntime::write_ref_field_pre);
1354 SET_ADDRESS(_extrs, ShenandoahRuntime::load_reference_barrier_phantom);
1355 SET_ADDRESS(_extrs, ShenandoahRuntime::load_reference_barrier_phantom_narrow);
1356 #endif
1357 #if INCLUDE_ZGC
1358 SET_ADDRESS(_extrs, ZBarrierSetRuntime::load_barrier_on_phantom_oop_field_preloaded_addr());
1359 #if defined(AMD64)
1360 SET_ADDRESS(_extrs, &ZPointerLoadShift);
1361 #endif
1362 #endif
1363 #ifndef ZERO
1364 #if defined(AMD64) || defined(AARCH64) || defined(RISCV64)
1365 SET_ADDRESS(_extrs, MacroAssembler::debug64);
1366 #endif
1367 #endif // ZERO
1368
1369 _extrs_complete = true;
1370 log_debug(aot, codecache, init)("External addresses recorded");
1371 }
1372
1373 static bool initializing_early_stubs = false;
1374
1375 void AOTCodeAddressTable::init_early_stubs() {
1376 if (_complete || initializing_early_stubs) return; // Done already
1377 initializing_early_stubs = true;
1378 _stubs_addr = NEW_C_HEAP_ARRAY(address, _stubs_max, mtCode);
1379 _stubs_length = 0;
1380 SET_ADDRESS(_stubs, StubRoutines::forward_exception_entry());
1381
1382 {
1383 // Required by C1 blobs
1384 #if defined(AMD64) && !defined(ZERO)
1385 SET_ADDRESS(_stubs, StubRoutines::x86::double_sign_flip());
1386 SET_ADDRESS(_stubs, StubRoutines::x86::d2l_fixup());
1387 #endif // AMD64
1388 }
1389
1390 _early_stubs_complete = true;
1391 log_info(aot, codecache, init)("Early stubs recorded");
1392 }
1393
1394 static bool initializing_shared_blobs = false;
1395
1396 void AOTCodeAddressTable::init_shared_blobs() {
1397 if (_complete || initializing_shared_blobs) return; // Done already
1398 initializing_shared_blobs = true;
1399 address* blobs_addr = NEW_C_HEAP_ARRAY(address, _blobs_max, mtCode);
1400 _shared_blobs_addr = blobs_addr;
1401 _C1_blobs_addr = _shared_blobs_addr + _shared_blobs_max;
1402 _shared_blobs_length = _C1_blobs_length = 0;
1403
1404 // clear the address table
1405 memset(blobs_addr, 0, sizeof(address)* _blobs_max);
1406
1407 // Record addresses of generated code blobs
1408 SET_ADDRESS(_shared_blobs, SharedRuntime::get_handle_wrong_method_stub());
1409 SET_ADDRESS(_shared_blobs, SharedRuntime::get_ic_miss_stub());
1410 SET_ADDRESS(_shared_blobs, SharedRuntime::deopt_blob()->unpack());
1411 SET_ADDRESS(_shared_blobs, SharedRuntime::deopt_blob()->unpack_with_exception());
1412 SET_ADDRESS(_shared_blobs, SharedRuntime::deopt_blob()->unpack_with_reexecution());
1413 SET_ADDRESS(_shared_blobs, SharedRuntime::deopt_blob()->unpack_with_exception_in_tls());
1414 #if INCLUDE_JVMCI
1415 if (EnableJVMCI) {
1416 SET_ADDRESS(_shared_blobs, SharedRuntime::deopt_blob()->uncommon_trap());
1417 SET_ADDRESS(_shared_blobs, SharedRuntime::deopt_blob()->implicit_exception_uncommon_trap());
1418 }
1419 #endif
1420
1421 _shared_blobs_complete = true;
1422 log_debug(aot, codecache, init)("Early shared blobs recorded");
1423 _complete = true;
1424 }
1425
1426 void AOTCodeAddressTable::init_early_c1() {
1427 #ifdef COMPILER1
1428 // Runtime1 Blobs
1429 for (int i = 0; i <= (int)C1StubId::forward_exception_id; i++) {
1430 C1StubId id = (C1StubId)i;
1431 if (Runtime1::blob_for(id) == nullptr) {
1432 log_info(aot, codecache, init)("C1 blob %s is missing", Runtime1::name_for(id));
1433 continue;
1434 }
1435 if (Runtime1::entry_for(id) == nullptr) {
1436 log_info(aot, codecache, init)("C1 blob %s is missing entry", Runtime1::name_for(id));
1437 continue;
1438 }
1439 address entry = Runtime1::entry_for(id);
1440 SET_ADDRESS(_C1_blobs, entry);
1441 }
1442 #endif // COMPILER1
1443 assert(_C1_blobs_length <= _C1_blobs_max, "increase _C1_blobs_max to %d", _C1_blobs_length);
1444 _early_c1_complete = true;
1445 }
1446
1447 #undef SET_ADDRESS
1448
1449 AOTCodeAddressTable::~AOTCodeAddressTable() {
1450 if (_extrs_addr != nullptr) {
1451 FREE_C_HEAP_ARRAY(address, _extrs_addr);
1452 }
1453 if (_shared_blobs_addr != nullptr) {
1454 FREE_C_HEAP_ARRAY(address, _shared_blobs_addr);
1455 }
1456 }
1457
1458 #ifdef PRODUCT
1459 #define MAX_STR_COUNT 200
1460 #else
1461 #define MAX_STR_COUNT 500
1462 #endif
1463 #define _c_str_max MAX_STR_COUNT
1464 static const int _c_str_base = _all_max;
1465
1466 static const char* _C_strings_in[MAX_STR_COUNT] = {nullptr}; // Incoming strings
1467 static const char* _C_strings[MAX_STR_COUNT] = {nullptr}; // Our duplicates
1468 static int _C_strings_count = 0;
1469 static int _C_strings_s[MAX_STR_COUNT] = {0};
1470 static int _C_strings_id[MAX_STR_COUNT] = {0};
1471 static int _C_strings_used = 0;
1472
1603 fatal("AOT Code Cache VM runtime addresses table is not complete");
1604 }
1605 if (idx == -1) {
1606 return (address)-1;
1607 }
1608 uint id = (uint)idx;
1609 // Special case: symbols encoded as a distance relative to os::init
1610 if (id > (_c_str_base + _c_str_max)) {
1611 return (address)os::init + idx;
1612 }
1613 if (idx < 0) {
1614 fatal("Incorrect id %d for AOT Code Cache addresses table", id);
1615 }
1616 // no need to compare unsigned id against 0
1617 if (/* id >= _extrs_base && */ id < _extrs_length) {
1618 return _extrs_addr[id - _extrs_base];
1619 }
1620 if (id >= _stubs_base && id < _stubs_base + _stubs_length) {
1621 return _stubs_addr[id - _stubs_base];
1622 }
1623 if (id >= _shared_blobs_base && id < _shared_blobs_base + _shared_blobs_length) {
1624 return _shared_blobs_addr[id - _shared_blobs_base];
1625 }
1626 if (id >= _C1_blobs_base && id < _C1_blobs_base + _C1_blobs_length) {
1627 return _C1_blobs_addr[id - _C1_blobs_base];
1628 }
1629 if (id >= _c_str_base && id < (_c_str_base + (uint)_C_strings_count)) {
1630 return address_for_C_string(id - _c_str_base);
1631 }
1632 fatal("Incorrect id %d for AOT Code Cache addresses table", id);
1633 return nullptr;
1634 }
1635
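// Resolution order when encoding an address: the C string table first, then
// stub addresses (if the address is inside StubRoutines), then code blob
// entry points, then external runtime functions; as a last resort an address
// inside the VM library is encoded as its distance from os::init (decoded
// by address_for_id() above).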
1636 int AOTCodeAddressTable::id_for_address(address addr, RelocIterator reloc, CodeBlob* code_blob) {
1637 if (!_extrs_complete) {
1638 fatal("AOT Code Cache VM runtime addresses table is not complete");
1639 }
1640 int id = -1;
1641 if (addr == (address)-1) { // Static call stub has jump to itself
1642 return id;
1643 }
1644 // Search for a C string
1645 id = id_for_C_string(addr);
1646 if (id >= 0) {
1647 return id + _c_str_base;
1648 }
1649 if (StubRoutines::contains(addr)) {
1650 // Search in stubs
1651 id = search_address(addr, _stubs_addr, _stubs_length);
1652 if (id < 0) {
1653 StubCodeDesc* desc = StubCodeDesc::desc_for(addr);
1654 if (desc == nullptr) {
1655 desc = StubCodeDesc::desc_for(addr + frame::pc_return_offset);
1656 }
1657 const char* sub_name = (desc != nullptr) ? desc->name() : "<unknown>";
1658 fatal("Address " INTPTR_FORMAT " for Stub:%s is missing in AOT Code Cache addresses table", p2i(addr), sub_name);
1659 } else {
1660 return id + _stubs_base;
1661 }
1662 } else {
1663 CodeBlob* cb = CodeCache::find_blob(addr);
1664 if (cb != nullptr) {
1665 // Search in code blobs
1666 int id_base = _shared_blobs_base;
1667 id = search_address(addr, _shared_blobs_addr, _blobs_max);
1668 if (id < 0) {
1669 fatal("Address " INTPTR_FORMAT " for Blob:%s is missing in AOT Code Cache addresses table", p2i(addr), cb->name());
1670 } else {
1671 return id_base + id;
1672 }
1673 } else {
1674 // Search in runtime functions
1675 id = search_address(addr, _extrs_addr, _extrs_length);
1676 if (id < 0) {
1677 ResourceMark rm;
1678 const int buflen = 1024;
1679 char* func_name = NEW_RESOURCE_ARRAY(char, buflen);
1680 int offset = 0;
1681 if (os::dll_address_to_function_name(addr, func_name, buflen, &offset)) {
1682 if (offset > 0) {
1683 // Could be address of C string
1684 uint dist = (uint)pointer_delta(addr, (address)os::init, 1);
1685 log_debug(aot, codecache)("Address " INTPTR_FORMAT " (offset %d) for runtime target '%s' is missing in AOT Code Cache addresses table",
1686 p2i(addr), dist, (const char*)addr);
1687 assert(dist > (uint)(_all_max + MAX_STR_COUNT), "change encoding of distance");
1688 return dist;
1689 }
1690 reloc.print_current_on(tty);
1691 code_blob->print_on(tty);
1692 code_blob->print_code_on(tty);
1693 fatal("Address " INTPTR_FORMAT " for runtime target '%s+%d' is missing in AOT Code Cache addresses table", p2i(addr), func_name, offset);
1694 } else {
1695 reloc.print_current_on(tty);
1696 code_blob->print_on(tty);
1697 code_blob->print_code_on(tty);
1698 os::find(addr, tty);
1699 fatal("Address " INTPTR_FORMAT " for <unknown>/('%s') is missing in AOT Code Cache addresses table", p2i(addr), (const char*)addr);
1700 }
1701 } else {
1702 return _extrs_base + id;
1703 }
1704 }
1705 }
1706 return id;
1707 }
1708
1709 void AOTCodeCache::print_on(outputStream* st) {
1710 AOTCodeCache* cache = open_for_use();
1711 if (cache != nullptr) {
1712 uint count = cache->_load_header->entries_count();
1713 uint* search_entries = (uint*)cache->addr(cache->_load_header->entries_offset()); // [id, index]
1714 AOTCodeEntry* load_entries = (AOTCodeEntry*)(search_entries + 2 * count);
1715
1716 for (uint i = 0; i < count; i++) {
1717 // Use search_entries[] to order output
1718 int index = search_entries[2*i + 1];
1719 AOTCodeEntry* entry = &(load_entries[index]);
1720
1721 uint entry_position = entry->offset();
1722 uint name_offset = entry->name_offset() + entry_position;
1723 const char* saved_name = cache->addr(name_offset);
1724
1725 st->print_cr("%4u: entry_idx:%4u Kind:%u Id:%u size=%u '%s'",
1726 i, index, entry->kind(), entry->id(), entry->size(), saved_name);
1727 }
1728 } else {
1729 st->print_cr("failed to map code cache");
1730 }
1731 }
1 /*
2 * Copyright (c) 2023, 2025, Oracle and/or its affiliates. All rights reserved.
3 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
4 *
5 * This code is free software; you can redistribute it and/or modify it
6 * under the terms of the GNU General Public License version 2 only, as
7 * published by the Free Software Foundation.
8 *
9 * This code is distributed in the hope that it will be useful, but WITHOUT
10 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
11 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
12 * version 2 for more details (a copy is included in the LICENSE file that
13 * accompanied this code).
14 *
15 * You should have received a copy of the GNU General Public License version
16 * 2 along with this work; if not, write to the Free Software Foundation,
17 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
18 *
19 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
20 * or visit www.oracle.com if you need additional information or have any
21 * questions.
22 *
23 */
24
25
26 #include "asm/macroAssembler.hpp"
27 #include "cds/aotCacheAccess.hpp"
28 #include "cds/cds_globals.hpp"
29 #include "cds/cdsConfig.hpp"
30 #include "cds/heapShared.hpp"
31 #include "cds/metaspaceShared.hpp"
32 #include "ci/ciConstant.hpp"
33 #include "ci/ciEnv.hpp"
34 #include "ci/ciField.hpp"
35 #include "ci/ciMethod.hpp"
36 #include "ci/ciMethodData.hpp"
37 #include "ci/ciObject.hpp"
38 #include "ci/ciUtilities.inline.hpp"
39 #include "classfile/javaAssertions.hpp"
40 #include "classfile/stringTable.hpp"
41 #include "classfile/symbolTable.hpp"
42 #include "classfile/systemDictionary.hpp"
43 #include "classfile/vmClasses.hpp"
44 #include "classfile/vmIntrinsics.hpp"
45 #include "code/aotCodeCache.hpp"
46 #include "code/codeBlob.hpp"
47 #include "code/codeCache.hpp"
48 #include "code/oopRecorder.inline.hpp"
49 #include "compiler/abstractCompiler.hpp"
50 #include "compiler/compilationPolicy.hpp"
51 #include "compiler/compileBroker.hpp"
52 #include "compiler/compileTask.hpp"
53 #include "gc/g1/g1BarrierSetRuntime.hpp"
54 #include "gc/shared/gcConfig.hpp"
55 #include "logging/logStream.hpp"
56 #include "memory/memoryReserver.hpp"
57 #include "memory/universe.hpp"
58 #include "oops/klass.inline.hpp"
59 #include "oops/method.inline.hpp"
60 #include "oops/trainingData.hpp"
61 #include "prims/jvmtiThreadState.hpp"
62 #include "runtime/atomic.hpp"
63 #include "runtime/deoptimization.hpp"
64 #include "runtime/flags/flagSetting.hpp"
65 #include "runtime/globals_extension.hpp"
66 #include "runtime/handles.inline.hpp"
67 #include "runtime/java.hpp"
68 #include "runtime/jniHandles.inline.hpp"
69 #include "runtime/mutexLocker.hpp"
70 #include "runtime/os.inline.hpp"
71 #include "runtime/sharedRuntime.hpp"
72 #include "runtime/stubCodeGenerator.hpp"
73 #include "runtime/stubRoutines.hpp"
74 #include "runtime/timerTrace.hpp"
75 #include "runtime/threadIdentifier.hpp"
76 #include "utilities/copy.hpp"
77 #include "utilities/ostream.hpp"
78 #include "utilities/spinYield.hpp"
79 #ifdef COMPILER1
80 #include "c1/c1_Runtime1.hpp"
81 #include "c1/c1_LIRAssembler.hpp"
82 #include "gc/shared/c1/barrierSetC1.hpp"
83 #include "gc/g1/c1/g1BarrierSetC1.hpp"
84 #if INCLUDE_SHENANDOAHGC
85 #include "gc/shenandoah/c1/shenandoahBarrierSetC1.hpp"
86 #endif // INCLUDE_SHENANDOAHGC
87 #include "gc/z/c1/zBarrierSetC1.hpp"
88 #endif // COMPILER1
89 #ifdef COMPILER2
90 #include "opto/runtime.hpp"
91 #endif
92 #if INCLUDE_JVMCI
93 #include "jvmci/jvmci.hpp"
94 #endif
95 #if INCLUDE_G1GC
96 #include "gc/g1/g1BarrierSetRuntime.hpp"
97 #endif
98 #if INCLUDE_SHENANDOAHGC
99 #include "gc/shenandoah/shenandoahRuntime.hpp"
100 #endif
101 #if INCLUDE_ZGC
102 #include "gc/z/zBarrierSetRuntime.hpp"
103 #endif
104
105 #include <sys/stat.h>
106 #include <errno.h>
107
108 const char* aot_code_entry_kind_name[] = {
109 #define DECL_KIND_STRING(kind) XSTR(kind),
110 DO_AOTCODEENTRY_KIND(DECL_KIND_STRING)
111 #undef DECL_KIND_STRING
112 };
113
114 static elapsedTimer _t_totalLoad;
115 static elapsedTimer _t_totalRegister;
116 static elapsedTimer _t_totalFind;
117 static elapsedTimer _t_totalStore;
118
119 static bool enable_timers() {
120 return CITime || log_is_enabled(Info, init);
121 }
122
123 static void report_load_failure() {
124 if (AbortVMOnAOTCodeFailure) {
125 vm_exit_during_initialization("Unable to use AOT Code Cache.", nullptr);
126 }
127 log_info(aot, codecache, init)("Unable to use AOT Code Cache.");
128 AOTCodeCache::disable_caching();
129 }
130
131 static void report_store_failure() {
132 if (AbortVMOnAOTCodeFailure) {
133 tty->print_cr("Unable to create AOT Code Cache.");
134 vm_abort(false);
135 }
136 log_info(aot, codecache, exit)("Unable to create AOT Code Cache.");
137 AOTCodeCache::disable_caching();
138 }
139
140 // The sequence of AOT code caching flag and parameter settings.
141 // (A simplified call-order sketch follows this comment block.)
142 // 1. The initial AOT code caching flags setting is done
143 //    during the call to CDSConfig::check_vm_args_consistency().
144 //
145 // 2. The earliest AOT code state check is done in compilationPolicy_init(),
146 //    where we set the number of compiler threads for the AOT assembly phase.
147 //
148 // 3. We determine the presence of AOT code in the AOT Cache in
149 //    MetaspaceShared::open_static_archive(), which is called
150 //    after compilationPolicy_init() but before codeCache_init().
151 //
152 // 4. AOTCodeCache::initialize() is called during universe_init()
153 //    and does the final AOT state and flag settings.
154 //
155 // 5. Finally, AOTCodeCache::init2() is called after universe_init(),
156 //    when all GC settings are finalized.
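//
// Simplified call-order sketch of the steps above (illustration only; the
// functions named here are exactly those referenced in the comments above):
//
//   CDSConfig::check_vm_args_consistency();   // (1) initial flag setup
//   compilationPolicy_init();                 // (2) compiler thread count for the AOT assembly phase
//   MetaspaceShared::open_static_archive();   // (3) detect AOT code in the AOT cache
//   universe_init();                          // (4) calls AOTCodeCache::initialize()
//   AOTCodeCache::init2();                    // (5) after GC settings are finalized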
157
158 // The following methods determine which action to take with AOT code depending
159 // on the phase of the AOT process: assembly or production.
160
161 bool AOTCodeCache::is_dumping_adapter() {
162 return AOTAdapterCaching && is_on_for_dump();
163 }
164
165 bool AOTCodeCache::is_using_adapter() {
166 return AOTAdapterCaching && is_on_for_use();
167 }
168
169 bool AOTCodeCache::is_dumping_stub() {
170 return AOTStubCaching && is_on_for_dump();
171 }
172
173 bool AOTCodeCache::is_using_stub() {
174 return AOTStubCaching && is_on_for_use();
175 }
176
177 bool AOTCodeCache::is_dumping_code() {
178 return AOTCodeCaching && is_on_for_dump();
179 }
180
181 bool AOTCodeCache::is_using_code() {
182 return AOTCodeCaching && is_on_for_use();
183 }
184
185 void AOTCodeCache::enable_caching() {
186 FLAG_SET_ERGO_IF_DEFAULT(AOTCodeCaching, true);
187 FLAG_SET_ERGO_IF_DEFAULT(AOTStubCaching, true);
188 FLAG_SET_ERGO_IF_DEFAULT(AOTAdapterCaching, true);
189 }
190
191 void AOTCodeCache::disable_caching() {
192 FLAG_SET_ERGO(AOTCodeCaching, false);
193 FLAG_SET_ERGO(AOTStubCaching, false);
194 FLAG_SET_ERGO(AOTAdapterCaching, false);
195 }
196
197 bool AOTCodeCache::is_caching_enabled() {
198 return AOTCodeCaching || AOTStubCaching || AOTAdapterCaching;
199 }
200
201 static uint32_t encode_id(AOTCodeEntry::Kind kind, int id) {
202 assert(AOTCodeEntry::is_valid_entry_kind(kind), "invalid AOTCodeEntry kind %d", (int)kind);
203 // There can be a conflict of id between an Adapter and *Blob, but that should not cause any functional issue
204 // because both id and kind are used to find an entry, and that combination should be unique.
205 if (kind == AOTCodeEntry::Adapter) {
206 return id;
207 } else if (kind == AOTCodeEntry::SharedBlob) {
208 return id;
209 } else if (kind == AOTCodeEntry::C1Blob) {
210 return (int)SharedStubId::NUM_STUBIDS + id;
211 } else {
212 // kind must be AOTCodeEntry::C2Blob
213 return (int)SharedStubId::NUM_STUBIDS + COMPILER1_PRESENT((int)C1StubId::NUM_STUBIDS) + id;
214 }
215 }
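// Worked example of the combined id space produced by encode_id() (the ids used
// here are illustrative; the actual NUM_STUBIDS values depend on platform and build):
//   Adapter    id 7 -> 7
//   SharedBlob id 7 -> 7                                      (same value, different kind)
//   C1Blob     id 3 -> (int)SharedStubId::NUM_STUBIDS + 3
//   C2Blob     id 3 -> (int)SharedStubId::NUM_STUBIDS + COMPILER1_PRESENT((int)C1StubId::NUM_STUBIDS) + 3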
216
217 static uint _max_aot_code_size = 0;
218 uint AOTCodeCache::max_aot_code_size() {
219 return _max_aot_code_size;
220 }
221
222 bool AOTCodeCache::is_C3_on() {
223 #if INCLUDE_JVMCI
224 if (UseJVMCICompiler) {
225 return (AOTCodeCaching) && UseC2asC3;
226 }
227 #endif
228 return false;
229 }
230
231 bool AOTCodeCache::is_code_load_thread_on() {
232 // We cannot trust AOTCodeCache status here, due to bootstrapping circularity.
233 // Compilation policy init runs before AOT cache is fully initialized, so the
234 // normal AOT cache status check would always fail.
235 // See: https://bugs.openjdk.org/browse/JDK-8358690
236 // return UseCodeLoadThread && is_using_code();
237 return UseCodeLoadThread && AOTCodeCaching && CDSConfig::is_using_archive();
238 }
239
240 bool AOTCodeCache::allow_const_field(ciConstant& value) {
241 return !is_on() || !is_dumping_code() // Restrict only when we generate cache
242 // Can not trust primitive too || !is_reference_type(value.basic_type())
243 // May disable this too for now || is_reference_type(value.basic_type()) && value.as_object()->should_be_constant()
244 ;
245 }
246
247 // It is called from MetaspaceShared::initialize_shared_spaces()
248 // which is called from universe_init().
249 // At this point all AOT class linking settings are finalized
250 // and the AOT cache is open, so we can map the AOT code region.
251 void AOTCodeCache::initialize() {
252 #if defined(ZERO) || !(defined(AMD64) || defined(AARCH64))
253 log_info(aot, codecache, init)("AOT Code Cache is not supported on this platform.");
254 disable_caching();
255 return;
256 #else
257 if (FLAG_IS_DEFAULT(AOTCache)) {
258 log_info(aot, codecache, init)("AOT Code Cache is not used: AOTCache is not specified.");
259 disable_caching();
260 return; // AOTCache must be specified to dump and use AOT code
261 }
262
263 // Disable stubs caching until JDK-8357398 is fixed.
264 FLAG_SET_ERGO(AOTStubCaching, false);
265
266 if (VerifyOops) {
267 // Disable AOT stubs caching when VerifyOops flag is on.
268 // Verify oops code generated a lot of C strings which overflow
269 // AOT C string table (which has fixed size).
270 // AOT C string table will be reworked later to handle such cases.
271 //
272 // Note: AOT adapters are not affected - they don't have oop operations.
273 log_info(aot, codecache, init)("AOT Stubs Caching is not supported with VerifyOops.");
274 FLAG_SET_ERGO(AOTStubCaching, false);
275 }
276
277 bool is_dumping = false;
278 bool is_using = false;
279 if (CDSConfig::is_dumping_final_static_archive() && CDSConfig::is_dumping_aot_linked_classes()) {
280 enable_caching();
281 is_dumping = is_caching_enabled();
282 } else if (CDSConfig::is_using_archive() && CDSConfig::is_using_aot_linked_classes()) {
283 enable_caching();
284 is_using = is_caching_enabled();
285 } else {
286 log_info(aot, codecache, init)("AOT Code Cache is not used: AOT Class Linking is not used.");
287 disable_caching();
288 return; // nothing to do
289 }
290 if (!(is_dumping || is_using)) {
291 disable_caching();
292 return; // AOT code caching disabled on command line
293 }
294 if (AOTCodeCaching) {
295 if (FLAG_IS_DEFAULT(ClassInitBarrierMode)) {
296 FLAG_SET_ERGO(ClassInitBarrierMode, 1);
297 }
298 } else if (ClassInitBarrierMode > 0) {
299 log_info(aot, codecache, init)("Set ClassInitBarrierMode to 0 because AOTCodeCaching is false.");
300 FLAG_SET_ERGO(ClassInitBarrierMode, 0);
301 }
302 // Reserve the AOT Cache region when we are dumping AOT code.
303 _max_aot_code_size = AOTCodeMaxSize;
304 if (is_dumping && !FLAG_IS_DEFAULT(AOTCodeMaxSize)) {
305 if (!is_aligned(AOTCodeMaxSize, os::vm_allocation_granularity())) {
306 _max_aot_code_size = align_up(AOTCodeMaxSize, os::vm_allocation_granularity());
307 log_debug(aot,codecache,init)("Max AOT Code Cache size is aligned up to %uK", (int)(max_aot_code_size()/K));
308 }
309 }
310 size_t aot_code_size = is_using ? AOTCacheAccess::get_aot_code_region_size() : 0;
311 if (is_using && aot_code_size == 0) {
312 log_info(aot, codecache, init)("AOT Code Cache is empty");
313 disable_caching();
314 return;
315 }
316 if (!open_cache(is_dumping, is_using)) {
317 if (is_using) {
318 report_load_failure();
319 } else {
320 report_store_failure();
321 }
322 return;
323 }
324 if (is_dumping) {
325 FLAG_SET_DEFAULT(FoldStableValues, false);
326 FLAG_SET_DEFAULT(ForceUnreachable, true);
327 }
328 FLAG_SET_DEFAULT(DelayCompilerStubsGeneration, false);
329 #endif // defined(ZERO) || !(defined(AMD64) || defined(AARCH64))
330 }
331
332 // It is called after universe_init() when all GC settings are finalized.
333 void AOTCodeCache::init2() {
334 if (!is_on()) {
335 return;
336 }
337 // After Universe initialized
338 BarrierSet* bs = BarrierSet::barrier_set();
339 if (bs->is_a(BarrierSet::CardTableBarrierSet)) {
340 address byte_map_base = ci_card_table_address_as<address>();
341 if (is_on_for_dump() && !external_word_Relocation::can_be_relocated(byte_map_base)) {
342 // Bail out since we can't encode card table base address with relocation
343 log_warning(aot, codecache, init)("Can't create AOT Code Cache because card table base address is not relocatable: " INTPTR_FORMAT, p2i(byte_map_base));
344 close();
345 report_load_failure();
346 return;
347 }
348 }
349 if (!verify_vm_config()) {
350 close();
351 report_load_failure();
352 return;
353 }
354
355 // initialize aot runtime constants as appropriate to this runtime
356 AOTRuntimeConstants::initialize_from_runtime();
357 // initialize the table of external routines so we can save
358 // generated code blobs that reference them
359 init_extrs_table();
360 // initialize the table of initial stubs so we can save
361 // generated code blobs that reference them
362 init_early_stubs_table();
363 }
364
365 AOTCodeCache* AOTCodeCache::_cache = nullptr;
366
367 bool AOTCodeCache::open_cache(bool is_dumping, bool is_using) {
368 AOTCodeCache* cache = new AOTCodeCache(is_dumping, is_using);
369 if (cache->failed()) {
370 delete cache;
371 _cache = nullptr;
372 return false;
373 }
374 _cache = cache;
375 return true;
376 }
377
378 static void print_helper(nmethod* nm, outputStream* st) {
379 AOTCodeCache::iterate([&](AOTCodeEntry* e) {
380 if (e->method() == nm->method()) {
381 ResourceMark rm;
382 stringStream ss;
383 ss.print("A%s%d", (e->for_preload() ? "P" : ""), e->comp_level());
384 ss.print("[%s%s%s]",
385 (e->is_loaded() ? "L" : ""),
386 (e->load_fail() ? "F" : ""),
387 (e->not_entrant() ? "I" : ""));
388 ss.print("#%d", e->comp_id());
389
390 st->print(" %s", ss.freeze());
391 }
392 });
393 }
394
395 void AOTCodeCache::close() {
396 if (is_on()) {
397 delete _cache; // Free memory
398 _cache = nullptr;
399 }
400 }
401
402 class CachedCodeDirectory : public CachedCodeDirectoryInternal {
403 public:
404 uint _aot_code_size;
405 char* _aot_code_data;
406
407 void set_aot_code_data(uint size, char* aot_data) {
408 _aot_code_size = size;
409 AOTCacheAccess::set_pointer(&_aot_code_data, aot_data);
410 }
411
412 static CachedCodeDirectory* create();
413 };
414
415 // Storing AOT code in the cached code region of AOT Cache:
416 //
417 // [1] Use CachedCodeDirectory to keep track of all data related to cached code.
418 // E.g., you can build a hashtable to record what methods have been archived.
419 //
420 // [2] Memory for all data for cached code, including CachedCodeDirectory, should be
421 // allocated using AOTCacheAccess::allocate_aot_code_region().
422 //
423 // [3] CachedCodeDirectory must be the very first allocation.
424 //
425 // [4] Two kinds of pointer can be stored:
426 // - A pointer p that points to metadata. AOTCacheAccess::can_generate_aot_code(p) must return true.
427 // - A pointer to a buffer returned by AOTCacheAccess::allocate_aot_code_region().
428 // (It's OK to point to an interior location within this buffer).
429 // Such pointers must be stored using AOTCacheAccess::set_pointer()
430 //
431 // The buffers allocated by AOTCacheAccess::allocate_aot_code_region() are in a contiguous region. At runtime, this
432 // region is mapped to the process address space. All the pointers in this buffer are relocated as necessary
433 // (e.g., to account for the runtime location of the CodeCache).
434 //
435 // This is always at the very beginning of the mmapped CDS "cc" (cached code) region
436 static CachedCodeDirectory* _aot_code_directory = nullptr;
437
438 CachedCodeDirectory* CachedCodeDirectory::create() {
439 assert(AOTCacheAccess::is_aot_code_region_empty(), "must be");
440 CachedCodeDirectory* dir = (CachedCodeDirectory*)AOTCacheAccess::allocate_aot_code_region(sizeof(CachedCodeDirectory));
441 dir->dumptime_init_internal();
442 return dir;
443 }
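// Illustrative sketch of the protocol described above, mirroring what
// finish_write() does later in this file ('size' and 'start' are placeholders):
//
//   _aot_code_directory = CachedCodeDirectory::create();                   // [3] very first allocation
//   char* start = (char*)AOTCacheAccess::allocate_aot_code_region(size);   // [2] buffer for the code data
//   ... copy archived code, strings and entry tables into 'start' ...
//   _aot_code_directory->set_aot_code_data(size, start);                   // pointer stored via set_pointer() [4]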
444
445 #define DATA_ALIGNMENT HeapWordSize
446
447 AOTCodeCache::AOTCodeCache(bool is_dumping, bool is_using) :
448 _load_header(nullptr),
449 _load_buffer(nullptr),
450 _store_buffer(nullptr),
451 _C_store_buffer(nullptr),
452 _write_position(0),
453 _load_size(0),
454 _store_size(0),
455 _for_use(is_using),
456 _for_dump(is_dumping),
457 _closing(false),
458 _failed(false),
459 _lookup_failed(false),
460 _for_preload(false),
461 _gen_preload_code(false),
462 _has_clinit_barriers(false),
463 _table(nullptr),
464 _load_entries(nullptr),
465 _search_entries(nullptr),
466 _store_entries(nullptr),
467 _C_strings_buf(nullptr),
468 _store_entries_cnt(0),
469 _compile_id(0),
470 _comp_level(0)
471 {
472 // Read the header at the beginning of the cache
473 if (_for_use) {
474 // Read cache
475 size_t load_size = AOTCacheAccess::get_aot_code_region_size();
476 ReservedSpace rs = MemoryReserver::reserve(load_size, mtCode);
477 if (!rs.is_reserved()) {
478 log_warning(aot, codecache, init)("Failed to reserve %u bytes of memory for mapping AOT code region into AOT Code Cache", (uint)load_size);
479 set_failed();
480 return;
481 }
482 if (!AOTCacheAccess::map_aot_code_region(rs)) {
483 log_warning(aot, codecache, init)("Failed to read/mmap cached code region into AOT Code Cache");
484 set_failed();
485 return;
486 }
487 _aot_code_directory = (CachedCodeDirectory*)rs.base();
488 _aot_code_directory->runtime_init_internal();
489
490 _load_size = _aot_code_directory->_aot_code_size;
491 _load_buffer = _aot_code_directory->_aot_code_data;
492 assert(is_aligned(_load_buffer, DATA_ALIGNMENT), "load_buffer is not aligned");
493 log_info(aot, codecache, init)("Mapped %u bytes at address " INTPTR_FORMAT " from AOT Code Cache", _load_size, p2i(_load_buffer));
494
495 _load_header = (AOTCodeCache::Header*)addr(0);
496 if (!_load_header->verify_config(_load_size)) {
497 set_failed();
498 return;
499 }
500 log_info (aot, codecache, init)("Loaded %u AOT code entries from AOT Code Cache", _load_header->entries_count());
501 log_debug(aot, codecache, init)(" Adapters: total=%u", _load_header->adapters_count());
502 log_debug(aot, codecache, init)(" Shared Blobs: total=%u", _load_header->shared_blobs_count());
503 log_debug(aot, codecache, init)(" C1 Blobs: total=%u", _load_header->C1_blobs_count());
504 log_debug(aot, codecache, init)(" C2 Blobs: total=%u", _load_header->C2_blobs_count());
505 log_debug(aot, codecache, init)(" Stubs: total=%u", _load_header->stubs_count());
506 log_debug(aot, codecache, init)(" Nmethods: total=%u", _load_header->nmethods_count());
507 log_debug(aot, codecache, init)(" AOT code cache size: %u bytes", _load_header->cache_size());
508
509 // Read strings
510 load_strings();
511 }
512 if (_for_dump) {
513 _gen_preload_code = (ClassInitBarrierMode > 0);
514
515 _C_store_buffer = NEW_C_HEAP_ARRAY(char, max_aot_code_size() + DATA_ALIGNMENT, mtCode);
516 _store_buffer = align_up(_C_store_buffer, DATA_ALIGNMENT);
517 // Entries are allocated at the end of the buffer in reverse order (as on a stack).
518 _store_entries = (AOTCodeEntry*)align_up(_C_store_buffer + max_aot_code_size(), DATA_ALIGNMENT);
519 log_debug(aot, codecache, init)("Allocated store buffer at address " INTPTR_FORMAT " of size %u", p2i(_store_buffer), max_aot_code_size());
520 }
521 _table = new AOTCodeAddressTable();
522 }
523
524 void AOTCodeCache::invalidate(AOTCodeEntry* entry) {
525 // This could be executed concurrently
526 if (entry != nullptr && is_on()) { // Request could come after cache is closed.
527 _cache->invalidate_entry(entry);
528 }
529 }
530
531 bool AOTCodeCache::is_loaded(AOTCodeEntry* entry) {
532 if (is_on() && _cache->cache_buffer() != nullptr) {
533 return (uint)((char*)entry - _cache->cache_buffer()) < _cache->load_size();
534 }
535 return false;
536 }
537
538 void AOTCodeCache::init_extrs_table() {
539 AOTCodeAddressTable* table = addr_table();
540 if (table != nullptr) {
541 table->init_extrs();
542 }
543 }
544
545 void AOTCodeCache::init_early_stubs_table() {
546 AOTCodeAddressTable* table = addr_table();
547 if (table != nullptr) {
548 table->init_early_stubs();
549 }
550 }
551
552 void AOTCodeCache::init_shared_blobs_table() {
553 AOTCodeAddressTable* table = addr_table();
554 if (table != nullptr) {
555 table->init_shared_blobs();
556 }
557 }
558
559 void AOTCodeCache::init_stubs_table() {
560 AOTCodeAddressTable* table = addr_table();
561 if (table != nullptr) {
562 table->init_stubs();
563 }
564 }
565
566 void AOTCodeCache::init_early_c1_table() {
567 AOTCodeAddressTable* table = addr_table();
568 if (table != nullptr) {
569 table->init_early_c1();
570 }
571 }
572
573 void AOTCodeCache::init_c1_table() {
574 AOTCodeAddressTable* table = addr_table();
575 if (table != nullptr) {
576 table->init_c1();
577 }
578 }
579
580 void AOTCodeCache::init_c2_table() {
581 AOTCodeAddressTable* table = addr_table();
582 if (table != nullptr) {
583 table->init_c2();
584 }
585 }
586
587 AOTCodeCache::~AOTCodeCache() {
588 if (_closing) {
589 return; // Already closed
590 }
591 // Stop any further access to cache.
592 // Checked on entry to load_nmethod() and store_nmethod().
593 _closing = true;
594 if (_for_use) {
595 // Wait for all load_nmethod() calls to finish.
596 wait_for_no_nmethod_readers();
597 }
598 // Prevent writing code into the cache while we are closing it.
599 // This lock is held by ciEnv::register_method() which calls store_nmethod().
600 MutexLocker ml(Compile_lock);
601 if (for_dump()) { // Finalize cache
602 finish_write();
603 }
604 _load_buffer = nullptr;
605 if (_C_store_buffer != nullptr) {
606 FREE_C_HEAP_ARRAY(char, _C_store_buffer);
607 _C_store_buffer = nullptr;
608 _store_buffer = nullptr;
609 }
610 if (_table != nullptr) {
611 delete _table;
612 _table = nullptr;
613 }
614 }
615
616 void AOTCodeCache::Config::record() {
617 _flags = 0;
618 #ifdef ASSERT
619 _flags |= debugVM;
#endif
  if (UseCompressedOops) {
    _flags |= compressedOops;
  }
  if (UseCompressedClassPointers) {
625 _flags |= compressedClassPointers;
626 }
627 if (UseTLAB) {
628 _flags |= useTLAB;
629 }
630 if (JavaAssertions::systemClassDefault()) {
631 _flags |= systemClassAssertions;
632 }
633 if (JavaAssertions::userClassDefault()) {
634 _flags |= userClassAssertions;
635 }
636 if (EnableContended) {
637 _flags |= enableContendedPadding;
638 }
639 if (RestrictContended) {
640 _flags |= restrictContendedPadding;
641 }
642 _compressedOopShift = CompressedOops::shift();
643 _compressedOopBase = CompressedOops::base();
644 _compressedKlassShift = CompressedKlassPointers::shift();
645 _compressedKlassBase = CompressedKlassPointers::base();
646 _contendedPaddingWidth = ContendedPaddingWidth;
647 _objectAlignment = ObjectAlignmentInBytes;
648 _gc = (uint)Universe::heap()->kind();
649 }
650
651 bool AOTCodeCache::Config::verify() const {
652 #ifdef ASSERT
653 if ((_flags & debugVM) == 0) {
654 log_debug(aot, codecache, init)("AOT Code Cache disabled: it was created by product VM, it can't be used by debug VM");
655 return false;
656 }
657 #else
658 if ((_flags & debugVM) != 0) {
659 log_debug(aot, codecache, init)("AOT Code Cache disabled: it was created by debug VM, it can't be used by product VM");
660 return false;
661 }
662 #endif
663
664 CollectedHeap::Name aot_gc = (CollectedHeap::Name)_gc;
665 if (aot_gc != Universe::heap()->kind()) {
    log_debug(aot, codecache, init)("AOT Code Cache disabled: it was created with different GC: %u vs current %u", _gc, (uint)Universe::heap()->kind());
    return false;
  }
  if (((_flags & restrictContendedPadding) != 0) != RestrictContended) {
693 log_debug(aot, codecache, init)("AOT Code Cache disabled: it was created with RestrictContended = %s", RestrictContended ? "false" : "true");
694 return false;
695 }
696 if (_compressedOopShift != (uint)CompressedOops::shift()) {
697 log_debug(aot, codecache, init)("AOT Code Cache disabled: it was created with different CompressedOops::shift(): %d vs current %d", _compressedOopShift, CompressedOops::shift());
698 return false;
699 }
700 if (_compressedKlassShift != (uint)CompressedKlassPointers::shift()) {
701 log_debug(aot, codecache, init)("AOT Code Cache disabled: it was created with CompressedKlassPointers::shift() = %d vs current %d", _compressedKlassShift, CompressedKlassPointers::shift());
702 return false;
703 }
704 if (_contendedPaddingWidth != (uint)ContendedPaddingWidth) {
705 log_debug(aot, codecache, init)("AOT Code Cache disabled: it was created with ContendedPaddingWidth = %d vs current %d", _contendedPaddingWidth, ContendedPaddingWidth);
706 return false;
707 }
708 if (_objectAlignment != (uint)ObjectAlignmentInBytes) {
709 log_debug(aot, codecache, init)("AOT Code Cache disabled: it was created with ObjectAlignmentInBytes = %d vs current %d", _objectAlignment, ObjectAlignmentInBytes);
710 return false;
711 }
712
713 if ((_compressedKlassBase == nullptr || CompressedKlassPointers::base() == nullptr) && (_compressedKlassBase != CompressedKlassPointers::base())) {
714 log_debug(aot, codecache, init)("AOT Code Cache disabled: incompatible CompressedKlassPointers::base(): %p vs current %p", _compressedKlassBase, CompressedKlassPointers::base());
715 return false;
716 }
717
718 // This should be the last check as it only disables AOTStubCaching
719 if ((_compressedOopBase == nullptr || CompressedOops::base() == nullptr) && (_compressedOopBase != CompressedOops::base())) {
720 log_debug(aot, codecache, init)("AOTStubCaching is disabled: incompatible CompressedOops::base(): %p vs current %p", _compressedOopBase, CompressedOops::base());
721 return false;
722 }
723
724 return true;
725 }
726
727 bool AOTCodeCache::Header::verify_config(uint load_size) const {
728 if (_version != AOT_CODE_VERSION) {
729 log_debug(aot, codecache, init)("AOT Code Cache disabled: different AOT Code version %d vs %d recorded in AOT Code header", AOT_CODE_VERSION, _version);
730 return false;
731 }
732 if (load_size < _cache_size) {
733 log_debug(aot, codecache, init)("AOT Code Cache disabled: AOT Code Cache size %d < %d recorded in AOT Code header", load_size, _cache_size);
734 return false;
735 }
736 return true;
737 }
738
739 volatile int AOTCodeCache::_nmethod_readers = 0;
740
741 AOTCodeCache* AOTCodeCache::open_for_use() {
742 if (AOTCodeCache::is_on_for_use()) {
743 return AOTCodeCache::cache();
744 }
745 return nullptr;
746 }
747
748 AOTCodeCache* AOTCodeCache::open_for_dump() {
749 if (AOTCodeCache::is_on_for_dump()) {
750 AOTCodeCache* cache = AOTCodeCache::cache();
751 cache->clear_lookup_failed(); // Reset bit
752 return cache;
753 }
754 return nullptr;
755 }
756
757 bool AOTCodeCache::is_address_in_aot_cache(address p) {
758 AOTCodeCache* cache = open_for_use();
759 if (cache == nullptr) {
760 return false;
761 }
762 if ((p >= (address)cache->cache_buffer()) &&
763 (p < (address)(cache->cache_buffer() + cache->load_size()))) {
764 return true;
765 }
766 return false;
767 }
768
769 static void copy_bytes(const char* from, address to, uint size) {
770 assert(size > 0, "sanity");
771 bool by_words = true;
772 if ((size > 2 * HeapWordSize) && (((intptr_t)from | (intptr_t)to) & (HeapWordSize - 1)) == 0) {
773 // Use wordwise copies if possible:
774 Copy::disjoint_words((HeapWord*)from,
775 (HeapWord*)to,
776 ((size_t)size + HeapWordSize-1) / HeapWordSize);
777 } else {
778 by_words = false;
779 Copy::conjoint_jbytes(from, to, (size_t)size);
780 }
781 log_trace(aot, codecache)("Copied %d bytes as %s from " INTPTR_FORMAT " to " INTPTR_FORMAT, size, (by_words ? "HeapWord" : "bytes"), p2i(from), p2i(to));
782 }
783
784 AOTCodeReader::AOTCodeReader(AOTCodeCache* cache, AOTCodeEntry* entry, CompileTask* task) {
785 _cache = cache;
786 _entry = entry;
787 _load_buffer = cache->cache_buffer();
788 _read_position = 0;
789 if (task != nullptr) {
790 _compile_id = task->compile_id();
791 _comp_level = task->comp_level();
792 _preload = task->preload();
793 } else {
794 _compile_id = 0;
795 _comp_level = 0;
796 _preload = false;
797 }
798 _lookup_failed = false;
799 }
800
801 void AOTCodeReader::set_read_position(uint pos) {
802 if (pos == _read_position) {
803 return;
804 }
805 assert(pos < _cache->load_size(), "offset:%d >= file size:%d", pos, _cache->load_size());
806 _read_position = pos;
807 }
808
809 bool AOTCodeCache::set_write_position(uint pos) {
810 if (pos == _write_position) {
811 return true;
812 }
813 if (_store_size < _write_position) {
814 _store_size = _write_position; // Adjust during write
815 }
816 assert(pos < _store_size, "offset:%d >= file size:%d", pos, _store_size);
817 _write_position = pos;
  return true;
}

// Write 'nbytes' of data from 'buffer' at the current write position;
// returns the number of bytes actually written.
uint AOTCodeCache::write_bytes(const void* buffer, uint nbytes) {
860 if (nbytes == 0) {
861 return 0;
862 }
863 uint new_position = _write_position + nbytes;
864 if (new_position >= (uint)((char*)_store_entries - _store_buffer)) {
865 log_warning(aot, codecache)("Failed to write %d bytes at offset %d to AOT Code Cache. Increase AOTCodeMaxSize.",
866 nbytes, _write_position);
867 set_failed();
868 report_store_failure();
869 return 0;
870 }
871 copy_bytes((const char* )buffer, (address)(_store_buffer + _write_position), nbytes);
872 log_trace(aot, codecache)("Wrote %d bytes at offset %d to AOT Code Cache", nbytes, _write_position);
873 _write_position += nbytes;
874 if (_store_size < _write_position) {
875 _store_size = _write_position;
876 }
877 return nbytes;
878 }
879
880 AOTCodeEntry* AOTCodeCache::find_code_entry(const methodHandle& method, uint comp_level) {
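  // DisableCachedCode is treated as a bit mask over compilation levels (see the
  // switch below): bit 0 - CompLevel_simple (tier 1), bit 1 - CompLevel_limited_profile
  // (tier 2), bit 2 - CompLevel_full_optimization (tier 4).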
881 switch (comp_level) {
882 case CompLevel_simple:
883 if ((DisableCachedCode & (1 << 0)) != 0) {
884 return nullptr;
885 }
886 break;
887 case CompLevel_limited_profile:
888 if ((DisableCachedCode & (1 << 1)) != 0) {
889 return nullptr;
890 }
891 break;
892 case CompLevel_full_optimization:
893 if ((DisableCachedCode & (1 << 2)) != 0) {
894 return nullptr;
895 }
896 break;
897
898 default: return nullptr; // Level 1, 2, and 4 only
899 }
900 TraceTime t1("Total time to find AOT code", &_t_totalFind, enable_timers(), false);
901 if (is_on() && _cache->cache_buffer() != nullptr) {
902 ResourceMark rm;
903 const char* target_name = method->name_and_sig_as_C_string();
904 uint hash = java_lang_String::hash_code((const jbyte*)target_name, (int)strlen(target_name));
905 AOTCodeEntry* entry = _cache->find_entry(AOTCodeEntry::Code, hash, comp_level);
906 if (entry == nullptr) {
907 log_info(aot, codecache, nmethod)("Missing entry for '%s' (comp_level %d, hash: " UINT32_FORMAT_X_0 ")", target_name, (uint)comp_level, hash);
908 #ifdef ASSERT
909 } else {
910 uint name_offset = entry->offset() + entry->name_offset();
911 uint name_size = entry->name_size(); // Includes '\0'
912 const char* name = _cache->cache_buffer() + name_offset;
913 if (strncmp(target_name, name, name_size) != 0) {
914 assert(false, "AOTCodeCache: saved nmethod's name '%s' is different from '%s', hash: " UINT32_FORMAT_X_0, name, target_name, hash);
915 }
916 #endif
917 }
918
919 DirectiveSet* directives = DirectivesStack::getMatchingDirective(method, nullptr);
920 if (directives->IgnorePrecompiledOption) {
921 LogStreamHandle(Info, aot, codecache, compilation) log;
922 if (log.is_enabled()) {
923 log.print("Ignore cached code entry on level %d for ", comp_level);
924 method->print_value_on(&log);
925 }
926 return nullptr;
927 }
928
929 return entry;
930 }
931 return nullptr;
932 }
933
934 void* AOTCodeEntry::operator new(size_t x, AOTCodeCache* cache) {
935 return (void*)(cache->add_entry());
936 }
937
938 static bool check_entry(AOTCodeEntry::Kind kind, uint id, uint comp_level, AOTCodeEntry* entry) {
939 if (entry->kind() == kind) {
940 assert(entry->id() == id, "sanity");
941 if (kind != AOTCodeEntry::Code || (!entry->not_entrant() && !entry->has_clinit_barriers() &&
942 (entry->comp_level() == comp_level))) {
943 return true; // Found
944 }
945 }
946 return false;
947 }
948
949 AOTCodeEntry* AOTCodeCache::find_entry(AOTCodeEntry::Kind kind, uint id, uint comp_level) {
950 assert(_for_use, "sanity");
951 uint count = _load_header->entries_count();
952 if (_load_entries == nullptr) {
953 // Read it
954 _search_entries = (uint*)addr(_load_header->entries_offset()); // [id, index]
955 _load_entries = (AOTCodeEntry*)(_search_entries + 2 * count);
956 log_debug(aot, codecache, init)("Read %d entries table at offset %d from AOT Code Cache", count, _load_header->entries_offset());
957 }
958 // Binary search
959 int l = 0;
960 int h = count - 1;
961 while (l <= h) {
962 int mid = (l + h) >> 1;
963 int ix = mid * 2;
964 uint is = _search_entries[ix];
965 if (is == id) {
966 int index = _search_entries[ix + 1];
967 AOTCodeEntry* entry = &(_load_entries[index]);
968 if (check_entry(kind, id, comp_level, entry)) {
969 return entry; // Found
970 }
971 // Linear search around
972 for (int i = mid - 1; i >= l; i--) { // search back
973 ix = i * 2;
974 is = _search_entries[ix];
975 if (is != id) {
976 break;
977 }
978 index = _search_entries[ix + 1];
979 AOTCodeEntry* entry = &(_load_entries[index]);
980 if (check_entry(kind, id, comp_level, entry)) {
981 return entry; // Found
982 }
983 }
984 for (int i = mid + 1; i <= h; i++) { // search forward
985 ix = i * 2;
986 is = _search_entries[ix];
987 if (is != id) {
988 break;
989 }
990 index = _search_entries[ix + 1];
991 AOTCodeEntry* entry = &(_load_entries[index]);
992 if (check_entry(kind, id, comp_level, entry)) {
993 return entry; // Found
994 }
995 }
996 break; // No match found
997 } else if (is < id) {
998 l = mid + 1;
999 } else {
1000 h = mid - 1;
1001 }
1002 }
1003 return nullptr;
1004 }
1005
1006 void AOTCodeCache::invalidate_entry(AOTCodeEntry* entry) {
1007 assert(entry != nullptr, "all entries should be read already");
1008 if (entry->not_entrant()) {
1009 return; // Someone invalidated it already
1010 }
1011 #ifdef ASSERT
1012 bool found = false;
1013 if (_for_use) {
1014 uint count = _load_header->entries_count();
1015 uint i = 0;
1016 for(; i < count; i++) {
1017 if (entry == &(_load_entries[i])) {
1018 break;
1019 }
1020 }
1021 found = (i < count);
1022 }
1023 if (!found && _for_dump) {
1024 uint count = _store_entries_cnt;
1025 uint i = 0;
1026 for(; i < count; i++) {
1027 if (entry == &(_store_entries[i])) {
1028 break;
1029 }
1030 }
1031 found = (i < count);
1032 }
1033 assert(found, "entry should exist");
1034 #endif
1035 entry->set_not_entrant();
1036 {
1037 uint name_offset = entry->offset() + entry->name_offset();
1038 const char* name;
1039 if (AOTCodeCache::is_loaded(entry)) {
1040 name = _load_buffer + name_offset;
1041 } else {
1042 name = _store_buffer + name_offset;
1043 }
1044 uint level = entry->comp_level();
1045 uint comp_id = entry->comp_id();
1046 bool clinit_brs = entry->has_clinit_barriers();
1047 log_info(aot, codecache, nmethod)("Invalidated entry for '%s' (comp_id %d, comp_level %d, hash: " UINT32_FORMAT_X_0 "%s)",
1048 name, comp_id, level, entry->id(), (clinit_brs ? ", has clinit barriers" : ""));
1049 }
1050 if (entry->next() != nullptr) {
1051 entry = entry->next();
1052 assert(entry->has_clinit_barriers(), "expecting only such entries here");
1053 invalidate_entry(entry);
1054 }
1055 }
1056
1057 void AOTCodeEntry::update_method_for_writing() {
1058 if (_method != nullptr) {
1059 _method_offset = AOTCacheAccess::delta_from_base_address((address)_method);
1060 _method = nullptr;
1061 }
1062 }
1063
1064 static int uint_cmp(const void *i, const void *j) {
1065 uint a = *(uint *)i;
1066 uint b = *(uint *)j;
1067 return a > b ? 1 : a < b ? -1 : 0;
1068 }
1069
1070 bool AOTCodeCache::finish_write() {
1071 if (!align_write()) {
1072 return false;
1073 }
1074 uint strings_offset = _write_position;
1075 int strings_count = store_strings();
1076 if (strings_count < 0) {
1077 return false;
1078 }
1079 if (!align_write()) {
1080 return false;
1081 }
1082 uint strings_size = _write_position - strings_offset;
1083
1084 uint entries_count = 0; // Number of entrant (useful) code entries
1085 uint entries_offset = _write_position;
1086
1087 uint store_count = _store_entries_cnt;
1088 if (store_count > 0) {
1089 _aot_code_directory = CachedCodeDirectory::create();
1090 assert(_aot_code_directory != nullptr, "Sanity check");
1091
1092 uint header_size = (uint)align_up(sizeof(AOTCodeCache::Header), DATA_ALIGNMENT);
1093 uint load_count = (_load_header != nullptr) ? _load_header->entries_count() : 0;
1094 uint code_count = store_count + load_count;
1095 uint search_count = code_count * 2;
1096 uint search_size = search_count * sizeof(uint);
1097 uint entries_size = (uint)align_up(code_count * sizeof(AOTCodeEntry), DATA_ALIGNMENT); // In bytes
1098 uint preload_entries_cnt = 0;
1099 uint* preload_entries = NEW_C_HEAP_ARRAY(uint, code_count, mtCode);
1100 uint preload_entries_size = code_count * sizeof(uint);
1101 // _write_position should include code and strings
1102 uint code_alignment = code_count * DATA_ALIGNMENT; // We align_up code size when storing it.
1103 uint total_size = _write_position + _load_size + header_size +
1104 code_alignment + search_size + preload_entries_size + entries_size;
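    // Layout of the AOT code region assembled below, in write order:
    //   [ Header | archived code of all entries | C strings | preload entries |
    //     search table of [id, index] pairs | AOTCodeEntry table ]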
1105 assert(total_size < max_aot_code_size(), "AOT Code size (" UINT32_FORMAT " bytes) is greater than AOTCodeMaxSize(" UINT32_FORMAT " bytes).", total_size, max_aot_code_size());
1106
1107
1108 // Create ordered search table for entries [id, index];
1109 uint* search = NEW_C_HEAP_ARRAY(uint, search_count, mtCode);
1110 // Allocate in AOT Cache buffer
1111 char* buffer = (char *)AOTCacheAccess::allocate_aot_code_region(total_size + DATA_ALIGNMENT);
1112 char* start = align_up(buffer, DATA_ALIGNMENT);
1113 char* current = start + header_size; // Skip header
1114
1115 AOTCodeEntry* entries_address = _store_entries; // Pointer to latest entry
1116 uint adapters_count = 0;
1117 uint shared_blobs_count = 0;
1118 uint C1_blobs_count = 0;
1119 uint C2_blobs_count = 0;
1120 uint stubs_count = 0;
1121 uint nmethods_count = 0;
1122 uint max_size = 0;
1123 // Add old entries first
1124 if (_for_use && (_load_header != nullptr)) {
1125 for(uint i = 0; i < load_count; i++) {
1126 AOTCodeEntry* entry = &(_load_entries[i]);
1127 if (entry->load_fail()) {
1128 continue;
1129 }
1130 if (entry->not_entrant()) {
1131 log_info(aot, codecache, exit)("Not entrant load entry id: %d, hash: " UINT32_FORMAT_X_0, i, entry->id());
1132 if (entry->for_preload()) {
1133 // Skip not entrant preload code:
1134 // we can't pre-load code which may have failing dependencies.
1135 continue;
1136 }
1137 entry->set_entrant(); // Reset
1138 } else if (entry->for_preload() && entry->method() != nullptr) {
1139 // Record entrant first-version code for pre-loading
1140 preload_entries[preload_entries_cnt++] = entries_count;
1141 }
1142 {
1143 uint size = align_up(entry->size(), DATA_ALIGNMENT);
1144 if (size > max_size) {
1145 max_size = size;
1146 }
1147 copy_bytes((_load_buffer + entry->offset()), (address)current, size);
1148 entry->set_offset(current - start); // New offset
1149 current += size;
1150 uint n = write_bytes(entry, sizeof(AOTCodeEntry));
1151 if (n != sizeof(AOTCodeEntry)) {
1152 FREE_C_HEAP_ARRAY(uint, search);
1153 return false;
1154 }
1155 search[entries_count*2 + 0] = entry->id();
1156 search[entries_count*2 + 1] = entries_count;
1157 entries_count++;
1158 AOTCodeEntry::Kind kind = entry->kind();
1159 if (kind == AOTCodeEntry::Adapter) {
1160 adapters_count++;
1161 } else if (kind == AOTCodeEntry::SharedBlob) {
1162 shared_blobs_count++;
1163 } else if (kind == AOTCodeEntry::C1Blob) {
1164 C1_blobs_count++;
1165 } else if (kind == AOTCodeEntry::C2Blob) {
1166 C2_blobs_count++;
1167 } else if (kind == AOTCodeEntry::Stub) {
1168 stubs_count++;
1169 } else {
1170 assert(kind == AOTCodeEntry::Code, "sanity");
1171 nmethods_count++;
1172 }
1173 }
1174 }
1175 }
1176 // AOTCodeEntry entries were allocated in reverse order in the store buffer.
1177 // Process them in reverse index order so that the code stored first is written first.
1178 for (int i = store_count - 1; i >= 0; i--) {
1179 AOTCodeEntry* entry = &entries_address[i];
1180 if (entry->load_fail()) {
1181 continue;
1182 }
1183 if (entry->not_entrant()) {
1184 log_info(aot, codecache, exit)("Not entrant new entry comp_id: %d, comp_level: %d, hash: " UINT32_FORMAT_X_0 "%s",
1185 entry->comp_id(), entry->comp_level(), entry->id(), (entry->has_clinit_barriers() ? ", has clinit barriers" : ""));
1186 if (entry->for_preload()) {
1187 // Skip not entrant preload code:
1188 // we can't pre-load code which may have failing dependencies.
1189 continue;
1190 }
1191 entry->set_entrant(); // Reset
1192 } else if (entry->for_preload() && entry->method() != nullptr) {
1193 // Record entrant first-version code for pre-loading
1194 preload_entries[preload_entries_cnt++] = entries_count;
1195 }
1196 {
1197 entry->set_next(nullptr); // clear pointers before storing data
1198 uint size = align_up(entry->size(), DATA_ALIGNMENT);
1199 if (size > max_size) {
1200 max_size = size;
1201 }
1202 copy_bytes((_store_buffer + entry->offset()), (address)current, size);
1203 entry->set_offset(current - start); // New offset
1204 entry->update_method_for_writing();
1205 current += size;
1206 uint n = write_bytes(entry, sizeof(AOTCodeEntry));
1207 if (n != sizeof(AOTCodeEntry)) {
1208 FREE_C_HEAP_ARRAY(uint, search);
1209 return false;
1210 }
1211 search[entries_count*2 + 0] = entry->id();
1212 search[entries_count*2 + 1] = entries_count;
1213 entries_count++;
1214 AOTCodeEntry::Kind kind = entry->kind();
1215 if (kind == AOTCodeEntry::Adapter) {
1216 adapters_count++;
1217 } else if (kind == AOTCodeEntry::SharedBlob) {
1218 shared_blobs_count++;
1219 } else if (kind == AOTCodeEntry::C1Blob) {
1220 C1_blobs_count++;
1221 } else if (kind == AOTCodeEntry::C2Blob) {
1222 C2_blobs_count++;
1223 } else if (kind == AOTCodeEntry::Stub) {
1224 stubs_count++;
1225 } else {
1226 assert(kind == AOTCodeEntry::Code, "sanity");
1227 nmethods_count++;
1228 }
1229 }
1230 }
1231
1232 if (entries_count == 0) {
1233 log_info(aot, codecache, exit)("AOT Code Cache was not created: no entires");
1234 FREE_C_HEAP_ARRAY(uint, search);
1235 return true; // Nothing to write
1236 }
1237 assert(entries_count <= (store_count + load_count), "%d > (%d + %d)", entries_count, store_count, load_count);
1238 // Write strings
1239 if (strings_count > 0) {
1240 copy_bytes((_store_buffer + strings_offset), (address)current, strings_size);
1241 strings_offset = (current - start); // New offset
1242 current += strings_size;
1243 }
1244 uint preload_entries_offset = (current - start);
1245 preload_entries_size = preload_entries_cnt * sizeof(uint);
1246 if (preload_entries_size > 0) {
1247 copy_bytes((const char*)preload_entries, (address)current, preload_entries_size);
1248 current += preload_entries_size;
1249 log_info(aot, codecache, exit)("Wrote %d preload entries to AOT Code Cache", preload_entries_cnt);
1250 }
1251 if (preload_entries != nullptr) {
1252 FREE_C_HEAP_ARRAY(uint, preload_entries);
1253 }
1254
1255 uint new_entries_offset = (current - start); // New offset
1256 // Sort and store search table
1257 qsort(search, entries_count, 2*sizeof(uint), uint_cmp);
1258 search_size = 2 * entries_count * sizeof(uint);
1259 copy_bytes((const char*)search, (address)current, search_size);
1260 FREE_C_HEAP_ARRAY(uint, search);
1261 current += search_size;
1262
1263 // Write entries
1264 entries_size = entries_count * sizeof(AOTCodeEntry); // New size
1265 copy_bytes((_store_buffer + entries_offset), (address)current, entries_size);
1266 current += entries_size;
1267
1268 log_stats_on_exit();
1269
1270 uint size = (current - start);
1271 assert(size <= total_size, "%d > %d", size , total_size);
1272 uint blobs_count = shared_blobs_count + C1_blobs_count + C2_blobs_count;
1273 assert(nmethods_count == (entries_count - (stubs_count + blobs_count + adapters_count)), "sanity");
1274 log_debug(aot, codecache, exit)(" Adapters: total=%u", adapters_count);
1275 log_debug(aot, codecache, exit)(" Shared Blobs: total=%u", shared_blobs_count);
1276 log_debug(aot, codecache, exit)(" C1 Blobs: total=%u", C1_blobs_count);
1277 log_debug(aot, codecache, exit)(" C2 Blobs: total=%u", C2_blobs_count);
1278 log_debug(aot, codecache, exit)(" Stubs: total=%u", stubs_count);
1279 log_debug(aot, codecache, exit)(" Nmethods: total=%u", nmethods_count);
1280 log_debug(aot, codecache, exit)(" AOT code cache size: %u bytes, max entry's size: %u bytes", size, max_size);
1281
1282 // Finalize header
1283 AOTCodeCache::Header* header = (AOTCodeCache::Header*)start;
1284 header->init(size, (uint)strings_count, strings_offset,
1285 entries_count, new_entries_offset,
1286 preload_entries_cnt, preload_entries_offset,
1287 adapters_count, shared_blobs_count,
1288 C1_blobs_count, C2_blobs_count, stubs_count);
1289
1290 log_info(aot, codecache, exit)("Wrote %d AOT code entries to AOT Code Cache", entries_count);
1291
1292 _aot_code_directory->set_aot_code_data(size, start);
1293 }
1294 return true;
1295 }
1296
1297 //------------------Store/Load AOT code ----------------------
1298
1299 bool AOTCodeCache::store_code_blob(CodeBlob& blob, AOTCodeEntry::Kind entry_kind, uint id, const char* name, int entry_offset_count, int* entry_offsets) {
1300 AOTCodeCache* cache = open_for_dump();
1301 if (cache == nullptr) {
1302 return false;
1303 }
1304 assert(AOTCodeEntry::is_valid_entry_kind(entry_kind), "invalid entry_kind %d", entry_kind);
1305
1306 if (AOTCodeEntry::is_adapter(entry_kind) && !is_dumping_adapter()) {
1307 return false;
1308 }
1309 if (AOTCodeEntry::is_blob(entry_kind) && !is_dumping_stub()) {
1310 return false;
1311 }
1312 log_debug(aot, codecache, stubs)("Writing blob '%s' (id=%u, kind=%s) to AOT Code Cache", name, id, aot_code_entry_kind_name[entry_kind]);

  if (!cache->align_write()) {
    return false;
  }
  uint entry_position = cache->_write_position;

  // Write name
  uint name_offset = cache->_write_position - entry_position;
  uint name_size = (uint)strlen(name) + 1; // Includes '\0'
  uint n = cache->write_bytes(name, name_size);
  if (n != name_size) {
    return false;
  }

  // Write the CodeBlob
  if (!cache->align_write()) {
    return false;
  }
  uint blob_offset = cache->_write_position - entry_position;
  address archive_buffer = cache->reserve_bytes(blob.size());
  if (archive_buffer == nullptr) {
1344 return false;
1345 }
1346 CodeBlob::archive_blob(&blob, archive_buffer);
1347
1348 uint reloc_data_size = blob.relocation_size();
1349 n = cache->write_bytes((address)blob.relocation_begin(), reloc_data_size);
1350 if (n != reloc_data_size) {
1351 return false;
1352 }
1353
1354 bool has_oop_maps = false;
1355 if (blob.oop_maps() != nullptr) {
1356 if (!cache->write_oop_map_set(blob)) {
1357 return false;
1358 }
1359 has_oop_maps = true;
1360 }
1361
1362 #ifndef PRODUCT
1363 // Write asm remarks
1364 if (!cache->write_asm_remarks(blob.asm_remarks(), /* use_string_table */ true)) {
1365 return false;
1366 }
1367 if (!cache->write_dbg_strings(blob.dbg_strings(), /* use_string_table */ true)) {
1368 return false;
1369 }
1370 #endif /* PRODUCT */
1371
1372 if (!cache->write_relocations(blob)) {
1373 return false;
1374 }
1375
1376 // Write entries offsets
1377 n = cache->write_bytes(&entry_offset_count, sizeof(int));
1378 if (n != sizeof(int)) {
1379 return false;
1380 }
1381 for (int i = 0; i < entry_offset_count; i++) {
1382 uint32_t off = (uint32_t)entry_offsets[i];
1383 n = cache->write_bytes(&off, sizeof(uint32_t));
1384 if (n != sizeof(uint32_t)) {
1385 return false;
1386 }
1387 }
  // Note: entry creation mirrors the nmethod entry constructor used in write_nmethod() below.
  uint entry_size = cache->_write_position - entry_position;
  AOTCodeEntry* entry = new (cache) AOTCodeEntry(entry_kind, encode_id(entry_kind, id),
                                                 entry_position, entry_size, name_offset, name_size,
                                                 blob_offset, has_oop_maps, blob.content_begin());
  log_debug(aot, codecache, stubs)("Wrote blob '%s' (id=%u, kind=%s) to AOT Code Cache", name, id, aot_code_entry_kind_name[entry_kind]);
  return true;
}
1395
1396 CodeBlob* AOTCodeCache::load_code_blob(AOTCodeEntry::Kind entry_kind, uint id, const char* name, int entry_offset_count, int* entry_offsets) {
1397 AOTCodeCache* cache = open_for_use();
1398 if (cache == nullptr) {
1399 return nullptr;
1400 }
1401 assert(AOTCodeEntry::is_valid_entry_kind(entry_kind), "invalid entry_kind %d", entry_kind);
1402
1403 if (AOTCodeEntry::is_adapter(entry_kind) && !is_using_adapter()) {
1404 return nullptr;
1405 }
1406 if (AOTCodeEntry::is_blob(entry_kind) && !is_using_stub()) {
1407 return nullptr;
1408 }
1409 log_debug(aot, codecache, stubs)("Reading blob '%s' (id=%u, kind=%s) from AOT Code Cache", name, id, aot_code_entry_kind_name[entry_kind]);
1410
1411 AOTCodeEntry* entry = cache->find_entry(entry_kind, encode_id(entry_kind, id));
1412 if (entry == nullptr) {
1413 return nullptr;
1414 }
1415 AOTCodeReader reader(cache, entry, nullptr);
1416 CodeBlob* blob = reader.compile_code_blob(name, entry_offset_count, entry_offsets);
1417
1418 log_debug(aot, codecache, stubs)("%sRead blob '%s' (id=%u, kind=%s) from AOT Code Cache",
1419 (blob == nullptr? "Failed to " : ""), name, id, aot_code_entry_kind_name[entry_kind]);
1420 return blob;
1421 }
1422
1423 CodeBlob* AOTCodeReader::compile_code_blob(const char* name, int entry_offset_count, int* entry_offsets) {
1424 uint entry_position = _entry->offset();
1425
1426 // Read name
1427 uint name_offset = entry_position + _entry->name_offset();
1428 uint name_size = _entry->name_size(); // Includes '\0'
1429 const char* stored_name = addr(name_offset);
1430
1431 if (strncmp(stored_name, name, (name_size - 1)) != 0) {
1432 log_warning(aot, codecache, stubs)("Saved blob's name '%s' is different from the expected name '%s'",
1433 stored_name, name);
1434 set_lookup_failed(); // Skip this blob
1435 return nullptr;
1436 }
1437
1438 // Read archived code blob
1439 uint offset = entry_position + _entry->code_offset();
1440 CodeBlob* archived_blob = (CodeBlob*)addr(offset);
1441 offset += archived_blob->size();
1442
1443 address reloc_data = (address)addr(offset);
1444 offset += archived_blob->relocation_size();
1445 set_read_position(offset);
1446
1447 ImmutableOopMapSet* oop_maps = nullptr;
1448 if (_entry->has_oop_maps()) {
1449 oop_maps = read_oop_map_set();
1450 }
1451
1452 CodeBlob* code_blob = CodeBlob::create(archived_blob,
1453 stored_name,
1454 reloc_data,
1455 oop_maps
1456 );
1457 if (code_blob == nullptr) { // no space left in CodeCache
1458 return nullptr;
1459 }
1460
1461 #ifndef PRODUCT
1462 code_blob->asm_remarks().init();
1463 read_asm_remarks(code_blob->asm_remarks(), /* use_string_table */ true);
1464 code_blob->dbg_strings().init();
1465 read_dbg_strings(code_blob->dbg_strings(), /* use_string_table */ true);
1466 #endif // PRODUCT
1467
1468 fix_relocations(code_blob);
1469
1470 // Read entries offsets
1471 offset = read_position();
1472 int stored_count = *(int*)addr(offset);
1473 assert(stored_count == entry_offset_count, "entry offset count mismatch, count in AOT code cache=%d, expected=%d", stored_count, entry_offset_count);
1474 offset += sizeof(int);
1475 set_read_position(offset);
1476 for (int i = 0; i < stored_count; i++) {
1477 uint32_t off = *(uint32_t*)addr(offset);
1478 offset += sizeof(uint32_t);
1479 const char* entry_name = (_entry->kind() == AOTCodeEntry::Adapter) ? AdapterHandlerEntry::entry_name(i) : "";
1480 log_trace(aot, codecache, stubs)("Reading adapter '%s:%s' (0x%x) offset: 0x%x from AOT Code Cache",
1481 stored_name, entry_name, _entry->id(), off);
1482 entry_offsets[i] = off;
1483 }
1484
1485 #ifdef ASSERT
1486 LogStreamHandle(Trace, aot, codecache, stubs) log;
1487 if (log.is_enabled()) {
1488 FlagSetting fs(PrintRelocations, true);
1489 code_blob->print_on(&log);
1490 }
1491 #endif
1492 return code_blob;
1493 }
1494
1495 bool AOTCodeCache::store_stub(StubCodeGenerator* cgen, vmIntrinsicID id, const char* name, address start) {
1496 if (!is_dumping_stub()) {
1497 return false;
1498 }
1499 AOTCodeCache* cache = open_for_dump();
1500 if (cache == nullptr) {
1501 return false;
1502 }
1503 log_info(aot, codecache, stubs)("Writing stub '%s' id:%d to AOT Code Cache", name, (int)id);
1504 if (!cache->align_write()) {
1505 return false;
1506 }
1507 #ifdef ASSERT
1508 CodeSection* cs = cgen->assembler()->code_section();
1509 if (cs->has_locs()) {
1510 uint reloc_count = cs->locs_count();
1511 tty->print_cr("======== write stubs code section relocations [%d]:", reloc_count);
1512 // Collect additional data
1513 RelocIterator iter(cs);
1514 while (iter.next()) {
1515 switch (iter.type()) {
1516 case relocInfo::none:
1517 break;
1518 default: {
1519 iter.print_current_on(tty);
1520 fatal("stub's relocation %d unimplemented", (int)iter.type());
1521 break;
1522 }
1523 }
1524 }
1525 }
1526 #endif
1527 uint entry_position = cache->_write_position;
1528
1529 // Write code
1530 uint code_offset = 0;
1531 uint code_size = cgen->assembler()->pc() - start;
1532 uint n = cache->write_bytes(start, code_size);
1533 if (n != code_size) {
1534 return false;
1535 }
1536 // Write name
1537 uint name_offset = cache->_write_position - entry_position;
1538 uint name_size = (uint)strlen(name) + 1; // Includes '\0'
1539 n = cache->write_bytes(name, name_size);
1540 if (n != name_size) {
1541 return false;
1542 }
1543 uint entry_size = cache->_write_position - entry_position;
1544 AOTCodeEntry* entry = new(cache) AOTCodeEntry(entry_position, entry_size, name_offset, name_size,
1545 code_offset, code_size,
1546 AOTCodeEntry::Stub, (uint32_t)id);
1547 log_info(aot, codecache, stubs)("Wrote stub '%s' id:%d to AOT Code Cache", name, (int)id);
1548 return true;
1549 }
1550
1551 bool AOTCodeCache::load_stub(StubCodeGenerator* cgen, vmIntrinsicID id, const char* name, address start) {
1552 if (!is_using_stub()) {
1553 return false;
1554 }
1555 assert(start == cgen->assembler()->pc(), "wrong buffer");
1556 AOTCodeCache* cache = open_for_use();
1557 if (cache == nullptr) {
1558 return false;
1559 }
1560 AOTCodeEntry* entry = cache->find_entry(AOTCodeEntry::Stub, (uint)id);
1561 if (entry == nullptr) {
1562 return false;
1563 }
1564 uint entry_position = entry->offset();
1565 // Read name
1566 uint name_offset = entry->name_offset() + entry_position;
1567 uint name_size = entry->name_size(); // Includes '\0'
1568 const char* saved_name = cache->addr(name_offset);
1569 if (strncmp(name, saved_name, (name_size - 1)) != 0) {
1570 log_warning(aot, codecache)("Saved stub's name '%s' is different from '%s' for id:%d", saved_name, name, (int)id);
1571 cache->set_failed();
1572 report_load_failure();
1573 return false;
1574 }
1575 log_info(aot, codecache,stubs)("Reading stub '%s' id:%d from AOT Code Cache", name, (int)id);
1576 // Read code
1577 uint code_offset = entry->code_offset() + entry_position;
1578 uint code_size = entry->code_size();
1579 copy_bytes(cache->addr(code_offset), start, code_size);
1580 cgen->assembler()->code_section()->set_end(start + code_size);
1581 log_info(aot, codecache,stubs)("Read stub '%s' id:%d from AOT Code Cache", name, (int)id);
1582 return true;
1583 }
1584
1585 AOTCodeEntry* AOTCodeCache::store_nmethod(nmethod* nm, AbstractCompiler* compiler, bool for_preload) {
1586 if (!is_dumping_code()) {
1587 return nullptr;
1588 }
1589 if (!CDSConfig::is_dumping_aot_code()) {
1590 return nullptr; // The metadata and heap in the CDS image haven't been finalized yet.
1591 }
1592 AOTCodeCache* cache = open_for_dump();
1593 if (cache == nullptr) {
1594 return nullptr; // Cache file is closed
1595 }
1596 if (nm->is_osr_method()) {
1597 return nullptr; // No OSR
1598 }
1599 if (!compiler->is_c1() && !compiler->is_c2()) {
1600 // Only c1 and c2 compilers
1601 return nullptr;
1602 }
1603 int comp_level = nm->comp_level();
1604 if (comp_level == CompLevel_full_profile) {
1605 // Do not cache C1 compiles with full profile i.e. tier3
1606 return nullptr;
1607 }
1608 assert(comp_level == CompLevel_simple || comp_level == CompLevel_limited_profile || comp_level == CompLevel_full_optimization, "must be");
1609
1610 TraceTime t1("Total time to store AOT code", &_t_totalStore, enable_timers(), false);
1611 AOTCodeEntry* entry = nullptr;
1612 entry = cache->write_nmethod(nm, for_preload);
1613 if (entry == nullptr) {
1614 log_info(aot, codecache, nmethod)("%d (L%d): nmethod store attempt failed", nm->compile_id(), comp_level);
1615 }
1616 return entry;
1617 }
1618
1619 AOTCodeEntry* AOTCodeCache::write_nmethod(nmethod* nm, bool for_preload) {
1620 AOTCodeCache* cache = open_for_dump();
1621 assert(cache != nullptr, "sanity check");
1622 assert(!nm->has_clinit_barriers() || _gen_preload_code, "sanity");
1623 uint comp_id = nm->compile_id();
1624 uint comp_level = nm->comp_level();
1625 Method* method = nm->method();
1626 bool method_in_cds = MetaspaceShared::is_in_shared_metaspace((address)method);
1627 InstanceKlass* holder = method->method_holder();
1628 bool klass_in_cds = holder->is_shared() && !holder->defined_by_other_loaders();
1629 bool builtin_loader = holder->class_loader_data()->is_builtin_class_loader_data();
1630 if (!builtin_loader) {
1631 ResourceMark rm;
1632 log_info(aot, codecache, nmethod)("%d (L%d): Skip method '%s' loaded by custom class loader %s", comp_id, (int)comp_level, method->name_and_sig_as_C_string(), holder->class_loader_data()->loader_name());
1633 return nullptr;
1634 }
1635 if (for_preload && !(method_in_cds && klass_in_cds)) {
1636 ResourceMark rm;
1637 log_info(aot, codecache, nmethod)("%d (L%d): Skip method '%s' for preload: not in CDS", comp_id, (int)comp_level, method->name_and_sig_as_C_string());
1638 return nullptr;
1639 }
1640 assert(!for_preload || method_in_cds, "sanity");
1641 _for_preload = for_preload;
1642 _has_clinit_barriers = nm->has_clinit_barriers();
1643
1644 if (!align_write()) {
1645 return nullptr;
1646 }
1647
1648 uint entry_position = _write_position;
1649
1650 // Write name
1651 uint name_offset = 0;
1652 uint name_size = 0;
1653 uint hash = 0;
1654 uint n;
1655 {
1656 ResourceMark rm;
1657 const char* name = method->name_and_sig_as_C_string();
1658 log_info(aot, codecache, nmethod)("%d (L%d): Writing nmethod '%s' (comp level: %d%s) to AOT Code Cache",
1659 comp_id, (int)comp_level, name, comp_level,
1660 (nm->has_clinit_barriers() ? ", has clinit barriers" : ""));
1661
1662 LogStreamHandle(Info, aot, codecache, loader) log;
1663 if (log.is_enabled()) {
1664 oop loader = holder->class_loader();
1665 oop domain = holder->protection_domain();
1666 log.print("Holder: ");
1667 holder->print_value_on(&log);
1668 log.print(" loader: ");
1669 if (loader == nullptr) {
1670 log.print("nullptr");
1671 } else {
1672 loader->print_value_on(&log);
1673 }
1674 log.print(" domain: ");
1675 if (domain == nullptr) {
1676 log.print("nullptr");
1677 } else {
1678 domain->print_value_on(&log);
1679 }
1680 log.cr();
1681 }
1682 name_offset = _write_position - entry_position;
1683 name_size = (uint)strlen(name) + 1; // Includes '\0'
1684 n = write_bytes(name, name_size);
1685 if (n != name_size) {
1686 return nullptr;
1687 }
1688 hash = java_lang_String::hash_code((const jbyte*)name, (int)strlen(name));
1689 }
1690
1691 // Write CodeBlob
1692 if (!cache->align_write()) {
1693 return nullptr;
1694 }
1695 uint blob_offset = cache->_write_position - entry_position;
1696 address archive_buffer = cache->reserve_bytes(nm->size());
1697 if (archive_buffer == nullptr) {
1698 return nullptr;
1699 }
1700 CodeBlob::archive_blob(nm, archive_buffer);
1701
1702 uint reloc_data_size = nm->relocation_size();
1703 n = write_bytes((address)nm->relocation_begin(), reloc_data_size);
1704 if (n != reloc_data_size) {
1705 return nullptr;
1706 }
1707
1708 // Write oops and metadata present in the nmethod's data region
1709 if (!write_oops(nm)) {
1710 if (lookup_failed() && !failed()) {
1711 // Skip this method and reposition file
1712 set_write_position(entry_position);
1713 }
1714 return nullptr;
1715 }
1716 if (!write_metadata(nm)) {
1717 if (lookup_failed() && !failed()) {
1718 // Skip this method and reposition file
1719 set_write_position(entry_position);
1720 }
1721 return nullptr;
1722 }
1723
1724 bool has_oop_maps = false;
1725 if (nm->oop_maps() != nullptr) {
1726 if (!cache->write_oop_map_set(*nm)) {
1727 return nullptr;
1728 }
1729 has_oop_maps = true;
1730 }
1731
1732 uint immutable_data_size = nm->immutable_data_size();
1733 n = write_bytes(nm->immutable_data_begin(), immutable_data_size);
1734 if (n != immutable_data_size) {
1735 return nullptr;
1736 }
1737
1738 JavaThread* thread = JavaThread::current();
1739 HandleMark hm(thread);
1740 GrowableArray<Handle> oop_list;
1741 GrowableArray<Metadata*> metadata_list;
1742
1743 nm->create_reloc_immediates_list(thread, oop_list, metadata_list);
1744 if (!write_nmethod_reloc_immediates(oop_list, metadata_list)) {
1745 if (lookup_failed() && !failed()) {
1746 // Skip this method and reposition file
1747 set_write_position(entry_position);
1748 }
1749 return nullptr;
1750 }
1751
1752 if (!write_relocations(*nm, &oop_list, &metadata_list)) {
1753 return nullptr;
1754 }
1755
1756 #ifndef PRODUCT
1757 if (!cache->write_asm_remarks(nm->asm_remarks(), /* use_string_table */ false)) {
1758 return nullptr;
1759 }
1760 if (!cache->write_dbg_strings(nm->dbg_strings(), /* use_string_table */ false)) {
1761 return nullptr;
1762 }
1763 #endif /* PRODUCT */
1764
1765 uint entry_size = _write_position - entry_position;
1766 AOTCodeEntry* entry = new (this) AOTCodeEntry(AOTCodeEntry::Code, hash,
1767 entry_position, entry_size,
1768 name_offset, name_size,
1769 blob_offset, has_oop_maps,
1770 nm->content_begin(), comp_level, comp_id,
1771 nm->has_clinit_barriers(), for_preload);
1772 if (method_in_cds) {
1773 entry->set_method(method);
1774 }
1775 #ifdef ASSERT
1776 if (nm->has_clinit_barriers() || for_preload) {
1777 assert(for_preload, "sanity");
1778 assert(entry->method() != nullptr, "sanity");
1779 }
1780 #endif
1781 {
1782 ResourceMark rm;
1783 const char* name = nm->method()->name_and_sig_as_C_string();
1784 log_info(aot, codecache, nmethod)("%d (L%d): Wrote nmethod '%s'%s to AOT Code Cache",
1785 comp_id, (int)comp_level, name, (for_preload ? " (for preload)" : ""));
1786 }
1787 if (VerifyCachedCode) {
1788 return nullptr;
1789 }
1790 return entry;
1791 }
1792
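// Load the archived nmethod recorded in the compile task's AOTCodeEntry and install
// it for 'target'. Returns false if the cache is not usable or reading fails.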
1793 bool AOTCodeCache::load_nmethod(ciEnv* env, ciMethod* target, int entry_bci, AbstractCompiler* compiler, CompLevel comp_level) {
1794 if (!is_using_code()) {
1795 return false;
1796 }
1797 AOTCodeCache* cache = open_for_use();
1798 if (cache == nullptr) {
1799 return false;
1800 }
1801 assert(entry_bci == InvocationEntryBci, "unexpected entry_bci=%d", entry_bci);
1802 TraceTime t1("Total time to load AOT code", &_t_totalLoad, enable_timers(), false);
1803 CompileTask* task = env->task();
1804 task->mark_aot_load_start(os::elapsed_counter());
1805 AOTCodeEntry* entry = task->aot_code_entry();
1806 bool preload = task->preload();
1807 assert(entry != nullptr, "sanity");
1808 if (log_is_enabled(Info, aot, codecache, nmethod)) {
1809 VM_ENTRY_MARK;
1810 ResourceMark rm;
1811 methodHandle method(THREAD, target->get_Method());
1812 const char* target_name = method->name_and_sig_as_C_string();
1813 uint hash = java_lang_String::hash_code((const jbyte*)target_name, (int)strlen(target_name));
1814 bool clinit_brs = entry->has_clinit_barriers();
1815 log_info(aot, codecache, nmethod)("%d (L%d): %s nmethod '%s' (hash: " UINT32_FORMAT_X_0 "%s)",
1816 task->compile_id(), task->comp_level(), (preload ? "Preloading" : "Reading"),
1817 target_name, hash, (clinit_brs ? ", has clinit barriers" : ""));
1818 }
1819 ReadingMark rdmk;
1820 if (rdmk.failed()) {
1821 // Cache is closed, cannot touch anything.
1822 return false;
1823 }
1824
1825 AOTCodeReader reader(cache, entry, task);
1826 bool success = reader.compile_nmethod(env, target, compiler);
1827 if (success) {
1828 task->set_num_inlined_bytecodes(entry->num_inlined_bytecodes());
1829 } else {
1830 entry->set_load_fail();
1831 }
1832 task->mark_aot_load_finish(os::elapsed_counter());
1833 return success;
1834 }
1835
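// Reconstruct an nmethod from its archived image: read the relocation data, oops,
// metadata, oop maps, immutable data and dependencies, then hand everything to
// ciEnv::register_aot_method() for installation.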
1836 bool AOTCodeReader::compile_nmethod(ciEnv* env, ciMethod* target, AbstractCompiler* compiler) {
1837 CompileTask* task = env->task();
1838 AOTCodeEntry* aot_code_entry = (AOTCodeEntry*)_entry;
1839 nmethod* nm = nullptr;
1840
1841 uint entry_position = aot_code_entry->offset();
1842 uint archived_nm_offset = entry_position + aot_code_entry->code_offset();
1843 nmethod* archived_nm = (nmethod*)addr(archived_nm_offset);
1844 set_read_position(archived_nm_offset + archived_nm->size());
1845
1846 OopRecorder* oop_recorder = new OopRecorder(env->arena());
1847 env->set_oop_recorder(oop_recorder);
1848
1849 uint offset;
1850
1851 offset = read_position();
1852 address reloc_data = (address)addr(offset);
1853 offset += archived_nm->relocation_size();
1854 set_read_position(offset);
1855
1856 // Read oops and metadata
1857 VM_ENTRY_MARK
1858 GrowableArray<Handle> oop_list;
1859 GrowableArray<Metadata*> metadata_list;
1860
1861 if (!read_oop_metadata_list(THREAD, target, oop_list, metadata_list, oop_recorder)) {
1862 return false;
1863 }
1864
1865 ImmutableOopMapSet* oopmaps = read_oop_map_set();
1866
1867 offset = read_position();
1868 address immutable_data = (address)addr(offset);
1869 offset += archived_nm->immutable_data_size();
1870 set_read_position(offset);
1871
1872 GrowableArray<Handle> reloc_immediate_oop_list;
1873 GrowableArray<Metadata*> reloc_immediate_metadata_list;
1874 if (!read_oop_metadata_list(THREAD, target, reloc_immediate_oop_list, reloc_immediate_metadata_list, nullptr)) {
1875 return false;
1876 }
1877
1878 // Read Dependencies (compressed already)
1879 Dependencies* dependencies = new Dependencies(env);
1880 dependencies->set_content(immutable_data, archived_nm->dependencies_size());
1881 env->set_dependencies(dependencies);
1882
1883 const char* name = addr(entry_position + aot_code_entry->name_offset());
1884
1885 if (VerifyCachedCode) {
1886 return false;
1887 }
1888
1889 TraceTime t1("Total time to register AOT nmethod", &_t_totalRegister, enable_timers(), false);
1890 nm = env->register_aot_method(THREAD,
1891 target,
1892 compiler,
1893 archived_nm,
1894 reloc_data,
1895 oop_list,
1896 metadata_list,
1897 oopmaps,
1898 immutable_data,
1899 reloc_immediate_oop_list,
1900 reloc_immediate_metadata_list,
1901 this);
1902 bool success = task->is_success();
1903 if (success) {
1904 aot_code_entry->set_loaded();
1905 log_info(aot, codecache, nmethod)("%d (L%d): Read nmethod '%s' from AOT Code Cache", compile_id(), comp_level(), name);
1906 #ifdef ASSERT
1907 LogStreamHandle(Debug, aot, codecache, nmethod) log;
1908 if (log.is_enabled()) {
1909 FlagSetting fs(PrintRelocations, true);
1910 nm->print_on(&log);
1911 nm->decode2(&log);
1912 }
1913 #endif
1914 }
1915
1916 return success;
1917 }
1918
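// A method is excluded from preloading when its holder is not loaded yet or a
// matching compiler directive requests DontPreload.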
1919 bool skip_preload(methodHandle mh) {
1920 if (!mh->method_holder()->is_loaded()) {
1921 return true;
1922 }
1923 DirectiveSet* directives = DirectivesStack::getMatchingDirective(mh, nullptr);
1924 if (directives->DontPreloadOption) {
1925 LogStreamHandle(Info, aot, codecache, init) log;
1926 if (log.is_enabled()) {
1927 log.print("Exclude preloading code for ");
1928 mh->print_value_on(&log);
1929 }
1930 return true;
1931 }
1932 return false;
1933 }
1934
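// Preload code is generated only for the standard entry point of methods that can
// be stored in the AOT cache, and only when the cache is set up for preload code.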
1935 bool AOTCodeCache::gen_preload_code(ciMethod* m, int entry_bci) {
1936 VM_ENTRY_MARK;
1937 return (entry_bci == InvocationEntryBci) && is_on() && _cache->gen_preload_code() &&
1938 AOTCacheAccess::can_generate_aot_code(m->get_Method());
1939 }
1940
1941 void AOTCodeCache::preload_code(JavaThread* thread) {
1942 if ((ClassInitBarrierMode == 0) || !is_on_for_use()) {
1943 return;
1944 }
1945 if ((DisableCachedCode & (1 << 3)) != 0) {
1946 return; // no preloaded code (level 5)
1947 }
1948 _cache->preload_startup_code(thread);
1949 }
1950
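// Iterate over the preload entries table, link the method holders if needed, and
// submit each archived method to the CompileBroker at CompLevel_full_optimization.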
1951 void AOTCodeCache::preload_startup_code(TRAPS) {
1952 if (CompilationPolicy::compiler_count(CompLevel_full_optimization) == 0) {
1953 // Since we reuse the CompileBroker API to install cached code, we are required to have a JIT compiler for the
1954 // level we want (that is, CompLevel_full_optimization).
1955 return;
1956 }
1957 assert(_for_use, "sanity");
1958 uint count = _load_header->entries_count();
1959 if (_load_entries == nullptr) {
1960 // Lazily read the entries table from the cache
1961 _search_entries = (uint*)addr(_load_header->entries_offset()); // [id, index]
1962 _load_entries = (AOTCodeEntry*)(_search_entries + 2 * count);
1963 log_info(aot, codecache, init)("Read %d entries table at offset %d from AOT Code Cache", count, _load_header->entries_offset());
1964 }
1965 uint preload_entries_count = _load_header->preload_entries_count();
1966 if (preload_entries_count > 0) {
1967 uint* entries_index = (uint*)addr(_load_header->preload_entries_offset());
1968 log_info(aot, codecache, init)("Load %d preload entries from AOT Code Cache", preload_entries_count);
1969 uint count = MIN2(preload_entries_count, SCLoadStop);
1970 for (uint i = SCLoadStart; i < count; i++) {
1971 uint index = entries_index[i];
1972 AOTCodeEntry* entry = &(_load_entries[index]);
1973 if (entry->not_entrant()) {
1974 continue;
1975 }
1976 Method* m = AOTCacheAccess::convert_offset_to_method(entry->method_offset());
1977 entry->set_method(m);
1978 methodHandle mh(THREAD, entry->method());
1979 assert((mh.not_null() && MetaspaceShared::is_in_shared_metaspace((address)mh())), "sanity");
1980 if (skip_preload(mh)) {
1981 continue; // Exclude preloading for this method
1982 }
1983 assert(mh->method_holder()->is_loaded(), "");
1984 if (!mh->method_holder()->is_linked()) {
1985 assert(!HAS_PENDING_EXCEPTION, "");
1986 mh->method_holder()->link_class(THREAD);
1987 if (HAS_PENDING_EXCEPTION) {
1988 LogStreamHandle(Info, aot, codecache) log;
1989 if (log.is_enabled()) {
1990 ResourceMark rm;
1991 log.print("Linkage failed for %s: ", mh->method_holder()->external_name());
1992 THREAD->pending_exception()->print_value_on(&log);
1993 if (log_is_enabled(Debug, aot, codecache)) {
1994 THREAD->pending_exception()->print_on(&log);
1995 }
1996 }
1997 CLEAR_PENDING_EXCEPTION;
1998 }
1999 }
2000 if (mh->aot_code_entry() != nullptr) {
2001 // Second C2 compilation of the same method could happen for
2002 // different reasons without marking first entry as not entrant.
2003 continue; // Keep old entry to avoid issues
2004 }
2005 mh->set_aot_code_entry(entry);
2006 CompileBroker::compile_method(mh, InvocationEntryBci, CompLevel_full_optimization, 0, false, CompileTask::Reason_Preload, CHECK);
2007 }
2008 }
2009 }
2010
2011 // ------------ process code and data --------------
2012
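// For each relocation in the blob record one uint of extra data: an index into the
// immediate oop/metadata lists, or an address table id for call and runtime targets.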
2013 bool AOTCodeCache::write_relocations(CodeBlob& code_blob, GrowableArray<Handle>* oop_list, GrowableArray<Metadata*>* metadata_list) {
2014 GrowableArray<uint> reloc_data;
2015 RelocIterator iter(&code_blob);
2016 LogStreamHandle(Trace, aot, codecache, reloc) log;
2017 while (iter.next()) {
2018 int idx = reloc_data.append(0); // default value
2019 switch (iter.type()) {
2020 case relocInfo::none:
2021 break;
2022 case relocInfo::oop_type: {
2023 oop_Relocation* r = (oop_Relocation*)iter.reloc();
2024 if (r->oop_is_immediate()) {
2025 assert(oop_list != nullptr, "sanity check");
2026 // store index of oop in the reloc immediate oop list
2027 Handle h(JavaThread::current(), r->oop_value());
2028 int oop_idx = oop_list->find(h);
2029 assert(oop_idx != -1, "sanity check");
2030 reloc_data.at_put(idx, (uint)oop_idx);
2031 }
2032 break;
2033 }
2034 case relocInfo::metadata_type: {
2035 metadata_Relocation* r = (metadata_Relocation*)iter.reloc();
2036 if (r->metadata_is_immediate()) {
2037 assert(metadata_list != nullptr, "sanity check");
2038 // store index of metadata in the reloc immediate metadata list
2039 int metadata_idx = metadata_list->find(r->metadata_value());
2040 assert(metadata_idx != -1, "sanity check");
2041 reloc_data.at_put(idx, (uint)metadata_idx);
2042 }
2043 break;
2044 }
2045 case relocInfo::virtual_call_type: // Fall through. They all call resolve_*_call blobs.
2046 case relocInfo::opt_virtual_call_type:
2047 case relocInfo::static_call_type: {
2048 CallRelocation* r = (CallRelocation*)iter.reloc();
2049 address dest = r->destination();
2050 if (dest == r->addr()) { // possible call via trampoline on AArch64
2051 dest = (address)-1; // do nothing in this case when loading this relocation
2052 }
2053 reloc_data.at_put(idx, _table->id_for_address(dest, iter, &code_blob));
2054 break;
2055 }
2056 case relocInfo::trampoline_stub_type: {
2057 address dest = ((trampoline_stub_Relocation*)iter.reloc())->destination();
2058 reloc_data.at_put(idx, _table->id_for_address(dest, iter, &code_blob));
2059 break;
2060 }
2061 case relocInfo::static_stub_type:
2062 break;
2063 case relocInfo::runtime_call_type: {
2064 // Record offset of runtime destination
2065 CallRelocation* r = (CallRelocation*)iter.reloc();
2066 address dest = r->destination();
2067 if (dest == r->addr()) { // possible call via trampoline on AArch64
2068 dest = (address)-1; // do nothing in this case when loading this relocation
2069 }
2070 reloc_data.at_put(idx, _table->id_for_address(dest, iter, &code_blob));
2071 break;
2072 }
2073 case relocInfo::runtime_call_w_cp_type:
2074 fatal("runtime_call_w_cp_type unimplemented");
2075 break;
2076 case relocInfo::external_word_type: {
2077 // Record offset of runtime target
2078 address target = ((external_word_Relocation*)iter.reloc())->target();
2079 reloc_data.at_put(idx, _table->id_for_address(target, iter, &code_blob));
2080 break;
2081 }
2082 case relocInfo::internal_word_type:
2083 break;
2084 case relocInfo::section_word_type:
2085 break;
2086 case relocInfo::poll_type:
2087 break;
2088 case relocInfo::poll_return_type:
2089 break;
2090 case relocInfo::post_call_nop_type:
2091 break;
2092 case relocInfo::entry_guard_type:
2093 break;
2094 default:
2095 fatal("relocation %d unimplemented", (int)iter.type());
2096 break;
2097 }
2098 if (log.is_enabled()) {
2099 iter.print_current_on(&log);
2100 }
2101 }
2102
2103 // Write additional relocation data: uint per relocation
2104 // Write the count first
2105 int count = reloc_data.length();
2106 write_bytes(&count, sizeof(int));
2107 for (GrowableArrayIterator<uint> iter = reloc_data.begin();
2108 iter != reloc_data.end(); ++iter) {
2109 uint value = *iter;
2110 int n = write_bytes(&value, sizeof(uint));
2111 if (n != sizeof(uint)) {
2112 return false;
2113 }
2114 }
2115 return true;
2116 }
2117
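// Re-apply relocations in the loaded blob using the extra data written by
// write_relocations(): resolve address table ids back to runtime addresses and
// patch immediate oops and metadata from the supplied lists.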
2118 void AOTCodeReader::fix_relocations(CodeBlob* code_blob, GrowableArray<Handle>* oop_list, GrowableArray<Metadata*>* metadata_list) {
2119 LogStreamHandle(Trace, aot, reloc) log;
2120 uint offset = read_position();
2121 int count = *(int*)addr(offset);
2122 offset += sizeof(int);
2123 if (log.is_enabled()) {
2124 log.print_cr("======== extra relocations count=%d", count);
2125 }
2126 uint* reloc_data = (uint*)addr(offset);
2127 offset += (count * sizeof(uint));
2128 set_read_position(offset);
2129
2130 RelocIterator iter(code_blob);
2131 int j = 0;
2132 while (iter.next()) {
2133 switch (iter.type()) {
2134 case relocInfo::none:
2135 break;
2136 case relocInfo::oop_type: {
2137 assert(code_blob->is_nmethod(), "sanity check");
2138 oop_Relocation* r = (oop_Relocation*)iter.reloc();
2139 if (r->oop_is_immediate()) {
2140 assert(oop_list != nullptr, "sanity check");
2141 Handle h = oop_list->at(reloc_data[j]);
2142 r->set_value(cast_from_oop<address>(h()));
2143 } else {
2144 r->fix_oop_relocation();
2145 }
2146 break;
2147 }
2148 case relocInfo::metadata_type: {
2149 assert(code_blob->is_nmethod(), "sanity check");
2150 metadata_Relocation* r = (metadata_Relocation*)iter.reloc();
2151 Metadata* m;
2152 if (r->metadata_is_immediate()) {
2153 assert(metadata_list != nullptr, "sanity check");
2154 m = metadata_list->at(reloc_data[j]);
2155 } else {
2156 // Get already updated value from nmethod.
2157 int index = r->metadata_index();
2158 m = code_blob->as_nmethod()->metadata_at(index);
2159 }
2160 r->set_value((address)m);
2161 break;
2162 }
2163 case relocInfo::virtual_call_type: // Fall through. They all call resolve_*_call blobs.
2164 case relocInfo::opt_virtual_call_type:
2165 case relocInfo::static_call_type: {
2166 address dest = _cache->address_for_id(reloc_data[j]);
2167 if (dest != (address)-1) {
2168 ((CallRelocation*)iter.reloc())->set_destination(dest);
2169 }
2170 break;
2171 }
2172 case relocInfo::trampoline_stub_type: {
2173 address dest = _cache->address_for_id(reloc_data[j]);
2174 if (dest != (address)-1) {
2175 ((trampoline_stub_Relocation*)iter.reloc())->set_destination(dest);
2176 }
2177 break;
2178 }
2179 case relocInfo::static_stub_type:
2180 break;
2181 case relocInfo::runtime_call_type: {
2182 address dest = _cache->address_for_id(reloc_data[j]);
2183 if (dest != (address)-1) {
2184 ((CallRelocation*)iter.reloc())->set_destination(dest);
2185 }
2186 break;
2187 }
2188 case relocInfo::runtime_call_w_cp_type:
2189 fatal("runtime_call_w_cp_type unimplemented");
2190 break;
2191 case relocInfo::external_word_type: {
2192 address target = _cache->address_for_id(reloc_data[j]);
2193 // Add external address to global table
2194 int index = ExternalsRecorder::find_index(target);
2195 // Update index in relocation
2196 Relocation::add_jint(iter.data(), index);
2197 external_word_Relocation* reloc = (external_word_Relocation*)iter.reloc();
2198 assert(reloc->target() == target, "sanity");
2199 reloc->set_value(target); // Patch address in the code
2200 break;
2201 }
2202 case relocInfo::internal_word_type: {
2203 internal_word_Relocation* r = (internal_word_Relocation*)iter.reloc();
2204 r->fix_relocation_after_aot_load(aot_code_entry()->dumptime_content_start_addr(), code_blob->content_begin());
2205 break;
2206 }
2207 case relocInfo::section_word_type: {
2208 section_word_Relocation* r = (section_word_Relocation*)iter.reloc();
2209 r->fix_relocation_after_aot_load(aot_code_entry()->dumptime_content_start_addr(), code_blob->content_begin());
2210 break;
2211 }
2212 case relocInfo::poll_type:
2213 break;
2214 case relocInfo::poll_return_type:
2215 break;
2216 case relocInfo::post_call_nop_type:
2217 break;
2218 case relocInfo::entry_guard_type:
2219 break;
2220 default:
2221 fatal("relocation %d unimplemented", (int)iter.type());
2222 break;
2223 }
2224 if (log.is_enabled()) {
2225 iter.print_current_on(&log);
2226 }
2227 j++;
2228 }
2229 assert(j == count, "sanity");
2230 }
2231
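// Write the oops and metadata referenced as immediates by relocations; their list
// indices are the values stored in the relocation extra data.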
2232 bool AOTCodeCache::write_nmethod_reloc_immediates(GrowableArray<Handle>& oop_list, GrowableArray<Metadata*>& metadata_list) {
2233 int count = oop_list.length();
2234 if (!write_bytes(&count, sizeof(int))) {
2235 return false;
2236 }
2237 for (GrowableArrayIterator<Handle> iter = oop_list.begin();
2238 iter != oop_list.end(); ++iter) {
2239 Handle h = *iter;
2240 if (!write_oop(h())) {
2241 return false;
2242 }
2243 }
2244
2245 count = metadata_list.length();
2246 if (!write_bytes(&count, sizeof(int))) {
2247 return false;
2248 }
2249 for (GrowableArrayIterator<Metadata*> iter = metadata_list.begin();
2250 iter != metadata_list.end(); ++iter) {
2251 Metadata* m = *iter;
2252 if (!write_metadata(m)) {
2253 return false;
2254 }
2255 }
2256 return true;
2257 }
2258
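// Write the nmethod's metadata array: a count followed by each entry, delegating to
// write_metadata(Metadata*) for the individual values.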
2259 bool AOTCodeCache::write_metadata(nmethod* nm) {
2260 int count = nm->metadata_count()-1;
2261 if (!write_bytes(&count, sizeof(int))) {
2262 return false;
2263 }
2264 for (Metadata** p = nm->metadata_begin(); p < nm->metadata_end(); p++) {
2265 if (!write_metadata(*p)) {
2266 return false;
2267 }
2268 }
2269 return true;
2270 }
2271
2272 bool AOTCodeCache::write_metadata(Metadata* m) {
2273 uint n = 0;
2274 if (m == nullptr) {
2275 DataKind kind = DataKind::Null;
2276 n = write_bytes(&kind, sizeof(int));
2277 if (n != sizeof(int)) {
2278 return false;
2279 }
2280 } else if (m == (Metadata*)Universe::non_oop_word()) {
2281 DataKind kind = DataKind::No_Data;
2282 n = write_bytes(&kind, sizeof(int));
2283 if (n != sizeof(int)) {
2284 return false;
2285 }
2286 } else if (m->is_klass()) {
2287 if (!write_klass((Klass*)m)) {
2288 return false;
2289 }
2290 } else if (m->is_method()) {
2291 if (!write_method((Method*)m)) {
2292 return false;
2293 }
2294 } else if (m->is_methodCounters()) {
2295 DataKind kind = DataKind::MethodCnts;
2296 n = write_bytes(&kind, sizeof(int));
2297 if (n != sizeof(int)) {
2298 return false;
2299 }
2300 if (!write_method(((MethodCounters*)m)->method())) {
2301 return false;
2302 }
2303 log_info(aot, codecache)("%d (L%d): Write MethodCounters : " INTPTR_FORMAT, compile_id(), comp_level(), p2i(m));
2304 } else { // Not supported
2305 fatal("metadata : " INTPTR_FORMAT " unimplemented", p2i(m));
2306 return false;
2307 }
2308 return true;
2309 }
2310
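// Read one metadata value written by write_metadata(Metadata*), resolving archived
// klasses and methods and recreating MethodCounters when required.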
2311 Metadata* AOTCodeReader::read_metadata(const methodHandle& comp_method) {
2312 uint code_offset = read_position();
2313 Metadata* m = nullptr;
2314 DataKind kind = *(DataKind*)addr(code_offset);
2315 code_offset += sizeof(DataKind);
2316 set_read_position(code_offset);
2317 if (kind == DataKind::Null) {
2318 m = (Metadata*)nullptr;
2319 } else if (kind == DataKind::No_Data) {
2320 m = (Metadata*)Universe::non_oop_word();
2321 } else if (kind == DataKind::Klass || kind == DataKind::Klass_Shared) {
2322 m = (Metadata*)read_klass(comp_method, (kind == DataKind::Klass_Shared));
2323 } else if (kind == DataKind::Method || kind == DataKind::Method_Shared) {
2324 m = (Metadata*)read_method(comp_method, (kind == DataKind::Method_Shared));
2325 } else if (kind == DataKind::MethodCnts) {
2326 kind = *(DataKind*)addr(code_offset);
2327 bool shared = (kind == DataKind::Method_Shared);
2328 assert(kind == DataKind::Method || shared, "Sanity");
2329 code_offset += sizeof(DataKind);
2330 set_read_position(code_offset);
2331 m = (Metadata*)read_method(comp_method, shared);
2332 if (m != nullptr) {
2333 Method* method = (Method*)m;
2334 m = method->get_method_counters(Thread::current());
2335 if (m == nullptr) {
2336 set_lookup_failed();
2337 log_info(aot, codecache)("%d (L%d): Failed to get MethodCounters", compile_id(), comp_level());
2338 } else {
2339 log_info(aot, codecache)("%d (L%d): Read MethodCounters : " INTPTR_FORMAT, compile_id(), comp_level(), p2i(m));
2340 }
2341 }
2342 } else {
2343 set_lookup_failed();
2344 log_info(aot, codecache)("%d (L%d): Unknown metadata kind: %d", compile_id(), comp_level(), (int)kind);
2345 }
2346 return m;
2347 }
2348
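// Write a method reference as an offset from the AOT cache base address; fails if
// the method cannot be stored in the AOT code cache.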
2349 bool AOTCodeCache::write_method(Method* method) {
2350 ResourceMark rm; // For printing the method's name
2351 if (AOTCacheAccess::can_generate_aot_code(method)) {
2352 DataKind kind = DataKind::Method_Shared;
2353 uint n = write_bytes(&kind, sizeof(int));
2354 if (n != sizeof(int)) {
2355 return false;
2356 }
2357 uint method_offset = AOTCacheAccess::delta_from_base_address((address)method);
2358 n = write_bytes(&method_offset, sizeof(uint));
2359 if (n != sizeof(uint)) {
2360 return false;
2361 }
2362 log_info(aot, codecache, metadata)("%d (L%d): Wrote method: %s @ 0x%08x",
2363 compile_id(), comp_level(), method->name_and_sig_as_C_string(), method_offset);
2364 return true;
2365 }
2366 log_warning(aot, codecache, metadata)("%d (L%d): Method is not archived: %s",
2367 compile_id(), comp_level(), method->name_and_sig_as_C_string());
2368 set_lookup_failed();
2369 return false;
2370 }
2371
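// Resolve a method written as a shared-metaspace offset; fails if the method or its
// holder is no longer in CDS or the holder is not loaded and linked.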
2372 Method* AOTCodeReader::read_method(const methodHandle& comp_method, bool shared) {
2373 uint code_offset = read_position();
2374 if (shared) {
2375 uint method_offset = *(uint*)addr(code_offset);
2376 code_offset += sizeof(uint);
2377 set_read_position(code_offset);
2378 Method* m = AOTCacheAccess::convert_offset_to_method(method_offset);
2379 if (!MetaspaceShared::is_in_shared_metaspace((address)m)) {
2380 // Something changed in CDS
2381 set_lookup_failed();
2382 log_info(aot, codecache)("Lookup failed for shared method: " INTPTR_FORMAT " is not in CDS ", p2i((address)m));
2383 return nullptr;
2384 }
2385 assert(m->is_method(), "sanity");
2386 ResourceMark rm;
2387 Klass* k = m->method_holder();
2388 if (!k->is_instance_klass()) {
2389 set_lookup_failed();
2390 log_info(aot, codecache)("%d '%s' (L%d): Lookup failed for holder %s: not instance klass",
2391 compile_id(), comp_method->name_and_sig_as_C_string(), comp_level(), k->external_name());
2392 return nullptr;
2393 } else if (!MetaspaceShared::is_in_shared_metaspace((address)k)) {
2394 set_lookup_failed();
2395 log_info(aot, codecache)("%d '%s' (L%d): Lookup failed for holder %s: not in CDS",
2396 compile_id(), comp_method->name_and_sig_as_C_string(), comp_level(), k->external_name());
2397 return nullptr;
2398 } else if (!InstanceKlass::cast(k)->is_loaded()) {
2399 set_lookup_failed();
2400 log_info(aot, codecache)("%d '%s' (L%d): Lookup failed for holder %s: not loaded",
2401 compile_id(), comp_method->name_and_sig_as_C_string(), comp_level(), k->external_name());
2402 return nullptr;
2403 } else if (!InstanceKlass::cast(k)->is_linked()) {
2404 set_lookup_failed();
2405 log_info(aot, codecache)("%d '%s' (L%d): Lookup failed for holder %s: not linked%s",
2406 compile_id(), comp_method->name_and_sig_as_C_string(), comp_level(), k->external_name(), (_preload ? " for code preload" : ""));
2407 return nullptr;
2408 }
2409 log_info(aot, codecache)("%d (L%d): Shared method lookup: %s",
2410 compile_id(), comp_level(), m->name_and_sig_as_C_string());
2411 return m;
2412 }
2413 set_lookup_failed();
2414 return nullptr;
2415 }
2416
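// Write a klass reference as a shared-metaspace offset together with a state word
// packing the array dimension and the instance klass initialization state.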
2417 bool AOTCodeCache::write_klass(Klass* klass) {
2418 uint array_dim = 0;
2419 if (klass->is_objArray_klass()) {
2420 array_dim = ObjArrayKlass::cast(klass)->dimension();
2421 klass = ObjArrayKlass::cast(klass)->bottom_klass(); // overwrites klass
2422 }
2423 uint init_state = 0;
2424 bool can_write = true;
2425 if (klass->is_instance_klass()) {
2426 InstanceKlass* ik = InstanceKlass::cast(klass);
2427 init_state = (ik->is_initialized() ? 1 : 0);
2428 can_write = AOTCacheAccess::can_generate_aot_code_for(ik);
2429 } else {
2430 can_write = AOTCacheAccess::can_generate_aot_code(klass);
2431 }
2432 ResourceMark rm;
2433 uint state = (array_dim << 1) | (init_state & 1);
2434 if (can_write) {
2435 DataKind kind = DataKind::Klass_Shared;
2436 uint n = write_bytes(&kind, sizeof(int));
2437 if (n != sizeof(int)) {
2438 return false;
2439 }
2440 // Record the instance klass initialization state and the array dimension.
2441 n = write_bytes(&state, sizeof(int));
2442 if (n != sizeof(int)) {
2443 return false;
2444 }
2445 uint klass_offset = AOTCacheAccess::delta_from_base_address((address)klass);
2446 n = write_bytes(&klass_offset, sizeof(uint));
2447 if (n != sizeof(uint)) {
2448 return false;
2449 }
2450 log_info(aot, codecache, metadata)("%d (L%d): Registered klass: %s%s%s @ 0x%08x",
2451 compile_id(), comp_level(), klass->external_name(),
2452 (!klass->is_instance_klass() ? "" : (init_state == 1 ? " (initialized)" : " (not-initialized)")),
2453 (array_dim > 0 ? " (object array)" : ""), klass_offset);
2454 return true;
2455 }
2456 log_warning(aot, codecache, metadata)("%d (L%d): Klass is not archived: %s%s%s",
2457 compile_id(), comp_level(), klass->external_name(),
2458 (!klass->is_instance_klass() ? "" : (init_state == 1 ? " (initialized)" : " (not-initialized)")),
2459 (array_dim > 0 ? " (object array)" : ""));
2460 set_lookup_failed();
2461 return false;
2462 }
2463
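// Resolve a klass written by write_klass(); verify it is still in the shared
// metaspace and sufficiently initialized, and look up the array klass if needed.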
2464 Klass* AOTCodeReader::read_klass(const methodHandle& comp_method, bool shared) {
2465 uint code_offset = read_position();
2466 uint state = *(uint*)addr(code_offset);
2467 uint init_state = (state & 1);
2468 uint array_dim = (state >> 1);
2469 code_offset += sizeof(int);
2470 if (shared) {
2471 uint klass_offset = *(uint*)addr(code_offset);
2472 code_offset += sizeof(uint);
2473 set_read_position(code_offset);
2474 Klass* k = AOTCacheAccess::convert_offset_to_klass(klass_offset);
2475 if (!MetaspaceShared::is_in_shared_metaspace((address)k)) {
2476 // Something changed in CDS
2477 set_lookup_failed();
2478 log_info(aot, codecache)("Lookup failed for shared klass: " INTPTR_FORMAT " is not in CDS ", p2i((address)k));
2479 return nullptr;
2480 }
2481 assert(k->is_klass(), "sanity");
2482 ResourceMark rm;
2483 if (k->is_instance_klass() && !InstanceKlass::cast(k)->is_loaded()) {
2484 set_lookup_failed();
2485 log_info(aot, codecache)("%d '%s' (L%d): Lookup failed for klass %s: not loaded",
2486 compile_id(), comp_method->name_and_sig_as_C_string(), comp_level(), k->external_name());
2487 return nullptr;
2488 } else
2489 // Allow a not-yet-initialized klass if it was also uninitialized during code caching, or when preloading
2490 if (k->is_instance_klass() && !InstanceKlass::cast(k)->is_initialized() && (init_state == 1) && !_preload) {
2491 set_lookup_failed();
2492 log_info(aot, codecache)("%d '%s' (L%d): Lookup failed for klass %s: not initialized",
2493 compile_id(), comp_method->name_and_sig_as_C_string(), comp_level(), k->external_name());
2494 return nullptr;
2495 }
2496 if (array_dim > 0) {
2497 assert(k->is_instance_klass() || k->is_typeArray_klass(), "sanity check");
2498 Klass* ak = k->array_klass_or_null(array_dim);
2499 // FIXME: what would it take to create an array class on the fly?
2500 // Klass* ak = k->array_klass(dim, JavaThread::current());
2501 // guarantee(JavaThread::current()->pending_exception() == nullptr, "");
2502 if (ak == nullptr) {
2503 set_lookup_failed();
2504 log_info(aot, codecache)("%d (L%d): %d-dimension array klass lookup failed: %s",
2505 compile_id(), comp_level(), array_dim, k->external_name());
2506 }
2507 log_info(aot, codecache)("%d (L%d): Klass lookup: %s (object array)", compile_id(), comp_level(), k->external_name());
2508 return ak;
2509 } else {
2510 log_info(aot, codecache)("%d (L%d): Shared klass lookup: %s",
2511 compile_id(), comp_level(), k->external_name());
2512 return k;
2513 }
2514 }
2515 set_lookup_failed();
2516 return nullptr;
2517 }
2518
2519 bool AOTCodeCache::write_oop(jobject& jo) {
2520 oop obj = JNIHandles::resolve(jo);
2521 return write_oop(obj);
2522 }
2523
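// Write an oop according to its kind: null, the non-oop word, class mirrors,
// strings (archived index or inline UTF-8), the well-known class loaders, or
// archived "permanent" heap objects referenced by index.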
2524 bool AOTCodeCache::write_oop(oop obj) {
2525 DataKind kind;
2526 uint n = 0;
2527 if (obj == nullptr) {
2528 kind = DataKind::Null;
2529 n = write_bytes(&kind, sizeof(int));
2530 if (n != sizeof(int)) {
2531 return false;
2532 }
2533 } else if (cast_from_oop<void *>(obj) == Universe::non_oop_word()) {
2534 kind = DataKind::No_Data;
2535 n = write_bytes(&kind, sizeof(int));
2536 if (n != sizeof(int)) {
2537 return false;
2538 }
2539 } else if (java_lang_Class::is_instance(obj)) {
2540 if (java_lang_Class::is_primitive(obj)) {
2541 int bt = (int)java_lang_Class::primitive_type(obj);
2542 kind = DataKind::Primitive;
2543 n = write_bytes(&kind, sizeof(int));
2544 if (n != sizeof(int)) {
2545 return false;
2546 }
2547 n = write_bytes(&bt, sizeof(int));
2548 if (n != sizeof(int)) {
2549 return false;
2550 }
2551 log_info(aot, codecache)("%d (L%d): Write primitive type klass: %s", compile_id(), comp_level(), type2name((BasicType)bt));
2552 } else {
2553 Klass* klass = java_lang_Class::as_Klass(obj);
2554 if (!write_klass(klass)) {
2555 return false;
2556 }
2557 }
2558 } else if (java_lang_String::is_instance(obj)) {
2559 int k = AOTCacheAccess::get_archived_object_permanent_index(obj); // k >= 0 means obj is a "permanent heap object"
2560 if (k >= 0) {
2561 kind = DataKind::String_Shared;
2562 n = write_bytes(&kind, sizeof(int));
2563 if (n != sizeof(int)) {
2564 return false;
2565 }
2566 n = write_bytes(&k, sizeof(int));
2567 if (n != sizeof(int)) {
2568 return false;
2569 }
2570 return true;
2571 }
2572 kind = DataKind::String;
2573 n = write_bytes(&kind, sizeof(int));
2574 if (n != sizeof(int)) {
2575 return false;
2576 }
2577 ResourceMark rm;
2578 size_t length_sz = 0;
2579 const char* string = java_lang_String::as_utf8_string(obj, length_sz);
2580 int length = (int)length_sz; // FIXME -- cast
2581 length++; // write the trailing '\0'
2582 n = write_bytes(&length, sizeof(int));
2583 if (n != sizeof(int)) {
2584 return false;
2585 }
2586 n = write_bytes(string, (uint)length);
2587 if (n != (uint)length) {
2588 return false;
2589 }
2590 log_info(aot, codecache)("%d (L%d): Write String: %s", compile_id(), comp_level(), string);
2591 } else if (java_lang_Module::is_instance(obj)) {
2592 fatal("Module object unimplemented");
2593 } else if (java_lang_ClassLoader::is_instance(obj)) {
2594 if (obj == SystemDictionary::java_system_loader()) {
2595 kind = DataKind::SysLoader;
2596 log_info(aot, codecache)("%d (L%d): Write ClassLoader: java_system_loader", compile_id(), comp_level());
2597 } else if (obj == SystemDictionary::java_platform_loader()) {
2598 kind = DataKind::PlaLoader;
2599 log_info(aot, codecache)("%d (L%d): Write ClassLoader: java_platform_loader", compile_id(), comp_level());
2600 } else {
2601 fatal("ClassLoader object unimplemented");
2602 return false;
2603 }
2604 n = write_bytes(&kind, sizeof(int));
2605 if (n != sizeof(int)) {
2606 return false;
2607 }
2608 } else {
2609 int k = AOTCacheAccess::get_archived_object_permanent_index(obj); // k >= 0 means obj is a "permanent heap object"
2610 if (k >= 0) {
2611 kind = DataKind::MH_Oop_Shared;
2612 n = write_bytes(&kind, sizeof(int));
2613 if (n != sizeof(int)) {
2614 return false;
2615 }
2616 n = write_bytes(&k, sizeof(int));
2617 if (n != sizeof(int)) {
2618 return false;
2619 }
2620 return true;
2621 }
2622 // Unhandled oop - bailout
2623 set_lookup_failed();
2624 log_info(aot, codecache, nmethod)("%d (L%d): Unhandled obj: " PTR_FORMAT " : %s",
2625 compile_id(), comp_level(), p2i(obj), obj->klass()->external_name());
2626 return false;
2627 }
2628 return true;
2629 }
2630
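// Read an oop written by write_oop(); an unknown kind marks the lookup as failed.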
2631 oop AOTCodeReader::read_oop(JavaThread* thread, const methodHandle& comp_method) {
2632 uint code_offset = read_position();
2633 oop obj = nullptr;
2634 DataKind kind = *(DataKind*)addr(code_offset);
2635 code_offset += sizeof(DataKind);
2636 set_read_position(code_offset);
2637 if (kind == DataKind::Null) {
2638 return nullptr;
2639 } else if (kind == DataKind::No_Data) {
2640 return cast_to_oop(Universe::non_oop_word());
2641 } else if (kind == DataKind::Klass || kind == DataKind::Klass_Shared) {
2642 Klass* k = read_klass(comp_method, (kind == DataKind::Klass_Shared));
2643 if (k == nullptr) {
2644 return nullptr;
2645 }
2646 obj = k->java_mirror();
2647 if (obj == nullptr) {
2648 set_lookup_failed();
2649 log_info(aot, codecache)("Lookup failed for java_mirror of klass %s", k->external_name());
2650 return nullptr;
2651 }
2652 } else if (kind == DataKind::Primitive) {
2653 code_offset = read_position();
2654 int t = *(int*)addr(code_offset);
2655 code_offset += sizeof(int);
2656 set_read_position(code_offset);
2657 BasicType bt = (BasicType)t;
2658 obj = java_lang_Class::primitive_mirror(bt);
2659 log_info(aot, codecache)("%d (L%d): Read primitive type klass: %s", compile_id(), comp_level(), type2name(bt));
2660 } else if (kind == DataKind::String_Shared) {
2661 code_offset = read_position();
2662 int k = *(int*)addr(code_offset);
2663 code_offset += sizeof(int);
2664 set_read_position(code_offset);
2665 obj = AOTCacheAccess::get_archived_object(k);
2666 } else if (kind == DataKind::String) {
2667 code_offset = read_position();
2668 int length = *(int*)addr(code_offset);
2669 code_offset += sizeof(int);
2670 set_read_position(code_offset);
2671 const char* dest = addr(code_offset);
2672 set_read_position(code_offset + length);
2673 obj = StringTable::intern(&(dest[0]), thread);
2674 if (obj == nullptr) {
2675 set_lookup_failed();
2676 log_info(aot, codecache)("%d (L%d): Lookup failed for String %s",
2677 compile_id(), comp_level(), &(dest[0]));
2678 return nullptr;
2679 }
2680 assert(java_lang_String::is_instance(obj), "must be string");
2681 log_info(aot, codecache)("%d (L%d): Read String: %s", compile_id(), comp_level(), dest);
2682 } else if (kind == DataKind::SysLoader) {
2683 obj = SystemDictionary::java_system_loader();
2684 log_info(aot, codecache)("%d (L%d): Read java_system_loader", compile_id(), comp_level());
2685 } else if (kind == DataKind::PlaLoader) {
2686 obj = SystemDictionary::java_platform_loader();
2687 log_info(aot, codecache)("%d (L%d): Read java_platform_loader", compile_id(), comp_level());
2688 } else if (kind == DataKind::MH_Oop_Shared) {
2689 code_offset = read_position();
2690 int k = *(int*)addr(code_offset);
2691 code_offset += sizeof(int);
2692 set_read_position(code_offset);
2693 obj = AOTCacheAccess::get_archived_object(k);
2694 } else {
2695 set_lookup_failed();
2696 log_info(aot, codecache)("%d (L%d): Unknown oop kind: %d",
2697 compile_id(), comp_level(), (int)kind);
2698 return nullptr;
2699 }
2700 return obj;
2701 }
2702
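// Read the oop list followed by the metadata list, optionally registering every
// value with the supplied OopRecorder.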
2703 bool AOTCodeReader::read_oop_metadata_list(JavaThread* thread, ciMethod* target, GrowableArray<Handle> &oop_list, GrowableArray<Metadata*> &metadata_list, OopRecorder* oop_recorder) {
2704 methodHandle comp_method(JavaThread::current(), target->get_Method());
2705 JavaThread* current = JavaThread::current();
2706 uint offset = read_position();
2707 int count = *(int *)addr(offset);
2708 offset += sizeof(int);
2709 set_read_position(offset);
2710 for (int i = 0; i < count; i++) {
2711 oop obj = read_oop(current, comp_method);
2712 if (lookup_failed()) {
2713 return false;
2714 }
2715 Handle h(thread, obj);
2716 oop_list.append(h);
2717 if (oop_recorder != nullptr) {
2718 jobject jo = JNIHandles::make_local(thread, obj);
2719 if (oop_recorder->is_real(jo)) {
2720 oop_recorder->find_index(jo);
2721 } else {
2722 oop_recorder->allocate_oop_index(jo);
2723 }
2724 }
2725 LogStreamHandle(Debug, aot, codecache, oops) log;
2726 if (log.is_enabled()) {
2727 log.print("%d: " INTPTR_FORMAT " ", i, p2i(obj));
2728 if (obj == Universe::non_oop_word()) {
2729 log.print("non-oop word");
2730 } else if (obj == nullptr) {
2731 log.print("nullptr-oop");
2732 } else {
2733 obj->print_value_on(&log);
2734 }
2735 log.cr();
2736 }
2737 }
2738
2739 offset = read_position();
2740 count = *(int *)addr(offset);
2741 offset += sizeof(int);
2742 set_read_position(offset);
2743 for (int i = 0; i < count; i++) {
2744 Metadata* m = read_metadata(comp_method);
2745 if (lookup_failed()) {
2746 return false;
2747 }
2748 metadata_list.append(m);
2749 if (oop_recorder != nullptr) {
2750 if (oop_recorder->is_real(m)) {
2751 oop_recorder->find_index(m);
2752 } else {
2753 oop_recorder->allocate_metadata_index(m);
2754 }
2755 }
2756 LogTarget(Debug, aot, codecache, metadata) log;
2757 if (log.is_enabled()) {
2758 LogStream ls(log);
2759 ls.print("%d: " INTPTR_FORMAT " ", i, p2i(m));
2760 if (m == (Metadata*)Universe::non_oop_word()) {
2761 ls.print("non-metadata word");
2762 } else if (m == nullptr) {
2763 ls.print("nullptr-metadata");
2764 } else {
2765 Metadata::print_value_on_maybe_null(&ls, m);
2766 }
2767 ls.cr();
2768 }
2769 }
2770 return true;
2771 }
2772
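// Oop map sets are archived verbatim: the size followed by the raw ImmutableOopMapSet bytes.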
2773 bool AOTCodeCache::write_oop_map_set(CodeBlob& cb) {
2774 ImmutableOopMapSet* oopmaps = cb.oop_maps();
2775 int oopmaps_size = oopmaps->nr_of_bytes();
2776 if (!write_bytes(&oopmaps_size, sizeof(int))) {
2777 return false;
2778 }
2779 uint n = write_bytes(oopmaps, oopmaps->nr_of_bytes());
2780 if (n != (uint)oopmaps->nr_of_bytes()) {
2781 return false;
2782 }
2783 return true;
2784 }
2785
2786 ImmutableOopMapSet* AOTCodeReader::read_oop_map_set() {
2787 uint offset = read_position();
2788 int size = *(int *)addr(offset);
2789 offset += sizeof(int);
2790 ImmutableOopMapSet* oopmaps = (ImmutableOopMapSet *)addr(offset);
2791 offset += size;
2792 set_read_position(offset);
2793 return oopmaps;
2794 }
2795
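// Write the nmethod's oops array: a count followed by each oop.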
2796 bool AOTCodeCache::write_oops(nmethod* nm) {
2797 int count = nm->oops_count()-1;
2798 if (!write_bytes(&count, sizeof(int))) {
2799 return false;
2800 }
2801 for (oop* p = nm->oops_begin(); p < nm->oops_end(); p++) {
2802 if (!write_oop(*p)) {
2803 return false;
2804 }
2805 }
2806 return true;
2807 }
2808
2809 #ifndef PRODUCT
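// Asm remarks and debug strings exist only in non-product builds. They are written
// either as ids into the cache's C-string table or as inline strings.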
2810 bool AOTCodeCache::write_asm_remarks(AsmRemarks& asm_remarks, bool use_string_table) {
2811 // Write asm remarks
2812 uint* count_ptr = (uint *)reserve_bytes(sizeof(uint));
2813 if (count_ptr == nullptr) {
2814 return false;
2815 }
2816 uint count = 0;
2817 bool result = asm_remarks.iterate([&] (uint offset, const char* str) -> bool {
2818 log_trace(aot, codecache, stubs)("asm remark offset=%d, str='%s'", offset, str);
2819 uint n = write_bytes(&offset, sizeof(uint));
2820 if (n != sizeof(uint)) {
2821 return false;
2822 }
2823 if (use_string_table) {
2824 const char* cstr = add_C_string(str);
2825 int id = _table->id_for_C_string((address)cstr);
2826 assert(id != -1, "asm remark string '%s' not found in AOTCodeAddressTable", str);
2827 n = write_bytes(&id, sizeof(int));
2828 if (n != sizeof(int)) {
2829 return false;
2830 }
2831 } else {
2832 n = write_bytes(str, (uint)strlen(str) + 1);
2833 if (n != strlen(str) + 1) {
2834 return false;
2835 }
2836 }
2837 count += 1;
2838 return true;
2839 });
2840 *count_ptr = count;
2841 return result;
2842 }
2843
2844 void AOTCodeReader::read_asm_remarks(AsmRemarks& asm_remarks, bool use_string_table) {
2845 // Read asm remarks
2846 uint offset = read_position();
2847 uint count = *(uint *)addr(offset);
2848 offset += sizeof(uint);
2849 for (uint i = 0; i < count; i++) {
2850 uint remark_offset = *(uint *)addr(offset);
2851 offset += sizeof(uint);
2852 const char* remark = nullptr;
2853 if (use_string_table) {
2854 int remark_string_id = *(uint *)addr(offset);
2855 offset += sizeof(int);
2856 remark = (const char*)_cache->address_for_C_string(remark_string_id);
2857 } else {
2858 remark = (const char*)addr(offset);
2859 offset += (uint)strlen(remark)+1;
2860 }
2861 asm_remarks.insert(remark_offset, remark);
2862 }
2863 set_read_position(offset);
2864 }
2865
2866 bool AOTCodeCache::write_dbg_strings(DbgStrings& dbg_strings, bool use_string_table) {
2867 // Write dbg strings
2868 uint* count_ptr = (uint *)reserve_bytes(sizeof(uint));
2869 if (count_ptr == nullptr) {
2870 return false;
2871 }
2872 uint count = 0;
2873 bool result = dbg_strings.iterate([&] (const char* str) -> bool {
2874 log_trace(aot, codecache, stubs)("dbg string=%s", str);
2875 if (use_string_table) {
2876 const char* cstr = add_C_string(str);
2877 int id = _table->id_for_C_string((address)cstr);
2878 assert(id != -1, "dbg string '%s' not found in AOTCodeAddressTable", str);
2879 uint n = write_bytes(&id, sizeof(int));
2880 if (n != sizeof(int)) {
2881 return false;
2882 }
2883 } else {
2884 uint n = write_bytes(str, (uint)strlen(str) + 1);
2885 if (n != strlen(str) + 1) {
2886 return false;
2887 }
2888 }
2889 count += 1;
2890 return true;
2891 });
2892 *count_ptr = count;
2893 return result;
2894 }
2895
2896 void AOTCodeReader::read_dbg_strings(DbgStrings& dbg_strings, bool use_string_table) {
2897 // Read dbg strings
2898 uint offset = read_position();
2899 uint count = *(uint *)addr(offset);
2900 offset += sizeof(uint);
2901 for (uint i = 0; i < count; i++) {
2902 const char* str = nullptr;
2903 if (use_string_table) {
2904 int string_id = *(uint *)addr(offset);
2905 offset += sizeof(int);
2906 str = (const char*)_cache->address_for_C_string(string_id);
2907 } else {
2908 str = (const char*)addr(offset);
2909 offset += (uint)strlen(str)+1;
2910 }
2911 dbg_strings.insert(str);
2912 }
2913 set_read_position(offset);
2914 }
2915 #endif // PRODUCT
2916
2917 //======================= AOTCodeAddressTable ===============
2918
2919 // address table ids for generated routines, external addresses and C
2920 // string addresses are partitioned into positive integer ranges
2921 // defined by the following positive base and max values
2922 // i.e. [_extrs_base, _extrs_base + _extrs_max -1],
2923 // [_stubs_base, _stubs_base + _stubs_max -1],
2924 // ...
2925 // [_c_str_base, _c_str_base + _c_str_max -1],
2926 #define _extrs_max 140
2927 #define _stubs_max 210
2928 #define _shared_blobs_max 25
2929 #define _C1_blobs_max 50
2930 #define _C2_blobs_max 25
2931 #define _blobs_max (_shared_blobs_max+_C1_blobs_max+_C2_blobs_max)
2932 #define _all_max (_extrs_max+_stubs_max+_blobs_max)
2933
2934 #define _extrs_base 0
2935 #define _stubs_base (_extrs_base + _extrs_max)
2936 #define _shared_blobs_base (_stubs_base + _stubs_max)
2937 #define _C1_blobs_base (_shared_blobs_base + _shared_blobs_max)
2938 #define _C2_blobs_base (_C1_blobs_base + _C1_blobs_max)
2939 #define _blobs_end (_shared_blobs_base + _blobs_max)
2940 #if (_C2_blobs_base >= _all_max)
2941 #error AOTCodeAddressTable ranges need adjusting
2942 #endif
2943
2944 #define SET_ADDRESS(type, addr) \
2945 { \
2946 type##_addr[type##_length++] = (address) (addr); \
2947 assert(type##_length <= type##_max, "increase size"); \
2948 }
2949
2950 static bool initializing_extrs = false;
2951
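// Note: ids recorded at dump time are resolved positionally at load time, so the
// address tables below must be populated with the same entries in the same order
// in both the assembly (dump) and production (load) JVM.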
2952 void AOTCodeAddressTable::init_extrs() {
2953 if (_extrs_complete || initializing_extrs) return; // Done already
2954
2955 assert(_blobs_end <= _all_max, "AOTCodeAddressTable ranges need adjusting");
2956
2957 initializing_extrs = true;
2958 _extrs_addr = NEW_C_HEAP_ARRAY(address, _extrs_max, mtCode);
2959
2960 _extrs_length = 0;
2961
2962 // Record addresses of VM runtime methods
2963 SET_ADDRESS(_extrs, SharedRuntime::fixup_callers_callsite);
2964 SET_ADDRESS(_extrs, SharedRuntime::handle_wrong_method);
2965 SET_ADDRESS(_extrs, SharedRuntime::handle_wrong_method_abstract);
2966 SET_ADDRESS(_extrs, SharedRuntime::handle_wrong_method_ic_miss);
2967 #if defined(AARCH64) && !defined(ZERO)
2968 SET_ADDRESS(_extrs, JavaThread::aarch64_get_thread_helper);
2969 #endif
2970 {
2971 // Required by Shared blobs
2972 SET_ADDRESS(_extrs, Deoptimization::fetch_unroll_info);
2973 SET_ADDRESS(_extrs, Deoptimization::unpack_frames);
2974 SET_ADDRESS(_extrs, SafepointSynchronize::handle_polling_page_exception);
2975 SET_ADDRESS(_extrs, SharedRuntime::resolve_opt_virtual_call_C);
2976 SET_ADDRESS(_extrs, SharedRuntime::resolve_virtual_call_C);
2977 SET_ADDRESS(_extrs, SharedRuntime::resolve_static_call_C);
2978 SET_ADDRESS(_extrs, SharedRuntime::throw_delayed_StackOverflowError);
2979 SET_ADDRESS(_extrs, SharedRuntime::throw_AbstractMethodError);
2980 SET_ADDRESS(_extrs, SharedRuntime::throw_IncompatibleClassChangeError);
2981 SET_ADDRESS(_extrs, SharedRuntime::throw_NullPointerException_at_call);
2982 SET_ADDRESS(_extrs, CompressedOops::base_addr());
2983 SET_ADDRESS(_extrs, CompressedKlassPointers::base_addr());
2984
2985 }
2986
2987 #ifdef COMPILER1
2988 {
2989 // Required by C1 blobs
2990 SET_ADDRESS(_extrs, static_cast<int (*)(oopDesc*)>(SharedRuntime::dtrace_object_alloc));
2991 SET_ADDRESS(_extrs, SharedRuntime::exception_handler_for_return_address);
2992 SET_ADDRESS(_extrs, SharedRuntime::register_finalizer);
2993 SET_ADDRESS(_extrs, Runtime1::is_instance_of);
2994 SET_ADDRESS(_extrs, Runtime1::exception_handler_for_pc);
2995 SET_ADDRESS(_extrs, Runtime1::check_abort_on_vm_exception);
2996 SET_ADDRESS(_extrs, Runtime1::new_instance);
2997 SET_ADDRESS(_extrs, Runtime1::counter_overflow);
2998 SET_ADDRESS(_extrs, Runtime1::new_type_array);
2999 SET_ADDRESS(_extrs, Runtime1::new_object_array);
3000 SET_ADDRESS(_extrs, Runtime1::new_multi_array);
3001 SET_ADDRESS(_extrs, Runtime1::throw_range_check_exception);
3002 SET_ADDRESS(_extrs, Runtime1::throw_index_exception);
3003 SET_ADDRESS(_extrs, Runtime1::throw_div0_exception);
3004 SET_ADDRESS(_extrs, Runtime1::throw_null_pointer_exception);
3005 SET_ADDRESS(_extrs, Runtime1::throw_array_store_exception);
3006 SET_ADDRESS(_extrs, Runtime1::throw_class_cast_exception);
3007 SET_ADDRESS(_extrs, Runtime1::throw_incompatible_class_change_error);
3008 SET_ADDRESS(_extrs, Runtime1::monitorenter);
3009 SET_ADDRESS(_extrs, Runtime1::monitorexit);
3010 SET_ADDRESS(_extrs, Runtime1::deoptimize);
3011 SET_ADDRESS(_extrs, Runtime1::access_field_patching);
3012 SET_ADDRESS(_extrs, Runtime1::move_klass_patching);
3013 SET_ADDRESS(_extrs, Runtime1::move_mirror_patching);
3014 SET_ADDRESS(_extrs, Runtime1::move_appendix_patching);
3015 SET_ADDRESS(_extrs, Runtime1::predicate_failed_trap);
3016 SET_ADDRESS(_extrs, Runtime1::unimplemented_entry);
3017 SET_ADDRESS(_extrs, Runtime1::trace_block_entry);
3018 #ifndef PRODUCT
3019 SET_ADDRESS(_extrs, os::breakpoint);
3020 #endif
3021 }
3022 #endif // COMPILER1
3023
3024 #ifdef COMPILER2
3025 {
3026 // Required by C2 blobs
3027 SET_ADDRESS(_extrs, Deoptimization::uncommon_trap);
3028 SET_ADDRESS(_extrs, OptoRuntime::handle_exception_C);
3029 SET_ADDRESS(_extrs, OptoRuntime::new_instance_C);
3030 SET_ADDRESS(_extrs, OptoRuntime::new_array_C);
3031 SET_ADDRESS(_extrs, OptoRuntime::new_array_nozero_C);
3032 SET_ADDRESS(_extrs, OptoRuntime::multianewarray2_C);
3033 SET_ADDRESS(_extrs, OptoRuntime::multianewarray3_C);
3034 SET_ADDRESS(_extrs, OptoRuntime::multianewarray4_C);
3035 SET_ADDRESS(_extrs, OptoRuntime::multianewarray5_C);
3036 SET_ADDRESS(_extrs, OptoRuntime::multianewarrayN_C);
3037 #if INCLUDE_JVMTI
3038 SET_ADDRESS(_extrs, SharedRuntime::notify_jvmti_vthread_start);
3039 SET_ADDRESS(_extrs, SharedRuntime::notify_jvmti_vthread_end);
3040 SET_ADDRESS(_extrs, SharedRuntime::notify_jvmti_vthread_mount);
3041 SET_ADDRESS(_extrs, SharedRuntime::notify_jvmti_vthread_unmount);
3042 #endif
3043 SET_ADDRESS(_extrs, OptoRuntime::complete_monitor_locking_C);
3044 SET_ADDRESS(_extrs, OptoRuntime::monitor_notify_C);
3045 SET_ADDRESS(_extrs, OptoRuntime::monitor_notifyAll_C);
3046 SET_ADDRESS(_extrs, OptoRuntime::rethrow_C);
3047 SET_ADDRESS(_extrs, OptoRuntime::slow_arraycopy_C);
3048 SET_ADDRESS(_extrs, OptoRuntime::register_finalizer_C);
3049 SET_ADDRESS(_extrs, OptoRuntime::class_init_barrier_C);
3050 }
3051 #endif // COMPILER2
3052
3053 #if INCLUDE_G1GC
3054 SET_ADDRESS(_extrs, G1BarrierSetRuntime::write_ref_field_post_entry);
3055 SET_ADDRESS(_extrs, G1BarrierSetRuntime::write_ref_field_pre_entry);
3056 #endif
3057
3058 #if INCLUDE_SHENANDOAHGC
3059 SET_ADDRESS(_extrs, ShenandoahRuntime::arraycopy_barrier_oop);
3060 SET_ADDRESS(_extrs, ShenandoahRuntime::arraycopy_barrier_narrow_oop);
3061 SET_ADDRESS(_extrs, ShenandoahRuntime::write_ref_field_pre);
3062 SET_ADDRESS(_extrs, ShenandoahRuntime::clone_barrier);
3063 SET_ADDRESS(_extrs, ShenandoahRuntime::load_reference_barrier_strong);
3064 SET_ADDRESS(_extrs, ShenandoahRuntime::load_reference_barrier_strong_narrow);
3065 SET_ADDRESS(_extrs, ShenandoahRuntime::load_reference_barrier_weak);
3066 SET_ADDRESS(_extrs, ShenandoahRuntime::load_reference_barrier_weak_narrow);
3067 SET_ADDRESS(_extrs, ShenandoahRuntime::load_reference_barrier_phantom);
3068 SET_ADDRESS(_extrs, ShenandoahRuntime::load_reference_barrier_phantom_narrow);
3069 #endif
3070
3071 #if INCLUDE_ZGC
3072 SET_ADDRESS(_extrs, ZBarrierSetRuntime::load_barrier_on_phantom_oop_field_preloaded_addr());
3073 #if defined(AMD64)
3074 SET_ADDRESS(_extrs, &ZPointerLoadShift);
3075 #endif
3076 #endif // INCLUDE_ZGC
3077
3078 SET_ADDRESS(_extrs, SharedRuntime::log_jni_monitor_still_held);
3079 SET_ADDRESS(_extrs, SharedRuntime::rc_trace_method_entry);
3080 SET_ADDRESS(_extrs, SharedRuntime::reguard_yellow_pages);
3081 SET_ADDRESS(_extrs, SharedRuntime::dtrace_method_exit);
3082
3083 SET_ADDRESS(_extrs, SharedRuntime::complete_monitor_unlocking_C);
3084 SET_ADDRESS(_extrs, SharedRuntime::enable_stack_reserved_zone);
3085 #if defined(AMD64) && !defined(ZERO)
3086 SET_ADDRESS(_extrs, SharedRuntime::montgomery_multiply);
3087 SET_ADDRESS(_extrs, SharedRuntime::montgomery_square);
3088 #endif // AMD64
3089 SET_ADDRESS(_extrs, SharedRuntime::d2f);
3090 SET_ADDRESS(_extrs, SharedRuntime::d2i);
3091 SET_ADDRESS(_extrs, SharedRuntime::d2l);
3092 SET_ADDRESS(_extrs, SharedRuntime::dcos);
3093 SET_ADDRESS(_extrs, SharedRuntime::dexp);
3094 SET_ADDRESS(_extrs, SharedRuntime::dlog);
3095 SET_ADDRESS(_extrs, SharedRuntime::dlog10);
3096 SET_ADDRESS(_extrs, SharedRuntime::dpow);
3097 SET_ADDRESS(_extrs, SharedRuntime::dsin);
3098 SET_ADDRESS(_extrs, SharedRuntime::dtan);
3099 SET_ADDRESS(_extrs, SharedRuntime::f2i);
3100 SET_ADDRESS(_extrs, SharedRuntime::f2l);
3101 #ifndef ZERO
3102 SET_ADDRESS(_extrs, SharedRuntime::drem);
3103 SET_ADDRESS(_extrs, SharedRuntime::frem);
3104 #endif
3105 SET_ADDRESS(_extrs, SharedRuntime::l2d);
3106 SET_ADDRESS(_extrs, SharedRuntime::l2f);
3107 SET_ADDRESS(_extrs, SharedRuntime::ldiv);
3108 SET_ADDRESS(_extrs, SharedRuntime::lmul);
3109 SET_ADDRESS(_extrs, SharedRuntime::lrem);
3110 #if INCLUDE_JVMTI
3111 SET_ADDRESS(_extrs, &JvmtiExport::_should_notify_object_alloc);
3112 #endif /* INCLUDE_JVMTI */
3113 BarrierSet* bs = BarrierSet::barrier_set();
3114 if (bs->is_a(BarrierSet::CardTableBarrierSet)) {
3115 SET_ADDRESS(_extrs, ci_card_table_address_as<address>());
3116 }
3117 SET_ADDRESS(_extrs, ThreadIdentifier::unsafe_offset());
3118 SET_ADDRESS(_extrs, Thread::current);
3119
3120 SET_ADDRESS(_extrs, os::javaTimeMillis);
3121 SET_ADDRESS(_extrs, os::javaTimeNanos);
3122
3123 #if INCLUDE_JVMTI
3124 SET_ADDRESS(_extrs, &JvmtiVTMSTransitionDisabler::_VTMS_notify_jvmti_events);
3125 #endif /* INCLUDE_JVMTI */
3126 SET_ADDRESS(_extrs, StubRoutines::crc_table_addr());
3127 #ifndef PRODUCT
3128 SET_ADDRESS(_extrs, &SharedRuntime::_partial_subtype_ctr);
3129 SET_ADDRESS(_extrs, JavaThread::verify_cross_modify_fence_failure);
3130 #endif
3131
3132 #ifndef ZERO
3133 #if defined(AMD64) || defined(AARCH64) || defined(RISCV64)
3134 SET_ADDRESS(_extrs, MacroAssembler::debug64);
3135 #endif
3136 #if defined(AMD64)
3137 SET_ADDRESS(_extrs, StubRoutines::x86::arrays_hashcode_powers_of_31());
3138 #endif
3139 #endif // ZERO
3140
3141 #ifdef COMPILER1
3142 #ifdef X86
3143 SET_ADDRESS(_extrs, LIR_Assembler::float_signmask_pool);
3144 SET_ADDRESS(_extrs, LIR_Assembler::double_signmask_pool);
3145 SET_ADDRESS(_extrs, LIR_Assembler::float_signflip_pool);
3146 SET_ADDRESS(_extrs, LIR_Assembler::double_signflip_pool);
3147 #endif
3148 #endif
3149
3150 // addresses of fields in AOT runtime constants area
3151 address* p = AOTRuntimeConstants::field_addresses_list();
3152 while (*p != nullptr) {
3153 SET_ADDRESS(_extrs, *p++);
3154 }
3155
3156 _extrs_complete = true;
3157 log_info(aot, codecache, init)("External addresses recorded");
3158 }
3159
3160 static bool initializing_early_stubs = false;
3161
3162 void AOTCodeAddressTable::init_early_stubs() {
3163 if (_complete || initializing_early_stubs) return; // Done already
3164 initializing_early_stubs = true;
3165 _stubs_addr = NEW_C_HEAP_ARRAY(address, _stubs_max, mtCode);
3166 _stubs_length = 0;
3167 SET_ADDRESS(_stubs, StubRoutines::forward_exception_entry());
3168
3169 {
3170 // Required by C1 blobs
3171 #if defined(AMD64) && !defined(ZERO)
3172 SET_ADDRESS(_stubs, StubRoutines::x86::double_sign_flip());
3173 SET_ADDRESS(_stubs, StubRoutines::x86::d2l_fixup());
3174 #endif // AMD64
3175 }
3176
3177 _early_stubs_complete = true;
3178 log_info(aot, codecache, init)("Early stubs recorded");
3179 }
3180
3181 static bool initializing_shared_blobs = false;
3182
3183 void AOTCodeAddressTable::init_shared_blobs() {
3184 if (_complete || initializing_shared_blobs) return; // Done already
3185 initializing_shared_blobs = true;
3186 address* blobs_addr = NEW_C_HEAP_ARRAY(address, _blobs_max, mtCode);
3187 _shared_blobs_addr = blobs_addr;
3188 _C1_blobs_addr = _shared_blobs_addr + _shared_blobs_max; // C1 blob addresses are stored after the shared blobs
3189 _C2_blobs_addr = _C1_blobs_addr + _C1_blobs_max; // C2 blob addresses are stored after the C1 blobs
3190
3191 _shared_blobs_length = 0;
3192 _C1_blobs_length = 0;
3193 _C2_blobs_length = 0;
3194
3195 // clear the address table
3196 memset(blobs_addr, 0, sizeof(address)* _blobs_max);
3197
3198 // Record addresses of generated code blobs
3199 SET_ADDRESS(_shared_blobs, SharedRuntime::get_handle_wrong_method_stub());
3200 SET_ADDRESS(_shared_blobs, SharedRuntime::get_ic_miss_stub());
3201 SET_ADDRESS(_shared_blobs, SharedRuntime::deopt_blob()->unpack());
3202 SET_ADDRESS(_shared_blobs, SharedRuntime::deopt_blob()->unpack_with_exception());
3203 SET_ADDRESS(_shared_blobs, SharedRuntime::deopt_blob()->unpack_with_reexecution());
3204 SET_ADDRESS(_shared_blobs, SharedRuntime::deopt_blob()->unpack_with_exception_in_tls());
3205 SET_ADDRESS(_shared_blobs, SharedRuntime::get_resolve_opt_virtual_call_stub());
3206 SET_ADDRESS(_shared_blobs, SharedRuntime::get_resolve_virtual_call_stub());
3207 SET_ADDRESS(_shared_blobs, SharedRuntime::get_resolve_static_call_stub());
3208 SET_ADDRESS(_shared_blobs, SharedRuntime::deopt_blob()->entry_point());
3209 SET_ADDRESS(_shared_blobs, SharedRuntime::polling_page_safepoint_handler_blob()->entry_point());
3210 SET_ADDRESS(_shared_blobs, SharedRuntime::polling_page_return_handler_blob()->entry_point());
3211 #ifdef COMPILER2
3212 SET_ADDRESS(_shared_blobs, SharedRuntime::polling_page_vectors_safepoint_handler_blob()->entry_point());
3213 #endif
3214 #if INCLUDE_JVMCI
3215 if (EnableJVMCI) {
3216 SET_ADDRESS(_shared_blobs, SharedRuntime::deopt_blob()->uncommon_trap());
3217 SET_ADDRESS(_shared_blobs, SharedRuntime::deopt_blob()->implicit_exception_uncommon_trap());
3218 }
3219 #endif
3220 SET_ADDRESS(_shared_blobs, SharedRuntime::throw_AbstractMethodError_entry());
3221 SET_ADDRESS(_shared_blobs, SharedRuntime::throw_IncompatibleClassChangeError_entry());
3222 SET_ADDRESS(_shared_blobs, SharedRuntime::throw_NullPointerException_at_call_entry());
3223 SET_ADDRESS(_shared_blobs, SharedRuntime::throw_StackOverflowError_entry());
3224 SET_ADDRESS(_shared_blobs, SharedRuntime::throw_delayed_StackOverflowError_entry());
3225
3226 assert(_shared_blobs_length <= _shared_blobs_max, "increase _shared_blobs_max to %d", _shared_blobs_length);
3227 _shared_blobs_complete = true;
3228 log_info(aot, codecache, init)("All shared blobs recorded");
3229 }
3230
3231 static bool initializing_stubs = false;
3232 void AOTCodeAddressTable::init_stubs() {
3233 if (_complete || initializing_stubs) return; // Done already
3234 assert(_early_stubs_complete, "early stubs should be initialized");
3235 initializing_stubs = true;
3236
3237 // Stubs
3238 SET_ADDRESS(_stubs, StubRoutines::method_entry_barrier());
3239 SET_ADDRESS(_stubs, StubRoutines::atomic_xchg_entry());
3240 SET_ADDRESS(_stubs, StubRoutines::atomic_cmpxchg_entry());
3241 SET_ADDRESS(_stubs, StubRoutines::atomic_cmpxchg_long_entry());
3242 SET_ADDRESS(_stubs, StubRoutines::atomic_add_entry());
3243 SET_ADDRESS(_stubs, StubRoutines::fence_entry());
3244
3245 SET_ADDRESS(_stubs, StubRoutines::cont_thaw());
3246 SET_ADDRESS(_stubs, StubRoutines::cont_returnBarrier());
3247 SET_ADDRESS(_stubs, StubRoutines::cont_returnBarrierExc());
3248
3249 JFR_ONLY(SET_ADDRESS(_stubs, SharedRuntime::jfr_write_checkpoint());)
3250
3252 SET_ADDRESS(_stubs, StubRoutines::jbyte_arraycopy());
3253 SET_ADDRESS(_stubs, StubRoutines::jshort_arraycopy());
3254 SET_ADDRESS(_stubs, StubRoutines::jint_arraycopy());
3255 SET_ADDRESS(_stubs, StubRoutines::jlong_arraycopy());
3256 SET_ADDRESS(_stubs, StubRoutines::_oop_arraycopy);
3257 SET_ADDRESS(_stubs, StubRoutines::_oop_arraycopy_uninit);
3258
3259 SET_ADDRESS(_stubs, StubRoutines::jbyte_disjoint_arraycopy());
3260 SET_ADDRESS(_stubs, StubRoutines::jshort_disjoint_arraycopy());
3261 SET_ADDRESS(_stubs, StubRoutines::jint_disjoint_arraycopy());
3262 SET_ADDRESS(_stubs, StubRoutines::jlong_disjoint_arraycopy());
3263 SET_ADDRESS(_stubs, StubRoutines::_oop_disjoint_arraycopy);
3264 SET_ADDRESS(_stubs, StubRoutines::_oop_disjoint_arraycopy_uninit);
3265
3266 SET_ADDRESS(_stubs, StubRoutines::arrayof_jbyte_arraycopy());
3267 SET_ADDRESS(_stubs, StubRoutines::arrayof_jshort_arraycopy());
3268 SET_ADDRESS(_stubs, StubRoutines::arrayof_jint_arraycopy());
3269 SET_ADDRESS(_stubs, StubRoutines::arrayof_jlong_arraycopy());
3270 SET_ADDRESS(_stubs, StubRoutines::_arrayof_oop_arraycopy);
3271 SET_ADDRESS(_stubs, StubRoutines::_arrayof_oop_arraycopy_uninit);
3272
3273 SET_ADDRESS(_stubs, StubRoutines::arrayof_jbyte_disjoint_arraycopy());
3274 SET_ADDRESS(_stubs, StubRoutines::arrayof_jshort_disjoint_arraycopy());
3275 SET_ADDRESS(_stubs, StubRoutines::arrayof_jint_disjoint_arraycopy());
3276 SET_ADDRESS(_stubs, StubRoutines::arrayof_jlong_disjoint_arraycopy());
3277 SET_ADDRESS(_stubs, StubRoutines::_arrayof_oop_disjoint_arraycopy);
3278 SET_ADDRESS(_stubs, StubRoutines::_arrayof_oop_disjoint_arraycopy_uninit);
3279
3280 SET_ADDRESS(_stubs, StubRoutines::_checkcast_arraycopy);
3281 SET_ADDRESS(_stubs, StubRoutines::_checkcast_arraycopy_uninit);
3282
3283 SET_ADDRESS(_stubs, StubRoutines::unsafe_arraycopy());
3284 SET_ADDRESS(_stubs, StubRoutines::generic_arraycopy());
3285
3286 SET_ADDRESS(_stubs, StubRoutines::jbyte_fill());
3287 SET_ADDRESS(_stubs, StubRoutines::jshort_fill());
3288 SET_ADDRESS(_stubs, StubRoutines::jint_fill());
3289 SET_ADDRESS(_stubs, StubRoutines::arrayof_jbyte_fill());
3290 SET_ADDRESS(_stubs, StubRoutines::arrayof_jshort_fill());
3291 SET_ADDRESS(_stubs, StubRoutines::arrayof_jint_fill());
3292
3293 SET_ADDRESS(_stubs, StubRoutines::data_cache_writeback());
3294 SET_ADDRESS(_stubs, StubRoutines::data_cache_writeback_sync());
3295
3296 SET_ADDRESS(_stubs, StubRoutines::aescrypt_encryptBlock());
3297 SET_ADDRESS(_stubs, StubRoutines::aescrypt_decryptBlock());
3298 SET_ADDRESS(_stubs, StubRoutines::cipherBlockChaining_encryptAESCrypt());
3299 SET_ADDRESS(_stubs, StubRoutines::cipherBlockChaining_decryptAESCrypt());
3300 SET_ADDRESS(_stubs, StubRoutines::electronicCodeBook_encryptAESCrypt());
3301 SET_ADDRESS(_stubs, StubRoutines::electronicCodeBook_decryptAESCrypt());
3302 SET_ADDRESS(_stubs, StubRoutines::poly1305_processBlocks());
3303 SET_ADDRESS(_stubs, StubRoutines::counterMode_AESCrypt());
3304 SET_ADDRESS(_stubs, StubRoutines::ghash_processBlocks());
3305 SET_ADDRESS(_stubs, StubRoutines::chacha20Block());
3306 SET_ADDRESS(_stubs, StubRoutines::base64_encodeBlock());
3307 SET_ADDRESS(_stubs, StubRoutines::base64_decodeBlock());
3308 SET_ADDRESS(_stubs, StubRoutines::md5_implCompress());
3309 SET_ADDRESS(_stubs, StubRoutines::md5_implCompressMB());
3310 SET_ADDRESS(_stubs, StubRoutines::sha1_implCompress());
3311 SET_ADDRESS(_stubs, StubRoutines::sha1_implCompressMB());
3312 SET_ADDRESS(_stubs, StubRoutines::sha256_implCompress());
3313 SET_ADDRESS(_stubs, StubRoutines::sha256_implCompressMB());
3314 SET_ADDRESS(_stubs, StubRoutines::sha512_implCompress());
3315 SET_ADDRESS(_stubs, StubRoutines::sha512_implCompressMB());
3316 SET_ADDRESS(_stubs, StubRoutines::sha3_implCompress());
3317 SET_ADDRESS(_stubs, StubRoutines::sha3_implCompressMB());
3318
3319 SET_ADDRESS(_stubs, StubRoutines::updateBytesCRC32());
3320
3321 SET_ADDRESS(_stubs, StubRoutines::crc32c_table_addr());
3322 SET_ADDRESS(_stubs, StubRoutines::updateBytesCRC32C());
3323 SET_ADDRESS(_stubs, StubRoutines::updateBytesAdler32());
3324
3325 SET_ADDRESS(_stubs, StubRoutines::multiplyToLen());
3326 SET_ADDRESS(_stubs, StubRoutines::squareToLen());
3327 SET_ADDRESS(_stubs, StubRoutines::mulAdd());
3328 SET_ADDRESS(_stubs, StubRoutines::montgomeryMultiply());
3329 SET_ADDRESS(_stubs, StubRoutines::montgomerySquare());
3330 SET_ADDRESS(_stubs, StubRoutines::bigIntegerRightShift());
3331 SET_ADDRESS(_stubs, StubRoutines::bigIntegerLeftShift());
3332 SET_ADDRESS(_stubs, StubRoutines::galoisCounterMode_AESCrypt());
3333
3334 SET_ADDRESS(_stubs, StubRoutines::vectorizedMismatch());
3335
3336 SET_ADDRESS(_stubs, StubRoutines::dexp());
3337 SET_ADDRESS(_stubs, StubRoutines::dlog());
3338 SET_ADDRESS(_stubs, StubRoutines::dlog10());
3339 SET_ADDRESS(_stubs, StubRoutines::dpow());
3340 SET_ADDRESS(_stubs, StubRoutines::dsin());
3341 SET_ADDRESS(_stubs, StubRoutines::dcos());
3342 SET_ADDRESS(_stubs, StubRoutines::dlibm_reduce_pi04l());
3343 SET_ADDRESS(_stubs, StubRoutines::dlibm_sin_cos_huge());
3344 SET_ADDRESS(_stubs, StubRoutines::dlibm_tan_cot_huge());
3345 SET_ADDRESS(_stubs, StubRoutines::dtan());
3346
3347 SET_ADDRESS(_stubs, StubRoutines::f2hf_adr());
3348 SET_ADDRESS(_stubs, StubRoutines::hf2f_adr());
3349
3350 for (int slot = 0; slot < Klass::SECONDARY_SUPERS_TABLE_SIZE; slot++) {
3351 SET_ADDRESS(_stubs, StubRoutines::lookup_secondary_supers_table_stub(slot));
3352 }
3353 SET_ADDRESS(_stubs, StubRoutines::lookup_secondary_supers_table_slow_path_stub());
3354
3355 #if defined(AMD64) && !defined(ZERO)
3356 SET_ADDRESS(_stubs, StubRoutines::x86::d2i_fixup());
3357 SET_ADDRESS(_stubs, StubRoutines::x86::f2i_fixup());
3358 SET_ADDRESS(_stubs, StubRoutines::x86::f2l_fixup());
3359 SET_ADDRESS(_stubs, StubRoutines::x86::float_sign_mask());
3360 SET_ADDRESS(_stubs, StubRoutines::x86::float_sign_flip());
3361 SET_ADDRESS(_stubs, StubRoutines::x86::double_sign_mask());
3362 SET_ADDRESS(_stubs, StubRoutines::x86::vector_popcount_lut());
3363 SET_ADDRESS(_stubs, StubRoutines::x86::vector_float_sign_mask());
3364 SET_ADDRESS(_stubs, StubRoutines::x86::vector_float_sign_flip());
3365 SET_ADDRESS(_stubs, StubRoutines::x86::vector_double_sign_mask());
3366 SET_ADDRESS(_stubs, StubRoutines::x86::vector_double_sign_flip());
3367 SET_ADDRESS(_stubs, StubRoutines::x86::vector_reverse_byte_perm_mask_int());
3368 SET_ADDRESS(_stubs, StubRoutines::x86::vector_reverse_byte_perm_mask_short());
3369 SET_ADDRESS(_stubs, StubRoutines::x86::vector_reverse_byte_perm_mask_long());
3370 // The iota indices are ordered by type B/S/I/L/F/D, and the offset between two types is 64.
3371 // See C2_MacroAssembler::load_iota_indices().
3372 for (int i = 0; i < 6; i++) {
3373 SET_ADDRESS(_stubs, StubRoutines::x86::vector_iota_indices() + i * 64);
3374 }
3375 #endif
3376 #if defined(AARCH64) && !defined(ZERO)
3377 SET_ADDRESS(_stubs, StubRoutines::aarch64::zero_blocks());
3378 SET_ADDRESS(_stubs, StubRoutines::aarch64::count_positives());
3379 SET_ADDRESS(_stubs, StubRoutines::aarch64::count_positives_long());
3380 SET_ADDRESS(_stubs, StubRoutines::aarch64::large_array_equals());
3381 SET_ADDRESS(_stubs, StubRoutines::aarch64::compare_long_string_LL());
3382 SET_ADDRESS(_stubs, StubRoutines::aarch64::compare_long_string_UU());
3383 SET_ADDRESS(_stubs, StubRoutines::aarch64::compare_long_string_LU());
3384 SET_ADDRESS(_stubs, StubRoutines::aarch64::compare_long_string_UL());
3385 SET_ADDRESS(_stubs, StubRoutines::aarch64::string_indexof_linear_ul());
3386 SET_ADDRESS(_stubs, StubRoutines::aarch64::string_indexof_linear_ll());
3387 SET_ADDRESS(_stubs, StubRoutines::aarch64::string_indexof_linear_uu());
3388 SET_ADDRESS(_stubs, StubRoutines::aarch64::large_byte_array_inflate());
3389 SET_ADDRESS(_stubs, StubRoutines::aarch64::spin_wait());
3390
3391 SET_ADDRESS(_stubs, StubRoutines::aarch64::large_arrays_hashcode(T_BOOLEAN));
3392 SET_ADDRESS(_stubs, StubRoutines::aarch64::large_arrays_hashcode(T_BYTE));
3393 SET_ADDRESS(_stubs, StubRoutines::aarch64::large_arrays_hashcode(T_SHORT));
3394 SET_ADDRESS(_stubs, StubRoutines::aarch64::large_arrays_hashcode(T_CHAR));
3395 SET_ADDRESS(_stubs, StubRoutines::aarch64::large_arrays_hashcode(T_INT));
3396 #endif
3397
3398 _complete = true;
3399 log_info(aot, codecache, init)("Stubs recorded");
3400 }
3401
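// Record the C1 (Runtime1) blobs that are generated early, i.e. all stub ids up to
// and including forward_exception_id. The remaining Runtime1 blobs and the GC
// barrier stubs are recorded later by init_c1().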
3402 void AOTCodeAddressTable::init_early_c1() {
3403 #ifdef COMPILER1
3404 // Runtime1 Blobs
3405 for (int i = 0; i <= (int)C1StubId::forward_exception_id; i++) {
3406 C1StubId id = (C1StubId)i;
3407 if (Runtime1::blob_for(id) == nullptr) {
3408 log_info(aot, codecache, init)("C1 blob %s is missing", Runtime1::name_for(id));
3409 continue;
3410 }
3411 if (Runtime1::entry_for(id) == nullptr) {
3412 log_info(aot, codecache, init)("C1 blob %s is missing entry", Runtime1::name_for(id));
3413 continue;
3414 }
3415 address entry = Runtime1::entry_for(id);
3416 SET_ADDRESS(_C1_blobs, entry);
3417 }
3418 #endif // COMPILER1
3419 assert(_C1_blobs_length <= _C1_blobs_max, "increase _C1_blobs_max to %d", _C1_blobs_length);
3420 _early_c1_complete = true;
3421 }
3422
3423 void AOTCodeAddressTable::init_c1() {
3424 #ifdef COMPILER1
3425 // Runtime1 Blobs
3426 assert(_early_c1_complete, "early C1 blobs should be initialized");
3427 for (int i = (int)C1StubId::forward_exception_id + 1; i < (int)(C1StubId::NUM_STUBIDS); i++) {
3428 C1StubId id = (C1StubId)i;
3429 if (Runtime1::blob_for(id) == nullptr) {
3430 log_info(aot, codecache, init)("C1 blob %s is missing", Runtime1::name_for(id));
3431 continue;
3432 }
3433 if (Runtime1::entry_for(id) == nullptr) {
3434 log_info(aot, codecache, init)("C1 blob %s is missing entry", Runtime1::name_for(id));
3435 continue;
3436 }
3437 address entry = Runtime1::entry_for(id);
3438 SET_ADDRESS(_C1_blobs, entry);
3439 }
3440 #if INCLUDE_G1GC
3441 if (UseG1GC) {
3442 G1BarrierSetC1* bs = (G1BarrierSetC1*)BarrierSet::barrier_set()->barrier_set_c1();
3443 address entry = bs->pre_barrier_c1_runtime_code_blob()->code_begin();
3444 SET_ADDRESS(_C1_blobs, entry);
3445 entry = bs->post_barrier_c1_runtime_code_blob()->code_begin();
3446 SET_ADDRESS(_C1_blobs, entry);
3447 }
3448 #endif // INCLUDE_G1GC
3449 #if INCLUDE_ZGC
3450 if (UseZGC) {
3451 ZBarrierSetC1* bs = (ZBarrierSetC1*)BarrierSet::barrier_set()->barrier_set_c1();
3452 SET_ADDRESS(_C1_blobs, bs->_load_barrier_on_oop_field_preloaded_runtime_stub);
3453 SET_ADDRESS(_C1_blobs, bs->_load_barrier_on_weak_oop_field_preloaded_runtime_stub);
3454 SET_ADDRESS(_C1_blobs, bs->_store_barrier_on_oop_field_with_healing);
3455 SET_ADDRESS(_C1_blobs, bs->_store_barrier_on_oop_field_without_healing);
3456 }
3457 #endif // INCLUDE_ZGC
3458 #if INCLUDE_SHENANDOAHGC
3459 if (UseShenandoahGC) {
3460 ShenandoahBarrierSetC1* bs = (ShenandoahBarrierSetC1*)BarrierSet::barrier_set()->barrier_set_c1();
3461 SET_ADDRESS(_C1_blobs, bs->pre_barrier_c1_runtime_code_blob()->code_begin());
3462 SET_ADDRESS(_C1_blobs, bs->load_reference_barrier_strong_rt_code_blob()->code_begin());
3463 SET_ADDRESS(_C1_blobs, bs->load_reference_barrier_strong_native_rt_code_blob()->code_begin());
3464 SET_ADDRESS(_C1_blobs, bs->load_reference_barrier_weak_rt_code_blob()->code_begin());
3465 SET_ADDRESS(_C1_blobs, bs->load_reference_barrier_phantom_rt_code_blob()->code_begin());
3466 }
3467 #endif // INCLUDE_SHENANDOAHGC
3468 #endif // COMPILER1
3469
3470 assert(_C1_blobs_length <= _C1_blobs_max, "increase _C1_blobs_max to %d", _C1_blobs_length);
3471 _c1_complete = true;
3472 log_info(aot, codecache, init)("Runtime1 Blobs recorded");
3473 }
3474
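// Record C2 (OptoRuntime) blob entry points. With COMPILER2 disabled the C2 region
// of the table simply stays empty.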
3475 void AOTCodeAddressTable::init_c2() {
3476 #ifdef COMPILER2
3477 // OptoRuntime Blobs
3478 SET_ADDRESS(_C2_blobs, OptoRuntime::uncommon_trap_blob()->entry_point());
3479 SET_ADDRESS(_C2_blobs, OptoRuntime::exception_blob()->entry_point());
3480 SET_ADDRESS(_C2_blobs, OptoRuntime::new_instance_Java());
3481 SET_ADDRESS(_C2_blobs, OptoRuntime::new_array_Java());
3482 SET_ADDRESS(_C2_blobs, OptoRuntime::new_array_nozero_Java());
3483 SET_ADDRESS(_C2_blobs, OptoRuntime::multianewarray2_Java());
3484 SET_ADDRESS(_C2_blobs, OptoRuntime::multianewarray3_Java());
3485 SET_ADDRESS(_C2_blobs, OptoRuntime::multianewarray4_Java());
3486 SET_ADDRESS(_C2_blobs, OptoRuntime::multianewarray5_Java());
3487 SET_ADDRESS(_C2_blobs, OptoRuntime::multianewarrayN_Java());
3488 SET_ADDRESS(_C2_blobs, OptoRuntime::vtable_must_compile_stub());
3489 SET_ADDRESS(_C2_blobs, OptoRuntime::complete_monitor_locking_Java());
3490 SET_ADDRESS(_C2_blobs, OptoRuntime::monitor_notify_Java());
3491 SET_ADDRESS(_C2_blobs, OptoRuntime::monitor_notifyAll_Java());
3492 SET_ADDRESS(_C2_blobs, OptoRuntime::rethrow_stub());
3493 SET_ADDRESS(_C2_blobs, OptoRuntime::slow_arraycopy_Java());
3494 SET_ADDRESS(_C2_blobs, OptoRuntime::register_finalizer_Java());
3495 SET_ADDRESS(_C2_blobs, OptoRuntime::class_init_barrier_Java());
3496 #if INCLUDE_JVMTI
3497 SET_ADDRESS(_C2_blobs, OptoRuntime::notify_jvmti_vthread_start());
3498 SET_ADDRESS(_C2_blobs, OptoRuntime::notify_jvmti_vthread_end());
3499 SET_ADDRESS(_C2_blobs, OptoRuntime::notify_jvmti_vthread_mount());
3500 SET_ADDRESS(_C2_blobs, OptoRuntime::notify_jvmti_vthread_unmount());
3501 #endif /* INCLUDE_JVMTI */
3502 #endif
3503
3504 assert(_C2_blobs_length <= _C2_blobs_max, "increase _C2_blobs_max to %d", _C2_blobs_length);
3505 _c2_complete = true;
3506 log_info(aot, codecache, init)("OptoRuntime Blobs recorded");
3507 }
3508 #undef SET_ADDRESS
3509
3510 AOTCodeAddressTable::~AOTCodeAddressTable() {
3511 if (_extrs_addr != nullptr) {
3512 FREE_C_HEAP_ARRAY(address, _extrs_addr);
3513 }
3514 if (_stubs_addr != nullptr) {
3515 FREE_C_HEAP_ARRAY(address, _stubs_addr);
3516 }
3517 if (_shared_blobs_addr != nullptr) {
3518 FREE_C_HEAP_ARRAY(address, _shared_blobs_addr);
3519 }
3520 }
3521
3522 #ifdef PRODUCT
3523 #define MAX_STR_COUNT 200
3524 #else
3525 #define MAX_STR_COUNT 500
3526 #endif
3527 #define _c_str_max MAX_STR_COUNT
3528 static const int _c_str_base = _all_max;
3529
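// Table of C string constants referenced from AOT code. Strings get ids in the
// range [_c_str_base, _c_str_base + _c_str_max): id_for_address() maps a string
// address to such an id and address_for_id() maps it back.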
3530 static const char* _C_strings_in[MAX_STR_COUNT] = {nullptr}; // Incoming strings
3531 static const char* _C_strings[MAX_STR_COUNT] = {nullptr}; // Our duplicates
3532 static int _C_strings_count = 0;
3533 static int _C_strings_s[MAX_STR_COUNT] = {0};
3534 static int _C_strings_id[MAX_STR_COUNT] = {0};
3535 static int _C_strings_used = 0;
3536
3667 fatal("AOT Code Cache VM runtime addresses table is not complete");
3668 }
3669 if (idx == -1) {
3670 return (address)-1;
3671 }
3672 uint id = (uint)idx;
3673 // Special case: ids above the address table range encode an offset relative to os::init
3674 if (id > (_c_str_base + _c_str_max)) {
3675 return (address)os::init + idx;
3676 }
3677 if (idx < 0) {
3678 fatal("Incorrect id %d for AOT Code Cache addresses table", idx);
3679 }
3680 // no need to compare unsigned id against 0
3681 if (/* id >= _extrs_base && */ id < _extrs_length) {
3682 return _extrs_addr[id - _extrs_base];
3683 }
3684 if (id >= _stubs_base && id < _stubs_base + _stubs_length) {
3685 return _stubs_addr[id - _stubs_base];
3686 }
3690 if (id >= _shared_blobs_base && id < _shared_blobs_base + _shared_blobs_length) {
3691 return _shared_blobs_addr[id - _shared_blobs_base];
3692 }
3693 if (id >= _C1_blobs_base && id < _C1_blobs_base + _C1_blobs_length) {
3694 return _C1_blobs_addr[id - _C1_blobs_base];
3695 }
3699 if (id >= _C2_blobs_base && id < _C2_blobs_base + _C2_blobs_length) {
3700 return _C2_blobs_addr[id - _C2_blobs_base];
3701 }
3702 if (id >= _c_str_base && id < (_c_str_base + (uint)_C_strings_count)) {
3703 return address_for_C_string(id - _c_str_base);
3704 }
3705 fatal("Incorrect id %u for AOT Code Cache addresses table", id);
3706 return nullptr;
3707 }
3708
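// Map an address found in relocation info to a stable id for the AOT Code Cache.
// Lookup order: the card table base, recorded C strings, StubRoutines, code blobs
// (shared, C1, C2), and finally external runtime entries; addresses that resolve
// into a loaded library at a non-zero offset fall back to the distance-from-os::init
// encoding used below.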
3709 int AOTCodeAddressTable::id_for_address(address addr, RelocIterator reloc, CodeBlob* blob) {
3710 if (!_extrs_complete) {
3711 fatal("AOT Code Cache VM runtime addresses table is not complete");
3712 }
3713 int id = -1;
3714 if (addr == (address)-1) { // Static call stub has jump to itself
3715 return id;
3716 }
3717 // Check card_table_base address first since it can point to any address
3718 BarrierSet* bs = BarrierSet::barrier_set();
3719 if (bs->is_a(BarrierSet::CardTableBarrierSet)) {
3720 if (addr == ci_card_table_address_as<address>()) {
3721 id = search_address(addr, _extrs_addr, _extrs_length);
3722 assert(id > 0 && _extrs_addr[id - _extrs_base] == addr, "sanity");
3723 return id;
3724 }
3725 }
3726
3727 // Search for C string
3728 id = id_for_C_string(addr);
3729 if (id >= 0) {
3730 return id + _c_str_base;
3731 }
3732 if (StubRoutines::contains(addr)) {
3733 // Search in stubs
3734 id = search_address(addr, _stubs_addr, _stubs_length);
3735 if (id < 0) {
3736 StubCodeDesc* desc = StubCodeDesc::desc_for(addr);
3737 if (desc == nullptr) {
3738 desc = StubCodeDesc::desc_for(addr + frame::pc_return_offset);
3739 }
3740 const char* stub_name = (desc != nullptr) ? desc->name() : "<unknown>";
3741 fatal("Address " INTPTR_FORMAT " for Stub:%s is missing in AOT Code Cache addresses table", p2i(addr), stub_name);
3742 } else {
3743 return _stubs_base + id;
3744 }
3745 } else {
3746 CodeBlob* cb = CodeCache::find_blob(addr);
3747 if (cb != nullptr) {
3748 int id_base = _shared_blobs_base;
3749 // Search in code blobs
3750 id = search_address(addr, _shared_blobs_addr, _shared_blobs_length);
3751 if (id == -1) {
3752 id_base = _C1_blobs_base;
3753 // search C1 blobs
3754 id = search_address(addr, _C1_blobs_addr, _C1_blobs_length);
3755 }
3756 if (id == -1) {
3757 id_base = _C2_blobs_base;
3758 // search C2 blobs
3759 id = search_address(addr, _C2_blobs_addr, _C2_blobs_length);
3760 }
3761 if (id < 0) {
3762 fatal("Address " INTPTR_FORMAT " for Blob:%s is missing in AOT Code Cache addresses table", p2i(addr), cb->name());
3763 } else {
3764 return id_base + id;
3765 }
3766 } else {
3767 // Search in runtime functions
3768 id = search_address(addr, _extrs_addr, _extrs_length);
3769 if (id < 0) {
3770 ResourceMark rm;
3771 const int buflen = 1024;
3772 char* func_name = NEW_RESOURCE_ARRAY(char, buflen);
3773 int offset = 0;
3774 if (os::dll_address_to_function_name(addr, func_name, buflen, &offset)) {
3775 if (offset > 0) {
3776 // Could be address of C string
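// Encode the address as its distance from os::init. The assert below guarantees
// that such a distance cannot collide with a regular table id (those all fit
// below _all_max + MAX_STR_COUNT); address_for_id() reverses the encoding by
// adding the distance back to os::init.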
3777 uint dist = (uint)pointer_delta(addr, (address)os::init, 1);
3778 CompileTask* task = ciEnv::current()->task();
3779 uint compile_id = 0;
3780 uint comp_level = 0;
3781 if (task != nullptr) { // this could be called from compiler runtime initialization (compiler blobs)
3782 compile_id = task->compile_id();
3783 comp_level = task->comp_level();
3784 }
3785 log_debug(aot, codecache)("%u (L%u): Address " INTPTR_FORMAT " (distance %u from os::init) for runtime target '%s' is missing in AOT Code Cache addresses table",
3786 compile_id, comp_level, p2i(addr), dist, (const char*)addr);
3787 assert(dist > (uint)(_all_max + MAX_STR_COUNT), "change encoding of distance");
3788 return dist;
3789 }
3790 reloc.print_current_on(tty);
3791 blob->print_on(tty);
3792 blob->print_code_on(tty);
3793 fatal("Address " INTPTR_FORMAT " for runtime target '%s+%d' is missing in AOT Code Cache addresses table", p2i(addr), func_name, offset);
3794 } else {
3795 reloc.print_current_on(tty);
3796 #ifndef PRODUCT
3797 if (blob != nullptr) {
3798 blob->print_on(tty);
3799 blob->print_code_on(tty);
3800 }
3801 #endif // !PRODUCT
3802 os::find(addr, tty);
3803 fatal("Address " INTPTR_FORMAT " for <unknown>/('%s') is missing in AOT Code Cache addresses table", p2i(addr), (const char*)addr);
3804 }
3805 } else {
3806 return _extrs_base + id;
3807 }
3808 }
3809 }
3810 return id;
3811 }
3812
3813 #undef _extrs_max
3814 #undef _stubs_max
3815 #undef _shared_blobs_max
3816 #undef _C1_blobs_max
3817 #undef _C2_blobs_max
3818 #undef _blobs_max
3819 #undef _extrs_base
3820 #undef _stubs_base
3821 #undef _shared_blobs_base
3822 #undef _C1_blobs_base
3823 #undef _C2_blobs_base
3824 #undef _blobs_end
3825
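// Capture card table geometry (grain and card shifts) from the active barrier set.
// Only meaningful for CardTableBarrierSet-based collectors; other barrier sets
// leave the fields untouched.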
3826 void AOTRuntimeConstants::initialize_from_runtime() {
3827 BarrierSet* bs = BarrierSet::barrier_set();
3828 if (bs->is_a(BarrierSet::CardTableBarrierSet)) {
3829 CardTableBarrierSet* ctbs = ((CardTableBarrierSet*)bs);
3830 _aot_runtime_constants._grain_shift = ctbs->grain_shift();
3831 _aot_runtime_constants._card_shift = ctbs->card_shift();
3832 }
3833 }
3834
3835 AOTRuntimeConstants AOTRuntimeConstants::_aot_runtime_constants;
3836
3837 address AOTRuntimeConstants::_field_addresses_list[] = {
3838 grain_shift_address(),
3839 card_shift_address(),
3840 nullptr
3841 };
3842
3843
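// Reader/closer protocol for _nmethod_readers: while the cache is open the counter
// is >= 0 and counts active ReadingMarks. wait_for_no_nmethod_readers() closes the
// cache by flipping the counter to -(readers + 1); new readers then fail to enter,
// and the remaining readers count the value up towards -1 as they leave.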
3844 void AOTCodeCache::wait_for_no_nmethod_readers() {
3845 while (true) {
3846 int cur = Atomic::load(&_nmethod_readers);
3847 int upd = -(cur + 1);
3848 if (cur >= 0 && Atomic::cmpxchg(&_nmethod_readers, cur, upd) == cur) {
3849 // Success, no new readers should appear.
3850 break;
3851 }
3852 }
3853
3854 // Now wait for all readers to leave.
3855 SpinYield w;
3856 while (Atomic::load(&_nmethod_readers) != -1) {
3857 w.wait();
3858 }
3859 }
3860
3861 AOTCodeCache::ReadingMark::ReadingMark() {
3862 while (true) {
3863 int cur = Atomic::load(&_nmethod_readers);
3864 if (cur < 0) {
3865 // Cache is already closed, cannot proceed.
3866 _failed = true;
3867 return;
3868 }
3869 if (Atomic::cmpxchg(&_nmethod_readers, cur, cur + 1) == cur) {
3870 // Successfully recorded ourselves as entered.
3871 _failed = false;
3872 return;
3873 }
3874 }
3875 }
3876
3877 AOTCodeCache::ReadingMark::~ReadingMark() {
3878 if (_failed) {
3879 return;
3880 }
3881 while (true) {
3882 int cur = Atomic::load(&_nmethod_readers);
3883 if (cur > 0) {
3884 // Cache is open, we are counting down towards 0.
3885 if (Atomic::cmpxchg(&_nmethod_readers, cur, cur - 1) == cur) {
3886 return;
3887 }
3888 } else {
3889 // Cache is closed, we are counting up towards -1.
3890 if (Atomic::cmpxchg(&_nmethod_readers, cur, cur + 1) == cur) {
3891 return;
3892 }
3893 }
3894 }
3895 }
3896
3897 void AOTCodeCache::print_timers_on(outputStream* st) {
3898 if (is_using_code()) {
3899 st->print_cr (" AOT Code Load Time: %7.3f s", _t_totalLoad.seconds());
3900 st->print_cr (" nmethod register: %7.3f s", _t_totalRegister.seconds());
3901 st->print_cr (" find cached code: %7.3f s", _t_totalFind.seconds());
3902 }
3903 if (is_dumping_code()) {
3904 st->print_cr (" AOT Code Store Time: %7.3f s", _t_totalStore.seconds());
3905 }
3906 }
3907
3908 AOTCodeStats AOTCodeStats::add_aot_code_stats(AOTCodeStats stats1, AOTCodeStats stats2) {
3909 AOTCodeStats result;
3910 for (int kind = AOTCodeEntry::None; kind < AOTCodeEntry::Kind_count; kind++) {
3911 result.ccstats._kind_cnt[kind] = stats1.entry_count(kind) + stats2.entry_count(kind);
3912 }
3913
3914 for (int lvl = CompLevel_none; lvl < AOTCompLevel_count; lvl++) {
3915 result.ccstats._nmethod_cnt[lvl] = stats1.nmethod_count(lvl) + stats2.nmethod_count(lvl);
3916 }
3917 result.ccstats._clinit_barriers_cnt = stats1.clinit_barriers_count() + stats2.clinit_barriers_count();
3918 return result;
3919 }
3920
3921 void AOTCodeCache::log_stats_on_exit() {
3922 LogStreamHandle(Debug, aot, codecache, exit) log;
3923 if (log.is_enabled()) {
3924 AOTCodeStats prev_stats;
3925 AOTCodeStats current_stats;
3926 AOTCodeStats total_stats;
3927 uint max_size = 0;
3928
3929 uint load_count = (_load_header != nullptr) ? _load_header->entries_count() : 0;
3930
3931 for (uint i = 0; i < load_count; i++) {
3932 prev_stats.collect_entry_stats(&_load_entries[i]);
3933 if (max_size < _load_entries[i].size()) {
3934 max_size = _load_entries[i].size();
3935 }
3936 }
3937 for (uint i = 0; i < _store_entries_cnt; i++) {
3938 current_stats.collect_entry_stats(&_store_entries[i]);
3939 if (max_size < _store_entries[i].size()) {
3940 max_size = _store_entries[i].size();
3941 }
3942 }
3943 total_stats = AOTCodeStats::add_aot_code_stats(prev_stats, current_stats);
3944
3945 log.print_cr("Wrote %u AOTCodeEntry entries (%u max size) to AOT Code Cache",
3946 total_stats.total_count(), max_size);
3947 for (uint kind = AOTCodeEntry::None; kind < AOTCodeEntry::Kind_count; kind++) {
3948 if (total_stats.entry_count(kind) > 0) {
3949 log.print_cr(" %s: total=%u(old=%u+new=%u)",
3950 aot_code_entry_kind_name[kind], total_stats.entry_count(kind), prev_stats.entry_count(kind), current_stats.entry_count(kind));
3951 if (kind == AOTCodeEntry::Code) {
3952 for (uint lvl = CompLevel_none; lvl < AOTCompLevel_count; lvl++) {
3953 if (total_stats.nmethod_count(lvl) > 0) {
3954 log.print_cr(" Tier %d: total=%u(old=%u+new=%u)",
3955 lvl, total_stats.nmethod_count(lvl), prev_stats.nmethod_count(lvl), current_stats.nmethod_count(lvl));
3956 }
3957 }
3958 }
3959 }
3960 }
3961 log.print_cr("Total=%u(old=%u+new=%u)", total_stats.total_count(), prev_stats.total_count(), current_stats.total_count());
3962 }
3963 }
3964
3965 static void print_helper1(outputStream* st, const char* name, int count) {
3966 if (count > 0) {
3967 st->print(" %s=%d", name, count);
3968 }
3969 }
3970
3971 void AOTCodeCache::print_statistics_on(outputStream* st) {
3972 AOTCodeCache* cache = open_for_use();
3973 if (cache != nullptr) {
3974 ReadingMark rdmk;
3975 if (rdmk.failed()) {
3976 // Cache is closed, cannot touch anything.
3977 return;
3978 }
3979
3980 uint count = cache->_load_header->entries_count();
3981 uint* search_entries = (uint*)cache->addr(cache->_load_header->entries_offset()); // [id, index]
3982 AOTCodeEntry* load_entries = (AOTCodeEntry*)(search_entries + 2 * count);
3983
3984 AOTCodeStats stats;
3985 for (uint i = 0; i < count; i++) {
3986 stats.collect_all_stats(&load_entries[i]);
3987 }
3988
3989 for (uint kind = AOTCodeEntry::None; kind < AOTCodeEntry::Kind_count; kind++) {
3990 if (stats.entry_count(kind) > 0) {
3991 st->print(" %s:", aot_code_entry_kind_name[kind]);
3992 print_helper1(st, "total", stats.entry_count(kind));
3993 print_helper1(st, "loaded", stats.entry_loaded_count(kind));
3994 print_helper1(st, "invalidated", stats.entry_invalidated_count(kind));
3995 print_helper1(st, "failed", stats.entry_load_failed_count(kind));
3996 st->cr();
3997 }
3998 if (kind == AOTCodeEntry::Code) {
3999 for (uint lvl = CompLevel_none; lvl < AOTCompLevel_count; lvl++) {
4000 if (stats.nmethod_count(lvl) > 0) {
4001 st->print(" AOT Code T%d", lvl);
4002 print_helper1(st, "total", stats.nmethod_count(lvl));
4003 print_helper1(st, "loaded", stats.nmethod_loaded_count(lvl));
4004 print_helper1(st, "invalidated", stats.nmethod_invalidated_count(lvl));
4005 print_helper1(st, "failed", stats.nmethod_load_failed_count(lvl));
4006 if (lvl == AOTCompLevel_count - 1) {
4007 print_helper1(st, "has_clinit_barriers", stats.clinit_barriers_count());
4008 }
4009 st->cr();
4010 }
4011 }
4012 }
4013 }
4014 LogStreamHandle(Debug, aot, codecache, init) log;
4015 if (log.is_enabled()) {
4016 AOTCodeCache::print_unused_entries_on(&log);
4017 }
4018 LogStreamHandle(Trace, aot, codecache) aot_info;
4019 // need a lock to traverse the code cache
4020 if (aot_info.is_enabled()) {
4021 MutexLocker locker(CodeCache_lock, Mutex::_no_safepoint_check_flag);
4022 NMethodIterator iter(NMethodIterator::all);
4023 while (iter.next()) {
4024 nmethod* nm = iter.method();
4025 if (nm->is_in_use() && !nm->is_native_method() && !nm->is_osr_method()) {
4026 aot_info.print("%5d:%c%c%c%d:", nm->compile_id(),
4027 (nm->method()->is_shared() ? 'S' : ' '),
4028 (nm->is_aot() ? 'A' : ' '),
4029 (nm->preloaded() ? 'P' : ' '),
4030 nm->comp_level());
4031 print_helper(nm, &aot_info);
4032 aot_info.print(": ");
4033 CompileTask::print(&aot_info, nm, nullptr, true /*short_form*/);
4034 LogStreamHandle(Trace, aot, codecache) aot_debug;
4035 if (aot_debug.is_enabled()) {
4036 MethodTrainingData* mtd = MethodTrainingData::find(methodHandle(Thread::current(), nm->method()));
4037 if (mtd != nullptr) {
4038 mtd->iterate_compiles([&](CompileTrainingData* ctd) {
4039 aot_debug.print(" CTD: "); ctd->print_on(&aot_debug); aot_debug.cr();
4040 });
4041 }
4042 }
4043 }
4044 }
4045 }
4046 } else {
4047 st->print_cr("failed to map code cache");
4048 }
4049 }
4050
4051 void AOTCodeEntry::print(outputStream* st) const {
4052 st->print_cr(" AOT Code Cache entry " INTPTR_FORMAT " [kind: %d, id: " UINT32_FORMAT_X_0 ", offset: %d, size: %d, comp_level: %d, comp_id: %d, %s%s%s%s]",
4053 p2i(this), (int)_kind, _id, _offset, _size, _comp_level, _comp_id,
4054 (_not_entrant ? "not_entrant" : "entrant"),
4055 (_loaded ? ", loaded" : ""),
4056 (_has_clinit_barriers ? ", has_clinit_barriers" : ""),
4057 (_for_preload ? ", for_preload" : ""));
4058 }
4059
4060 void AOTCodeCache::print_on(outputStream* st) {
4061 AOTCodeCache* cache = open_for_use();
4062 if (cache != nullptr) {
4063 ReadingMark rdmk;
4064 if (rdmk.failed()) {
4065 // Cache is closed, cannot touch anything.
4066 return;
4067 }
4068
4069 uint count = cache->_load_header->entries_count();
4070 uint* search_entries = (uint*)cache->addr(cache->_load_header->entries_offset()); // [id, index]
4071 AOTCodeEntry* load_entries = (AOTCodeEntry*)(search_entries + 2 * count);
4072
4073 for (uint i = 0; i < count; i++) {
4074 int index = search_entries[2*i + 1];
4075 AOTCodeEntry* entry = &(load_entries[index]);
4076
4077 uint entry_position = entry->offset();
4078 uint name_offset = entry->name_offset() + entry_position;
4079 const char* saved_name = cache->addr(name_offset);
4080
4081 st->print_cr("%4u: entry_idx:%4u Kind:%u Id:%u L%u offset:%u size=%u '%s' %s%s%s%s",
4082 i, index, entry->kind(), entry->id(), entry->comp_level(), entry->offset(),
4083 entry->size(), saved_name,
4084 entry->has_clinit_barriers() ? " has_clinit_barriers" : "",
4085 entry->for_preload() ? " for_preload" : "",
4086 entry->is_loaded() ? " loaded" : "",
4087 entry->not_entrant() ? " not_entrant" : "");
4088
4089 st->print_raw(" ");
4090 AOTCodeReader reader(cache, entry, nullptr);
4091 reader.print_on(st);
4092 }
4093 } else {
4094 st->print_cr("failed to map code cache");
4095 }
4096 }
4097
4098 void AOTCodeCache::print_unused_entries_on(outputStream* st) {
4099 LogStreamHandle(Info, aot, codecache, init) info;
4100 if (info.is_enabled()) {
4101 AOTCodeCache::iterate([&](AOTCodeEntry* entry) {
4102 if (entry->is_code() && !entry->is_loaded()) {
4103 MethodTrainingData* mtd = MethodTrainingData::find(methodHandle(Thread::current(), entry->method()));
4104 if (mtd != nullptr) {
4105 if (mtd->has_holder()) {
4106 if (mtd->holder()->method_holder()->is_initialized()) {
4107 ResourceMark rm;
4108 mtd->iterate_compiles([&](CompileTrainingData* ctd) {
4109 if ((uint)ctd->level() == entry->comp_level()) {
4110 if (ctd->init_deps_left() == 0) {
4111 nmethod* nm = mtd->holder()->code();
4112 if (nm == nullptr) {
4113 if (mtd->holder()->queued_for_compilation()) {
4114 return; // scheduled for compilation
4115 }
4116 } else if ((uint)nm->comp_level() >= entry->comp_level()) {
4117 return; // already compiled online at an equal or higher level, superseding this AOT entry
4118 }
4119 info.print("AOT Code Cache entry not loaded: ");
4120 ctd->print_on(&info);
4121 info.cr();
4122 }
4123 }
4124 });
4125 } else {
4126 // not yet initialized
4127 }
4128 } else {
4129 info.print("AOT Code Cache entry doesn't have a holder: ");
4130 mtd->print_on(&info);
4131 info.cr();
4132 }
4133 }
4134 }
4135 });
4136 }
4137 }
4138
4139 void AOTCodeReader::print_on(outputStream* st) {
4140 uint entry_position = _entry->offset();
4141 set_read_position(entry_position);
4142
4143 // Read name
4144 uint name_offset = entry_position + _entry->name_offset();
4145 uint name_size = _entry->name_size(); // Includes terminating '\0'
4146 const char* name = addr(name_offset);
4147
4148 st->print_cr(" name: %s", name);
4149 }
4150