1 /*
2 * Copyright (c) 2025, Oracle and/or its affiliates. All rights reserved.
3 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
4 *
5 * This code is free software; you can redistribute it and/or modify it
6 * under the terms of the GNU General Public License version 2 only, as
7 * published by the Free Software Foundation.
8 *
9 * This code is distributed in the hope that it will be useful, but WITHOUT
10 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
11 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
12 * version 2 for more details (a copy is included in the LICENSE file that
13 * accompanied this code).
14 *
15 * You should have received a copy of the GNU General Public License version
16 * 2 along with this work; if not, write to the Free Software Foundation,
17 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
18 *
19 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
20 * or visit www.oracle.com if you need additional information or have any
21 * questions.
22 *
23 */
24
25 #include "cds/aotCacheAccess.hpp"
26 #include "cds/cds_globals.hpp"
27 #include "cds/cdsConfig.hpp"
28 #include "cds/heapShared.hpp"
29 #include "cds/metaspaceShared.hpp"
30 #include "classfile/javaAssertions.hpp"
31 #include "code/aotCodeCache.hpp"
32 #include "code/codeCache.hpp"
33 #include "gc/shared/gcConfig.hpp"
34 #include "logging/logStream.hpp"
35 #include "memory/memoryReserver.hpp"
36 #include "runtime/flags/flagSetting.hpp"
37 #include "runtime/globals_extension.hpp"
38 #include "runtime/java.hpp"
39 #include "runtime/mutexLocker.hpp"
40 #include "runtime/os.inline.hpp"
41 #include "runtime/sharedRuntime.hpp"
42 #include "runtime/stubRoutines.hpp"
43 #ifdef COMPILER2
44 #include "opto/runtime.hpp"
45 #endif
46 #if INCLUDE_G1GC
47 #include "gc/g1/g1BarrierSetRuntime.hpp"
48 #endif
49 #if INCLUDE_ZGC
50 #include "gc/z/zBarrierSetRuntime.hpp"
51 #endif
52
53 #include <sys/stat.h>
54 #include <errno.h>
55
56 static void report_load_failure() {
57 if (AbortVMOnAOTCodeFailure) {
58 vm_exit_during_initialization("Unable to use AOT Code Cache.", nullptr);
59 }
60 log_info(aot, codecache, init)("Unable to use AOT Code Cache.");
61 AOTAdapterCaching = false;
62 }
63
64 static void report_store_failure() {
65 if (AbortVMOnAOTCodeFailure) {
66 tty->print_cr("Unable to create AOT Code Cache.");
67 vm_abort(false);
68 }
69 log_info(aot, codecache, exit)("Unable to create AOT Code Cache.");
70 AOTAdapterCaching = false;
71 }
72
73 bool AOTCodeCache::is_dumping_adapters() {
74 return AOTAdapterCaching && is_on_for_dump();
75 }
76
77 bool AOTCodeCache::is_using_adapters() {
78 return AOTAdapterCaching && is_on_for_use();
79 }
80
81 static uint _max_aot_code_size = 0;
82 uint AOTCodeCache::max_aot_code_size() {
83 return _max_aot_code_size;
84 }
85
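// Decides, based on the CDS/AOT configuration, whether this JVM will dump AOT code,
// use previously cached AOT code, or leave AOT code caching disabled. Both dumping
// and using require an AOTCache together with AOT class linking.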
86 void AOTCodeCache::initialize() {
87 if (FLAG_IS_DEFAULT(AOTCache)) {
88 log_info(aot, codecache, init)("AOT Code Cache is not used: AOTCache is not specified.");
89 return; // AOTCache must be specified to dump and use AOT code
90 }
91
92 bool is_dumping = false;
93 bool is_using = false;
94 if (CDSConfig::is_dumping_final_static_archive() && CDSConfig::is_dumping_aot_linked_classes()) {
95 FLAG_SET_ERGO_IF_DEFAULT(AOTAdapterCaching, true);
96 is_dumping = true;
97 } else if (CDSConfig::is_using_archive() && CDSConfig::is_using_aot_linked_classes()) {
98 FLAG_SET_ERGO_IF_DEFAULT(AOTAdapterCaching, true);
99 is_using = true;
100 } else {
101 log_info(aot, codecache, init)("AOT Code Cache is not used: AOT Class Linking is not used.");
102 return; // nothing to do
103 }
104 if (!AOTAdapterCaching) {
105 return; // AOT code caching disabled on command line
106 }
107 _max_aot_code_size = AOTCodeMaxSize;
108 if (!FLAG_IS_DEFAULT(AOTCodeMaxSize)) {
109 if (!is_aligned(AOTCodeMaxSize, os::vm_allocation_granularity())) {
110 _max_aot_code_size = align_up(AOTCodeMaxSize, os::vm_allocation_granularity());
111 log_debug(aot,codecache,init)("Max AOT Code Cache size is aligned up to %uK", (int)(max_aot_code_size()/K));
112 }
113 }
114 size_t aot_code_size = is_using ? AOTCacheAccess::get_aot_code_region_size() : 0;
115 if (is_using && aot_code_size == 0) {
116 log_info(aot, codecache, init)("AOT Code Cache is empty");
117 return;
118 }
119 if (!open_cache(is_dumping, is_using)) {
120 if (is_using) {
121 report_load_failure();
122 } else {
123 report_store_failure();
124 }
125 return;
126 }
127 if (is_dumping) {
128 FLAG_SET_DEFAULT(ForceUnreachable, true);
129 }
130 FLAG_SET_DEFAULT(DelayCompilerStubsGeneration, false);
131 }
132
133 void AOTCodeCache::init2() {
134 if (!is_on()) {
135 return;
136 }
137 if (!verify_vm_config()) {
138 close();
139 report_load_failure();
140 }
141 // initialize the table of external routines so we can save
142 // generated code blobs that reference them
143 init_extrs_table();
144 }
145
146 AOTCodeCache* AOTCodeCache::_cache = nullptr;
147
148 bool AOTCodeCache::open_cache(bool is_dumping, bool is_using) {
149 AOTCodeCache* cache = new AOTCodeCache(is_dumping, is_using);
150 if (cache->failed()) {
151 delete cache;
152 _cache = nullptr;
153 return false;
154 }
155 _cache = cache;
156 return true;
157 }
158
159 void AOTCodeCache::close() {
160 if (is_on()) {
161 delete _cache; // Free memory
162 _cache = nullptr;
163 }
164 }
165
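// Data written to the cache buffer is aligned to HeapWordSize so that copy_bytes()
// can use word-wise copies when source and destination alignment allow it.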
166 #define DATA_ALIGNMENT HeapWordSize
167
168 AOTCodeCache::AOTCodeCache(bool is_dumping, bool is_using) :
169 _load_header(nullptr),
170 _load_buffer(nullptr),
171 _store_buffer(nullptr),
172 _C_store_buffer(nullptr),
173 _write_position(0),
174 _load_size(0),
175 _store_size(0),
176 _for_use(is_using),
177 _for_dump(is_dumping),
178 _closing(false),
179 _failed(false),
180 _lookup_failed(false),
181 _table(nullptr),
182 _load_entries(nullptr),
183 _search_entries(nullptr),
184 _store_entries(nullptr),
185 _C_strings_buf(nullptr),
186 _store_entries_cnt(0)
187 {
188 // Read the header at the beginning of the cache
189 if (_for_use) {
190 // Read cache
191 size_t load_size = AOTCacheAccess::get_aot_code_region_size();
192 ReservedSpace rs = MemoryReserver::reserve(load_size, mtCode);
193 if (!rs.is_reserved()) {
194 log_warning(aot, codecache, init)("Failed to reserve %u bytes of memory for mapping AOT code region into AOT Code Cache", (uint)load_size);
195 set_failed();
196 return;
197 }
198 if (!AOTCacheAccess::map_aot_code_region(rs)) {
199 log_warning(aot, codecache, init)("Failed to read/mmap cached code region into AOT Code Cache");
200 set_failed();
201 return;
202 }
203
204 _load_size = (uint)load_size;
205 _load_buffer = (char*)rs.base();
206 assert(is_aligned(_load_buffer, DATA_ALIGNMENT), "load_buffer is not aligned");
207 log_debug(aot, codecache, init)("Mapped %u bytes at address " INTPTR_FORMAT " at AOT Code Cache", _load_size, p2i(_load_buffer));
208
209 _load_header = (Header*)addr(0);
210 if (!_load_header->verify_config(_load_size)) {
211 set_failed();
212 return;
213 }
214 log_info (aot, codecache, init)("Loaded %u AOT code entries from AOT Code Cache", _load_header->entries_count());
215 log_debug(aot, codecache, init)(" Adapters: total=%u", _load_header->adapters_count());
216 log_debug(aot, codecache, init)(" All Blobs: total=%u", _load_header->blobs_count());
217 log_debug(aot, codecache, init)(" AOT code cache size: %u bytes", _load_header->cache_size());
218
219 // Read strings
220 load_strings();
221 }
222 if (_for_dump) {
223 _C_store_buffer = NEW_C_HEAP_ARRAY(char, max_aot_code_size() + DATA_ALIGNMENT, mtCode);
224 _store_buffer = align_up(_C_store_buffer, DATA_ALIGNMENT);
225 // Entries allocated at the end of buffer in reverse (as on stack).
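// Layout while dumping: code and data grow up from _store_buffer while the
// AOTCodeEntry records grow down from the end; reserve_bytes() and write_bytes()
// fail once the two regions would collide.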
226 _store_entries = (AOTCodeEntry*)align_up(_C_store_buffer + max_aot_code_size(), DATA_ALIGNMENT);
227 log_debug(aot, codecache, init)("Allocated store buffer at address " INTPTR_FORMAT " of size %u", p2i(_store_buffer), max_aot_code_size());
228 }
229 _table = new AOTCodeAddressTable();
230 }
231
232 void AOTCodeCache::init_extrs_table() {
233 AOTCodeAddressTable* table = addr_table();
234 if (table != nullptr) {
235 table->init_extrs();
236 }
237 }
238
239 void AOTCodeCache::init_shared_blobs_table() {
240 AOTCodeAddressTable* table = addr_table();
241 if (table != nullptr) {
242 table->init_shared_blobs();
243 }
244 }
245
246 AOTCodeCache::~AOTCodeCache() {
247 if (_closing) {
248 return; // Already closed
249 }
250 // Stop any further access to cache.
251 _closing = true;
252
253 MutexLocker ml(Compile_lock);
254 if (for_dump()) { // Finalize cache
255 finish_write();
256 }
257 _load_buffer = nullptr;
258 if (_C_store_buffer != nullptr) {
259 FREE_C_HEAP_ARRAY(char, _C_store_buffer);
260 _C_store_buffer = nullptr;
261 _store_buffer = nullptr;
262 }
263 if (_table != nullptr) {
264 delete _table;
265 _table = nullptr;
266 }
267 }
268
269 void AOTCodeCache::Config::record() {
270 _flags = 0;
271 #ifdef ASSERT
272 _flags |= debugVM;
273 #endif
274 if (UseCompressedOops) {
275 _flags |= compressedOops;
276 }
277 if (UseCompressedClassPointers) {
278 _flags |= compressedClassPointers;
279 }
280 if (UseTLAB) {
281 _flags |= useTLAB;
282 }
283 if (JavaAssertions::systemClassDefault()) {
284 _flags |= systemClassAssertions;
285 }
286 if (JavaAssertions::userClassDefault()) {
287 _flags |= userClassAssertions;
288 }
289 if (EnableContended) {
290 _flags |= enableContendedPadding;
358 }
359 if (_objectAlignment != (uint)ObjectAlignmentInBytes) {
360 log_debug(aot, codecache, init)("AOT Code Cache disabled: it was created with ObjectAlignmentInBytes = %d vs current %d", _objectAlignment, ObjectAlignmentInBytes);
361 return false;
362 }
363 return true;
364 }
365
366 bool AOTCodeCache::Header::verify_config(uint load_size) const {
367 if (_version != AOT_CODE_VERSION) {
368 log_debug(aot, codecache, init)("AOT Code Cache disabled: different AOT Code version %d vs %d recorded in AOT Code header", AOT_CODE_VERSION, _version);
369 return false;
370 }
371 if (load_size < _cache_size) {
372 log_debug(aot, codecache, init)("AOT Code Cache disabled: AOT Code Cache size %d < %d recorded in AOT Code header", load_size, _cache_size);
373 return false;
374 }
375 return true;
376 }
377
378 AOTCodeCache* AOTCodeCache::open_for_use() {
379 if (AOTCodeCache::is_on_for_use()) {
380 return AOTCodeCache::cache();
381 }
382 return nullptr;
383 }
384
385 AOTCodeCache* AOTCodeCache::open_for_dump() {
386 if (AOTCodeCache::is_on_for_dump()) {
387 AOTCodeCache* cache = AOTCodeCache::cache();
388 cache->clear_lookup_failed(); // Reset bit
389 return cache;
390 }
391 return nullptr;
392 }
393
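// Copy raw bytes between buffers, using HeapWord-sized copies when size and
// alignment permit, and falling back to a byte copy otherwise.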
394 void copy_bytes(const char* from, address to, uint size) {
395 assert(size > 0, "sanity");
396 bool by_words = true;
397 if ((size > 2 * HeapWordSize) && (((intptr_t)from | (intptr_t)to) & (HeapWordSize - 1)) == 0) {
398 // Use wordwise copies if possible:
399 Copy::disjoint_words((HeapWord*)from,
400 (HeapWord*)to,
401 ((size_t)size + HeapWordSize-1) / HeapWordSize);
402 } else {
403 by_words = false;
404 Copy::conjoint_jbytes(from, to, (size_t)size);
405 }
406 log_trace(aot, codecache)("Copied %d bytes as %s from " INTPTR_FORMAT " to " INTPTR_FORMAT, size, (by_words ? "HeapWord" : "bytes"), p2i(from), p2i(to));
407 }
408
409 AOTCodeReader::AOTCodeReader(AOTCodeCache* cache, AOTCodeEntry* entry) {
410 _cache = cache;
411 _entry = entry;
412 _load_buffer = cache->cache_buffer();
413 _read_position = 0;
414 _lookup_failed = false;
415 }
416
417 void AOTCodeReader::set_read_position(uint pos) {
418 if (pos == _read_position) {
419 return;
420 }
421 assert(pos < _cache->load_size(), "offset:%d >= file size:%d", pos, _cache->load_size());
422 _read_position = pos;
423 }
424
425 bool AOTCodeCache::set_write_position(uint pos) {
426 if (pos == _write_position) {
427 return true;
428 }
429 if (_store_size < _write_position) {
430 _store_size = _write_position; // Adjust during write
431 }
432 assert(pos < _store_size, "offset:%d >= file size:%d", pos, _store_size);
433 _write_position = pos;
439 bool AOTCodeCache::align_write() {
440 // We are not executing code from the cache - we copy it out byte by byte first,
441 // so no large alignment is needed.
442 uint padding = DATA_ALIGNMENT - (_write_position & (DATA_ALIGNMENT - 1));
443 if (padding == DATA_ALIGNMENT) {
444 return true;
445 }
446 uint n = write_bytes((const void*)&align_buffer, padding);
447 if (n != padding) {
448 return false;
449 }
450 log_trace(aot, codecache)("Adjust write alignment in AOT Code Cache");
451 return true;
452 }
453
454 // Check that the AOT code cache has space for "nbytes" of data and reserve it
455 address AOTCodeCache::reserve_bytes(uint nbytes) {
456 assert(for_dump(), "Code Cache file is not created");
457 uint new_position = _write_position + nbytes;
458 if (new_position >= (uint)((char*)_store_entries - _store_buffer)) {
459 log_warning(aot,codecache)("Failed to ensure %d bytes at offset %d in AOT Code Cache. Increase AOTCodeMaxSize.",
460 nbytes, _write_position);
461 set_failed();
462 report_store_failure();
463 return nullptr;
464 }
465 address buffer = (address)(_store_buffer + _write_position);
466 log_trace(aot, codecache)("Reserved %d bytes at offset %d in AOT Code Cache", nbytes, _write_position);
467 _write_position += nbytes;
468 if (_store_size < _write_position) {
469 _store_size = _write_position;
470 }
471 return buffer;
472 }
473
474 uint AOTCodeCache::write_bytes(const void* buffer, uint nbytes) {
475 assert(for_dump(), "Code Cache file is not created");
476 if (nbytes == 0) {
477 return 0;
478 }
479 uint new_position = _write_position + nbytes;
480 if (new_position >= (uint)((char*)_store_entries - _store_buffer)) {
481 log_warning(aot, codecache)("Failed to write %d bytes at offset %d to AOT Code Cache. Increase AOTCodeMaxSize.",
482 nbytes, _write_position);
483 set_failed();
484 report_store_failure();
485 return 0;
486 }
487 copy_bytes((const char* )buffer, (address)(_store_buffer + _write_position), nbytes);
488 log_trace(aot, codecache)("Wrote %d bytes at offset %d to AOT Code Cache", nbytes, _write_position);
489 _write_position += nbytes;
490 if (_store_size < _write_position) {
491 _store_size = _write_position;
492 }
493 return nbytes;
494 }
495
496 void* AOTCodeEntry::operator new(size_t x, AOTCodeCache* cache) {
497 return (void*)(cache->add_entry());
498 }
499
500 static bool check_entry(AOTCodeEntry::Kind kind, uint id, AOTCodeEntry* entry) {
501 if (entry->kind() == kind) {
502 assert(entry->id() == id, "sanity");
503 return true; // Found
504 }
505 return false;
506 }
507
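// The entries table in the mapped cache is a search table of (id, index) pairs
// sorted by id, followed by the AOTCodeEntry array itself; lookup is a binary
// search over the pairs, then a kind check on the referenced entry.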
508 AOTCodeEntry* AOTCodeCache::find_entry(AOTCodeEntry::Kind kind, uint id) {
509 assert(_for_use, "sanity");
510 uint count = _load_header->entries_count();
511 if (_load_entries == nullptr) {
512 // Read it
513 _search_entries = (uint*)addr(_load_header->entries_offset()); // [id, index]
514 _load_entries = (AOTCodeEntry*)(_search_entries + 2 * count);
515 log_debug(aot, codecache, init)("Read %d entries table at offset %d from AOT Code Cache", count, _load_header->entries_offset());
516 }
517 // Binary search
518 int l = 0;
519 int h = count - 1;
520 while (l <= h) {
521 int mid = (l + h) >> 1;
522 int ix = mid * 2;
523 uint is = _search_entries[ix];
524 if (is == id) {
525 int index = _search_entries[ix + 1];
526 AOTCodeEntry* entry = &(_load_entries[index]);
527 if (check_entry(kind, id, entry)) {
528 return entry; // Found
529 }
530 break; // No matching entry found
531 } else if (is < id) {
532 l = mid + 1;
533 } else {
534 h = mid - 1;
535 }
536 }
537 return nullptr;
538 }
539
540 extern "C" {
541 static int uint_cmp(const void *i, const void *j) {
542 uint a = *(uint *)i;
543 uint b = *(uint *)j;
544 return a > b ? 1 : a < b ? -1 : 0;
545 }
546 }
547
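// Write out the final AOT code region. The resulting layout is:
//   [ Header | code of each entry (DATA_ALIGNMENT aligned) | C strings |
//     search table of (id, index) pairs sorted by id | AOTCodeEntry array ]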
548 bool AOTCodeCache::finish_write() {
549 if (!align_write()) {
550 return false;
551 }
552 uint strings_offset = _write_position;
553 int strings_count = store_strings();
554 if (strings_count < 0) {
555 return false;
556 }
557 if (!align_write()) {
558 return false;
559 }
560 uint strings_size = _write_position - strings_offset;
561
562 uint entries_count = 0; // Number of entrant (useful) code entries
563 uint entries_offset = _write_position;
564
565 uint store_count = _store_entries_cnt;
566 if (store_count > 0) {
567 uint header_size = (uint)align_up(sizeof(AOTCodeCache::Header), DATA_ALIGNMENT);
568 uint code_count = store_count;
569 uint search_count = code_count * 2;
570 uint search_size = search_count * sizeof(uint);
571 uint entries_size = (uint)align_up(code_count * sizeof(AOTCodeEntry), DATA_ALIGNMENT); // In bytes
572 // _write_position includes size of code and strings
573 uint code_alignment = code_count * DATA_ALIGNMENT; // We align_up code size when storing it.
574 uint total_size = header_size + _write_position + code_alignment + search_size + entries_size;
575 assert(total_size < max_aot_code_size(), "AOT Code size (" UINT32_FORMAT " bytes) is greater than AOTCodeMaxSize(" UINT32_FORMAT " bytes).", total_size, max_aot_code_size());
576
577 // Create ordered search table for entries [id, index];
578 uint* search = NEW_C_HEAP_ARRAY(uint, search_count, mtCode);
579 // Allocate in AOT Cache buffer
580 char* buffer = (char *)AOTCacheAccess::allocate_aot_code_region(total_size + DATA_ALIGNMENT);
581 char* start = align_up(buffer, DATA_ALIGNMENT);
582 char* current = start + header_size; // Skip header
583
584 AOTCodeEntry* entries_address = _store_entries; // Pointer to latest entry
585 uint adapters_count = 0;
586 uint blobs_count = 0;
587 uint max_size = 0;
588 // AOTCodeEntry entries were allocated in reverse in store buffer.
589 // Process them in reverse order so the code stored first comes first in the cache.
590 for (int i = store_count - 1; i >= 0; i--) {
591 entries_address[i].set_next(nullptr); // clear pointers before storing data
592 uint size = align_up(entries_address[i].size(), DATA_ALIGNMENT);
593 if (size > max_size) {
594 max_size = size;
595 }
596 copy_bytes((_store_buffer + entries_address[i].offset()), (address)current, size);
597 entries_address[i].set_offset(current - start); // New offset
598 current += size;
599 uint n = write_bytes(&(entries_address[i]), sizeof(AOTCodeEntry));
600 if (n != sizeof(AOTCodeEntry)) {
601 FREE_C_HEAP_ARRAY(uint, search);
602 return false;
603 }
604 search[entries_count*2 + 0] = entries_address[i].id();
605 search[entries_count*2 + 1] = entries_count;
606 entries_count++;
607 AOTCodeEntry::Kind kind = entries_address[i].kind();
608 if (kind == AOTCodeEntry::Adapter) {
609 adapters_count++;
610 } else if (kind == AOTCodeEntry::Blob) {
611 blobs_count++;
612 }
613 }
614 if (entries_count == 0) {
615 log_info(aot, codecache, exit)("AOT Code Cache was not created: no entries");
616 FREE_C_HEAP_ARRAY(uint, search);
617 return true; // Nothing to write
618 }
619 assert(entries_count <= store_count, "%d > %d", entries_count, store_count);
620 // Write strings
621 if (strings_count > 0) {
622 copy_bytes((_store_buffer + strings_offset), (address)current, strings_size);
623 strings_offset = (current - start); // New offset
624 current += strings_size;
625 }
626
627 uint new_entries_offset = (current - start); // New offset
628 // Sort and store search table
629 qsort(search, entries_count, 2*sizeof(uint), uint_cmp);
630 search_size = 2 * entries_count * sizeof(uint);
631 copy_bytes((const char*)search, (address)current, search_size);
632 FREE_C_HEAP_ARRAY(uint, search);
633 current += search_size;
634
635 // Write entries
636 entries_size = entries_count * sizeof(AOTCodeEntry); // New size
637 copy_bytes((_store_buffer + entries_offset), (address)current, entries_size);
638 current += entries_size;
639 uint size = (current - start);
640 assert(size <= total_size, "%d > %d", size , total_size);
641
642 log_debug(aot, codecache, exit)(" Adapters: total=%u", adapters_count);
643 log_debug(aot, codecache, exit)(" All Blobs: total=%u", blobs_count);
644 log_debug(aot, codecache, exit)(" AOT code cache size: %u bytes, max entry's size: %u bytes", size, max_size);
645
646 // Finalize header
647 AOTCodeCache::Header* header = (AOTCodeCache::Header*)start;
648 header->init(size, (uint)strings_count, strings_offset,
649 entries_count, new_entries_offset,
650 adapters_count, blobs_count);
651
652 log_info(aot, codecache, exit)("Wrote %d AOT code entries to AOT Code Cache", entries_count);
653 }
654 return true;
655 }
656
657 //------------------Store/Load AOT code ----------------------
658
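// Each stored blob record consists of: the blob's name, the CodeBlob contents,
// relocation data, optional oop maps, and a table of entry-point offsets; the
// matching reader is AOTCodeReader::compile_code_blob() below.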
659 bool AOTCodeCache::store_code_blob(CodeBlob& blob, AOTCodeEntry::Kind entry_kind, uint id, const char* name, int entry_offset_count, int* entry_offsets) {
660 AOTCodeCache* cache = open_for_dump();
661 if (cache == nullptr) {
662 return false;
663 }
664 assert(AOTCodeEntry::is_valid_entry_kind(entry_kind), "invalid entry_kind %d", entry_kind);
665
666 if ((entry_kind == AOTCodeEntry::Adapter) && !AOTAdapterCaching) {
667 return false;
668 }
669 log_debug(aot, codecache, stubs)("Writing blob '%s' to AOT Code Cache", name);
670
671 #ifdef ASSERT
672 LogStreamHandle(Trace, aot, codecache, stubs) log;
673 if (log.is_enabled()) {
674 FlagSetting fs(PrintRelocations, true);
675 blob.print_on(&log);
676 }
677 #endif
678 // We need to take a lock to prevent a race between compiler threads generating AOT code
679 // and the main thread generating adapters.
680 MutexLocker ml(Compile_lock);
681 if (!cache->align_write()) {
682 return false;
683 }
684 uint entry_position = cache->_write_position;
685
686 // Write name
730 n = cache->write_bytes(&off, sizeof(uint32_t));
731 if (n != sizeof(uint32_t)) {
732 return false;
733 }
734 }
735 uint entry_size = cache->_write_position - entry_position;
736 AOTCodeEntry* entry = new(cache) AOTCodeEntry(entry_kind, id,
737 entry_position, entry_size, name_offset, name_size,
738 blob_offset, has_oop_maps, blob.content_begin());
739 log_debug(aot, codecache, stubs)("Wrote code blob '%s(id=%d)' to AOT Code Cache", name, id);
740 return true;
741 }
742
743 CodeBlob* AOTCodeCache::load_code_blob(AOTCodeEntry::Kind entry_kind, uint id, const char* name, int entry_offset_count, int* entry_offsets) {
744 AOTCodeCache* cache = open_for_use();
745 if (cache == nullptr) {
746 return nullptr;
747 }
748 assert(AOTCodeEntry::is_valid_entry_kind(entry_kind), "invalid entry_kind %d", entry_kind);
749
750 if ((entry_kind == AOTCodeEntry::Adapter) && !AOTAdapterCaching) {
751 return nullptr;
752 }
753 log_debug(aot, codecache, stubs)("Reading blob '%s' from AOT Code Cache", name);
754
755 AOTCodeEntry* entry = cache->find_entry(entry_kind, id);
756 if (entry == nullptr) {
757 return nullptr;
758 }
759 AOTCodeReader reader(cache, entry);
760 return reader.compile_code_blob(name, entry_offset_count, entry_offsets);
761 }
762
763 CodeBlob* AOTCodeReader::compile_code_blob(const char* name, int entry_offset_count, int* entry_offsets) {
764 uint entry_position = _entry->offset();
765
766 // Read name
767 uint name_offset = entry_position + _entry->name_offset();
768 uint name_size = _entry->name_size(); // Includes '\0'
769 const char* stored_name = addr(name_offset);
770
771 if (strncmp(stored_name, name, (name_size - 1)) != 0) {
772 log_warning(aot, codecache, stubs)("Saved blob's name '%s' is different from the expected name '%s'",
773 stored_name, name);
774 ((AOTCodeCache*)_cache)->set_failed();
775 report_load_failure();
776 return nullptr;
777 }
778
779 // Read archived code blob
780 uint offset = entry_position + _entry->blob_offset();
781 CodeBlob* archived_blob = (CodeBlob*)addr(offset);
782 offset += archived_blob->size();
783
784 address reloc_data = (address)addr(offset);
785 offset += archived_blob->relocation_size();
786 set_read_position(offset);
787
788 ImmutableOopMapSet* oop_maps = nullptr;
789 if (_entry->has_oop_maps()) {
790 oop_maps = read_oop_map_set();
791 }
792
793 CodeBlob* code_blob = CodeBlob::create(archived_blob, stored_name, reloc_data, oop_maps);
794 if (code_blob == nullptr) { // no space left in CodeCache
795 return nullptr;
796 }
806 for (int i = 0; i < stored_count; i++) {
807 uint32_t off = *(uint32_t*)addr(offset);
808 offset += sizeof(uint32_t);
809 const char* entry_name = (_entry->kind() == AOTCodeEntry::Adapter) ? AdapterHandlerEntry::entry_name(i) : "";
810 log_trace(aot, codecache, stubs)("Reading adapter '%s:%s' (0x%x) offset: 0x%x from AOT Code Cache",
811 stored_name, entry_name, _entry->id(), off);
812 entry_offsets[i] = off;
813 }
814
815 log_debug(aot, codecache, stubs)("Read blob '%s' from AOT Code Cache", name);
816 #ifdef ASSERT
817 LogStreamHandle(Trace, aot, codecache, stubs) log;
818 if (log.is_enabled()) {
819 FlagSetting fs(PrintRelocations, true);
820 code_blob->print_on(&log);
821 }
822 #endif
823 return code_blob;
824 }
825
826 // ------------ process code and data --------------
827
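// For every relocation in a blob we persist one uint: by default 0, and for runtime
// calls and external words an AOTCodeAddressTable id for the target address.
// fix_relocations() resolves each id back to a live address in the loading VM and
// patches the relocation.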
828 bool AOTCodeCache::write_relocations(CodeBlob& code_blob) {
829 GrowableArray<uint> reloc_data;
830 RelocIterator iter(&code_blob);
831 LogStreamHandle(Trace, aot, codecache, reloc) log;
832 while (iter.next()) {
833 int idx = reloc_data.append(0); // default value
834 switch (iter.type()) {
835 case relocInfo::none:
836 break;
837 case relocInfo::runtime_call_type: {
838 // Record offset of runtime destination
839 CallRelocation* r = (CallRelocation*)iter.reloc();
840 address dest = r->destination();
841 if (dest == r->addr()) { // possible call via trampoline on Aarch64
842 dest = (address)-1; // do nothing in this case when loading this relocation
843 }
844 reloc_data.at_put(idx, _table->id_for_address(dest, iter, &code_blob));
845 break;
846 }
847 case relocInfo::runtime_call_w_cp_type:
848 fatal("runtime_call_w_cp_type unimplemented");
849 break;
850 case relocInfo::external_word_type: {
851 // Record offset of runtime target
852 address target = ((external_word_Relocation*)iter.reloc())->target();
853 reloc_data.at_put(idx, _table->id_for_address(target, iter, &code_blob));
854 break;
855 }
856 case relocInfo::internal_word_type:
857 break;
858 case relocInfo::section_word_type:
859 break;
860 default:
861 fatal("relocation %d unimplemented", (int)iter.type());
862 break;
863 }
864 if (log.is_enabled()) {
865 iter.print_current_on(&log);
866 }
867 }
868
869 // Write additional relocation data: uint per relocation
870 // Write the count first
871 int count = reloc_data.length();
872 write_bytes(&count, sizeof(int));
873 for (GrowableArrayIterator<uint> iter = reloc_data.begin();
874 iter != reloc_data.end(); ++iter) {
875 uint value = *iter;
876 int n = write_bytes(&value, sizeof(uint));
877 if (n != sizeof(uint)) {
878 return false;
879 }
880 }
881 return true;
882 }
883
884 void AOTCodeReader::fix_relocations(CodeBlob* code_blob) {
885 LogStreamHandle(Trace, aot, reloc) log;
886 uint offset = read_position();
887 int count = *(int*)addr(offset);
888 offset += sizeof(int);
889 if (log.is_enabled()) {
890 log.print_cr("======== extra relocations count=%d", count);
891 }
892 uint* reloc_data = (uint*)addr(offset);
893 offset += (count * sizeof(uint));
894 set_read_position(offset);
895
896 RelocIterator iter(code_blob);
897 int j = 0;
898 while (iter.next()) {
899 switch (iter.type()) {
900 case relocInfo::none:
901 break;
902 case relocInfo::runtime_call_type: {
903 address dest = _cache->address_for_id(reloc_data[j]);
904 if (dest != (address)-1) {
905 ((CallRelocation*)iter.reloc())->set_destination(dest);
906 }
907 break;
908 }
909 case relocInfo::runtime_call_w_cp_type:
910 fatal("runtime_call_w_cp_type unimplemented");
911 break;
912 case relocInfo::external_word_type: {
913 address target = _cache->address_for_id(reloc_data[j]);
914 // Add external address to global table
915 int index = ExternalsRecorder::find_index(target);
916 // Update index in relocation
917 Relocation::add_jint(iter.data(), index);
918 external_word_Relocation* reloc = (external_word_Relocation*)iter.reloc();
919 assert(reloc->target() == target, "sanity");
920 reloc->set_value(target); // Patch address in the code
921 break;
922 }
923 case relocInfo::internal_word_type: {
924 internal_word_Relocation* r = (internal_word_Relocation*)iter.reloc();
925 r->fix_relocation_after_aot_load(aot_code_entry()->dumptime_content_start_addr(), code_blob->content_begin());
926 break;
927 }
928 case relocInfo::section_word_type: {
929 section_word_Relocation* r = (section_word_Relocation*)iter.reloc();
930 r->fix_relocation_after_aot_load(aot_code_entry()->dumptime_content_start_addr(), code_blob->content_begin());
931 break;
932 }
933 default:
934 fatal("relocation %d unimplemented", (int)iter.type());
935 break;
936 }
937 if (log.is_enabled()) {
938 iter.print_current_on(&log);
939 }
940 j++;
941 }
942 assert(j == count, "sanity");
943 }
944
945 bool AOTCodeCache::write_oop_map_set(CodeBlob& cb) {
946 ImmutableOopMapSet* oopmaps = cb.oop_maps();
947 int oopmaps_size = oopmaps->nr_of_bytes();
948 if (!write_bytes(&oopmaps_size, sizeof(int))) {
949 return false;
950 }
951 uint n = write_bytes(oopmaps, oopmaps->nr_of_bytes());
952 if (n != (uint)oopmaps->nr_of_bytes()) {
953 return false;
954 }
955 return true;
956 }
957
958 ImmutableOopMapSet* AOTCodeReader::read_oop_map_set() {
959 uint offset = read_position();
960 int size = *(int *)addr(offset);
961 offset += sizeof(int);
962 ImmutableOopMapSet* oopmaps = (ImmutableOopMapSet *)addr(offset);
963 offset += size;
964 set_read_position(offset);
965 return oopmaps;
966 }
967
968 //======================= AOTCodeAddressTable ===============
969
970 // address table ids for generated routines, external addresses and C
971 // string addresses are partitioned into positive integer ranges
972 // defined by the following positive base and max values
973 // i.e. [_extrs_base, _extrs_base + _extrs_max -1],
974 // [_blobs_base, _blobs_base + _blobs_max -1],
975 // ...
976 // [_c_str_base, _c_str_base + _c_str_max -1],
977 #define _extrs_max 10
978 #define _blobs_max 10
979 #define _all_max 20
980
981 #define _extrs_base 0
982 #define _blobs_base (_extrs_base + _extrs_max)
983 #define _blobs_end (_blobs_base + _blobs_max)
984
985 #if (_blobs_end > _all_max)
986 #error AOTCodeAddress table ranges need adjusting
987 #endif
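// Example: an id in [_blobs_base, _blobs_base + _blobs_length) resolves to
// _blobs_addr[id - _blobs_base]; ids beyond _c_str_base + _c_str_max are treated
// as byte offsets from os::init (see address_for_id() below).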
988
989 #define SET_ADDRESS(type, addr) \
990 { \
991 type##_addr[type##_length++] = (address) (addr); \
992 assert(type##_length <= type##_max, "increase size"); \
993 }
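// SET_ADDRESS(_extrs, f) appends (address)f to _extrs_addr, bumps _extrs_length,
// and asserts the count stays within _extrs_max.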
994
995 static bool initializing_extrs = false;
996
997 void AOTCodeAddressTable::init_extrs() {
998 if (_extrs_complete || initializing_extrs) return; // Done already
999 initializing_extrs = true;
1000 _extrs_addr = NEW_C_HEAP_ARRAY(address, _extrs_max, mtCode);
1001
1002 _extrs_length = 0;
1003
1004 // Record addresses of VM runtime methods
1005 SET_ADDRESS(_extrs, SharedRuntime::fixup_callers_callsite);
1006 SET_ADDRESS(_extrs, SharedRuntime::handle_wrong_method);
1007 SET_ADDRESS(_extrs, SharedRuntime::handle_wrong_method_abstract);
1008 SET_ADDRESS(_extrs, SharedRuntime::handle_wrong_method_ic_miss);
1009 #if INCLUDE_G1GC
1010 SET_ADDRESS(_extrs, G1BarrierSetRuntime::write_ref_field_post_entry);
1011 SET_ADDRESS(_extrs, G1BarrierSetRuntime::write_ref_field_pre_entry);
1012 #endif
1013 #if INCLUDE_ZGC
1014 SET_ADDRESS(_extrs, ZBarrierSetRuntime::load_barrier_on_phantom_oop_field_preloaded_addr());
1015 #if defined(AMD64)
1016 SET_ADDRESS(_extrs, &ZPointerLoadShift);
1017 #endif
1018 #endif
1019 #ifdef COMPILER2
1020 SET_ADDRESS(_extrs, OptoRuntime::handle_exception_C);
1021 #endif
1022 #ifndef ZERO
1023 #if defined(AMD64) || defined(AARCH64) || defined(RISCV64)
1024 SET_ADDRESS(_extrs, MacroAssembler::debug64);
1025 #endif
1026 #endif // ZERO
1027
1028 _extrs_complete = true;
1029 log_debug(aot, codecache, init)("External addresses recorded");
1030 }
1031
1032 static bool initializing_shared_blobs = false;
1033
1034 void AOTCodeAddressTable::init_shared_blobs() {
1035 if (_complete || initializing_shared_blobs) return; // Done already
1036 initializing_shared_blobs = true;
1037 _blobs_addr = NEW_C_HEAP_ARRAY(address, _blobs_max, mtCode);
1038
1039 _blobs_length = 0; // for shared blobs
1040
1041 // Record addresses of generated code blobs
1042 SET_ADDRESS(_blobs, SharedRuntime::get_handle_wrong_method_stub());
1043 SET_ADDRESS(_blobs, SharedRuntime::get_ic_miss_stub());
1044
1045 _shared_blobs_complete = true;
1046 log_debug(aot, codecache, init)("Early shared blobs recorded");
1047 _complete = true;
1048 }
1049
1050 #undef SET_ADDRESS
1051
1052 AOTCodeAddressTable::~AOTCodeAddressTable() {
1053 if (_extrs_addr != nullptr) {
1054 FREE_C_HEAP_ARRAY(address, _extrs_addr);
1055 }
1056 if (_blobs_addr != nullptr) {
1057 FREE_C_HEAP_ARRAY(address, _blobs_addr);
1058 }
1059 }
1060
1061 #ifdef PRODUCT
1062 #define MAX_STR_COUNT 200
1063 #else
1064 #define MAX_STR_COUNT 500
1065 #endif
1066 #define _c_str_max MAX_STR_COUNT
1067 #define _c_str_base _all_max
1068
1069 static const char* _C_strings_in[MAX_STR_COUNT] = {nullptr}; // Incoming strings
1070 static const char* _C_strings[MAX_STR_COUNT] = {nullptr}; // Our duplicates
1071 static int _C_strings_count = 0;
1072 static int _C_strings_s[MAX_STR_COUNT] = {0};
1073 static int _C_strings_id[MAX_STR_COUNT] = {0};
1074 static int _C_strings_used = 0;
1075
1076 void AOTCodeCache::load_strings() {
1077 uint strings_count = _load_header->strings_count();
1078 if (strings_count == 0) {
1079 return;
1080 }
1081 uint strings_offset = _load_header->strings_offset();
1082 uint* string_lengths = (uint*)addr(strings_offset);
1083 strings_offset += (strings_count * sizeof(uint));
1084 uint strings_size = _load_header->entries_offset() - strings_offset;
1085 // We have to keep cached strings longer than _cache buffer
1203
1204 address AOTCodeAddressTable::address_for_id(int idx) {
1205 if (!_extrs_complete) {
1206 fatal("AOT Code Cache VM runtime addresses table is not complete");
1207 }
1208 if (idx == -1) {
1209 return (address)-1;
1210 }
1211 uint id = (uint)idx;
1212 // special case for symbols encoded as offsets relative to os::init
1213 if (id > (_c_str_base + _c_str_max)) {
1214 return (address)os::init + idx;
1215 }
1216 if (idx < 0) {
1217 fatal("Incorrect id %d for AOT Code Cache addresses table", id);
1218 }
1219 // no need to compare unsigned id against 0
1220 if (/* id >= _extrs_base && */ id < _extrs_length) {
1221 return _extrs_addr[id - _extrs_base];
1222 }
1223 if (id >= _blobs_base && id < _blobs_base + _blobs_length) {
1224 return _blobs_addr[id - _blobs_base];
1225 }
1226 if (id >= _c_str_base && id < (_c_str_base + (uint)_C_strings_count)) {
1227 return address_for_C_string(id - _c_str_base);
1228 }
1229 fatal("Incorrect id %d for AOT Code Cache addresses table", id);
1230 return nullptr;
1231 }
1232
1233 int AOTCodeAddressTable::id_for_address(address addr, RelocIterator reloc, CodeBlob* code_blob) {
1234 if (!_extrs_complete) {
1235 fatal("AOT Code Cache VM runtime addresses table is not complete");
1236 }
1237 int id = -1;
1238 if (addr == (address)-1) { // Static call stub has jump to itself
1239 return id;
1240 }
1241 // Search for C string
1242 id = id_for_C_string(addr);
1243 if (id >= 0) {
1244 return id + _c_str_base;
1245 }
1246 if (StubRoutines::contains(addr)) {
1247 // Search in stubs
1248 StubCodeDesc* desc = StubCodeDesc::desc_for(addr);
1249 if (desc == nullptr) {
1250 desc = StubCodeDesc::desc_for(addr + frame::pc_return_offset);
1251 }
1252 const char* sub_name = (desc != nullptr) ? desc->name() : "<unknown>";
1253 fatal("Address " INTPTR_FORMAT " for Stub:%s is missing in AOT Code Cache addresses table", p2i(addr), sub_name);
1254 } else {
1255 CodeBlob* cb = CodeCache::find_blob(addr);
1256 if (cb != nullptr) {
1257 // Search in code blobs
1258 int id_base = _blobs_base;
1259 id = search_address(addr, _blobs_addr, _blobs_length);
1260 if (id < 0) {
1261 fatal("Address " INTPTR_FORMAT " for Blob:%s is missing in AOT Code Cache addresses table", p2i(addr), cb->name());
1262 } else {
1263 return id_base + id;
1264 }
1265 } else {
1266 // Search in runtime functions
1267 id = search_address(addr, _extrs_addr, _extrs_length);
1268 if (id < 0) {
1269 ResourceMark rm;
1270 const int buflen = 1024;
1271 char* func_name = NEW_RESOURCE_ARRAY(char, buflen);
1272 int offset = 0;
1273 if (os::dll_address_to_function_name(addr, func_name, buflen, &offset)) {
1274 if (offset > 0) {
1275 // Could be address of C string
1276 uint dist = (uint)pointer_delta(addr, (address)os::init, 1);
1277 log_debug(aot, codecache)("Address " INTPTR_FORMAT " (offset %d) for runtime target '%s' is missing in AOT Code Cache addresses table",
1278 p2i(addr), dist, (const char*)addr);
1279 assert(dist > (uint)(_all_max + MAX_STR_COUNT), "change encoding of distance");
1280 return dist;
1281 }
1282 reloc.print_current_on(tty);
1283 code_blob->print_on(tty);
1284 code_blob->print_code_on(tty);
1285 fatal("Address " INTPTR_FORMAT " for runtime target '%s+%d' is missing in AOT Code Cache addresses table", p2i(addr), func_name, offset);
1286 } else {
1287 reloc.print_current_on(tty);
1288 code_blob->print_on(tty);
1289 code_blob->print_code_on(tty);
1290 os::find(addr, tty);
1291 fatal("Address " INTPTR_FORMAT " for <unknown>/('%s') is missing in AOT Code Cache addresses table", p2i(addr), (const char*)addr);
1292 }
1293 } else {
1294 return _extrs_base + id;
1295 }
1296 }
1297 }
1298 return id;
1299 }
1300
1301 void AOTCodeCache::print_on(outputStream* st) {
1302 AOTCodeCache* cache = open_for_use();
1303 if (cache != nullptr) {
1304 uint count = cache->_load_header->entries_count();
1305 uint* search_entries = (uint*)cache->addr(cache->_load_header->entries_offset()); // [id, index]
1306 AOTCodeEntry* load_entries = (AOTCodeEntry*)(search_entries + 2 * count);
1307
1308 for (uint i = 0; i < count; i++) {
1309 // Use search_entries[] to order output
1310 int index = search_entries[2*i + 1];
1311 AOTCodeEntry* entry = &(load_entries[index]);
1312
1313 uint entry_position = entry->offset();
1314 uint name_offset = entry->name_offset() + entry_position;
1315 const char* saved_name = cache->addr(name_offset);
1316
1317 st->print_cr("%4u: entry_idx:%4u Kind:%u Id:%u size=%u '%s'",
1318 i, index, entry->kind(), entry->id(), entry->size(), saved_name);
1319 }
1320 } else {
1321 st->print_cr("failed to map code cache");
1322 }
1323 }
1324
1 /*
2 * Copyright (c) 2023, 2025, Oracle and/or its affiliates. All rights reserved.
3 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
4 *
5 * This code is free software; you can redistribute it and/or modify it
6 * under the terms of the GNU General Public License version 2 only, as
7 * published by the Free Software Foundation.
8 *
9 * This code is distributed in the hope that it will be useful, but WITHOUT
10 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
11 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
12 * version 2 for more details (a copy is included in the LICENSE file that
13 * accompanied this code).
14 *
15 * You should have received a copy of the GNU General Public License version
16 * 2 along with this work; if not, write to the Free Software Foundation,
17 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
18 *
19 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
20 * or visit www.oracle.com if you need additional information or have any
21 * questions.
22 *
23 */
24
25 #include "asm/macroAssembler.hpp"
26 #include "asm/codeBuffer.hpp"
27 #include "cds/aotCacheAccess.hpp"
28 #include "cds/cdsConfig.hpp"
29 #include "cds/heapShared.hpp"
30 #include "cds/metaspaceShared.hpp"
31 #include "ci/ciConstant.hpp"
32 #include "ci/ciEnv.hpp"
33 #include "ci/ciField.hpp"
34 #include "ci/ciMethod.hpp"
35 #include "ci/ciMethodData.hpp"
36 #include "ci/ciObject.hpp"
37 #include "ci/ciUtilities.inline.hpp"
38 #include "classfile/javaAssertions.hpp"
39 #include "classfile/stringTable.hpp"
40 #include "classfile/symbolTable.hpp"
41 #include "classfile/systemDictionary.hpp"
42 #include "classfile/vmClasses.hpp"
43 #include "classfile/vmIntrinsics.hpp"
44 #include "code/aotCodeCache.hpp"
45 #include "code/codeBlob.hpp"
46 #include "code/codeCache.hpp"
47 #include "code/oopRecorder.inline.hpp"
48 #include "compiler/abstractCompiler.hpp"
49 #include "compiler/compilationPolicy.hpp"
50 #include "compiler/compileBroker.hpp"
51 #include "compiler/compileTask.hpp"
52 #include "gc/g1/g1BarrierSetRuntime.hpp"
53 #include "gc/shared/gcConfig.hpp"
54 #include "logging/logStream.hpp"
55 #include "memory/memoryReserver.hpp"
56 #include "memory/universe.hpp"
57 #include "oops/klass.inline.hpp"
58 #include "oops/method.inline.hpp"
59 #include "oops/trainingData.hpp"
60 #include "prims/jvmtiThreadState.hpp"
61 #include "runtime/atomic.hpp"
62 #include "runtime/flags/flagSetting.hpp"
63 #include "runtime/globals_extension.hpp"
64 #include "runtime/handles.inline.hpp"
65 #include "runtime/java.hpp"
66 #include "runtime/jniHandles.inline.hpp"
67 #include "runtime/mutexLocker.hpp"
68 #include "runtime/os.inline.hpp"
69 #include "runtime/sharedRuntime.hpp"
70 #include "runtime/stubCodeGenerator.hpp"
71 #include "runtime/stubRoutines.hpp"
72 #include "runtime/timerTrace.hpp"
73 #include "runtime/threadIdentifier.hpp"
74 #include "utilities/ostream.hpp"
75 #include "utilities/spinYield.hpp"
76 #ifdef COMPILER1
77 #include "c1/c1_Runtime1.hpp"
78 #include "c1/c1_LIRAssembler.hpp"
79 #include "gc/shared/c1/barrierSetC1.hpp"
80 #include "gc/g1/c1/g1BarrierSetC1.hpp"
81 #if INCLUDE_SHENANDOAHGC
82 #include "gc/shenandoah/c1/shenandoahBarrierSetC1.hpp"
83 #endif
84 #include "gc/z/c1/zBarrierSetC1.hpp"
85 #endif
86 #ifdef COMPILER2
87 #include "opto/runtime.hpp"
88 #endif
89 #if INCLUDE_JVMCI
90 #include "jvmci/jvmci.hpp"
91 #endif
92 #if INCLUDE_G1GC
93 #include "gc/g1/g1BarrierSetRuntime.hpp"
94 #endif
95 #if INCLUDE_SHENANDOAHGC
96 #include "gc/shenandoah/shenandoahRuntime.hpp"
97 #endif
98 #if INCLUDE_ZGC
99 #include "gc/z/zBarrierSetRuntime.hpp"
100 #endif
101
102 #include <sys/stat.h>
103 #include <errno.h>
104
105 const char* aot_code_entry_kind_name[] = {
106 #define DECL_KIND_STRING(kind) XSTR(kind),
107 DO_AOTCODEENTRY_KIND(DECL_KIND_STRING)
108 #undef DECL_KIND_STRING
109 };
110
111 static elapsedTimer _t_totalLoad;
112 static elapsedTimer _t_totalRegister;
113 static elapsedTimer _t_totalFind;
114 static elapsedTimer _t_totalStore;
115
116 static bool enable_timers() {
117 return CITime || log_is_enabled(Info, init);
118 }
119
120 static void report_load_failure() {
121 if (AbortVMOnAOTCodeFailure) {
122 vm_exit_during_initialization("Unable to use AOT Code Cache.", nullptr);
123 }
124 log_info(aot, codecache, init)("Unable to use AOT Code Cache.");
125 AOTCodeCache::disable_caching();
126 }
127
128 static void report_store_failure() {
129 if (AbortVMOnAOTCodeFailure) {
130 tty->print_cr("Unable to create AOT Code Cache.");
131 vm_abort(false);
132 }
133 log_info(aot, codecache, exit)("Unable to create AOT Code Cache.");
134 AOTCodeCache::disable_caching();
135 }
136
137 // The sequence of AOT code caching flag and parameter settings:
138 //
139 // 1. The initial AOT code caching flags are set
140 // during the call to CDSConfig::check_vm_args_consistency().
141 //
142 // 2. The earliest AOT code state check is done in compilationPolicy_init(),
143 // where we set the number of compiler threads for the AOT assembly phase.
144 //
145 // 3. We determine the presence of AOT code in the AOT Cache in
146 // MetaspaceShared::open_static_archive(), which is called
147 // after compilationPolicy_init() but before codeCache_init().
148 //
149 // 4. AOTCodeCache::initialize() is called during universe_init()
150 // and performs the final AOT state and flag settings.
151 //
152 // 5. Finally, AOTCodeCache::init2() is called after universe_init(),
153 // when all GC settings are finalized.
154 
155 // The following methods determine what we do with AOT code depending
156 // on the phase of the AOT process: assembly or production.
157
158 bool AOTCodeCache::is_dumping_code() {
159 return AOTCodeCaching && CDSConfig::is_dumping_final_static_archive();
160 }
161 bool AOTCodeCache::is_dumping_stub() {
162 return AOTStubCaching && CDSConfig::is_dumping_final_static_archive();
163 }
164 bool AOTCodeCache::is_dumping_adapter() {
165 return AOTAdapterCaching && CDSConfig::is_dumping_final_static_archive();
166 }
167
168 bool AOTCodeCache::is_using_code() {
169 return AOTCodeCaching && CDSConfig::is_using_archive();
170 }
171 bool AOTCodeCache::is_using_stub() {
172 return AOTStubCaching && CDSConfig::is_using_archive();
173 }
174 bool AOTCodeCache::is_using_adapter() {
175 return AOTAdapterCaching && CDSConfig::is_using_archive();
176 }
177
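// enable_caching() only turns on flags that are still at their default values
// (FLAG_SET_ERGO_IF_DEFAULT), so flags set explicitly on the command line are
// respected; disable_caching() forces all three flags off.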
178 void AOTCodeCache::enable_caching() {
179 FLAG_SET_ERGO_IF_DEFAULT(AOTCodeCaching, true);
180 FLAG_SET_ERGO_IF_DEFAULT(AOTStubCaching, true);
181 FLAG_SET_ERGO_IF_DEFAULT(AOTAdapterCaching, true);
182 }
183
184 void AOTCodeCache::disable_caching() {
185 FLAG_SET_ERGO(AOTCodeCaching, false);
186 FLAG_SET_ERGO(AOTStubCaching, false);
187 FLAG_SET_ERGO(AOTAdapterCaching, false);
188 }
189
190 bool AOTCodeCache::is_caching_enabled() {
191 return AOTCodeCaching || AOTStubCaching || AOTAdapterCaching;
192 }
193
194 static uint _max_aot_code_size = 0;
195 uint AOTCodeCache::max_aot_code_size() {
196 return _max_aot_code_size;
197 }
198
199 bool AOTCodeCache::is_C3_on() {
200 #if INCLUDE_JVMCI
201 if (UseJVMCICompiler) {
202 return (AOTCodeCaching) && UseC2asC3;
203 }
204 #endif
205 return false;
206 }
207
208 bool AOTCodeCache::is_code_load_thread_on() {
209 return UseCodeLoadThread && is_using_code();
210 }
211
212 bool AOTCodeCache::allow_const_field(ciConstant& value) {
213 return !is_on() || !is_dumping_code() // Restrict only when we generate cache
214 // Can not trust primitive too || !is_reference_type(value.basic_type())
215 // May disable this too for now || is_reference_type(value.basic_type()) && value.as_object()->should_be_constant()
216 ;
217 }
218
219 // This is called from MetaspaceShared::initialize_shared_spaces(),
220 // which is called from universe_init().
221 // At this point all AOT class linking settings are finalized
222 // and the AOT cache is open, so we can map the AOT code region.
223 void AOTCodeCache::initialize() {
224 #if defined(ZERO) || !(defined(AMD64) || defined(AARCH64))
225 log_info(aot, codecache, init)("AOT Code Cache is not supported on this platform.");
226 AOTAdapterCaching = false;
227 return;
228 #else
229 if (FLAG_IS_DEFAULT(AOTCache)) {
230 log_info(aot, codecache, init)("AOT Code Cache is not used: AOTCache is not specified.");
231 disable_caching();
232 return; // AOTCache must be specified to dump and use AOT code
233 }
234 bool is_dumping = false;
235 bool is_using = false;
236 if (CDSConfig::is_dumping_final_static_archive() && CDSConfig::is_dumping_aot_linked_classes()) {
237 enable_caching();
238 is_dumping = is_caching_enabled();
239 } else if (CDSConfig::is_using_archive() && CDSConfig::is_using_aot_linked_classes()) {
240 enable_caching();
241 is_using = is_caching_enabled();
242 } else {
243 log_info(aot, codecache, init)("AOT Code Cache is not used: AOT Class Linking is not used.");
244 disable_caching();
245 return; // nothing to do
246 }
247 if (!(is_dumping || is_using)) {
248 disable_caching();
249 return; // AOT code caching disabled on command line
250 }
251 if (AOTCodeCaching) {
252 if (FLAG_IS_DEFAULT(ClassInitBarrierMode)) {
253 FLAG_SET_ERGO(ClassInitBarrierMode, 1);
254 }
255 } else if (ClassInitBarrierMode > 0) {
256 log_info(aot, codecache, init)("Set ClassInitBarrierMode to 0 because AOTCodeCaching is false.");
257 FLAG_SET_ERGO(ClassInitBarrierMode, 0);
258 }
259 // Reserve the AOT Cache region when we are dumping AOT code.
260 _max_aot_code_size = AOTCodeMaxSize;
261 if (is_dumping && !FLAG_IS_DEFAULT(AOTCodeMaxSize)) {
262 if (!is_aligned(AOTCodeMaxSize, os::vm_allocation_granularity())) {
263 _max_aot_code_size = align_up(AOTCodeMaxSize, os::vm_allocation_granularity());
264 log_debug(aot,codecache,init)("Max AOT Code Cache size is aligned up to %uK", (int)(max_aot_code_size()/K));
265 }
266 }
267 size_t aot_code_size = is_using ? AOTCacheAccess::get_aot_code_region_size() : 0;
268 if (is_using && aot_code_size == 0) {
269 log_info(aot, codecache, init)("AOT Code Cache is empty");
270 disable_caching();
271 return;
272 }
273 if (!open_cache(is_dumping, is_using)) {
274 if (is_using) {
275 report_load_failure();
276 } else {
277 report_store_failure();
278 }
279 return;
280 }
281 if (is_dumping) {
282 FLAG_SET_DEFAULT(FoldStableValues, false);
283 FLAG_SET_DEFAULT(ForceUnreachable, true);
284 }
285 FLAG_SET_DEFAULT(DelayCompilerStubsGeneration, false);
286 #endif // defined(AMD64) || defined(AARCH64)
287 }
288
289 // It is called after universe_init() when all GC settings are finalized.
290 void AOTCodeCache::init2() {
291 if (!is_on()) {
292 return;
293 }
294 // After Universe initialized
295 BarrierSet* bs = BarrierSet::barrier_set();
296 if (bs->is_a(BarrierSet::CardTableBarrierSet)) {
297 address byte_map_base = ci_card_table_address_as<address>();
298 if (is_on_for_dump() && !external_word_Relocation::can_be_relocated(byte_map_base)) {
299 // Bail out since we can't encode card table base address with relocation
300 log_warning(aot, codecache, init)("Can't create AOT Code Cache because card table base address is not relocatable: " INTPTR_FORMAT, p2i(byte_map_base));
301 close();
302 report_load_failure();
303 return;
304 }
305 }
306 if (!verify_vm_config()) {
307 close();
308 report_load_failure();
309 return;
310 }
311
312 // initialize aot runtime constants as appropriate to this runtime
313 AOTRuntimeConstants::initialize_from_runtime();
314 // initialize the table of external routines so we can save
315 // generated code blobs that reference them
316 init_extrs_table();
317 // initialize the table of initial stubs so we can save
318 // generated code blobs that reference them
319 init_early_stubs_table();
320 }
321
322 AOTCodeCache* AOTCodeCache::_cache = nullptr;
323
324 bool AOTCodeCache::open_cache(bool is_dumping, bool is_using) {
325 AOTCodeCache* cache = new AOTCodeCache(is_dumping, is_using);
326 if (cache->failed()) {
327 delete cache;
328 _cache = nullptr;
329 return false;
330 }
331 _cache = cache;
332 return true;
333 }
334
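// For each AOT code entry matching nm, prints a compact tag such as "AP4+D1[LF]#42":
// 'A' for AOT, 'P' if marked for preload, the comp level, '+D<n>' the decompile count,
// flags L/F/I for loaded/load-failed/not-entrant, and '#<id>' the compile id.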
335 static void print_helper(nmethod* nm, outputStream* st) {
336 AOTCodeCache::iterate([&](AOTCodeEntry* e) {
337 if (e->method() == nm->method()) {
338 ResourceMark rm;
339 stringStream ss;
340 ss.print("A%s%d", (e->for_preload() ? "P" : ""), e->comp_level());
341 if (e->decompile() > 0) {
342 ss.print("+D%d", e->decompile());
343 }
344 ss.print("[%s%s%s]",
345 (e->is_loaded() ? "L" : ""),
346 (e->load_fail() ? "F" : ""),
347 (e->not_entrant() ? "I" : ""));
348 ss.print("#%d", e->comp_id());
349
350 st->print(" %s", ss.freeze());
351 }
352 });
353 }
354
355 void AOTCodeCache::close() {
356 if (is_on()) {
357 if (AOTCodeCache::is_on_for_use()) {
358 LogStreamHandle(Info, init) log;
359 if (log.is_enabled()) {
360 log.print_cr("AOT Code Cache statistics (when closed): ");
361 AOTCodeCache::print_statistics_on(&log);
362 log.cr();
363 AOTCodeCache::print_timers_on(&log);
364
365 LogStreamHandle(Info, aot, codecache, init) log1;
366 if (log1.is_enabled()) {
367 AOTCodeCache::print_unused_entries_on(&log1);
368 }
369
370 LogStreamHandle(Info, aot, codecache) aot_info;
371 // need a lock to traverse the code cache
372 MutexLocker locker(CodeCache_lock, Mutex::_no_safepoint_check_flag);
373 if (aot_info.is_enabled()) {
374 NMethodIterator iter(NMethodIterator::all);
375 while (iter.next()) {
376 nmethod* nm = iter.method();
377 if (nm->is_in_use() && !nm->is_native_method() && !nm->is_osr_method()) {
378 aot_info.print("%5d:%c%c%c%d:", nm->compile_id(),
379 (nm->method()->is_shared() ? 'S' : ' '),
380 (nm->is_aot() ? 'A' : ' '),
381 (nm->preloaded() ? 'P' : ' '),
382 nm->comp_level());
383 print_helper(nm, &aot_info);
384 aot_info.print(": ");
385 CompileTask::print(&aot_info, nm, nullptr, true /*short_form*/);
386
387 LogStreamHandle(Debug, aot, codecache) aot_debug;
388 if (aot_debug.is_enabled()) {
389 MethodTrainingData* mtd = MethodTrainingData::find(methodHandle(Thread::current(), nm->method()));
390 if (mtd != nullptr) {
391 mtd->iterate_compiles([&](CompileTrainingData* ctd) {
392 aot_debug.print(" CTD: "); ctd->print_on(&aot_debug); aot_debug.cr();
393 });
394 }
395 }
396 }
397 }
398 }
399 }
400 }
401 delete _cache; // Free memory
402 _cache = nullptr;
403 }
404 }
405
406 class CachedCodeDirectory : public CachedCodeDirectoryInternal {
407 public:
408 uint _aot_code_size;
409 char* _aot_code_data;
410
411 void set_aot_code_data(uint size, char* aot_data) {
412 _aot_code_size = size;
413 AOTCacheAccess::set_pointer(&_aot_code_data, aot_data);
414 }
415
416 static CachedCodeDirectory* create();
417 };
418
419 // Storing AOT code in the cached code region of AOT Cache:
420 //
421 // [1] Use CachedCodeDirectory to keep track of all data related to cached code.
422 // E.g., you can build a hashtable to record what methods have been archived.
423 //
424 // [2] Memory for all data for cached code, including CachedCodeDirectory, should be
425 // allocated using AOTCacheAccess::allocate_aot_code_region().
426 //
427 // [3] CachedCodeDirectory must be the very first allocation.
428 //
429 // [4] Two kinds of pointer can be stored:
430 // - A pointer p that points to metadata. AOTCacheAccess::can_generate_aot_code(p) must return true.
431 // - A pointer to a buffer returned by AOTCacheAccess::allocate_aot_code_region().
432 // (It's OK to point to an interior location within this buffer).
433 // Such pointers must be stored using AOTCacheAccess::set_pointer()
434 //
435 // The buffers allocated by AOTCacheAccess::allocate_aot_code_region() are in a contiguous region. At runtime, this
436 // region is mapped to the process address space. All the pointers in this buffer are relocated as necessary
437 // (e.g., to account for the runtime location of the CodeCache).
438 //
439 // This is always at the very beginning of the mmapped CDS "cc" (cached code) region
440 static CachedCodeDirectory* _aot_code_directory = nullptr;
441
442 CachedCodeDirectory* CachedCodeDirectory::create() {
443 assert(AOTCacheAccess::is_aot_code_region_empty(), "must be");
444 CachedCodeDirectory* dir = (CachedCodeDirectory*)AOTCacheAccess::allocate_aot_code_region(sizeof(CachedCodeDirectory));
445 dir->dumptime_init_internal();
446 return dir;
447 }
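// A dump-time sketch (assuming the cache writer drives it, e.g. from finish_write()):
//   CachedCodeDirectory* dir = CachedCodeDirectory::create();  // must be the first allocation, see [3]
//   char* buffer = (char*)AOTCacheAccess::allocate_aot_code_region(size);
//   ... copy the AOT code data into buffer ...
//   dir->set_aot_code_data(size, buffer);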
448
449 #define DATA_ALIGNMENT HeapWordSize
450
451 AOTCodeCache::AOTCodeCache(bool is_dumping, bool is_using) :
452 _load_header(nullptr),
453 _load_buffer(nullptr),
454 _store_buffer(nullptr),
455 _C_store_buffer(nullptr),
456 _write_position(0),
457 _load_size(0),
458 _store_size(0),
459 _for_use(is_using),
460 _for_dump(is_dumping),
461 _closing(false),
462 _failed(false),
463 _lookup_failed(false),
464 _for_preload(false),
465 _gen_preload_code(false),
466 _has_clinit_barriers(false),
467 _table(nullptr),
468 _load_entries(nullptr),
469 _search_entries(nullptr),
470 _store_entries(nullptr),
471 _C_strings_buf(nullptr),
472 _store_entries_cnt(0),
473 _compile_id(0),
474 _comp_level(0)
475 {
476 _use_meta_ptrs = UseSharedSpaces ? UseMetadataPointers : false;
477
478 // Read the header at the beginning of the cache
479 if (_for_use) {
480 // Read cache
481 size_t load_size = AOTCacheAccess::get_aot_code_region_size();
482 ReservedSpace rs = MemoryReserver::reserve(load_size, mtCode);
483 if (!rs.is_reserved()) {
484 log_warning(aot, codecache, init)("Failed to reserve %u bytes of memory for mapping AOT code region into AOT Code Cache", (uint)load_size);
485 set_failed();
486 return;
487 }
488 if (!AOTCacheAccess::map_aot_code_region(rs)) {
489 log_warning(aot, codecache, init)("Failed to read/mmap cached code region in AOT Cache");
490 set_failed();
491 return;
492 }
493 _aot_code_directory = (CachedCodeDirectory*)rs.base();
494 _aot_code_directory->runtime_init_internal();
495
496 _load_size = _aot_code_directory->_aot_code_size;
497 _load_buffer = _aot_code_directory->_aot_code_data;
498 assert(is_aligned(_load_buffer, DATA_ALIGNMENT), "load_buffer is not aligned");
499 log_info(aot, codecache, init)("Mapped %u bytes at address " INTPTR_FORMAT " from AOT Code Cache", _load_size, p2i(_load_buffer));
500
501 _load_header = (AOTCodeCache::Header*)addr(0);
502 if (!_load_header->verify_config(_load_size)) {
503 set_failed();
504 return;
505 }
506 log_info (aot, codecache, init)("Loaded %u AOT code entries from AOT Code Cache", _load_header->entries_count());
507 log_debug(aot, codecache, init)(" Adapters: total=%u", _load_header->adapters_count());
508 log_debug(aot, codecache, init)(" Blobs: total=%u", _load_header->blobs_count());
509 log_debug(aot, codecache, init)(" Stubs: total=%u", _load_header->stubs_count());
510 log_debug(aot, codecache, init)(" Nmethods: total=%u", _load_header->nmethods_count());
511 log_debug(aot, codecache, init)(" AOT code cache size: %u bytes", _load_header->cache_size());
512 if (_load_header->has_meta_ptrs()) {
513 assert(UseSharedSpaces, "should be verified already");
514       _use_meta_ptrs = true; // Regardless of UseMetadataPointers
515 UseMetadataPointers = true;
516 }
517 // Read strings
518 load_strings();
519 }
520 if (_for_dump) {
521 _gen_preload_code = _use_meta_ptrs && (ClassInitBarrierMode > 0);
522
523 _C_store_buffer = NEW_C_HEAP_ARRAY(char, max_aot_code_size() + DATA_ALIGNMENT, mtCode);
524 _store_buffer = align_up(_C_store_buffer, DATA_ALIGNMENT);
525     // Entries are allocated at the end of the buffer, growing backward (as on a stack).
526 _store_entries = (AOTCodeEntry*)align_up(_C_store_buffer + max_aot_code_size(), DATA_ALIGNMENT);
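    // Dump-time store buffer layout (a sketch):
    //   _store_buffer  -> code and data written forward via write_bytes()/reserve_bytes()
    //   _store_entries -> AOTCodeEntry records allocated backward from the buffer's end
    // Writes fail once the two regions would meet (see reserve_bytes()/write_bytes()).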
527 log_debug(aot, codecache, init)("Allocated store buffer at address " INTPTR_FORMAT " of size %u", p2i(_store_buffer), max_aot_code_size());
528 }
529 _table = new AOTCodeAddressTable();
530 }
531
532 void AOTCodeCache::invalidate(AOTCodeEntry* entry) {
533   // This can be called concurrently
534 if (entry != nullptr && is_on()) { // Request could come after cache is closed.
535 _cache->invalidate_entry(entry);
536 }
537 }
538
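// An entry is considered "loaded" if it lies inside the mapped load buffer, i.e. it came from
// the AOT cache rather than from the current dump's store buffer.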
539 bool AOTCodeCache::is_loaded(AOTCodeEntry* entry) {
540 if (is_on() && _cache->cache_buffer() != nullptr) {
541 return (uint)((char*)entry - _cache->cache_buffer()) < _cache->load_size();
542 }
543 return false;
544 }
545
546 AOTCodeEntry* AOTCodeCache::find_code_entry(const methodHandle& method, uint comp_level) {
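  // DisableCachedCode is treated as a bit mask (see also preload_code()):
  //   bit 0 - level 1 (C1), bit 1 - level 2 (C1 limited profile), bit 2 - level 4 (C2), bit 3 - preloaded code.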
547 switch (comp_level) {
548 case CompLevel_simple:
549 if ((DisableCachedCode & (1 << 0)) != 0) {
550 return nullptr;
551 }
552 break;
553 case CompLevel_limited_profile:
554 if ((DisableCachedCode & (1 << 1)) != 0) {
555 return nullptr;
556 }
557 break;
558 case CompLevel_full_optimization:
559 if ((DisableCachedCode & (1 << 2)) != 0) {
560 return nullptr;
561 }
562 break;
563
564 default: return nullptr; // Level 1, 2, and 4 only
565 }
566 TraceTime t1("Total time to find AOT code", &_t_totalFind, enable_timers(), false);
567 if (is_on() && _cache->cache_buffer() != nullptr) {
568 MethodData* md = method->method_data();
569 uint decomp = (md == nullptr) ? 0 : md->decompile_count();
570
571 ResourceMark rm;
572 const char* target_name = method->name_and_sig_as_C_string();
573 uint hash = java_lang_String::hash_code((const jbyte*)target_name, (int)strlen(target_name));
574 AOTCodeEntry* entry = _cache->find_entry(AOTCodeEntry::Code, hash, comp_level, decomp);
575 if (entry == nullptr) {
576 log_info(aot, codecache, nmethod)("Missing entry for '%s' (comp_level %d, decomp: %d, hash: " UINT32_FORMAT_X_0 ")", target_name, (uint)comp_level, decomp, hash);
577 #ifdef ASSERT
578 } else {
579 uint name_offset = entry->offset() + entry->name_offset();
580       uint name_size = entry->name_size(); // Includes '\0'
581 const char* name = _cache->cache_buffer() + name_offset;
582 if (strncmp(target_name, name, name_size) != 0) {
583 assert(false, "AOTCodeCache: saved nmethod's name '%s' is different from '%s', hash: " UINT32_FORMAT_X_0, name, target_name, hash);
584 }
585 #endif
586 }
587
588 DirectiveSet* directives = DirectivesStack::getMatchingDirective(method, nullptr);
589 if (directives->IgnorePrecompiledOption) {
590 LogStreamHandle(Info, aot, codecache, compilation) log;
591 if (log.is_enabled()) {
592 log.print("Ignore cached code entry on level %d for ", comp_level);
593 method->print_value_on(&log);
594 }
595 return nullptr;
596 }
597
598 return entry;
599 }
600 return nullptr;
601 }
602
603 void AOTCodeCache::init_extrs_table() {
604 AOTCodeAddressTable* table = addr_table();
605 if (table != nullptr) {
606 table->init_extrs();
607 }
608 }
609 void AOTCodeCache::init_early_stubs_table() {
610 AOTCodeAddressTable* table = addr_table();
611 if (table != nullptr) {
612 table->init_early_stubs();
613 }
614 }
615
616 void AOTCodeCache::init_shared_blobs_table() {
617 AOTCodeAddressTable* table = addr_table();
618 if (table != nullptr) {
619 table->init_shared_blobs();
620 }
621 }
622
623 void AOTCodeCache::init_stubs_table() {
624 AOTCodeAddressTable* table = addr_table();
625 if (table != nullptr) {
626 table->init_stubs();
627 }
628 }
629
630 void AOTCodeCache::init_opto_table() {
631 AOTCodeAddressTable* table = addr_table();
632 if (table != nullptr) {
633 table->init_opto();
634 }
635 }
636
637 void AOTCodeCache::init_c1_table() {
638 AOTCodeAddressTable* table = addr_table();
639 if (table != nullptr) {
640 table->init_c1();
641 }
642 }
643
644 AOTCodeCache::~AOTCodeCache() {
645 if (_closing) {
646 return; // Already closed
647 }
648 // Stop any further access to cache.
649 // Checked on entry to load_nmethod() and store_nmethod().
650 _closing = true;
651 if (_for_use) {
652 // Wait for all load_nmethod() finish.
653 wait_for_no_nmethod_readers();
654 }
655 // Prevent writing code into cache while we are closing it.
656   // This lock is held by ciEnv::register_method(), which calls store_nmethod().
657 MutexLocker ml(Compile_lock);
658 if (for_dump()) { // Finalize cache
659 finish_write();
660 }
661 _load_buffer = nullptr;
662 if (_C_store_buffer != nullptr) {
663 FREE_C_HEAP_ARRAY(char, _C_store_buffer);
664 _C_store_buffer = nullptr;
665 _store_buffer = nullptr;
666 }
667 if (_table != nullptr) {
668 delete _table;
669 _table = nullptr;
670 }
671 }
672
673 void AOTCodeCache::Config::record(bool use_meta_ptrs) {
674 _flags = 0;
675 if (use_meta_ptrs) {
676 _flags |= metadataPointers;
677 }
678 #ifdef ASSERT
679 _flags |= debugVM;
680 #endif
681 if (UseCompressedOops) {
682 _flags |= compressedOops;
683 }
684 if (UseCompressedClassPointers) {
685 _flags |= compressedClassPointers;
686 }
687 if (UseTLAB) {
688 _flags |= useTLAB;
689 }
690 if (JavaAssertions::systemClassDefault()) {
691 _flags |= systemClassAssertions;
692 }
693 if (JavaAssertions::userClassDefault()) {
694 _flags |= userClassAssertions;
695 }
696 if (EnableContended) {
697 _flags |= enableContendedPadding;
765 }
766 if (_objectAlignment != (uint)ObjectAlignmentInBytes) {
767 log_debug(aot, codecache, init)("AOT Code Cache disabled: it was created with ObjectAlignmentInBytes = %d vs current %d", _objectAlignment, ObjectAlignmentInBytes);
768 return false;
769 }
770 return true;
771 }
772
773 bool AOTCodeCache::Header::verify_config(uint load_size) const {
774 if (_version != AOT_CODE_VERSION) {
775 log_debug(aot, codecache, init)("AOT Code Cache disabled: different AOT Code version %d vs %d recorded in AOT Code header", AOT_CODE_VERSION, _version);
776 return false;
777 }
778 if (load_size < _cache_size) {
779 log_debug(aot, codecache, init)("AOT Code Cache disabled: AOT Code Cache size %d < %d recorded in AOT Code header", load_size, _cache_size);
780 return false;
781 }
782 return true;
783 }
784
785 volatile int AOTCodeCache::_nmethod_readers = 0;
786
787 AOTCodeCache* AOTCodeCache::open_for_use() {
788 if (AOTCodeCache::is_on_for_use()) {
789 return AOTCodeCache::cache();
790 }
791 return nullptr;
792 }
793
794 AOTCodeCache* AOTCodeCache::open_for_dump() {
795 if (AOTCodeCache::is_on_for_dump()) {
796 AOTCodeCache* cache = AOTCodeCache::cache();
797 cache->clear_lookup_failed(); // Reset bit
798 return cache;
799 }
800 return nullptr;
801 }
802
803 bool AOTCodeCache::is_address_in_aot_cache(address p) {
804 AOTCodeCache* cache = open_for_use();
805 if (cache == nullptr) {
806 return false;
807 }
808 if ((p >= (address)cache->cache_buffer()) &&
809 (p < (address)(cache->cache_buffer() + cache->load_size()))) {
810 return true;
811 }
812 return false;
813 }
814
815 static void copy_bytes(const char* from, address to, uint size) {
816 assert(size > 0, "sanity");
817 bool by_words = true;
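  // Note: the word-wise path below rounds the length up to a whole number of HeapWords, so it
  // may copy a few bytes past 'size'; the surrounding buffers are expected to carry padding for this.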
818 if ((size > 2 * HeapWordSize) && (((intptr_t)from | (intptr_t)to) & (HeapWordSize - 1)) == 0) {
819 // Use wordwise copies if possible:
820 Copy::disjoint_words((HeapWord*)from,
821 (HeapWord*)to,
822 ((size_t)size + HeapWordSize-1) / HeapWordSize);
823 } else {
824 by_words = false;
825 Copy::conjoint_jbytes(from, to, (size_t)size);
826 }
827 log_trace(aot, codecache)("Copied %d bytes as %s from " INTPTR_FORMAT " to " INTPTR_FORMAT, size, (by_words ? "HeapWord" : "bytes"), p2i(from), p2i(to));
828 }
829
830 AOTCodeReader::AOTCodeReader(AOTCodeCache* cache, AOTCodeEntry* entry, CompileTask* task) {
831 _cache = cache;
832 _entry = entry;
833 _load_buffer = cache->cache_buffer();
834 _read_position = 0;
835 if (task != nullptr) {
836 _compile_id = task->compile_id();
837 _comp_level = task->comp_level();
838 _preload = task->preload();
839 } else {
840 _compile_id = 0;
841 _comp_level = 0;
842 _preload = false;
843 }
844 _lookup_failed = false;
845 }
846
847 void AOTCodeReader::set_read_position(uint pos) {
848 if (pos == _read_position) {
849 return;
850 }
851 assert(pos < _cache->load_size(), "offset:%d >= file size:%d", pos, _cache->load_size());
852 _read_position = pos;
853 }
854
855 bool AOTCodeCache::set_write_position(uint pos) {
856 if (pos == _write_position) {
857 return true;
858 }
859 if (_store_size < _write_position) {
860 _store_size = _write_position; // Adjust during write
861 }
862 assert(pos < _store_size, "offset:%d >= file size:%d", pos, _store_size);
863 _write_position = pos;
869 bool AOTCodeCache::align_write() {
870   // We do not execute code directly from the cache - it is copied out byte by byte first,
871   // so no large alignment is needed (arguably none at all).
872 uint padding = DATA_ALIGNMENT - (_write_position & (DATA_ALIGNMENT - 1));
873 if (padding == DATA_ALIGNMENT) {
874 return true;
875 }
876 uint n = write_bytes((const void*)&align_buffer, padding);
877 if (n != padding) {
878 return false;
879 }
880 log_trace(aot, codecache)("Adjust write alignment in AOT Code Cache");
881 return true;
882 }
883
884 // Check that the AOT Code Cache has enough space to store "nbytes" of data and reserve it
885 address AOTCodeCache::reserve_bytes(uint nbytes) {
886 assert(for_dump(), "Code Cache file is not created");
887 uint new_position = _write_position + nbytes;
888 if (new_position >= (uint)((char*)_store_entries - _store_buffer)) {
889 log_warning(aot, codecache)("Failed to ensure %d bytes at offset %d in AOT Code Cache. Increase AOTCodeMaxSize.",
890 nbytes, _write_position);
891 set_failed();
892 report_store_failure();
893 return nullptr;
894 }
895 address buffer = (address)(_store_buffer + _write_position);
896 log_trace(aot, codecache)("Reserved %d bytes at offset %d in AOT Code Cache", nbytes, _write_position);
897 _write_position += nbytes;
898 if (_store_size < _write_position) {
899 _store_size = _write_position;
900 }
901 return buffer;
902 }
903
904 uint AOTCodeCache::write_bytes(const void* buffer, uint nbytes) {
905 assert(for_dump(), "Code Cache file is not created");
906 if (nbytes == 0) {
907 return 0;
908 }
909 uint new_position = _write_position + nbytes;
910 if (new_position >= (uint)((char*)_store_entries - _store_buffer)) {
911 log_warning(aot, codecache)("Failed to write %d bytes at offset %d to AOT Code Cache. Increase AOTCodeMaxSize.",
912 nbytes, _write_position);
913 set_failed();
914 report_store_failure();
915 return 0;
916 }
917 copy_bytes((const char* )buffer, (address)(_store_buffer + _write_position), nbytes);
918 log_trace(aot, codecache)("Wrote %d bytes at offset %d to AOT Code Cache", nbytes, _write_position);
919 _write_position += nbytes;
920 if (_store_size < _write_position) {
921 _store_size = _write_position;
922 }
923 return nbytes;
924 }
925
926 void* AOTCodeEntry::operator new(size_t x, AOTCodeCache* cache) {
927 return (void*)(cache->add_entry());
928 }
929
930 static bool check_entry(AOTCodeEntry::Kind kind, uint id, uint comp_level, uint decomp, AOTCodeEntry* entry) {
931 if (entry->kind() == kind) {
932 assert(entry->id() == id, "sanity");
933 if (kind != AOTCodeEntry::Code || (!entry->not_entrant() && !entry->has_clinit_barriers() &&
934 (entry->comp_level() == comp_level) &&
935 (entry->ignore_decompile() || entry->decompile() == decomp))) {
936 return true; // Found
937 }
938 }
939 return false;
940 }
941
942 AOTCodeEntry* AOTCodeCache::find_entry(AOTCodeEntry::Kind kind, uint id, uint comp_level, uint decomp) {
943 assert(_for_use, "sanity");
944 uint count = _load_header->entries_count();
945 if (_load_entries == nullptr) {
946 // Read it
947 _search_entries = (uint*)addr(_load_header->entries_offset()); // [id, index]
948 _load_entries = (AOTCodeEntry*)(_search_entries + 2 * count);
949 log_debug(aot, codecache, init)("Read %d entries table at offset %d from AOT Code Cache", count, _load_header->entries_offset());
950 }
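  // _search_entries is a flat array of (id, index) pairs sorted by id. Ids are hashes, so several
  // adjacent pairs may share an id; check_entry() disambiguates by kind, comp level and decompile count.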
951 // Binary search
952 int l = 0;
953 int h = count - 1;
954 while (l <= h) {
955 int mid = (l + h) >> 1;
956 int ix = mid * 2;
957 uint is = _search_entries[ix];
958 if (is == id) {
959 int index = _search_entries[ix + 1];
960 AOTCodeEntry* entry = &(_load_entries[index]);
961 if (check_entry(kind, id, comp_level, decomp, entry)) {
962 return entry; // Found
963 }
964       // Linear search around the hit (could be the same nmethod with a different decompile count)
965 for (int i = mid - 1; i >= l; i--) { // search back
966 ix = i * 2;
967 is = _search_entries[ix];
968 if (is != id) {
969 break;
970 }
971 index = _search_entries[ix + 1];
972 AOTCodeEntry* entry = &(_load_entries[index]);
973 if (check_entry(kind, id, comp_level, decomp, entry)) {
974 return entry; // Found
975 }
976 }
977 for (int i = mid + 1; i <= h; i++) { // search forward
978 ix = i * 2;
979 is = _search_entries[ix];
980 if (is != id) {
981 break;
982 }
983 index = _search_entries[ix + 1];
984 AOTCodeEntry* entry = &(_load_entries[index]);
985 if (check_entry(kind, id, comp_level, decomp, entry)) {
986 return entry; // Found
987 }
988 }
989       break; // No matching entry was found (different decompile count or not_entrant state).
990 } else if (is < id) {
991 l = mid + 1;
992 } else {
993 h = mid - 1;
994 }
995 }
996 return nullptr;
997 }
998
999 void AOTCodeCache::invalidate_entry(AOTCodeEntry* entry) {
1000   assert(entry != nullptr, "all entries should be read already");
1001 if (entry->not_entrant()) {
1002 return; // Someone invalidated it already
1003 }
1004 #ifdef ASSERT
1005 bool found = false;
1006 if (_for_use) {
1007 uint count = _load_header->entries_count();
1008 uint i = 0;
1009 for(; i < count; i++) {
1010 if (entry == &(_load_entries[i])) {
1011 break;
1012 }
1013 }
1014 found = (i < count);
1015 }
1016 if (!found && _for_dump) {
1017 uint count = _store_entries_cnt;
1018 uint i = 0;
1019 for(; i < count; i++) {
1020 if (entry == &(_store_entries[i])) {
1021 break;
1022 }
1023 }
1024 found = (i < count);
1025 }
1026 assert(found, "entry should exist");
1027 #endif
1028 entry->set_not_entrant();
1029 {
1030 uint name_offset = entry->offset() + entry->name_offset();
1031 const char* name;
1032 if (AOTCodeCache::is_loaded(entry)) {
1033 name = _load_buffer + name_offset;
1034 } else {
1035 name = _store_buffer + name_offset;
1036 }
1037 uint level = entry->comp_level();
1038 uint comp_id = entry->comp_id();
1039 uint decomp = entry->decompile();
1040 bool clinit_brs = entry->has_clinit_barriers();
1041 log_info(aot, codecache, nmethod)("Invalidated entry for '%s' (comp_id %d, comp_level %d, decomp: %d, hash: " UINT32_FORMAT_X_0 "%s)",
1042 name, comp_id, level, decomp, entry->id(), (clinit_brs ? ", has clinit barriers" : ""));
1043 }
1044 if (entry->next() != nullptr) {
1045 entry = entry->next();
1046 assert(entry->has_clinit_barriers(), "expecting only such entries here");
1047 invalidate_entry(entry);
1048 }
1049 }
1050
1051 void AOTCodeEntry::update_method_for_writing() {
1052 if (_method != nullptr) {
1053 _method = AOTCacheAccess::method_in_aot_code(_method);
1054 }
1055 }
1056
1057 static int uint_cmp(const void *i, const void *j) {
1058 uint a = *(uint *)i;
1059 uint b = *(uint *)j;
1060 return a > b ? 1 : a < b ? -1 : 0;
1061 }
1062
1063 bool AOTCodeCache::finish_write() {
1064 if (!align_write()) {
1065 return false;
1066 }
1067 uint strings_offset = _write_position;
1068 int strings_count = store_strings();
1069 if (strings_count < 0) {
1070 return false;
1071 }
1072 if (!align_write()) {
1073 return false;
1074 }
1075 uint strings_size = _write_position - strings_offset;
1076
1077 uint entries_count = 0; // Number of entrant (useful) code entries
1078 uint entries_offset = _write_position;
1079
1080 uint store_count = _store_entries_cnt;
1081 if (store_count > 0) {
1082 _aot_code_directory = CachedCodeDirectory::create();
1083 assert(_aot_code_directory != nullptr, "Sanity check");
1084
1085 uint header_size = (uint)align_up(sizeof(AOTCodeCache::Header), DATA_ALIGNMENT);
1086 uint load_count = (_load_header != nullptr) ? _load_header->entries_count() : 0;
1087 uint code_count = store_count + load_count;
1088 uint search_count = code_count * 2;
1089 uint search_size = search_count * sizeof(uint);
1090 uint entries_size = (uint)align_up(code_count * sizeof(AOTCodeEntry), DATA_ALIGNMENT); // In bytes
1091 uint preload_entries_cnt = 0;
1092 uint* preload_entries = NEW_C_HEAP_ARRAY(uint, code_count, mtCode);
1093 uint preload_entries_size = code_count * sizeof(uint);
1094 // _write_position should include code and strings
1095 uint code_alignment = code_count * DATA_ALIGNMENT; // We align_up code size when storing it.
1096 uint total_size = _write_position + _load_size + header_size +
1097 code_alignment + search_size + preload_entries_size + entries_size;
1098 assert(total_size < max_aot_code_size(), "AOT Code size (" UINT32_FORMAT " bytes) is greater than AOTCodeMaxSize(" UINT32_FORMAT " bytes).", total_size, max_aot_code_size());
1099
1101 // Create ordered search table for entries [id, index];
1102 uint* search = NEW_C_HEAP_ARRAY(uint, search_count, mtCode);
1103 // Allocate in AOT Cache buffer
1104 char* buffer = (char *)AOTCacheAccess::allocate_aot_code_region(total_size + DATA_ALIGNMENT);
1105 char* start = align_up(buffer, DATA_ALIGNMENT);
1106 char* current = start + header_size; // Skip header
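      // Layout written into the AOT code region from here on:
      //   [ Header | code of each entry | C strings | preload entries index | sorted (id, index) search table | AOTCodeEntry array ]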
1107
1108 AOTCodeEntry* entries_address = _store_entries; // Pointer to latest entry
1109 uint adapters_count = 0;
1110 uint blobs_count = 0;
1111 uint stubs_count = 0;
1112 uint nmethods_count = 0;
1113 uint max_size = 0;
1114 // Add old entries first
1115 if (_for_use && (_load_header != nullptr)) {
1116 for(uint i = 0; i < load_count; i++) {
1117 if (_load_entries[i].load_fail()) {
1118 continue;
1119 }
1120 if (_load_entries[i].not_entrant()) {
1121 log_info(aot, codecache, exit)("Not entrant load entry id: %d, decomp: %d, hash: " UINT32_FORMAT_X_0, i, _load_entries[i].decompile(), _load_entries[i].id());
1122 if (_load_entries[i].for_preload()) {
1123 // Skip not entrant preload code:
1124 // we can't pre-load code which may have failing dependencies.
1125 continue;
1126 }
1127 _load_entries[i].set_entrant(); // Reset
1128 } else if (_load_entries[i].for_preload() && _load_entries[i].method() != nullptr) {
1129 // record entrant first version code for pre-loading
1130 preload_entries[preload_entries_cnt++] = entries_count;
1131 }
1132 {
1133 uint size = align_up(_load_entries[i].size(), DATA_ALIGNMENT);
1134 if (size > max_size) {
1135 max_size = size;
1136 }
1137 copy_bytes((_load_buffer + _load_entries[i].offset()), (address)current, size);
1138 _load_entries[i].set_offset(current - start); // New offset
1139 current += size;
1140 uint n = write_bytes(&(_load_entries[i]), sizeof(AOTCodeEntry));
1141 if (n != sizeof(AOTCodeEntry)) {
1142 FREE_C_HEAP_ARRAY(char, buffer);
1143 FREE_C_HEAP_ARRAY(uint, search);
1144 return false;
1145 }
1146 search[entries_count*2 + 0] = _load_entries[i].id();
1147 search[entries_count*2 + 1] = entries_count;
1148 entries_count++;
1149 AOTCodeEntry::Kind kind = _load_entries[i].kind();
1150 if (kind == AOTCodeEntry::Adapter) {
1151 adapters_count++;
1152 } else if (kind == AOTCodeEntry::Blob) {
1153 blobs_count++;
1154 } else if (kind == AOTCodeEntry::Stub) {
1155 stubs_count++;
1156 } else {
1157 assert(kind == AOTCodeEntry::Code, "sanity");
1158 nmethods_count++;
1159 }
1160 }
1161 }
1162 }
1163 // AOTCodeEntry entries were allocated in reverse in store buffer.
1164 // Process them in reverse order to cache first code first.
1165 for (int i = store_count - 1; i >= 0; i--) {
1166 if (entries_address[i].load_fail()) {
1167 continue;
1168 }
1169 if (entries_address[i].not_entrant()) {
1170 log_info(aot, codecache, exit)("Not entrant new entry comp_id: %d, comp_level: %d, decomp: %d, hash: " UINT32_FORMAT_X_0 "%s", entries_address[i].comp_id(), entries_address[i].comp_level(), entries_address[i].decompile(), entries_address[i].id(), (entries_address[i].has_clinit_barriers() ? ", has clinit barriers" : ""));
1171 if (entries_address[i].for_preload()) {
1172 // Skip not entrant preload code:
1173 // we can't pre-load code which may have failing dependencies.
1174 continue;
1175 }
1176 entries_address[i].set_entrant(); // Reset
1177 } else if (entries_address[i].for_preload() && entries_address[i].method() != nullptr) {
1178 // record entrant first version code for pre-loading
1179 preload_entries[preload_entries_cnt++] = entries_count;
1180 }
1181 {
1182 entries_address[i].set_next(nullptr); // clear pointers before storing data
1183 uint size = align_up(entries_address[i].size(), DATA_ALIGNMENT);
1184 if (size > max_size) {
1185 max_size = size;
1186 }
1187 copy_bytes((_store_buffer + entries_address[i].offset()), (address)current, size);
1188 entries_address[i].set_offset(current - start); // New offset
1189 entries_address[i].update_method_for_writing();
1190 current += size;
1191 uint n = write_bytes(&(entries_address[i]), sizeof(AOTCodeEntry));
1192 if (n != sizeof(AOTCodeEntry)) {
1193 FREE_C_HEAP_ARRAY(char, buffer);
1194 FREE_C_HEAP_ARRAY(uint, search);
1195 return false;
1196 }
1197 search[entries_count*2 + 0] = entries_address[i].id();
1198 search[entries_count*2 + 1] = entries_count;
1199 entries_count++;
1200 AOTCodeEntry::Kind kind = entries_address[i].kind();
1201 if (kind == AOTCodeEntry::Adapter) {
1202 adapters_count++;
1203 } else if (kind == AOTCodeEntry::Blob) {
1204 blobs_count++;
1205 } else if (kind == AOTCodeEntry::Stub) {
1206 stubs_count++;
1207 } else {
1208 assert(kind == AOTCodeEntry::Code, "sanity");
1209 nmethods_count++;
1210 }
1211 }
1212 }
1213
1214 if (entries_count == 0) {
1215       log_info(aot, codecache, exit)("No entries written to AOT Code Cache");
1216 FREE_C_HEAP_ARRAY(char, buffer);
1217 FREE_C_HEAP_ARRAY(uint, search);
1218 return true; // Nothing to write
1219 }
1220 assert(entries_count <= (store_count + load_count), "%d > (%d + %d)", entries_count, store_count, load_count);
1221 // Write strings
1222 if (strings_count > 0) {
1223 copy_bytes((_store_buffer + strings_offset), (address)current, strings_size);
1224 strings_offset = (current - start); // New offset
1225 current += strings_size;
1226 }
1227 uint preload_entries_offset = (current - start);
1228 preload_entries_size = preload_entries_cnt * sizeof(uint);
1229 if (preload_entries_size > 0) {
1230 copy_bytes((const char*)preload_entries, (address)current, preload_entries_size);
1231 current += preload_entries_size;
1232 log_info(aot, codecache, exit)("Wrote %d preload entries to AOT Code Cache", preload_entries_cnt);
1233 }
1234 if (preload_entries != nullptr) {
1235 FREE_C_HEAP_ARRAY(uint, preload_entries);
1236 }
1237
1238 uint new_entries_offset = (current - start); // New offset
1239 // Sort and store search table
1240 qsort(search, entries_count, 2*sizeof(uint), uint_cmp);
1241 search_size = 2 * entries_count * sizeof(uint);
1242 copy_bytes((const char*)search, (address)current, search_size);
1243 FREE_C_HEAP_ARRAY(uint, search);
1244 current += search_size;
1245
1246 // Write entries
1247 entries_size = entries_count * sizeof(AOTCodeEntry); // New size
1248 copy_bytes((_store_buffer + entries_offset), (address)current, entries_size);
1249 current += entries_size;
1250
1251 log_stats_on_exit();
1252
1253 uint size = (current - start);
1254     assert(size <= total_size, "%d > %d", size, total_size);
1255 assert(nmethods_count == (entries_count - (stubs_count + blobs_count + adapters_count)), "sanity");
1256 log_debug(aot, codecache, exit)(" Adapters: total=%u", adapters_count);
1257 log_debug(aot, codecache, exit)(" Blobs: total=%u", blobs_count);
1258 log_debug(aot, codecache, exit)(" Stubs: total=%u", stubs_count);
1259 log_debug(aot, codecache, exit)(" nmethods: total=%u", nmethods_count);
1260 log_debug(aot, codecache, exit)(" AOT code cache size: %u bytes, max entry's size: %u bytes", size, max_size);
1261
1262 // Finalize header
1263 AOTCodeCache::Header* header = (AOTCodeCache::Header*)start;
1264 header->init(size,
1265 (uint)strings_count, strings_offset,
1266 entries_count, new_entries_offset,
1267 preload_entries_cnt, preload_entries_offset,
1268 adapters_count, blobs_count, stubs_count,
1269 _use_meta_ptrs);
1270 log_info(aot, codecache, exit)("Wrote %d AOT code entries to AOT Code Cache", entries_count);
1271
1272 _aot_code_directory->set_aot_code_data(size, start);
1273 }
1274 return true;
1275 }
1276
1277 //------------------Store/Load AOT code ----------------------
1278
1279 bool AOTCodeCache::store_code_blob(CodeBlob& blob, AOTCodeEntry::Kind entry_kind, uint id, const char* name, int entry_offset_count, int* entry_offsets) {
1280 AOTCodeCache* cache = open_for_dump();
1281 if (cache == nullptr) {
1282 return false;
1283 }
1284 assert(AOTCodeEntry::is_valid_entry_kind(entry_kind), "invalid entry_kind %d", entry_kind);
1285
1286   if ((entry_kind == AOTCodeEntry::Adapter) && !is_dumping_adapters()) {
1287 return false;
1288 }
1289 log_debug(aot, codecache, stubs)("Writing blob '%s' to AOT Code Cache", name);
1290
1291 #ifdef ASSERT
1292 LogStreamHandle(Trace, aot, codecache, stubs) log;
1293 if (log.is_enabled()) {
1294 FlagSetting fs(PrintRelocations, true);
1295 blob.print_on(&log);
1296 }
1297 #endif
1298 // we need to take a lock to prevent race between compiler threads generating AOT code
1299 // and the main thread generating adapter
1300 MutexLocker ml(Compile_lock);
1301 if (!cache->align_write()) {
1302 return false;
1303 }
1304 uint entry_position = cache->_write_position;
1305
1306 // Write name
1350 n = cache->write_bytes(&off, sizeof(uint32_t));
1351 if (n != sizeof(uint32_t)) {
1352 return false;
1353 }
1354 }
1355 uint entry_size = cache->_write_position - entry_position;
1356 AOTCodeEntry* entry = new(cache) AOTCodeEntry(entry_kind, id,
1357 entry_position, entry_size, name_offset, name_size,
1358 blob_offset, has_oop_maps, blob.content_begin());
1359 log_debug(aot, codecache, stubs)("Wrote code blob '%s(id=%d)' to AOT Code Cache", name, id);
1360 return true;
1361 }
1362
1363 CodeBlob* AOTCodeCache::load_code_blob(AOTCodeEntry::Kind entry_kind, uint id, const char* name, int entry_offset_count, int* entry_offsets) {
1364 AOTCodeCache* cache = open_for_use();
1365 if (cache == nullptr) {
1366 return nullptr;
1367 }
1368 assert(AOTCodeEntry::is_valid_entry_kind(entry_kind), "invalid entry_kind %d", entry_kind);
1369
1370   if ((entry_kind == AOTCodeEntry::Adapter) && !is_using_adapters()) {
1371 return nullptr;
1372 }
1373 log_debug(aot, codecache, stubs)("Reading blob '%s' from AOT Code Cache", name);
1374
1375 AOTCodeEntry* entry = cache->find_entry(entry_kind, id);
1376 if (entry == nullptr) {
1377 return nullptr;
1378 }
1379 AOTCodeReader reader(cache, entry, nullptr);
1380 return reader.compile_code_blob(name, entry_offset_count, entry_offsets);
1381 }
1382
1383 CodeBlob* AOTCodeReader::compile_code_blob(const char* name, int entry_offset_count, int* entry_offsets) {
1384 uint entry_position = _entry->offset();
1385
1386 // Read name
1387 uint name_offset = entry_position + _entry->name_offset();
1388   uint name_size = _entry->name_size(); // Includes '\0'
1389 const char* stored_name = addr(name_offset);
1390
1391 if (strncmp(stored_name, name, (name_size - 1)) != 0) {
1392 log_warning(aot, codecache, stubs)("Saved blob's name '%s' is different from the expected name '%s'",
1393 stored_name, name);
1394 ((AOTCodeCache*)_cache)->set_failed();
1395 report_load_failure();
1396     return nullptr;
1398 }
1399
1400 // Read archived code blob
1401 uint offset = entry_position + _entry->blob_offset();
1402 CodeBlob* archived_blob = (CodeBlob*)addr(offset);
1403 offset += archived_blob->size();
1404
1405 address reloc_data = (address)addr(offset);
1406 offset += archived_blob->relocation_size();
1407 set_read_position(offset);
1408
1409 ImmutableOopMapSet* oop_maps = nullptr;
1410 if (_entry->has_oop_maps()) {
1411 oop_maps = read_oop_map_set();
1412 }
1413
1414 CodeBlob* code_blob = CodeBlob::create(archived_blob, stored_name, reloc_data, oop_maps);
1415 if (code_blob == nullptr) { // no space left in CodeCache
1416 return nullptr;
1417 }
1427 for (int i = 0; i < stored_count; i++) {
1428 uint32_t off = *(uint32_t*)addr(offset);
1429 offset += sizeof(uint32_t);
1430 const char* entry_name = (_entry->kind() == AOTCodeEntry::Adapter) ? AdapterHandlerEntry::entry_name(i) : "";
1431 log_trace(aot, codecache, stubs)("Reading adapter '%s:%s' (0x%x) offset: 0x%x from AOT Code Cache",
1432 stored_name, entry_name, _entry->id(), off);
1433 entry_offsets[i] = off;
1434 }
1435
1436 log_debug(aot, codecache, stubs)("Read blob '%s' from AOT Code Cache", name);
1437 #ifdef ASSERT
1438 LogStreamHandle(Trace, aot, codecache, stubs) log;
1439 if (log.is_enabled()) {
1440 FlagSetting fs(PrintRelocations, true);
1441 code_blob->print_on(&log);
1442 }
1443 #endif
1444 return code_blob;
1445 }
1446
1447 bool AOTCodeCache::store_stub(StubCodeGenerator* cgen, vmIntrinsicID id, const char* name, address start) {
1448 if (!is_dumping_stub()) {
1449 return false;
1450 }
1451 AOTCodeCache* cache = open_for_dump();
1452 if (cache == nullptr) {
1453 return false;
1454 }
1455 log_info(aot, codecache, stubs)("Writing stub '%s' id:%d to AOT Code Cache", name, (int)id);
1456 if (!cache->align_write()) {
1457 return false;
1458 }
1459 #ifdef ASSERT
1460 CodeSection* cs = cgen->assembler()->code_section();
1461 if (cs->has_locs()) {
1462 uint reloc_count = cs->locs_count();
1463 tty->print_cr("======== write stubs code section relocations [%d]:", reloc_count);
1464 // Collect additional data
1465 RelocIterator iter(cs);
1466 while (iter.next()) {
1467 switch (iter.type()) {
1468 case relocInfo::none:
1469 break;
1470 default: {
1471 iter.print_current_on(tty);
1472 fatal("stub's relocation %d unimplemented", (int)iter.type());
1473 break;
1474 }
1475 }
1476 }
1477 }
1478 #endif
1479 uint entry_position = cache->_write_position;
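  // Stub entry layout in the cache: the machine code first, then the stub's name.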
1480
1481 // Write code
1482 uint code_offset = 0;
1483 uint code_size = cgen->assembler()->pc() - start;
1484 uint n = cache->write_bytes(start, code_size);
1485 if (n != code_size) {
1486 return false;
1487 }
1488 // Write name
1489 uint name_offset = cache->_write_position - entry_position;
1490   uint name_size = (uint)strlen(name) + 1; // Includes '\0'
1491 n = cache->write_bytes(name, name_size);
1492 if (n != name_size) {
1493 return false;
1494 }
1495 uint entry_size = cache->_write_position - entry_position;
1496 AOTCodeEntry* entry = new(cache) AOTCodeEntry(entry_position, entry_size, name_offset, name_size,
1497 code_offset, code_size, 0, 0,
1498 AOTCodeEntry::Stub, (uint32_t)id);
1499 log_info(aot, codecache, stubs)("Wrote stub '%s' id:%d to AOT Code Cache", name, (int)id);
1500 return true;
1501 }
1502
1503 bool AOTCodeCache::load_stub(StubCodeGenerator* cgen, vmIntrinsicID id, const char* name, address start) {
1504 if (!is_using_stub()) {
1505 return false;
1506 }
1507 assert(start == cgen->assembler()->pc(), "wrong buffer");
1508 AOTCodeCache* cache = open_for_use();
1509 if (cache == nullptr) {
1510 return false;
1511 }
1512 AOTCodeEntry* entry = cache->find_entry(AOTCodeEntry::Stub, (uint)id);
1513 if (entry == nullptr) {
1514 return false;
1515 }
1516 uint entry_position = entry->offset();
1517 // Read name
1518 uint name_offset = entry->name_offset() + entry_position;
1519   uint name_size = entry->name_size(); // Includes '\0'
1520 const char* saved_name = cache->addr(name_offset);
1521 if (strncmp(name, saved_name, (name_size - 1)) != 0) {
1522 log_warning(aot, codecache)("Saved stub's name '%s' is different from '%s' for id:%d", saved_name, name, (int)id);
1523 cache->set_failed();
1524 report_load_failure();
1525 return false;
1526 }
1527   log_info(aot, codecache, stubs)("Reading stub '%s' id:%d from AOT Code Cache", name, (int)id);
1528 // Read code
1529 uint code_offset = entry->code_offset() + entry_position;
1530 uint code_size = entry->code_size();
1531 copy_bytes(cache->addr(code_offset), start, code_size);
1532 cgen->assembler()->code_section()->set_end(start + code_size);
1533   log_info(aot, codecache, stubs)("Read stub '%s' id:%d from AOT Code Cache", name, (int)id);
1534 return true;
1535 }
1536
1537 AOTCodeEntry* AOTCodeCache::store_nmethod(nmethod* nm, AbstractCompiler* compiler, bool for_preload) {
1538 if (!is_dumping_code()) {
1539 return nullptr;
1540 }
1541 if (!CDSConfig::is_dumping_aot_code()) {
1542 return nullptr; // The metadata and heap in the CDS image haven't been finalized yet.
1543 }
1544 AOTCodeCache* cache = open_for_dump();
1545 if (cache == nullptr) {
1546 return nullptr; // Cache file is closed
1547 }
1548 if (nm->is_osr_method()) {
1549 return nullptr; // No OSR
1550 }
1551 if (!compiler->is_c1() && !compiler->is_c2()) {
1552 // Only c1 and c2 compilers
1553 return nullptr;
1554 }
1555 int comp_level = nm->comp_level();
1556 if (comp_level == CompLevel_full_profile) {
1557 // Do not cache C1 compiles with full profile i.e. tier3
1558 return nullptr;
1559 }
1560 assert(comp_level == CompLevel_simple || comp_level == CompLevel_limited_profile || comp_level == CompLevel_full_optimization, "must be");
1561
1562 TraceTime t1("Total time to store AOT code", &_t_totalStore, enable_timers(), false);
1563   AOTCodeEntry* entry = cache->write_nmethod(nm, for_preload);
1565 if (entry == nullptr) {
1566 log_info(aot, codecache, nmethod)("%d (L%d): nmethod store attempt failed", nm->compile_id(), comp_level);
1567 }
1568 return entry;
1569 }
1570
1571 AOTCodeEntry* AOTCodeCache::write_nmethod(nmethod* nm, bool for_preload) {
1572 assert(!nm->has_clinit_barriers() || _gen_preload_code, "sanity");
1573 uint comp_id = nm->compile_id();
1574 uint comp_level = nm->comp_level();
1575 Method* method = nm->method();
1576 bool method_in_cds = MetaspaceShared::is_in_shared_metaspace((address)method);
1577 InstanceKlass* holder = method->method_holder();
1578 bool klass_in_cds = holder->is_shared() && !holder->is_shared_unregistered_class();
1579 bool builtin_loader = holder->class_loader_data()->is_builtin_class_loader_data();
1580 if (!builtin_loader) {
1581 ResourceMark rm;
1582 log_info(aot, codecache, nmethod)("%d (L%d): Skip method '%s' loaded by custom class loader %s", comp_id, (int)comp_level, method->name_and_sig_as_C_string(), holder->class_loader_data()->loader_name());
1583 return nullptr;
1584 }
1585 if (for_preload && !(method_in_cds && klass_in_cds)) {
1586 ResourceMark rm;
1587 log_info(aot, codecache, nmethod)("%d (L%d): Skip method '%s' for preload: not in CDS", comp_id, (int)comp_level, method->name_and_sig_as_C_string());
1588 return nullptr;
1589 }
1590 assert(!for_preload || method_in_cds, "sanity");
1591 _for_preload = for_preload;
1592 _has_clinit_barriers = nm->has_clinit_barriers();
1593
1594 if (!align_write()) {
1595 return nullptr;
1596 }
1597
1598 uint entry_position = _write_position;
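  // Sections written below for this nmethod entry, in order: name, a copy of the nmethod,
  // NOT_PRODUCT(asm remarks and dbg strings), relocation data, oops, metadata, oop map set,
  // immutable data, reloc-immediate oops/metadata, and load-time relocation data.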
1599
1600 uint decomp = (method->method_data() == nullptr) ? 0 : method->method_data()->decompile_count();
1601
1602   // Is this the assembly phase of the one-step workflow?
1603   // In that phase compilation is done from saved profiling data
1604   // without running the application, so decompilation counters are ignored.
1605   // They are also ignored for C1 code because it is decompiled unconditionally
1606   // when the C2-generated code is published.
1607 bool ignore_decompile = (comp_level == CompLevel_limited_profile) ||
1608 CDSConfig::is_dumping_final_static_archive();
1609
1610 // Write name
1611 uint name_offset = 0;
1612 uint name_size = 0;
1613 uint hash = 0;
1614 uint n;
1615 {
1616 ResourceMark rm;
1617 const char* name = method->name_and_sig_as_C_string();
1618 log_info(aot, codecache, nmethod)("%d (L%d): Writing nmethod '%s' (comp level: %d, decomp: %d%s%s) to AOT Code Cache",
1619 comp_id, (int)comp_level, name, comp_level, decomp,
1620 (ignore_decompile ? ", ignore_decomp" : ""),
1621 (nm->has_clinit_barriers() ? ", has clinit barriers" : ""));
1622
1623 LogStreamHandle(Info, aot, codecache, loader) log;
1624 if (log.is_enabled()) {
1625 oop loader = holder->class_loader();
1626 oop domain = holder->protection_domain();
1627 log.print("Holder: ");
1628 holder->print_value_on(&log);
1629 log.print(" loader: ");
1630 if (loader == nullptr) {
1631 log.print("nullptr");
1632 } else {
1633 loader->print_value_on(&log);
1634 }
1635 log.print(" domain: ");
1636 if (domain == nullptr) {
1637 log.print("nullptr");
1638 } else {
1639 domain->print_value_on(&log);
1640 }
1641 log.cr();
1642 }
1643 name_offset = _write_position - entry_position;
1644     name_size = (uint)strlen(name) + 1; // Includes '\0'
1645 n = write_bytes(name, name_size);
1646 if (n != name_size) {
1647 return nullptr;
1648 }
1649 hash = java_lang_String::hash_code((const jbyte*)name, (int)strlen(name));
1650 }
1651 uint archived_nm_offset = _write_position - entry_position;
1652 nmethod* archived_nm = (nmethod*)reserve_bytes(nm->size());
1653 if (archived_nm == nullptr) {
1654 return nullptr;
1655 }
1656 nm->copy_to((address)archived_nm);
1657
1658 archived_nm->prepare_for_archiving();
1659
1660 #ifndef PRODUCT
1661 // Write asm remarks
1662 uint* count_ptr = (uint *)reserve_bytes(sizeof(uint));
1663 if (count_ptr == nullptr) {
1664 return nullptr;
1665 }
1666 uint count = 0;
1667 bool result = nm->asm_remarks().iterate([&] (uint offset, const char* str) -> bool {
1668 log_info(aot, codecache, nmethod)("asm remark offset=%d, str=%s", offset, str);
1669 n = write_bytes(&offset, sizeof(uint));
1670 if (n != sizeof(uint)) {
1671 return false;
1672 }
1673 n = write_bytes(str, (uint)strlen(str) + 1);
1674 if (n != strlen(str) + 1) {
1675 return false;
1676 }
1677 count += 1;
1678 return true;
1679 });
1680 if (!result) {
1681 return nullptr;
1682 }
1683 *count_ptr = count;
1684
1685 // Write dbg strings
1686 count_ptr = (uint *)reserve_bytes(sizeof(uint));
1687 if (count_ptr == nullptr) {
1688 return nullptr;
1689 }
1690 count = 0;
1691 result = nm->dbg_strings().iterate([&] (const char* str) -> bool {
1692 log_info(aot, codecache, nmethod)("dbg string[" INTPTR_FORMAT "]=%s", p2i(str), str);
1693 n = write_bytes(str, (uint)strlen(str) + 1);
1694 if (n != strlen(str) + 1) {
1695 return false;
1696 }
1697 count += 1;
1698 return true;
1699 });
1700 if (!result) {
1701 return nullptr;
1702 }
1703 *count_ptr = count;
1704 #endif /* PRODUCT */
1705
1706 uint reloc_data_size = nm->relocation_size();
1707 n = write_bytes((address)nm->relocation_begin(), reloc_data_size);
1708 if (n != reloc_data_size) {
1709 return nullptr;
1710 }
1711
1712 // Write oops and metadata present in the nmethod's data region
1713 if (!write_oops(nm)) {
1714 if (lookup_failed() && !failed()) {
1715 // Skip this method and reposition file
1716 set_write_position(entry_position);
1717 }
1718 return nullptr;
1719 }
1720 if (!write_metadata(nm)) {
1721 if (lookup_failed() && !failed()) {
1722 // Skip this method and reposition file
1723 set_write_position(entry_position);
1724 }
1725 return nullptr;
1726 }
1727
1728 if (!write_oop_map_set(*nm)) {
1729 return nullptr;
1730 }
1731
1732 uint immutable_data_size = nm->immutable_data_size();
1733 n = write_bytes(nm->immutable_data_begin(), immutable_data_size);
1734 if (n != immutable_data_size) {
1735 return nullptr;
1736 }
1737
1738 JavaThread* thread = JavaThread::current();
1739 HandleMark hm(thread);
1740 GrowableArray<Handle> oop_list;
1741 GrowableArray<Metadata*> metadata_list;
1742
1743 nm->create_reloc_immediates_list(thread, oop_list, metadata_list);
1744 if (!write_nmethod_reloc_immediates(oop_list, metadata_list)) {
1745 if (lookup_failed() && !failed()) {
1746 // Skip this method and reposition file
1747 set_write_position(entry_position);
1748 }
1749 return nullptr;
1750 }
1751
1752 if (!write_nmethod_loadtime_relocations(thread, nm, oop_list, metadata_list)) {
1753 return nullptr;
1754 }
1755
1756 uint entry_size = _write_position - entry_position;
1757 AOTCodeEntry* entry = new (this) AOTCodeEntry(entry_position, entry_size, name_offset, name_size,
1758 archived_nm_offset, 0, 0, 0,
1759 AOTCodeEntry::Code, hash, nm->content_begin(), comp_level, comp_id, decomp,
1760 nm->has_clinit_barriers(), for_preload, ignore_decompile);
1761 if (method_in_cds) {
1762 entry->set_method(method);
1763 }
1764 #ifdef ASSERT
1765 if (nm->has_clinit_barriers() || for_preload) {
1766 assert(for_preload, "sanity");
1767 assert(entry->method() != nullptr, "sanity");
1768 }
1769 #endif
1770 {
1771 ResourceMark rm;
1772 const char* name = nm->method()->name_and_sig_as_C_string();
1773 log_info(aot, codecache, nmethod)("%d (L%d): Wrote nmethod '%s'%s to AOT Code Cache",
1774 comp_id, (int)comp_level, name, (for_preload ? " (for preload)" : ""));
1775 }
1776 if (VerifyCachedCode) {
1777 return nullptr;
1778 }
1779 return entry;
1780 }
1781
1782 bool AOTCodeCache::load_nmethod(ciEnv* env, ciMethod* target, int entry_bci, AbstractCompiler* compiler, CompLevel comp_level) {
1783 if (!is_using_code()) {
1784 return false;
1785 }
1786 AOTCodeCache* cache = open_for_use();
1787 if (cache == nullptr) {
1788 return false;
1789 }
1790 assert(entry_bci == InvocationEntryBci, "unexpected entry_bci=%d", entry_bci);
1791 TraceTime t1("Total time to load AOT code", &_t_totalLoad, enable_timers(), false);
1792 CompileTask* task = env->task();
1793 task->mark_aot_load_start(os::elapsed_counter());
1794 AOTCodeEntry* entry = task->aot_code_entry();
1795 bool preload = task->preload();
1796 assert(entry != nullptr, "sanity");
1797 if (log_is_enabled(Info, aot, codecache, nmethod)) {
1798 uint decomp = (target->method_data() == nullptr) ? 0 : target->method_data()->decompile_count();
1799 VM_ENTRY_MARK;
1800 ResourceMark rm;
1801 methodHandle method(THREAD, target->get_Method());
1802 const char* target_name = method->name_and_sig_as_C_string();
1803 uint hash = java_lang_String::hash_code((const jbyte*)target_name, (int)strlen(target_name));
1804 bool clinit_brs = entry->has_clinit_barriers();
1805 log_info(aot, codecache, nmethod)("%d (L%d): %s nmethod '%s' (decomp: %d, hash: " UINT32_FORMAT_X_0 "%s%s)",
1806 task->compile_id(), task->comp_level(), (preload ? "Preloading" : "Reading"),
1807 target_name, decomp, hash, (clinit_brs ? ", has clinit barriers" : ""),
1808 (entry->ignore_decompile() ? ", ignore_decomp" : ""));
1809 }
1810 ReadingMark rdmk;
1811 if (rdmk.failed()) {
1812 // Cache is closed, cannot touch anything.
1813 return false;
1814 }
1815
1816 AOTCodeReader reader(cache, entry, task);
1817 bool success = reader.compile_nmethod(env, target, compiler);
1818 if (success) {
1819 task->set_num_inlined_bytecodes(entry->num_inlined_bytecodes());
1820 } else {
1821 entry->set_load_fail();
1822 }
1823 task->mark_aot_load_finish(os::elapsed_counter());
1824 return success;
1825 }
1826
1827 bool AOTCodeReader::compile_nmethod(ciEnv* env, ciMethod* target, AbstractCompiler* compiler) {
1828 CompileTask* task = env->task();
1829 AOTCodeEntry* aot_code_entry = (AOTCodeEntry*)_entry;
1830 nmethod* nm = nullptr;
1831
1832 uint entry_position = aot_code_entry->offset();
1833 uint archived_nm_offset = entry_position + aot_code_entry->code_offset();
1834 nmethod* archived_nm = (nmethod*)addr(archived_nm_offset);
1835 set_read_position(archived_nm_offset + archived_nm->size());
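  // The sections below are read back in the same order in which write_nmethod() wrote them.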
1836
1837 OopRecorder* oop_recorder = new OopRecorder(env->arena());
1838 env->set_oop_recorder(oop_recorder);
1839
1840 uint offset;
1841
1842 #ifndef PRODUCT
1843 // Read asm remarks
1844 offset = read_position();
1845 uint count = *(uint *)addr(offset);
1846 offset += sizeof(uint);
1847 AsmRemarks asm_remarks;
1848 for (uint i = 0; i < count; i++) {
1849 uint remark_offset = *(uint *)addr(offset);
1850 offset += sizeof(uint);
1851 const char* remark = (const char*)addr(offset);
1852 offset += (uint)strlen(remark)+1;
1853 asm_remarks.insert(remark_offset, remark);
1854 }
1855 set_read_position(offset);
1856
1857 // Read dbg strings
1858 count = *(uint *)addr(offset);
1859 offset += sizeof(uint);
1860 DbgStrings dbg_strings;
1861 for (uint i = 0; i < count; i++) {
1862 const char* str = (const char*)addr(offset);
1863 offset += (uint)strlen(str)+1;
1864 dbg_strings.insert(str);
1865 }
1866 set_read_position(offset);
1867 #endif /* PRODUCT */
1868
1869 offset = read_position();
1870 address reloc_data = (address)addr(offset);
1871 offset += archived_nm->relocation_size();
1872 set_read_position(offset);
1873
1874 // Read oops and metadata
1875 VM_ENTRY_MARK
1876 GrowableArray<Handle> oop_list;
1877 GrowableArray<Metadata*> metadata_list;
1878
1879 if (!read_oop_metadata_list(THREAD, target, oop_list, metadata_list, oop_recorder)) {
1880 return false;
1881 }
1882
1883 ImmutableOopMapSet* oopmaps = read_oop_map_set();
1884
1885 offset = read_position();
1886 address immutable_data = (address)addr(offset);
1887 offset += archived_nm->immutable_data_size();
1888 set_read_position(offset);
1889
1890 GrowableArray<Handle> reloc_immediate_oop_list;
1891 GrowableArray<Metadata*> reloc_immediate_metadata_list;
1892 if (!read_oop_metadata_list(THREAD, target, reloc_immediate_oop_list, reloc_immediate_metadata_list, nullptr)) {
1893 return false;
1894 }
1895
1896 // Read Dependencies (compressed already)
1897 Dependencies* dependencies = new Dependencies(env);
1898 dependencies->set_content(immutable_data, archived_nm->dependencies_size());
1899 env->set_dependencies(dependencies);
1900
1901 const char* name = addr(entry_position + aot_code_entry->name_offset());
1902
1903 log_info(aot, codecache, nmethod)("%d (L%d): Read nmethod '%s' from AOT Code Cache", compile_id(), comp_level(), name);
1904 #ifdef ASSERT
1905 LogStreamHandle(Debug, aot, codecache, nmethod) log;
1906 if (log.is_enabled()) {
1907 FlagSetting fs(PrintRelocations, true);
1908 archived_nm->print_on(&log);
1909 archived_nm->decode2(&log);
1910 }
1911 #endif
1912
1913 if (VerifyCachedCode) {
1914 return false;
1915 }
1916
1917 TraceTime t1("Total time to register AOT nmethod", &_t_totalRegister, enable_timers(), false);
1918 env->register_aot_method(THREAD,
1919 target,
1920 compiler,
1921 archived_nm,
1922 reloc_data,
1923 oop_list,
1924 metadata_list,
1925 oopmaps,
1926 immutable_data,
1927 reloc_immediate_oop_list,
1928 reloc_immediate_metadata_list,
1929 NOT_PRODUCT_ARG(asm_remarks)
1930 NOT_PRODUCT_ARG(dbg_strings)
1931 this);
1932 bool success = task->is_success();
1933 if (success) {
1934 aot_code_entry->set_loaded();
1935 }
1936 return success;
1937 }
1938
1939 bool skip_preload(methodHandle mh) {
1940 if (!mh->method_holder()->is_loaded()) {
1941 return true;
1942 }
1943 DirectiveSet* directives = DirectivesStack::getMatchingDirective(mh, nullptr);
1944 if (directives->DontPreloadOption) {
1945 LogStreamHandle(Info, aot, codecache, init) log;
1946 if (log.is_enabled()) {
1947 log.print("Exclude preloading code for ");
1948 mh->print_value_on(&log);
1949 }
1950 return true;
1951 }
1952 return false;
1953 }
1954
1955 bool AOTCodeCache::gen_preload_code(ciMethod* m, int entry_bci) {
1956 VM_ENTRY_MARK;
1957 return (entry_bci == InvocationEntryBci) && is_on() && _cache->gen_preload_code() &&
1958 AOTCacheAccess::can_generate_aot_code(m->get_Method());
1959 }
1960
1961 void AOTCodeCache::preload_code(JavaThread* thread) {
1962 if ((ClassInitBarrierMode == 0) || !is_on_for_use()) {
1963 return;
1964 }
1965 if ((DisableCachedCode & (1 << 3)) != 0) {
1966 return; // no preloaded code (level 5);
1967 }
1968 _cache->preload_startup_code(thread);
1969 }
1970
1971 void AOTCodeCache::preload_startup_code(TRAPS) {
1972 if (CompilationPolicy::compiler_count(CompLevel_full_optimization) == 0) {
1973     // Since we reuse the CompileBroker API to install cached code, we're required to have a JIT compiler for the
1974 // level we want (that is CompLevel_full_optimization).
1975 return;
1976 }
1977 assert(_for_use, "sanity");
1978 uint count = _load_header->entries_count();
1979 if (_load_entries == nullptr) {
1980 // Read it
1981 _search_entries = (uint*)addr(_load_header->entries_offset()); // [id, index]
1982 _load_entries = (AOTCodeEntry*)(_search_entries + 2 * count);
1983 log_info(aot, codecache, init)("Read %d entries table at offset %d from AOT Code Cache", count, _load_header->entries_offset());
1984 }
1985 uint preload_entries_count = _load_header->preload_entries_count();
1986 if (preload_entries_count > 0) {
1987 uint* entries_index = (uint*)addr(_load_header->preload_entries_offset());
1988 log_info(aot, codecache, init)("Load %d preload entries from AOT Code Cache", preload_entries_count);
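    // SCLoadStart/SCLoadStop bound the range of preload entries that is actually processed
    // (presumably useful for bisecting a problematic entry).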
1989 uint count = MIN2(preload_entries_count, SCLoadStop);
1990 for (uint i = SCLoadStart; i < count; i++) {
1991 uint index = entries_index[i];
1992 AOTCodeEntry* entry = &(_load_entries[index]);
1993 if (entry->not_entrant()) {
1994 continue;
1995 }
1996 methodHandle mh(THREAD, entry->method());
1997 assert((mh.not_null() && MetaspaceShared::is_in_shared_metaspace((address)mh())), "sanity");
1998 if (skip_preload(mh)) {
1999 continue; // Exclude preloading for this method
2000 }
2001 assert(mh->method_holder()->is_loaded(), "");
2002 if (!mh->method_holder()->is_linked()) {
2003 assert(!HAS_PENDING_EXCEPTION, "");
2004 mh->method_holder()->link_class(THREAD);
2005 if (HAS_PENDING_EXCEPTION) {
2006 LogStreamHandle(Info, aot, codecache) log;
2007 if (log.is_enabled()) {
2008 ResourceMark rm;
2009 log.print("Linkage failed for %s: ", mh->method_holder()->external_name());
2010 THREAD->pending_exception()->print_value_on(&log);
2011 if (log_is_enabled(Debug, aot, codecache)) {
2012 THREAD->pending_exception()->print_on(&log);
2013 }
2014 }
2015 CLEAR_PENDING_EXCEPTION;
2016 }
2017 }
2018 if (mh->aot_code_entry() != nullptr) {
2019 // Second C2 compilation of the same method could happen for
2020 // different reasons without marking first entry as not entrant.
2021 continue; // Keep old entry to avoid issues
2022 }
2023 mh->set_aot_code_entry(entry);
2024 CompileBroker::compile_method(mh, InvocationEntryBci, CompLevel_full_optimization, methodHandle(), 0, false, CompileTask::Reason_Preload, CHECK);
2025 }
2026 }
2027 }
2028
2029 // ------------ process code and data --------------
2030
2031 bool AOTCodeCache::write_relocations(CodeBlob& code_blob) {
2032 GrowableArray<uint> reloc_data;
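  // One uint of extra data is recorded per relocation (an address-table id, or 0 when unused);
  // the count is written first so the load side knows how much to read back.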
2033 RelocIterator iter(&code_blob);
2034 LogStreamHandle(Trace, aot, codecache, reloc) log;
2035 while (iter.next()) {
2036 int idx = reloc_data.append(0); // default value
2037 switch (iter.type()) {
2038 case relocInfo::none:
2039 break;
2040 case relocInfo::runtime_call_type: {
2041 // Record offset of runtime destination
2042 CallRelocation* r = (CallRelocation*)iter.reloc();
2043 address dest = r->destination();
2044 if (dest == r->addr()) { // possible call via trampoline on Aarch64
2045 dest = (address)-1; // do nothing in this case when loading this relocation
2046 }
2047 reloc_data.at_put(idx, _table->id_for_address(dest, iter, nullptr, &code_blob));
2048 break;
2049 }
2050 case relocInfo::runtime_call_w_cp_type:
2051 fatal("runtime_call_w_cp_type unimplemented");
2052 break;
2053 case relocInfo::external_word_type: {
2054 // Record offset of runtime target
2055 address target = ((external_word_Relocation*)iter.reloc())->target();
2056 reloc_data.at_put(idx, _table->id_for_address(target, iter, nullptr, &code_blob));
2057 break;
2058 }
2059 case relocInfo::internal_word_type:
2060 break;
2061 case relocInfo::section_word_type:
2062 break;
2063 default:
2064 fatal("relocation %d unimplemented", (int)iter.type());
2065 break;
2066 }
2067 if (log.is_enabled()) {
2068 iter.print_current_on(&log);
2069 }
2070 }
2071
2072 // Write additional relocation data: uint per relocation
2073 // Write the count first
2074 int count = reloc_data.length();
2075 write_bytes(&count, sizeof(int));
2076 for (GrowableArrayIterator<uint> iter = reloc_data.begin();
2077 iter != reloc_data.end(); ++iter) {
2078 uint value = *iter;
2079 int n = write_bytes(&value, sizeof(uint));
2080 if (n != sizeof(uint)) {
2081 return false;
2082 }
2083 }
2084 return true;
2085 }
2086
2087 void AOTCodeReader::apply_relocations(nmethod* nm, GrowableArray<Handle> &oop_list, GrowableArray<Metadata*> &metadata_list) {
2088 LogStreamHandle(Info, aot, codecache, reloc) log;
2089 uint buffer_offset = read_position();
2090 int count = *(int*)addr(buffer_offset);
2091 buffer_offset += sizeof(int);
2092 if (log.is_enabled()) {
2093 log.print_cr("======== extra relocations count=%d", count);
2094 }
2095 uint* reloc_data = (uint*)addr(buffer_offset);
2096 buffer_offset += (count * sizeof(uint));
2097 set_read_position(buffer_offset);
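  // reloc_data holds one uint per relocation, in the same iteration order used on the dump side;
  // 'j' walks it in lock-step with the RelocIterator below.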
2098
2099 RelocIterator iter(nm);
2100 int j = 0;
2101
2102 while (iter.next()) {
2103 switch (iter.type()) {
2104 case relocInfo::none:
2105 break;
2106 case relocInfo::oop_type: {
2107 oop_Relocation* r = (oop_Relocation*)iter.reloc();
2108 if (r->oop_is_immediate()) {
2109 Handle h = oop_list.at(reloc_data[j]);
2110 r->set_value(cast_from_oop<address>(h()));
2111 } else {
2112 r->fix_oop_relocation();
2113 }
2114 break;
2115 }
2116 case relocInfo::metadata_type: {
2117 metadata_Relocation* r = (metadata_Relocation*)iter.reloc();
2118 Metadata* m;
2119 if (r->metadata_is_immediate()) {
2120 m = metadata_list.at(reloc_data[j]);
2121 } else {
2122 // Get already updated value from nmethod.
2123 int index = r->metadata_index();
2124 m = nm->metadata_at(index);
2125 }
2126 r->set_value((address)m);
2127 break;
2128 }
2129 case relocInfo::virtual_call_type: // Fall through. They all call resolve_*_call blobs.
2130 case relocInfo::opt_virtual_call_type:
2131 case relocInfo::static_call_type: {
2132 address dest = _cache->address_for_id(reloc_data[j]);
2133 if (dest != (address)-1) {
2134 ((CallRelocation*)iter.reloc())->set_destination(dest);
2135 }
2136 break;
2137 }
2138 case relocInfo::trampoline_stub_type: {
2139 address dest = _cache->address_for_id(reloc_data[j]);
2140 if (dest != (address)-1) {
2141 ((trampoline_stub_Relocation*)iter.reloc())->set_destination(dest);
2142 }
2143 break;
2144 }
2145 case relocInfo::static_stub_type:
2146 break;
2147 case relocInfo::runtime_call_type: {
2148 address dest = _cache->address_for_id(reloc_data[j]);
2149 if (dest != (address)-1) {
2150 ((CallRelocation*)iter.reloc())->set_destination(dest);
2151 }
2152 break;
2153 }
2154 case relocInfo::runtime_call_w_cp_type:
2155 fatal("runtime_call_w_cp_type unimplemented");
2156 //address destination = iter.reloc()->value();
2157 break;
2158 case relocInfo::external_word_type: {
2159 address target = _cache->address_for_id(reloc_data[j]);
2160 // Add external address to global table
2161 int index = ExternalsRecorder::find_index(target);
2162 // Update index in relocation
2163 Relocation::add_jint(iter.data(), index);
2164 external_word_Relocation* reloc = (external_word_Relocation*)iter.reloc();
2165 assert(reloc->target() == target, "sanity");
2166 reloc->set_value(target); // Patch address in the code
2167 break;
2168 }
2169 case relocInfo::internal_word_type: {
2170 internal_word_Relocation* r = (internal_word_Relocation*)iter.reloc();
2171 r->fix_relocation_after_aot_load(aot_code_entry()->dumptime_content_start_addr(), nm->content_begin());
2172 break;
2173 }
2174 case relocInfo::section_word_type: {
2175 section_word_Relocation* r = (section_word_Relocation*)iter.reloc();
2176 r->fix_relocation_after_aot_load(aot_code_entry()->dumptime_content_start_addr(), nm->content_begin());
2177 break;
2178 }
2179 case relocInfo::poll_type:
2180 break;
2181 case relocInfo::poll_return_type:
2182 break;
2183 case relocInfo::post_call_nop_type:
2184 break;
2185 case relocInfo::entry_guard_type:
2186 break;
2187 default:
2188 fatal("relocation %d unimplemented", (int)iter.type());
2189 break;
2190 }
2191 if (log.is_enabled()) {
2192 iter.print_current_on(&log);
2193 }
2194 j++;
2195 }
2196 assert(j == count, "must be");
2197 }
2198
2199
2200 void AOTCodeReader::fix_relocations(CodeBlob* code_blob) {
2201 LogStreamHandle(Trace, aot, reloc) log;
2202 uint offset = read_position();
2203 int count = *(int*)addr(offset);
2204 offset += sizeof(int);
2205 if (log.is_enabled()) {
2206 log.print_cr("======== extra relocations count=%d", count);
2207 }
2208 uint* reloc_data = (uint*)addr(offset);
2209 offset += (count * sizeof(uint));
2210 set_read_position(offset);
2211
2212 RelocIterator iter(code_blob);
2213 int j = 0;
2214 while (iter.next()) {
2215 switch (iter.type()) {
2216 case relocInfo::none:
2217 break;
2218 case relocInfo::runtime_call_type: {
2219 address dest = _cache->address_for_id(reloc_data[j]);
2220 if (dest != (address)-1) {
2221 ((CallRelocation*)iter.reloc())->set_destination(dest);
2222 }
2223 break;
2224 }
2225 case relocInfo::runtime_call_w_cp_type:
2226 fatal("runtime_call_w_cp_type unimplemented");
2227 break;
2228 case relocInfo::external_word_type: {
2229 address target = _cache->address_for_id(reloc_data[j]);
2230 // Add external address to global table
2231 int index = ExternalsRecorder::find_index(target);
2232 // Update index in relocation
2233 Relocation::add_jint(iter.data(), index);
2234 external_word_Relocation* reloc = (external_word_Relocation*)iter.reloc();
2235 assert(reloc->target() == target, "sanity");
2236 reloc->set_value(target); // Patch address in the code
2237 break;
2238 }
2239 case relocInfo::internal_word_type: {
2240 internal_word_Relocation* r = (internal_word_Relocation*)iter.reloc();
2241 r->fix_relocation_after_aot_load(aot_code_entry()->dumptime_content_start_addr(), code_blob->content_begin());
2242 break;
2243 }
2244 case relocInfo::section_word_type: {
2245 section_word_Relocation* r = (section_word_Relocation*)iter.reloc();
2246 r->fix_relocation_after_aot_load(aot_code_entry()->dumptime_content_start_addr(), code_blob->content_begin());
2247 break;
2248 }
2249 default:
2250 fatal("relocation %d unimplemented", (int)iter.type());
2251 break;
2252 }
2253 if (log.is_enabled()) {
2254 iter.print_current_on(&log);
2255 }
2256 j++;
2257 }
2258 assert(j == count, "sanity");
2259 }
2260
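// Write the extra per-relocation data needed to re-link an nmethod when it is loaded from
// the cache. Layout sketch (informal field names):
//   [int count]         number of relocations iterated in the nmethod
//   [uint data[count]]  one word per relocation; an index into the immediate oop/metadata
//                       lists for immediate oop/metadata relocations, an address-table id
//                       for calls, trampolines, runtime calls and external words, 0 otherwise
// followed by alignment padding from align_write().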
2261 bool AOTCodeCache::write_nmethod_loadtime_relocations(JavaThread* thread, nmethod* nm, GrowableArray<Handle>& oop_list, GrowableArray<Metadata*>& metadata_list) {
2262 LogStreamHandle(Info, aot, codecache, reloc) log;
2263 GrowableArray<uint> reloc_data;
2264 // Collect additional data
2265 RelocIterator iter(nm);
2266 bool has_immediate = false;
2267 while (iter.next()) {
2268 int idx = reloc_data.append(0); // default value
2269 switch (iter.type()) {
2270 case relocInfo::none:
2271 break;
2272 case relocInfo::oop_type: {
2273 oop_Relocation* r = (oop_Relocation*)iter.reloc();
2274 if (r->oop_is_immediate()) {
2275 // store index of oop in the reloc immediate oop list
2276 Handle h(thread, r->oop_value());
2277 int oop_idx = oop_list.find(h);
2278 assert(oop_idx != -1, "sanity check");
2279 reloc_data.at_put(idx, (uint)oop_idx);
2280 has_immediate = true;
2281 }
2282 break;
2283 }
2284 case relocInfo::metadata_type: {
2285 metadata_Relocation* r = (metadata_Relocation*)iter.reloc();
2286 if (r->metadata_is_immediate()) {
2287 // store index of metadata in the reloc immediate metadata list
2288 int metadata_idx = metadata_list.find(r->metadata_value());
2289 assert(metadata_idx != -1, "sanity check");
2290 reloc_data.at_put(idx, (uint)metadata_idx);
2291 has_immediate = true;
2292 }
2293 break;
2294 }
2295 case relocInfo::virtual_call_type: // Fall through. They all call resolve_*_call blobs.
2296 case relocInfo::opt_virtual_call_type:
2297 case relocInfo::static_call_type: {
2298 CallRelocation* r = (CallRelocation*)iter.reloc();
2299 address dest = r->destination();
2300        if (dest == r->addr()) { // possible call via trampoline on AArch64
2301 dest = (address)-1; // do nothing in this case when loading this relocation
2302 }
2303 reloc_data.at_put(idx, _table->id_for_address(dest, iter, nullptr, nm));
2304 break;
2305 }
2306 case relocInfo::trampoline_stub_type: {
2307 address dest = ((trampoline_stub_Relocation*)iter.reloc())->destination();
2308 reloc_data.at_put(idx, _table->id_for_address(dest, iter, nullptr, nm));
2309 break;
2310 }
2311 case relocInfo::static_stub_type:
2312 break;
2313 case relocInfo::runtime_call_type: {
2314 // Record offset of runtime destination
2315 CallRelocation* r = (CallRelocation*)iter.reloc();
2316 address dest = r->destination();
2317        if (dest == r->addr()) { // possible call via trampoline on AArch64
2318 dest = (address)-1; // do nothing in this case when loading this relocation
2319 }
2320 reloc_data.at_put(idx, _table->id_for_address(dest, iter, nullptr, nm));
2321 break;
2322 }
2323 case relocInfo::runtime_call_w_cp_type:
2324 fatal("runtime_call_w_cp_type unimplemented");
2325 break;
2326 case relocInfo::external_word_type: {
2327 // Record offset of runtime target
2328 address target = ((external_word_Relocation*)iter.reloc())->target();
2329 reloc_data.at_put(idx, _table->id_for_address(target, iter, nullptr, nm));
2330 break;
2331 }
2332 case relocInfo::internal_word_type:
2333 break;
2334 case relocInfo::section_word_type:
2335 break;
2336 case relocInfo::poll_type:
2337 break;
2338 case relocInfo::poll_return_type:
2339 break;
2340 case relocInfo::post_call_nop_type:
2341 break;
2342 case relocInfo::entry_guard_type:
2343 break;
2344 default:
2345 fatal("relocation %d unimplemented", (int)iter.type());
2346 break;
2347 }
2348 if (log.is_enabled()) {
2349 iter.print_current_on(&log);
2350 }
2351 }
2352
2353 // Write additional relocation data: uint per relocation
2354 // Write the count first
2355 int count = reloc_data.length();
2356 write_bytes(&count, sizeof(int));
2357 uint data_size = count * sizeof(uint);
2358 for (GrowableArrayIterator<uint> iter = reloc_data.begin();
2359 iter != reloc_data.end(); ++iter) {
2360 uint value = *iter;
2361 int n = write_bytes(&value, sizeof(uint));
2362 if (n != sizeof(uint)) {
2363 return false;
2365 }
2366 }
2367
2368 if (!align_write()) {
2369 return false;
2370 }
2371   return true; // success
2372 }
2373
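// Write the oops and metadata referenced immediately from the nmethod's code:
//   [int oop_count][oop entries][int metadata_count][metadata entries]
// where each entry is encoded by write_oop()/write_metadata() with a leading DataKind tag.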
2374 bool AOTCodeCache::write_nmethod_reloc_immediates(GrowableArray<Handle>& oop_list, GrowableArray<Metadata*>& metadata_list) {
2375 int count = oop_list.length();
2376 if (!write_bytes(&count, sizeof(int))) {
2377 return false;
2378 }
2379 for (GrowableArrayIterator<Handle> iter = oop_list.begin();
2380 iter != oop_list.end(); ++iter) {
2381 Handle h = *iter;
2382 if (!write_oop(h())) {
2383 return false;
2384 }
2385 }
2386
2387 count = metadata_list.length();
2388 if (!write_bytes(&count, sizeof(int))) {
2389 return false;
2390 }
2391 for (GrowableArrayIterator<Metadata*> iter = metadata_list.begin();
2392 iter != metadata_list.end(); ++iter) {
2393 Metadata* m = *iter;
2394 if (!write_metadata(m)) {
2395 return false;
2396 }
2397 }
2398 return true;
2399 }
2400
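// Serialize the DebugInformationRecorder. Layout sketch:
//   [int data_size][int pcs_length][data_size bytes of debug stream][pcs_length PcDesc entries]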
2401 bool AOTCodeCache::write_debug_info(DebugInformationRecorder* recorder) {
2402 if (!align_write()) {
2403 return false;
2404 }
2405 // Don't call data_size() and pcs_size(). They will freeze OopRecorder.
2406 int data_size = recorder->stream()->position(); // In bytes
2407 uint n = write_bytes(&data_size, sizeof(int));
2408 if (n != sizeof(int)) {
2409 return false;
2410 }
2411 int pcs_length = recorder->pcs_length(); // In bytes
2412 n = write_bytes(&pcs_length, sizeof(int));
2413 if (n != sizeof(int)) {
2414 return false;
2415 }
2416 n = write_bytes(recorder->stream()->buffer(), data_size);
2417 if (n != (uint)data_size) {
2418 return false;
2419 }
2420 uint pcs_size = pcs_length * sizeof(PcDesc);
2421 n = write_bytes(recorder->pcs(), pcs_size);
2422 if (n != pcs_size) {
2423 return false;
2424 }
2425 return true;
2426 }
2427
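// Counterpart of write_debug_info(): rebuild a DebugInformationRecorder from the cached
// bytes by copying the debug stream and the PcDesc array into a freshly allocated recorder.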
2428 DebugInformationRecorder* AOTCodeReader::read_debug_info(OopRecorder* oop_recorder) {
2429 uint code_offset = align_up(read_position(), DATA_ALIGNMENT);
2430 int data_size = *(int*)addr(code_offset);
2431 code_offset += sizeof(int);
2432 int pcs_length = *(int*)addr(code_offset);
2433 code_offset += sizeof(int);
2434
2435 log_debug(aot, codecache)("======== read DebugInfo [%d, %d]:", data_size, pcs_length);
2436
2437 // Aligned initial sizes
2438 int data_size_align = align_up(data_size, DATA_ALIGNMENT);
2439 int pcs_length_align = pcs_length + 1;
2440 assert(sizeof(PcDesc) > DATA_ALIGNMENT, "sanity");
2441 DebugInformationRecorder* recorder = new DebugInformationRecorder(oop_recorder, data_size_align, pcs_length);
2442
2443 copy_bytes(addr(code_offset), recorder->stream()->buffer(), data_size_align);
2444 recorder->stream()->set_position(data_size);
2445 code_offset += data_size;
2446
2447 uint pcs_size = pcs_length * sizeof(PcDesc);
2448 copy_bytes(addr(code_offset), (address)recorder->pcs(), pcs_size);
2449 code_offset += pcs_size;
2450 set_read_position(code_offset);
2451 return recorder;
2452 }
2453
2454 bool AOTCodeCache::write_metadata(nmethod* nm) {
2455 int count = nm->metadata_count()-1;
2456 if (!write_bytes(&count, sizeof(int))) {
2457 return false;
2458 }
2459 for (Metadata** p = nm->metadata_begin(); p < nm->metadata_end(); p++) {
2460 if (!write_metadata(*p)) {
2461 return false;
2462 }
2463 }
2464 return true;
2465 }
2466
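// Write all metadata recorded by the OopRecorder, skipping index 0 which is the recorder's
// virtual nullptr entry.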
2467 bool AOTCodeCache::write_metadata(OopRecorder* oop_recorder) {
2468 int metadata_count = oop_recorder->metadata_count();
2469 uint n = write_bytes(&metadata_count, sizeof(int));
2470 if (n != sizeof(int)) {
2471 return false;
2472 }
2473
2474 log_debug(aot, codecache)("======== write metadata [%d]:", metadata_count);
2475
2476 for (int i = 1; i < metadata_count; i++) { // skip first virtual nullptr
2477 Metadata* m = oop_recorder->metadata_at(i);
2478 LogStreamHandle(Debug, aot, codecache, metadata) log;
2479 if (log.is_enabled()) {
2480 log.print("%d: " INTPTR_FORMAT " ", i, p2i(m));
2481 if (m == (Metadata*)Universe::non_oop_word()) {
2482 log.print("non-metadata word");
2483 } else if (m == nullptr) {
2484 log.print("nullptr-oop");
2485 } else {
2486 Metadata::print_value_on_maybe_null(&log, m);
2487 }
2488 log.cr();
2489 }
2490 if (!write_metadata(m)) {
2491 return false;
2492 }
2493 }
2494 return true;
2495 }
2496
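// Encode a single Metadata* with a leading DataKind tag: Null/No_Data for nullptr and the
// non-oop word, Klass/Method via write_klass()/write_method() (shared or by-name forms),
// and MethodCnts followed by the owning method for MethodCounters. Other metadata kinds
// are currently unsupported.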
2497 bool AOTCodeCache::write_metadata(Metadata* m) {
2498 uint n = 0;
2499 if (m == nullptr) {
2500 DataKind kind = DataKind::Null;
2501 n = write_bytes(&kind, sizeof(int));
2502 if (n != sizeof(int)) {
2503 return false;
2504 }
2505 } else if (m == (Metadata*)Universe::non_oop_word()) {
2506 DataKind kind = DataKind::No_Data;
2507 n = write_bytes(&kind, sizeof(int));
2508 if (n != sizeof(int)) {
2509 return false;
2510 }
2511 } else if (m->is_klass()) {
2512 if (!write_klass((Klass*)m)) {
2513 return false;
2514 }
2515 } else if (m->is_method()) {
2516 if (!write_method((Method*)m)) {
2517 return false;
2518 }
2519 } else if (m->is_methodCounters()) {
2520 DataKind kind = DataKind::MethodCnts;
2521 n = write_bytes(&kind, sizeof(int));
2522 if (n != sizeof(int)) {
2523 return false;
2524 }
2525 if (!write_method(((MethodCounters*)m)->method())) {
2526 return false;
2527 }
2528 log_info(aot, codecache)("%d (L%d): Write MethodCounters : " INTPTR_FORMAT, compile_id(), comp_level(), p2i(m));
2529 } else { // Not supported
2530 fatal("metadata : " INTPTR_FORMAT " unimplemented", p2i(m));
2531 return false;
2532 }
2533 return true;
2534 }
2535
2536 bool AOTCodeReader::read_metadata(OopRecorder* oop_recorder, ciMethod* target) {
2537 uint code_offset = read_position();
2538 int metadata_count = *(int*)addr(code_offset);
2539 code_offset += sizeof(int);
2540 set_read_position(code_offset);
2541
2542 log_debug(aot, codecache)("======== read metadata [%d]:", metadata_count);
2543
2544 if (metadata_count == 0) {
2545 return true;
2546 }
2547 {
2548 VM_ENTRY_MARK;
2549 methodHandle comp_method(THREAD, target->get_Method());
2550
2551 for (int i = 1; i < metadata_count; i++) {
2552 Metadata* m = read_metadata(comp_method);
2553 if (lookup_failed()) {
2554 return false;
2555 }
2556 if (oop_recorder->is_real(m)) {
2557 oop_recorder->find_index(m);
2558 } else {
2559 oop_recorder->allocate_metadata_index(m);
2560 }
2561 LogTarget(Debug, aot, codecache, metadata) log;
2562 if (log.is_enabled()) {
2563 LogStream ls(log);
2564 ls.print("%d: " INTPTR_FORMAT " ", i, p2i(m));
2565 if (m == (Metadata*)Universe::non_oop_word()) {
2566 ls.print("non-metadata word");
2567 } else if (m == nullptr) {
2568 ls.print("nullptr-oop");
2569 } else {
2570 Metadata::print_value_on_maybe_null(&ls, m);
2571 }
2572 ls.cr();
2573 }
2574 }
2575 }
2576 return true;
2577 }
2578
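// Counterpart of write_metadata(Metadata*): read the DataKind tag and reconstruct the
// Metadata* in the current VM, setting lookup_failed() when the referenced class, method
// or MethodCounters cannot be resolved.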
2579 Metadata* AOTCodeReader::read_metadata(const methodHandle& comp_method) {
2580 uint code_offset = read_position();
2581 Metadata* m = nullptr;
2582 DataKind kind = *(DataKind*)addr(code_offset);
2583 code_offset += sizeof(DataKind);
2584 set_read_position(code_offset);
2585 if (kind == DataKind::Null) {
2586 m = (Metadata*)nullptr;
2587 } else if (kind == DataKind::No_Data) {
2588 m = (Metadata*)Universe::non_oop_word();
2589 } else if (kind == DataKind::Klass || kind == DataKind::Klass_Shared) {
2590 m = (Metadata*)read_klass(comp_method, (kind == DataKind::Klass_Shared));
2591 } else if (kind == DataKind::Method || kind == DataKind::Method_Shared) {
2592 m = (Metadata*)read_method(comp_method, (kind == DataKind::Method_Shared));
2593 } else if (kind == DataKind::MethodCnts) {
2594 kind = *(DataKind*)addr(code_offset);
2595 bool shared = (kind == DataKind::Method_Shared);
2596 assert(kind == DataKind::Method || shared, "Sanity");
2597 code_offset += sizeof(DataKind);
2598 set_read_position(code_offset);
2599 m = (Metadata*)read_method(comp_method, shared);
2600 if (m != nullptr) {
2601 Method* method = (Method*)m;
2602 m = method->get_method_counters(Thread::current());
2603 if (m == nullptr) {
2604 set_lookup_failed();
2605 log_info(aot, codecache)("%d (L%d): Failed to get MethodCounters", compile_id(), comp_level());
2606 } else {
2607 log_info(aot, codecache)("%d (L%d): Read MethodCounters : " INTPTR_FORMAT, compile_id(), comp_level(), p2i(m));
2608 }
2609 }
2610 } else {
2611 set_lookup_failed();
2612 log_info(aot, codecache)("%d (L%d): Unknown metadata's kind: %d", compile_id(), comp_level(), (int)kind);
2613 }
2614 return m;
2615 }
2616
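// A method is written in one of two forms:
//   Method_Shared - a uint offset from the shared (CDS) base address, used when metadata
//                   pointers can be stored directly and the method is AOT-code eligible;
//   Method        - holder, name and signature as '\0'-separated UTF-8 strings, to be
//                   re-resolved by name at load time.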
2617 bool AOTCodeCache::write_method(Method* method) {
2618 bool can_use_meta_ptrs = _use_meta_ptrs;
2619 Klass* klass = method->method_holder();
2620 if (klass->is_instance_klass()) {
2621 InstanceKlass* ik = InstanceKlass::cast(klass);
2622 ClassLoaderData* cld = ik->class_loader_data();
2623 if (!cld->is_builtin_class_loader_data()) {
2624 set_lookup_failed();
2625 return false;
2626 }
2627 if (_for_preload && !AOTCacheAccess::can_generate_aot_code(ik)) {
2628 _for_preload = false;
2629      // Bail out if the code has clinit barriers:
2630      // the method will be recompiled without them in any case
2631 if (_has_clinit_barriers) {
2632 set_lookup_failed();
2633 return false;
2634 }
2635 can_use_meta_ptrs = false;
2636 }
2637 }
2638 ResourceMark rm;
2639 if (can_use_meta_ptrs && AOTCacheAccess::can_generate_aot_code(method)) {
2640 DataKind kind = DataKind::Method_Shared;
2641 uint n = write_bytes(&kind, sizeof(int));
2642 if (n != sizeof(int)) {
2643 return false;
2644 }
2645 uint method_offset = AOTCacheAccess::delta_from_shared_address_base((address)method);
2646 n = write_bytes(&method_offset, sizeof(uint));
2647 if (n != sizeof(uint)) {
2648 return false;
2649 }
2650 log_info(aot, codecache)("%d (L%d): Wrote shared method: %s @ 0x%08x", compile_id(), comp_level(), method->name_and_sig_as_C_string(), method_offset);
2651 return true;
2652 }
2653  // Bail out if the code has clinit barriers:
2654  // the method will be recompiled without them in any case
2655 if (_for_preload && _has_clinit_barriers) {
2656 set_lookup_failed();
2657 return false;
2658 }
2659 _for_preload = false;
2660 log_info(aot, codecache,cds)("%d (L%d): Not shared method: %s", compile_id(), comp_level(), method->name_and_sig_as_C_string());
2661  if (method->is_hidden()) { // Skip nmethods referencing hidden methods
2662 set_lookup_failed();
2663 return false;
2664 }
2665 DataKind kind = DataKind::Method;
2666 uint n = write_bytes(&kind, sizeof(int));
2667 if (n != sizeof(int)) {
2668 return false;
2669 }
2670 Symbol* name = method->name();
2671 Symbol* holder = method->klass_name();
2672 Symbol* signat = method->signature();
2673 int name_length = name->utf8_length();
2674 int holder_length = holder->utf8_length();
2675 int signat_length = signat->utf8_length();
2676
2677 // Write sizes and strings
2678 int total_length = holder_length + 1 + name_length + 1 + signat_length + 1;
2679 char* dest = NEW_RESOURCE_ARRAY(char, total_length);
2680 holder->as_C_string(dest, total_length);
2681 dest[holder_length] = '\0';
2682 int pos = holder_length + 1;
2683 name->as_C_string(&(dest[pos]), (total_length - pos));
2684 pos += name_length;
2685 dest[pos++] = '\0';
2686 signat->as_C_string(&(dest[pos]), (total_length - pos));
2687 dest[total_length - 1] = '\0';
2688
2689 LogTarget(Info, aot, codecache, loader) log;
2690 if (log.is_enabled()) {
2691 LogStream ls(log);
2692 oop loader = klass->class_loader();
2693 oop domain = klass->protection_domain();
2694 ls.print("Holder %s loader: ", dest);
2695 if (loader == nullptr) {
2696 ls.print("nullptr");
2697 } else {
2698 loader->print_value_on(&ls);
2699 }
2700 ls.print(" domain: ");
2701 if (domain == nullptr) {
2702 ls.print("nullptr");
2703 } else {
2704 domain->print_value_on(&ls);
2705 }
2706 ls.cr();
2707 }
2708
2709 n = write_bytes(&holder_length, sizeof(int));
2710 if (n != sizeof(int)) {
2711 return false;
2712 }
2713 n = write_bytes(&name_length, sizeof(int));
2714 if (n != sizeof(int)) {
2715 return false;
2716 }
2717 n = write_bytes(&signat_length, sizeof(int));
2718 if (n != sizeof(int)) {
2719 return false;
2720 }
2721 n = write_bytes(dest, total_length);
2722 if (n != (uint)total_length) {
2723 return false;
2724 }
2725 dest[holder_length] = ' ';
2726 dest[holder_length + 1 + name_length] = ' ';
2727 log_info(aot, codecache)("%d (L%d): Wrote method: %s", compile_id(), comp_level(), dest);
2728 return true;
2729 }
2730
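// Counterpart of write_method(): either materialize the Method* from its offset in the
// shared metaspace (verifying it is still in CDS and its holder is loaded and linked), or
// re-resolve it from the recorded holder/name/signature strings using the compiled
// method's class loader (falling back to the default loader).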
2731 Method* AOTCodeReader::read_method(const methodHandle& comp_method, bool shared) {
2732 uint code_offset = read_position();
2733 if (_cache->use_meta_ptrs() && shared) {
2734 uint method_offset = *(uint*)addr(code_offset);
2735 code_offset += sizeof(uint);
2736 set_read_position(code_offset);
2737 Method* m = (Method*)((address)SharedBaseAddress + method_offset);
2738 if (!MetaspaceShared::is_in_shared_metaspace((address)m)) {
2739 // Something changed in CDS
2740 set_lookup_failed();
2741 log_info(aot, codecache)("Lookup failed for shared method: " INTPTR_FORMAT " is not in CDS ", p2i((address)m));
2742 return nullptr;
2743 }
2744 assert(m->is_method(), "sanity");
2745 ResourceMark rm;
2746 Klass* k = m->method_holder();
2747 if (!k->is_instance_klass()) {
2748 set_lookup_failed();
2749 log_info(aot, codecache)("%d '%s' (L%d): Lookup failed for holder %s: not instance klass",
2750 compile_id(), comp_method->name_and_sig_as_C_string(), comp_level(), k->external_name());
2751 return nullptr;
2752 } else if (!MetaspaceShared::is_in_shared_metaspace((address)k)) {
2753 set_lookup_failed();
2754 log_info(aot, codecache)("%d '%s' (L%d): Lookup failed for holder %s: not in CDS",
2755 compile_id(), comp_method->name_and_sig_as_C_string(), comp_level(), k->external_name());
2756 return nullptr;
2757 } else if (!InstanceKlass::cast(k)->is_loaded()) {
2758 set_lookup_failed();
2759 log_info(aot, codecache)("%d '%s' (L%d): Lookup failed for holder %s: not loaded",
2760 compile_id(), comp_method->name_and_sig_as_C_string(), comp_level(), k->external_name());
2761 return nullptr;
2762 } else if (!InstanceKlass::cast(k)->is_linked()) {
2763 set_lookup_failed();
2764 log_info(aot, codecache)("%d '%s' (L%d): Lookup failed for holder %s: not linked%s",
2765 compile_id(), comp_method->name_and_sig_as_C_string(), comp_level(), k->external_name(), (_preload ? " for code preload" : ""));
2766 return nullptr;
2767 }
2768 log_info(aot, codecache)("%d (L%d): Shared method lookup: %s",
2769 compile_id(), comp_level(), m->name_and_sig_as_C_string());
2770 return m;
2771 }
2772 int holder_length = *(int*)addr(code_offset);
2773 code_offset += sizeof(int);
2774 int name_length = *(int*)addr(code_offset);
2775 code_offset += sizeof(int);
2776 int signat_length = *(int*)addr(code_offset);
2777 code_offset += sizeof(int);
2778
2779 const char* dest = addr(code_offset);
2780 code_offset += holder_length + 1 + name_length + 1 + signat_length + 1;
2781 set_read_position(code_offset);
2782 TempNewSymbol klass_sym = SymbolTable::probe(&(dest[0]), holder_length);
2783 if (klass_sym == nullptr) {
2784 set_lookup_failed();
2785 log_info(aot, codecache)("%d (L%d): Probe failed for class %s", compile_id(), comp_level(), &(dest[0]));
2786 return nullptr;
2787 }
2788 // Use class loader of compiled method.
2789 Thread* thread = Thread::current();
2790 Handle loader(thread, comp_method->method_holder()->class_loader());
2791 Klass* k = SystemDictionary::find_instance_or_array_klass(thread, klass_sym, loader);
2792 assert(!thread->has_pending_exception(), "should not throw");
2793 if (k == nullptr && !loader.is_null()) {
2794 // Try default loader and domain
2795 k = SystemDictionary::find_instance_or_array_klass(thread, klass_sym, Handle());
2796 assert(!thread->has_pending_exception(), "should not throw");
2797 }
2798 if (k != nullptr) {
2799 if (!k->is_instance_klass()) {
2800 set_lookup_failed();
2801 log_info(aot, codecache)("%d (L%d): Lookup failed for holder %s: not instance klass",
2802 compile_id(), comp_level(), &(dest[0]));
2803 return nullptr;
2804 } else if (!InstanceKlass::cast(k)->is_linked()) {
2805 set_lookup_failed();
2806 log_info(aot, codecache)("%d (L%d): Lookup failed for holder %s: not linked",
2807 compile_id(), comp_level(), &(dest[0]));
2808 return nullptr;
2809 }
2810 log_info(aot, codecache)("%d (L%d): Holder lookup: %s", compile_id(), comp_level(), k->external_name());
2811 } else {
2812 set_lookup_failed();
2813 log_info(aot, codecache)("%d (L%d): Lookup failed for holder %s",
2814 compile_id(), comp_level(), &(dest[0]));
2815 return nullptr;
2816 }
2817 TempNewSymbol name_sym = SymbolTable::probe(&(dest[holder_length + 1]), name_length);
2818 int pos = holder_length + 1 + name_length + 1;
2819 TempNewSymbol sign_sym = SymbolTable::probe(&(dest[pos]), signat_length);
2820 if (name_sym == nullptr) {
2821 set_lookup_failed();
2822 log_info(aot, codecache)("%d (L%d): Probe failed for method name %s",
2823 compile_id(), comp_level(), &(dest[holder_length + 1]));
2824 return nullptr;
2825 }
2826 if (sign_sym == nullptr) {
2827 set_lookup_failed();
2828 log_info(aot, codecache)("%d (L%d): Probe failed for method signature %s",
2829 compile_id(), comp_level(), &(dest[pos]));
2830 return nullptr;
2831 }
2832 Method* m = InstanceKlass::cast(k)->find_method(name_sym, sign_sym);
2833 if (m != nullptr) {
2834 ResourceMark rm;
2835 log_info(aot, codecache)("%d (L%d): Method lookup: %s", compile_id(), comp_level(), m->name_and_sig_as_C_string());
2836 } else {
2837 set_lookup_failed();
2838 log_info(aot, codecache)("%d (L%d): Lookup failed for method %s::%s%s",
2839 compile_id(), comp_level(), &(dest[0]), &(dest[holder_length + 1]), &(dest[pos]));
2840 return nullptr;
2841 }
2842 return m;
2843 }
2844
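// A klass is written much like a method. An object-array klass is reduced to its bottom
// klass plus the array dimension, and a small state word is recorded:
//   state = (array_dim << 1) | (is_initialized ? 1 : 0)
// so the reader can reject code compiled against a klass that was initialized at dump
// time but is not initialized yet at load time.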
2845 bool AOTCodeCache::write_klass(Klass* klass) {
2846 bool can_use_meta_ptrs = _use_meta_ptrs;
2847 uint array_dim = 0;
2848 if (klass->is_objArray_klass()) {
2849 array_dim = ObjArrayKlass::cast(klass)->dimension();
2850 klass = ObjArrayKlass::cast(klass)->bottom_klass(); // overwrites klass
2851 }
2852 uint init_state = 0;
2853 if (klass->is_instance_klass()) {
2854 InstanceKlass* ik = InstanceKlass::cast(klass);
2855 ClassLoaderData* cld = ik->class_loader_data();
2856 if (!cld->is_builtin_class_loader_data()) {
2857 set_lookup_failed();
2858 return false;
2859 }
2860 if (_for_preload && !AOTCacheAccess::can_generate_aot_code(ik)) {
2861 _for_preload = false;
2862      // Bail out if the code has clinit barriers:
2863      // the method will be recompiled without them in any case
2864 if (_has_clinit_barriers) {
2865 set_lookup_failed();
2866 return false;
2867 }
2868 can_use_meta_ptrs = false;
2869 }
2870 init_state = (ik->is_initialized() ? 1 : 0);
2871 }
2872 ResourceMark rm;
2873 uint state = (array_dim << 1) | (init_state & 1);
2874 if (can_use_meta_ptrs && AOTCacheAccess::can_generate_aot_code(klass)) {
2875 DataKind kind = DataKind::Klass_Shared;
2876 uint n = write_bytes(&kind, sizeof(int));
2877 if (n != sizeof(int)) {
2878 return false;
2879 }
2880 // Record state of instance klass initialization.
2881 n = write_bytes(&state, sizeof(int));
2882 if (n != sizeof(int)) {
2883 return false;
2884 }
2885 uint klass_offset = AOTCacheAccess::delta_from_shared_address_base((address)klass);
2886 n = write_bytes(&klass_offset, sizeof(uint));
2887 if (n != sizeof(uint)) {
2888 return false;
2889 }
2890 log_info(aot, codecache)("%d (L%d): Wrote shared klass: %s%s%s @ 0x%08x", compile_id(), comp_level(), klass->external_name(),
2891 (!klass->is_instance_klass() ? "" : (init_state == 1 ? " (initialized)" : " (not-initialized)")),
2892 (array_dim > 0 ? " (object array)" : ""),
2893 klass_offset);
2894 return true;
2895 }
2896  // Bail out if the code has clinit barriers:
2897  // the method will be recompiled without them in any case
2898 if (_for_preload && _has_clinit_barriers) {
2899 set_lookup_failed();
2900 return false;
2901 }
2902 _for_preload = false;
2903 log_info(aot, codecache,cds)("%d (L%d): Not shared klass: %s", compile_id(), comp_level(), klass->external_name());
2904  if (klass->is_hidden()) { // Skip nmethods referencing hidden classes
2905 set_lookup_failed();
2906 return false;
2907 }
2908 DataKind kind = DataKind::Klass;
2909 uint n = write_bytes(&kind, sizeof(int));
2910 if (n != sizeof(int)) {
2911 return false;
2912 }
2913 // Record state of instance klass initialization.
2914 n = write_bytes(&state, sizeof(int));
2915 if (n != sizeof(int)) {
2916 return false;
2917 }
2918 Symbol* name = klass->name();
2919 int name_length = name->utf8_length();
2920 int total_length = name_length + 1;
2921 char* dest = NEW_RESOURCE_ARRAY(char, total_length);
2922 name->as_C_string(dest, total_length);
2923 dest[total_length - 1] = '\0';
2924 LogTarget(Info, aot, codecache, loader) log;
2925 if (log.is_enabled()) {
2926 LogStream ls(log);
2927 oop loader = klass->class_loader();
2928 oop domain = klass->protection_domain();
2929 ls.print("Class %s loader: ", dest);
2930 if (loader == nullptr) {
2931 ls.print("nullptr");
2932 } else {
2933 loader->print_value_on(&ls);
2934 }
2935 ls.print(" domain: ");
2936 if (domain == nullptr) {
2937 ls.print("nullptr");
2938 } else {
2939 domain->print_value_on(&ls);
2940 }
2941 ls.cr();
2942 }
2943 n = write_bytes(&name_length, sizeof(int));
2944 if (n != sizeof(int)) {
2945 return false;
2946 }
2947 n = write_bytes(dest, total_length);
2948 if (n != (uint)total_length) {
2949 return false;
2950 }
2951 log_info(aot, codecache)("%d (L%d): Wrote klass: %s%s%s",
2952 compile_id(), comp_level(),
2953 dest, (!klass->is_instance_klass() ? "" : (init_state == 1 ? " (initialized)" : " (not-initialized)")),
2954 (array_dim > 0 ? " (object array)" : ""));
2955 return true;
2956 }
2957
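// Counterpart of write_klass(): decode the state word, resolve the klass either via its
// shared-metaspace offset or by name, and look up the object-array klass again if a
// non-zero dimension was recorded.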
2958 Klass* AOTCodeReader::read_klass(const methodHandle& comp_method, bool shared) {
2959 uint code_offset = read_position();
2960 uint state = *(uint*)addr(code_offset);
2961 uint init_state = (state & 1);
2962 uint array_dim = (state >> 1);
2963 code_offset += sizeof(int);
2964 if (_cache->use_meta_ptrs() && shared) {
2965 uint klass_offset = *(uint*)addr(code_offset);
2966 code_offset += sizeof(uint);
2967 set_read_position(code_offset);
2968 Klass* k = (Klass*)((address)SharedBaseAddress + klass_offset);
2969 if (!MetaspaceShared::is_in_shared_metaspace((address)k)) {
2970 // Something changed in CDS
2971 set_lookup_failed();
2972 log_info(aot, codecache)("Lookup failed for shared klass: " INTPTR_FORMAT " is not in CDS ", p2i((address)k));
2973 return nullptr;
2974 }
2975 assert(k->is_klass(), "sanity");
2976 ResourceMark rm;
2977 if (k->is_instance_klass() && !InstanceKlass::cast(k)->is_loaded()) {
2978 set_lookup_failed();
2979 log_info(aot, codecache)("%d '%s' (L%d): Lookup failed for klass %s: not loaded",
2980 compile_id(), comp_method->name_and_sig_as_C_string(), comp_level(), k->external_name());
2981 return nullptr;
2982    } else if (k->is_instance_klass() && !InstanceKlass::cast(k)->is_initialized() && (init_state == 1) && !_preload) {
2983      // The klass was initialized when the code was cached but is not initialized yet,
2984      // and we are not preloading, so the cached code cannot be used.
2985 set_lookup_failed();
2986 log_info(aot, codecache)("%d '%s' (L%d): Lookup failed for klass %s: not initialized",
2987 compile_id(), comp_method->name_and_sig_as_C_string(), comp_level(), k->external_name());
2988 return nullptr;
2989 }
2990 if (array_dim > 0) {
2991 assert(k->is_instance_klass() || k->is_typeArray_klass(), "sanity check");
2992 Klass* ak = k->array_klass_or_null(array_dim);
2993 // FIXME: what would it take to create an array class on the fly?
2994 // Klass* ak = k->array_klass(dim, JavaThread::current());
2995 // guarantee(JavaThread::current()->pending_exception() == nullptr, "");
2996 if (ak == nullptr) {
2997 set_lookup_failed();
2998 log_info(aot, codecache)("%d (L%d): %d-dimension array klass lookup failed: %s",
2999 compile_id(), comp_level(), array_dim, k->external_name());
3000 }
3001 log_info(aot, codecache)("%d (L%d): Klass lookup: %s (object array)", compile_id(), comp_level(), k->external_name());
3002 return ak;
3003 } else {
3004 log_info(aot, codecache)("%d (L%d): Shared klass lookup: %s",
3005 compile_id(), comp_level(), k->external_name());
3006 return k;
3007 }
3008 }
3009 int name_length = *(int*)addr(code_offset);
3010 code_offset += sizeof(int);
3011 const char* dest = addr(code_offset);
3012 code_offset += name_length + 1;
3013 set_read_position(code_offset);
3014 TempNewSymbol klass_sym = SymbolTable::probe(&(dest[0]), name_length);
3015 if (klass_sym == nullptr) {
3016 set_lookup_failed();
3017 log_info(aot, codecache)("%d (L%d): Probe failed for class %s",
3018 compile_id(), comp_level(), &(dest[0]));
3019 return nullptr;
3020 }
3021 // Use class loader of compiled method.
3022 Thread* thread = Thread::current();
3023 Handle loader(thread, comp_method->method_holder()->class_loader());
3024 Klass* k = SystemDictionary::find_instance_or_array_klass(thread, klass_sym, loader);
3025 assert(!thread->has_pending_exception(), "should not throw");
3026 if (k == nullptr && !loader.is_null()) {
3027 // Try default loader and domain
3028 k = SystemDictionary::find_instance_or_array_klass(thread, klass_sym, Handle());
3029 assert(!thread->has_pending_exception(), "should not throw");
3030 }
3031 if (k != nullptr) {
3032    // Only allow a not-yet-initialized klass if it was also uninitialized during code caching
3033 if (k->is_instance_klass() && !InstanceKlass::cast(k)->is_initialized() && (init_state == 1)) {
3034 set_lookup_failed();
3035 log_info(aot, codecache)("%d (L%d): Lookup failed for klass %s: not initialized", compile_id(), comp_level(), &(dest[0]));
3036 return nullptr;
3037 }
3038 log_info(aot, codecache)("%d (L%d): Klass lookup %s", compile_id(), comp_level(), k->external_name());
3039 } else {
3040 set_lookup_failed();
3041 log_info(aot, codecache)("%d (L%d): Lookup failed for class %s", compile_id(), comp_level(), &(dest[0]));
3042 return nullptr;
3043 }
3044 return k;
3045 }
3046
3047 bool AOTCodeCache::write_oops(OopRecorder* oop_recorder) {
3048 int oop_count = oop_recorder->oop_count();
3049 uint n = write_bytes(&oop_count, sizeof(int));
3050 if (n != sizeof(int)) {
3051 return false;
3052 }
3053 log_debug(aot, codecache)("======== write oops [%d]:", oop_count);
3054
3055 for (int i = 1; i < oop_count; i++) { // skip first virtual nullptr
3056 jobject jo = oop_recorder->oop_at(i);
3057 LogStreamHandle(Info, aot, codecache, oops) log;
3058 if (log.is_enabled()) {
3059 log.print("%d: " INTPTR_FORMAT " ", i, p2i(jo));
3060 if (jo == (jobject)Universe::non_oop_word()) {
3061 log.print("non-oop word");
3062 } else if (jo == nullptr) {
3063 log.print("nullptr-oop");
3064 } else {
3065 JNIHandles::resolve(jo)->print_value_on(&log);
3066 }
3067 log.cr();
3068 }
3069 if (!write_oop(jo)) {
3070 return false;
3071 }
3072 }
3073 return true;
3074 }
3075
3076 bool AOTCodeCache::write_oop(jobject& jo) {
3077 oop obj = JNIHandles::resolve(jo);
3078 return write_oop(obj);
3079 }
3080
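// Encode a single oop with a leading DataKind tag. Only a limited set of objects is
// supported: null and the non-oop word, Class mirrors (primitive types or via
// write_klass()), Strings (archived-object index or UTF-8 bytes), the system and platform
// class loaders, and other archived "permanent" heap objects by index. Module objects and
// unrecognized class loaders are fatal; any other oop fails the write with lookup_failed().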
3081 bool AOTCodeCache::write_oop(oop obj) {
3082 DataKind kind;
3083 uint n = 0;
3084 if (obj == nullptr) {
3085 kind = DataKind::Null;
3086 n = write_bytes(&kind, sizeof(int));
3087 if (n != sizeof(int)) {
3088 return false;
3089 }
3090 } else if (cast_from_oop<void *>(obj) == Universe::non_oop_word()) {
3091 kind = DataKind::No_Data;
3092 n = write_bytes(&kind, sizeof(int));
3093 if (n != sizeof(int)) {
3094 return false;
3095 }
3096 } else if (java_lang_Class::is_instance(obj)) {
3097 if (java_lang_Class::is_primitive(obj)) {
3098 int bt = (int)java_lang_Class::primitive_type(obj);
3099 kind = DataKind::Primitive;
3100 n = write_bytes(&kind, sizeof(int));
3101 if (n != sizeof(int)) {
3102 return false;
3103 }
3104 n = write_bytes(&bt, sizeof(int));
3105 if (n != sizeof(int)) {
3106 return false;
3107 }
3108 log_info(aot, codecache)("%d (L%d): Write primitive type klass: %s", compile_id(), comp_level(), type2name((BasicType)bt));
3109 } else {
3110 Klass* klass = java_lang_Class::as_Klass(obj);
3111 if (!write_klass(klass)) {
3112 return false;
3113 }
3114 }
3115  } else if (java_lang_String::is_instance(obj)) {
3116 int k = AOTCacheAccess::get_archived_object_permanent_index(obj); // k >= 0 means obj is a "permanent heap object"
3117 if (k >= 0) {
3118 kind = DataKind::String_Shared;
3119 n = write_bytes(&kind, sizeof(int));
3120 if (n != sizeof(int)) {
3121 return false;
3122 }
3123 n = write_bytes(&k, sizeof(int));
3124 if (n != sizeof(int)) {
3125 return false;
3126 }
3127 return true;
3128 }
3129 kind = DataKind::String;
3130 n = write_bytes(&kind, sizeof(int));
3131 if (n != sizeof(int)) {
3132 return false;
3133 }
3134 ResourceMark rm;
3135 size_t length_sz = 0;
3136 const char* string = java_lang_String::as_utf8_string(obj, length_sz);
3137 int length = (int)length_sz; // FIXME -- cast
3138    length++; // write trailing '\0'
3139 n = write_bytes(&length, sizeof(int));
3140 if (n != sizeof(int)) {
3141 return false;
3142 }
3143 n = write_bytes(string, (uint)length);
3144 if (n != (uint)length) {
3145 return false;
3146 }
3147 log_info(aot, codecache)("%d (L%d): Write String: %s", compile_id(), comp_level(), string);
3148 } else if (java_lang_Module::is_instance(obj)) {
3149 fatal("Module object unimplemented");
3150 } else if (java_lang_ClassLoader::is_instance(obj)) {
3151 if (obj == SystemDictionary::java_system_loader()) {
3152 kind = DataKind::SysLoader;
3153 log_info(aot, codecache)("%d (L%d): Write ClassLoader: java_system_loader", compile_id(), comp_level());
3154 } else if (obj == SystemDictionary::java_platform_loader()) {
3155 kind = DataKind::PlaLoader;
3156 log_info(aot, codecache)("%d (L%d): Write ClassLoader: java_platform_loader", compile_id(), comp_level());
3157 } else {
3158 fatal("ClassLoader object unimplemented");
3159 return false;
3160 }
3161 n = write_bytes(&kind, sizeof(int));
3162 if (n != sizeof(int)) {
3163 return false;
3164 }
3165  } else {
3166 int k = AOTCacheAccess::get_archived_object_permanent_index(obj); // k >= 0 means obj is a "permanent heap object"
3167 if (k >= 0) {
3168 kind = DataKind::MH_Oop_Shared;
3169 n = write_bytes(&kind, sizeof(int));
3170 if (n != sizeof(int)) {
3171 return false;
3172 }
3173 n = write_bytes(&k, sizeof(int));
3174 if (n != sizeof(int)) {
3175 return false;
3176 }
3177 return true;
3178 }
3179 // Unhandled oop - bailout
3180 set_lookup_failed();
3181 log_info(aot, codecache, nmethod)("%d (L%d): Unhandled obj: " PTR_FORMAT " : %s",
3182 compile_id(), comp_level(), p2i(obj), obj->klass()->external_name());
3183 return false;
3184 }
3185 return true;
3186 }
3187
3188 bool AOTCodeReader::read_oops(OopRecorder* oop_recorder, ciMethod* target) {
3189 uint code_offset = read_position();
3190 int oop_count = *(int*)addr(code_offset);
3191 code_offset += sizeof(int);
3192 set_read_position(code_offset);
3193 log_debug(aot, codecache)("======== read oops [%d]:", oop_count);
3194 if (oop_count == 0) {
3195 return true;
3196 }
3197 {
3198 VM_ENTRY_MARK;
3199 methodHandle comp_method(THREAD, target->get_Method());
3200 for (int i = 1; i < oop_count; i++) {
3201 oop obj = read_oop(THREAD, comp_method);
3202 if (lookup_failed()) {
3203 return false;
3204 }
3205 jobject jo = JNIHandles::make_local(THREAD, obj);
3206 if (oop_recorder->is_real(jo)) {
3207 oop_recorder->find_index(jo);
3208 } else {
3209 oop_recorder->allocate_oop_index(jo);
3210 }
3211 LogStreamHandle(Debug, aot, codecache, oops) log;
3212 if (log.is_enabled()) {
3213 log.print("%d: " INTPTR_FORMAT " ", i, p2i(jo));
3214 if (jo == (jobject)Universe::non_oop_word()) {
3215 log.print("non-oop word");
3216 } else if (jo == nullptr) {
3217 log.print("nullptr-oop");
3218 } else {
3219 JNIHandles::resolve(jo)->print_value_on(&log);
3220 }
3221 log.cr();
3222 }
3223 }
3224 }
3225 return true;
3226 }
3227
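// Counterpart of write_oop(): decode the DataKind tag and return the corresponding oop in
// the current VM, setting lookup_failed() if it cannot be recreated (for example when
// string interning fails or the kind is unknown).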
3228 oop AOTCodeReader::read_oop(JavaThread* thread, const methodHandle& comp_method) {
3229 uint code_offset = read_position();
3230 oop obj = nullptr;
3231 DataKind kind = *(DataKind*)addr(code_offset);
3232 code_offset += sizeof(DataKind);
3233 set_read_position(code_offset);
3234 if (kind == DataKind::Null) {
3235 return nullptr;
3236 } else if (kind == DataKind::No_Data) {
3237 return cast_to_oop(Universe::non_oop_word());
3238 } else if (kind == DataKind::Klass || kind == DataKind::Klass_Shared) {
3239 Klass* k = read_klass(comp_method, (kind == DataKind::Klass_Shared));
3240 if (k == nullptr) {
3241 return nullptr;
3242 }
3243 obj = k->java_mirror();
3244 if (obj == nullptr) {
3245 set_lookup_failed();
3246 log_info(aot, codecache)("Lookup failed for java_mirror of klass %s", k->external_name());
3247 return nullptr;
3248 }
3249 } else if (kind == DataKind::Primitive) {
3250 code_offset = read_position();
3251 int t = *(int*)addr(code_offset);
3252 code_offset += sizeof(int);
3253 set_read_position(code_offset);
3254 BasicType bt = (BasicType)t;
3255 obj = java_lang_Class::primitive_mirror(bt);
3256 log_info(aot, codecache)("%d (L%d): Read primitive type klass: %s", compile_id(), comp_level(), type2name(bt));
3257 } else if (kind == DataKind::String_Shared) {
3258 code_offset = read_position();
3259 int k = *(int*)addr(code_offset);
3260 code_offset += sizeof(int);
3261 set_read_position(code_offset);
3262 obj = AOTCacheAccess::get_archived_object(k);
3263 } else if (kind == DataKind::String) {
3264 code_offset = read_position();
3265 int length = *(int*)addr(code_offset);
3266 code_offset += sizeof(int);
3267 set_read_position(code_offset);
3268 const char* dest = addr(code_offset);
3269 set_read_position(code_offset + length);
3270 obj = StringTable::intern(&(dest[0]), thread);
3271 if (obj == nullptr) {
3272 set_lookup_failed();
3273 log_info(aot, codecache)("%d (L%d): Lookup failed for String %s",
3274 compile_id(), comp_level(), &(dest[0]));
3275 return nullptr;
3276 }
3277 assert(java_lang_String::is_instance(obj), "must be string");
3278 log_info(aot, codecache)("%d (L%d): Read String: %s", compile_id(), comp_level(), dest);
3279 } else if (kind == DataKind::SysLoader) {
3280 obj = SystemDictionary::java_system_loader();
3281 log_info(aot, codecache)("%d (L%d): Read java_system_loader", compile_id(), comp_level());
3282 } else if (kind == DataKind::PlaLoader) {
3283 obj = SystemDictionary::java_platform_loader();
3284 log_info(aot, codecache)("%d (L%d): Read java_platform_loader", compile_id(), comp_level());
3285 } else if (kind == DataKind::MH_Oop_Shared) {
3286 code_offset = read_position();
3287 int k = *(int*)addr(code_offset);
3288 code_offset += sizeof(int);
3289 set_read_position(code_offset);
3290 obj = AOTCacheAccess::get_archived_object(k);
3291 } else {
3292 set_lookup_failed();
3293 log_info(aot, codecache)("%d (L%d): Unknown oop's kind: %d",
3294 compile_id(), comp_level(), (int)kind);
3295 return nullptr;
3296 }
3297 return obj;
3298 }
3299
3300 bool AOTCodeReader::read_oop_metadata_list(JavaThread* thread, ciMethod* target, GrowableArray<Handle> &oop_list, GrowableArray<Metadata*> &metadata_list, OopRecorder* oop_recorder) {
3301 methodHandle comp_method(JavaThread::current(), target->get_Method());
3302 JavaThread* current = JavaThread::current();
3303 uint offset = read_position();
3304 int count = *(int *)addr(offset);
3305 offset += sizeof(int);
3306 set_read_position(offset);
3307 for (int i = 0; i < count; i++) {
3308 oop obj = read_oop(current, comp_method);
3309 if (lookup_failed()) {
3310 return false;
3311 }
3312 Handle h(thread, obj);
3313 oop_list.append(h);
3314 if (oop_recorder != nullptr) {
3315 jobject jo = JNIHandles::make_local(thread, obj);
3316 if (oop_recorder->is_real(jo)) {
3317 oop_recorder->find_index(jo);
3318 } else {
3319 oop_recorder->allocate_oop_index(jo);
3320 }
3321 }
3322 LogStreamHandle(Debug, aot, codecache, oops) log;
3323 if (log.is_enabled()) {
3324 log.print("%d: " INTPTR_FORMAT " ", i, p2i(obj));
3325 if (obj == Universe::non_oop_word()) {
3326 log.print("non-oop word");
3327 } else if (obj == nullptr) {
3328 log.print("nullptr-oop");
3329 } else {
3330 obj->print_value_on(&log);
3331 }
3332 log.cr();
3333 }
3334 }
3335
3336 offset = read_position();
3337 count = *(int *)addr(offset);
3338 offset += sizeof(int);
3339 set_read_position(offset);
3340 for (int i = 0; i < count; i++) {
3341 Metadata* m = read_metadata(comp_method);
3342 if (lookup_failed()) {
3343 return false;
3344 }
3345 metadata_list.append(m);
3346 if (oop_recorder != nullptr) {
3347 if (oop_recorder->is_real(m)) {
3348 oop_recorder->find_index(m);
3349 } else {
3350 oop_recorder->allocate_metadata_index(m);
3351 }
3352 }
3353 LogTarget(Debug, aot, codecache, metadata) log;
3354 if (log.is_enabled()) {
3355 LogStream ls(log);
3356 ls.print("%d: " INTPTR_FORMAT " ", i, p2i(m));
3357 if (m == (Metadata*)Universe::non_oop_word()) {
3358 ls.print("non-metadata word");
3359 } else if (m == nullptr) {
3360 ls.print("nullptr-oop");
3361 } else {
3362 Metadata::print_value_on_maybe_null(&ls, m);
3363 }
3364 ls.cr();
3365 }
3366 }
3367 return true;
3368 }
3369
3370 bool AOTCodeCache::write_oop_map_set(CodeBlob& cb) {
3371 ImmutableOopMapSet* oopmaps = cb.oop_maps();
3372 int oopmaps_size = oopmaps->nr_of_bytes();
3373 if (!write_bytes(&oopmaps_size, sizeof(int))) {
3374 return false;
3375 }
3376 uint n = write_bytes(oopmaps, oopmaps->nr_of_bytes());
3377 if (n != (uint)oopmaps->nr_of_bytes()) {
3378 return false;
3379 }
3380 return true;
3381 }
3382
3383 ImmutableOopMapSet* AOTCodeReader::read_oop_map_set() {
3384 uint offset = read_position();
3385 int size = *(int *)addr(offset);
3386 offset += sizeof(int);
3387 ImmutableOopMapSet* oopmaps = (ImmutableOopMapSet *)addr(offset);
3388 offset += size;
3389 set_read_position(offset);
3390 return oopmaps;
3391 }
3392
3393 bool AOTCodeCache::write_oops(nmethod* nm) {
3394 int count = nm->oops_count()-1;
3395 if (!write_bytes(&count, sizeof(int))) {
3396 return false;
3397 }
3398 for (oop* p = nm->oops_begin(); p < nm->oops_end(); p++) {
3399 if (!write_oop(*p)) {
3400 return false;
3401 }
3402 }
3403 return true;
3404 }
3405
3406 //======================= AOTCodeAddressTable ===============
3407
3408 // address table ids for generated routines, external addresses and C
3409 // string addresses are partitioned into positive integer ranges
3410 // defined by the following positive base and max values
3411 // i.e. [_extrs_base, _extrs_base + _extrs_max -1],
3412 // [_stubs_base, _stubs_base + _stubs_max -1],
3413 // ...
3414 // [_c_str_base, _c_str_base + _c_str_max -1],
3415 #define _extrs_max 80
3416 #define _stubs_max 120
3417 #define _all_blobs_max 100
3418 #define _blobs_max 24
3419 #define _C2_blobs_max 25
3420 #define _C1_blobs_max (_all_blobs_max - _blobs_max - _C2_blobs_max)
3421 #define _all_max 300
3422
3423 #define _extrs_base 0
3424 #define _stubs_base (_extrs_base + _extrs_max)
3425 #define _blobs_base (_stubs_base + _stubs_max)
3426 #define _C1_blobs_base (_blobs_base + _blobs_max)
3427 #define _C2_blobs_base (_C1_blobs_base + _C1_blobs_max)
3428 #if (_C2_blobs_base >= _all_max)
3429 #error AOTCodeAddressTable ranges need adjusting
3430 #endif
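// With the values above the id ranges work out to:
//   _extrs    [  0,  79]
//   _stubs    [ 80, 199]
//   _blobs    [200, 223]   (shared blobs)
//   _C1_blobs [224, 274]
//   _C2_blobs [275, 299]
// all of which fit below _all_max (300), as checked above.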
3431
3432 #define SET_ADDRESS(type, addr) \
3433 { \
3434 type##_addr[type##_length++] = (address) (addr); \
3435 assert(type##_length <= type##_max, "increase size"); \
3436 }
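// For example, SET_ADDRESS(_extrs, Thread::current) expands to
//   _extrs_addr[_extrs_length++] = (address) (Thread::current);
// plus a bounds assert against _extrs_max.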
3437
3438 static bool initializing_extrs = false;
3439
3440 void AOTCodeAddressTable::init_extrs() {
3441 if (_extrs_complete || initializing_extrs) return; // Done already
3442 initializing_extrs = true;
3443 _extrs_addr = NEW_C_HEAP_ARRAY(address, _extrs_max, mtCode);
3444
3445 _extrs_length = 0;
3446 _stubs_length = 0;
3447
3448  // Record addresses of VM runtime methods
3449 SET_ADDRESS(_extrs, SharedRuntime::fixup_callers_callsite);
3450 SET_ADDRESS(_extrs, SharedRuntime::handle_wrong_method);
3451 SET_ADDRESS(_extrs, SharedRuntime::handle_wrong_method_abstract);
3452 SET_ADDRESS(_extrs, SharedRuntime::handle_wrong_method_ic_miss);
3453
3454 #ifdef COMPILER2
3455 SET_ADDRESS(_extrs, OptoRuntime::handle_exception_C);
3456 #endif
3457 #ifdef COMPILER1
3458 SET_ADDRESS(_extrs, Runtime1::is_instance_of);
3459 SET_ADDRESS(_extrs, Runtime1::trace_block_entry);
3460 #endif
3461
3462 SET_ADDRESS(_extrs, CompressedOops::base_addr());
3463
3464 #if INCLUDE_G1GC
3465 SET_ADDRESS(_extrs, G1BarrierSetRuntime::write_ref_field_post_entry);
3466 SET_ADDRESS(_extrs, G1BarrierSetRuntime::write_ref_field_pre_entry);
3467 #endif
3468
3469 #if INCLUDE_SHENANDOAHGC
3470 SET_ADDRESS(_extrs, ShenandoahRuntime::arraycopy_barrier_oop);
3471 SET_ADDRESS(_extrs, ShenandoahRuntime::arraycopy_barrier_narrow_oop);
3472 SET_ADDRESS(_extrs, ShenandoahRuntime::write_ref_field_pre);
3473 SET_ADDRESS(_extrs, ShenandoahRuntime::clone_barrier);
3474 SET_ADDRESS(_extrs, ShenandoahRuntime::load_reference_barrier_strong);
3475 SET_ADDRESS(_extrs, ShenandoahRuntime::load_reference_barrier_strong_narrow);
3476 SET_ADDRESS(_extrs, ShenandoahRuntime::load_reference_barrier_weak);
3477 SET_ADDRESS(_extrs, ShenandoahRuntime::load_reference_barrier_weak_narrow);
3478 SET_ADDRESS(_extrs, ShenandoahRuntime::load_reference_barrier_phantom);
3479 SET_ADDRESS(_extrs, ShenandoahRuntime::load_reference_barrier_phantom_narrow);
3480 #endif
3481
3482 #if INCLUDE_ZGC
3483 SET_ADDRESS(_extrs, ZBarrierSetRuntime::load_barrier_on_phantom_oop_field_preloaded_addr());
3484 #if defined(AMD64)
3485 SET_ADDRESS(_extrs, &ZPointerLoadShift);
3486 #endif
3487 #endif // INCLUDE_ZGC
3488
3489 SET_ADDRESS(_extrs, SharedRuntime::log_jni_monitor_still_held);
3490 SET_ADDRESS(_extrs, SharedRuntime::rc_trace_method_entry);
3491 SET_ADDRESS(_extrs, SharedRuntime::reguard_yellow_pages);
3492 SET_ADDRESS(_extrs, SharedRuntime::dtrace_method_exit);
3493
3494 SET_ADDRESS(_extrs, SharedRuntime::resolve_opt_virtual_call_C);
3495 SET_ADDRESS(_extrs, SharedRuntime::resolve_virtual_call_C);
3496 SET_ADDRESS(_extrs, SharedRuntime::resolve_static_call_C);
3497
3498 SET_ADDRESS(_extrs, SharedRuntime::complete_monitor_unlocking_C);
3499 SET_ADDRESS(_extrs, SharedRuntime::enable_stack_reserved_zone);
3500 #if defined(AMD64) && !defined(ZERO)
3501 SET_ADDRESS(_extrs, SharedRuntime::montgomery_multiply);
3502 SET_ADDRESS(_extrs, SharedRuntime::montgomery_square);
3503 #endif // AMD64
3504 SET_ADDRESS(_extrs, SharedRuntime::d2f);
3505 SET_ADDRESS(_extrs, SharedRuntime::d2i);
3506 SET_ADDRESS(_extrs, SharedRuntime::d2l);
3507 SET_ADDRESS(_extrs, SharedRuntime::dcos);
3508 SET_ADDRESS(_extrs, SharedRuntime::dexp);
3509 SET_ADDRESS(_extrs, SharedRuntime::dlog);
3510 SET_ADDRESS(_extrs, SharedRuntime::dlog10);
3511 SET_ADDRESS(_extrs, SharedRuntime::dpow);
3512 SET_ADDRESS(_extrs, SharedRuntime::dsin);
3513 SET_ADDRESS(_extrs, SharedRuntime::dtan);
3514 SET_ADDRESS(_extrs, SharedRuntime::f2i);
3515 SET_ADDRESS(_extrs, SharedRuntime::f2l);
3516 #ifndef ZERO
3517 SET_ADDRESS(_extrs, SharedRuntime::drem);
3518 SET_ADDRESS(_extrs, SharedRuntime::frem);
3519 #endif
3520 SET_ADDRESS(_extrs, SharedRuntime::l2d);
3521 SET_ADDRESS(_extrs, SharedRuntime::l2f);
3522 SET_ADDRESS(_extrs, SharedRuntime::ldiv);
3523 SET_ADDRESS(_extrs, SharedRuntime::lmul);
3524 SET_ADDRESS(_extrs, SharedRuntime::lrem);
3525 #if INCLUDE_JVMTI
3526 SET_ADDRESS(_extrs, &JvmtiExport::_should_notify_object_alloc);
3527 #endif /* INCLUDE_JVMTI */
3528 BarrierSet* bs = BarrierSet::barrier_set();
3529 if (bs->is_a(BarrierSet::CardTableBarrierSet)) {
3530 SET_ADDRESS(_extrs, ci_card_table_address_as<address>());
3531 }
3532 SET_ADDRESS(_extrs, ThreadIdentifier::unsafe_offset());
3533 SET_ADDRESS(_extrs, Thread::current);
3534
3535 SET_ADDRESS(_extrs, os::javaTimeMillis);
3536 SET_ADDRESS(_extrs, os::javaTimeNanos);
3537
3538 #if INCLUDE_JVMTI
3539 SET_ADDRESS(_extrs, &JvmtiVTMSTransitionDisabler::_VTMS_notify_jvmti_events);
3540 #endif /* INCLUDE_JVMTI */
3541 SET_ADDRESS(_extrs, StubRoutines::crc_table_addr());
3542 #ifndef PRODUCT
3543 SET_ADDRESS(_extrs, &SharedRuntime::_partial_subtype_ctr);
3544 SET_ADDRESS(_extrs, JavaThread::verify_cross_modify_fence_failure);
3545 #endif
3546
3547 #ifndef ZERO
3548 #if defined(AMD64) || defined(AARCH64) || defined(RISCV64)
3549 SET_ADDRESS(_extrs, MacroAssembler::debug64);
3550 #endif
3551 #if defined(AMD64)
3552 SET_ADDRESS(_extrs, StubRoutines::x86::arrays_hashcode_powers_of_31());
3553 #endif
3554 #endif // ZERO
3555
3556 #ifdef COMPILER1
3557 #ifdef X86
3558 SET_ADDRESS(_extrs, LIR_Assembler::float_signmask_pool);
3559 SET_ADDRESS(_extrs, LIR_Assembler::double_signmask_pool);
3560 SET_ADDRESS(_extrs, LIR_Assembler::float_signflip_pool);
3561 SET_ADDRESS(_extrs, LIR_Assembler::double_signflip_pool);
3562 #endif
3563 #endif
3564
3565 // addresses of fields in AOT runtime constants area
3566 address* p = AOTRuntimeConstants::field_addresses_list();
3567 while (*p != nullptr) {
3568 SET_ADDRESS(_extrs, *p++);
3569 }
3570
3571 _extrs_complete = true;
3572 log_info(aot, codecache,init)("External addresses recorded");
3573 }
3574
3575 static bool initializing_early_stubs = false;
3576 void AOTCodeAddressTable::init_early_stubs() {
3577 if (_complete || initializing_early_stubs) return; // Done already
3578 initializing_early_stubs = true;
3579 _stubs_addr = NEW_C_HEAP_ARRAY(address, _stubs_max, mtCode);
3580 _stubs_length = 0;
3581 SET_ADDRESS(_stubs, StubRoutines::forward_exception_entry());
3582 _early_stubs_complete = true;
3583   log_info(aot, codecache,init)("Early stubs recorded");
3584 }
3585
3586 static bool initializing_shared_blobs = false;
3587 void AOTCodeAddressTable::init_shared_blobs() {
3588 if (_complete || initializing_shared_blobs) return; // Done already
3589 initializing_shared_blobs = true;
3590 _blobs_addr = NEW_C_HEAP_ARRAY(address, _all_blobs_max, mtCode);
3591
3592   // Divide the _blobs_addr array into chunks because they can be initialized in parallel
3593   _C1_blobs_addr = _blobs_addr + _blobs_max;        // C1 blobs addresses stored after shared blobs
3594   _C2_blobs_addr = _C1_blobs_addr + _C1_blobs_max; // C2 blobs addresses stored after C1 blobs
3595
3596 _blobs_length = 0; // for shared blobs
3597 _C1_blobs_length = 0;
3598 _C2_blobs_length = 0;
3599
3600 // Blobs
3601 SET_ADDRESS(_blobs, SharedRuntime::get_handle_wrong_method_stub());
3602 SET_ADDRESS(_blobs, SharedRuntime::get_ic_miss_stub());
3603 SET_ADDRESS(_blobs, SharedRuntime::get_resolve_opt_virtual_call_stub());
3604 SET_ADDRESS(_blobs, SharedRuntime::get_resolve_virtual_call_stub());
3605 SET_ADDRESS(_blobs, SharedRuntime::get_resolve_static_call_stub());
3606 SET_ADDRESS(_blobs, SharedRuntime::deopt_blob()->entry_point());
3607 SET_ADDRESS(_blobs, SharedRuntime::polling_page_safepoint_handler_blob()->entry_point());
3608 SET_ADDRESS(_blobs, SharedRuntime::polling_page_return_handler_blob()->entry_point());
3609 #ifdef COMPILER2
3610 SET_ADDRESS(_blobs, SharedRuntime::polling_page_vectors_safepoint_handler_blob()->entry_point());
3611 #endif
3612
3613 assert(_blobs_length <= _blobs_max, "increase _blobs_max to %d", _blobs_length);
3614 log_info(aot, codecache,init)("Early shared blobs recorded");
3615 }
3616
3617 static bool initializing_stubs = false;
3618 void AOTCodeAddressTable::init_stubs() {
3619 if (_complete || initializing_stubs) return; // Done already
3620 initializing_stubs = true;
3621 // final blobs
3622 SET_ADDRESS(_blobs, SharedRuntime::throw_AbstractMethodError_entry());
3623 SET_ADDRESS(_blobs, SharedRuntime::throw_IncompatibleClassChangeError_entry());
3624 SET_ADDRESS(_blobs, SharedRuntime::throw_NullPointerException_at_call_entry());
3625 SET_ADDRESS(_blobs, SharedRuntime::throw_StackOverflowError_entry());
3626 SET_ADDRESS(_blobs, SharedRuntime::throw_delayed_StackOverflowError_entry());
3627
3628 assert(_blobs_length <= _blobs_max, "increase _blobs_max to %d", _blobs_length);
3629
3630 _shared_blobs_complete = true;
3631 log_info(aot, codecache,init)("All shared blobs recorded");
3632
3633 // Stubs
3634 SET_ADDRESS(_stubs, StubRoutines::method_entry_barrier());
3635 /*
3636 SET_ADDRESS(_stubs, StubRoutines::throw_AbstractMethodError_entry());
3637 SET_ADDRESS(_stubs, StubRoutines::throw_IncompatibleClassChangeError_entry());
3638 SET_ADDRESS(_stubs, StubRoutines::throw_NullPointerException_at_call_entry());
3639 SET_ADDRESS(_stubs, StubRoutines::throw_StackOverflowError_entry());
3640 SET_ADDRESS(_stubs, StubRoutines::throw_delayed_StackOverflowError_entry());
3641 */
3642 SET_ADDRESS(_stubs, StubRoutines::atomic_xchg_entry());
3643 SET_ADDRESS(_stubs, StubRoutines::atomic_cmpxchg_entry());
3644 SET_ADDRESS(_stubs, StubRoutines::atomic_cmpxchg_long_entry());
3645 SET_ADDRESS(_stubs, StubRoutines::atomic_add_entry());
3646 SET_ADDRESS(_stubs, StubRoutines::fence_entry());
3647
3648 SET_ADDRESS(_stubs, StubRoutines::cont_thaw());
3649 SET_ADDRESS(_stubs, StubRoutines::cont_returnBarrier());
3650 SET_ADDRESS(_stubs, StubRoutines::cont_returnBarrierExc());
3651
3652 JFR_ONLY(SET_ADDRESS(_stubs, SharedRuntime::jfr_write_checkpoint());)
3653
3654
3655 SET_ADDRESS(_stubs, StubRoutines::jbyte_arraycopy());
3656 SET_ADDRESS(_stubs, StubRoutines::jshort_arraycopy());
3657 SET_ADDRESS(_stubs, StubRoutines::jint_arraycopy());
3658 SET_ADDRESS(_stubs, StubRoutines::jlong_arraycopy());
3659 SET_ADDRESS(_stubs, StubRoutines::_oop_arraycopy);
3660 SET_ADDRESS(_stubs, StubRoutines::_oop_arraycopy_uninit);
3661
3662 SET_ADDRESS(_stubs, StubRoutines::jbyte_disjoint_arraycopy());
3663 SET_ADDRESS(_stubs, StubRoutines::jshort_disjoint_arraycopy());
3664 SET_ADDRESS(_stubs, StubRoutines::jint_disjoint_arraycopy());
3665 SET_ADDRESS(_stubs, StubRoutines::jlong_disjoint_arraycopy());
3666 SET_ADDRESS(_stubs, StubRoutines::_oop_disjoint_arraycopy);
3667 SET_ADDRESS(_stubs, StubRoutines::_oop_disjoint_arraycopy_uninit);
3668
3669 SET_ADDRESS(_stubs, StubRoutines::arrayof_jbyte_arraycopy());
3670 SET_ADDRESS(_stubs, StubRoutines::arrayof_jshort_arraycopy());
3671 SET_ADDRESS(_stubs, StubRoutines::arrayof_jint_arraycopy());
3672 SET_ADDRESS(_stubs, StubRoutines::arrayof_jlong_arraycopy());
3673 SET_ADDRESS(_stubs, StubRoutines::_arrayof_oop_arraycopy);
3674 SET_ADDRESS(_stubs, StubRoutines::_arrayof_oop_arraycopy_uninit);
3675
3676 SET_ADDRESS(_stubs, StubRoutines::arrayof_jbyte_disjoint_arraycopy());
3677 SET_ADDRESS(_stubs, StubRoutines::arrayof_jshort_disjoint_arraycopy());
3678 SET_ADDRESS(_stubs, StubRoutines::arrayof_jint_disjoint_arraycopy());
3679 SET_ADDRESS(_stubs, StubRoutines::arrayof_jlong_disjoint_arraycopy());
3680 SET_ADDRESS(_stubs, StubRoutines::_arrayof_oop_disjoint_arraycopy);
3681 SET_ADDRESS(_stubs, StubRoutines::_arrayof_oop_disjoint_arraycopy_uninit);
3682
3683 SET_ADDRESS(_stubs, StubRoutines::_checkcast_arraycopy);
3684 SET_ADDRESS(_stubs, StubRoutines::_checkcast_arraycopy_uninit);
3685
3686 SET_ADDRESS(_stubs, StubRoutines::unsafe_arraycopy());
3687 SET_ADDRESS(_stubs, StubRoutines::generic_arraycopy());
3688
3689 SET_ADDRESS(_stubs, StubRoutines::jbyte_fill());
3690 SET_ADDRESS(_stubs, StubRoutines::jshort_fill());
3691 SET_ADDRESS(_stubs, StubRoutines::jint_fill());
3692 SET_ADDRESS(_stubs, StubRoutines::arrayof_jbyte_fill());
3693 SET_ADDRESS(_stubs, StubRoutines::arrayof_jshort_fill());
3694 SET_ADDRESS(_stubs, StubRoutines::arrayof_jint_fill());
3695
3696 SET_ADDRESS(_stubs, StubRoutines::data_cache_writeback());
3697 SET_ADDRESS(_stubs, StubRoutines::data_cache_writeback_sync());
3698
3699 SET_ADDRESS(_stubs, StubRoutines::aescrypt_encryptBlock());
3700 SET_ADDRESS(_stubs, StubRoutines::aescrypt_decryptBlock());
3701 SET_ADDRESS(_stubs, StubRoutines::cipherBlockChaining_encryptAESCrypt());
3702 SET_ADDRESS(_stubs, StubRoutines::cipherBlockChaining_decryptAESCrypt());
3703 SET_ADDRESS(_stubs, StubRoutines::electronicCodeBook_encryptAESCrypt());
3704 SET_ADDRESS(_stubs, StubRoutines::electronicCodeBook_decryptAESCrypt());
3705 SET_ADDRESS(_stubs, StubRoutines::poly1305_processBlocks());
3706 SET_ADDRESS(_stubs, StubRoutines::counterMode_AESCrypt());
3707 SET_ADDRESS(_stubs, StubRoutines::ghash_processBlocks());
3708 SET_ADDRESS(_stubs, StubRoutines::chacha20Block());
3709 SET_ADDRESS(_stubs, StubRoutines::base64_encodeBlock());
3710 SET_ADDRESS(_stubs, StubRoutines::base64_decodeBlock());
3711 SET_ADDRESS(_stubs, StubRoutines::md5_implCompress());
3712 SET_ADDRESS(_stubs, StubRoutines::md5_implCompressMB());
3713 SET_ADDRESS(_stubs, StubRoutines::sha1_implCompress());
3714 SET_ADDRESS(_stubs, StubRoutines::sha1_implCompressMB());
3715 SET_ADDRESS(_stubs, StubRoutines::sha256_implCompress());
3716 SET_ADDRESS(_stubs, StubRoutines::sha256_implCompressMB());
3717 SET_ADDRESS(_stubs, StubRoutines::sha512_implCompress());
3718 SET_ADDRESS(_stubs, StubRoutines::sha512_implCompressMB());
3719 SET_ADDRESS(_stubs, StubRoutines::sha3_implCompress());
3720 SET_ADDRESS(_stubs, StubRoutines::sha3_implCompressMB());
3721
3722 SET_ADDRESS(_stubs, StubRoutines::updateBytesCRC32());
3723
3724 SET_ADDRESS(_stubs, StubRoutines::crc32c_table_addr());
3725 SET_ADDRESS(_stubs, StubRoutines::updateBytesCRC32C());
3726 SET_ADDRESS(_stubs, StubRoutines::updateBytesAdler32());
3727
3728 SET_ADDRESS(_stubs, StubRoutines::multiplyToLen());
3729 SET_ADDRESS(_stubs, StubRoutines::squareToLen());
3730 SET_ADDRESS(_stubs, StubRoutines::mulAdd());
3731 SET_ADDRESS(_stubs, StubRoutines::montgomeryMultiply());
3732 SET_ADDRESS(_stubs, StubRoutines::montgomerySquare());
3733 SET_ADDRESS(_stubs, StubRoutines::bigIntegerRightShift());
3734 SET_ADDRESS(_stubs, StubRoutines::bigIntegerLeftShift());
3735 SET_ADDRESS(_stubs, StubRoutines::galoisCounterMode_AESCrypt());
3736
3737 SET_ADDRESS(_stubs, StubRoutines::vectorizedMismatch());
3738
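  // Transcendental math stubs (dexp, dlog, dlog10, dpow, dsin, dcos, dtan)
  // and libm helper routines, plus the float16 conversion stubs below.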
3739 SET_ADDRESS(_stubs, StubRoutines::dexp());
3740 SET_ADDRESS(_stubs, StubRoutines::dlog());
3741 SET_ADDRESS(_stubs, StubRoutines::dlog10());
3742 SET_ADDRESS(_stubs, StubRoutines::dpow());
3743 SET_ADDRESS(_stubs, StubRoutines::dsin());
3744 SET_ADDRESS(_stubs, StubRoutines::dcos());
3745 SET_ADDRESS(_stubs, StubRoutines::dlibm_reduce_pi04l());
3746 SET_ADDRESS(_stubs, StubRoutines::dlibm_sin_cos_huge());
3747 SET_ADDRESS(_stubs, StubRoutines::dlibm_tan_cot_huge());
3748 SET_ADDRESS(_stubs, StubRoutines::dtan());
3749
3750 SET_ADDRESS(_stubs, StubRoutines::f2hf_adr());
3751 SET_ADDRESS(_stubs, StubRoutines::hf2f_adr());
3752
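  // Platform-specific stub and constant table addresses; each group is only
  // recorded when running on the corresponding architecture.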
3753 #if defined(AMD64) && !defined(ZERO)
3754 SET_ADDRESS(_stubs, StubRoutines::x86::d2i_fixup());
3755 SET_ADDRESS(_stubs, StubRoutines::x86::f2i_fixup());
3756 SET_ADDRESS(_stubs, StubRoutines::x86::d2l_fixup());
3757 SET_ADDRESS(_stubs, StubRoutines::x86::f2l_fixup());
3758 SET_ADDRESS(_stubs, StubRoutines::x86::float_sign_mask());
3759 SET_ADDRESS(_stubs, StubRoutines::x86::float_sign_flip());
3760 SET_ADDRESS(_stubs, StubRoutines::x86::double_sign_mask());
3761 SET_ADDRESS(_stubs, StubRoutines::x86::double_sign_flip());
3762 SET_ADDRESS(_stubs, StubRoutines::x86::vector_popcount_lut());
3763 SET_ADDRESS(_stubs, StubRoutines::x86::vector_float_sign_mask());
3764 SET_ADDRESS(_stubs, StubRoutines::x86::vector_float_sign_flip());
3765 SET_ADDRESS(_stubs, StubRoutines::x86::vector_double_sign_mask());
3766 SET_ADDRESS(_stubs, StubRoutines::x86::vector_double_sign_flip());
3767 SET_ADDRESS(_stubs, StubRoutines::x86::vector_reverse_byte_perm_mask_int());
3768 SET_ADDRESS(_stubs, StubRoutines::x86::vector_reverse_byte_perm_mask_short());
3769 SET_ADDRESS(_stubs, StubRoutines::x86::vector_reverse_byte_perm_mask_long());
3770 // The iota indices are ordered by type B/S/I/L/F/D, and the offset between two types is 64.
3771 // See C2_MacroAssembler::load_iota_indices().
3772 for (int i = 0; i < 6; i++) {
3773 SET_ADDRESS(_stubs, StubRoutines::x86::vector_iota_indices() + i * 64);
3774 }
3775 #endif
3776 #if defined(AARCH64) && !defined(ZERO)
3777 SET_ADDRESS(_stubs, StubRoutines::aarch64::zero_blocks());
3778 SET_ADDRESS(_stubs, StubRoutines::aarch64::count_positives());
3779 SET_ADDRESS(_stubs, StubRoutines::aarch64::count_positives_long());
3780 SET_ADDRESS(_stubs, StubRoutines::aarch64::large_array_equals());
3781 SET_ADDRESS(_stubs, StubRoutines::aarch64::compare_long_string_LL());
3782 SET_ADDRESS(_stubs, StubRoutines::aarch64::compare_long_string_UU());
3783 SET_ADDRESS(_stubs, StubRoutines::aarch64::compare_long_string_LU());
3784 SET_ADDRESS(_stubs, StubRoutines::aarch64::compare_long_string_UL());
3785 SET_ADDRESS(_stubs, StubRoutines::aarch64::string_indexof_linear_ul());
3786 SET_ADDRESS(_stubs, StubRoutines::aarch64::string_indexof_linear_ll());
3787 SET_ADDRESS(_stubs, StubRoutines::aarch64::string_indexof_linear_uu());
3788 SET_ADDRESS(_stubs, StubRoutines::aarch64::large_byte_array_inflate());
3789 SET_ADDRESS(_stubs, StubRoutines::aarch64::spin_wait());
3790
3791 SET_ADDRESS(_stubs, StubRoutines::aarch64::large_arrays_hashcode(T_BOOLEAN));
3792 SET_ADDRESS(_stubs, StubRoutines::aarch64::large_arrays_hashcode(T_BYTE));
3793 SET_ADDRESS(_stubs, StubRoutines::aarch64::large_arrays_hashcode(T_SHORT));
3794 SET_ADDRESS(_stubs, StubRoutines::aarch64::large_arrays_hashcode(T_CHAR));
3795 SET_ADDRESS(_stubs, StubRoutines::aarch64::large_arrays_hashcode(T_INT));
3796 #endif
3797
3798 _complete = true;
3799   log_info(aot, codecache, init)("Stubs recorded");
3800 }
3801
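// Record entry points of C2 (OptoRuntime) blobs. These addresses are looked up
// by id_for_address() at dump time and resolved back by address_for_id() when
// AOT code is loaded.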
3802 void AOTCodeAddressTable::init_opto() {
3803 #ifdef COMPILER2
3804 // OptoRuntime Blobs
3805 SET_ADDRESS(_C2_blobs, OptoRuntime::uncommon_trap_blob()->entry_point());
3806 SET_ADDRESS(_C2_blobs, OptoRuntime::exception_blob()->entry_point());
3807 SET_ADDRESS(_C2_blobs, OptoRuntime::new_instance_Java());
3808 SET_ADDRESS(_C2_blobs, OptoRuntime::new_array_Java());
3809 SET_ADDRESS(_C2_blobs, OptoRuntime::new_array_nozero_Java());
3810 SET_ADDRESS(_C2_blobs, OptoRuntime::multianewarray2_Java());
3811 SET_ADDRESS(_C2_blobs, OptoRuntime::multianewarray3_Java());
3812 SET_ADDRESS(_C2_blobs, OptoRuntime::multianewarray4_Java());
3813 SET_ADDRESS(_C2_blobs, OptoRuntime::multianewarray5_Java());
3814 SET_ADDRESS(_C2_blobs, OptoRuntime::multianewarrayN_Java());
3815 SET_ADDRESS(_C2_blobs, OptoRuntime::vtable_must_compile_stub());
3816 SET_ADDRESS(_C2_blobs, OptoRuntime::complete_monitor_locking_Java());
3817 SET_ADDRESS(_C2_blobs, OptoRuntime::monitor_notify_Java());
3818 SET_ADDRESS(_C2_blobs, OptoRuntime::monitor_notifyAll_Java());
3819 SET_ADDRESS(_C2_blobs, OptoRuntime::rethrow_stub());
3820 SET_ADDRESS(_C2_blobs, OptoRuntime::slow_arraycopy_Java());
3821 SET_ADDRESS(_C2_blobs, OptoRuntime::register_finalizer_Java());
3822 SET_ADDRESS(_C2_blobs, OptoRuntime::class_init_barrier_Java());
3823 #if INCLUDE_JVMTI
3824 SET_ADDRESS(_C2_blobs, OptoRuntime::notify_jvmti_vthread_start());
3825 SET_ADDRESS(_C2_blobs, OptoRuntime::notify_jvmti_vthread_end());
3826 SET_ADDRESS(_C2_blobs, OptoRuntime::notify_jvmti_vthread_mount());
3827 SET_ADDRESS(_C2_blobs, OptoRuntime::notify_jvmti_vthread_unmount());
3828 #endif /* INCLUDE_JVMTI */
3829 #endif
3830
3831 assert(_C2_blobs_length <= _C2_blobs_max, "increase _C2_blobs_max to %d", _C2_blobs_length);
3832 _opto_complete = true;
3833   log_info(aot, codecache, init)("OptoRuntime Blobs recorded");
3834 }
3835
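// Record C1 (Runtime1) stub entry points, plus the C1 barrier blobs of the
// garbage collector in use (G1, Z or Shenandoah).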
3836 void AOTCodeAddressTable::init_c1() {
3837 #ifdef COMPILER1
3838 // Runtime1 Blobs
3839 for (int i = 0; i < (int)(C1StubId::NUM_STUBIDS); i++) {
3840 C1StubId id = (C1StubId)i;
3841 if (Runtime1::blob_for(id) == nullptr) {
3842 log_info(aot, codecache, init)("C1 blob %s is missing", Runtime1::name_for(id));
3843 continue;
3844 }
3845 if (Runtime1::entry_for(id) == nullptr) {
3846 log_info(aot, codecache, init)("C1 blob %s is missing entry", Runtime1::name_for(id));
3847 continue;
3848 }
3849 address entry = Runtime1::entry_for(id);
3850 SET_ADDRESS(_C1_blobs, entry);
3851 }
3852 #if INCLUDE_G1GC
3853 if (UseG1GC) {
3854 G1BarrierSetC1* bs = (G1BarrierSetC1*)BarrierSet::barrier_set()->barrier_set_c1();
3855 address entry = bs->pre_barrier_c1_runtime_code_blob()->code_begin();
3856 SET_ADDRESS(_C1_blobs, entry);
3857 entry = bs->post_barrier_c1_runtime_code_blob()->code_begin();
3858 SET_ADDRESS(_C1_blobs, entry);
3859 }
3860 #endif // INCLUDE_G1GC
3861 #if INCLUDE_ZGC
3862 if (UseZGC) {
3863 ZBarrierSetC1* bs = (ZBarrierSetC1*)BarrierSet::barrier_set()->barrier_set_c1();
3864 SET_ADDRESS(_C1_blobs, bs->_load_barrier_on_oop_field_preloaded_runtime_stub);
3865 SET_ADDRESS(_C1_blobs, bs->_load_barrier_on_weak_oop_field_preloaded_runtime_stub);
3866 SET_ADDRESS(_C1_blobs, bs->_store_barrier_on_oop_field_with_healing);
3867 SET_ADDRESS(_C1_blobs, bs->_store_barrier_on_oop_field_without_healing);
3868 }
3869 #endif // INCLUDE_ZGC
3870 #if INCLUDE_SHENANDOAHGC
3871 if (UseShenandoahGC) {
3872 ShenandoahBarrierSetC1* bs = (ShenandoahBarrierSetC1*)BarrierSet::barrier_set()->barrier_set_c1();
3873 SET_ADDRESS(_C1_blobs, bs->pre_barrier_c1_runtime_code_blob()->code_begin());
3874 SET_ADDRESS(_C1_blobs, bs->load_reference_barrier_strong_rt_code_blob()->code_begin());
3875 SET_ADDRESS(_C1_blobs, bs->load_reference_barrier_strong_native_rt_code_blob()->code_begin());
3876 SET_ADDRESS(_C1_blobs, bs->load_reference_barrier_weak_rt_code_blob()->code_begin());
3877 SET_ADDRESS(_C1_blobs, bs->load_reference_barrier_phantom_rt_code_blob()->code_begin());
3878 }
3879 #endif // INCLUDE_SHENANDOAHGC
3880 #endif // COMPILER1
3881
3882 assert(_C1_blobs_length <= _C1_blobs_max, "increase _C1_blobs_max to %d", _C1_blobs_length);
3883 _c1_complete = true;
3884   log_info(aot, codecache, init)("Runtime1 Blobs recorded");
3885 }
3886
3887 #undef SET_ADDRESS
3888
3889 AOTCodeAddressTable::~AOTCodeAddressTable() {
3890 if (_extrs_addr != nullptr) {
3891 FREE_C_HEAP_ARRAY(address, _extrs_addr);
3892 }
3893 if (_stubs_addr != nullptr) {
3894 FREE_C_HEAP_ARRAY(address, _stubs_addr);
3895 }
3896 if (_blobs_addr != nullptr) {
3897 FREE_C_HEAP_ARRAY(address, _blobs_addr);
3898 }
3899 }
3900
3901 #ifdef PRODUCT
3902 #define MAX_STR_COUNT 200
3903 #else
3904 #define MAX_STR_COUNT 500
3905 #endif
3906
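// Table of C string constants referenced from code. String ids occupy the
// range [_c_str_base, _c_str_base + _c_str_max) on top of the address table ids.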
3907 #define _c_str_max MAX_STR_COUNT
3908 #define _c_str_base _all_max
3909
3910 static const char* _C_strings_in[MAX_STR_COUNT] = {nullptr}; // Incoming strings
3911 static const char* _C_strings[MAX_STR_COUNT] = {nullptr}; // Our duplicates
3912 static int _C_strings_count = 0;
3913 static int _C_strings_s[MAX_STR_COUNT] = {0};
3914 static int _C_strings_id[MAX_STR_COUNT] = {0};
3915 static int _C_strings_used = 0;
3916
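// Load the C string table from the mapped cache: an array of string lengths is
// stored first, followed by the string bytes.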
3917 void AOTCodeCache::load_strings() {
3918 uint strings_count = _load_header->strings_count();
3919 if (strings_count == 0) {
3920 return;
3921 }
3922 uint strings_offset = _load_header->strings_offset();
3923 uint* string_lengths = (uint*)addr(strings_offset);
3924 strings_offset += (strings_count * sizeof(uint));
3925 uint strings_size = _load_header->entries_offset() - strings_offset;
3926   // We have to keep the cached strings alive longer than the _cache buffer
4044
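// Map a table id back to a runtime address. Ids are grouped into consecutive
// ranges: external runtime entries, stubs, shared blobs, C1 blobs, C2 blobs
// and C strings; ids beyond the C string range encode offsets from os::init.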
4045 address AOTCodeAddressTable::address_for_id(int idx) {
4046 if (!_extrs_complete) {
4047 fatal("AOT Code Cache VM runtime addresses table is not complete");
4048 }
4049 if (idx == -1) {
4050 return (address)-1;
4051 }
4052 uint id = (uint)idx;
4053   // Special case: ids beyond the address table range encode offsets relative to os::init
4054 if (id > (_c_str_base + _c_str_max)) {
4055 return (address)os::init + idx;
4056 }
4057 if (idx < 0) {
4058 fatal("Incorrect id %d for AOT Code Cache addresses table", id);
4059 }
4060 // no need to compare unsigned id against 0
4061 if (/* id >= _extrs_base && */ id < _extrs_length) {
4062 return _extrs_addr[id - _extrs_base];
4063 }
4064 if (id >= _stubs_base && id < _stubs_base + _stubs_length) {
4065 return _stubs_addr[id - _stubs_base];
4066 }
4067 if (id >= _blobs_base && id < _blobs_base + _blobs_length) {
4068 return _blobs_addr[id - _blobs_base];
4069 }
4070 if (id >= _C1_blobs_base && id < _C1_blobs_base + _C1_blobs_length) {
4071 return _C1_blobs_addr[id - _C1_blobs_base];
4072 }
4073 if (id >= _C2_blobs_base && id < _C2_blobs_base + _C2_blobs_length) {
4074 return _C2_blobs_addr[id - _C2_blobs_base];
4075 }
4076 if (id >= _c_str_base && id < (_c_str_base + (uint)_C_strings_count)) {
4077 return address_for_C_string(id - _c_str_base);
4078 }
4079 fatal("Incorrect id %d for AOT Code Cache addresses table", id);
4080 return nullptr;
4081 }
4082
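// Reverse lookup used at dump time: find the table id for an address seen in a
// relocation. The search order is C strings, stubs, code blobs (shared, C1, C2),
// then external runtime entries; addresses inside the VM image that fall at a
// nonzero offset within a symbol are encoded as a distance from os::init
// (presumed C string constants).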
4083 int AOTCodeAddressTable::id_for_address(address addr, RelocIterator reloc, CodeBuffer* buffer, CodeBlob* blob) {
4084 if (!_extrs_complete) {
4085 fatal("AOT Code Cache VM runtime addresses table is not complete");
4086 }
4087 int id = -1;
4088 if (addr == (address)-1) { // Static call stub has jump to itself
4089 return id;
4090 }
4091   // Search for C string
4092 id = id_for_C_string(addr);
4093 if (id >= 0) {
4094 return id + _c_str_base;
4095 }
4096 if (StubRoutines::contains(addr)) {
4097 // Search in stubs
4098 id = search_address(addr, _stubs_addr, _stubs_length);
4099 if (id < 0) {
4100 StubCodeDesc* desc = StubCodeDesc::desc_for(addr);
4101 if (desc == nullptr) {
4102 desc = StubCodeDesc::desc_for(addr + frame::pc_return_offset);
4103 }
4104       const char* stub_name = (desc != nullptr) ? desc->name() : "<unknown>";
4105       fatal("Address " INTPTR_FORMAT " for Stub:%s is missing in AOT Code Cache addresses table", p2i(addr), stub_name);
4106 } else {
4107 return _stubs_base + id;
4108 }
4109 } else {
4110 CodeBlob* cb = CodeCache::find_blob(addr);
4111 if (cb != nullptr) {
4112 int id_base = _blobs_base;
4113 // Search in code blobs
4114 id = search_address(addr, _blobs_addr, _blobs_length);
4115 if (id == -1) {
4116 id_base = _C1_blobs_base;
4117 // search C1 blobs
4118 id = search_address(addr, _C1_blobs_addr, _C1_blobs_length);
4119 }
4120 if (id == -1) {
4121 id_base = _C2_blobs_base;
4122 // search C2 blobs
4123 id = search_address(addr, _C2_blobs_addr, _C2_blobs_length);
4124 }
4125 if (id < 0) {
4126 fatal("Address " INTPTR_FORMAT " for Blob:%s is missing in AOT Code Cache addresses table", p2i(addr), cb->name());
4127 } else {
4128 return id_base + id;
4129 }
4130 } else {
4131 // Search in runtime functions
4132 id = search_address(addr, _extrs_addr, _extrs_length);
4133 if (id < 0) {
4134 ResourceMark rm;
4135 const int buflen = 1024;
4136 char* func_name = NEW_RESOURCE_ARRAY(char, buflen);
4137 int offset = 0;
4138 if (os::dll_address_to_function_name(addr, func_name, buflen, &offset)) {
4139 if (offset > 0) {
4140 // Could be address of C string
4141 uint dist = (uint)pointer_delta(addr, (address)os::init, 1);
4142 CompileTask* task = ciEnv::current()->task();
4143 uint compile_id = 0;
4144           uint comp_level = 0;
4145 if (task != nullptr) { // this could be called from compiler runtime initialization (compiler blobs)
4146 compile_id = task->compile_id();
4147 comp_level = task->comp_level();
4148 }
4149 log_debug(aot, codecache)("%d (L%d): Address " INTPTR_FORMAT " (offset %d) for runtime target '%s' is missing in AOT Code Cache addresses table",
4150 compile_id, comp_level, p2i(addr), dist, (const char*)addr);
4151 assert(dist > (uint)(_all_max + MAX_STR_COUNT), "change encoding of distance");
4152 return dist;
4153 }
4154 reloc.print_current_on(tty);
4155 blob->print_on(tty);
4156 blob->print_code_on(tty);
4157 fatal("Address " INTPTR_FORMAT " for runtime target '%s+%d' is missing in AOT Code Cache addresses table", p2i(addr), func_name, offset);
4158 } else {
4159 reloc.print_current_on(tty);
4160 #ifndef PRODUCT
4161 if (buffer != nullptr) {
4162 buffer->print_on(tty);
4163 buffer->decode();
4164 }
4165 if (blob != nullptr) {
4166 blob->print_on(tty);
4167 blob->print_code_on(tty);
4168 }
4169 #endif // !PRODUCT
4170 os::find(addr, tty);
4171 fatal("Address " INTPTR_FORMAT " for <unknown>/('%s') is missing in AOT Code Cache addresses table", p2i(addr), (const char*)addr);
4172 }
4173 } else {
4174 return _extrs_base + id;
4175 }
4176 }
4177 }
4178 return id;
4179 }
4180
4181 #undef _extrs_max
4182 #undef _stubs_max
4183 #undef _all_blobs_max
4184 #undef _blobs_max
4185 #undef _C1_blobs_max
4186 #undef _C2_blobs_max
4187 #undef _extrs_base
4188 #undef _stubs_base
4189 #undef _blobs_base
4190 #undef _C1_blobs_base
4191 #undef _C2_blobs_base
4192 #undef _c_str_base
4193
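// Snapshot card-table constants (grain and card shifts) from the active
// barrier set; their field addresses are published via _field_addresses_list
// below so generated code can reference them indirectly.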
4194 void AOTRuntimeConstants::initialize_from_runtime() {
4195 BarrierSet* bs = BarrierSet::barrier_set();
4196 if (bs->is_a(BarrierSet::CardTableBarrierSet)) {
4197 CardTableBarrierSet* ctbs = ((CardTableBarrierSet*)bs);
4198 _aot_runtime_constants._grain_shift = ctbs->grain_shift();
4199 _aot_runtime_constants._card_shift = ctbs->card_shift();
4200 }
4201 }
4202
4203 AOTRuntimeConstants AOTRuntimeConstants::_aot_runtime_constants;
4204
4205 address AOTRuntimeConstants::_field_addresses_list[] = {
4206 grain_shift_address(),
4207 card_shift_address(),
4208 nullptr
4209 };
4210
4211
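// Reader/closer protocol for the nmethod part of the cache:
// _nmethod_readers >= 0 means the cache is open with that many active readers;
// a negative value -(N+1) means it is closing with N readers still inside,
// and the count moves up toward -1 as they leave.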
4212 void AOTCodeCache::wait_for_no_nmethod_readers() {
4213 while (true) {
4214 int cur = Atomic::load(&_nmethod_readers);
4215 int upd = -(cur + 1);
4216 if (cur >= 0 && Atomic::cmpxchg(&_nmethod_readers, cur, upd) == cur) {
4217 // Success, no new readers should appear.
4218 break;
4219 }
4220 }
4221
4222 // Now wait for all readers to leave.
4223 SpinYield w;
4224 while (Atomic::load(&_nmethod_readers) != -1) {
4225 w.wait();
4226 }
4227 }
4228
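// ReadingMark registers the current thread as a reader; typical usage
// (as in print_statistics_on() and print_on() below):
//   ReadingMark rdmk;
//   if (rdmk.failed()) return; // cache is already closed
//   ... read from the mapped cache ...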
4229 AOTCodeCache::ReadingMark::ReadingMark() {
4230 while (true) {
4231 int cur = Atomic::load(&_nmethod_readers);
4232 if (cur < 0) {
4233 // Cache is already closed, cannot proceed.
4234 _failed = true;
4235 return;
4236 }
4237 if (Atomic::cmpxchg(&_nmethod_readers, cur, cur + 1) == cur) {
4238 // Successfully recorded ourselves as entered.
4239 _failed = false;
4240 return;
4241 }
4242 }
4243 }
4244
4245 AOTCodeCache::ReadingMark::~ReadingMark() {
4246 if (_failed) {
4247 return;
4248 }
4249 while (true) {
4250 int cur = Atomic::load(&_nmethod_readers);
4251 if (cur > 0) {
4252 // Cache is open, we are counting down towards 0.
4253 if (Atomic::cmpxchg(&_nmethod_readers, cur, cur - 1) == cur) {
4254 return;
4255 }
4256 } else {
4257 // Cache is closed, we are counting up towards -1.
4258 if (Atomic::cmpxchg(&_nmethod_readers, cur, cur + 1) == cur) {
4259 return;
4260 }
4261 }
4262 }
4263 }
4264
4265 void AOTCodeCache::print_timers_on(outputStream* st) {
4266 if (is_using_code()) {
4267 st->print_cr (" AOT Code Load Time: %7.3f s", _t_totalLoad.seconds());
4268 st->print_cr (" nmethod register: %7.3f s", _t_totalRegister.seconds());
4269 st->print_cr (" find cached code: %7.3f s", _t_totalFind.seconds());
4270 }
4271 if (is_dumping_code()) {
4272 st->print_cr (" AOT Code Store Time: %7.3f s", _t_totalStore.seconds());
4273 }
4274 }
4275
4276 AOTCodeStats AOTCodeStats::add_aot_code_stats(AOTCodeStats stats1, AOTCodeStats stats2) {
4277 AOTCodeStats result;
4278 for (int kind = AOTCodeEntry::None; kind < AOTCodeEntry::Kind_count; kind++) {
4279 result.ccstats._kind_cnt[kind] = stats1.entry_count(kind) + stats2.entry_count(kind);
4280 }
4281
4282 for (int lvl = CompLevel_none; lvl < AOTCompLevel_count; lvl++) {
4283 result.ccstats._nmethod_cnt[lvl] = stats1.nmethod_count(lvl) + stats2.nmethod_count(lvl);
4284 }
4285 result.ccstats._clinit_barriers_cnt = stats1.clinit_barriers_count() + stats2.clinit_barriers_count();
4286 return result;
4287 }
4288
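// On exit, log how many entries the cache holds: entries carried over from a
// previously loaded cache ("old") plus entries stored during this run ("new").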
4289 void AOTCodeCache::log_stats_on_exit() {
4290 LogStreamHandle(Info, aot, codecache, exit) log;
4291 if (log.is_enabled()) {
4292 AOTCodeStats prev_stats;
4293 AOTCodeStats current_stats;
4294 AOTCodeStats total_stats;
4295 uint max_size = 0;
4296
4297 uint load_count = (_load_header != nullptr) ? _load_header->entries_count() : 0;
4298
4299 for (uint i = 0; i < load_count; i++) {
4300 prev_stats.collect_entry_stats(&_load_entries[i]);
4301 if (max_size < _load_entries[i].size()) {
4302 max_size = _load_entries[i].size();
4303 }
4304 }
4305 for (uint i = 0; i < _store_entries_cnt; i++) {
4306 current_stats.collect_entry_stats(&_store_entries[i]);
4307 if (max_size < _store_entries[i].size()) {
4308 max_size = _store_entries[i].size();
4309 }
4310 }
4311 total_stats = AOTCodeStats::add_aot_code_stats(prev_stats, current_stats);
4312
4313     log.print_cr("Wrote %d AOTCodeEntry entries (%u max size) to AOT Code Cache",
4314 total_stats.total_count(), max_size);
4315 for (uint kind = AOTCodeEntry::None; kind < AOTCodeEntry::Kind_count; kind++) {
4316 if (total_stats.entry_count(kind) > 0) {
4317 log.print_cr(" %s: total=%u(old=%u+new=%u)",
4318 aot_code_entry_kind_name[kind], total_stats.entry_count(kind), prev_stats.entry_count(kind), current_stats.entry_count(kind));
4319 if (kind == AOTCodeEntry::Code) {
4320 for (uint lvl = CompLevel_none; lvl < AOTCompLevel_count; lvl++) {
4321 if (total_stats.nmethod_count(lvl) > 0) {
4322 log.print_cr(" Tier %d: total=%u(old=%u+new=%u)",
4323 lvl, total_stats.nmethod_count(lvl), prev_stats.nmethod_count(lvl), current_stats.nmethod_count(lvl));
4324 }
4325 }
4326 }
4327 }
4328 }
4329 log.print_cr("Total=%u(old=%u+new=%u)", total_stats.total_count(), prev_stats.total_count(), current_stats.total_count());
4330 }
4331 }
4332
4333 static void print_helper1(outputStream* st, const char* name, int count) {
4334 if (count > 0) {
4335 st->print(" %s=%d", name, count);
4336 }
4337 }
4338
4339 void AOTCodeCache::print_statistics_on(outputStream* st) {
4340 AOTCodeCache* cache = open_for_use();
4341 if (cache != nullptr) {
4342 ReadingMark rdmk;
4343 if (rdmk.failed()) {
4344 // Cache is closed, cannot touch anything.
4345 return;
4346 }
4347
4348 uint count = cache->_load_header->entries_count();
4349 uint* search_entries = (uint*)cache->addr(cache->_load_header->entries_offset()); // [id, index]
4350 AOTCodeEntry* load_entries = (AOTCodeEntry*)(search_entries + 2 * count);
4351
4352 AOTCodeStats stats;
4353 for (uint i = 0; i < count; i++) {
4354 stats.collect_all_stats(&load_entries[i]);
4355 }
4356
4357 for (uint kind = AOTCodeEntry::None; kind < AOTCodeEntry::Kind_count; kind++) {
4358 if (stats.entry_count(kind) > 0) {
4359 st->print(" %s:", aot_code_entry_kind_name[kind]);
4360 print_helper1(st, "total", stats.entry_count(kind));
4361 print_helper1(st, "loaded", stats.entry_loaded_count(kind));
4362 print_helper1(st, "invalidated", stats.entry_invalidated_count(kind));
4363 print_helper1(st, "failed", stats.entry_load_failed_count(kind));
4364 st->cr();
4365 }
4366 if (kind == AOTCodeEntry::Code) {
4367 for (uint lvl = CompLevel_none; lvl < AOTCompLevel_count; lvl++) {
4368 if (stats.nmethod_count(lvl) > 0) {
4369 st->print(" AOT Code T%d", lvl);
4370 print_helper1(st, "total", stats.nmethod_count(lvl));
4371 print_helper1(st, "loaded", stats.nmethod_loaded_count(lvl));
4372 print_helper1(st, "invalidated", stats.nmethod_invalidated_count(lvl));
4373 print_helper1(st, "failed", stats.nmethod_load_failed_count(lvl));
4374 if (lvl == AOTCompLevel_count-1) {
4375 print_helper1(st, "has_clinit_barriers", stats.clinit_barriers_count());
4376 }
4377 st->cr();
4378 }
4379 }
4380 }
4381 }
4382 } else {
4383 st->print_cr("failed to map code cache");
4384 }
4385 }
4386
4387 void AOTCodeEntry::print(outputStream* st) const {
4388 st->print_cr(" AOT Code Cache entry " INTPTR_FORMAT " [kind: %d, id: " UINT32_FORMAT_X_0 ", offset: %d, size: %d, comp_level: %d, comp_id: %d, decompiled: %d, %s%s%s%s%s]",
4389 p2i(this), (int)_kind, _id, _offset, _size, _comp_level, _comp_id, _decompile,
4390 (_not_entrant? "not_entrant" : "entrant"),
4391 (_loaded ? ", loaded" : ""),
4392 (_has_clinit_barriers ? ", has_clinit_barriers" : ""),
4393 (_for_preload ? ", for_preload" : ""),
4394 (_ignore_decompile ? ", ignore_decomp" : ""));
4395 }
4396
4397 void AOTCodeCache::print_on(outputStream* st) {
4398 AOTCodeCache* cache = open_for_use();
4399 if (cache != nullptr) {
4400 ReadingMark rdmk;
4401 if (rdmk.failed()) {
4402 // Cache is closed, cannot touch anything.
4403 return;
4404 }
4405
4406 uint count = cache->_load_header->entries_count();
4407 uint* search_entries = (uint*)cache->addr(cache->_load_header->entries_offset()); // [id, index]
4408 AOTCodeEntry* load_entries = (AOTCodeEntry*)(search_entries + 2 * count);
4409
4410 for (uint i = 0; i < count; i++) {
4411 int index = search_entries[2*i + 1];
4412 AOTCodeEntry* entry = &(load_entries[index]);
4413
4414 uint entry_position = entry->offset();
4415 uint name_offset = entry->name_offset() + entry_position;
4416 const char* saved_name = cache->addr(name_offset);
4417
4418 st->print_cr("%4u: entry_idx:%4u Kind:%u Id:%u L%u offset:%u size=%u '%s' %s%s%s%s",
4419 i, index, entry->kind(), entry->id(), entry->comp_level(), entry->offset(),
4420 entry->size(), saved_name,
4421 entry->has_clinit_barriers() ? " has_clinit_barriers" : "",
4422 entry->for_preload() ? " for_preload" : "",
4423 entry->is_loaded() ? " loaded" : "",
4424 entry->not_entrant() ? " not_entrant" : "");
4425
4426 st->print_raw(" ");
4427 AOTCodeReader reader(cache, entry, nullptr);
4428 reader.print_on(st);
4429 }
4430 } else {
4431 st->print_cr("failed to map code cache");
4432 }
4433 }
4434
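// Report AOT code entries that were never loaded, using method training data
// to skip methods that are still queued for compilation or already compiled
// online at the same or a higher tier.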
4435 void AOTCodeCache::print_unused_entries_on(outputStream* st) {
4436 LogStreamHandle(Info, aot, codecache, init) info;
4437 if (info.is_enabled()) {
4438 AOTCodeCache::iterate([&](AOTCodeEntry* entry) {
4439 if (entry->is_code() && !entry->is_loaded()) {
4440 MethodTrainingData* mtd = MethodTrainingData::find(methodHandle(Thread::current(), entry->method()));
4441 if (mtd != nullptr) {
4442 if (mtd->has_holder()) {
4443 if (mtd->holder()->method_holder()->is_initialized()) {
4444 ResourceMark rm;
4445 mtd->iterate_compiles([&](CompileTrainingData* ctd) {
4446 if ((uint)ctd->level() == entry->comp_level()) {
4447 if (ctd->init_deps_left() == 0) {
4448 nmethod* nm = mtd->holder()->code();
4449 if (nm == nullptr) {
4450 if (mtd->holder()->queued_for_compilation()) {
4451 return; // scheduled for compilation
4452 }
4453 } else if ((uint)nm->comp_level() >= entry->comp_level()) {
4454 return; // already online compiled and superseded by a more optimal method
4455 }
4456 info.print("AOT Code Cache entry not loaded: ");
4457 ctd->print_on(&info);
4458 info.cr();
4459 }
4460 }
4461 });
4462 } else {
4463 // not yet initialized
4464 }
4465 } else {
4466 info.print("AOT Code Cache entry doesn't have a holder: ");
4467 mtd->print_on(&info);
4468 info.cr();
4469 }
4470 }
4471 }
4472 });
4473 }
4474 }
4475
4476 void AOTCodeReader::print_on(outputStream* st) {
4477 uint entry_position = _entry->offset();
4478 set_read_position(entry_position);
4479
4480 // Read name
4481 uint name_offset = entry_position + _entry->name_offset();
4482   uint name_size = _entry->name_size(); // Includes terminating '\0'
4483 const char* name = addr(name_offset);
4484
4485 st->print_cr(" name: %s", name);
4486 }
4487
|