12 * version 2 for more details (a copy is included in the LICENSE file that
13 * accompanied this code).
14 *
15 * You should have received a copy of the GNU General Public License version
16 * 2 along with this work; if not, write to the Free Software Foundation,
17 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
18 *
19 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
20 * or visit www.oracle.com if you need additional information or have any
21 * questions.
22 *
23 */
24
25
26 #include "asm/macroAssembler.hpp"
27 #include "cds/aotCacheAccess.hpp"
28 #include "cds/aotMetaspace.hpp"
29 #include "cds/cds_globals.hpp"
30 #include "cds/cdsConfig.hpp"
31 #include "cds/heapShared.hpp"
32 #include "classfile/javaAssertions.hpp"
33 #include "code/aotCodeCache.hpp"
34 #include "code/codeCache.hpp"
35 #include "gc/shared/gcConfig.hpp"
36 #include "logging/logStream.hpp"
37 #include "memory/memoryReserver.hpp"
38 #include "runtime/deoptimization.hpp"
39 #include "runtime/flags/flagSetting.hpp"
40 #include "runtime/globals_extension.hpp"
41 #include "runtime/java.hpp"
42 #include "runtime/mutexLocker.hpp"
43 #include "runtime/os.inline.hpp"
44 #include "runtime/sharedRuntime.hpp"
45 #include "runtime/stubInfo.hpp"
46 #include "runtime/stubRoutines.hpp"
47 #include "utilities/copy.hpp"
48 #ifdef COMPILER1
49 #include "c1/c1_Runtime1.hpp"
50 #endif
51 #ifdef COMPILER2
52 #include "opto/runtime.hpp"
53 #endif
54 #if INCLUDE_G1GC
55 #include "gc/g1/g1BarrierSetRuntime.hpp"
56 #endif
57 #if INCLUDE_SHENANDOAHGC
58 #include "gc/shenandoah/shenandoahRuntime.hpp"
59 #endif
60 #if INCLUDE_ZGC
61 #include "gc/z/zBarrierSetRuntime.hpp"
62 #endif
63
64 #include <errno.h>
65 #include <sys/stat.h>
66
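// Human-readable names for AOTCodeEntry kinds, expanded from the DO_AOTCODEENTRY_KIND list.
// Indexed by AOTCodeEntry::Kind when logging entries below.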
67 const char* aot_code_entry_kind_name[] = {
68 #define DECL_KIND_STRING(kind) XSTR(kind),
69 DO_AOTCODEENTRY_KIND(DECL_KIND_STRING)
70 #undef DECL_KIND_STRING
71 };
72
73 static void report_load_failure() {
74 if (AbortVMOnAOTCodeFailure) {
75 vm_exit_during_initialization("Unable to use AOT Code Cache.", nullptr);
76 }
77 log_info(aot, codecache, init)("Unable to use AOT Code Cache.");
78 AOTCodeCache::disable_caching();
79 }
80
81 static void report_store_failure() {
82 if (AbortVMOnAOTCodeFailure) {
83 tty->print_cr("Unable to create AOT Code Cache.");
84 vm_abort(false);
85 }
86 log_info(aot, codecache, exit)("Unable to create AOT Code Cache.");
87 AOTCodeCache::disable_caching();
88 }
89
90 // The sequence of AOT code caching flag and parameter settings.
91 //
92 // 1. The initial AOT code caching flags setting is done
107
108 // The following methods determine what we do with AOT code depending
109 // on the phase of the AOT process: assembly or production.
110
111 bool AOTCodeCache::is_dumping_adapter() {
112 return AOTAdapterCaching && is_on_for_dump();
113 }
114
115 bool AOTCodeCache::is_using_adapter() {
116 return AOTAdapterCaching && is_on_for_use();
117 }
118
119 bool AOTCodeCache::is_dumping_stub() {
120 return AOTStubCaching && is_on_for_dump();
121 }
122
123 bool AOTCodeCache::is_using_stub() {
124 return AOTStubCaching && is_on_for_use();
125 }
126
127 // The following methods can be called regardless of the AOT code cache status.
128 // They are initially called during flags parsing and finalized
129 // in AOTCodeCache::initialize().
130 void AOTCodeCache::enable_caching() {
131 FLAG_SET_ERGO_IF_DEFAULT(AOTStubCaching, true);
132 FLAG_SET_ERGO_IF_DEFAULT(AOTAdapterCaching, true);
133 }
134
135 void AOTCodeCache::disable_caching() {
136 FLAG_SET_ERGO(AOTStubCaching, false);
137 FLAG_SET_ERGO(AOTAdapterCaching, false);
138 }
139
140 bool AOTCodeCache::is_caching_enabled() {
141 return AOTStubCaching || AOTAdapterCaching;
142 }
143
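// Map (kind, id) to the id value stored in the cache's search table. The raw id is
// returned unchanged; blob ids are only validated against their StubInfo group.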
144 static uint32_t encode_id(AOTCodeEntry::Kind kind, int id) {
145 assert(AOTCodeEntry::is_valid_entry_kind(kind), "invalid AOTCodeEntry kind %d", (int)kind);
146 // There can be a conflict of id between an Adapter and *Blob, but that should not cause any functional issue
147 // because both id and kind are used to find an entry, and that combination should be unique
148 if (kind == AOTCodeEntry::Adapter) {
149 return id;
150 } else if (kind == AOTCodeEntry::SharedBlob) {
151 assert(StubInfo::is_shared(static_cast<BlobId>(id)), "not a shared blob id %d", id);
152 return id;
153 } else if (kind == AOTCodeEntry::C1Blob) {
154 assert(StubInfo::is_c1(static_cast<BlobId>(id)), "not a c1 blob id %d", id);
155 return id;
156 } else {
157 // kind must be AOTCodeEntry::C2Blob
158 assert(StubInfo::is_c2(static_cast<BlobId>(id)), "not a c2 blob id %d", id);
159 return id;
160 }
161 }
162
163 static uint _max_aot_code_size = 0;
164 uint AOTCodeCache::max_aot_code_size() {
165 return _max_aot_code_size;
166 }
167
168 // It is called from AOTMetaspace::initialize_shared_spaces()
169 // which is called from universe_init().
170 // At this point all AOT class linking settings are finalized
171 // and the AOT cache is open, so we can map the AOT code region.
172 void AOTCodeCache::initialize() {
173 #if defined(ZERO) || !(defined(AMD64) || defined(AARCH64))
174 log_info(aot, codecache, init)("AOT Code Cache is not supported on this platform.");
175 disable_caching();
176 return;
177 #else
178 if (FLAG_IS_DEFAULT(AOTCache)) {
179 log_info(aot, codecache, init)("AOT Code Cache is not used: AOTCache is not specified.");
180 disable_caching();
181 return; // AOTCache must be specified to dump and use AOT code
182 }
183
184 // Disable stubs caching until JDK-8357398 is fixed.
185 FLAG_SET_ERGO(AOTStubCaching, false);
186
187 if (VerifyOops) {
188 // Disable AOT stubs caching when VerifyOops flag is on.
189 // VerifyOops code generates a lot of C strings which overflow the
190 // AOT C string table (which has a fixed size).
191 // The AOT C string table will be reworked later to handle such cases.
192 //
193 // Note: AOT adapters are not affected - they don't have oop operations.
194 log_info(aot, codecache, init)("AOT Stubs Caching is not supported with VerifyOops.");
195 FLAG_SET_ERGO(AOTStubCaching, false);
196 }
197
198 bool is_dumping = false;
199 bool is_using = false;
200 if (CDSConfig::is_dumping_final_static_archive() && CDSConfig::is_dumping_aot_linked_classes()) {
201 is_dumping = true;
202 enable_caching();
203 is_dumping = is_caching_enabled();
204 } else if (CDSConfig::is_using_archive() && CDSConfig::is_using_aot_linked_classes()) {
205 enable_caching();
206 is_using = is_caching_enabled();
207 } else {
208 log_info(aot, codecache, init)("AOT Code Cache is not used: AOT Class Linking is not used.");
209 disable_caching();
210 return; // nothing to do
211 }
212 if (!(is_dumping || is_using)) {
213 disable_caching();
214 return; // AOT code caching disabled on command line
215 }
216 _max_aot_code_size = AOTCodeMaxSize;
217 if (!FLAG_IS_DEFAULT(AOTCodeMaxSize)) {
218 if (!is_aligned(AOTCodeMaxSize, os::vm_allocation_granularity())) {
219 _max_aot_code_size = align_up(AOTCodeMaxSize, os::vm_allocation_granularity());
220 log_debug(aot,codecache,init)("Max AOT Code Cache size is aligned up to %uK", (int)(max_aot_code_size()/K));
221 }
222 }
223 size_t aot_code_size = is_using ? AOTCacheAccess::get_aot_code_region_size() : 0;
224 if (is_using && aot_code_size == 0) {
225 log_info(aot, codecache, init)("AOT Code Cache is empty");
226 disable_caching();
227 return;
228 }
229 if (!open_cache(is_dumping, is_using)) {
230 if (is_using) {
231 report_load_failure();
232 } else {
233 report_store_failure();
234 }
235 return;
236 }
237 if (is_dumping) {
238 FLAG_SET_DEFAULT(ForceUnreachable, true);
239 }
240 FLAG_SET_DEFAULT(DelayCompilerStubsGeneration, false);
241 #endif // defined(AMD64) || defined(AARCH64)
242 }
243
244 static AOTCodeCache* opened_cache = nullptr; // Use this until we verify the cache
245 AOTCodeCache* AOTCodeCache::_cache = nullptr;
246 DEBUG_ONLY( bool AOTCodeCache::_passed_init2 = false; )
247
248 // It is called after universe_init() when all GC settings are finalized.
249 void AOTCodeCache::init2() {
250 DEBUG_ONLY( _passed_init2 = true; )
251 if (opened_cache == nullptr) {
252 return;
253 }
254 if (!opened_cache->verify_config()) {
255 delete opened_cache;
256 opened_cache = nullptr;
257 report_load_failure();
258 return;
259 }
260
261 // initialize the table of external routines so we can save
262 // generated code blobs that reference them
263 AOTCodeAddressTable* table = opened_cache->_table;
264 assert(table != nullptr, "should be initialized already");
265 table->init_extrs();
266
267 // Now cache and address table are ready for AOT code generation
268 _cache = opened_cache;
269 }
270
271 bool AOTCodeCache::open_cache(bool is_dumping, bool is_using) {
272 opened_cache = new AOTCodeCache(is_dumping, is_using);
273 if (opened_cache->failed()) {
274 delete opened_cache;
275 opened_cache = nullptr;
276 return false;
277 }
278 return true;
279 }
280
281 void AOTCodeCache::close() {
282 if (is_on()) {
283 delete _cache; // Free memory
284 _cache = nullptr;
285 opened_cache = nullptr;
286 }
287 }
288
289 #define DATA_ALIGNMENT HeapWordSize
290
291 AOTCodeCache::AOTCodeCache(bool is_dumping, bool is_using) :
292 _load_header(nullptr),
293 _load_buffer(nullptr),
294 _store_buffer(nullptr),
295 _C_store_buffer(nullptr),
296 _write_position(0),
297 _load_size(0),
298 _store_size(0),
299 _for_use(is_using),
300 _for_dump(is_dumping),
301 _closing(false),
302 _failed(false),
303 _lookup_failed(false),
304 _table(nullptr),
305 _load_entries(nullptr),
306 _search_entries(nullptr),
307 _store_entries(nullptr),
308 _C_strings_buf(nullptr),
309 _store_entries_cnt(0)
310 {
311 // Read the header at the beginning of the cache
312 if (_for_use) {
313 // Read cache
314 size_t load_size = AOTCacheAccess::get_aot_code_region_size();
315 ReservedSpace rs = MemoryReserver::reserve(load_size, mtCode);
316 if (!rs.is_reserved()) {
317 log_warning(aot, codecache, init)("Failed to reserve %u bytes of memory for mapping AOT code region into AOT Code Cache", (uint)load_size);
318 set_failed();
319 return;
320 }
321 if (!AOTCacheAccess::map_aot_code_region(rs)) {
322 log_warning(aot, codecache, init)("Failed to read/mmap cached code region into AOT Code Cache");
323 set_failed();
324 return;
325 }
326
327 _load_size = (uint)load_size;
328 _load_buffer = (char*)rs.base();
329 assert(is_aligned(_load_buffer, DATA_ALIGNMENT), "load_buffer is not aligned");
330 log_debug(aot, codecache, init)("Mapped %u bytes at address " INTPTR_FORMAT " at AOT Code Cache", _load_size, p2i(_load_buffer));
331
332 _load_header = (Header*)addr(0);
333 if (!_load_header->verify(_load_size)) {
334 set_failed();
335 return;
336 }
337 log_info (aot, codecache, init)("Loaded %u AOT code entries from AOT Code Cache", _load_header->entries_count());
338 log_debug(aot, codecache, init)(" Adapters: total=%u", _load_header->adapters_count());
339 log_debug(aot, codecache, init)(" Shared Blobs: total=%u", _load_header->shared_blobs_count());
340 log_debug(aot, codecache, init)(" C1 Blobs: total=%u", _load_header->C1_blobs_count());
341 log_debug(aot, codecache, init)(" C2 Blobs: total=%u", _load_header->C2_blobs_count());
342 log_debug(aot, codecache, init)(" AOT code cache size: %u bytes", _load_header->cache_size());
343
344 // Read strings
345 load_strings();
346 }
347 if (_for_dump) {
348 _C_store_buffer = NEW_C_HEAP_ARRAY(char, max_aot_code_size() + DATA_ALIGNMENT, mtCode);
349 _store_buffer = align_up(_C_store_buffer, DATA_ALIGNMENT);
350 // Entries are allocated at the end of the buffer in reverse order (as on a stack).
351 _store_entries = (AOTCodeEntry*)align_up(_C_store_buffer + max_aot_code_size(), DATA_ALIGNMENT);
352 log_debug(aot, codecache, init)("Allocated store buffer at address " INTPTR_FORMAT " of size %u", p2i(_store_buffer), max_aot_code_size());
353 }
354 _table = new AOTCodeAddressTable();
355 }
356
357 void AOTCodeCache::init_early_stubs_table() {
358 AOTCodeAddressTable* table = addr_table();
359 if (table != nullptr) {
360 table->init_early_stubs();
361 }
362 }
363
364 void AOTCodeCache::init_shared_blobs_table() {
365 AOTCodeAddressTable* table = addr_table();
366 if (table != nullptr) {
367 table->init_shared_blobs();
368 }
369 }
370
371 void AOTCodeCache::init_early_c1_table() {
372 AOTCodeAddressTable* table = addr_table();
373 if (table != nullptr) {
374 table->init_early_c1();
375 }
376 }
377
378 AOTCodeCache::~AOTCodeCache() {
379 if (_closing) {
380 return; // Already closed
381 }
382 // Stop any further access to cache.
383 _closing = true;
384
385 MutexLocker ml(Compile_lock);
386 if (for_dump()) { // Finalize cache
387 finish_write();
388 }
389 _load_buffer = nullptr;
390 if (_C_store_buffer != nullptr) {
391 FREE_C_HEAP_ARRAY(char, _C_store_buffer);
392 _C_store_buffer = nullptr;
393 _store_buffer = nullptr;
394 }
395 if (_table != nullptr) {
396 MutexLocker ml(AOTCodeCStrings_lock, Mutex::_no_safepoint_check_flag);
397 delete _table;
398 _table = nullptr;
399 }
400 }
401
402 void AOTCodeCache::Config::record() {
403 _flags = 0;
404 #ifdef ASSERT
405 _flags |= debugVM;
406 #endif
407 if (UseCompressedOops) {
408 _flags |= compressedOops;
409 }
410 if (UseCompressedClassPointers) {
411 _flags |= compressedClassPointers;
412 }
413 if (UseTLAB) {
414 _flags |= useTLAB;
415 }
416 if (JavaAssertions::systemClassDefault()) {
417 _flags |= systemClassAssertions;
418 }
419 if (JavaAssertions::userClassDefault()) {
420 _flags |= userClassAssertions;
421 }
422 if (EnableContended) {
423 _flags |= enableContendedPadding;
424 }
425 if (RestrictContended) {
426 _flags |= restrictContendedPadding;
427 }
428 _compressedOopShift = CompressedOops::shift();
429 _compressedOopBase = CompressedOops::base();
430 _compressedKlassShift = CompressedKlassPointers::shift();
431 _contendedPaddingWidth = ContendedPaddingWidth;
432 _gc = (uint)Universe::heap()->kind();
433 }
434
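// Compare the recorded configuration with the current VM settings. Mismatches in VM flavor
// (debug vs product), GC, or compressed class pointer settings reject the whole cache;
// compressed oops mismatches only disable AOTStubCaching because AOT adapters have no oop operations.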
435 bool AOTCodeCache::Config::verify() const {
436 // The first checks affect all cached AOT code
437 #ifdef ASSERT
438 if ((_flags & debugVM) == 0) {
439 log_debug(aot, codecache, init)("AOT Code Cache disabled: it was created by product VM, it can't be used by debug VM");
440 return false;
441 }
442 #else
443 if ((_flags & debugVM) != 0) {
444 log_debug(aot, codecache, init)("AOT Code Cache disabled: it was created by debug VM, it can't be used by product VM");
445 return false;
446 }
447 #endif
448
449 CollectedHeap::Name aot_gc = (CollectedHeap::Name)_gc;
450 if (aot_gc != Universe::heap()->kind()) {
451 log_debug(aot, codecache, init)("AOT Code Cache disabled: it was created with different GC: %s vs current %s", GCConfig::hs_err_name(aot_gc), GCConfig::hs_err_name());
452 return false;
453 }
454
455 if (((_flags & compressedClassPointers) != 0) != UseCompressedClassPointers) {
456 log_debug(aot, codecache, init)("AOT Code Cache disabled: it was created with UseCompressedClassPointers = %s", UseCompressedClassPointers ? "false" : "true");
457 return false;
458 }
459 if (_compressedKlassShift != (uint)CompressedKlassPointers::shift()) {
460 log_debug(aot, codecache, init)("AOT Code Cache disabled: it was created with CompressedKlassPointers::shift() = %d vs current %d", _compressedKlassShift, CompressedKlassPointers::shift());
461 return false;
462 }
463
464 // The following checks do not affect AOT adapter caching
465
466 if (((_flags & compressedOops) != 0) != UseCompressedOops) {
467 log_debug(aot, codecache, init)("AOT Code Cache disabled: it was created with UseCompressedOops = %s", UseCompressedOops ? "false" : "true");
468 AOTStubCaching = false;
469 }
470 if (_compressedOopShift != (uint)CompressedOops::shift()) {
471 log_debug(aot, codecache, init)("AOT Code Cache disabled: it was created with different CompressedOops::shift(): %d vs current %d", _compressedOopShift, CompressedOops::shift());
472 AOTStubCaching = false;
473 }
474
475 // This should be the last check as it only disables AOTStubCaching
476 if ((_compressedOopBase == nullptr || CompressedOops::base() == nullptr) && (_compressedOopBase != CompressedOops::base())) {
477 log_debug(aot, codecache, init)("AOTStubCaching is disabled: incompatible CompressedOops::base(): %p vs current %p", _compressedOopBase, CompressedOops::base());
478 AOTStubCaching = false;
479 }
480
481 return true;
482 }
483
484 bool AOTCodeCache::Header::verify(uint load_size) const {
485 if (_version != AOT_CODE_VERSION) {
486 log_debug(aot, codecache, init)("AOT Code Cache disabled: different AOT Code version %d vs %d recorded in AOT Code header", AOT_CODE_VERSION, _version);
487 return false;
488 }
489 if (load_size < _cache_size) {
490 log_debug(aot, codecache, init)("AOT Code Cache disabled: AOT Code Cache size %d < %d recorded in AOT Code header", load_size, _cache_size);
491 return false;
492 }
493 return true;
494 }
495
496 AOTCodeCache* AOTCodeCache::open_for_use() {
497 if (AOTCodeCache::is_on_for_use()) {
498 return AOTCodeCache::cache();
499 }
500 return nullptr;
501 }
502
503 AOTCodeCache* AOTCodeCache::open_for_dump() {
504 if (AOTCodeCache::is_on_for_dump()) {
505 AOTCodeCache* cache = AOTCodeCache::cache();
506 cache->clear_lookup_failed(); // Reset bit
507 return cache;
508 }
509 return nullptr;
510 }
511
512 void copy_bytes(const char* from, address to, uint size) {
513 assert((int)size > 0, "sanity");
514 memcpy(to, from, size);
515 log_trace(aot, codecache)("Copied %d bytes from " INTPTR_FORMAT " to " INTPTR_FORMAT, size, p2i(from), p2i(to));
516 }
517
518 AOTCodeReader::AOTCodeReader(AOTCodeCache* cache, AOTCodeEntry* entry) {
519 _cache = cache;
520 _entry = entry;
521 _load_buffer = cache->cache_buffer();
522 _read_position = 0;
523 _lookup_failed = false;
524 }
525
526 void AOTCodeReader::set_read_position(uint pos) {
527 if (pos == _read_position) {
528 return;
529 }
530 assert(pos < _cache->load_size(), "offset:%d >= file size:%d", pos, _cache->load_size());
531 _read_position = pos;
532 }
533
534 bool AOTCodeCache::set_write_position(uint pos) {
535 if (pos == _write_position) {
536 return true;
537 }
538 if (_store_size < _write_position) {
539 _store_size = _write_position; // Adjust during write
540 }
541 assert(pos < _store_size, "offset:%d >= file size:%d", pos, _store_size);
542 _write_position = pos;
585 if (nbytes == 0) {
586 return 0;
587 }
588 uint new_position = _write_position + nbytes;
589 if (new_position >= (uint)((char*)_store_entries - _store_buffer)) {
590 log_warning(aot, codecache)("Failed to write %d bytes at offset %d to AOT Code Cache. Increase AOTCodeMaxSize.",
591 nbytes, _write_position);
592 set_failed();
593 report_store_failure();
594 return 0;
595 }
596 copy_bytes((const char* )buffer, (address)(_store_buffer + _write_position), nbytes);
597 log_trace(aot, codecache)("Wrote %d bytes at offset %d to AOT Code Cache", nbytes, _write_position);
598 _write_position += nbytes;
599 if (_store_size < _write_position) {
600 _store_size = _write_position;
601 }
602 return nbytes;
603 }
604
605 void* AOTCodeEntry::operator new(size_t x, AOTCodeCache* cache) {
606 return (void*)(cache->add_entry());
607 }
608
609 static bool check_entry(AOTCodeEntry::Kind kind, uint id, AOTCodeEntry* entry) {
610 if (entry->kind() == kind) {
611 assert(entry->id() == id, "sanity");
612 return true; // Found
613 }
614 return false;
615 }
616
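// Look up an entry by (kind, id). The search table holds [id, index] pairs sorted by id;
// a binary search locates a matching id and a short linear scan around the hit resolves
// entries whose ids collide across kinds (e.g. an Adapter and a blob with the same id).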
617 AOTCodeEntry* AOTCodeCache::find_entry(AOTCodeEntry::Kind kind, uint id) {
618 assert(_for_use, "sanity");
619 uint count = _load_header->entries_count();
620 if (_load_entries == nullptr) {
621 // Read it
622 _search_entries = (uint*)addr(_load_header->entries_offset()); // [id, index]
623 _load_entries = (AOTCodeEntry*)(_search_entries + 2 * count);
624 log_debug(aot, codecache, init)("Read %d entries table at offset %d from AOT Code Cache", count, _load_header->entries_offset());
625 }
626 // Binary search
627 int l = 0;
628 int h = count - 1;
629 while (l <= h) {
630 int mid = (l + h) >> 1;
631 int ix = mid * 2;
632 uint is = _search_entries[ix];
633 if (is == id) {
634 int index = _search_entries[ix + 1];
635 AOTCodeEntry* entry = &(_load_entries[index]);
636 if (check_entry(kind, id, entry)) {
637 return entry; // Found
638 }
639 // Linear search around the hit to handle id collisions
640 for (int i = mid - 1; i >= l; i--) { // search back
641 ix = i * 2;
642 is = _search_entries[ix];
643 if (is != id) {
644 break;
645 }
646 index = _search_entries[ix + 1];
647 AOTCodeEntry* entry = &(_load_entries[index]);
648 if (check_entry(kind, id, entry)) {
649 return entry; // Found
650 }
651 }
652 for (int i = mid + 1; i <= h; i++) { // search forward
653 ix = i * 2;
654 is = _search_entries[ix];
655 if (is != id) {
656 break;
657 }
658 index = _search_entries[ix + 1];
659 AOTCodeEntry* entry = &(_load_entries[index]);
660 if (check_entry(kind, id, entry)) {
661 return entry; // Found
662 }
663 }
664 break; // No match found
665 } else if (is < id) {
666 l = mid + 1;
667 } else {
668 h = mid - 1;
669 }
670 }
671 return nullptr;
672 }
673
674 extern "C" {
675 static int uint_cmp(const void *i, const void *j) {
676 uint a = *(uint *)i;
677 uint b = *(uint *)j;
678 return a > b ? 1 : a < b ? -1 : 0;
679 }
680 }
681
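// Copy the accumulated data into the final AOT code region. The resulting layout is:
// [ header | per-entry code data | C strings | sorted [id, index] search table | AOTCodeEntry array ]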
682 bool AOTCodeCache::finish_write() {
683 if (!align_write()) {
684 return false;
685 }
686 uint strings_offset = _write_position;
687 int strings_count = store_strings();
688 if (strings_count < 0) {
689 return false;
690 }
691 if (!align_write()) {
692 return false;
693 }
694 uint strings_size = _write_position - strings_offset;
695
696 uint entries_count = 0; // Number of entrant (useful) code entries
697 uint entries_offset = _write_position;
698
699 uint store_count = _store_entries_cnt;
700 if (store_count > 0) {
701 uint header_size = (uint)align_up(sizeof(AOTCodeCache::Header), DATA_ALIGNMENT);
702 uint code_count = store_count;
703 uint search_count = code_count * 2;
704 uint search_size = search_count * sizeof(uint);
705 uint entries_size = (uint)align_up(code_count * sizeof(AOTCodeEntry), DATA_ALIGNMENT); // In bytes
706 // _write_position includes size of code and strings
707 uint code_alignment = code_count * DATA_ALIGNMENT; // We align_up code size when storing it.
708 uint total_size = header_size + _write_position + code_alignment + search_size + entries_size;
709 assert(total_size < max_aot_code_size(), "AOT Code size (" UINT32_FORMAT " bytes) is greater than AOTCodeMaxSize(" UINT32_FORMAT " bytes).", total_size, max_aot_code_size());
710
711 // Create ordered search table for entries [id, index];
712 uint* search = NEW_C_HEAP_ARRAY(uint, search_count, mtCode);
713 // Allocate in AOT Cache buffer
714 char* buffer = (char *)AOTCacheAccess::allocate_aot_code_region(total_size + DATA_ALIGNMENT);
715 char* start = align_up(buffer, DATA_ALIGNMENT);
716 char* current = start + header_size; // Skip header
717
718 AOTCodeEntry* entries_address = _store_entries; // Pointer to latest entry
719 uint adapters_count = 0;
720 uint shared_blobs_count = 0;
721 uint C1_blobs_count = 0;
722 uint C2_blobs_count = 0;
723 uint max_size = 0;
724 // AOTCodeEntry entries were allocated in reverse order in the store buffer.
725 // Process them in reverse order so the first cached code comes first.
726 for (int i = store_count - 1; i >= 0; i--) {
727 entries_address[i].set_next(nullptr); // clear pointers before storing data
728 uint size = align_up(entries_address[i].size(), DATA_ALIGNMENT);
729 if (size > max_size) {
730 max_size = size;
731 }
732 copy_bytes((_store_buffer + entries_address[i].offset()), (address)current, size);
733 entries_address[i].set_offset(current - start); // New offset
734 current += size;
735 uint n = write_bytes(&(entries_address[i]), sizeof(AOTCodeEntry));
736 if (n != sizeof(AOTCodeEntry)) {
737 FREE_C_HEAP_ARRAY(uint, search);
738 return false;
739 }
740 search[entries_count*2 + 0] = entries_address[i].id();
741 search[entries_count*2 + 1] = entries_count;
742 entries_count++;
743 AOTCodeEntry::Kind kind = entries_address[i].kind();
744 if (kind == AOTCodeEntry::Adapter) {
745 adapters_count++;
746 } else if (kind == AOTCodeEntry::SharedBlob) {
747 shared_blobs_count++;
748 } else if (kind == AOTCodeEntry::C1Blob) {
749 C1_blobs_count++;
750 } else if (kind == AOTCodeEntry::C2Blob) {
751 C2_blobs_count++;
752 }
753 }
754 if (entries_count == 0) {
755 log_info(aot, codecache, exit)("AOT Code Cache was not created: no entries");
756 FREE_C_HEAP_ARRAY(uint, search);
757 return true; // Nothing to write
758 }
759 assert(entries_count <= store_count, "%d > %d", entries_count, store_count);
760 // Write strings
761 if (strings_count > 0) {
762 copy_bytes((_store_buffer + strings_offset), (address)current, strings_size);
763 strings_offset = (current - start); // New offset
764 current += strings_size;
765 }
766
767 uint new_entries_offset = (current - start); // New offset
768 // Sort and store search table
769 qsort(search, entries_count, 2*sizeof(uint), uint_cmp);
770 search_size = 2 * entries_count * sizeof(uint);
771 copy_bytes((const char*)search, (address)current, search_size);
772 FREE_C_HEAP_ARRAY(uint, search);
773 current += search_size;
774
775 // Write entries
776 entries_size = entries_count * sizeof(AOTCodeEntry); // New size
777 copy_bytes((_store_buffer + entries_offset), (address)current, entries_size);
778 current += entries_size;
779 uint size = (current - start);
780 assert(size <= total_size, "%d > %d", size , total_size);
781
782 log_debug(aot, codecache, exit)(" Adapters: total=%u", adapters_count);
783 log_debug(aot, codecache, exit)(" Shared Blobs: total=%d", shared_blobs_count);
784 log_debug(aot, codecache, exit)(" C1 Blobs: total=%d", C1_blobs_count);
785 log_debug(aot, codecache, exit)(" C2 Blobs: total=%d", C2_blobs_count);
786 log_debug(aot, codecache, exit)(" AOT code cache size: %u bytes, max entry's size: %u bytes", size, max_size);
787
788 // Finalize header
789 AOTCodeCache::Header* header = (AOTCodeCache::Header*)start;
790 header->init(size, (uint)strings_count, strings_offset,
791 entries_count, new_entries_offset,
792 adapters_count, shared_blobs_count,
793 C1_blobs_count, C2_blobs_count);
794
795 log_info(aot, codecache, exit)("Wrote %d AOT code entries to AOT Code Cache", entries_count);
796 }
797 return true;
798 }
799
800 //------------------Store/Load AOT code ----------------------
801
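// Write a code blob into the cache: the blob name, the archived CodeBlob image, its
// relocation data and oop maps, (non-product) asm remarks and dbg strings, and the extra
// relocation info, then register an AOTCodeEntry describing where everything was written.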
802 bool AOTCodeCache::store_code_blob(CodeBlob& blob, AOTCodeEntry::Kind entry_kind, uint id, const char* name) {
803 AOTCodeCache* cache = open_for_dump();
804 if (cache == nullptr) {
805 return false;
806 }
807 assert(AOTCodeEntry::is_valid_entry_kind(entry_kind), "invalid entry_kind %d", entry_kind);
808
809 if (AOTCodeEntry::is_adapter(entry_kind) && !is_dumping_adapter()) {
810 return false;
811 }
812 if (AOTCodeEntry::is_blob(entry_kind) && !is_dumping_stub()) {
813 return false;
814 }
815 log_debug(aot, codecache, stubs)("Writing blob '%s' (id=%u, kind=%s) to AOT Code Cache", name, id, aot_code_entry_kind_name[entry_kind]);
850 return false;
851 }
852 CodeBlob::archive_blob(&blob, archive_buffer);
853
854 uint reloc_data_size = blob.relocation_size();
855 n = cache->write_bytes((address)blob.relocation_begin(), reloc_data_size);
856 if (n != reloc_data_size) {
857 return false;
858 }
859
860 bool has_oop_maps = false;
861 if (blob.oop_maps() != nullptr) {
862 if (!cache->write_oop_map_set(blob)) {
863 return false;
864 }
865 has_oop_maps = true;
866 }
867
868 #ifndef PRODUCT
869 // Write asm remarks
870 if (!cache->write_asm_remarks(blob)) {
871 return false;
872 }
873 if (!cache->write_dbg_strings(blob)) {
874 return false;
875 }
876 #endif /* PRODUCT */
877
878 if (!cache->write_relocations(blob)) {
879 if (!cache->failed()) {
880 // We may miss an address in AOT table - skip this code blob.
881 cache->set_write_position(entry_position);
882 }
883 return false;
884 }
885
886 uint entry_size = cache->_write_position - entry_position;
887 AOTCodeEntry* entry = new(cache) AOTCodeEntry(entry_kind, encode_id(entry_kind, id),
888 entry_position, entry_size, name_offset, name_size,
889 blob_offset, has_oop_maps, blob.content_begin());
890 log_debug(aot, codecache, stubs)("Wrote code blob '%s' (id=%u, kind=%s) to AOT Code Cache", name, id, aot_code_entry_kind_name[entry_kind]);
891 return true;
892 }
893
899
900 CodeBlob* AOTCodeCache::load_code_blob(AOTCodeEntry::Kind entry_kind, uint id, const char* name) {
901 AOTCodeCache* cache = open_for_use();
902 if (cache == nullptr) {
903 return nullptr;
904 }
905 assert(AOTCodeEntry::is_valid_entry_kind(entry_kind), "invalid entry_kind %d", entry_kind);
906
907 if (AOTCodeEntry::is_adapter(entry_kind) && !is_using_adapter()) {
908 return nullptr;
909 }
910 if (AOTCodeEntry::is_blob(entry_kind) && !is_using_stub()) {
911 return nullptr;
912 }
913 log_debug(aot, codecache, stubs)("Reading blob '%s' (id=%u, kind=%s) from AOT Code Cache", name, id, aot_code_entry_kind_name[entry_kind]);
914
915 AOTCodeEntry* entry = cache->find_entry(entry_kind, encode_id(entry_kind, id));
916 if (entry == nullptr) {
917 return nullptr;
918 }
919 AOTCodeReader reader(cache, entry);
920 CodeBlob* blob = reader.compile_code_blob(name);
921
922 log_debug(aot, codecache, stubs)("%sRead blob '%s' (id=%u, kind=%s) from AOT Code Cache",
923 (blob == nullptr? "Failed to " : ""), name, id, aot_code_entry_kind_name[entry_kind]);
924 return blob;
925 }
926
927 CodeBlob* AOTCodeCache::load_code_blob(AOTCodeEntry::Kind entry_kind, BlobId id) {
928 assert(AOTCodeEntry::is_blob(entry_kind),
929 "wrong entry kind for blob id %s", StubInfo::name(id));
930 return load_code_blob(entry_kind, (uint)id, StubInfo::name(id));
931 }
932
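// Materialize a CodeBlob from its archived image: check the stored name, create the blob
// in the CodeCache from the archived bytes, restore oop maps, asm remarks and dbg strings,
// then patch relocations for the new location.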
933 CodeBlob* AOTCodeReader::compile_code_blob(const char* name) {
934 uint entry_position = _entry->offset();
935
936 // Read name
937 uint name_offset = entry_position + _entry->name_offset();
938 uint name_size = _entry->name_size(); // Includes '\0'
939 const char* stored_name = addr(name_offset);
940
941 if (strncmp(stored_name, name, (name_size - 1)) != 0) {
942 log_warning(aot, codecache, stubs)("Saved blob's name '%s' is different from the expected name '%s'",
943 stored_name, name);
944 set_lookup_failed(); // Skip this blob
945 return nullptr;
946 }
947
948 // Read archived code blob
949 uint offset = entry_position + _entry->blob_offset();
950 CodeBlob* archived_blob = (CodeBlob*)addr(offset);
951 offset += archived_blob->size();
952
953 address reloc_data = (address)addr(offset);
954 offset += archived_blob->relocation_size();
955 set_read_position(offset);
956
957 ImmutableOopMapSet* oop_maps = nullptr;
958 if (_entry->has_oop_maps()) {
959 oop_maps = read_oop_map_set();
960 }
961
962 CodeBlob* code_blob = CodeBlob::create(archived_blob,
963 stored_name,
964 reloc_data,
965 oop_maps
966 );
967 if (code_blob == nullptr) { // no space left in CodeCache
968 return nullptr;
969 }
970
971 #ifndef PRODUCT
972 code_blob->asm_remarks().init();
973 read_asm_remarks(code_blob->asm_remarks());
974 code_blob->dbg_strings().init();
975 read_dbg_strings(code_blob->dbg_strings());
976 #endif // PRODUCT
977
978 fix_relocations(code_blob);
979
980 #ifdef ASSERT
981 LogStreamHandle(Trace, aot, codecache, stubs) log;
982 if (log.is_enabled()) {
983 FlagSetting fs(PrintRelocations, true);
984 code_blob->print_on(&log);
985 }
986 #endif
987 return code_blob;
988 }
989
990 // ------------ process code and data --------------
991
992 // Can't use -1. It is a valid value for a jump-to-itself destination
993 // used by the static call stub: see NativeJump::jump_destination().
994 #define BAD_ADDRESS_ID -2
995
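// For every relocation in the blob, one uint of extra data is written (preceded by the count):
// an address-table id for runtime calls and external words, 0 otherwise. Unsupported relocation
// types cause the blob to be skipped. fix_relocations() below consumes the same stream on load.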
996 bool AOTCodeCache::write_relocations(CodeBlob& code_blob) {
997 GrowableArray<uint> reloc_data;
998 RelocIterator iter(&code_blob);
999 LogStreamHandle(Trace, aot, codecache, reloc) log;
1000 while (iter.next()) {
1001 int idx = reloc_data.append(0); // default value
1002 switch (iter.type()) {
1003 case relocInfo::none:
1004 break;
1005 case relocInfo::runtime_call_type: {
1006 // Record offset of runtime destination
1007 CallRelocation* r = (CallRelocation*)iter.reloc();
1008 address dest = r->destination();
1009 if (dest == r->addr()) { // possible call via trampoline on Aarch64
1010 dest = (address)-1; // do nothing in this case when loading this relocation
1011 }
1012 int id = _table->id_for_address(dest, iter, &code_blob);
1013 if (id == BAD_ADDRESS_ID) {
1014 return false;
1015 }
1016 reloc_data.at_put(idx, id);
1017 break;
1018 }
1019 case relocInfo::runtime_call_w_cp_type:
1020 log_debug(aot, codecache, reloc)("runtime_call_w_cp_type relocation is not implemented");
1021 return false;
1022 case relocInfo::external_word_type: {
1023 // Record offset of runtime target
1024 address target = ((external_word_Relocation*)iter.reloc())->target();
1025 int id = _table->id_for_address(target, iter, &code_blob);
1026 if (id == BAD_ADDRESS_ID) {
1027 return false;
1028 }
1029 reloc_data.at_put(idx, id);
1030 break;
1031 }
1032 case relocInfo::internal_word_type:
1033 break;
1034 case relocInfo::section_word_type:
1035 break;
1036 case relocInfo::post_call_nop_type:
1037 break;
1038 default:
1039 log_debug(aot, codecache, reloc)("relocation %d unimplemented", (int)iter.type());
1040 return false;
1041 break;
1042 }
1043 if (log.is_enabled()) {
1044 iter.print_current_on(&log);
1045 }
1046 }
1047
1048 // Write additional relocation data: uint per relocation
1049 // Write the count first
1050 int count = reloc_data.length();
1051 write_bytes(&count, sizeof(int));
1052 for (GrowableArrayIterator<uint> iter = reloc_data.begin();
1053 iter != reloc_data.end(); ++iter) {
1054 uint value = *iter;
1055 int n = write_bytes(&value, sizeof(uint));
1056 if (n != sizeof(uint)) {
1057 return false;
1058 }
1059 }
1060 return true;
1061 }
1062
1063 void AOTCodeReader::fix_relocations(CodeBlob* code_blob) {
1064 LogStreamHandle(Trace, aot, reloc) log;
1065 uint offset = read_position();
1066 int count = *(int*)addr(offset);
1067 offset += sizeof(int);
1068 if (log.is_enabled()) {
1069 log.print_cr("======== extra relocations count=%d", count);
1070 }
1071 uint* reloc_data = (uint*)addr(offset);
1072 offset += (count * sizeof(uint));
1073 set_read_position(offset);
1074
1075 RelocIterator iter(code_blob);
1076 int j = 0;
1077 while (iter.next()) {
1078 switch (iter.type()) {
1079 case relocInfo::none:
1080 break;
1081 case relocInfo::runtime_call_type: {
1082 address dest = _cache->address_for_id(reloc_data[j]);
1083 if (dest != (address)-1) {
1084 ((CallRelocation*)iter.reloc())->set_destination(dest);
1085 }
1086 break;
1087 }
1088 case relocInfo::runtime_call_w_cp_type:
1089 // this relocation should not be in cache (see write_relocations)
1090 assert(false, "runtime_call_w_cp_type relocation is not implemented");
1091 break;
1092 case relocInfo::external_word_type: {
1093 address target = _cache->address_for_id(reloc_data[j]);
1094 // Add external address to global table
1095 int index = ExternalsRecorder::find_index(target);
1096 // Update index in relocation
1097 Relocation::add_jint(iter.data(), index);
1098 external_word_Relocation* reloc = (external_word_Relocation*)iter.reloc();
1099 assert(reloc->target() == target, "sanity");
1100 reloc->set_value(target); // Patch address in the code
1101 break;
1102 }
1103 case relocInfo::internal_word_type: {
1104 internal_word_Relocation* r = (internal_word_Relocation*)iter.reloc();
1105 r->fix_relocation_after_aot_load(aot_code_entry()->dumptime_content_start_addr(), code_blob->content_begin());
1106 break;
1107 }
1108 case relocInfo::section_word_type: {
1109 section_word_Relocation* r = (section_word_Relocation*)iter.reloc();
1110 r->fix_relocation_after_aot_load(aot_code_entry()->dumptime_content_start_addr(), code_blob->content_begin());
1111 break;
1112 }
1113 case relocInfo::post_call_nop_type:
1114 break;
1115 default:
1116 assert(false,"relocation %d unimplemented", (int)iter.type());
1117 break;
1118 }
1119 if (log.is_enabled()) {
1120 iter.print_current_on(&log);
1121 }
1122 j++;
1123 }
1124 assert(j == count, "sanity");
1125 }
1126
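// Oop maps are stored as the raw ImmutableOopMapSet bytes preceded by an int size;
// read_oop_map_set() returns a pointer directly into the mapped cache buffer.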
1127 bool AOTCodeCache::write_oop_map_set(CodeBlob& cb) {
1128 ImmutableOopMapSet* oopmaps = cb.oop_maps();
1129 int oopmaps_size = oopmaps->nr_of_bytes();
1130 if (!write_bytes(&oopmaps_size, sizeof(int))) {
1131 return false;
1132 }
1133 uint n = write_bytes(oopmaps, oopmaps->nr_of_bytes());
1134 if (n != (uint)oopmaps->nr_of_bytes()) {
1135 return false;
1136 }
1137 return true;
1138 }
1139
1140 ImmutableOopMapSet* AOTCodeReader::read_oop_map_set() {
1141 uint offset = read_position();
1142 int size = *(int *)addr(offset);
1143 offset += sizeof(int);
1144 ImmutableOopMapSet* oopmaps = (ImmutableOopMapSet *)addr(offset);
1145 offset += size;
1146 set_read_position(offset);
1147 return oopmaps;
1148 }
1149
1150 #ifndef PRODUCT
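// Asm remarks are stored as (code offset, C string id) pairs and dbg strings as C string ids,
// each preceded by a count that is reserved up front and patched after iteration.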
1151 bool AOTCodeCache::write_asm_remarks(CodeBlob& cb) {
1152 // Write asm remarks
1153 uint* count_ptr = (uint *)reserve_bytes(sizeof(uint));
1154 if (count_ptr == nullptr) {
1155 return false;
1156 }
1157 uint count = 0;
1158 bool result = cb.asm_remarks().iterate([&] (uint offset, const char* str) -> bool {
1159 log_trace(aot, codecache, stubs)("asm remark offset=%d, str='%s'", offset, str);
1160 uint n = write_bytes(&offset, sizeof(uint));
1161 if (n != sizeof(uint)) {
1162 return false;
1163 }
1164 const char* cstr = add_C_string(str);
1165 int id = _table->id_for_C_string((address)cstr);
1166 assert(id != -1, "asm remark string '%s' not found in AOTCodeAddressTable", str);
1167 n = write_bytes(&id, sizeof(int));
1168 if (n != sizeof(int)) {
1169 return false;
1170 }
1171 count += 1;
1172 return true;
1173 });
1174 *count_ptr = count;
1175 return result;
1176 }
1177
1178 void AOTCodeReader::read_asm_remarks(AsmRemarks& asm_remarks) {
1179 // Read asm remarks
1180 uint offset = read_position();
1181 uint count = *(uint *)addr(offset);
1182 offset += sizeof(uint);
1183 for (uint i = 0; i < count; i++) {
1184 uint remark_offset = *(uint *)addr(offset);
1185 offset += sizeof(uint);
1186 int remark_string_id = *(uint *)addr(offset);
1187 offset += sizeof(int);
1188 const char* remark = (const char*)_cache->address_for_C_string(remark_string_id);
1189 asm_remarks.insert(remark_offset, remark);
1190 }
1191 set_read_position(offset);
1192 }
1193
1194 bool AOTCodeCache::write_dbg_strings(CodeBlob& cb) {
1195 // Write dbg strings
1196 uint* count_ptr = (uint *)reserve_bytes(sizeof(uint));
1197 if (count_ptr == nullptr) {
1198 return false;
1199 }
1200 uint count = 0;
1201 bool result = cb.dbg_strings().iterate([&] (const char* str) -> bool {
1202 log_trace(aot, codecache, stubs)("dbg string=%s", str);
1203 const char* cstr = add_C_string(str);
1204 int id = _table->id_for_C_string((address)cstr);
1205 assert(id != -1, "dbg string '%s' not found in AOTCodeAddressTable", str);
1206 uint n = write_bytes(&id, sizeof(int));
1207 if (n != sizeof(int)) {
1208 return false;
1209 }
1210 count += 1;
1211 return true;
1212 });
1213 *count_ptr = count;
1214 return result;
1215 }
1216
1217 void AOTCodeReader::read_dbg_strings(DbgStrings& dbg_strings) {
1218 // Read dbg strings
1219 uint offset = read_position();
1220 uint count = *(uint *)addr(offset);
1221 offset += sizeof(uint);
1222 for (uint i = 0; i < count; i++) {
1223 int string_id = *(uint *)addr(offset);
1224 offset += sizeof(int);
1225 const char* str = (const char*)_cache->address_for_C_string(string_id);
1226 dbg_strings.insert(str);
1227 }
1228 set_read_position(offset);
1229 }
1230 #endif // PRODUCT
1231
1232 //======================= AOTCodeAddressTable ===============
1233
1234 // Address table ids for generated routines, external addresses and C
1235 // string addresses are partitioned into positive integer ranges
1236 // defined by the following base and max values,
1237 // i.e. [_extrs_base, _extrs_base + _extrs_max - 1],
1238 //      [_blobs_base, _blobs_base + _blobs_max - 1],
1239 //      ...
1240 //      [_c_str_base, _c_str_base + _c_str_max - 1].
1241
1242 #define _extrs_max 100
1243 #define _stubs_max 3
1244
1245 #define _shared_blobs_max 20
1246 #define _C1_blobs_max 10
1247 #define _blobs_max (_shared_blobs_max+_C1_blobs_max)
1248 #define _all_max (_extrs_max+_stubs_max+_blobs_max)
1249
1250 #define _extrs_base 0
1251 #define _stubs_base (_extrs_base + _extrs_max)
1252 #define _shared_blobs_base (_stubs_base + _stubs_max)
1253 #define _C1_blobs_base (_shared_blobs_base + _shared_blobs_max)
1254 #define _blobs_end (_shared_blobs_base + _blobs_max)
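// With the current constants the id ranges are: extrs [0, 99], stubs [100, 102],
// shared blobs [103, 122], C1 blobs [123, 132]; C strings start at _c_str_base == _all_max == 133.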
1255
1256 #define SET_ADDRESS(type, addr) \
1257 { \
1258 type##_addr[type##_length++] = (address) (addr); \
1259 assert(type##_length <= type##_max, "increase size"); \
1260 }
1261
1262 static bool initializing_extrs = false;
1263
1264 void AOTCodeAddressTable::init_extrs() {
1265 if (_extrs_complete || initializing_extrs) return; // Done already
1266
1267 assert(_blobs_end <= _all_max, "AOTCodeAddress table ranges need adjusting");
1268
1269 initializing_extrs = true;
1270 _extrs_addr = NEW_C_HEAP_ARRAY(address, _extrs_max, mtCode);
1271
1272 _extrs_length = 0;
1273
1274 // Record addresses of VM runtime methods
1275 SET_ADDRESS(_extrs, SharedRuntime::fixup_callers_callsite);
1276 SET_ADDRESS(_extrs, SharedRuntime::handle_wrong_method);
1277 SET_ADDRESS(_extrs, SharedRuntime::handle_wrong_method_abstract);
1278 SET_ADDRESS(_extrs, SharedRuntime::handle_wrong_method_ic_miss);
1279 #if defined(AARCH64) && !defined(ZERO)
1280 SET_ADDRESS(_extrs, JavaThread::aarch64_get_thread_helper);
1281 #endif
1282 {
1283 // Required by Shared blobs
1284 SET_ADDRESS(_extrs, Deoptimization::fetch_unroll_info);
1285 SET_ADDRESS(_extrs, Deoptimization::unpack_frames);
1286 SET_ADDRESS(_extrs, SafepointSynchronize::handle_polling_page_exception);
1287 SET_ADDRESS(_extrs, SharedRuntime::resolve_opt_virtual_call_C);
1288 SET_ADDRESS(_extrs, SharedRuntime::resolve_virtual_call_C);
1289 SET_ADDRESS(_extrs, SharedRuntime::resolve_static_call_C);
1290 SET_ADDRESS(_extrs, SharedRuntime::throw_StackOverflowError);
1291 SET_ADDRESS(_extrs, SharedRuntime::throw_delayed_StackOverflowError);
1292 SET_ADDRESS(_extrs, SharedRuntime::throw_AbstractMethodError);
1293 SET_ADDRESS(_extrs, SharedRuntime::throw_IncompatibleClassChangeError);
1294 SET_ADDRESS(_extrs, SharedRuntime::throw_NullPointerException_at_call);
1295 }
1296
1297 #ifdef COMPILER1
1298 {
1299 // Required by C1 blobs
1300 SET_ADDRESS(_extrs, static_cast<int (*)(oopDesc*)>(SharedRuntime::dtrace_object_alloc));
1301 SET_ADDRESS(_extrs, SharedRuntime::exception_handler_for_return_address);
1302 SET_ADDRESS(_extrs, SharedRuntime::register_finalizer);
1303 SET_ADDRESS(_extrs, Runtime1::is_instance_of);
1304 SET_ADDRESS(_extrs, Runtime1::exception_handler_for_pc);
1305 SET_ADDRESS(_extrs, Runtime1::check_abort_on_vm_exception);
1306 SET_ADDRESS(_extrs, Runtime1::new_instance);
1307 SET_ADDRESS(_extrs, Runtime1::counter_overflow);
1308 SET_ADDRESS(_extrs, Runtime1::new_type_array);
1309 SET_ADDRESS(_extrs, Runtime1::new_object_array);
1310 SET_ADDRESS(_extrs, Runtime1::new_multi_array);
1311 SET_ADDRESS(_extrs, Runtime1::throw_range_check_exception);
1312 SET_ADDRESS(_extrs, Runtime1::throw_index_exception);
1313 SET_ADDRESS(_extrs, Runtime1::throw_div0_exception);
1314 SET_ADDRESS(_extrs, Runtime1::throw_null_pointer_exception);
1315 SET_ADDRESS(_extrs, Runtime1::throw_array_store_exception);
1316 SET_ADDRESS(_extrs, Runtime1::throw_class_cast_exception);
1317 SET_ADDRESS(_extrs, Runtime1::throw_incompatible_class_change_error);
1318 SET_ADDRESS(_extrs, Runtime1::is_instance_of);
1319 SET_ADDRESS(_extrs, Runtime1::monitorenter);
1320 SET_ADDRESS(_extrs, Runtime1::monitorexit);
1321 SET_ADDRESS(_extrs, Runtime1::deoptimize);
1322 SET_ADDRESS(_extrs, Runtime1::access_field_patching);
1323 SET_ADDRESS(_extrs, Runtime1::move_klass_patching);
1324 SET_ADDRESS(_extrs, Runtime1::move_mirror_patching);
1325 SET_ADDRESS(_extrs, Runtime1::move_appendix_patching);
1326 SET_ADDRESS(_extrs, Runtime1::predicate_failed_trap);
1327 SET_ADDRESS(_extrs, Runtime1::unimplemented_entry);
1328 SET_ADDRESS(_extrs, Thread::current);
1329 SET_ADDRESS(_extrs, CompressedKlassPointers::base_addr());
1330 #ifndef PRODUCT
1331 SET_ADDRESS(_extrs, os::breakpoint);
1332 #endif
1333 }
1334 #endif
1335
1336 #ifdef COMPILER2
1337 {
1338 // Required by C2 blobs
1339 SET_ADDRESS(_extrs, Deoptimization::uncommon_trap);
1340 SET_ADDRESS(_extrs, OptoRuntime::handle_exception_C);
1341 SET_ADDRESS(_extrs, OptoRuntime::new_instance_C);
1342 SET_ADDRESS(_extrs, OptoRuntime::new_array_C);
1343 SET_ADDRESS(_extrs, OptoRuntime::new_array_nozero_C);
1344 SET_ADDRESS(_extrs, OptoRuntime::multianewarray2_C);
1345 SET_ADDRESS(_extrs, OptoRuntime::multianewarray3_C);
1346 SET_ADDRESS(_extrs, OptoRuntime::multianewarray4_C);
1347 SET_ADDRESS(_extrs, OptoRuntime::multianewarray5_C);
1348 SET_ADDRESS(_extrs, OptoRuntime::multianewarrayN_C);
1349 SET_ADDRESS(_extrs, OptoRuntime::complete_monitor_locking_C);
1350 SET_ADDRESS(_extrs, OptoRuntime::monitor_notify_C);
1351 SET_ADDRESS(_extrs, OptoRuntime::monitor_notifyAll_C);
1352 SET_ADDRESS(_extrs, OptoRuntime::rethrow_C);
1353 SET_ADDRESS(_extrs, OptoRuntime::slow_arraycopy_C);
1354 SET_ADDRESS(_extrs, OptoRuntime::register_finalizer_C);
1355 SET_ADDRESS(_extrs, OptoRuntime::vthread_end_first_transition_C);
1356 SET_ADDRESS(_extrs, OptoRuntime::vthread_start_final_transition_C);
1357 SET_ADDRESS(_extrs, OptoRuntime::vthread_start_transition_C);
1358 SET_ADDRESS(_extrs, OptoRuntime::vthread_end_transition_C);
1359 #if defined(AARCH64)
1360 SET_ADDRESS(_extrs, JavaThread::verify_cross_modify_fence_failure);
1361 #endif // AARCH64
1362 }
1363 #endif // COMPILER2
1364
1365 #if INCLUDE_G1GC
1366 SET_ADDRESS(_extrs, G1BarrierSetRuntime::write_ref_field_pre_entry);
1367 #endif
1368 #if INCLUDE_SHENANDOAHGC
1369 SET_ADDRESS(_extrs, ShenandoahRuntime::write_barrier_pre);
1370 SET_ADDRESS(_extrs, ShenandoahRuntime::load_reference_barrier_phantom);
1371 SET_ADDRESS(_extrs, ShenandoahRuntime::load_reference_barrier_phantom_narrow);
1372 #endif
1373 #if INCLUDE_ZGC
1374 SET_ADDRESS(_extrs, ZBarrierSetRuntime::load_barrier_on_oop_field_preloaded_addr());
1375 SET_ADDRESS(_extrs, ZBarrierSetRuntime::load_barrier_on_phantom_oop_field_preloaded_addr());
1376 #if defined(AMD64)
1377 SET_ADDRESS(_extrs, &ZPointerLoadShift);
1378 #endif
1379 #endif
1380 #ifndef ZERO
1381 #if defined(AMD64) || defined(AARCH64) || defined(RISCV64)
1382 SET_ADDRESS(_extrs, MacroAssembler::debug64);
1383 #endif
1384 #endif // ZERO
1385
1386 _extrs_complete = true;
1387 log_debug(aot, codecache, init)("External addresses recorded");
1388 }
1389
1390 static bool initializing_early_stubs = false;
1391
1392 void AOTCodeAddressTable::init_early_stubs() {
1393 if (_complete || initializing_early_stubs) return; // Done already
1394 initializing_early_stubs = true;
1395 _stubs_addr = NEW_C_HEAP_ARRAY(address, _stubs_max, mtCode);
1396 _stubs_length = 0;
1397 SET_ADDRESS(_stubs, StubRoutines::forward_exception_entry());
1398
1399 {
1400 // Required by C1 blobs
1401 #if defined(AMD64) && !defined(ZERO)
1402 SET_ADDRESS(_stubs, StubRoutines::x86::double_sign_flip());
1403 SET_ADDRESS(_stubs, StubRoutines::x86::d2l_fixup());
1404 #endif // AMD64
1405 }
1406
1407 _early_stubs_complete = true;
1408 log_info(aot, codecache, init)("Early stubs recorded");
1409 }
1410
1411 static bool initializing_shared_blobs = false;
1412
1413 void AOTCodeAddressTable::init_shared_blobs() {
1414 if (_complete || initializing_shared_blobs) return; // Done already
1415 initializing_shared_blobs = true;
1416 address* blobs_addr = NEW_C_HEAP_ARRAY(address, _blobs_max, mtCode);
1417
1418 // Divide the _shared_blobs_addr array into chunks because they could be initialized in parallel
1419 _shared_blobs_addr = blobs_addr;
1420 _C1_blobs_addr = _shared_blobs_addr + _shared_blobs_max;
1421
1422 _shared_blobs_length = 0;
1423 _C1_blobs_length = 0;
1424
1425 // clear the address table
1426 memset(blobs_addr, 0, sizeof(address)* _blobs_max);
1427
1428 // Record addresses of generated code blobs
1429 SET_ADDRESS(_shared_blobs, SharedRuntime::get_handle_wrong_method_stub());
1430 SET_ADDRESS(_shared_blobs, SharedRuntime::get_ic_miss_stub());
1431 SET_ADDRESS(_shared_blobs, SharedRuntime::deopt_blob()->unpack());
1432 SET_ADDRESS(_shared_blobs, SharedRuntime::deopt_blob()->unpack_with_exception());
1433 SET_ADDRESS(_shared_blobs, SharedRuntime::deopt_blob()->unpack_with_reexecution());
1434 SET_ADDRESS(_shared_blobs, SharedRuntime::deopt_blob()->unpack_with_exception_in_tls());
1435 #if INCLUDE_JVMCI
1436 if (EnableJVMCI) {
1437 SET_ADDRESS(_shared_blobs, SharedRuntime::deopt_blob()->uncommon_trap());
1438 SET_ADDRESS(_shared_blobs, SharedRuntime::deopt_blob()->implicit_exception_uncommon_trap());
1439 }
1440 #endif
1441
1442 _shared_blobs_complete = true;
1443 log_debug(aot, codecache, init)("Early shared blobs recorded");
1444 _complete = true;
1445 }
1446
1447 void AOTCodeAddressTable::init_early_c1() {
1448 #ifdef COMPILER1
1449 // Runtime1 Blobs
1450 StubId id = StubInfo::stub_base(StubGroup::C1);
1451 // include forward_exception in range we publish
1452 StubId limit = StubInfo::next(StubId::c1_forward_exception_id);
1453 for (; id != limit; id = StubInfo::next(id)) {
1454 if (Runtime1::blob_for(id) == nullptr) {
1455 log_info(aot, codecache, init)("C1 blob %s is missing", Runtime1::name_for(id));
1456 continue;
1457 }
1458 if (Runtime1::entry_for(id) == nullptr) {
1459 log_info(aot, codecache, init)("C1 blob %s is missing entry", Runtime1::name_for(id));
1460 continue;
1461 }
1462 address entry = Runtime1::entry_for(id);
1463 SET_ADDRESS(_C1_blobs, entry);
1464 }
1465 #endif // COMPILER1
1466 assert(_C1_blobs_length <= _C1_blobs_max, "increase _C1_blobs_max to %d", _C1_blobs_length);
1467 _early_c1_complete = true;
1468 }
1469
1470 #undef SET_ADDRESS
1471
1472 AOTCodeAddressTable::~AOTCodeAddressTable() {
1473 if (_extrs_addr != nullptr) {
1474 FREE_C_HEAP_ARRAY(address, _extrs_addr);
1475 }
1476 if (_stubs_addr != nullptr) {
1477 FREE_C_HEAP_ARRAY(address, _stubs_addr);
1478 }
1479 if (_shared_blobs_addr != nullptr) {
1480 FREE_C_HEAP_ARRAY(address, _shared_blobs_addr);
1481 }
1482 }
1483
1484 #ifdef PRODUCT
1485 #define MAX_STR_COUNT 200
1486 #else
1487 #define MAX_STR_COUNT 500
1488 #endif
1489 #define _c_str_max MAX_STR_COUNT
1490 static const int _c_str_base = _all_max;
1491
1492 static const char* _C_strings_in[MAX_STR_COUNT] = {nullptr}; // Incoming strings
1493 static const char* _C_strings[MAX_STR_COUNT] = {nullptr}; // Our duplicates
1494 static int _C_strings_count = 0;
1495 static int _C_strings_s[MAX_STR_COUNT] = {0};
1496 static int _C_strings_id[MAX_STR_COUNT] = {0};
1497 static int _C_strings_used = 0;
1498
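// The cached C string table starts with an array of string lengths followed by the
// concatenated strings; the copy is kept in C heap because compiled code may still
// reference the strings after the mapped cache buffer is freed.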
1499 void AOTCodeCache::load_strings() {
1500 uint strings_count = _load_header->strings_count();
1501 if (strings_count == 0) {
1502 return;
1503 }
1504 uint strings_offset = _load_header->strings_offset();
1505 uint* string_lengths = (uint*)addr(strings_offset);
1506 strings_offset += (strings_count * sizeof(uint));
1507 uint strings_size = _load_header->entries_offset() - strings_offset;
1508 // We have to keep cached strings alive longer than the _cache buffer
1509 // because they are referenced from compiled code which may
1510 // still be executed at VM exit after _cache is freed.
1511 char* p = NEW_C_HEAP_ARRAY(char, strings_size+1, mtCode);
1512 memcpy(p, addr(strings_offset), strings_size);
1513 _C_strings_buf = p;
1514 assert(strings_count <= MAX_STR_COUNT, "sanity");
1515 for (uint i = 0; i < strings_count; i++) {
1516 _C_strings[i] = p;
1517 uint len = string_lengths[i];
1518 _C_strings_s[i] = i;
1519 _C_strings_id[i] = i;
1520 p += len;
1521 }
1522 assert((uint)(p - _C_strings_buf) <= strings_size, "(" INTPTR_FORMAT " - " INTPTR_FORMAT ") = %d > %d ", p2i(p), p2i(_C_strings_buf), (uint)(p - _C_strings_buf), strings_size);
1523 _C_strings_count = strings_count;
1524 _C_strings_used = strings_count;
1525 log_debug(aot, codecache, init)(" Loaded %d C strings of total length %d at offset %d from AOT Code Cache", _C_strings_count, strings_size, strings_offset);
1526 }
1527
1629 assert(_extrs_complete, "AOT Code Cache VM runtime addresses table is not complete");
1630 if (idx == -1) {
1631 return (address)-1;
1632 }
1633 uint id = (uint)idx;
1634 // special case for symbols encoded relative to os::init
1635 if (id > (_c_str_base + _c_str_max)) {
1636 return (address)os::init + idx;
1637 }
1638 if (idx < 0) {
1639 fatal("Incorrect id %d for AOT Code Cache addresses table", id);
1640 return nullptr;
1641 }
1642 // no need to compare unsigned id against 0
1643 if (/* id >= _extrs_base && */ id < _extrs_length) {
1644 return _extrs_addr[id - _extrs_base];
1645 }
1646 if (id >= _stubs_base && id < _stubs_base + _stubs_length) {
1647 return _stubs_addr[id - _stubs_base];
1648 }
1649 if (id >= _shared_blobs_base && id < _shared_blobs_base + _shared_blobs_length) {
1650 return _shared_blobs_addr[id - _shared_blobs_base];
1651 }
1652 if (id >= _C1_blobs_base && id < _C1_blobs_base + _C1_blobs_length) {
1653 return _C1_blobs_addr[id - _C1_blobs_base];
1654 }
1655 if (id >= _c_str_base && id < (_c_str_base + (uint)_C_strings_count)) {
1656 return address_for_C_string(id - _c_str_base);
1657 }
1658 fatal("Incorrect id %d for AOT Code Cache addresses table", id);
1659 return nullptr;
1660 }
1661
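// Translate an address in generated code to an address-table id. Lookup order: C strings,
// stubs, code blobs, then external runtime addresses; as a last resort an address that
// resolves into the VM image is encoded as its distance from os::init (see address_for_id).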
1662 int AOTCodeAddressTable::id_for_address(address addr, RelocIterator reloc, CodeBlob* code_blob) {
1663 assert(_extrs_complete, "AOT Code Cache VM runtime addresses table is not complete");
1664 int id = -1;
1665 if (addr == (address)-1) { // Static call stub has jump to itself
1666 return id;
1667 }
1668 // Search for C string
1669 id = id_for_C_string(addr);
1670 if (id >= 0) {
1671 return id + _c_str_base;
1672 }
1673 if (StubRoutines::contains(addr)) {
1674 // Search in stubs
1675 id = search_address(addr, _stubs_addr, _stubs_length);
1676 if (id < 0) {
1677 StubCodeDesc* desc = StubCodeDesc::desc_for(addr);
1678 if (desc == nullptr) {
1679 desc = StubCodeDesc::desc_for(addr + frame::pc_return_offset);
1680 }
1681 const char* sub_name = (desc != nullptr) ? desc->name() : "<unknown>";
1682 assert(false, "Address " INTPTR_FORMAT " for Stub:%s is missing in AOT Code Cache addresses table", p2i(addr), sub_name);
1683 } else {
1684 return id + _stubs_base;
1685 }
1686 } else {
1687 CodeBlob* cb = CodeCache::find_blob(addr);
1688 if (cb != nullptr) {
1689 // Search in code blobs
1690 int id_base = _shared_blobs_base;
1691 id = search_address(addr, _shared_blobs_addr, _blobs_max);
1692 if (id < 0) {
1693 assert(false, "Address " INTPTR_FORMAT " for Blob:%s is missing in AOT Code Cache addresses table", p2i(addr), cb->name());
1694 } else {
1695 return id_base + id;
1696 }
1697 } else {
1698 // Search in runtime functions
1699 id = search_address(addr, _extrs_addr, _extrs_length);
1700 if (id < 0) {
1701 ResourceMark rm;
1702 const int buflen = 1024;
1703 char* func_name = NEW_RESOURCE_ARRAY(char, buflen);
1704 int offset = 0;
1705 if (os::dll_address_to_function_name(addr, func_name, buflen, &offset)) {
1706 if (offset > 0) {
1707 // Could be address of C string
1708 uint dist = (uint)pointer_delta(addr, (address)os::init, 1);
1709 log_debug(aot, codecache)("Address " INTPTR_FORMAT " (offset %d) for runtime target '%s' is missing in AOT Code Cache addresses table",
1710 p2i(addr), dist, (const char*)addr);
1711 assert(dist > (uint)(_all_max + MAX_STR_COUNT), "change encoding of distance");
1712 return dist;
1713 }
1714 #ifdef ASSERT
1715 reloc.print_current_on(tty);
1716 code_blob->print_on(tty);
1717 code_blob->print_code_on(tty);
1718 assert(false, "Address " INTPTR_FORMAT " for runtime target '%s+%d' is missing in AOT Code Cache addresses table", p2i(addr), func_name, offset);
1719 #endif
1720 } else {
1721 #ifdef ASSERT
1722 reloc.print_current_on(tty);
1723 code_blob->print_on(tty);
1724 code_blob->print_code_on(tty);
1725 os::find(addr, tty);
1726 assert(false, "Address " INTPTR_FORMAT " for <unknown>/('%s') is missing in AOT Code Cache addresses table", p2i(addr), (const char*)addr);
1727 #endif
1728 }
1729 } else {
1730 return _extrs_base + id;
1731 }
1732 }
1733 }
1734 return id;
1735 }
1736
1737 // This is called after initialize() but before init2()
1738 // and _cache is not set yet.
1739 void AOTCodeCache::print_on(outputStream* st) {
1740 if (opened_cache != nullptr && opened_cache->for_use()) {
1741 st->print_cr("\nAOT Code Cache");
1742 uint count = opened_cache->_load_header->entries_count();
1743 uint* search_entries = (uint*)opened_cache->addr(opened_cache->_load_header->entries_offset()); // [id, index]
1744 AOTCodeEntry* load_entries = (AOTCodeEntry*)(search_entries + 2 * count);
1745
1746 for (uint i = 0; i < count; i++) {
1747 // Use search_entries[] to order output
1748 int index = search_entries[2*i + 1];
1749 AOTCodeEntry* entry = &(load_entries[index]);
1750
1751 uint entry_position = entry->offset();
1752 uint name_offset = entry->name_offset() + entry_position;
1753 const char* saved_name = opened_cache->addr(name_offset);
1754
1755 st->print_cr("%4u: %10s idx:%4u Id:%u size=%u '%s'",
1756 i, aot_code_entry_kind_name[entry->kind()], index, entry->id(), entry->size(), saved_name);
1757 }
1758 }
1759 }
12 * version 2 for more details (a copy is included in the LICENSE file that
13 * accompanied this code).
14 *
15 * You should have received a copy of the GNU General Public License version
16 * 2 along with this work; if not, write to the Free Software Foundation,
17 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
18 *
19 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
20 * or visit www.oracle.com if you need additional information or have any
21 * questions.
22 *
23 */
24
25
26 #include "asm/macroAssembler.hpp"
27 #include "cds/aotCacheAccess.hpp"
28 #include "cds/aotMetaspace.hpp"
29 #include "cds/cds_globals.hpp"
30 #include "cds/cdsConfig.hpp"
31 #include "cds/heapShared.hpp"
32 #include "ci/ciConstant.hpp"
33 #include "ci/ciEnv.hpp"
34 #include "ci/ciField.hpp"
35 #include "ci/ciMethod.hpp"
36 #include "ci/ciMethodData.hpp"
37 #include "ci/ciObject.hpp"
38 #include "ci/ciUtilities.inline.hpp"
39 #include "classfile/javaAssertions.hpp"
40 #include "classfile/stringTable.hpp"
41 #include "classfile/symbolTable.hpp"
42 #include "classfile/systemDictionary.hpp"
43 #include "classfile/vmClasses.hpp"
44 #include "classfile/vmIntrinsics.hpp"
45 #include "code/aotCodeCache.hpp"
46 #include "code/codeBlob.hpp"
47 #include "code/codeCache.hpp"
48 #include "code/oopRecorder.inline.hpp"
49 #include "compiler/abstractCompiler.hpp"
50 #include "compiler/compilationPolicy.hpp"
51 #include "compiler/compileBroker.hpp"
52 #include "compiler/compileTask.hpp"
53 #include "gc/g1/g1BarrierSetRuntime.hpp"
54 #include "gc/shared/barrierSetAssembler.hpp"
55 #include "gc/shared/gcConfig.hpp"
56 #include "logging/logStream.hpp"
57 #include "memory/memoryReserver.hpp"
58 #include "memory/universe.hpp"
59 #include "oops/klass.inline.hpp"
60 #include "oops/method.inline.hpp"
61 #include "oops/trainingData.hpp"
62 #include "prims/jvmtiThreadState.hpp"
63 #include "runtime/atomicAccess.hpp"
64 #include "runtime/deoptimization.hpp"
65 #include "runtime/flags/flagSetting.hpp"
66 #include "runtime/globals_extension.hpp"
67 #include "runtime/handles.inline.hpp"
68 #include "runtime/java.hpp"
69 #include "runtime/jniHandles.inline.hpp"
70 #include "runtime/mountUnmountDisabler.hpp"
71 #include "runtime/mutexLocker.hpp"
72 #include "runtime/os.inline.hpp"
73 #include "runtime/sharedRuntime.hpp"
74 #include "runtime/stubCodeGenerator.hpp"
75 #include "runtime/stubRoutines.hpp"
76 #include "runtime/threadIdentifier.hpp"
77 #include "runtime/timerTrace.hpp"
78 #include "utilities/copy.hpp"
79 #include "utilities/formatBuffer.hpp"
80 #include "utilities/ostream.hpp"
81 #include "utilities/spinYield.hpp"
82 #ifdef COMPILER1
83 #include "c1/c1_LIRAssembler.hpp"
84 #include "c1/c1_Runtime1.hpp"
85 #include "gc/g1/c1/g1BarrierSetC1.hpp"
86 #include "gc/shared/c1/barrierSetC1.hpp"
87 #if INCLUDE_SHENANDOAHGC
88 #include "gc/shenandoah/c1/shenandoahBarrierSetC1.hpp"
89 #endif // INCLUDE_SHENANDOAHGC
90 #include "gc/z/c1/zBarrierSetC1.hpp"
91 #endif // COMPILER1
92 #ifdef COMPILER2
93 #include "opto/runtime.hpp"
94 #endif
95 #if INCLUDE_JVMCI
96 #include "jvmci/jvmci.hpp"
97 #endif
98 #if INCLUDE_G1GC
99 #include "gc/g1/g1BarrierSetRuntime.hpp"
100 #endif
101 #if INCLUDE_SHENANDOAHGC
102 #include "gc/shenandoah/shenandoahRuntime.hpp"
103 #endif
104 #if INCLUDE_ZGC
105 #include "gc/z/zBarrierSetRuntime.hpp"
106 #endif
107 #if defined(X86) && !defined(ZERO)
108 #include "rdtsc_x86.hpp"
109 #endif
110
111 #include <errno.h>
112 #include <sys/stat.h>
113
114 const char* aot_code_entry_kind_name[] = {
115 #define DECL_KIND_STRING(kind) XSTR(kind),
116 DO_AOTCODEENTRY_KIND(DECL_KIND_STRING)
117 #undef DECL_KIND_STRING
118 };
119
120 static elapsedTimer _t_totalLoad;
121 static elapsedTimer _t_totalPreload;
122 static elapsedTimer _t_totalRegister;
123 static elapsedTimer _t_totalFind;
124 static elapsedTimer _t_totalStore;
125
126 static bool enable_timers() {
127 return CITime || log_is_enabled(Info, init);
128 }
129
130 static void report_load_failure() {
131 if (AbortVMOnAOTCodeFailure) {
132 vm_exit_during_initialization("Unable to use AOT Code Cache.", nullptr);
133 }
134 log_info(aot, codecache, init)("Unable to use AOT Code Cache.");
135 AOTCodeCache::disable_caching();
136 }
137
138 static void report_store_failure() {
139 if (AbortVMOnAOTCodeFailure) {
140 tty->print_cr("Unable to create AOT Code Cache.");
141 vm_abort(false);
142 }
143 log_info(aot, codecache, exit)("Unable to create AOT Code Cache.");
144 AOTCodeCache::disable_caching();
145 }
146
147 // The sequence of AOT code caching flag and parameter settings.
148 //
149 // 1. The initial AOT code caching flags setting is done
164
165 // The next methods determine what we do with AOT code depending
166 // on the phase of the AOT process: assembly or production.
167
168 bool AOTCodeCache::is_dumping_adapter() {
169 return AOTAdapterCaching && is_on_for_dump();
170 }
171
172 bool AOTCodeCache::is_using_adapter() {
173 return AOTAdapterCaching && is_on_for_use();
174 }
175
176 bool AOTCodeCache::is_dumping_stub() {
177 return AOTStubCaching && is_on_for_dump();
178 }
179
180 bool AOTCodeCache::is_using_stub() {
181 return AOTStubCaching && is_on_for_use();
182 }
183
184 bool AOTCodeCache::is_dumping_code() {
185 return AOTCodeCaching && is_on_for_dump();
186 }
187
188 bool AOTCodeCache::is_using_code() {
189 return AOTCodeCaching && is_on_for_use();
190 }
191
192 // This is used before AOTCodeCache is initialized
193 // but after AOT (CDS) Cache flags consistency is checked.
194 bool AOTCodeCache::maybe_dumping_code() {
195 return AOTCodeCaching && CDSConfig::is_dumping_final_static_archive();
196 }
197
198 // The next methods can be called regardless of AOT code cache status.
199 // Initially they are called during AOT flags parsing and finalized
200 // in AOTCodeCache::initialize().
201 void AOTCodeCache::enable_caching() {
202 FLAG_SET_ERGO_IF_DEFAULT(AOTCodeCaching, true);
203 FLAG_SET_ERGO_IF_DEFAULT(AOTStubCaching, true);
204 FLAG_SET_ERGO_IF_DEFAULT(AOTAdapterCaching, true);
205 }
206
207 void AOTCodeCache::disable_caching() {
208 FLAG_SET_ERGO(AOTCodeCaching, false);
209 FLAG_SET_ERGO(AOTStubCaching, false);
210 FLAG_SET_ERGO(AOTAdapterCaching, false);
211 }
212
213 bool AOTCodeCache::is_caching_enabled() {
214 return AOTCodeCaching || AOTStubCaching || AOTAdapterCaching;
215 }
216
217 static uint32_t encode_id(AOTCodeEntry::Kind kind, int id) {
218 assert(AOTCodeEntry::is_valid_entry_kind(kind), "invalid AOTCodeEntry kind %d", (int)kind);
219   // There can be a conflict of id between an Adapter and a *Blob, but that should not cause any functional issue
220   // because both id and kind are used to find an entry, and that combination should be unique
221 if (kind == AOTCodeEntry::Adapter) {
222 return id;
223 } else if (kind == AOTCodeEntry::SharedBlob) {
224 assert(StubInfo::is_shared(static_cast<BlobId>(id)), "not a shared blob id %d", id);
225 return id;
226 } else if (kind == AOTCodeEntry::C1Blob) {
227 assert(StubInfo::is_c1(static_cast<BlobId>(id)), "not a c1 blob id %d", id);
228 return id;
229 } else {
230 // kind must be AOTCodeEntry::C2Blob
231 assert(StubInfo::is_c2(static_cast<BlobId>(id)), "not a c2 blob id %d", id);
232 return id;
233 }
234 }
235
236 static uint _max_aot_code_size = 0;
237 uint AOTCodeCache::max_aot_code_size() {
238 return _max_aot_code_size;
239 }
240
241 bool AOTCodeCache::is_code_load_thread_on() {
242 return UseAOTCodeLoadThread && AOTCodeCaching;
243 }
244
245 bool AOTCodeCache::allow_const_field(ciConstant& value) {
246 ciEnv* env = CURRENT_ENV;
247 precond(env != nullptr);
248 assert(!env->is_precompile() || is_dumping_code(), "AOT compilation should be enabled");
249 return !env->is_precompile() // Restrict only when we generate AOT code
250 // Can not trust primitive too || !is_reference_type(value.basic_type())
251 // May disable this too for now || is_reference_type(value.basic_type()) && value.as_object()->should_be_constant()
252 ;
253 }
254
255 // It is called from AOTMetaspace::initialize_shared_spaces()
256 // which is called from universe_init().
257 // At this point all AOT class linking settings are finalized
258 // and the AOT cache is open, so we can map the AOT code region.
259 void AOTCodeCache::initialize() {
260 if (!is_caching_enabled()) {
261 log_info(aot, codecache, init)("AOT Code Cache is not used: disabled.");
262 return;
263 }
264 #if defined(ZERO) || !(defined(AMD64) || defined(AARCH64))
265 log_info(aot, codecache, init)("AOT Code Cache is not supported on this platform.");
266 disable_caching();
267 return;
268 #else
269 assert(!FLAG_IS_DEFAULT(AOTCache), "AOTCache should be specified");
270
271 // Disable stubs caching until JDK-8357398 is fixed.
272 FLAG_SET_ERGO(AOTStubCaching, false);
273
274 if (VerifyOops) {
275 // Disable AOT stubs caching when VerifyOops flag is on.
276     // VerifyOops code generates a lot of C strings which overflow
277     // the AOT C string table (which has a fixed size).
278     // The AOT C string table will be reworked later to handle such cases.
279 //
280 // Note: AOT adapters are not affected - they don't have oop operations.
281 log_info(aot, codecache, init)("AOT Stubs Caching is not supported with VerifyOops.");
282 FLAG_SET_ERGO(AOTStubCaching, false);
283 }
284
285 bool is_dumping = false;
286 bool is_using = false;
287 if (CDSConfig::is_dumping_final_static_archive() && CDSConfig::is_dumping_aot_linked_classes()) {
288 is_dumping = is_caching_enabled();
289 } else if (CDSConfig::is_using_archive() && CDSConfig::is_using_aot_linked_classes()) {
290 is_using = is_caching_enabled();
291 }
292 if (ClassInitBarrierMode > 0 && !(is_dumping && AOTCodeCaching)) {
293 log_info(aot, codecache, init)("Set ClassInitBarrierMode to 0 because AOT Code dumping is off.");
294 FLAG_SET_ERGO(ClassInitBarrierMode, 0);
295 }
296 if (!(is_dumping || is_using)) {
297 log_info(aot, codecache, init)("AOT Code Cache is not used: AOT Class Linking is not used.");
298 disable_caching();
299 return; // AOT code caching disabled on command line
300 }
301   // Reserve the AOT Cache region when we are dumping AOT code.
302 _max_aot_code_size = AOTCodeMaxSize;
303 if (is_dumping && !FLAG_IS_DEFAULT(AOTCodeMaxSize)) {
304 if (!is_aligned(AOTCodeMaxSize, os::vm_allocation_granularity())) {
305 _max_aot_code_size = align_up(AOTCodeMaxSize, os::vm_allocation_granularity());
306 log_debug(aot,codecache,init)("Max AOT Code Cache size is aligned up to %uK", (int)(max_aot_code_size()/K));
307 }
308 }
309 size_t aot_code_size = is_using ? AOTCacheAccess::get_aot_code_region_size() : 0;
310 if (is_using && aot_code_size == 0) {
311 log_info(aot, codecache, init)("AOT Code Cache is empty");
312 disable_caching();
313 return;
314 }
315 if (!open_cache(is_dumping, is_using)) {
316 if (is_using) {
317 report_load_failure();
318 } else {
319 report_store_failure();
320 }
321 return;
322 }
323 if (is_dumping) {
324 FLAG_SET_DEFAULT(FoldStableValues, false);
325 FLAG_SET_DEFAULT(ForceUnreachable, true);
326 }
327 FLAG_SET_DEFAULT(DelayCompilerStubsGeneration, false);
328 #endif // defined(ZERO) || !(defined(AMD64) || defined(AARCH64))
329 }
330
331 static AOTCodeCache* opened_cache = nullptr; // Use this until we verify the cache
332 AOTCodeCache* AOTCodeCache::_cache = nullptr;
333 DEBUG_ONLY( bool AOTCodeCache::_passed_init2 = false; )
334
335 // It is called after universe_init() when all GC settings are finalized.
336 void AOTCodeCache::init2() {
337 DEBUG_ONLY( _passed_init2 = true; )
338 if (opened_cache == nullptr) {
339 return;
340 }
341   // After Universe is initialized
342 if (!opened_cache->verify_config_on_use()) { // Check on AOT code loading
343 delete opened_cache;
344 opened_cache = nullptr;
345 report_load_failure();
346 return;
347 }
348
349 // initialize aot runtime constants as appropriate to this runtime
350 AOTRuntimeConstants::initialize_from_runtime();
351
352 // initialize the table of external routines and initial stubs so we can save
353 // generated code blobs that reference them
354 AOTCodeAddressTable* table = opened_cache->_table;
355 assert(table != nullptr, "should be initialized already");
356 table->init_extrs();
357
358 // Now cache and address table are ready for AOT code generation
359 _cache = opened_cache;
360
361 // Set ClassInitBarrierMode after all checks since it affects code generation
362 if (is_dumping_code()) {
363 FLAG_SET_ERGO_IF_DEFAULT(ClassInitBarrierMode, 1);
364 } else {
365 FLAG_SET_ERGO(ClassInitBarrierMode, 0);
366 }
367 }
368
369 bool AOTCodeCache::open_cache(bool is_dumping, bool is_using) {
370 opened_cache = new AOTCodeCache(is_dumping, is_using);
371 if (opened_cache->failed()) {
372 delete opened_cache;
373 opened_cache = nullptr;
374 return false;
375 }
376 return true;
377 }
378
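// print_helper() emits a compact status tag for an nmethod that has a matching AOT code
// entry, for example "AP4[L]#12": 'A' for AOT code, an optional 'P' for preload code,
// the compilation level, then the [L]oaded / load [F]ailed / not entrant ('I') markers,
// and finally the compilation id.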
379 static void print_helper(nmethod* nm, outputStream* st) {
380 AOTCodeCache::iterate([&](AOTCodeEntry* e) {
381 if (e->method() == nm->method()) {
382 ResourceMark rm;
383 stringStream ss;
384 ss.print("A%s%d", (e->for_preload() ? "P" : ""), e->comp_level());
385 ss.print("[%s%s%s]",
386 (e->is_loaded() ? "L" : ""),
387 (e->load_fail() ? "F" : ""),
388 (e->not_entrant() ? "I" : ""));
389 ss.print("#%d", e->comp_id());
390
391 st->print(" %s", ss.freeze());
392 }
393 });
394 }
395
396 void AOTCodeCache::close() {
397 if (is_on()) {
398 delete _cache; // Free memory
399 _cache = nullptr;
400 opened_cache = nullptr;
401 }
402 }
403
404 class CachedCodeDirectory {
405 public:
406 uint _aot_code_size;
407 char* _aot_code_data;
408
409 void set_aot_code_data(uint size, char* aot_data) {
410 _aot_code_size = size;
411 AOTCacheAccess::set_pointer(&_aot_code_data, aot_data);
412 }
413
414 static CachedCodeDirectory* create();
415 };
416
417 // Storing AOT code in the AOT code region (ac) of AOT Cache:
418 //
419 // [1] Use CachedCodeDirectory to keep track of all of data related to AOT code.
420 // E.g., you can build a hashtable to record what methods have been archived.
421 //
422 // [2] Memory for all data for AOT code, including CachedCodeDirectory, should be
423 // allocated using AOTCacheAccess::allocate_aot_code_region().
424 //
425 // [3] CachedCodeDirectory must be the very first allocation.
426 //
427 // [4] Two kinds of pointers can be stored:
428 // - A pointer p that points to metadata. AOTCacheAccess::can_generate_aot_code(p) must return true.
429 // - A pointer to a buffer returned by AOTCacheAccess::allocate_aot_code_region().
430 // (It's OK to point to an interior location within this buffer).
431 // Such pointers must be stored using AOTCacheAccess::set_pointer()
432 //
433 // The buffers allocated by AOTCacheAccess::allocate_aot_code_region() are in a contiguous region. At runtime, this
434 // region is mapped to the process address space. All the pointers in this buffer are relocated as necessary
435 // (e.g., to account for the runtime location of the CodeCache).
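//
// A minimal illustrative sketch (not part of this implementation) of the dump-time
// sequence implied by rules [1]-[4]; see finish_write() below for the real code:
//
//   CachedCodeDirectory* dir = CachedCodeDirectory::create();            // [3] the very first allocation
//   char* data = (char*)AOTCacheAccess::allocate_aot_code_region(size);  // [2] all data comes from this region
//   // ... fill 'data' with archived code ...
//   dir->set_aot_code_data(size, data);                                  // [4] stored via AOTCacheAccess::set_pointer()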
436 //
437 // This is always at the very beginning of the mmapped CDS "ac" (AOT code) region
438 static CachedCodeDirectory* _aot_code_directory = nullptr;
439
440 CachedCodeDirectory* CachedCodeDirectory::create() {
441 assert(AOTCacheAccess::is_aot_code_region_empty(), "must be");
442 CachedCodeDirectory* dir = (CachedCodeDirectory*)AOTCacheAccess::allocate_aot_code_region(sizeof(CachedCodeDirectory));
443 return dir;
444 }
445
446 #define DATA_ALIGNMENT HeapWordSize
447
448 AOTCodeCache::AOTCodeCache(bool is_dumping, bool is_using) :
449 _load_header(nullptr),
450 _load_buffer(nullptr),
451 _store_buffer(nullptr),
452 _C_store_buffer(nullptr),
453 _write_position(0),
454 _load_size(0),
455 _store_size(0),
456 _for_use(is_using),
457 _for_dump(is_dumping),
458 _closing(false),
459 _failed(false),
460 _lookup_failed(false),
461 _for_preload(false),
462 _has_clinit_barriers(false),
463 _table(nullptr),
464 _load_entries(nullptr),
465 _search_entries(nullptr),
466 _store_entries(nullptr),
467 _C_strings_buf(nullptr),
468 _store_entries_cnt(0),
469 _compile_id(0),
470 _comp_level(0)
471 {
472   // Read the header at the beginning of the cache
473 if (_for_use) {
474 // Read cache
475 size_t load_size = AOTCacheAccess::get_aot_code_region_size();
476 ReservedSpace rs = MemoryReserver::reserve(load_size, mtCode);
477 if (!rs.is_reserved()) {
478       log_warning(aot, codecache, init)("Failed to reserve %u bytes of memory for mapping AOT code region into AOT Code Cache", (uint)load_size);
479 set_failed();
480 return;
481 }
482 if (!AOTCacheAccess::map_aot_code_region(rs)) {
483 log_warning(aot, codecache, init)("Failed to read/mmap AOT code region (ac) into AOT Code Cache");
484 set_failed();
485 return;
486 }
487 _aot_code_directory = (CachedCodeDirectory*)rs.base();
488
489 _load_size = _aot_code_directory->_aot_code_size;
490 _load_buffer = _aot_code_directory->_aot_code_data;
491 assert(is_aligned(_load_buffer, DATA_ALIGNMENT), "load_buffer is not aligned");
492 log_info(aot, codecache, init)("Mapped %u bytes at address " INTPTR_FORMAT " from AOT Code Cache", _load_size, p2i(_load_buffer));
493
494 _load_header = (Header*)addr(0);
495 if (!_load_header->verify(_load_size)) {
496 set_failed();
497 return;
498 }
499 log_info (aot, codecache, init)("Loaded %u AOT code entries from AOT Code Cache", _load_header->entries_count());
500 log_debug(aot, codecache, init)(" %s: total=%u", aot_code_entry_kind_name[AOTCodeEntry::Adapter], _load_header->adapters_count());
501 log_debug(aot, codecache, init)(" %s: total=%u", aot_code_entry_kind_name[AOTCodeEntry::SharedBlob], _load_header->shared_blobs_count());
502 log_debug(aot, codecache, init)(" %s: total=%u", aot_code_entry_kind_name[AOTCodeEntry::C1Blob], _load_header->C1_blobs_count());
503 log_debug(aot, codecache, init)(" %s: total=%u", aot_code_entry_kind_name[AOTCodeEntry::C2Blob], _load_header->C2_blobs_count());
504 log_debug(aot, codecache, init)(" %s: total=%u", aot_code_entry_kind_name[AOTCodeEntry::Stub], _load_header->stubs_count());
505 log_debug(aot, codecache, init)(" %s: total=%u", aot_code_entry_kind_name[AOTCodeEntry::Nmethod], _load_header->nmethods_count());
506 log_debug(aot, codecache, init)(" AOT code cache size: %u bytes", _load_header->cache_size());
507
508 // Read strings
509 load_strings();
510 }
511 if (_for_dump) {
512 _C_store_buffer = NEW_C_HEAP_ARRAY(char, max_aot_code_size() + DATA_ALIGNMENT, mtCode);
513 _store_buffer = align_up(_C_store_buffer, DATA_ALIGNMENT);
514     // Entries are allocated at the end of the buffer in reverse order (as on a stack).
515 _store_entries = (AOTCodeEntry*)align_up(_C_store_buffer + max_aot_code_size(), DATA_ALIGNMENT);
516 log_debug(aot, codecache, init)("Allocated store buffer at address " INTPTR_FORMAT " of size %u", p2i(_store_buffer), max_aot_code_size());
517 }
518 _table = new AOTCodeAddressTable();
519 }
520
521 void AOTCodeCache::invalidate(AOTCodeEntry* entry) {
522   // This can be executed concurrently
523 if (entry != nullptr && is_on()) { // Request could come after cache is closed.
524 _cache->invalidate_entry(entry);
525 }
526 }
527
528 void AOTCodeCache::init_early_stubs_table() {
529 AOTCodeAddressTable* table = addr_table();
530 if (table != nullptr) {
531 table->init_early_stubs();
532 }
533 }
534
535 void AOTCodeCache::init_shared_blobs_table() {
536 AOTCodeAddressTable* table = addr_table();
537 if (table != nullptr) {
538 table->init_shared_blobs();
539 }
540 }
541
542 void AOTCodeCache::init_stubs_table() {
543 AOTCodeAddressTable* table = addr_table();
544 if (table != nullptr) {
545 table->init_stubs();
546 }
547 }
548
549 void AOTCodeCache::init_early_c1_table() {
550 AOTCodeAddressTable* table = addr_table();
551 if (table != nullptr) {
552 table->init_early_c1();
553 }
554 }
555
556 void AOTCodeCache::init_c1_table() {
557 AOTCodeAddressTable* table = addr_table();
558 if (table != nullptr) {
559 table->init_c1();
560 }
561 }
562
563 void AOTCodeCache::init_c2_table() {
564 AOTCodeAddressTable* table = addr_table();
565 if (table != nullptr) {
566 table->init_c2();
567 }
568 }
569
570 AOTCodeCache::~AOTCodeCache() {
571 if (_closing) {
572 return; // Already closed
573 }
574 // Stop any further access to cache.
575 // Checked on entry to load_nmethod() and store_nmethod().
576 _closing = true;
577 if (_for_use) {
578 // Wait for all load_nmethod() finish.
579 wait_for_no_nmethod_readers();
580 }
581 // Prevent writing code into cache while we are closing it.
582 // This lock held by ciEnv::register_method() which calls store_nmethod().
583 MutexLocker ml(Compile_lock);
584 if (for_dump()) { // Finalize cache
585 finish_write();
586 }
587 _load_buffer = nullptr;
588 if (_C_store_buffer != nullptr) {
589 FREE_C_HEAP_ARRAY(char, _C_store_buffer);
590 _C_store_buffer = nullptr;
591 _store_buffer = nullptr;
592 }
593 if (_table != nullptr) {
594 MutexLocker ml(AOTCodeCStrings_lock, Mutex::_no_safepoint_check_flag);
595 delete _table;
596 _table = nullptr;
597 }
598 }
599
600 void AOTCodeCache::Config::record(uint cpu_features_offset) {
601 _flags = 0;
602 #ifdef ASSERT
603 _flags |= debugVM;
604 #endif
605 if (UseCompressedOops) {
606 _flags |= compressedOops;
607 }
608 if (UseCompressedClassPointers) {
609 _flags |= compressedClassPointers;
610 }
611 if (UseTLAB) {
612 _flags |= useTLAB;
613 }
614 if (JavaAssertions::systemClassDefault()) {
615 _flags |= systemClassAssertions;
616 }
617 if (JavaAssertions::userClassDefault()) {
618 _flags |= userClassAssertions;
619 }
620 if (EnableContended) {
621 _flags |= enableContendedPadding;
622 }
623 if (RestrictContended) {
624 _flags |= restrictContendedPadding;
625 }
626 if (PreserveFramePointer) {
627 _flags |= preserveFramePointer;
628 }
629 _codeCacheSize = pointer_delta(CodeCache::high_bound(), CodeCache::low_bound(), 1);
630 _compressedOopShift = CompressedOops::shift();
631 _compressedOopBase = CompressedOops::base();
632 _compressedKlassShift = CompressedKlassPointers::shift();
633 _compressedKlassBase = CompressedKlassPointers::base();
634 _contendedPaddingWidth = ContendedPaddingWidth;
635 _objectAlignment = ObjectAlignmentInBytes;
636 _gcCardSize = GCCardSizeInBytes;
637 _gc = (uint)Universe::heap()->kind();
638 _cpu_features_offset = cpu_features_offset;
639 }
640
641 bool AOTCodeCache::Config::verify(AOTCodeCache* cache) const {
642   // The first checks affect all cached AOT code
643 #ifdef ASSERT
644 if ((_flags & debugVM) == 0) {
645 log_debug(aot, codecache, init)("AOT Code Cache disabled: it was created by product VM, it can't be used by debug VM");
646 return false;
647 }
648 #else
649 if ((_flags & debugVM) != 0) {
650 log_debug(aot, codecache, init)("AOT Code Cache disabled: it was created by debug VM, it can't be used by product VM");
651 return false;
652 }
653 #endif
654
655 size_t codeCacheSize = pointer_delta(CodeCache::high_bound(), CodeCache::low_bound(), 1);
656 if (codeCacheSize > _codeCacheSize) { // Only allow smaller or equal CodeCache size in production run
657 log_debug(aot, codecache, init)("AOT Code Cache disabled: it was created with CodeCache size = %dKb vs current %dKb", (int)(_codeCacheSize/K), (int)(codeCacheSize/K));
658 return false;
659 }
660
661 CollectedHeap::Name aot_gc = (CollectedHeap::Name)_gc;
662 if (aot_gc != Universe::heap()->kind()) {
663 log_debug(aot, codecache, init)("AOT Code Cache disabled: it was created with different GC: %s vs current %s", GCConfig::hs_err_name(aot_gc), GCConfig::hs_err_name());
664 return false;
665 }
666
667   // We don't need to cache CardTable::card_shift() if GCCardSizeInBytes stays the same
668 if (_gcCardSize != (uint)GCCardSizeInBytes) {
669 log_debug(aot, codecache, init)("AOT Code Cache disabled: it was created with GCCardSizeInBytes = %d vs current %d", _gcCardSize, GCCardSizeInBytes);
670 return false;
671 }
672
673 if (_objectAlignment != (uint)ObjectAlignmentInBytes) {
674 log_debug(aot, codecache, init)("AOT Code Cache disabled: it was created with ObjectAlignmentInBytes = %d vs current %d", _objectAlignment, ObjectAlignmentInBytes);
675 return false;
676 }
677
678 if (((_flags & enableContendedPadding) != 0) != EnableContended) {
679 log_debug(aot, codecache, init)("AOT Code Cache disabled: it was created with EnableContended = %s vs current %s", (EnableContended ? "false" : "true"), (EnableContended ? "true" : "false"));
680 return false;
681 }
682 if (((_flags & restrictContendedPadding) != 0) != RestrictContended) {
683 log_debug(aot, codecache, init)("AOT Code Cache disabled: it was created with RestrictContended = %s vs current %s", (RestrictContended ? "false" : "true"), (RestrictContended ? "true" : "false"));
684 return false;
685 }
686 if (_contendedPaddingWidth != (uint)ContendedPaddingWidth) {
687 log_debug(aot, codecache, init)("AOT Code Cache disabled: it was created with ContendedPaddingWidth = %d vs current %d", _contendedPaddingWidth, ContendedPaddingWidth);
688 return false;
689 }
690
691 if (((_flags & preserveFramePointer) != 0) != PreserveFramePointer) {
692 log_debug(aot, codecache, init)("AOT Code Cache disabled: it was created with PreserveFramePointer = %s vs current %s", (PreserveFramePointer ? "false" : "true"), (PreserveFramePointer ? "true" : "false"));
693 return false;
694 }
695
696 if (((_flags & compressedClassPointers) != 0) != UseCompressedClassPointers) {
697 log_debug(aot, codecache, init)("AOT Code Cache disabled: it was created with UseCompressedClassPointers = %s vs current %s", (UseCompressedClassPointers ? "false" : "true"), (UseCompressedClassPointers ? "true" : "false"));
698 return false;
699 }
700 if (_compressedKlassShift != (uint)CompressedKlassPointers::shift()) {
701 log_debug(aot, codecache, init)("AOT Code Cache disabled: it was created with CompressedKlassPointers::shift() = %d vs current %d", _compressedKlassShift, CompressedKlassPointers::shift());
702 return false;
703 }
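  // Only a null vs non-null mismatch of the narrow-klass base is treated as fatal here;
  // two different non-null base addresses pass this check.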
704 if ((_compressedKlassBase == nullptr || CompressedKlassPointers::base() == nullptr) && (_compressedKlassBase != CompressedKlassPointers::base())) {
705 log_debug(aot, codecache, init)("AOT Code Cache disabled: incompatible CompressedKlassPointers::base(): %p vs current %p", _compressedKlassBase, CompressedKlassPointers::base());
706 return false;
707 }
708
709 if (((_flags & compressedOops) != 0) != UseCompressedOops) {
710 log_debug(aot, codecache, init)("AOT Code Cache disabled: it was created with UseCompressedOops = %s vs current %s", (UseCompressedOops ? "false" : "true"), (UseCompressedOops ? "true" : "false"));
711 return false;
712 }
713 if (_compressedOopShift != (uint)CompressedOops::shift()) {
714 log_debug(aot, codecache, init)("AOT Code Cache disabled: it was created with different CompressedOops::shift(): %d vs current %d", _compressedOopShift, CompressedOops::shift());
715 return false;
716 }
717 if ((_compressedOopBase == nullptr || CompressedOops::base() == nullptr) && (_compressedOopBase != CompressedOops::base())) {
718     log_debug(aot, codecache, init)("AOT Code Cache disabled: incompatible CompressedOops::base(): %p vs current %p", _compressedOopBase, CompressedOops::base());
719 return false;
720 }
721
722 LogStreamHandle(Debug, aot, codecache, init) log;
723 if (log.is_enabled()) {
724 log.print_cr("Available CPU features: %s", VM_Version::features_string());
725 }
726
727 uint offset = _cpu_features_offset;
728 uint cpu_features_size = *(uint *)cache->addr(offset);
729 assert(cpu_features_size == (uint)VM_Version::cpu_features_size(), "must be");
730 offset += sizeof(uint);
731
732 void* cached_cpu_features_buffer = (void *)cache->addr(offset);
733 if (log.is_enabled()) {
734 ResourceMark rm; // required for stringStream::as_string()
735 stringStream ss;
736 VM_Version::get_cpu_features_name(cached_cpu_features_buffer, ss);
737 log.print_cr("CPU features recorded in AOTCodeCache: %s", ss.as_string());
738 }
739
740 if (AOTCodeCPUFeatureCheck && !VM_Version::supports_features(cached_cpu_features_buffer)) {
741 if (log.is_enabled()) {
742 ResourceMark rm; // required for stringStream::as_string()
743 stringStream ss;
744 VM_Version::get_missing_features_name(cached_cpu_features_buffer, ss);
745 log.print_cr("AOT Code Cache disabled: required cpu features are missing: %s", ss.as_string());
746 }
747 return false;
748 }
749
750   // The next checks affect only AOT nmethods
751 if (((_flags & systemClassAssertions) != 0) != JavaAssertions::systemClassDefault()) {
752 log_debug(aot, codecache, init)("AOT Code Cache disabled: it was created with JavaAssertions::systemClassDefault() = %s vs current %s", (JavaAssertions::systemClassDefault() ? "disabled" : "enabled"), (JavaAssertions::systemClassDefault() ? "enabled" : "disabled"));
753 FLAG_SET_ERGO(AOTCodeCaching, false);
754 }
755 if (((_flags & userClassAssertions) != 0) != JavaAssertions::userClassDefault()) {
756 log_debug(aot, codecache, init)("AOT Code Cache disabled: it was created with JavaAssertions::userClassDefault() = %s vs current %s", (JavaAssertions::userClassDefault() ? "disabled" : "enabled"), (JavaAssertions::userClassDefault() ? "enabled" : "disabled"));
757 FLAG_SET_ERGO(AOTCodeCaching, false);
758 }
759
760 return true;
761 }
762
763 bool AOTCodeCache::Header::verify(uint load_size) const {
764 if (_version != AOT_CODE_VERSION) {
765 log_debug(aot, codecache, init)("AOT Code Cache disabled: different AOT Code version %d vs %d recorded in AOT Code header", AOT_CODE_VERSION, _version);
766 return false;
767 }
768 if (load_size < _cache_size) {
769 log_debug(aot, codecache, init)("AOT Code Cache disabled: AOT Code Cache size %d < %d recorded in AOT Code header", load_size, _cache_size);
770 return false;
771 }
772 return true;
773 }
774
775 volatile int AOTCodeCache::_nmethod_readers = 0;
776
777 AOTCodeCache* AOTCodeCache::open_for_use() {
778 if (AOTCodeCache::is_on_for_use()) {
779 return AOTCodeCache::cache();
780 }
781 return nullptr;
782 }
783
784 AOTCodeCache* AOTCodeCache::open_for_dump() {
785 if (AOTCodeCache::is_on_for_dump()) {
786 AOTCodeCache* cache = AOTCodeCache::cache();
787 cache->clear_lookup_failed(); // Reset bit
788 return cache;
789 }
790 return nullptr;
791 }
792
793 bool AOTCodeCache::is_address_in_aot_cache(address p) {
794 AOTCodeCache* cache = open_for_use();
795 if (cache == nullptr) {
796 return false;
797 }
798 if ((p >= (address)cache->cache_buffer()) &&
799 (p < (address)(cache->cache_buffer() + cache->load_size()))) {
800 return true;
801 }
802 return false;
803 }
804
805 static void copy_bytes(const char* from, address to, uint size) {
806 assert((int)size > 0, "sanity");
807 memcpy(to, from, size);
808 log_trace(aot, codecache)("Copied %d bytes from " INTPTR_FORMAT " to " INTPTR_FORMAT, size, p2i(from), p2i(to));
809 }
810
811 AOTCodeReader::AOTCodeReader(AOTCodeCache* cache, AOTCodeEntry* entry, CompileTask* task) {
812 _cache = cache;
813 _entry = entry;
814 _load_buffer = cache->cache_buffer();
815 _read_position = 0;
816 if (task != nullptr) {
817 _compile_id = task->compile_id();
818 _comp_level = task->comp_level();
819 _preload = task->preload();
820 } else {
821 _compile_id = 0;
822 _comp_level = 0;
823 _preload = false;
824 }
825 _lookup_failed = false;
826 }
827
828 void AOTCodeReader::set_read_position(uint pos) {
829 if (pos == _read_position) {
830 return;
831 }
832 assert(pos < _cache->load_size(), "offset:%d >= file size:%d", pos, _cache->load_size());
833 _read_position = pos;
834 }
835
836 bool AOTCodeCache::set_write_position(uint pos) {
837 if (pos == _write_position) {
838 return true;
839 }
840 if (_store_size < _write_position) {
841 _store_size = _write_position; // Adjust during write
842 }
843 assert(pos < _store_size, "offset:%d >= file size:%d", pos, _store_size);
844 _write_position = pos;
887 if (nbytes == 0) {
888 return 0;
889 }
890 uint new_position = _write_position + nbytes;
891 if (new_position >= (uint)((char*)_store_entries - _store_buffer)) {
892 log_warning(aot, codecache)("Failed to write %d bytes at offset %d to AOT Code Cache. Increase AOTCodeMaxSize.",
893 nbytes, _write_position);
894 set_failed();
895 report_store_failure();
896 return 0;
897 }
898 copy_bytes((const char* )buffer, (address)(_store_buffer + _write_position), nbytes);
899 log_trace(aot, codecache)("Wrote %d bytes at offset %d to AOT Code Cache", nbytes, _write_position);
900 _write_position += nbytes;
901 if (_store_size < _write_position) {
902 _store_size = _write_position;
903 }
904 return nbytes;
905 }
906
907 AOTCodeEntry* AOTCodeCache::find_code_entry(const methodHandle& method, uint comp_level) {
908 assert(is_using_code(), "AOT code caching should be enabled");
909 if (!method->in_aot_cache()) {
910 return nullptr;
911 }
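  // Bits of DisableAOTCode exclude individual compilation levels from AOT code lookup:
  // bit 0 - CompLevel_simple (tier 1), bit 1 - CompLevel_limited_profile (tier 2),
  // bit 2 - CompLevel_full_optimization (tier 4).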
912 switch (comp_level) {
913 case CompLevel_simple:
914 if ((DisableAOTCode & (1 << 0)) != 0) {
915 return nullptr;
916 }
917 break;
918 case CompLevel_limited_profile:
919 if ((DisableAOTCode & (1 << 1)) != 0) {
920 return nullptr;
921 }
922 break;
923 case CompLevel_full_optimization:
924 if ((DisableAOTCode & (1 << 2)) != 0) {
925 return nullptr;
926 }
927 break;
928
929 default: return nullptr; // Level 1, 2, and 4 only
930 }
931 TraceTime t1("Total time to find AOT code", &_t_totalFind, enable_timers(), false);
932 if (is_on() && _cache->cache_buffer() != nullptr) {
933 uint id = AOTCacheAccess::convert_method_to_offset(method());
934 AOTCodeEntry* entry = _cache->find_entry(AOTCodeEntry::Nmethod, id, comp_level);
935 if (entry == nullptr) {
936 LogStreamHandle(Info, aot, codecache, nmethod) log;
937 if (log.is_enabled()) {
938 ResourceMark rm;
939 const char* target_name = method->name_and_sig_as_C_string();
940 log.print("Missing entry for '%s' (comp_level %d, id: " UINT32_FORMAT_X_0 ")", target_name, (uint)comp_level, id);
941 }
942 #ifdef ASSERT
943 } else {
944 ResourceMark rm;
945 assert(method() == entry->method(), "AOTCodeCache: saved nmethod's method %p (name: %s id: " UINT32_FORMAT_X_0
946            ") is different from the method %p (name: %s, id: " UINT32_FORMAT_X_0 ") being looked up",
947 entry->method(), entry->method()->name_and_sig_as_C_string(), entry->id(), method(), method()->name_and_sig_as_C_string(), id);
948 #endif
949 }
950
951 DirectiveSet* directives = DirectivesStack::getMatchingDirective(method, nullptr);
952 if (directives->IgnorePrecompiledOption) {
953 LogStreamHandle(Info, aot, codecache, compilation) log;
954 if (log.is_enabled()) {
955 log.print("Ignore AOT code entry on level %d for ", comp_level);
956 method->print_value_on(&log);
957 }
958 return nullptr;
959 }
960
961 return entry;
962 }
963 return nullptr;
964 }
965
966 Method* AOTCodeEntry::method() {
967 assert(_kind == Nmethod, "invalid kind %d", _kind);
968 assert(AOTCodeCache::is_on_for_use(), "must be");
969 return AOTCacheAccess::convert_offset_to_method(_id);
970 }
971
972 void* AOTCodeEntry::operator new(size_t x, AOTCodeCache* cache) {
973 return (void*)(cache->add_entry());
974 }
975
976 static bool check_entry(AOTCodeEntry::Kind kind, uint id, uint comp_level, AOTCodeEntry* entry) {
977 if (entry->kind() == kind) {
978 assert(entry->id() == id, "sanity");
979     if (kind != AOTCodeEntry::Nmethod || // adapters and stubs have only one version
980         // Look only for a normal AOT code entry; preload code is handled separately
981 (!entry->not_entrant() && !entry->has_clinit_barriers() && (entry->comp_level() == comp_level))) {
982 return true; // Found
983 }
984 }
985 return false;
986 }
987
988 AOTCodeEntry* AOTCodeCache::find_entry(AOTCodeEntry::Kind kind, uint id, uint comp_level) {
989 assert(_for_use, "sanity");
990 uint count = _load_header->entries_count();
991 if (_load_entries == nullptr) {
992 // Read it
993 _search_entries = (uint*)addr(_load_header->search_table_offset()); // [id, index]
994 _load_entries = (AOTCodeEntry*)addr(_load_header->entries_offset());
995 log_debug(aot, codecache, init)("Read %d entries table at offset %d from AOT Code Cache", count, _load_header->entries_offset());
996 }
997 // Binary search
998 int l = 0;
999 int h = count - 1;
1000 while (l <= h) {
1001 int mid = (l + h) >> 1;
1002 int ix = mid * 2;
1003 uint is = _search_entries[ix];
1004 if (is == id) {
1005 int index = _search_entries[ix + 1];
1006 AOTCodeEntry* entry = &(_load_entries[index]);
1007 if (check_entry(kind, id, comp_level, entry)) {
1008 return entry; // Found
1009 }
1010       // Linear search around
1011 for (int i = mid - 1; i >= l; i--) { // search back
1012 ix = i * 2;
1013 is = _search_entries[ix];
1014 if (is != id) {
1015 break;
1016 }
1017 index = _search_entries[ix + 1];
1018 AOTCodeEntry* entry = &(_load_entries[index]);
1019 if (check_entry(kind, id, comp_level, entry)) {
1020 return entry; // Found
1021 }
1022 }
1023 for (int i = mid + 1; i <= h; i++) { // search forward
1024 ix = i * 2;
1025 is = _search_entries[ix];
1026 if (is != id) {
1027 break;
1028 }
1029 index = _search_entries[ix + 1];
1030 AOTCodeEntry* entry = &(_load_entries[index]);
1031 if (check_entry(kind, id, comp_level, entry)) {
1032 return entry; // Found
1033 }
1034 }
1035 break; // No match found
1036 } else if (is < id) {
1037 l = mid + 1;
1038 } else {
1039 h = mid - 1;
1040 }
1041 }
1042 return nullptr;
1043 }
1044
1045 void AOTCodeCache::invalidate_entry(AOTCodeEntry* entry) {
1046   assert(entry != nullptr, "all entries should be read already");
1047 if (entry->not_entrant()) {
1048 return; // Someone invalidated it already
1049 }
1050 #ifdef ASSERT
1051 assert(_load_entries != nullptr, "sanity");
1052 {
1053 uint name_offset = entry->offset() + entry->name_offset();
1054     const char* name = _load_buffer + name_offset;
1055 uint level = entry->comp_level();
1056 uint comp_id = entry->comp_id();
1057 bool for_preload = entry->for_preload();
1058 bool clinit_brs = entry->has_clinit_barriers();
1059 log_info(aot, codecache, nmethod)("Invalidating entry for '%s' (comp_id %d, comp_level %d, hash: " UINT32_FORMAT_X_0 "%s%s)",
1060 name, comp_id, level, entry->id(), (for_preload ? "P" : "A"), (clinit_brs ? ", has clinit barriers" : ""));
1061 }
1062 assert(entry->is_loaded() || entry->for_preload(), "invalidate only AOT code in use or a preload code");
1063 bool found = false;
1064 uint i = 0;
1065 uint count = 0;
1066 if (entry->for_preload()) {
1067 count = _load_header->preload_entries_count();
1068 AOTCodeEntry* preload_entry = (AOTCodeEntry*)addr(_load_header->preload_entries_offset());
1069 for (; i < count; i++) {
1070 if (entry == &preload_entry[i]) {
1071 break;
1072 }
1073 }
1074 } else {
1075 count = _load_header->entries_count();
1076 for(; i < count; i++) {
1077 if (entry == &(_load_entries[i])) {
1078 break;
1079 }
1080 }
1081 }
1082 found = (i < count);
1083 assert(found, "entry should exist");
1084 #endif
1085 entry->set_not_entrant();
1086 uint name_offset = entry->offset() + entry->name_offset();
1087   const char* name = _load_buffer + name_offset;
1088 uint level = entry->comp_level();
1089 uint comp_id = entry->comp_id();
1090 bool for_preload = entry->for_preload();
1091 bool clinit_brs = entry->has_clinit_barriers();
1092 log_info(aot, codecache, nmethod)("Invalidated entry for '%s' (comp_id %d, comp_level %d, hash: " UINT32_FORMAT_X_0 "%s%s)",
1093 name, comp_id, level, entry->id(), (for_preload ? "P" : "A"), (clinit_brs ? ", has clinit barriers" : ""));
1094
1095 if (!for_preload && (entry->comp_level() == CompLevel_full_optimization)) {
1096 // Invalidate preload code if normal AOT C2 code is invalidated,
1097 // most likely because some dependencies changed during run.
1098 // We can still use normal AOT code if preload code is
1099 // invalidated - normal AOT code has less restrictions.
1100 Method* method = entry->method();
1101 AOTCodeEntry* preload_entry = method->aot_code_entry();
1102 if (preload_entry != nullptr) {
1103 assert(preload_entry->for_preload(), "expecting only such entries here");
1104 invalidate_entry(preload_entry);
1105 }
1106 }
1107 }
1108
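// qsort comparator for the [id, index] search table: pairs are ordered by id, the first
// uint of each pair (the element size passed to qsort is 2*sizeof(uint)).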
1109 static int uint_cmp(const void *i, const void *j) {
1110 uint a = *(uint *)i;
1111 uint b = *(uint *)j;
1112 return a > b ? 1 : a < b ? -1 : 0;
1113 }
1114
1115 void AOTCodeCache::store_cpu_features(char*& buffer, uint buffer_size) {
1116 uint* size_ptr = (uint *)buffer;
1117 *size_ptr = buffer_size;
1118 buffer += sizeof(uint);
1119
1120 VM_Version::store_cpu_features(buffer);
1121 log_debug(aot, codecache, exit)("CPU features recorded in AOTCodeCache: %s", VM_Version::features_string());
1122 buffer += buffer_size;
1123 buffer = align_up(buffer, DATA_ALIGNMENT);
1124 }
1125
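// finish_write() copies everything collected in the store buffer into the "ac" region of
// the AOT cache. The resulting layout, with offsets recorded in the Header placed at the
// start of the region, is roughly:
//
//   Header | cpu features | preload AOTCodeEntry-s | preload code | AOTCodeEntry-s | code |
//   C strings | search table [id, index]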
1126 bool AOTCodeCache::finish_write() {
1127 if (!align_write()) {
1128 return false;
1129 }
1130 uint strings_offset = _write_position;
1131 int strings_count = store_strings();
1132 if (strings_count < 0) {
1133 return false;
1134 }
1135 if (!align_write()) {
1136 return false;
1137 }
1138 uint strings_size = _write_position - strings_offset;
1139
1140 uint code_count = _store_entries_cnt;
1141 if (code_count > 0) {
1142 _aot_code_directory = CachedCodeDirectory::create();
1143 assert(_aot_code_directory != nullptr, "Sanity check");
1144
1145 uint header_size = (uint)align_up(sizeof(AOTCodeCache::Header), DATA_ALIGNMENT);
1146 uint search_count = code_count * 2;
1147 uint search_size = search_count * sizeof(uint);
1148 uint entries_size = (uint)align_up(code_count * sizeof(AOTCodeEntry), DATA_ALIGNMENT); // In bytes
1149 // _write_position should include code and strings
1150 uint code_alignment = code_count * DATA_ALIGNMENT; // We align_up code size when storing it.
1151 uint cpu_features_size = VM_Version::cpu_features_size();
1152 uint total_cpu_features_size = sizeof(uint) + cpu_features_size; // sizeof(uint) to store cpu_features_size
1153 uint total_size = _write_position + header_size + code_alignment +
1154 search_size + entries_size +
1155 align_up(total_cpu_features_size, DATA_ALIGNMENT);
1156 assert(total_size < max_aot_code_size(), "AOT Code size (" UINT32_FORMAT " bytes) is greater than AOTCodeMaxSize(" UINT32_FORMAT " bytes).", total_size, max_aot_code_size());
1157
1158 // Allocate in AOT Cache buffer
1159 char* buffer = (char *)AOTCacheAccess::allocate_aot_code_region(total_size + DATA_ALIGNMENT);
1160 char* start = align_up(buffer, DATA_ALIGNMENT);
1161 char* current = start + header_size; // Skip header
1162
1163 uint cpu_features_offset = current - start;
1164 store_cpu_features(current, cpu_features_size);
1165 assert(is_aligned(current, DATA_ALIGNMENT), "sanity check");
1166 assert(current < start + total_size, "sanity check");
1167
1168 // Create ordered search table for entries [id, index];
1169 uint* search = NEW_C_HEAP_ARRAY(uint, search_count, mtCode);
1170
1171 AOTCodeEntry* entries_address = _store_entries; // Pointer to latest entry
1172 AOTCodeStats stats;
1173 uint max_size = 0;
1174     // AOTCodeEntry entries were allocated in reverse order in the store buffer.
1175     // Process them in reverse order so that the code stored first is cached first.
1176
1177 // Store AOTCodeEntry-s for preload code
1178 current = align_up(current, DATA_ALIGNMENT);
1179 uint preload_entries_cnt = 0;
1180 uint preload_entries_offset = current - start;
1181 AOTCodeEntry* preload_entries = (AOTCodeEntry*)current;
1182 for (int i = code_count - 1; i >= 0; i--) {
1183 AOTCodeEntry* entry = &entries_address[i];
1184 if (entry->load_fail()) {
1185 continue;
1186 }
1187 if (entry->for_preload()) {
1188 if (entry->not_entrant()) {
1189 // Skip not entrant preload code:
1190 // we can't pre-load code which may have failing dependencies.
1191 log_info(aot, codecache, exit)("Skip not entrant preload code comp_id: %d, comp_level: %d, hash: " UINT32_FORMAT_X_0 "%s",
1192 entry->comp_id(), entry->comp_level(), entry->id(), (entry->has_clinit_barriers() ? ", has clinit barriers" : ""));
1193 } else {
1194 copy_bytes((const char*)entry, (address)current, sizeof(AOTCodeEntry));
1195 stats.collect_entry_stats(entry);
1196 current += sizeof(AOTCodeEntry);
1197 preload_entries_cnt++;
1198 }
1199 }
1200 }
1201
1202 // Now write the data for preload AOTCodeEntry
1203 for (int i = 0; i < (int)preload_entries_cnt; i++) {
1204 AOTCodeEntry* entry = &preload_entries[i];
1205 uint size = align_up(entry->size(), DATA_ALIGNMENT);
1206 if (size > max_size) {
1207 max_size = size;
1208 }
1209 copy_bytes((_store_buffer + entry->offset()), (address)current, size);
1210 entry->set_offset(current - start); // New offset
1211 current += size;
1212 }
1213
1214 current = align_up(current, DATA_ALIGNMENT);
1215 uint entries_count = 0;
1216 uint new_entries_offset = current - start;
1217 AOTCodeEntry* code_entries = (AOTCodeEntry*)current;
1218 // Now scan normal entries
1219 for (int i = code_count - 1; i >= 0; i--) {
1220 AOTCodeEntry* entry = &entries_address[i];
1221 if (entry->load_fail() || entry->for_preload()) {
1222 continue;
1223 }
1224 if (entry->not_entrant()) {
1225 log_info(aot, codecache, exit)("Not entrant new entry comp_id: %d, comp_level: %d, hash: " UINT32_FORMAT_X_0 "%s",
1226 entry->comp_id(), entry->comp_level(), entry->id(), (entry->has_clinit_barriers() ? ", has clinit barriers" : ""));
1227 entry->set_entrant(); // Reset
1228 }
1229 copy_bytes((const char*)entry, (address)current, sizeof(AOTCodeEntry));
1230 stats.collect_entry_stats(entry);
1231 current += sizeof(AOTCodeEntry);
1232 search[entries_count*2 + 0] = entry->id();
1233 search[entries_count*2 + 1] = entries_count;
1234 entries_count++;
1235 }
1236
1237 // Now write the data for normal AOTCodeEntry
1238 for (int i = 0; i < (int)entries_count; i++) {
1239 AOTCodeEntry* entry = &code_entries[i];
1240 uint size = align_up(entry->size(), DATA_ALIGNMENT);
1241 if (size > max_size) {
1242 max_size = size;
1243 }
1244 copy_bytes((_store_buffer + entry->offset()), (address)current, size);
1245 entry->set_offset(current - start); // New offset
1246 current += size;
1247 }
1248
1249 if (preload_entries_cnt == 0 && entries_count == 0) {
1250 log_info(aot, codecache, exit)("AOT Code Cache was not created: no entries");
1251 FREE_C_HEAP_ARRAY(uint, search);
1252 return true; // Nothing to write
1253 }
1254 uint total_entries_cnt = preload_entries_cnt + entries_count;
1255 assert(total_entries_cnt <= code_count, "%d > %d", total_entries_cnt, code_count);
1256 // Write strings
1257 if (strings_count > 0) {
1258 copy_bytes((_store_buffer + strings_offset), (address)current, strings_size);
1259 strings_offset = (current - start); // New offset
1260 current += strings_size;
1261 }
1262
1263 uint search_table_offset = current - start;
1264 // Sort and store search table
1265 qsort(search, entries_count, 2*sizeof(uint), uint_cmp);
1266 search_size = 2 * entries_count * sizeof(uint);
1267 copy_bytes((const char*)search, (address)current, search_size);
1268 FREE_C_HEAP_ARRAY(uint, search);
1269 current += search_size;
1270
1271 log_stats_on_exit(stats);
1272
1273 uint size = (current - start);
1274 assert(size <= total_size, "%d > %d", size , total_size);
1275 log_debug(aot, codecache, exit)(" AOT code cache size: %u bytes, max entry's size: %u bytes", size, max_size);
1276
1277 // Finalize header
1278 AOTCodeCache::Header* header = (AOTCodeCache::Header*)start;
1279 header->init(size, (uint)strings_count, strings_offset,
1280 entries_count, search_table_offset, new_entries_offset,
1281 preload_entries_cnt, preload_entries_offset,
1282 stats.entry_count(AOTCodeEntry::Adapter), stats.entry_count(AOTCodeEntry::SharedBlob),
1283 stats.entry_count(AOTCodeEntry::C1Blob), stats.entry_count(AOTCodeEntry::C2Blob),
1284 stats.entry_count(AOTCodeEntry::Stub), cpu_features_offset);
1285
1286 log_info(aot, codecache, exit)("Wrote %d AOT code entries to AOT Code Cache", total_entries_cnt);
1287
1288 _aot_code_directory->set_aot_code_data(size, start);
1289 }
1290 return true;
1291 }
1292
1293 //------------------Store/Load AOT code ----------------------
1294
1295 bool AOTCodeCache::store_code_blob(CodeBlob& blob, AOTCodeEntry::Kind entry_kind, uint id, const char* name) {
1296 AOTCodeCache* cache = open_for_dump();
1297 if (cache == nullptr) {
1298 return false;
1299 }
1300 assert(AOTCodeEntry::is_valid_entry_kind(entry_kind), "invalid entry_kind %d", entry_kind);
1301
1302 if (AOTCodeEntry::is_adapter(entry_kind) && !is_dumping_adapter()) {
1303 return false;
1304 }
1305 if (AOTCodeEntry::is_blob(entry_kind) && !is_dumping_stub()) {
1306 return false;
1307 }
1308 log_debug(aot, codecache, stubs)("Writing blob '%s' (id=%u, kind=%s) to AOT Code Cache", name, id, aot_code_entry_kind_name[entry_kind]);
1343 return false;
1344 }
1345 CodeBlob::archive_blob(&blob, archive_buffer);
1346
1347 uint reloc_data_size = blob.relocation_size();
1348 n = cache->write_bytes((address)blob.relocation_begin(), reloc_data_size);
1349 if (n != reloc_data_size) {
1350 return false;
1351 }
1352
1353 bool has_oop_maps = false;
1354 if (blob.oop_maps() != nullptr) {
1355 if (!cache->write_oop_map_set(blob)) {
1356 return false;
1357 }
1358 has_oop_maps = true;
1359 }
1360
1361 #ifndef PRODUCT
1362 // Write asm remarks
1363 if (!cache->write_asm_remarks(blob.asm_remarks(), /* use_string_table */ true)) {
1364 return false;
1365 }
1366 if (!cache->write_dbg_strings(blob.dbg_strings(), /* use_string_table */ true)) {
1367 return false;
1368 }
1369 #endif /* PRODUCT */
1370
1371 if (!cache->write_relocations(blob)) {
1372 if (!cache->failed()) {
1373 // We may miss an address in AOT table - skip this code blob.
1374 cache->set_write_position(entry_position);
1375 }
1376 return false;
1377 }
1378
1379 uint entry_size = cache->_write_position - entry_position;
1380 AOTCodeEntry* entry = new(cache) AOTCodeEntry(entry_kind, encode_id(entry_kind, id),
1381 entry_position, entry_size, name_offset, name_size,
1382 blob_offset, has_oop_maps, blob.content_begin());
1383 log_debug(aot, codecache, stubs)("Wrote code blob '%s' (id=%u, kind=%s) to AOT Code Cache", name, id, aot_code_entry_kind_name[entry_kind]);
1384 return true;
1385 }
1386
1392
1393 CodeBlob* AOTCodeCache::load_code_blob(AOTCodeEntry::Kind entry_kind, uint id, const char* name) {
1394 AOTCodeCache* cache = open_for_use();
1395 if (cache == nullptr) {
1396 return nullptr;
1397 }
1398 assert(AOTCodeEntry::is_valid_entry_kind(entry_kind), "invalid entry_kind %d", entry_kind);
1399
1400 if (AOTCodeEntry::is_adapter(entry_kind) && !is_using_adapter()) {
1401 return nullptr;
1402 }
1403 if (AOTCodeEntry::is_blob(entry_kind) && !is_using_stub()) {
1404 return nullptr;
1405 }
1406 log_debug(aot, codecache, stubs)("Reading blob '%s' (id=%u, kind=%s) from AOT Code Cache", name, id, aot_code_entry_kind_name[entry_kind]);
1407
1408 AOTCodeEntry* entry = cache->find_entry(entry_kind, encode_id(entry_kind, id));
1409 if (entry == nullptr) {
1410 return nullptr;
1411 }
1412 AOTCodeReader reader(cache, entry, nullptr);
1413 CodeBlob* blob = reader.compile_code_blob(name);
1414
1415 log_debug(aot, codecache, stubs)("%sRead blob '%s' (id=%u, kind=%s) from AOT Code Cache",
1416 (blob == nullptr? "Failed to " : ""), name, id, aot_code_entry_kind_name[entry_kind]);
1417 return blob;
1418 }
1419
1420 CodeBlob* AOTCodeCache::load_code_blob(AOTCodeEntry::Kind entry_kind, BlobId id) {
1421 assert(AOTCodeEntry::is_blob(entry_kind),
1422 "wrong entry kind for blob id %s", StubInfo::name(id));
1423 return load_code_blob(entry_kind, (uint)id, StubInfo::name(id));
1424 }
1425
1426 CodeBlob* AOTCodeReader::compile_code_blob(const char* name) {
1427 uint entry_position = _entry->offset();
1428
1429 // Read name
1430 uint name_offset = entry_position + _entry->name_offset();
1431   uint name_size = _entry->name_size(); // Includes '\0'
1432 const char* stored_name = addr(name_offset);
1433
1434 if (strncmp(stored_name, name, (name_size - 1)) != 0) {
1435 log_warning(aot, codecache, stubs)("Saved blob's name '%s' is different from the expected name '%s'",
1436 stored_name, name);
1437 set_lookup_failed(); // Skip this blob
1438 return nullptr;
1439 }
1440
1441 // Read archived code blob
1442 uint offset = entry_position + _entry->code_offset();
1443 CodeBlob* archived_blob = (CodeBlob*)addr(offset);
1444 offset += archived_blob->size();
1445
1446 address reloc_data = (address)addr(offset);
1447 offset += archived_blob->relocation_size();
1448 set_read_position(offset);
1449
1450 ImmutableOopMapSet* oop_maps = nullptr;
1451 if (_entry->has_oop_maps()) {
1452 oop_maps = read_oop_map_set();
1453 }
1454
1455 CodeBlob* code_blob = CodeBlob::create(archived_blob,
1456 stored_name,
1457 reloc_data,
1458 oop_maps
1459 );
1460 if (code_blob == nullptr) { // no space left in CodeCache
1461 return nullptr;
1462 }
1463
1464 #ifndef PRODUCT
1465 code_blob->asm_remarks().init();
1466 read_asm_remarks(code_blob->asm_remarks(), /* use_string_table */ true);
1467 code_blob->dbg_strings().init();
1468 read_dbg_strings(code_blob->dbg_strings(), /* use_string_table */ true);
1469 #endif // PRODUCT
1470
1471 fix_relocations(code_blob);
1472
1473 #ifdef ASSERT
1474 LogStreamHandle(Trace, aot, codecache, stubs) log;
1475 if (log.is_enabled()) {
1476 FlagSetting fs(PrintRelocations, true);
1477 code_blob->print_on(&log);
1478 }
1479 #endif
1480 return code_blob;
1481 }
1482
1483 bool AOTCodeCache::store_stub(StubCodeGenerator* cgen, vmIntrinsicID id, const char* name, address start) {
1484 if (!is_dumping_stub()) {
1485 return false;
1486 }
1487 AOTCodeCache* cache = open_for_dump();
1488 if (cache == nullptr) {
1489 return false;
1490 }
1491 log_info(aot, codecache, stubs)("Writing stub '%s' id:%d to AOT Code Cache", name, (int)id);
1492 if (!cache->align_write()) {
1493 return false;
1494 }
1495 #ifdef ASSERT
1496 CodeSection* cs = cgen->assembler()->code_section();
1497 if (cs->has_locs()) {
1498 uint reloc_count = cs->locs_count();
1499 tty->print_cr("======== write stubs code section relocations [%d]:", reloc_count);
1500 // Collect additional data
1501 RelocIterator iter(cs);
1502 while (iter.next()) {
1503 switch (iter.type()) {
1504 case relocInfo::none:
1505 break;
1506 default: {
1507 iter.print_current_on(tty);
1508 fatal("stub's relocation %d unimplemented", (int)iter.type());
1509 break;
1510 }
1511 }
1512 }
1513 }
1514 #endif
1515 uint entry_position = cache->_write_position;
1516
1517 // Write code
1518 uint code_offset = 0;
1519 uint code_size = cgen->assembler()->pc() - start;
1520 uint n = cache->write_bytes(start, code_size);
1521 if (n != code_size) {
1522 return false;
1523 }
1524 // Write name
1525 uint name_offset = cache->_write_position - entry_position;
1526   uint name_size = (uint)strlen(name) + 1; // Includes '\0'
1527 n = cache->write_bytes(name, name_size);
1528 if (n != name_size) {
1529 return false;
1530 }
1531 uint entry_size = cache->_write_position - entry_position;
1532 AOTCodeEntry* entry = new(cache) AOTCodeEntry(entry_position, entry_size, name_offset, name_size,
1533 code_offset, code_size,
1534 AOTCodeEntry::Stub, (uint32_t)id);
1535 log_info(aot, codecache, stubs)("Wrote stub '%s' id:%d to AOT Code Cache", name, (int)id);
1536 return true;
1537 }
1538
1539 bool AOTCodeCache::load_stub(StubCodeGenerator* cgen, vmIntrinsicID id, const char* name, address start) {
1540 if (!is_using_stub()) {
1541 return false;
1542 }
1543 assert(start == cgen->assembler()->pc(), "wrong buffer");
1544 AOTCodeCache* cache = open_for_use();
1545 if (cache == nullptr) {
1546 return false;
1547 }
1548 AOTCodeEntry* entry = cache->find_entry(AOTCodeEntry::Stub, (uint)id);
1549 if (entry == nullptr) {
1550 return false;
1551 }
1552 uint entry_position = entry->offset();
1553 // Read name
1554 uint name_offset = entry->name_offset() + entry_position;
1555   uint name_size = entry->name_size(); // Includes '\0'
1556 const char* saved_name = cache->addr(name_offset);
1557 if (strncmp(name, saved_name, (name_size - 1)) != 0) {
1558 log_warning(aot, codecache)("Saved stub's name '%s' is different from '%s' for id:%d", saved_name, name, (int)id);
1559 cache->set_failed();
1560 report_load_failure();
1561 return false;
1562 }
1563 log_info(aot, codecache, stubs)("Reading stub '%s' id:%d from AOT Code Cache", name, (int)id);
1564 // Read code
1565 uint code_offset = entry->code_offset() + entry_position;
1566 uint code_size = entry->code_size();
1567 copy_bytes(cache->addr(code_offset), start, code_size);
1568 cgen->assembler()->code_section()->set_end(start + code_size);
1569 log_info(aot, codecache, stubs)("Read stub '%s' id:%d from AOT Code Cache", name, (int)id);
1570 return true;
1571 }
1572
1573 AOTCodeEntry* AOTCodeCache::store_nmethod(nmethod* nm, AbstractCompiler* compiler, bool for_preload) {
1574 if (!is_dumping_code()) {
1575 return nullptr;
1576 }
1577 assert(CDSConfig::is_dumping_aot_code(), "should be called only when allowed");
1578 AOTCodeCache* cache = open_for_dump();
1579 precond(cache != nullptr);
1580 precond(!nm->is_osr_method()); // AOT compilation is requested only during AOT cache assembly phase
1581 if (!compiler->is_c1() && !compiler->is_c2()) {
1582 // Only c1 and c2 compilers
1583 return nullptr;
1584 }
1585 int comp_level = nm->comp_level();
1586 if (comp_level == CompLevel_full_profile) {
1587     // Do not cache C1 compilations with full profiling, i.e. tier 3
1588 return nullptr;
1589 }
1590 assert(comp_level == CompLevel_simple || comp_level == CompLevel_limited_profile || comp_level == CompLevel_full_optimization, "must be");
1591
1592 TraceTime t1("Total time to store AOT code", &_t_totalStore, enable_timers(), false);
1593 AOTCodeEntry* entry = nullptr;
1594 entry = cache->write_nmethod(nm, for_preload);
1595 if (entry == nullptr) {
1596 log_info(aot, codecache, nmethod)("%d (L%d): nmethod store attempt failed", nm->compile_id(), comp_level);
1597 }
1598 return entry;
1599 }
1600
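// Layout of an nmethod entry as written by write_nmethod() below (relative to the
// aligned entry_position):
//   - '\0'-terminated method name
//   - archived CodeBlob (nmethod) followed by its relocation data
//   - oops and metadata from the nmethod's data section
//   - oop map set (if present)
//   - immutable data
//   - reloc immediate oops and metadata
//   - per-relocation data written by write_relocations()
//   - asm remarks and dbg strings (non-PRODUCT only)
// AOTCodeReader::compile_nmethod() and fix_relocations() consume these pieces in
// the same order.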
1601 AOTCodeEntry* AOTCodeCache::write_nmethod(nmethod* nm, bool for_preload) {
1602 AOTCodeCache* cache = open_for_dump();
1603 assert(cache != nullptr, "sanity check");
1604 assert(!nm->has_clinit_barriers() || (ClassInitBarrierMode > 0), "sanity");
1605 uint comp_id = nm->compile_id();
1606 uint comp_level = nm->comp_level();
1607 Method* method = nm->method();
1608 if (!AOTCacheAccess::can_generate_aot_code(method)) {
1609 ResourceMark rm;
1610 log_info(aot, codecache, nmethod)("%d (L%d): Skip method '%s' for AOT%s compile: not in AOT cache", comp_id, (int)comp_level, method->name_and_sig_as_C_string(), (for_preload ? " preload" : ""));
1611 assert(AOTCacheAccess::can_generate_aot_code(method), "sanity");
1612 return nullptr;
1613 }
1614 InstanceKlass* holder = method->method_holder();
1615 bool builtin_loader = holder->class_loader_data()->is_builtin_class_loader_data();
1616 if (!builtin_loader) {
1617 ResourceMark rm;
1618 log_info(aot, codecache, nmethod)("%d (L%d): Skip method '%s' loaded by custom class loader %s", comp_id, (int)comp_level, method->name_and_sig_as_C_string(), holder->class_loader_data()->loader_name());
1619 assert(builtin_loader, "sanity");
1620 return nullptr;
1621 }
1622
1623 _for_preload = for_preload;
1624 _has_clinit_barriers = nm->has_clinit_barriers();
1625
1626 if (!align_write()) {
1627 return nullptr;
1628 }
1629
1630 uint entry_position = _write_position;
1631
1632 // Write name
1633 uint name_offset = 0;
1634 uint name_size = 0;
1635 uint id = 0;
1636 uint n;
1637 {
1638 ResourceMark rm;
1639 const char* name = method->name_and_sig_as_C_string();
1640     log_info(aot, codecache, nmethod)("%d (L%d): Writing nmethod '%s' (comp level: %d%s) to AOT Code Cache",
1641                                       comp_id, (int)comp_level, name, comp_level,
1642                                       (nm->has_clinit_barriers() ? ", has clinit barriers" : ""));
1643
1644 LogStreamHandle(Info, aot, codecache, loader) log;
1645 if (log.is_enabled()) {
1646 oop loader = holder->class_loader();
1647 oop domain = holder->protection_domain();
1648 log.print("Holder: ");
1649 holder->print_value_on(&log);
1650 log.print(" loader: ");
1651 if (loader == nullptr) {
1652 log.print("nullptr");
1653 } else {
1654 loader->print_value_on(&log);
1655 }
1656 log.print(" domain: ");
1657 if (domain == nullptr) {
1658 log.print("nullptr");
1659 } else {
1660 domain->print_value_on(&log);
1661 }
1662 log.cr();
1663 }
1664 name_offset = _write_position - entry_position;
1665     name_size = (uint)strlen(name) + 1; // Includes terminating '\0'
1666 n = write_bytes(name, name_size);
1667 if (n != name_size) {
1668 return nullptr;
1669 }
1670 }
1671 id = AOTCacheAccess::delta_from_base_address((address)nm->method());
1672
1673 // Write CodeBlob
1674 if (!cache->align_write()) {
1675 return nullptr;
1676 }
1677 uint blob_offset = cache->_write_position - entry_position;
1678 address archive_buffer = cache->reserve_bytes(nm->size());
1679 if (archive_buffer == nullptr) {
1680 return nullptr;
1681 }
1682 CodeBlob::archive_blob(nm, archive_buffer);
1683
1684 uint reloc_data_size = nm->relocation_size();
1685 n = write_bytes((address)nm->relocation_begin(), reloc_data_size);
1686 if (n != reloc_data_size) {
1687 return nullptr;
1688 }
1689
1690 // Write oops and metadata present in the nmethod's data region
1691 if (!write_oops(nm)) {
1692 if (lookup_failed() && !failed()) {
1693 // Skip this method and reposition file
1694 set_write_position(entry_position);
1695 }
1696 return nullptr;
1697 }
1698 if (!write_metadata(nm)) {
1699 if (lookup_failed() && !failed()) {
1700 // Skip this method and reposition file
1701 set_write_position(entry_position);
1702 }
1703 return nullptr;
1704 }
1705
1706 bool has_oop_maps = false;
1707 if (nm->oop_maps() != nullptr) {
1708 if (!cache->write_oop_map_set(*nm)) {
1709 return nullptr;
1710 }
1711 has_oop_maps = true;
1712 }
1713
1714 uint immutable_data_size = nm->immutable_data_size();
1715 n = write_bytes(nm->immutable_data_begin(), immutable_data_size);
1716 if (n != immutable_data_size) {
1717 return nullptr;
1718 }
1719
1720 JavaThread* thread = JavaThread::current();
1721 HandleMark hm(thread);
1722 GrowableArray<Handle> oop_list;
1723 GrowableArray<Metadata*> metadata_list;
1724
1725 nm->create_reloc_immediates_list(thread, oop_list, metadata_list);
1726 if (!write_nmethod_reloc_immediates(oop_list, metadata_list)) {
1727 if (lookup_failed() && !failed()) {
1728 // Skip this method and reposition file
1729 set_write_position(entry_position);
1730 }
1731 return nullptr;
1732 }
1733
1734 if (!write_relocations(*nm, &oop_list, &metadata_list)) {
1735 return nullptr;
1736 }
1737
1738 #ifndef PRODUCT
1739 if (!cache->write_asm_remarks(nm->asm_remarks(), /* use_string_table */ false)) {
1740 return nullptr;
1741 }
1742 if (!cache->write_dbg_strings(nm->dbg_strings(), /* use_string_table */ false)) {
1743 return nullptr;
1744 }
1745 #endif /* PRODUCT */
1746
1747 uint entry_size = _write_position - entry_position;
1748 AOTCodeEntry* entry = new (this) AOTCodeEntry(AOTCodeEntry::Nmethod, id,
1749 entry_position, entry_size,
1750 name_offset, name_size,
1751 blob_offset, has_oop_maps,
1752 nm->content_begin(), comp_level, comp_id,
1753 nm->has_clinit_barriers(), for_preload);
1754 #ifdef ASSERT
1755 if (nm->has_clinit_barriers() || for_preload) {
1756 assert(for_preload, "sanity");
1757 }
1758 #endif
1759 {
1760 ResourceMark rm;
1761 const char* name = nm->method()->name_and_sig_as_C_string();
1762 log_info(aot, codecache, nmethod)("%d (L%d): Wrote nmethod '%s'%s to AOT Code Cache",
1763 comp_id, (int)comp_level, name, (for_preload ? " (for preload)" : ""));
1764 }
1765 if (VerifyAOTCode) {
1766 return nullptr;
1767 }
1768 return entry;
1769 }
1770
1771 bool AOTCodeCache::load_nmethod(ciEnv* env, ciMethod* target, int entry_bci, AbstractCompiler* compiler, CompLevel comp_level) {
1772 if (!is_using_code()) {
1773 return false;
1774 }
1775 AOTCodeCache* cache = open_for_use();
1776 if (cache == nullptr) {
1777 return false;
1778 }
1779 assert(entry_bci == InvocationEntryBci, "unexpected entry_bci=%d", entry_bci);
1780 TraceTime t1("Total time to load AOT code", &_t_totalLoad, enable_timers(), false);
1781 CompileTask* task = env->task();
1782 task->mark_aot_load_start(os::elapsed_counter());
1783 AOTCodeEntry* entry = task->aot_code_entry();
1784 bool preload = task->preload();
1785 assert(entry != nullptr, "sanity");
1786 if (log_is_enabled(Info, aot, codecache, nmethod)) {
1787 VM_ENTRY_MARK;
1788 ResourceMark rm;
1789 methodHandle method(THREAD, target->get_Method());
1790 const char* target_name = method->name_and_sig_as_C_string();
1791 uint id = AOTCacheAccess::convert_method_to_offset(method());
1792 bool clinit_brs = entry->has_clinit_barriers();
1793 log_info(aot, codecache, nmethod)("%d (L%d): %s nmethod '%s' (id: " UINT32_FORMAT_X_0 "%s)",
1794 task->compile_id(), task->comp_level(), (preload ? "Preloading" : "Reading"),
1795 target_name, id, (clinit_brs ? ", has clinit barriers" : ""));
1796 }
1797 ReadingMark rdmk;
1798 if (rdmk.failed()) {
1799 // Cache is closed, cannot touch anything.
1800 return false;
1801 }
1802
1803 AOTCodeReader reader(cache, entry, task);
1804 bool success = reader.compile_nmethod(env, target, compiler);
1805 if (success) {
1806 task->set_num_inlined_bytecodes(entry->num_inlined_bytecodes());
1807 } else {
1808 entry->set_load_fail();
1809 entry->set_not_entrant();
1810 }
1811 task->mark_aot_load_finish(os::elapsed_counter());
1812 return success;
1813 }
1814
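// Reconstitutes an nmethod from its cache entry: the archived nmethod image and its
// relocation data are located, the recorded oops, metadata and oop maps are resolved,
// and everything is passed to ciEnv::register_aot_method() together with this reader
// for final installation.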
1815 bool AOTCodeReader::compile_nmethod(ciEnv* env, ciMethod* target, AbstractCompiler* compiler) {
1816 CompileTask* task = env->task();
1817 AOTCodeEntry* aot_code_entry = (AOTCodeEntry*)_entry;
1818 nmethod* nm = nullptr;
1819
1820 uint entry_position = aot_code_entry->offset();
1821 uint archived_nm_offset = entry_position + aot_code_entry->code_offset();
1822 nmethod* archived_nm = (nmethod*)addr(archived_nm_offset);
1823 set_read_position(archived_nm_offset + archived_nm->size());
1824
1825 OopRecorder* oop_recorder = new OopRecorder(env->arena());
1826 env->set_oop_recorder(oop_recorder);
1827
1828 uint offset;
1829
1830 offset = read_position();
1831 address reloc_data = (address)addr(offset);
1832 offset += archived_nm->relocation_size();
1833 set_read_position(offset);
1834
1835 // Read oops and metadata
1836 VM_ENTRY_MARK
1837 GrowableArray<Handle> oop_list;
1838 GrowableArray<Metadata*> metadata_list;
1839
1840 if (!read_oop_metadata_list(THREAD, target, oop_list, metadata_list, oop_recorder)) {
1841 return false;
1842 }
1843
1844 ImmutableOopMapSet* oopmaps = read_oop_map_set();
1845
1846 offset = read_position();
1847 address immutable_data = (address)addr(offset);
1848 offset += archived_nm->immutable_data_size();
1849 set_read_position(offset);
1850
1851 GrowableArray<Handle> reloc_immediate_oop_list;
1852 GrowableArray<Metadata*> reloc_immediate_metadata_list;
1853 if (!read_oop_metadata_list(THREAD, target, reloc_immediate_oop_list, reloc_immediate_metadata_list, nullptr)) {
1854 return false;
1855 }
1856
1857 // Read Dependencies (compressed already)
1858 Dependencies* dependencies = new Dependencies(env);
1859 dependencies->set_content(immutable_data, archived_nm->dependencies_size());
1860 env->set_dependencies(dependencies);
1861
1862 const char* name = addr(entry_position + aot_code_entry->name_offset());
1863
1864 if (VerifyAOTCode) {
1865 return false;
1866 }
1867
1868 TraceTime t1("Total time to register AOT nmethod", &_t_totalRegister, enable_timers(), false);
1869 nm = env->register_aot_method(THREAD,
1870 target,
1871 compiler,
1872 archived_nm,
1873 reloc_data,
1874 oop_list,
1875 metadata_list,
1876 oopmaps,
1877 immutable_data,
1878 reloc_immediate_oop_list,
1879 reloc_immediate_metadata_list,
1880 this);
1881 bool success = task->is_success();
1882 if (success) {
1883 log_info(aot, codecache, nmethod)("%d (L%d): Read nmethod '%s' from AOT Code Cache", compile_id(), comp_level(), name);
1884 #ifdef ASSERT
1885 LogStreamHandle(Debug, aot, codecache, nmethod) log;
1886 if (log.is_enabled()) {
1887 FlagSetting fs(PrintRelocations, true);
1888 nm->print_on(&log);
1889 nm->decode2(&log);
1890 }
1891 #endif
1892 }
1893
1894 return success;
1895 }
1896
1897 bool skip_preload(methodHandle mh) {
1898 if (!mh->method_holder()->is_loaded()) {
1899 return true;
1900 }
1901 DirectiveSet* directives = DirectivesStack::getMatchingDirective(mh, nullptr);
1902 if (directives->DontPreloadOption) {
1903 LogStreamHandle(Info, aot, codecache, init) log;
1904 if (log.is_enabled()) {
1905 log.print("Exclude preloading code for ");
1906 mh->print_value_on(&log);
1907 }
1908 return true;
1909 }
1910 return false;
1911 }
1912
1913 void AOTCodeCache::preload_code(JavaThread* thread) {
1914 if (!is_using_code()) {
1915 return;
1916 }
1917 if ((DisableAOTCode & (1 << 3)) != 0) {
1918     return; // No preloaded code (level 5)
1919 }
1920 _cache->preload_aot_code(thread);
1921 }
1922
1923 void AOTCodeCache::preload_aot_code(TRAPS) {
1924 if (CompilationPolicy::compiler_count(CompLevel_full_optimization) == 0) {
1925     // Since we reuse the CompileBroker API to install AOT code, we are required to have a JIT compiler for the
1926     // level we want (that is, CompLevel_full_optimization).
1927 return;
1928 }
1929 TraceTime t1("Total time to preload AOT code", &_t_totalPreload, enable_timers(), false);
1930 assert(_for_use, "sanity");
1931 uint count = _load_header->entries_count();
1932 uint preload_entries_count = _load_header->preload_entries_count();
1933 if (preload_entries_count > 0) {
1934 log_info(aot, codecache, init)("Load %d preload entries from AOT Code Cache", preload_entries_count);
1935 AOTCodeEntry* preload_entry = (AOTCodeEntry*)addr(_load_header->preload_entries_offset());
1936 uint count = MIN2(preload_entries_count, AOTCodePreloadStop);
1937 for (uint i = AOTCodePreloadStart; i < count; i++) {
1938 AOTCodeEntry* entry = &preload_entry[i];
1939 if (entry->not_entrant()) {
1940 continue;
1941 }
1942 methodHandle mh(THREAD, entry->method());
1943 assert((mh.not_null() && AOTMetaspace::in_aot_cache((address)mh())), "sanity");
1944 if (skip_preload(mh)) {
1945 continue; // Exclude preloading for this method
1946 }
1947 assert(mh->method_holder()->is_loaded(), "");
1948 if (!mh->method_holder()->is_linked()) {
1949 assert(!HAS_PENDING_EXCEPTION, "");
1950 mh->method_holder()->link_class(THREAD);
1951 if (HAS_PENDING_EXCEPTION) {
1952 LogStreamHandle(Info, aot, codecache) log;
1953 if (log.is_enabled()) {
1954 ResourceMark rm;
1955 log.print("Linkage failed for %s: ", mh->method_holder()->external_name());
1956 THREAD->pending_exception()->print_value_on(&log);
1957 if (log_is_enabled(Debug, aot, codecache)) {
1958 THREAD->pending_exception()->print_on(&log);
1959 }
1960 }
1961 CLEAR_PENDING_EXCEPTION;
1962 }
1963 }
1964 if (mh->aot_code_entry() != nullptr) {
1965         // A second C2 compilation of the same method could happen for
1966         // different reasons without marking the first entry as not entrant.
1967 continue; // Keep old entry to avoid issues
1968 }
1969 mh->set_aot_code_entry(entry);
1970 CompileBroker::compile_method(mh, InvocationEntryBci, CompLevel_full_optimization, 0, false, CompileTask::Reason_Preload, CHECK);
1971 }
1972 }
1973 }
1974
1975 // ------------ process code and data --------------
1976
1977 // Can't use -1: it is a valid value for a jump-to-itself destination
1978 // used by a static call stub; see NativeJump::jump_destination().
1979 #define BAD_ADDRESS_ID -2
1980
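// For every relocation in the blob one extra uint of data is recorded: an index into
// the reloc immediate oop/metadata lists for immediate oop and metadata relocations,
// or an address-table id (see id_for_address) for call, trampoline, runtime-call and
// external-word relocations; the remaining supported types keep the default value 0.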
1981 bool AOTCodeCache::write_relocations(CodeBlob& code_blob, GrowableArray<Handle>* oop_list, GrowableArray<Metadata*>* metadata_list) {
1982 GrowableArray<uint> reloc_data;
1983 RelocIterator iter(&code_blob);
1984 LogStreamHandle(Trace, aot, codecache, reloc) log;
1985 while (iter.next()) {
1986 int idx = reloc_data.append(0); // default value
1987 switch (iter.type()) {
1988 case relocInfo::none:
1989 break;
1990 case relocInfo::oop_type: {
1991 oop_Relocation* r = (oop_Relocation*)iter.reloc();
1992 if (r->oop_is_immediate()) {
1993 assert(oop_list != nullptr, "sanity check");
1994 // store index of oop in the reloc immediate oop list
1995 Handle h(JavaThread::current(), r->oop_value());
1996 int oop_idx = oop_list->find(h);
1997 assert(oop_idx != -1, "sanity check");
1998 reloc_data.at_put(idx, (uint)oop_idx);
1999 }
2000 break;
2001 }
2002 case relocInfo::metadata_type: {
2003 metadata_Relocation* r = (metadata_Relocation*)iter.reloc();
2004 if (r->metadata_is_immediate()) {
2005 assert(metadata_list != nullptr, "sanity check");
2006 // store index of metadata in the reloc immediate metadata list
2007 int metadata_idx = metadata_list->find(r->metadata_value());
2008 assert(metadata_idx != -1, "sanity check");
2009 reloc_data.at_put(idx, (uint)metadata_idx);
2010 }
2011 break;
2012 }
2013 case relocInfo::virtual_call_type: // Fall through. They all call resolve_*_call blobs.
2014 case relocInfo::opt_virtual_call_type:
2015 case relocInfo::static_call_type: {
2016 CallRelocation* r = (CallRelocation*)iter.reloc();
2017 address dest = r->destination();
2018 if (dest == r->addr()) { // possible call via trampoline on Aarch64
2019 dest = (address)-1; // do nothing in this case when loading this relocation
2020 }
2021 int id = _table->id_for_address(dest, iter, &code_blob);
2022 if (id == BAD_ADDRESS_ID) {
2023 return false;
2024 }
2025 reloc_data.at_put(idx, id);
2026 break;
2027 }
2028 case relocInfo::trampoline_stub_type: {
2029 address dest = ((trampoline_stub_Relocation*)iter.reloc())->destination();
2030 int id = _table->id_for_address(dest, iter, &code_blob);
2031 if (id == BAD_ADDRESS_ID) {
2032 return false;
2033 }
2034 reloc_data.at_put(idx, id);
2035 break;
2036 }
2037 case relocInfo::static_stub_type:
2038 break;
2039 case relocInfo::runtime_call_type: {
2040 // Record offset of runtime destination
2041 CallRelocation* r = (CallRelocation*)iter.reloc();
2042 address dest = r->destination();
2043 if (dest == r->addr()) { // possible call via trampoline on Aarch64
2044 dest = (address)-1; // do nothing in this case when loading this relocation
2045 }
2046 int id = _table->id_for_address(dest, iter, &code_blob);
2047 if (id == BAD_ADDRESS_ID) {
2048 return false;
2049 }
2050 reloc_data.at_put(idx, id);
2051 break;
2052 }
2053 case relocInfo::runtime_call_w_cp_type:
2054 log_debug(aot, codecache, reloc)("runtime_call_w_cp_type relocation is not implemented");
2055 return false;
2056 case relocInfo::external_word_type: {
2057 // Record offset of runtime target
2058 address target = ((external_word_Relocation*)iter.reloc())->target();
2059 int id = _table->id_for_address(target, iter, &code_blob);
2060 if (id == BAD_ADDRESS_ID) {
2061 return false;
2062 }
2063 reloc_data.at_put(idx, id);
2064 break;
2065 }
2066 case relocInfo::internal_word_type:
2067 break;
2068 case relocInfo::section_word_type:
2069 break;
2070 case relocInfo::poll_type:
2071 break;
2072 case relocInfo::poll_return_type:
2073 break;
2074 case relocInfo::post_call_nop_type:
2075 break;
2076 case relocInfo::entry_guard_type:
2077 break;
2078 default:
2079 log_debug(aot, codecache, reloc)("relocation %d unimplemented", (int)iter.type());
2080 return false;
2081 break;
2082 }
2083 if (log.is_enabled()) {
2084 iter.print_current_on(&log);
2085 }
2086 }
2087
2088 // Write additional relocation data: uint per relocation
2089 // Write the count first
2090 int count = reloc_data.length();
2091 write_bytes(&count, sizeof(int));
2092 for (GrowableArrayIterator<uint> iter = reloc_data.begin();
2093 iter != reloc_data.end(); ++iter) {
2094 uint value = *iter;
2095 int n = write_bytes(&value, sizeof(uint));
2096 if (n != sizeof(uint)) {
2097 return false;
2098 }
2099 }
2100 return true;
2101 }
2102
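// Reads back the per-relocation uint array written by write_relocations() and uses it
// to patch the loaded blob: immediate oops and metadata are taken from the supplied
// lists, call and external-word targets are resolved through address_for_id(), and
// internal/section word relocations are rebased to the new content address.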
2103 void AOTCodeReader::fix_relocations(CodeBlob* code_blob, GrowableArray<Handle>* oop_list, GrowableArray<Metadata*>* metadata_list) {
2104 LogStreamHandle(Trace, aot, reloc) log;
2105 uint offset = read_position();
2106 int count = *(int*)addr(offset);
2107 offset += sizeof(int);
2108 if (log.is_enabled()) {
2109 log.print_cr("======== extra relocations count=%d", count);
2110 }
2111 uint* reloc_data = (uint*)addr(offset);
2112 offset += (count * sizeof(uint));
2113 set_read_position(offset);
2114
2115 RelocIterator iter(code_blob);
2116 int j = 0;
2117 while (iter.next()) {
2118 switch (iter.type()) {
2119 case relocInfo::none:
2120 break;
2121 case relocInfo::oop_type: {
2122 assert(code_blob->is_nmethod(), "sanity check");
2123 oop_Relocation* r = (oop_Relocation*)iter.reloc();
2124 if (r->oop_is_immediate()) {
2125 assert(oop_list != nullptr, "sanity check");
2126 Handle h = oop_list->at(reloc_data[j]);
2127 r->set_value(cast_from_oop<address>(h()));
2128 } else {
2129 r->fix_oop_relocation();
2130 }
2131 break;
2132 }
2133 case relocInfo::metadata_type: {
2134 assert(code_blob->is_nmethod(), "sanity check");
2135 metadata_Relocation* r = (metadata_Relocation*)iter.reloc();
2136 Metadata* m;
2137 if (r->metadata_is_immediate()) {
2138 assert(metadata_list != nullptr, "sanity check");
2139 m = metadata_list->at(reloc_data[j]);
2140 } else {
2141 // Get already updated value from nmethod.
2142 int index = r->metadata_index();
2143 m = code_blob->as_nmethod()->metadata_at(index);
2144 }
2145 r->set_value((address)m);
2146 break;
2147 }
2148 case relocInfo::virtual_call_type: // Fall through. They all call resolve_*_call blobs.
2149 case relocInfo::opt_virtual_call_type:
2150 case relocInfo::static_call_type: {
2151 address dest = _cache->address_for_id(reloc_data[j]);
2152 if (dest != (address)-1) {
2153 ((CallRelocation*)iter.reloc())->set_destination(dest);
2154 }
2155 break;
2156 }
2157 case relocInfo::trampoline_stub_type: {
2158 address dest = _cache->address_for_id(reloc_data[j]);
2159 if (dest != (address)-1) {
2160 ((trampoline_stub_Relocation*)iter.reloc())->set_destination(dest);
2161 }
2162 break;
2163 }
2164 case relocInfo::static_stub_type:
2165 break;
2166 case relocInfo::runtime_call_type: {
2167 address dest = _cache->address_for_id(reloc_data[j]);
2168 if (dest != (address)-1) {
2169 ((CallRelocation*)iter.reloc())->set_destination(dest);
2170 }
2171 break;
2172 }
2173 case relocInfo::runtime_call_w_cp_type:
2174 // this relocation should not be in cache (see write_relocations)
2175 assert(false, "runtime_call_w_cp_type relocation is not implemented");
2176 break;
2177 case relocInfo::external_word_type: {
2178 address target = _cache->address_for_id(reloc_data[j]);
2179 // Add external address to global table
2180 int index = ExternalsRecorder::find_index(target);
2181 // Update index in relocation
2182 Relocation::add_jint(iter.data(), index);
2183 external_word_Relocation* reloc = (external_word_Relocation*)iter.reloc();
2184 assert(reloc->target() == target, "sanity");
2185 reloc->set_value(target); // Patch address in the code
2186 break;
2187 }
2188 case relocInfo::internal_word_type: {
2189 internal_word_Relocation* r = (internal_word_Relocation*)iter.reloc();
2190 r->fix_relocation_after_aot_load(aot_code_entry()->dumptime_content_start_addr(), code_blob->content_begin());
2191 break;
2192 }
2193 case relocInfo::section_word_type: {
2194 section_word_Relocation* r = (section_word_Relocation*)iter.reloc();
2195 r->fix_relocation_after_aot_load(aot_code_entry()->dumptime_content_start_addr(), code_blob->content_begin());
2196 break;
2197 }
2198 case relocInfo::poll_type:
2199 break;
2200 case relocInfo::poll_return_type:
2201 break;
2202 case relocInfo::post_call_nop_type:
2203 break;
2204 case relocInfo::entry_guard_type:
2205 break;
2206 default:
2207 assert(false,"relocation %d unimplemented", (int)iter.type());
2208 break;
2209 }
2210 if (log.is_enabled()) {
2211 iter.print_current_on(&log);
2212 }
2213 j++;
2214 }
2215 assert(j == count, "sanity");
2216 }
2217
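// The reloc immediates are written as two counted lists: an int count followed by the
// oops, then an int count followed by the metadata entries.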
2218 bool AOTCodeCache::write_nmethod_reloc_immediates(GrowableArray<Handle>& oop_list, GrowableArray<Metadata*>& metadata_list) {
2219 int count = oop_list.length();
2220 if (!write_bytes(&count, sizeof(int))) {
2221 return false;
2222 }
2223 for (GrowableArrayIterator<Handle> iter = oop_list.begin();
2224 iter != oop_list.end(); ++iter) {
2225 Handle h = *iter;
2226 if (!write_oop(h())) {
2227 return false;
2228 }
2229 }
2230
2231 count = metadata_list.length();
2232 if (!write_bytes(&count, sizeof(int))) {
2233 return false;
2234 }
2235 for (GrowableArrayIterator<Metadata*> iter = metadata_list.begin();
2236 iter != metadata_list.end(); ++iter) {
2237 Metadata* m = *iter;
2238 if (!write_metadata(m)) {
2239 return false;
2240 }
2241 }
2242 return true;
2243 }
2244
2245 bool AOTCodeCache::write_metadata(nmethod* nm) {
2246 int count = nm->metadata_count()-1;
2247 if (!write_bytes(&count, sizeof(int))) {
2248 return false;
2249 }
2250 for (Metadata** p = nm->metadata_begin(); p < nm->metadata_end(); p++) {
2251 if (!write_metadata(*p)) {
2252 return false;
2253 }
2254 }
2255 return true;
2256 }
2257
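// A single metadata value is tagged with a DataKind: Null, No_Data (non-oop word),
// Klass, Method, or MethodCnts (the owning Method is written and the counters are
// obtained from it again on load). Anything else is unsupported and fails fatally.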
2258 bool AOTCodeCache::write_metadata(Metadata* m) {
2259 uint n = 0;
2260 if (m == nullptr) {
2261 DataKind kind = DataKind::Null;
2262 n = write_bytes(&kind, sizeof(int));
2263 if (n != sizeof(int)) {
2264 return false;
2265 }
2266 } else if (m == (Metadata*)Universe::non_oop_word()) {
2267 DataKind kind = DataKind::No_Data;
2268 n = write_bytes(&kind, sizeof(int));
2269 if (n != sizeof(int)) {
2270 return false;
2271 }
2272 } else if (m->is_klass()) {
2273 if (!write_klass((Klass*)m)) {
2274 return false;
2275 }
2276 } else if (m->is_method()) {
2277 if (!write_method((Method*)m)) {
2278 return false;
2279 }
2280 } else if (m->is_methodCounters()) {
2281 DataKind kind = DataKind::MethodCnts;
2282 n = write_bytes(&kind, sizeof(int));
2283 if (n != sizeof(int)) {
2284 return false;
2285 }
2286 if (!write_method(((MethodCounters*)m)->method())) {
2287 return false;
2288 }
2289 log_debug(aot, codecache, metadata)("%d (L%d): Write MethodCounters : " INTPTR_FORMAT, compile_id(), comp_level(), p2i(m));
2290 } else { // Not supported
2291 fatal("metadata : " INTPTR_FORMAT " unimplemented", p2i(m));
2292 return false;
2293 }
2294 return true;
2295 }
2296
2297 Metadata* AOTCodeReader::read_metadata(const methodHandle& comp_method) {
2298 uint code_offset = read_position();
2299 Metadata* m = nullptr;
2300 DataKind kind = *(DataKind*)addr(code_offset);
2301 code_offset += sizeof(DataKind);
2302 set_read_position(code_offset);
2303 if (kind == DataKind::Null) {
2304 m = (Metadata*)nullptr;
2305 } else if (kind == DataKind::No_Data) {
2306 m = (Metadata*)Universe::non_oop_word();
2307 } else if (kind == DataKind::Klass) {
2308 m = (Metadata*)read_klass(comp_method);
2309 } else if (kind == DataKind::Method) {
2310 m = (Metadata*)read_method(comp_method);
2311 } else if (kind == DataKind::MethodCnts) {
2312 kind = *(DataKind*)addr(code_offset);
2313 code_offset += sizeof(DataKind);
2314 set_read_position(code_offset);
2315 m = (Metadata*)read_method(comp_method);
2316 if (m != nullptr) {
2317 Method* method = (Method*)m;
2318 m = method->get_method_counters(Thread::current());
2319 if (m == nullptr) {
2320 set_lookup_failed();
2321 log_debug(aot, codecache, metadata)("%d (L%d): Failed to get MethodCounters", compile_id(), comp_level());
2322 } else {
2323 log_debug(aot, codecache, metadata)("%d (L%d): Read MethodCounters : " INTPTR_FORMAT, compile_id(), comp_level(), p2i(m));
2324 }
2325 }
2326 } else {
2327 set_lookup_failed();
2328 log_debug(aot, codecache, metadata)("%d (L%d): Unknown metadata's kind: %d", compile_id(), comp_level(), (int)kind);
2329 }
2330 return m;
2331 }
2332
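// A Method is recorded as DataKind::Method followed by its offset from the AOT cache
// base address; the method must be one for which AOT code generation is allowed,
// otherwise the lookup is marked as failed.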
2333 bool AOTCodeCache::write_method(Method* method) {
2334   ResourceMark rm; // For printing the method's name
2335 if (AOTCacheAccess::can_generate_aot_code(method)) {
2336 DataKind kind = DataKind::Method;
2337 uint n = write_bytes(&kind, sizeof(int));
2338 if (n != sizeof(int)) {
2339 return false;
2340 }
2341 uint method_offset = AOTCacheAccess::delta_from_base_address((address)method);
2342 n = write_bytes(&method_offset, sizeof(uint));
2343 if (n != sizeof(uint)) {
2344 return false;
2345 }
2346 log_debug(aot, codecache, metadata)("%d (L%d): Wrote method: %s @ 0x%08x",
2347 compile_id(), comp_level(), method->name_and_sig_as_C_string(), method_offset);
2348 return true;
2349 }
2350 log_debug(aot, codecache, metadata)("%d (L%d): Method is not archived: %s",
2351 compile_id(), comp_level(), method->name_and_sig_as_C_string());
2352 set_lookup_failed();
2353 return false;
2354 }
2355
2356 Method* AOTCodeReader::read_method(const methodHandle& comp_method) {
2357 uint code_offset = read_position();
2358 uint method_offset = *(uint*)addr(code_offset);
2359 code_offset += sizeof(uint);
2360 set_read_position(code_offset);
2361 Method* m = AOTCacheAccess::convert_offset_to_method(method_offset);
2362 if (!AOTMetaspace::in_aot_cache((address)m)) {
2363 // Something changed in CDS
2364 set_lookup_failed();
2365 log_debug(aot, codecache, metadata)("Lookup failed for shared method: " INTPTR_FORMAT " is not in CDS ", p2i((address)m));
2366 return nullptr;
2367 }
2368 assert(m->is_method(), "sanity");
2369 ResourceMark rm;
2370 Klass* k = m->method_holder();
2371 if (!k->is_instance_klass()) {
2372 set_lookup_failed();
2373 log_debug(aot, codecache, metadata)("%d '%s' (L%d): Lookup failed for holder %s: not instance klass",
2374 compile_id(), comp_method->name_and_sig_as_C_string(), comp_level(), k->external_name());
2375 return nullptr;
2376 } else if (!AOTMetaspace::in_aot_cache((address)k)) {
2377 set_lookup_failed();
2378 log_debug(aot, codecache, metadata)("%d '%s' (L%d): Lookup failed for holder %s: not in CDS",
2379 compile_id(), comp_method->name_and_sig_as_C_string(), comp_level(), k->external_name());
2380 return nullptr;
2381 } else if (!InstanceKlass::cast(k)->is_loaded()) {
2382 set_lookup_failed();
2383 log_debug(aot, codecache, metadata)("%d '%s' (L%d): Lookup failed for holder %s: not loaded",
2384 compile_id(), comp_method->name_and_sig_as_C_string(), comp_level(), k->external_name());
2385 return nullptr;
2386 } else if (!InstanceKlass::cast(k)->is_linked()) {
2387 set_lookup_failed();
2388 log_debug(aot, codecache, metadata)("%d '%s' (L%d): Lookup failed for holder %s: not linked%s",
2389 compile_id(), comp_method->name_and_sig_as_C_string(), comp_level(), k->external_name(), (_preload ? " for code preload" : ""));
2390 return nullptr;
2391 }
2392 log_debug(aot, codecache, metadata)("%d (L%d): Shared method lookup: %s",
2393 compile_id(), comp_level(), m->name_and_sig_as_C_string());
2394 return m;
2395 }
2396
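// A Klass is recorded as DataKind::Klass, a state word encoding the object-array
// dimension and the instance-klass initialization state ((array_dim << 1) | init),
// and the klass' offset from the AOT cache base address. For object arrays the
// bottom klass is written and the dimension is kept in the state word.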
2397 bool AOTCodeCache::write_klass(Klass* klass) {
2398 uint array_dim = 0;
2399 if (klass->is_objArray_klass()) {
2400 array_dim = ObjArrayKlass::cast(klass)->dimension();
2401 klass = ObjArrayKlass::cast(klass)->bottom_klass(); // overwrites klass
2402 }
2403 uint init_state = 0;
2404 bool can_write = true;
2405 if (klass->is_instance_klass()) {
2406 InstanceKlass* ik = InstanceKlass::cast(klass);
2407 init_state = (ik->is_initialized() ? 1 : 0);
2408 can_write = AOTCacheAccess::can_generate_aot_code_for(ik);
2409 } else {
2410 can_write = AOTCacheAccess::can_generate_aot_code(klass);
2411 }
2412 ResourceMark rm;
2413 uint state = (array_dim << 1) | (init_state & 1);
2414 if (can_write) {
2415 DataKind kind = DataKind::Klass;
2416 uint n = write_bytes(&kind, sizeof(int));
2417 if (n != sizeof(int)) {
2418 return false;
2419 }
2420     // Record the state of instance klass initialization and array dimensions.
2421 n = write_bytes(&state, sizeof(int));
2422 if (n != sizeof(int)) {
2423 return false;
2424 }
2425 uint klass_offset = AOTCacheAccess::delta_from_base_address((address)klass);
2426 n = write_bytes(&klass_offset, sizeof(uint));
2427 if (n != sizeof(uint)) {
2428 return false;
2429 }
2430 log_debug(aot, codecache, metadata)("%d (L%d): Registered klass: %s%s%s @ 0x%08x",
2431 compile_id(), comp_level(), klass->external_name(),
2432 (!klass->is_instance_klass() ? "" : (init_state == 1 ? " (initialized)" : " (not-initialized)")),
2433 (array_dim > 0 ? " (object array)" : ""), klass_offset);
2434 return true;
2435 }
2436 log_debug(aot, codecache, metadata)("%d (L%d): Klassis not archived: %s%s%s",
2437 compile_id(), comp_level(), klass->external_name(),
2438 (!klass->is_instance_klass() ? "" : (init_state == 1 ? " (initialized)" : " (not-initialized)")),
2439 (array_dim > 0 ? " (object array)" : ""));
2440 set_lookup_failed();
2441 return false;
2442 }
2443
2444 Klass* AOTCodeReader::read_klass(const methodHandle& comp_method) {
2445 uint code_offset = read_position();
2446 uint state = *(uint*)addr(code_offset);
2447 uint init_state = (state & 1);
2448 uint array_dim = (state >> 1);
2449 code_offset += sizeof(int);
2450 uint klass_offset = *(uint*)addr(code_offset);
2451 code_offset += sizeof(uint);
2452 set_read_position(code_offset);
2453 Klass* k = AOTCacheAccess::convert_offset_to_klass(klass_offset);
2454 if (!AOTMetaspace::in_aot_cache((address)k)) {
2455 // Something changed in CDS
2456 set_lookup_failed();
2457 log_debug(aot, codecache, metadata)("Lookup failed for shared klass: " INTPTR_FORMAT " is not in CDS ", p2i((address)k));
2458 return nullptr;
2459 }
2460 assert(k->is_klass(), "sanity");
2461 ResourceMark rm;
2462 if (k->is_instance_klass() && !InstanceKlass::cast(k)->is_loaded()) {
2463 set_lookup_failed();
2464 log_debug(aot, codecache, metadata)("%d '%s' (L%d): Lookup failed for klass %s: not loaded",
2465 compile_id(), comp_method->name_and_sig_as_C_string(), comp_level(), k->external_name());
2466 return nullptr;
2467 } else
2468   // Allow a not-yet-initialized klass only if it was also uninitialized during code caching, or when preloading
2469 if (k->is_instance_klass() && !InstanceKlass::cast(k)->is_initialized() && (init_state == 1) && !_preload) {
2470 set_lookup_failed();
2471 log_debug(aot, codecache, metadata)("%d '%s' (L%d): Lookup failed for klass %s: not initialized",
2472 compile_id(), comp_method->name_and_sig_as_C_string(), comp_level(), k->external_name());
2473 return nullptr;
2474 }
2475 if (array_dim > 0) {
2476 assert(k->is_instance_klass() || k->is_typeArray_klass(), "sanity check");
2477 Klass* ak = k->array_klass_or_null(array_dim);
2478 // FIXME: what would it take to create an array class on the fly?
2479 // Klass* ak = k->array_klass(dim, JavaThread::current());
2480 // guarantee(JavaThread::current()->pending_exception() == nullptr, "");
2481 if (ak == nullptr) {
2482 set_lookup_failed();
2483 log_debug(aot, codecache, metadata)("%d (L%d): %d-dimension array klass lookup failed: %s",
2484 compile_id(), comp_level(), array_dim, k->external_name());
2485 }
2486 log_debug(aot, codecache, metadata)("%d (L%d): Klass lookup: %s (object array)", compile_id(), comp_level(), k->external_name());
2487 return ak;
2488 } else {
2489 log_debug(aot, codecache, metadata)("%d (L%d): Shared klass lookup: %s",
2490 compile_id(), comp_level(), k->external_name());
2491 return k;
2492 }
2493 }
2494
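// Oops are serialized with a DataKind tag: Null, No_Data (non-oop word), Primitive
// (the BasicType of a primitive mirror), Klass (the mirror of an archived klass),
// String/MH_Oop (the permanent index of an archived heap object), or
// SysLoader/PlaLoader for the two well-known class loaders. Unarchived objects cause
// the lookup to fail.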
2495 bool AOTCodeCache::write_oop(jobject& jo) {
2496 oop obj = JNIHandles::resolve(jo);
2497 return write_oop(obj);
2498 }
2499
2500 bool AOTCodeCache::write_oop(oop obj) {
2501 DataKind kind;
2502 uint n = 0;
2503 if (obj == nullptr) {
2504 kind = DataKind::Null;
2505 n = write_bytes(&kind, sizeof(int));
2506 if (n != sizeof(int)) {
2507 return false;
2508 }
2509 } else if (cast_from_oop<void *>(obj) == Universe::non_oop_word()) {
2510 kind = DataKind::No_Data;
2511 n = write_bytes(&kind, sizeof(int));
2512 if (n != sizeof(int)) {
2513 return false;
2514 }
2515 } else if (java_lang_Class::is_instance(obj)) {
2516 if (java_lang_Class::is_primitive(obj)) {
2517 int bt = (int)java_lang_Class::primitive_type(obj);
2518 kind = DataKind::Primitive;
2519 n = write_bytes(&kind, sizeof(int));
2520 if (n != sizeof(int)) {
2521 return false;
2522 }
2523 n = write_bytes(&bt, sizeof(int));
2524 if (n != sizeof(int)) {
2525 return false;
2526 }
2527 log_debug(aot, codecache, oops)("%d (L%d): Write primitive type klass: %s", compile_id(), comp_level(), type2name((BasicType)bt));
2528 } else {
2529 Klass* klass = java_lang_Class::as_Klass(obj);
2530 if (!write_klass(klass)) {
2531 return false;
2532 }
2533 }
2534   } else if (java_lang_String::is_instance(obj)) {
2535 int k = AOTCacheAccess::get_archived_object_permanent_index(obj); // k >= 0 means obj is a "permanent heap object"
2536 ResourceMark rm;
2537 size_t length_sz = 0;
2538 const char* string = java_lang_String::as_utf8_string(obj, length_sz);
2539 if (k >= 0) {
2540 kind = DataKind::String;
2541 n = write_bytes(&kind, sizeof(int));
2542 if (n != sizeof(int)) {
2543 return false;
2544 }
2545 n = write_bytes(&k, sizeof(int));
2546 if (n != sizeof(int)) {
2547 return false;
2548 }
2549 log_debug(aot, codecache, oops)("%d (L%d): Write String object: " PTR_FORMAT " : %s", compile_id(), comp_level(), p2i(obj), string);
2550 return true;
2551 }
2552 // Not archived String object - bailout
2553 set_lookup_failed();
2554 log_debug(aot, codecache, oops)("%d (L%d): Not archived String object: " PTR_FORMAT " : %s",
2555 compile_id(), comp_level(), p2i(obj), string);
2556 return false;
2557 } else if (java_lang_Module::is_instance(obj)) {
2558 fatal("Module object unimplemented");
2559 } else if (java_lang_ClassLoader::is_instance(obj)) {
2560 if (obj == SystemDictionary::java_system_loader()) {
2561 kind = DataKind::SysLoader;
2562 log_debug(aot, codecache, oops)("%d (L%d): Write ClassLoader: java_system_loader", compile_id(), comp_level());
2563 } else if (obj == SystemDictionary::java_platform_loader()) {
2564 kind = DataKind::PlaLoader;
2565 log_debug(aot, codecache, oops)("%d (L%d): Write ClassLoader: java_platform_loader", compile_id(), comp_level());
2566 } else {
2567 ResourceMark rm;
2568 set_lookup_failed();
2569 log_debug(aot, codecache, oops)("%d (L%d): Not supported Class Loader: " PTR_FORMAT " : %s",
2570 compile_id(), comp_level(), p2i(obj), obj->klass()->external_name());
2571 return false;
2572 }
2573 n = write_bytes(&kind, sizeof(int));
2574 if (n != sizeof(int)) {
2575 return false;
2576 }
2577   } else {
2578 ResourceMark rm;
2579 int k = AOTCacheAccess::get_archived_object_permanent_index(obj); // k >= 0 means obj is a "permanent heap object"
2580 if (k >= 0) {
2581 kind = DataKind::MH_Oop;
2582 n = write_bytes(&kind, sizeof(int));
2583 if (n != sizeof(int)) {
2584 return false;
2585 }
2586 n = write_bytes(&k, sizeof(int));
2587 if (n != sizeof(int)) {
2588 return false;
2589 }
2590 log_debug(aot, codecache, oops)("%d (L%d): Write MH object: " PTR_FORMAT " : %s",
2591 compile_id(), comp_level(), p2i(obj), obj->klass()->external_name());
2592 return true;
2593 }
2594 // Not archived Java object - bailout
2595 set_lookup_failed();
2596 log_debug(aot, codecache, oops)("%d (L%d): Not archived Java object: " PTR_FORMAT " : %s",
2597 compile_id(), comp_level(), p2i(obj), obj->klass()->external_name());
2598 return false;
2599 }
2600 return true;
2601 }
2602
2603 oop AOTCodeReader::read_oop(JavaThread* thread, const methodHandle& comp_method) {
2604 uint code_offset = read_position();
2605 oop obj = nullptr;
2606 DataKind kind = *(DataKind*)addr(code_offset);
2607 code_offset += sizeof(DataKind);
2608 set_read_position(code_offset);
2609 if (kind == DataKind::Null) {
2610 return nullptr;
2611 } else if (kind == DataKind::No_Data) {
2612 return cast_to_oop(Universe::non_oop_word());
2613 } else if (kind == DataKind::Klass) {
2614 Klass* k = read_klass(comp_method);
2615 if (k == nullptr) {
2616 return nullptr;
2617 }
2618 obj = k->java_mirror();
2619 if (obj == nullptr) {
2620 set_lookup_failed();
2621 log_debug(aot, codecache, oops)("Lookup failed for java_mirror of klass %s", k->external_name());
2622 return nullptr;
2623 }
2624 } else if (kind == DataKind::Primitive) {
2625 code_offset = read_position();
2626 int t = *(int*)addr(code_offset);
2627 code_offset += sizeof(int);
2628 set_read_position(code_offset);
2629 BasicType bt = (BasicType)t;
2630 obj = java_lang_Class::primitive_mirror(bt);
2631 log_debug(aot, codecache, oops)("%d (L%d): Read primitive type klass: %s", compile_id(), comp_level(), type2name(bt));
2632 } else if (kind == DataKind::String) {
2633 code_offset = read_position();
2634 int k = *(int*)addr(code_offset);
2635 code_offset += sizeof(int);
2636 set_read_position(code_offset);
2637 obj = AOTCacheAccess::get_archived_object(k);
2638 if (obj == nullptr) {
2639 set_lookup_failed();
2640 log_debug(aot, codecache, oops)("Lookup failed for String object");
2641 return nullptr;
2642 }
2643 assert(java_lang_String::is_instance(obj), "must be string");
2644
2645 ResourceMark rm;
2646 size_t length_sz = 0;
2647 const char* string = java_lang_String::as_utf8_string(obj, length_sz);
2648 log_debug(aot, codecache, oops)("%d (L%d): Read String object: %s", compile_id(), comp_level(), string);
2649 } else if (kind == DataKind::SysLoader) {
2650 obj = SystemDictionary::java_system_loader();
2651 log_debug(aot, codecache, oops)("%d (L%d): Read java_system_loader", compile_id(), comp_level());
2652 } else if (kind == DataKind::PlaLoader) {
2653 obj = SystemDictionary::java_platform_loader();
2654 log_debug(aot, codecache, oops)("%d (L%d): Read java_platform_loader", compile_id(), comp_level());
2655 } else if (kind == DataKind::MH_Oop) {
2656 code_offset = read_position();
2657 int k = *(int*)addr(code_offset);
2658 code_offset += sizeof(int);
2659 set_read_position(code_offset);
2660 obj = AOTCacheAccess::get_archived_object(k);
2661 if (obj == nullptr) {
2662 set_lookup_failed();
2663 log_debug(aot, codecache, oops)("Lookup failed for MH object");
2664 return nullptr;
2665 }
2666 ResourceMark rm;
2667 log_debug(aot, codecache, oops)("%d (L%d): Read MH object: " PTR_FORMAT " : %s",
2668 compile_id(), comp_level(), p2i(obj), obj->klass()->external_name());
2669 } else {
2670 set_lookup_failed();
2671 log_debug(aot, codecache, oops)("%d (L%d): Unknown oop's kind: %d",
2672 compile_id(), comp_level(), (int)kind);
2673 return nullptr;
2674 }
2675 return obj;
2676 }
2677
2678 bool AOTCodeReader::read_oop_metadata_list(JavaThread* thread, ciMethod* target, GrowableArray<Handle> &oop_list, GrowableArray<Metadata*> &metadata_list, OopRecorder* oop_recorder) {
2679 methodHandle comp_method(JavaThread::current(), target->get_Method());
2680 JavaThread* current = JavaThread::current();
2681 uint offset = read_position();
2682 int count = *(int *)addr(offset);
2683 offset += sizeof(int);
2684 set_read_position(offset);
2685 for (int i = 0; i < count; i++) {
2686 oop obj = read_oop(current, comp_method);
2687 if (lookup_failed()) {
2688 return false;
2689 }
2690 Handle h(thread, obj);
2691 oop_list.append(h);
2692 if (oop_recorder != nullptr) {
2693 jobject jo = JNIHandles::make_local(thread, obj);
2694 if (oop_recorder->is_real(jo)) {
2695 oop_recorder->find_index(jo);
2696 } else {
2697 oop_recorder->allocate_oop_index(jo);
2698 }
2699 }
2700 LogStreamHandle(Debug, aot, codecache, oops) log;
2701 if (log.is_enabled()) {
2702 log.print("%d: " INTPTR_FORMAT " ", i, p2i(obj));
2703 if (obj == Universe::non_oop_word()) {
2704 log.print("non-oop word");
2705 } else if (obj == nullptr) {
2706 log.print("nullptr-oop");
2707 } else {
2708 obj->print_value_on(&log);
2709 }
2710 log.cr();
2711 }
2712 }
2713
2714 offset = read_position();
2715 count = *(int *)addr(offset);
2716 offset += sizeof(int);
2717 set_read_position(offset);
2718 for (int i = 0; i < count; i++) {
2719 Metadata* m = read_metadata(comp_method);
2720 if (lookup_failed()) {
2721 return false;
2722 }
2723 metadata_list.append(m);
2724 if (oop_recorder != nullptr) {
2725 if (oop_recorder->is_real(m)) {
2726 oop_recorder->find_index(m);
2727 } else {
2728 oop_recorder->allocate_metadata_index(m);
2729 }
2730 }
2731 LogTarget(Debug, aot, codecache, metadata) log;
2732 if (log.is_enabled()) {
2733 LogStream ls(log);
2734 ls.print("%d: " INTPTR_FORMAT " ", i, p2i(m));
2735 if (m == (Metadata*)Universe::non_oop_word()) {
2736 ls.print("non-metadata word");
2737 } else if (m == nullptr) {
2738 ls.print("nullptr-oop");
2739 } else {
2740 Metadata::print_value_on_maybe_null(&ls, m);
2741 }
2742 ls.cr();
2743 }
2744 }
2745 return true;
2746 }
2747
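// The oop map set is stored as its size in bytes (int) followed by the raw
// ImmutableOopMapSet contents; read_oop_map_set() simply returns a pointer into the
// mapped cache and advances the read position.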
2748 bool AOTCodeCache::write_oop_map_set(CodeBlob& cb) {
2749 ImmutableOopMapSet* oopmaps = cb.oop_maps();
2750 int oopmaps_size = oopmaps->nr_of_bytes();
2751 if (!write_bytes(&oopmaps_size, sizeof(int))) {
2752 return false;
2753 }
2754 uint n = write_bytes(oopmaps, oopmaps->nr_of_bytes());
2755 if (n != (uint)oopmaps->nr_of_bytes()) {
2756 return false;
2757 }
2758 return true;
2759 }
2760
2761 ImmutableOopMapSet* AOTCodeReader::read_oop_map_set() {
2762 uint offset = read_position();
2763 int size = *(int *)addr(offset);
2764 offset += sizeof(int);
2765 ImmutableOopMapSet* oopmaps = (ImmutableOopMapSet *)addr(offset);
2766 offset += size;
2767 set_read_position(offset);
2768 return oopmaps;
2769 }
2770
2771 bool AOTCodeCache::write_oops(nmethod* nm) {
2772 int count = nm->oops_count()-1;
2773 if (!write_bytes(&count, sizeof(int))) {
2774 return false;
2775 }
2776 for (oop* p = nm->oops_begin(); p < nm->oops_end(); p++) {
2777 if (!write_oop(*p)) {
2778 return false;
2779 }
2780 }
2781 return true;
2782 }
2783
2784 #ifndef PRODUCT
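// Asm remarks are stored as a count followed by (code offset, string) pairs; each
// string is either an id in the shared C-string table or an inline '\0'-terminated
// copy, depending on use_string_table. Dbg strings below use the same scheme without
// the offset.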
2785 bool AOTCodeCache::write_asm_remarks(AsmRemarks& asm_remarks, bool use_string_table) {
2786 // Write asm remarks
2787 uint* count_ptr = (uint *)reserve_bytes(sizeof(uint));
2788 if (count_ptr == nullptr) {
2789 return false;
2790 }
2791 uint count = 0;
2792 bool result = asm_remarks.iterate([&] (uint offset, const char* str) -> bool {
2793 log_trace(aot, codecache, stubs)("asm remark offset=%d, str='%s'", offset, str);
2794 uint n = write_bytes(&offset, sizeof(uint));
2795 if (n != sizeof(uint)) {
2796 return false;
2797 }
2798 if (use_string_table) {
2799 const char* cstr = add_C_string(str);
2800 int id = _table->id_for_C_string((address)cstr);
2801 assert(id != -1, "asm remark string '%s' not found in AOTCodeAddressTable", str);
2802 n = write_bytes(&id, sizeof(int));
2803 if (n != sizeof(int)) {
2804 return false;
2805 }
2806 } else {
2807 n = write_bytes(str, (uint)strlen(str) + 1);
2808 if (n != strlen(str) + 1) {
2809 return false;
2810 }
2811 }
2812 count += 1;
2813 return true;
2814 });
2815 *count_ptr = count;
2816 return result;
2817 }
2818
2819 void AOTCodeReader::read_asm_remarks(AsmRemarks& asm_remarks, bool use_string_table) {
2820 // Read asm remarks
2821 uint offset = read_position();
2822 uint count = *(uint *)addr(offset);
2823 offset += sizeof(uint);
2824 for (uint i = 0; i < count; i++) {
2825 uint remark_offset = *(uint *)addr(offset);
2826 offset += sizeof(uint);
2827 const char* remark = nullptr;
2828 if (use_string_table) {
2829 int remark_string_id = *(uint *)addr(offset);
2830 offset += sizeof(int);
2831 remark = (const char*)_cache->address_for_C_string(remark_string_id);
2832 } else {
2833 remark = (const char*)addr(offset);
2834 offset += (uint)strlen(remark)+1;
2835 }
2836 asm_remarks.insert(remark_offset, remark);
2837 }
2838 set_read_position(offset);
2839 }
2840
2841 bool AOTCodeCache::write_dbg_strings(DbgStrings& dbg_strings, bool use_string_table) {
2842 // Write dbg strings
2843 uint* count_ptr = (uint *)reserve_bytes(sizeof(uint));
2844 if (count_ptr == nullptr) {
2845 return false;
2846 }
2847 uint count = 0;
2848 bool result = dbg_strings.iterate([&] (const char* str) -> bool {
2849 log_trace(aot, codecache, stubs)("dbg string=%s", str);
2850 if (use_string_table) {
2851 const char* cstr = add_C_string(str);
2852 int id = _table->id_for_C_string((address)cstr);
2853 assert(id != -1, "db string '%s' not found in AOTCodeAddressTable", str);
2854 uint n = write_bytes(&id, sizeof(int));
2855 if (n != sizeof(int)) {
2856 return false;
2857 }
2858 } else {
2859 uint n = write_bytes(str, (uint)strlen(str) + 1);
2860 if (n != strlen(str) + 1) {
2861 return false;
2862 }
2863 }
2864 count += 1;
2865 return true;
2866 });
2867 *count_ptr = count;
2868 return result;
2869 }
2870
2871 void AOTCodeReader::read_dbg_strings(DbgStrings& dbg_strings, bool use_string_table) {
2872 // Read dbg strings
2873 uint offset = read_position();
2874 uint count = *(uint *)addr(offset);
2875 offset += sizeof(uint);
2876 for (uint i = 0; i < count; i++) {
2877 const char* str = nullptr;
2878 if (use_string_table) {
2879 int string_id = *(uint *)addr(offset);
2880 offset += sizeof(int);
2881 str = (const char*)_cache->address_for_C_string(string_id);
2882 } else {
2883 str = (const char*)addr(offset);
2884 offset += (uint)strlen(str)+1;
2885 }
2886 dbg_strings.insert(str);
2887 }
2888 set_read_position(offset);
2889 }
2890 #endif // PRODUCT
2891
2892 //======================= AOTCodeAddressTable ===============
2893
2894 // Address table ids for generated routines, external addresses and C
2895 // string addresses are partitioned into positive integer ranges
2896 // defined by the following base and max values,
2897 // i.e. [_extrs_base, _extrs_base + _extrs_max - 1],
2898 //      [_stubs_base, _stubs_base + _stubs_max - 1],
2899 //      ...
2900 //      [_c_str_base, _c_str_base + _c_str_max - 1].
2901 #define _extrs_max 140
2902 #define _stubs_max 210
2903 #define _shared_blobs_max 25
2904 #define _C1_blobs_max 50
2905 #define _C2_blobs_max 25
2906 #define _blobs_max (_shared_blobs_max+_C1_blobs_max+_C2_blobs_max)
2907 #define _all_max (_extrs_max+_stubs_max+_blobs_max)
2908
2909 #define _extrs_base 0
2910 #define _stubs_base (_extrs_base + _extrs_max)
2911 #define _shared_blobs_base (_stubs_base + _stubs_max)
2912 #define _C1_blobs_base (_shared_blobs_base + _shared_blobs_max)
2913 #define _C2_blobs_base (_C1_blobs_base + _C1_blobs_max)
2914 #define _blobs_end (_shared_blobs_base + _blobs_max)
2915 #if (_C2_blobs_base >= _all_max)
2916 #error AOTCodeAddressTable ranges need adjusting
2917 #endif
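// With the current values this gives:
//   _extrs:        ids [  0 .. 139]
//   _stubs:        ids [140 .. 349]
//   _shared_blobs: ids [350 .. 374]
//   _C1_blobs:     ids [375 .. 424]
//   _C2_blobs:     ids [425 .. 449]
// (C string ids occupy a further range starting at _c_str_base.)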
2918
2919 #define SET_ADDRESS(type, addr) \
2920 { \
2921 type##_addr[type##_length++] = (address) (addr); \
2922 assert(type##_length <= type##_max, "increase size"); \
2923 }
2924
2925 static bool initializing_extrs = false;
2926
2927 void AOTCodeAddressTable::init_extrs() {
2928 if (_extrs_complete || initializing_extrs) return; // Done already
2929
2930 assert(_blobs_end <= _all_max, "AOTCodeAddress table ranges need adjusting");
2931
2932 initializing_extrs = true;
2933 _extrs_addr = NEW_C_HEAP_ARRAY(address, _extrs_max, mtCode);
2934
2935 _extrs_length = 0;
2936
2937 // Record addresses of VM runtime methods
2938 SET_ADDRESS(_extrs, SharedRuntime::fixup_callers_callsite);
2939 SET_ADDRESS(_extrs, SharedRuntime::handle_wrong_method);
2940 SET_ADDRESS(_extrs, SharedRuntime::handle_wrong_method_abstract);
2941 SET_ADDRESS(_extrs, SharedRuntime::handle_wrong_method_ic_miss);
2942 {
2943 // Required by Shared blobs
2944 SET_ADDRESS(_extrs, Deoptimization::fetch_unroll_info);
2945 SET_ADDRESS(_extrs, Deoptimization::unpack_frames);
2946 SET_ADDRESS(_extrs, SafepointSynchronize::handle_polling_page_exception);
2947 SET_ADDRESS(_extrs, SharedRuntime::resolve_opt_virtual_call_C);
2948 SET_ADDRESS(_extrs, SharedRuntime::resolve_virtual_call_C);
2949 SET_ADDRESS(_extrs, SharedRuntime::resolve_static_call_C);
2950 SET_ADDRESS(_extrs, SharedRuntime::throw_StackOverflowError);
2951 SET_ADDRESS(_extrs, SharedRuntime::throw_delayed_StackOverflowError);
2952 SET_ADDRESS(_extrs, SharedRuntime::throw_AbstractMethodError);
2953 SET_ADDRESS(_extrs, SharedRuntime::throw_IncompatibleClassChangeError);
2954 SET_ADDRESS(_extrs, SharedRuntime::throw_NullPointerException_at_call);
2955 SET_ADDRESS(_extrs, SharedRuntime::throw_StackOverflowError);
2956 SET_ADDRESS(_extrs, CompressedOops::base_addr());
2957 SET_ADDRESS(_extrs, CompressedKlassPointers::base_addr());
2958 }
2959 {
2960 // Required by initial stubs
2961 SET_ADDRESS(_extrs, StubRoutines::crc_table_addr());
2962 #if defined(AMD64)
2963 SET_ADDRESS(_extrs, StubRoutines::crc32c_table_addr());
2964 #endif
2965 }
2966
2967 #ifdef COMPILER1
2968 {
2969 // Required by C1 blobs
2970 SET_ADDRESS(_extrs, static_cast<int (*)(oopDesc*)>(SharedRuntime::dtrace_object_alloc));
2971 SET_ADDRESS(_extrs, SharedRuntime::exception_handler_for_return_address);
2972 SET_ADDRESS(_extrs, SharedRuntime::register_finalizer);
2973 SET_ADDRESS(_extrs, Runtime1::is_instance_of);
2974 SET_ADDRESS(_extrs, Runtime1::exception_handler_for_pc);
2975 SET_ADDRESS(_extrs, Runtime1::check_abort_on_vm_exception);
2976 SET_ADDRESS(_extrs, Runtime1::new_instance);
2977 SET_ADDRESS(_extrs, Runtime1::counter_overflow);
2978 SET_ADDRESS(_extrs, Runtime1::new_type_array);
2979 SET_ADDRESS(_extrs, Runtime1::new_object_array);
2980 SET_ADDRESS(_extrs, Runtime1::new_multi_array);
2981 SET_ADDRESS(_extrs, Runtime1::throw_range_check_exception);
2982 SET_ADDRESS(_extrs, Runtime1::throw_index_exception);
2983 SET_ADDRESS(_extrs, Runtime1::throw_div0_exception);
2984 SET_ADDRESS(_extrs, Runtime1::throw_null_pointer_exception);
2985 SET_ADDRESS(_extrs, Runtime1::throw_array_store_exception);
2986 SET_ADDRESS(_extrs, Runtime1::throw_class_cast_exception);
2987 SET_ADDRESS(_extrs, Runtime1::throw_incompatible_class_change_error);
2988 SET_ADDRESS(_extrs, Runtime1::monitorenter);
2989 SET_ADDRESS(_extrs, Runtime1::monitorexit);
2990 SET_ADDRESS(_extrs, Runtime1::deoptimize);
2991 SET_ADDRESS(_extrs, Runtime1::access_field_patching);
2992 SET_ADDRESS(_extrs, Runtime1::move_klass_patching);
2993 SET_ADDRESS(_extrs, Runtime1::move_mirror_patching);
2994 SET_ADDRESS(_extrs, Runtime1::move_appendix_patching);
2995 SET_ADDRESS(_extrs, Runtime1::predicate_failed_trap);
2996 SET_ADDRESS(_extrs, Runtime1::unimplemented_entry);
2997 SET_ADDRESS(_extrs, Runtime1::trace_block_entry);
2998 #ifdef X86
2999 SET_ADDRESS(_extrs, LIR_Assembler::float_signmask_pool);
3000 SET_ADDRESS(_extrs, LIR_Assembler::double_signmask_pool);
3001 SET_ADDRESS(_extrs, LIR_Assembler::float_signflip_pool);
3002 SET_ADDRESS(_extrs, LIR_Assembler::double_signflip_pool);
3003 #endif
3004 #ifndef PRODUCT
3005 SET_ADDRESS(_extrs, os::breakpoint);
3006 #endif
3007 }
3008 #endif // COMPILER1
3009
3010 #ifdef COMPILER2
3011 {
3012 // Required by C2 blobs
3013 SET_ADDRESS(_extrs, Deoptimization::uncommon_trap);
3014 SET_ADDRESS(_extrs, OptoRuntime::handle_exception_C);
3015 SET_ADDRESS(_extrs, OptoRuntime::new_instance_C);
3016 SET_ADDRESS(_extrs, OptoRuntime::new_array_C);
3017 SET_ADDRESS(_extrs, OptoRuntime::new_array_nozero_C);
3018 SET_ADDRESS(_extrs, OptoRuntime::multianewarray2_C);
3019 SET_ADDRESS(_extrs, OptoRuntime::multianewarray3_C);
3020 SET_ADDRESS(_extrs, OptoRuntime::multianewarray4_C);
3021 SET_ADDRESS(_extrs, OptoRuntime::multianewarray5_C);
3022 SET_ADDRESS(_extrs, OptoRuntime::multianewarrayN_C);
3023 SET_ADDRESS(_extrs, OptoRuntime::complete_monitor_locking_C);
3024 SET_ADDRESS(_extrs, OptoRuntime::monitor_notify_C);
3025 SET_ADDRESS(_extrs, OptoRuntime::monitor_notifyAll_C);
3026 SET_ADDRESS(_extrs, OptoRuntime::rethrow_C);
3027 SET_ADDRESS(_extrs, OptoRuntime::slow_arraycopy_C);
3028 SET_ADDRESS(_extrs, OptoRuntime::register_finalizer_C);
3029 SET_ADDRESS(_extrs, OptoRuntime::class_init_barrier_C);
3030 SET_ADDRESS(_extrs, OptoRuntime::vthread_end_first_transition_C);
3031 SET_ADDRESS(_extrs, OptoRuntime::vthread_start_final_transition_C);
3032 SET_ADDRESS(_extrs, OptoRuntime::vthread_start_transition_C);
3033 SET_ADDRESS(_extrs, OptoRuntime::vthread_end_transition_C);
3034 #if defined(AMD64)
3035     // Used by C2 intrinsics
3036 SET_ADDRESS(_extrs, StubRoutines::x86::arrays_hashcode_powers_of_31());
3037 #endif
3038 }
3039 #endif // COMPILER2
3040 #if INCLUDE_G1GC
3041 SET_ADDRESS(_extrs, G1BarrierSetRuntime::write_ref_field_pre_entry);
3042 #endif
3043
3044 #if INCLUDE_SHENANDOAHGC
3045 SET_ADDRESS(_extrs, ShenandoahRuntime::arraycopy_barrier_oop);
3046 SET_ADDRESS(_extrs, ShenandoahRuntime::arraycopy_barrier_narrow_oop);
3047 SET_ADDRESS(_extrs, ShenandoahRuntime::clone_barrier);
3048 SET_ADDRESS(_extrs, ShenandoahRuntime::load_reference_barrier_strong);
3049 SET_ADDRESS(_extrs, ShenandoahRuntime::load_reference_barrier_strong_narrow);
3050 SET_ADDRESS(_extrs, ShenandoahRuntime::load_reference_barrier_weak);
3051 SET_ADDRESS(_extrs, ShenandoahRuntime::load_reference_barrier_weak_narrow);
3052 SET_ADDRESS(_extrs, ShenandoahRuntime::load_reference_barrier_phantom);
3053 SET_ADDRESS(_extrs, ShenandoahRuntime::load_reference_barrier_phantom_narrow);
3054 SET_ADDRESS(_extrs, ShenandoahRuntime::write_barrier_pre);
3055 #endif
3056
3057 #if INCLUDE_ZGC
3058 SET_ADDRESS(_extrs, ZBarrierSetRuntime::load_barrier_on_oop_field_preloaded_addr());
3059 SET_ADDRESS(_extrs, ZBarrierSetRuntime::load_barrier_on_phantom_oop_field_preloaded_addr());
3060 #if defined(AMD64)
3061 SET_ADDRESS(_extrs, &ZPointerLoadShift);
3062 #endif
3063 #if defined(AARCH64)
3064 BarrierSetAssembler* bs_asm = BarrierSet::barrier_set()->barrier_set_assembler();
3065 SET_ADDRESS(_extrs, bs_asm->patching_epoch_addr());
3066 #endif
3067 #endif // INCLUDE_ZGC
3068
3069 SET_ADDRESS(_extrs, SharedRuntime::rc_trace_method_entry);
3070 SET_ADDRESS(_extrs, SharedRuntime::reguard_yellow_pages);
3071 SET_ADDRESS(_extrs, SharedRuntime::dtrace_method_exit);
3072
3073 SET_ADDRESS(_extrs, SharedRuntime::complete_monitor_unlocking_C);
3074 SET_ADDRESS(_extrs, SharedRuntime::enable_stack_reserved_zone);
3075 #if defined(AMD64) && !defined(ZERO)
3076 SET_ADDRESS(_extrs, SharedRuntime::montgomery_multiply);
3077 SET_ADDRESS(_extrs, SharedRuntime::montgomery_square);
3078 #endif // AMD64
3079 SET_ADDRESS(_extrs, SharedRuntime::d2f);
3080 SET_ADDRESS(_extrs, SharedRuntime::d2i);
3081 SET_ADDRESS(_extrs, SharedRuntime::d2l);
3082 SET_ADDRESS(_extrs, SharedRuntime::dcos);
3083 SET_ADDRESS(_extrs, SharedRuntime::dexp);
3084 SET_ADDRESS(_extrs, SharedRuntime::dlog);
3085 SET_ADDRESS(_extrs, SharedRuntime::dlog10);
3086 SET_ADDRESS(_extrs, SharedRuntime::dpow);
3087 SET_ADDRESS(_extrs, SharedRuntime::dsin);
3088 SET_ADDRESS(_extrs, SharedRuntime::dtan);
3089 SET_ADDRESS(_extrs, SharedRuntime::f2i);
3090 SET_ADDRESS(_extrs, SharedRuntime::f2l);
3091 #ifndef ZERO
3092 SET_ADDRESS(_extrs, SharedRuntime::drem);
3093 SET_ADDRESS(_extrs, SharedRuntime::frem);
3094 #endif
3095 SET_ADDRESS(_extrs, SharedRuntime::l2d);
3096 SET_ADDRESS(_extrs, SharedRuntime::l2f);
3097 SET_ADDRESS(_extrs, SharedRuntime::ldiv);
3098 SET_ADDRESS(_extrs, SharedRuntime::lmul);
3099 SET_ADDRESS(_extrs, SharedRuntime::lrem);
3100
3101 SET_ADDRESS(_extrs, ThreadIdentifier::unsafe_offset());
3102 SET_ADDRESS(_extrs, Thread::current);
3103
3104 SET_ADDRESS(_extrs, os::javaTimeMillis);
3105 SET_ADDRESS(_extrs, os::javaTimeNanos);
3106 // For JFR
3107 SET_ADDRESS(_extrs, os::elapsed_counter);
3108 #if defined(X86) && !defined(ZERO)
3109 SET_ADDRESS(_extrs, Rdtsc::elapsed_counter);
3110 #endif
3111
3112 #if INCLUDE_JVMTI
3113 SET_ADDRESS(_extrs, &JvmtiExport::_should_notify_object_alloc);
3114 #endif /* INCLUDE_JVMTI */
3115 SET_ADDRESS(_extrs, MountUnmountDisabler::notify_jvmti_events_address());
3116 SET_ADDRESS(_extrs, MountUnmountDisabler::global_vthread_transition_disable_count_address());
3117
3118 #ifndef PRODUCT
3119 SET_ADDRESS(_extrs, &SharedRuntime::_partial_subtype_ctr);
3120 SET_ADDRESS(_extrs, JavaThread::verify_cross_modify_fence_failure);
3121 #endif
3122
3123 #ifndef ZERO
3124 #if defined(AMD64) || defined(AARCH64) || defined(RISCV64)
3125 SET_ADDRESS(_extrs, MacroAssembler::debug64);
3126 #endif
3127 #if defined(AARCH64)
3128 SET_ADDRESS(_extrs, JavaThread::aarch64_get_thread_helper);
3129 #endif
3130 #endif // ZERO
3131
3132 // addresses of fields in AOT runtime constants area
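// field_addresses_list() is a nullptr-terminated array covering the
// AOTRuntimeConstants fields (the card table address and the grain shift,
// see _field_addresses_list further below), so each field gets its own
// slot in the external addresses table.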
3133 address* p = AOTRuntimeConstants::field_addresses_list();
3134 while (*p != nullptr) {
3135 SET_ADDRESS(_extrs, *p++);
3136 }
3137
3138 _extrs_complete = true;
3139 log_info(aot, codecache, init)("External addresses recorded");
3140 }
3141
3142 static bool initializing_early_stubs = false;
3143
3144 void AOTCodeAddressTable::init_early_stubs() {
3145 if (_complete || initializing_early_stubs) return; // Done already
3146 initializing_early_stubs = true;
3147 _stubs_addr = NEW_C_HEAP_ARRAY(address, _stubs_max, mtCode);
3148 _stubs_length = 0;
3149 SET_ADDRESS(_stubs, StubRoutines::forward_exception_entry());
3150
3151 {
3152 // Required by C1 blobs
3153 #if defined(AMD64) && !defined(ZERO)
3154 SET_ADDRESS(_stubs, StubRoutines::x86::double_sign_flip());
3155 SET_ADDRESS(_stubs, StubRoutines::x86::d2l_fixup());
3156 #endif // AMD64
3157 }
3158
3159 _early_stubs_complete = true;
3160 log_info(aot, codecache, init)("Early stubs recorded");
3161 }
3162
3163 static bool initializing_shared_blobs = false;
3164
3165 void AOTCodeAddressTable::init_shared_blobs() {
3166 if (_complete || initializing_shared_blobs) return; // Done already
3167 initializing_shared_blobs = true;
3168 address* blobs_addr = NEW_C_HEAP_ARRAY(address, _blobs_max, mtCode);
3169
3170 // Divide the _shared_blobs_addr array into chunks because they could be initialized in parallel
3171 _shared_blobs_addr = blobs_addr;
3172 _C1_blobs_addr = _shared_blobs_addr + _shared_blobs_max; // C1 blob addresses are stored after shared blobs
3173 _C2_blobs_addr = _C1_blobs_addr + _C1_blobs_max; // C2 blob addresses are stored after C1 blobs
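// Resulting layout of the single blobs_addr allocation (a sketch; assumes
// _blobs_max covers all three chunks, as the memset below implies):
//   [0 .. _shared_blobs_max)                           shared runtime blobs
//   [_shared_blobs_max .. +_C1_blobs_max)              C1 (Runtime1) blobs
//   [_shared_blobs_max + _C1_blobs_max .. _blobs_max)  C2 (OptoRuntime) blobs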
3174
3175 _shared_blobs_length = 0;
3176 _C1_blobs_length = 0;
3177 _C2_blobs_length = 0;
3178
3179 // clear the address table
3180 memset(blobs_addr, 0, sizeof(address) * _blobs_max);
3181
3182 // Record addresses of generated code blobs
3183 SET_ADDRESS(_shared_blobs, SharedRuntime::get_handle_wrong_method_stub());
3184 SET_ADDRESS(_shared_blobs, SharedRuntime::get_ic_miss_stub());
3185 SET_ADDRESS(_shared_blobs, SharedRuntime::deopt_blob()->unpack());
3186 SET_ADDRESS(_shared_blobs, SharedRuntime::deopt_blob()->unpack_with_exception());
3187 SET_ADDRESS(_shared_blobs, SharedRuntime::deopt_blob()->unpack_with_reexecution());
3188 SET_ADDRESS(_shared_blobs, SharedRuntime::deopt_blob()->unpack_with_exception_in_tls());
3189 SET_ADDRESS(_shared_blobs, SharedRuntime::get_resolve_opt_virtual_call_stub());
3190 SET_ADDRESS(_shared_blobs, SharedRuntime::get_resolve_virtual_call_stub());
3191 SET_ADDRESS(_shared_blobs, SharedRuntime::get_resolve_static_call_stub());
3192 SET_ADDRESS(_shared_blobs, SharedRuntime::deopt_blob()->entry_point());
3193 SET_ADDRESS(_shared_blobs, SharedRuntime::polling_page_safepoint_handler_blob()->entry_point());
3194 SET_ADDRESS(_shared_blobs, SharedRuntime::polling_page_return_handler_blob()->entry_point());
3195 #ifdef COMPILER2
3196 // polling_page_vectors_safepoint_handler_blob can be nullptr if AVX feature is not present or is disabled
3197 if (SharedRuntime::polling_page_vectors_safepoint_handler_blob() != nullptr) {
3198 SET_ADDRESS(_shared_blobs, SharedRuntime::polling_page_vectors_safepoint_handler_blob()->entry_point());
3199 }
3200 #endif
3201 #if INCLUDE_JVMCI
3202 if (EnableJVMCI) {
3203 SET_ADDRESS(_shared_blobs, SharedRuntime::deopt_blob()->uncommon_trap());
3204 SET_ADDRESS(_shared_blobs, SharedRuntime::deopt_blob()->implicit_exception_uncommon_trap());
3205 }
3206 #endif
3207 SET_ADDRESS(_shared_blobs, SharedRuntime::throw_AbstractMethodError_entry());
3208 SET_ADDRESS(_shared_blobs, SharedRuntime::throw_IncompatibleClassChangeError_entry());
3209 SET_ADDRESS(_shared_blobs, SharedRuntime::throw_NullPointerException_at_call_entry());
3210 SET_ADDRESS(_shared_blobs, SharedRuntime::throw_StackOverflowError_entry());
3211 SET_ADDRESS(_shared_blobs, SharedRuntime::throw_delayed_StackOverflowError_entry());
3212
3213 assert(_shared_blobs_length <= _shared_blobs_max, "increase _shared_blobs_max to %d", _shared_blobs_length);
3214 _shared_blobs_complete = true;
3215 log_info(aot, codecache, init)("All shared blobs recorded");
3216 }
3217
3218 static bool initializing_stubs = false;
3219 void AOTCodeAddressTable::init_stubs() {
3220 if (_complete || initializing_stubs) return; // Done already
3221 assert(_early_stubs_complete, "early stubs should be initialized");
3222 initializing_stubs = true;
3223
3224 // Stubs
3225 SET_ADDRESS(_stubs, StubRoutines::method_entry_barrier());
3226 SET_ADDRESS(_stubs, StubRoutines::atomic_xchg_entry());
3227 SET_ADDRESS(_stubs, StubRoutines::atomic_cmpxchg_entry());
3228 SET_ADDRESS(_stubs, StubRoutines::atomic_cmpxchg_long_entry());
3229 SET_ADDRESS(_stubs, StubRoutines::atomic_add_entry());
3230 SET_ADDRESS(_stubs, StubRoutines::fence_entry());
3231
3232 SET_ADDRESS(_stubs, StubRoutines::cont_thaw());
3233 SET_ADDRESS(_stubs, StubRoutines::cont_returnBarrier());
3234 SET_ADDRESS(_stubs, StubRoutines::cont_returnBarrierExc());
3235
3236 JFR_ONLY(SET_ADDRESS(_stubs, SharedRuntime::jfr_write_checkpoint());)
3237
3238 SET_ADDRESS(_stubs, StubRoutines::jbyte_arraycopy());
3239 SET_ADDRESS(_stubs, StubRoutines::jshort_arraycopy());
3240 SET_ADDRESS(_stubs, StubRoutines::jint_arraycopy());
3241 SET_ADDRESS(_stubs, StubRoutines::jlong_arraycopy());
3242 SET_ADDRESS(_stubs, StubRoutines::_oop_arraycopy);
3243 SET_ADDRESS(_stubs, StubRoutines::_oop_arraycopy_uninit);
3244
3245 SET_ADDRESS(_stubs, StubRoutines::jbyte_disjoint_arraycopy());
3246 SET_ADDRESS(_stubs, StubRoutines::jshort_disjoint_arraycopy());
3247 SET_ADDRESS(_stubs, StubRoutines::jint_disjoint_arraycopy());
3248 SET_ADDRESS(_stubs, StubRoutines::jlong_disjoint_arraycopy());
3249 SET_ADDRESS(_stubs, StubRoutines::_oop_disjoint_arraycopy);
3250 SET_ADDRESS(_stubs, StubRoutines::_oop_disjoint_arraycopy_uninit);
3251
3252 SET_ADDRESS(_stubs, StubRoutines::arrayof_jbyte_arraycopy());
3253 SET_ADDRESS(_stubs, StubRoutines::arrayof_jshort_arraycopy());
3254 SET_ADDRESS(_stubs, StubRoutines::arrayof_jint_arraycopy());
3255 SET_ADDRESS(_stubs, StubRoutines::arrayof_jlong_arraycopy());
3256 SET_ADDRESS(_stubs, StubRoutines::_arrayof_oop_arraycopy);
3257 SET_ADDRESS(_stubs, StubRoutines::_arrayof_oop_arraycopy_uninit);
3258
3259 SET_ADDRESS(_stubs, StubRoutines::arrayof_jbyte_disjoint_arraycopy());
3260 SET_ADDRESS(_stubs, StubRoutines::arrayof_jshort_disjoint_arraycopy());
3261 SET_ADDRESS(_stubs, StubRoutines::arrayof_jint_disjoint_arraycopy());
3262 SET_ADDRESS(_stubs, StubRoutines::arrayof_jlong_disjoint_arraycopy());
3263 SET_ADDRESS(_stubs, StubRoutines::_arrayof_oop_disjoint_arraycopy);
3264 SET_ADDRESS(_stubs, StubRoutines::_arrayof_oop_disjoint_arraycopy_uninit);
3265
3266 SET_ADDRESS(_stubs, StubRoutines::_checkcast_arraycopy);
3267 SET_ADDRESS(_stubs, StubRoutines::_checkcast_arraycopy_uninit);
3268
3269 SET_ADDRESS(_stubs, StubRoutines::unsafe_arraycopy());
3270 SET_ADDRESS(_stubs, StubRoutines::generic_arraycopy());
3271
3272 SET_ADDRESS(_stubs, StubRoutines::jbyte_fill());
3273 SET_ADDRESS(_stubs, StubRoutines::jshort_fill());
3274 SET_ADDRESS(_stubs, StubRoutines::jint_fill());
3275 SET_ADDRESS(_stubs, StubRoutines::arrayof_jbyte_fill());
3276 SET_ADDRESS(_stubs, StubRoutines::arrayof_jshort_fill());
3277 SET_ADDRESS(_stubs, StubRoutines::arrayof_jint_fill());
3278
3279 SET_ADDRESS(_stubs, StubRoutines::data_cache_writeback());
3280 SET_ADDRESS(_stubs, StubRoutines::data_cache_writeback_sync());
3281
3282 SET_ADDRESS(_stubs, StubRoutines::aescrypt_encryptBlock());
3283 SET_ADDRESS(_stubs, StubRoutines::aescrypt_decryptBlock());
3284 SET_ADDRESS(_stubs, StubRoutines::cipherBlockChaining_encryptAESCrypt());
3285 SET_ADDRESS(_stubs, StubRoutines::cipherBlockChaining_decryptAESCrypt());
3286 SET_ADDRESS(_stubs, StubRoutines::electronicCodeBook_encryptAESCrypt());
3287 SET_ADDRESS(_stubs, StubRoutines::electronicCodeBook_decryptAESCrypt());
3288 SET_ADDRESS(_stubs, StubRoutines::poly1305_processBlocks());
3289 SET_ADDRESS(_stubs, StubRoutines::counterMode_AESCrypt());
3290 SET_ADDRESS(_stubs, StubRoutines::ghash_processBlocks());
3291 SET_ADDRESS(_stubs, StubRoutines::chacha20Block());
3292 SET_ADDRESS(_stubs, StubRoutines::base64_encodeBlock());
3293 SET_ADDRESS(_stubs, StubRoutines::base64_decodeBlock());
3294 SET_ADDRESS(_stubs, StubRoutines::md5_implCompress());
3295 SET_ADDRESS(_stubs, StubRoutines::md5_implCompressMB());
3296 SET_ADDRESS(_stubs, StubRoutines::sha1_implCompress());
3297 SET_ADDRESS(_stubs, StubRoutines::sha1_implCompressMB());
3298 SET_ADDRESS(_stubs, StubRoutines::sha256_implCompress());
3299 SET_ADDRESS(_stubs, StubRoutines::sha256_implCompressMB());
3300 SET_ADDRESS(_stubs, StubRoutines::sha512_implCompress());
3301 SET_ADDRESS(_stubs, StubRoutines::sha512_implCompressMB());
3302 SET_ADDRESS(_stubs, StubRoutines::sha3_implCompress());
3303 SET_ADDRESS(_stubs, StubRoutines::sha3_implCompressMB());
3304 SET_ADDRESS(_stubs, StubRoutines::double_keccak());
3305 SET_ADDRESS(_stubs, StubRoutines::intpoly_assign());
3306 SET_ADDRESS(_stubs, StubRoutines::intpoly_montgomeryMult_P256());
3307 SET_ADDRESS(_stubs, StubRoutines::dilithiumAlmostNtt());
3308 SET_ADDRESS(_stubs, StubRoutines::dilithiumAlmostInverseNtt());
3309 SET_ADDRESS(_stubs, StubRoutines::dilithiumNttMult());
3310 SET_ADDRESS(_stubs, StubRoutines::dilithiumMontMulByConstant());
3311 SET_ADDRESS(_stubs, StubRoutines::dilithiumDecomposePoly());
3312
3313 SET_ADDRESS(_stubs, StubRoutines::updateBytesCRC32());
3314 SET_ADDRESS(_stubs, StubRoutines::updateBytesCRC32C());
3315 SET_ADDRESS(_stubs, StubRoutines::updateBytesAdler32());
3316
3317 SET_ADDRESS(_stubs, StubRoutines::multiplyToLen());
3318 SET_ADDRESS(_stubs, StubRoutines::squareToLen());
3319 SET_ADDRESS(_stubs, StubRoutines::mulAdd());
3320 SET_ADDRESS(_stubs, StubRoutines::montgomeryMultiply());
3321 SET_ADDRESS(_stubs, StubRoutines::montgomerySquare());
3322 SET_ADDRESS(_stubs, StubRoutines::bigIntegerRightShift());
3323 SET_ADDRESS(_stubs, StubRoutines::bigIntegerLeftShift());
3324 SET_ADDRESS(_stubs, StubRoutines::galoisCounterMode_AESCrypt());
3325
3326 SET_ADDRESS(_stubs, StubRoutines::vectorizedMismatch());
3327
3328 SET_ADDRESS(_stubs, StubRoutines::unsafe_setmemory());
3329
3330 SET_ADDRESS(_stubs, StubRoutines::dexp());
3331 SET_ADDRESS(_stubs, StubRoutines::dlog());
3332 SET_ADDRESS(_stubs, StubRoutines::dlog10());
3333 SET_ADDRESS(_stubs, StubRoutines::dpow());
3334 SET_ADDRESS(_stubs, StubRoutines::dsin());
3335 SET_ADDRESS(_stubs, StubRoutines::dcos());
3336 SET_ADDRESS(_stubs, StubRoutines::dlibm_reduce_pi04l());
3337 SET_ADDRESS(_stubs, StubRoutines::dlibm_sin_cos_huge());
3338 SET_ADDRESS(_stubs, StubRoutines::dlibm_tan_cot_huge());
3339 SET_ADDRESS(_stubs, StubRoutines::dtan());
3340
3341 SET_ADDRESS(_stubs, StubRoutines::f2hf_adr());
3342 SET_ADDRESS(_stubs, StubRoutines::hf2f_adr());
3343
3344 for (int slot = 0; slot < Klass::SECONDARY_SUPERS_TABLE_SIZE; slot++) {
3345 SET_ADDRESS(_stubs, StubRoutines::lookup_secondary_supers_table_stub(slot));
3346 }
3347 SET_ADDRESS(_stubs, StubRoutines::lookup_secondary_supers_table_slow_path_stub());
3348
3349 #if defined(AMD64) && !defined(ZERO)
3350 SET_ADDRESS(_stubs, StubRoutines::x86::d2i_fixup());
3351 SET_ADDRESS(_stubs, StubRoutines::x86::f2i_fixup());
3352 SET_ADDRESS(_stubs, StubRoutines::x86::f2l_fixup());
3353 SET_ADDRESS(_stubs, StubRoutines::x86::float_sign_mask());
3354 SET_ADDRESS(_stubs, StubRoutines::x86::float_sign_flip());
3355 SET_ADDRESS(_stubs, StubRoutines::x86::double_sign_mask());
3356 SET_ADDRESS(_stubs, StubRoutines::x86::vector_popcount_lut());
3357 SET_ADDRESS(_stubs, StubRoutines::x86::vector_float_sign_mask());
3358 SET_ADDRESS(_stubs, StubRoutines::x86::vector_float_sign_flip());
3359 SET_ADDRESS(_stubs, StubRoutines::x86::vector_double_sign_mask());
3360 SET_ADDRESS(_stubs, StubRoutines::x86::vector_double_sign_flip());
3361 SET_ADDRESS(_stubs, StubRoutines::x86::vector_int_shuffle_mask());
3362 SET_ADDRESS(_stubs, StubRoutines::x86::vector_byte_shuffle_mask());
3363 SET_ADDRESS(_stubs, StubRoutines::x86::vector_short_shuffle_mask());
3364 SET_ADDRESS(_stubs, StubRoutines::x86::vector_long_shuffle_mask());
3365 SET_ADDRESS(_stubs, StubRoutines::x86::vector_long_sign_mask());
3366 SET_ADDRESS(_stubs, StubRoutines::x86::vector_reverse_byte_perm_mask_int());
3367 SET_ADDRESS(_stubs, StubRoutines::x86::vector_reverse_byte_perm_mask_short());
3368 SET_ADDRESS(_stubs, StubRoutines::x86::vector_reverse_byte_perm_mask_long());
3369 // The iota indices are ordered by type B/S/I/L/F/D, and the offset between two types is 64.
3370 // See C2_MacroAssembler::load_iota_indices().
3371 for (int i = 0; i < 6; i++) {
3372 SET_ADDRESS(_stubs, StubRoutines::x86::vector_iota_indices() + i * 64);
3373 }
3374 #endif
3375 #if defined(AARCH64) && !defined(ZERO)
3376 SET_ADDRESS(_stubs, StubRoutines::aarch64::zero_blocks());
3377 SET_ADDRESS(_stubs, StubRoutines::aarch64::count_positives());
3378 SET_ADDRESS(_stubs, StubRoutines::aarch64::count_positives_long());
3379 SET_ADDRESS(_stubs, StubRoutines::aarch64::large_array_equals());
3380 SET_ADDRESS(_stubs, StubRoutines::aarch64::compare_long_string_LL());
3381 SET_ADDRESS(_stubs, StubRoutines::aarch64::compare_long_string_UU());
3382 SET_ADDRESS(_stubs, StubRoutines::aarch64::compare_long_string_LU());
3383 SET_ADDRESS(_stubs, StubRoutines::aarch64::compare_long_string_UL());
3384 SET_ADDRESS(_stubs, StubRoutines::aarch64::string_indexof_linear_ul());
3385 SET_ADDRESS(_stubs, StubRoutines::aarch64::string_indexof_linear_ll());
3386 SET_ADDRESS(_stubs, StubRoutines::aarch64::string_indexof_linear_uu());
3387 SET_ADDRESS(_stubs, StubRoutines::aarch64::large_byte_array_inflate());
3388 SET_ADDRESS(_stubs, StubRoutines::aarch64::spin_wait());
3389
3390 SET_ADDRESS(_stubs, StubRoutines::aarch64::large_arrays_hashcode(T_BOOLEAN));
3391 SET_ADDRESS(_stubs, StubRoutines::aarch64::large_arrays_hashcode(T_BYTE));
3392 SET_ADDRESS(_stubs, StubRoutines::aarch64::large_arrays_hashcode(T_SHORT));
3393 SET_ADDRESS(_stubs, StubRoutines::aarch64::large_arrays_hashcode(T_CHAR));
3394 SET_ADDRESS(_stubs, StubRoutines::aarch64::large_arrays_hashcode(T_INT));
3395 #endif
3396
3397 _complete = true;
3398 log_info(aot, codecache, init)("Stubs recorded");
3399 }
3400
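// C1 blob addresses are published in two steps: init_early_c1() records the
// ids from StubInfo::stub_base(StubGroup::C1) up to and including
// c1_forward_exception_id, and init_c1() continues from the next id to the
// end of the C1 stub group and then appends the GC-specific C1 runtime stubs.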
3401 void AOTCodeAddressTable::init_early_c1() {
3402 #ifdef COMPILER1
3403 // Runtime1 Blobs
3404 StubId id = StubInfo::stub_base(StubGroup::C1);
3405 // include forward_exception in the range we publish
3406 StubId limit = StubInfo::next(StubId::c1_forward_exception_id);
3407 for (; id != limit; id = StubInfo::next(id)) {
3408 if (Runtime1::blob_for(id) == nullptr) {
3409 log_info(aot, codecache, init)("C1 blob %s is missing", Runtime1::name_for(id));
3410 continue;
3411 }
3412 if (Runtime1::entry_for(id) == nullptr) {
3413 log_info(aot, codecache, init)("C1 blob %s is missing entry", Runtime1::name_for(id));
3414 continue;
3415 }
3416 address entry = Runtime1::entry_for(id);
3417 SET_ADDRESS(_C1_blobs, entry);
3418 }
3419 #endif // COMPILER1
3420 assert(_C1_blobs_length <= _C1_blobs_max, "increase _C1_blobs_max to %d", _C1_blobs_length);
3421 _early_c1_complete = true;
3422 }
3423
3424 void AOTCodeAddressTable::init_c1() {
3425 #ifdef COMPILER1
3426 // Runtime1 Blobs
3427 assert(_early_c1_complete, "early C1 blobs should be initialized");
3428 StubId id = StubInfo::next(StubId::c1_forward_exception_id);
3429 StubId limit = StubInfo::next(StubInfo::stub_max(StubGroup::C1));
3430 for (; id != limit; id = StubInfo::next(id)) {
3431 if (Runtime1::blob_for(id) == nullptr) {
3432 log_info(aot, codecache, init)("C1 blob %s is missing", Runtime1::name_for(id));
3433 continue;
3434 }
3435 if (Runtime1::entry_for(id) == nullptr) {
3436 log_info(aot, codecache, init)("C1 blob %s is missing entry", Runtime1::name_for(id));
3437 continue;
3438 }
3439 address entry = Runtime1::entry_for(id);
3440 SET_ADDRESS(_C1_blobs, entry);
3441 }
3442 #if INCLUDE_G1GC
3443 if (UseG1GC) {
3444 G1BarrierSetC1* bs = (G1BarrierSetC1*)BarrierSet::barrier_set()->barrier_set_c1();
3445 address entry = bs->pre_barrier_c1_runtime_code_blob()->code_begin();
3446 SET_ADDRESS(_C1_blobs, entry);
3447 }
3448 #endif // INCLUDE_G1GC
3449 #if INCLUDE_ZGC
3450 if (UseZGC) {
3451 ZBarrierSetC1* bs = (ZBarrierSetC1*)BarrierSet::barrier_set()->barrier_set_c1();
3452 SET_ADDRESS(_C1_blobs, bs->_load_barrier_on_oop_field_preloaded_runtime_stub);
3453 SET_ADDRESS(_C1_blobs, bs->_load_barrier_on_weak_oop_field_preloaded_runtime_stub);
3454 SET_ADDRESS(_C1_blobs, bs->_store_barrier_on_oop_field_with_healing);
3455 SET_ADDRESS(_C1_blobs, bs->_store_barrier_on_oop_field_without_healing);
3456 }
3457 #endif // INCLUDE_ZGC
3458 #if INCLUDE_SHENANDOAHGC
3459 if (UseShenandoahGC) {
3460 ShenandoahBarrierSetC1* bs = (ShenandoahBarrierSetC1*)BarrierSet::barrier_set()->barrier_set_c1();
3461 SET_ADDRESS(_C1_blobs, bs->pre_barrier_c1_runtime_code_blob()->code_begin());
3462 SET_ADDRESS(_C1_blobs, bs->load_reference_barrier_strong_rt_code_blob()->code_begin());
3463 SET_ADDRESS(_C1_blobs, bs->load_reference_barrier_strong_native_rt_code_blob()->code_begin());
3464 SET_ADDRESS(_C1_blobs, bs->load_reference_barrier_weak_rt_code_blob()->code_begin());
3465 SET_ADDRESS(_C1_blobs, bs->load_reference_barrier_phantom_rt_code_blob()->code_begin());
3466 }
3467 #endif // INCLUDE_SHENANDOAHGC
3468 #endif // COMPILER1
3469
3470 assert(_C1_blobs_length <= _C1_blobs_max, "increase _C1_blobs_max to %d", _C1_blobs_length);
3471 _c1_complete = true;
3472 log_info(aot, codecache, init)("Runtime1 Blobs recorded");
3473 }
3474
3475 void AOTCodeAddressTable::init_c2() {
3476 #ifdef COMPILER2
3477 // OptoRuntime Blobs
3478 SET_ADDRESS(_C2_blobs, OptoRuntime::uncommon_trap_blob()->entry_point());
3479 SET_ADDRESS(_C2_blobs, OptoRuntime::exception_blob()->entry_point());
3480 SET_ADDRESS(_C2_blobs, OptoRuntime::new_instance_Java());
3481 SET_ADDRESS(_C2_blobs, OptoRuntime::new_array_Java());
3482 SET_ADDRESS(_C2_blobs, OptoRuntime::new_array_nozero_Java());
3483 SET_ADDRESS(_C2_blobs, OptoRuntime::multianewarray2_Java());
3484 SET_ADDRESS(_C2_blobs, OptoRuntime::multianewarray3_Java());
3485 SET_ADDRESS(_C2_blobs, OptoRuntime::multianewarray4_Java());
3486 SET_ADDRESS(_C2_blobs, OptoRuntime::multianewarray5_Java());
3487 SET_ADDRESS(_C2_blobs, OptoRuntime::multianewarrayN_Java());
3488 SET_ADDRESS(_C2_blobs, OptoRuntime::vtable_must_compile_stub());
3489 SET_ADDRESS(_C2_blobs, OptoRuntime::complete_monitor_locking_Java());
3490 SET_ADDRESS(_C2_blobs, OptoRuntime::monitor_notify_Java());
3491 SET_ADDRESS(_C2_blobs, OptoRuntime::monitor_notifyAll_Java());
3492 SET_ADDRESS(_C2_blobs, OptoRuntime::rethrow_stub());
3493 SET_ADDRESS(_C2_blobs, OptoRuntime::slow_arraycopy_Java());
3494 SET_ADDRESS(_C2_blobs, OptoRuntime::register_finalizer_Java());
3495 SET_ADDRESS(_C2_blobs, OptoRuntime::class_init_barrier_Java());
3496 #if INCLUDE_JVMTI
3497 SET_ADDRESS(_C2_blobs, OptoRuntime::vthread_end_first_transition_Java());
3498 SET_ADDRESS(_C2_blobs, OptoRuntime::vthread_start_final_transition_Java());
3499 SET_ADDRESS(_C2_blobs, OptoRuntime::vthread_start_transition_Java());
3500 SET_ADDRESS(_C2_blobs, OptoRuntime::vthread_end_transition_Java());
3501 #endif /* INCLUDE_JVMTI */
3502 #endif
3503
3504 assert(_C2_blobs_length <= _C2_blobs_max, "increase _C2_blobs_max to %d", _C2_blobs_length);
3505 _c2_complete = true;
3506 log_info(aot, codecache, init)("OptoRuntime Blobs recorded");
3507 }
3508 #undef SET_ADDRESS
3509
3510 AOTCodeAddressTable::~AOTCodeAddressTable() {
3511 if (_extrs_addr != nullptr) {
3512 FREE_C_HEAP_ARRAY(address, _extrs_addr);
3513 }
3514 if (_stubs_addr != nullptr) {
3515 FREE_C_HEAP_ARRAY(address, _stubs_addr);
3516 }
3517 if (_shared_blobs_addr != nullptr) {
3518 FREE_C_HEAP_ARRAY(address, _shared_blobs_addr);
3519 }
3520 }
3521
3522 #ifdef PRODUCT
3523 #define MAX_STR_COUNT 200
3524 #else
3525 #define MAX_STR_COUNT 500
3526 #endif
3527 #define _c_str_max MAX_STR_COUNT
3528 static const int _c_str_base = _all_max;
3529
3530 static const char* _C_strings_in[MAX_STR_COUNT] = {nullptr}; // Incoming strings
3531 static const char* _C_strings[MAX_STR_COUNT] = {nullptr}; // Our duplicates
3532 static int _C_strings_count = 0;
3533 static int _C_strings_s[MAX_STR_COUNT] = {0};
3534 static int _C_strings_id[MAX_STR_COUNT] = {0};
3535 static int _C_strings_used = 0;
3536
3537 void AOTCodeCache::load_strings() {
3538 uint strings_count = _load_header->strings_count();
3539 if (strings_count == 0) {
3540 return;
3541 }
3542 uint strings_offset = _load_header->strings_offset();
3543 uint* string_lengths = (uint*)addr(strings_offset);
3544 strings_offset += (strings_count * sizeof(uint));
3545 uint strings_size = _load_header->search_table_offset() - strings_offset;
3546 // We have to keep cached strings longer than the _cache buffer
3547 // because they are referenced from compiled code which may
3548 // still be executed on VM exit after _cache is freed.
3549 char* p = NEW_C_HEAP_ARRAY(char, strings_size+1, mtCode);
3550 memcpy(p, addr(strings_offset), strings_size);
3551 _C_strings_buf = p;
3552 assert(strings_count <= MAX_STR_COUNT, "sanity");
3553 for (uint i = 0; i < strings_count; i++) {
3554 _C_strings[i] = p;
3555 uint len = string_lengths[i];
3556 _C_strings_s[i] = i;
3557 _C_strings_id[i] = i;
3558 p += len;
3559 }
3560 assert((uint)(p - _C_strings_buf) <= strings_size, "(" INTPTR_FORMAT " - " INTPTR_FORMAT ") = %d > %d ", p2i(p), p2i(_C_strings_buf), (uint)(p - _C_strings_buf), strings_size);
3561 _C_strings_count = strings_count;
3562 _C_strings_used = strings_count;
3563 log_debug(aot, codecache, init)(" Loaded %d C strings of total length %d at offset %d from AOT Code Cache", _C_strings_count, strings_size, strings_offset);
3564 }
3565
3667 assert(_extrs_complete, "AOT Code Cache VM runtime addresses table is not complete");
3668 if (idx == -1) {
3669 return (address)-1;
3670 }
3671 uint id = (uint)idx;
3672 // special case for symbols encoded as offsets relative to os::init
3673 if (id > (_c_str_base + _c_str_max)) {
3674 return (address)os::init + idx;
3675 }
3676 if (idx < 0) {
3677 fatal("Incorrect id %d for AOT Code Cache addresses table", id);
3678 return nullptr;
3679 }
3680 // no need to compare unsigned id against 0
3681 if (/* id >= _extrs_base && */ id < _extrs_length) {
3682 return _extrs_addr[id - _extrs_base];
3683 }
3684 if (id >= _stubs_base && id < _stubs_base + _stubs_length) {
3685 return _stubs_addr[id - _stubs_base];
3686 }
3690 if (id >= _shared_blobs_base && id < _shared_blobs_base + _shared_blobs_length) {
3691 return _shared_blobs_addr[id - _shared_blobs_base];
3692 }
3693 if (id >= _C1_blobs_base && id < _C1_blobs_base + _C1_blobs_length) {
3694 return _C1_blobs_addr[id - _C1_blobs_base];
3695 }
3699 if (id >= _C2_blobs_base && id < _C2_blobs_base + _C2_blobs_length) {
3700 return _C2_blobs_addr[id - _C2_blobs_base];
3701 }
3702 if (id >= _c_str_base && id < (_c_str_base + (uint)_C_strings_count)) {
3703 return address_for_C_string(id - _c_str_base);
3704 }
3705 fatal("Incorrect id %d for AOT Code Cache addresses table", id);
3706 return nullptr;
3707 }
3708
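// The id lookup above and id_for_address() below share the same id space,
// which concatenates the individual tables:
//   [_extrs_base .. +_extrs_length)                 external runtime addresses
//   [_stubs_base .. +_stubs_length)                 StubRoutines entries
//   [_shared_blobs_base .. +_shared_blobs_length)   shared runtime blobs
//   [_C1_blobs_base .. +_C1_blobs_length)           C1 blobs
//   [_C2_blobs_base .. +_C2_blobs_length)           C2 blobs
//   [_c_str_base .. +_C_strings_count)              cached C strings
// Ids above (_c_str_base + _c_str_max) encode a raw offset from os::init.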
3709 int AOTCodeAddressTable::id_for_address(address addr, RelocIterator reloc, CodeBlob* blob) {
3710 assert(_extrs_complete, "AOT Code Cache VM runtime addresses table is not complete");
3711 int id = -1;
3712 if (addr == (address)-1) { // Static call stub has jump to itself
3713 return id;
3714 }
3715 // Check card_table_base address first since it can point to any address
3716 BarrierSet* bs = BarrierSet::barrier_set();
3717 bool is_const_card_table_base = !UseG1GC && !UseShenandoahGC && bs->is_a(BarrierSet::CardTableBarrierSet);
3718 guarantee(!is_const_card_table_base || addr != ci_card_table_address_const(), "sanity");
3719
3720 // Search for C string
3721 id = id_for_C_string(addr);
3722 if (id >= 0) {
3723 return id + _c_str_base;
3724 }
3725 if (StubRoutines::contains(addr)) {
3726 // Search in stubs
3727 id = search_address(addr, _stubs_addr, _stubs_length);
3728 if (id == BAD_ADDRESS_ID) {
3729 StubCodeDesc* desc = StubCodeDesc::desc_for(addr);
3730 if (desc == nullptr) {
3731 desc = StubCodeDesc::desc_for(addr + frame::pc_return_offset);
3732 }
3733 const char* sub_name = (desc != nullptr) ? desc->name() : "<unknown>";
3734 assert(false, "Address " INTPTR_FORMAT " for Stub:%s is missing in AOT Code Cache addresses table", p2i(addr), sub_name);
3735 } else {
3736 return _stubs_base + id;
3737 }
3738 } else {
3739 CodeBlob* cb = CodeCache::find_blob(addr);
3740 if (cb != nullptr) {
3741 int id_base = _shared_blobs_base;
3742 // Search in code blobs
3743 id = search_address(addr, _shared_blobs_addr, _shared_blobs_length);
3744 if (id == BAD_ADDRESS_ID) {
3745 id_base = _C1_blobs_base;
3746 // search C1 blobs
3747 id = search_address(addr, _C1_blobs_addr, _C1_blobs_length);
3748 }
3749 if (id == BAD_ADDRESS_ID) {
3750 id_base = _C2_blobs_base;
3751 // search C2 blobs
3752 id = search_address(addr, _C2_blobs_addr, _C2_blobs_length);
3753 }
3754 if (id == BAD_ADDRESS_ID) {
3755 assert(false, "Address " INTPTR_FORMAT " for Blob:%s is missing in AOT Code Cache addresses table", p2i(addr), cb->name());
3756 } else {
3757 return id_base + id;
3758 }
3759 } else {
3760 // Search in runtime functions
3761 id = search_address(addr, _extrs_addr, _extrs_length);
3762 if (id == BAD_ADDRESS_ID) {
3763 ResourceMark rm;
3764 const int buflen = 1024;
3765 char* func_name = NEW_RESOURCE_ARRAY(char, buflen);
3766 int offset = 0;
3767 if (os::dll_address_to_function_name(addr, func_name, buflen, &offset)) {
3768 if (offset > 0) {
3769 // Could be address of C string
3770 uint dist = (uint)pointer_delta(addr, (address)os::init, 1);
3771 CompileTask* task = ciEnv::current()->task();
3772 uint compile_id = 0;
3773 uint comp_level = 0;
3774 if (task != nullptr) { // this could be called from compiler runtime initialization (compiler blobs)
3775 compile_id = task->compile_id();
3776 comp_level = task->comp_level();
3777 }
3778 log_debug(aot, codecache)("%d (L%d): Address " INTPTR_FORMAT " (offset %d) for runtime target '%s' is missing in AOT Code Cache addresses table",
3779 compile_id, comp_level, p2i(addr), dist, (const char*)addr);
3780 assert(dist > (uint)(_all_max + MAX_STR_COUNT), "change encoding of distance");
3781 return dist;
3782 }
3783 reloc.print_current_on(tty);
3784 blob->print_on(tty);
3785 blob->print_code_on(tty);
3786 assert(false, "Address " INTPTR_FORMAT " for runtime target '%s+%d' is missing in AOT Code Cache addresses table", p2i(addr), func_name, offset);
3787 } else {
3788 reloc.print_current_on(tty);
3789 blob->print_on(tty);
3790 blob->print_code_on(tty);
3791 os::find(addr, tty);
3792 assert(false, "Address " INTPTR_FORMAT " for <unknown>/('%s') is missing in AOT Code Cache addresses table", p2i(addr), (const char*)addr);
3793 }
3794 } else {
3795 return _extrs_base + id;
3796 }
3797 }
3798 }
3799 return id;
3800 }
3801
3802 #undef _extrs_max
3803 #undef _stubs_max
3804 #undef _shared_blobs_max
3805 #undef _C1_blobs_max
3806 #undef _C2_blobs_max
3807 #undef _blobs_max
3808 #undef _extrs_base
3809 #undef _stubs_base
3810 #undef _shared_blobs_base
3811 #undef _C1_blobs_base
3812 #undef _C2_blobs_base
3813 #undef _blobs_end
3814
3815 AOTRuntimeConstants AOTRuntimeConstants::_aot_runtime_constants;
3816
3817 void AOTRuntimeConstants::initialize_from_runtime() {
3818 BarrierSet* bs = BarrierSet::barrier_set();
3819 address card_table_base = nullptr;
3820 uint grain_shift = 0;
3821 #if INCLUDE_G1GC
3822 if (bs->is_a(BarrierSet::G1BarrierSet)) {
3823 grain_shift = G1HeapRegion::LogOfHRGrainBytes;
3824 } else
3825 #endif
3826 #if INCLUDE_SHENANDOAHGC
3827 if (bs->is_a(BarrierSet::ShenandoahBarrierSet)) {
3828 grain_shift = 0;
3829 } else
3830 #endif
3831 if (bs->is_a(BarrierSet::CardTableBarrierSet)) {
3832 CardTable::CardValue* base = ci_card_table_address_const();
3833 assert(base != nullptr, "unexpected byte_map_base");
3834 card_table_base = base;
3835 CardTableBarrierSet* ctbs = barrier_set_cast<CardTableBarrierSet>(bs);
3836 grain_shift = ctbs->grain_shift();
3837 }
3838 _aot_runtime_constants._card_table_address = card_table_base;
3839 _aot_runtime_constants._grain_shift = grain_shift;
3840 }
3841
3842 address AOTRuntimeConstants::_field_addresses_list[] = {
3843 ((address)&_aot_runtime_constants._card_table_address),
3844 ((address)&_aot_runtime_constants._grain_shift),
3845 nullptr
3846 };
3847
3848 address AOTRuntimeConstants::card_table_address() {
3849 assert(UseSerialGC || UseParallelGC, "Only these GCs have constant card table base");
3850 return (address)&_aot_runtime_constants._card_table_address;
3851 }
3852
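// _nmethod_readers implements a reader counter with a "closing" flag encoded
// in the sign: values >= 0 mean the cache is open with that many readers
// inside; wait_for_no_nmethod_readers() flips the counter to -(readers + 1)
// so no new ReadingMark can enter, then spins until the remaining readers
// have counted the value back up to -1.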
3853 void AOTCodeCache::wait_for_no_nmethod_readers() {
3854 while (true) {
3855 int cur = AtomicAccess::load(&_nmethod_readers);
3856 int upd = -(cur + 1);
3857 if (cur >= 0 && AtomicAccess::cmpxchg(&_nmethod_readers, cur, upd) == cur) {
3858 // Success, no new readers should appear.
3859 break;
3860 }
3861 }
3862
3863 // Now wait for all readers to leave.
3864 SpinYield w;
3865 while (AtomicAccess::load(&_nmethod_readers) != -1) {
3866 w.wait();
3867 }
3868 }
3869
3870 AOTCodeCache::ReadingMark::ReadingMark() {
3871 while (true) {
3872 int cur = AtomicAccess::load(&_nmethod_readers);
3873 if (cur < 0) {
3874 // Cache is already closed, cannot proceed.
3875 _failed = true;
3876 return;
3877 }
3878 if (AtomicAccess::cmpxchg(&_nmethod_readers, cur, cur + 1) == cur) {
3879 // Successfully recorded ourselves as entered.
3880 _failed = false;
3881 return;
3882 }
3883 }
3884 }
3885
3886 AOTCodeCache::ReadingMark::~ReadingMark() {
3887 if (_failed) {
3888 return;
3889 }
3890 while (true) {
3891 int cur = AtomicAccess::load(&_nmethod_readers);
3892 if (cur > 0) {
3893 // Cache is open, we are counting down towards 0.
3894 if (AtomicAccess::cmpxchg(&_nmethod_readers, cur, cur - 1) == cur) {
3895 return;
3896 }
3897 } else {
3898 // Cache is closed, we are counting up towards -1.
3899 if (AtomicAccess::cmpxchg(&_nmethod_readers, cur, cur + 1) == cur) {
3900 return;
3901 }
3902 }
3903 }
3904 }
3905
3906 void AOTCodeCache::print_timers_on(outputStream* st) {
3907 if (is_using_code()) {
3908 st->print_cr (" AOT Code Preload Time: %7.3f s", _t_totalPreload.seconds());
3909 st->print_cr (" AOT Code Load Time: %7.3f s", _t_totalLoad.seconds());
3910 st->print_cr (" nmethod register: %7.3f s", _t_totalRegister.seconds());
3911 st->print_cr (" find AOT code entry: %7.3f s", _t_totalFind.seconds());
3912 }
3913 if (is_dumping_code()) {
3914 st->print_cr (" AOT Code Store Time: %7.3f s", _t_totalStore.seconds());
3915 }
3916 }
3917
3918 AOTCodeStats AOTCodeStats::add_aot_code_stats(AOTCodeStats stats1, AOTCodeStats stats2) {
3919 AOTCodeStats result;
3920 for (int kind = AOTCodeEntry::None; kind < AOTCodeEntry::Kind_count; kind++) {
3921 result.ccstats._kind_cnt[kind] = stats1.entry_count(kind) + stats2.entry_count(kind);
3922 }
3923
3924 for (int lvl = CompLevel_none; lvl < AOTCompLevel_count; lvl++) {
3925 result.ccstats._nmethod_cnt[lvl] = stats1.nmethod_count(lvl) + stats2.nmethod_count(lvl);
3926 }
3927 result.ccstats._clinit_barriers_cnt = stats1.clinit_barriers_count() + stats2.clinit_barriers_count();
3928 return result;
3929 }
3930
3931 void AOTCodeCache::log_stats_on_exit(AOTCodeStats& stats) {
3932 LogStreamHandle(Debug, aot, codecache, exit) log;
3933 if (log.is_enabled()) {
3934 for (uint kind = AOTCodeEntry::None; kind < AOTCodeEntry::Kind_count; kind++) {
3935 log.print_cr(" %s: total=%u", aot_code_entry_kind_name[kind], stats.entry_count(kind));
3936 if (kind == AOTCodeEntry::Nmethod) {
3937 for (uint lvl = CompLevel_none; lvl < AOTCompLevel_count; lvl++) {
3938 log.print_cr(" Tier %d: total=%u", lvl, stats.nmethod_count(lvl));
3939 }
3940 }
3941 }
3942 }
3943 }
3944
3945 static void print_helper1(outputStream* st, const char* name, int count) {
3946 if (count > 0) {
3947 st->print(" %s=%d", name, count);
3948 }
3949 }
3950
3951 void AOTCodeCache::print_statistics_on(outputStream* st) {
3952 AOTCodeCache* cache = open_for_use();
3953 if (cache != nullptr) {
3954 ReadingMark rdmk;
3955 if (rdmk.failed()) {
3956 // Cache is closed, cannot touch anything.
3957 return;
3958 }
3959 AOTCodeStats stats;
3960
3961 uint preload_count = cache->_load_header->preload_entries_count();
3962 AOTCodeEntry* preload_entries = (AOTCodeEntry*)cache->addr(cache->_load_header->preload_entries_offset());
3963 for (uint i = 0; i < preload_count; i++) {
3964 stats.collect_all_stats(&preload_entries[i]);
3965 }
3966
3967 uint count = cache->_load_header->entries_count();
3968 AOTCodeEntry* load_entries = (AOTCodeEntry*)cache->addr(cache->_load_header->entries_offset());
3969 for (uint i = 0; i < count; i++) {
3970 stats.collect_all_stats(&load_entries[i]);
3971 }
3972
3973 for (uint kind = AOTCodeEntry::None; kind < AOTCodeEntry::Kind_count; kind++) {
3974 if (stats.entry_count(kind) > 0) {
3975 st->print(" %s:", aot_code_entry_kind_name[kind]);
3976 print_helper1(st, "total", stats.entry_count(kind));
3977 print_helper1(st, "loaded", stats.entry_loaded_count(kind));
3978 print_helper1(st, "invalidated", stats.entry_invalidated_count(kind));
3979 print_helper1(st, "failed", stats.entry_load_failed_count(kind));
3980 st->cr();
3981 }
3982 if (kind == AOTCodeEntry::Nmethod) {
3983 for (uint lvl = CompLevel_none; lvl < AOTCompLevel_count; lvl++) {
3984 if (stats.nmethod_count(lvl) > 0) {
3985 st->print(" AOT Code T%d", lvl);
3986 print_helper1(st, "total", stats.nmethod_count(lvl));
3987 print_helper1(st, "loaded", stats.nmethod_loaded_count(lvl));
3988 print_helper1(st, "invalidated", stats.nmethod_invalidated_count(lvl));
3989 print_helper1(st, "failed", stats.nmethod_load_failed_count(lvl));
3990 if (lvl == AOTCompLevel_count-1) {
3991 print_helper1(st, "has_clinit_barriers", stats.clinit_barriers_count());
3992 }
3993 st->cr();
3994 }
3995 }
3996 }
3997 }
3998 LogStreamHandle(Debug, aot, codecache, init) log;
3999 if (log.is_enabled()) {
4000 AOTCodeCache::print_unused_entries_on(&log);
4001 }
4002 LogStreamHandle(Trace, aot, codecache) aot_info;
4003 // need a lock to traverse the code cache
4004 if (aot_info.is_enabled()) {
4005 MutexLocker locker(CodeCache_lock, Mutex::_no_safepoint_check_flag);
4006 NMethodIterator iter(NMethodIterator::all);
4007 while (iter.next()) {
4008 nmethod* nm = iter.method();
4009 if (nm->is_in_use() && !nm->is_native_method() && !nm->is_osr_method()) {
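// Flag characters printed below: 'S' = method is stored in the AOT cache,
// 'A' = this nmethod is AOT code, 'P' = the nmethod was preloaded.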
4010 aot_info.print("%5d:%c%c%c%d:", nm->compile_id(),
4011 (nm->method()->in_aot_cache() ? 'S' : ' '),
4012 (nm->is_aot() ? 'A' : ' '),
4013 (nm->preloaded() ? 'P' : ' '),
4014 nm->comp_level());
4015 print_helper(nm, &aot_info);
4016 aot_info.print(": ");
4017 CompileTask::print(&aot_info, nm, nullptr, true /*short_form*/);
4018 LogStreamHandle(Trace, aot, codecache) aot_debug;
4019 if (aot_debug.is_enabled()) {
4020 MethodTrainingData* mtd = MethodTrainingData::find(methodHandle(Thread::current(), nm->method()));
4021 if (mtd != nullptr) {
4022 mtd->iterate_compiles([&](CompileTrainingData* ctd) {
4023 aot_debug.print(" CTD: "); ctd->print_on(&aot_debug); aot_debug.cr();
4024 });
4025 }
4026 }
4027 }
4028 }
4029 }
4030 }
4031 }
4032
4033 void AOTCodeEntry::print(outputStream* st) const {
4034 st->print_cr(" AOT Code Cache entry " INTPTR_FORMAT " [kind: %d, id: " UINT32_FORMAT_X_0 ", offset: %d, size: %d, comp_level: %d, comp_id: %d, %s%s%s%s]",
4035 p2i(this), (int)_kind, _id, _offset, _size, _comp_level, _comp_id,
4036 (_not_entrant ? "not_entrant" : "entrant"),
4037 (_loaded ? ", loaded" : ""),
4038 (_has_clinit_barriers ? ", has_clinit_barriers" : ""),
4039 (_for_preload ? ", for_preload" : ""));
4040 }
4041
4042 // This is called after initialize() but before init2()
4043 // and _cache is not set yet.
4044 void AOTCodeCache::print_on(outputStream* st) {
4045 if (opened_cache != nullptr && opened_cache->for_use()) {
4046 ReadingMark rdmk;
4047 if (rdmk.failed()) {
4048 // Cache is closed, cannot touch anything.
4049 return;
4050 }
4051
4052 st->print_cr("\nAOT Code Cache Preload entries");
4053
4054 uint preload_count = opened_cache->_load_header->preload_entries_count();
4055 AOTCodeEntry* preload_entries = (AOTCodeEntry*)opened_cache->addr(opened_cache->_load_header->preload_entries_offset());
4056 for (uint i = 0; i < preload_count; i++) {
4057 AOTCodeEntry* entry = &preload_entries[i];
4058
4059 uint entry_position = entry->offset();
4060 uint name_offset = entry->name_offset() + entry_position;
4061 const char* saved_name = opened_cache->addr(name_offset);
4062
4063 st->print_cr("%4u: %10s Id:%u L%u size=%u '%s' %s%s%s",
4064 i, aot_code_entry_kind_name[entry->kind()], entry->id(), entry->comp_level(),
4065 entry->size(), saved_name,
4066 entry->has_clinit_barriers() ? " has_clinit_barriers" : "",
4067 entry->is_loaded() ? " loaded" : "",
4068 entry->not_entrant() ? " not_entrant" : "");
4069
4070 st->print_raw(" ");
4071 AOTCodeReader reader(opened_cache, entry, nullptr);
4072 reader.print_on(st);
4073 }
4074
4075 st->print_cr("\nAOT Code Cache entries");
4076
4077 uint count = opened_cache->_load_header->entries_count();
4078 uint* search_entries = (uint*)opened_cache->addr(opened_cache->_load_header->search_table_offset()); // [id, index]
4079 AOTCodeEntry* load_entries = (AOTCodeEntry*)opened_cache->addr(opened_cache->_load_header->entries_offset());
4080
4081 for (uint i = 0; i < count; i++) {
4082 int index = search_entries[2*i + 1];
4083 AOTCodeEntry* entry = &(load_entries[index]);
4084
4085 uint entry_position = entry->offset();
4086 uint name_offset = entry->name_offset() + entry_position;
4087 const char* saved_name = opened_cache->addr(name_offset);
4088
4089 st->print_cr("%4u: %10s idx:%4u Id:%u L%u size=%u '%s' %s%s%s%s",
4090 i, aot_code_entry_kind_name[entry->kind()], index, entry->id(), entry->comp_level(),
4091 entry->size(), saved_name,
4092 entry->has_clinit_barriers() ? " has_clinit_barriers" : "",
4093 entry->for_preload() ? " for_preload" : "",
4094 entry->is_loaded() ? " loaded" : "",
4095 entry->not_entrant() ? " not_entrant" : "");
4096
4097 st->print_raw(" ");
4098 AOTCodeReader reader(opened_cache, entry, nullptr);
4099 reader.print_on(st);
4100 }
4101 }
4102 }
4103
4104 void AOTCodeCache::print_unused_entries_on(outputStream* st) {
4105 LogStreamHandle(Info, aot, codecache, init) info;
4106 if (info.is_enabled()) {
4107 AOTCodeCache::iterate([&](AOTCodeEntry* entry) {
4108 if (entry->is_nmethod() && !entry->is_loaded()) {
4109 MethodTrainingData* mtd = MethodTrainingData::find(methodHandle(Thread::current(), entry->method()));
4110 if (mtd != nullptr) {
4111 if (mtd->has_holder()) {
4112 if (mtd->holder()->method_holder()->is_initialized()) {
4113 ResourceMark rm;
4114 mtd->iterate_compiles([&](CompileTrainingData* ctd) {
4115 if ((uint)ctd->level() == entry->comp_level()) {
4116 if (ctd->init_deps_left_acquire() == 0) {
4117 nmethod* nm = mtd->holder()->code();
4118 if (nm == nullptr) {
4119 if (mtd->holder()->queued_for_compilation()) {
4120 return; // scheduled for compilation
4121 }
4122 } else if ((uint)nm->comp_level() >= entry->comp_level()) {
4123 return; // already online compiled and superseded by a more optimal method
4124 }
4125 info.print("AOT Code Cache entry not loaded: ");
4126 ctd->print_on(&info);
4127 info.cr();
4128 }
4129 }
4130 });
4131 } else {
4132 // not yet initialized
4133 }
4134 } else {
4135 info.print("AOT Code Cache entry doesn't have a holder: ");
4136 mtd->print_on(&info);
4137 info.cr();
4138 }
4139 }
4140 }
4141 });
4142 }
4143 }
4144
4145 void AOTCodeReader::print_on(outputStream* st) {
4146 uint entry_position = _entry->offset();
4147 set_read_position(entry_position);
4148
4149 // Read name
4150 uint name_offset = entry_position + _entry->name_offset();
4151 uint name_size = _entry->name_size(); // Includes '\0'
4152 const char* name = addr(name_offset);
4153
4154 st->print_cr(" name: %s", name);
4155 }
4156
|