1 /*
2 * Copyright (c) 2025, Oracle and/or its affiliates. All rights reserved.
3 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
4 *
5 * This code is free software; you can redistribute it and/or modify it
6 * under the terms of the GNU General Public License version 2 only, as
7 * published by the Free Software Foundation.
8 *
9 * This code is distributed in the hope that it will be useful, but WITHOUT
10 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
11 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
12 * version 2 for more details (a copy is included in the LICENSE file that
13 * accompanied this code).
14 *
15 * You should have received a copy of the GNU General Public License version
16 * 2 along with this work; if not, write to the Free Software Foundation,
17 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
18 *
19 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
20 * or visit www.oracle.com if you need additional information or have any
21 * questions.
22 *
23 */
24
25
26 #include "asm/macroAssembler.hpp"
27 #include "cds/aotCacheAccess.hpp"
28 #include "cds/cds_globals.hpp"
29 #include "cds/cdsConfig.hpp"
30 #include "cds/heapShared.hpp"
31 #include "cds/metaspaceShared.hpp"
32 #include "classfile/javaAssertions.hpp"
33 #include "code/aotCodeCache.hpp"
34 #include "code/codeCache.hpp"
35 #include "gc/shared/gcConfig.hpp"
36 #include "logging/logStream.hpp"
37 #include "memory/memoryReserver.hpp"
38 #include "runtime/deoptimization.hpp"
39 #include "runtime/flags/flagSetting.hpp"
40 #include "runtime/globals_extension.hpp"
41 #include "runtime/java.hpp"
42 #include "runtime/mutexLocker.hpp"
43 #include "runtime/os.inline.hpp"
44 #include "runtime/sharedRuntime.hpp"
45 #include "runtime/stubRoutines.hpp"
46 #include "utilities/copy.hpp"
47 #ifdef COMPILER1
48 #include "c1/c1_Runtime1.hpp"
49 #endif
50 #ifdef COMPILER2
51 #include "opto/runtime.hpp"
52 #endif
53 #if INCLUDE_G1GC
54 #include "gc/g1/g1BarrierSetRuntime.hpp"
55 #endif
56 #if INCLUDE_SHENANDOAHGC
57 #include "gc/shenandoah/shenandoahRuntime.hpp"
58 #endif
59 #if INCLUDE_ZGC
60 #include "gc/z/zBarrierSetRuntime.hpp"
61 #endif
62
63 #include <sys/stat.h>
64 #include <errno.h>
65
// Human-readable names for AOTCodeEntry::Kind values, used in log messages.
// Generated from the DO_AOTCODEENTRY_KIND x-macro so the table stays in
// sync with the enum declaration automatically.
const char* aot_code_entry_kind_name[] = {
#define DECL_KIND_STRING(kind) XSTR(kind),
  DO_AOTCODEENTRY_KIND(DECL_KIND_STRING)
#undef DECL_KIND_STRING
};
71
// Report that an existing AOT Code Cache could not be used at startup.
// With -XX:+AbortVMOnAOTCodeFailure the VM exits during initialization;
// otherwise AOT adapter/stub caching is disabled and startup continues.
static void report_load_failure() {
  if (AbortVMOnAOTCodeFailure) {
    vm_exit_during_initialization("Unable to use AOT Code Cache.", nullptr);
  }
  log_info(aot, codecache, init)("Unable to use AOT Code Cache.");
  AOTAdapterCaching = false;
  AOTStubCaching = false;
}
80
// Report that the AOT Code Cache could not be created while dumping.
// Uses tty + vm_abort (not vm_exit_during_initialization) because store
// failures can happen late, during the dump at VM exit. Without the abort
// flag, caching is simply switched off for the remainder of the run.
static void report_store_failure() {
  if (AbortVMOnAOTCodeFailure) {
    tty->print_cr("Unable to create AOT Code Cache.");
    vm_abort(false);
  }
  log_info(aot, codecache, exit)("Unable to create AOT Code Cache.");
  AOTAdapterCaching = false;
  AOTStubCaching = false;
}
90
91 bool AOTCodeCache::is_dumping_adapter() {
92 return AOTAdapterCaching && is_on_for_dump();
93 }
94
95 bool AOTCodeCache::is_using_adapter() {
96 return AOTAdapterCaching && is_on_for_use();
97 }
98
99 bool AOTCodeCache::is_dumping_stub() {
100 return AOTStubCaching && is_on_for_dump();
101 }
102
103 bool AOTCodeCache::is_using_stub() {
104 return AOTStubCaching && is_on_for_use();
105 }
106
107 static uint32_t encode_id(AOTCodeEntry::Kind kind, int id) {
108 assert(AOTCodeEntry::is_valid_entry_kind(kind), "invalid AOTCodeEntry kind %d", (int)kind);
109 // There can be a conflict of id between an Adapter and *Blob, but that should not cause any functional issue
110 // becasue both id and kind are used to find an entry, and that combination should be unique
111 if (kind == AOTCodeEntry::Adapter) {
112 return id;
113 } else if (kind == AOTCodeEntry::SharedBlob) {
114 return id;
115 } else if (kind == AOTCodeEntry::C1Blob) {
116 return (int)SharedStubId::NUM_STUBIDS + id;
117 } else {
118 // kind must be AOTCodeEntry::C2Blob
119 return (int)SharedStubId::NUM_STUBIDS + COMPILER1_PRESENT((int)C1StubId::NUM_STUBIDS) + id;
120 }
121 }
122
// Effective maximum size of the AOT code cache in bytes. Set from
// AOTCodeMaxSize in initialize() (aligned up to the VM allocation
// granularity there); remains 0 until initialize() has run.
static uint _max_aot_code_size = 0;
uint AOTCodeCache::max_aot_code_size() {
  return _max_aot_code_size;
}
127
// First phase of AOT code cache setup, called during VM startup:
// decides whether adapter/stub caching is on (based on platform, AOTCache,
// and the CDS archive mode), sizes the cache, and opens it for dump or use.
void AOTCodeCache::initialize() {
#if defined(ZERO) || !(defined(AMD64) || defined(AARCH64))
  // Only AMD64 and AARCH64 (and not the Zero interpreter-only VM) can
  // generate and relocate the cached code.
  log_info(aot, codecache, init)("AOT Code Cache is not supported on this platform.");
  AOTAdapterCaching = false;
  AOTStubCaching = false;
  return;
#else
  if (FLAG_IS_DEFAULT(AOTCache)) {
    log_info(aot, codecache, init)("AOT Code Cache is not used: AOTCache is not specified.");
    AOTAdapterCaching = false;
    AOTStubCaching = false;
    return; // AOTCache must be specified to dump and use AOT code
  }

  // Disable stubs caching until JDK-8357398 is fixed.
  // NOTE: setting the flag ERGO here makes it non-default, so the
  // FLAG_SET_ERGO_IF_DEFAULT calls below will not turn it back on.
  FLAG_SET_ERGO(AOTStubCaching, false);

  if (VerifyOops) {
    // Disable AOT stubs caching when VerifyOops flag is on.
    // Verify oops code generated a lot of C strings which overflow
    // AOT C string table (which has fixed size).
    // AOT C string table will be reworked later to handle such cases.
    //
    // Note: AOT adapters are not affected - they don't have oop operations.
    log_info(aot, codecache, init)("AOT Stubs Caching is not supported with VerifyOops.");
    FLAG_SET_ERGO(AOTStubCaching, false);
  }

  bool is_dumping = false;
  bool is_using = false;
  // AOT code caching piggybacks on the AOT (CDS) archive workflow:
  // dump when creating the final static archive, use when running from one.
  if (CDSConfig::is_dumping_final_static_archive() && CDSConfig::is_dumping_aot_linked_classes()) {
    FLAG_SET_ERGO_IF_DEFAULT(AOTAdapterCaching, true);
    FLAG_SET_ERGO_IF_DEFAULT(AOTStubCaching, true);
    is_dumping = true;
  } else if (CDSConfig::is_using_archive() && CDSConfig::is_using_aot_linked_classes()) {
    FLAG_SET_ERGO_IF_DEFAULT(AOTAdapterCaching, true);
    FLAG_SET_ERGO_IF_DEFAULT(AOTStubCaching, true);
    is_using = true;
  } else {
    log_info(aot, codecache, init)("AOT Code Cache is not used: AOT Class Linking is not used.");
    return; // nothing to do
  }
  if (!AOTAdapterCaching && !AOTStubCaching) {
    return; // AOT code caching disabled on command line
  }
  // Align a user-specified AOTCodeMaxSize up to the allocation granularity
  // so the load-side memory reservation can map it directly.
  _max_aot_code_size = AOTCodeMaxSize;
  if (!FLAG_IS_DEFAULT(AOTCodeMaxSize)) {
    if (!is_aligned(AOTCodeMaxSize, os::vm_allocation_granularity())) {
      _max_aot_code_size = align_up(AOTCodeMaxSize, os::vm_allocation_granularity());
      log_debug(aot,codecache,init)("Max AOT Code Cache size is aligned up to %uK", (int)(max_aot_code_size()/K));
    }
  }
  size_t aot_code_size = is_using ? AOTCacheAccess::get_aot_code_region_size() : 0;
  if (is_using && aot_code_size == 0) {
    // The archive exists but contains no AOT code region - nothing to load.
    log_info(aot, codecache, init)("AOT Code Cache is empty");
    return;
  }
  if (!open_cache(is_dumping, is_using)) {
    if (is_using) {
      report_load_failure();
    } else {
      report_store_failure();
    }
    return;
  }
  if (is_dumping) {
    // Force far branches so the generated code is position-independent
    // enough to be relocated when loaded in a future run.
    FLAG_SET_DEFAULT(ForceUnreachable, true);
  }
  // Stubs must be generated eagerly so they can be captured in the cache.
  FLAG_SET_DEFAULT(DelayCompilerStubsGeneration, false);
#endif // defined(ZERO) || !(defined(AMD64) || defined(AARCH64))
}
199
// Second phase of setup, called once the heap/GC configuration is known:
// verifies that the recorded VM configuration matches the current VM and
// seeds the address table used to externalize code addresses.
void AOTCodeCache::init2() {
  if (!is_on()) {
    return;
  }
  if (!verify_vm_config()) {
    // Incompatible configuration: drop the cache. Execution intentionally
    // falls through; report_load_failure() may abort the VM, and the
    // table-init calls below are presumed no-ops once the cache is closed
    // (addr_table() should yield nullptr then - the init_* helpers check).
    close();
    report_load_failure();
  }

  // initialize the table of external routines so we can save
  // generated code blobs that reference them
  init_extrs_table();
  init_early_stubs_table();
}
214
// Singleton cache instance; nullptr when AOT code caching is off or closed.
AOTCodeCache* AOTCodeCache::_cache = nullptr;
216
217 bool AOTCodeCache::open_cache(bool is_dumping, bool is_using) {
218 AOTCodeCache* cache = new AOTCodeCache(is_dumping, is_using);
219 if (cache->failed()) {
220 delete cache;
221 _cache = nullptr;
222 return false;
223 }
224 _cache = cache;
225 return true;
226 }
227
228 void AOTCodeCache::close() {
229 if (is_on()) {
230 delete _cache; // Free memory
231 _cache = nullptr;
232 }
233 }
234
// Alignment applied to all sections (code, entries, tables) written to or
// read from the cache buffers.
#define DATA_ALIGNMENT HeapWordSize
236
237 AOTCodeCache::AOTCodeCache(bool is_dumping, bool is_using) :
238 _load_header(nullptr),
239 _load_buffer(nullptr),
240 _store_buffer(nullptr),
241 _C_store_buffer(nullptr),
242 _write_position(0),
243 _load_size(0),
244 _store_size(0),
245 _for_use(is_using),
246 _for_dump(is_dumping),
247 _closing(false),
248 _failed(false),
249 _lookup_failed(false),
250 _table(nullptr),
251 _load_entries(nullptr),
252 _search_entries(nullptr),
253 _store_entries(nullptr),
254 _C_strings_buf(nullptr),
255 _store_entries_cnt(0)
256 {
257 // Read header at the begining of cache
258 if (_for_use) {
259 // Read cache
260 size_t load_size = AOTCacheAccess::get_aot_code_region_size();
261 ReservedSpace rs = MemoryReserver::reserve(load_size, mtCode);
262 if (!rs.is_reserved()) {
263 log_warning(aot, codecache, init)("Failed to reserved %u bytes of memory for mapping AOT code region into AOT Code Cache", (uint)load_size);
264 set_failed();
265 return;
266 }
267 if (!AOTCacheAccess::map_aot_code_region(rs)) {
268 log_warning(aot, codecache, init)("Failed to read/mmap cached code region into AOT Code Cache");
269 set_failed();
270 return;
271 }
272
273 _load_size = (uint)load_size;
274 _load_buffer = (char*)rs.base();
275 assert(is_aligned(_load_buffer, DATA_ALIGNMENT), "load_buffer is not aligned");
276 log_debug(aot, codecache, init)("Mapped %u bytes at address " INTPTR_FORMAT " at AOT Code Cache", _load_size, p2i(_load_buffer));
277
278 _load_header = (Header*)addr(0);
279 if (!_load_header->verify_config(_load_size)) {
280 set_failed();
281 return;
282 }
283 log_info (aot, codecache, init)("Loaded %u AOT code entries from AOT Code Cache", _load_header->entries_count());
284 log_debug(aot, codecache, init)(" Adapters: total=%u", _load_header->adapters_count());
285 log_debug(aot, codecache, init)(" Shared Blobs: total=%u", _load_header->shared_blobs_count());
286 log_debug(aot, codecache, init)(" C1 Blobs: total=%u", _load_header->C1_blobs_count());
287 log_debug(aot, codecache, init)(" C2 Blobs: total=%u", _load_header->C2_blobs_count());
288 log_debug(aot, codecache, init)(" AOT code cache size: %u bytes", _load_header->cache_size());
289
290 // Read strings
291 load_strings();
292 }
293 if (_for_dump) {
294 _C_store_buffer = NEW_C_HEAP_ARRAY(char, max_aot_code_size() + DATA_ALIGNMENT, mtCode);
295 _store_buffer = align_up(_C_store_buffer, DATA_ALIGNMENT);
296 // Entries allocated at the end of buffer in reverse (as on stack).
297 _store_entries = (AOTCodeEntry*)align_up(_C_store_buffer + max_aot_code_size(), DATA_ALIGNMENT);
298 log_debug(aot, codecache, init)("Allocated store buffer at address " INTPTR_FORMAT " of size %u", p2i(_store_buffer), max_aot_code_size());
299 }
300 _table = new AOTCodeAddressTable();
301 }
302
303 void AOTCodeCache::init_extrs_table() {
304 AOTCodeAddressTable* table = addr_table();
305 if (table != nullptr) {
306 table->init_extrs();
307 }
308 }
309
310 void AOTCodeCache::init_early_stubs_table() {
311 AOTCodeAddressTable* table = addr_table();
312 if (table != nullptr) {
313 table->init_early_stubs();
314 }
315 }
316
317 void AOTCodeCache::init_shared_blobs_table() {
318 AOTCodeAddressTable* table = addr_table();
319 if (table != nullptr) {
320 table->init_shared_blobs();
321 }
322 }
323
324 void AOTCodeCache::init_early_c1_table() {
325 AOTCodeAddressTable* table = addr_table();
326 if (table != nullptr) {
327 table->init_early_c1();
328 }
329 }
330
// Destructor: finalizes a dump (writing the final image) and releases all
// buffers. Idempotent via the _closing flag.
AOTCodeCache::~AOTCodeCache() {
  if (_closing) {
    return; // Already closed
  }
  // Stop any further access to cache.
  _closing = true;

  // Hold Compile_lock so no compiler/adapter-generating thread writes new
  // entries while the final image is assembled and buffers are freed.
  MutexLocker ml(Compile_lock);
  if (for_dump()) { // Finalize cache
    finish_write();
  }
  // The load buffer is a mapped archive region, not C heap - just drop
  // the pointer here.
  _load_buffer = nullptr;
  if (_C_store_buffer != nullptr) {
    FREE_C_HEAP_ARRAY(char, _C_store_buffer);
    _C_store_buffer = nullptr;
    _store_buffer = nullptr;
  }
  if (_table != nullptr) {
    delete _table;
    _table = nullptr;
  }
}
353
354 void AOTCodeCache::Config::record() {
355 _flags = 0;
356 #ifdef ASSERT
357 _flags |= debugVM;
358 #endif
359 if (UseCompressedOops) {
360 _flags |= compressedOops;
361 }
362 if (UseCompressedClassPointers) {
363 _flags |= compressedClassPointers;
364 }
365 if (UseTLAB) {
366 _flags |= useTLAB;
367 }
368 if (JavaAssertions::systemClassDefault()) {
369 _flags |= systemClassAssertions;
370 }
371 if (JavaAssertions::userClassDefault()) {
372 _flags |= userClassAssertions;
373 }
374 if (EnableContended) {
375 _flags |= enableContendedPadding;
376 }
377 if (RestrictContended) {
378 _flags |= restrictContendedPadding;
379 }
380 _compressedOopShift = CompressedOops::shift();
381 _compressedOopBase = CompressedOops::base();
382 _compressedKlassShift = CompressedKlassPointers::shift();
383 _contendedPaddingWidth = ContendedPaddingWidth;
384 _objectAlignment = ObjectAlignmentInBytes;
385 _gc = (uint)Universe::heap()->kind();
386 }
387
// Compare the recorded dump-time configuration against the current VM.
// Returns false (cache unusable) on any hard mismatch. Note that the
// %s arguments below intentionally print the *dump-time* value, i.e. the
// inverse of the current flag, hence the seemingly inverted ternaries.
bool AOTCodeCache::Config::verify() const {
#ifdef ASSERT
  if ((_flags & debugVM) == 0) {
    log_debug(aot, codecache, init)("AOT Code Cache disabled: it was created by product VM, it can't be used by debug VM");
    return false;
  }
#else
  if ((_flags & debugVM) != 0) {
    log_debug(aot, codecache, init)("AOT Code Cache disabled: it was created by debug VM, it can't be used by product VM");
    return false;
  }
#endif

  CollectedHeap::Name aot_gc = (CollectedHeap::Name)_gc;
  if (aot_gc != Universe::heap()->kind()) {
    log_debug(aot, codecache, init)("AOT Code Cache disabled: it was created with different GC: %s vs current %s", GCConfig::hs_err_name(aot_gc), GCConfig::hs_err_name());
    return false;
  }

  if (((_flags & compressedOops) != 0) != UseCompressedOops) {
    log_debug(aot, codecache, init)("AOT Code Cache disabled: it was created with UseCompressedOops = %s", UseCompressedOops ? "false" : "true");
    return false;
  }
  if (((_flags & compressedClassPointers) != 0) != UseCompressedClassPointers) {
    log_debug(aot, codecache, init)("AOT Code Cache disabled: it was created with UseCompressedClassPointers = %s", UseCompressedClassPointers ? "false" : "true");
    return false;
  }

  if (((_flags & systemClassAssertions) != 0) != JavaAssertions::systemClassDefault()) {
    log_debug(aot, codecache, init)("AOT Code Cache disabled: it was created with JavaAssertions::systemClassDefault() = %s", JavaAssertions::systemClassDefault() ? "disabled" : "enabled");
    return false;
  }
  if (((_flags & userClassAssertions) != 0) != JavaAssertions::userClassDefault()) {
    log_debug(aot, codecache, init)("AOT Code Cache disabled: it was created with JavaAssertions::userClassDefault() = %s", JavaAssertions::userClassDefault() ? "disabled" : "enabled");
    return false;
  }

  if (((_flags & enableContendedPadding) != 0) != EnableContended) {
    log_debug(aot, codecache, init)("AOT Code Cache disabled: it was created with EnableContended = %s", EnableContended ? "false" : "true");
    return false;
  }
  if (((_flags & restrictContendedPadding) != 0) != RestrictContended) {
    log_debug(aot, codecache, init)("AOT Code Cache disabled: it was created with RestrictContended = %s", RestrictContended ? "false" : "true");
    return false;
  }
  if (_compressedOopShift != (uint)CompressedOops::shift()) {
    log_debug(aot, codecache, init)("AOT Code Cache disabled: it was created with different CompressedOops::shift(): %d vs current %d", _compressedOopShift, CompressedOops::shift());
    return false;
  }
  if (_compressedKlassShift != (uint)CompressedKlassPointers::shift()) {
    log_debug(aot, codecache, init)("AOT Code Cache disabled: it was created with CompressedKlassPointers::shift() = %d vs current %d", _compressedKlassShift, CompressedKlassPointers::shift());
    return false;
  }
  if (_contendedPaddingWidth != (uint)ContendedPaddingWidth) {
    log_debug(aot, codecache, init)("AOT Code Cache disabled: it was created with ContendedPaddingWidth = %d vs current %d", _contendedPaddingWidth, ContendedPaddingWidth);
    return false;
  }
  if (_objectAlignment != (uint)ObjectAlignmentInBytes) {
    log_debug(aot, codecache, init)("AOT Code Cache disabled: it was created with ObjectAlignmentInBytes = %d vs current %d", _objectAlignment, ObjectAlignmentInBytes);
    return false;
  }

  // This should be the last check as it only disables AOTStubCaching.
  // A mismatch matters only when exactly one of the bases is nullptr
  // (null vs non-null base changes the generated compressed-oop code).
  if ((_compressedOopBase == nullptr || CompressedOops::base() == nullptr) && (_compressedOopBase != CompressedOops::base())) {
    log_debug(aot, codecache, init)("AOTStubCaching is disabled: incompatible CompressedOops::base(): %p vs current %p", _compressedOopBase, CompressedOops::base());
    AOTStubCaching = false;
  }

  return true;
}
458
// Validate the mapped cache image header: the on-disk format version must
// match this VM, and the mapped region must be large enough to hold the
// recorded cache contents.
bool AOTCodeCache::Header::verify_config(uint load_size) const {
  if (_version != AOT_CODE_VERSION) {
    log_debug(aot, codecache, init)("AOT Code Cache disabled: different AOT Code version %d vs %d recorded in AOT Code header", AOT_CODE_VERSION, _version);
    return false;
  }
  if (load_size < _cache_size) {
    log_debug(aot, codecache, init)("AOT Code Cache disabled: AOT Code Cache size %d < %d recorded in AOT Code header", load_size, _cache_size);
    return false;
  }
  return true;
}
470
471 AOTCodeCache* AOTCodeCache::open_for_use() {
472 if (AOTCodeCache::is_on_for_use()) {
473 return AOTCodeCache::cache();
474 }
475 return nullptr;
476 }
477
478 AOTCodeCache* AOTCodeCache::open_for_dump() {
479 if (AOTCodeCache::is_on_for_dump()) {
480 AOTCodeCache* cache = AOTCodeCache::cache();
481 cache->clear_lookup_failed(); // Reset bit
482 return cache;
483 }
484 return nullptr;
485 }
486
487 void copy_bytes(const char* from, address to, uint size) {
488 assert(size > 0, "sanity");
489 bool by_words = true;
490 if ((size > 2 * HeapWordSize) && (((intptr_t)from | (intptr_t)to) & (HeapWordSize - 1)) == 0) {
491 // Use wordwise copies if possible:
492 Copy::disjoint_words((HeapWord*)from,
493 (HeapWord*)to,
494 ((size_t)size + HeapWordSize-1) / HeapWordSize);
495 } else {
496 by_words = false;
497 Copy::conjoint_jbytes(from, to, (size_t)size);
498 }
499 log_trace(aot, codecache)("Copied %d bytes as %s from " INTPTR_FORMAT " to " INTPTR_FORMAT, size, (by_words ? "HeapWord" : "bytes"), p2i(from), p2i(to));
500 }
501
502 AOTCodeReader::AOTCodeReader(AOTCodeCache* cache, AOTCodeEntry* entry) {
503 _cache = cache;
504 _entry = entry;
505 _load_buffer = cache->cache_buffer();
506 _read_position = 0;
507 _lookup_failed = false;
508 }
509
510 void AOTCodeReader::set_read_position(uint pos) {
511 if (pos == _read_position) {
512 return;
513 }
514 assert(pos < _cache->load_size(), "offset:%d >= file size:%d", pos, _cache->load_size());
515 _read_position = pos;
516 }
517
518 bool AOTCodeCache::set_write_position(uint pos) {
519 if (pos == _write_position) {
520 return true;
521 }
522 if (_store_size < _write_position) {
523 _store_size = _write_position; // Adjust during write
524 }
525 assert(pos < _store_size, "offset:%d >= file size:%d", pos, _store_size);
526 _write_position = pos;
569 if (nbytes == 0) {
570 return 0;
571 }
572 uint new_position = _write_position + nbytes;
573 if (new_position >= (uint)((char*)_store_entries - _store_buffer)) {
574 log_warning(aot, codecache)("Failed to write %d bytes at offset %d to AOT Code Cache. Increase AOTCodeMaxSize.",
575 nbytes, _write_position);
576 set_failed();
577 report_store_failure();
578 return 0;
579 }
580 copy_bytes((const char* )buffer, (address)(_store_buffer + _write_position), nbytes);
581 log_trace(aot, codecache)("Wrote %d bytes at offset %d to AOT Code Cache", nbytes, _write_position);
582 _write_position += nbytes;
583 if (_store_size < _write_position) {
584 _store_size = _write_position;
585 }
586 return nbytes;
587 }
588
// Placement allocation: AOTCodeEntry objects live inside the cache's store
// buffer (presumably carved off its end by add_entry() - see the
// constructor's "entries allocated in reverse" note), not on the C heap.
// The requested size 'x' is ignored.
void* AOTCodeEntry::operator new(size_t x, AOTCodeCache* cache) {
  return (void*)(cache->add_entry());
}
592
593 static bool check_entry(AOTCodeEntry::Kind kind, uint id, AOTCodeEntry* entry) {
594 if (entry->kind() == kind) {
595 assert(entry->id() == id, "sanity");
596 return true; // Found
597 }
598 return false;
599 }
600
// Look up a loaded entry by (kind, id). The search table is an array of
// [id, index] pairs sorted by id; ids of different kinds can collide (see
// encode_id()), so after the binary search hits an id we also scan the
// neighboring pairs with the same id until the kind matches.
AOTCodeEntry* AOTCodeCache::find_entry(AOTCodeEntry::Kind kind, uint id) {
  assert(_for_use, "sanity");
  uint count = _load_header->entries_count();
  if (_load_entries == nullptr) {
    // Lazily locate the search table and entries array in the mapped image.
    _search_entries = (uint*)addr(_load_header->entries_offset()); // [id, index]
    _load_entries = (AOTCodeEntry*)(_search_entries + 2 * count);
    log_debug(aot, codecache, init)("Read %d entries table at offset %d from AOT Code Cache", count, _load_header->entries_offset());
  }
  // Binary search
  int l = 0;
  int h = count - 1;
  while (l <= h) {
    int mid = (l + h) >> 1;
    int ix = mid * 2;
    uint is = _search_entries[ix];
    if (is == id) {
      int index = _search_entries[ix + 1];
      AOTCodeEntry* entry = &(_load_entries[index]);
      if (check_entry(kind, id, entry)) {
        return entry; // Found
      }
      // Linear search around to handle id collision
      for (int i = mid - 1; i >= l; i--) { // search back
        ix = i * 2;
        is = _search_entries[ix];
        if (is != id) {
          break;
        }
        index = _search_entries[ix + 1];
        AOTCodeEntry* entry = &(_load_entries[index]);
        if (check_entry(kind, id, entry)) {
          return entry; // Found
        }
      }
      for (int i = mid + 1; i <= h; i++) { // search forward
        ix = i * 2;
        is = _search_entries[ix];
        if (is != id) {
          break;
        }
        index = _search_entries[ix + 1];
        AOTCodeEntry* entry = &(_load_entries[index]);
        if (check_entry(kind, id, entry)) {
          return entry; // Found
        }
      }
      break; // Not found match
    } else if (is < id) {
      l = mid + 1;
    } else {
      h = mid - 1;
    }
  }
  return nullptr;
}
657
extern "C" {
  // qsort() comparator for the [id, index] search table: orders elements
  // by the leading uint (the entry id) in ascending order.
  static int uint_cmp(const void *i, const void *j) {
    const uint a = *(const uint *)i;
    const uint b = *(const uint *)j;
    if (a == b) {
      return 0;
    }
    return (a < b) ? -1 : 1;
  }
}
665
666 bool AOTCodeCache::finish_write() {
667 if (!align_write()) {
668 return false;
669 }
670 uint strings_offset = _write_position;
671 int strings_count = store_strings();
672 if (strings_count < 0) {
673 return false;
674 }
675 if (!align_write()) {
676 return false;
677 }
678 uint strings_size = _write_position - strings_offset;
679
680 uint entries_count = 0; // Number of entrant (useful) code entries
681 uint entries_offset = _write_position;
682
683 uint store_count = _store_entries_cnt;
684 if (store_count > 0) {
685 uint header_size = (uint)align_up(sizeof(AOTCodeCache::Header), DATA_ALIGNMENT);
686 uint code_count = store_count;
687 uint search_count = code_count * 2;
688 uint search_size = search_count * sizeof(uint);
689 uint entries_size = (uint)align_up(code_count * sizeof(AOTCodeEntry), DATA_ALIGNMENT); // In bytes
690 // _write_position includes size of code and strings
691 uint code_alignment = code_count * DATA_ALIGNMENT; // We align_up code size when storing it.
692 uint total_size = header_size + _write_position + code_alignment + search_size + entries_size;
693 assert(total_size < max_aot_code_size(), "AOT Code size (" UINT32_FORMAT " bytes) is greater than AOTCodeMaxSize(" UINT32_FORMAT " bytes).", total_size, max_aot_code_size());
694
695 // Create ordered search table for entries [id, index];
696 uint* search = NEW_C_HEAP_ARRAY(uint, search_count, mtCode);
697 // Allocate in AOT Cache buffer
698 char* buffer = (char *)AOTCacheAccess::allocate_aot_code_region(total_size + DATA_ALIGNMENT);
699 char* start = align_up(buffer, DATA_ALIGNMENT);
700 char* current = start + header_size; // Skip header
701
702 AOTCodeEntry* entries_address = _store_entries; // Pointer to latest entry
703 uint adapters_count = 0;
704 uint shared_blobs_count = 0;
705 uint C1_blobs_count = 0;
706 uint C2_blobs_count = 0;
707 uint max_size = 0;
708 // AOTCodeEntry entries were allocated in reverse in store buffer.
709 // Process them in reverse order to cache first code first.
710 for (int i = store_count - 1; i >= 0; i--) {
711 entries_address[i].set_next(nullptr); // clear pointers before storing data
712 uint size = align_up(entries_address[i].size(), DATA_ALIGNMENT);
713 if (size > max_size) {
714 max_size = size;
715 }
716 copy_bytes((_store_buffer + entries_address[i].offset()), (address)current, size);
717 entries_address[i].set_offset(current - start); // New offset
718 current += size;
719 uint n = write_bytes(&(entries_address[i]), sizeof(AOTCodeEntry));
720 if (n != sizeof(AOTCodeEntry)) {
721 FREE_C_HEAP_ARRAY(uint, search);
722 return false;
723 }
724 search[entries_count*2 + 0] = entries_address[i].id();
725 search[entries_count*2 + 1] = entries_count;
726 entries_count++;
727 AOTCodeEntry::Kind kind = entries_address[i].kind();
728 if (kind == AOTCodeEntry::Adapter) {
729 adapters_count++;
730 } else if (kind == AOTCodeEntry::SharedBlob) {
731 shared_blobs_count++;
732 } else if (kind == AOTCodeEntry::C1Blob) {
733 C1_blobs_count++;
734 } else if (kind == AOTCodeEntry::C2Blob) {
735 C2_blobs_count++;
736 }
737 }
738 if (entries_count == 0) {
739 log_info(aot, codecache, exit)("AOT Code Cache was not created: no entires");
740 FREE_C_HEAP_ARRAY(uint, search);
741 return true; // Nothing to write
742 }
743 assert(entries_count <= store_count, "%d > %d", entries_count, store_count);
744 // Write strings
745 if (strings_count > 0) {
746 copy_bytes((_store_buffer + strings_offset), (address)current, strings_size);
747 strings_offset = (current - start); // New offset
748 current += strings_size;
749 }
750
751 uint new_entries_offset = (current - start); // New offset
752 // Sort and store search table
753 qsort(search, entries_count, 2*sizeof(uint), uint_cmp);
754 search_size = 2 * entries_count * sizeof(uint);
755 copy_bytes((const char*)search, (address)current, search_size);
756 FREE_C_HEAP_ARRAY(uint, search);
757 current += search_size;
758
759 // Write entries
760 entries_size = entries_count * sizeof(AOTCodeEntry); // New size
761 copy_bytes((_store_buffer + entries_offset), (address)current, entries_size);
762 current += entries_size;
763 uint size = (current - start);
764 assert(size <= total_size, "%d > %d", size , total_size);
765
766 log_debug(aot, codecache, exit)(" Adapters: total=%u", adapters_count);
767 log_debug(aot, codecache, exit)(" Shared Blobs: total=%d", shared_blobs_count);
768 log_debug(aot, codecache, exit)(" C1 Blobs: total=%d", C1_blobs_count);
769 log_debug(aot, codecache, exit)(" C2 Blobs: total=%d", C2_blobs_count);
770 log_debug(aot, codecache, exit)(" AOT code cache size: %u bytes, max entry's size: %u bytes", size, max_size);
771
772 // Finalize header
773 AOTCodeCache::Header* header = (AOTCodeCache::Header*)start;
774 header->init(size, (uint)strings_count, strings_offset,
775 entries_count, new_entries_offset,
776 adapters_count, shared_blobs_count,
777 C1_blobs_count, C2_blobs_count);
778
779 log_info(aot, codecache, exit)("Wrote %d AOT code entries to AOT Code Cache", entries_count);
780 }
781 return true;
782 }
783
784 //------------------Store/Load AOT code ----------------------
785
786 bool AOTCodeCache::store_code_blob(CodeBlob& blob, AOTCodeEntry::Kind entry_kind, uint id, const char* name, int entry_offset_count, int* entry_offsets) {
787 AOTCodeCache* cache = open_for_dump();
788 if (cache == nullptr) {
789 return false;
790 }
791 assert(AOTCodeEntry::is_valid_entry_kind(entry_kind), "invalid entry_kind %d", entry_kind);
792
793 if (AOTCodeEntry::is_adapter(entry_kind) && !is_dumping_adapter()) {
794 return false;
795 }
796 if (AOTCodeEntry::is_blob(entry_kind) && !is_dumping_stub()) {
797 return false;
798 }
799 log_debug(aot, codecache, stubs)("Writing blob '%s' (id=%u, kind=%s) to AOT Code Cache", name, id, aot_code_entry_kind_name[entry_kind]);
800
801 #ifdef ASSERT
802 LogStreamHandle(Trace, aot, codecache, stubs) log;
803 if (log.is_enabled()) {
804 FlagSetting fs(PrintRelocations, true);
805 blob.print_on(&log);
806 }
807 #endif
808 // we need to take a lock to prevent race between compiler threads generating AOT code
809 // and the main thread generating adapter
810 MutexLocker ml(Compile_lock);
811 if (!cache->align_write()) {
812 return false;
813 }
814 uint entry_position = cache->_write_position;
815
816 // Write name
817 uint name_offset = cache->_write_position - entry_position;
818 uint name_size = (uint)strlen(name) + 1; // Includes '/0'
819 uint n = cache->write_bytes(name, name_size);
820 if (n != name_size) {
821 return false;
822 }
823
824 // Write CodeBlob
825 if (!cache->align_write()) {
826 return false;
827 }
828 uint blob_offset = cache->_write_position - entry_position;
829 address archive_buffer = cache->reserve_bytes(blob.size());
830 if (archive_buffer == nullptr) {
831 return false;
832 }
833 CodeBlob::archive_blob(&blob, archive_buffer);
834
835 uint reloc_data_size = blob.relocation_size();
836 n = cache->write_bytes((address)blob.relocation_begin(), reloc_data_size);
837 if (n != reloc_data_size) {
838 return false;
839 }
840
841 bool has_oop_maps = false;
842 if (blob.oop_maps() != nullptr) {
843 if (!cache->write_oop_map_set(blob)) {
844 return false;
845 }
846 has_oop_maps = true;
847 }
848
849 #ifndef PRODUCT
850 // Write asm remarks
851 if (!cache->write_asm_remarks(blob)) {
852 return false;
853 }
854 if (!cache->write_dbg_strings(blob)) {
855 return false;
856 }
857 #endif /* PRODUCT */
858
859 if (!cache->write_relocations(blob)) {
860 return false;
861 }
862
863 // Write entries offsets
864 n = cache->write_bytes(&entry_offset_count, sizeof(int));
865 if (n != sizeof(int)) {
866 return false;
867 }
868 for (int i = 0; i < entry_offset_count; i++) {
869 uint32_t off = (uint32_t)entry_offsets[i];
870 n = cache->write_bytes(&off, sizeof(uint32_t));
871 if (n != sizeof(uint32_t)) {
872 return false;
873 }
874 }
875 uint entry_size = cache->_write_position - entry_position;
876 AOTCodeEntry* entry = new(cache) AOTCodeEntry(entry_kind, encode_id(entry_kind, id),
877 entry_position, entry_size, name_offset, name_size,
878 blob_offset, has_oop_maps, blob.content_begin());
879 log_debug(aot, codecache, stubs)("Wrote code blob '%s' (id=%u, kind=%s) to AOT Code Cache", name, id, aot_code_entry_kind_name[entry_kind]);
882
// Load a previously stored code blob from the cache.
// Looks up the entry by (kind, encoded id) and materializes a CodeBlob
// from it via AOTCodeReader. Returns nullptr when the cache is off, the
// relevant caching mode is disabled, the entry is absent, or the blob
// cannot be reconstructed.
CodeBlob* AOTCodeCache::load_code_blob(AOTCodeEntry::Kind entry_kind, uint id, const char* name, int entry_offset_count, int* entry_offsets) {
  AOTCodeCache* cache = open_for_use();
  if (cache == nullptr) {
    return nullptr;
  }
  assert(AOTCodeEntry::is_valid_entry_kind(entry_kind), "invalid entry_kind %d", entry_kind);

  if (AOTCodeEntry::is_adapter(entry_kind) && !is_using_adapter()) {
    return nullptr;
  }
  if (AOTCodeEntry::is_blob(entry_kind) && !is_using_stub()) {
    return nullptr;
  }
  log_debug(aot, codecache, stubs)("Reading blob '%s' (id=%u, kind=%s) from AOT Code Cache", name, id, aot_code_entry_kind_name[entry_kind]);

  // Ids are stored encoded (see encode_id()) to keep blob kinds disjoint.
  AOTCodeEntry* entry = cache->find_entry(entry_kind, encode_id(entry_kind, id));
  if (entry == nullptr) {
    return nullptr;
  }
  AOTCodeReader reader(cache, entry);
  CodeBlob* blob = reader.compile_code_blob(name, entry_offset_count, entry_offsets);

  log_debug(aot, codecache, stubs)("%sRead blob '%s' (id=%u, kind=%s) from AOT Code Cache",
                                   (blob == nullptr? "Failed to " : ""), name, id, aot_code_entry_kind_name[entry_kind]);
  return blob;
}
909
// Materialize a CodeBlob from this reader's cache entry: verify the stored
// name, create a live blob in the CodeCache from the archived image, restore
// oop maps and (non-product) debug info, fix relocations, and copy the
// stored per-entry offsets back to the caller. Returns nullptr (setting the
// lookup-failed flag on a name mismatch) when the blob cannot be rebuilt,
// e.g. because the CodeCache is full.
CodeBlob* AOTCodeReader::compile_code_blob(const char* name, int entry_offset_count, int* entry_offsets) {
  uint entry_position = _entry->offset();

  // Read name
  uint name_offset = entry_position + _entry->name_offset();
  uint name_size = _entry->name_size(); // Includes '\0'
  const char* stored_name = addr(name_offset);

  if (strncmp(stored_name, name, (name_size - 1)) != 0) {
    log_warning(aot, codecache, stubs)("Saved blob's name '%s' is different from the expected name '%s'",
                                       stored_name, name);
    set_lookup_failed(); // Skip this blob
    return nullptr;
  }

  // Read archived code blob
  uint offset = entry_position + _entry->blob_offset();
  CodeBlob* archived_blob = (CodeBlob*)addr(offset);
  offset += archived_blob->size();

  // Relocation data was stored immediately after the blob image.
  address reloc_data = (address)addr(offset);
  offset += archived_blob->relocation_size();
  set_read_position(offset);

  ImmutableOopMapSet* oop_maps = nullptr;
  if (_entry->has_oop_maps()) {
    oop_maps = read_oop_map_set();
  }

#ifndef PRODUCT
  AsmRemarks asm_remarks;
  read_asm_remarks(asm_remarks);
  DbgStrings dbg_strings;
  read_dbg_strings(dbg_strings);
#endif // PRODUCT

  // Allocate the live blob in the CodeCache from the archived image.
  CodeBlob* code_blob = CodeBlob::create(archived_blob,
                                         stored_name,
                                         reloc_data,
                                         oop_maps
#ifndef PRODUCT
                                         , asm_remarks
                                         , dbg_strings
#endif
                                        );
  if (code_blob == nullptr) { // no space left in CodeCache
    return nullptr;
  }

  // Patch addresses that differ between dump-time and run-time.
  fix_relocations(code_blob);

  // Read entries offsets
  offset = read_position();
  int stored_count = *(int*)addr(offset);
  assert(stored_count == entry_offset_count, "entry offset count mismatch, count in AOT code cache=%d, expected=%d", stored_count, entry_offset_count);
  offset += sizeof(int);
  set_read_position(offset);
  for (int i = 0; i < stored_count; i++) {
    uint32_t off = *(uint32_t*)addr(offset);
    offset += sizeof(uint32_t);
    const char* entry_name = (_entry->kind() == AOTCodeEntry::Adapter) ? AdapterHandlerEntry::entry_name(i) : "";
    log_trace(aot, codecache, stubs)("Reading adapter '%s:%s' (0x%x) offset: 0x%x from AOT Code Cache",
                                     stored_name, entry_name, _entry->id(), off);
    entry_offsets[i] = off;
  }

#ifdef ASSERT
  LogStreamHandle(Trace, aot, codecache, stubs) log;
  if (log.is_enabled()) {
    FlagSetting fs(PrintRelocations, true);
    code_blob->print_on(&log);
  }
#endif
  return code_blob;
}
985
986 // ------------ process code and data --------------
987
// Record one uint of auxiliary data per relocation in the blob: the address
// table id of the target for runtime calls and external words, 0 for kinds
// that need no extra data. Written as an int count followed by the uints;
// AOTCodeReader::fix_relocations() consumes them in the same order.
bool AOTCodeCache::write_relocations(CodeBlob& code_blob) {
  GrowableArray<uint> reloc_data;
  RelocIterator iter(&code_blob);
  LogStreamHandle(Trace, aot, codecache, reloc) log;
  while (iter.next()) {
    int idx = reloc_data.append(0); // default value
    switch (iter.type()) {
      case relocInfo::none:
        break;
      case relocInfo::runtime_call_type: {
        // Record offset of runtime destination
        CallRelocation* r = (CallRelocation*)iter.reloc();
        address dest = r->destination();
        if (dest == r->addr()) { // possible call via trampoline on Aarch64
          dest = (address)-1;    // do nothing in this case when loading this relocation
        }
        reloc_data.at_put(idx, _table->id_for_address(dest, iter, &code_blob));
        break;
      }
      case relocInfo::runtime_call_w_cp_type:
        fatal("runtime_call_w_cp_type unimplemented");
        break;
      case relocInfo::external_word_type: {
        // Record offset of runtime target
        address target = ((external_word_Relocation*)iter.reloc())->target();
        reloc_data.at_put(idx, _table->id_for_address(target, iter, &code_blob));
        break;
      }
      case relocInfo::internal_word_type:
        break;
      case relocInfo::section_word_type:
        break;
      case relocInfo::post_call_nop_type:
        break;
      default:
        fatal("relocation %d unimplemented", (int)iter.type());
        break;
    }
    if (log.is_enabled()) {
      iter.print_current_on(&log);
    }
  }

  // Write additional relocation data: uint per relocation
  // Write the count first
  // NOTE(review): the return value of this write_bytes call is not checked,
  // unlike the per-value writes below - confirm this is intentional.
  int count = reloc_data.length();
  write_bytes(&count, sizeof(int));
  for (GrowableArrayIterator<uint> iter = reloc_data.begin();
       iter != reloc_data.end(); ++iter) {
    uint value = *iter;
    int n = write_bytes(&value, sizeof(uint));
    if (n != sizeof(uint)) {
      return false;
    }
  }
  return true;
}
1045
// Re-resolve the relocations of a freshly loaded blob using the auxiliary
// data written by AOTCodeCache::write_relocations(): runtime call and
// external word targets are looked up by their recorded address table ids,
// while internal/section words are rebased from the dump-time content start
// address to the blob's new location.
void AOTCodeReader::fix_relocations(CodeBlob* code_blob) {
  LogStreamHandle(Trace, aot, reloc) log;
  uint offset = read_position();
  int count = *(int*)addr(offset); // number of auxiliary uints, one per relocation
  offset += sizeof(int);
  if (log.is_enabled()) {
    log.print_cr("======== extra relocations count=%d", count);
  }
  uint* reloc_data = (uint*)addr(offset);
  offset += (count * sizeof(uint));
  set_read_position(offset);

  RelocIterator iter(code_blob);
  int j = 0; // index into reloc_data; advances once per relocation, mirroring the writer
  while (iter.next()) {
    switch (iter.type()) {
      case relocInfo::none:
        break;
      case relocInfo::runtime_call_type: {
        address dest = _cache->address_for_id(reloc_data[j]);
        if (dest != (address)-1) { // -1 was recorded to mean "leave untouched"
          ((CallRelocation*)iter.reloc())->set_destination(dest);
        }
        break;
      }
      case relocInfo::runtime_call_w_cp_type:
        fatal("runtime_call_w_cp_type unimplemented");
        break;
      case relocInfo::external_word_type: {
        address target = _cache->address_for_id(reloc_data[j]);
        // Add external address to global table
        int index = ExternalsRecorder::find_index(target);
        // Update index in relocation
        Relocation::add_jint(iter.data(), index);
        external_word_Relocation* reloc = (external_word_Relocation*)iter.reloc();
        assert(reloc->target() == target, "sanity");
        reloc->set_value(target); // Patch address in the code
        break;
      }
      case relocInfo::internal_word_type: {
        internal_word_Relocation* r = (internal_word_Relocation*)iter.reloc();
        r->fix_relocation_after_aot_load(aot_code_entry()->dumptime_content_start_addr(), code_blob->content_begin());
        break;
      }
      case relocInfo::section_word_type: {
        section_word_Relocation* r = (section_word_Relocation*)iter.reloc();
        r->fix_relocation_after_aot_load(aot_code_entry()->dumptime_content_start_addr(), code_blob->content_begin());
        break;
      }
      case relocInfo::post_call_nop_type:
        break;
      default:
        fatal("relocation %d unimplemented", (int)iter.type());
        break;
    }
    if (log.is_enabled()) {
      iter.print_current_on(&log);
    }
    j++;
  }
  assert(j == count, "sanity");
}
1108
1109 bool AOTCodeCache::write_oop_map_set(CodeBlob& cb) {
1110 ImmutableOopMapSet* oopmaps = cb.oop_maps();
1111 int oopmaps_size = oopmaps->nr_of_bytes();
1112 if (!write_bytes(&oopmaps_size, sizeof(int))) {
1113 return false;
1114 }
1115 uint n = write_bytes(oopmaps, oopmaps->nr_of_bytes());
1116 if (n != (uint)oopmaps->nr_of_bytes()) {
1117 return false;
1118 }
1119 return true;
1120 }
1121
1122 ImmutableOopMapSet* AOTCodeReader::read_oop_map_set() {
1123 uint offset = read_position();
1124 int size = *(int *)addr(offset);
1125 offset += sizeof(int);
1126 ImmutableOopMapSet* oopmaps = (ImmutableOopMapSet *)addr(offset);
1127 offset += size;
1128 set_read_position(offset);
1129 return oopmaps;
1130 }
1131
1132 #ifndef PRODUCT
// Write the blob's asm remarks as: a uint count, then one (uint offset,
// int string-id) pair per remark. The count slot is reserved up front and
// patched after iteration. Remark strings are interned via add_C_string so
// the reader can resolve them by id.
bool AOTCodeCache::write_asm_remarks(CodeBlob& cb) {
  // Write asm remarks
  uint* count_ptr = (uint *)reserve_bytes(sizeof(uint));
  if (count_ptr == nullptr) {
    return false;
  }
  uint count = 0;
  bool result = cb.asm_remarks().iterate([&] (uint offset, const char* str) -> bool {
    log_trace(aot, codecache, stubs)("asm remark offset=%d, str='%s'", offset, str);
    uint n = write_bytes(&offset, sizeof(uint));
    if (n != sizeof(uint)) {
      return false; // stop iterating; overall result becomes false
    }
    const char* cstr = add_C_string(str);
    int id = _table->id_for_C_string((address)cstr);
    assert(id != -1, "asm remark string '%s' not found in AOTCodeAddressTable", str);
    n = write_bytes(&id, sizeof(int));
    if (n != sizeof(int)) {
      return false;
    }
    count += 1;
    return true;
  });
  // Patch the reserved slot with the number of remarks actually written.
  *count_ptr = count;
  return result;
}
1159
1160 void AOTCodeReader::read_asm_remarks(AsmRemarks& asm_remarks) {
1161 // Read asm remarks
1162 uint offset = read_position();
1163 uint count = *(uint *)addr(offset);
1164 offset += sizeof(uint);
1165 for (uint i = 0; i < count; i++) {
1166 uint remark_offset = *(uint *)addr(offset);
1167 offset += sizeof(uint);
1168 int remark_string_id = *(uint *)addr(offset);
1169 offset += sizeof(int);
1170 const char* remark = (const char*)_cache->address_for_C_string(remark_string_id);
1171 asm_remarks.insert(remark_offset, remark);
1172 }
1173 set_read_position(offset);
1174 }
1175
1176 bool AOTCodeCache::write_dbg_strings(CodeBlob& cb) {
1177 // Write dbg strings
1178 uint* count_ptr = (uint *)reserve_bytes(sizeof(uint));
1179 if (count_ptr == nullptr) {
1180 return false;
1181 }
1182 uint count = 0;
1183 bool result = cb.dbg_strings().iterate([&] (const char* str) -> bool {
1184 log_trace(aot, codecache, stubs)("dbg string=%s", str);
1185 const char* cstr = add_C_string(str);
1186 int id = _table->id_for_C_string((address)cstr);
1187 assert(id != -1, "db string '%s' not found in AOTCodeAddressTable", str);
1188 uint n = write_bytes(&id, sizeof(int));
1189 if (n != sizeof(int)) {
1190 return false;
1191 }
1192 count += 1;
1193 return true;
1194 });
1195 *count_ptr = count;
1196 return result;
1197 }
1198
1199 void AOTCodeReader::read_dbg_strings(DbgStrings& dbg_strings) {
1200 // Read dbg strings
1201 uint offset = read_position();
1202 uint count = *(uint *)addr(offset);
1203 offset += sizeof(uint);
1204 for (uint i = 0; i < count; i++) {
1205 int string_id = *(uint *)addr(offset);
1206 offset += sizeof(int);
1207 const char* str = (const char*)_cache->address_for_C_string(string_id);
1208 dbg_strings.insert(str);
1209 }
1210 set_read_position(offset);
1211 }
1212 #endif // PRODUCT
1213
1214 //======================= AOTCodeAddressTable ===============
1215
1216 // address table ids for generated routines, external addresses and C
1217 // string addresses are partitioned into positive integer ranges
1218 // defined by the following positive base and max values
1219 // i.e. [_extrs_base, _extrs_base + _extrs_max -1],
1220 // [_blobs_base, _blobs_base + _blobs_max -1],
1221 // ...
1222 // [_c_str_base, _c_str_base + _c_str_max -1],
1223
// Capacity of each id range (see the layout comment above).
#define _extrs_max 100
#define _stubs_max 3

#define _shared_blobs_max 20
#define _C1_blobs_max 10
#define _blobs_max (_shared_blobs_max+_C1_blobs_max)
#define _all_max (_extrs_max+_stubs_max+_blobs_max)

// Base id of each consecutive range: externals, then stubs, then blobs.
#define _extrs_base 0
#define _stubs_base (_extrs_base + _extrs_max)
#define _shared_blobs_base (_stubs_base + _stubs_max)
#define _C1_blobs_base (_shared_blobs_base + _shared_blobs_max)
#define _blobs_end (_shared_blobs_base + _blobs_max)

// Append 'addr' to the <type> address table and bump its length counter,
// asserting that the table's capacity is not exceeded.
#define SET_ADDRESS(type, addr) \
  { \
    type##_addr[type##_length++] = (address) (addr); \
    assert(type##_length <= type##_max, "increase size"); \
  }
1243
1244 static bool initializing_extrs = false;
1245
1246 void AOTCodeAddressTable::init_extrs() {
1247 if (_extrs_complete || initializing_extrs) return; // Done already
1248
1249 assert(_blobs_end <= _all_max, "AOTCodeAddress table ranges need adjusting");
1250
1251 initializing_extrs = true;
1252 _extrs_addr = NEW_C_HEAP_ARRAY(address, _extrs_max, mtCode);
1253
1254 _extrs_length = 0;
1255
1256 // Record addresses of VM runtime methods
1257 SET_ADDRESS(_extrs, SharedRuntime::fixup_callers_callsite);
1258 SET_ADDRESS(_extrs, SharedRuntime::handle_wrong_method);
1259 SET_ADDRESS(_extrs, SharedRuntime::handle_wrong_method_abstract);
1260 SET_ADDRESS(_extrs, SharedRuntime::handle_wrong_method_ic_miss);
1261 #if defined(AARCH64) && !defined(ZERO)
1262 SET_ADDRESS(_extrs, JavaThread::aarch64_get_thread_helper);
1263 #endif
1264 {
1265 // Required by Shared blobs
1266 SET_ADDRESS(_extrs, Deoptimization::fetch_unroll_info);
1267 SET_ADDRESS(_extrs, Deoptimization::unpack_frames);
1268 SET_ADDRESS(_extrs, SafepointSynchronize::handle_polling_page_exception);
1269 SET_ADDRESS(_extrs, SharedRuntime::resolve_opt_virtual_call_C);
1270 SET_ADDRESS(_extrs, SharedRuntime::resolve_virtual_call_C);
1271 SET_ADDRESS(_extrs, SharedRuntime::resolve_static_call_C);
1272 SET_ADDRESS(_extrs, SharedRuntime::throw_delayed_StackOverflowError);
1273 SET_ADDRESS(_extrs, SharedRuntime::throw_AbstractMethodError);
1274 SET_ADDRESS(_extrs, SharedRuntime::throw_IncompatibleClassChangeError);
1275 SET_ADDRESS(_extrs, SharedRuntime::throw_NullPointerException_at_call);
1276 }
1277
1278 #ifdef COMPILER1
1279 {
1280 // Required by C1 blobs
1281 SET_ADDRESS(_extrs, static_cast<int (*)(oopDesc*)>(SharedRuntime::dtrace_object_alloc));
1282 SET_ADDRESS(_extrs, SharedRuntime::exception_handler_for_return_address);
1283 SET_ADDRESS(_extrs, SharedRuntime::register_finalizer);
1284 SET_ADDRESS(_extrs, Runtime1::is_instance_of);
1285 SET_ADDRESS(_extrs, Runtime1::exception_handler_for_pc);
1286 SET_ADDRESS(_extrs, Runtime1::check_abort_on_vm_exception);
1287 SET_ADDRESS(_extrs, Runtime1::new_instance);
1288 SET_ADDRESS(_extrs, Runtime1::counter_overflow);
1289 SET_ADDRESS(_extrs, Runtime1::new_type_array);
1290 SET_ADDRESS(_extrs, Runtime1::new_object_array);
1291 SET_ADDRESS(_extrs, Runtime1::new_multi_array);
1292 SET_ADDRESS(_extrs, Runtime1::throw_range_check_exception);
1293 SET_ADDRESS(_extrs, Runtime1::throw_index_exception);
1294 SET_ADDRESS(_extrs, Runtime1::throw_div0_exception);
1295 SET_ADDRESS(_extrs, Runtime1::throw_null_pointer_exception);
1296 SET_ADDRESS(_extrs, Runtime1::throw_array_store_exception);
1297 SET_ADDRESS(_extrs, Runtime1::throw_class_cast_exception);
1298 SET_ADDRESS(_extrs, Runtime1::throw_incompatible_class_change_error);
1299 SET_ADDRESS(_extrs, Runtime1::is_instance_of);
1300 SET_ADDRESS(_extrs, Runtime1::monitorenter);
1301 SET_ADDRESS(_extrs, Runtime1::monitorexit);
1302 SET_ADDRESS(_extrs, Runtime1::deoptimize);
1303 SET_ADDRESS(_extrs, Runtime1::access_field_patching);
1304 SET_ADDRESS(_extrs, Runtime1::move_klass_patching);
1305 SET_ADDRESS(_extrs, Runtime1::move_mirror_patching);
1306 SET_ADDRESS(_extrs, Runtime1::move_appendix_patching);
1307 SET_ADDRESS(_extrs, Runtime1::predicate_failed_trap);
1308 SET_ADDRESS(_extrs, Runtime1::unimplemented_entry);
1309 SET_ADDRESS(_extrs, Thread::current);
1310 SET_ADDRESS(_extrs, CompressedKlassPointers::base_addr());
1311 #ifndef PRODUCT
1312 SET_ADDRESS(_extrs, os::breakpoint);
1313 #endif
1314 }
1315 #endif
1316
1317 #ifdef COMPILER2
1318 {
1319 // Required by C2 blobs
1320 SET_ADDRESS(_extrs, Deoptimization::uncommon_trap);
1321 SET_ADDRESS(_extrs, OptoRuntime::handle_exception_C);
1322 SET_ADDRESS(_extrs, OptoRuntime::new_instance_C);
1323 SET_ADDRESS(_extrs, OptoRuntime::new_array_C);
1324 SET_ADDRESS(_extrs, OptoRuntime::new_array_nozero_C);
1325 SET_ADDRESS(_extrs, OptoRuntime::multianewarray2_C);
1326 SET_ADDRESS(_extrs, OptoRuntime::multianewarray3_C);
1327 SET_ADDRESS(_extrs, OptoRuntime::multianewarray4_C);
1328 SET_ADDRESS(_extrs, OptoRuntime::multianewarray5_C);
1329 SET_ADDRESS(_extrs, OptoRuntime::multianewarrayN_C);
1330 #if INCLUDE_JVMTI
1331 SET_ADDRESS(_extrs, SharedRuntime::notify_jvmti_vthread_start);
1332 SET_ADDRESS(_extrs, SharedRuntime::notify_jvmti_vthread_end);
1333 SET_ADDRESS(_extrs, SharedRuntime::notify_jvmti_vthread_mount);
1334 SET_ADDRESS(_extrs, SharedRuntime::notify_jvmti_vthread_unmount);
1335 #endif
1336 SET_ADDRESS(_extrs, OptoRuntime::complete_monitor_locking_C);
1337 SET_ADDRESS(_extrs, OptoRuntime::monitor_notify_C);
1338 SET_ADDRESS(_extrs, OptoRuntime::monitor_notifyAll_C);
1339 SET_ADDRESS(_extrs, OptoRuntime::rethrow_C);
1340 SET_ADDRESS(_extrs, OptoRuntime::slow_arraycopy_C);
1341 SET_ADDRESS(_extrs, OptoRuntime::register_finalizer_C);
1342 #if defined(AARCH64)
1343 SET_ADDRESS(_extrs, JavaThread::verify_cross_modify_fence_failure);
1344 #endif // AARCH64
1345 }
1346 #endif // COMPILER2
1347
1348 #if INCLUDE_G1GC
1349 SET_ADDRESS(_extrs, G1BarrierSetRuntime::write_ref_field_post_entry);
1350 SET_ADDRESS(_extrs, G1BarrierSetRuntime::write_ref_field_pre_entry);
1351 #endif
1352 #if INCLUDE_SHENANDOAHGC
1353 SET_ADDRESS(_extrs, ShenandoahRuntime::write_ref_field_pre);
1354 SET_ADDRESS(_extrs, ShenandoahRuntime::load_reference_barrier_phantom);
1355 SET_ADDRESS(_extrs, ShenandoahRuntime::load_reference_barrier_phantom_narrow);
1356 #endif
1357 #if INCLUDE_ZGC
1358 SET_ADDRESS(_extrs, ZBarrierSetRuntime::load_barrier_on_phantom_oop_field_preloaded_addr());
1359 #if defined(AMD64)
1360 SET_ADDRESS(_extrs, &ZPointerLoadShift);
1361 #endif
1362 #endif
1363 #ifndef ZERO
1364 #if defined(AMD64) || defined(AARCH64) || defined(RISCV64)
1365 SET_ADDRESS(_extrs, MacroAssembler::debug64);
1366 #endif
1367 #endif // ZERO
1368
1369 _extrs_complete = true;
1370 log_debug(aot, codecache, init)("External addresses recorded");
1371 }
1372
// Guards against recursive/concurrent initialization of the stubs table.
static bool initializing_early_stubs = false;

// Record the addresses of early-generated StubRoutines entries that AOT
// code may reference. Order fixes each address's table id.
void AOTCodeAddressTable::init_early_stubs() {
  // NOTE(review): this guards on _complete (set by init_shared_blobs) rather
  // than on _early_stubs_complete - presumably intentional, verify.
  if (_complete || initializing_early_stubs) return; // Done already
  initializing_early_stubs = true;
  _stubs_addr = NEW_C_HEAP_ARRAY(address, _stubs_max, mtCode);
  _stubs_length = 0;
  SET_ADDRESS(_stubs, StubRoutines::forward_exception_entry());

  {
    // Required by C1 blobs
#if defined(AMD64) && !defined(ZERO)
    SET_ADDRESS(_stubs, StubRoutines::x86::double_sign_flip());
    SET_ADDRESS(_stubs, StubRoutines::x86::d2l_fixup());
#endif // AMD64
  }

  _early_stubs_complete = true;
  log_info(aot, codecache, init)("Early stubs recorded");
}
1393
// Guards against recursive/concurrent initialization of the blobs table.
static bool initializing_shared_blobs = false;

// Record the entry addresses of shared runtime blobs that AOT code may
// reference. One C-heap array backs both the shared-blob and C1-blob
// sub-ranges; _C1_blobs_addr points into the same allocation.
void AOTCodeAddressTable::init_shared_blobs() {
  if (_complete || initializing_shared_blobs) return; // Done already
  initializing_shared_blobs = true;
  address* blobs_addr = NEW_C_HEAP_ARRAY(address, _blobs_max, mtCode);
  _shared_blobs_addr = blobs_addr;
  _C1_blobs_addr = _shared_blobs_addr + _shared_blobs_max;
  _shared_blobs_length = _C1_blobs_length = 0;

  // clear the address table
  memset(blobs_addr, 0, sizeof(address)* _blobs_max);

  // Record addresses of generated code blobs
  SET_ADDRESS(_shared_blobs, SharedRuntime::get_handle_wrong_method_stub());
  SET_ADDRESS(_shared_blobs, SharedRuntime::get_ic_miss_stub());
  SET_ADDRESS(_shared_blobs, SharedRuntime::deopt_blob()->unpack());
  SET_ADDRESS(_shared_blobs, SharedRuntime::deopt_blob()->unpack_with_exception());
  SET_ADDRESS(_shared_blobs, SharedRuntime::deopt_blob()->unpack_with_reexecution());
  SET_ADDRESS(_shared_blobs, SharedRuntime::deopt_blob()->unpack_with_exception_in_tls());
#if INCLUDE_JVMCI
  if (EnableJVMCI) {
    SET_ADDRESS(_shared_blobs, SharedRuntime::deopt_blob()->uncommon_trap());
    SET_ADDRESS(_shared_blobs, SharedRuntime::deopt_blob()->implicit_exception_uncommon_trap());
  }
#endif

  _shared_blobs_complete = true;
  log_debug(aot, codecache, init)("Early shared blobs recorded");
  _complete = true;
}
1425
// Record entry addresses of the early C1 runtime blobs (up to and including
// forward_exception_id). Blobs or entries that were not generated are
// skipped with an informational log line.
void AOTCodeAddressTable::init_early_c1() {
#ifdef COMPILER1
  // Runtime1 Blobs
  for (int i = 0; i <= (int)C1StubId::forward_exception_id; i++) {
    C1StubId id = (C1StubId)i;
    if (Runtime1::blob_for(id) == nullptr) {
      log_info(aot, codecache, init)("C1 blob %s is missing", Runtime1::name_for(id));
      continue;
    }
    if (Runtime1::entry_for(id) == nullptr) {
      log_info(aot, codecache, init)("C1 blob %s is missing entry", Runtime1::name_for(id));
      continue;
    }
    address entry = Runtime1::entry_for(id);
    SET_ADDRESS(_C1_blobs, entry);
  }
#endif // COMPILER1
  assert(_C1_blobs_length <= _C1_blobs_max, "increase _C1_blobs_max to %d", _C1_blobs_length);
  _early_c1_complete = true;
}
1446
1447 #undef SET_ADDRESS
1448
1449 AOTCodeAddressTable::~AOTCodeAddressTable() {
1450 if (_extrs_addr != nullptr) {
1451 FREE_C_HEAP_ARRAY(address, _extrs_addr);
1452 }
1453 if (_shared_blobs_addr != nullptr) {
1454 FREE_C_HEAP_ARRAY(address, _shared_blobs_addr);
1455 }
1456 }
1457
// Capacity of the file-scope C string intern table below.
#ifdef PRODUCT
#define MAX_STR_COUNT 200
#else
#define MAX_STR_COUNT 500
#endif
#define _c_str_max MAX_STR_COUNT
// C string ids occupy the range immediately after all other address ids.
static const int _c_str_base = _all_max;

static const char* _C_strings_in[MAX_STR_COUNT] = {nullptr}; // Incoming strings
static const char* _C_strings[MAX_STR_COUNT] = {nullptr};    // Our duplicates
static int _C_strings_count = 0;          // Number of distinct strings interned
static int _C_strings_s[MAX_STR_COUNT] = {0};   // stored id -> index in _C_strings (filled by id_for_C_string)
static int _C_strings_id[MAX_STR_COUNT] = {0};  // string index -> stored id; set to -1 in add_C_string until assigned
static int _C_strings_used = 0;           // Number of strings that have been assigned ids
1472
1484 // still be executed on VM exit after _cache is freed.
1485 char* p = NEW_C_HEAP_ARRAY(char, strings_size+1, mtCode);
1486 memcpy(p, addr(strings_offset), strings_size);
1487 _C_strings_buf = p;
1488 assert(strings_count <= MAX_STR_COUNT, "sanity");
1489 for (uint i = 0; i < strings_count; i++) {
1490 _C_strings[i] = p;
1491 uint len = string_lengths[i];
1492 _C_strings_s[i] = i;
1493 _C_strings_id[i] = i;
1494 p += len;
1495 }
1496 assert((uint)(p - _C_strings_buf) <= strings_size, "(" INTPTR_FORMAT " - " INTPTR_FORMAT ") = %d > %d ", p2i(p), p2i(_C_strings_buf), (uint)(p - _C_strings_buf), strings_size);
1497 _C_strings_count = strings_count;
1498 _C_strings_used = strings_count;
1499 log_debug(aot, codecache, init)(" Loaded %d C strings of total length %d at offset %d from AOT Code Cache", _C_strings_count, strings_size, strings_offset);
1500 }
1501
1502 int AOTCodeCache::store_strings() {
1503 if (_C_strings_used > 0) {
1504 uint offset = _write_position;
1505 uint length = 0;
1506 uint* lengths = (uint *)reserve_bytes(sizeof(uint) * _C_strings_used);
1507 if (lengths == nullptr) {
1508 return -1;
1509 }
1510 for (int i = 0; i < _C_strings_used; i++) {
1511 const char* str = _C_strings[_C_strings_s[i]];
1512 uint len = (uint)strlen(str) + 1;
1513 length += len;
1514 assert(len < 1000, "big string: %s", str);
1515 lengths[i] = len;
1516 uint n = write_bytes(str, len);
1517 if (n != len) {
1518 return -1;
1519 }
1520 }
1521 log_debug(aot, codecache, exit)(" Wrote %d C strings of total length %d at offset %d to AOT Code Cache",
1522 _C_strings_used, length, offset);
1523 }
1524 return _C_strings_used;
1525 }
1526
1527 const char* AOTCodeCache::add_C_string(const char* str) {
1528 if (is_on_for_dump() && str != nullptr) {
1529 return _cache->_table->add_C_string(str);
1530 }
1531 return str;
1532 }
1533
// Intern 'str': return a C-heap duplicate whose address stays stable for
// the table's lifetime, adding it on first sight. Deduplicates both by the
// incoming pointer and by content. A fatal error is raised if the fixed-size
// table overflows. Before init_extrs() completes the pointer is returned
// unchanged.
const char* AOTCodeAddressTable::add_C_string(const char* str) {
  if (_extrs_complete) {
    LogStreamHandle(Trace, aot, codecache, stringtable) log; // ctor outside lock
    MutexLocker ml(AOTCodeCStrings_lock, Mutex::_no_safepoint_check_flag);
    // Check previous strings address
    for (int i = 0; i < _C_strings_count; i++) {
      if (_C_strings_in[i] == str) {
        return _C_strings[i]; // Found previous one - return our duplicate
      } else if (strcmp(_C_strings[i], str) == 0) {
        return _C_strings[i]; // Same content seen under a different address
      }
    }
    // Add new one
    if (_C_strings_count < MAX_STR_COUNT) {
      // Passed in string can be freed and used space become inaccessible.
      // Keep original address but duplicate string for future compare.
      _C_strings_id[_C_strings_count] = -1; // Init: no id assigned yet (see id_for_C_string)
      _C_strings_in[_C_strings_count] = str;
      const char* dup = os::strdup(str);
      _C_strings[_C_strings_count++] = dup;
      if (log.is_enabled()) {
        log.print_cr("add_C_string: [%d] " INTPTR_FORMAT " '%s'", _C_strings_count, p2i(dup), dup);
      }
      return dup;
    } else {
      fatal("Number of C strings >= MAX_STR_COUNT");
    }
  }
  return str;
}
1564
1565 int AOTCodeAddressTable::id_for_C_string(address str) {
1566 if (str == nullptr) {
1567 return -1;
1568 }
1569 MutexLocker ml(AOTCodeCStrings_lock, Mutex::_no_safepoint_check_flag);
1570 for (int i = 0; i < _C_strings_count; i++) {
1571 if (_C_strings[i] == (const char*)str) { // found
1572 int id = _C_strings_id[i];
1573 if (id >= 0) {
1574 assert(id < _C_strings_used, "%d >= %d", id , _C_strings_used);
1575 return id; // Found recorded
1576 }
1577 // Not found in recorded, add new
1578 id = _C_strings_used++;
1579 _C_strings_s[id] = i;
1580 _C_strings_id[i] = id;
1581 return id;
1582 }
1583 }
1584 return -1;
1585 }
1586
// Map a C-string index back to the interned duplicate's address.
address AOTCodeAddressTable::address_for_C_string(int idx) {
  assert(idx < _C_strings_count, "sanity");
  return (address)_C_strings[idx];
}
1591
1592 static int search_address(address addr, address* table, uint length) {
1593 for (int i = 0; i < (int)length; i++) {
1594 if (table[i] == addr) {
1595 return i;
1596 }
1597 }
1598 return -1;
1599 }
1600
// Map a recorded id back to a runtime address by dispatching on which range
// it falls into: externals, stubs, shared blobs, C1 blobs, C strings, or
// (for large values) an offset relative to os::init.
address AOTCodeAddressTable::address_for_id(int idx) {
  if (!_extrs_complete) {
    fatal("AOT Code Cache VM runtime addresses table is not complete");
  }
  if (idx == -1) {
    return (address)-1; // "leave untouched" marker written at dump time
  }
  uint id = (uint)idx;
  // special case for symbols based relative to os::init
  if (id > (_c_str_base + _c_str_max)) {
    return (address)os::init + idx;
  }
  // NOTE(review): any negative idx other than -1 converts to a large uint and
  // is consumed by the os::init branch above, so this check looks unreachable.
  if (idx < 0) {
    fatal("Incorrect id %d for AOT Code Cache addresses table", id);
  }
  // no need to compare unsigned id against 0
  if (/* id >= _extrs_base && */ id < _extrs_length) {
    return _extrs_addr[id - _extrs_base];
  }
  if (id >= _stubs_base && id < _stubs_base + _stubs_length) {
    return _stubs_addr[id - _stubs_base];
  }
  if (id >= _shared_blobs_base && id < _shared_blobs_base + _shared_blobs_length) {
    return _shared_blobs_addr[id - _shared_blobs_base];
  }
  if (id >= _C1_blobs_base && id < _C1_blobs_base + _C1_blobs_length) {
    return _C1_blobs_addr[id - _C1_blobs_base];
  }
  if (id >= _c_str_base && id < (_c_str_base + (uint)_C_strings_count)) {
    return address_for_C_string(id - _c_str_base);
  }
  fatal("Incorrect id %d for AOT Code Cache addresses table", id);
  return nullptr;
}
1635
// Map a runtime address to its recorded id, searching (in order) the C
// string table, the stubs table, the code blob tables, and the externals
// table. As a last resort, a positive offset from os::init is returned for
// addresses inside the VM library. Any address that cannot be encoded is a
// fatal error - AOT code must only reference recorded targets. The 'reloc'
// and 'code_blob' arguments are used solely for diagnostic printing.
int AOTCodeAddressTable::id_for_address(address addr, RelocIterator reloc, CodeBlob* code_blob) {
  if (!_extrs_complete) {
    fatal("AOT Code Cache VM runtime addresses table is not complete");
  }
  int id = -1;
  if (addr == (address)-1) { // Static call stub has jump to itself
    return id;
  }
  // Search for C string
  id = id_for_C_string(addr);
  if (id >= 0) {
    return id + _c_str_base;
  }
  if (StubRoutines::contains(addr)) {
    // Search in stubs
    id = search_address(addr, _stubs_addr, _stubs_length);
    if (id < 0) {
      StubCodeDesc* desc = StubCodeDesc::desc_for(addr);
      if (desc == nullptr) {
        // The address may be a return address; retry with the call offset.
        desc = StubCodeDesc::desc_for(addr + frame::pc_return_offset);
      }
      const char* sub_name = (desc != nullptr) ? desc->name() : "<unknown>";
      fatal("Address " INTPTR_FORMAT " for Stub:%s is missing in AOT Code Cache addresses table", p2i(addr), sub_name);
    } else {
      return id + _stubs_base;
    }
  } else {
    CodeBlob* cb = CodeCache::find_blob(addr);
    if (cb != nullptr) {
      // Search in code blobs (the shared and C1 sub-ranges share one array,
      // hence the search over _blobs_max entries).
      int id_base = _shared_blobs_base;
      id = search_address(addr, _shared_blobs_addr, _blobs_max);
      if (id < 0) {
        fatal("Address " INTPTR_FORMAT " for Blob:%s is missing in AOT Code Cache addresses table", p2i(addr), cb->name());
      } else {
        return id_base + id;
      }
    } else {
      // Search in runtime functions
      id = search_address(addr, _extrs_addr, _extrs_length);
      if (id < 0) {
        ResourceMark rm;
        const int buflen = 1024;
        char* func_name = NEW_RESOURCE_ARRAY(char, buflen);
        int offset = 0;
        if (os::dll_address_to_function_name(addr, func_name, buflen, &offset)) {
          if (offset > 0) {
            // Could be address of C string
            uint dist = (uint)pointer_delta(addr, (address)os::init, 1);
            log_debug(aot, codecache)("Address " INTPTR_FORMAT " (offset %d) for runtime target '%s' is missing in AOT Code Cache addresses table",
                                      p2i(addr), dist, (const char*)addr);
            // Distances must stay out of the regular id ranges (see address_for_id).
            assert(dist > (uint)(_all_max + MAX_STR_COUNT), "change encoding of distance");
            return dist;
          }
          reloc.print_current_on(tty);
          code_blob->print_on(tty);
          code_blob->print_code_on(tty);
          fatal("Address " INTPTR_FORMAT " for runtime target '%s+%d' is missing in AOT Code Cache addresses table", p2i(addr), func_name, offset);
        } else {
          reloc.print_current_on(tty);
          code_blob->print_on(tty);
          code_blob->print_code_on(tty);
          os::find(addr, tty);
          fatal("Address " INTPTR_FORMAT " for <unknown>/('%s') is missing in AOT Code Cache addresses table", p2i(addr), (const char*)addr);
        }
      } else {
        return _extrs_base + id;
      }
    }
  }
  return id;
}
1708
1709 void AOTCodeCache::print_on(outputStream* st) {
1710 AOTCodeCache* cache = open_for_use();
1711 if (cache != nullptr) {
1712 uint count = cache->_load_header->entries_count();
1713 uint* search_entries = (uint*)cache->addr(cache->_load_header->entries_offset()); // [id, index]
1714 AOTCodeEntry* load_entries = (AOTCodeEntry*)(search_entries + 2 * count);
1715
1716 for (uint i = 0; i < count; i++) {
1717 // Use search_entries[] to order ouput
1718 int index = search_entries[2*i + 1];
1719 AOTCodeEntry* entry = &(load_entries[index]);
1720
1721 uint entry_position = entry->offset();
1722 uint name_offset = entry->name_offset() + entry_position;
1723 const char* saved_name = cache->addr(name_offset);
1724
1725 st->print_cr("%4u: entry_idx:%4u Kind:%u Id:%u size=%u '%s'",
1726 i, index, entry->kind(), entry->id(), entry->size(), saved_name);
1727 }
1728 } else {
1729 st->print_cr("failed to map code cache");
1730 }
1731 }
|
1 /*
2 * Copyright (c) 2023, 2025, Oracle and/or its affiliates. All rights reserved.
3 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
4 *
5 * This code is free software; you can redistribute it and/or modify it
6 * under the terms of the GNU General Public License version 2 only, as
7 * published by the Free Software Foundation.
8 *
9 * This code is distributed in the hope that it will be useful, but WITHOUT
10 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
11 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
12 * version 2 for more details (a copy is included in the LICENSE file that
13 * accompanied this code).
14 *
15 * You should have received a copy of the GNU General Public License version
16 * 2 along with this work; if not, write to the Free Software Foundation,
17 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
18 *
19 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
20 * or visit www.oracle.com if you need additional information or have any
21 * questions.
22 *
23 */
24
25
26 #include "asm/macroAssembler.hpp"
27 #include "cds/aotCacheAccess.hpp"
28 #include "cds/cds_globals.hpp"
29 #include "cds/cdsConfig.hpp"
30 #include "cds/heapShared.hpp"
31 #include "cds/metaspaceShared.hpp"
32 #include "ci/ciConstant.hpp"
33 #include "ci/ciEnv.hpp"
34 #include "ci/ciField.hpp"
35 #include "ci/ciMethod.hpp"
36 #include "ci/ciMethodData.hpp"
37 #include "ci/ciObject.hpp"
38 #include "ci/ciUtilities.inline.hpp"
39 #include "classfile/javaAssertions.hpp"
40 #include "classfile/stringTable.hpp"
41 #include "classfile/symbolTable.hpp"
42 #include "classfile/systemDictionary.hpp"
43 #include "classfile/vmClasses.hpp"
44 #include "classfile/vmIntrinsics.hpp"
45 #include "code/aotCodeCache.hpp"
46 #include "code/codeBlob.hpp"
47 #include "code/codeCache.hpp"
48 #include "code/oopRecorder.inline.hpp"
49 #include "compiler/abstractCompiler.hpp"
50 #include "compiler/compilationPolicy.hpp"
51 #include "compiler/compileBroker.hpp"
52 #include "compiler/compileTask.hpp"
53 #include "gc/g1/g1BarrierSetRuntime.hpp"
54 #include "gc/shared/gcConfig.hpp"
55 #include "logging/logStream.hpp"
56 #include "memory/memoryReserver.hpp"
57 #include "memory/universe.hpp"
58 #include "oops/klass.inline.hpp"
59 #include "oops/method.inline.hpp"
60 #include "oops/trainingData.hpp"
61 #include "prims/jvmtiThreadState.hpp"
62 #include "runtime/atomic.hpp"
63 #include "runtime/deoptimization.hpp"
64 #include "runtime/flags/flagSetting.hpp"
65 #include "runtime/globals_extension.hpp"
66 #include "runtime/handles.inline.hpp"
67 #include "runtime/java.hpp"
68 #include "runtime/jniHandles.inline.hpp"
69 #include "runtime/mutexLocker.hpp"
70 #include "runtime/os.inline.hpp"
71 #include "runtime/sharedRuntime.hpp"
72 #include "runtime/stubCodeGenerator.hpp"
73 #include "runtime/stubRoutines.hpp"
74 #include "runtime/timerTrace.hpp"
75 #include "runtime/threadIdentifier.hpp"
76 #include "utilities/copy.hpp"
77 #include "utilities/formatBuffer.hpp"
78 #include "utilities/ostream.hpp"
79 #include "utilities/spinYield.hpp"
80 #ifdef COMPILER1
81 #include "c1/c1_Runtime1.hpp"
82 #include "c1/c1_LIRAssembler.hpp"
83 #include "gc/shared/c1/barrierSetC1.hpp"
84 #include "gc/g1/c1/g1BarrierSetC1.hpp"
85 #if INCLUDE_SHENANDOAHGC
86 #include "gc/shenandoah/c1/shenandoahBarrierSetC1.hpp"
87 #endif // INCLUDE_SHENANDOAHGC
88 #include "gc/z/c1/zBarrierSetC1.hpp"
89 #endif // COMPILER1
90 #ifdef COMPILER2
91 #include "opto/runtime.hpp"
92 #endif
93 #if INCLUDE_JVMCI
94 #include "jvmci/jvmci.hpp"
95 #endif
96 #if INCLUDE_G1GC
97 #include "gc/g1/g1BarrierSetRuntime.hpp"
98 #endif
99 #if INCLUDE_SHENANDOAHGC
100 #include "gc/shenandoah/shenandoahRuntime.hpp"
101 #endif
102 #if INCLUDE_ZGC
103 #include "gc/z/zBarrierSetRuntime.hpp"
104 #endif
105 #if defined(X86) && !defined(ZERO)
106 #include "rdtsc_x86.hpp"
107 #endif
108
109 #include <sys/stat.h>
110 #include <errno.h>
111
// Human-readable names for each AOTCodeEntry::Kind, generated from the
// DO_AOTCODEENTRY_KIND macro list (declared in the corresponding header).
const char* aot_code_entry_kind_name[] = {
#define DECL_KIND_STRING(kind) XSTR(kind),
  DO_AOTCODEENTRY_KIND(DECL_KIND_STRING)
#undef DECL_KIND_STRING
};
117
// Cumulative timers for the major AOT code cache operations; they are
// activated when enable_timers() (below) returns true.
static elapsedTimer _t_totalLoad;
static elapsedTimer _t_totalRegister;
static elapsedTimer _t_totalFind;
static elapsedTimer _t_totalStore;
123 static bool enable_timers() {
124 return CITime || log_is_enabled(Info, init);
125 }
126
// Report that the AOT code cache could not be used for loading.
// Aborts VM initialization when AbortVMOnAOTCodeFailure is set;
// otherwise logs the condition and turns AOT code caching off.
static void report_load_failure() {
  if (AbortVMOnAOTCodeFailure) {
    vm_exit_during_initialization("Unable to use AOT Code Cache.", nullptr);
  }
  log_info(aot, codecache, init)("Unable to use AOT Code Cache.");
  AOTCodeCache::disable_caching();
}
134
// Report that the AOT code cache could not be created during dumping.
// Aborts the VM when AbortVMOnAOTCodeFailure is set; otherwise logs the
// condition and turns AOT code caching off.
static void report_store_failure() {
  if (AbortVMOnAOTCodeFailure) {
    tty->print_cr("Unable to create AOT Code Cache.");
    vm_abort(false);
  }
  log_info(aot, codecache, exit)("Unable to create AOT Code Cache.");
  AOTCodeCache::disable_caching();
}
143
// The sequence of AOT code caching flag and parameter settings.
145 //
146 // 1. The initial AOT code caching flags setting is done
147 // during call to CDSConfig::check_vm_args_consistency().
148 //
149 // 2. The earliest AOT code state check done in compilationPolicy_init()
150 // where we set number of compiler threads for AOT assembly phase.
151 //
// 3. We determine presence of AOT code in AOT Cache in
//    MetaspaceShared::open_static_archive() which is called
//    after compilationPolicy_init() but before codeCache_init().
155 //
156 // 4. AOTCodeCache::initialize() is called during universe_init()
157 // and does final AOT state and flags settings.
158 //
159 // 5. Finally AOTCodeCache::init2() is called after universe_init()
160 // when all GC settings are finalized.
161
162 // Next methods determine which action we do with AOT code depending
163 // on phase of AOT process: assembly or production.
164
165 bool AOTCodeCache::is_dumping_adapter() {
166 return AOTAdapterCaching && is_on_for_dump();
167 }
168
169 bool AOTCodeCache::is_using_adapter() {
170 return AOTAdapterCaching && is_on_for_use();
171 }
172
173 bool AOTCodeCache::is_dumping_stub() {
174 return AOTStubCaching && is_on_for_dump();
175 }
176
177 bool AOTCodeCache::is_using_stub() {
178 return AOTStubCaching && is_on_for_use();
179 }
180
181 bool AOTCodeCache::is_dumping_code() {
182 return AOTCodeCaching && is_on_for_dump();
183 }
184
185 bool AOTCodeCache::is_using_code() {
186 return AOTCodeCaching && is_on_for_use();
187 }
188
189 // This is used before AOTCodeCahe is initialized
190 // but after AOT (CDS) Cache flags consistency is checked.
191 bool AOTCodeCache::maybe_dumping_code() {
192 return AOTCodeCaching && CDSConfig::is_dumping_final_static_archive();
193 }
194
195 // Next methods could be called regardless of AOT code cache status.
196 // Initially they are called during AOT flags parsing and finilized
197 // in AOTCodeCache::initialize().
198 void AOTCodeCache::enable_caching() {
199 FLAG_SET_ERGO_IF_DEFAULT(AOTCodeCaching, true);
200 FLAG_SET_ERGO_IF_DEFAULT(AOTStubCaching, true);
201 FLAG_SET_ERGO_IF_DEFAULT(AOTAdapterCaching, true);
202 }
203
204 void AOTCodeCache::disable_caching() {
205 FLAG_SET_ERGO(AOTCodeCaching, false);
206 FLAG_SET_ERGO(AOTStubCaching, false);
207 FLAG_SET_ERGO(AOTAdapterCaching, false);
208 }
209
210 bool AOTCodeCache::is_caching_enabled() {
211 return AOTCodeCaching || AOTStubCaching || AOTAdapterCaching;
212 }
213
214 static uint32_t encode_id(AOTCodeEntry::Kind kind, int id) {
215 assert(AOTCodeEntry::is_valid_entry_kind(kind), "invalid AOTCodeEntry kind %d", (int)kind);
216 // There can be a conflict of id between an Adapter and *Blob, but that should not cause any functional issue
217 // becasue both id and kind are used to find an entry, and that combination should be unique
218 if (kind == AOTCodeEntry::Adapter) {
219 return id;
220 } else if (kind == AOTCodeEntry::SharedBlob) {
221 return id;
222 } else if (kind == AOTCodeEntry::C1Blob) {
223 return (int)SharedStubId::NUM_STUBIDS + id;
224 } else {
225 // kind must be AOTCodeEntry::C2Blob
226 return (int)SharedStubId::NUM_STUBIDS + COMPILER1_PRESENT((int)C1StubId::NUM_STUBIDS) + id;
227 }
228 }
229
// Upper bound (in bytes) for the AOT code store buffer; may be aligned
// up from AOTCodeMaxSize in AOTCodeCache::initialize().
static uint _max_aot_code_size = 0;
uint AOTCodeCache::max_aot_code_size() {
  return _max_aot_code_size;
}
234
235 bool AOTCodeCache::is_code_load_thread_on() {
236 return UseAOTCodeLoadThread && AOTCodeCaching;
237 }
238
// Decide whether a constant field value may be folded into compiled code.
// During an AOT (precompile) compilation folding is restricted; for a
// normal JIT compilation it is always allowed.
bool AOTCodeCache::allow_const_field(ciConstant& value) {
  ciEnv* env = CURRENT_ENV;
  precond(env != nullptr);
  assert(!env->is_precompile() || is_dumping_code(), "AOT compilation should be enabled");
  return !env->is_precompile() // Restrict only when we generate AOT code
  // Can not trust primitive too || !is_reference_type(value.basic_type())
  // May disable this too for now || is_reference_type(value.basic_type()) && value.as_object()->should_be_constant()
  ;
}
248
// Called from MetaspaceShared::initialize_shared_spaces(), which is
// called from universe_init(). At this point all AOT class linking
// settings are finalized and the AOT cache is open, so the AOT code
// region can be mapped. Decides whether this VM dumps and/or uses AOT
// code and adjusts dependent flags accordingly.
void AOTCodeCache::initialize() {
  if (!is_caching_enabled()) {
    log_info(aot, codecache, init)("AOT Code Cache is not used: disabled.");
    return;
  }
#if defined(ZERO) || !(defined(AMD64) || defined(AARCH64))
  // Only non-Zero AMD64 and AARCH64 ports support the AOT code cache.
  log_info(aot, codecache, init)("AOT Code Cache is not supported on this platform.");
  disable_caching();
  return;
#else
  assert(!FLAG_IS_DEFAULT(AOTCache), "AOTCache should be specified");

  // Disable stubs caching until JDK-8357398 is fixed.
  FLAG_SET_ERGO(AOTStubCaching, false);

  if (VerifyOops) {
    // Disable AOT stubs caching when VerifyOops flag is on.
    // Verify oops code generated a lot of C strings which overflow
    // AOT C string table (which has fixed size).
    // AOT C string table will be reworked later to handle such cases.
    //
    // Note: AOT adapters are not affected - they don't have oop operations.
    log_info(aot, codecache, init)("AOT Stubs Caching is not supported with VerifyOops.");
    FLAG_SET_ERGO(AOTStubCaching, false);
  }

  // Dumping requires the final static archive with AOT-linked classes;
  // using requires an archive that was created with AOT-linked classes.
  bool is_dumping = false;
  bool is_using = false;
  if (CDSConfig::is_dumping_final_static_archive() && CDSConfig::is_dumping_aot_linked_classes()) {
    is_dumping = is_caching_enabled();
  } else if (CDSConfig::is_using_archive() && CDSConfig::is_using_aot_linked_classes()) {
    is_using = is_caching_enabled();
  }
  if (ClassInitBarrierMode > 0 && !(is_dumping && AOTCodeCaching)) {
    log_info(aot, codecache, init)("Set ClassInitBarrierMode to 0 because AOT Code dumping is off.");
    FLAG_SET_ERGO(ClassInitBarrierMode, 0);
  }
  if (!(is_dumping || is_using)) {
    log_info(aot, codecache, init)("AOT Code Cache is not used: AOT Class Linking is not used.");
    disable_caching();
    return; // AOT code caching disabled on command line
  }
  // Reserve AOT Cache region when we are dumping AOT code.
  _max_aot_code_size = AOTCodeMaxSize;
  if (is_dumping && !FLAG_IS_DEFAULT(AOTCodeMaxSize)) {
    if (!is_aligned(AOTCodeMaxSize, os::vm_allocation_granularity())) {
      _max_aot_code_size = align_up(AOTCodeMaxSize, os::vm_allocation_granularity());
      log_debug(aot,codecache,init)("Max AOT Code Cache size is aligned up to %uK", (int)(max_aot_code_size()/K));
    }
  }
  // An empty AOT code region means the archive contains no AOT code.
  size_t aot_code_size = is_using ? AOTCacheAccess::get_aot_code_region_size() : 0;
  if (is_using && aot_code_size == 0) {
    log_info(aot, codecache, init)("AOT Code Cache is empty");
    disable_caching();
    return;
  }
  if (!open_cache(is_dumping, is_using)) {
    if (is_using) {
      report_load_failure();
    } else {
      report_store_failure();
    }
    return;
  }
  if (is_dumping) {
    // These code generation shortcuts are unsafe for archived code.
    FLAG_SET_DEFAULT(FoldStableValues, false);
    FLAG_SET_DEFAULT(ForceUnreachable, true);
  }
  FLAG_SET_DEFAULT(DelayCompilerStubsGeneration, false);
#endif // defined(ZERO) || !(defined(AMD64) || defined(AARCH64))
}
324
// Cache instance created by open_cache(); it is promoted to _cache only
// after its configuration is verified in init2().
static AOTCodeCache* opened_cache = nullptr; // Use this until we verify the cache
AOTCodeCache* AOTCodeCache::_cache = nullptr;
DEBUG_ONLY( bool AOTCodeCache::_passed_init2 = false; )
328
// Called after universe_init() when all GC settings are finalized.
// Performs the final verification of the opened cache and publishes it
// in _cache so that AOT code generation/loading can begin.
void AOTCodeCache::init2() {
  DEBUG_ONLY( _passed_init2 = true; )
  if (opened_cache == nullptr) {
    return;
  }
  // After Universe initialized
  BarrierSet* bs = BarrierSet::barrier_set();
  if (bs->is_a(BarrierSet::CardTableBarrierSet)) {
    address byte_map_base = ci_card_table_address_as<address>();
    if (is_on_for_dump() && !external_word_Relocation::can_be_relocated(byte_map_base)) {
      // Bail out since we can't encode card table base address with relocation
      log_warning(aot, codecache, init)("Can't create AOT Code Cache because card table base address is not relocatable: " INTPTR_FORMAT, p2i(byte_map_base));
      close();
      // NOTE(review): this bail-out is taken on the dump path but is reported
      // via report_load_failure() - confirm whether report_store_failure()
      // was intended here.
      report_load_failure();
      return;
    }
  }
  if (!opened_cache->verify_config_on_use()) { // Check on AOT code loading
    delete opened_cache;
    opened_cache = nullptr;
    report_load_failure();
    return;
  }

  // Initialize AOT runtime constants as appropriate to this runtime.
  AOTRuntimeConstants::initialize_from_runtime();

  // Initialize the table of external routines and initial stubs so we can
  // save generated code blobs that reference them.
  AOTCodeAddressTable* table = opened_cache->_table;
  assert(table != nullptr, "should be initialized already");
  table->init_extrs();

  // Now cache and address table are ready for AOT code generation
  _cache = opened_cache;

  // Set ClassInitBarrierMode after all checks since it affects code generation
  if (is_dumping_code()) {
    FLAG_SET_ERGO_IF_DEFAULT(ClassInitBarrierMode, 1);
  } else {
    FLAG_SET_ERGO(ClassInitBarrierMode, 0);
  }
}
373
374 bool AOTCodeCache::open_cache(bool is_dumping, bool is_using) {
375 opened_cache = new AOTCodeCache(is_dumping, is_using);
376 if (opened_cache->failed()) {
377 delete opened_cache;
378 opened_cache = nullptr;
379 return false;
380 }
381 return true;
382 }
383
384 static void print_helper(nmethod* nm, outputStream* st) {
385 AOTCodeCache::iterate([&](AOTCodeEntry* e) {
386 if (e->method() == nm->method()) {
387 ResourceMark rm;
388 stringStream ss;
389 ss.print("A%s%d", (e->for_preload() ? "P" : ""), e->comp_level());
390 ss.print("[%s%s%s]",
391 (e->is_loaded() ? "L" : ""),
392 (e->load_fail() ? "F" : ""),
393 (e->not_entrant() ? "I" : ""));
394 ss.print("#%d", e->comp_id());
395
396 st->print(" %s", ss.freeze());
397 }
398 });
399 }
400
401 void AOTCodeCache::close() {
402 if (is_on()) {
403 delete _cache; // Free memory
404 _cache = nullptr;
405 opened_cache = nullptr;
406 }
407 }
408
// Root data structure stored at the very start of the AOT code ("ac")
// region of the AOT cache; see the layout notes that follow.
class CachedCodeDirectory : public CachedCodeDirectoryInternal {
public:
  uint _aot_code_size;   // size in bytes of the archived AOT code data
  char* _aot_code_data;  // start of the archived AOT code data

  // Record the AOT code blob; the pointer is written through
  // AOTCacheAccess::set_pointer so it can be relocated at runtime.
  void set_aot_code_data(uint size, char* aot_data) {
    _aot_code_size = size;
    AOTCacheAccess::set_pointer(&_aot_code_data, aot_data);
  }

  static CachedCodeDirectory* create();
};
421
422 // Storing AOT code in the AOT code region (ac) of AOT Cache:
423 //
424 // [1] Use CachedCodeDirectory to keep track of all of data related to AOT code.
425 // E.g., you can build a hashtable to record what methods have been archived.
426 //
427 // [2] Memory for all data for AOT code, including CachedCodeDirectory, should be
428 // allocated using AOTCacheAccess::allocate_aot_code_region().
429 //
430 // [3] CachedCodeDirectory must be the very first allocation.
431 //
432 // [4] Two kinds of pointer can be stored:
433 // - A pointer p that points to metadata. AOTCacheAccess::can_generate_aot_code(p) must return true.
434 // - A pointer to a buffer returned by AOTCacheAccess::allocate_aot_code_region().
435 // (It's OK to point to an interior location within this buffer).
436 // Such pointers must be stored using AOTCacheAccess::set_pointer()
437 //
438 // The buffers allocated by AOTCacheAccess::allocate_aot_code_region() are in a contiguous region. At runtime, this
439 // region is mapped to the process address space. All the pointers in this buffer are relocated as necessary
440 // (e.g., to account for the runtime location of the CodeCache).
441 //
442 // This is always at the very beginning of the mmaped CDS "ac" (AOT code) region
443 static CachedCodeDirectory* _aot_code_directory = nullptr;
444
445 CachedCodeDirectory* CachedCodeDirectory::create() {
446 assert(AOTCacheAccess::is_aot_code_region_empty(), "must be");
447 CachedCodeDirectory* dir = (CachedCodeDirectory*)AOTCacheAccess::allocate_aot_code_region(sizeof(CachedCodeDirectory));
448 dir->dumptime_init_internal();
449 return dir;
450 }
451
452 #define DATA_ALIGNMENT HeapWordSize
453
// Construct the cache for the current phase: when using, map the archived
// AOT code region and verify its header; when dumping, allocate the
// in-memory store buffer that will later be written into the archive.
// On any failure set_failed() is called and the instance must be deleted
// by the caller (see open_cache()).
AOTCodeCache::AOTCodeCache(bool is_dumping, bool is_using) :
  _load_header(nullptr),
  _load_buffer(nullptr),
  _store_buffer(nullptr),
  _C_store_buffer(nullptr),
  _write_position(0),
  _load_size(0),
  _store_size(0),
  _for_use(is_using),
  _for_dump(is_dumping),
  _closing(false),
  _failed(false),
  _lookup_failed(false),
  _for_preload(false),
  _has_clinit_barriers(false),
  _table(nullptr),
  _load_entries(nullptr),
  _search_entries(nullptr),
  _store_entries(nullptr),
  _C_strings_buf(nullptr),
  _store_entries_cnt(0),
  _compile_id(0),
  _comp_level(0)
{
  // Read header at the beginning of cache
  if (_for_use) {
    // Read cache
    size_t load_size = AOTCacheAccess::get_aot_code_region_size();
    ReservedSpace rs = MemoryReserver::reserve(load_size, mtCode);
    if (!rs.is_reserved()) {
      // NOTE(review): message typo - "reserved" should read "reserve".
      log_warning(aot, codecache, init)("Failed to reserved %u bytes of memory for mapping AOT code region into AOT Code Cache", (uint)load_size);
      set_failed();
      return;
    }
    if (!AOTCacheAccess::map_aot_code_region(rs)) {
      log_warning(aot, codecache, init)("Failed to read/mmap AOT code region (ac) into AOT Code Cache");
      set_failed();
      return;
    }
    // The directory is the first object in the mapped region.
    _aot_code_directory = (CachedCodeDirectory*)rs.base();
    _aot_code_directory->runtime_init_internal();

    _load_size = _aot_code_directory->_aot_code_size;
    _load_buffer = _aot_code_directory->_aot_code_data;
    assert(is_aligned(_load_buffer, DATA_ALIGNMENT), "load_buffer is not aligned");
    log_info(aot, codecache, init)("Mapped %u bytes at address " INTPTR_FORMAT " from AOT Code Cache", _load_size, p2i(_load_buffer));

    _load_header = (Header*)addr(0);
    if (!_load_header->verify(_load_size)) {
      set_failed();
      return;
    }
    log_info (aot, codecache, init)("Loaded %u AOT code entries from AOT Code Cache", _load_header->entries_count());
    log_debug(aot, codecache, init)("  Adapters: total=%u", _load_header->adapters_count());
    log_debug(aot, codecache, init)("  Shared Blobs: total=%u", _load_header->shared_blobs_count());
    log_debug(aot, codecache, init)("  C1 Blobs: total=%u", _load_header->C1_blobs_count());
    log_debug(aot, codecache, init)("  C2 Blobs: total=%u", _load_header->C2_blobs_count());
    log_debug(aot, codecache, init)("  Stubs: total=%u", _load_header->stubs_count());
    log_debug(aot, codecache, init)("  Nmethods: total=%u", _load_header->nmethods_count());
    log_debug(aot, codecache, init)("  AOT code cache size: %u bytes", _load_header->cache_size());

    // Read strings
    load_strings();
  }
  if (_for_dump) {
    _C_store_buffer = NEW_C_HEAP_ARRAY(char, max_aot_code_size() + DATA_ALIGNMENT, mtCode);
    _store_buffer = align_up(_C_store_buffer, DATA_ALIGNMENT);
    // Entries allocated at the end of buffer in reverse (as on stack).
    _store_entries = (AOTCodeEntry*)align_up(_C_store_buffer + max_aot_code_size(), DATA_ALIGNMENT);
    log_debug(aot, codecache, init)("Allocated store buffer at address " INTPTR_FORMAT " of size %u", p2i(_store_buffer), max_aot_code_size());
  }
  _table = new AOTCodeAddressTable();
}
527
528 void AOTCodeCache::invalidate(AOTCodeEntry* entry) {
529 // This could be concurent execution
530 if (entry != nullptr && is_on()) { // Request could come after cache is closed.
531 _cache->invalidate_entry(entry);
532 }
533 }
534
535 void AOTCodeCache::init_early_stubs_table() {
536 AOTCodeAddressTable* table = addr_table();
537 if (table != nullptr) {
538 table->init_early_stubs();
539 }
540 }
541
542 void AOTCodeCache::init_shared_blobs_table() {
543 AOTCodeAddressTable* table = addr_table();
544 if (table != nullptr) {
545 table->init_shared_blobs();
546 }
547 }
548
549 void AOTCodeCache::init_stubs_table() {
550 AOTCodeAddressTable* table = addr_table();
551 if (table != nullptr) {
552 table->init_stubs();
553 }
554 }
555
556 void AOTCodeCache::init_early_c1_table() {
557 AOTCodeAddressTable* table = addr_table();
558 if (table != nullptr) {
559 table->init_early_c1();
560 }
561 }
562
563 void AOTCodeCache::init_c1_table() {
564 AOTCodeAddressTable* table = addr_table();
565 if (table != nullptr) {
566 table->init_c1();
567 }
568 }
569
570 void AOTCodeCache::init_c2_table() {
571 AOTCodeAddressTable* table = addr_table();
572 if (table != nullptr) {
573 table->init_c2();
574 }
575 }
576
// Close the cache: stop readers and writers, flush the store buffer when
// dumping, and release all cache resources. The ordering of the steps
// below (close flag, reader drain, lock, write-out, free) is significant.
AOTCodeCache::~AOTCodeCache() {
  if (_closing) {
    return; // Already closed
  }
  // Stop any further access to cache.
  // Checked on entry to load_nmethod() and store_nmethod().
  _closing = true;
  if (_for_use) {
    // Wait for all load_nmethod() finish.
    wait_for_no_nmethod_readers();
  }
  // Prevent writing code into cache while we are closing it.
  // This lock held by ciEnv::register_method() which calls store_nmethod().
  MutexLocker ml(Compile_lock);
  if (for_dump()) { // Finalize cache
    finish_write();
  }
  _load_buffer = nullptr;
  if (_C_store_buffer != nullptr) {
    FREE_C_HEAP_ARRAY(char, _C_store_buffer);
    _C_store_buffer = nullptr;
    _store_buffer = nullptr;
  }
  if (_table != nullptr) {
    // The address table is shared with C-string bookkeeping; guard deletion.
    MutexLocker ml(AOTCodeCStrings_lock, Mutex::_no_safepoint_check_flag);
    delete _table;
    _table = nullptr;
  }
}
606
607 void AOTCodeCache::Config::record(uint cpu_features_offset) {
608 _flags = 0;
609 #ifdef ASSERT
610 _flags |= debugVM;
611 #endif
612 if (UseCompressedOops) {
613 _flags |= compressedOops;
614 }
615 if (UseCompressedClassPointers) {
616 _flags |= compressedClassPointers;
617 }
618 if (UseTLAB) {
619 _flags |= useTLAB;
620 }
621 if (JavaAssertions::systemClassDefault()) {
622 _flags |= systemClassAssertions;
623 }
624 if (JavaAssertions::userClassDefault()) {
625 _flags |= userClassAssertions;
626 }
627 if (EnableContended) {
628 _flags |= enableContendedPadding;
629 }
630 if (RestrictContended) {
631 _flags |= restrictContendedPadding;
632 }
633 if (PreserveFramePointer) {
634 _flags |= preserveFramePointer;
635 }
636 _codeCacheSize = pointer_delta(CodeCache::high_bound(), CodeCache::low_bound(), 1);
637 _compressedOopShift = CompressedOops::shift();
638 _compressedOopBase = CompressedOops::base();
639 _compressedKlassShift = CompressedKlassPointers::shift();
640 _compressedKlassBase = CompressedKlassPointers::base();
641 _contendedPaddingWidth = ContendedPaddingWidth;
642 _objectAlignment = ObjectAlignmentInBytes;
643 _gc = (uint)Universe::heap()->kind();
644 _cpu_features_offset = cpu_features_offset;
645 }
646
// Validate the recorded dump-time configuration against the current VM.
// Returns false (cache unusable) on any mismatch that affects all cached
// code; mismatches that only affect nmethods merely disable AOTCodeCaching.
bool AOTCodeCache::Config::verify(AOTCodeCache* cache) const {
  // First checks affect all cached AOT code
#ifdef ASSERT
  if ((_flags & debugVM) == 0) {
    log_debug(aot, codecache, init)("AOT Code Cache disabled: it was created by product VM, it can't be used by debug VM");
    return false;
  }
#else
  if ((_flags & debugVM) != 0) {
    log_debug(aot, codecache, init)("AOT Code Cache disabled: it was created by debug VM, it can't be used by product VM");
    return false;
  }
#endif

  size_t codeCacheSize = pointer_delta(CodeCache::high_bound(), CodeCache::low_bound(), 1);
  if (_codeCacheSize != codeCacheSize) {
    log_debug(aot, codecache, init)("AOT Code Cache disabled: it was created with CodeCache size = %dKb vs current %dKb", (int)(_codeCacheSize/K), (int)(codeCacheSize/K));
    return false;
  }

  CollectedHeap::Name aot_gc = (CollectedHeap::Name)_gc;
  if (aot_gc != Universe::heap()->kind()) {
    log_debug(aot, codecache, init)("AOT Code Cache disabled: it was created with different GC: %s vs current %s", GCConfig::hs_err_name(aot_gc), GCConfig::hs_err_name());
    return false;
  }

  if (_objectAlignment != (uint)ObjectAlignmentInBytes) {
    log_debug(aot, codecache, init)("AOT Code Cache disabled: it was created with ObjectAlignmentInBytes = %d vs current %d", _objectAlignment, ObjectAlignmentInBytes);
    return false;
  }

  if (((_flags & enableContendedPadding) != 0) != EnableContended) {
    log_debug(aot, codecache, init)("AOT Code Cache disabled: it was created with EnableContended = %s vs current %s", (EnableContended ? "false" : "true"), (EnableContended ? "true" : "false"));
    return false;
  }
  if (((_flags & restrictContendedPadding) != 0) != RestrictContended) {
    log_debug(aot, codecache, init)("AOT Code Cache disabled: it was created with RestrictContended = %s vs current %s", (RestrictContended ? "false" : "true"), (RestrictContended ? "true" : "false"));
    return false;
  }
  if (_contendedPaddingWidth != (uint)ContendedPaddingWidth) {
    log_debug(aot, codecache, init)("AOT Code Cache disabled: it was created with ContendedPaddingWidth = %d vs current %d", _contendedPaddingWidth, ContendedPaddingWidth);
    return false;
  }

  if (((_flags & preserveFramePointer) != 0) != PreserveFramePointer) {
    log_debug(aot, codecache, init)("AOT Code Cache disabled: it was created with PreserveFramePointer = %s vs current %s", (PreserveFramePointer ? "false" : "true"), (PreserveFramePointer ? "true" : "false"));
    return false;
  }

  if (((_flags & compressedClassPointers) != 0) != UseCompressedClassPointers) {
    log_debug(aot, codecache, init)("AOT Code Cache disabled: it was created with UseCompressedClassPointers = %s vs current %s", (UseCompressedClassPointers ? "false" : "true"), (UseCompressedClassPointers ? "true" : "false"));
    return false;
  }
  if (_compressedKlassShift != (uint)CompressedKlassPointers::shift()) {
    log_debug(aot, codecache, init)("AOT Code Cache disabled: it was created with CompressedKlassPointers::shift() = %d vs current %d", _compressedKlassShift, CompressedKlassPointers::shift());
    return false;
  }
  // Only null vs non-null compatibility of the klass base is checked;
  // presumably the actual base address is re-encoded at runtime - confirm.
  if ((_compressedKlassBase == nullptr || CompressedKlassPointers::base() == nullptr) && (_compressedKlassBase != CompressedKlassPointers::base())) {
    log_debug(aot, codecache, init)("AOT Code Cache disabled: incompatible CompressedKlassPointers::base(): %p vs current %p", _compressedKlassBase, CompressedKlassPointers::base());
    return false;
  }

  if (((_flags & compressedOops) != 0) != UseCompressedOops) {
    log_debug(aot, codecache, init)("AOT Code Cache disabled: it was created with UseCompressedOops = %s vs current %s", (UseCompressedOops ? "false" : "true"), (UseCompressedOops ? "true" : "false"));
    return false;
  }
  if (_compressedOopShift != (uint)CompressedOops::shift()) {
    log_debug(aot, codecache, init)("AOT Code Cache disabled: it was created with different CompressedOops::shift(): %d vs current %d", _compressedOopShift, CompressedOops::shift());
    return false;
  }
  // NOTE(review): message says "AOTStubCaching is disabled" but returning
  // false here disables the whole cache - confirm the intended wording.
  if ((_compressedOopBase == nullptr || CompressedOops::base() == nullptr) && (_compressedOopBase != CompressedOops::base())) {
    log_debug(aot, codecache, init)("AOTStubCaching is disabled: incompatible CompressedOops::base(): %p vs current %p", _compressedOopBase, CompressedOops::base());
    return false;
  }

  LogStreamHandle(Debug, aot, codecache, init) log;
  if (log.is_enabled()) {
    log.print_cr("Available CPU features: %s", VM_Version::features_string());
  }

  // The recorded CPU feature set is stored in the cache as
  // [size (uint)][feature buffer of 'size' bytes] at _cpu_features_offset.
  uint offset = _cpu_features_offset;
  uint cpu_features_size = *(uint *)cache->addr(offset);
  assert(cpu_features_size == (uint)VM_Version::cpu_features_size(), "must be");
  offset += sizeof(uint);

  void* cached_cpu_features_buffer = (void *)cache->addr(offset);
  if (log.is_enabled()) {
    ResourceMark rm; // required for stringStream::as_string()
    stringStream ss;
    VM_Version::get_cpu_features_name(cached_cpu_features_buffer, ss);
    log.print_cr("CPU features recorded in AOTCodeCache: %s", ss.as_string());
  }

  if (AOTCodeCPUFeatureCheck && !VM_Version::supports_features(cached_cpu_features_buffer)) {
    if (log.is_enabled()) {
      ResourceMark rm; // required for stringStream::as_string()
      stringStream ss;
      VM_Version::get_missing_features_name(cached_cpu_features_buffer, ss);
      log.print_cr("AOT Code Cache disabled: required cpu features are missing: %s", ss.as_string());
    }
    return false;
  }

  // Next affects only AOT nmethod
  if (((_flags & systemClassAssertions) != 0) != JavaAssertions::systemClassDefault()) {
    log_debug(aot, codecache, init)("AOT Code Cache disabled: it was created with JavaAssertions::systemClassDefault() = %s vs current %s", (JavaAssertions::systemClassDefault() ? "disabled" : "enabled"), (JavaAssertions::systemClassDefault() ? "enabled" : "disabled"));
    FLAG_SET_ERGO(AOTCodeCaching, false);
  }
  if (((_flags & userClassAssertions) != 0) != JavaAssertions::userClassDefault()) {
    log_debug(aot, codecache, init)("AOT Code Cache disabled: it was created with JavaAssertions::userClassDefault() = %s vs current %s", (JavaAssertions::userClassDefault() ? "disabled" : "enabled"), (JavaAssertions::userClassDefault() ? "enabled" : "disabled"));
    FLAG_SET_ERGO(AOTCodeCaching, false);
  }

  return true;
}
762
763 bool AOTCodeCache::Header::verify(uint load_size) const {
764 if (_version != AOT_CODE_VERSION) {
765 log_debug(aot, codecache, init)("AOT Code Cache disabled: different AOT Code version %d vs %d recorded in AOT Code header", AOT_CODE_VERSION, _version);
766 return false;
767 }
768 if (load_size < _cache_size) {
769 log_debug(aot, codecache, init)("AOT Code Cache disabled: AOT Code Cache size %d < %d recorded in AOT Code header", load_size, _cache_size);
770 return false;
771 }
772 return true;
773 }
774
775 volatile int AOTCodeCache::_nmethod_readers = 0;
776
777 AOTCodeCache* AOTCodeCache::open_for_use() {
778 if (AOTCodeCache::is_on_for_use()) {
779 return AOTCodeCache::cache();
780 }
781 return nullptr;
782 }
783
784 AOTCodeCache* AOTCodeCache::open_for_dump() {
785 if (AOTCodeCache::is_on_for_dump()) {
786 AOTCodeCache* cache = AOTCodeCache::cache();
787 cache->clear_lookup_failed(); // Reset bit
788 return cache;
789 }
790 return nullptr;
791 }
792
793 bool AOTCodeCache::is_address_in_aot_cache(address p) {
794 AOTCodeCache* cache = open_for_use();
795 if (cache == nullptr) {
796 return false;
797 }
798 if ((p >= (address)cache->cache_buffer()) &&
799 (p < (address)(cache->cache_buffer() + cache->load_size()))) {
800 return true;
801 }
802 return false;
803 }
804
// Raw byte copy between cache buffers with trace logging.
// Callers are expected to pass a non-zero size (checked by the assert).
static void copy_bytes(const char* from, address to, uint size) {
  assert((int)size > 0, "sanity");
  memcpy(to, from, size);
  log_trace(aot, codecache)("Copied %d bytes from " INTPTR_FORMAT " to " INTPTR_FORMAT, size, p2i(from), p2i(to));
}
810
811 AOTCodeReader::AOTCodeReader(AOTCodeCache* cache, AOTCodeEntry* entry, CompileTask* task) {
812 _cache = cache;
813 _entry = entry;
814 _load_buffer = cache->cache_buffer();
815 _read_position = 0;
816 if (task != nullptr) {
817 _compile_id = task->compile_id();
818 _comp_level = task->comp_level();
819 _preload = task->preload();
820 } else {
821 _compile_id = 0;
822 _comp_level = 0;
823 _preload = false;
824 }
825 _lookup_failed = false;
826 }
827
828 void AOTCodeReader::set_read_position(uint pos) {
829 if (pos == _read_position) {
830 return;
831 }
832 assert(pos < _cache->load_size(), "offset:%d >= file size:%d", pos, _cache->load_size());
833 _read_position = pos;
834 }
835
836 bool AOTCodeCache::set_write_position(uint pos) {
837 if (pos == _write_position) {
838 return true;
839 }
840 if (_store_size < _write_position) {
841 _store_size = _write_position; // Adjust during write
842 }
843 assert(pos < _store_size, "offset:%d >= file size:%d", pos, _store_size);
844 _write_position = pos;
887 if (nbytes == 0) {
888 return 0;
889 }
890 uint new_position = _write_position + nbytes;
891 if (new_position >= (uint)((char*)_store_entries - _store_buffer)) {
892 log_warning(aot, codecache)("Failed to write %d bytes at offset %d to AOT Code Cache. Increase AOTCodeMaxSize.",
893 nbytes, _write_position);
894 set_failed();
895 report_store_failure();
896 return 0;
897 }
898 copy_bytes((const char* )buffer, (address)(_store_buffer + _write_position), nbytes);
899 log_trace(aot, codecache)("Wrote %d bytes at offset %d to AOT Code Cache", nbytes, _write_position);
900 _write_position += nbytes;
901 if (_store_size < _write_position) {
902 _store_size = _write_position;
903 }
904 return nbytes;
905 }
906
// Look up an archived nmethod entry for 'method' at 'comp_level'.
// Returns nullptr when the level is filtered out by DisableAOTCode, when
// no matching entry exists, or when compiler directives say to ignore
// AOT code for this method.
AOTCodeEntry* AOTCodeCache::find_code_entry(const methodHandle& method, uint comp_level) {
  assert(is_using_code(), "AOT code caching should be enabled");
  // DisableAOTCode is a bit mask: bit 0 = level 1, bit 1 = level 2, bit 2 = level 4.
  switch (comp_level) {
    case CompLevel_simple:
      if ((DisableAOTCode & (1 << 0)) != 0) {
        return nullptr;
      }
      break;
    case CompLevel_limited_profile:
      if ((DisableAOTCode & (1 << 1)) != 0) {
        return nullptr;
      }
      break;
    case CompLevel_full_optimization:
      if ((DisableAOTCode & (1 << 2)) != 0) {
        return nullptr;
      }
      break;

    default: return nullptr; // Level 1, 2, and 4 only
  }
  TraceTime t1("Total time to find AOT code", &_t_totalFind, enable_timers(), false);
  if (is_on() && _cache->cache_buffer() != nullptr) {
    ResourceMark rm;
    // Entries are keyed by the hash of the method's name-and-signature string.
    const char* target_name = method->name_and_sig_as_C_string();
    uint hash = java_lang_String::hash_code((const jbyte*)target_name, (int)strlen(target_name));
    AOTCodeEntry* entry = _cache->find_entry(AOTCodeEntry::Code, hash, comp_level);
    if (entry == nullptr) {
      log_info(aot, codecache, nmethod)("Missing entry for '%s' (comp_level %d, hash: " UINT32_FORMAT_X_0 ")", target_name, (uint)comp_level, hash);
#ifdef ASSERT
    } else {
      // Hash collisions are possible; verify the stored name matches.
      uint name_offset = entry->offset() + entry->name_offset();
      uint name_size = entry->name_size(); // Includes terminating '\0'
      const char* name = _cache->cache_buffer() + name_offset;
      if (strncmp(target_name, name, name_size) != 0) {
        assert(false, "AOTCodeCache: saved nmethod's name '%s' is different from '%s', hash: " UINT32_FORMAT_X_0, name, target_name, hash);
      }
#endif
    }

    // NOTE(review): the DirectiveSet obtained here is not released; confirm
    // the DirectivesStack reference-count expectations for this call site.
    DirectiveSet* directives = DirectivesStack::getMatchingDirective(method, nullptr);
    if (directives->IgnorePrecompiledOption) {
      LogStreamHandle(Info, aot, codecache, compilation) log;
      if (log.is_enabled()) {
        log.print("Ignore AOT code entry on level %d for ", comp_level);
        method->print_value_on(&log);
      }
      return nullptr;
    }

    return entry;
  }
  return nullptr;
}
961
962 void* AOTCodeEntry::operator new(size_t x, AOTCodeCache* cache) {
963 return (void*)(cache->add_entry());
964 }
965
966 static bool check_entry(AOTCodeEntry::Kind kind, uint id, uint comp_level, AOTCodeEntry* entry) {
967 if (entry->kind() == kind) {
968 assert(entry->id() == id, "sanity");
969 if (kind != AOTCodeEntry::Code || // addapters and stubs have only one version
970 // Look only for normal AOT code entry, preload code is handled separately
971 (!entry->not_entrant() && !entry->has_clinit_barriers() && (entry->comp_level() == comp_level))) {
972 return true; // Found
973 }
974 }
975 return false;
976 }
977
// Look up an entry of the given kind/id (and, for nmethods, comp_level) in
// the loaded cache. The search table holds [id, index] pairs sorted by id;
// since several entries may share the same id (hash collisions, multiple
// compilation levels), a linear scan is performed around the binary-search
// hit in both directions. Returns nullptr when no entry matches.
AOTCodeEntry* AOTCodeCache::find_entry(AOTCodeEntry::Kind kind, uint id, uint comp_level) {
  assert(_for_use, "sanity");
  uint count = _load_header->entries_count();
  if (_load_entries == nullptr) {
    // Lazily locate the search table and the entries array in the mapped cache.
    _search_entries = (uint*)addr(_load_header->search_table_offset()); // [id, index]
    _load_entries = (AOTCodeEntry*)addr(_load_header->entries_offset());
    log_debug(aot, codecache, init)("Read %d entries table at offset %d from AOT Code Cache", count, _load_header->entries_offset());
  }
  // Binary search
  int l = 0;
  int h = count - 1;
  while (l <= h) {
    int mid = (l + h) >> 1;
    int ix = mid * 2; // each search record is a [id, index] pair of uints
    uint is = _search_entries[ix];
    if (is == id) {
      int index = _search_entries[ix + 1];
      AOTCodeEntry* entry = &(_load_entries[index]);
      if (check_entry(kind, id, comp_level, entry)) {
        return entry; // Found
      }
      // Linear search around the hit: records with the same id are adjacent.
      for (int i = mid - 1; i >= l; i--) { // search back
        ix = i * 2;
        is = _search_entries[ix];
        if (is != id) {
          break;
        }
        index = _search_entries[ix + 1];
        AOTCodeEntry* entry = &(_load_entries[index]);
        if (check_entry(kind, id, comp_level, entry)) {
          return entry; // Found
        }
      }
      for (int i = mid + 1; i <= h; i++) { // search forward
        ix = i * 2;
        is = _search_entries[ix];
        if (is != id) {
          break;
        }
        index = _search_entries[ix + 1];
        AOTCodeEntry* entry = &(_load_entries[index]);
        if (check_entry(kind, id, comp_level, entry)) {
          return entry; // Found
        }
      }
      break; // No match found
    } else if (is < id) {
      l = mid + 1;
    } else {
      h = mid - 1;
    }
  }
  return nullptr;
}
1034
1035 void AOTCodeCache::invalidate_entry(AOTCodeEntry* entry) {
1036 assert(entry!= nullptr, "all entries should be read already");
1037 if (entry->not_entrant()) {
1038 return; // Someone invalidated it already
1039 }
1040 #ifdef ASSERT
1041 assert(_load_entries != nullptr, "sanity");
1042 {
1043 uint name_offset = entry->offset() + entry->name_offset();
1044 const char* name = _load_buffer + name_offset;;
1045 uint level = entry->comp_level();
1046 uint comp_id = entry->comp_id();
1047 bool for_preload = entry->for_preload();
1048 bool clinit_brs = entry->has_clinit_barriers();
1049 log_info(aot, codecache, nmethod)("Invalidating entry for '%s' (comp_id %d, comp_level %d, hash: " UINT32_FORMAT_X_0 "%s%s)",
1050 name, comp_id, level, entry->id(), (for_preload ? "P" : "A"), (clinit_brs ? ", has clinit barriers" : ""));
1051 }
1052 assert(entry->is_loaded() || entry->for_preload(), "invalidate only AOT code in use or a preload code");
1053 bool found = false;
1054 uint count = _load_header->entries_count();
1055 uint i = 0;
1056 for(; i < count; i++) {
1057 if (entry == &(_load_entries[i])) {
1058 break;
1059 }
1060 }
1061 found = (i < count);
1062 assert(found, "entry should exist");
1063 #endif
1064 entry->set_not_entrant();
1065 uint name_offset = entry->offset() + entry->name_offset();
1066 const char* name = _load_buffer + name_offset;;
1067 uint level = entry->comp_level();
1068 uint comp_id = entry->comp_id();
1069 bool for_preload = entry->for_preload();
1070 bool clinit_brs = entry->has_clinit_barriers();
1071 log_info(aot, codecache, nmethod)("Invalidated entry for '%s' (comp_id %d, comp_level %d, hash: " UINT32_FORMAT_X_0 "%s%s)",
1072 name, comp_id, level, entry->id(), (for_preload ? "P" : "A"), (clinit_brs ? ", has clinit barriers" : ""));
1073
1074 if (!for_preload && (entry->comp_level() == CompLevel_full_optimization)) {
1075 // Invalidate preload code if normal AOT C2 code is invalidated,
1076 // most likely because some dependencies changed during run.
1077 // We can still use normal AOT code if preload code is
1078 // invalidated - normal AOT code has less restrictions.
1079 Method* method = entry->method();
1080 if (method != nullptr) {
1081 AOTCodeEntry* preload_entry = method->aot_code_entry();
1082 if (preload_entry != nullptr) {
1083 assert(preload_entry->for_preload(), "expecting only such entries here");
1084 invalidate_entry(preload_entry);
1085 }
1086 }
1087 }
1088 }
1089
// qsort comparator ordering [id, index] search records by ascending id
// (the id is the first uint of each record).
static int uint_cmp(const void *i, const void *j) {
  const uint lhs = *static_cast<const uint*>(i);
  const uint rhs = *static_cast<const uint*>(j);
  if (lhs < rhs) {
    return -1;
  }
  if (lhs > rhs) {
    return 1;
  }
  return 0;
}
1095
1096 void AOTCodeCache::mark_method_pointer(AOTCodeEntry* entries, int count) {
1097 for (int i = 0; i < count; i++) {
1098 Method* m = entries[i].method();
1099 if (m != nullptr) {
1100 AOTCacheAccess::set_pointer(entries[i].method_addr(), m);
1101 }
1102 }
1103 }
1104
// Serialize the CPU feature set into 'buffer': first a uint holding
// 'buffer_size', then the feature data itself (written by VM_Version).
// On return 'buffer' has been advanced past the data and re-aligned to
// DATA_ALIGNMENT, ready for the next section.
void AOTCodeCache::store_cpu_features(char*& buffer, uint buffer_size) {
  // Record the size first so the reader knows how many bytes follow.
  uint* size_ptr = (uint *)buffer;
  *size_ptr = buffer_size;
  buffer += sizeof(uint);

  VM_Version::store_cpu_features(buffer);
  log_debug(aot, codecache, exit)("CPU features recorded in AOTCodeCache: %s", VM_Version::features_string());
  buffer += buffer_size;
  buffer = align_up(buffer, DATA_ALIGNMENT);
}
1115
1116 bool AOTCodeCache::finish_write() {
1117 if (!align_write()) {
1118 return false;
1119 }
1120 uint strings_offset = _write_position;
1121 int strings_count = store_strings();
1122 if (strings_count < 0) {
1123 return false;
1124 }
1125 if (!align_write()) {
1126 return false;
1127 }
1128 uint strings_size = _write_position - strings_offset;
1129
1130 uint entries_count = 0; // Number of entrant (useful) code entries
1131 uint entries_offset = _write_position;
1132
1133 uint code_count = _store_entries_cnt;
1134 if (code_count > 0) {
1135 _aot_code_directory = CachedCodeDirectory::create();
1136 assert(_aot_code_directory != nullptr, "Sanity check");
1137
1138 uint header_size = (uint)align_up(sizeof(AOTCodeCache::Header), DATA_ALIGNMENT);
1139 uint search_count = code_count * 2;
1140 uint search_size = search_count * sizeof(uint);
1141 uint entries_size = (uint)align_up(code_count * sizeof(AOTCodeEntry), DATA_ALIGNMENT); // In bytes
1142 uint preload_entries_cnt = 0;
1143 uint* preload_entries = NEW_C_HEAP_ARRAY(uint, code_count, mtCode);
1144 uint preload_entries_size = code_count * sizeof(uint);
1145 // _write_position should include code and strings
1146 uint code_alignment = code_count * DATA_ALIGNMENT; // We align_up code size when storing it.
1147 uint cpu_features_size = VM_Version::cpu_features_size();
1148 uint total_cpu_features_size = sizeof(uint) + cpu_features_size; // sizeof(uint) to store cpu_features_size
1149 uint total_size = _write_position + header_size + code_alignment +
1150 search_size + preload_entries_size + entries_size +
1151 align_up(total_cpu_features_size, DATA_ALIGNMENT);
1152 assert(total_size < max_aot_code_size(), "AOT Code size (" UINT32_FORMAT " bytes) is greater than AOTCodeMaxSize(" UINT32_FORMAT " bytes).", total_size, max_aot_code_size());
1153
1154 // Allocate in AOT Cache buffer
1155 char* buffer = (char *)AOTCacheAccess::allocate_aot_code_region(total_size + DATA_ALIGNMENT);
1156 char* start = align_up(buffer, DATA_ALIGNMENT);
1157 char* current = start + header_size; // Skip header
1158
1159 uint cpu_features_offset = current - start;
1160 store_cpu_features(current, cpu_features_size);
1161 assert(is_aligned(current, DATA_ALIGNMENT), "sanity check");
1162 assert(current < start + total_size, "sanity check");
1163
1164 // Create ordered search table for entries [id, index];
1165 uint* search = NEW_C_HEAP_ARRAY(uint, search_count, mtCode);
1166
1167 AOTCodeEntry* entries_address = _store_entries; // Pointer to latest entry
1168 uint adapters_count = 0;
1169 uint shared_blobs_count = 0;
1170 uint C1_blobs_count = 0;
1171 uint C2_blobs_count = 0;
1172 uint stubs_count = 0;
1173 uint nmethods_count = 0;
1174 uint max_size = 0;
1175 // AOTCodeEntry entries were allocated in reverse in store buffer.
1176 // Process them in reverse order to cache first code first.
1177 for (int i = code_count - 1; i >= 0; i--) {
1178 AOTCodeEntry* entry = &entries_address[i];
1179 if (entry->load_fail()) {
1180 continue;
1181 }
1182 if (entry->not_entrant()) {
1183 log_info(aot, codecache, exit)("Not entrant new entry comp_id: %d, comp_level: %d, hash: " UINT32_FORMAT_X_0 "%s",
1184 entry->comp_id(), entry->comp_level(), entry->id(), (entry->has_clinit_barriers() ? ", has clinit barriers" : ""));
1185 if (entry->for_preload()) {
1186 // Skip not entrant preload code:
1187 // we can't pre-load code which may have failing dependencies.
1188 continue;
1189 }
1190 entry->set_entrant(); // Reset
1191 } else if (entry->for_preload() && entry->method() != nullptr) {
1192 // record entrant first version code for pre-loading
1193 preload_entries[preload_entries_cnt++] = entries_count;
1194 }
1195 {
1196 uint size = align_up(entry->size(), DATA_ALIGNMENT);
1197 if (size > max_size) {
1198 max_size = size;
1199 }
1200 copy_bytes((_store_buffer + entry->offset()), (address)current, size);
1201 entry->set_offset(current - start); // New offset
1202 current += size;
1203 uint n = write_bytes(entry, sizeof(AOTCodeEntry));
1204 if (n != sizeof(AOTCodeEntry)) {
1205 FREE_C_HEAP_ARRAY(uint, search);
1206 return false;
1207 }
1208 search[entries_count*2 + 0] = entry->id();
1209 search[entries_count*2 + 1] = entries_count;
1210 entries_count++;
1211 AOTCodeEntry::Kind kind = entry->kind();
1212 if (kind == AOTCodeEntry::Adapter) {
1213 adapters_count++;
1214 } else if (kind == AOTCodeEntry::SharedBlob) {
1215 shared_blobs_count++;
1216 } else if (kind == AOTCodeEntry::C1Blob) {
1217 C1_blobs_count++;
1218 } else if (kind == AOTCodeEntry::C2Blob) {
1219 C2_blobs_count++;
1220 } else if (kind == AOTCodeEntry::Stub) {
1221 stubs_count++;
1222 } else {
1223 assert(kind == AOTCodeEntry::Code, "sanity");
1224 nmethods_count++;
1225 }
1226 }
1227 }
1228
1229 if (entries_count == 0) {
1230 log_info(aot, codecache, exit)("AOT Code Cache was not created: no entires");
1231 FREE_C_HEAP_ARRAY(uint, search);
1232 return true; // Nothing to write
1233 }
1234 assert(entries_count <= code_count, "%d > %d", entries_count, code_count);
1235 // Write strings
1236 if (strings_count > 0) {
1237 copy_bytes((_store_buffer + strings_offset), (address)current, strings_size);
1238 strings_offset = (current - start); // New offset
1239 current += strings_size;
1240 }
1241 uint preload_entries_offset = (current - start);
1242 preload_entries_size = preload_entries_cnt * sizeof(uint);
1243 if (preload_entries_size > 0) {
1244 copy_bytes((const char*)preload_entries, (address)current, preload_entries_size);
1245 current += preload_entries_size;
1246 log_info(aot, codecache, exit)("Wrote %d preload entries to AOT Code Cache", preload_entries_cnt);
1247 }
1248 if (preload_entries != nullptr) {
1249 FREE_C_HEAP_ARRAY(uint, preload_entries);
1250 }
1251
1252 uint search_table_offset = current - start;
1253 // Sort and store search table
1254 qsort(search, entries_count, 2*sizeof(uint), uint_cmp);
1255 search_size = 2 * entries_count * sizeof(uint);
1256 copy_bytes((const char*)search, (address)current, search_size);
1257 FREE_C_HEAP_ARRAY(uint, search);
1258 current += search_size;
1259
1260 // Write entries
1261 current = align_up(current, DATA_ALIGNMENT);
1262 uint new_entries_offset = current - start;
1263 entries_size = entries_count * sizeof(AOTCodeEntry); // New size
1264 copy_bytes((_store_buffer + entries_offset), (address)current, entries_size);
1265 mark_method_pointer((AOTCodeEntry*)current, entries_count);
1266 current += entries_size;
1267
1268 log_stats_on_exit();
1269
1270 uint size = (current - start);
1271 assert(size <= total_size, "%d > %d", size , total_size);
1272 uint blobs_count = shared_blobs_count + C1_blobs_count + C2_blobs_count;
1273 assert(nmethods_count == (entries_count - (stubs_count + blobs_count + adapters_count)), "sanity");
1274 log_debug(aot, codecache, exit)(" Adapters: total=%u", adapters_count);
1275 log_debug(aot, codecache, exit)(" Shared Blobs: total=%u", shared_blobs_count);
1276 log_debug(aot, codecache, exit)(" C1 Blobs: total=%u", C1_blobs_count);
1277 log_debug(aot, codecache, exit)(" C2 Blobs: total=%u", C2_blobs_count);
1278 log_debug(aot, codecache, exit)(" Stubs: total=%u", stubs_count);
1279 log_debug(aot, codecache, exit)(" Nmethods: total=%u", nmethods_count);
1280 log_debug(aot, codecache, exit)(" AOT code cache size: %u bytes, max entry's size: %u bytes", size, max_size);
1281
1282 // Finalize header
1283 AOTCodeCache::Header* header = (AOTCodeCache::Header*)start;
1284 header->init(size, (uint)strings_count, strings_offset,
1285 entries_count, search_table_offset, new_entries_offset,
1286 preload_entries_cnt, preload_entries_offset,
1287 adapters_count, shared_blobs_count,
1288 C1_blobs_count, C2_blobs_count,
1289 stubs_count, cpu_features_offset);
1290
1291 log_info(aot, codecache, exit)("Wrote %d AOT code entries to AOT Code Cache", entries_count);
1292
1293 _aot_code_directory->set_aot_code_data(size, start);
1294 }
1295 return true;
1296 }
1297
1298 //------------------Store/Load AOT code ----------------------
1299
1300 bool AOTCodeCache::store_code_blob(CodeBlob& blob, AOTCodeEntry::Kind entry_kind, uint id, const char* name, int entry_offset_count, int* entry_offsets) {
1301 AOTCodeCache* cache = open_for_dump();
1302 if (cache == nullptr) {
1303 return false;
1304 }
1305 assert(AOTCodeEntry::is_valid_entry_kind(entry_kind), "invalid entry_kind %d", entry_kind);
1306
1307 if (AOTCodeEntry::is_adapter(entry_kind) && !is_dumping_adapter()) {
1308 return false;
1309 }
1310 if (AOTCodeEntry::is_blob(entry_kind) && !is_dumping_stub()) {
1311 return false;
1312 }
1313 log_debug(aot, codecache, stubs)("Writing blob '%s' (id=%u, kind=%s) to AOT Code Cache", name, id, aot_code_entry_kind_name[entry_kind]);
1314
1315 #ifdef ASSERT
1316 LogStreamHandle(Trace, aot, codecache, stubs) log;
1317 if (log.is_enabled()) {
1318 FlagSetting fs(PrintRelocations, true);
1319 blob.print_on(&log);
1320 }
1321 #endif
1322 // we need to take a lock to prevent race between compiler threads generating AOT code
1323 // and the main thread generating adapter
1324 MutexLocker ml(Compile_lock);
1325 if (!is_on()) {
1326 return false; // AOT code cache was already dumped and closed.
1327 }
1328 if (!cache->align_write()) {
1329 return false;
1330 }
1331 uint entry_position = cache->_write_position;
1332
1333 // Write name
1334 uint name_offset = cache->_write_position - entry_position;
1335 uint name_size = (uint)strlen(name) + 1; // Includes '/0'
1336 uint n = cache->write_bytes(name, name_size);
1337 if (n != name_size) {
1338 return false;
1339 }
1340
1341 // Write CodeBlob
1342 if (!cache->align_write()) {
1343 return false;
1344 }
1345 uint blob_offset = cache->_write_position - entry_position;
1346 address archive_buffer = cache->reserve_bytes(blob.size());
1347 if (archive_buffer == nullptr) {
1348 return false;
1349 }
1350 CodeBlob::archive_blob(&blob, archive_buffer);
1351
1352 uint reloc_data_size = blob.relocation_size();
1353 n = cache->write_bytes((address)blob.relocation_begin(), reloc_data_size);
1354 if (n != reloc_data_size) {
1355 return false;
1356 }
1357
1358 bool has_oop_maps = false;
1359 if (blob.oop_maps() != nullptr) {
1360 if (!cache->write_oop_map_set(blob)) {
1361 return false;
1362 }
1363 has_oop_maps = true;
1364 }
1365
1366 #ifndef PRODUCT
1367 // Write asm remarks
1368 if (!cache->write_asm_remarks(blob.asm_remarks(), /* use_string_table */ true)) {
1369 return false;
1370 }
1371 if (!cache->write_dbg_strings(blob.dbg_strings(), /* use_string_table */ true)) {
1372 return false;
1373 }
1374 #endif /* PRODUCT */
1375
1376 if (!cache->write_relocations(blob)) {
1377 if (!cache->failed()) {
1378 // We may miss an address in AOT table - skip this code blob.
1379 cache->set_write_position(entry_position);
1380 }
1381 return false;
1382 }
1383
1384 // Write entries offsets
1385 n = cache->write_bytes(&entry_offset_count, sizeof(int));
1386 if (n != sizeof(int)) {
1387 return false;
1388 }
1389 for (int i = 0; i < entry_offset_count; i++) {
1390 uint32_t off = (uint32_t)entry_offsets[i];
1391 n = cache->write_bytes(&off, sizeof(uint32_t));
1392 if (n != sizeof(uint32_t)) {
1393 return false;
1394 }
1395 }
1396 uint entry_size = cache->_write_position - entry_position;
1397 AOTCodeEntry* entry = new(cache) AOTCodeEntry(entry_kind, encode_id(entry_kind, id),
1398 entry_position, entry_size, name_offset, name_size,
1399 blob_offset, has_oop_maps, blob.content_begin());
1400 log_debug(aot, codecache, stubs)("Wrote code blob '%s' (id=%u, kind=%s) to AOT Code Cache", name, id, aot_code_entry_kind_name[entry_kind]);
1403
1404 CodeBlob* AOTCodeCache::load_code_blob(AOTCodeEntry::Kind entry_kind, uint id, const char* name, int entry_offset_count, int* entry_offsets) {
1405 AOTCodeCache* cache = open_for_use();
1406 if (cache == nullptr) {
1407 return nullptr;
1408 }
1409 assert(AOTCodeEntry::is_valid_entry_kind(entry_kind), "invalid entry_kind %d", entry_kind);
1410
1411 if (AOTCodeEntry::is_adapter(entry_kind) && !is_using_adapter()) {
1412 return nullptr;
1413 }
1414 if (AOTCodeEntry::is_blob(entry_kind) && !is_using_stub()) {
1415 return nullptr;
1416 }
1417 log_debug(aot, codecache, stubs)("Reading blob '%s' (id=%u, kind=%s) from AOT Code Cache", name, id, aot_code_entry_kind_name[entry_kind]);
1418
1419 AOTCodeEntry* entry = cache->find_entry(entry_kind, encode_id(entry_kind, id));
1420 if (entry == nullptr) {
1421 return nullptr;
1422 }
1423 AOTCodeReader reader(cache, entry, nullptr);
1424 CodeBlob* blob = reader.compile_code_blob(name, entry_offset_count, entry_offsets);
1425
1426 log_debug(aot, codecache, stubs)("%sRead blob '%s' (id=%u, kind=%s) from AOT Code Cache",
1427 (blob == nullptr? "Failed to " : ""), name, id, aot_code_entry_kind_name[entry_kind]);
1428 return blob;
1429 }
1430
// Materialize a CodeBlob from its archived form: verify the stored name,
// allocate the live blob in the CodeCache, restore oop maps, asm remarks and
// debug strings (non-product), apply relocations, and fill in the
// caller-requested entry offsets. Returns nullptr on name mismatch or when
// the CodeCache is out of space.
CodeBlob* AOTCodeReader::compile_code_blob(const char* name, int entry_offset_count, int* entry_offsets) {
  uint entry_position = _entry->offset();

  // Read name
  uint name_offset = entry_position + _entry->name_offset();
  uint name_size = _entry->name_size(); // Includes '\0'
  const char* stored_name = addr(name_offset);

  if (strncmp(stored_name, name, (name_size - 1)) != 0) {
    log_warning(aot, codecache, stubs)("Saved blob's name '%s' is different from the expected name '%s'",
                                       stored_name, name);
    set_lookup_failed(); // Skip this blob
    return nullptr;
  }

  // Read archived code blob
  uint offset = entry_position + _entry->code_offset();
  CodeBlob* archived_blob = (CodeBlob*)addr(offset);
  offset += archived_blob->size();

  // Relocation data immediately follows the archived blob.
  address reloc_data = (address)addr(offset);
  offset += archived_blob->relocation_size();
  set_read_position(offset);

  ImmutableOopMapSet* oop_maps = nullptr;
  if (_entry->has_oop_maps()) {
    oop_maps = read_oop_map_set();
  }

  // Allocate the live blob in the CodeCache from the archived content.
  CodeBlob* code_blob = CodeBlob::create(archived_blob,
                                         stored_name,
                                         reloc_data,
                                         oop_maps
                                        );
  if (code_blob == nullptr) { // no space left in CodeCache
    return nullptr;
  }

#ifndef PRODUCT
  code_blob->asm_remarks().init();
  read_asm_remarks(code_blob->asm_remarks(), /* use_string_table */ true);
  code_blob->dbg_strings().init();
  read_dbg_strings(code_blob->dbg_strings(), /* use_string_table */ true);
#endif // PRODUCT

  // Patch addresses in the copied code for the current runtime.
  fix_relocations(code_blob);

  // Read entries offsets
  offset = read_position();
  int stored_count = *(int*)addr(offset);
  assert(stored_count == entry_offset_count, "entry offset count mismatch, count in AOT code cache=%d, expected=%d", stored_count, entry_offset_count);
  offset += sizeof(int);
  set_read_position(offset);
  for (int i = 0; i < stored_count; i++) {
    uint32_t off = *(uint32_t*)addr(offset);
    offset += sizeof(uint32_t);
    // Only adapter entries have named sub-entries; others log an empty name.
    const char* entry_name = (_entry->kind() == AOTCodeEntry::Adapter) ? AdapterHandlerEntry::entry_name(i) : "";
    log_trace(aot, codecache, stubs)("Reading adapter '%s:%s' (0x%x) offset: 0x%x from AOT Code Cache",
                                     stored_name, entry_name, _entry->id(), off);
    entry_offsets[i] = off;
  }

#ifdef ASSERT
  LogStreamHandle(Trace, aot, codecache, stubs) log;
  if (log.is_enabled()) {
    FlagSetting fs(PrintRelocations, true);
    code_blob->print_on(&log);
  }
#endif
  return code_blob;
}
1502
// Store a generated stub's raw code bytes and name into the AOT Code Cache.
// Stubs are stored without relocation info, so debug builds fail fast if the
// stub's code section carries any relocations. Returns false when dumping is
// disabled, the cache is unavailable, or a write fails.
bool AOTCodeCache::store_stub(StubCodeGenerator* cgen, vmIntrinsicID id, const char* name, address start) {
  if (!is_dumping_stub()) {
    return false;
  }
  AOTCodeCache* cache = open_for_dump();
  if (cache == nullptr) {
    return false;
  }
  log_info(aot, codecache, stubs)("Writing stub '%s' id:%d to AOT Code Cache", name, (int)id);
  if (!cache->align_write()) {
    return false;
  }
#ifdef ASSERT
  // Raw-byte storage would silently lose relocations; report and abort in
  // debug builds if any non-trivial relocation is present.
  CodeSection* cs = cgen->assembler()->code_section();
  if (cs->has_locs()) {
    uint reloc_count = cs->locs_count();
    tty->print_cr("======== write stubs code section relocations [%d]:", reloc_count);
    // Collect additional data
    RelocIterator iter(cs);
    while (iter.next()) {
      switch (iter.type()) {
        case relocInfo::none:
          break;
        default: {
          iter.print_current_on(tty);
          fatal("stub's relocation %d unimplemented", (int)iter.type());
          break;
        }
      }
    }
  }
#endif
  uint entry_position = cache->_write_position;

  // Write code
  uint code_offset = 0;
  uint code_size = cgen->assembler()->pc() - start;
  uint n = cache->write_bytes(start, code_size);
  if (n != code_size) {
    return false;
  }
  // Write name
  uint name_offset = cache->_write_position - entry_position;
  uint name_size = (uint)strlen(name) + 1; // Includes '\0'
  n = cache->write_bytes(name, name_size);
  if (n != name_size) {
    return false;
  }
  uint entry_size = cache->_write_position - entry_position;
  // Placement new registers the entry in the cache's entries table.
  AOTCodeEntry* entry = new(cache) AOTCodeEntry(entry_position, entry_size, name_offset, name_size,
                                                code_offset, code_size,
                                                AOTCodeEntry::Stub, (uint32_t)id);
  log_info(aot, codecache, stubs)("Wrote stub '%s' id:%d to AOT Code Cache", name, (int)id);
  return true;
}
1558
// Copy an AOT-cached stub's code into the current code section at 'start'
// and advance the section end past it. Returns false when the cache is
// unavailable, no entry matches, or the stored name mismatches (the latter
// marks the whole cache as failed).
bool AOTCodeCache::load_stub(StubCodeGenerator* cgen, vmIntrinsicID id, const char* name, address start) {
  if (!is_using_stub()) {
    return false;
  }
  assert(start == cgen->assembler()->pc(), "wrong buffer");
  AOTCodeCache* cache = open_for_use();
  if (cache == nullptr) {
    return false;
  }
  AOTCodeEntry* entry = cache->find_entry(AOTCodeEntry::Stub, (uint)id);
  if (entry == nullptr) {
    return false;
  }
  uint entry_position = entry->offset();
  // Read name
  uint name_offset = entry->name_offset() + entry_position;
  uint name_size = entry->name_size(); // Includes '\0'
  const char* saved_name = cache->addr(name_offset);
  if (strncmp(name, saved_name, (name_size - 1)) != 0) {
    // Name mismatch means the cache content does not match this VM build.
    log_warning(aot, codecache)("Saved stub's name '%s' is different from '%s' for id:%d", saved_name, name, (int)id);
    cache->set_failed();
    report_load_failure();
    return false;
  }
  log_info(aot, codecache, stubs)("Reading stub '%s' id:%d from AOT Code Cache", name, (int)id);
  // Read code
  uint code_offset = entry->code_offset() + entry_position;
  uint code_size = entry->code_size();
  copy_bytes(cache->addr(code_offset), start, code_size);
  cgen->assembler()->code_section()->set_end(start + code_size);
  log_info(aot, codecache, stubs)("Read stub '%s' id:%d from AOT Code Cache", name, (int)id);
  return true;
}
1592
1593 AOTCodeEntry* AOTCodeCache::store_nmethod(nmethod* nm, AbstractCompiler* compiler, bool for_preload) {
1594 if (!is_dumping_code()) {
1595 return nullptr;
1596 }
1597 assert(CDSConfig::is_dumping_aot_code(), "should be called only when allowed");
1598 AOTCodeCache* cache = open_for_dump();
1599 precond(cache != nullptr);
1600 precond(!nm->is_osr_method()); // AOT compilation is requested only during AOT cache assembly phase
1601 if (!compiler->is_c1() && !compiler->is_c2()) {
1602 // Only c1 and c2 compilers
1603 return nullptr;
1604 }
1605 int comp_level = nm->comp_level();
1606 if (comp_level == CompLevel_full_profile) {
1607 // Do not cache C1 compiles with full profile i.e. tier3
1608 return nullptr;
1609 }
1610 assert(comp_level == CompLevel_simple || comp_level == CompLevel_limited_profile || comp_level == CompLevel_full_optimization, "must be");
1611
1612 TraceTime t1("Total time to store AOT code", &_t_totalStore, enable_timers(), false);
1613 AOTCodeEntry* entry = nullptr;
1614 entry = cache->write_nmethod(nm, for_preload);
1615 if (entry == nullptr) {
1616 log_info(aot, codecache, nmethod)("%d (L%d): nmethod store attempt failed", nm->compile_id(), comp_level);
1617 }
1618 return entry;
1619 }
1620
// Serialize an nmethod into the AOT Code Cache: name, archived CodeBlob,
// relocation data, oops/metadata from the data region, oop maps, immutable
// data, relocation-immediate oops/metadata, relocations, and (non-product)
// asm remarks and debug strings. Returns the new AOTCodeEntry, or nullptr
// when the nmethod cannot or should not be stored (the write position is
// rewound on recoverable lookup failures so the cache stays usable).
AOTCodeEntry* AOTCodeCache::write_nmethod(nmethod* nm, bool for_preload) {
  AOTCodeCache* cache = open_for_dump();
  assert(cache != nullptr, "sanity check");
  assert(!nm->has_clinit_barriers() || (ClassInitBarrierMode > 0), "sanity");
  uint comp_id = nm->compile_id();
  uint comp_level = nm->comp_level();
  Method* method = nm->method();
  if (!AOTCacheAccess::can_generate_aot_code(method)) {
    ResourceMark rm;
    log_info(aot, codecache, nmethod)("%d (L%d): Skip method '%s' for AOT%s compile: not in AOT cache", comp_id, (int)comp_level, method->name_and_sig_as_C_string(), (for_preload ? " preload" : ""));
    // This condition is unexpected in debug builds: fail loudly there.
    assert(AOTCacheAccess::can_generate_aot_code(method), "sanity");
    return nullptr;
  }
  bool method_in_cds = MetaspaceShared::is_in_shared_metaspace((address)method);
  InstanceKlass* holder = method->method_holder();
  bool klass_in_cds = holder->is_shared() && !holder->defined_by_other_loaders();
  bool builtin_loader = holder->class_loader_data()->is_builtin_class_loader_data();
  if (!builtin_loader) {
    ResourceMark rm;
    log_info(aot, codecache, nmethod)("%d (L%d): Skip method '%s' loaded by custom class loader %s", comp_id, (int)comp_level, method->name_and_sig_as_C_string(), holder->class_loader_data()->loader_name());
    assert(builtin_loader, "sanity");
    return nullptr;
  }
  if (for_preload && !(method_in_cds && klass_in_cds)) {
    // Preload code requires both the method and its holder to be archived.
    ResourceMark rm;
    log_info(aot, codecache, nmethod)("%d (L%d): Skip method '%s' for preload: not in CDS", comp_id, (int)comp_level, method->name_and_sig_as_C_string());
    assert(!for_preload || (method_in_cds && klass_in_cds), "sanity");
    return nullptr;
  }
  assert(!for_preload || (method_in_cds && klass_in_cds), "sanity");
  _for_preload = for_preload;
  _has_clinit_barriers = nm->has_clinit_barriers();

  if (!align_write()) {
    return nullptr;
  }

  uint entry_position = _write_position;

  // Write name
  uint name_offset = 0;
  uint name_size = 0;
  uint hash = 0;
  uint n;
  {
    ResourceMark rm;
    const char* name = method->name_and_sig_as_C_string();
    log_info(aot, codecache, nmethod)("%d (L%d): Writing nmethod '%s' (comp level: %d, %s) to AOT Code Cache",
                                      comp_id, (int)comp_level, name, comp_level,
                                      (nm->has_clinit_barriers() ? ", has clinit barriers" : ""));

    LogStreamHandle(Info, aot, codecache, loader) log;
    if (log.is_enabled()) {
      oop loader = holder->class_loader();
      oop domain = holder->protection_domain();
      log.print("Holder: ");
      holder->print_value_on(&log);
      log.print(" loader: ");
      if (loader == nullptr) {
        log.print("nullptr");
      } else {
        loader->print_value_on(&log);
      }
      log.print(" domain: ");
      if (domain == nullptr) {
        log.print("nullptr");
      } else {
        domain->print_value_on(&log);
      }
      log.cr();
    }
    name_offset = _write_position - entry_position;
    name_size = (uint)strlen(name) + 1; // Includes '\0'
    n = write_bytes(name, name_size);
    if (n != name_size) {
      return nullptr;
    }
    // The entry is later looked up by this hash of the full name (see find_code_entry).
    hash = java_lang_String::hash_code((const jbyte*)name, (int)strlen(name));
  }

  // Write CodeBlob
  if (!cache->align_write()) {
    return nullptr;
  }
  uint blob_offset = cache->_write_position - entry_position;
  address archive_buffer = cache->reserve_bytes(nm->size());
  if (archive_buffer == nullptr) {
    return nullptr;
  }
  CodeBlob::archive_blob(nm, archive_buffer);

  uint reloc_data_size = nm->relocation_size();
  n = write_bytes((address)nm->relocation_begin(), reloc_data_size);
  if (n != reloc_data_size) {
    return nullptr;
  }

  // Write oops and metadata present in the nmethod's data region
  if (!write_oops(nm)) {
    if (lookup_failed() && !failed()) {
      // Skip this method and reposition file
      set_write_position(entry_position);
    }
    return nullptr;
  }
  if (!write_metadata(nm)) {
    if (lookup_failed() && !failed()) {
      // Skip this method and reposition file
      set_write_position(entry_position);
    }
    return nullptr;
  }

  bool has_oop_maps = false;
  if (nm->oop_maps() != nullptr) {
    if (!cache->write_oop_map_set(*nm)) {
      return nullptr;
    }
    has_oop_maps = true;
  }

  uint immutable_data_size = nm->immutable_data_size();
  n = write_bytes(nm->immutable_data_begin(), immutable_data_size);
  if (n != immutable_data_size) {
    return nullptr;
  }

  // Collect oops/metadata referenced directly from relocations ("immediates").
  JavaThread* thread = JavaThread::current();
  HandleMark hm(thread);
  GrowableArray<Handle> oop_list;
  GrowableArray<Metadata*> metadata_list;

  nm->create_reloc_immediates_list(thread, oop_list, metadata_list);
  if (!write_nmethod_reloc_immediates(oop_list, metadata_list)) {
    if (lookup_failed() && !failed()) {
      // Skip this method and reposition file
      set_write_position(entry_position);
    }
    return nullptr;
  }

  if (!write_relocations(*nm, &oop_list, &metadata_list)) {
    return nullptr;
  }

#ifndef PRODUCT
  if (!cache->write_asm_remarks(nm->asm_remarks(), /* use_string_table */ false)) {
    return nullptr;
  }
  if (!cache->write_dbg_strings(nm->dbg_strings(), /* use_string_table */ false)) {
    return nullptr;
  }
#endif /* PRODUCT */

  uint entry_size = _write_position - entry_position;
  AOTCodeEntry* entry = new (this) AOTCodeEntry(AOTCodeEntry::Code, hash,
                                                entry_position, entry_size,
                                                name_offset, name_size,
                                                blob_offset, has_oop_maps,
                                                nm->content_begin(), comp_level, comp_id,
                                                nm->has_clinit_barriers(), for_preload);
  if (method_in_cds) {
    entry->set_method(method);
  }
#ifdef ASSERT
  if (nm->has_clinit_barriers() || for_preload) {
    assert(for_preload, "sanity");
    assert(entry->method() != nullptr, "sanity");
  }
#endif
  {
    ResourceMark rm;
    const char* name = nm->method()->name_and_sig_as_C_string();
    log_info(aot, codecache, nmethod)("%d (L%d): Wrote nmethod '%s'%s to AOT Code Cache",
                                      comp_id, (int)comp_level, name, (for_preload ? " (for preload)" : ""));
  }
  // NOTE(review): with VerifyAOTCode the freshly written entry is deliberately
  // not returned to the caller (the data stays in the cache) — presumably so
  // verification can compare against a fresh compile; confirm intent.
  if (VerifyAOTCode) {
    return nullptr;
  }
  return entry;
}
1802
// Load the archived nmethod for 'target' from the AOT Code Cache and install
// it via the AOTCodeReader. On success the task's inlined-bytecode count is
// taken from the cache entry; on failure the entry is marked load-failed and
// not-entrant so the load is not retried. Returns whether the nmethod was
// successfully materialized.
bool AOTCodeCache::load_nmethod(ciEnv* env, ciMethod* target, int entry_bci, AbstractCompiler* compiler, CompLevel comp_level) {
  if (!is_using_code()) {
    return false;
  }
  AOTCodeCache* cache = open_for_use();
  if (cache == nullptr) {
    return false;
  }
  // Only full-method (non-OSR) compilations are stored in the cache.
  assert(entry_bci == InvocationEntryBci, "unexpected entry_bci=%d", entry_bci);
  TraceTime t1("Total time to load AOT code", &_t_totalLoad, enable_timers(), false);
  CompileTask* task = env->task();
  // Bracket the load with timestamps so the task can report AOT load time.
  task->mark_aot_load_start(os::elapsed_counter());
  AOTCodeEntry* entry = task->aot_code_entry();
  bool preload = task->preload();
  assert(entry != nullptr, "sanity");
  if (log_is_enabled(Info, aot, codecache, nmethod)) {
    // Needs a VM state transition and ResourceMark only for logging.
    VM_ENTRY_MARK;
    ResourceMark rm;
    methodHandle method(THREAD, target->get_Method());
    const char* target_name = method->name_and_sig_as_C_string();
    uint hash = java_lang_String::hash_code((const jbyte*)target_name, (int)strlen(target_name));
    bool clinit_brs = entry->has_clinit_barriers();
    log_info(aot, codecache, nmethod)("%d (L%d): %s nmethod '%s' (hash: " UINT32_FORMAT_X_0 "%s)",
                 task->compile_id(), task->comp_level(), (preload ? "Preloading" : "Reading"),
                 target_name, hash, (clinit_brs ? ", has clinit barriers" : ""));
  }
  // Guards against the cache being closed while it is read from.
  ReadingMark rdmk;
  if (rdmk.failed()) {
    // Cache is closed, cannot touch anything.
    return false;
  }

  AOTCodeReader reader(cache, entry, task);
  bool success = reader.compile_nmethod(env, target, compiler);
  if (success) {
    task->set_num_inlined_bytecodes(entry->num_inlined_bytecodes());
  } else {
    // Make sure this entry is not loaded again.
    entry->set_load_fail();
    entry->set_not_entrant();
  }
  task->mark_aot_load_finish(os::elapsed_counter());
  return success;
}
1846
// Materialize an nmethod from its archived image. The auxiliary streams
// (relocation data, oops/metadata lists, oop maps, immutable data, reloc
// immediates, dependencies) are consumed in exactly the order the writer
// stored them, advancing the reader's position, and are then handed to
// ciEnv::register_aot_method for installation. Returns the task's success
// status.
bool AOTCodeReader::compile_nmethod(ciEnv* env, ciMethod* target, AbstractCompiler* compiler) {
  CompileTask* task = env->task();
  AOTCodeEntry* aot_code_entry = (AOTCodeEntry*)_entry;
  nmethod* nm = nullptr;

  uint entry_position = aot_code_entry->offset();
  uint archived_nm_offset = entry_position + aot_code_entry->code_offset();
  nmethod* archived_nm = (nmethod*)addr(archived_nm_offset);
  // Skip over the archived nmethod image itself; the streams follow it.
  set_read_position(archived_nm_offset + archived_nm->size());

  OopRecorder* oop_recorder = new OopRecorder(env->arena());
  env->set_oop_recorder(oop_recorder);

  uint offset;

  // Relocation data immediately follows the nmethod image.
  offset = read_position();
  address reloc_data = (address)addr(offset);
  offset += archived_nm->relocation_size();
  set_read_position(offset);

  // Read oops and metadata
  VM_ENTRY_MARK
  GrowableArray<Handle> oop_list;
  GrowableArray<Metadata*> metadata_list;

  if (!read_oop_metadata_list(THREAD, target, oop_list, metadata_list, oop_recorder)) {
    return false;
  }

  ImmutableOopMapSet* oopmaps = read_oop_map_set();

  // Immutable data blob (dependencies are stored at its start, see below).
  offset = read_position();
  address immutable_data = (address)addr(offset);
  offset += archived_nm->immutable_data_size();
  set_read_position(offset);

  // Oops and metadata referenced directly from relocations (immediates);
  // these are not registered with the oop recorder.
  GrowableArray<Handle> reloc_immediate_oop_list;
  GrowableArray<Metadata*> reloc_immediate_metadata_list;
  if (!read_oop_metadata_list(THREAD, target, reloc_immediate_oop_list, reloc_immediate_metadata_list, nullptr)) {
    return false;
  }

  // Read Dependencies (compressed already)
  Dependencies* dependencies = new Dependencies(env);
  dependencies->set_content(immutable_data, archived_nm->dependencies_size());
  env->set_dependencies(dependencies);

  const char* name = addr(entry_position + aot_code_entry->name_offset());

  // NOTE(review): with VerifyAOTCode set the load is abandoned after the
  // streams were fully parsed — presumably a verify-only mode that exercises
  // the read path without installing code; confirm intent.
  if (VerifyAOTCode) {
    return false;
  }

  TraceTime t1("Total time to register AOT nmethod", &_t_totalRegister, enable_timers(), false);
  nm = env->register_aot_method(THREAD,
                                target,
                                compiler,
                                archived_nm,
                                reloc_data,
                                oop_list,
                                metadata_list,
                                oopmaps,
                                immutable_data,
                                reloc_immediate_oop_list,
                                reloc_immediate_metadata_list,
                                this);
  // Success is reported through the task, not the returned nmethod pointer.
  bool success = task->is_success();
  if (success) {
    log_info(aot, codecache, nmethod)("%d (L%d): Read nmethod '%s' from AOT Code Cache", compile_id(), comp_level(), name);
#ifdef ASSERT
    LogStreamHandle(Debug, aot, codecache, nmethod) log;
    if (log.is_enabled()) {
      FlagSetting fs(PrintRelocations, true);
      nm->print_on(&log);
      nm->decode2(&log);
    }
#endif
  }

  return success;
}
1928
1929 bool skip_preload(methodHandle mh) {
1930 if (!mh->method_holder()->is_loaded()) {
1931 return true;
1932 }
1933 DirectiveSet* directives = DirectivesStack::getMatchingDirective(mh, nullptr);
1934 if (directives->DontPreloadOption) {
1935 LogStreamHandle(Info, aot, codecache, init) log;
1936 if (log.is_enabled()) {
1937 log.print("Exclude preloading code for ");
1938 mh->print_value_on(&log);
1939 }
1940 return true;
1941 }
1942 return false;
1943 }
1944
1945 void AOTCodeCache::preload_code(JavaThread* thread) {
1946 if (!is_using_code()) {
1947 return;
1948 }
1949 if ((DisableAOTCode & (1 << 3)) != 0) {
1950 return; // no preloaded code (level 5);
1951 }
1952 _cache->preload_aot_code(thread);
1953 }
1954
1955 void AOTCodeCache::preload_aot_code(TRAPS) {
1956 if (CompilationPolicy::compiler_count(CompLevel_full_optimization) == 0) {
1957 // Since we reuse the CompilerBroker API to install AOT code, we're required to have a JIT compiler for the
1958 // level we want (that is CompLevel_full_optimization).
1959 return;
1960 }
1961 assert(_for_use, "sanity");
1962 uint count = _load_header->entries_count();
1963 if (_load_entries == nullptr) {
1964 // Read it
1965 _search_entries = (uint*)addr(_load_header->search_table_offset()); // [id, index]
1966 _load_entries = (AOTCodeEntry*)addr(_load_header->entries_offset());
1967 log_info(aot, codecache, init)("Read %d entries table at offset %d from AOT Code Cache", count, _load_header->entries_offset());
1968 }
1969 uint preload_entries_count = _load_header->preload_entries_count();
1970 if (preload_entries_count > 0) {
1971 uint* entries_index = (uint*)addr(_load_header->preload_entries_offset());
1972 log_info(aot, codecache, init)("Load %d preload entries from AOT Code Cache", preload_entries_count);
1973 uint count = MIN2(preload_entries_count, AOTCodeLoadStop);
1974 for (uint i = AOTCodeLoadStart; i < count; i++) {
1975 uint index = entries_index[i];
1976 AOTCodeEntry* entry = &(_load_entries[index]);
1977 if (entry->not_entrant()) {
1978 continue;
1979 }
1980 methodHandle mh(THREAD, entry->method());
1981 assert((mh.not_null() && MetaspaceShared::is_in_shared_metaspace((address)mh())), "sanity");
1982 if (skip_preload(mh)) {
1983 continue; // Exclude preloading for this method
1984 }
1985 assert(mh->method_holder()->is_loaded(), "");
1986 if (!mh->method_holder()->is_linked()) {
1987 assert(!HAS_PENDING_EXCEPTION, "");
1988 mh->method_holder()->link_class(THREAD);
1989 if (HAS_PENDING_EXCEPTION) {
1990 LogStreamHandle(Info, aot, codecache) log;
1991 if (log.is_enabled()) {
1992 ResourceMark rm;
1993 log.print("Linkage failed for %s: ", mh->method_holder()->external_name());
1994 THREAD->pending_exception()->print_value_on(&log);
1995 if (log_is_enabled(Debug, aot, codecache)) {
1996 THREAD->pending_exception()->print_on(&log);
1997 }
1998 }
1999 CLEAR_PENDING_EXCEPTION;
2000 }
2001 }
2002 if (mh->aot_code_entry() != nullptr) {
2003 // Second C2 compilation of the same method could happen for
2004 // different reasons without marking first entry as not entrant.
2005 continue; // Keep old entry to avoid issues
2006 }
2007 mh->set_aot_code_entry(entry);
2008 CompileBroker::compile_method(mh, InvocationEntryBci, CompLevel_full_optimization, 0, false, CompileTask::Reason_Preload, CHECK);
2009 }
2010 }
2011 }
2012
2013 // ------------ process code and data --------------
2014
// Can't use -1. It is a valid value for a jump-to-itself destination
2016 // used by static call stub: see NativeJump::jump_destination().
2017 #define BAD_ADDRESS_ID -2
2018
2019 bool AOTCodeCache::write_relocations(CodeBlob& code_blob, GrowableArray<Handle>* oop_list, GrowableArray<Metadata*>* metadata_list) {
2020 GrowableArray<uint> reloc_data;
2021 RelocIterator iter(&code_blob);
2022 LogStreamHandle(Trace, aot, codecache, reloc) log;
2023 while (iter.next()) {
2024 int idx = reloc_data.append(0); // default value
2025 switch (iter.type()) {
2026 case relocInfo::none:
2027 break;
2028 case relocInfo::oop_type: {
2029 oop_Relocation* r = (oop_Relocation*)iter.reloc();
2030 if (r->oop_is_immediate()) {
2031 assert(oop_list != nullptr, "sanity check");
2032 // store index of oop in the reloc immediate oop list
2033 Handle h(JavaThread::current(), r->oop_value());
2034 int oop_idx = oop_list->find(h);
2035 assert(oop_idx != -1, "sanity check");
2036 reloc_data.at_put(idx, (uint)oop_idx);
2037 }
2038 break;
2039 }
2040 case relocInfo::metadata_type: {
2041 metadata_Relocation* r = (metadata_Relocation*)iter.reloc();
2042 if (r->metadata_is_immediate()) {
2043 assert(metadata_list != nullptr, "sanity check");
2044 // store index of metadata in the reloc immediate metadata list
2045 int metadata_idx = metadata_list->find(r->metadata_value());
2046 assert(metadata_idx != -1, "sanity check");
2047 reloc_data.at_put(idx, (uint)metadata_idx);
2048 }
2049 break;
2050 }
2051 case relocInfo::virtual_call_type: // Fall through. They all call resolve_*_call blobs.
2052 case relocInfo::opt_virtual_call_type:
2053 case relocInfo::static_call_type: {
2054 CallRelocation* r = (CallRelocation*)iter.reloc();
2055 address dest = r->destination();
2056 if (dest == r->addr()) { // possible call via trampoline on Aarch64
2057 dest = (address)-1; // do nothing in this case when loading this relocation
2058 }
2059 int id = _table->id_for_address(dest, iter, &code_blob);
2060 if (id == BAD_ADDRESS_ID) {
2061 return false;
2062 }
2063 reloc_data.at_put(idx, id);
2064 break;
2065 }
2066 case relocInfo::trampoline_stub_type: {
2067 address dest = ((trampoline_stub_Relocation*)iter.reloc())->destination();
2068 int id = _table->id_for_address(dest, iter, &code_blob);
2069 if (id == BAD_ADDRESS_ID) {
2070 return false;
2071 }
2072 reloc_data.at_put(idx, id);
2073 break;
2074 }
2075 case relocInfo::static_stub_type:
2076 break;
2077 case relocInfo::runtime_call_type: {
2078 // Record offset of runtime destination
2079 CallRelocation* r = (CallRelocation*)iter.reloc();
2080 address dest = r->destination();
2081 if (dest == r->addr()) { // possible call via trampoline on Aarch64
2082 dest = (address)-1; // do nothing in this case when loading this relocation
2083 }
2084 int id = _table->id_for_address(dest, iter, &code_blob);
2085 if (id == BAD_ADDRESS_ID) {
2086 return false;
2087 }
2088 reloc_data.at_put(idx, id);
2089 break;
2090 }
2091 case relocInfo::runtime_call_w_cp_type:
2092 log_debug(aot, codecache, reloc)("runtime_call_w_cp_type relocation is not implemented");
2093 return false;
2094 case relocInfo::external_word_type: {
2095 // Record offset of runtime target
2096 address target = ((external_word_Relocation*)iter.reloc())->target();
2097 int id = _table->id_for_address(target, iter, &code_blob);
2098 if (id == BAD_ADDRESS_ID) {
2099 return false;
2100 }
2101 reloc_data.at_put(idx, id);
2102 break;
2103 }
2104 case relocInfo::internal_word_type:
2105 break;
2106 case relocInfo::section_word_type:
2107 break;
2108 case relocInfo::poll_type:
2109 break;
2110 case relocInfo::poll_return_type:
2111 break;
2112 case relocInfo::post_call_nop_type:
2113 break;
2114 case relocInfo::entry_guard_type:
2115 break;
2116 default:
2117 log_debug(aot, codecache, reloc)("relocation %d unimplemented", (int)iter.type());
2118 return false;
2119 break;
2120 }
2121 if (log.is_enabled()) {
2122 iter.print_current_on(&log);
2123 }
2124 }
2125
2126 // Write additional relocation data: uint per relocation
2127 // Write the count first
2128 int count = reloc_data.length();
2129 write_bytes(&count, sizeof(int));
2130 for (GrowableArrayIterator<uint> iter = reloc_data.begin();
2131 iter != reloc_data.end(); ++iter) {
2132 uint value = *iter;
2133 int n = write_bytes(&value, sizeof(uint));
2134 if (n != sizeof(uint)) {
2135 return false;
2136 }
2137 }
2138 return true;
2139 }
2140
// Patch the relocations of a freshly loaded code blob using the per-relocation
// uint stream recorded by write_relocations. The relocations are visited in
// the same iteration order as on the write side, consuming one reloc_data
// element per relocation (index j); the trailing assert checks both sides
// visited the same number of relocations.
void AOTCodeReader::fix_relocations(CodeBlob* code_blob, GrowableArray<Handle>* oop_list, GrowableArray<Metadata*>* metadata_list) {
  LogStreamHandle(Trace, aot, reloc) log;
  uint offset = read_position();
  // The stream starts with the relocation count, then one uint per relocation.
  int count = *(int*)addr(offset);
  offset += sizeof(int);
  if (log.is_enabled()) {
    log.print_cr("======== extra relocations count=%d", count);
  }
  uint* reloc_data = (uint*)addr(offset);
  offset += (count * sizeof(uint));
  set_read_position(offset);

  RelocIterator iter(code_blob);
  int j = 0;
  while (iter.next()) {
    switch (iter.type()) {
      case relocInfo::none:
        break;
      case relocInfo::oop_type: {
        assert(code_blob->is_nmethod(), "sanity check");
        oop_Relocation* r = (oop_Relocation*)iter.reloc();
        if (r->oop_is_immediate()) {
          // reloc_data[j] is an index into the reloc immediate oop list.
          assert(oop_list != nullptr, "sanity check");
          Handle h = oop_list->at(reloc_data[j]);
          r->set_value(cast_from_oop<address>(h()));
        } else {
          r->fix_oop_relocation();
        }
        break;
      }
      case relocInfo::metadata_type: {
        assert(code_blob->is_nmethod(), "sanity check");
        metadata_Relocation* r = (metadata_Relocation*)iter.reloc();
        Metadata* m;
        if (r->metadata_is_immediate()) {
          // reloc_data[j] is an index into the reloc immediate metadata list.
          assert(metadata_list != nullptr, "sanity check");
          m = metadata_list->at(reloc_data[j]);
        } else {
          // Get already updated value from nmethod.
          int index = r->metadata_index();
          m = code_blob->as_nmethod()->metadata_at(index);
        }
        r->set_value((address)m);
        break;
      }
      case relocInfo::virtual_call_type: // Fall through. They all call resolve_*_call blobs.
      case relocInfo::opt_virtual_call_type:
      case relocInfo::static_call_type: {
        // reloc_data[j] is an address id; -1 means "leave as is" (trampoline).
        address dest = _cache->address_for_id(reloc_data[j]);
        if (dest != (address)-1) {
          ((CallRelocation*)iter.reloc())->set_destination(dest);
        }
        break;
      }
      case relocInfo::trampoline_stub_type: {
        address dest = _cache->address_for_id(reloc_data[j]);
        if (dest != (address)-1) {
          ((trampoline_stub_Relocation*)iter.reloc())->set_destination(dest);
        }
        break;
      }
      case relocInfo::static_stub_type:
        break;
      case relocInfo::runtime_call_type: {
        address dest = _cache->address_for_id(reloc_data[j]);
        if (dest != (address)-1) {
          ((CallRelocation*)iter.reloc())->set_destination(dest);
        }
        break;
      }
      case relocInfo::runtime_call_w_cp_type:
        // this relocation should not be in cache (see write_relocations)
        assert(false, "runtime_call_w_cp_type relocation is not implemented");
        break;
      case relocInfo::external_word_type: {
        address target = _cache->address_for_id(reloc_data[j]);
        // Add external address to global table
        int index = ExternalsRecorder::find_index(target);
        // Update index in relocation
        Relocation::add_jint(iter.data(), index);
        external_word_Relocation* reloc = (external_word_Relocation*)iter.reloc();
        assert(reloc->target() == target, "sanity");
        reloc->set_value(target); // Patch address in the code
        break;
      }
      case relocInfo::internal_word_type: {
        // Rebase against the blob's new content start address.
        internal_word_Relocation* r = (internal_word_Relocation*)iter.reloc();
        r->fix_relocation_after_aot_load(aot_code_entry()->dumptime_content_start_addr(), code_blob->content_begin());
        break;
      }
      case relocInfo::section_word_type: {
        section_word_Relocation* r = (section_word_Relocation*)iter.reloc();
        r->fix_relocation_after_aot_load(aot_code_entry()->dumptime_content_start_addr(), code_blob->content_begin());
        break;
      }
      case relocInfo::poll_type:
        break;
      case relocInfo::poll_return_type:
        break;
      case relocInfo::post_call_nop_type:
        break;
      case relocInfo::entry_guard_type:
        break;
      default:
        assert(false,"relocation %d unimplemented", (int)iter.type());
        break;
    }
    if (log.is_enabled()) {
      iter.print_current_on(&log);
    }
    j++;
  }
  assert(j == count, "sanity");
}
2255
2256 bool AOTCodeCache::write_nmethod_reloc_immediates(GrowableArray<Handle>& oop_list, GrowableArray<Metadata*>& metadata_list) {
2257 int count = oop_list.length();
2258 if (!write_bytes(&count, sizeof(int))) {
2259 return false;
2260 }
2261 for (GrowableArrayIterator<Handle> iter = oop_list.begin();
2262 iter != oop_list.end(); ++iter) {
2263 Handle h = *iter;
2264 if (!write_oop(h())) {
2265 return false;
2266 }
2267 }
2268
2269 count = metadata_list.length();
2270 if (!write_bytes(&count, sizeof(int))) {
2271 return false;
2272 }
2273 for (GrowableArrayIterator<Metadata*> iter = metadata_list.begin();
2274 iter != metadata_list.end(); ++iter) {
2275 Metadata* m = *iter;
2276 if (!write_metadata(m)) {
2277 return false;
2278 }
2279 }
2280 return true;
2281 }
2282
// Write all metadata entries of the nmethod, preceded by their count.
bool AOTCodeCache::write_metadata(nmethod* nm) {
  // NOTE(review): the recorded count is metadata_count()-1 while the loop
  // below writes every [metadata_begin, metadata_end) slot — presumably
  // metadata_count() includes one extra sentinel slot; confirm the reader
  // uses the same convention.
  int count = nm->metadata_count()-1;
  if (!write_bytes(&count, sizeof(int))) {
    return false;
  }
  for (Metadata** p = nm->metadata_begin(); p < nm->metadata_end(); p++) {
    if (!write_metadata(*p)) {
      return false;
    }
  }
  return true;
}
2295
// Serialize one metadata reference, tagged with a DataKind so the reader
// (AOTCodeReader::read_metadata) can dispatch: Null, No_Data (non-oop word),
// Klass, Method, or MethodCnts (stored as the owning Method). Unsupported
// metadata kinds are fatal. The DataKind tag is written as sizeof(int) bytes.
bool AOTCodeCache::write_metadata(Metadata* m) {
  uint n = 0;
  if (m == nullptr) {
    DataKind kind = DataKind::Null;
    n = write_bytes(&kind, sizeof(int));
    if (n != sizeof(int)) {
      return false;
    }
  } else if (m == (Metadata*)Universe::non_oop_word()) {
    // Placeholder value used by compiled code; has no archived payload.
    DataKind kind = DataKind::No_Data;
    n = write_bytes(&kind, sizeof(int));
    if (n != sizeof(int)) {
      return false;
    }
  } else if (m->is_klass()) {
    // write_klass emits its own DataKind::Klass tag.
    if (!write_klass((Klass*)m)) {
      return false;
    }
  } else if (m->is_method()) {
    // write_method emits its own DataKind::Method tag.
    if (!write_method((Method*)m)) {
      return false;
    }
  } else if (m->is_methodCounters()) {
    // MethodCounters are stored as their owning Method; the reader
    // recreates the counters on load.
    DataKind kind = DataKind::MethodCnts;
    n = write_bytes(&kind, sizeof(int));
    if (n != sizeof(int)) {
      return false;
    }
    if (!write_method(((MethodCounters*)m)->method())) {
      return false;
    }
    log_debug(aot, codecache, metadata)("%d (L%d): Write MethodCounters : " INTPTR_FORMAT, compile_id(), comp_level(), p2i(m));
  } else { // Not supported
    // fatal() does not return; the return below is defensive only.
    fatal("metadata : " INTPTR_FORMAT " unimplemented", p2i(m));
    return false;
  }
  return true;
}
2334
// Read one metadata reference tagged with a DataKind, mirroring
// AOTCodeCache::write_metadata(Metadata*). Returns nullptr and marks
// lookup_failed on unknown kinds or failed lookups.
Metadata* AOTCodeReader::read_metadata(const methodHandle& comp_method) {
  uint code_offset = read_position();
  Metadata* m = nullptr;
  DataKind kind = *(DataKind*)addr(code_offset);
  code_offset += sizeof(DataKind);
  set_read_position(code_offset);
  if (kind == DataKind::Null) {
    m = (Metadata*)nullptr;
  } else if (kind == DataKind::No_Data) {
    m = (Metadata*)Universe::non_oop_word();
  } else if (kind == DataKind::Klass) {
    m = (Metadata*)read_klass(comp_method);
  } else if (kind == DataKind::Method) {
    m = (Metadata*)read_method(comp_method);
  } else if (kind == DataKind::MethodCnts) {
    // MethodCounters were stored as their owning Method: skip the nested
    // DataKind tag emitted by write_method, then read the method offset.
    kind = *(DataKind*)addr(code_offset);
    code_offset += sizeof(DataKind);
    set_read_position(code_offset);
    m = (Metadata*)read_method(comp_method);
    if (m != nullptr) {
      Method* method = (Method*)m;
      // Recreate the counters in this VM; may fail (returns nullptr).
      m = method->get_method_counters(Thread::current());
      if (m == nullptr) {
        set_lookup_failed();
        log_debug(aot, codecache, metadata)("%d (L%d): Failed to get MethodCounters", compile_id(), comp_level());
      } else {
        log_debug(aot, codecache, metadata)("%d (L%d): Read MethodCounters : " INTPTR_FORMAT, compile_id(), comp_level(), p2i(m));
      }
    }
  } else {
    set_lookup_failed();
    log_debug(aot, codecache, metadata)("%d (L%d): Unknown metadata's kind: %d", compile_id(), comp_level(), (int)kind);
  }
  return m;
}
2370
2371 bool AOTCodeCache::write_method(Method* method) {
2372 ResourceMark rm; // To method's name printing
2373 if (AOTCacheAccess::can_generate_aot_code(method)) {
2374 DataKind kind = DataKind::Method;
2375 uint n = write_bytes(&kind, sizeof(int));
2376 if (n != sizeof(int)) {
2377 return false;
2378 }
2379 uint method_offset = AOTCacheAccess::delta_from_base_address((address)method);
2380 n = write_bytes(&method_offset, sizeof(uint));
2381 if (n != sizeof(uint)) {
2382 return false;
2383 }
2384 log_debug(aot, codecache, metadata)("%d (L%d): Wrote method: %s @ 0x%08x",
2385 compile_id(), comp_level(), method->name_and_sig_as_C_string(), method_offset);
2386 return true;
2387 }
2388 log_debug(aot, codecache, metadata)("%d (L%d): Method is not archived: %s",
2389 compile_id(), comp_level(), method->name_and_sig_as_C_string());
2390 set_lookup_failed();
2391 return false;
2392 }
2393
// Read a Method* reference written by write_method (an offset from the AOT
// cache base address) and validate that it is still usable in this VM
// instance: the method lives in the CDS metaspace and its holder is an
// instance klass in CDS that is loaded and linked. Returns nullptr and
// marks lookup_failed otherwise.
Method* AOTCodeReader::read_method(const methodHandle& comp_method) {
  uint code_offset = read_position();
  uint method_offset = *(uint*)addr(code_offset);
  code_offset += sizeof(uint);
  set_read_position(code_offset);
  Method* m = AOTCacheAccess::convert_offset_to_method(method_offset);
  if (!MetaspaceShared::is_in_shared_metaspace((address)m)) {
    // Something changed in CDS
    set_lookup_failed();
    log_debug(aot, codecache, metadata)("Lookup failed for shared method: " INTPTR_FORMAT " is not in CDS ", p2i((address)m));
    return nullptr;
  }
  assert(m->is_method(), "sanity");
  ResourceMark rm;
  Klass* k = m->method_holder();
  if (!k->is_instance_klass()) {
    set_lookup_failed();
    log_debug(aot, codecache, metadata)("%d '%s' (L%d): Lookup failed for holder %s: not instance klass",
                  compile_id(), comp_method->name_and_sig_as_C_string(), comp_level(), k->external_name());
    return nullptr;
  } else if (!MetaspaceShared::is_in_shared_metaspace((address)k)) {
    set_lookup_failed();
    log_debug(aot, codecache, metadata)("%d '%s' (L%d): Lookup failed for holder %s: not in CDS",
                  compile_id(), comp_method->name_and_sig_as_C_string(), comp_level(), k->external_name());
    return nullptr;
  } else if (!InstanceKlass::cast(k)->is_loaded()) {
    set_lookup_failed();
    log_debug(aot, codecache, metadata)("%d '%s' (L%d): Lookup failed for holder %s: not loaded",
                  compile_id(), comp_method->name_and_sig_as_C_string(), comp_level(), k->external_name());
    return nullptr;
  } else if (!InstanceKlass::cast(k)->is_linked()) {
    set_lookup_failed();
    log_debug(aot, codecache, metadata)("%d '%s' (L%d): Lookup failed for holder %s: not linked%s",
                  compile_id(), comp_method->name_and_sig_as_C_string(), comp_level(), k->external_name(), (_preload ? " for code preload" : ""));
    return nullptr;
  }
  log_debug(aot, codecache, metadata)("%d (L%d): Shared method lookup: %s",
                compile_id(), comp_level(), m->name_and_sig_as_C_string());
  return m;
}
2434
2435 bool AOTCodeCache::write_klass(Klass* klass) {
2436 uint array_dim = 0;
2437 if (klass->is_objArray_klass()) {
2438 array_dim = ObjArrayKlass::cast(klass)->dimension();
2439 klass = ObjArrayKlass::cast(klass)->bottom_klass(); // overwrites klass
2440 }
2441 uint init_state = 0;
2442 bool can_write = true;
2443 if (klass->is_instance_klass()) {
2444 InstanceKlass* ik = InstanceKlass::cast(klass);
2445 init_state = (ik->is_initialized() ? 1 : 0);
2446 can_write = AOTCacheAccess::can_generate_aot_code_for(ik);
2447 } else {
2448 can_write = AOTCacheAccess::can_generate_aot_code(klass);
2449 }
2450 ResourceMark rm;
2451 uint state = (array_dim << 1) | (init_state & 1);
2452 if (can_write) {
2453 DataKind kind = DataKind::Klass;
2454 uint n = write_bytes(&kind, sizeof(int));
2455 if (n != sizeof(int)) {
2456 return false;
2457 }
2458 // Record state of instance klass initialization and array dimentions.
2459 n = write_bytes(&state, sizeof(int));
2460 if (n != sizeof(int)) {
2461 return false;
2462 }
2463 uint klass_offset = AOTCacheAccess::delta_from_base_address((address)klass);
2464 n = write_bytes(&klass_offset, sizeof(uint));
2465 if (n != sizeof(uint)) {
2466 return false;
2467 }
2468 log_debug(aot, codecache, metadata)("%d (L%d): Registered klass: %s%s%s @ 0x%08x",
2469 compile_id(), comp_level(), klass->external_name(),
2470 (!klass->is_instance_klass() ? "" : (init_state == 1 ? " (initialized)" : " (not-initialized)")),
2471 (array_dim > 0 ? " (object array)" : ""), klass_offset);
2472 return true;
2473 }
2474 log_debug(aot, codecache, metadata)("%d (L%d): Klassis not archived: %s%s%s",
2475 compile_id(), comp_level(), klass->external_name(),
2476 (!klass->is_instance_klass() ? "" : (init_state == 1 ? " (initialized)" : " (not-initialized)")),
2477 (array_dim > 0 ? " (object array)" : ""));
2478 set_lookup_failed();
2479 return false;
2480 }
2481
2482 Klass* AOTCodeReader::read_klass(const methodHandle& comp_method) {
2483 uint code_offset = read_position();
2484 uint state = *(uint*)addr(code_offset);
2485 uint init_state = (state & 1);
2486 uint array_dim = (state >> 1);
2487 code_offset += sizeof(int);
2488 uint klass_offset = *(uint*)addr(code_offset);
2489 code_offset += sizeof(uint);
2490 set_read_position(code_offset);
2491 Klass* k = AOTCacheAccess::convert_offset_to_klass(klass_offset);
2492 if (!MetaspaceShared::is_in_shared_metaspace((address)k)) {
2493 // Something changed in CDS
2494 set_lookup_failed();
2495 log_debug(aot, codecache, metadata)("Lookup failed for shared klass: " INTPTR_FORMAT " is not in CDS ", p2i((address)k));
2496 return nullptr;
2497 }
2498 assert(k->is_klass(), "sanity");
2499 ResourceMark rm;
2500 if (k->is_instance_klass() && !InstanceKlass::cast(k)->is_loaded()) {
2501 set_lookup_failed();
2502 log_debug(aot, codecache, metadata)("%d '%s' (L%d): Lookup failed for klass %s: not loaded",
2503 compile_id(), comp_method->name_and_sig_as_C_string(), comp_level(), k->external_name());
2504 return nullptr;
2505 } else
2506 // Allow not initialized klass which was uninitialized during code caching or for preload
2507 if (k->is_instance_klass() && !InstanceKlass::cast(k)->is_initialized() && (init_state == 1) && !_preload) {
2508 set_lookup_failed();
2509 log_debug(aot, codecache, metadata)("%d '%s' (L%d): Lookup failed for klass %s: not initialized",
2510 compile_id(), comp_method->name_and_sig_as_C_string(), comp_level(), k->external_name());
2511 return nullptr;
2512 }
2513 if (array_dim > 0) {
2514 assert(k->is_instance_klass() || k->is_typeArray_klass(), "sanity check");
2515 Klass* ak = k->array_klass_or_null(array_dim);
2516 // FIXME: what would it take to create an array class on the fly?
2517 // Klass* ak = k->array_klass(dim, JavaThread::current());
2518 // guarantee(JavaThread::current()->pending_exception() == nullptr, "");
2519 if (ak == nullptr) {
2520 set_lookup_failed();
2521 log_debug(aot, codecache, metadata)("%d (L%d): %d-dimension array klass lookup failed: %s",
2522 compile_id(), comp_level(), array_dim, k->external_name());
2523 }
2524 log_debug(aot, codecache, metadata)("%d (L%d): Klass lookup: %s (object array)", compile_id(), comp_level(), k->external_name());
2525 return ak;
2526 } else {
2527 log_debug(aot, codecache, metadata)("%d (L%d): Shared klass lookup: %s",
2528 compile_id(), comp_level(), k->external_name());
2529 return k;
2530 }
2531 }
2532
2533 bool AOTCodeCache::write_oop(jobject& jo) {
2534 oop obj = JNIHandles::resolve(jo);
2535 return write_oop(obj);
2536 }
2537
// Serialize one oop reference, tagged with a DataKind so read_oop can
// dispatch: Null, No_Data (non-oop word), Primitive/Klass (class mirrors),
// String and MH_Oop (via archived-object permanent index), SysLoader and
// PlaLoader (the two well-known class loaders). Any other object must be an
// archived "permanent heap object"; otherwise the write fails and
// lookup_failed is set. Module oops are not supported (fatal).
bool AOTCodeCache::write_oop(oop obj) {
  DataKind kind;
  uint n = 0;
  if (obj == nullptr) {
    kind = DataKind::Null;
    n = write_bytes(&kind, sizeof(int));
    if (n != sizeof(int)) {
      return false;
    }
  } else if (cast_from_oop<void *>(obj) == Universe::non_oop_word()) {
    // Placeholder value used by compiled code; has no archived payload.
    kind = DataKind::No_Data;
    n = write_bytes(&kind, sizeof(int));
    if (n != sizeof(int)) {
      return false;
    }
  } else if (java_lang_Class::is_instance(obj)) {
    if (java_lang_Class::is_primitive(obj)) {
      // Primitive mirrors are stored by their BasicType.
      int bt = (int)java_lang_Class::primitive_type(obj);
      kind = DataKind::Primitive;
      n = write_bytes(&kind, sizeof(int));
      if (n != sizeof(int)) {
        return false;
      }
      n = write_bytes(&bt, sizeof(int));
      if (n != sizeof(int)) {
        return false;
      }
      log_debug(aot, codecache, oops)("%d (L%d): Write primitive type klass: %s", compile_id(), comp_level(), type2name((BasicType)bt));
    } else {
      // Class mirrors are stored as their Klass; read_oop restores the mirror.
      Klass* klass = java_lang_Class::as_Klass(obj);
      if (!write_klass(klass)) {
        return false;
      }
    }
  } else if (java_lang_String::is_instance(obj)) {
    int k = AOTCacheAccess::get_archived_object_permanent_index(obj);  // k >= 0 means obj is a "permanent heap object"
    ResourceMark rm;
    size_t length_sz = 0;
    const char* string = java_lang_String::as_utf8_string(obj, length_sz);
    if (k >= 0) {
      kind = DataKind::String;
      n = write_bytes(&kind, sizeof(int));
      if (n != sizeof(int)) {
        return false;
      }
      n = write_bytes(&k, sizeof(int));
      if (n != sizeof(int)) {
        return false;
      }
      log_debug(aot, codecache, oops)("%d (L%d): Write String object: " PTR_FORMAT " : %s", compile_id(), comp_level(), p2i(obj), string);
      return true;
    }
    // Not archived String object - bailout
    set_lookup_failed();
    log_debug(aot, codecache, oops)("%d (L%d): Not archived String object: " PTR_FORMAT " : %s",
                  compile_id(), comp_level(), p2i(obj), string);
    return false;
  } else if (java_lang_Module::is_instance(obj)) {
    fatal("Module object unimplemented");
  } else if (java_lang_ClassLoader::is_instance(obj)) {
    // Only the two well-known loaders can be stored; they are recreated by
    // identity on the read side.
    if (obj == SystemDictionary::java_system_loader()) {
      kind = DataKind::SysLoader;
      log_debug(aot, codecache, oops)("%d (L%d): Write ClassLoader: java_system_loader", compile_id(), comp_level());
    } else if (obj == SystemDictionary::java_platform_loader()) {
      kind = DataKind::PlaLoader;
      log_debug(aot, codecache, oops)("%d (L%d): Write ClassLoader: java_platform_loader", compile_id(), comp_level());
    } else {
      ResourceMark rm;
      set_lookup_failed();
      log_debug(aot, codecache, oops)("%d (L%d): Not supported Class Loader: " PTR_FORMAT " : %s",
                    compile_id(), comp_level(), p2i(obj), obj->klass()->external_name());
      return false;
    }
    n = write_bytes(&kind, sizeof(int));
    if (n != sizeof(int)) {
      return false;
    }
  } else {
    // Any other object (e.g. MethodHandle-related) must be archived.
    ResourceMark rm;
    int k = AOTCacheAccess::get_archived_object_permanent_index(obj);  // k >= 0 means obj is a "permanent heap object"
    if (k >= 0) {
      kind = DataKind::MH_Oop;
      n = write_bytes(&kind, sizeof(int));
      if (n != sizeof(int)) {
        return false;
      }
      n = write_bytes(&k, sizeof(int));
      if (n != sizeof(int)) {
        return false;
      }
      log_debug(aot, codecache, oops)("%d (L%d): Write MH object: " PTR_FORMAT " : %s",
                    compile_id(), comp_level(), p2i(obj), obj->klass()->external_name());
      return true;
    }
    // Not archived Java object - bailout
    set_lookup_failed();
    log_debug(aot, codecache, oops)("%d (L%d): Not archived Java object: " PTR_FORMAT " : %s",
                  compile_id(), comp_level(), p2i(obj), obj->klass()->external_name());
    return false;
  }
  return true;
}
2640
2641 oop AOTCodeReader::read_oop(JavaThread* thread, const methodHandle& comp_method) {
2642 uint code_offset = read_position();
2643 oop obj = nullptr;
2644 DataKind kind = *(DataKind*)addr(code_offset);
2645 code_offset += sizeof(DataKind);
2646 set_read_position(code_offset);
2647 if (kind == DataKind::Null) {
2648 return nullptr;
2649 } else if (kind == DataKind::No_Data) {
2650 return cast_to_oop(Universe::non_oop_word());
2651 } else if (kind == DataKind::Klass) {
2652 Klass* k = read_klass(comp_method);
2653 if (k == nullptr) {
2654 return nullptr;
2655 }
2656 obj = k->java_mirror();
2657 if (obj == nullptr) {
2658 set_lookup_failed();
2659 log_debug(aot, codecache, oops)("Lookup failed for java_mirror of klass %s", k->external_name());
2660 return nullptr;
2661 }
2662 } else if (kind == DataKind::Primitive) {
2663 code_offset = read_position();
2664 int t = *(int*)addr(code_offset);
2665 code_offset += sizeof(int);
2666 set_read_position(code_offset);
2667 BasicType bt = (BasicType)t;
2668 obj = java_lang_Class::primitive_mirror(bt);
2669 log_debug(aot, codecache, oops)("%d (L%d): Read primitive type klass: %s", compile_id(), comp_level(), type2name(bt));
2670 } else if (kind == DataKind::String) {
2671 code_offset = read_position();
2672 int k = *(int*)addr(code_offset);
2673 code_offset += sizeof(int);
2674 set_read_position(code_offset);
2675 obj = AOTCacheAccess::get_archived_object(k);
2676 if (obj == nullptr) {
2677 set_lookup_failed();
2678 log_debug(aot, codecache, oops)("Lookup failed for String object");
2679 return nullptr;
2680 }
2681 assert(java_lang_String::is_instance(obj), "must be string");
2682
2683 ResourceMark rm;
2684 size_t length_sz = 0;
2685 const char* string = java_lang_String::as_utf8_string(obj, length_sz);
2686 log_debug(aot, codecache, oops)("%d (L%d): Read String object: %s", compile_id(), comp_level(), string);
2687 } else if (kind == DataKind::SysLoader) {
2688 obj = SystemDictionary::java_system_loader();
2689 log_debug(aot, codecache, oops)("%d (L%d): Read java_system_loader", compile_id(), comp_level());
2690 } else if (kind == DataKind::PlaLoader) {
2691 obj = SystemDictionary::java_platform_loader();
2692 log_debug(aot, codecache, oops)("%d (L%d): Read java_platform_loader", compile_id(), comp_level());
2693 } else if (kind == DataKind::MH_Oop) {
2694 code_offset = read_position();
2695 int k = *(int*)addr(code_offset);
2696 code_offset += sizeof(int);
2697 set_read_position(code_offset);
2698 obj = AOTCacheAccess::get_archived_object(k);
2699 if (obj == nullptr) {
2700 set_lookup_failed();
2701 log_debug(aot, codecache, oops)("Lookup failed for MH object");
2702 return nullptr;
2703 }
2704 ResourceMark rm;
2705 log_debug(aot, codecache, oops)("%d (L%d): Read MH object: " PTR_FORMAT " : %s",
2706 compile_id(), comp_level(), p2i(obj), obj->klass()->external_name());
2707 } else {
2708 set_lookup_failed();
2709 log_debug(aot, codecache, oops)("%d (L%d): Unknown oop's kind: %d",
2710 compile_id(), comp_level(), (int)kind);
2711 return nullptr;
2712 }
2713 return obj;
2714 }
2715
2716 bool AOTCodeReader::read_oop_metadata_list(JavaThread* thread, ciMethod* target, GrowableArray<Handle> &oop_list, GrowableArray<Metadata*> &metadata_list, OopRecorder* oop_recorder) {
2717 methodHandle comp_method(JavaThread::current(), target->get_Method());
2718 JavaThread* current = JavaThread::current();
2719 uint offset = read_position();
2720 int count = *(int *)addr(offset);
2721 offset += sizeof(int);
2722 set_read_position(offset);
2723 for (int i = 0; i < count; i++) {
2724 oop obj = read_oop(current, comp_method);
2725 if (lookup_failed()) {
2726 return false;
2727 }
2728 Handle h(thread, obj);
2729 oop_list.append(h);
2730 if (oop_recorder != nullptr) {
2731 jobject jo = JNIHandles::make_local(thread, obj);
2732 if (oop_recorder->is_real(jo)) {
2733 oop_recorder->find_index(jo);
2734 } else {
2735 oop_recorder->allocate_oop_index(jo);
2736 }
2737 }
2738 LogStreamHandle(Debug, aot, codecache, oops) log;
2739 if (log.is_enabled()) {
2740 log.print("%d: " INTPTR_FORMAT " ", i, p2i(obj));
2741 if (obj == Universe::non_oop_word()) {
2742 log.print("non-oop word");
2743 } else if (obj == nullptr) {
2744 log.print("nullptr-oop");
2745 } else {
2746 obj->print_value_on(&log);
2747 }
2748 log.cr();
2749 }
2750 }
2751
2752 offset = read_position();
2753 count = *(int *)addr(offset);
2754 offset += sizeof(int);
2755 set_read_position(offset);
2756 for (int i = 0; i < count; i++) {
2757 Metadata* m = read_metadata(comp_method);
2758 if (lookup_failed()) {
2759 return false;
2760 }
2761 metadata_list.append(m);
2762 if (oop_recorder != nullptr) {
2763 if (oop_recorder->is_real(m)) {
2764 oop_recorder->find_index(m);
2765 } else {
2766 oop_recorder->allocate_metadata_index(m);
2767 }
2768 }
2769 LogTarget(Debug, aot, codecache, metadata) log;
2770 if (log.is_enabled()) {
2771 LogStream ls(log);
2772 ls.print("%d: " INTPTR_FORMAT " ", i, p2i(m));
2773 if (m == (Metadata*)Universe::non_oop_word()) {
2774 ls.print("non-metadata word");
2775 } else if (m == nullptr) {
2776 ls.print("nullptr-oop");
2777 } else {
2778 Metadata::print_value_on_maybe_null(&ls, m);
2779 }
2780 ls.cr();
2781 }
2782 }
2783 return true;
2784 }
2785
2786 bool AOTCodeCache::write_oop_map_set(CodeBlob& cb) {
2787 ImmutableOopMapSet* oopmaps = cb.oop_maps();
2788 int oopmaps_size = oopmaps->nr_of_bytes();
2789 if (!write_bytes(&oopmaps_size, sizeof(int))) {
2790 return false;
2791 }
2792 uint n = write_bytes(oopmaps, oopmaps->nr_of_bytes());
2793 if (n != (uint)oopmaps->nr_of_bytes()) {
2794 return false;
2795 }
2796 return true;
2797 }
2798
2799 ImmutableOopMapSet* AOTCodeReader::read_oop_map_set() {
2800 uint offset = read_position();
2801 int size = *(int *)addr(offset);
2802 offset += sizeof(int);
2803 ImmutableOopMapSet* oopmaps = (ImmutableOopMapSet *)addr(offset);
2804 offset += size;
2805 set_read_position(offset);
2806 return oopmaps;
2807 }
2808
2809 bool AOTCodeCache::write_oops(nmethod* nm) {
2810 int count = nm->oops_count()-1;
2811 if (!write_bytes(&count, sizeof(int))) {
2812 return false;
2813 }
2814 for (oop* p = nm->oops_begin(); p < nm->oops_end(); p++) {
2815 if (!write_oop(*p)) {
2816 return false;
2817 }
2818 }
2819 return true;
2820 }
2821
2822 #ifndef PRODUCT
2823 bool AOTCodeCache::write_asm_remarks(AsmRemarks& asm_remarks, bool use_string_table) {
2824 // Write asm remarks
2825 uint* count_ptr = (uint *)reserve_bytes(sizeof(uint));
2826 if (count_ptr == nullptr) {
2827 return false;
2828 }
2829 uint count = 0;
2830 bool result = asm_remarks.iterate([&] (uint offset, const char* str) -> bool {
2831 log_trace(aot, codecache, stubs)("asm remark offset=%d, str='%s'", offset, str);
2832 uint n = write_bytes(&offset, sizeof(uint));
2833 if (n != sizeof(uint)) {
2834 return false;
2835 }
2836 if (use_string_table) {
2837 const char* cstr = add_C_string(str);
2838 int id = _table->id_for_C_string((address)cstr);
2839 assert(id != -1, "asm remark string '%s' not found in AOTCodeAddressTable", str);
2840 n = write_bytes(&id, sizeof(int));
2841 if (n != sizeof(int)) {
2842 return false;
2843 }
2844 } else {
2845 n = write_bytes(str, (uint)strlen(str) + 1);
2846 if (n != strlen(str) + 1) {
2847 return false;
2848 }
2849 }
2850 count += 1;
2851 return true;
2852 });
2853 *count_ptr = count;
2854 return result;
2855 }
2856
2857 void AOTCodeReader::read_asm_remarks(AsmRemarks& asm_remarks, bool use_string_table) {
2858 // Read asm remarks
2859 uint offset = read_position();
2860 uint count = *(uint *)addr(offset);
2861 offset += sizeof(uint);
2862 for (uint i = 0; i < count; i++) {
2863 uint remark_offset = *(uint *)addr(offset);
2864 offset += sizeof(uint);
2865 const char* remark = nullptr;
2866 if (use_string_table) {
2867 int remark_string_id = *(uint *)addr(offset);
2868 offset += sizeof(int);
2869 remark = (const char*)_cache->address_for_C_string(remark_string_id);
2870 } else {
2871 remark = (const char*)addr(offset);
2872 offset += (uint)strlen(remark)+1;
2873 }
2874 asm_remarks.insert(remark_offset, remark);
2875 }
2876 set_read_position(offset);
2877 }
2878
2879 bool AOTCodeCache::write_dbg_strings(DbgStrings& dbg_strings, bool use_string_table) {
2880 // Write dbg strings
2881 uint* count_ptr = (uint *)reserve_bytes(sizeof(uint));
2882 if (count_ptr == nullptr) {
2883 return false;
2884 }
2885 uint count = 0;
2886 bool result = dbg_strings.iterate([&] (const char* str) -> bool {
2887 log_trace(aot, codecache, stubs)("dbg string=%s", str);
2888 if (use_string_table) {
2889 const char* cstr = add_C_string(str);
2890 int id = _table->id_for_C_string((address)cstr);
2891 assert(id != -1, "db string '%s' not found in AOTCodeAddressTable", str);
2892 uint n = write_bytes(&id, sizeof(int));
2893 if (n != sizeof(int)) {
2894 return false;
2895 }
2896 } else {
2897 uint n = write_bytes(str, (uint)strlen(str) + 1);
2898 if (n != strlen(str) + 1) {
2899 return false;
2900 }
2901 }
2902 count += 1;
2903 return true;
2904 });
2905 *count_ptr = count;
2906 return result;
2907 }
2908
2909 void AOTCodeReader::read_dbg_strings(DbgStrings& dbg_strings, bool use_string_table) {
2910 // Read dbg strings
2911 uint offset = read_position();
2912 uint count = *(uint *)addr(offset);
2913 offset += sizeof(uint);
2914 for (uint i = 0; i < count; i++) {
2915 const char* str = nullptr;
2916 if (use_string_table) {
2917 int string_id = *(uint *)addr(offset);
2918 offset += sizeof(int);
2919 str = (const char*)_cache->address_for_C_string(string_id);
2920 } else {
2921 str = (const char*)addr(offset);
2922 offset += (uint)strlen(str)+1;
2923 }
2924 dbg_strings.insert(str);
2925 }
2926 set_read_position(offset);
2927 }
2928 #endif // PRODUCT
2929
2930 //======================= AOTCodeAddressTable ===============
2931
2932 // address table ids for generated routines, external addresses and C
2933 // string addresses are partitioned into positive integer ranges
2934 // defined by the following positive base and max values
2935 // i.e. [_extrs_base, _extrs_base + _extrs_max -1],
2936 // [_stubs_base, _stubs_base + _stubs_max -1],
2937 // ...
2938 // [_c_str_base, _c_str_base + _c_str_max -1],
2939 #define _extrs_max 140
2940 #define _stubs_max 210
2941 #define _shared_blobs_max 25
2942 #define _C1_blobs_max 50
2943 #define _C2_blobs_max 25
2944 #define _blobs_max (_shared_blobs_max+_C1_blobs_max+_C2_blobs_max)
2945 #define _all_max (_extrs_max+_stubs_max+_blobs_max)
2946
2947 #define _extrs_base 0
2948 #define _stubs_base (_extrs_base + _extrs_max)
2949 #define _shared_blobs_base (_stubs_base + _stubs_max)
2950 #define _C1_blobs_base (_shared_blobs_base + _shared_blobs_max)
2951 #define _C2_blobs_base (_C1_blobs_base + _C1_blobs_max)
2952 #define _blobs_end (_shared_blobs_base + _blobs_max)
2953 #if (_C2_blobs_base >= _all_max)
2954 #error AOTCodeAddressTable ranges need adjusting
2955 #endif
2956
// Append 'addr' to the <type>_addr table and bump <type>_length,
// asserting that the table capacity (<type>_max) is not exceeded.
#define SET_ADDRESS(type, addr)                           \
  {                                                       \
    type##_addr[type##_length++] = (address) (addr);      \
    assert(type##_length <= type##_max, "increase size"); \
  }
2962
// Re-entry guard for init_extrs() (checked together with _extrs_complete).
static bool initializing_extrs = false;
2964
// Populate the _extrs partition of the address table with VM runtime entry
// points and data addresses that AOT-compiled code may reference. The index
// of each entry serves as its stable id, so entries must be recorded in the
// same order in every VM run (dump and load). Returns early if already
// complete or currently initializing.
void AOTCodeAddressTable::init_extrs() {
  if (_extrs_complete || initializing_extrs) return; // Done already

  assert(_blobs_end <= _all_max, "AOTCodeAddress table ranges need adjusting");

  initializing_extrs = true;
  _extrs_addr = NEW_C_HEAP_ARRAY(address, _extrs_max, mtCode);

  _extrs_length = 0;

  // Record addresses of VM runtime methods
  SET_ADDRESS(_extrs, SharedRuntime::fixup_callers_callsite);
  SET_ADDRESS(_extrs, SharedRuntime::handle_wrong_method);
  SET_ADDRESS(_extrs, SharedRuntime::handle_wrong_method_abstract);
  SET_ADDRESS(_extrs, SharedRuntime::handle_wrong_method_ic_miss);
  {
    // Required by Shared blobs
    SET_ADDRESS(_extrs, Deoptimization::fetch_unroll_info);
    SET_ADDRESS(_extrs, Deoptimization::unpack_frames);
    SET_ADDRESS(_extrs, SafepointSynchronize::handle_polling_page_exception);
    SET_ADDRESS(_extrs, SharedRuntime::resolve_opt_virtual_call_C);
    SET_ADDRESS(_extrs, SharedRuntime::resolve_virtual_call_C);
    SET_ADDRESS(_extrs, SharedRuntime::resolve_static_call_C);
    SET_ADDRESS(_extrs, SharedRuntime::throw_delayed_StackOverflowError);
    SET_ADDRESS(_extrs, SharedRuntime::throw_AbstractMethodError);
    SET_ADDRESS(_extrs, SharedRuntime::throw_IncompatibleClassChangeError);
    SET_ADDRESS(_extrs, SharedRuntime::throw_NullPointerException_at_call);
    SET_ADDRESS(_extrs, SharedRuntime::throw_StackOverflowError);
    SET_ADDRESS(_extrs, CompressedOops::base_addr());
    SET_ADDRESS(_extrs, CompressedKlassPointers::base_addr());
  }
  {
    // Required by initial stubs
    SET_ADDRESS(_extrs, StubRoutines::crc_table_addr());
#if defined(AMD64)
    SET_ADDRESS(_extrs, StubRoutines::crc32c_table_addr());
#endif
  }

#ifdef COMPILER1
  {
    // Required by C1 blobs
    // The static_cast selects the (oopDesc*) overload of dtrace_object_alloc.
    SET_ADDRESS(_extrs, static_cast<int (*)(oopDesc*)>(SharedRuntime::dtrace_object_alloc));
    SET_ADDRESS(_extrs, SharedRuntime::exception_handler_for_return_address);
    SET_ADDRESS(_extrs, SharedRuntime::register_finalizer);
    SET_ADDRESS(_extrs, Runtime1::is_instance_of);
    SET_ADDRESS(_extrs, Runtime1::exception_handler_for_pc);
    SET_ADDRESS(_extrs, Runtime1::check_abort_on_vm_exception);
    SET_ADDRESS(_extrs, Runtime1::new_instance);
    SET_ADDRESS(_extrs, Runtime1::counter_overflow);
    SET_ADDRESS(_extrs, Runtime1::new_type_array);
    SET_ADDRESS(_extrs, Runtime1::new_object_array);
    SET_ADDRESS(_extrs, Runtime1::new_multi_array);
    SET_ADDRESS(_extrs, Runtime1::throw_range_check_exception);
    SET_ADDRESS(_extrs, Runtime1::throw_index_exception);
    SET_ADDRESS(_extrs, Runtime1::throw_div0_exception);
    SET_ADDRESS(_extrs, Runtime1::throw_null_pointer_exception);
    SET_ADDRESS(_extrs, Runtime1::throw_array_store_exception);
    SET_ADDRESS(_extrs, Runtime1::throw_class_cast_exception);
    SET_ADDRESS(_extrs, Runtime1::throw_incompatible_class_change_error);
    SET_ADDRESS(_extrs, Runtime1::monitorenter);
    SET_ADDRESS(_extrs, Runtime1::monitorexit);
    SET_ADDRESS(_extrs, Runtime1::deoptimize);
    SET_ADDRESS(_extrs, Runtime1::access_field_patching);
    SET_ADDRESS(_extrs, Runtime1::move_klass_patching);
    SET_ADDRESS(_extrs, Runtime1::move_mirror_patching);
    SET_ADDRESS(_extrs, Runtime1::move_appendix_patching);
    SET_ADDRESS(_extrs, Runtime1::predicate_failed_trap);
    SET_ADDRESS(_extrs, Runtime1::unimplemented_entry);
    SET_ADDRESS(_extrs, Runtime1::trace_block_entry);
#ifdef X86
    SET_ADDRESS(_extrs, LIR_Assembler::float_signmask_pool);
    SET_ADDRESS(_extrs, LIR_Assembler::double_signmask_pool);
    SET_ADDRESS(_extrs, LIR_Assembler::float_signflip_pool);
    SET_ADDRESS(_extrs, LIR_Assembler::double_signflip_pool);
#endif
#ifndef PRODUCT
    SET_ADDRESS(_extrs, os::breakpoint);
#endif
  }
#endif // COMPILER1

#ifdef COMPILER2
  {
    // Required by C2 blobs
    SET_ADDRESS(_extrs, Deoptimization::uncommon_trap);
    SET_ADDRESS(_extrs, OptoRuntime::handle_exception_C);
    SET_ADDRESS(_extrs, OptoRuntime::new_instance_C);
    SET_ADDRESS(_extrs, OptoRuntime::new_array_C);
    SET_ADDRESS(_extrs, OptoRuntime::new_array_nozero_C);
    SET_ADDRESS(_extrs, OptoRuntime::multianewarray2_C);
    SET_ADDRESS(_extrs, OptoRuntime::multianewarray3_C);
    SET_ADDRESS(_extrs, OptoRuntime::multianewarray4_C);
    SET_ADDRESS(_extrs, OptoRuntime::multianewarray5_C);
    SET_ADDRESS(_extrs, OptoRuntime::multianewarrayN_C);
#if INCLUDE_JVMTI
    SET_ADDRESS(_extrs, SharedRuntime::notify_jvmti_vthread_start);
    SET_ADDRESS(_extrs, SharedRuntime::notify_jvmti_vthread_end);
    SET_ADDRESS(_extrs, SharedRuntime::notify_jvmti_vthread_mount);
    SET_ADDRESS(_extrs, SharedRuntime::notify_jvmti_vthread_unmount);
#endif
    SET_ADDRESS(_extrs, OptoRuntime::complete_monitor_locking_C);
    SET_ADDRESS(_extrs, OptoRuntime::monitor_notify_C);
    SET_ADDRESS(_extrs, OptoRuntime::monitor_notifyAll_C);
    SET_ADDRESS(_extrs, OptoRuntime::rethrow_C);
    SET_ADDRESS(_extrs, OptoRuntime::slow_arraycopy_C);
    SET_ADDRESS(_extrs, OptoRuntime::register_finalizer_C);
    SET_ADDRESS(_extrs, OptoRuntime::class_init_barrier_C);
#if defined(AMD64)
    // Used by C2 intrinsics
    SET_ADDRESS(_extrs, StubRoutines::x86::arrays_hashcode_powers_of_31());
#endif
  }
#endif // COMPILER2

  // Record addresses of VM runtime methods and data structs
  BarrierSet* bs = BarrierSet::barrier_set();
  if (bs->is_a(BarrierSet::CardTableBarrierSet)) {
    SET_ADDRESS(_extrs, ci_card_table_address_as<address>());
  }

#if INCLUDE_G1GC
  SET_ADDRESS(_extrs, G1BarrierSetRuntime::write_ref_field_post_entry);
  SET_ADDRESS(_extrs, G1BarrierSetRuntime::write_ref_field_pre_entry);
#endif

#if INCLUDE_SHENANDOAHGC
  SET_ADDRESS(_extrs, ShenandoahRuntime::arraycopy_barrier_oop);
  SET_ADDRESS(_extrs, ShenandoahRuntime::arraycopy_barrier_narrow_oop);
  SET_ADDRESS(_extrs, ShenandoahRuntime::write_ref_field_pre);
  SET_ADDRESS(_extrs, ShenandoahRuntime::clone_barrier);
  SET_ADDRESS(_extrs, ShenandoahRuntime::load_reference_barrier_strong);
  SET_ADDRESS(_extrs, ShenandoahRuntime::load_reference_barrier_strong_narrow);
  SET_ADDRESS(_extrs, ShenandoahRuntime::load_reference_barrier_weak);
  SET_ADDRESS(_extrs, ShenandoahRuntime::load_reference_barrier_weak_narrow);
  SET_ADDRESS(_extrs, ShenandoahRuntime::load_reference_barrier_phantom);
  SET_ADDRESS(_extrs, ShenandoahRuntime::load_reference_barrier_phantom_narrow);
#endif

#if INCLUDE_ZGC
  SET_ADDRESS(_extrs, ZBarrierSetRuntime::load_barrier_on_phantom_oop_field_preloaded_addr());
#if defined(AMD64)
  SET_ADDRESS(_extrs, &ZPointerLoadShift);
#endif
#endif // INCLUDE_ZGC

  SET_ADDRESS(_extrs, SharedRuntime::log_jni_monitor_still_held);
  SET_ADDRESS(_extrs, SharedRuntime::rc_trace_method_entry);
  SET_ADDRESS(_extrs, SharedRuntime::reguard_yellow_pages);
  SET_ADDRESS(_extrs, SharedRuntime::dtrace_method_exit);

  SET_ADDRESS(_extrs, SharedRuntime::complete_monitor_unlocking_C);
  SET_ADDRESS(_extrs, SharedRuntime::enable_stack_reserved_zone);
#if defined(AMD64) && !defined(ZERO)
  SET_ADDRESS(_extrs, SharedRuntime::montgomery_multiply);
  SET_ADDRESS(_extrs, SharedRuntime::montgomery_square);
#endif // AMD64
  // Arithmetic conversion and math helpers called from compiled code.
  SET_ADDRESS(_extrs, SharedRuntime::d2f);
  SET_ADDRESS(_extrs, SharedRuntime::d2i);
  SET_ADDRESS(_extrs, SharedRuntime::d2l);
  SET_ADDRESS(_extrs, SharedRuntime::dcos);
  SET_ADDRESS(_extrs, SharedRuntime::dexp);
  SET_ADDRESS(_extrs, SharedRuntime::dlog);
  SET_ADDRESS(_extrs, SharedRuntime::dlog10);
  SET_ADDRESS(_extrs, SharedRuntime::dpow);
  SET_ADDRESS(_extrs, SharedRuntime::dsin);
  SET_ADDRESS(_extrs, SharedRuntime::dtan);
  SET_ADDRESS(_extrs, SharedRuntime::f2i);
  SET_ADDRESS(_extrs, SharedRuntime::f2l);
#ifndef ZERO
  SET_ADDRESS(_extrs, SharedRuntime::drem);
  SET_ADDRESS(_extrs, SharedRuntime::frem);
#endif
  SET_ADDRESS(_extrs, SharedRuntime::l2d);
  SET_ADDRESS(_extrs, SharedRuntime::l2f);
  SET_ADDRESS(_extrs, SharedRuntime::ldiv);
  SET_ADDRESS(_extrs, SharedRuntime::lmul);
  SET_ADDRESS(_extrs, SharedRuntime::lrem);

  SET_ADDRESS(_extrs, ThreadIdentifier::unsafe_offset());
  SET_ADDRESS(_extrs, Thread::current);

  SET_ADDRESS(_extrs, os::javaTimeMillis);
  SET_ADDRESS(_extrs, os::javaTimeNanos);
  // For JFR
  SET_ADDRESS(_extrs, os::elapsed_counter);
#if defined(X86) && !defined(ZERO)
  SET_ADDRESS(_extrs, Rdtsc::elapsed_counter);
#endif

#if INCLUDE_JVMTI
  SET_ADDRESS(_extrs, &JvmtiExport::_should_notify_object_alloc);
  SET_ADDRESS(_extrs, &JvmtiVTMSTransitionDisabler::_VTMS_notify_jvmti_events);
#endif /* INCLUDE_JVMTI */

#ifndef PRODUCT
  SET_ADDRESS(_extrs, &SharedRuntime::_partial_subtype_ctr);
  SET_ADDRESS(_extrs, JavaThread::verify_cross_modify_fence_failure);
#endif

#ifndef ZERO
#if defined(AMD64) || defined(AARCH64) || defined(RISCV64)
  SET_ADDRESS(_extrs, MacroAssembler::debug64);
#endif
#if defined(AARCH64)
  SET_ADDRESS(_extrs, JavaThread::aarch64_get_thread_helper);
#endif
#endif // ZERO

  // addresses of fields in AOT runtime constants area
  address* p = AOTRuntimeConstants::field_addresses_list();
  while (*p != nullptr) {
    SET_ADDRESS(_extrs, *p++);
  }

  _extrs_complete = true;
  log_info(aot, codecache, init)("External addresses recorded");
}
3183
// Re-entry guard for init_early_stubs().
static bool initializing_early_stubs = false;
3185
// Allocate the _stubs partition and record the few stub addresses that are
// available early in VM startup (before the full set in init_stubs()).
// Returns early if the table is already complete or currently initializing.
void AOTCodeAddressTable::init_early_stubs() {
  if (_complete || initializing_early_stubs) return; // Done already
  initializing_early_stubs = true;
  _stubs_addr = NEW_C_HEAP_ARRAY(address, _stubs_max, mtCode);
  _stubs_length = 0;
  SET_ADDRESS(_stubs, StubRoutines::forward_exception_entry());

  {
    // Required by C1 blobs
#if defined(AMD64) && !defined(ZERO)
    SET_ADDRESS(_stubs, StubRoutines::x86::double_sign_flip());
    SET_ADDRESS(_stubs, StubRoutines::x86::d2l_fixup());
#endif // AMD64
  }

  _early_stubs_complete = true;
  log_info(aot, codecache, init)("Early stubs recorded");
}
3204
// Re-entry guard for init_shared_blobs().
static bool initializing_shared_blobs = false;
3206
// Record the entry points of shared runtime code blobs (deopt blob, call
// resolution stubs, polling page handlers, throw stubs) in the blobs
// partition of the address table. A single C-heap array backs the shared,
// C1, and C2 sub-partitions. Returns early if the table is already complete
// or currently initializing.
void AOTCodeAddressTable::init_shared_blobs() {
  if (_complete || initializing_shared_blobs) return; // Done already
  initializing_shared_blobs = true;
  address* blobs_addr = NEW_C_HEAP_ARRAY(address, _blobs_max, mtCode);

  // Divide _shared_blobs_addr array to chunks because they could be initialized in parallel
  _shared_blobs_addr = blobs_addr;
  _C1_blobs_addr = _shared_blobs_addr + _shared_blobs_max;// C1 blobs addresses stored after shared blobs
  _C2_blobs_addr = _C1_blobs_addr + _C1_blobs_max; // C2 blobs addresses stored after C1 blobs

  _shared_blobs_length = 0;
  _C1_blobs_length = 0;
  _C2_blobs_length = 0;

  // clear the address table
  memset(blobs_addr, 0, sizeof(address)* _blobs_max);

  // Record addresses of generated code blobs
  SET_ADDRESS(_shared_blobs, SharedRuntime::get_handle_wrong_method_stub());
  SET_ADDRESS(_shared_blobs, SharedRuntime::get_ic_miss_stub());
  SET_ADDRESS(_shared_blobs, SharedRuntime::deopt_blob()->unpack());
  SET_ADDRESS(_shared_blobs, SharedRuntime::deopt_blob()->unpack_with_exception());
  SET_ADDRESS(_shared_blobs, SharedRuntime::deopt_blob()->unpack_with_reexecution());
  SET_ADDRESS(_shared_blobs, SharedRuntime::deopt_blob()->unpack_with_exception_in_tls());
  SET_ADDRESS(_shared_blobs, SharedRuntime::get_resolve_opt_virtual_call_stub());
  SET_ADDRESS(_shared_blobs, SharedRuntime::get_resolve_virtual_call_stub());
  SET_ADDRESS(_shared_blobs, SharedRuntime::get_resolve_static_call_stub());
  SET_ADDRESS(_shared_blobs, SharedRuntime::deopt_blob()->entry_point());
  SET_ADDRESS(_shared_blobs, SharedRuntime::polling_page_safepoint_handler_blob()->entry_point());
  SET_ADDRESS(_shared_blobs, SharedRuntime::polling_page_return_handler_blob()->entry_point());
#ifdef COMPILER2
  // polling_page_vectors_safepoint_handler_blob can be nullptr if AVX feature is not present or is disabled
  if (SharedRuntime::polling_page_vectors_safepoint_handler_blob() != nullptr) {
    SET_ADDRESS(_shared_blobs, SharedRuntime::polling_page_vectors_safepoint_handler_blob()->entry_point());
  }
#endif
#if INCLUDE_JVMCI
  if (EnableJVMCI) {
    SET_ADDRESS(_shared_blobs, SharedRuntime::deopt_blob()->uncommon_trap());
    SET_ADDRESS(_shared_blobs, SharedRuntime::deopt_blob()->implicit_exception_uncommon_trap());
  }
#endif
  SET_ADDRESS(_shared_blobs, SharedRuntime::throw_AbstractMethodError_entry());
  SET_ADDRESS(_shared_blobs, SharedRuntime::throw_IncompatibleClassChangeError_entry());
  SET_ADDRESS(_shared_blobs, SharedRuntime::throw_NullPointerException_at_call_entry());
  SET_ADDRESS(_shared_blobs, SharedRuntime::throw_StackOverflowError_entry());
  SET_ADDRESS(_shared_blobs, SharedRuntime::throw_delayed_StackOverflowError_entry());

  assert(_shared_blobs_length <= _shared_blobs_max, "increase _shared_blobs_max to %d", _shared_blobs_length);
  _shared_blobs_complete = true;
  log_info(aot, codecache, init)("All shared blobs recorded");
}
3259
// Re-entry guard for init_stubs().
static bool initializing_stubs = false;
3261 void AOTCodeAddressTable::init_stubs() {
3262 if (_complete || initializing_stubs) return; // Done already
3263 assert(_early_stubs_complete, "early stubs whould be initialized");
3264 initializing_stubs = true;
3265
3266 // Stubs
3267 SET_ADDRESS(_stubs, StubRoutines::method_entry_barrier());
3268 SET_ADDRESS(_stubs, StubRoutines::atomic_xchg_entry());
3269 SET_ADDRESS(_stubs, StubRoutines::atomic_cmpxchg_entry());
3270 SET_ADDRESS(_stubs, StubRoutines::atomic_cmpxchg_long_entry());
3271 SET_ADDRESS(_stubs, StubRoutines::atomic_add_entry());
3272 SET_ADDRESS(_stubs, StubRoutines::fence_entry());
3273
3274 SET_ADDRESS(_stubs, StubRoutines::cont_thaw());
3275 SET_ADDRESS(_stubs, StubRoutines::cont_returnBarrier());
3276 SET_ADDRESS(_stubs, StubRoutines::cont_returnBarrierExc());
3277
3278 JFR_ONLY(SET_ADDRESS(_stubs, SharedRuntime::jfr_write_checkpoint());)
3279
3280 SET_ADDRESS(_stubs, StubRoutines::jbyte_arraycopy());
3281 SET_ADDRESS(_stubs, StubRoutines::jshort_arraycopy());
3282 SET_ADDRESS(_stubs, StubRoutines::jint_arraycopy());
3283 SET_ADDRESS(_stubs, StubRoutines::jlong_arraycopy());
3284 SET_ADDRESS(_stubs, StubRoutines::_oop_arraycopy);
3285 SET_ADDRESS(_stubs, StubRoutines::_oop_arraycopy_uninit);
3286
3287 SET_ADDRESS(_stubs, StubRoutines::jbyte_disjoint_arraycopy());
3288 SET_ADDRESS(_stubs, StubRoutines::jshort_disjoint_arraycopy());
3289 SET_ADDRESS(_stubs, StubRoutines::jint_disjoint_arraycopy());
3290 SET_ADDRESS(_stubs, StubRoutines::jlong_disjoint_arraycopy());
3291 SET_ADDRESS(_stubs, StubRoutines::_oop_disjoint_arraycopy);
3292 SET_ADDRESS(_stubs, StubRoutines::_oop_disjoint_arraycopy_uninit);
3293
3294 SET_ADDRESS(_stubs, StubRoutines::arrayof_jbyte_arraycopy());
3295 SET_ADDRESS(_stubs, StubRoutines::arrayof_jshort_arraycopy());
3296 SET_ADDRESS(_stubs, StubRoutines::arrayof_jint_arraycopy());
3297 SET_ADDRESS(_stubs, StubRoutines::arrayof_jlong_arraycopy());
3298 SET_ADDRESS(_stubs, StubRoutines::_arrayof_oop_arraycopy);
3299 SET_ADDRESS(_stubs, StubRoutines::_arrayof_oop_arraycopy_uninit);
3300
3301 SET_ADDRESS(_stubs, StubRoutines::arrayof_jbyte_disjoint_arraycopy());
3302 SET_ADDRESS(_stubs, StubRoutines::arrayof_jshort_disjoint_arraycopy());
3303 SET_ADDRESS(_stubs, StubRoutines::arrayof_jint_disjoint_arraycopy());
3304 SET_ADDRESS(_stubs, StubRoutines::arrayof_jlong_disjoint_arraycopy());
3305 SET_ADDRESS(_stubs, StubRoutines::_arrayof_oop_disjoint_arraycopy);
3306 SET_ADDRESS(_stubs, StubRoutines::_arrayof_oop_disjoint_arraycopy_uninit);
3307
3308 SET_ADDRESS(_stubs, StubRoutines::_checkcast_arraycopy);
3309 SET_ADDRESS(_stubs, StubRoutines::_checkcast_arraycopy_uninit);
3310
3311 SET_ADDRESS(_stubs, StubRoutines::unsafe_arraycopy());
3312 SET_ADDRESS(_stubs, StubRoutines::generic_arraycopy());
3313
3314 SET_ADDRESS(_stubs, StubRoutines::jbyte_fill());
3315 SET_ADDRESS(_stubs, StubRoutines::jshort_fill());
3316 SET_ADDRESS(_stubs, StubRoutines::jint_fill());
3317 SET_ADDRESS(_stubs, StubRoutines::arrayof_jbyte_fill());
3318 SET_ADDRESS(_stubs, StubRoutines::arrayof_jshort_fill());
3319 SET_ADDRESS(_stubs, StubRoutines::arrayof_jint_fill());
3320
3321 SET_ADDRESS(_stubs, StubRoutines::data_cache_writeback());
3322 SET_ADDRESS(_stubs, StubRoutines::data_cache_writeback_sync());
3323
3324 SET_ADDRESS(_stubs, StubRoutines::aescrypt_encryptBlock());
3325 SET_ADDRESS(_stubs, StubRoutines::aescrypt_decryptBlock());
3326 SET_ADDRESS(_stubs, StubRoutines::cipherBlockChaining_encryptAESCrypt());
3327 SET_ADDRESS(_stubs, StubRoutines::cipherBlockChaining_decryptAESCrypt());
3328 SET_ADDRESS(_stubs, StubRoutines::electronicCodeBook_encryptAESCrypt());
3329 SET_ADDRESS(_stubs, StubRoutines::electronicCodeBook_decryptAESCrypt());
3330 SET_ADDRESS(_stubs, StubRoutines::poly1305_processBlocks());
3331 SET_ADDRESS(_stubs, StubRoutines::counterMode_AESCrypt());
3332 SET_ADDRESS(_stubs, StubRoutines::ghash_processBlocks());
3333 SET_ADDRESS(_stubs, StubRoutines::chacha20Block());
3334 SET_ADDRESS(_stubs, StubRoutines::base64_encodeBlock());
3335 SET_ADDRESS(_stubs, StubRoutines::base64_decodeBlock());
3336 SET_ADDRESS(_stubs, StubRoutines::md5_implCompress());
3337 SET_ADDRESS(_stubs, StubRoutines::md5_implCompressMB());
3338 SET_ADDRESS(_stubs, StubRoutines::sha1_implCompress());
3339 SET_ADDRESS(_stubs, StubRoutines::sha1_implCompressMB());
3340 SET_ADDRESS(_stubs, StubRoutines::sha256_implCompress());
3341 SET_ADDRESS(_stubs, StubRoutines::sha256_implCompressMB());
3342 SET_ADDRESS(_stubs, StubRoutines::sha512_implCompress());
3343 SET_ADDRESS(_stubs, StubRoutines::sha512_implCompressMB());
3344 SET_ADDRESS(_stubs, StubRoutines::sha3_implCompress());
3345 SET_ADDRESS(_stubs, StubRoutines::sha3_implCompressMB());
3346 SET_ADDRESS(_stubs, StubRoutines::double_keccak());
3347 SET_ADDRESS(_stubs, StubRoutines::intpoly_assign());
3348 SET_ADDRESS(_stubs, StubRoutines::intpoly_montgomeryMult_P256());
3349 SET_ADDRESS(_stubs, StubRoutines::dilithiumAlmostNtt());
3350 SET_ADDRESS(_stubs, StubRoutines::dilithiumAlmostInverseNtt());
3351 SET_ADDRESS(_stubs, StubRoutines::dilithiumNttMult());
3352 SET_ADDRESS(_stubs, StubRoutines::dilithiumMontMulByConstant());
3353 SET_ADDRESS(_stubs, StubRoutines::dilithiumDecomposePoly());
3354
3355 SET_ADDRESS(_stubs, StubRoutines::updateBytesCRC32());
3356 SET_ADDRESS(_stubs, StubRoutines::updateBytesCRC32C());
3357 SET_ADDRESS(_stubs, StubRoutines::updateBytesAdler32());
3358
3359 SET_ADDRESS(_stubs, StubRoutines::multiplyToLen());
3360 SET_ADDRESS(_stubs, StubRoutines::squareToLen());
3361 SET_ADDRESS(_stubs, StubRoutines::mulAdd());
3362 SET_ADDRESS(_stubs, StubRoutines::montgomeryMultiply());
3363 SET_ADDRESS(_stubs, StubRoutines::montgomerySquare());
3364 SET_ADDRESS(_stubs, StubRoutines::bigIntegerRightShift());
3365 SET_ADDRESS(_stubs, StubRoutines::bigIntegerLeftShift());
3366 SET_ADDRESS(_stubs, StubRoutines::galoisCounterMode_AESCrypt());
3367
3368 SET_ADDRESS(_stubs, StubRoutines::vectorizedMismatch());
3369
3370 SET_ADDRESS(_stubs, StubRoutines::unsafe_setmemory());
3371
3372 SET_ADDRESS(_stubs, StubRoutines::dexp());
3373 SET_ADDRESS(_stubs, StubRoutines::dlog());
3374 SET_ADDRESS(_stubs, StubRoutines::dlog10());
3375 SET_ADDRESS(_stubs, StubRoutines::dpow());
3376 SET_ADDRESS(_stubs, StubRoutines::dsin());
3377 SET_ADDRESS(_stubs, StubRoutines::dcos());
3378 SET_ADDRESS(_stubs, StubRoutines::dlibm_reduce_pi04l());
3379 SET_ADDRESS(_stubs, StubRoutines::dlibm_sin_cos_huge());
3380 SET_ADDRESS(_stubs, StubRoutines::dlibm_tan_cot_huge());
3381 SET_ADDRESS(_stubs, StubRoutines::dtan());
3382
3383 SET_ADDRESS(_stubs, StubRoutines::f2hf_adr());
3384 SET_ADDRESS(_stubs, StubRoutines::hf2f_adr());
3385
3386 for (int slot = 0; slot < Klass::SECONDARY_SUPERS_TABLE_SIZE; slot++) {
3387 SET_ADDRESS(_stubs, StubRoutines::lookup_secondary_supers_table_stub(slot));
3388 }
3389 SET_ADDRESS(_stubs, StubRoutines::lookup_secondary_supers_table_slow_path_stub());
3390
3391 #if defined(AMD64) && !defined(ZERO)
3392 SET_ADDRESS(_stubs, StubRoutines::x86::d2i_fixup());
3393 SET_ADDRESS(_stubs, StubRoutines::x86::f2i_fixup());
3394 SET_ADDRESS(_stubs, StubRoutines::x86::f2l_fixup());
3395 SET_ADDRESS(_stubs, StubRoutines::x86::float_sign_mask());
3396 SET_ADDRESS(_stubs, StubRoutines::x86::float_sign_flip());
3397 SET_ADDRESS(_stubs, StubRoutines::x86::double_sign_mask());
3398 SET_ADDRESS(_stubs, StubRoutines::x86::vector_popcount_lut());
3399 SET_ADDRESS(_stubs, StubRoutines::x86::vector_float_sign_mask());
3400 SET_ADDRESS(_stubs, StubRoutines::x86::vector_float_sign_flip());
3401 SET_ADDRESS(_stubs, StubRoutines::x86::vector_double_sign_mask());
3402 SET_ADDRESS(_stubs, StubRoutines::x86::vector_double_sign_flip());
3403 SET_ADDRESS(_stubs, StubRoutines::x86::vector_int_shuffle_mask());
3404 SET_ADDRESS(_stubs, StubRoutines::x86::vector_byte_shuffle_mask());
3405 SET_ADDRESS(_stubs, StubRoutines::x86::vector_short_shuffle_mask());
3406 SET_ADDRESS(_stubs, StubRoutines::x86::vector_long_shuffle_mask());
3407 SET_ADDRESS(_stubs, StubRoutines::x86::vector_long_sign_mask());
3408 SET_ADDRESS(_stubs, StubRoutines::x86::vector_reverse_byte_perm_mask_int());
3409 SET_ADDRESS(_stubs, StubRoutines::x86::vector_reverse_byte_perm_mask_short());
3410 SET_ADDRESS(_stubs, StubRoutines::x86::vector_reverse_byte_perm_mask_long());
3411 // The iota indices are ordered by type B/S/I/L/F/D, and the offset between two types is 64.
3412 // See C2_MacroAssembler::load_iota_indices().
3413 for (int i = 0; i < 6; i++) {
3414 SET_ADDRESS(_stubs, StubRoutines::x86::vector_iota_indices() + i * 64);
3415 }
3416 #endif
3417 #if defined(AARCH64) && !defined(ZERO)
3418 SET_ADDRESS(_stubs, StubRoutines::aarch64::zero_blocks());
3419 SET_ADDRESS(_stubs, StubRoutines::aarch64::count_positives());
3420 SET_ADDRESS(_stubs, StubRoutines::aarch64::count_positives_long());
3421 SET_ADDRESS(_stubs, StubRoutines::aarch64::large_array_equals());
3422 SET_ADDRESS(_stubs, StubRoutines::aarch64::compare_long_string_LL());
3423 SET_ADDRESS(_stubs, StubRoutines::aarch64::compare_long_string_UU());
3424 SET_ADDRESS(_stubs, StubRoutines::aarch64::compare_long_string_LU());
3425 SET_ADDRESS(_stubs, StubRoutines::aarch64::compare_long_string_UL());
3426 SET_ADDRESS(_stubs, StubRoutines::aarch64::string_indexof_linear_ul());
3427 SET_ADDRESS(_stubs, StubRoutines::aarch64::string_indexof_linear_ll());
3428 SET_ADDRESS(_stubs, StubRoutines::aarch64::string_indexof_linear_uu());
3429 SET_ADDRESS(_stubs, StubRoutines::aarch64::large_byte_array_inflate());
3430 SET_ADDRESS(_stubs, StubRoutines::aarch64::spin_wait());
3431
3432 SET_ADDRESS(_stubs, StubRoutines::aarch64::large_arrays_hashcode(T_BOOLEAN));
3433 SET_ADDRESS(_stubs, StubRoutines::aarch64::large_arrays_hashcode(T_BYTE));
3434 SET_ADDRESS(_stubs, StubRoutines::aarch64::large_arrays_hashcode(T_SHORT));
3435 SET_ADDRESS(_stubs, StubRoutines::aarch64::large_arrays_hashcode(T_CHAR));
3436 SET_ADDRESS(_stubs, StubRoutines::aarch64::large_arrays_hashcode(T_INT));
3437 #endif
3438
3439 _complete = true;
3440 log_info(aot, codecache, init)("Stubs recorded");
3441 }
3442
3443 void AOTCodeAddressTable::init_early_c1() {
3444 #ifdef COMPILER1
3445 // Runtime1 Blobs
3446 for (int i = 0; i <= (int)C1StubId::forward_exception_id; i++) {
3447 C1StubId id = (C1StubId)i;
3448 if (Runtime1::blob_for(id) == nullptr) {
3449 log_info(aot, codecache, init)("C1 blob %s is missing", Runtime1::name_for(id));
3450 continue;
3451 }
3452 if (Runtime1::entry_for(id) == nullptr) {
3453 log_info(aot, codecache, init)("C1 blob %s is missing entry", Runtime1::name_for(id));
3454 continue;
3455 }
3456 address entry = Runtime1::entry_for(id);
3457 SET_ADDRESS(_C1_blobs, entry);
3458 }
3459 #endif // COMPILER1
3460 assert(_C1_blobs_length <= _C1_blobs_max, "increase _C1_blobs_max to %d", _C1_blobs_length);
3461 _early_c1_complete = true;
3462 }
3463
3464 void AOTCodeAddressTable::init_c1() {
3465 #ifdef COMPILER1
3466 // Runtime1 Blobs
3467 assert(_early_c1_complete, "early C1 blobs should be initialized");
3468 for (int i = (int)C1StubId::forward_exception_id + 1; i < (int)(C1StubId::NUM_STUBIDS); i++) {
3469 C1StubId id = (C1StubId)i;
3470 if (Runtime1::blob_for(id) == nullptr) {
3471 log_info(aot, codecache, init)("C1 blob %s is missing", Runtime1::name_for(id));
3472 continue;
3473 }
3474 if (Runtime1::entry_for(id) == nullptr) {
3475 log_info(aot, codecache, init)("C1 blob %s is missing entry", Runtime1::name_for(id));
3476 continue;
3477 }
3478 address entry = Runtime1::entry_for(id);
3479 SET_ADDRESS(_C1_blobs, entry);
3480 }
3481 #if INCLUDE_G1GC
3482 if (UseG1GC) {
3483 G1BarrierSetC1* bs = (G1BarrierSetC1*)BarrierSet::barrier_set()->barrier_set_c1();
3484 address entry = bs->pre_barrier_c1_runtime_code_blob()->code_begin();
3485 SET_ADDRESS(_C1_blobs, entry);
3486 entry = bs->post_barrier_c1_runtime_code_blob()->code_begin();
3487 SET_ADDRESS(_C1_blobs, entry);
3488 }
3489 #endif // INCLUDE_G1GC
3490 #if INCLUDE_ZGC
3491 if (UseZGC) {
3492 ZBarrierSetC1* bs = (ZBarrierSetC1*)BarrierSet::barrier_set()->barrier_set_c1();
3493 SET_ADDRESS(_C1_blobs, bs->_load_barrier_on_oop_field_preloaded_runtime_stub);
3494 SET_ADDRESS(_C1_blobs, bs->_load_barrier_on_weak_oop_field_preloaded_runtime_stub);
3495 SET_ADDRESS(_C1_blobs, bs->_store_barrier_on_oop_field_with_healing);
3496 SET_ADDRESS(_C1_blobs, bs->_store_barrier_on_oop_field_without_healing);
3497 }
3498 #endif // INCLUDE_ZGC
3499 #if INCLUDE_SHENANDOAHGC
3500 if (UseShenandoahGC) {
3501 ShenandoahBarrierSetC1* bs = (ShenandoahBarrierSetC1*)BarrierSet::barrier_set()->barrier_set_c1();
3502 SET_ADDRESS(_C1_blobs, bs->pre_barrier_c1_runtime_code_blob()->code_begin());
3503 SET_ADDRESS(_C1_blobs, bs->load_reference_barrier_strong_rt_code_blob()->code_begin());
3504 SET_ADDRESS(_C1_blobs, bs->load_reference_barrier_strong_native_rt_code_blob()->code_begin());
3505 SET_ADDRESS(_C1_blobs, bs->load_reference_barrier_weak_rt_code_blob()->code_begin());
3506 SET_ADDRESS(_C1_blobs, bs->load_reference_barrier_phantom_rt_code_blob()->code_begin());
3507 }
3508 #endif // INCLUDE_SHENANDOAHGC
3509 #endif // COMPILER1
3510
3511 assert(_C1_blobs_length <= _C1_blobs_max, "increase _C1_blobs_max to %d", _C1_blobs_length);
3512 _c1_complete = true;
3513 log_info(aot, codecache, init)("Runtime1 Blobs recorded");
3514 }
3515
// Record the C2 (OptoRuntime) stub and blob entry points in the _C2_blobs
// address table. NOTE: the SET_ADDRESS order is significant - each call
// appends at the next sequential index and that index becomes the entry's
// id in the AOT code cache, so registrations must happen in the same order
// when dumping and when loading.
void AOTCodeAddressTable::init_c2() {
#ifdef COMPILER2
  // OptoRuntime Blobs
  SET_ADDRESS(_C2_blobs, OptoRuntime::uncommon_trap_blob()->entry_point());
  SET_ADDRESS(_C2_blobs, OptoRuntime::exception_blob()->entry_point());
  SET_ADDRESS(_C2_blobs, OptoRuntime::new_instance_Java());
  SET_ADDRESS(_C2_blobs, OptoRuntime::new_array_Java());
  SET_ADDRESS(_C2_blobs, OptoRuntime::new_array_nozero_Java());
  SET_ADDRESS(_C2_blobs, OptoRuntime::multianewarray2_Java());
  SET_ADDRESS(_C2_blobs, OptoRuntime::multianewarray3_Java());
  SET_ADDRESS(_C2_blobs, OptoRuntime::multianewarray4_Java());
  SET_ADDRESS(_C2_blobs, OptoRuntime::multianewarray5_Java());
  SET_ADDRESS(_C2_blobs, OptoRuntime::multianewarrayN_Java());
  SET_ADDRESS(_C2_blobs, OptoRuntime::vtable_must_compile_stub());
  SET_ADDRESS(_C2_blobs, OptoRuntime::complete_monitor_locking_Java());
  SET_ADDRESS(_C2_blobs, OptoRuntime::monitor_notify_Java());
  SET_ADDRESS(_C2_blobs, OptoRuntime::monitor_notifyAll_Java());
  SET_ADDRESS(_C2_blobs, OptoRuntime::rethrow_stub());
  SET_ADDRESS(_C2_blobs, OptoRuntime::slow_arraycopy_Java());
  SET_ADDRESS(_C2_blobs, OptoRuntime::register_finalizer_Java());
  SET_ADDRESS(_C2_blobs, OptoRuntime::class_init_barrier_Java());
#if INCLUDE_JVMTI
  // JVMTI virtual-thread notification stubs are only present in JVMTI builds.
  SET_ADDRESS(_C2_blobs, OptoRuntime::notify_jvmti_vthread_start());
  SET_ADDRESS(_C2_blobs, OptoRuntime::notify_jvmti_vthread_end());
  SET_ADDRESS(_C2_blobs, OptoRuntime::notify_jvmti_vthread_mount());
  SET_ADDRESS(_C2_blobs, OptoRuntime::notify_jvmti_vthread_unmount());
#endif /* INCLUDE_JVMTI */
#endif

  assert(_C2_blobs_length <= _C2_blobs_max, "increase _C2_blobs_max to %d", _C2_blobs_length);
  _c2_complete = true;
  log_info(aot, codecache, init)("OptoRuntime Blobs recorded");
}
3549 #undef SET_ADDRESS
3550
// Release the heap-allocated address tables.
// NOTE(review): _C1_blobs_addr and _C2_blobs_addr are not freed here -
// presumably they alias into one of the allocations freed below, or are
// never separately heap-allocated; confirm against the table setup code.
AOTCodeAddressTable::~AOTCodeAddressTable() {
  if (_extrs_addr != nullptr) {
    FREE_C_HEAP_ARRAY(address, _extrs_addr);
  }
  if (_stubs_addr != nullptr) {
    FREE_C_HEAP_ARRAY(address, _stubs_addr);
  }
  if (_shared_blobs_addr != nullptr) {
    FREE_C_HEAP_ARRAY(address, _shared_blobs_addr);
  }
}
3562
// Capacity of the C-string table. Non-product builds record more strings
// (e.g. assert/verbose text), so they get a larger table.
#ifdef PRODUCT
#define MAX_STR_COUNT 200
#else
#define MAX_STR_COUNT 500
#endif
#define _c_str_max MAX_STR_COUNT
// First id assigned to C strings; ids below _all_max belong to the
// address tables (externals/stubs/blobs).
static const int _c_str_base = _all_max;

static const char* _C_strings_in[MAX_STR_COUNT] = {nullptr}; // Incoming strings
static const char* _C_strings[MAX_STR_COUNT] = {nullptr};    // Our duplicates
static int _C_strings_count = 0;              // Number of strings recorded in _C_strings
static int _C_strings_s[MAX_STR_COUNT] = {0}; // Maps store id -> index into _C_strings
static int _C_strings_id[MAX_STR_COUNT] = {0}; // Maps _C_strings index -> store id (-1 until assigned)
static int _C_strings_used = 0;               // Number of strings that have store ids
3577
3589 // still be executed on VM exit after _cache is freed.
3590 char* p = NEW_C_HEAP_ARRAY(char, strings_size+1, mtCode);
3591 memcpy(p, addr(strings_offset), strings_size);
3592 _C_strings_buf = p;
3593 assert(strings_count <= MAX_STR_COUNT, "sanity");
3594 for (uint i = 0; i < strings_count; i++) {
3595 _C_strings[i] = p;
3596 uint len = string_lengths[i];
3597 _C_strings_s[i] = i;
3598 _C_strings_id[i] = i;
3599 p += len;
3600 }
3601 assert((uint)(p - _C_strings_buf) <= strings_size, "(" INTPTR_FORMAT " - " INTPTR_FORMAT ") = %d > %d ", p2i(p), p2i(_C_strings_buf), (uint)(p - _C_strings_buf), strings_size);
3602 _C_strings_count = strings_count;
3603 _C_strings_used = strings_count;
3604 log_debug(aot, codecache, init)(" Loaded %d C strings of total length %d at offset %d from AOT Code Cache", _C_strings_count, strings_size, strings_offset);
3605 }
3606
// Write the recorded C strings to the AOT Code Cache: first an array of
// uint lengths (one per string), then the '\0'-terminated string bodies,
// in store-id order (via _C_strings_s). Returns the number of strings
// written, or -1 if reserving or writing failed.
int AOTCodeCache::store_strings() {
  if (_C_strings_used > 0) {
    MutexLocker ml(AOTCodeCStrings_lock, Mutex::_no_safepoint_check_flag);
    uint offset = _write_position; // remembered only for the log message
    uint length = 0;               // total bytes of string data, for logging
    // Reserve the lengths array up front; it precedes the string data.
    uint* lengths = (uint *)reserve_bytes(sizeof(uint) * _C_strings_used);
    if (lengths == nullptr) {
      return -1;
    }
    for (int i = 0; i < _C_strings_used; i++) {
      // _C_strings_s maps store id -> index into _C_strings.
      const char* str = _C_strings[_C_strings_s[i]];
      uint len = (uint)strlen(str) + 1; // include the terminating '\0'
      length += len;
      assert(len < 1000, "big string: %s", str);
      // NOTE(review): 'lengths' points into the buffer returned by
      // reserve_bytes(); this assumes the write_bytes() calls below never
      // move/reallocate that buffer - confirm against reserve_bytes().
      lengths[i] = len;
      uint n = write_bytes(str, len);
      if (n != len) {
        return -1;
      }
    }
    log_debug(aot, codecache, exit)(" Wrote %d C strings of total length %d at offset %d to AOT Code Cache",
                                    _C_strings_used, length, offset);
  }
  return _C_strings_used;
}
3632
3633 const char* AOTCodeCache::add_C_string(const char* str) {
3634 if (is_on_for_dump() && str != nullptr) {
3635 MutexLocker ml(AOTCodeCStrings_lock, Mutex::_no_safepoint_check_flag);
3636 AOTCodeAddressTable* table = addr_table();
3637 if (table != nullptr) {
3638 return table->add_C_string(str);
3639 }
3640 }
3641 return str;
3642 }
3643
3644 const char* AOTCodeAddressTable::add_C_string(const char* str) {
3645 if (_extrs_complete) {
3646 // Check previous strings address
3647 for (int i = 0; i < _C_strings_count; i++) {
3648 if (_C_strings_in[i] == str) {
3649 return _C_strings[i]; // Found previous one - return our duplicate
3650 } else if (strcmp(_C_strings[i], str) == 0) {
3651 return _C_strings[i];
3652 }
3653 }
3654 // Add new one
3655 if (_C_strings_count < MAX_STR_COUNT) {
3656 // Passed in string can be freed and used space become inaccessible.
3657 // Keep original address but duplicate string for future compare.
3658 _C_strings_id[_C_strings_count] = -1; // Init
3659 _C_strings_in[_C_strings_count] = str;
3660 const char* dup = os::strdup(str);
3661 _C_strings[_C_strings_count++] = dup;
3662 log_trace(aot, codecache, stringtable)("add_C_string: [%d] " INTPTR_FORMAT " '%s'", _C_strings_count, p2i(dup), dup);
3663 return dup;
3664 } else {
3665 assert(false, "Number of C strings >= MAX_STR_COUNT");
3666 }
3667 }
3668 return str;
3669 }
3670
3671 int AOTCodeAddressTable::id_for_C_string(address str) {
3672 if (str == nullptr) {
3673 return -1;
3674 }
3675 MutexLocker ml(AOTCodeCStrings_lock, Mutex::_no_safepoint_check_flag);
3676 for (int i = 0; i < _C_strings_count; i++) {
3677 if (_C_strings[i] == (const char*)str) { // found
3678 int id = _C_strings_id[i];
3679 if (id >= 0) {
3680 assert(id < _C_strings_used, "%d >= %d", id , _C_strings_used);
3681 return id; // Found recorded
3682 }
3683 // Not found in recorded, add new
3684 id = _C_strings_used++;
3685 _C_strings_s[id] = i;
3686 _C_strings_id[i] = id;
3687 return id;
3688 }
3689 }
3690 return -1;
3691 }
3692
3693 address AOTCodeAddressTable::address_for_C_string(int idx) {
3694 assert(idx < _C_strings_count, "sanity");
3695 return (address)_C_strings[idx];
3696 }
3697
3698 static int search_address(address addr, address* table, uint length) {
3699 for (int i = 0; i < (int)length; i++) {
3700 if (table[i] == addr) {
3701 return i;
3702 }
3703 }
3704 return BAD_ADDRESS_ID;
3705 }
3706
3707 address AOTCodeAddressTable::address_for_id(int idx) {
3708 assert(_extrs_complete, "AOT Code Cache VM runtime addresses table is not complete");
3709 if (idx == -1) {
3710 return (address)-1;
3711 }
3712 uint id = (uint)idx;
3713 // special case for symbols based relative to os::init
3714 if (id > (_c_str_base + _c_str_max)) {
3715 return (address)os::init + idx;
3716 }
3717 if (idx < 0) {
3718 fatal("Incorrect id %d for AOT Code Cache addresses table", id);
3719 return nullptr;
3720 }
3721 // no need to compare unsigned id against 0
3722 if (/* id >= _extrs_base && */ id < _extrs_length) {
3723 return _extrs_addr[id - _extrs_base];
3724 }
3725 if (id >= _stubs_base && id < _stubs_base + _stubs_length) {
3726 return _stubs_addr[id - _stubs_base];
3727 }
3728 if (id >= _stubs_base && id < _stubs_base + _stubs_length) {
3729 return _stubs_addr[id - _stubs_base];
3730 }
3731 if (id >= _shared_blobs_base && id < _shared_blobs_base + _shared_blobs_length) {
3732 return _shared_blobs_addr[id - _shared_blobs_base];
3733 }
3734 if (id >= _C1_blobs_base && id < _C1_blobs_base + _C1_blobs_length) {
3735 return _C1_blobs_addr[id - _C1_blobs_base];
3736 }
3737 if (id >= _C1_blobs_base && id < _C1_blobs_base + _C1_blobs_length) {
3738 return _C1_blobs_addr[id - _C1_blobs_base];
3739 }
3740 if (id >= _C2_blobs_base && id < _C2_blobs_base + _C2_blobs_length) {
3741 return _C2_blobs_addr[id - _C2_blobs_base];
3742 }
3743 if (id >= _c_str_base && id < (_c_str_base + (uint)_C_strings_count)) {
3744 return address_for_C_string(id - _c_str_base);
3745 }
3746 fatal("Incorrect id %d for AOT Code Cache addresses table", id);
3747 return nullptr;
3748 }
3749
// Translate a runtime address found in a relocation into a stable id for
// storing in the AOT code cache. Search order: card-table base, C strings,
// stubs, code blobs (shared/C1/C2), then external runtime symbols; as a
// last resort a positive offset from os::init is encoded directly as the
// id. Asserts (debug builds) when the address cannot be identified.
// 'reloc' and 'blob' are only used for diagnostics on failure.
int AOTCodeAddressTable::id_for_address(address addr, RelocIterator reloc, CodeBlob* blob) {
  assert(_extrs_complete, "AOT Code Cache VM runtime addresses table is not complete");
  int id = -1;
  if (addr == (address)-1) { // Static call stub has jump to itself
    return id;
  }
  // Check card_table_base address first since it can point to any address
  BarrierSet* bs = BarrierSet::barrier_set();
  if (bs->is_a(BarrierSet::CardTableBarrierSet)) {
    if (addr == ci_card_table_address_as<address>()) {
      id = search_address(addr, _extrs_addr, _extrs_length);
      assert(id > 0 && _extrs_addr[id - _extrs_base] == addr, "sanity");
      return id;
    }
  }

  // Search for C string
  id = id_for_C_string(addr);
  if (id >= 0) {
    return id + _c_str_base; // C-string ids live above the address tables
  }
  if (StubRoutines::contains(addr)) {
    // Search in stubs
    id = search_address(addr, _stubs_addr, _stubs_length);
    if (id == BAD_ADDRESS_ID) {
      // Diagnostics only: name the stub we failed to find.
      StubCodeDesc* desc = StubCodeDesc::desc_for(addr);
      if (desc == nullptr) {
        // The address may be a return address just past a call; retry
        // shifted by the platform's pc return offset.
        desc = StubCodeDesc::desc_for(addr + frame::pc_return_offset);
      }
      const char* sub_name = (desc != nullptr) ? desc->name() : "<unknown>";
      assert(false, "Address " INTPTR_FORMAT " for Stub:%s is missing in AOT Code Cache addresses table", p2i(addr), sub_name);
    } else {
      return _stubs_base + id;
    }
  } else {
    CodeBlob* cb = CodeCache::find_blob(addr);
    if (cb != nullptr) {
      // Try each blob table in turn; id_base tracks which table matched.
      int id_base = _shared_blobs_base;
      // Search in code blobs
      id = search_address(addr, _shared_blobs_addr, _shared_blobs_length);
      if (id == BAD_ADDRESS_ID) {
        id_base = _C1_blobs_base;
        // search C1 blobs
        id = search_address(addr, _C1_blobs_addr, _C1_blobs_length);
      }
      if (id == BAD_ADDRESS_ID) {
        id_base = _C2_blobs_base;
        // search C2 blobs
        id = search_address(addr, _C2_blobs_addr, _C2_blobs_length);
      }
      if (id == BAD_ADDRESS_ID) {
        assert(false, "Address " INTPTR_FORMAT " for Blob:%s is missing in AOT Code Cache addresses table", p2i(addr), cb->name());
      } else {
        return id_base + id;
      }
    } else {
      // Search in runtime functions
      id = search_address(addr, _extrs_addr, _extrs_length);
      if (id == BAD_ADDRESS_ID) {
        ResourceMark rm;
        const int buflen = 1024;
        char* func_name = NEW_RESOURCE_ARRAY(char, buflen);
        int offset = 0;
        if (os::dll_address_to_function_name(addr, func_name, buflen, &offset)) {
          if (offset > 0) {
            // Could be address of C string
            // Fallback encoding: store the distance from os::init as the id
            // itself (decoded by the os::init special case in address_for_id).
            uint dist = (uint)pointer_delta(addr, (address)os::init, 1);
            CompileTask* task = ciEnv::current()->task();
            uint compile_id = 0;
            uint comp_level =0;
            if (task != nullptr) { // this could be called from compiler runtime initialization (compiler blobs)
              compile_id = task->compile_id();
              comp_level = task->comp_level();
            }
            log_debug(aot, codecache)("%d (L%d): Address " INTPTR_FORMAT " (offset %d) for runtime target '%s' is missing in AOT Code Cache addresses table",
                                      compile_id, comp_level, p2i(addr), dist, (const char*)addr);
            assert(dist > (uint)(_all_max + MAX_STR_COUNT), "change encoding of distance");
            return dist;
          }
          reloc.print_current_on(tty);
          blob->print_on(tty);
          blob->print_code_on(tty);
          assert(false, "Address " INTPTR_FORMAT " for runtime target '%s+%d' is missing in AOT Code Cache addresses table", p2i(addr), func_name, offset);
        } else {
          reloc.print_current_on(tty);
          blob->print_on(tty);
          blob->print_code_on(tty);
          os::find(addr, tty);
          assert(false, "Address " INTPTR_FORMAT " for <unknown>/('%s') is missing in AOT Code Cache addresses table", p2i(addr), (const char*)addr);
        }
      } else {
        return _extrs_base + id;
      }
    }
  }
  return id;
}
3847
3848 #undef _extrs_max
3849 #undef _stubs_max
3850 #undef _shared_blobs_max
3851 #undef _C1_blobs_max
3852 #undef _C2_blobs_max
3853 #undef _blobs_max
3854 #undef _extrs_base
3855 #undef _stubs_base
3856 #undef _shared_blobs_base
3857 #undef _C1_blobs_base
3858 #undef _C2_blobs_base
3859 #undef _blobs_end
3860
3861 void AOTRuntimeConstants::initialize_from_runtime() {
3862 BarrierSet* bs = BarrierSet::barrier_set();
3863 if (bs->is_a(BarrierSet::CardTableBarrierSet)) {
3864 CardTableBarrierSet* ctbs = ((CardTableBarrierSet*)bs);
3865 _aot_runtime_constants._grain_shift = ctbs->grain_shift();
3866 _aot_runtime_constants._card_shift = ctbs->card_shift();
3867 }
3868 }
3869
// The single process-wide constants instance filled in by
// initialize_from_runtime().
AOTRuntimeConstants AOTRuntimeConstants::_aot_runtime_constants;

// Null-terminated list of the addresses of the constant fields above,
// exposed via the accessor helpers (grain_shift_address() etc.).
address AOTRuntimeConstants::_field_addresses_list[] = {
  grain_shift_address(),
  card_shift_address(),
  nullptr
};
3877
3878
// Close the cache to new nmethod readers, then wait for current readers to
// drain. Protocol on _nmethod_readers: a value >= 0 means "open with that
// many active readers"; we CAS it to -(cur + 1), which (a) blocks new
// ReadingMarks (they require a non-negative value) and (b) lets departing
// readers count the value up towards -1 (see ~ReadingMark()).
void AOTCodeCache::wait_for_no_nmethod_readers() {
  while (true) {
    int cur = Atomic::load(&_nmethod_readers);
    int upd = -(cur + 1); // negative marker; equals -1 once all readers leave
    if (cur >= 0 && Atomic::cmpxchg(&_nmethod_readers, cur, upd) == cur) {
      // Success, no new readers should appear.
      break;
    }
  }

  // Now wait for all readers to leave.
  SpinYield w;
  while (Atomic::load(&_nmethod_readers) != -1) {
    w.wait();
  }
}
3895
// Register this thread as an nmethod reader by incrementing the reader
// count, unless the cache has already been closed (negative count - see
// wait_for_no_nmethod_readers()), in which case _failed is set and the
// caller must not touch the cache.
AOTCodeCache::ReadingMark::ReadingMark() {
  while (true) {
    int cur = Atomic::load(&_nmethod_readers);
    if (cur < 0) {
      // Cache is already closed, cannot proceed.
      _failed = true;
      return;
    }
    if (Atomic::cmpxchg(&_nmethod_readers, cur, cur + 1) == cur) {
      // Successfully recorded ourselves as entered.
      _failed = false;
      return;
    }
  }
}
3911
// Deregister this reader. If the cache is still open (count > 0) we count
// down towards 0; if the cache was closed while we were reading (count
// negative), we count up towards -1 so wait_for_no_nmethod_readers() can
// observe the drain completing. A failed mark never registered, so it has
// nothing to undo.
AOTCodeCache::ReadingMark::~ReadingMark() {
  if (_failed) {
    return;
  }
  while (true) {
    int cur = Atomic::load(&_nmethod_readers);
    if (cur > 0) {
      // Cache is open, we are counting down towards 0.
      if (Atomic::cmpxchg(&_nmethod_readers, cur, cur - 1) == cur) {
        return;
      }
    } else {
      // Cache is closed, we are counting up towards -1.
      if (Atomic::cmpxchg(&_nmethod_readers, cur, cur + 1) == cur) {
        return;
      }
    }
  }
}
3931
// Print accumulated load/store timing to 'st'. Load-side timers are only
// meaningful when AOT code is being used; the store timer only when
// dumping.
void AOTCodeCache::print_timers_on(outputStream* st) {
  if (is_using_code()) {
    st->print_cr (" AOT Code Load Time: %7.3f s", _t_totalLoad.seconds());
    st->print_cr (" nmethod register: %7.3f s", _t_totalRegister.seconds());
    st->print_cr (" find AOT code entry: %7.3f s", _t_totalFind.seconds());
  }
  if (is_dumping_code()) {
    st->print_cr (" AOT Code Store Time: %7.3f s", _t_totalStore.seconds());
  }
}
3942
3943 AOTCodeStats AOTCodeStats::add_aot_code_stats(AOTCodeStats stats1, AOTCodeStats stats2) {
3944 AOTCodeStats result;
3945 for (int kind = AOTCodeEntry::None; kind < AOTCodeEntry::Kind_count; kind++) {
3946 result.ccstats._kind_cnt[kind] = stats1.entry_count(kind) + stats2.entry_count(kind);
3947 }
3948
3949 for (int lvl = CompLevel_none; lvl < AOTCompLevel_count; lvl++) {
3950 result.ccstats._nmethod_cnt[lvl] = stats1.nmethod_count(lvl) + stats2.nmethod_count(lvl);
3951 }
3952 result.ccstats._clinit_barriers_cnt = stats1.clinit_barriers_count() + stats2.clinit_barriers_count();
3953 return result;
3954 }
3955
// At VM exit, log (at debug level) how many entries were written to the
// cache, broken down by entry kind and - for nmethods - by tier, split
// into entries carried over from a previous cache ("old") and entries
// created in this run ("new").
void AOTCodeCache::log_stats_on_exit() {
  LogStreamHandle(Debug, aot, codecache, exit) log;
  if (log.is_enabled()) {
    AOTCodeStats prev_stats;    // entries loaded from the existing cache
    AOTCodeStats current_stats; // entries stored during this run
    AOTCodeStats total_stats;
    uint max_size = 0;          // largest single entry seen, for the log line

    uint load_count = (_load_header != nullptr) ? _load_header->entries_count() : 0;

    for (uint i = 0; i < load_count; i++) {
      prev_stats.collect_entry_stats(&_load_entries[i]);
      if (max_size < _load_entries[i].size()) {
        max_size = _load_entries[i].size();
      }
    }
    for (uint i = 0; i < _store_entries_cnt; i++) {
      current_stats.collect_entry_stats(&_store_entries[i]);
      if (max_size < _store_entries[i].size()) {
        max_size = _store_entries[i].size();
      }
    }
    total_stats = AOTCodeStats::add_aot_code_stats(prev_stats, current_stats);

    log.print_cr("Wrote %d AOTCodeEntry entries(%u max size) to AOT Code Cache",
                 total_stats.total_count(), max_size);
    for (uint kind = AOTCodeEntry::None; kind < AOTCodeEntry::Kind_count; kind++) {
      if (total_stats.entry_count(kind) > 0) {
        log.print_cr(" %s: total=%u(old=%u+new=%u)",
                     aot_code_entry_kind_name[kind], total_stats.entry_count(kind), prev_stats.entry_count(kind), current_stats.entry_count(kind));
        // Code entries get a further per-tier breakdown.
        if (kind == AOTCodeEntry::Code) {
          for (uint lvl = CompLevel_none; lvl < AOTCompLevel_count; lvl++) {
            if (total_stats.nmethod_count(lvl) > 0) {
              log.print_cr("  Tier %d: total=%u(old=%u+new=%u)",
                           lvl, total_stats.nmethod_count(lvl), prev_stats.nmethod_count(lvl), current_stats.nmethod_count(lvl));
            }
          }
        }
      }
    }
    log.print_cr("Total=%u(old=%u+new=%u)", total_stats.total_count(), prev_stats.total_count(), current_stats.total_count());
  }
}
3999
4000 static void print_helper1(outputStream* st, const char* name, int count) {
4001 if (count > 0) {
4002 st->print(" %s=%d", name, count);
4003 }
4004 }
4005
// Print per-kind and per-tier load statistics for the mapped cache to
// 'st', then (at trace level) dump a line per live nmethod in the code
// cache, flagging shared/AOT/preloaded status. No-op with a message when
// the cache could not be mapped.
void AOTCodeCache::print_statistics_on(outputStream* st) {
  AOTCodeCache* cache = open_for_use();
  if (cache != nullptr) {
    // Hold a reader mark so the cache cannot be closed underneath us.
    ReadingMark rdmk;
    if (rdmk.failed()) {
      // Cache is closed, cannot touch anything.
      return;
    }

    uint count = cache->_load_header->entries_count();
    AOTCodeEntry* load_entries = (AOTCodeEntry*)cache->addr(cache->_load_header->entries_offset());

    AOTCodeStats stats;
    for (uint i = 0; i < count; i++) {
      stats.collect_all_stats(&load_entries[i]);
    }

    for (uint kind = AOTCodeEntry::None; kind < AOTCodeEntry::Kind_count; kind++) {
      if (stats.entry_count(kind) > 0) {
        st->print(" %s:", aot_code_entry_kind_name[kind]);
        print_helper1(st, "total", stats.entry_count(kind));
        print_helper1(st, "loaded", stats.entry_loaded_count(kind));
        print_helper1(st, "invalidated", stats.entry_invalidated_count(kind));
        print_helper1(st, "failed", stats.entry_load_failed_count(kind));
        st->cr();
      }
      // Code entries get a further per-tier breakdown.
      if (kind == AOTCodeEntry::Code) {
        for (uint lvl = CompLevel_none; lvl < AOTCompLevel_count; lvl++) {
          if (stats.nmethod_count(lvl) > 0) {
            st->print(" AOT Code T%d", lvl);
            print_helper1(st, "total", stats.nmethod_count(lvl));
            print_helper1(st, "loaded", stats.nmethod_loaded_count(lvl));
            print_helper1(st, "invalidated", stats.nmethod_invalidated_count(lvl));
            print_helper1(st, "failed", stats.nmethod_load_failed_count(lvl));
            // Clinit barriers only exist at the top tier.
            if (lvl == AOTCompLevel_count-1) {
              print_helper1(st, "has_clinit_barriers", stats.clinit_barriers_count());
            }
            st->cr();
          }
        }
      }
    }
    LogStreamHandle(Debug, aot, codecache, init) log;
    if (log.is_enabled()) {
      AOTCodeCache::print_unused_entries_on(&log);
    }
    LogStreamHandle(Trace, aot, codecache) aot_info;
    // need a lock to traverse the code cache
    if (aot_info.is_enabled()) {
      MutexLocker locker(CodeCache_lock, Mutex::_no_safepoint_check_flag);
      NMethodIterator iter(NMethodIterator::all);
      while (iter.next()) {
        nmethod* nm = iter.method();
        if (nm->is_in_use() && !nm->is_native_method() && !nm->is_osr_method()) {
          // Flags: S = shared method, A = AOT code, P = preloaded.
          aot_info.print("%5d:%c%c%c%d:", nm->compile_id(),
                         (nm->method()->is_shared() ? 'S' : ' '),
                         (nm->is_aot() ? 'A' : ' '),
                         (nm->preloaded() ? 'P' : ' '),
                         nm->comp_level());
          print_helper(nm, &aot_info);
          aot_info.print(": ");
          CompileTask::print(&aot_info, nm, nullptr, true /*short_form*/);
          // NOTE(review): declared at Trace level like aot_info above - the
          // name suggests Debug may have been intended; confirm.
          LogStreamHandle(Trace, aot, codecache) aot_debug;
          if (aot_debug.is_enabled()) {
            MethodTrainingData* mtd = MethodTrainingData::find(methodHandle(Thread::current(), nm->method()));
            if (mtd != nullptr) {
              mtd->iterate_compiles([&](CompileTrainingData* ctd) {
                aot_debug.print(" CTD: "); ctd->print_on(&aot_debug); aot_debug.cr();
              });
            }
          }
        }
      }
    }
  } else {
    st->print_cr("failed to map code cache");
  }
}
4084
// One-line dump of this entry's identity (kind, id, cache offset/size,
// compilation level/id) and state flags.
void AOTCodeEntry::print(outputStream* st) const {
  st->print_cr(" AOT Code Cache entry " INTPTR_FORMAT " [kind: %d, id: " UINT32_FORMAT_X_0 ", offset: %d, size: %d, comp_level: %d, comp_id: %d, %s%s%s%s]",
               p2i(this), (int)_kind, _id, _offset, _size, _comp_level, _comp_id,
               (_not_entrant? "not_entrant" : "entrant"),
               (_loaded ? ", loaded" : ""),
               (_has_clinit_barriers ? ", has_clinit_barriers" : ""),
               (_for_preload ? ", for_preload" : ""));
}
4093
// Dump every entry in the mapped cache to 'st', in search-table order,
// with its stored name and state flags, followed by the reader's own
// per-entry details.
void AOTCodeCache::print_on(outputStream* st) {
  if (opened_cache != nullptr && opened_cache->for_use()) {
    // Hold a reader mark so the cache cannot be closed underneath us.
    ReadingMark rdmk;
    if (rdmk.failed()) {
      // Cache is closed, cannot touch anything.
      return;
    }

    st->print_cr("\nAOT Code Cache");
    uint count = opened_cache->_load_header->entries_count();
    // The search table is an array of [id, index] pairs (stride 2).
    uint* search_entries = (uint*)opened_cache->addr(opened_cache->_load_header->search_table_offset()); // [id, index]
    AOTCodeEntry* load_entries = (AOTCodeEntry*)opened_cache->addr(opened_cache->_load_header->entries_offset());

    for (uint i = 0; i < count; i++) {
      int index = search_entries[2*i + 1]; // second element of the pair
      AOTCodeEntry* entry = &(load_entries[index]);

      // The entry's name is stored in the cache at name_offset relative to
      // the entry's own position.
      uint entry_position = entry->offset();
      uint name_offset = entry->name_offset() + entry_position;
      const char* saved_name = opened_cache->addr(name_offset);

      st->print_cr("%4u: %10s idx:%4u Id:%u L%u size=%u '%s' %s%s%s%s",
                   i, aot_code_entry_kind_name[entry->kind()], index, entry->id(), entry->comp_level(),
                   entry->size(), saved_name,
                   entry->has_clinit_barriers() ? " has_clinit_barriers" : "",
                   entry->for_preload() ? " for_preload" : "",
                   entry->is_loaded() ? " loaded" : "",
                   entry->not_entrant() ? " not_entrant" : "");

      st->print_raw(" ");
      AOTCodeReader reader(opened_cache, entry, nullptr);
      reader.print_on(st);
    }
  }
}
4129
// Report cached code entries that were never loaded this run, using the
// method training data to explain why (e.g. superseded by an online
// compilation, still queued, or holder not initialized).
// NOTE(review): the 'st' parameter is unused - all output goes to the
// Info-level log stream created below; confirm whether callers expect
// output on 'st' instead.
void AOTCodeCache::print_unused_entries_on(outputStream* st) {
  LogStreamHandle(Info, aot, codecache, init) info;
  if (info.is_enabled()) {
    AOTCodeCache::iterate([&](AOTCodeEntry* entry) {
      if (entry->is_code() && !entry->is_loaded()) {
        MethodTrainingData* mtd = MethodTrainingData::find(methodHandle(Thread::current(), entry->method()));
        if (mtd != nullptr) {
          if (mtd->has_holder()) {
            if (mtd->holder()->method_holder()->is_initialized()) {
              ResourceMark rm;
              mtd->iterate_compiles([&](CompileTrainingData* ctd) {
                // Only report the training record matching this entry's tier.
                if ((uint)ctd->level() == entry->comp_level()) {
                  if (ctd->init_deps_left() == 0) {
                    nmethod* nm = mtd->holder()->code();
                    if (nm == nullptr) {
                      if (mtd->holder()->queued_for_compilation()) {
                        return; // scheduled for compilation
                      }
                    } else if ((uint)nm->comp_level() >= entry->comp_level()) {
                      return; // already online compiled and superseded by a more optimal method
                    }
                    info.print("AOT Code Cache entry not loaded: ");
                    ctd->print_on(&info);
                    info.cr();
                  }
                }
              });
            } else {
              // not yet initialized
            }
          } else {
            info.print("AOT Code Cache entry doesn't have a holder: ");
            mtd->print_on(&info);
            info.cr();
          }
        }
      }
    });
  }
}
4170
4171 void AOTCodeReader::print_on(outputStream* st) {
4172 uint entry_position = _entry->offset();
4173 set_read_position(entry_position);
4174
4175 // Read name
4176 uint name_offset = entry_position + _entry->name_offset();
4177 uint name_size = _entry->name_size(); // Includes '/0'
4178 const char* name = addr(name_offset);
4179
4180 st->print_cr(" name: %s", name);
4181 }
4182
|