12 * version 2 for more details (a copy is included in the LICENSE file that
13 * accompanied this code).
14 *
15 * You should have received a copy of the GNU General Public License version
16 * 2 along with this work; if not, write to the Free Software Foundation,
17 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
18 *
19 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
20 * or visit www.oracle.com if you need additional information or have any
21 * questions.
22 *
23 */
24
25
26 #include "asm/macroAssembler.hpp"
27 #include "cds/aotCacheAccess.hpp"
28 #include "cds/aotMetaspace.hpp"
29 #include "cds/cds_globals.hpp"
30 #include "cds/cdsConfig.hpp"
31 #include "cds/heapShared.hpp"
32 #include "ci/ciUtilities.hpp"
33 #include "classfile/javaAssertions.hpp"
34 #include "code/aotCodeCache.hpp"
35 #include "code/codeCache.hpp"
36 #include "gc/shared/cardTableBarrierSet.hpp"
37 #include "gc/shared/gcConfig.hpp"
38 #include "logging/logStream.hpp"
39 #include "memory/memoryReserver.hpp"
40 #include "runtime/deoptimization.hpp"
41 #include "runtime/flags/flagSetting.hpp"
42 #include "runtime/globals_extension.hpp"
43 #include "runtime/java.hpp"
44 #include "runtime/mutexLocker.hpp"
45 #include "runtime/os.inline.hpp"
46 #include "runtime/sharedRuntime.hpp"
47 #include "runtime/stubInfo.hpp"
48 #include "runtime/stubRoutines.hpp"
49 #include "utilities/copy.hpp"
50 #ifdef COMPILER1
51 #include "c1/c1_Runtime1.hpp"
52 #endif
53 #ifdef COMPILER2
54 #include "opto/runtime.hpp"
55 #endif
56 #if INCLUDE_G1GC
57 #include "gc/g1/g1BarrierSetRuntime.hpp"
58 #include "gc/g1/g1HeapRegion.hpp"
59 #endif
60 #if INCLUDE_SHENANDOAHGC
61 #include "gc/shenandoah/shenandoahRuntime.hpp"
62 #endif
63 #if INCLUDE_ZGC
64 #include "gc/z/zBarrierSetRuntime.hpp"
65 #endif
66
67 #include <errno.h>
68 #include <sys/stat.h>
69
// Printable names for AOTCodeEntry kinds, expanded from the
// DO_AOTCODEENTRY_KIND x-macro so this table stays in sync with the enum.
const char* aot_code_entry_kind_name[] = {
#define DECL_KIND_STRING(kind) XSTR(kind),
  DO_AOTCODEENTRY_KIND(DECL_KIND_STRING)
#undef DECL_KIND_STRING
};
75
// Report that an existing AOT Code Cache could not be used.
// With AbortVMOnAOTCodeFailure this terminates VM initialization;
// otherwise we log and fall back to running without the cache.
static void report_load_failure() {
  if (AbortVMOnAOTCodeFailure) {
    vm_exit_during_initialization("Unable to use AOT Code Cache.", nullptr);
  }
  log_info(aot, codecache, init)("Unable to use AOT Code Cache.");
  AOTCodeCache::disable_caching();
}
83
// Report that the AOT Code Cache could not be created (dump path).
// With AbortVMOnAOTCodeFailure the VM aborts immediately; otherwise we
// log and disable caching for the remainder of this run.
static void report_store_failure() {
  if (AbortVMOnAOTCodeFailure) {
    tty->print_cr("Unable to create AOT Code Cache.");
    vm_abort(false);
  }
  log_info(aot, codecache, exit)("Unable to create AOT Code Cache.");
  AOTCodeCache::disable_caching();
}
92
// The sequence of AOT code caching flag and parameter settings.
94 //
95 // 1. The initial AOT code caching flags setting is done
110
// The next methods determine which action we take with AOT code depending
// on the phase of the AOT process: assembly or production.

114 bool AOTCodeCache::is_dumping_adapter() {
115 return AOTAdapterCaching && is_on_for_dump();
116 }
117
118 bool AOTCodeCache::is_using_adapter() {
119 return AOTAdapterCaching && is_on_for_use();
120 }
121
122 bool AOTCodeCache::is_dumping_stub() {
123 return AOTStubCaching && is_on_for_dump();
124 }
125
126 bool AOTCodeCache::is_using_stub() {
127 return AOTStubCaching && is_on_for_use();
128 }
129
// The next methods can be called regardless of the AOT code cache status.
// They are called initially during flags parsing, and the settings are
// finalized in AOTCodeCache::initialize().
// Turn both caching kinds on ergonomically, but only if the user has not
// set the flags explicitly on the command line.
void AOTCodeCache::enable_caching() {
  FLAG_SET_ERGO_IF_DEFAULT(AOTStubCaching, true);
  FLAG_SET_ERGO_IF_DEFAULT(AOTAdapterCaching, true);
}
137
// Force both caching kinds off (overrides any command-line setting).
void AOTCodeCache::disable_caching() {
  FLAG_SET_ERGO(AOTStubCaching, false);
  FLAG_SET_ERGO(AOTAdapterCaching, false);
}
142
143 bool AOTCodeCache::is_caching_enabled() {
144 return AOTStubCaching || AOTAdapterCaching;
145 }
146
147 static uint32_t encode_id(AOTCodeEntry::Kind kind, int id) {
148 assert(AOTCodeEntry::is_valid_entry_kind(kind), "invalid AOTCodeEntry kind %d", (int)kind);
149 // There can be a conflict of id between an Adapter and *Blob, but that should not cause any functional issue
150 // becasue both id and kind are used to find an entry, and that combination should be unique
151 if (kind == AOTCodeEntry::Adapter) {
152 return id;
153 } else if (kind == AOTCodeEntry::SharedBlob) {
154 assert(StubInfo::is_shared(static_cast<BlobId>(id)), "not a shared blob id %d", id);
155 return id;
156 } else if (kind == AOTCodeEntry::C1Blob) {
157 assert(StubInfo::is_c1(static_cast<BlobId>(id)), "not a c1 blob id %d", id);
158 return id;
159 } else {
160 // kind must be AOTCodeEntry::C2Blob
161 assert(StubInfo::is_c2(static_cast<BlobId>(id)), "not a c2 blob id %d", id);
162 return id;
163 }
164 }
165
// Effective maximum cache size: AOTCodeMaxSize rounded up to the VM
// allocation granularity (set in AOTCodeCache::initialize()).
static uint _max_aot_code_size = 0;
uint AOTCodeCache::max_aot_code_size() {
  return _max_aot_code_size;
}
170
// It is called from AOTMetaspace::initialize_shared_spaces()
// which is called from universe_init().
// At this point all AOT class linking settings are finalized
// and the AOT cache is open, so we can map the AOT code region.
// Decide whether this run dumps to or uses the AOT code cache, validate the
// relevant flags, and open the cache. On any failure, caching is disabled
// (or the VM exits, depending on AbortVMOnAOTCodeFailure).
void AOTCodeCache::initialize() {
#if defined(ZERO) || !(defined(AMD64) || defined(AARCH64))
  log_info(aot, codecache, init)("AOT Code Cache is not supported on this platform.");
  disable_caching();
  return;
#else
  if (FLAG_IS_DEFAULT(AOTCache)) {
    log_info(aot, codecache, init)("AOT Code Cache is not used: AOTCache is not specified.");
    disable_caching();
    return; // AOTCache must be specified to dump and use AOT code
  }

  // Disable stubs caching until JDK-8357398 is fixed.
  FLAG_SET_ERGO(AOTStubCaching, false);

  if (VerifyOops) {
    // Disable AOT stubs caching when VerifyOops flag is on.
    // Verify oops code generated a lot of C strings which overflow
    // AOT C string table (which has fixed size).
    // AOT C string table will be reworked later to handle such cases.
    //
    // Note: AOT adapters are not affected - they don't have oop operations.
    log_info(aot, codecache, init)("AOT Stubs Caching is not supported with VerifyOops.");
    FLAG_SET_ERGO(AOTStubCaching, false);
  }

  bool is_dumping = false;
  bool is_using = false;
  if (CDSConfig::is_dumping_final_static_archive() && CDSConfig::is_dumping_aot_linked_classes()) {
    is_dumping = true;
    enable_caching();
    is_dumping = is_caching_enabled(); // flags may still veto the dump
  } else if (CDSConfig::is_using_archive() && CDSConfig::is_using_aot_linked_classes()) {
    enable_caching();
    is_using = is_caching_enabled();
  } else {
    log_info(aot, codecache, init)("AOT Code Cache is not used: AOT Class Linking is not used.");
    disable_caching();
    return; // nothing to do
  }
  if (!(is_dumping || is_using)) {
    disable_caching();
    return; // AOT code caching disabled on command line
  }
  // Round the configured maximum size up to VM allocation granularity.
  _max_aot_code_size = AOTCodeMaxSize;
  if (!FLAG_IS_DEFAULT(AOTCodeMaxSize)) {
    if (!is_aligned(AOTCodeMaxSize, os::vm_allocation_granularity())) {
      _max_aot_code_size = align_up(AOTCodeMaxSize, os::vm_allocation_granularity());
      log_debug(aot,codecache,init)("Max AOT Code Cache size is aligned up to %uK", (int)(max_aot_code_size()/K));
    }
  }
  // When using, an empty AOT code region means there is nothing to load.
  size_t aot_code_size = is_using ? AOTCacheAccess::get_aot_code_region_size() : 0;
  if (is_using && aot_code_size == 0) {
    log_info(aot, codecache, init)("AOT Code Cache is empty");
    disable_caching();
    return;
  }
  if (!open_cache(is_dumping, is_using)) {
    if (is_using) {
      report_load_failure();
    } else {
      report_store_failure();
    }
    return;
  }
  if (is_dumping) {
    FLAG_SET_DEFAULT(ForceUnreachable, true);
  }
  FLAG_SET_DEFAULT(DelayCompilerStubsGeneration, false);
#endif // !ZERO && (AMD64 || AARCH64)
}
246
// Cache instance under construction/verification; promoted to _cache by init2().
static AOTCodeCache* opened_cache = nullptr; // Use this until we verify the cache
AOTCodeCache* AOTCodeCache::_cache = nullptr;
// Debug-only: records that init2() has run, for asserts elsewhere.
DEBUG_ONLY( bool AOTCodeCache::_passed_init2 = false; )
250
251 // It is called after universe_init() when all GC settings are finalized.
void AOTCodeCache::init2() {
  DEBUG_ONLY( _passed_init2 = true; )
  if (opened_cache == nullptr) {
    return;
  }
  // Reject a cache recorded under an incompatible VM configuration.
  if (!opened_cache->verify_config()) {
    delete opened_cache;
    opened_cache = nullptr;
    report_load_failure();
    return;
  }

  // initialize aot runtime constants as appropriate to this runtime
  AOTRuntimeConstants::initialize_from_runtime();

  // initialize the table of external routines so we can save
  // generated code blobs that reference them
  AOTCodeAddressTable* table = opened_cache->_table;
  assert(table != nullptr, "should be initialized already");
  table->init_extrs();

  // Now cache and address table are ready for AOT code generation
  _cache = opened_cache;
}
276
277 bool AOTCodeCache::open_cache(bool is_dumping, bool is_using) {
278 opened_cache = new AOTCodeCache(is_dumping, is_using);
279 if (opened_cache->failed()) {
280 delete opened_cache;
281 opened_cache = nullptr;
282 return false;
283 }
284 return true;
285 }
286
287 void AOTCodeCache::close() {
288 if (is_on()) {
289 delete _cache; // Free memory
290 _cache = nullptr;
291 opened_cache = nullptr;
292 }
293 }
294
295 #define DATA_ALIGNMENT HeapWordSize
296
// Constructor: when using, maps the archived AOT code region and validates
// its header; when dumping, allocates an aligned in-memory store buffer.
// On any failure _failed is set and the caller must check failed().
AOTCodeCache::AOTCodeCache(bool is_dumping, bool is_using) :
  _load_header(nullptr),
  _load_buffer(nullptr),
  _store_buffer(nullptr),
  _C_store_buffer(nullptr),
  _write_position(0),
  _load_size(0),
  _store_size(0),
  _for_use(is_using),
  _for_dump(is_dumping),
  _closing(false),
  _failed(false),
  _lookup_failed(false),
  _table(nullptr),
  _load_entries(nullptr),
  _search_entries(nullptr),
  _store_entries(nullptr),
  _C_strings_buf(nullptr),
  _store_entries_cnt(0)
{
  // Read header at the beginning of cache
  if (_for_use) {
    // Read cache
    size_t load_size = AOTCacheAccess::get_aot_code_region_size();
    ReservedSpace rs = MemoryReserver::reserve(load_size, mtCode);
    if (!rs.is_reserved()) {
      log_warning(aot, codecache, init)("Failed to reserved %u bytes of memory for mapping AOT code region into AOT Code Cache", (uint)load_size);
      set_failed();
      return;
    }
    if (!AOTCacheAccess::map_aot_code_region(rs)) {
      log_warning(aot, codecache, init)("Failed to read/mmap cached code region into AOT Code Cache");
      set_failed();
      return;
    }

    _load_size = (uint)load_size;
    _load_buffer = (char*)rs.base();
    assert(is_aligned(_load_buffer, DATA_ALIGNMENT), "load_buffer is not aligned");
    log_debug(aot, codecache, init)("Mapped %u bytes at address " INTPTR_FORMAT " at AOT Code Cache", _load_size, p2i(_load_buffer));

    // The header sits at offset 0 of the mapped region; verify version/size.
    _load_header = (Header*)addr(0);
    if (!_load_header->verify(_load_size)) {
      set_failed();
      return;
    }
    log_info (aot, codecache, init)("Loaded %u AOT code entries from AOT Code Cache", _load_header->entries_count());
    log_debug(aot, codecache, init)("  Adapters:  total=%u", _load_header->adapters_count());
    log_debug(aot, codecache, init)("  Shared Blobs: total=%u", _load_header->shared_blobs_count());
    log_debug(aot, codecache, init)("  C1 Blobs: total=%u", _load_header->C1_blobs_count());
    log_debug(aot, codecache, init)("  C2 Blobs: total=%u", _load_header->C2_blobs_count());
    log_debug(aot, codecache, init)("  AOT code cache size: %u bytes", _load_header->cache_size());

    // Read strings
    load_strings();
  }
  if (_for_dump) {
    // Over-allocate by DATA_ALIGNMENT so the usable buffer can be aligned up.
    _C_store_buffer = NEW_C_HEAP_ARRAY(char, max_aot_code_size() + DATA_ALIGNMENT, mtCode);
    _store_buffer = align_up(_C_store_buffer, DATA_ALIGNMENT);
    // Entries allocated at the end of buffer in reverse (as on stack).
    _store_entries = (AOTCodeEntry*)align_up(_C_store_buffer + max_aot_code_size(), DATA_ALIGNMENT);
    log_debug(aot, codecache, init)("Allocated store buffer at address " INTPTR_FORMAT " of size %u", p2i(_store_buffer), max_aot_code_size());
  }
  _table = new AOTCodeAddressTable();
}
362
363 void AOTCodeCache::init_early_stubs_table() {
364 AOTCodeAddressTable* table = addr_table();
365 if (table != nullptr) {
366 table->init_early_stubs();
367 }
368 }
369
370 void AOTCodeCache::init_shared_blobs_table() {
371 AOTCodeAddressTable* table = addr_table();
372 if (table != nullptr) {
373 table->init_shared_blobs();
374 }
375 }
376
377 void AOTCodeCache::init_early_c1_table() {
378 AOTCodeAddressTable* table = addr_table();
379 if (table != nullptr) {
380 table->init_early_c1();
381 }
382 }
383
// Destructor: finalizes a dump (finish_write) under Compile_lock, then
// releases the store buffer and the address table. The mapped load region
// itself is not unmapped here; only the pointer is cleared.
AOTCodeCache::~AOTCodeCache() {
  if (_closing) {
    return; // Already closed
  }
  // Stop any further access to cache.
  _closing = true;

  MutexLocker ml(Compile_lock);
  if (for_dump()) { // Finalize cache
    finish_write();
  }
  _load_buffer = nullptr;
  if (_C_store_buffer != nullptr) {
    FREE_C_HEAP_ARRAY(char, _C_store_buffer);
    _C_store_buffer = nullptr;
    _store_buffer = nullptr;
  }
  if (_table != nullptr) {
    // The C-strings table is shared with readers; guard deletion with its lock.
    MutexLocker ml(AOTCodeCStrings_lock, Mutex::_no_safepoint_check_flag);
    delete _table;
    _table = nullptr;
  }
}
407
408 void AOTCodeCache::Config::record(uint cpu_features_offset) {
409 _flags = 0;
410 #ifdef ASSERT
414 _flags |= compressedOops;
415 }
416 if (UseCompressedClassPointers) {
417 _flags |= compressedClassPointers;
418 }
419 if (UseTLAB) {
420 _flags |= useTLAB;
421 }
422 if (JavaAssertions::systemClassDefault()) {
423 _flags |= systemClassAssertions;
424 }
425 if (JavaAssertions::userClassDefault()) {
426 _flags |= userClassAssertions;
427 }
428 if (EnableContended) {
429 _flags |= enableContendedPadding;
430 }
431 if (RestrictContended) {
432 _flags |= restrictContendedPadding;
433 }
434 _compressedOopShift = CompressedOops::shift();
435 _compressedOopBase = CompressedOops::base();
436 _compressedKlassShift = CompressedKlassPointers::shift();
437 _contendedPaddingWidth = ContendedPaddingWidth;
438 _gc = (uint)Universe::heap()->kind();
439 _cpu_features_offset = cpu_features_offset;
440 }
441
442 bool AOTCodeCache::Config::verify_cpu_features(AOTCodeCache* cache) const {
443 LogStreamHandle(Debug, aot, codecache, init) log;
444 uint offset = _cpu_features_offset;
445 uint cpu_features_size = *(uint *)cache->addr(offset);
446 assert(cpu_features_size == (uint)VM_Version::cpu_features_size(), "must be");
447 offset += sizeof(uint);
448
449 void* cached_cpu_features_buffer = (void *)cache->addr(offset);
450 if (log.is_enabled()) {
451 ResourceMark rm; // required for stringStream::as_string()
452 stringStream ss;
453 VM_Version::get_cpu_features_name(cached_cpu_features_buffer, ss);
454 log.print_cr("CPU features recorded in AOTCodeCache: %s", ss.as_string());
455 }
456
457 if (VM_Version::supports_features(cached_cpu_features_buffer)) {
458 if (log.is_enabled()) {
476 }
477 return false;
478 }
479 return true;
480 }
481
// Validate the recorded VM configuration against the current runtime.
// Hard mismatches (debug/product build, GC, compressed class pointers,
// CPU features) reject the whole cache; oop-compression mismatches only
// disable stub caching (adapters have no oop operations).
bool AOTCodeCache::Config::verify(AOTCodeCache* cache) const {
  // First checks affect all cached AOT code
#ifdef ASSERT
  if ((_flags & debugVM) == 0) {
    log_debug(aot, codecache, init)("AOT Code Cache disabled: it was created by product VM, it can't be used by debug VM");
    return false;
  }
#else
  if ((_flags & debugVM) != 0) {
    log_debug(aot, codecache, init)("AOT Code Cache disabled: it was created by debug VM, it can't be used by product VM");
    return false;
  }
#endif

  CollectedHeap::Name aot_gc = (CollectedHeap::Name)_gc;
  if (aot_gc != Universe::heap()->kind()) {
    log_debug(aot, codecache, init)("AOT Code Cache disabled: it was created with different GC: %s vs current %s", GCConfig::hs_err_name(aot_gc), GCConfig::hs_err_name());
    return false;
  }

  if (((_flags & compressedClassPointers) != 0) != UseCompressedClassPointers) {
    log_debug(aot, codecache, init)("AOT Code Cache disabled: it was created with UseCompressedClassPointers = %s", UseCompressedClassPointers ? "false" : "true");
    return false;
  }
  if (_compressedKlassShift != (uint)CompressedKlassPointers::shift()) {
    log_debug(aot, codecache, init)("AOT Code Cache disabled: it was created with CompressedKlassPointers::shift() = %d vs current %d", _compressedKlassShift, CompressedKlassPointers::shift());
    return false;
  }

  // The following checks do not affect AOT adapters caching

  // NOTE(review): these paths assign AOTStubCaching directly rather than via
  // FLAG_SET_ERGO as disable_caching() does — confirm this is intentional.
  if (((_flags & compressedOops) != 0) != UseCompressedOops) {
    log_debug(aot, codecache, init)("AOT Code Cache disabled: it was created with UseCompressedOops = %s", UseCompressedOops ? "false" : "true");
    AOTStubCaching = false;
  }
  if (_compressedOopShift != (uint)CompressedOops::shift()) {
    log_debug(aot, codecache, init)("AOT Code Cache disabled: it was created with different CompressedOops::shift(): %d vs current %d", _compressedOopShift, CompressedOops::shift());
    AOTStubCaching = false;
  }

  // This should be the last check as it only disables AOTStubCaching
  if ((_compressedOopBase == nullptr || CompressedOops::base() == nullptr) && (_compressedOopBase != CompressedOops::base())) {
    log_debug(aot, codecache, init)("AOTStubCaching is disabled: incompatible CompressedOops::base(): %p vs current %p", _compressedOopBase, CompressedOops::base());
    AOTStubCaching = false;
  }

  if (!verify_cpu_features(cache)) {
    return false;
  }
  return true;
}
533
// Sanity-check the header read from the mapped region: the format version
// must match this VM's AOT_CODE_VERSION and the mapped size must be large
// enough to hold the recorded cache contents.
bool AOTCodeCache::Header::verify(uint load_size) const {
  if (_version != AOT_CODE_VERSION) {
    log_debug(aot, codecache, init)("AOT Code Cache disabled: different AOT Code version %d vs %d recorded in AOT Code header", AOT_CODE_VERSION, _version);
    return false;
  }
  if (load_size < _cache_size) {
    log_debug(aot, codecache, init)("AOT Code Cache disabled: AOT Code Cache size %d < %d recorded in AOT Code header", load_size, _cache_size);
    return false;
  }
  return true;
}
545
546 AOTCodeCache* AOTCodeCache::open_for_use() {
547 if (AOTCodeCache::is_on_for_use()) {
548 return AOTCodeCache::cache();
549 }
550 return nullptr;
551 }
552
553 AOTCodeCache* AOTCodeCache::open_for_dump() {
554 if (AOTCodeCache::is_on_for_dump()) {
555 AOTCodeCache* cache = AOTCodeCache::cache();
556 cache->clear_lookup_failed(); // Reset bit
557 return cache;
558 }
559 return nullptr;
560 }
561
// memcpy wrapper with trace logging. The assert rejects size == 0 and sizes
// above INT_MAX (which become negative after the int cast).
void copy_bytes(const char* from, address to, uint size) {
  assert((int)size > 0, "sanity");
  memcpy(to, from, size);
  log_trace(aot, codecache)("Copied %d bytes from " INTPTR_FORMAT " to " INTPTR_FORMAT, size, p2i(from), p2i(to));
}
567
// A reader decodes one AOTCodeEntry out of the cache's mapped load buffer.
AOTCodeReader::AOTCodeReader(AOTCodeCache* cache, AOTCodeEntry* entry) {
  _cache = cache;
  _entry = entry;
  _load_buffer = cache->cache_buffer();
  _read_position = 0;
  _lookup_failed = false;
}
575
576 void AOTCodeReader::set_read_position(uint pos) {
577 if (pos == _read_position) {
578 return;
579 }
580 assert(pos < _cache->load_size(), "offset:%d >= file size:%d", pos, _cache->load_size());
581 _read_position = pos;
582 }
583
584 bool AOTCodeCache::set_write_position(uint pos) {
585 if (pos == _write_position) {
586 return true;
587 }
588 if (_store_size < _write_position) {
589 _store_size = _write_position; // Adjust during write
590 }
591 assert(pos < _store_size, "offset:%d >= file size:%d", pos, _store_size);
592 _write_position = pos;
635 if (nbytes == 0) {
636 return 0;
637 }
638 uint new_position = _write_position + nbytes;
639 if (new_position >= (uint)((char*)_store_entries - _store_buffer)) {
640 log_warning(aot, codecache)("Failed to write %d bytes at offset %d to AOT Code Cache. Increase AOTCodeMaxSize.",
641 nbytes, _write_position);
642 set_failed();
643 report_store_failure();
644 return 0;
645 }
646 copy_bytes((const char* )buffer, (address)(_store_buffer + _write_position), nbytes);
647 log_trace(aot, codecache)("Wrote %d bytes at offset %d to AOT Code Cache", nbytes, _write_position);
648 _write_position += nbytes;
649 if (_store_size < _write_position) {
650 _store_size = _write_position;
651 }
652 return nbytes;
653 }
654
// Placement allocation: AOTCodeEntry instances live inside the cache's store
// buffer, handed out by add_entry() (entries grow down from the buffer end).
void* AOTCodeEntry::operator new(size_t x, AOTCodeCache* cache) {
  return (void*)(cache->add_entry());
}
658
659 static bool check_entry(AOTCodeEntry::Kind kind, uint id, AOTCodeEntry* entry) {
660 if (entry->kind() == kind) {
661 assert(entry->id() == id, "sanity");
662 return true; // Found
663 }
664 return false;
665 }
666
// Look up a cached entry by (kind, id). The search table is an id-sorted
// array of [id, index] pairs; binary search finds a matching id, then a
// short linear scan in both directions resolves id collisions between
// different kinds (see encode_id()). Returns null when not found.
AOTCodeEntry* AOTCodeCache::find_entry(AOTCodeEntry::Kind kind, uint id) {
  assert(_for_use, "sanity");
  uint count = _load_header->entries_count();
  if (_load_entries == nullptr) {
    // Lazily locate the search table and entries array inside the mapped region.
    _search_entries = (uint*)addr(_load_header->entries_offset()); // [id, index]
    _load_entries = (AOTCodeEntry*)(_search_entries + 2 * count);
    log_debug(aot, codecache, init)("Read %d entries table at offset %d from AOT Code Cache", count, _load_header->entries_offset());
  }
  // Binary search
  int l = 0;
  int h = count - 1;
  while (l <= h) {
    int mid = (l + h) >> 1;
    int ix = mid * 2;
    uint is = _search_entries[ix];
    if (is == id) {
      int index = _search_entries[ix + 1];
      AOTCodeEntry* entry = &(_load_entries[index]);
      if (check_entry(kind, id, entry)) {
        return entry; // Found
      }
      // Linear search around to handle id collision
      for (int i = mid - 1; i >= l; i--) { // search back
        ix = i * 2;
        is = _search_entries[ix];
        if (is != id) {
          break;
        }
        index = _search_entries[ix + 1];
        AOTCodeEntry* entry = &(_load_entries[index]);
        if (check_entry(kind, id, entry)) {
          return entry; // Found
        }
      }
      for (int i = mid + 1; i <= h; i++) { // search forward
        ix = i * 2;
        is = _search_entries[ix];
        if (is != id) {
          break;
        }
        index = _search_entries[ix + 1];
        AOTCodeEntry* entry = &(_load_entries[index]);
        if (check_entry(kind, id, entry)) {
          return entry; // Found
        }
      }
      break; // Not found match
    } else if (is < id) {
      l = mid + 1;
    } else {
      h = mid - 1;
    }
  }
  return nullptr;
}
723
extern "C" {
  // qsort comparator for the [id, index] search table: ascending by id
  // (only the first uint of each pair is compared).
  static int uint_cmp(const void *i, const void *j) {
    uint a = *(uint *)i;
    uint b = *(uint *)j;
    if (a == b) {
      return 0;
    }
    return (a < b) ? -1 : 1;
  }
}
731
// Write the CPU feature set at 'buffer': a uint size prefix followed by the
// feature data, then advance 'buffer' past it (padded to DATA_ALIGNMENT).
void AOTCodeCache::store_cpu_features(char*& buffer, uint buffer_size) {
  uint* size_ptr = (uint *)buffer;
  *size_ptr = buffer_size;
  buffer += sizeof(uint);

  VM_Version::store_cpu_features(buffer);
  log_debug(aot, codecache, exit)("CPU features recorded in AOTCodeCache: %s", VM_Version::features_string());
  buffer += buffer_size;
  buffer = align_up(buffer, DATA_ALIGNMENT);
}
742
743 bool AOTCodeCache::finish_write() {
744 if (!align_write()) {
745 return false;
746 }
747 uint strings_offset = _write_position;
748 int strings_count = store_strings();
749 if (strings_count < 0) {
750 return false;
751 }
752 if (!align_write()) {
753 return false;
754 }
755 uint strings_size = _write_position - strings_offset;
756
757 uint entries_count = 0; // Number of entrant (useful) code entries
758 uint entries_offset = _write_position;
759
760 uint store_count = _store_entries_cnt;
761 if (store_count > 0) {
762 uint header_size = (uint)align_up(sizeof(AOTCodeCache::Header), DATA_ALIGNMENT);
763 uint code_count = store_count;
764 uint search_count = code_count * 2;
765 uint search_size = search_count * sizeof(uint);
766 uint entries_size = (uint)align_up(code_count * sizeof(AOTCodeEntry), DATA_ALIGNMENT); // In bytes
767 // _write_position includes size of code and strings
768 uint code_alignment = code_count * DATA_ALIGNMENT; // We align_up code size when storing it.
769 uint cpu_features_size = VM_Version::cpu_features_size();
770 uint total_cpu_features_size = sizeof(uint) + cpu_features_size; // sizeof(uint) to store cpu_features_size
771 uint total_size = header_size + _write_position + code_alignment + search_size + entries_size +
772 align_up(total_cpu_features_size, DATA_ALIGNMENT);
773 assert(total_size < max_aot_code_size(), "AOT Code size (" UINT32_FORMAT " bytes) is greater than AOTCodeMaxSize(" UINT32_FORMAT " bytes).", total_size, max_aot_code_size());
774
775 // Allocate in AOT Cache buffer
776 char* buffer = (char *)AOTCacheAccess::allocate_aot_code_region(total_size + DATA_ALIGNMENT);
777 char* start = align_up(buffer, DATA_ALIGNMENT);
778 char* current = start + header_size; // Skip header
779
780 uint cpu_features_offset = current - start;
781 store_cpu_features(current, cpu_features_size);
782 assert(is_aligned(current, DATA_ALIGNMENT), "sanity check");
783 assert(current < start + total_size, "sanity check");
784
785 // Create ordered search table for entries [id, index];
786 uint* search = NEW_C_HEAP_ARRAY(uint, search_count, mtCode);
787
788 AOTCodeEntry* entries_address = _store_entries; // Pointer to latest entry
789 uint adapters_count = 0;
790 uint shared_blobs_count = 0;
791 uint C1_blobs_count = 0;
792 uint C2_blobs_count = 0;
793 uint max_size = 0;
794 // AOTCodeEntry entries were allocated in reverse in store buffer.
795 // Process them in reverse order to cache first code first.
796 for (int i = store_count - 1; i >= 0; i--) {
797 entries_address[i].set_next(nullptr); // clear pointers before storing data
798 uint size = align_up(entries_address[i].size(), DATA_ALIGNMENT);
799 if (size > max_size) {
800 max_size = size;
801 }
802 copy_bytes((_store_buffer + entries_address[i].offset()), (address)current, size);
803 entries_address[i].set_offset(current - start); // New offset
804 current += size;
805 uint n = write_bytes(&(entries_address[i]), sizeof(AOTCodeEntry));
806 if (n != sizeof(AOTCodeEntry)) {
807 FREE_C_HEAP_ARRAY(uint, search);
808 return false;
809 }
810 search[entries_count*2 + 0] = entries_address[i].id();
811 search[entries_count*2 + 1] = entries_count;
812 entries_count++;
813 AOTCodeEntry::Kind kind = entries_address[i].kind();
814 if (kind == AOTCodeEntry::Adapter) {
815 adapters_count++;
816 } else if (kind == AOTCodeEntry::SharedBlob) {
817 shared_blobs_count++;
818 } else if (kind == AOTCodeEntry::C1Blob) {
819 C1_blobs_count++;
820 } else if (kind == AOTCodeEntry::C2Blob) {
821 C2_blobs_count++;
822 }
823 }
824 if (entries_count == 0) {
825 log_info(aot, codecache, exit)("AOT Code Cache was not created: no entires");
826 FREE_C_HEAP_ARRAY(uint, search);
827 return true; // Nothing to write
828 }
829 assert(entries_count <= store_count, "%d > %d", entries_count, store_count);
830 // Write strings
831 if (strings_count > 0) {
832 copy_bytes((_store_buffer + strings_offset), (address)current, strings_size);
833 strings_offset = (current - start); // New offset
834 current += strings_size;
835 }
836
837 uint new_entries_offset = (current - start); // New offset
838 // Sort and store search table
839 qsort(search, entries_count, 2*sizeof(uint), uint_cmp);
840 search_size = 2 * entries_count * sizeof(uint);
841 copy_bytes((const char*)search, (address)current, search_size);
842 FREE_C_HEAP_ARRAY(uint, search);
843 current += search_size;
844
845 // Write entries
846 entries_size = entries_count * sizeof(AOTCodeEntry); // New size
847 copy_bytes((_store_buffer + entries_offset), (address)current, entries_size);
848 current += entries_size;
849 uint size = (current - start);
850 assert(size <= total_size, "%d > %d", size , total_size);
851
852 log_debug(aot, codecache, exit)(" Adapters: total=%u", adapters_count);
853 log_debug(aot, codecache, exit)(" Shared Blobs: total=%d", shared_blobs_count);
854 log_debug(aot, codecache, exit)(" C1 Blobs: total=%d", C1_blobs_count);
855 log_debug(aot, codecache, exit)(" C2 Blobs: total=%d", C2_blobs_count);
856 log_debug(aot, codecache, exit)(" AOT code cache size: %u bytes, max entry's size: %u bytes", size, max_size);
857
858 // Finalize header
859 AOTCodeCache::Header* header = (AOTCodeCache::Header*)start;
860 header->init(size, (uint)strings_count, strings_offset,
861 entries_count, new_entries_offset,
862 adapters_count, shared_blobs_count,
863 C1_blobs_count, C2_blobs_count, cpu_features_offset);
864
865 log_info(aot, codecache, exit)("Wrote %d AOT code entries to AOT Code Cache", entries_count);
866 }
867 return true;
868 }
869
870 //------------------Store/Load AOT code ----------------------
871
872 bool AOTCodeCache::store_code_blob(CodeBlob& blob, AOTCodeEntry::Kind entry_kind, uint id, const char* name) {
873 AOTCodeCache* cache = open_for_dump();
874 if (cache == nullptr) {
875 return false;
876 }
877 assert(AOTCodeEntry::is_valid_entry_kind(entry_kind), "invalid entry_kind %d", entry_kind);
878
879 if (AOTCodeEntry::is_adapter(entry_kind) && !is_dumping_adapter()) {
880 return false;
881 }
882 if (AOTCodeEntry::is_blob(entry_kind) && !is_dumping_stub()) {
883 return false;
884 }
885 log_debug(aot, codecache, stubs)("Writing blob '%s' (id=%u, kind=%s) to AOT Code Cache", name, id, aot_code_entry_kind_name[entry_kind]);
920 return false;
921 }
922 CodeBlob::archive_blob(&blob, archive_buffer);
923
924 uint reloc_data_size = blob.relocation_size();
925 n = cache->write_bytes((address)blob.relocation_begin(), reloc_data_size);
926 if (n != reloc_data_size) {
927 return false;
928 }
929
930 bool has_oop_maps = false;
931 if (blob.oop_maps() != nullptr) {
932 if (!cache->write_oop_map_set(blob)) {
933 return false;
934 }
935 has_oop_maps = true;
936 }
937
938 #ifndef PRODUCT
939 // Write asm remarks
940 if (!cache->write_asm_remarks(blob)) {
941 return false;
942 }
943 if (!cache->write_dbg_strings(blob)) {
944 return false;
945 }
946 #endif /* PRODUCT */
947
948 if (!cache->write_relocations(blob)) {
949 if (!cache->failed()) {
950 // We may miss an address in AOT table - skip this code blob.
951 cache->set_write_position(entry_position);
952 }
953 return false;
954 }
955
956 uint entry_size = cache->_write_position - entry_position;
957 AOTCodeEntry* entry = new(cache) AOTCodeEntry(entry_kind, encode_id(entry_kind, id),
958 entry_position, entry_size, name_offset, name_size,
959 blob_offset, has_oop_maps, blob.content_begin());
960 log_debug(aot, codecache, stubs)("Wrote code blob '%s' (id=%u, kind=%s) to AOT Code Cache", name, id, aot_code_entry_kind_name[entry_kind]);
961 return true;
962 }
963
// Convenience overload for stub/blob ids: derives the entry name from StubInfo.
bool AOTCodeCache::store_code_blob(CodeBlob& blob, AOTCodeEntry::Kind entry_kind, BlobId id) {
  assert(AOTCodeEntry::is_blob(entry_kind),
         "wrong entry kind for blob id %s", StubInfo::name(id));
  return store_code_blob(blob, entry_kind, (uint)id, StubInfo::name(id));
}
969
// Load a code blob from the cache, or return null when the cache is off,
// this kind of caching is disabled, the entry is absent, or materializing
// the blob fails (e.g. no space left in the code cache).
CodeBlob* AOTCodeCache::load_code_blob(AOTCodeEntry::Kind entry_kind, uint id, const char* name) {
  AOTCodeCache* cache = open_for_use();
  if (cache == nullptr) {
    return nullptr;
  }
  assert(AOTCodeEntry::is_valid_entry_kind(entry_kind), "invalid entry_kind %d", entry_kind);

  if (AOTCodeEntry::is_adapter(entry_kind) && !is_using_adapter()) {
    return nullptr;
  }
  if (AOTCodeEntry::is_blob(entry_kind) && !is_using_stub()) {
    return nullptr;
  }
  log_debug(aot, codecache, stubs)("Reading blob '%s' (id=%u, kind=%s) from AOT Code Cache", name, id, aot_code_entry_kind_name[entry_kind]);

  AOTCodeEntry* entry = cache->find_entry(entry_kind, encode_id(entry_kind, id));
  if (entry == nullptr) {
    return nullptr;
  }
  AOTCodeReader reader(cache, entry);
  CodeBlob* blob = reader.compile_code_blob(name);

  log_debug(aot, codecache, stubs)("%sRead blob '%s' (id=%u, kind=%s) from AOT Code Cache",
                                   (blob == nullptr? "Failed to " : ""), name, id, aot_code_entry_kind_name[entry_kind]);
  return blob;
}
996
// Convenience overload for stub/blob ids: derives the entry name from StubInfo.
CodeBlob* AOTCodeCache::load_code_blob(AOTCodeEntry::Kind entry_kind, BlobId id) {
  assert(AOTCodeEntry::is_blob(entry_kind),
         "wrong entry kind for blob id %s", StubInfo::name(id));
  return load_code_blob(entry_kind, (uint)id, StubInfo::name(id));
}
1002
// Materialize this reader's entry as a live CodeBlob: verify the stored name,
// read the archived blob image, relocation data and oop maps from the mapped
// region, create the blob in the code cache, then fix up its relocations.
// Returns null (and may set lookup-failed) on mismatch or allocation failure.
CodeBlob* AOTCodeReader::compile_code_blob(const char* name) {
  uint entry_position = _entry->offset();

  // Read name
  uint name_offset = entry_position + _entry->name_offset();
  uint name_size = _entry->name_size(); // Includes '\0'
  const char* stored_name = addr(name_offset);

  if (strncmp(stored_name, name, (name_size - 1)) != 0) {
    log_warning(aot, codecache, stubs)("Saved blob's name '%s' is different from the expected name '%s'",
                                       stored_name, name);
    set_lookup_failed(); // Skip this blob
    return nullptr;
  }

  // Read archived code blob
  uint offset = entry_position + _entry->blob_offset();
  CodeBlob* archived_blob = (CodeBlob*)addr(offset);
  offset += archived_blob->size();

  // Relocation data immediately follows the archived blob image.
  address reloc_data = (address)addr(offset);
  offset += archived_blob->relocation_size();
  set_read_position(offset);

  ImmutableOopMapSet* oop_maps = nullptr;
  if (_entry->has_oop_maps()) {
    oop_maps = read_oop_map_set();
  }

  CodeBlob* code_blob = CodeBlob::create(archived_blob,
                                         stored_name,
                                         reloc_data,
                                         oop_maps
                                        );
  if (code_blob == nullptr) { // no space left in CodeCache
    return nullptr;
  }

#ifndef PRODUCT
  code_blob->asm_remarks().init();
  read_asm_remarks(code_blob->asm_remarks());
  code_blob->dbg_strings().init();
  read_dbg_strings(code_blob->dbg_strings());
#endif // PRODUCT

  // Patch addresses in the new blob to this runtime's values.
  fix_relocations(code_blob);

#ifdef ASSERT
  LogStreamHandle(Trace, aot, codecache, stubs) log;
  if (log.is_enabled()) {
    FlagSetting fs(PrintRelocations, true);
    code_blob->print_on(&log);
  }
#endif
  return code_blob;
}
1059
1060 // ------------ process code and data --------------
1061
1062 // Can't use -1. It is valid value for jump to iteself destination
1063 // used by static call stub: see NativeJump::jump_destination().
1064 #define BAD_ADDRESS_ID -2
1065
// Write per-relocation auxiliary data for 'code_blob' into the cache:
// one uint per relocation in iteration order (an address-table id for
// call/external targets, 0 otherwise), preceded by the relocation count.
// Returns false when the blob contains a relocation kind, or a target
// address, that cannot be archived. Consumed by fix_relocations().
bool AOTCodeCache::write_relocations(CodeBlob& code_blob) {
  GrowableArray<uint> reloc_data;
  RelocIterator iter(&code_blob);
  LogStreamHandle(Trace, aot, codecache, reloc) log;
  while (iter.next()) {
    int idx = reloc_data.append(0); // default value
    switch (iter.type()) {
      case relocInfo::none:
        break;
      case relocInfo::runtime_call_type: {
        // Record offset of runtime destination
        CallRelocation* r = (CallRelocation*)iter.reloc();
        address dest = r->destination();
        if (dest == r->addr()) { // possible call via trampoline on Aarch64
          dest = (address)-1;    // do nothing in this case when loading this relocation
        }
        int id = _table->id_for_address(dest, iter, &code_blob);
        if (id == BAD_ADDRESS_ID) {
          return false; // target address cannot be encoded — do not archive
        }
        reloc_data.at_put(idx, id);
        break;
      }
      case relocInfo::runtime_call_w_cp_type:
        log_debug(aot, codecache, reloc)("runtime_call_w_cp_type relocation is not implemented");
        return false;
      case relocInfo::external_word_type: {
        // Record offset of runtime target
        address target = ((external_word_Relocation*)iter.reloc())->target();
        int id = _table->id_for_address(target, iter, &code_blob);
        if (id == BAD_ADDRESS_ID) {
          return false;
        }
        reloc_data.at_put(idx, id);
        break;
      }
      // The following kinds need no auxiliary data; they are re-based or
      // left untouched at load time (see fix_relocations()).
      case relocInfo::internal_word_type:
        break;
      case relocInfo::section_word_type:
        break;
      case relocInfo::post_call_nop_type:
        break;
      default:
        log_debug(aot, codecache, reloc)("relocation %d unimplemented", (int)iter.type());
        return false;
        break;
    }
    if (log.is_enabled()) {
      iter.print_current_on(&log);
    }
  }

  // Write additional relocation data: uint per relocation
  // Write the count first
  int count = reloc_data.length();
  write_bytes(&count, sizeof(int));
  for (GrowableArrayIterator<uint> iter = reloc_data.begin();
       iter != reloc_data.end(); ++iter) {
    uint value = *iter;
    int n = write_bytes(&value, sizeof(uint));
    if (n != sizeof(uint)) {
      return false;
    }
  }
  return true;
}
1132
// Re-bind the relocations of a freshly loaded 'code_blob' to this runtime.
// Consumes the data written by write_relocations(): a count followed by one
// uint per relocation, walked in the same iteration order as the writer.
void AOTCodeReader::fix_relocations(CodeBlob* code_blob) {
  LogStreamHandle(Trace, aot, reloc) log;
  uint offset = read_position();
  int count = *(int*)addr(offset);
  offset += sizeof(int);
  if (log.is_enabled()) {
    log.print_cr("======== extra relocations count=%d", count);
  }
  uint* reloc_data = (uint*)addr(offset);
  offset += (count * sizeof(uint));
  set_read_position(offset);

  RelocIterator iter(code_blob);
  int j = 0; // index into reloc_data; advanced once per relocation
  while (iter.next()) {
    switch (iter.type()) {
      case relocInfo::none:
        break;
      case relocInfo::runtime_call_type: {
        // Resolve the archived address-table id back to a runtime address.
        // (address)-1 marks a call that needs no patching (see write_relocations()).
        address dest = _cache->address_for_id(reloc_data[j]);
        if (dest != (address)-1) {
          ((CallRelocation*)iter.reloc())->set_destination(dest);
        }
        break;
      }
      case relocInfo::runtime_call_w_cp_type:
        // this relocation should not be in cache (see write_relocations)
        assert(false, "runtime_call_w_cp_type relocation is not implemented");
        break;
      case relocInfo::external_word_type: {
        address target = _cache->address_for_id(reloc_data[j]);
        // Add external address to global table
        int index = ExternalsRecorder::find_index(target);
        // Update index in relocation
        Relocation::add_jint(iter.data(), index);
        external_word_Relocation* reloc = (external_word_Relocation*)iter.reloc();
        assert(reloc->target() == target, "sanity");
        reloc->set_value(target); // Patch address in the code
        break;
      }
      case relocInfo::internal_word_type: {
        // Blob-internal pointer: re-base from dump-time content start to
        // the current blob's content start.
        internal_word_Relocation* r = (internal_word_Relocation*)iter.reloc();
        r->fix_relocation_after_aot_load(aot_code_entry()->dumptime_content_start_addr(), code_blob->content_begin());
        break;
      }
      case relocInfo::section_word_type: {
        section_word_Relocation* r = (section_word_Relocation*)iter.reloc();
        r->fix_relocation_after_aot_load(aot_code_entry()->dumptime_content_start_addr(), code_blob->content_begin());
        break;
      }
      case relocInfo::post_call_nop_type:
        break;
      default:
        assert(false,"relocation %d unimplemented", (int)iter.type());
        break;
    }
    if (log.is_enabled()) {
      iter.print_current_on(&log);
    }
    j++;
  }
  assert(j == count, "sanity"); // writer and reader must agree on the count
}
1196
1197 bool AOTCodeCache::write_oop_map_set(CodeBlob& cb) {
1198 ImmutableOopMapSet* oopmaps = cb.oop_maps();
1199 int oopmaps_size = oopmaps->nr_of_bytes();
1200 if (!write_bytes(&oopmaps_size, sizeof(int))) {
1201 return false;
1202 }
1203 uint n = write_bytes(oopmaps, oopmaps->nr_of_bytes());
1204 if (n != (uint)oopmaps->nr_of_bytes()) {
1205 return false;
1206 }
1207 return true;
1208 }
1209
1210 ImmutableOopMapSet* AOTCodeReader::read_oop_map_set() {
1211 uint offset = read_position();
1212 int size = *(int *)addr(offset);
1213 offset += sizeof(int);
1214 ImmutableOopMapSet* oopmaps = (ImmutableOopMapSet *)addr(offset);
1215 offset += size;
1216 set_read_position(offset);
1217 return oopmaps;
1218 }
1219
1220 #ifndef PRODUCT
// Archive the blob's assembler remarks: a count (patched in at the end),
// then for each remark its code offset (uint) and the C-string-table id
// (int) of its deduplicated text. Restored by read_asm_remarks().
bool AOTCodeCache::write_asm_remarks(CodeBlob& cb) {
  // Reserve space for the count up-front; it is only known after iteration.
  uint* count_ptr = (uint *)reserve_bytes(sizeof(uint));
  if (count_ptr == nullptr) {
    return false;
  }
  uint count = 0;
  bool result = cb.asm_remarks().iterate([&] (uint offset, const char* str) -> bool {
    log_trace(aot, codecache, stubs)("asm remark offset=%d, str='%s'", offset, str);
    uint n = write_bytes(&offset, sizeof(uint));
    if (n != sizeof(uint)) {
      return false; // stop iteration on write failure
    }
    // Store the remark text once in the C-string table and record its id.
    const char* cstr = add_C_string(str);
    int id = _table->id_for_C_string((address)cstr);
    assert(id != -1, "asm remark string '%s' not found in AOTCodeAddressTable", str);
    n = write_bytes(&id, sizeof(int));
    if (n != sizeof(int)) {
      return false;
    }
    count += 1;
    return true;
  });
  *count_ptr = count;
  return result;
}
1247
1248 void AOTCodeReader::read_asm_remarks(AsmRemarks& asm_remarks) {
1249 // Read asm remarks
1250 uint offset = read_position();
1251 uint count = *(uint *)addr(offset);
1252 offset += sizeof(uint);
1253 for (uint i = 0; i < count; i++) {
1254 uint remark_offset = *(uint *)addr(offset);
1255 offset += sizeof(uint);
1256 int remark_string_id = *(uint *)addr(offset);
1257 offset += sizeof(int);
1258 const char* remark = (const char*)_cache->address_for_C_string(remark_string_id);
1259 asm_remarks.insert(remark_offset, remark);
1260 }
1261 set_read_position(offset);
1262 }
1263
1264 bool AOTCodeCache::write_dbg_strings(CodeBlob& cb) {
1265 // Write dbg strings
1266 uint* count_ptr = (uint *)reserve_bytes(sizeof(uint));
1267 if (count_ptr == nullptr) {
1268 return false;
1269 }
1270 uint count = 0;
1271 bool result = cb.dbg_strings().iterate([&] (const char* str) -> bool {
1272 log_trace(aot, codecache, stubs)("dbg string=%s", str);
1273 const char* cstr = add_C_string(str);
1274 int id = _table->id_for_C_string((address)cstr);
1275 assert(id != -1, "db string '%s' not found in AOTCodeAddressTable", str);
1276 uint n = write_bytes(&id, sizeof(int));
1277 if (n != sizeof(int)) {
1278 return false;
1279 }
1280 count += 1;
1281 return true;
1282 });
1283 *count_ptr = count;
1284 return result;
1285 }
1286
1287 void AOTCodeReader::read_dbg_strings(DbgStrings& dbg_strings) {
1288 // Read dbg strings
1289 uint offset = read_position();
1290 uint count = *(uint *)addr(offset);
1291 offset += sizeof(uint);
1292 for (uint i = 0; i < count; i++) {
1293 int string_id = *(uint *)addr(offset);
1294 offset += sizeof(int);
1295 const char* str = (const char*)_cache->address_for_C_string(string_id);
1296 dbg_strings.insert(str);
1297 }
1298 set_read_position(offset);
1299 }
1300 #endif // PRODUCT
1301
1302 //======================= AOTCodeAddressTable ===============
1303
1304 // address table ids for generated routines, external addresses and C
1305 // string addresses are partitioned into positive integer ranges
1306 // defined by the following positive base and max values
1307 // i.e. [_extrs_base, _extrs_base + _extrs_max -1],
1308 // [_blobs_base, _blobs_base + _blobs_max -1],
1309 // ...
1310 // [_c_str_base, _c_str_base + _c_str_max -1],
1311
// Capacity limits for each id partition (see the layout comment above).
#define _extrs_max 100
#define _stubs_max 3

#define _shared_blobs_max 20
#define _C1_blobs_max 10
#define _blobs_max (_shared_blobs_max+_C1_blobs_max)
#define _all_max (_extrs_max+_stubs_max+_blobs_max)

// Base ids: partitions are laid out back-to-back starting at 0.
#define _extrs_base 0
#define _stubs_base (_extrs_base + _extrs_max)
#define _shared_blobs_base (_stubs_base + _stubs_max)
#define _C1_blobs_base (_shared_blobs_base + _shared_blobs_max)
#define _blobs_end (_shared_blobs_base + _blobs_max)

// Append 'addr' to the given partition's table and bounds-check its length.
#define SET_ADDRESS(type, addr)                           \
  {                                                       \
    type##_addr[type##_length++] = (address) (addr);      \
    assert(type##_length <= type##_max, "increase size"); \
  }
1331
// Re-entrancy guard for init_extrs(): set while the table is being filled.
static bool initializing_extrs = false;
1333
// Populate the external-address partition of the table with every VM
// runtime entry point that archived code may reference. Idempotent and
// guarded against re-entry. Entry order defines the archived ids, so it
// must be identical between dump and load.
void AOTCodeAddressTable::init_extrs() {
  if (_extrs_complete || initializing_extrs) return; // Done already

  assert(_blobs_end <= _all_max, "AOTCodeAddress table ranges need adjusting");

  initializing_extrs = true;
  _extrs_addr = NEW_C_HEAP_ARRAY(address, _extrs_max, mtCode);

  _extrs_length = 0;

  // Record addresses of VM runtime methods
  SET_ADDRESS(_extrs, SharedRuntime::fixup_callers_callsite);
  SET_ADDRESS(_extrs, SharedRuntime::handle_wrong_method);
  SET_ADDRESS(_extrs, SharedRuntime::handle_wrong_method_abstract);
  SET_ADDRESS(_extrs, SharedRuntime::handle_wrong_method_ic_miss);
#if defined(AARCH64) && !defined(ZERO)
  SET_ADDRESS(_extrs, JavaThread::aarch64_get_thread_helper);
#endif
  {
    // Required by Shared blobs
    SET_ADDRESS(_extrs, Deoptimization::fetch_unroll_info);
    SET_ADDRESS(_extrs, Deoptimization::unpack_frames);
    SET_ADDRESS(_extrs, SafepointSynchronize::handle_polling_page_exception);
    SET_ADDRESS(_extrs, SharedRuntime::resolve_opt_virtual_call_C);
    SET_ADDRESS(_extrs, SharedRuntime::resolve_virtual_call_C);
    SET_ADDRESS(_extrs, SharedRuntime::resolve_static_call_C);
    SET_ADDRESS(_extrs, SharedRuntime::throw_StackOverflowError);
    SET_ADDRESS(_extrs, SharedRuntime::throw_delayed_StackOverflowError);
    SET_ADDRESS(_extrs, SharedRuntime::throw_AbstractMethodError);
    SET_ADDRESS(_extrs, SharedRuntime::throw_IncompatibleClassChangeError);
    SET_ADDRESS(_extrs, SharedRuntime::throw_NullPointerException_at_call);
  }

#ifdef COMPILER1
  {
    // Required by C1 blobs
    SET_ADDRESS(_extrs, static_cast<int (*)(oopDesc*)>(SharedRuntime::dtrace_object_alloc));
    SET_ADDRESS(_extrs, SharedRuntime::exception_handler_for_return_address);
    SET_ADDRESS(_extrs, SharedRuntime::register_finalizer);
    SET_ADDRESS(_extrs, Runtime1::is_instance_of);
    SET_ADDRESS(_extrs, Runtime1::exception_handler_for_pc);
    SET_ADDRESS(_extrs, Runtime1::check_abort_on_vm_exception);
    SET_ADDRESS(_extrs, Runtime1::new_instance);
    SET_ADDRESS(_extrs, Runtime1::counter_overflow);
    SET_ADDRESS(_extrs, Runtime1::new_type_array);
    SET_ADDRESS(_extrs, Runtime1::new_object_array);
    SET_ADDRESS(_extrs, Runtime1::new_multi_array);
    SET_ADDRESS(_extrs, Runtime1::throw_range_check_exception);
    SET_ADDRESS(_extrs, Runtime1::throw_index_exception);
    SET_ADDRESS(_extrs, Runtime1::throw_div0_exception);
    SET_ADDRESS(_extrs, Runtime1::throw_null_pointer_exception);
    SET_ADDRESS(_extrs, Runtime1::throw_array_store_exception);
    SET_ADDRESS(_extrs, Runtime1::throw_class_cast_exception);
    SET_ADDRESS(_extrs, Runtime1::throw_incompatible_class_change_error);
    // NOTE(review): Runtime1::is_instance_of is already recorded above —
    // this second slot looks redundant; confirm before removing, since
    // removal shifts the ids of all subsequent entries.
    SET_ADDRESS(_extrs, Runtime1::is_instance_of);
    SET_ADDRESS(_extrs, Runtime1::monitorenter);
    SET_ADDRESS(_extrs, Runtime1::monitorexit);
    SET_ADDRESS(_extrs, Runtime1::deoptimize);
    SET_ADDRESS(_extrs, Runtime1::access_field_patching);
    SET_ADDRESS(_extrs, Runtime1::move_klass_patching);
    SET_ADDRESS(_extrs, Runtime1::move_mirror_patching);
    SET_ADDRESS(_extrs, Runtime1::move_appendix_patching);
    SET_ADDRESS(_extrs, Runtime1::predicate_failed_trap);
    SET_ADDRESS(_extrs, Runtime1::unimplemented_entry);
    SET_ADDRESS(_extrs, Thread::current);
    SET_ADDRESS(_extrs, CompressedKlassPointers::base_addr());
#ifndef PRODUCT
    SET_ADDRESS(_extrs, os::breakpoint);
#endif
  }
#endif

#ifdef COMPILER2
  {
    // Required by C2 blobs
    SET_ADDRESS(_extrs, Deoptimization::uncommon_trap);
    SET_ADDRESS(_extrs, OptoRuntime::handle_exception_C);
    SET_ADDRESS(_extrs, OptoRuntime::new_instance_C);
    SET_ADDRESS(_extrs, OptoRuntime::new_array_C);
    SET_ADDRESS(_extrs, OptoRuntime::new_array_nozero_C);
    SET_ADDRESS(_extrs, OptoRuntime::multianewarray2_C);
    SET_ADDRESS(_extrs, OptoRuntime::multianewarray3_C);
    SET_ADDRESS(_extrs, OptoRuntime::multianewarray4_C);
    SET_ADDRESS(_extrs, OptoRuntime::multianewarray5_C);
    SET_ADDRESS(_extrs, OptoRuntime::multianewarrayN_C);
    SET_ADDRESS(_extrs, OptoRuntime::complete_monitor_locking_C);
    SET_ADDRESS(_extrs, OptoRuntime::monitor_notify_C);
    SET_ADDRESS(_extrs, OptoRuntime::monitor_notifyAll_C);
    SET_ADDRESS(_extrs, OptoRuntime::rethrow_C);
    SET_ADDRESS(_extrs, OptoRuntime::slow_arraycopy_C);
    SET_ADDRESS(_extrs, OptoRuntime::register_finalizer_C);
    SET_ADDRESS(_extrs, OptoRuntime::vthread_end_first_transition_C);
    SET_ADDRESS(_extrs, OptoRuntime::vthread_start_final_transition_C);
    SET_ADDRESS(_extrs, OptoRuntime::vthread_start_transition_C);
    SET_ADDRESS(_extrs, OptoRuntime::vthread_end_transition_C);
#if defined(AARCH64)
    SET_ADDRESS(_extrs, JavaThread::verify_cross_modify_fence_failure);
#endif // AARCH64
  }
#endif // COMPILER2

  // GC-specific runtime entries referenced by barriers.
#if INCLUDE_G1GC
  SET_ADDRESS(_extrs, G1BarrierSetRuntime::write_ref_field_pre_entry);
#endif
#if INCLUDE_SHENANDOAHGC
  SET_ADDRESS(_extrs, ShenandoahRuntime::write_barrier_pre);
  SET_ADDRESS(_extrs, ShenandoahRuntime::load_reference_barrier_phantom);
  SET_ADDRESS(_extrs, ShenandoahRuntime::load_reference_barrier_phantom_narrow);
#endif
#if INCLUDE_ZGC
  SET_ADDRESS(_extrs, ZBarrierSetRuntime::load_barrier_on_oop_field_preloaded_addr());
  SET_ADDRESS(_extrs, ZBarrierSetRuntime::load_barrier_on_phantom_oop_field_preloaded_addr());
#if defined(AMD64)
  SET_ADDRESS(_extrs, &ZPointerLoadShift);
#endif
#endif
#ifndef ZERO
#if defined(AMD64) || defined(AARCH64) || defined(RISCV64)
  SET_ADDRESS(_extrs, MacroAssembler::debug64);
#endif
#endif // ZERO

  // addresses of fields in AOT runtime constants area
  address* p = AOTRuntimeConstants::field_addresses_list();
  while (*p != nullptr) {
    SET_ADDRESS(_extrs, *p++);
  }

  _extrs_complete = true;
  log_debug(aot, codecache, init)("External addresses recorded");
}
1465
// Re-entrancy guard for init_early_stubs().
static bool initializing_early_stubs = false;
1467
// Record addresses of early-generated StubRoutines entries in the stubs
// partition of the table.
// NOTE(review): the guard checks _complete (set at the end of
// init_shared_blobs) rather than _early_stubs_complete — confirm this is
// intentional; init_extrs() checks its own completion flag.
void AOTCodeAddressTable::init_early_stubs() {
  if (_complete || initializing_early_stubs) return; // Done already
  initializing_early_stubs = true;
  _stubs_addr = NEW_C_HEAP_ARRAY(address, _stubs_max, mtCode);
  _stubs_length = 0;
  SET_ADDRESS(_stubs, StubRoutines::forward_exception_entry());

  {
    // Required by C1 blobs
#if defined(AMD64) && !defined(ZERO)
    SET_ADDRESS(_stubs, StubRoutines::x86::double_sign_flip());
    SET_ADDRESS(_stubs, StubRoutines::x86::d2l_fixup());
#endif // AMD64
  }

  _early_stubs_complete = true;
  log_info(aot, codecache, init)("Early stubs recorded");
}
1486
// Re-entrancy guard for init_shared_blobs().
static bool initializing_shared_blobs = false;
1488
// Record entry points of shared (non-compiler-specific) generated blobs.
// Allocates one array covering both the shared and the C1 partitions;
// _C1_blobs_addr points into the same allocation (see ~AOTCodeAddressTable).
void AOTCodeAddressTable::init_shared_blobs() {
  if (_complete || initializing_shared_blobs) return; // Done already
  initializing_shared_blobs = true;
  address* blobs_addr = NEW_C_HEAP_ARRAY(address, _blobs_max, mtCode);

  // Divide _shared_blobs_addr array to chunks because they could be initialized in parallel
  _shared_blobs_addr = blobs_addr;
  _C1_blobs_addr = _shared_blobs_addr + _shared_blobs_max;

  _shared_blobs_length = 0;
  _C1_blobs_length = 0;

  // clear the address table
  memset(blobs_addr, 0, sizeof(address)* _blobs_max);

  // Record addresses of generated code blobs
  SET_ADDRESS(_shared_blobs, SharedRuntime::get_handle_wrong_method_stub());
  SET_ADDRESS(_shared_blobs, SharedRuntime::get_ic_miss_stub());
  SET_ADDRESS(_shared_blobs, SharedRuntime::deopt_blob()->unpack());
  SET_ADDRESS(_shared_blobs, SharedRuntime::deopt_blob()->unpack_with_exception());
  SET_ADDRESS(_shared_blobs, SharedRuntime::deopt_blob()->unpack_with_reexecution());
  SET_ADDRESS(_shared_blobs, SharedRuntime::deopt_blob()->unpack_with_exception_in_tls());
#if INCLUDE_JVMCI
  if (EnableJVMCI) {
    SET_ADDRESS(_shared_blobs, SharedRuntime::deopt_blob()->uncommon_trap());
    SET_ADDRESS(_shared_blobs, SharedRuntime::deopt_blob()->implicit_exception_uncommon_trap());
  }
#endif

  _shared_blobs_complete = true;
  log_debug(aot, codecache, init)("Early shared blobs recorded");
  _complete = true;
}
1522
// Record entry points of early C1 runtime blobs (up to and including
// c1_forward_exception_id). Blobs that failed to generate, or that have
// no entry, are skipped with an info log.
void AOTCodeAddressTable::init_early_c1() {
#ifdef COMPILER1
  // Runtime1 Blobs
  StubId id = StubInfo::stub_base(StubGroup::C1);
  // include forward_exception in range we publish
  StubId limit = StubInfo::next(StubId::c1_forward_exception_id);
  for (; id != limit; id = StubInfo::next(id)) {
    if (Runtime1::blob_for(id) == nullptr) {
      log_info(aot, codecache, init)("C1 blob %s is missing", Runtime1::name_for(id));
      continue;
    }
    if (Runtime1::entry_for(id) == nullptr) {
      log_info(aot, codecache, init)("C1 blob %s is missing entry", Runtime1::name_for(id));
      continue;
    }
    address entry = Runtime1::entry_for(id);
    SET_ADDRESS(_C1_blobs, entry);
  }
#endif // COMPILER1
  assert(_C1_blobs_length <= _C1_blobs_max, "increase _C1_blobs_max to %d", _C1_blobs_length);
  _early_c1_complete = true;
}
1545
1546 #undef SET_ADDRESS
1547
// Free the C-heap arrays backing the address table.
// Note: _C1_blobs_addr points into the _shared_blobs_addr allocation
// (see init_shared_blobs()) and therefore must not be freed separately.
AOTCodeAddressTable::~AOTCodeAddressTable() {
  if (_extrs_addr != nullptr) {
    FREE_C_HEAP_ARRAY(address, _extrs_addr);
  }
  if (_stubs_addr != nullptr) {
    FREE_C_HEAP_ARRAY(address, _stubs_addr);
  }
  if (_shared_blobs_addr != nullptr) {
    FREE_C_HEAP_ARRAY(address, _shared_blobs_addr);
  }
}
1559
// Upper bound on the number of C strings recorded in the cache
// (debug builds archive more strings: asm remarks, dbg strings).
#ifdef PRODUCT
#define MAX_STR_COUNT 200
#else
#define MAX_STR_COUNT 500
#endif
#define _c_str_max MAX_STR_COUNT
// C-string ids live directly above all other address-table partitions.
static const int _c_str_base = _all_max;

static const char* _C_strings_in[MAX_STR_COUNT] = {nullptr}; // Incoming strings
static const char* _C_strings[MAX_STR_COUNT] = {nullptr};    // Our duplicates
static int _C_strings_count = 0;
// Per-string bookkeeping; exact roles are set by add_C_string()/load_strings()
// (load_strings() installs an identity index/id mapping).
static int _C_strings_s[MAX_STR_COUNT] = {0};
static int _C_strings_id[MAX_STR_COUNT] = {0};
static int _C_strings_used = 0;
1574
// Load the archived C strings from the cache into a C-heap buffer and
// rebuild the id -> string mapping tables.
void AOTCodeCache::load_strings() {
  uint strings_count = _load_header->strings_count();
  if (strings_count == 0) {
    return;
  }
  uint strings_offset = _load_header->strings_offset();
  uint* string_lengths = (uint*)addr(strings_offset); // per-string lengths precede the string data
  strings_offset += (strings_count * sizeof(uint));
  uint strings_size = _load_header->entries_offset() - strings_offset;
  // We have to keep cached strings longer than _cache buffer
  // because they are referenced from compiled code which may
  // still be executed on VM exit after _cache is freed.
  char* p = NEW_C_HEAP_ARRAY(char, strings_size+1, mtCode);
  memcpy(p, addr(strings_offset), strings_size);
  _C_strings_buf = p;
  assert(strings_count <= MAX_STR_COUNT, "sanity");
  for (uint i = 0; i < strings_count; i++) {
    _C_strings[i] = p;
    uint len = string_lengths[i];
    // Loaded strings use an identity mapping: index == id.
    _C_strings_s[i] = i;
    _C_strings_id[i] = i;
    p += len;
  }
  assert((uint)(p - _C_strings_buf) <= strings_size, "(" INTPTR_FORMAT " - " INTPTR_FORMAT ") = %d > %d ", p2i(p), p2i(_C_strings_buf), (uint)(p - _C_strings_buf), strings_size);
  _C_strings_count = strings_count;
  _C_strings_used = strings_count;
  log_debug(aot, codecache, init)(" Loaded %d C strings of total length %d at offset %d from AOT Code Cache", _C_strings_count, strings_size, strings_offset);
}
1603
1705 assert(_extrs_complete, "AOT Code Cache VM runtime addresses table is not complete");
1706 if (idx == -1) {
1707 return (address)-1;
1708 }
1709 uint id = (uint)idx;
1710 // special case for symbols based relative to os::init
1711 if (id > (_c_str_base + _c_str_max)) {
1712 return (address)os::init + idx;
1713 }
1714 if (idx < 0) {
1715 fatal("Incorrect id %d for AOT Code Cache addresses table", id);
1716 return nullptr;
1717 }
1718 // no need to compare unsigned id against 0
1719 if (/* id >= _extrs_base && */ id < _extrs_length) {
1720 return _extrs_addr[id - _extrs_base];
1721 }
1722 if (id >= _stubs_base && id < _stubs_base + _stubs_length) {
1723 return _stubs_addr[id - _stubs_base];
1724 }
1725 if (id >= _shared_blobs_base && id < _shared_blobs_base + _shared_blobs_length) {
1726 return _shared_blobs_addr[id - _shared_blobs_base];
1727 }
1728 if (id >= _C1_blobs_base && id < _C1_blobs_base + _C1_blobs_length) {
1729 return _C1_blobs_addr[id - _C1_blobs_base];
1730 }
1731 if (id >= _c_str_base && id < (_c_str_base + (uint)_C_strings_count)) {
1732 return address_for_C_string(id - _c_str_base);
1733 }
1734 fatal("Incorrect id %d for AOT Code Cache addresses table", id);
1735 return nullptr;
1736 }
1737
// Map a runtime address referenced from 'code_blob' to a stable id that can
// be archived. Returns -1 for the (address)-1 "jump to self" marker, a
// partition-based id for known C strings/stubs/blobs/runtime entries, a
// distance-from-os::init encoding for in-image C strings, or a negative
// value when no mapping exists (callers compare against BAD_ADDRESS_ID).
int AOTCodeAddressTable::id_for_address(address addr, RelocIterator reloc, CodeBlob* code_blob) {
  assert(_extrs_complete, "AOT Code Cache VM runtime addresses table is not complete");
  int id = -1;
  if (addr == (address)-1) { // Static call stub has jump to itself
    return id;
  }
  // Check card_table_base address first since it can point to any address
  BarrierSet* bs = BarrierSet::barrier_set();
  bool is_const_card_table_base = !UseG1GC && !UseShenandoahGC && bs->is_a(BarrierSet::CardTableBarrierSet);
  guarantee(!is_const_card_table_base || addr != ci_card_table_address_const(), "sanity");

  // Search for C string
  id = id_for_C_string(addr);
  if (id >= 0) {
    return id + _c_str_base;
  }
  if (StubRoutines::contains(addr)) {
    // Search in stubs
    id = search_address(addr, _stubs_addr, _stubs_length);
    if (id < 0) {
      StubCodeDesc* desc = StubCodeDesc::desc_for(addr);
      if (desc == nullptr) {
        // The address may be a return address; retry with the call offset.
        desc = StubCodeDesc::desc_for(addr + frame::pc_return_offset);
      }
      const char* sub_name = (desc != nullptr) ? desc->name() : "<unknown>";
      assert(false, "Address " INTPTR_FORMAT " for Stub:%s is missing in AOT Code Cache addresses table", p2i(addr), sub_name);
    } else {
      return id + _stubs_base;
    }
  } else {
    CodeBlob* cb = CodeCache::find_blob(addr);
    if (cb != nullptr) {
      // Search in code blobs (shared and C1 partitions share one array)
      int id_base = _shared_blobs_base;
      id = search_address(addr, _shared_blobs_addr, _blobs_max);
      if (id < 0) {
        assert(false, "Address " INTPTR_FORMAT " for Blob:%s is missing in AOT Code Cache addresses table", p2i(addr), cb->name());
      } else {
        return id_base + id;
      }
    } else {
      // Search in runtime functions
      id = search_address(addr, _extrs_addr, _extrs_length);
      if (id < 0) {
        ResourceMark rm;
        const int buflen = 1024;
        char* func_name = NEW_RESOURCE_ARRAY(char, buflen);
        int offset = 0;
        if (os::dll_address_to_function_name(addr, func_name, buflen, &offset)) {
          if (offset > 0) {
            // Could be address of C string
            uint dist = (uint)pointer_delta(addr, (address)os::init, 1);
            log_debug(aot, codecache)("Address " INTPTR_FORMAT " (offset %d) for runtime target '%s' is missing in AOT Code Cache addresses table",
                                      p2i(addr), dist, (const char*)addr);
            // Distances must not collide with the id partitions above.
            assert(dist > (uint)(_all_max + MAX_STR_COUNT), "change encoding of distance");
            return dist;
          }
#ifdef ASSERT
          reloc.print_current_on(tty);
          code_blob->print_on(tty);
          code_blob->print_code_on(tty);
          assert(false, "Address " INTPTR_FORMAT " for runtime target '%s+%d' is missing in AOT Code Cache addresses table", p2i(addr), func_name, offset);
#endif
        } else {
#ifdef ASSERT
          reloc.print_current_on(tty);
          code_blob->print_on(tty);
          code_blob->print_code_on(tty);
          os::find(addr, tty);
          assert(false, "Address " INTPTR_FORMAT " for <unknown>/('%s') is missing in AOT Code Cache addresses table", p2i(addr), (const char*)addr);
#endif
        }
      } else {
        return _extrs_base + id;
      }
    }
  }
  return id;
}
1817
// The single runtime-constants instance; its field addresses are published
// in the external-address table (see init_extrs()).
AOTRuntimeConstants AOTRuntimeConstants::_aot_runtime_constants;
1819
// Capture GC-configuration-dependent constants from the running VM into the
// shared AOTRuntimeConstants instance (card table base and grain shift).
void AOTRuntimeConstants::initialize_from_runtime() {
  BarrierSet* bs = BarrierSet::barrier_set();
  address card_table_base = nullptr;
  uint grain_shift = 0;
#if INCLUDE_G1GC
  if (bs->is_a(BarrierSet::G1BarrierSet)) {
    grain_shift = G1HeapRegion::LogOfHRGrainBytes;
  } else
#endif
#if INCLUDE_SHENANDOAHGC
  if (bs->is_a(BarrierSet::ShenandoahBarrierSet)) {
    grain_shift = 0;
  } else
#endif
  if (bs->is_a(BarrierSet::CardTableBarrierSet)) {
    // Card-table GCs have a constant card table base (see
    // card_table_base_address() below).
    CardTable::CardValue* base = ci_card_table_address_const();
    assert(base != nullptr, "unexpected byte_map_base");
    card_table_base = base;
    CardTableBarrierSet* ctbs = barrier_set_cast<CardTableBarrierSet>(bs);
    grain_shift = ctbs->grain_shift();
  }
  _aot_runtime_constants._card_table_base = card_table_base;
  _aot_runtime_constants._grain_shift = grain_shift;
}
1844
// Null-terminated list of the runtime-constants field addresses,
// registered in the external-address table by init_extrs().
address AOTRuntimeConstants::_field_addresses_list[] = {
  ((address)&_aot_runtime_constants._card_table_base),
  ((address)&_aot_runtime_constants._grain_shift),
  nullptr
};
1850
// Address of the stored card-table-base constant. Only meaningful for GCs
// with a constant card table base (see initialize_from_runtime()).
address AOTRuntimeConstants::card_table_base_address() {
  assert(UseSerialGC || UseParallelGC, "Only these GCs have constant card table base");
  return (address)&_aot_runtime_constants._card_table_base;
}
1855
// This is called after initialize() but before init2()
// and _cache is not set yet.
// Print all entries of the opened cache, ordered by the search index.
void AOTCodeCache::print_on(outputStream* st) {
  if (opened_cache != nullptr && opened_cache->for_use()) {
    st->print_cr("\nAOT Code Cache");
    uint count = opened_cache->_load_header->entries_count();
    uint* search_entries = (uint*)opened_cache->addr(opened_cache->_load_header->entries_offset()); // [id, index]
    AOTCodeEntry* load_entries = (AOTCodeEntry*)(search_entries + 2 * count);

    for (uint i = 0; i < count; i++) {
      // Use search_entries[] to order output
      int index = search_entries[2*i + 1];
      AOTCodeEntry* entry = &(load_entries[index]);

      uint entry_position = entry->offset();
      uint name_offset = entry->name_offset() + entry_position;
      const char* saved_name = opened_cache->addr(name_offset);

      st->print_cr("%4u: %10s idx:%4u Id:%u size=%u '%s'",
                   i, aot_code_entry_kind_name[entry->kind()], index, entry->id(), entry->size(), saved_name);
    }
  }
}
|
12 * version 2 for more details (a copy is included in the LICENSE file that
13 * accompanied this code).
14 *
15 * You should have received a copy of the GNU General Public License version
16 * 2 along with this work; if not, write to the Free Software Foundation,
17 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
18 *
19 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
20 * or visit www.oracle.com if you need additional information or have any
21 * questions.
22 *
23 */
24
25
26 #include "asm/macroAssembler.hpp"
27 #include "cds/aotCacheAccess.hpp"
28 #include "cds/aotMetaspace.hpp"
29 #include "cds/cds_globals.hpp"
30 #include "cds/cdsConfig.hpp"
31 #include "cds/heapShared.hpp"
32 #include "ci/ciConstant.hpp"
33 #include "ci/ciEnv.hpp"
34 #include "ci/ciField.hpp"
35 #include "ci/ciMethod.hpp"
36 #include "ci/ciMethodData.hpp"
37 #include "ci/ciObject.hpp"
38 #include "ci/ciUtilities.inline.hpp"
39 #include "classfile/javaAssertions.hpp"
40 #include "classfile/stringTable.hpp"
41 #include "classfile/symbolTable.hpp"
42 #include "classfile/systemDictionary.hpp"
43 #include "classfile/vmClasses.hpp"
44 #include "classfile/vmIntrinsics.hpp"
45 #include "code/aotCodeCache.hpp"
46 #include "code/codeBlob.hpp"
47 #include "code/codeCache.hpp"
48 #include "code/oopRecorder.inline.hpp"
49 #include "compiler/abstractCompiler.hpp"
50 #include "compiler/compilationPolicy.hpp"
51 #include "compiler/compileBroker.hpp"
52 #include "compiler/compileTask.hpp"
53 #include "gc/g1/g1BarrierSetRuntime.hpp"
54 #include "gc/shared/barrierSetAssembler.hpp"
55 #include "gc/shared/cardTableBarrierSet.hpp"
56 #include "gc/shared/gcConfig.hpp"
57 #include "logging/logStream.hpp"
58 #include "memory/memoryReserver.hpp"
59 #include "memory/universe.hpp"
60 #include "oops/klass.inline.hpp"
61 #include "oops/method.inline.hpp"
62 #include "oops/trainingData.hpp"
63 #include "prims/jvmtiThreadState.hpp"
64 #include "runtime/atomicAccess.hpp"
65 #include "runtime/deoptimization.hpp"
66 #include "runtime/flags/flagSetting.hpp"
67 #include "runtime/globals_extension.hpp"
68 #include "runtime/handles.inline.hpp"
69 #include "runtime/java.hpp"
70 #include "runtime/jniHandles.inline.hpp"
71 #include "runtime/mountUnmountDisabler.hpp"
72 #include "runtime/mutexLocker.hpp"
73 #include "runtime/objectMonitorTable.hpp"
74 #include "runtime/os.inline.hpp"
75 #include "runtime/sharedRuntime.hpp"
76 #include "runtime/stubCodeGenerator.hpp"
77 #include "runtime/stubRoutines.hpp"
78 #include "runtime/threadIdentifier.hpp"
79 #include "runtime/timerTrace.hpp"
80 #include "utilities/copy.hpp"
81 #include "utilities/formatBuffer.hpp"
82 #include "utilities/ostream.hpp"
83 #include "utilities/spinYield.hpp"
84 #ifdef COMPILER1
85 #include "c1/c1_LIRAssembler.hpp"
86 #include "c1/c1_Runtime1.hpp"
87 #include "gc/g1/c1/g1BarrierSetC1.hpp"
88 #include "gc/shared/c1/barrierSetC1.hpp"
89 #if INCLUDE_SHENANDOAHGC
90 #include "gc/shenandoah/c1/shenandoahBarrierSetC1.hpp"
91 #endif // INCLUDE_SHENANDOAHGC
92 #include "gc/z/c1/zBarrierSetC1.hpp"
93 #endif // COMPILER1
94 #ifdef COMPILER2
95 #include "opto/c2_MacroAssembler.hpp"
96 #include "opto/parse.hpp"
97 #include "opto/runtime.hpp"
98 #endif
99 #if INCLUDE_JVMCI
100 #include "jvmci/jvmci.hpp"
101 #endif
102 #if INCLUDE_G1GC
103 #include "gc/g1/g1BarrierSetRuntime.hpp"
104 #include "gc/g1/g1HeapRegion.hpp"
105 #endif
106 #if INCLUDE_SHENANDOAHGC
107 #include "gc/shenandoah/shenandoahRuntime.hpp"
108 #endif
109 #if INCLUDE_ZGC
110 #include "gc/z/zBarrierSetRuntime.hpp"
111 #endif
112 #if defined(X86) && !defined(ZERO)
113 #include "rdtsc_x86.hpp"
114 #endif
115
116 #include <errno.h>
117 #include <sys/stat.h>
118
// Human-readable names for AOTCodeEntry kinds, indexed by kind value.
// Generated from the DO_AOTCODEENTRY_KIND x-macro so the list stays in
// sync with the kind enumeration.
const char* aot_code_entry_kind_name[] = {
#define DECL_KIND_STRING(kind) XSTR(kind),
  DO_AOTCODEENTRY_KIND(DECL_KIND_STRING)
#undef DECL_KIND_STRING
};
124
// Cumulative timers for the AOT code cache phases (load/preload/register/
// find/store).  Collected only when enable_timers() is true.
static elapsedTimer _t_totalLoad;
static elapsedTimer _t_totalPreload;
static elapsedTimer _t_totalRegister;
static elapsedTimer _t_totalFind;
static elapsedTimer _t_totalStore;
130
131 static bool enable_timers() {
132 return CITime || log_is_enabled(Info, init);
133 }
134
// Report that the AOT code cache could not be used at load time.
// Aborts VM initialization when AbortVMOnAOTCodeFailure is set; otherwise
// logs the condition and turns all AOT code caching flags off.
static void report_load_failure() {
  if (AbortVMOnAOTCodeFailure) {
    vm_exit_during_initialization("Unable to use AOT Code Cache.", nullptr);
  }
  log_info(aot, codecache, init)("Unable to use AOT Code Cache.");
  AOTCodeCache::disable_caching();
}
142
// Report that the AOT code cache could not be created at dump time.
// Aborts the VM when AbortVMOnAOTCodeFailure is set; otherwise logs the
// condition (under the exit tag, since this happens late) and turns all
// AOT code caching flags off.
static void report_store_failure() {
  if (AbortVMOnAOTCodeFailure) {
    tty->print_cr("Unable to create AOT Code Cache.");
    vm_abort(false);
  }
  log_info(aot, codecache, exit)("Unable to create AOT Code Cache.");
  AOTCodeCache::disable_caching();
}
151
// The sequence of AOT code caching flags and parameters settings.
153 //
154 // 1. The initial AOT code caching flags setting is done
169
// The next methods determine what to do with AOT code depending
// on the phase of the AOT process: assembly or production.
172
173 bool AOTCodeCache::is_dumping_adapter() {
174 return AOTAdapterCaching && is_on_for_dump();
175 }
176
177 bool AOTCodeCache::is_using_adapter() {
178 return AOTAdapterCaching && is_on_for_use();
179 }
180
181 bool AOTCodeCache::is_dumping_stub() {
182 return AOTStubCaching && is_on_for_dump();
183 }
184
185 bool AOTCodeCache::is_using_stub() {
186 return AOTStubCaching && is_on_for_use();
187 }
188
189 bool AOTCodeCache::is_dumping_code() {
190 return AOTCodeCaching && is_on_for_dump();
191 }
192
193 bool AOTCodeCache::is_using_code() {
194 return AOTCodeCaching && is_on_for_use();
195 }
196
197 // This is used before AOTCodeCahe is initialized
198 // but after AOT (CDS) Cache flags consistency is checked.
199 bool AOTCodeCache::maybe_dumping_code() {
200 return AOTCodeCaching && CDSConfig::is_dumping_final_static_archive();
201 }
202
203 // Next methods could be called regardless of AOT code cache status.
204 // Initially they are called during AOT flags parsing and finilized
205 // in AOTCodeCache::initialize().
206 void AOTCodeCache::enable_caching() {
207 FLAG_SET_ERGO_IF_DEFAULT(AOTCodeCaching, true);
208 FLAG_SET_ERGO_IF_DEFAULT(AOTStubCaching, true);
209 FLAG_SET_ERGO_IF_DEFAULT(AOTAdapterCaching, true);
210 }
211
212 void AOTCodeCache::disable_caching() {
213 FLAG_SET_ERGO(AOTCodeCaching, false);
214 FLAG_SET_ERGO(AOTStubCaching, false);
215 FLAG_SET_ERGO(AOTAdapterCaching, false);
216 }
217
218 bool AOTCodeCache::is_caching_enabled() {
219 return AOTCodeCaching || AOTStubCaching || AOTAdapterCaching;
220 }
221
222 static uint32_t encode_id(AOTCodeEntry::Kind kind, int id) {
223 assert(AOTCodeEntry::is_valid_entry_kind(kind), "invalid AOTCodeEntry kind %d", (int)kind);
224 // There can be a conflict of id between an Adapter and *Blob, but that should not cause any functional issue
225 // becasue both id and kind are used to find an entry, and that combination should be unique
226 if (kind == AOTCodeEntry::Adapter) {
227 return id;
228 } else if (kind == AOTCodeEntry::SharedBlob) {
229 assert(StubInfo::is_shared(static_cast<BlobId>(id)), "not a shared blob id %d", id);
230 return id;
231 } else if (kind == AOTCodeEntry::C1Blob) {
232 assert(StubInfo::is_c1(static_cast<BlobId>(id)), "not a c1 blob id %d", id);
233 return id;
234 } else {
235 // kind must be AOTCodeEntry::C2Blob
236 assert(StubInfo::is_c2(static_cast<BlobId>(id)), "not a c2 blob id %d", id);
237 return id;
238 }
239 }
240
// Effective maximum size of the AOT code store buffer.  Set from
// AOTCodeMaxSize (possibly aligned up) in AOTCodeCache::initialize().
static uint _max_aot_code_size = 0;
uint AOTCodeCache::max_aot_code_size() {
  return _max_aot_code_size;
}
245
246 bool AOTCodeCache::is_code_load_thread_on() {
247 return UseAOTCodeLoadThread && AOTCodeCaching;
248 }
249
// Decide whether a constant field value may be folded into compiled code.
// Folding is restricted when the current compilation produces AOT code;
// the commented-out clauses below are stricter alternatives that were
// considered and kept for reference.
bool AOTCodeCache::allow_const_field(ciConstant& value) {
  ciEnv* env = CURRENT_ENV;
  precond(env != nullptr);
  assert(!env->is_aot_compile() || is_dumping_code(), "AOT compilation should be enabled");
  return !env->is_aot_compile() // Restrict only when we generate AOT code
  // Can not trust primitive too || !is_reference_type(value.basic_type())
  // May disable this too for now || is_reference_type(value.basic_type()) && value.as_object()->should_be_constant()
  ;
}
259
// It is called from AOTMetaspace::initialize_shared_spaces()
// which is called from universe_init().
// At this point all AOT class linking settings are finalized
// and the AOT cache is open so we can map the AOT code region.
void AOTCodeCache::initialize() {
  if (!is_caching_enabled()) {
    log_info(aot, codecache, init)("AOT Code Cache is not used: disabled.");
    return;
  }
#if defined(ZERO) || !(defined(AMD64) || defined(AARCH64))
  // Only non-Zero AMD64 and AArch64 builds support the AOT code cache.
  log_info(aot, codecache, init)("AOT Code Cache is not supported on this platform.");
  disable_caching();
  return;
#else
  assert(!FLAG_IS_DEFAULT(AOTCache), "AOTCache should be specified");

  // Disable stubs caching until JDK-8357398 is fixed.
  FLAG_SET_ERGO(AOTStubCaching, false);

  if (VerifyOops) {
    // Disable AOT stubs caching when VerifyOops flag is on.
    // Verify oops code generated a lot of C strings which overflow
    // AOT C string table (which has fixed size).
    // AOT C string table will be reworked later to handle such cases.
    //
    // Note: AOT adapters are not affected - they don't have oop operations.
    log_info(aot, codecache, init)("AOT Stubs Caching is not supported with VerifyOops.");
    FLAG_SET_ERGO(AOTStubCaching, false);
  }

  // Dump only during the final static archive dump with AOT-linked classes;
  // use only when running from an archive with AOT-linked classes.
  bool is_dumping = false;
  bool is_using = false;
  if (CDSConfig::is_dumping_final_static_archive() && CDSConfig::is_dumping_aot_linked_classes()) {
    is_dumping = is_caching_enabled();
  } else if (CDSConfig::is_using_archive() && CDSConfig::is_using_aot_linked_classes()) {
    is_using = is_caching_enabled();
  }
  // ClassInitBarrierMode only makes sense while dumping AOT nmethods.
  if (ClassInitBarrierMode > 0 && !(is_dumping && AOTCodeCaching)) {
    log_info(aot, codecache, init)("Set ClassInitBarrierMode to 0 because AOT Code dumping is off.");
    FLAG_SET_ERGO(ClassInitBarrierMode, 0);
  }
  if (!(is_dumping || is_using)) {
    log_info(aot, codecache, init)("AOT Code Cache is not used: AOT Class Linking is not used.");
    disable_caching();
    return; // AOT code caching disabled on command line
  }
  // Reserve the AOT cache region when we are dumping AOT code.
  _max_aot_code_size = AOTCodeMaxSize;
  // A user-specified AOTCodeMaxSize is rounded up to the allocation granularity.
  if (is_dumping && !FLAG_IS_DEFAULT(AOTCodeMaxSize)) {
    if (!is_aligned(AOTCodeMaxSize, os::vm_allocation_granularity())) {
      _max_aot_code_size = align_up(AOTCodeMaxSize, os::vm_allocation_granularity());
      log_debug(aot,codecache,init)("Max AOT Code Cache size is aligned up to %uK", (int)(max_aot_code_size()/K));
    }
  }
  size_t aot_code_size = is_using ? AOTCacheAccess::get_aot_code_region_size() : 0;
  if (is_using && aot_code_size == 0) {
    log_info(aot, codecache, init)("AOT Code Cache is empty");
    disable_caching();
    return;
  }
  if (!open_cache(is_dumping, is_using)) {
    if (is_using) {
      report_load_failure();
    } else {
      report_store_failure();
    }
    return;
  }
  if (is_dumping) {
    // Adjust compiler flags for AOT code generation.
    FLAG_SET_DEFAULT(FoldStableValues, false);
    FLAG_SET_DEFAULT(ForceUnreachable, true);
  }
  FLAG_SET_DEFAULT(DelayCompilerStubsGeneration, false);
#endif // !ZERO && (AMD64 || AARCH64)
}
335
// The cache instance created by open_cache().  It is published to _cache
// only after its configuration has been verified in init2().
static AOTCodeCache* opened_cache = nullptr; // Use this until we verify the cache
AOTCodeCache* AOTCodeCache::_cache = nullptr;
DEBUG_ONLY( bool AOTCodeCache::_passed_init2 = false; )
339
// It is called after universe_init() when all GC settings are finalized.
// Verifies the opened cache's configuration, initializes the external
// address table, and only then publishes the cache to _cache.
void AOTCodeCache::init2() {
  DEBUG_ONLY( _passed_init2 = true; )
  if (opened_cache == nullptr) {
    return;
  }
  // After Universe initialized
  if (!opened_cache->verify_config_on_use()) { // Check on AOT code loading
    delete opened_cache;
    opened_cache = nullptr;
    report_load_failure();
    return;
  }

  // initialize aot runtime constants as appropriate to this runtime
  AOTRuntimeConstants::initialize_from_runtime();

  // initialize the table of external routines and initial stubs so we can save
  // generated code blobs that reference them
  AOTCodeAddressTable* table = opened_cache->_table;
  assert(table != nullptr, "should be initialized already");
  table->init_extrs();

  // Now cache and address table are ready for AOT code generation
  _cache = opened_cache;

  // Set ClassInitBarrierMode after all checks since it affects code generation
  if (is_dumping_code()) {
    FLAG_SET_ERGO_IF_DEFAULT(ClassInitBarrierMode, 1);
  } else {
    FLAG_SET_ERGO(ClassInitBarrierMode, 0);
  }
}
373
374 bool AOTCodeCache::open_cache(bool is_dumping, bool is_using) {
375 opened_cache = new AOTCodeCache(is_dumping, is_using);
376 if (opened_cache->failed()) {
377 delete opened_cache;
378 opened_cache = nullptr;
379 return false;
380 }
381 return true;
382 }
383
384 static void print_helper(nmethod* nm, outputStream* st) {
385 AOTCodeCache::iterate([&](AOTCodeEntry* e) {
386 if (e->method() == nm->method()) {
387 ResourceMark rm;
388 stringStream ss;
389 ss.print("A%s%d", (e->for_preload() ? "P" : ""), e->comp_level());
390 ss.print("[%s%s%s]",
391 (e->is_loaded() ? "L" : ""),
392 (e->load_fail() ? "F" : ""),
393 (e->not_entrant() ? "I" : ""));
394 ss.print("#%d", e->comp_id());
395
396 st->print(" %s", ss.freeze());
397 }
398 });
399 }
400
401 void AOTCodeCache::close() {
402 if (is_on()) {
403 delete _cache; // Free memory
404 _cache = nullptr;
405 opened_cache = nullptr;
406 }
407 }
408
// Directory record placed at the start of the "ac" (AOT code) region of the
// AOT cache.  It records the size and location of the serialized AOT code
// data; see the usage comment below.
class CachedCodeDirectory {
public:
  uint _aot_code_size;   // size in bytes of the serialized AOT code data
  char* _aot_code_data;  // start of the data; must be stored via AOTCacheAccess::set_pointer()

  void set_aot_code_data(uint size, char* aot_data) {
    _aot_code_size = size;
    AOTCacheAccess::set_pointer(&_aot_code_data, aot_data);
  }

  static CachedCodeDirectory* create();
};
421
422 // Storing AOT code in the AOT code region (ac) of AOT Cache:
423 //
424 // [1] Use CachedCodeDirectory to keep track of all of data related to AOT code.
425 // E.g., you can build a hashtable to record what methods have been archived.
426 //
427 // [2] Memory for all data for AOT code, including CachedCodeDirectory, should be
428 // allocated using AOTCacheAccess::allocate_aot_code_region().
429 //
430 // [3] CachedCodeDirectory must be the very first allocation.
431 //
432 // [4] Two kinds of pointer can be stored:
433 // - A pointer p that points to metadata. AOTCacheAccess::can_generate_aot_code(p) must return true.
434 // - A pointer to a buffer returned by AOTCacheAccess::allocate_aot_code_region().
435 // (It's OK to point to an interior location within this buffer).
436 // Such pointers must be stored using AOTCacheAccess::set_pointer()
437 //
438 // The buffers allocated by AOTCacheAccess::allocate_aot_code_region() are in a contiguous region. At runtime, this
439 // region is mapped to the process address space. All the pointers in this buffer are relocated as necessary
440 // (e.g., to account for the runtime location of the CodeCache).
441 //
// This is always at the very beginning of the mmapped CDS "ac" (AOT code) region.
// Assigned when the region is mapped for use (see the AOTCodeCache constructor)
// and when it is created for dumping (see CachedCodeDirectory::create()).
static CachedCodeDirectory* _aot_code_directory = nullptr;
444
445 CachedCodeDirectory* CachedCodeDirectory::create() {
446 assert(AOTCacheAccess::is_aot_code_region_empty(), "must be");
447 CachedCodeDirectory* dir = (CachedCodeDirectory*)AOTCacheAccess::allocate_aot_code_region(sizeof(CachedCodeDirectory));
448 return dir;
449 }
450
451 #define DATA_ALIGNMENT HeapWordSize
452
// Construct the cache.  For use: reserve memory, map the AOT code region
// into it, and verify the header.  For dump: allocate the store buffer.
// On any failure, set_failed() is called and the caller must delete this.
AOTCodeCache::AOTCodeCache(bool is_dumping, bool is_using) :
  _load_header(nullptr),
  _load_buffer(nullptr),
  _store_buffer(nullptr),
  _C_store_buffer(nullptr),
  _write_position(0),
  _load_size(0),
  _store_size(0),
  _for_use(is_using),
  _for_dump(is_dumping),
  _closing(false),
  _failed(false),
  _lookup_failed(false),
  _for_preload(false),
  _has_clinit_barriers(false),
  _table(nullptr),
  _load_entries(nullptr),
  _search_entries(nullptr),
  _store_entries(nullptr),
  _C_strings_buf(nullptr),
  _store_entries_cnt(0),
  _compile_id(0),
  _comp_level(0)
{
  // Read header at the beginning of cache
  if (_for_use) {
    // Read cache
    size_t load_size = AOTCacheAccess::get_aot_code_region_size();
    ReservedSpace rs = MemoryReserver::reserve(load_size, mtCode);
    if (!rs.is_reserved()) {
      log_warning(aot, codecache, init)("Failed to reserved %u bytes of memory for mapping AOT code region into AOT Code Cache", (uint)load_size);
      set_failed();
      return;
    }
    if (!AOTCacheAccess::map_aot_code_region(rs)) {
      log_warning(aot, codecache, init)("Failed to read/mmap AOT code region (ac) into AOT Code Cache");
      set_failed();
      return;
    }
    // The directory is the first object in the mapped region.
    _aot_code_directory = (CachedCodeDirectory*)rs.base();

    _load_size = _aot_code_directory->_aot_code_size;
    _load_buffer = _aot_code_directory->_aot_code_data;
    assert(is_aligned(_load_buffer, DATA_ALIGNMENT), "load_buffer is not aligned");
    log_info(aot, codecache, init)("Mapped %u bytes at address " INTPTR_FORMAT " from AOT Code Cache", _load_size, p2i(_load_buffer));

    _load_header = (Header*)addr(0);
    if (!_load_header->verify(_load_size)) {
      set_failed();
      return;
    }
    log_info (aot, codecache, init)("Loaded %u AOT code entries from AOT Code Cache", _load_header->entries_count());
    log_debug(aot, codecache, init)("  %s:  total=%u", aot_code_entry_kind_name[AOTCodeEntry::Adapter], _load_header->adapters_count());
    log_debug(aot, codecache, init)("  %s:  total=%u", aot_code_entry_kind_name[AOTCodeEntry::SharedBlob], _load_header->shared_blobs_count());
    log_debug(aot, codecache, init)("  %s:  total=%u", aot_code_entry_kind_name[AOTCodeEntry::C1Blob], _load_header->C1_blobs_count());
    log_debug(aot, codecache, init)("  %s:  total=%u", aot_code_entry_kind_name[AOTCodeEntry::C2Blob], _load_header->C2_blobs_count());
    log_debug(aot, codecache, init)("  %s:  total=%u", aot_code_entry_kind_name[AOTCodeEntry::Stub], _load_header->stubs_count());
    log_debug(aot, codecache, init)("  %s:  total=%u", aot_code_entry_kind_name[AOTCodeEntry::Nmethod], _load_header->nmethods_count());
    log_debug(aot, codecache, init)("  AOT code cache size: %u bytes", _load_header->cache_size());

    // Read strings
    load_strings();
  }
  if (_for_dump) {
    _C_store_buffer = NEW_C_HEAP_ARRAY(char, max_aot_code_size() + DATA_ALIGNMENT, mtCode);
    _store_buffer = align_up(_C_store_buffer, DATA_ALIGNMENT);
    // Entries allocated at the end of buffer in reverse (as on stack).
    _store_entries = (AOTCodeEntry*)align_up(_C_store_buffer + max_aot_code_size(), DATA_ALIGNMENT);
    log_debug(aot, codecache, init)("Allocated store buffer at address " INTPTR_FORMAT " of size %u", p2i(_store_buffer), max_aot_code_size());
  }
  _table = new AOTCodeAddressTable();
}
525
526 void AOTCodeCache::invalidate(AOTCodeEntry* entry) {
527 // This could be concurent execution
528 if (entry != nullptr && is_on()) { // Request could come after cache is closed.
529 _cache->invalidate_entry(entry);
530 }
531 }
532
533 void AOTCodeCache::init_early_stubs_table() {
534 AOTCodeAddressTable* table = addr_table();
535 if (table != nullptr) {
536 table->init_early_stubs();
537 }
538 }
539
540 void AOTCodeCache::init_shared_blobs_table() {
541 AOTCodeAddressTable* table = addr_table();
542 if (table != nullptr) {
543 table->init_shared_blobs();
544 }
545 }
546
547 void AOTCodeCache::init_stubs_table() {
548 AOTCodeAddressTable* table = addr_table();
549 if (table != nullptr) {
550 table->init_stubs();
551 }
552 }
553
554 void AOTCodeCache::init_early_c1_table() {
555 AOTCodeAddressTable* table = addr_table();
556 if (table != nullptr) {
557 table->init_early_c1();
558 }
559 }
560
561 void AOTCodeCache::init_c1_table() {
562 AOTCodeAddressTable* table = addr_table();
563 if (table != nullptr) {
564 table->init_c1();
565 }
566 }
567
568 void AOTCodeCache::init_c2_table() {
569 AOTCodeAddressTable* table = addr_table();
570 if (table != nullptr) {
571 table->init_c2();
572 }
573 }
574
// Shut the cache down: block new readers/writers, drain in-flight readers,
// finalize the dump (if any), and release the buffers and address table.
AOTCodeCache::~AOTCodeCache() {
  if (_closing) {
    return; // Already closed
  }
  // Stop any further access to cache.
  // Checked on entry to load_nmethod() and store_nmethod().
  _closing = true;
  if (_for_use) {
    // Wait for all load_nmethod() finish.
    wait_for_no_nmethod_readers();
  }
  // Prevent writing code into cache while we are closing it.
  // This lock held by ciEnv::register_method() which calls store_nmethod().
  MutexLocker ml(Compile_lock);
  if (for_dump()) { // Finalize cache
    finish_write();
  }
  _load_buffer = nullptr;
  if (_C_store_buffer != nullptr) {
    FREE_C_HEAP_ARRAY(char, _C_store_buffer);
    _C_store_buffer = nullptr;
    _store_buffer = nullptr;
  }
  if (_table != nullptr) {
    // Guard table teardown with the C-strings lock (no safepoint check).
    MutexLocker ml(AOTCodeCStrings_lock, Mutex::_no_safepoint_check_flag);
    delete _table;
    _table = nullptr;
  }
}
604
605 void AOTCodeCache::Config::record(uint cpu_features_offset) {
606 _flags = 0;
607 #ifdef ASSERT
611 _flags |= compressedOops;
612 }
613 if (UseCompressedClassPointers) {
614 _flags |= compressedClassPointers;
615 }
616 if (UseTLAB) {
617 _flags |= useTLAB;
618 }
619 if (JavaAssertions::systemClassDefault()) {
620 _flags |= systemClassAssertions;
621 }
622 if (JavaAssertions::userClassDefault()) {
623 _flags |= userClassAssertions;
624 }
625 if (EnableContended) {
626 _flags |= enableContendedPadding;
627 }
628 if (RestrictContended) {
629 _flags |= restrictContendedPadding;
630 }
631 if (PreserveFramePointer) {
632 _flags |= preserveFramePointer;
633 }
634 _codeCacheSize = pointer_delta(CodeCache::high_bound(), CodeCache::low_bound(), 1);
635 _compressedOopShift = CompressedOops::shift();
636 _compressedOopBase = CompressedOops::base();
637 _compressedKlassShift = CompressedKlassPointers::shift();
638 _compressedKlassBase = CompressedKlassPointers::base();
639 _contendedPaddingWidth = ContendedPaddingWidth;
640 _objectAlignment = ObjectAlignmentInBytes;
641 _gcCardSize = GCCardSizeInBytes;
642 _gc = (uint)Universe::heap()->kind();
643 _maxVectorSize = MaxVectorSize;
644 _arrayOperationPartialInlineSize = ArrayOperationPartialInlineSize;
645 _allocatePrefetchLines = AllocatePrefetchLines;
646 _allocateInstancePrefetchLines = AllocateInstancePrefetchLines;
647 _allocatePrefetchDistance = AllocatePrefetchDistance;
648 _allocatePrefetchStepSize = AllocatePrefetchStepSize;
649 _cpu_features_offset = cpu_features_offset;
650 }
651
652 bool AOTCodeCache::Config::verify_cpu_features(AOTCodeCache* cache) const {
653 LogStreamHandle(Debug, aot, codecache, init) log;
654 uint offset = _cpu_features_offset;
655 uint cpu_features_size = *(uint *)cache->addr(offset);
656 assert(cpu_features_size == (uint)VM_Version::cpu_features_size(), "must be");
657 offset += sizeof(uint);
658
659 void* cached_cpu_features_buffer = (void *)cache->addr(offset);
660 if (log.is_enabled()) {
661 ResourceMark rm; // required for stringStream::as_string()
662 stringStream ss;
663 VM_Version::get_cpu_features_name(cached_cpu_features_buffer, ss);
664 log.print_cr("CPU features recorded in AOTCodeCache: %s", ss.as_string());
665 }
666
667 if (VM_Version::supports_features(cached_cpu_features_buffer)) {
668 if (log.is_enabled()) {
686 }
687 return false;
688 }
689 return true;
690 }
691
692 bool AOTCodeCache::Config::verify(AOTCodeCache* cache) const {
693 // First checks affect all cached AOT code
694 #ifdef ASSERT
695 if ((_flags & debugVM) == 0) {
696 log_debug(aot, codecache, init)("AOT Code Cache disabled: it was created by product VM, it can't be used by debug VM");
697 return false;
698 }
699 #else
700 if ((_flags & debugVM) != 0) {
701 log_debug(aot, codecache, init)("AOT Code Cache disabled: it was created by debug VM, it can't be used by product VM");
702 return false;
703 }
704 #endif
705
706 size_t codeCacheSize = pointer_delta(CodeCache::high_bound(), CodeCache::low_bound(), 1);
707 if (codeCacheSize > _codeCacheSize) { // Only allow smaller or equal CodeCache size in production run
708 log_debug(aot, codecache, init)("AOT Code Cache disabled: it was created with CodeCache size = %dKb vs current %dKb", (int)(_codeCacheSize/K), (int)(codeCacheSize/K));
709 return false;
710 }
711
712 CollectedHeap::Name aot_gc = (CollectedHeap::Name)_gc;
713 if (aot_gc != Universe::heap()->kind()) {
714 log_debug(aot, codecache, init)("AOT Code Cache disabled: it was created with different GC: %s vs current %s", GCConfig::hs_err_name(aot_gc), GCConfig::hs_err_name());
715 return false;
716 }
717
718 // We don't need to cache CardTable::card_shift() if GCCardSizeInBytes stay the same
719 if (_gcCardSize != (uint)GCCardSizeInBytes) {
720 log_debug(aot, codecache, init)("AOT Code Cache disabled: it was created with GCCardSizeInBytes = %d vs current %d", _gcCardSize, GCCardSizeInBytes);
721 return false;
722 }
723
724 if (_objectAlignment != (uint)ObjectAlignmentInBytes) {
725 log_debug(aot, codecache, init)("AOT Code Cache disabled: it was created with ObjectAlignmentInBytes = %d vs current %d", _objectAlignment, ObjectAlignmentInBytes);
726 return false;
727 }
728
729 if (((_flags & enableContendedPadding) != 0) != EnableContended) {
730 log_debug(aot, codecache, init)("AOT Code Cache disabled: it was created with EnableContended = %s vs current %s", (enableContendedPadding ? "false" : "true"), (EnableContended ? "true" : "false"));
731 return false;
732 }
733 if (((_flags & restrictContendedPadding) != 0) != RestrictContended) {
734 log_debug(aot, codecache, init)("AOT Code Cache disabled: it was created with RestrictContended = %s vs current %s", (restrictContendedPadding ? "false" : "true"), (RestrictContended ? "true" : "false"));
735 return false;
736 }
737 if (_contendedPaddingWidth != (uint)ContendedPaddingWidth) {
738 log_debug(aot, codecache, init)("AOT Code Cache disabled: it was created with ContendedPaddingWidth = %d vs current %d", _contendedPaddingWidth, ContendedPaddingWidth);
739 return false;
740 }
741
742 if (((_flags & preserveFramePointer) != 0) != PreserveFramePointer) {
743 log_debug(aot, codecache, init)("AOT Code Cache disabled: it was created with PreserveFramePointer = %s vs current %s", (preserveFramePointer ? "false" : "true"), (PreserveFramePointer ? "true" : "false"));
744 return false;
745 }
746
747 if (((_flags & compressedClassPointers) != 0) != UseCompressedClassPointers) {
748 log_debug(aot, codecache, init)("AOT Code Cache disabled: it was created with UseCompressedClassPointers = %s vs current %s", (compressedClassPointers ? "false" : "true"), (UseCompressedClassPointers ? "true" : "false"));
749 return false;
750 }
751 if (_compressedKlassShift != (uint)CompressedKlassPointers::shift()) {
752 log_debug(aot, codecache, init)("AOT Code Cache disabled: it was created with CompressedKlassPointers::shift() = %d vs current %d", _compressedKlassShift, CompressedKlassPointers::shift());
753 return false;
754 }
755 if ((_compressedKlassBase == nullptr || CompressedKlassPointers::base() == nullptr) && (_compressedKlassBase != CompressedKlassPointers::base())) {
756 log_debug(aot, codecache, init)("AOT Code Cache disabled: incompatible CompressedKlassPointers::base(): %p vs current %p", _compressedKlassBase, CompressedKlassPointers::base());
757 return false;
758 }
759
760 if (((_flags & compressedOops) != 0) != UseCompressedOops) {
761 log_debug(aot, codecache, init)("AOT Code Cache disabled: it was created with UseCompressedOops = %s vs current %s", (compressedOops ? "false" : "true"), (UseCompressedOops ? "true" : "false"));
762 return false;
763 }
764 if (_compressedOopShift != (uint)CompressedOops::shift()) {
765 log_debug(aot, codecache, init)("AOT Code Cache disabled: it was created with different CompressedOops::shift(): %d vs current %d", _compressedOopShift, CompressedOops::shift());
766 return false;
767 }
768 if ((_compressedOopBase == nullptr || CompressedOops::base() == nullptr) && (_compressedOopBase != CompressedOops::base())) {
769 log_debug(aot, codecache, init)("AOTStubCaching is disabled: incompatible CompressedOops::base(): %p vs current %p", _compressedOopBase, CompressedOops::base());
770 return false;
771 }
772
773 // Some of the following checked flags depend on CPU features. Check CPU first.
774 if (!verify_cpu_features(cache)) {
775 return false;
776 }
777
778 // TLAB related flags
779 if (((_flags & useTLAB) != 0) != UseTLAB) {
780 log_debug(aot, codecache, init)("AOT Code Cache disabled: it was created with UseTLAB = %s vs current %s", (useTLAB ? "false" : "true"), (UseTLAB ? "true" : "false"));
781 return false;
782 }
783 if (_allocatePrefetchLines != (uint)AllocatePrefetchLines) {
784 log_debug(aot, codecache, init)("AOT Code Cache disabled: it was created with AllocatePrefetchLines = %d vs current %d", _allocatePrefetchLines, AllocatePrefetchLines);
785 return false;
786 }
787 if (_allocateInstancePrefetchLines != (uint)AllocateInstancePrefetchLines) {
788 log_debug(aot, codecache, init)("AOT Code Cache disabled: it was created with AllocateInstancePrefetchLines = %d vs current %d", _allocateInstancePrefetchLines, AllocateInstancePrefetchLines);
789 return false;
790 }
791 if (_allocatePrefetchDistance != (uint)AllocatePrefetchDistance) {
792 log_debug(aot, codecache, init)("AOT Code Cache disabled: it was created with AllocatePrefetchDistance = %d vs current %d", _allocatePrefetchDistance, AllocatePrefetchDistance);
793 return false;
794 }
795 if (_allocatePrefetchStepSize != (uint)AllocatePrefetchStepSize) {
796 log_debug(aot, codecache, init)("AOT Code Cache disabled: it was created with AllocatePrefetchStepSize = %d vs current %d", _allocatePrefetchStepSize, AllocatePrefetchStepSize);
797 return false;
798 }
799
800 // Vectorization and intrinsics related flags
801 if (_maxVectorSize != (uint)MaxVectorSize) {
802 log_debug(aot, codecache, init)("AOT Code Cache disabled: it was created with MaxVectorSize = %d vs current %d", _maxVectorSize, (uint)MaxVectorSize);
803 return false;
804 }
805 if (_arrayOperationPartialInlineSize != (uint)ArrayOperationPartialInlineSize) {
806 log_debug(aot, codecache, init)("AOT Code Cache disabled: it was created with ArrayOperationPartialInlineSize = %d vs current %d", _arrayOperationPartialInlineSize, (uint)ArrayOperationPartialInlineSize);
807 return false;
808 }
809
810 // Next affects only AOT nmethod
811 if (((_flags & systemClassAssertions) != 0) != JavaAssertions::systemClassDefault()) {
812 log_debug(aot, codecache, init)("AOT Code Cache disabled: it was created with JavaAssertions::systemClassDefault() = %s vs current %s", (systemClassAssertions ? "disabled" : "enabled"), (JavaAssertions::systemClassDefault() ? "enabled" : "disabled"));
813 FLAG_SET_ERGO(AOTCodeCaching, false);
814 }
815 if (((_flags & userClassAssertions) != 0) != JavaAssertions::userClassDefault()) {
816 log_debug(aot, codecache, init)("AOT Code Cache disabled: it was created with JavaAssertions::userClassDefault() = %s vs current %s", (userClassAssertions ? "disabled" : "enabled"), (JavaAssertions::userClassDefault() ? "enabled" : "disabled"));
817 FLAG_SET_ERGO(AOTCodeCaching, false);
818 }
819 return true;
820 }
821
822 bool AOTCodeCache::Header::verify(uint load_size) const {
823 if (_version != AOT_CODE_VERSION) {
824 log_debug(aot, codecache, init)("AOT Code Cache disabled: different AOT Code version %d vs %d recorded in AOT Code header", AOT_CODE_VERSION, _version);
825 return false;
826 }
827 if (load_size < _cache_size) {
828 log_debug(aot, codecache, init)("AOT Code Cache disabled: AOT Code Cache size %d < %d recorded in AOT Code header", load_size, _cache_size);
829 return false;
830 }
831 return true;
832 }
833
// Count of in-flight nmethod readers; the destructor waits for it to drain
// via wait_for_no_nmethod_readers() before closing the cache.
volatile int AOTCodeCache::_nmethod_readers = 0;
835
836 AOTCodeCache* AOTCodeCache::open_for_use() {
837 if (AOTCodeCache::is_on_for_use()) {
838 return AOTCodeCache::cache();
839 }
840 return nullptr;
841 }
842
843 AOTCodeCache* AOTCodeCache::open_for_dump() {
844 if (AOTCodeCache::is_on_for_dump()) {
845 AOTCodeCache* cache = AOTCodeCache::cache();
846 cache->clear_lookup_failed(); // Reset bit
847 return cache;
848 }
849 return nullptr;
850 }
851
852 bool AOTCodeCache::is_address_in_aot_cache(address p) {
853 AOTCodeCache* cache = open_for_use();
854 if (cache == nullptr) {
855 return false;
856 }
857 if ((p >= (address)cache->cache_buffer()) &&
858 (p < (address)(cache->cache_buffer() + cache->load_size()))) {
859 return true;
860 }
861 return false;
862 }
863
// Raw byte copy between cache and code buffers, with trace logging.
// 'size' must be positive (checked in debug builds).
static void copy_bytes(const char* from, address to, uint size) {
  assert((int)size > 0, "sanity");
  memcpy(to, from, size);
  log_trace(aot, codecache)("Copied %d bytes from " INTPTR_FORMAT " to " INTPTR_FORMAT, size, p2i(from), p2i(to));
}
869
870 AOTCodeReader::AOTCodeReader(AOTCodeCache* cache, AOTCodeEntry* entry, CompileTask* task) {
871 _cache = cache;
872 _entry = entry;
873 _load_buffer = cache->cache_buffer();
874 _read_position = 0;
875 if (task != nullptr) {
876 _compile_id = task->compile_id();
877 _comp_level = task->comp_level();
878 _preload = task->preload();
879 } else {
880 _compile_id = 0;
881 _comp_level = 0;
882 _preload = false;
883 }
884 _lookup_failed = false;
885 }
886
887 void AOTCodeReader::set_read_position(uint pos) {
888 if (pos == _read_position) {
889 return;
890 }
891 assert(pos < _cache->load_size(), "offset:%d >= file size:%d", pos, _cache->load_size());
892 _read_position = pos;
893 }
894
895 bool AOTCodeCache::set_write_position(uint pos) {
896 if (pos == _write_position) {
897 return true;
898 }
899 if (_store_size < _write_position) {
900 _store_size = _write_position; // Adjust during write
901 }
902 assert(pos < _store_size, "offset:%d >= file size:%d", pos, _store_size);
903 _write_position = pos;
946 if (nbytes == 0) {
947 return 0;
948 }
949 uint new_position = _write_position + nbytes;
950 if (new_position >= (uint)((char*)_store_entries - _store_buffer)) {
951 log_warning(aot, codecache)("Failed to write %d bytes at offset %d to AOT Code Cache. Increase AOTCodeMaxSize.",
952 nbytes, _write_position);
953 set_failed();
954 report_store_failure();
955 return 0;
956 }
957 copy_bytes((const char* )buffer, (address)(_store_buffer + _write_position), nbytes);
958 log_trace(aot, codecache)("Wrote %d bytes at offset %d to AOT Code Cache", nbytes, _write_position);
959 _write_position += nbytes;
960 if (_store_size < _write_position) {
961 _store_size = _write_position;
962 }
963 return nbytes;
964 }
965
// Look up the AOT code entry for 'method' at the requested compilation
// level. Returns nullptr when the method is not in the AOT cache, a JIT
// recompile was already requested, the level is disabled via the
// DisableAOTCode bit mask, no matching entry exists, or compiler
// directives exclude AOT code for this method.
AOTCodeEntry* AOTCodeCache::find_code_entry(const methodHandle& method, uint comp_level) {
  assert(is_using_code(), "AOT code caching should be enabled");
  if (!method->in_aot_cache()) {
    return nullptr;
  }

  MethodCounters* mc = method->method_counters();
  if (mc != nullptr && mc->aot_code_recompile_requested()) {
    return nullptr; // Already requested JIT compilation
  }

  // DisableAOTCode bits: bit 0 disables level 1, bit 1 level 2, bit 2 level 4.
  switch (comp_level) {
    case CompLevel_simple:
      if ((DisableAOTCode & (1 << 0)) != 0) {
        return nullptr;
      }
      break;
    case CompLevel_limited_profile:
      if ((DisableAOTCode & (1 << 1)) != 0) {
        return nullptr;
      }
      break;
    case CompLevel_full_optimization:
      if ((DisableAOTCode & (1 << 2)) != 0) {
        return nullptr;
      }
      break;

    default: return nullptr; // Level 1, 2, and 4 only
  }
  TraceTime t1("Total time to find AOT code", &_t_totalFind, enable_timers(), false);
  if (is_on() && _cache->cache_buffer() != nullptr) {
    // The method's offset relative to the AOT cache base serves as the entry id.
    uint id = AOTCacheAccess::convert_method_to_offset(method());
    AOTCodeEntry* entry = _cache->find_entry(AOTCodeEntry::Nmethod, id, comp_level);
    if (entry == nullptr) {
      LogStreamHandle(Info, aot, codecache, nmethod) log;
      if (log.is_enabled()) {
        ResourceMark rm;
        const char* target_name = method->name_and_sig_as_C_string();
        log.print("Missing entry for '%s' (comp_level %d, id: " UINT32_FORMAT_X_0 ")", target_name, (uint)comp_level, id);
      }
#ifdef ASSERT
    } else {
      assert(!entry->has_clinit_barriers(), "only preload code should have clinit barriers");
      ResourceMark rm;
      assert(method() == entry->method(), "AOTCodeCache: saved nmethod's method %p (name: %s id: " UINT32_FORMAT_X_0
             ") is different from the method %p (name: %s, id: " UINT32_FORMAT_X_0 " being looked up" ,
             entry->method(), entry->method()->name_and_sig_as_C_string(), entry->id(), method(), method()->name_and_sig_as_C_string(), id);
#endif
    }

    // NOTE(review): directives obtained from getMatchingDirective are normally
    // ref-counted and released with DirectivesStack::release — confirm this
    // call site is not leaking the DirectiveSet.
    DirectiveSet* directives = DirectivesStack::getMatchingDirective(method, nullptr);
    if (directives->IgnoreAOTCompiledOption || directives->ExcludeOption) {
      LogStreamHandle(Info, aot, codecache, compilation) log;
      if (log.is_enabled()) {
        log.print("Ignore AOT code entry on level %d for ", comp_level);
        method->print_value_on(&log);
      }
      return nullptr;
    }

    return entry;
  }
  return nullptr;
}
1031
1032 Method* AOTCodeEntry::method() {
1033 assert(_kind == Nmethod, "invalid kind %d", _kind);
1034 assert(AOTCodeCache::is_on_for_use(), "must be");
1035 return AOTCacheAccess::convert_offset_to_method(_id);
1036 }
1037
1038 void* AOTCodeEntry::operator new(size_t x, AOTCodeCache* cache) {
1039 return (void*)(cache->add_entry());
1040 }
1041
1042 static bool check_entry(AOTCodeEntry::Kind kind, uint id, uint comp_level, AOTCodeEntry* entry) {
1043 if (entry->kind() == kind) {
1044 assert(entry->id() == id, "sanity");
1045 if (kind != AOTCodeEntry::Nmethod || // addapters and stubs have only one version
1046 // Look only for normal AOT code entry, preload code is handled separately
1047 (!entry->not_entrant() && (entry->comp_level() == comp_level))) {
1048 return true; // Found
1049 }
1050 }
1051 return false;
1052 }
1053
1054 AOTCodeEntry* AOTCodeCache::find_entry(AOTCodeEntry::Kind kind, uint id, uint comp_level) {
1055 assert(_for_use, "sanity");
1056 uint count = _load_header->entries_count();
1057 if (_load_entries == nullptr) {
1058 // Read it
1059 _search_entries = (uint*)addr(_load_header->search_table_offset()); // [id, index]
1060 _load_entries = (AOTCodeEntry*)addr(_load_header->entries_offset());
1061 log_debug(aot, codecache, init)("Read %d entries table at offset %d from AOT Code Cache", count, _load_header->entries_offset());
1062 }
1063 // Binary search
1064 int l = 0;
1065 int h = count - 1;
1066 while (l <= h) {
1067 int mid = (l + h) >> 1;
1068 int ix = mid * 2;
1069 uint is = _search_entries[ix];
1070 if (is == id) {
1071 int index = _search_entries[ix + 1];
1072 AOTCodeEntry* entry = &(_load_entries[index]);
1073 if (check_entry(kind, id, comp_level, entry)) {
1074 return entry; // Found
1075 }
1076 // Leaner search around
1077 for (int i = mid - 1; i >= l; i--) { // search back
1078 ix = i * 2;
1079 is = _search_entries[ix];
1080 if (is != id) {
1081 break;
1082 }
1083 index = _search_entries[ix + 1];
1084 AOTCodeEntry* entry = &(_load_entries[index]);
1085 if (check_entry(kind, id, comp_level, entry)) {
1086 return entry; // Found
1087 }
1088 }
1089 for (int i = mid + 1; i <= h; i++) { // search forward
1090 ix = i * 2;
1091 is = _search_entries[ix];
1092 if (is != id) {
1093 break;
1094 }
1095 index = _search_entries[ix + 1];
1096 AOTCodeEntry* entry = &(_load_entries[index]);
1097 if (check_entry(kind, id, comp_level, entry)) {
1098 return entry; // Found
1099 }
1100 }
1101 break; // No match found
1102 } else if (is < id) {
1103 l = mid + 1;
1104 } else {
1105 h = mid - 1;
1106 }
1107 }
1108 return nullptr;
1109 }
1110
1111 void AOTCodeCache::invalidate_entry(AOTCodeEntry* entry) {
1112 assert(entry!= nullptr, "all entries should be read already");
1113 if (entry->not_entrant()) {
1114 return; // Someone invalidated it already
1115 }
1116 #ifdef ASSERT
1117 assert(_load_entries != nullptr, "sanity");
1118 {
1119 uint name_offset = entry->offset() + entry->name_offset();
1120 const char* name = _load_buffer + name_offset;;
1121 uint level = entry->comp_level();
1122 uint comp_id = entry->comp_id();
1123 bool for_preload = entry->for_preload();
1124 bool clinit_brs = entry->has_clinit_barriers();
1125 log_info(aot, codecache, nmethod)("Invalidating entry for '%s' (comp_id %d, comp_level %d, hash: " UINT32_FORMAT_X_0 "%s%s)",
1126 name, comp_id, level, entry->id(), (for_preload ? "P" : "A"), (clinit_brs ? ", has clinit barriers" : ""));
1127 }
1128 assert(entry->is_loaded() || entry->for_preload(), "invalidate only AOT code in use or a preload code");
1129 bool found = false;
1130 uint i = 0;
1131 uint count = 0;
1132 if (entry->for_preload()) {
1133 count = _load_header->preload_entries_count();
1134 AOTCodeEntry* preload_entry = (AOTCodeEntry*)addr(_load_header->preload_entries_offset());
1135 for (; i < count; i++) {
1136 if (entry == &preload_entry[i]) {
1137 break;
1138 }
1139 }
1140 } else {
1141 count = _load_header->entries_count();
1142 for(; i < count; i++) {
1143 if (entry == &(_load_entries[i])) {
1144 break;
1145 }
1146 }
1147 }
1148 found = (i < count);
1149 assert(found, "entry should exist");
1150 #endif
1151 entry->set_not_entrant();
1152 uint name_offset = entry->offset() + entry->name_offset();
1153 const char* name = _load_buffer + name_offset;;
1154 uint level = entry->comp_level();
1155 uint comp_id = entry->comp_id();
1156 bool for_preload = entry->for_preload();
1157 bool clinit_brs = entry->has_clinit_barriers();
1158 log_info(aot, codecache, nmethod)("Invalidated entry for '%s' (comp_id %d, comp_level %d, hash: " UINT32_FORMAT_X_0 "%s%s)",
1159 name, comp_id, level, entry->id(), (for_preload ? "P" : "A"), (clinit_brs ? ", has clinit barriers" : ""));
1160
1161 if (!for_preload && (entry->comp_level() == CompLevel_full_optimization)) {
1162 // Invalidate preload code if normal AOT C2 code is invalidated,
1163 // most likely because some dependencies changed during run.
1164 // We can still use normal AOT code if preload code is
1165 // invalidated - normal AOT code has less restrictions.
1166 Method* method = entry->method();
1167 MethodCounters* mc = entry->method()->method_counters();
1168 if (mc != nullptr && mc->aot_preload_code_entry() != nullptr) {
1169 AOTCodeEntry* preload_entry = mc->aot_preload_code_entry();
1170 if (preload_entry != nullptr) {
1171 assert(preload_entry->for_preload(), "expecting only such entries here");
1172 invalidate_entry(preload_entry);
1173 }
1174 }
1175 }
1176 }
1177
// qsort comparator for the [id, index] search-table pairs: orders by the
// leading unsigned id word; the trailing index word is ignored.
static int uint_cmp(const void *i, const void *j) {
  const uint a = *(const uint *)i;
  const uint b = *(const uint *)j;
  return (a < b) ? -1 : ((a > b) ? 1 : 0);
}
1183
1184 void AOTCodeCache::store_cpu_features(char*& buffer, uint buffer_size) {
1185 uint* size_ptr = (uint *)buffer;
1186 *size_ptr = buffer_size;
1187 buffer += sizeof(uint);
1188
1189 VM_Version::store_cpu_features(buffer);
1190 log_debug(aot, codecache, exit)("CPU features recorded in AOTCodeCache: %s", VM_Version::features_string());
1191 buffer += buffer_size;
1192 buffer = align_up(buffer, DATA_ALIGNMENT);
1193 }
1194
// Finalize the AOT code region: copy entries (preload entries first, then
// normal ones), their code, the string table and the sorted search table
// from the store buffer into a freshly allocated AOT cache region, then
// fill in the header. Returns false only on write/alignment failure; a
// cache with zero surviving entries is not an error.
bool AOTCodeCache::finish_write() {
  if (!align_write()) {
    return false;
  }
  // End of AOT code
  uint code_size = _write_position;
  uint strings_offset = code_size;
  int strings_count = store_strings();
  if (strings_count < 0) {
    return false;
  }
  if (!align_write()) {
    return false;
  }
  uint strings_size = _write_position - strings_offset;

  uint code_count = _store_entries_cnt;
  if (code_count > 0) {
    _aot_code_directory = CachedCodeDirectory::create();
    assert(_aot_code_directory != nullptr, "Sanity check");

    // Upper-bound size estimate for the final region.
    uint header_size = (uint)align_up(sizeof(AOTCodeCache::Header), DATA_ALIGNMENT);
    uint search_count = code_count * 2;
    uint search_size = search_count * sizeof(uint);
    uint entries_size = (uint)align_up(code_count * sizeof(AOTCodeEntry), DATA_ALIGNMENT); // In bytes
    // _write_position should include code and strings
    uint code_alignment = code_count * DATA_ALIGNMENT; // We align_up code size when storing it.
    uint cpu_features_size = VM_Version::cpu_features_size();
    uint total_cpu_features_size = sizeof(uint) + cpu_features_size; // sizeof(uint) to store cpu_features_size
    uint total_size = header_size + _write_position + code_alignment + search_size + entries_size +
                     align_up(total_cpu_features_size, DATA_ALIGNMENT);
    assert(total_size < max_aot_code_size(), "AOT Code size (" UINT32_FORMAT " bytes) is greater than AOTCodeMaxSize(" UINT32_FORMAT " bytes).", total_size, max_aot_code_size());

    // Allocate in AOT Cache buffer
    // NOTE(review): the allocation result is used without a null check —
    // confirm allocate_aot_code_region() cannot fail at this point.
    char* buffer = (char *)AOTCacheAccess::allocate_aot_code_region(total_size + DATA_ALIGNMENT);
    char* start = align_up(buffer, DATA_ALIGNMENT);
    char* current = start + header_size; // Skip header

    uint cpu_features_offset = current - start;
    store_cpu_features(current, cpu_features_size);
    assert(is_aligned(current, DATA_ALIGNMENT), "sanity check");
    assert(current < start + total_size, "sanity check");

    // Create ordered search table for entries [id, index];
    uint* search = NEW_C_HEAP_ARRAY(uint, search_count, mtCode);

    AOTCodeEntry* entries_address = _store_entries; // Pointer to latest entry
    AOTCodeStats stats;
    uint max_size = 0;
    // AOTCodeEntry entries were allocated in reverse in store buffer.
    // Process them in reverse order to cache first code first.

    // Store AOTCodeEntry-s for preload code
    current = align_up(current, DATA_ALIGNMENT);
    uint preload_entries_cnt = 0;
    uint preload_entries_offset = current - start;
    AOTCodeEntry* preload_entries = (AOTCodeEntry*)current;
    for (int i = code_count - 1; i >= 0; i--) {
      AOTCodeEntry* entry = &entries_address[i];
      if (entry->load_fail()) {
        continue;
      }
      if (entry->for_preload()) {
        if (entry->not_entrant()) {
          // Skip not entrant preload code:
          // we can't pre-load code which may have failing dependencies.
          log_info(aot, codecache, exit)("Skip not entrant preload code comp_id: %d, comp_level: %d, hash: " UINT32_FORMAT_X_0 "%s",
                                         entry->comp_id(), entry->comp_level(), entry->id(), (entry->has_clinit_barriers() ? ", has clinit barriers" : ""));
        } else {
          copy_bytes((const char*)entry, (address)current, sizeof(AOTCodeEntry));
          stats.collect_entry_stats(entry);
          current += sizeof(AOTCodeEntry);
          preload_entries_cnt++;
        }
      }
    }

    // Now write the data for preload AOTCodeEntry
    for (int i = 0; i < (int)preload_entries_cnt; i++) {
      AOTCodeEntry* entry = &preload_entries[i];
      uint size = align_up(entry->size(), DATA_ALIGNMENT);
      if (size > max_size) {
        max_size = size;
      }
      copy_bytes((_store_buffer + entry->offset()), (address)current, size);
      entry->set_offset(current - start); // New offset
      current += size;
    }

    current = align_up(current, DATA_ALIGNMENT);
    uint entries_count = 0;
    uint new_entries_offset = current - start;
    AOTCodeEntry* code_entries = (AOTCodeEntry*)current;
    // Now scan normal entries
    for (int i = code_count - 1; i >= 0; i--) {
      AOTCodeEntry* entry = &entries_address[i];
      if (entry->load_fail() || entry->for_preload()) {
        continue;
      }
      if (entry->not_entrant()) {
        log_info(aot, codecache, exit)("Not entrant new entry comp_id: %d, comp_level: %d, hash: " UINT32_FORMAT_X_0 "%s",
                                       entry->comp_id(), entry->comp_level(), entry->id(), (entry->has_clinit_barriers() ? ", has clinit barriers" : ""));
        entry->set_entrant(); // Reset
      }
      copy_bytes((const char*)entry, (address)current, sizeof(AOTCodeEntry));
      stats.collect_entry_stats(entry);
      current += sizeof(AOTCodeEntry);
      // Only normal (non-preload) entries participate in the search table.
      search[entries_count*2 + 0] = entry->id();
      search[entries_count*2 + 1] = entries_count;
      entries_count++;
    }

    // Now write the data for normal AOTCodeEntry
    for (int i = 0; i < (int)entries_count; i++) {
      AOTCodeEntry* entry = &code_entries[i];
      uint size = align_up(entry->size(), DATA_ALIGNMENT);
      if (size > max_size) {
        max_size = size;
      }
      copy_bytes((_store_buffer + entry->offset()), (address)current, size);
      entry->set_offset(current - start); // New offset
      current += size;
    }

    if (preload_entries_cnt == 0 && entries_count == 0) {
      log_info(aot, codecache, exit)("AOT Code Cache was not created: no entries");
      FREE_C_HEAP_ARRAY(uint, search);
      return true; // Nothing to write
    }
    uint total_entries_cnt = preload_entries_cnt + entries_count;
    assert(total_entries_cnt <= code_count, "%d > %d", total_entries_cnt, code_count);
    // Write strings
    if (strings_count > 0) {
      copy_bytes((_store_buffer + strings_offset), (address)current, strings_size);
      strings_offset = (current - start); // New offset
      current += strings_size;
    }

    uint search_table_offset = current - start;
    // Sort and store search table
    qsort(search, entries_count, 2*sizeof(uint), uint_cmp);
    search_size = 2 * entries_count * sizeof(uint);
    copy_bytes((const char*)search, (address)current, search_size);
    FREE_C_HEAP_ARRAY(uint, search);
    current += search_size;

    log_stats_on_exit(stats);

    uint size = (current - start);
    assert(size <= total_size, "%d > %d", size , total_size);
    log_debug(aot, codecache, exit)(" AOT code cache size: %u bytes", size);
    log_debug(aot, codecache, exit)("  header size: %u", header_size);
    log_debug(aot, codecache, exit)("  total code size: %u (max code's size: %u)", code_size, max_size);
    log_debug(aot, codecache, exit)("  entries size: %u", entries_size);
    log_debug(aot, codecache, exit)("  entry search table: %u", search_size);
    log_debug(aot, codecache, exit)("  C strings size: %u", strings_size);
    log_debug(aot, codecache, exit)("  CPU features data: %u", total_cpu_features_size);

    // Finalize header
    AOTCodeCache::Header* header = (AOTCodeCache::Header*)start;
    header->init(size, (uint)strings_count, strings_offset,
                 entries_count, search_table_offset, new_entries_offset,
                 preload_entries_cnt, preload_entries_offset,
                 stats.entry_count(AOTCodeEntry::Adapter), stats.entry_count(AOTCodeEntry::SharedBlob),
                 stats.entry_count(AOTCodeEntry::C1Blob), stats.entry_count(AOTCodeEntry::C2Blob),
                 stats.entry_count(AOTCodeEntry::Stub), cpu_features_offset);

    log_info(aot, codecache, exit)("Wrote %d AOT code entries to AOT Code Cache", total_entries_cnt);

    // Publish the finished region to the AOT cache directory.
    _aot_code_directory->set_aot_code_data(size, start);
  }
  return true;
}
1368
1369 //------------------Store/Load AOT code ----------------------
1370
1371 bool AOTCodeCache::store_code_blob(CodeBlob& blob, AOTCodeEntry::Kind entry_kind, uint id, const char* name) {
1372 AOTCodeCache* cache = open_for_dump();
1373 if (cache == nullptr) {
1374 return false;
1375 }
1376 assert(AOTCodeEntry::is_valid_entry_kind(entry_kind), "invalid entry_kind %d", entry_kind);
1377
1378 if (AOTCodeEntry::is_adapter(entry_kind) && !is_dumping_adapter()) {
1379 return false;
1380 }
1381 if (AOTCodeEntry::is_blob(entry_kind) && !is_dumping_stub()) {
1382 return false;
1383 }
1384 log_debug(aot, codecache, stubs)("Writing blob '%s' (id=%u, kind=%s) to AOT Code Cache", name, id, aot_code_entry_kind_name[entry_kind]);
1419 return false;
1420 }
1421 CodeBlob::archive_blob(&blob, archive_buffer);
1422
1423 uint reloc_data_size = blob.relocation_size();
1424 n = cache->write_bytes((address)blob.relocation_begin(), reloc_data_size);
1425 if (n != reloc_data_size) {
1426 return false;
1427 }
1428
1429 bool has_oop_maps = false;
1430 if (blob.oop_maps() != nullptr) {
1431 if (!cache->write_oop_map_set(blob)) {
1432 return false;
1433 }
1434 has_oop_maps = true;
1435 }
1436
1437 #ifndef PRODUCT
1438 // Write asm remarks
1439 if (!cache->write_asm_remarks(blob.asm_remarks(), /* use_string_table */ true)) {
1440 return false;
1441 }
1442 if (!cache->write_dbg_strings(blob.dbg_strings(), /* use_string_table */ true)) {
1443 return false;
1444 }
1445 #endif /* PRODUCT */
1446
1447 if (!cache->write_relocations(blob)) {
1448 if (!cache->failed()) {
1449 // We may miss an address in AOT table - skip this code blob.
1450 cache->set_write_position(entry_position);
1451 }
1452 return false;
1453 }
1454
1455 uint entry_size = cache->_write_position - entry_position;
1456 AOTCodeEntry* entry = new(cache) AOTCodeEntry(entry_kind, encode_id(entry_kind, id),
1457 entry_position, entry_size, name_offset, name_size,
1458 blob_offset, has_oop_maps);
1459 log_debug(aot, codecache, stubs)("Wrote code blob '%s' (id=%u, kind=%s) to AOT Code Cache", name, id, aot_code_entry_kind_name[entry_kind]);
1460 return true;
1461 }
1462
1463 bool AOTCodeCache::store_code_blob(CodeBlob& blob, AOTCodeEntry::Kind entry_kind, BlobId id) {
1464 assert(AOTCodeEntry::is_blob(entry_kind),
1465 "wrong entry kind for blob id %s", StubInfo::name(id));
1466 return store_code_blob(blob, entry_kind, (uint)id, StubInfo::name(id));
1467 }
1468
1469 CodeBlob* AOTCodeCache::load_code_blob(AOTCodeEntry::Kind entry_kind, uint id, const char* name) {
1470 AOTCodeCache* cache = open_for_use();
1471 if (cache == nullptr) {
1472 return nullptr;
1473 }
1474 assert(AOTCodeEntry::is_valid_entry_kind(entry_kind), "invalid entry_kind %d", entry_kind);
1475
1476 if (AOTCodeEntry::is_adapter(entry_kind) && !is_using_adapter()) {
1477 return nullptr;
1478 }
1479 if (AOTCodeEntry::is_blob(entry_kind) && !is_using_stub()) {
1480 return nullptr;
1481 }
1482 log_debug(aot, codecache, stubs)("Reading blob '%s' (id=%u, kind=%s) from AOT Code Cache", name, id, aot_code_entry_kind_name[entry_kind]);
1483
1484 AOTCodeEntry* entry = cache->find_entry(entry_kind, encode_id(entry_kind, id));
1485 if (entry == nullptr) {
1486 return nullptr;
1487 }
1488 AOTCodeReader reader(cache, entry, nullptr);
1489 CodeBlob* blob = reader.compile_code_blob(name);
1490
1491 log_debug(aot, codecache, stubs)("%sRead blob '%s' (id=%u, kind=%s) from AOT Code Cache",
1492 (blob == nullptr? "Failed to " : ""), name, id, aot_code_entry_kind_name[entry_kind]);
1493 return blob;
1494 }
1495
1496 CodeBlob* AOTCodeCache::load_code_blob(AOTCodeEntry::Kind entry_kind, BlobId id) {
1497 assert(AOTCodeEntry::is_blob(entry_kind),
1498 "wrong entry kind for blob id %s", StubInfo::name(id));
1499 return load_code_blob(entry_kind, (uint)id, StubInfo::name(id));
1500 }
1501
// Materialize a CodeBlob from this reader's cache entry: verify the stored
// name matches the expected one, reconstruct the archived blob together
// with its relocation data and oop maps in the CodeCache, then fix up
// relocations for the current process. Returns nullptr on a name mismatch
// or when the CodeCache has no room.
CodeBlob* AOTCodeReader::compile_code_blob(const char* name) {
  uint entry_position = _entry->offset();

  // Read name
  uint name_offset = entry_position + _entry->name_offset();
  uint name_size = _entry->name_size(); // Includes '\0'
  const char* stored_name = addr(name_offset);

  if (strncmp(stored_name, name, (name_size - 1)) != 0) {
    log_warning(aot, codecache, stubs)("Saved blob's name '%s' is different from the expected name '%s'",
                                       stored_name, name);
    set_lookup_failed(); // Skip this blob
    return nullptr;
  }

  // Read archived code blob
  uint offset = entry_position + _entry->code_offset();
  CodeBlob* archived_blob = (CodeBlob*)addr(offset);
  offset += archived_blob->size();

  // Relocation data immediately follows the archived blob image.
  address reloc_data = (address)addr(offset);
  offset += archived_blob->relocation_size();
  set_read_position(offset);

  ImmutableOopMapSet* oop_maps = nullptr;
  if (_entry->has_oop_maps()) {
    oop_maps = read_oop_map_set();
  }

  CodeBlob* code_blob = CodeBlob::create(archived_blob,
                                         stored_name,
                                         reloc_data,
                                         oop_maps
                                        );
  if (code_blob == nullptr) { // no space left in CodeCache
    return nullptr;
  }

#ifndef PRODUCT
  code_blob->asm_remarks().init();
  read_asm_remarks(code_blob->asm_remarks(), /* use_string_table */ true);
  code_blob->dbg_strings().init();
  read_dbg_strings(code_blob->dbg_strings(), /* use_string_table */ true);
#endif // PRODUCT

  // Rebind stored relocations to addresses valid in this process.
  fix_relocations(code_blob);

#ifdef ASSERT
  LogStreamHandle(Trace, aot, codecache, stubs) log;
  if (log.is_enabled()) {
    FlagSetting fs(PrintRelocations, true);
    code_blob->print_on(&log);
  }
#endif
  return code_blob;
}
1558
// Store a generated stub's machine code and name in the cache under the
// given vmIntrinsicID. Returns false when stub dumping is disabled, the
// cache is unavailable, or a write fails. Stubs are expected to carry no
// relocations; the debug build verifies this and aborts otherwise.
bool AOTCodeCache::store_stub(StubCodeGenerator* cgen, vmIntrinsicID id, const char* name, address start) {
  if (!is_dumping_stub()) {
    return false;
  }
  AOTCodeCache* cache = open_for_dump();
  if (cache == nullptr) {
    return false;
  }
  log_info(aot, codecache, stubs)("Writing stub '%s' id:%d to AOT Code Cache", name, (int)id);
  if (!cache->align_write()) {
    return false;
  }
#ifdef ASSERT
  // Any relocation in a stub's code section is unsupported: dump it and abort.
  CodeSection* cs = cgen->assembler()->code_section();
  if (cs->has_locs()) {
    uint reloc_count = cs->locs_count();
    tty->print_cr("======== write stubs code section relocations [%d]:", reloc_count);
    // Collect additional data
    RelocIterator iter(cs);
    while (iter.next()) {
      switch (iter.type()) {
        case relocInfo::none:
          break;
        default: {
          iter.print_current_on(tty);
          fatal("stub's relocation %d unimplemented", (int)iter.type());
          break;
        }
      }
    }
  }
#endif
  uint entry_position = cache->_write_position;

  // Write code
  uint code_offset = 0;
  uint code_size = cgen->assembler()->pc() - start;
  uint n = cache->write_bytes(start, code_size);
  if (n != code_size) {
    return false;
  }
  // Write name
  uint name_offset = cache->_write_position - entry_position;
  uint name_size = (uint)strlen(name) + 1; // Includes '\0'
  n = cache->write_bytes(name, name_size);
  if (n != name_size) {
    return false;
  }
  uint entry_size = cache->_write_position - entry_position;
  // Placement-new registers the entry in the cache's entries region.
  AOTCodeEntry* entry = new(cache) AOTCodeEntry(entry_position, entry_size, name_offset, name_size,
                                                code_offset, code_size,
                                                AOTCodeEntry::Stub, (uint32_t)id);
  log_info(aot, codecache, stubs)("Wrote stub '%s' id:%d to AOT Code Cache", name, (int)id);
  return true;
}
1614
1615 bool AOTCodeCache::load_stub(StubCodeGenerator* cgen, vmIntrinsicID id, const char* name, address start) {
1616 if (!is_using_stub()) {
1617 return false;
1618 }
1619 assert(start == cgen->assembler()->pc(), "wrong buffer");
1620 AOTCodeCache* cache = open_for_use();
1621 if (cache == nullptr) {
1622 return false;
1623 }
1624 AOTCodeEntry* entry = cache->find_entry(AOTCodeEntry::Stub, (uint)id);
1625 if (entry == nullptr) {
1626 return false;
1627 }
1628 uint entry_position = entry->offset();
1629 // Read name
1630 uint name_offset = entry->name_offset() + entry_position;
1631 uint name_size = entry->name_size(); // Includes '/0'
1632 const char* saved_name = cache->addr(name_offset);
1633 if (strncmp(name, saved_name, (name_size - 1)) != 0) {
1634 log_warning(aot, codecache)("Saved stub's name '%s' is different from '%s' for id:%d", saved_name, name, (int)id);
1635 cache->set_failed();
1636 report_load_failure();
1637 return false;
1638 }
1639 log_info(aot, codecache, stubs)("Reading stub '%s' id:%d from AOT Code Cache", name, (int)id);
1640 // Read code
1641 uint code_offset = entry->code_offset() + entry_position;
1642 uint code_size = entry->code_size();
1643 copy_bytes(cache->addr(code_offset), start, code_size);
1644 cgen->assembler()->code_section()->set_end(start + code_size);
1645 log_info(aot, codecache, stubs)("Read stub '%s' id:%d from AOT Code Cache", name, (int)id);
1646 return true;
1647 }
1648
1649 AOTCodeEntry* AOTCodeCache::store_nmethod(nmethod* nm, AbstractCompiler* compiler, bool for_preload) {
1650 if (!is_dumping_code()) {
1651 return nullptr;
1652 }
1653 assert(CDSConfig::is_dumping_aot_code(), "should be called only when allowed");
1654 AOTCodeCache* cache = open_for_dump();
1655 precond(cache != nullptr);
1656 precond(!nm->is_osr_method()); // AOT compilation is requested only during AOT cache assembly phase
1657 if (!compiler->is_c1() && !compiler->is_c2()) {
1658 // Only c1 and c2 compilers
1659 return nullptr;
1660 }
1661 int comp_level = nm->comp_level();
1662 if (comp_level == CompLevel_full_profile) {
1663 // Do not cache C1 compiles with full profile i.e. tier3
1664 return nullptr;
1665 }
1666 assert(comp_level == CompLevel_simple || comp_level == CompLevel_limited_profile || comp_level == CompLevel_full_optimization, "must be");
1667
1668 TraceTime t1("Total time to store AOT code", &_t_totalStore, enable_timers(), false);
1669 AOTCodeEntry* entry = nullptr;
1670 entry = cache->write_nmethod(nm, for_preload);
1671 if (entry == nullptr) {
1672 log_info(aot, codecache, nmethod)("%d (L%d): nmethod store attempt failed", nm->compile_id(), comp_level);
1673 }
1674 // Clean up fields which could be set here
1675 cache->_for_preload = false;
1676 cache->_has_clinit_barriers = false;
1677 return entry;
1678 }
1679
// Serialize an nmethod into the store buffer: name, archived CodeBlob
// image, relocation data, oops/metadata, oop maps, immutable data,
// relocation immediates and (non-product) asm remarks/debug strings.
// On a lookup failure the write position is rewound so the partial record
// is discarded. Returns the new entry, or nullptr on any failure.
AOTCodeEntry* AOTCodeCache::write_nmethod(nmethod* nm, bool for_preload) {
  AOTCodeCache* cache = open_for_dump();
  assert(cache != nullptr, "sanity check");
  assert(!nm->has_clinit_barriers() || (ClassInitBarrierMode > 0), "sanity");
  uint comp_id = nm->compile_id();
  uint comp_level = nm->comp_level();
  Method* method = nm->method();
  if (!AOTCacheAccess::can_generate_aot_code(method)) {
    ResourceMark rm;
    log_info(aot, codecache, nmethod)("%d (L%d): Skip method '%s' for AOT%s compile: not in AOT cache", comp_id, (int)comp_level, method->name_and_sig_as_C_string(), (for_preload ? " preload" : ""));
    assert(AOTCacheAccess::can_generate_aot_code(method), "sanity");
    return nullptr;
  }
  // Only methods from builtin class loaders can be cached.
  InstanceKlass* holder = method->method_holder();
  bool builtin_loader = holder->class_loader_data()->is_builtin_class_loader_data();
  if (!builtin_loader) {
    ResourceMark rm;
    log_info(aot, codecache, nmethod)("%d (L%d): Skip method '%s' loaded by custom class loader %s", comp_id, (int)comp_level, method->name_and_sig_as_C_string(), holder->class_loader_data()->loader_name());
    assert(builtin_loader, "sanity");
    return nullptr;
  }

  // These flags are reset by the caller (store_nmethod) after the write.
  _for_preload = for_preload;
  _has_clinit_barriers = nm->has_clinit_barriers();
  assert(!_has_clinit_barriers || _for_preload, "only preload code has clinit barriers");

  if (!align_write()) {
    return nullptr;
  }

  uint entry_position = _write_position;

  // Write name
  uint name_offset = 0;
  uint name_size = 0;
  uint id = 0;
  uint n;
  {
    ResourceMark rm;
    const char* name = method->name_and_sig_as_C_string();
    // NOTE(review): the "%s" argument already begins with ", " so this
    // prints a doubled comma — consider "(comp level: %d%s)". comp_level
    // is also printed twice.
    log_info(aot, codecache, nmethod)("%d (L%d): Writing nmethod '%s' (comp level: %d, %s) to AOT Code Cache",
                                      comp_id, (int)comp_level, name, comp_level,
                                      (nm->has_clinit_barriers() ? ", has clinit barriers" : ""));

    LogStreamHandle(Info, aot, codecache, loader) log;
    if (log.is_enabled()) {
      oop loader = holder->class_loader();
      oop domain = holder->protection_domain();
      log.print("Holder: ");
      holder->print_value_on(&log);
      log.print(" loader: ");
      if (loader == nullptr) {
        log.print("nullptr");
      } else {
        loader->print_value_on(&log);
      }
      log.print(" domain: ");
      if (domain == nullptr) {
        log.print("nullptr");
      } else {
        domain->print_value_on(&log);
      }
      log.cr();
    }
    name_offset = _write_position - entry_position;
    name_size = (uint)strlen(name) + 1; // Includes '\0'
    n = write_bytes(name, name_size);
    if (n != name_size) {
      return nullptr;
    }
  }
  // The entry id is the method's offset from the AOT cache base address.
  id = AOTCacheAccess::delta_from_base_address((address)nm->method());

  // Write CodeBlob
  if (!cache->align_write()) {
    return nullptr;
  }
  uint blob_offset = cache->_write_position - entry_position;
  address archive_buffer = cache->reserve_bytes(nm->size());
  if (archive_buffer == nullptr) {
    return nullptr;
  }
  CodeBlob::archive_blob(nm, archive_buffer);

  uint reloc_data_size = nm->relocation_size();
  n = write_bytes((address)nm->relocation_begin(), reloc_data_size);
  if (n != reloc_data_size) {
    return nullptr;
  }

  // Write oops and metadata present in the nmethod's data region
  if (!write_oops(nm)) {
    if (lookup_failed() && !failed()) {
      // Skip this method and reposition file
      set_write_position(entry_position);
    }
    return nullptr;
  }
  if (!write_metadata(nm)) {
    if (lookup_failed() && !failed()) {
      // Skip this method and reposition file
      set_write_position(entry_position);
    }
    return nullptr;
  }

  bool has_oop_maps = false;
  if (nm->oop_maps() != nullptr) {
    if (!cache->write_oop_map_set(*nm)) {
      return nullptr;
    }
    has_oop_maps = true;
  }

  uint immutable_data_size = nm->immutable_data_size();
  n = write_bytes(nm->immutable_data_begin(), immutable_data_size);
  if (n != immutable_data_size) {
    return nullptr;
  }

  JavaThread* thread = JavaThread::current();
  HandleMark hm(thread);
  GrowableArray<Handle> oop_list;
  GrowableArray<Metadata*> metadata_list;

  // Oops and metadata referenced directly from relocations.
  nm->create_reloc_immediates_list(thread, oop_list, metadata_list);
  if (!write_nmethod_reloc_immediates(oop_list, metadata_list)) {
    if (lookup_failed() && !failed()) {
      // Skip this method and reposition file
      set_write_position(entry_position);
    }
    return nullptr;
  }

  if (!write_relocations(*nm, &oop_list, &metadata_list)) {
    return nullptr;
  }

#ifndef PRODUCT
  if (!cache->write_asm_remarks(nm->asm_remarks(), /* use_string_table */ false)) {
    return nullptr;
  }
  if (!cache->write_dbg_strings(nm->dbg_strings(), /* use_string_table */ false)) {
    return nullptr;
  }
#endif /* PRODUCT */

  uint entry_size = _write_position - entry_position;
  AOTCodeEntry* entry = new (this) AOTCodeEntry(AOTCodeEntry::Nmethod, id,
                                                entry_position, entry_size,
                                                name_offset, name_size,
                                                blob_offset, has_oop_maps,
                                                comp_level, comp_id,
                                                nm->has_clinit_barriers(), for_preload);
  {
    ResourceMark rm;
    const char* name = nm->method()->name_and_sig_as_C_string();
    log_info(aot, codecache, nmethod)("%d (L%d): Wrote nmethod '%s'%s to AOT Code Cache",
                                      comp_id, (int)comp_level, name, (for_preload ? " (for preload)" : ""));
  }
  // Under VerifyAOTCode the entry stays recorded but nullptr is returned —
  // presumably so the caller treats the store as failed; confirm intent.
  if (VerifyAOTCode) {
    return nullptr;
  }
  return entry;
}
1845
// Load the AOT-compiled nmethod for `target` from the cache and install it
// through the CompileBroker task attached to `env`. Returns true when the
// nmethod was successfully registered. On failure the cache entry is marked
// load-failed and not-entrant so it will not be retried.
bool AOTCodeCache::load_nmethod(ciEnv* env, ciMethod* target, int entry_bci, AbstractCompiler* compiler, CompLevel comp_level) {
  if (!is_using_code()) {
    return false;
  }
  AOTCodeCache* cache = open_for_use();
  if (cache == nullptr) {
    return false;
  }
  // Only whole-method (non-OSR) compilations are stored in the cache.
  assert(entry_bci == InvocationEntryBci, "unexpected entry_bci=%d", entry_bci);
  TraceTime t1("Total time to load AOT code", &_t_totalLoad, enable_timers(), false);
  CompileTask* task = env->task();
  // Record load timing on the task for later reporting.
  task->mark_aot_load_start(os::elapsed_counter());
  AOTCodeEntry* entry = task->aot_code_entry();
  bool preload = task->preload();
  assert(entry != nullptr, "sanity");
  if (log_is_enabled(Info, aot, codecache, nmethod)) {
    // Logging needs VM state to resolve and print the method name.
    VM_ENTRY_MARK;
    ResourceMark rm;
    methodHandle method(THREAD, target->get_Method());
    const char* target_name = method->name_and_sig_as_C_string();
    uint id = AOTCacheAccess::convert_method_to_offset(method());
    bool clinit_brs = entry->has_clinit_barriers();
    log_info(aot, codecache, nmethod)("%d (L%d): %s nmethod '%s' (id: " UINT32_FORMAT_X_0 "%s)",
                 task->compile_id(), task->comp_level(), (preload ? "Preloading" : "Reading"),
                 target_name, id, (clinit_brs ? ", has clinit barriers" : ""));
  }
  // Take the reading lock; it fails when the cache has been closed.
  ReadingMark rdmk;
  if (rdmk.failed()) {
    // Cache is closed, cannot touch anything.
    return false;
  }

  AOTCodeReader reader(cache, entry, task);
  bool success = reader.compile_nmethod(env, target, compiler);
  if (success) {
    task->set_num_inlined_bytecodes(entry->num_inlined_bytecodes());
  } else {
    // Mark the entry so future lookups skip it.
    entry->set_load_fail();
    entry->set_not_entrant();
  }
  task->mark_aot_load_finish(os::elapsed_counter());
  return success;
}
1889
// Reconstruct an nmethod from its archived form. The read order must mirror
// the write order exactly: archived nmethod header, relocation data,
// oops/metadata, oop maps, immutable data, reloc-immediate oops/metadata.
// Returns true when the nmethod was registered and the task succeeded.
bool AOTCodeReader::compile_nmethod(ciEnv* env, ciMethod* target, AbstractCompiler* compiler) {
  CompileTask* task = env->task();
  AOTCodeEntry* aot_code_entry = (AOTCodeEntry*)_entry;
  nmethod* nm = nullptr;

  // The archived nmethod image lives at code_offset within the entry.
  uint entry_position = aot_code_entry->offset();
  uint archived_nm_offset = entry_position + aot_code_entry->code_offset();
  nmethod* archived_nm = (nmethod*)addr(archived_nm_offset);
  set_read_position(archived_nm_offset + archived_nm->size());

  OopRecorder* oop_recorder = new OopRecorder(env->arena());
  env->set_oop_recorder(oop_recorder);

  uint offset;

  // Relocation bytes follow the nmethod image.
  offset = read_position();
  address reloc_data = (address)addr(offset);
  offset += archived_nm->relocation_size();
  set_read_position(offset);

  // Read oops and metadata
  VM_ENTRY_MARK
  GrowableArray<Handle> oop_list;
  GrowableArray<Metadata*> metadata_list;

  if (!read_oop_metadata_list(THREAD, target, oop_list, metadata_list, oop_recorder)) {
    return false;
  }

  ImmutableOopMapSet* oopmaps = read_oop_map_set();

  // Immutable data (dependencies etc.) is stored as one raw byte range.
  offset = read_position();
  address immutable_data = (address)addr(offset);
  offset += archived_nm->immutable_data_size();
  set_read_position(offset);

  // Oops/metadata referenced as relocation immediates are stored separately
  // from the recorder-managed lists (nullptr recorder here).
  GrowableArray<Handle> reloc_immediate_oop_list;
  GrowableArray<Metadata*> reloc_immediate_metadata_list;
  if (!read_oop_metadata_list(THREAD, target, reloc_immediate_oop_list, reloc_immediate_metadata_list, nullptr)) {
    return false;
  }

  // Read Dependencies (compressed already)
  Dependencies* dependencies = new Dependencies(env);
  dependencies->set_content(immutable_data, archived_nm->dependencies_size());
  env->set_dependencies(dependencies);

  const char* name = addr(entry_position + aot_code_entry->name_offset());

  // NOTE(review): with VerifyAOTCode the load is abandoned here after all the
  // reads — presumably to force a fresh compilation for comparison against
  // the cached code; confirm against the matching bail-out on the write side.
  if (VerifyAOTCode) {
    return false;
  }

  TraceTime t1("Total time to register AOT nmethod", &_t_totalRegister, enable_timers(), false);
  nm = env->register_aot_method(THREAD,
                                target,
                                compiler,
                                archived_nm,
                                reloc_data,
                                oop_list,
                                metadata_list,
                                oopmaps,
                                immutable_data,
                                reloc_immediate_oop_list,
                                reloc_immediate_metadata_list,
                                this);
  bool success = task->is_success();
  if (success) {
    log_info(aot, codecache, nmethod)("%d (L%d): Read nmethod '%s' from AOT Code Cache", compile_id(), comp_level(), name);
#ifdef ASSERT
    LogStreamHandle(Debug, aot, codecache, nmethod) log;
    if (log.is_enabled()) {
      // Dump the installed nmethod, including relocations, for debugging.
      FlagSetting fs(PrintRelocations, true);
      nm->print_on(&log);
      nm->decode2(&log);
    }
#endif
  }

  return success;
}
1971
1972 bool skip_preload(methodHandle mh) {
1973 if (!mh->method_holder()->is_loaded()) {
1974 return true;
1975 }
1976 DirectiveSet* directives = DirectivesStack::getMatchingDirective(mh, nullptr);
1977 if (directives->DontPreloadOption || directives->ExcludeOption) {
1978 LogStreamHandle(Info, aot, codecache, init) log;
1979 if (log.is_enabled()) {
1980 log.print("Exclude preloading code for ");
1981 mh->print_value_on(&log);
1982 }
1983 return true;
1984 }
1985 return false;
1986 }
1987
1988 void AOTCodeCache::preload_code(JavaThread* thread) {
1989 if (!is_using_code()) {
1990 return;
1991 }
1992 AbstractCompiler* comp = CompileBroker::compiler(CompLevel_full_optimization);
1993 if (comp == nullptr) {
1994 log_debug(aot, codecache, init)("AOT preload code skipped: C2 compiler disabled");
1995 return;
1996 }
1997
1998 if ((DisableAOTCode & (1 << 3)) != 0) {
1999 return; // no preloaded code (level 5);
2000 }
2001 _cache->preload_aot_code(thread);
2002 }
2003
2004 void AOTCodeCache::preload_aot_code(TRAPS) {
2005 if (CompilationPolicy::compiler_count(CompLevel_full_optimization) == 0) {
2006 // Since we reuse the CompilerBroker API to install AOT code, we're required to have a JIT compiler for the
2007 // level we want (that is CompLevel_full_optimization).
2008 return;
2009 }
2010 TraceTime t1("Total time to preload AOT code", &_t_totalPreload, enable_timers(), false);
2011 assert(_for_use, "sanity");
2012 uint count = _load_header->entries_count();
2013 uint preload_entries_count = _load_header->preload_entries_count();
2014 if (preload_entries_count > 0) {
2015 log_info(aot, codecache, init)("Load %d preload entries from AOT Code Cache", preload_entries_count);
2016 AOTCodeEntry* preload_entry = (AOTCodeEntry*)addr(_load_header->preload_entries_offset());
2017 uint count = MIN2(preload_entries_count, AOTCodePreloadStop);
2018 for (uint i = AOTCodePreloadStart; i < count; i++) {
2019 AOTCodeEntry* entry = &preload_entry[i];
2020 if (entry->not_entrant()) {
2021 continue;
2022 }
2023 methodHandle mh(THREAD, entry->method());
2024 assert((mh.not_null() && AOTMetaspace::in_aot_cache((address)mh())), "sanity");
2025 if (skip_preload(mh)) {
2026 continue; // Exclude preloading for this method
2027 }
2028 assert(mh->method_holder()->is_loaded(), "");
2029 if (!mh->method_holder()->is_linked()) {
2030 ResourceMark rm;
2031 log_debug(aot, codecache, init)("Preload AOT code for %s skipped: method holder is not linked",
2032 mh->name_and_sig_as_C_string());
2033 continue; // skip
2034 }
2035 CompileBroker::preload_aot_method(mh, entry, CHECK);
2036 }
2037 }
2038 }
2039
2040 // ------------ process code and data --------------
2041
// Can't use -1. It is a valid value for a jump-to-itself destination,
// as used by the static call stub: see NativeJump::jump_destination().
2044 #define BAD_ADDRESS_ID -2
2045
2046 bool AOTCodeCache::write_relocations(CodeBlob& code_blob, GrowableArray<Handle>* oop_list, GrowableArray<Metadata*>* metadata_list) {
2047 GrowableArray<uint> reloc_data;
2048 RelocIterator iter(&code_blob);
2049 LogStreamHandle(Trace, aot, codecache, reloc) log;
2050 while (iter.next()) {
2051 int idx = reloc_data.append(0); // default value
2052 switch (iter.type()) {
2053 case relocInfo::none:
2054 break;
2055 case relocInfo::oop_type: {
2056 oop_Relocation* r = (oop_Relocation*)iter.reloc();
2057 if (r->oop_is_immediate()) {
2058 assert(oop_list != nullptr, "sanity check");
2059 // store index of oop in the reloc immediate oop list
2060 Handle h(JavaThread::current(), r->oop_value());
2061 int oop_idx = oop_list->find(h);
2062 assert(oop_idx != -1, "sanity check");
2063 reloc_data.at_put(idx, (uint)oop_idx);
2064 }
2065 break;
2066 }
2067 case relocInfo::metadata_type: {
2068 metadata_Relocation* r = (metadata_Relocation*)iter.reloc();
2069 if (r->metadata_is_immediate()) {
2070 assert(metadata_list != nullptr, "sanity check");
2071 // store index of metadata in the reloc immediate metadata list
2072 int metadata_idx = metadata_list->find(r->metadata_value());
2073 assert(metadata_idx != -1, "sanity check");
2074 reloc_data.at_put(idx, (uint)metadata_idx);
2075 }
2076 break;
2077 }
2078 case relocInfo::virtual_call_type: // Fall through. They all call resolve_*_call blobs.
2079 case relocInfo::opt_virtual_call_type:
2080 case relocInfo::static_call_type: {
2081 CallRelocation* r = (CallRelocation*)iter.reloc();
2082 address dest = r->destination();
2083 if (dest == r->addr()) { // possible call via trampoline on Aarch64
2084 dest = (address)-1; // do nothing in this case when loading this relocation
2085 }
2086 int id = _table->id_for_address(dest, iter, &code_blob);
2087 if (id == BAD_ADDRESS_ID) {
2088 return false;
2089 }
2090 reloc_data.at_put(idx, id);
2091 break;
2092 }
2093 case relocInfo::trampoline_stub_type: {
2094 address dest = ((trampoline_stub_Relocation*)iter.reloc())->destination();
2095 int id = _table->id_for_address(dest, iter, &code_blob);
2096 if (id == BAD_ADDRESS_ID) {
2097 return false;
2098 }
2099 reloc_data.at_put(idx, id);
2100 break;
2101 }
2102 case relocInfo::static_stub_type:
2103 break;
2104 case relocInfo::runtime_call_type: {
2105 // Record offset of runtime destination
2106 CallRelocation* r = (CallRelocation*)iter.reloc();
2107 address dest = r->destination();
2108 if (dest == r->addr()) { // possible call via trampoline on Aarch64
2109 dest = (address)-1; // do nothing in this case when loading this relocation
2110 }
2111 int id = _table->id_for_address(dest, iter, &code_blob);
2112 if (id == BAD_ADDRESS_ID) {
2113 return false;
2114 }
2115 reloc_data.at_put(idx, id);
2116 break;
2117 }
2118 case relocInfo::runtime_call_w_cp_type:
2119 log_debug(aot, codecache, reloc)("runtime_call_w_cp_type relocation is not implemented");
2120 return false;
2121 case relocInfo::external_word_type: {
2122 // Record offset of runtime target
2123 address target = ((external_word_Relocation*)iter.reloc())->target();
2124 int id = _table->id_for_address(target, iter, &code_blob);
2125 if (id == BAD_ADDRESS_ID) {
2126 return false;
2127 }
2128 reloc_data.at_put(idx, id);
2129 break;
2130 }
2131 case relocInfo::internal_word_type: {
2132 address target = ((internal_word_Relocation*)iter.reloc())->target();
2133 // assert to make sure that delta fits into 32 bits
2134 assert(CodeCache::contains((void *)target), "Wrong internal_word_type relocation");
2135 uint delta = (uint)(target - code_blob.content_begin());
2136 reloc_data.at_put(idx, delta);
2137 break;
2138 }
2139 case relocInfo::section_word_type: {
2140 address target = ((section_word_Relocation*)iter.reloc())->target();
2141 assert(CodeCache::contains((void *)target), "Wrong section_word_type relocation");
2142 uint delta = (uint)(target - code_blob.content_begin());
2143 reloc_data.at_put(idx, delta);
2144 break;
2145 }
2146 case relocInfo::poll_type:
2147 break;
2148 case relocInfo::poll_return_type:
2149 break;
2150 case relocInfo::post_call_nop_type:
2151 break;
2152 case relocInfo::entry_guard_type:
2153 break;
2154 default:
2155 log_debug(aot, codecache, reloc)("relocation %d unimplemented", (int)iter.type());
2156 return false;
2157 break;
2158 }
2159 if (log.is_enabled()) {
2160 iter.print_current_on(&log);
2161 }
2162 }
2163
2164 // Write additional relocation data: uint per relocation
2165 // Write the count first
2166 int count = reloc_data.length();
2167 write_bytes(&count, sizeof(int));
2168 for (GrowableArrayIterator<uint> iter = reloc_data.begin();
2169 iter != reloc_data.end(); ++iter) {
2170 uint value = *iter;
2171 int n = write_bytes(&value, sizeof(uint));
2172 if (n != sizeof(uint)) {
2173 return false;
2174 }
2175 }
2176 return true;
2177 }
2178
// Patch the relocations of a freshly loaded code blob using the per-reloc
// auxiliary data written by write_relocations(). The iteration order here
// must match the writer exactly: reloc_data[j] is consumed in lock-step with
// the RelocIterator.
void AOTCodeReader::fix_relocations(CodeBlob* code_blob, GrowableArray<Handle>* oop_list, GrowableArray<Metadata*>* metadata_list) {
  LogStreamHandle(Trace, aot, reloc) log;
  // Read the count-prefixed array of per-relocation uints.
  uint offset = read_position();
  int count = *(int*)addr(offset);
  offset += sizeof(int);
  if (log.is_enabled()) {
    log.print_cr("======== extra relocations count=%d", count);
  }
  uint* reloc_data = (uint*)addr(offset);
  offset += (count * sizeof(uint));
  set_read_position(offset);

  RelocIterator iter(code_blob);
  int j = 0; // index into reloc_data, advanced once per relocation
  while (iter.next()) {
    switch (iter.type()) {
      case relocInfo::none:
        break;
      case relocInfo::oop_type: {
        assert(code_blob->is_nmethod(), "sanity check");
        oop_Relocation* r = (oop_Relocation*)iter.reloc();
        if (r->oop_is_immediate()) {
          // reloc_data[j] is an index into the reloc-immediate oop list.
          assert(oop_list != nullptr, "sanity check");
          Handle h = oop_list->at(reloc_data[j]);
          r->set_value(cast_from_oop<address>(h()));
        } else {
          r->fix_oop_relocation();
        }
        break;
      }
      case relocInfo::metadata_type: {
        assert(code_blob->is_nmethod(), "sanity check");
        metadata_Relocation* r = (metadata_Relocation*)iter.reloc();
        Metadata* m;
        if (r->metadata_is_immediate()) {
          // reloc_data[j] is an index into the reloc-immediate metadata list.
          assert(metadata_list != nullptr, "sanity check");
          m = metadata_list->at(reloc_data[j]);
        } else {
          // Get already updated value from nmethod.
          int index = r->metadata_index();
          m = code_blob->as_nmethod()->metadata_at(index);
        }
        r->set_value((address)m);
        break;
      }
      case relocInfo::virtual_call_type: // Fall through. They all call resolve_*_call blobs.
      case relocInfo::opt_virtual_call_type:
      case relocInfo::static_call_type: {
        // reloc_data[j] is an address-table id; (address)-1 means "leave as-is"
        // (trampoline call recorded by the writer).
        address dest = _cache->address_for_id(reloc_data[j]);
        if (dest != (address)-1) {
          ((CallRelocation*)iter.reloc())->set_destination(dest);
        }
        break;
      }
      case relocInfo::trampoline_stub_type: {
        address dest = _cache->address_for_id(reloc_data[j]);
        if (dest != (address)-1) {
          ((trampoline_stub_Relocation*)iter.reloc())->set_destination(dest);
        }
        break;
      }
      case relocInfo::static_stub_type:
        break;
      case relocInfo::runtime_call_type: {
        address dest = _cache->address_for_id(reloc_data[j]);
        if (dest != (address)-1) {
          ((CallRelocation*)iter.reloc())->set_destination(dest);
        }
        break;
      }
      case relocInfo::runtime_call_w_cp_type:
        // this relocation should not be in cache (see write_relocations)
        assert(false, "runtime_call_w_cp_type relocation is not implemented");
        break;
      case relocInfo::external_word_type: {
        address target = _cache->address_for_id(reloc_data[j]);
        // Add external address to global table
        int index = ExternalsRecorder::find_index(target);
        // Update index in relocation
        Relocation::add_jint(iter.data(), index);
        external_word_Relocation* reloc = (external_word_Relocation*)iter.reloc();
        assert(reloc->target() == target, "sanity");
        reloc->set_value(target); // Patch address in the code
        break;
      }
      case relocInfo::internal_word_type: {
        // reloc_data[j] is an offset from the blob's content start.
        uint delta = reloc_data[j];
        internal_word_Relocation* r = (internal_word_Relocation*)iter.reloc();
        r->fix_relocation_after_aot_load(code_blob->content_begin(), delta);
        break;
      }
      case relocInfo::section_word_type: {
        uint delta = reloc_data[j];
        section_word_Relocation* r = (section_word_Relocation*)iter.reloc();
        r->fix_relocation_after_aot_load(code_blob->content_begin(), delta);
        break;
      }
      case relocInfo::poll_type:
        break;
      case relocInfo::poll_return_type:
        break;
      case relocInfo::post_call_nop_type:
        break;
      case relocInfo::entry_guard_type:
        break;
      default:
        assert(false,"relocation %d unimplemented", (int)iter.type());
        break;
    }
    if (log.is_enabled()) {
      iter.print_current_on(&log);
    }
    j++;
  }
  // Every recorded uint must have been consumed.
  assert(j == count, "sanity");
}
2295
2296 bool AOTCodeCache::write_nmethod_reloc_immediates(GrowableArray<Handle>& oop_list, GrowableArray<Metadata*>& metadata_list) {
2297 int count = oop_list.length();
2298 if (!write_bytes(&count, sizeof(int))) {
2299 return false;
2300 }
2301 for (GrowableArrayIterator<Handle> iter = oop_list.begin();
2302 iter != oop_list.end(); ++iter) {
2303 Handle h = *iter;
2304 if (!write_oop(h())) {
2305 return false;
2306 }
2307 }
2308
2309 count = metadata_list.length();
2310 if (!write_bytes(&count, sizeof(int))) {
2311 return false;
2312 }
2313 for (GrowableArrayIterator<Metadata*> iter = metadata_list.begin();
2314 iter != metadata_list.end(); ++iter) {
2315 Metadata* m = *iter;
2316 if (!write_metadata(m)) {
2317 return false;
2318 }
2319 }
2320 return true;
2321 }
2322
// Write all metadata entries of the nmethod to the cache, preceded by a
// count. NOTE(review): the count written is metadata_count()-1 while the
// loop covers the whole [metadata_begin, metadata_end) range — presumably
// metadata_count() includes one extra slot; confirm against the reader side.
bool AOTCodeCache::write_metadata(nmethod* nm) {
  int count = nm->metadata_count()-1;
  if (!write_bytes(&count, sizeof(int))) {
    return false;
  }
  for (Metadata** p = nm->metadata_begin(); p < nm->metadata_end(); p++) {
    if (!write_metadata(*p)) {
      return false;
    }
  }
  return true;
}
2335
2336 bool AOTCodeCache::write_metadata(Metadata* m) {
2337 uint n = 0;
2338 if (m == nullptr) {
2339 DataKind kind = DataKind::Null;
2340 n = write_bytes(&kind, sizeof(int));
2341 if (n != sizeof(int)) {
2342 return false;
2343 }
2344 } else if (m == (Metadata*)Universe::non_oop_word()) {
2345 DataKind kind = DataKind::No_Data;
2346 n = write_bytes(&kind, sizeof(int));
2347 if (n != sizeof(int)) {
2348 return false;
2349 }
2350 } else if (m->is_klass()) {
2351 if (!write_klass((Klass*)m)) {
2352 return false;
2353 }
2354 } else if (m->is_method()) {
2355 if (!write_method((Method*)m)) {
2356 return false;
2357 }
2358 } else if (m->is_methodCounters()) {
2359 DataKind kind = DataKind::MethodCnts;
2360 n = write_bytes(&kind, sizeof(int));
2361 if (n != sizeof(int)) {
2362 return false;
2363 }
2364 if (!write_method(((MethodCounters*)m)->method())) {
2365 return false;
2366 }
2367 log_debug(aot, codecache, metadata)("%d (L%d): Write MethodCounters : " INTPTR_FORMAT, compile_id(), comp_level(), p2i(m));
2368 } else { // Not supported
2369 fatal("metadata : " INTPTR_FORMAT " unimplemented", p2i(m));
2370 return false;
2371 }
2372 return true;
2373 }
2374
// Read one Metadata* written by AOTCodeCache::write_metadata(). Returns
// nullptr (with lookup_failed set) when the referenced metadata cannot be
// resolved in the current VM.
Metadata* AOTCodeReader::read_metadata(const methodHandle& comp_method) {
  uint code_offset = read_position();
  Metadata* m = nullptr;
  DataKind kind = *(DataKind*)addr(code_offset);
  code_offset += sizeof(DataKind);
  set_read_position(code_offset);
  if (kind == DataKind::Null) {
    m = (Metadata*)nullptr;
  } else if (kind == DataKind::No_Data) {
    m = (Metadata*)Universe::non_oop_word();
  } else if (kind == DataKind::Klass) {
    m = (Metadata*)read_klass(comp_method);
  } else if (kind == DataKind::Method) {
    m = (Metadata*)read_method(comp_method);
  } else if (kind == DataKind::MethodCnts) {
    // The writer emitted a nested Method record (its own DataKind tag
    // followed by the method offset); skip the inner tag, then read the
    // method and re-derive its MethodCounters.
    kind = *(DataKind*)addr(code_offset);
    code_offset += sizeof(DataKind);
    set_read_position(code_offset);
    m = (Metadata*)read_method(comp_method);
    if (m != nullptr) {
      Method* method = (Method*)m;
      m = method->get_method_counters(Thread::current());
      if (m == nullptr) {
        set_lookup_failed();
        log_debug(aot, codecache, metadata)("%d (L%d): Failed to get MethodCounters", compile_id(), comp_level());
      } else {
        log_debug(aot, codecache, metadata)("%d (L%d): Read MethodCounters : " INTPTR_FORMAT, compile_id(), comp_level(), p2i(m));
      }
    }
  } else {
    set_lookup_failed();
    log_debug(aot, codecache, metadata)("%d (L%d): Unknown metadata's kind: %d", compile_id(), comp_level(), (int)kind);
  }
  return m;
}
2410
2411 bool AOTCodeCache::write_method(Method* method) {
2412 ResourceMark rm; // To method's name printing
2413 if (AOTCacheAccess::can_generate_aot_code(method)) {
2414 DataKind kind = DataKind::Method;
2415 uint n = write_bytes(&kind, sizeof(int));
2416 if (n != sizeof(int)) {
2417 return false;
2418 }
2419 uint method_offset = AOTCacheAccess::delta_from_base_address((address)method);
2420 n = write_bytes(&method_offset, sizeof(uint));
2421 if (n != sizeof(uint)) {
2422 return false;
2423 }
2424 log_debug(aot, codecache, metadata)("%d (L%d): Wrote method: %s @ 0x%08x",
2425 compile_id(), comp_level(), method->name_and_sig_as_C_string(), method_offset);
2426 return true;
2427 }
2428 log_debug(aot, codecache, metadata)("%d (L%d): Method is not archived: %s",
2429 compile_id(), comp_level(), method->name_and_sig_as_C_string());
2430 set_lookup_failed();
2431 return false;
2432 }
2433
// Resolve a Method reference written by AOTCodeCache::write_method().
// Validates that the method and its holder are still usable in this VM
// (in the AOT cache, instance klass, loaded, linked); returns nullptr with
// lookup_failed set otherwise.
Method* AOTCodeReader::read_method(const methodHandle& comp_method) {
  uint code_offset = read_position();
  uint method_offset = *(uint*)addr(code_offset);
  code_offset += sizeof(uint);
  set_read_position(code_offset);
  Method* m = AOTCacheAccess::convert_offset_to_method(method_offset);
  if (!AOTMetaspace::in_aot_cache((address)m)) {
    // Something changed in CDS
    set_lookup_failed();
    log_debug(aot, codecache, metadata)("Lookup failed for shared method: " INTPTR_FORMAT " is not in CDS ", p2i((address)m));
    return nullptr;
  }
  assert(m->is_method(), "sanity");
  ResourceMark rm;
  Klass* k = m->method_holder();
  if (!k->is_instance_klass()) {
    set_lookup_failed();
    log_debug(aot, codecache, metadata)("%d '%s' (L%d): Lookup failed for holder %s: not instance klass",
                  compile_id(), comp_method->name_and_sig_as_C_string(), comp_level(), k->external_name());
    return nullptr;
  } else if (!AOTMetaspace::in_aot_cache((address)k)) {
    set_lookup_failed();
    log_debug(aot, codecache, metadata)("%d '%s' (L%d): Lookup failed for holder %s: not in CDS",
                  compile_id(), comp_method->name_and_sig_as_C_string(), comp_level(), k->external_name());
    return nullptr;
  } else if (!InstanceKlass::cast(k)->is_loaded()) {
    set_lookup_failed();
    log_debug(aot, codecache, metadata)("%d '%s' (L%d): Lookup failed for holder %s: not loaded",
                  compile_id(), comp_method->name_and_sig_as_C_string(), comp_level(), k->external_name());
    return nullptr;
  } else if (!InstanceKlass::cast(k)->is_linked()) {
    set_lookup_failed();
    log_debug(aot, codecache, metadata)("%d '%s' (L%d): Lookup failed for holder %s: not linked%s",
                  compile_id(), comp_method->name_and_sig_as_C_string(), comp_level(), k->external_name(), (_preload ? " for code preload" : ""));
    return nullptr;
  }
  log_debug(aot, codecache, metadata)("%d (L%d): Shared method lookup: %s",
                compile_id(), comp_level(), m->name_and_sig_as_C_string());
  return m;
}
2474
2475 bool AOTCodeCache::write_klass(Klass* klass) {
2476 uint array_dim = 0;
2477 if (klass->is_objArray_klass()) {
2478 array_dim = ObjArrayKlass::cast(klass)->dimension();
2479 klass = ObjArrayKlass::cast(klass)->bottom_klass(); // overwrites klass
2480 }
2481 uint init_state = 0;
2482 bool can_write = true;
2483 if (klass->is_instance_klass()) {
2484 InstanceKlass* ik = InstanceKlass::cast(klass);
2485 init_state = (ik->is_initialized() ? 1 : 0);
2486 can_write = AOTCacheAccess::can_generate_aot_code_for(ik);
2487 } else {
2488 can_write = AOTCacheAccess::can_generate_aot_code(klass);
2489 }
2490 ResourceMark rm;
2491 uint state = (array_dim << 1) | (init_state & 1);
2492 if (can_write) {
2493 DataKind kind = DataKind::Klass;
2494 uint n = write_bytes(&kind, sizeof(int));
2495 if (n != sizeof(int)) {
2496 return false;
2497 }
2498 // Record state of instance klass initialization and array dimentions.
2499 n = write_bytes(&state, sizeof(int));
2500 if (n != sizeof(int)) {
2501 return false;
2502 }
2503 uint klass_offset = AOTCacheAccess::delta_from_base_address((address)klass);
2504 n = write_bytes(&klass_offset, sizeof(uint));
2505 if (n != sizeof(uint)) {
2506 return false;
2507 }
2508 log_debug(aot, codecache, metadata)("%d (L%d): Registered klass: %s%s%s @ 0x%08x",
2509 compile_id(), comp_level(), klass->external_name(),
2510 (!klass->is_instance_klass() ? "" : (init_state == 1 ? " (initialized)" : " (not-initialized)")),
2511 (array_dim > 0 ? " (object array)" : ""), klass_offset);
2512 return true;
2513 }
2514 log_debug(aot, codecache, metadata)("%d (L%d): Klassis not archived: %s%s%s",
2515 compile_id(), comp_level(), klass->external_name(),
2516 (!klass->is_instance_klass() ? "" : (init_state == 1 ? " (initialized)" : " (not-initialized)")),
2517 (array_dim > 0 ? " (object array)" : ""));
2518 set_lookup_failed();
2519 return false;
2520 }
2521
2522 Klass* AOTCodeReader::read_klass(const methodHandle& comp_method) {
2523 uint code_offset = read_position();
2524 uint state = *(uint*)addr(code_offset);
2525 uint init_state = (state & 1);
2526 uint array_dim = (state >> 1);
2527 code_offset += sizeof(int);
2528 uint klass_offset = *(uint*)addr(code_offset);
2529 code_offset += sizeof(uint);
2530 set_read_position(code_offset);
2531 Klass* k = AOTCacheAccess::convert_offset_to_klass(klass_offset);
2532 if (!AOTMetaspace::in_aot_cache((address)k)) {
2533 // Something changed in CDS
2534 set_lookup_failed();
2535 log_debug(aot, codecache, metadata)("Lookup failed for shared klass: " INTPTR_FORMAT " is not in CDS ", p2i((address)k));
2536 return nullptr;
2537 }
2538 assert(k->is_klass(), "sanity");
2539 ResourceMark rm;
2540 if (k->is_instance_klass() && !InstanceKlass::cast(k)->is_loaded()) {
2541 set_lookup_failed();
2542 log_debug(aot, codecache, metadata)("%d '%s' (L%d): Lookup failed for klass %s: not loaded",
2543 compile_id(), comp_method->name_and_sig_as_C_string(), comp_level(), k->external_name());
2544 return nullptr;
2545 } else
2546 // Allow not initialized klass which was uninitialized during code caching or for preload
2547 if (k->is_instance_klass() && !InstanceKlass::cast(k)->is_initialized() && (init_state == 1) && !_preload) {
2548 set_lookup_failed();
2549 log_debug(aot, codecache, metadata)("%d '%s' (L%d): Lookup failed for klass %s: not initialized",
2550 compile_id(), comp_method->name_and_sig_as_C_string(), comp_level(), k->external_name());
2551 return nullptr;
2552 }
2553 if (array_dim > 0) {
2554 assert(k->is_instance_klass() || k->is_typeArray_klass(), "sanity check");
2555 Klass* ak = k->array_klass_or_null(array_dim);
2556 // FIXME: what would it take to create an array class on the fly?
2557 // Klass* ak = k->array_klass(dim, JavaThread::current());
2558 // guarantee(JavaThread::current()->pending_exception() == nullptr, "");
2559 if (ak == nullptr) {
2560 set_lookup_failed();
2561 log_debug(aot, codecache, metadata)("%d (L%d): %d-dimension array klass lookup failed: %s",
2562 compile_id(), comp_level(), array_dim, k->external_name());
2563 }
2564 log_debug(aot, codecache, metadata)("%d (L%d): Klass lookup: %s (object array)", compile_id(), comp_level(), k->external_name());
2565 return ak;
2566 } else {
2567 log_debug(aot, codecache, metadata)("%d (L%d): Shared klass lookup: %s",
2568 compile_id(), comp_level(), k->external_name());
2569 return k;
2570 }
2571 }
2572
2573 bool AOTCodeCache::write_oop(jobject& jo) {
2574 oop obj = JNIHandles::resolve(jo);
2575 return write_oop(obj);
2576 }
2577
// Write a single oop to the cache, tagged with a DataKind that tells the
// reader how to reconstruct it: Null, No_Data (non-oop word), a klass mirror
// (primitive or Klass), an archived String, a well-known class loader, or an
// archived ("permanent") heap object by index. Bails out with lookup_failed
// for oops that cannot be expressed (non-archived strings/objects, arbitrary
// class loaders); Module objects are not supported (fatal).
bool AOTCodeCache::write_oop(oop obj) {
  DataKind kind;
  uint n = 0;
  if (obj == nullptr) {
    kind = DataKind::Null;
    n = write_bytes(&kind, sizeof(int));
    if (n != sizeof(int)) {
      return false;
    }
  } else if (cast_from_oop<void *>(obj) == Universe::non_oop_word()) {
    kind = DataKind::No_Data;
    n = write_bytes(&kind, sizeof(int));
    if (n != sizeof(int)) {
      return false;
    }
  } else if (java_lang_Class::is_instance(obj)) {
    // A java.lang.Class mirror: stored as its primitive type or its Klass.
    if (java_lang_Class::is_primitive(obj)) {
      int bt = (int)java_lang_Class::primitive_type(obj);
      kind = DataKind::Primitive;
      n = write_bytes(&kind, sizeof(int));
      if (n != sizeof(int)) {
        return false;
      }
      n = write_bytes(&bt, sizeof(int));
      if (n != sizeof(int)) {
        return false;
      }
      log_debug(aot, codecache, oops)("%d (L%d): Write primitive type klass: %s", compile_id(), comp_level(), type2name((BasicType)bt));
    } else {
      Klass* klass = java_lang_Class::as_Klass(obj);
      if (!write_klass(klass)) {
        return false;
      }
    }
  } else if (java_lang_String::is_instance(obj)) {
    // Strings are stored by their permanent-archive index.
    int k = AOTCacheAccess::get_archived_object_permanent_index(obj);  // k >= 0 means obj is a "permanent heap object"
    ResourceMark rm;
    size_t length_sz = 0;
    const char* string = java_lang_String::as_utf8_string(obj, length_sz);
    if (k >= 0) {
      kind = DataKind::String;
      n = write_bytes(&kind, sizeof(int));
      if (n != sizeof(int)) {
        return false;
      }
      n = write_bytes(&k, sizeof(int));
      if (n != sizeof(int)) {
        return false;
      }
      log_debug(aot, codecache, oops)("%d (L%d): Write String object: " PTR_FORMAT " : %s", compile_id(), comp_level(), p2i(obj), string);
      return true;
    }
    // Not archived String object - bailout
    set_lookup_failed();
    log_debug(aot, codecache, oops)("%d (L%d): Not archived String object: " PTR_FORMAT " : %s",
                  compile_id(), comp_level(), p2i(obj), string);
    return false;
  } else if (java_lang_Module::is_instance(obj)) {
    fatal("Module object unimplemented");
  } else if (java_lang_ClassLoader::is_instance(obj)) {
    // Only the two well-known loaders can be written; they are stored as
    // dedicated kind tags and re-resolved by the reader.
    if (obj == SystemDictionary::java_system_loader()) {
      kind = DataKind::SysLoader;
      log_debug(aot, codecache, oops)("%d (L%d): Write ClassLoader: java_system_loader", compile_id(), comp_level());
    } else if (obj == SystemDictionary::java_platform_loader()) {
      kind = DataKind::PlaLoader;
      log_debug(aot, codecache, oops)("%d (L%d): Write ClassLoader: java_platform_loader", compile_id(), comp_level());
    } else {
      ResourceMark rm;
      set_lookup_failed();
      log_debug(aot, codecache, oops)("%d (L%d): Not supported Class Loader: " PTR_FORMAT " : %s",
                    compile_id(), comp_level(), p2i(obj), obj->klass()->external_name());
      return false;
    }
    n = write_bytes(&kind, sizeof(int));
    if (n != sizeof(int)) {
      return false;
    }
  } else {
    // Any other object must be a permanent (archived) heap object,
    // stored by its archive index.
    ResourceMark rm;
    int k = AOTCacheAccess::get_archived_object_permanent_index(obj);  // k >= 0 means obj is a "permanent heap object"
    if (k >= 0) {
      kind = DataKind::MH_Oop;
      n = write_bytes(&kind, sizeof(int));
      if (n != sizeof(int)) {
        return false;
      }
      n = write_bytes(&k, sizeof(int));
      if (n != sizeof(int)) {
        return false;
      }
      log_debug(aot, codecache, oops)("%d (L%d): Write MH object: " PTR_FORMAT " : %s",
                    compile_id(), comp_level(), p2i(obj), obj->klass()->external_name());
      return true;
    }
    // Not archived Java object - bailout
    set_lookup_failed();
    log_debug(aot, codecache, oops)("%d (L%d): Not archived Java object: " PTR_FORMAT " : %s",
                  compile_id(), comp_level(), p2i(obj), obj->klass()->external_name());
    return false;
  }
  return true;
}
2680
// Decode one oop previously written by the cache's write_oop().
// The stream entry is a DataKind tag followed by a kind-specific payload;
// the read cursor is advanced past everything consumed.
// Returns nullptr (after setting the lookup-failed flag) when the oop cannot
// be restored in this VM; DataKind::No_Data yields the non-oop sentinel word.
// NOTE(review): the 'thread' parameter is not used in this body — presumably
// kept for signature symmetry with callers; confirm.
oop AOTCodeReader::read_oop(JavaThread* thread, const methodHandle& comp_method) {
  uint code_offset = read_position();
  oop obj = nullptr;
  DataKind kind = *(DataKind*)addr(code_offset);
  code_offset += sizeof(DataKind);
  set_read_position(code_offset);
  if (kind == DataKind::Null) {
    return nullptr;
  } else if (kind == DataKind::No_Data) {
    // Placeholder slot: restore the non-oop sentinel word.
    return cast_to_oop(Universe::non_oop_word());
  } else if (kind == DataKind::Klass) {
    // The oop is a class mirror; restore it via its Klass.
    Klass* k = read_klass(comp_method);
    if (k == nullptr) {
      return nullptr;
    }
    obj = k->java_mirror();
    if (obj == nullptr) {
      set_lookup_failed();
      log_debug(aot, codecache, oops)("Lookup failed for java_mirror of klass %s", k->external_name());
      return nullptr;
    }
  } else if (kind == DataKind::Primitive) {
    // Payload: the BasicType stored as an int; restore the primitive mirror.
    code_offset = read_position();
    int t = *(int*)addr(code_offset);
    code_offset += sizeof(int);
    set_read_position(code_offset);
    BasicType bt = (BasicType)t;
    obj = java_lang_Class::primitive_mirror(bt);
    log_debug(aot, codecache, oops)("%d (L%d): Read primitive type klass: %s", compile_id(), comp_level(), type2name(bt));
  } else if (kind == DataKind::String) {
    // Payload: an int index into the archived-object table.
    code_offset = read_position();
    int k = *(int*)addr(code_offset);
    code_offset += sizeof(int);
    set_read_position(code_offset);
    obj = AOTCacheAccess::get_archived_object(k);
    if (obj == nullptr) {
      set_lookup_failed();
      log_debug(aot, codecache, oops)("Lookup failed for String object");
      return nullptr;
    }
    assert(java_lang_String::is_instance(obj), "must be string");

    ResourceMark rm;
    size_t length_sz = 0;
    const char* string = java_lang_String::as_utf8_string(obj, length_sz);
    log_debug(aot, codecache, oops)("%d (L%d): Read String object: %s", compile_id(), comp_level(), string);
  } else if (kind == DataKind::SysLoader) {
    obj = SystemDictionary::java_system_loader();
    log_debug(aot, codecache, oops)("%d (L%d): Read java_system_loader", compile_id(), comp_level());
  } else if (kind == DataKind::PlaLoader) {
    obj = SystemDictionary::java_platform_loader();
    log_debug(aot, codecache, oops)("%d (L%d): Read java_platform_loader", compile_id(), comp_level());
  } else if (kind == DataKind::MH_Oop) {
    // Payload: an int permanent-index into the archived-object table.
    code_offset = read_position();
    int k = *(int*)addr(code_offset);
    code_offset += sizeof(int);
    set_read_position(code_offset);
    obj = AOTCacheAccess::get_archived_object(k);
    if (obj == nullptr) {
      set_lookup_failed();
      log_debug(aot, codecache, oops)("Lookup failed for MH object");
      return nullptr;
    }
    ResourceMark rm;
    log_debug(aot, codecache, oops)("%d (L%d): Read MH object: " PTR_FORMAT " : %s",
                                    compile_id(), comp_level(), p2i(obj), obj->klass()->external_name());
  } else {
    // Unrecognized tag: the stream is corrupt or from an incompatible writer.
    set_lookup_failed();
    log_debug(aot, codecache, oops)("%d (L%d): Unknown oop's kind: %d",
                                    compile_id(), comp_level(), (int)kind);
    return nullptr;
  }
  return obj;
}
2755
2756 bool AOTCodeReader::read_oop_metadata_list(JavaThread* thread, ciMethod* target, GrowableArray<Handle> &oop_list, GrowableArray<Metadata*> &metadata_list, OopRecorder* oop_recorder) {
2757 methodHandle comp_method(JavaThread::current(), target->get_Method());
2758 JavaThread* current = JavaThread::current();
2759 uint offset = read_position();
2760 int count = *(int *)addr(offset);
2761 offset += sizeof(int);
2762 set_read_position(offset);
2763 for (int i = 0; i < count; i++) {
2764 oop obj = read_oop(current, comp_method);
2765 if (lookup_failed()) {
2766 return false;
2767 }
2768 Handle h(thread, obj);
2769 oop_list.append(h);
2770 if (oop_recorder != nullptr) {
2771 jobject jo = JNIHandles::make_local(thread, obj);
2772 if (oop_recorder->is_real(jo)) {
2773 oop_recorder->find_index(jo);
2774 } else {
2775 oop_recorder->allocate_oop_index(jo);
2776 }
2777 }
2778 LogStreamHandle(Debug, aot, codecache, oops) log;
2779 if (log.is_enabled()) {
2780 log.print("%d: " INTPTR_FORMAT " ", i, p2i(obj));
2781 if (obj == Universe::non_oop_word()) {
2782 log.print("non-oop word");
2783 } else if (obj == nullptr) {
2784 log.print("nullptr-oop");
2785 } else {
2786 obj->print_value_on(&log);
2787 }
2788 log.cr();
2789 }
2790 }
2791
2792 offset = read_position();
2793 count = *(int *)addr(offset);
2794 offset += sizeof(int);
2795 set_read_position(offset);
2796 for (int i = 0; i < count; i++) {
2797 Metadata* m = read_metadata(comp_method);
2798 if (lookup_failed()) {
2799 return false;
2800 }
2801 metadata_list.append(m);
2802 if (oop_recorder != nullptr) {
2803 if (oop_recorder->is_real(m)) {
2804 oop_recorder->find_index(m);
2805 } else {
2806 oop_recorder->allocate_metadata_index(m);
2807 }
2808 }
2809 LogTarget(Debug, aot, codecache, metadata) log;
2810 if (log.is_enabled()) {
2811 LogStream ls(log);
2812 ls.print("%d: " INTPTR_FORMAT " ", i, p2i(m));
2813 if (m == (Metadata*)Universe::non_oop_word()) {
2814 ls.print("non-metadata word");
2815 } else if (m == nullptr) {
2816 ls.print("nullptr-oop");
2817 } else {
2818 Metadata::print_value_on_maybe_null(&ls, m);
2819 }
2820 ls.cr();
2821 }
2822 }
2823 return true;
2824 }
2825
2826 bool AOTCodeCache::write_oop_map_set(CodeBlob& cb) {
2827 ImmutableOopMapSet* oopmaps = cb.oop_maps();
2828 int oopmaps_size = oopmaps->nr_of_bytes();
2829 if (!write_bytes(&oopmaps_size, sizeof(int))) {
2830 return false;
2831 }
2832 uint n = write_bytes(oopmaps, oopmaps->nr_of_bytes());
2833 if (n != (uint)oopmaps->nr_of_bytes()) {
2834 return false;
2835 }
2836 return true;
2837 }
2838
2839 ImmutableOopMapSet* AOTCodeReader::read_oop_map_set() {
2840 uint offset = read_position();
2841 int size = *(int *)addr(offset);
2842 offset += sizeof(int);
2843 ImmutableOopMapSet* oopmaps = (ImmutableOopMapSet *)addr(offset);
2844 offset += size;
2845 set_read_position(offset);
2846 return oopmaps;
2847 }
2848
2849 bool AOTCodeCache::write_oops(nmethod* nm) {
2850 int count = nm->oops_count()-1;
2851 if (!write_bytes(&count, sizeof(int))) {
2852 return false;
2853 }
2854 for (oop* p = nm->oops_begin(); p < nm->oops_end(); p++) {
2855 if (!write_oop(*p)) {
2856 return false;
2857 }
2858 }
2859 return true;
2860 }
2861
2862 #ifndef PRODUCT
2863 bool AOTCodeCache::write_asm_remarks(AsmRemarks& asm_remarks, bool use_string_table) {
2864 // Write asm remarks
2865 uint* count_ptr = (uint *)reserve_bytes(sizeof(uint));
2866 if (count_ptr == nullptr) {
2867 return false;
2868 }
2869 uint count = 0;
2870 bool result = asm_remarks.iterate([&] (uint offset, const char* str) -> bool {
2871 log_trace(aot, codecache, stubs)("asm remark offset=%d, str='%s'", offset, str);
2872 uint n = write_bytes(&offset, sizeof(uint));
2873 if (n != sizeof(uint)) {
2874 return false;
2875 }
2876 if (use_string_table) {
2877 const char* cstr = add_C_string(str);
2878 int id = _table->id_for_C_string((address)cstr);
2879 assert(id != -1, "asm remark string '%s' not found in AOTCodeAddressTable", str);
2880 n = write_bytes(&id, sizeof(int));
2881 if (n != sizeof(int)) {
2882 return false;
2883 }
2884 } else {
2885 n = write_bytes(str, (uint)strlen(str) + 1);
2886 if (n != strlen(str) + 1) {
2887 return false;
2888 }
2889 }
2890 count += 1;
2891 return true;
2892 });
2893 *count_ptr = count;
2894 return result;
2895 }
2896
2897 void AOTCodeReader::read_asm_remarks(AsmRemarks& asm_remarks, bool use_string_table) {
2898 // Read asm remarks
2899 uint offset = read_position();
2900 uint count = *(uint *)addr(offset);
2901 offset += sizeof(uint);
2902 for (uint i = 0; i < count; i++) {
2903 uint remark_offset = *(uint *)addr(offset);
2904 offset += sizeof(uint);
2905 const char* remark = nullptr;
2906 if (use_string_table) {
2907 int remark_string_id = *(uint *)addr(offset);
2908 offset += sizeof(int);
2909 remark = (const char*)_cache->address_for_C_string(remark_string_id);
2910 } else {
2911 remark = (const char*)addr(offset);
2912 offset += (uint)strlen(remark)+1;
2913 }
2914 asm_remarks.insert(remark_offset, remark);
2915 }
2916 set_read_position(offset);
2917 }
2918
2919 bool AOTCodeCache::write_dbg_strings(DbgStrings& dbg_strings, bool use_string_table) {
2920 // Write dbg strings
2921 uint* count_ptr = (uint *)reserve_bytes(sizeof(uint));
2922 if (count_ptr == nullptr) {
2923 return false;
2924 }
2925 uint count = 0;
2926 bool result = dbg_strings.iterate([&] (const char* str) -> bool {
2927 log_trace(aot, codecache, stubs)("dbg string=%s", str);
2928 if (use_string_table) {
2929 const char* cstr = add_C_string(str);
2930 int id = _table->id_for_C_string((address)cstr);
2931 assert(id != -1, "db string '%s' not found in AOTCodeAddressTable", str);
2932 uint n = write_bytes(&id, sizeof(int));
2933 if (n != sizeof(int)) {
2934 return false;
2935 }
2936 } else {
2937 uint n = write_bytes(str, (uint)strlen(str) + 1);
2938 if (n != strlen(str) + 1) {
2939 return false;
2940 }
2941 }
2942 count += 1;
2943 return true;
2944 });
2945 *count_ptr = count;
2946 return result;
2947 }
2948
2949 void AOTCodeReader::read_dbg_strings(DbgStrings& dbg_strings, bool use_string_table) {
2950 // Read dbg strings
2951 uint offset = read_position();
2952 uint count = *(uint *)addr(offset);
2953 offset += sizeof(uint);
2954 for (uint i = 0; i < count; i++) {
2955 const char* str = nullptr;
2956 if (use_string_table) {
2957 int string_id = *(uint *)addr(offset);
2958 offset += sizeof(int);
2959 str = (const char*)_cache->address_for_C_string(string_id);
2960 } else {
2961 str = (const char*)addr(offset);
2962 offset += (uint)strlen(str)+1;
2963 }
2964 dbg_strings.insert(str);
2965 }
2966 set_read_position(offset);
2967 }
2968 #endif // PRODUCT
2969
2970 //======================= AOTCodeAddressTable ===============
2971
2972 // address table ids for generated routines, external addresses and C
2973 // string addresses are partitioned into positive integer ranges
2974 // defined by the following positive base and max values
2975 // i.e. [_extrs_base, _extrs_base + _extrs_max -1],
2976 // [_stubs_base, _stubs_base + _stubs_max -1],
2977 // ...
2978 // [_c_str_base, _c_str_base + _c_str_max -1],
// Slot capacities for each id range (see the layout comment above).
#define _extrs_max 140
#define _stubs_max 210
#define _shared_blobs_max 25
#define _C1_blobs_max 50
#define _C2_blobs_max 25
#define _blobs_max (_shared_blobs_max+_C1_blobs_max+_C2_blobs_max)
#define _all_max (_extrs_max+_stubs_max+_blobs_max)

// Base id of each range; ranges are laid out back-to-back.
#define _extrs_base 0
#define _stubs_base (_extrs_base + _extrs_max)
#define _shared_blobs_base (_stubs_base + _stubs_max)
#define _C1_blobs_base (_shared_blobs_base + _shared_blobs_max)
#define _C2_blobs_base (_C1_blobs_base + _C1_blobs_max)
#define _blobs_end (_shared_blobs_base + _blobs_max)
// Compile-time check that the ranges fit inside the overall id space.
#if (_C2_blobs_base >= _all_max)
#error AOTCodeAddressTable ranges need adjusting
#endif

// Append 'addr' to the given range's address array, bounds-checking its length.
#define SET_ADDRESS(type, addr)                           \
  {                                                       \
    type##_addr[type##_length++] = (address) (addr);      \
    assert(type##_length <= type##_max, "increase size"); \
  }
3002
// Re-entrancy guard: set while init_extrs() is populating the table.
static bool initializing_extrs = false;

// Populate the _extrs range with addresses of VM runtime entry points, GC
// barrier routines and other external symbols that AOT code may reference.
// NOTE: entries are identified by insertion order (via SET_ADDRESS), so the
// order of the calls below must stay stable across dump and load runs.
void AOTCodeAddressTable::init_extrs() {
  if (_extrs_complete || initializing_extrs) return; // Done already

  assert(_blobs_end <= _all_max, "AOTCodeAddress table ranges need adjusting");

  initializing_extrs = true;
  _extrs_addr = NEW_C_HEAP_ARRAY(address, _extrs_max, mtCode);

  _extrs_length = 0;

  // Record addresses of VM runtime methods
  SET_ADDRESS(_extrs, SharedRuntime::fixup_callers_callsite);
  SET_ADDRESS(_extrs, SharedRuntime::handle_wrong_method);
  SET_ADDRESS(_extrs, SharedRuntime::handle_wrong_method_abstract);
  SET_ADDRESS(_extrs, SharedRuntime::handle_wrong_method_ic_miss);
  {
    // Required by Shared blobs
    SET_ADDRESS(_extrs, Deoptimization::fetch_unroll_info);
    SET_ADDRESS(_extrs, Deoptimization::unpack_frames);
    SET_ADDRESS(_extrs, SafepointSynchronize::handle_polling_page_exception);
    SET_ADDRESS(_extrs, SharedRuntime::resolve_opt_virtual_call_C);
    SET_ADDRESS(_extrs, SharedRuntime::resolve_virtual_call_C);
    SET_ADDRESS(_extrs, SharedRuntime::resolve_static_call_C);
    SET_ADDRESS(_extrs, SharedRuntime::throw_StackOverflowError);
    SET_ADDRESS(_extrs, SharedRuntime::throw_delayed_StackOverflowError);
    SET_ADDRESS(_extrs, SharedRuntime::throw_AbstractMethodError);
    SET_ADDRESS(_extrs, SharedRuntime::throw_IncompatibleClassChangeError);
    SET_ADDRESS(_extrs, SharedRuntime::throw_NullPointerException_at_call);
    // NOTE(review): throw_StackOverflowError is already registered a few lines
    // above — this second entry consumes an extra slot; confirm intentional.
    SET_ADDRESS(_extrs, SharedRuntime::throw_StackOverflowError);
    SET_ADDRESS(_extrs, CompressedOops::base_addr());
    SET_ADDRESS(_extrs, CompressedKlassPointers::base_addr());
  }
  {
    // Required by initial stubs
    SET_ADDRESS(_extrs, StubRoutines::crc_table_addr());
#if defined(AMD64)
    SET_ADDRESS(_extrs, StubRoutines::crc32c_table_addr());
#endif
  }

#ifdef COMPILER1
  {
    // Required by C1 blobs
    SET_ADDRESS(_extrs, static_cast<int (*)(oopDesc*)>(SharedRuntime::dtrace_object_alloc));
    SET_ADDRESS(_extrs, SharedRuntime::exception_handler_for_return_address);
    SET_ADDRESS(_extrs, SharedRuntime::register_finalizer);
    SET_ADDRESS(_extrs, Runtime1::is_instance_of);
    SET_ADDRESS(_extrs, Runtime1::exception_handler_for_pc);
    SET_ADDRESS(_extrs, Runtime1::check_abort_on_vm_exception);
    SET_ADDRESS(_extrs, Runtime1::new_instance);
    SET_ADDRESS(_extrs, Runtime1::counter_overflow);
    SET_ADDRESS(_extrs, Runtime1::new_type_array);
    SET_ADDRESS(_extrs, Runtime1::new_object_array);
    SET_ADDRESS(_extrs, Runtime1::new_multi_array);
    SET_ADDRESS(_extrs, Runtime1::throw_range_check_exception);
    SET_ADDRESS(_extrs, Runtime1::throw_index_exception);
    SET_ADDRESS(_extrs, Runtime1::throw_div0_exception);
    SET_ADDRESS(_extrs, Runtime1::throw_null_pointer_exception);
    SET_ADDRESS(_extrs, Runtime1::throw_array_store_exception);
    SET_ADDRESS(_extrs, Runtime1::throw_class_cast_exception);
    SET_ADDRESS(_extrs, Runtime1::throw_incompatible_class_change_error);
    SET_ADDRESS(_extrs, Runtime1::monitorenter);
    SET_ADDRESS(_extrs, Runtime1::monitorexit);
    SET_ADDRESS(_extrs, Runtime1::deoptimize);
    SET_ADDRESS(_extrs, Runtime1::access_field_patching);
    SET_ADDRESS(_extrs, Runtime1::move_klass_patching);
    SET_ADDRESS(_extrs, Runtime1::move_mirror_patching);
    SET_ADDRESS(_extrs, Runtime1::move_appendix_patching);
    SET_ADDRESS(_extrs, Runtime1::predicate_failed_trap);
    SET_ADDRESS(_extrs, Runtime1::unimplemented_entry);
    SET_ADDRESS(_extrs, Runtime1::trace_block_entry);
#ifdef X86
    SET_ADDRESS(_extrs, LIR_Assembler::float_signmask_pool);
    SET_ADDRESS(_extrs, LIR_Assembler::double_signmask_pool);
    SET_ADDRESS(_extrs, LIR_Assembler::float_signflip_pool);
    SET_ADDRESS(_extrs, LIR_Assembler::double_signflip_pool);
#endif
#ifndef PRODUCT
    SET_ADDRESS(_extrs, os::breakpoint);
#endif
  }
#endif // COMPILER1

#ifdef COMPILER2
  {
    // Required by C2 blobs
    SET_ADDRESS(_extrs, Deoptimization::uncommon_trap);
    SET_ADDRESS(_extrs, OptoRuntime::handle_exception_C);
    SET_ADDRESS(_extrs, OptoRuntime::new_instance_C);
    SET_ADDRESS(_extrs, OptoRuntime::new_array_C);
    SET_ADDRESS(_extrs, OptoRuntime::new_array_nozero_C);
    SET_ADDRESS(_extrs, OptoRuntime::multianewarray2_C);
    SET_ADDRESS(_extrs, OptoRuntime::multianewarray3_C);
    SET_ADDRESS(_extrs, OptoRuntime::multianewarray4_C);
    SET_ADDRESS(_extrs, OptoRuntime::multianewarray5_C);
    SET_ADDRESS(_extrs, OptoRuntime::multianewarrayN_C);
    SET_ADDRESS(_extrs, OptoRuntime::complete_monitor_locking_C);
    SET_ADDRESS(_extrs, OptoRuntime::monitor_notify_C);
    SET_ADDRESS(_extrs, OptoRuntime::monitor_notifyAll_C);
    SET_ADDRESS(_extrs, OptoRuntime::rethrow_C);
    SET_ADDRESS(_extrs, OptoRuntime::slow_arraycopy_C);
    SET_ADDRESS(_extrs, OptoRuntime::register_finalizer_C);
    SET_ADDRESS(_extrs, OptoRuntime::class_init_barrier_C);
    SET_ADDRESS(_extrs, OptoRuntime::compile_method_C);
    SET_ADDRESS(_extrs, OptoRuntime::vthread_end_first_transition_C);
    SET_ADDRESS(_extrs, OptoRuntime::vthread_start_final_transition_C);
    SET_ADDRESS(_extrs, OptoRuntime::vthread_start_transition_C);
    SET_ADDRESS(_extrs, OptoRuntime::vthread_end_transition_C);
    SET_ADDRESS(_extrs, Parse::trap_stress_counter_address());
#if defined(AMD64)
    // Used by C2 intrinsic
    SET_ADDRESS(_extrs, StubRoutines::x86::arrays_hashcode_powers_of_31());
#endif
  }
#endif // COMPILER2
#if INCLUDE_G1GC
  SET_ADDRESS(_extrs, G1BarrierSetRuntime::write_ref_field_pre_entry);
  SET_ADDRESS(_extrs, G1BarrierSetRuntime::clone_addr());
#endif

#if INCLUDE_SHENANDOAHGC
  SET_ADDRESS(_extrs, ShenandoahRuntime::arraycopy_barrier_oop);
  SET_ADDRESS(_extrs, ShenandoahRuntime::arraycopy_barrier_narrow_oop);
  SET_ADDRESS(_extrs, ShenandoahRuntime::clone_barrier);
  SET_ADDRESS(_extrs, ShenandoahRuntime::load_reference_barrier_strong);
  SET_ADDRESS(_extrs, ShenandoahRuntime::load_reference_barrier_strong_narrow);
  SET_ADDRESS(_extrs, ShenandoahRuntime::load_reference_barrier_weak);
  SET_ADDRESS(_extrs, ShenandoahRuntime::load_reference_barrier_weak_narrow);
  SET_ADDRESS(_extrs, ShenandoahRuntime::load_reference_barrier_phantom);
  SET_ADDRESS(_extrs, ShenandoahRuntime::load_reference_barrier_phantom_narrow);
  SET_ADDRESS(_extrs, ShenandoahRuntime::write_barrier_pre);
#endif

#if INCLUDE_ZGC
  SET_ADDRESS(_extrs, ZBarrierSetRuntime::load_barrier_on_oop_field_preloaded_addr());
  SET_ADDRESS(_extrs, ZBarrierSetRuntime::load_barrier_on_phantom_oop_field_preloaded_addr());
#if defined(AMD64)
  SET_ADDRESS(_extrs, &ZPointerLoadShift);
#endif
#if defined(AARCH64)
  BarrierSetAssembler* bs_asm = BarrierSet::barrier_set()->barrier_set_assembler();
  SET_ADDRESS(_extrs, bs_asm->patching_epoch_addr());
#endif
#endif // INCLUDE_ZGC

  SET_ADDRESS(_extrs, SharedRuntime::rc_trace_method_entry);
  SET_ADDRESS(_extrs, SharedRuntime::reguard_yellow_pages);
  SET_ADDRESS(_extrs, SharedRuntime::dtrace_method_exit);

  SET_ADDRESS(_extrs, SharedRuntime::complete_monitor_unlocking_C);
  SET_ADDRESS(_extrs, SharedRuntime::enable_stack_reserved_zone);
#if defined(AMD64) && !defined(ZERO)
  SET_ADDRESS(_extrs, SharedRuntime::montgomery_multiply);
  SET_ADDRESS(_extrs, SharedRuntime::montgomery_square);
#endif // AMD64
  SET_ADDRESS(_extrs, SharedRuntime::d2f);
  SET_ADDRESS(_extrs, SharedRuntime::d2i);
  SET_ADDRESS(_extrs, SharedRuntime::d2l);
  SET_ADDRESS(_extrs, SharedRuntime::dcos);
  SET_ADDRESS(_extrs, SharedRuntime::dexp);
  SET_ADDRESS(_extrs, SharedRuntime::dlog);
  SET_ADDRESS(_extrs, SharedRuntime::dlog10);
  SET_ADDRESS(_extrs, SharedRuntime::dpow);
  SET_ADDRESS(_extrs, SharedRuntime::dsin);
  SET_ADDRESS(_extrs, SharedRuntime::dtan);
  SET_ADDRESS(_extrs, SharedRuntime::f2i);
  SET_ADDRESS(_extrs, SharedRuntime::f2l);
#ifndef ZERO
  SET_ADDRESS(_extrs, SharedRuntime::drem);
  SET_ADDRESS(_extrs, SharedRuntime::frem);
#endif
  SET_ADDRESS(_extrs, SharedRuntime::l2d);
  SET_ADDRESS(_extrs, SharedRuntime::l2f);
  SET_ADDRESS(_extrs, SharedRuntime::ldiv);
  SET_ADDRESS(_extrs, SharedRuntime::lmul);
  SET_ADDRESS(_extrs, SharedRuntime::lrem);

  SET_ADDRESS(_extrs, ThreadIdentifier::unsafe_offset());
  SET_ADDRESS(_extrs, Thread::current);
  SET_ADDRESS(_extrs, ObjectMonitorTable::current_table_address());

  SET_ADDRESS(_extrs, os::javaTimeMillis);
  SET_ADDRESS(_extrs, os::javaTimeNanos);
  // For JFR
  SET_ADDRESS(_extrs, os::elapsed_counter);
#if defined(X86) && !defined(ZERO)
  SET_ADDRESS(_extrs, Rdtsc::elapsed_counter);
#endif

#if INCLUDE_JVMTI
  SET_ADDRESS(_extrs, &JvmtiExport::_should_notify_object_alloc);
#endif /* INCLUDE_JVMTI */
  SET_ADDRESS(_extrs, MountUnmountDisabler::notify_jvmti_events_address());
  SET_ADDRESS(_extrs, MountUnmountDisabler::global_vthread_transition_disable_count_address());

#ifndef PRODUCT
  SET_ADDRESS(_extrs, &SharedRuntime::_partial_subtype_ctr);
  SET_ADDRESS(_extrs, JavaThread::verify_cross_modify_fence_failure);
#endif

#ifndef ZERO
#if defined(AMD64) || defined(AARCH64) || defined(RISCV64)
  SET_ADDRESS(_extrs, MacroAssembler::debug64);
#endif
#if defined(AMD64) || defined(AARCH64)
  SET_ADDRESS(_extrs, C2_MacroAssembler::abort_verify_int_in_range);
  SET_ADDRESS(_extrs, C2_MacroAssembler::abort_verify_long_in_range);
#endif // defined(AMD64) || defined(AARCH64)
#if defined(AARCH64)
  SET_ADDRESS(_extrs, JavaThread::aarch64_get_thread_helper);
#endif
#endif // ZERO

  // addresses of fields in AOT runtime constants area
  address* p = AOTRuntimeConstants::field_addresses_list();
  while (*p != nullptr) {
    SET_ADDRESS(_extrs, *p++);
  }

  _extrs_complete = true;
  log_info(aot, codecache, init)("External addresses recorded");
}
3227
// Re-entrancy guard: set while init_early_stubs() is populating the table.
static bool initializing_early_stubs = false;

// Record the early (initial-phase) stub addresses into the _stubs range.
// This allocates the _stubs array; init_stubs() appends the remaining stubs
// later, so slot order here is part of the stable id layout.
void AOTCodeAddressTable::init_early_stubs() {
  if (_complete || initializing_early_stubs) return; // Done already
  initializing_early_stubs = true;
  _stubs_addr = NEW_C_HEAP_ARRAY(address, _stubs_max, mtCode);
  _stubs_length = 0;
  SET_ADDRESS(_stubs, StubRoutines::forward_exception_entry());

  {
    // Required by C1 blobs
#if defined(AMD64) && !defined(ZERO)
    SET_ADDRESS(_stubs, StubRoutines::x86::double_sign_flip());
    SET_ADDRESS(_stubs, StubRoutines::x86::d2l_fixup());
#endif // AMD64
  }

  _early_stubs_complete = true;
  log_info(aot, codecache, init)("Early stubs recorded");
}
3248
// Re-entrancy guard: set while init_shared_blobs() is populating the table.
static bool initializing_shared_blobs = false;

// Record the entry points of shared runtime blobs, and carve the single blob
// array into the shared/C1/C2 sub-ranges. As elsewhere, slot order defines
// the stable ids used by cached code.
void AOTCodeAddressTable::init_shared_blobs() {
  if (_complete || initializing_shared_blobs) return; // Done already
  initializing_shared_blobs = true;
  address* blobs_addr = NEW_C_HEAP_ARRAY(address, _blobs_max, mtCode);

  // Divide _shared_blobs_addr array to chunks because they could be initialized in parallel
  _shared_blobs_addr = blobs_addr;
  _C1_blobs_addr = _shared_blobs_addr + _shared_blobs_max;// C1 blobs addresses stored after shared blobs
  _C2_blobs_addr = _C1_blobs_addr + _C1_blobs_max; // C2 blobs addresses stored after C1 blobs

  _shared_blobs_length = 0;
  _C1_blobs_length = 0;
  _C2_blobs_length = 0;

  // clear the address table
  memset(blobs_addr, 0, sizeof(address)* _blobs_max);

  // Record addresses of generated code blobs
  SET_ADDRESS(_shared_blobs, SharedRuntime::get_handle_wrong_method_stub());
  SET_ADDRESS(_shared_blobs, SharedRuntime::get_ic_miss_stub());
  SET_ADDRESS(_shared_blobs, SharedRuntime::deopt_blob()->unpack());
  SET_ADDRESS(_shared_blobs, SharedRuntime::deopt_blob()->unpack_with_exception());
  SET_ADDRESS(_shared_blobs, SharedRuntime::deopt_blob()->unpack_with_reexecution());
  SET_ADDRESS(_shared_blobs, SharedRuntime::deopt_blob()->unpack_with_exception_in_tls());
  SET_ADDRESS(_shared_blobs, SharedRuntime::get_resolve_opt_virtual_call_stub());
  SET_ADDRESS(_shared_blobs, SharedRuntime::get_resolve_virtual_call_stub());
  SET_ADDRESS(_shared_blobs, SharedRuntime::get_resolve_static_call_stub());
  SET_ADDRESS(_shared_blobs, SharedRuntime::deopt_blob()->entry_point());
  SET_ADDRESS(_shared_blobs, SharedRuntime::polling_page_safepoint_handler_blob()->entry_point());
  SET_ADDRESS(_shared_blobs, SharedRuntime::polling_page_return_handler_blob()->entry_point());
#ifdef COMPILER2
  // polling_page_vectors_safepoint_handler_blob can be nullptr if AVX feature is not present or is disabled
  if (SharedRuntime::polling_page_vectors_safepoint_handler_blob() != nullptr) {
    SET_ADDRESS(_shared_blobs, SharedRuntime::polling_page_vectors_safepoint_handler_blob()->entry_point());
  }
#endif
#if INCLUDE_JVMCI
  if (EnableJVMCI) {
    SET_ADDRESS(_shared_blobs, SharedRuntime::deopt_blob()->uncommon_trap());
    SET_ADDRESS(_shared_blobs, SharedRuntime::deopt_blob()->implicit_exception_uncommon_trap());
  }
#endif
  SET_ADDRESS(_shared_blobs, SharedRuntime::throw_AbstractMethodError_entry());
  SET_ADDRESS(_shared_blobs, SharedRuntime::throw_IncompatibleClassChangeError_entry());
  SET_ADDRESS(_shared_blobs, SharedRuntime::throw_NullPointerException_at_call_entry());
  SET_ADDRESS(_shared_blobs, SharedRuntime::throw_StackOverflowError_entry());
  SET_ADDRESS(_shared_blobs, SharedRuntime::throw_delayed_StackOverflowError_entry());

  assert(_shared_blobs_length <= _shared_blobs_max, "increase _shared_blobs_max to %d", _shared_blobs_length);
  _shared_blobs_complete = true;
  log_info(aot, codecache, init)("All shared blobs recorded");
}
3303
// Re-entrancy guard: set while init_stubs() is populating the table.
static bool initializing_stubs = false;
3305 void AOTCodeAddressTable::init_stubs() {
3306 if (_complete || initializing_stubs) return; // Done already
3307 assert(_early_stubs_complete, "early stubs whould be initialized");
3308 initializing_stubs = true;
3309
3310 // Stubs
3311 SET_ADDRESS(_stubs, StubRoutines::method_entry_barrier());
3312 SET_ADDRESS(_stubs, StubRoutines::atomic_xchg_entry());
3313 SET_ADDRESS(_stubs, StubRoutines::atomic_cmpxchg_entry());
3314 SET_ADDRESS(_stubs, StubRoutines::atomic_cmpxchg_long_entry());
3315 SET_ADDRESS(_stubs, StubRoutines::atomic_add_entry());
3316 SET_ADDRESS(_stubs, StubRoutines::fence_entry());
3317
3318 SET_ADDRESS(_stubs, StubRoutines::cont_thaw());
3319 SET_ADDRESS(_stubs, StubRoutines::cont_returnBarrier());
3320 SET_ADDRESS(_stubs, StubRoutines::cont_returnBarrierExc());
3321
3322 JFR_ONLY(SET_ADDRESS(_stubs, SharedRuntime::jfr_write_checkpoint());)
3323
3324 SET_ADDRESS(_stubs, StubRoutines::jbyte_arraycopy());
3325 SET_ADDRESS(_stubs, StubRoutines::jshort_arraycopy());
3326 SET_ADDRESS(_stubs, StubRoutines::jint_arraycopy());
3327 SET_ADDRESS(_stubs, StubRoutines::jlong_arraycopy());
3328 SET_ADDRESS(_stubs, StubRoutines::_oop_arraycopy);
3329 SET_ADDRESS(_stubs, StubRoutines::_oop_arraycopy_uninit);
3330
3331 SET_ADDRESS(_stubs, StubRoutines::jbyte_disjoint_arraycopy());
3332 SET_ADDRESS(_stubs, StubRoutines::jshort_disjoint_arraycopy());
3333 SET_ADDRESS(_stubs, StubRoutines::jint_disjoint_arraycopy());
3334 SET_ADDRESS(_stubs, StubRoutines::jlong_disjoint_arraycopy());
3335 SET_ADDRESS(_stubs, StubRoutines::_oop_disjoint_arraycopy);
3336 SET_ADDRESS(_stubs, StubRoutines::_oop_disjoint_arraycopy_uninit);
3337
3338 SET_ADDRESS(_stubs, StubRoutines::arrayof_jbyte_arraycopy());
3339 SET_ADDRESS(_stubs, StubRoutines::arrayof_jshort_arraycopy());
3340 SET_ADDRESS(_stubs, StubRoutines::arrayof_jint_arraycopy());
3341 SET_ADDRESS(_stubs, StubRoutines::arrayof_jlong_arraycopy());
3342 SET_ADDRESS(_stubs, StubRoutines::_arrayof_oop_arraycopy);
3343 SET_ADDRESS(_stubs, StubRoutines::_arrayof_oop_arraycopy_uninit);
3344
3345 SET_ADDRESS(_stubs, StubRoutines::arrayof_jbyte_disjoint_arraycopy());
3346 SET_ADDRESS(_stubs, StubRoutines::arrayof_jshort_disjoint_arraycopy());
3347 SET_ADDRESS(_stubs, StubRoutines::arrayof_jint_disjoint_arraycopy());
3348 SET_ADDRESS(_stubs, StubRoutines::arrayof_jlong_disjoint_arraycopy());
3349 SET_ADDRESS(_stubs, StubRoutines::_arrayof_oop_disjoint_arraycopy);
3350 SET_ADDRESS(_stubs, StubRoutines::_arrayof_oop_disjoint_arraycopy_uninit);
3351
3352 SET_ADDRESS(_stubs, StubRoutines::_checkcast_arraycopy);
3353 SET_ADDRESS(_stubs, StubRoutines::_checkcast_arraycopy_uninit);
3354
3355 SET_ADDRESS(_stubs, StubRoutines::unsafe_arraycopy());
3356 SET_ADDRESS(_stubs, StubRoutines::generic_arraycopy());
3357
3358 SET_ADDRESS(_stubs, StubRoutines::jbyte_fill());
3359 SET_ADDRESS(_stubs, StubRoutines::jshort_fill());
3360 SET_ADDRESS(_stubs, StubRoutines::jint_fill());
3361 SET_ADDRESS(_stubs, StubRoutines::arrayof_jbyte_fill());
3362 SET_ADDRESS(_stubs, StubRoutines::arrayof_jshort_fill());
3363 SET_ADDRESS(_stubs, StubRoutines::arrayof_jint_fill());
3364
3365 SET_ADDRESS(_stubs, StubRoutines::data_cache_writeback());
3366 SET_ADDRESS(_stubs, StubRoutines::data_cache_writeback_sync());
3367
3368 SET_ADDRESS(_stubs, StubRoutines::aescrypt_encryptBlock());
3369 SET_ADDRESS(_stubs, StubRoutines::aescrypt_decryptBlock());
3370 SET_ADDRESS(_stubs, StubRoutines::cipherBlockChaining_encryptAESCrypt());
3371 SET_ADDRESS(_stubs, StubRoutines::cipherBlockChaining_decryptAESCrypt());
3372 SET_ADDRESS(_stubs, StubRoutines::electronicCodeBook_encryptAESCrypt());
3373 SET_ADDRESS(_stubs, StubRoutines::electronicCodeBook_decryptAESCrypt());
3374 SET_ADDRESS(_stubs, StubRoutines::poly1305_processBlocks());
3375 SET_ADDRESS(_stubs, StubRoutines::counterMode_AESCrypt());
3376 SET_ADDRESS(_stubs, StubRoutines::ghash_processBlocks());
3377 SET_ADDRESS(_stubs, StubRoutines::chacha20Block());
3378 SET_ADDRESS(_stubs, StubRoutines::base64_encodeBlock());
3379 SET_ADDRESS(_stubs, StubRoutines::base64_decodeBlock());
3380 SET_ADDRESS(_stubs, StubRoutines::md5_implCompress());
3381 SET_ADDRESS(_stubs, StubRoutines::md5_implCompressMB());
3382 SET_ADDRESS(_stubs, StubRoutines::sha1_implCompress());
3383 SET_ADDRESS(_stubs, StubRoutines::sha1_implCompressMB());
3384 SET_ADDRESS(_stubs, StubRoutines::sha256_implCompress());
3385 SET_ADDRESS(_stubs, StubRoutines::sha256_implCompressMB());
3386 SET_ADDRESS(_stubs, StubRoutines::sha512_implCompress());
3387 SET_ADDRESS(_stubs, StubRoutines::sha512_implCompressMB());
3388 SET_ADDRESS(_stubs, StubRoutines::sha3_implCompress());
3389 SET_ADDRESS(_stubs, StubRoutines::sha3_implCompressMB());
3390 SET_ADDRESS(_stubs, StubRoutines::double_keccak());
3391 SET_ADDRESS(_stubs, StubRoutines::intpoly_assign());
3392 SET_ADDRESS(_stubs, StubRoutines::intpoly_montgomeryMult_P256());
3393 SET_ADDRESS(_stubs, StubRoutines::dilithiumAlmostNtt());
3394 SET_ADDRESS(_stubs, StubRoutines::dilithiumAlmostInverseNtt());
3395 SET_ADDRESS(_stubs, StubRoutines::dilithiumNttMult());
3396 SET_ADDRESS(_stubs, StubRoutines::dilithiumMontMulByConstant());
3397 SET_ADDRESS(_stubs, StubRoutines::dilithiumDecomposePoly());
3398 SET_ADDRESS(_stubs, StubRoutines::kyber12To16());
3399
3400 SET_ADDRESS(_stubs, StubRoutines::updateBytesCRC32());
3401 SET_ADDRESS(_stubs, StubRoutines::updateBytesCRC32C());
3402 SET_ADDRESS(_stubs, StubRoutines::updateBytesAdler32());
3403
3404 SET_ADDRESS(_stubs, StubRoutines::multiplyToLen());
3405 SET_ADDRESS(_stubs, StubRoutines::squareToLen());
3406 SET_ADDRESS(_stubs, StubRoutines::mulAdd());
3407 SET_ADDRESS(_stubs, StubRoutines::montgomeryMultiply());
3408 SET_ADDRESS(_stubs, StubRoutines::montgomerySquare());
3409 SET_ADDRESS(_stubs, StubRoutines::bigIntegerRightShift());
3410 SET_ADDRESS(_stubs, StubRoutines::bigIntegerLeftShift());
3411 SET_ADDRESS(_stubs, StubRoutines::galoisCounterMode_AESCrypt());
3412
3413 SET_ADDRESS(_stubs, StubRoutines::vectorizedMismatch());
3414
3415 SET_ADDRESS(_stubs, StubRoutines::unsafe_setmemory());
3416
3417 SET_ADDRESS(_stubs, StubRoutines::dexp());
3418 SET_ADDRESS(_stubs, StubRoutines::dlog());
3419 SET_ADDRESS(_stubs, StubRoutines::dlog10());
3420 SET_ADDRESS(_stubs, StubRoutines::dpow());
3421 SET_ADDRESS(_stubs, StubRoutines::dsin());
3422 SET_ADDRESS(_stubs, StubRoutines::dcos());
3423 SET_ADDRESS(_stubs, StubRoutines::dlibm_reduce_pi04l());
3424 SET_ADDRESS(_stubs, StubRoutines::dlibm_sin_cos_huge());
3425 SET_ADDRESS(_stubs, StubRoutines::dlibm_tan_cot_huge());
3426 SET_ADDRESS(_stubs, StubRoutines::dtan());
3427
3428 SET_ADDRESS(_stubs, StubRoutines::f2hf_adr());
3429 SET_ADDRESS(_stubs, StubRoutines::hf2f_adr());
3430
3431 for (int slot = 0; slot < Klass::SECONDARY_SUPERS_TABLE_SIZE; slot++) {
3432 SET_ADDRESS(_stubs, StubRoutines::lookup_secondary_supers_table_stub(slot));
3433 }
3434 SET_ADDRESS(_stubs, StubRoutines::lookup_secondary_supers_table_slow_path_stub());
3435
3436 #if defined(AMD64) && !defined(ZERO)
3437 SET_ADDRESS(_stubs, StubRoutines::x86::d2i_fixup());
3438 SET_ADDRESS(_stubs, StubRoutines::x86::f2i_fixup());
3439 SET_ADDRESS(_stubs, StubRoutines::x86::f2l_fixup());
3440 SET_ADDRESS(_stubs, StubRoutines::x86::float_sign_mask());
3441 SET_ADDRESS(_stubs, StubRoutines::x86::float_sign_flip());
3442 SET_ADDRESS(_stubs, StubRoutines::x86::double_sign_mask());
3443 SET_ADDRESS(_stubs, StubRoutines::x86::vector_popcount_lut());
3444 SET_ADDRESS(_stubs, StubRoutines::x86::vector_float_sign_mask());
3445 SET_ADDRESS(_stubs, StubRoutines::x86::vector_float_sign_flip());
3446 SET_ADDRESS(_stubs, StubRoutines::x86::vector_double_sign_mask());
3447 SET_ADDRESS(_stubs, StubRoutines::x86::vector_double_sign_flip());
3448 SET_ADDRESS(_stubs, StubRoutines::x86::vector_int_shuffle_mask());
3449 SET_ADDRESS(_stubs, StubRoutines::x86::vector_byte_shuffle_mask());
3450 SET_ADDRESS(_stubs, StubRoutines::x86::vector_short_shuffle_mask());
3451 SET_ADDRESS(_stubs, StubRoutines::x86::vector_long_shuffle_mask());
3452 SET_ADDRESS(_stubs, StubRoutines::x86::vector_long_sign_mask());
3453 SET_ADDRESS(_stubs, StubRoutines::x86::vector_int_to_byte_mask());
3454 SET_ADDRESS(_stubs, StubRoutines::x86::vector_int_to_short_mask());
3455 SET_ADDRESS(_stubs, StubRoutines::x86::vector_reverse_byte_perm_mask_int());
3456 SET_ADDRESS(_stubs, StubRoutines::x86::vector_reverse_byte_perm_mask_short());
3457 SET_ADDRESS(_stubs, StubRoutines::x86::vector_reverse_byte_perm_mask_long());
3458 // The iota indices are ordered by type B/S/I/L/F/D, and the offset between two types is 64.
3459 // See C2_MacroAssembler::load_iota_indices().
3460 for (int i = 0; i < 6; i++) {
3461 SET_ADDRESS(_stubs, StubRoutines::x86::vector_iota_indices() + i * 64);
3462 }
3463 #ifdef COMPILER2
3464 for (int i = 0; i < 4; i++) {
3465 SET_ADDRESS(_stubs, StubRoutines::_string_indexof_array[i]);
3466 }
3467 #endif
3468 #endif
3469 #if defined(AARCH64) && !defined(ZERO)
3470 SET_ADDRESS(_stubs, StubRoutines::aarch64::zero_blocks());
3471 SET_ADDRESS(_stubs, StubRoutines::aarch64::count_positives());
3472 SET_ADDRESS(_stubs, StubRoutines::aarch64::count_positives_long());
3473 SET_ADDRESS(_stubs, StubRoutines::aarch64::large_array_equals());
3474 SET_ADDRESS(_stubs, StubRoutines::aarch64::compare_long_string_LL());
3475 SET_ADDRESS(_stubs, StubRoutines::aarch64::compare_long_string_UU());
3476 SET_ADDRESS(_stubs, StubRoutines::aarch64::compare_long_string_LU());
3477 SET_ADDRESS(_stubs, StubRoutines::aarch64::compare_long_string_UL());
3478 SET_ADDRESS(_stubs, StubRoutines::aarch64::string_indexof_linear_ul());
3479 SET_ADDRESS(_stubs, StubRoutines::aarch64::string_indexof_linear_ll());
3480 SET_ADDRESS(_stubs, StubRoutines::aarch64::string_indexof_linear_uu());
3481 SET_ADDRESS(_stubs, StubRoutines::aarch64::large_byte_array_inflate());
3482 SET_ADDRESS(_stubs, StubRoutines::aarch64::spin_wait());
3483
3484 SET_ADDRESS(_stubs, StubRoutines::aarch64::large_arrays_hashcode(T_BOOLEAN));
3485 SET_ADDRESS(_stubs, StubRoutines::aarch64::large_arrays_hashcode(T_BYTE));
3486 SET_ADDRESS(_stubs, StubRoutines::aarch64::large_arrays_hashcode(T_SHORT));
3487 SET_ADDRESS(_stubs, StubRoutines::aarch64::large_arrays_hashcode(T_CHAR));
3488 SET_ADDRESS(_stubs, StubRoutines::aarch64::large_arrays_hashcode(T_INT));
3489 #endif
3490
3491 _complete = true;
3492 log_info(aot, codecache, init)("Stubs recorded");
3493 }
3494
// Record entry addresses for the subset of C1 runtime blobs that are
// generated early, from the start of the C1 stub group up to and
// including c1_forward_exception_id, so AOT-compiled code can reference
// them by stable table ids. Blobs or entries that are not present are
// logged and skipped. Sets _early_c1_complete even without COMPILER1.
void AOTCodeAddressTable::init_early_c1() {
#ifdef COMPILER1
  // Runtime1 Blobs
  StubId id = StubInfo::stub_base(StubGroup::C1);
  // include forward_exception in range we publish
  StubId limit = StubInfo::next(StubId::c1_forward_exception_id);
  for (; id != limit; id = StubInfo::next(id)) {
    if (Runtime1::blob_for(id) == nullptr) {
      log_info(aot, codecache, init)("C1 blob %s is missing", Runtime1::name_for(id));
      continue;
    }
    if (Runtime1::entry_for(id) == nullptr) {
      log_info(aot, codecache, init)("C1 blob %s is missing entry", Runtime1::name_for(id));
      continue;
    }
    address entry = Runtime1::entry_for(id);
    SET_ADDRESS(_C1_blobs, entry);
  }
#endif // COMPILER1
  assert(_C1_blobs_length <= _C1_blobs_max, "increase _C1_blobs_max to %d", _C1_blobs_length);
  _early_c1_complete = true;
}
3517
// Record entry addresses of the remaining (non-early) C1 runtime blobs,
// plus the C1 barrier stubs of whichever GC is active, so AOT-compiled
// code can reference them by table id. Must run after init_early_c1()
// because ids are appended to the same _C1_blobs table in order.
void AOTCodeAddressTable::init_c1() {
#ifdef COMPILER1
  // Runtime1 Blobs
  assert(_early_c1_complete, "early C1 blobs should be initialized");
  // Resume right after the last id published by init_early_c1().
  StubId id = StubInfo::next(StubId::c1_forward_exception_id);
  StubId limit = StubInfo::next(StubInfo::stub_max(StubGroup::C1));
  for (; id != limit; id = StubInfo::next(id)) {
    if (Runtime1::blob_for(id) == nullptr) {
      log_info(aot, codecache, init)("C1 blob %s is missing", Runtime1::name_for(id));
      continue;
    }
    if (Runtime1::entry_for(id) == nullptr) {
      log_info(aot, codecache, init)("C1 blob %s is missing entry", Runtime1::name_for(id));
      continue;
    }
    address entry = Runtime1::entry_for(id);
    SET_ADDRESS(_C1_blobs, entry);
  }
  // GC-specific C1 barrier stubs. Only the stubs of the active collector
  // are recorded, so the table id layout depends on the GC selection.
#if INCLUDE_G1GC
  if (UseG1GC) {
    G1BarrierSetC1* bs = (G1BarrierSetC1*)BarrierSet::barrier_set()->barrier_set_c1();
    address entry = bs->pre_barrier_c1_runtime_code_blob()->code_begin();
    SET_ADDRESS(_C1_blobs, entry);
  }
#endif // INCLUDE_G1GC
#if INCLUDE_ZGC
  if (UseZGC) {
    ZBarrierSetC1* bs = (ZBarrierSetC1*)BarrierSet::barrier_set()->barrier_set_c1();
    SET_ADDRESS(_C1_blobs, bs->_load_barrier_on_oop_field_preloaded_runtime_stub);
    SET_ADDRESS(_C1_blobs, bs->_load_barrier_on_weak_oop_field_preloaded_runtime_stub);
    SET_ADDRESS(_C1_blobs, bs->_store_barrier_on_oop_field_with_healing);
    SET_ADDRESS(_C1_blobs, bs->_store_barrier_on_oop_field_without_healing);
  }
#endif // INCLUDE_ZGC
#if INCLUDE_SHENANDOAHGC
  if (UseShenandoahGC) {
    ShenandoahBarrierSetC1* bs = (ShenandoahBarrierSetC1*)BarrierSet::barrier_set()->barrier_set_c1();
    SET_ADDRESS(_C1_blobs, bs->pre_barrier_c1_runtime_code_blob()->code_begin());
    SET_ADDRESS(_C1_blobs, bs->load_reference_barrier_strong_rt_code_blob()->code_begin());
    SET_ADDRESS(_C1_blobs, bs->load_reference_barrier_strong_native_rt_code_blob()->code_begin());
    SET_ADDRESS(_C1_blobs, bs->load_reference_barrier_weak_rt_code_blob()->code_begin());
    SET_ADDRESS(_C1_blobs, bs->load_reference_barrier_phantom_rt_code_blob()->code_begin());
  }
#endif // INCLUDE_SHENANDOAHGC
#endif // COMPILER1

  assert(_C1_blobs_length <= _C1_blobs_max, "increase _C1_blobs_max to %d", _C1_blobs_length);
  _c1_complete = true;
  log_info(aot, codecache, init)("Runtime1 Blobs recorded");
}
3568
// Record entry addresses of the C2 (OptoRuntime) runtime blobs so
// AOT-compiled code can reference them by table id. The order of
// SET_ADDRESS calls defines the id layout and must stay stable between
// dump and load. Sets _c2_complete even without COMPILER2.
void AOTCodeAddressTable::init_c2() {
#ifdef COMPILER2
  // OptoRuntime Blobs
  SET_ADDRESS(_C2_blobs, OptoRuntime::uncommon_trap_blob()->entry_point());
  SET_ADDRESS(_C2_blobs, OptoRuntime::exception_blob()->entry_point());
  SET_ADDRESS(_C2_blobs, OptoRuntime::new_instance_Java());
  SET_ADDRESS(_C2_blobs, OptoRuntime::new_array_Java());
  SET_ADDRESS(_C2_blobs, OptoRuntime::new_array_nozero_Java());
  SET_ADDRESS(_C2_blobs, OptoRuntime::multianewarray2_Java());
  SET_ADDRESS(_C2_blobs, OptoRuntime::multianewarray3_Java());
  SET_ADDRESS(_C2_blobs, OptoRuntime::multianewarray4_Java());
  SET_ADDRESS(_C2_blobs, OptoRuntime::multianewarray5_Java());
  SET_ADDRESS(_C2_blobs, OptoRuntime::multianewarrayN_Java());
  SET_ADDRESS(_C2_blobs, OptoRuntime::vtable_must_compile_stub());
  SET_ADDRESS(_C2_blobs, OptoRuntime::complete_monitor_locking_Java());
  SET_ADDRESS(_C2_blobs, OptoRuntime::monitor_notify_Java());
  SET_ADDRESS(_C2_blobs, OptoRuntime::monitor_notifyAll_Java());
  SET_ADDRESS(_C2_blobs, OptoRuntime::rethrow_stub());
  SET_ADDRESS(_C2_blobs, OptoRuntime::slow_arraycopy_Java());
  SET_ADDRESS(_C2_blobs, OptoRuntime::register_finalizer_Java());
  SET_ADDRESS(_C2_blobs, OptoRuntime::class_init_barrier_Java());
  SET_ADDRESS(_C2_blobs, OptoRuntime::compile_method_Java());
#if INCLUDE_JVMTI
  SET_ADDRESS(_C2_blobs, OptoRuntime::vthread_end_first_transition_Java());
  SET_ADDRESS(_C2_blobs, OptoRuntime::vthread_start_final_transition_Java());
  SET_ADDRESS(_C2_blobs, OptoRuntime::vthread_start_transition_Java());
  SET_ADDRESS(_C2_blobs, OptoRuntime::vthread_end_transition_Java());
#endif /* INCLUDE_JVMTI */
#endif

  assert(_C2_blobs_length <= _C2_blobs_max, "increase _C2_blobs_max to %d", _C2_blobs_length);
  _c2_complete = true;
  log_info(aot, codecache, init)("OptoRuntime Blobs recorded");
}
3603 #undef SET_ADDRESS
3604
// Release the C-heap copies of the address tables.
AOTCodeAddressTable::~AOTCodeAddressTable() {
  if (_extrs_addr != nullptr) {
    FREE_C_HEAP_ARRAY(address, _extrs_addr);
  }
  if (_stubs_addr != nullptr) {
    FREE_C_HEAP_ARRAY(address, _stubs_addr);
  }
  if (_shared_blobs_addr != nullptr) {
    FREE_C_HEAP_ARRAY(address, _shared_blobs_addr);
  }
  // NOTE(review): _C1_blobs_addr and _C2_blobs_addr are not freed here --
  // confirm they alias into one of the arrays above (or are freed
  // elsewhere); otherwise this leaks them.
}
3616
// Bounded table of C string constants referenced from AOT-compiled code.
// String ids are allocated directly above all other address-table ids
// (base _all_max). Debug builds allow more strings (assert messages etc.).
#ifdef PRODUCT
#define MAX_STR_COUNT 200
#else
#define MAX_STR_COUNT 500
#endif
#define _c_str_max MAX_STR_COUNT
static const int _c_str_base = _all_max;

static const char* _C_strings_in[MAX_STR_COUNT] = {nullptr}; // Incoming strings
static const char* _C_strings[MAX_STR_COUNT] = {nullptr}; // Our duplicates
static int _C_strings_count = 0; // Number of entries in _C_strings
// Index and id mappings for the stored strings. After load_strings()
// both are the identity mapping; presumably the dump side remaps them --
// confirm against the store path.
static int _C_strings_s[MAX_STR_COUNT] = {0};
static int _C_strings_id[MAX_STR_COUNT] = {0};
static int _C_strings_used = 0;
3631
3632 void AOTCodeCache::load_strings() {
3633 uint strings_count = _load_header->strings_count();
3634 if (strings_count == 0) {
3635 return;
3636 }
3637 uint strings_offset = _load_header->strings_offset();
3638 uint* string_lengths = (uint*)addr(strings_offset);
3639 strings_offset += (strings_count * sizeof(uint));
3640 uint strings_size = _load_header->search_table_offset() - strings_offset;
3641 // We have to keep cached strings longer than _cache buffer
3642 // because they are refernced from compiled code which may
3643 // still be executed on VM exit after _cache is freed.
3644 char* p = NEW_C_HEAP_ARRAY(char, strings_size+1, mtCode);
3645 memcpy(p, addr(strings_offset), strings_size);
3646 _C_strings_buf = p;
3647 assert(strings_count <= MAX_STR_COUNT, "sanity");
3648 for (uint i = 0; i < strings_count; i++) {
3649 _C_strings[i] = p;
3650 uint len = string_lengths[i];
3651 _C_strings_s[i] = i;
3652 _C_strings_id[i] = i;
3653 p += len;
3654 }
3655 assert((uint)(p - _C_strings_buf) <= strings_size, "(" INTPTR_FORMAT " - " INTPTR_FORMAT ") = %d > %d ", p2i(p), p2i(_C_strings_buf), (uint)(p - _C_strings_buf), strings_size);
3656 _C_strings_count = strings_count;
3657 _C_strings_used = strings_count;
3658 log_debug(aot, codecache, init)(" Loaded %d C strings of total length %d at offset %d from AOT Code Cache", _C_strings_count, strings_size, strings_offset);
3659 }
3660
3762 assert(_extrs_complete, "AOT Code Cache VM runtime addresses table is not complete");
3763 if (idx == -1) {
3764 return (address)-1;
3765 }
3766 uint id = (uint)idx;
3767 // special case for symbols based relative to os::init
3768 if (id > (_c_str_base + _c_str_max)) {
3769 return (address)os::init + idx;
3770 }
3771 if (idx < 0) {
3772 fatal("Incorrect id %d for AOT Code Cache addresses table", id);
3773 return nullptr;
3774 }
3775 // no need to compare unsigned id against 0
3776 if (/* id >= _extrs_base && */ id < _extrs_length) {
3777 return _extrs_addr[id - _extrs_base];
3778 }
3779 if (id >= _stubs_base && id < _stubs_base + _stubs_length) {
3780 return _stubs_addr[id - _stubs_base];
3781 }
3782 if (id >= _stubs_base && id < _stubs_base + _stubs_length) {
3783 return _stubs_addr[id - _stubs_base];
3784 }
3785 if (id >= _shared_blobs_base && id < _shared_blobs_base + _shared_blobs_length) {
3786 return _shared_blobs_addr[id - _shared_blobs_base];
3787 }
3788 if (id >= _C1_blobs_base && id < _C1_blobs_base + _C1_blobs_length) {
3789 return _C1_blobs_addr[id - _C1_blobs_base];
3790 }
3791 if (id >= _C1_blobs_base && id < _C1_blobs_base + _C1_blobs_length) {
3792 return _C1_blobs_addr[id - _C1_blobs_base];
3793 }
3794 if (id >= _C2_blobs_base && id < _C2_blobs_base + _C2_blobs_length) {
3795 return _C2_blobs_addr[id - _C2_blobs_base];
3796 }
3797 if (id >= _c_str_base && id < (_c_str_base + (uint)_C_strings_count)) {
3798 return address_for_C_string(id - _c_str_base);
3799 }
3800 fatal("Incorrect id %d for AOT Code Cache addresses table", id);
3801 return nullptr;
3802 }
3803
// Map a runtime address to a stable table id for storing in the cache.
// Lookup order: C strings, then stubs, then code blobs (shared/C1/C2),
// then external runtime functions. As a last resort, an address inside
// the VM image with a non-zero symbol offset is encoded as its distance
// from os::init (decoded symmetrically on load). Returns -1 for the
// static-call-stub self-jump sentinel; asserts (debug) on any address
// that cannot be encoded.
int AOTCodeAddressTable::id_for_address(address addr, RelocIterator reloc, CodeBlob* blob) {
  assert(_extrs_complete, "AOT Code Cache VM runtime addresses table is not complete");
  int id = -1;
  if (addr == (address)-1) { // Static call stub has jump to itself
    return id;
  }
  // Check card_table_base address first since it can point to any address
  BarrierSet* bs = BarrierSet::barrier_set();
  bool is_const_card_table_base = !UseG1GC && !UseShenandoahGC && bs->is_a(BarrierSet::CardTableBarrierSet);
  guarantee(!is_const_card_table_base || addr != ci_card_table_address_const(), "sanity");

  // Search for C string
  id = id_for_C_string(addr);
  if (id >= 0) {
    return id + _c_str_base;
  }
  if (StubRoutines::contains(addr)) {
    // Search in stubs
    id = search_address(addr, _stubs_addr, _stubs_length);
    if (id == BAD_ADDRESS_ID) {
      // Not in the table: identify the stub for the assert message.
      StubCodeDesc* desc = StubCodeDesc::desc_for(addr);
      if (desc == nullptr) {
        desc = StubCodeDesc::desc_for(addr + frame::pc_return_offset);
      }
      const char* sub_name = (desc != nullptr) ? desc->name() : "<unknown>";
      assert(false, "Address " INTPTR_FORMAT " for Stub:%s is missing in AOT Code Cache addresses table", p2i(addr), sub_name);
    } else {
      return _stubs_base + id;
    }
  } else {
    CodeBlob* cb = CodeCache::find_blob(addr);
    if (cb != nullptr) {
      int id_base = _shared_blobs_base;
      // Search in code blobs
      id = search_address(addr, _shared_blobs_addr, _shared_blobs_length);
      if (id == BAD_ADDRESS_ID) {
        id_base = _C1_blobs_base;
        // search C1 blobs
        id = search_address(addr, _C1_blobs_addr, _C1_blobs_length);
      }
      if (id == BAD_ADDRESS_ID) {
        id_base = _C2_blobs_base;
        // search C2 blobs
        id = search_address(addr, _C2_blobs_addr, _C2_blobs_length);
      }
      if (id == BAD_ADDRESS_ID) {
        assert(false, "Address " INTPTR_FORMAT " for Blob:%s is missing in AOT Code Cache addresses table", p2i(addr), cb->name());
      } else {
        return id_base + id;
      }
    } else {
      // Search in runtime functions
      id = search_address(addr, _extrs_addr, _extrs_length);
      if (id == BAD_ADDRESS_ID) {
        ResourceMark rm;
        const int buflen = 1024;
        char* func_name = NEW_RESOURCE_ARRAY(char, buflen);
        int offset = 0;
        if (os::dll_address_to_function_name(addr, func_name, buflen, &offset)) {
          if (offset > 0) {
            // Could be address of C string
            // Encode as distance from os::init; the assert below keeps
            // this range disjoint from the table-id range.
            uint dist = (uint)pointer_delta(addr, (address)os::init, 1);
            CompileTask* task = ciEnv::current()->task();
            uint compile_id = 0;
            uint comp_level =0;
            if (task != nullptr) { // this could be called from compiler runtime initialization (compiler blobs)
              compile_id = task->compile_id();
              comp_level = task->comp_level();
            }
            log_debug(aot, codecache)("%d (L%d): Address " INTPTR_FORMAT " (offset %d) for runtime target '%s' is missing in AOT Code Cache addresses table",
                                      compile_id, comp_level, p2i(addr), dist, (const char*)addr);
            assert(dist > (uint)(_all_max + MAX_STR_COUNT), "change encoding of distance");
            return dist;
          }
          // Known symbol at offset 0 but absent from the table: dump
          // diagnostics before asserting.
          reloc.print_current_on(tty);
          blob->print_on(tty);
          blob->print_code_on(tty);
          assert(false, "Address " INTPTR_FORMAT " for runtime target '%s+%d' is missing in AOT Code Cache addresses table", p2i(addr), func_name, offset);
        } else {
          reloc.print_current_on(tty);
          blob->print_on(tty);
          blob->print_code_on(tty);
          os::find(addr, tty);
          assert(false, "Address " INTPTR_FORMAT " for <unknown>/('%s') is missing in AOT Code Cache addresses table", p2i(addr), (const char*)addr);
        }
      } else {
        return _extrs_base + id;
      }
    }
  }
  return id;
}
3896
3897 #undef _extrs_max
3898 #undef _stubs_max
3899 #undef _shared_blobs_max
3900 #undef _C1_blobs_max
3901 #undef _C2_blobs_max
3902 #undef _blobs_max
3903 #undef _extrs_base
3904 #undef _stubs_base
3905 #undef _shared_blobs_base
3906 #undef _C1_blobs_base
3907 #undef _C2_blobs_base
3908 #undef _blobs_end
3909
// Singleton instance; its fields are filled in by initialize_from_runtime().
AOTRuntimeConstants AOTRuntimeConstants::_aot_runtime_constants;
3911
// Capture GC-dependent constants (card table base, region grain shift)
// from the live runtime into the singleton.
// Note the `} else` immediately before each #endif: the conditionally
// compiled branches chain into the final CardTableBarrierSet check, so
// at most one branch executes regardless of which GCs are built in.
void AOTRuntimeConstants::initialize_from_runtime() {
  BarrierSet* bs = BarrierSet::barrier_set();
  address card_table_base = nullptr;
  uint grain_shift = 0;
#if INCLUDE_G1GC
  if (bs->is_a(BarrierSet::G1BarrierSet)) {
    grain_shift = G1HeapRegion::LogOfHRGrainBytes;
  } else
#endif
#if INCLUDE_SHENANDOAHGC
  if (bs->is_a(BarrierSet::ShenandoahBarrierSet)) {
    grain_shift = 0;
  } else
#endif
  if (bs->is_a(BarrierSet::CardTableBarrierSet)) {
    CardTable::CardValue* base = ci_card_table_address_const();
    assert(base != nullptr, "unexpected byte_map_base");
    card_table_base = base;
    CardTableBarrierSet* ctbs = barrier_set_cast<CardTableBarrierSet>(bs);
    grain_shift = ctbs->grain_shift();
  }
  _aot_runtime_constants._card_table_base = card_table_base;
  _aot_runtime_constants._grain_shift = grain_shift;
}
3936
// Addresses of the runtime-constant fields, in declaration order,
// terminated by nullptr. Presumably iterated by code that patches these
// values into AOT code -- confirm against users of this list.
address AOTRuntimeConstants::_field_addresses_list[] = {
  ((address)&_aot_runtime_constants._card_table_base),
  ((address)&_aot_runtime_constants._grain_shift),
  nullptr
};
3942
3943 address AOTRuntimeConstants::card_table_base_address() {
3944 assert(UseSerialGC || UseParallelGC, "Only these GCs have constant card table base");
3945 return (address)&_aot_runtime_constants._card_table_base;
3946 }
3947
// Close the cache for nmethod readers and block until all in-flight
// readers have left.
// Counter protocol (shared with ReadingMark): a value >= 0 means "open
// with that many readers"; a negative value means "closed". Closing
// replaces cur with -(cur + 1), so zero readers maps to -1 and any
// readers still inside keep the value below -1, counting up towards -1
// as they exit.
void AOTCodeCache::wait_for_no_nmethod_readers() {
  while (true) {
    int cur = AtomicAccess::load(&_nmethod_readers);
    int upd = -(cur + 1);
    if (cur >= 0 && AtomicAccess::cmpxchg(&_nmethod_readers, cur, upd) == cur) {
      // Success, no new readers should appear.
      break;
    }
  }

  // Now wait for all readers to leave.
  SpinYield w;
  while (AtomicAccess::load(&_nmethod_readers) != -1) {
    w.wait();
  }
}
3964
// Register this thread as a reader of the cache (CAS-increments the
// reader counter). Sets _failed instead when the counter is negative,
// i.e. the cache was closed by wait_for_no_nmethod_readers().
AOTCodeCache::ReadingMark::ReadingMark() {
  while (true) {
    int cur = AtomicAccess::load(&_nmethod_readers);
    if (cur < 0) {
      // Cache is already closed, cannot proceed.
      _failed = true;
      return;
    }
    if (AtomicAccess::cmpxchg(&_nmethod_readers, cur, cur + 1) == cur) {
      // Successfully recorded ourselves as entered.
      _failed = false;
      return;
    }
  }
}
3980
// Deregister this thread as a reader. The direction of the update
// depends on whether the cache was closed while we were inside: open
// counters (positive) count down towards 0, closed counters (negative)
// count up towards -1, which is what wait_for_no_nmethod_readers()
// waits for.
AOTCodeCache::ReadingMark::~ReadingMark() {
  if (_failed) {
    return;
  }
  while (true) {
    int cur = AtomicAccess::load(&_nmethod_readers);
    if (cur > 0) {
      // Cache is open, we are counting down towards 0.
      if (AtomicAccess::cmpxchg(&_nmethod_readers, cur, cur - 1) == cur) {
        return;
      }
    } else {
      // Cache is closed, we are counting up towards -1.
      if (AtomicAccess::cmpxchg(&_nmethod_readers, cur, cur + 1) == cur) {
        return;
      }
    }
  }
}
4000
// Print the accumulated AOT code cache timers: load-side timers when the
// cache is being used, store-side timer when it is being dumped (both
// can apply in the same run).
void AOTCodeCache::print_timers_on(outputStream* st) {
  if (is_using_code()) {
    st->print_cr (" AOT Code Preload Time: %7.3f s", _t_totalPreload.seconds());
    st->print_cr (" AOT Code Load Time: %7.3f s", _t_totalLoad.seconds());
    st->print_cr (" nmethod register: %7.3f s", _t_totalRegister.seconds());
    st->print_cr (" find AOT code entry: %7.3f s", _t_totalFind.seconds());
  }
  if (is_dumping_code()) {
    st->print_cr (" AOT Code Store Time: %7.3f s", _t_totalStore.seconds());
  }
}
4012
4013 AOTCodeStats AOTCodeStats::add_aot_code_stats(AOTCodeStats stats1, AOTCodeStats stats2) {
4014 AOTCodeStats result;
4015 for (int kind = AOTCodeEntry::None; kind < AOTCodeEntry::Kind_count; kind++) {
4016 result.ccstats._kind_cnt[kind] = stats1.entry_count(kind) + stats2.entry_count(kind);
4017 }
4018
4019 for (int lvl = CompLevel_none; lvl < AOTCompLevel_count; lvl++) {
4020 result.ccstats._nmethod_cnt[lvl] = stats1.nmethod_count(lvl) + stats2.nmethod_count(lvl);
4021 }
4022 result.ccstats._clinit_barriers_cnt = stats1.clinit_barriers_count() + stats2.clinit_barriers_count();
4023 return result;
4024 }
4025
// Log per-kind entry totals at VM exit (Debug level); for nmethod
// entries additionally break the count down per AOT compilation tier.
// The highest tier (AOTCompLevel_count-1) is the "preload" tier and also
// reports how many entries carry class-init barriers.
void AOTCodeCache::log_stats_on_exit(AOTCodeStats& stats) {
  LogStreamHandle(Debug, aot, codecache, exit) log;
  if (log.is_enabled()) {
    for (uint kind = AOTCodeEntry::None; kind < AOTCodeEntry::Kind_count; kind++) {
      log.print_cr(" %s: total=%u", aot_code_entry_kind_name[kind], stats.entry_count(kind));
      if (kind == AOTCodeEntry::Nmethod) {
        for (uint lvl = CompLevel_simple; lvl < AOTCompLevel_count; lvl++) {
          log.print(" Tier %d: total=%u", lvl, stats.nmethod_count(lvl));
          if (lvl == AOTCompLevel_count-1) { // AOT Preload
            log.print(", has_clinit_barriers=%u", stats.clinit_barriers_count());
          }
          log.cr();
        }
      }
    }
  }
}
4043
4044 static void print_helper1(outputStream* st, const char* name, int count) {
4045 if (count > 0) {
4046 st->print(" %s=%d", name, count);
4047 }
4048 }
4049
// Print aggregate statistics for the open cache: counts per entry kind
// (and per tier for nmethods), then optionally (at Debug/Trace log
// levels) unused-entry details and a line per live nmethod in the code
// cache. Takes a ReadingMark so the cache cannot be closed underneath us.
void AOTCodeCache::print_statistics_on(outputStream* st) {
  AOTCodeCache* cache = open_for_use();
  if (cache != nullptr) {
    ReadingMark rdmk;
    if (rdmk.failed()) {
      // Cache is closed, cannot touch anything.
      return;
    }
    AOTCodeStats stats;

    // Accumulate stats over preload entries and regular entries alike.
    uint preload_count = cache->_load_header->preload_entries_count();
    AOTCodeEntry* preload_entries = (AOTCodeEntry*)cache->addr(cache->_load_header->preload_entries_offset());
    for (uint i = 0; i < preload_count; i++) {
      stats.collect_all_stats(&preload_entries[i]);
    }

    uint count = cache->_load_header->entries_count();
    AOTCodeEntry* load_entries = (AOTCodeEntry*)cache->addr(cache->_load_header->entries_offset());
    for (uint i = 0; i < count; i++) {
      stats.collect_all_stats(&load_entries[i]);
    }

    for (uint kind = AOTCodeEntry::None; kind < AOTCodeEntry::Kind_count; kind++) {
      if (stats.entry_count(kind) > 0) {
        st->print("  %s:", aot_code_entry_kind_name[kind]);
        print_helper1(st, "total", stats.entry_count(kind));
        print_helper1(st, "loaded", stats.entry_loaded_count(kind));
        print_helper1(st, "invalidated", stats.entry_invalidated_count(kind));
        print_helper1(st, "failed", stats.entry_load_failed_count(kind));
        st->cr();
      }
      if (kind == AOTCodeEntry::Nmethod) {
        // Per-tier breakdown; the top tier is the preload tier.
        for (uint lvl = CompLevel_simple; lvl < AOTCompLevel_count; lvl++) {
          if (stats.nmethod_count(lvl) > 0) {
            st->print("  AOT Code T%d", lvl);
            print_helper1(st, "total", stats.nmethod_count(lvl));
            print_helper1(st, "loaded", stats.nmethod_loaded_count(lvl));
            print_helper1(st, "invalidated", stats.nmethod_invalidated_count(lvl));
            print_helper1(st, "failed", stats.nmethod_load_failed_count(lvl));
            if (lvl == AOTCompLevel_count-1) {
              print_helper1(st, "has_clinit_barriers", stats.clinit_barriers_count());
            }
            st->cr();
          }
        }
      }
    }
    LogStreamHandle(Debug, aot, codecache, init) log;
    if (log.is_enabled()) {
      AOTCodeCache::print_unused_entries_on(&log);
    }
    LogStreamHandle(Trace, aot, codecache) aot_info;
    // need a lock to traverse the code cache
    if (aot_info.is_enabled()) {
      MutexLocker locker(CodeCache_lock, Mutex::_no_safepoint_check_flag);
      NMethodIterator iter(NMethodIterator::all);
      while (iter.next()) {
        nmethod* nm = iter.method();
        if (nm->is_in_use() && !nm->is_native_method() && !nm->is_osr_method()) {
          // Flags: S = method is in AOT cache, A = nmethod is AOT code,
          // P = preloaded, followed by the compilation level.
          aot_info.print("%5d:%c%c%c%d:", nm->compile_id(),
                         (nm->method()->in_aot_cache() ? 'S' : ' '),
                         (nm->is_aot() ? 'A' : ' '),
                         (nm->preloaded() ? 'P' : ' '),
                         nm->comp_level());
          print_helper(nm, &aot_info);
          aot_info.print(": ");
          CompileTask::print(&aot_info, nm, nullptr, true /*short_form*/);
          // NOTE(review): named aot_debug but enabled at Trace level,
          // same as aot_info above -- confirm Trace (vs Debug) is intended.
          LogStreamHandle(Trace, aot, codecache) aot_debug;
          if (aot_debug.is_enabled()) {
            MethodTrainingData* mtd = MethodTrainingData::find(methodHandle(Thread::current(), nm->method()));
            if (mtd != nullptr) {
              mtd->iterate_compiles([&](CompileTrainingData* ctd) {
                aot_debug.print(" CTD: "); ctd->print_on(&aot_debug); aot_debug.cr();
              });
            }
          }
        }
      }
    }
  }
}
4131
// Print a one-line summary of this cache entry (identity, placement in
// the cache image, compilation info, and state flags).
void AOTCodeEntry::print(outputStream* st) const {
  st->print_cr(" AOT Code Cache entry " INTPTR_FORMAT " [kind: %d, id: " UINT32_FORMAT_X_0 ", offset: %d, size: %d, comp_level: %d, comp_id: %d, %s%s%s%s]",
               p2i(this), (int)_kind, _id, _offset, _size, _comp_level, _comp_id,
               (_not_entrant? "not_entrant" : "entrant"),
               (_loaded ? ", loaded" : ""),
               (_has_clinit_barriers ? ", has_clinit_barriers" : ""),
               (_for_preload ? ", for_preload" : ""));
}
4140
// This is called after initialize() but before init2()
// and _cache is not set yet -- hence the use of opened_cache throughout.
// Dumps every preload entry and every regular entry of the opened cache:
// one summary line each plus the entry name via AOTCodeReader.
void AOTCodeCache::print_on(outputStream* st) {
  if (opened_cache != nullptr && opened_cache->for_use()) {
    ReadingMark rdmk;
    if (rdmk.failed()) {
      // Cache is closed, cannot touch anything.
      return;
    }

    st->print_cr("\nAOT Code Cache Preload entries");

    uint preload_count = opened_cache->_load_header->preload_entries_count();
    AOTCodeEntry* preload_entries = (AOTCodeEntry*)opened_cache->addr(opened_cache->_load_header->preload_entries_offset());
    for (uint i = 0; i < preload_count; i++) {
      AOTCodeEntry* entry = &preload_entries[i];

      // The entry name is stored relative to the entry's own data.
      uint entry_position = entry->offset();
      uint name_offset = entry->name_offset() + entry_position;
      const char* saved_name = opened_cache->addr(name_offset);

      st->print_cr("%4u: %10s Id:%u AP%u size=%u '%s' %s%s%s",
                   i, aot_code_entry_kind_name[entry->kind()], entry->id(), entry->comp_level(),
                   entry->size(), saved_name,
                   entry->has_clinit_barriers() ? " has_clinit_barriers" : "",
                   entry->is_loaded() ? " loaded" : "",
                   entry->not_entrant() ? " not_entrant" : "");

      st->print_raw("  ");
      AOTCodeReader reader(opened_cache, entry, nullptr);
      reader.print_on(st);
    }

    st->print_cr("\nAOT Code Cache entries");

    uint count = opened_cache->_load_header->entries_count();
    // Regular entries are visited in search-table order: pairs of
    // [id, index] where index selects into load_entries.
    uint* search_entries = (uint*)opened_cache->addr(opened_cache->_load_header->search_table_offset()); // [id, index]
    AOTCodeEntry* load_entries = (AOTCodeEntry*)opened_cache->addr(opened_cache->_load_header->entries_offset());

    for (uint i = 0; i < count; i++) {
      int index = search_entries[2*i + 1];
      AOTCodeEntry* entry = &(load_entries[index]);

      uint entry_position = entry->offset();
      uint name_offset = entry->name_offset() + entry_position;
      const char* saved_name = opened_cache->addr(name_offset);

      st->print_cr("%4u: %10s idx:%4u Id:%u A%u size=%u '%s' %s%s",
                   i, aot_code_entry_kind_name[entry->kind()], index, entry->id(), entry->comp_level(),
                   entry->size(), saved_name,
                   entry->is_loaded() ? " loaded" : "",
                   entry->not_entrant() ? " not_entrant" : "");

      st->print_raw("  ");
      AOTCodeReader reader(opened_cache, entry, nullptr);
      reader.print_on(st);
    }
  }
}
4200
// Report nmethod cache entries that were never loaded, using training
// data to filter out the expected cases (holder class not initialized,
// outstanding clinit dependencies, compilation already scheduled, or a
// better online compilation already installed).
// NOTE(review): the st parameter is unused -- output goes to a locally
// created Info-level log stream; confirm whether st should be used instead.
void AOTCodeCache::print_unused_entries_on(outputStream* st) {
  LogStreamHandle(Info, aot, codecache, init) info;
  if (info.is_enabled()) {
    AOTCodeCache::iterate([&](AOTCodeEntry* entry) {
      if (entry->is_nmethod() && !entry->is_loaded()) {
        MethodTrainingData* mtd = MethodTrainingData::find(methodHandle(Thread::current(), entry->method()));
        if (mtd != nullptr) {
          if (mtd->has_holder()) {
            if (mtd->holder()->method_holder()->is_initialized()) {
              ResourceMark rm;
              mtd->iterate_compiles([&](CompileTrainingData* ctd) {
                if ((uint)ctd->level() == entry->comp_level()) {
                  if (ctd->init_deps_left_acquire() == 0) {
                    nmethod* nm = mtd->holder()->code();
                    if (nm == nullptr) {
                      if (mtd->holder()->queued_for_compilation()) {
                        return; // scheduled for compilation
                      }
                    } else if ((uint)nm->comp_level() >= entry->comp_level()) {
                      return; // already online compiled and superseded by a more optimal method
                    }
                    info.print("AOT Code Cache entry not loaded: ");
                    ctd->print_on(&info);
                    info.cr();
                  }
                }
              });
            } else {
              // not yet initialized
            }
          } else {
            info.print("AOT Code Cache entry doesn't have a holder: ");
            mtd->print_on(&info);
            info.cr();
          }
        }
      }
    });
  }
}
4241
// Print the saved name of the entry this reader is positioned on.
// Positions the read cursor at the entry start as a side effect.
void AOTCodeReader::print_on(outputStream* st) {
  uint entry_position = _entry->offset();
  set_read_position(entry_position);

  // Read name
  uint name_offset = entry_position + _entry->name_offset();
  uint name_size = _entry->name_size(); // Includes '\0'
  const char* name = addr(name_offset);

  st->print_cr(" name: %s", name);
}
4253
|