1 /*
2 * Copyright (c) 2025, Oracle and/or its affiliates. All rights reserved.
3 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
4 *
5 * This code is free software; you can redistribute it and/or modify it
6 * under the terms of the GNU General Public License version 2 only, as
7 * published by the Free Software Foundation.
8 *
9 * This code is distributed in the hope that it will be useful, but WITHOUT
10 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
11 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
12 * version 2 for more details (a copy is included in the LICENSE file that
13 * accompanied this code).
14 *
15 * You should have received a copy of the GNU General Public License version
16 * 2 along with this work; if not, write to the Free Software Foundation,
17 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
18 *
19 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
20 * or visit www.oracle.com if you need additional information or have any
21 * questions.
22 *
23 */
24
25
26 #include "asm/macroAssembler.hpp"
27 #include "cds/aotCacheAccess.hpp"
28 #include "cds/cds_globals.hpp"
29 #include "cds/cdsConfig.hpp"
30 #include "cds/heapShared.hpp"
31 #include "cds/metaspaceShared.hpp"
32 #include "classfile/javaAssertions.hpp"
33 #include "code/aotCodeCache.hpp"
34 #include "code/codeCache.hpp"
35 #include "gc/shared/gcConfig.hpp"
36 #include "logging/logStream.hpp"
37 #include "memory/memoryReserver.hpp"
38 #include "runtime/deoptimization.hpp"
39 #include "runtime/flags/flagSetting.hpp"
40 #include "runtime/globals_extension.hpp"
41 #include "runtime/java.hpp"
42 #include "runtime/mutexLocker.hpp"
43 #include "runtime/os.inline.hpp"
44 #include "runtime/sharedRuntime.hpp"
45 #include "runtime/stubRoutines.hpp"
46 #include "utilities/copy.hpp"
47 #ifdef COMPILER1
48 #include "c1/c1_Runtime1.hpp"
49 #endif
50 #ifdef COMPILER2
51 #include "opto/runtime.hpp"
52 #endif
53 #if INCLUDE_G1GC
54 #include "gc/g1/g1BarrierSetRuntime.hpp"
55 #endif
56 #if INCLUDE_SHENANDOAHGC
57 #include "gc/shenandoah/shenandoahRuntime.hpp"
58 #endif
59 #if INCLUDE_ZGC
60 #include "gc/z/zBarrierSetRuntime.hpp"
61 #endif
62
63 #include <sys/stat.h>
64 #include <errno.h>
65
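// Printable names for each AOTCodeEntry kind, generated from the DO_AOTCODEENTRY_KIND macro
// and used in the log messages below.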
66 const char* aot_code_entry_kind_name[] = {
67 #define DECL_KIND_STRING(kind) XSTR(kind),
68 DO_AOTCODEENTRY_KIND(DECL_KIND_STRING)
69 #undef DECL_KIND_STRING
70 };
71
72 static void report_load_failure() {
73 if (AbortVMOnAOTCodeFailure) {
74 vm_exit_during_initialization("Unable to use AOT Code Cache.", nullptr);
75 }
76 log_info(aot, codecache, init)("Unable to use AOT Code Cache.");
77 AOTAdapterCaching = false;
78 AOTStubCaching = false;
79 }
80
81 static void report_store_failure() {
82 if (AbortVMOnAOTCodeFailure) {
83 tty->print_cr("Unable to create AOT Code Cache.");
84 vm_abort(false);
85 }
86 log_info(aot, codecache, exit)("Unable to create AOT Code Cache.");
87 AOTAdapterCaching = false;
88 AOTStubCaching = false;
89 }
90
91 bool AOTCodeCache::is_dumping_adapter() {
92 return AOTAdapterCaching && is_on_for_dump();
93 }
94
95 bool AOTCodeCache::is_using_adapter() {
96 return AOTAdapterCaching && is_on_for_use();
97 }
98
99 bool AOTCodeCache::is_dumping_stub() {
100 return AOTStubCaching && is_on_for_dump();
101 }
102
103 bool AOTCodeCache::is_using_stub() {
104 return AOTStubCaching && is_on_for_use();
105 }
106
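// Map (kind, id) to a single id in one combined id space: Adapter and SharedBlob ids are used
// as-is, C1Blob ids follow the shared stub ids, and C2Blob ids follow the C1 stub ids.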
107 static uint32_t encode_id(AOTCodeEntry::Kind kind, int id) {
108 assert(AOTCodeEntry::is_valid_entry_kind(kind), "invalid AOTCodeEntry kind %d", (int)kind);
109 // There can be an id conflict between an Adapter and a *Blob, but that should not cause any functional issue
110 // because both id and kind are used to find an entry, and that combination should be unique.
111 if (kind == AOTCodeEntry::Adapter) {
112 return id;
113 } else if (kind == AOTCodeEntry::SharedBlob) {
114 return id;
115 } else if (kind == AOTCodeEntry::C1Blob) {
116 return (int)SharedStubId::NUM_STUBIDS + id;
117 } else {
118 // kind must be AOTCodeEntry::C2Blob
119 return (int)SharedStubId::NUM_STUBIDS + COMPILER1_PRESENT((int)C1StubId::NUM_STUBIDS) + id;
120 }
121 }
122
123 static uint _max_aot_code_size = 0;
124 uint AOTCodeCache::max_aot_code_size() {
125 return _max_aot_code_size;
126 }
127
128 void AOTCodeCache::initialize() {
129 #if defined(ZERO) || !(defined(AMD64) || defined(AARCH64))
130 log_info(aot, codecache, init)("AOT Code Cache is not supported on this platform.");
131 AOTAdapterCaching = false;
132 AOTStubCaching = false;
133 return;
134 #else
135 if (FLAG_IS_DEFAULT(AOTCache)) {
136 log_info(aot, codecache, init)("AOT Code Cache is not used: AOTCache is not specified.");
137 AOTAdapterCaching = false;
138 AOTStubCaching = false;
139 return; // AOTCache must be specified to dump and use AOT code
140 }
141
142 // Disable stubs caching until JDK-8357398 is fixed.
143 FLAG_SET_ERGO(AOTStubCaching, false);
144
145 if (VerifyOops) {
146 // Disable AOT stubs caching when VerifyOops flag is on.
147 // VerifyOops code generates a lot of C strings which overflow the
148 // AOT C string table (which has a fixed size).
149 // The AOT C string table will be reworked later to handle such cases.
150 //
151 // Note: AOT adapters are not affected - they don't have oop operations.
152 log_info(aot, codecache, init)("AOT Stubs Caching is not supported with VerifyOops.");
153 FLAG_SET_ERGO(AOTStubCaching, false);
154 }
155
156 bool is_dumping = false;
157 bool is_using = false;
158 if (CDSConfig::is_dumping_final_static_archive() && CDSConfig::is_dumping_aot_linked_classes()) {
159 FLAG_SET_ERGO_IF_DEFAULT(AOTAdapterCaching, true);
160 FLAG_SET_ERGO_IF_DEFAULT(AOTStubCaching, true);
161 is_dumping = true;
162 } else if (CDSConfig::is_using_archive() && CDSConfig::is_using_aot_linked_classes()) {
163 FLAG_SET_ERGO_IF_DEFAULT(AOTAdapterCaching, true);
164 FLAG_SET_ERGO_IF_DEFAULT(AOTStubCaching, true);
165 is_using = true;
166 } else {
167 log_info(aot, codecache, init)("AOT Code Cache is not used: AOT Class Linking is not used.");
168 return; // nothing to do
169 }
170 if (!AOTAdapterCaching && !AOTStubCaching) {
171 return; // AOT code caching disabled on command line
172 }
173 _max_aot_code_size = AOTCodeMaxSize;
174 if (!FLAG_IS_DEFAULT(AOTCodeMaxSize)) {
175 if (!is_aligned(AOTCodeMaxSize, os::vm_allocation_granularity())) {
176 _max_aot_code_size = align_up(AOTCodeMaxSize, os::vm_allocation_granularity());
177 log_debug(aot, codecache, init)("Max AOT Code Cache size is aligned up to %uK", (uint)(max_aot_code_size()/K));
178 }
179 }
180 size_t aot_code_size = is_using ? AOTCacheAccess::get_aot_code_region_size() : 0;
181 if (is_using && aot_code_size == 0) {
182 log_info(aot, codecache, init)("AOT Code Cache is empty");
183 return;
184 }
185 if (!open_cache(is_dumping, is_using)) {
186 if (is_using) {
187 report_load_failure();
188 } else {
189 report_store_failure();
190 }
191 return;
192 }
193 if (is_dumping) {
194 FLAG_SET_DEFAULT(ForceUnreachable, true);
195 }
196 FLAG_SET_DEFAULT(DelayCompilerStubsGeneration, false);
197 #endif // defined(ZERO) || !(defined(AMD64) || defined(AARCH64))
198 }
199
200 void AOTCodeCache::init2() {
201 if (!is_on()) {
202 return;
203 }
204 if (!verify_vm_config()) {
205 close();
206 report_load_failure();
207 }
208
209 // initialize the table of external routines so we can save
210 // generated code blobs that reference them
211 init_extrs_table();
212 init_early_stubs_table();
213 }
214
215 AOTCodeCache* AOTCodeCache::_cache = nullptr;
216
217 bool AOTCodeCache::open_cache(bool is_dumping, bool is_using) {
218 AOTCodeCache* cache = new AOTCodeCache(is_dumping, is_using);
219 if (cache->failed()) {
220 delete cache;
221 _cache = nullptr;
222 return false;
223 }
224 _cache = cache;
225 return true;
226 }
227
228 void AOTCodeCache::close() {
229 if (is_on()) {
230 delete _cache; // Free memory
231 _cache = nullptr;
232 }
233 }
234
235 #define DATA_ALIGNMENT HeapWordSize
236
237 AOTCodeCache::AOTCodeCache(bool is_dumping, bool is_using) :
238 _load_header(nullptr),
239 _load_buffer(nullptr),
240 _store_buffer(nullptr),
241 _C_store_buffer(nullptr),
242 _write_position(0),
243 _load_size(0),
244 _store_size(0),
245 _for_use(is_using),
246 _for_dump(is_dumping),
247 _closing(false),
248 _failed(false),
249 _lookup_failed(false),
250 _table(nullptr),
251 _load_entries(nullptr),
252 _search_entries(nullptr),
253 _store_entries(nullptr),
254 _C_strings_buf(nullptr),
255 _store_entries_cnt(0)
256 {
257 // Read the header at the beginning of the cache
258 if (_for_use) {
259 // Read cache
260 size_t load_size = AOTCacheAccess::get_aot_code_region_size();
261 ReservedSpace rs = MemoryReserver::reserve(load_size, mtCode);
262 if (!rs.is_reserved()) {
263 log_warning(aot, codecache, init)("Failed to reserve %u bytes of memory for mapping AOT code region into AOT Code Cache", (uint)load_size);
264 set_failed();
265 return;
266 }
267 if (!AOTCacheAccess::map_aot_code_region(rs)) {
268 log_warning(aot, codecache, init)("Failed to read/mmap cached code region into AOT Code Cache");
269 set_failed();
270 return;
271 }
272
273 _load_size = (uint)load_size;
274 _load_buffer = (char*)rs.base();
275 assert(is_aligned(_load_buffer, DATA_ALIGNMENT), "load_buffer is not aligned");
276 log_debug(aot, codecache, init)("Mapped %u bytes at address " INTPTR_FORMAT " at AOT Code Cache", _load_size, p2i(_load_buffer));
277
278 _load_header = (Header*)addr(0);
279 if (!_load_header->verify_config(_load_size)) {
280 set_failed();
281 return;
282 }
283 log_info (aot, codecache, init)("Loaded %u AOT code entries from AOT Code Cache", _load_header->entries_count());
284 log_debug(aot, codecache, init)(" Adapters: total=%u", _load_header->adapters_count());
285 log_debug(aot, codecache, init)(" Shared Blobs: total=%u", _load_header->shared_blobs_count());
286 log_debug(aot, codecache, init)(" C1 Blobs: total=%u", _load_header->C1_blobs_count());
287 log_debug(aot, codecache, init)(" C2 Blobs: total=%u", _load_header->C2_blobs_count());
288 log_debug(aot, codecache, init)(" AOT code cache size: %u bytes", _load_header->cache_size());
289
290 // Read strings
291 load_strings();
292 }
293 if (_for_dump) {
294 _C_store_buffer = NEW_C_HEAP_ARRAY(char, max_aot_code_size() + DATA_ALIGNMENT, mtCode);
295 _store_buffer = align_up(_C_store_buffer, DATA_ALIGNMENT);
296 // Entries are allocated at the end of the buffer in reverse order (as on a stack).
297 _store_entries = (AOTCodeEntry*)align_up(_C_store_buffer + max_aot_code_size(), DATA_ALIGNMENT);
298 log_debug(aot, codecache, init)("Allocated store buffer at address " INTPTR_FORMAT " of size %u", p2i(_store_buffer), max_aot_code_size());
299 }
300 _table = new AOTCodeAddressTable();
301 }
302
303 void AOTCodeCache::init_extrs_table() {
304 AOTCodeAddressTable* table = addr_table();
305 if (table != nullptr) {
306 table->init_extrs();
307 }
308 }
309
310 void AOTCodeCache::init_early_stubs_table() {
311 AOTCodeAddressTable* table = addr_table();
312 if (table != nullptr) {
313 table->init_early_stubs();
314 }
315 }
316
317 void AOTCodeCache::init_shared_blobs_table() {
318 AOTCodeAddressTable* table = addr_table();
319 if (table != nullptr) {
320 table->init_shared_blobs();
321 }
322 }
323
324 void AOTCodeCache::init_early_c1_table() {
325 AOTCodeAddressTable* table = addr_table();
326 if (table != nullptr) {
327 table->init_early_c1();
328 }
329 }
330
331 AOTCodeCache::~AOTCodeCache() {
332 if (_closing) {
333 return; // Already closed
334 }
335 // Stop any further access to cache.
336 _closing = true;
337
338 MutexLocker ml(Compile_lock);
339 if (for_dump()) { // Finalize cache
340 finish_write();
341 }
342 _load_buffer = nullptr;
343 if (_C_store_buffer != nullptr) {
344 FREE_C_HEAP_ARRAY(char, _C_store_buffer);
345 _C_store_buffer = nullptr;
346 _store_buffer = nullptr;
347 }
348 if (_table != nullptr) {
349 delete _table;
350 _table = nullptr;
351 }
352 }
353
354 void AOTCodeCache::Config::record() {
355 _flags = 0;
356 #ifdef ASSERT
357 _flags |= debugVM;
358 #endif
359 if (UseCompressedOops) {
360 _flags |= compressedOops;
361 }
362 if (UseCompressedClassPointers) {
363 _flags |= compressedClassPointers;
364 }
365 if (UseTLAB) {
366 _flags |= useTLAB;
367 }
368 if (JavaAssertions::systemClassDefault()) {
369 _flags |= systemClassAssertions;
370 }
371 if (JavaAssertions::userClassDefault()) {
372 _flags |= userClassAssertions;
373 }
374 if (EnableContended) {
375 _flags |= enableContendedPadding;
376 }
377 if (RestrictContended) {
378 _flags |= restrictContendedPadding;
379 }
380 _compressedOopShift = CompressedOops::shift();
381 _compressedOopBase = CompressedOops::base();
382 _compressedKlassShift = CompressedKlassPointers::shift();
383 _contendedPaddingWidth = ContendedPaddingWidth;
384 _objectAlignment = ObjectAlignmentInBytes;
385 _gc = (uint)Universe::heap()->kind();
386 }
387
388 bool AOTCodeCache::Config::verify() const {
389 #ifdef ASSERT
390 if ((_flags & debugVM) == 0) {
391 log_debug(aot, codecache, init)("AOT Code Cache disabled: it was created by product VM, it can't be used by debug VM");
392 return false;
393 }
394 #else
395 if ((_flags & debugVM) != 0) {
396 log_debug(aot, codecache, init)("AOT Code Cache disabled: it was created by debug VM, it can't be used by product VM");
397 return false;
398 }
399 #endif
400
401 CollectedHeap::Name aot_gc = (CollectedHeap::Name)_gc;
402 if (aot_gc != Universe::heap()->kind()) {
403 log_debug(aot, codecache, init)("AOT Code Cache disabled: it was created with different GC: %s vs current %s", GCConfig::hs_err_name(aot_gc), GCConfig::hs_err_name());
404 return false;
405 }
406
407 if (((_flags & compressedOops) != 0) != UseCompressedOops) {
408 log_debug(aot, codecache, init)("AOT Code Cache disabled: it was created with UseCompressedOops = %s", UseCompressedOops ? "false" : "true");
409 return false;
410 }
411 if (((_flags & compressedClassPointers) != 0) != UseCompressedClassPointers) {
412 log_debug(aot, codecache, init)("AOT Code Cache disabled: it was created with UseCompressedClassPointers = %s", UseCompressedClassPointers ? "false" : "true");
413 return false;
414 }
415
416 if (((_flags & systemClassAssertions) != 0) != JavaAssertions::systemClassDefault()) {
417 log_debug(aot, codecache, init)("AOT Code Cache disabled: it was created with JavaAssertions::systemClassDefault() = %s", JavaAssertions::systemClassDefault() ? "disabled" : "enabled");
418 return false;
419 }
420 if (((_flags & userClassAssertions) != 0) != JavaAssertions::userClassDefault()) {
421 log_debug(aot, codecache, init)("AOT Code Cache disabled: it was created with JavaAssertions::userClassDefault() = %s", JavaAssertions::userClassDefault() ? "disabled" : "enabled");
422 return false;
423 }
424
425 if (((_flags & enableContendedPadding) != 0) != EnableContended) {
426 log_debug(aot, codecache, init)("AOT Code Cache disabled: it was created with EnableContended = %s", EnableContended ? "false" : "true");
427 return false;
428 }
429 if (((_flags & restrictContendedPadding) != 0) != RestrictContended) {
430 log_debug(aot, codecache, init)("AOT Code Cache disabled: it was created with RestrictContended = %s", RestrictContended ? "false" : "true");
431 return false;
432 }
433 if (_compressedOopShift != (uint)CompressedOops::shift()) {
434 log_debug(aot, codecache, init)("AOT Code Cache disabled: it was created with different CompressedOops::shift(): %d vs current %d", _compressedOopShift, CompressedOops::shift());
435 return false;
436 }
437 if (_compressedKlassShift != (uint)CompressedKlassPointers::shift()) {
438 log_debug(aot, codecache, init)("AOT Code Cache disabled: it was created with CompressedKlassPointers::shift() = %d vs current %d", _compressedKlassShift, CompressedKlassPointers::shift());
439 return false;
440 }
441 if (_contendedPaddingWidth != (uint)ContendedPaddingWidth) {
442 log_debug(aot, codecache, init)("AOT Code Cache disabled: it was created with ContendedPaddingWidth = %d vs current %d", _contendedPaddingWidth, ContendedPaddingWidth);
443 return false;
444 }
445 if (_objectAlignment != (uint)ObjectAlignmentInBytes) {
446 log_debug(aot, codecache, init)("AOT Code Cache disabled: it was created with ObjectAlignmentInBytes = %d vs current %d", _objectAlignment, ObjectAlignmentInBytes);
447 return false;
448 }
449
450 // This should be the last check as it only disables AOTStubCaching
451 if ((_compressedOopBase == nullptr || CompressedOops::base() == nullptr) && (_compressedOopBase != CompressedOops::base())) {
452 log_debug(aot, codecache, init)("AOTStubCaching is disabled: incompatible CompressedOops::base(): %p vs current %p", _compressedOopBase, CompressedOops::base());
453 AOTStubCaching = false;
454 }
455
456 return true;
457 }
458
459 bool AOTCodeCache::Header::verify_config(uint load_size) const {
460 if (_version != AOT_CODE_VERSION) {
461 log_debug(aot, codecache, init)("AOT Code Cache disabled: different AOT Code version %d vs %d recorded in AOT Code header", AOT_CODE_VERSION, _version);
462 return false;
463 }
464 if (load_size < _cache_size) {
465 log_debug(aot, codecache, init)("AOT Code Cache disabled: AOT Code Cache size %d < %d recorded in AOT Code header", load_size, _cache_size);
466 return false;
467 }
468 return true;
469 }
470
471 AOTCodeCache* AOTCodeCache::open_for_use() {
472 if (AOTCodeCache::is_on_for_use()) {
473 return AOTCodeCache::cache();
474 }
475 return nullptr;
476 }
477
478 AOTCodeCache* AOTCodeCache::open_for_dump() {
479 if (AOTCodeCache::is_on_for_dump()) {
480 AOTCodeCache* cache = AOTCodeCache::cache();
481 cache->clear_lookup_failed(); // Reset bit
482 return cache;
483 }
484 return nullptr;
485 }
486
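// Copy 'size' bytes from 'from' to 'to'. Uses a word-wise copy when both addresses are
// HeapWord-aligned and the payload is larger than two words, a byte-wise copy otherwise.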
487 void copy_bytes(const char* from, address to, uint size) {
488 assert(size > 0, "sanity");
489 bool by_words = true;
490 if ((size > 2 * HeapWordSize) && (((intptr_t)from | (intptr_t)to) & (HeapWordSize - 1)) == 0) {
491 // Use wordwise copies if possible:
492 Copy::disjoint_words((HeapWord*)from,
493 (HeapWord*)to,
494 ((size_t)size + HeapWordSize-1) / HeapWordSize);
495 } else {
496 by_words = false;
497 Copy::conjoint_jbytes(from, to, (size_t)size);
498 }
499 log_trace(aot, codecache)("Copied %d bytes as %s from " INTPTR_FORMAT " to " INTPTR_FORMAT, size, (by_words ? "HeapWord" : "bytes"), p2i(from), p2i(to));
500 }
501
502 AOTCodeReader::AOTCodeReader(AOTCodeCache* cache, AOTCodeEntry* entry) {
503 _cache = cache;
504 _entry = entry;
505 _load_buffer = cache->cache_buffer();
506 _read_position = 0;
507 _lookup_failed = false;
508 }
509
510 void AOTCodeReader::set_read_position(uint pos) {
511 if (pos == _read_position) {
512 return;
513 }
514 assert(pos < _cache->load_size(), "offset:%d >= file size:%d", pos, _cache->load_size());
515 _read_position = pos;
516 }
517
518 bool AOTCodeCache::set_write_position(uint pos) {
519 if (pos == _write_position) {
520 return true;
521 }
522 if (_store_size < _write_position) {
523 _store_size = _write_position; // Adjust during write
524 }
525 assert(pos < _store_size, "offset:%d >= file size:%d", pos, _store_size);
526 _write_position = pos;
569 if (nbytes == 0) {
570 return 0;
571 }
572 uint new_position = _write_position + nbytes;
573 if (new_position >= (uint)((char*)_store_entries - _store_buffer)) {
574 log_warning(aot, codecache)("Failed to write %d bytes at offset %d to AOT Code Cache. Increase AOTCodeMaxSize.",
575 nbytes, _write_position);
576 set_failed();
577 report_store_failure();
578 return 0;
579 }
580 copy_bytes((const char* )buffer, (address)(_store_buffer + _write_position), nbytes);
581 log_trace(aot, codecache)("Wrote %d bytes at offset %d to AOT Code Cache", nbytes, _write_position);
582 _write_position += nbytes;
583 if (_store_size < _write_position) {
584 _store_size = _write_position;
585 }
586 return nbytes;
587 }
588
589 void* AOTCodeEntry::operator new(size_t x, AOTCodeCache* cache) {
590 return (void*)(cache->add_entry());
591 }
592
593 static bool check_entry(AOTCodeEntry::Kind kind, uint id, AOTCodeEntry* entry) {
594 if (entry->kind() == kind) {
595 assert(entry->id() == id, "sanity");
596 return true; // Found
597 }
598 return false;
599 }
600
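// Look up an entry by (kind, id). The search table holds [id, index] pairs sorted by id;
// because ids can collide across kinds, the binary search is followed by a linear scan of
// neighboring entries with the same id.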
601 AOTCodeEntry* AOTCodeCache::find_entry(AOTCodeEntry::Kind kind, uint id) {
602 assert(_for_use, "sanity");
603 uint count = _load_header->entries_count();
604 if (_load_entries == nullptr) {
605 // Read it
606 _search_entries = (uint*)addr(_load_header->entries_offset()); // [id, index]
607 _load_entries = (AOTCodeEntry*)(_search_entries + 2 * count);
608 log_debug(aot, codecache, init)("Read %d entries table at offset %d from AOT Code Cache", count, _load_header->entries_offset());
609 }
610 // Binary search
611 int l = 0;
612 int h = count - 1;
613 while (l <= h) {
614 int mid = (l + h) >> 1;
615 int ix = mid * 2;
616 uint is = _search_entries[ix];
617 if (is == id) {
618 int index = _search_entries[ix + 1];
619 AOTCodeEntry* entry = &(_load_entries[index]);
620 if (check_entry(kind, id, entry)) {
621 return entry; // Found
622 }
623 // Linear search around the match to handle id collisions
624 for (int i = mid - 1; i >= l; i--) { // search back
625 ix = i * 2;
626 is = _search_entries[ix];
627 if (is != id) {
628 break;
629 }
630 index = _search_entries[ix + 1];
631 AOTCodeEntry* entry = &(_load_entries[index]);
632 if (check_entry(kind, id, entry)) {
633 return entry; // Found
634 }
635 }
636 for (int i = mid + 1; i <= h; i++) { // search forward
637 ix = i * 2;
638 is = _search_entries[ix];
639 if (is != id) {
640 break;
641 }
642 index = _search_entries[ix + 1];
643 AOTCodeEntry* entry = &(_load_entries[index]);
644 if (check_entry(kind, id, entry)) {
645 return entry; // Found
646 }
647 }
648 break; // No matching entry found
649 } else if (is < id) {
650 l = mid + 1;
651 } else {
652 h = mid - 1;
653 }
654 }
655 return nullptr;
656 }
657
658 extern "C" {
659 static int uint_cmp(const void *i, const void *j) {
660 uint a = *(uint *)i;
661 uint b = *(uint *)j;
662 return a > b ? 1 : a < b ? -1 : 0;
663 }
664 }
665
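// Write the final cache image into the AOT cache region: header, the entries' code (in store
// order), C strings, the sorted [id, index] search table, and the AOTCodeEntry array.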
666 bool AOTCodeCache::finish_write() {
667 if (!align_write()) {
668 return false;
669 }
670 uint strings_offset = _write_position;
671 int strings_count = store_strings();
672 if (strings_count < 0) {
673 return false;
674 }
675 if (!align_write()) {
676 return false;
677 }
678 uint strings_size = _write_position - strings_offset;
679
680 uint entries_count = 0; // Number of entrant (useful) code entries
681 uint entries_offset = _write_position;
682
683 uint store_count = _store_entries_cnt;
684 if (store_count > 0) {
685 uint header_size = (uint)align_up(sizeof(AOTCodeCache::Header), DATA_ALIGNMENT);
686 uint code_count = store_count;
687 uint search_count = code_count * 2;
688 uint search_size = search_count * sizeof(uint);
689 uint entries_size = (uint)align_up(code_count * sizeof(AOTCodeEntry), DATA_ALIGNMENT); // In bytes
690 // _write_position includes size of code and strings
691 uint code_alignment = code_count * DATA_ALIGNMENT; // We align_up code size when storing it.
692 uint total_size = header_size + _write_position + code_alignment + search_size + entries_size;
693 assert(total_size < max_aot_code_size(), "AOT Code size (" UINT32_FORMAT " bytes) is greater than AOTCodeMaxSize(" UINT32_FORMAT " bytes).", total_size, max_aot_code_size());
694
695 // Create ordered search table for entries [id, index];
696 uint* search = NEW_C_HEAP_ARRAY(uint, search_count, mtCode);
697 // Allocate in AOT Cache buffer
698 char* buffer = (char *)AOTCacheAccess::allocate_aot_code_region(total_size + DATA_ALIGNMENT);
699 char* start = align_up(buffer, DATA_ALIGNMENT);
700 char* current = start + header_size; // Skip header
701
702 AOTCodeEntry* entries_address = _store_entries; // Pointer to latest entry
703 uint adapters_count = 0;
704 uint shared_blobs_count = 0;
705 uint C1_blobs_count = 0;
706 uint C2_blobs_count = 0;
707 uint max_size = 0;
708 // AOTCodeEntry entries were allocated in reverse order in the store buffer.
709 // Process them in reverse so that the code stored first is cached first.
710 for (int i = store_count - 1; i >= 0; i--) {
711 entries_address[i].set_next(nullptr); // clear pointers before storing data
712 uint size = align_up(entries_address[i].size(), DATA_ALIGNMENT);
713 if (size > max_size) {
714 max_size = size;
715 }
716 copy_bytes((_store_buffer + entries_address[i].offset()), (address)current, size);
717 entries_address[i].set_offset(current - start); // New offset
718 current += size;
719 uint n = write_bytes(&(entries_address[i]), sizeof(AOTCodeEntry));
720 if (n != sizeof(AOTCodeEntry)) {
721 FREE_C_HEAP_ARRAY(uint, search);
722 return false;
723 }
724 search[entries_count*2 + 0] = entries_address[i].id();
725 search[entries_count*2 + 1] = entries_count;
726 entries_count++;
727 AOTCodeEntry::Kind kind = entries_address[i].kind();
728 if (kind == AOTCodeEntry::Adapter) {
729 adapters_count++;
730 } else if (kind == AOTCodeEntry::SharedBlob) {
731 shared_blobs_count++;
732 } else if (kind == AOTCodeEntry::C1Blob) {
733 C1_blobs_count++;
734 } else if (kind == AOTCodeEntry::C2Blob) {
735 C2_blobs_count++;
736 }
737 }
738 if (entries_count == 0) {
739 log_info(aot, codecache, exit)("AOT Code Cache was not created: no entries");
740 FREE_C_HEAP_ARRAY(uint, search);
741 return true; // Nothing to write
742 }
743 assert(entries_count <= store_count, "%d > %d", entries_count, store_count);
744 // Write strings
745 if (strings_count > 0) {
746 copy_bytes((_store_buffer + strings_offset), (address)current, strings_size);
747 strings_offset = (current - start); // New offset
748 current += strings_size;
749 }
750
751 uint new_entries_offset = (current - start); // New offset
752 // Sort and store search table
753 qsort(search, entries_count, 2*sizeof(uint), uint_cmp);
754 search_size = 2 * entries_count * sizeof(uint);
755 copy_bytes((const char*)search, (address)current, search_size);
756 FREE_C_HEAP_ARRAY(uint, search);
757 current += search_size;
758
759 // Write entries
760 entries_size = entries_count * sizeof(AOTCodeEntry); // New size
761 copy_bytes((_store_buffer + entries_offset), (address)current, entries_size);
762 current += entries_size;
763 uint size = (current - start);
764 assert(size <= total_size, "%d > %d", size , total_size);
765
766 log_debug(aot, codecache, exit)(" Adapters: total=%u", adapters_count);
767 log_debug(aot, codecache, exit)(" Shared Blobs: total=%d", shared_blobs_count);
768 log_debug(aot, codecache, exit)(" C1 Blobs: total=%d", C1_blobs_count);
769 log_debug(aot, codecache, exit)(" C2 Blobs: total=%d", C2_blobs_count);
770 log_debug(aot, codecache, exit)(" AOT code cache size: %u bytes, max entry's size: %u bytes", size, max_size);
771
772 // Finalize header
773 AOTCodeCache::Header* header = (AOTCodeCache::Header*)start;
774 header->init(size, (uint)strings_count, strings_offset,
775 entries_count, new_entries_offset,
776 adapters_count, shared_blobs_count,
777 C1_blobs_count, C2_blobs_count);
778
779 log_info(aot, codecache, exit)("Wrote %d AOT code entries to AOT Code Cache", entries_count);
780 }
781 return true;
782 }
783
784 //------------------Store/Load AOT code ----------------------
785
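// Illustrative usage from a blob or adapter generator (a sketch only; the real call sites live
// in the stub/adapter generation code, and 'generate_blob' below is a hypothetical placeholder):
//
//   int entry_offsets[count];
//   CodeBlob* blob = AOTCodeCache::load_code_blob(kind, id, name, count, entry_offsets);
//   if (blob == nullptr) {
//     blob = generate_blob(...); // generate the code as usual
//     AOTCodeCache::store_code_blob(*blob, kind, id, name, count, entry_offsets);
//   }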
786 bool AOTCodeCache::store_code_blob(CodeBlob& blob, AOTCodeEntry::Kind entry_kind, uint id, const char* name, int entry_offset_count, int* entry_offsets) {
787 AOTCodeCache* cache = open_for_dump();
788 if (cache == nullptr) {
789 return false;
790 }
791 assert(AOTCodeEntry::is_valid_entry_kind(entry_kind), "invalid entry_kind %d", entry_kind);
792
793 if (AOTCodeEntry::is_adapter(entry_kind) && !is_dumping_adapter()) {
794 return false;
795 }
796 if (AOTCodeEntry::is_blob(entry_kind) && !is_dumping_stub()) {
797 return false;
798 }
799 log_debug(aot, codecache, stubs)("Writing blob '%s' (id=%u, kind=%s) to AOT Code Cache", name, id, aot_code_entry_kind_name[entry_kind]);
800
801 #ifdef ASSERT
802 LogStreamHandle(Trace, aot, codecache, stubs) log;
803 if (log.is_enabled()) {
804 FlagSetting fs(PrintRelocations, true);
805 blob.print_on(&log);
806 }
807 #endif
808 // We need to take a lock to prevent a race between compiler threads generating AOT code
809 // and the main thread generating adapters.
810 MutexLocker ml(Compile_lock);
811 if (!cache->align_write()) {
812 return false;
813 }
814 uint entry_position = cache->_write_position;
815
816 // Write name
817 uint name_offset = cache->_write_position - entry_position;
818 uint name_size = (uint)strlen(name) + 1; // Includes terminating '\0'
819 uint n = cache->write_bytes(name, name_size);
820 if (n != name_size) {
821 return false;
822 }
823
824 // Write CodeBlob
825 if (!cache->align_write()) {
826 return false;
827 }
828 uint blob_offset = cache->_write_position - entry_position;
829 address archive_buffer = cache->reserve_bytes(blob.size());
830 if (archive_buffer == nullptr) {
831 return false;
832 }
833 CodeBlob::archive_blob(&blob, archive_buffer);
834
835 uint reloc_data_size = blob.relocation_size();
836 n = cache->write_bytes((address)blob.relocation_begin(), reloc_data_size);
837 if (n != reloc_data_size) {
838 return false;
839 }
840
841 bool has_oop_maps = false;
842 if (blob.oop_maps() != nullptr) {
843 if (!cache->write_oop_map_set(blob)) {
844 return false;
845 }
846 has_oop_maps = true;
847 }
848
849 #ifndef PRODUCT
850 // Write asm remarks
851 if (!cache->write_asm_remarks(blob)) {
852 return false;
853 }
854 if (!cache->write_dbg_strings(blob)) {
855 return false;
856 }
857 #endif /* PRODUCT */
858
859 if (!cache->write_relocations(blob)) {
860 return false;
861 }
862
863 // Write entries offsets
864 n = cache->write_bytes(&entry_offset_count, sizeof(int));
865 if (n != sizeof(int)) {
866 return false;
867 }
868 for (int i = 0; i < entry_offset_count; i++) {
869 uint32_t off = (uint32_t)entry_offsets[i];
870 n = cache->write_bytes(&off, sizeof(uint32_t));
871 if (n != sizeof(uint32_t)) {
872 return false;
873 }
874 }
875 uint entry_size = cache->_write_position - entry_position;
876 AOTCodeEntry* entry = new(cache) AOTCodeEntry(entry_kind, encode_id(entry_kind, id),
877 entry_position, entry_size, name_offset, name_size,
878 blob_offset, has_oop_maps, blob.content_begin());
879 log_debug(aot, codecache, stubs)("Wrote code blob '%s' (id=%u, kind=%s) to AOT Code Cache", name, id, aot_code_entry_kind_name[entry_kind]);
882
883 CodeBlob* AOTCodeCache::load_code_blob(AOTCodeEntry::Kind entry_kind, uint id, const char* name, int entry_offset_count, int* entry_offsets) {
884 AOTCodeCache* cache = open_for_use();
885 if (cache == nullptr) {
886 return nullptr;
887 }
888 assert(AOTCodeEntry::is_valid_entry_kind(entry_kind), "invalid entry_kind %d", entry_kind);
889
890 if (AOTCodeEntry::is_adapter(entry_kind) && !is_using_adapter()) {
891 return nullptr;
892 }
893 if (AOTCodeEntry::is_blob(entry_kind) && !is_using_stub()) {
894 return nullptr;
895 }
896 log_debug(aot, codecache, stubs)("Reading blob '%s' (id=%u, kind=%s) from AOT Code Cache", name, id, aot_code_entry_kind_name[entry_kind]);
897
898 AOTCodeEntry* entry = cache->find_entry(entry_kind, encode_id(entry_kind, id));
899 if (entry == nullptr) {
900 return nullptr;
901 }
902 AOTCodeReader reader(cache, entry);
903 CodeBlob* blob = reader.compile_code_blob(name, entry_offset_count, entry_offsets);
904
905 log_debug(aot, codecache, stubs)("%sRead blob '%s' (id=%u, kind=%s) from AOT Code Cache",
906 (blob == nullptr? "Failed to " : ""), name, id, aot_code_entry_kind_name[entry_kind]);
907 return blob;
908 }
909
910 CodeBlob* AOTCodeReader::compile_code_blob(const char* name, int entry_offset_count, int* entry_offsets) {
911 uint entry_position = _entry->offset();
912
913 // Read name
914 uint name_offset = entry_position + _entry->name_offset();
915 uint name_size = _entry->name_size(); // Includes terminating '\0'
916 const char* stored_name = addr(name_offset);
917
918 if (strncmp(stored_name, name, (name_size - 1)) != 0) {
919 log_warning(aot, codecache, stubs)("Saved blob's name '%s' is different from the expected name '%s'",
920 stored_name, name);
921 set_lookup_failed(); // Skip this blob
922 return nullptr;
923 }
924
925 // Read archived code blob
926 uint offset = entry_position + _entry->blob_offset();
927 CodeBlob* archived_blob = (CodeBlob*)addr(offset);
928 offset += archived_blob->size();
929
930 address reloc_data = (address)addr(offset);
931 offset += archived_blob->relocation_size();
932 set_read_position(offset);
933
934 ImmutableOopMapSet* oop_maps = nullptr;
935 if (_entry->has_oop_maps()) {
936 oop_maps = read_oop_map_set();
937 }
938
939 #ifndef PRODUCT
940 AsmRemarks asm_remarks;
941 read_asm_remarks(asm_remarks);
942 DbgStrings dbg_strings;
943 read_dbg_strings(dbg_strings);
944 #endif // PRODUCT
945
946 CodeBlob* code_blob = CodeBlob::create(archived_blob,
947 stored_name,
948 reloc_data,
949 oop_maps
950 #ifndef PRODUCT
951 , asm_remarks
952 , dbg_strings
953 #endif
954 );
955 if (code_blob == nullptr) { // no space left in CodeCache
956 return nullptr;
957 }
958
959 fix_relocations(code_blob);
960
961 // Read entries offsets
962 offset = read_position();
963 int stored_count = *(int*)addr(offset);
964 assert(stored_count == entry_offset_count, "entry offset count mismatch, count in AOT code cache=%d, expected=%d", stored_count, entry_offset_count);
965 offset += sizeof(int);
966 set_read_position(offset);
967 for (int i = 0; i < stored_count; i++) {
968 uint32_t off = *(uint32_t*)addr(offset);
969 offset += sizeof(uint32_t);
970 const char* entry_name = (_entry->kind() == AOTCodeEntry::Adapter) ? AdapterHandlerEntry::entry_name(i) : "";
971 log_trace(aot, codecache, stubs)("Reading adapter '%s:%s' (0x%x) offset: 0x%x from AOT Code Cache",
972 stored_name, entry_name, _entry->id(), off);
973 entry_offsets[i] = off;
974 }
975
976 #ifdef ASSERT
977 LogStreamHandle(Trace, aot, codecache, stubs) log;
978 if (log.is_enabled()) {
979 FlagSetting fs(PrintRelocations, true);
980 code_blob->print_on(&log);
981 }
982 #endif
983 return code_blob;
984 }
985
986 // ------------ process code and data --------------
987
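// For every relocation in the blob one extra uint is written: runtime call and external word
// targets are encoded as AOTCodeAddressTable ids so they can be re-resolved by fix_relocations()
// at load time; other relocation types keep the default value 0.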
988 bool AOTCodeCache::write_relocations(CodeBlob& code_blob) {
989 GrowableArray<uint> reloc_data;
990 RelocIterator iter(&code_blob);
991 LogStreamHandle(Trace, aot, codecache, reloc) log;
992 while (iter.next()) {
993 int idx = reloc_data.append(0); // default value
994 switch (iter.type()) {
995 case relocInfo::none:
996 break;
997 case relocInfo::runtime_call_type: {
998 // Record offset of runtime destination
999 CallRelocation* r = (CallRelocation*)iter.reloc();
1000 address dest = r->destination();
1001 if (dest == r->addr()) { // possible call via trampoline on Aarch64
1002 dest = (address)-1; // do nothing in this case when loading this relocation
1003 }
1004 reloc_data.at_put(idx, _table->id_for_address(dest, iter, &code_blob));
1005 break;
1006 }
1007 case relocInfo::runtime_call_w_cp_type:
1008 fatal("runtime_call_w_cp_type unimplemented");
1009 break;
1010 case relocInfo::external_word_type: {
1011 // Record offset of runtime target
1012 address target = ((external_word_Relocation*)iter.reloc())->target();
1013 reloc_data.at_put(idx, _table->id_for_address(target, iter, &code_blob));
1014 break;
1015 }
1016 case relocInfo::internal_word_type:
1017 break;
1018 case relocInfo::section_word_type:
1019 break;
1020 case relocInfo::post_call_nop_type:
1021 break;
1022 default:
1023 fatal("relocation %d unimplemented", (int)iter.type());
1024 break;
1025 }
1026 if (log.is_enabled()) {
1027 iter.print_current_on(&log);
1028 }
1029 }
1030
1031 // Write additional relocation data: uint per relocation
1032 // Write the count first
1033 int count = reloc_data.length();
1034 write_bytes(&count, sizeof(int));
1035 for (GrowableArrayIterator<uint> iter = reloc_data.begin();
1036 iter != reloc_data.end(); ++iter) {
1037 uint value = *iter;
1038 int n = write_bytes(&value, sizeof(uint));
1039 if (n != sizeof(uint)) {
1040 return false;
1041 }
1042 }
1043 return true;
1044 }
1045
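// Re-apply the extra relocation data recorded by write_relocations(): resolve table ids back to
// addresses and rebase internal/section word relocations to the blob's new location.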
1046 void AOTCodeReader::fix_relocations(CodeBlob* code_blob) {
1047 LogStreamHandle(Trace, aot, reloc) log;
1048 uint offset = read_position();
1049 int count = *(int*)addr(offset);
1050 offset += sizeof(int);
1051 if (log.is_enabled()) {
1052 log.print_cr("======== extra relocations count=%d", count);
1053 }
1054 uint* reloc_data = (uint*)addr(offset);
1055 offset += (count * sizeof(uint));
1056 set_read_position(offset);
1057
1058 RelocIterator iter(code_blob);
1059 int j = 0;
1060 while (iter.next()) {
1061 switch (iter.type()) {
1062 case relocInfo::none:
1063 break;
1064 case relocInfo::runtime_call_type: {
1065 address dest = _cache->address_for_id(reloc_data[j]);
1066 if (dest != (address)-1) {
1067 ((CallRelocation*)iter.reloc())->set_destination(dest);
1068 }
1069 break;
1070 }
1071 case relocInfo::runtime_call_w_cp_type:
1072 fatal("runtime_call_w_cp_type unimplemented");
1073 break;
1074 case relocInfo::external_word_type: {
1075 address target = _cache->address_for_id(reloc_data[j]);
1076 // Add external address to global table
1077 int index = ExternalsRecorder::find_index(target);
1078 // Update index in relocation
1079 Relocation::add_jint(iter.data(), index);
1080 external_word_Relocation* reloc = (external_word_Relocation*)iter.reloc();
1081 assert(reloc->target() == target, "sanity");
1082 reloc->set_value(target); // Patch address in the code
1083 break;
1084 }
1085 case relocInfo::internal_word_type: {
1086 internal_word_Relocation* r = (internal_word_Relocation*)iter.reloc();
1087 r->fix_relocation_after_aot_load(aot_code_entry()->dumptime_content_start_addr(), code_blob->content_begin());
1088 break;
1089 }
1090 case relocInfo::section_word_type: {
1091 section_word_Relocation* r = (section_word_Relocation*)iter.reloc();
1092 r->fix_relocation_after_aot_load(aot_code_entry()->dumptime_content_start_addr(), code_blob->content_begin());
1093 break;
1094 }
1095 case relocInfo::post_call_nop_type:
1096 break;
1097 default:
1098 fatal("relocation %d unimplemented", (int)iter.type());
1099 break;
1100 }
1101 if (log.is_enabled()) {
1102 iter.print_current_on(&log);
1103 }
1104 j++;
1105 }
1106 assert(j == count, "sanity");
1107 }
1108
1109 bool AOTCodeCache::write_oop_map_set(CodeBlob& cb) {
1110 ImmutableOopMapSet* oopmaps = cb.oop_maps();
1111 int oopmaps_size = oopmaps->nr_of_bytes();
1112 if (!write_bytes(&oopmaps_size, sizeof(int))) {
1113 return false;
1114 }
1115 uint n = write_bytes(oopmaps, oopmaps->nr_of_bytes());
1116 if (n != (uint)oopmaps->nr_of_bytes()) {
1117 return false;
1118 }
1119 return true;
1120 }
1121
1122 ImmutableOopMapSet* AOTCodeReader::read_oop_map_set() {
1123 uint offset = read_position();
1124 int size = *(int *)addr(offset);
1125 offset += sizeof(int);
1126 ImmutableOopMapSet* oopmaps = (ImmutableOopMapSet *)addr(offset);
1127 offset += size;
1128 set_read_position(offset);
1129 return oopmaps;
1130 }
1131
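// Asm remarks and debug strings (non-product builds only) are stored as ids into the C string
// table; asm remarks additionally record the code offset they annotate.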
1132 #ifndef PRODUCT
1133 bool AOTCodeCache::write_asm_remarks(CodeBlob& cb) {
1134 // Write asm remarks
1135 uint* count_ptr = (uint *)reserve_bytes(sizeof(uint));
1136 if (count_ptr == nullptr) {
1137 return false;
1138 }
1139 uint count = 0;
1140 bool result = cb.asm_remarks().iterate([&] (uint offset, const char* str) -> bool {
1141 log_trace(aot, codecache, stubs)("asm remark offset=%d, str='%s'", offset, str);
1142 uint n = write_bytes(&offset, sizeof(uint));
1143 if (n != sizeof(uint)) {
1144 return false;
1145 }
1146 const char* cstr = add_C_string(str);
1147 int id = _table->id_for_C_string((address)cstr);
1148 assert(id != -1, "asm remark string '%s' not found in AOTCodeAddressTable", str);
1149 n = write_bytes(&id, sizeof(int));
1150 if (n != sizeof(int)) {
1151 return false;
1152 }
1153 count += 1;
1154 return true;
1155 });
1156 *count_ptr = count;
1157 return result;
1158 }
1159
1160 void AOTCodeReader::read_asm_remarks(AsmRemarks& asm_remarks) {
1161 // Read asm remarks
1162 uint offset = read_position();
1163 uint count = *(uint *)addr(offset);
1164 offset += sizeof(uint);
1165 for (uint i = 0; i < count; i++) {
1166 uint remark_offset = *(uint *)addr(offset);
1167 offset += sizeof(uint);
1168 int remark_string_id = *(uint *)addr(offset);
1169 offset += sizeof(int);
1170 const char* remark = (const char*)_cache->address_for_C_string(remark_string_id);
1171 asm_remarks.insert(remark_offset, remark);
1172 }
1173 set_read_position(offset);
1174 }
1175
1176 bool AOTCodeCache::write_dbg_strings(CodeBlob& cb) {
1177 // Write dbg strings
1178 uint* count_ptr = (uint *)reserve_bytes(sizeof(uint));
1179 if (count_ptr == nullptr) {
1180 return false;
1181 }
1182 uint count = 0;
1183 bool result = cb.dbg_strings().iterate([&] (const char* str) -> bool {
1184 log_trace(aot, codecache, stubs)("dbg string=%s", str);
1185 const char* cstr = add_C_string(str);
1186 int id = _table->id_for_C_string((address)cstr);
1187 assert(id != -1, "dbg string '%s' not found in AOTCodeAddressTable", str);
1188 uint n = write_bytes(&id, sizeof(int));
1189 if (n != sizeof(int)) {
1190 return false;
1191 }
1192 count += 1;
1193 return true;
1194 });
1195 *count_ptr = count;
1196 return result;
1197 }
1198
1199 void AOTCodeReader::read_dbg_strings(DbgStrings& dbg_strings) {
1200 // Read dbg strings
1201 uint offset = read_position();
1202 uint count = *(uint *)addr(offset);
1203 offset += sizeof(uint);
1204 for (uint i = 0; i < count; i++) {
1205 int string_id = *(uint *)addr(offset);
1206 offset += sizeof(int);
1207 const char* str = (const char*)_cache->address_for_C_string(string_id);
1208 dbg_strings.insert(str);
1209 }
1210 set_read_position(offset);
1211 }
1212 #endif // PRODUCT
1213
1214 //======================= AOTCodeAddressTable ===============
1215
1216 // address table ids for generated routines, external addresses and C
1217 // string addresses are partitioned into positive integer ranges
1218 // defined by the following positive base and max values
1219 // i.e. [_extrs_base, _extrs_base + _extrs_max -1],
1220 // [_blobs_base, _blobs_base + _blobs_max -1],
1221 // ...
1222 // [_c_str_base, _c_str_base + _c_str_max -1],
1223
1224 #define _extrs_max 100
1225 #define _stubs_max 3
1226
1227 #define _shared_blobs_max 20
1228 #define _C1_blobs_max 10
1229 #define _blobs_max (_shared_blobs_max+_C1_blobs_max)
1230 #define _all_max (_extrs_max+_stubs_max+_blobs_max)
1231
1232 #define _extrs_base 0
1233 #define _stubs_base (_extrs_base + _extrs_max)
1234 #define _shared_blobs_base (_stubs_base + _stubs_max)
1235 #define _C1_blobs_base (_shared_blobs_base + _shared_blobs_max)
1236 #define _blobs_end (_shared_blobs_base + _blobs_max)
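// With the current sizes this yields: externals [0, 99], stubs [100, 102],
// shared blobs [103, 122], C1 blobs [123, 132]; C string ids start at _all_max (133).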
1237
1238 #define SET_ADDRESS(type, addr) \
1239 { \
1240 type##_addr[type##_length++] = (address) (addr); \
1241 assert(type##_length <= type##_max, "increase size"); \
1242 }
1243
1244 static bool initializing_extrs = false;
1245
1246 void AOTCodeAddressTable::init_extrs() {
1247 if (_extrs_complete || initializing_extrs) return; // Done already
1248
1249 assert(_blobs_end <= _all_max, "AOTCodeAddress table ranges need adjusting");
1250
1251 initializing_extrs = true;
1252 _extrs_addr = NEW_C_HEAP_ARRAY(address, _extrs_max, mtCode);
1253
1254 _extrs_length = 0;
1255
1256 // Record addresses of VM runtime methods
1257 SET_ADDRESS(_extrs, SharedRuntime::fixup_callers_callsite);
1258 SET_ADDRESS(_extrs, SharedRuntime::handle_wrong_method);
1259 SET_ADDRESS(_extrs, SharedRuntime::handle_wrong_method_abstract);
1260 SET_ADDRESS(_extrs, SharedRuntime::handle_wrong_method_ic_miss);
1261 #if defined(AARCH64) && !defined(ZERO)
1262 SET_ADDRESS(_extrs, JavaThread::aarch64_get_thread_helper);
1263 #endif
1264 {
1265 // Required by Shared blobs
1266 SET_ADDRESS(_extrs, Deoptimization::fetch_unroll_info);
1267 SET_ADDRESS(_extrs, Deoptimization::unpack_frames);
1268 SET_ADDRESS(_extrs, SafepointSynchronize::handle_polling_page_exception);
1269 SET_ADDRESS(_extrs, SharedRuntime::resolve_opt_virtual_call_C);
1270 SET_ADDRESS(_extrs, SharedRuntime::resolve_virtual_call_C);
1271 SET_ADDRESS(_extrs, SharedRuntime::resolve_static_call_C);
1272 SET_ADDRESS(_extrs, SharedRuntime::throw_delayed_StackOverflowError);
1273 SET_ADDRESS(_extrs, SharedRuntime::throw_AbstractMethodError);
1274 SET_ADDRESS(_extrs, SharedRuntime::throw_IncompatibleClassChangeError);
1275 SET_ADDRESS(_extrs, SharedRuntime::throw_NullPointerException_at_call);
1276 }
1277
1278 #ifdef COMPILER1
1279 {
1280 // Required by C1 blobs
1281 SET_ADDRESS(_extrs, static_cast<int (*)(oopDesc*)>(SharedRuntime::dtrace_object_alloc));
1282 SET_ADDRESS(_extrs, SharedRuntime::exception_handler_for_return_address);
1283 SET_ADDRESS(_extrs, SharedRuntime::register_finalizer);
1284 SET_ADDRESS(_extrs, Runtime1::is_instance_of);
1285 SET_ADDRESS(_extrs, Runtime1::exception_handler_for_pc);
1286 SET_ADDRESS(_extrs, Runtime1::check_abort_on_vm_exception);
1287 SET_ADDRESS(_extrs, Runtime1::new_instance);
1288 SET_ADDRESS(_extrs, Runtime1::counter_overflow);
1289 SET_ADDRESS(_extrs, Runtime1::new_type_array);
1290 SET_ADDRESS(_extrs, Runtime1::new_object_array);
1291 SET_ADDRESS(_extrs, Runtime1::new_multi_array);
1292 SET_ADDRESS(_extrs, Runtime1::throw_range_check_exception);
1293 SET_ADDRESS(_extrs, Runtime1::throw_index_exception);
1294 SET_ADDRESS(_extrs, Runtime1::throw_div0_exception);
1295 SET_ADDRESS(_extrs, Runtime1::throw_null_pointer_exception);
1296 SET_ADDRESS(_extrs, Runtime1::throw_array_store_exception);
1297 SET_ADDRESS(_extrs, Runtime1::throw_class_cast_exception);
1298 SET_ADDRESS(_extrs, Runtime1::throw_incompatible_class_change_error);
1299 SET_ADDRESS(_extrs, Runtime1::is_instance_of);
1300 SET_ADDRESS(_extrs, Runtime1::monitorenter);
1301 SET_ADDRESS(_extrs, Runtime1::monitorexit);
1302 SET_ADDRESS(_extrs, Runtime1::deoptimize);
1303 SET_ADDRESS(_extrs, Runtime1::access_field_patching);
1304 SET_ADDRESS(_extrs, Runtime1::move_klass_patching);
1305 SET_ADDRESS(_extrs, Runtime1::move_mirror_patching);
1306 SET_ADDRESS(_extrs, Runtime1::move_appendix_patching);
1307 SET_ADDRESS(_extrs, Runtime1::predicate_failed_trap);
1308 SET_ADDRESS(_extrs, Runtime1::unimplemented_entry);
1309 SET_ADDRESS(_extrs, Thread::current);
1310 SET_ADDRESS(_extrs, CompressedKlassPointers::base_addr());
1311 #ifndef PRODUCT
1312 SET_ADDRESS(_extrs, os::breakpoint);
1313 #endif
1314 }
1315 #endif
1316
1317 #ifdef COMPILER2
1318 {
1319 // Required by C2 blobs
1320 SET_ADDRESS(_extrs, Deoptimization::uncommon_trap);
1321 SET_ADDRESS(_extrs, OptoRuntime::handle_exception_C);
1322 SET_ADDRESS(_extrs, OptoRuntime::new_instance_C);
1323 SET_ADDRESS(_extrs, OptoRuntime::new_array_C);
1324 SET_ADDRESS(_extrs, OptoRuntime::new_array_nozero_C);
1325 SET_ADDRESS(_extrs, OptoRuntime::multianewarray2_C);
1326 SET_ADDRESS(_extrs, OptoRuntime::multianewarray3_C);
1327 SET_ADDRESS(_extrs, OptoRuntime::multianewarray4_C);
1328 SET_ADDRESS(_extrs, OptoRuntime::multianewarray5_C);
1329 SET_ADDRESS(_extrs, OptoRuntime::multianewarrayN_C);
1330 #if INCLUDE_JVMTI
1331 SET_ADDRESS(_extrs, SharedRuntime::notify_jvmti_vthread_start);
1332 SET_ADDRESS(_extrs, SharedRuntime::notify_jvmti_vthread_end);
1333 SET_ADDRESS(_extrs, SharedRuntime::notify_jvmti_vthread_mount);
1334 SET_ADDRESS(_extrs, SharedRuntime::notify_jvmti_vthread_unmount);
1335 #endif
1336 SET_ADDRESS(_extrs, OptoRuntime::complete_monitor_locking_C);
1337 SET_ADDRESS(_extrs, OptoRuntime::monitor_notify_C);
1338 SET_ADDRESS(_extrs, OptoRuntime::monitor_notifyAll_C);
1339 SET_ADDRESS(_extrs, OptoRuntime::rethrow_C);
1340 SET_ADDRESS(_extrs, OptoRuntime::slow_arraycopy_C);
1341 SET_ADDRESS(_extrs, OptoRuntime::register_finalizer_C);
1342 #if defined(AARCH64)
1343 SET_ADDRESS(_extrs, JavaThread::verify_cross_modify_fence_failure);
1344 #endif // AARCH64
1345 }
1346 #endif // COMPILER2
1347
1348 #if INCLUDE_G1GC
1349 SET_ADDRESS(_extrs, G1BarrierSetRuntime::write_ref_field_post_entry);
1350 SET_ADDRESS(_extrs, G1BarrierSetRuntime::write_ref_field_pre_entry);
1351 #endif
1352 #if INCLUDE_SHENANDOAHGC
1353 SET_ADDRESS(_extrs, ShenandoahRuntime::write_ref_field_pre);
1354 SET_ADDRESS(_extrs, ShenandoahRuntime::load_reference_barrier_phantom);
1355 SET_ADDRESS(_extrs, ShenandoahRuntime::load_reference_barrier_phantom_narrow);
1356 #endif
1357 #if INCLUDE_ZGC
1358 SET_ADDRESS(_extrs, ZBarrierSetRuntime::load_barrier_on_phantom_oop_field_preloaded_addr());
1359 #if defined(AMD64)
1360 SET_ADDRESS(_extrs, &ZPointerLoadShift);
1361 #endif
1362 #endif
1363 #ifndef ZERO
1364 #if defined(AMD64) || defined(AARCH64) || defined(RISCV64)
1365 SET_ADDRESS(_extrs, MacroAssembler::debug64);
1366 #endif
1367 #endif // ZERO
1368
1369 _extrs_complete = true;
1370 log_debug(aot, codecache, init)("External addresses recorded");
1371 }
1372
1373 static bool initializing_early_stubs = false;
1374
1375 void AOTCodeAddressTable::init_early_stubs() {
1376 if (_complete || initializing_early_stubs) return; // Done already
1377 initializing_early_stubs = true;
1378 _stubs_addr = NEW_C_HEAP_ARRAY(address, _stubs_max, mtCode);
1379 _stubs_length = 0;
1380 SET_ADDRESS(_stubs, StubRoutines::forward_exception_entry());
1381
1382 {
1383 // Required by C1 blobs
1384 #if defined(AMD64) && !defined(ZERO)
1385 SET_ADDRESS(_stubs, StubRoutines::x86::double_sign_flip());
1386 SET_ADDRESS(_stubs, StubRoutines::x86::d2l_fixup());
1387 #endif // AMD64
1388 }
1389
1390 _early_stubs_complete = true;
1391 log_info(aot, codecache, init)("Early stubs recorded");
1392 }
1393
1394 static bool initializing_shared_blobs = false;
1395
1396 void AOTCodeAddressTable::init_shared_blobs() {
1397 if (_complete || initializing_shared_blobs) return; // Done already
1398 initializing_shared_blobs = true;
1399 address* blobs_addr = NEW_C_HEAP_ARRAY(address, _blobs_max, mtCode);
1400 _shared_blobs_addr = blobs_addr;
1401 _C1_blobs_addr = _shared_blobs_addr + _shared_blobs_max;
1402 _shared_blobs_length = _C1_blobs_length = 0;
1403
1404 // clear the address table
1405 memset(blobs_addr, 0, sizeof(address)* _blobs_max);
1406
1407 // Record addresses of generated code blobs
1408 SET_ADDRESS(_shared_blobs, SharedRuntime::get_handle_wrong_method_stub());
1409 SET_ADDRESS(_shared_blobs, SharedRuntime::get_ic_miss_stub());
1410 SET_ADDRESS(_shared_blobs, SharedRuntime::deopt_blob()->unpack());
1411 SET_ADDRESS(_shared_blobs, SharedRuntime::deopt_blob()->unpack_with_exception());
1412 SET_ADDRESS(_shared_blobs, SharedRuntime::deopt_blob()->unpack_with_reexecution());
1413 SET_ADDRESS(_shared_blobs, SharedRuntime::deopt_blob()->unpack_with_exception_in_tls());
1414 #if INCLUDE_JVMCI
1415 if (EnableJVMCI) {
1416 SET_ADDRESS(_shared_blobs, SharedRuntime::deopt_blob()->uncommon_trap());
1417 SET_ADDRESS(_shared_blobs, SharedRuntime::deopt_blob()->implicit_exception_uncommon_trap());
1418 }
1419 #endif
1420
1421 _shared_blobs_complete = true;
1422 log_debug(aot, codecache, init)("Early shared blobs recorded");
1423 _complete = true;
1424 }
1425
1426 void AOTCodeAddressTable::init_early_c1() {
1427 #ifdef COMPILER1
1428 // Runtime1 Blobs
1429 for (int i = 0; i <= (int)C1StubId::forward_exception_id; i++) {
1430 C1StubId id = (C1StubId)i;
1431 if (Runtime1::blob_for(id) == nullptr) {
1432 log_info(aot, codecache, init)("C1 blob %s is missing", Runtime1::name_for(id));
1433 continue;
1434 }
1435 if (Runtime1::entry_for(id) == nullptr) {
1436 log_info(aot, codecache, init)("C1 blob %s is missing entry", Runtime1::name_for(id));
1437 continue;
1438 }
1439 address entry = Runtime1::entry_for(id);
1440 SET_ADDRESS(_C1_blobs, entry);
1441 }
1442 #endif // COMPILER1
1443 assert(_C1_blobs_length <= _C1_blobs_max, "increase _C1_blobs_max to %d", _C1_blobs_length);
1444 _early_c1_complete = true;
1445 }
1446
1447 #undef SET_ADDRESS
1448
1449 AOTCodeAddressTable::~AOTCodeAddressTable() {
1450 if (_extrs_addr != nullptr) {
1451 FREE_C_HEAP_ARRAY(address, _extrs_addr);
1452 }
1453 if (_shared_blobs_addr != nullptr) {
1454 FREE_C_HEAP_ARRAY(address, _shared_blobs_addr);
1455 }
1456 }
1457
1458 #ifdef PRODUCT
1459 #define MAX_STR_COUNT 200
1460 #else
1461 #define MAX_STR_COUNT 500
1462 #endif
1463 #define _c_str_max MAX_STR_COUNT
1464 static const int _c_str_base = _all_max;
1465
1466 static const char* _C_strings_in[MAX_STR_COUNT] = {nullptr}; // Incoming strings
1467 static const char* _C_strings[MAX_STR_COUNT] = {nullptr}; // Our duplicates
1468 static int _C_strings_count = 0;
1469 static int _C_strings_s[MAX_STR_COUNT] = {0};
1470 static int _C_strings_id[MAX_STR_COUNT] = {0};
1471 static int _C_strings_used = 0;
1472
1484 // still be executed on VM exit after _cache is freed.
1485 char* p = NEW_C_HEAP_ARRAY(char, strings_size+1, mtCode);
1486 memcpy(p, addr(strings_offset), strings_size);
1487 _C_strings_buf = p;
1488 assert(strings_count <= MAX_STR_COUNT, "sanity");
1489 for (uint i = 0; i < strings_count; i++) {
1490 _C_strings[i] = p;
1491 uint len = string_lengths[i];
1492 _C_strings_s[i] = i;
1493 _C_strings_id[i] = i;
1494 p += len;
1495 }
1496 assert((uint)(p - _C_strings_buf) <= strings_size, "(" INTPTR_FORMAT " - " INTPTR_FORMAT ") = %d > %d ", p2i(p), p2i(_C_strings_buf), (uint)(p - _C_strings_buf), strings_size);
1497 _C_strings_count = strings_count;
1498 _C_strings_used = strings_count;
1499 log_debug(aot, codecache, init)(" Loaded %d C strings of total length %d at offset %d from AOT Code Cache", _C_strings_count, strings_size, strings_offset);
1500 }
1501
1502 int AOTCodeCache::store_strings() {
1503 if (_C_strings_used > 0) {
1504 uint offset = _write_position;
1505 uint length = 0;
1506 uint* lengths = (uint *)reserve_bytes(sizeof(uint) * _C_strings_used);
1507 if (lengths == nullptr) {
1508 return -1;
1509 }
1510 for (int i = 0; i < _C_strings_used; i++) {
1511 const char* str = _C_strings[_C_strings_s[i]];
1512 uint len = (uint)strlen(str) + 1;
1513 length += len;
1514 assert(len < 1000, "big string: %s", str);
1515 lengths[i] = len;
1516 uint n = write_bytes(str, len);
1517 if (n != len) {
1518 return -1;
1519 }
1520 }
1521 log_debug(aot, codecache, exit)(" Wrote %d C strings of total length %d at offset %d to AOT Code Cache",
1522 _C_strings_used, length, offset);
1523 }
1524 return _C_strings_used;
1525 }
1526
1527 const char* AOTCodeCache::add_C_string(const char* str) {
1528 if (is_on_for_dump() && str != nullptr) {
1529 return _cache->_table->add_C_string(str);
1530 }
1531 return str;
1532 }
1533
1534 const char* AOTCodeAddressTable::add_C_string(const char* str) {
1535 if (_extrs_complete) {
1536 LogStreamHandle(Trace, aot, codecache, stringtable) log; // ctor outside lock
1537 MutexLocker ml(AOTCodeCStrings_lock, Mutex::_no_safepoint_check_flag);
1538 // Check previous strings address
1539 for (int i = 0; i < _C_strings_count; i++) {
1540 if (_C_strings_in[i] == str) {
1541 return _C_strings[i]; // Found previous one - return our duplicate
1542 } else if (strcmp(_C_strings[i], str) == 0) {
1543 return _C_strings[i];
1544 }
1545 }
1546 // Add new one
1547 if (_C_strings_count < MAX_STR_COUNT) {
1548 // The passed-in string can be freed and its memory become inaccessible.
1549 // Keep the original address but duplicate the string for future comparisons.
1550 _C_strings_id[_C_strings_count] = -1; // Init
1551 _C_strings_in[_C_strings_count] = str;
1552 const char* dup = os::strdup(str);
1553 _C_strings[_C_strings_count++] = dup;
1554 if (log.is_enabled()) {
1555 log.print_cr("add_C_string: [%d] " INTPTR_FORMAT " '%s'", _C_strings_count, p2i(dup), dup);
1556 }
1557 return dup;
1558 } else {
1559 fatal("Number of C strings >= MAX_STR_COUNT");
1560 }
1561 }
1562 return str;
1563 }
1564
1565 int AOTCodeAddressTable::id_for_C_string(address str) {
1566 if (str == nullptr) {
1567 return -1;
1568 }
1569 MutexLocker ml(AOTCodeCStrings_lock, Mutex::_no_safepoint_check_flag);
1570 for (int i = 0; i < _C_strings_count; i++) {
1571 if (_C_strings[i] == (const char*)str) { // found
1572 int id = _C_strings_id[i];
1573 if (id >= 0) {
1574 assert(id < _C_strings_used, "%d >= %d", id , _C_strings_used);
1575 return id; // Found recorded
1576 }
1577 // Not recorded yet, add a new id
1578 id = _C_strings_used++;
1579 _C_strings_s[id] = i;
1580 _C_strings_id[i] = id;
1581 return id;
1582 }
1583 }
1584 return -1;
1585 }
1586
1587 address AOTCodeAddressTable::address_for_C_string(int idx) {
1588 assert(idx < _C_strings_count, "sanity");
1589 return (address)_C_strings[idx];
1590 }
1591
1592 static int search_address(address addr, address* table, uint length) {
1593 for (int i = 0; i < (int)length; i++) {
1594 if (table[i] == addr) {
1595 return i;
1596 }
1597 }
1598 return -1;
1599 }
1600
1601 address AOTCodeAddressTable::address_for_id(int idx) {
1602 if (!_extrs_complete) {
1603 fatal("AOT Code Cache VM runtime addresses table is not complete");
1604 }
1605 if (idx == -1) {
1606 return (address)-1;
1607 }
1608 uint id = (uint)idx;
1609 // Special case for addresses encoded as an offset relative to os::init
1610 if (id > (_c_str_base + _c_str_max)) {
1611 return (address)os::init + idx;
1612 }
1613 if (idx < 0) {
1614 fatal("Incorrect id %d for AOT Code Cache addresses table", id);
1615 }
1616 // no need to compare unsigned id against 0
1617 if (/* id >= _extrs_base && */ id < _extrs_length) {
1618 return _extrs_addr[id - _extrs_base];
1619 }
1620 if (id >= _stubs_base && id < _stubs_base + _stubs_length) {
1621 return _stubs_addr[id - _stubs_base];
1622 }
1623 if (id >= _shared_blobs_base && id < _shared_blobs_base + _shared_blobs_length) {
1624 return _shared_blobs_addr[id - _shared_blobs_base];
1625 }
1626 if (id >= _C1_blobs_base && id < _C1_blobs_base + _C1_blobs_length) {
1627 return _C1_blobs_addr[id - _C1_blobs_base];
1628 }
1629 if (id >= _c_str_base && id < (_c_str_base + (uint)_C_strings_count)) {
1630 return address_for_C_string(id - _c_str_base);
1631 }
1632 fatal("Incorrect id %d for AOT Code Cache addresses table", id);
1633 return nullptr;
1634 }
1635
1636 int AOTCodeAddressTable::id_for_address(address addr, RelocIterator reloc, CodeBlob* code_blob) {
1637 if (!_extrs_complete) {
1638 fatal("AOT Code Cache VM runtime addresses table is not complete");
1639 }
1640 int id = -1;
1641 if (addr == (address)-1) { // Static call stub has jump to itself
1642 return id;
1643 }
1644 // Search for C string
1645 id = id_for_C_string(addr);
1646 if (id >= 0) {
1647 return id + _c_str_base;
1648 }
1649 if (StubRoutines::contains(addr)) {
1650 // Search in stubs
1651 id = search_address(addr, _stubs_addr, _stubs_length);
1652 if (id < 0) {
1653 StubCodeDesc* desc = StubCodeDesc::desc_for(addr);
1654 if (desc == nullptr) {
1655 desc = StubCodeDesc::desc_for(addr + frame::pc_return_offset);
1656 }
1657 const char* sub_name = (desc != nullptr) ? desc->name() : "<unknown>";
1658 fatal("Address " INTPTR_FORMAT " for Stub:%s is missing in AOT Code Cache addresses table", p2i(addr), sub_name);
1659 } else {
1660 return id + _stubs_base;
1661 }
1662 } else {
1663 CodeBlob* cb = CodeCache::find_blob(addr);
1664 if (cb != nullptr) {
1665 // Search in code blobs
1666 int id_base = _shared_blobs_base;
1667 id = search_address(addr, _shared_blobs_addr, _blobs_max);
1668 if (id < 0) {
1669 fatal("Address " INTPTR_FORMAT " for Blob:%s is missing in AOT Code Cache addresses table", p2i(addr), cb->name());
1670 } else {
1671 return id_base + id;
1672 }
1673 } else {
1674 // Search in runtime functions
1675 id = search_address(addr, _extrs_addr, _extrs_length);
1676 if (id < 0) {
1677 ResourceMark rm;
1678 const int buflen = 1024;
1679 char* func_name = NEW_RESOURCE_ARRAY(char, buflen);
1680 int offset = 0;
1681 if (os::dll_address_to_function_name(addr, func_name, buflen, &offset)) {
1682 if (offset > 0) {
1683 // Could be address of C string
1684 uint dist = (uint)pointer_delta(addr, (address)os::init, 1);
1685 log_debug(aot, codecache)("Address " INTPTR_FORMAT " (offset %d) for runtime target '%s' is missing in AOT Code Cache addresses table",
1686 p2i(addr), dist, (const char*)addr);
1687 assert(dist > (uint)(_all_max + MAX_STR_COUNT), "change encoding of distance");
1688 return dist;
1689 }
1690 reloc.print_current_on(tty);
1691 code_blob->print_on(tty);
1692 code_blob->print_code_on(tty);
1693 fatal("Address " INTPTR_FORMAT " for runtime target '%s+%d' is missing in AOT Code Cache addresses table", p2i(addr), func_name, offset);
1694 } else {
1695 reloc.print_current_on(tty);
1696 code_blob->print_on(tty);
1697 code_blob->print_code_on(tty);
1698 os::find(addr, tty);
1699 fatal("Address " INTPTR_FORMAT " for <unknown>/('%s') is missing in AOT Code Cache addresses table", p2i(addr), (const char*)addr);
1700 }
1701 } else {
1702 return _extrs_base + id;
1703 }
1704 }
1705 }
1706 return id;
1707 }
1708
1709 void AOTCodeCache::print_on(outputStream* st) {
1710 AOTCodeCache* cache = open_for_use();
1711 if (cache != nullptr) {
1712 uint count = cache->_load_header->entries_count();
1713 uint* search_entries = (uint*)cache->addr(cache->_load_header->entries_offset()); // [id, index]
1714 AOTCodeEntry* load_entries = (AOTCodeEntry*)(search_entries + 2 * count);
1715
1716 for (uint i = 0; i < count; i++) {
1717 // Use search_entries[] to order output
1718 int index = search_entries[2*i + 1];
1719 AOTCodeEntry* entry = &(load_entries[index]);
1720
1721 uint entry_position = entry->offset();
1722 uint name_offset = entry->name_offset() + entry_position;
1723 const char* saved_name = cache->addr(name_offset);
1724
1725 st->print_cr("%4u: entry_idx:%4u Kind:%u Id:%u size=%u '%s'",
1726 i, index, entry->kind(), entry->id(), entry->size(), saved_name);
1727 }
1728 } else {
1729 st->print_cr("failed to map code cache");
1730 }
1731 }
|
1 /*
2 * Copyright (c) 2023, 2025, Oracle and/or its affiliates. All rights reserved.
3 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
4 *
5 * This code is free software; you can redistribute it and/or modify it
6 * under the terms of the GNU General Public License version 2 only, as
7 * published by the Free Software Foundation.
8 *
9 * This code is distributed in the hope that it will be useful, but WITHOUT
10 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
11 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
12 * version 2 for more details (a copy is included in the LICENSE file that
13 * accompanied this code).
14 *
15 * You should have received a copy of the GNU General Public License version
16 * 2 along with this work; if not, write to the Free Software Foundation,
17 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
18 *
19 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
20 * or visit www.oracle.com if you need additional information or have any
21 * questions.
22 *
23 */
24
25
26 #include "asm/macroAssembler.hpp"
27 #include "cds/aotCacheAccess.hpp"
28 #include "cds/cds_globals.hpp"
29 #include "cds/cdsConfig.hpp"
30 #include "cds/heapShared.hpp"
31 #include "cds/metaspaceShared.hpp"
32 #include "ci/ciConstant.hpp"
33 #include "ci/ciEnv.hpp"
34 #include "ci/ciField.hpp"
35 #include "ci/ciMethod.hpp"
36 #include "ci/ciMethodData.hpp"
37 #include "ci/ciObject.hpp"
38 #include "ci/ciUtilities.inline.hpp"
39 #include "classfile/javaAssertions.hpp"
40 #include "classfile/stringTable.hpp"
41 #include "classfile/symbolTable.hpp"
42 #include "classfile/systemDictionary.hpp"
43 #include "classfile/vmClasses.hpp"
44 #include "classfile/vmIntrinsics.hpp"
45 #include "code/aotCodeCache.hpp"
46 #include "code/codeBlob.hpp"
47 #include "code/codeCache.hpp"
48 #include "code/oopRecorder.inline.hpp"
49 #include "compiler/abstractCompiler.hpp"
50 #include "compiler/compilationPolicy.hpp"
51 #include "compiler/compileBroker.hpp"
52 #include "compiler/compileTask.hpp"
53 #include "gc/g1/g1BarrierSetRuntime.hpp"
54 #include "gc/shared/gcConfig.hpp"
55 #include "logging/logStream.hpp"
56 #include "memory/memoryReserver.hpp"
57 #include "memory/universe.hpp"
58 #include "oops/klass.inline.hpp"
59 #include "oops/method.inline.hpp"
60 #include "oops/trainingData.hpp"
61 #include "prims/jvmtiThreadState.hpp"
62 #include "runtime/atomic.hpp"
63 #include "runtime/deoptimization.hpp"
64 #include "runtime/flags/flagSetting.hpp"
65 #include "runtime/globals_extension.hpp"
66 #include "runtime/handles.inline.hpp"
67 #include "runtime/java.hpp"
68 #include "runtime/jniHandles.inline.hpp"
69 #include "runtime/mutexLocker.hpp"
70 #include "runtime/os.inline.hpp"
71 #include "runtime/sharedRuntime.hpp"
72 #include "runtime/stubCodeGenerator.hpp"
73 #include "runtime/stubRoutines.hpp"
74 #include "runtime/timerTrace.hpp"
75 #include "runtime/threadIdentifier.hpp"
76 #include "utilities/copy.hpp"
77 #include "utilities/ostream.hpp"
78 #include "utilities/spinYield.hpp"
79 #ifdef COMPILER1
80 #include "c1/c1_Runtime1.hpp"
81 #include "c1/c1_LIRAssembler.hpp"
82 #include "gc/shared/c1/barrierSetC1.hpp"
83 #include "gc/g1/c1/g1BarrierSetC1.hpp"
84 #if INCLUDE_SHENANDOAHGC
85 #include "gc/shenandoah/c1/shenandoahBarrierSetC1.hpp"
86 #endif // INCLUDE_SHENANDOAHGC
87 #include "gc/z/c1/zBarrierSetC1.hpp"
88 #endif // COMPILER1
89 #ifdef COMPILER2
90 #include "opto/runtime.hpp"
91 #endif
92 #if INCLUDE_JVMCI
93 #include "jvmci/jvmci.hpp"
94 #endif
95 #if INCLUDE_G1GC
96 #include "gc/g1/g1BarrierSetRuntime.hpp"
97 #endif
98 #if INCLUDE_SHENANDOAHGC
99 #include "gc/shenandoah/shenandoahRuntime.hpp"
100 #endif
101 #if INCLUDE_ZGC
102 #include "gc/z/zBarrierSetRuntime.hpp"
103 #endif
104
105 #include <sys/stat.h>
106 #include <errno.h>
107
108 const char* aot_code_entry_kind_name[] = {
109 #define DECL_KIND_STRING(kind) XSTR(kind),
110 DO_AOTCODEENTRY_KIND(DECL_KIND_STRING)
111 #undef DECL_KIND_STRING
112 };
113
114 static elapsedTimer _t_totalLoad;
115 static elapsedTimer _t_totalRegister;
116 static elapsedTimer _t_totalFind;
117 static elapsedTimer _t_totalStore;
118
119 static bool enable_timers() {
120 return CITime || log_is_enabled(Info, init);
121 }
122
123 static void report_load_failure() {
124 if (AbortVMOnAOTCodeFailure) {
125 vm_exit_during_initialization("Unable to use AOT Code Cache.", nullptr);
126 }
127 log_info(aot, codecache, init)("Unable to use AOT Code Cache.");
128 AOTCodeCache::disable_caching();
129 }
130
131 static void report_store_failure() {
132 if (AbortVMOnAOTCodeFailure) {
133 tty->print_cr("Unable to create AOT Code Cache.");
134 vm_abort(false);
135 }
136 log_info(aot, codecache, exit)("Unable to create AOT Code Cache.");
137 AOTCodeCache::disable_caching();
138 }
139
140 // The sequence of AOT code caching flag and parameter settings:
141 //
142 // 1. The initial AOT code caching flags setting is done
143 // during call to CDSConfig::check_vm_args_consistency().
144 //
145 // 2. The earliest AOT code state check is done in compilationPolicy_init(),
146 // where we set the number of compiler threads for the AOT assembly phase.
147 //
148 // 3. We determine the presence of AOT code in the AOT Cache in
149 // MetaspaceShared::open_static_archive(), which is called
150 // after compilationPolicy_init() but before codeCache_init().
151 //
152 // 4. AOTCodeCache::initialize() is called during universe_init()
153 // and does the final AOT state and flag settings.
154 //
155 // 5. Finally AOTCodeCache::init2() is called after universe_init()
156 // when all GC settings are finalized.
157
158 // The following methods determine what we do with AOT code depending
159 // on the phase of the AOT process: assembly or production.
160
161 bool AOTCodeCache::is_dumping_adapter() {
162 return AOTAdapterCaching && is_on_for_dump();
163 }
164
165 bool AOTCodeCache::is_using_adapter() {
166 return AOTAdapterCaching && is_on_for_use();
167 }
168
169 bool AOTCodeCache::is_dumping_stub() {
170 return AOTStubCaching && is_on_for_dump();
171 }
172
173 bool AOTCodeCache::is_using_stub() {
174 return AOTStubCaching && is_on_for_use();
175 }
176
177 bool AOTCodeCache::is_dumping_code() {
178 return AOTCodeCaching && is_on_for_dump();
179 }
180
181 bool AOTCodeCache::is_using_code() {
182 return AOTCodeCaching && is_on_for_use();
183 }
184
185 // This is used before AOTCodeCache is initialized
186 // but after AOT (CDS) Cache flags consistency has been checked.
187 bool AOTCodeCache::maybe_dumping_code() {
188 return AOTCodeCaching && CDSConfig::is_dumping_final_static_archive();
189 }
190
191 // The following methods can be called regardless of AOT code cache status.
192 // Initially they are called during AOT flags parsing; the settings are finalized
193 // in AOTCodeCache::initialize().
194 void AOTCodeCache::enable_caching() {
195 FLAG_SET_ERGO_IF_DEFAULT(AOTCodeCaching, true);
196 FLAG_SET_ERGO_IF_DEFAULT(AOTStubCaching, true);
197 FLAG_SET_ERGO_IF_DEFAULT(AOTAdapterCaching, true);
198 }
199
200 void AOTCodeCache::disable_caching() {
201 FLAG_SET_ERGO(AOTCodeCaching, false);
202 FLAG_SET_ERGO(AOTStubCaching, false);
203 FLAG_SET_ERGO(AOTAdapterCaching, false);
204 }
205
206 bool AOTCodeCache::is_caching_enabled() {
207 return AOTCodeCaching || AOTStubCaching || AOTAdapterCaching;
208 }
209
210 static uint32_t encode_id(AOTCodeEntry::Kind kind, int id) {
211 assert(AOTCodeEntry::is_valid_entry_kind(kind), "invalid AOTCodeEntry kind %d", (int)kind);
212 // There can be a conflict of id between an Adapter and a *Blob, but that should not cause any functional issue
213 // because both id and kind are used to find an entry, and that combination should be unique
214 if (kind == AOTCodeEntry::Adapter) {
215 return id;
216 } else if (kind == AOTCodeEntry::SharedBlob) {
217 return id;
218 } else if (kind == AOTCodeEntry::C1Blob) {
219 return (int)SharedStubId::NUM_STUBIDS + id;
220 } else {
221 // kind must be AOTCodeEntry::C2Blob
222 return (int)SharedStubId::NUM_STUBIDS + COMPILER1_PRESENT((int)C1StubId::NUM_STUBIDS) + id;
223 }
224 }
225
226 static uint _max_aot_code_size = 0;
227 uint AOTCodeCache::max_aot_code_size() {
228 return _max_aot_code_size;
229 }
230
231 bool AOTCodeCache::is_C3_on() {
232 #if INCLUDE_JVMCI
233 if (UseJVMCICompiler) {
234 return (AOTCodeCaching) && UseC2asC3;
235 }
236 #endif
237 return false;
238 }
239
240 bool AOTCodeCache::is_code_load_thread_on() {
241 // We cannot trust AOTCodeCache status here, due to bootstrapping circularity.
242 // Compilation policy init runs before AOT cache is fully initialized, so the
243 // normal AOT cache status check would always fail.
244 // See: https://bugs.openjdk.org/browse/JDK-8358690
245 // return UseCodeLoadThread && is_using_code();
246 return UseCodeLoadThread && AOTCodeCaching && CDSConfig::is_using_archive();
247 }
248
249 bool AOTCodeCache::allow_const_field(ciConstant& value) {
250 return !is_on() || !is_dumping_code() // Restrict only when we generate cache
251 // Can not trust primitive too || !is_reference_type(value.basic_type())
252 // May disable this too for now || is_reference_type(value.basic_type()) && value.as_object()->should_be_constant()
253 ;
254 }
255
256 // It is called from MetaspaceShared::initialize_shared_spaces(),
257 // which is called from universe_init().
258 // At this point all AOT class linking settings are finalized
259 // and the AOT cache is open, so we can map the AOT code region.
260 void AOTCodeCache::initialize() {
261 if (!is_caching_enabled()) {
262 log_info(aot, codecache, init)("AOT Code Cache is not used: disabled.");
263 return;
264 }
265 #if defined(ZERO) || !(defined(AMD64) || defined(AARCH64))
266 log_info(aot, codecache, init)("AOT Code Cache is not supported on this platform.");
267 disable_caching();
268 return;
269 #else
270 assert(!FLAG_IS_DEFAULT(AOTCache), "AOTCache should be specified");
271
272 // Disable stubs caching until JDK-8357398 is fixed.
273 FLAG_SET_ERGO(AOTStubCaching, false);
274
275 if (VerifyOops) {
276 // Disable AOT stub caching when the VerifyOops flag is on.
277 // Oop verification code generates a lot of C strings which overflow
278 // the AOT C string table (which has a fixed size).
279 // The AOT C string table will be reworked later to handle such cases.
280 //
281 // Note: AOT adapters are not affected - they don't have oop operations.
282 log_info(aot, codecache, init)("AOT Stubs Caching is not supported with VerifyOops.");
283 FLAG_SET_ERGO(AOTStubCaching, false);
284 }
285
286 bool is_dumping = false;
287 bool is_using = false;
288 if (CDSConfig::is_dumping_final_static_archive() && CDSConfig::is_dumping_aot_linked_classes()) {
289 is_dumping = is_caching_enabled();
290 } else if (CDSConfig::is_using_archive() && CDSConfig::is_using_aot_linked_classes()) {
291 is_using = is_caching_enabled();
292 }
293 if (ClassInitBarrierMode > 0 && !(is_dumping && AOTCodeCaching)) {
294 log_info(aot, codecache, init)("Set ClassInitBarrierMode to 0 because AOT Code dumping is off.");
295 FLAG_SET_ERGO(ClassInitBarrierMode, 0);
296 }
297 if (!(is_dumping || is_using)) {
298 log_info(aot, codecache, init)("AOT Code Cache is not used: AOT Class Linking is not used.");
299 disable_caching();
300 return; // AOT code caching disabled on command line
301 }
302 // Reserve the AOT Cache region when we are dumping AOT code.
303 _max_aot_code_size = AOTCodeMaxSize;
304 if (is_dumping && !FLAG_IS_DEFAULT(AOTCodeMaxSize)) {
305 if (!is_aligned(AOTCodeMaxSize, os::vm_allocation_granularity())) {
306 _max_aot_code_size = align_up(AOTCodeMaxSize, os::vm_allocation_granularity());
307 log_debug(aot,codecache,init)("Max AOT Code Cache size is aligned up to %uK", (int)(max_aot_code_size()/K));
308 }
309 }
310 size_t aot_code_size = is_using ? AOTCacheAccess::get_aot_code_region_size() : 0;
311 if (is_using && aot_code_size == 0) {
312 log_info(aot, codecache, init)("AOT Code Cache is empty");
313 disable_caching();
314 return;
315 }
316 if (!open_cache(is_dumping, is_using)) {
317 if (is_using) {
318 report_load_failure();
319 } else {
320 report_store_failure();
321 }
322 return;
323 }
324 if (is_dumping) {
325 FLAG_SET_DEFAULT(FoldStableValues, false);
326 FLAG_SET_DEFAULT(ForceUnreachable, true);
327 }
328 FLAG_SET_DEFAULT(DelayCompilerStubsGeneration, false);
329 #endif // defined(ZERO) || !(defined(AMD64) || defined(AARCH64))
330 }
331
332 static AOTCodeCache* opened_cache = nullptr; // Use this until we verify the cache
333 AOTCodeCache* AOTCodeCache::_cache = nullptr;
334 DEBUG_ONLY( bool AOTCodeCache::_passed_init2 = false; )
335
336 // It is called after universe_init() when all GC settings are finalized.
337 void AOTCodeCache::init2() {
338 DEBUG_ONLY( _passed_init2 = true; )
339 if (opened_cache == nullptr) {
340 return;
341 }
342 // After Universe initialized
343 BarrierSet* bs = BarrierSet::barrier_set();
344 if (bs->is_a(BarrierSet::CardTableBarrierSet)) {
345 address byte_map_base = ci_card_table_address_as<address>();
346 if (is_on_for_dump() && !external_word_Relocation::can_be_relocated(byte_map_base)) {
347 // Bail out since we can't encode card table base address with relocation
348 log_warning(aot, codecache, init)("Can't create AOT Code Cache because card table base address is not relocatable: " INTPTR_FORMAT, p2i(byte_map_base));
349 close();
350 report_load_failure();
351 return;
352 }
353 }
354 if (!opened_cache->verify_config_on_use()) { // Check on AOT code loading
355 delete opened_cache;
356 opened_cache = nullptr;
357 report_load_failure();
358 return;
359 }
360
361 // initialize aot runtime constants as appropriate to this runtime
362 AOTRuntimeConstants::initialize_from_runtime();
363
364 // initialize the table of external routines and initial stubs so we can save
365 // generated code blobs that reference them
366 AOTCodeAddressTable* table = opened_cache->_table;
367 assert(table != nullptr, "should be initialized already");
368 table->init_extrs();
369
370 // Now cache and address table are ready for AOT code generation
371 _cache = opened_cache;
372
373 // Set ClassInitBarrierMode after all checks since it affects code generation
374 if (is_dumping_code()) {
375 FLAG_SET_ERGO_IF_DEFAULT(ClassInitBarrierMode, 1);
376 } else {
377 FLAG_SET_ERGO(ClassInitBarrierMode, 0);
378 }
379 }
380
381 bool AOTCodeCache::open_cache(bool is_dumping, bool is_using) {
382 opened_cache = new AOTCodeCache(is_dumping, is_using);
383 if (opened_cache->failed()) {
384 delete opened_cache;
385 opened_cache = nullptr;
386 return false;
387 }
388 return true;
389 }
390
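// Prints a compact AOT status tag for an nmethod, e.g. " AP4[L]#42":
// 'A' marks an AOT entry, 'P' that it was stored for preload, followed by the comp level,
// the flags 'L' (loaded), 'F' (load failed), 'I' (invalidated/not entrant), and '#<comp_id>'.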
391 static void print_helper(nmethod* nm, outputStream* st) {
392 AOTCodeCache::iterate([&](AOTCodeEntry* e) {
393 if (e->method() == nm->method()) {
394 ResourceMark rm;
395 stringStream ss;
396 ss.print("A%s%d", (e->for_preload() ? "P" : ""), e->comp_level());
397 ss.print("[%s%s%s]",
398 (e->is_loaded() ? "L" : ""),
399 (e->load_fail() ? "F" : ""),
400 (e->not_entrant() ? "I" : ""));
401 ss.print("#%d", e->comp_id());
402
403 st->print(" %s", ss.freeze());
404 }
405 });
406 }
407
408 void AOTCodeCache::close() {
409 if (is_on()) {
410 delete _cache; // Free memory
411 _cache = nullptr;
412 opened_cache = nullptr;
413 }
414 }
415
416 class CachedCodeDirectory : public CachedCodeDirectoryInternal {
417 public:
418 uint _aot_code_size;
419 char* _aot_code_data;
420
421 void set_aot_code_data(uint size, char* aot_data) {
422 _aot_code_size = size;
423 AOTCacheAccess::set_pointer(&_aot_code_data, aot_data);
424 }
425
426 static CachedCodeDirectory* create();
427 };
428
429 // Storing AOT code in the cached code region of AOT Cache:
430 //
431 // [1] Use CachedCodeDirectory to keep track of all data related to cached code.
432 // E.g., you can build a hashtable to record what methods have been archived.
433 //
434 // [2] Memory for all data for cached code, including CachedCodeDirectory, should be
435 // allocated using AOTCacheAccess::allocate_aot_code_region().
436 //
437 // [3] CachedCodeDirectory must be the very first allocation.
438 //
439 // [4] Two kinds of pointers can be stored:
440 // - A pointer p that points to metadata. AOTCacheAccess::can_generate_aot_code(p) must return true.
441 // - A pointer to a buffer returned by AOTCacheAccess::allocate_aot_code_region().
442 // (It's OK to point to an interior location within this buffer).
443 // Such pointers must be stored using AOTCacheAccess::set_pointer()
444 //
445 // The buffers allocated by AOTCacheAccess::allocate_aot_code_region() are in a contiguous region. At runtime, this
446 // region is mapped to the process address space. All the pointers in this buffer are relocated as necessary
447 // (e.g., to account for the runtime location of the CodeCache).
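//
// A minimal usage sketch (hypothetical record type and sizes), assuming the rules above:
//
//   struct MyCodeTable {          // hypothetical table living in the AOT code region
//     uint  _count;
//     char* _data;                // must point into a buffer from allocate_aot_code_region()
//   };
//
//   CachedCodeDirectory* dir = CachedCodeDirectory::create();   // [3] directory is the first allocation
//   MyCodeTable* table = (MyCodeTable*)AOTCacheAccess::allocate_aot_code_region(sizeof(MyCodeTable)); // [2]
//   char* data = (char*)AOTCacheAccess::allocate_aot_code_region(1024);
//   table->_count = 0;
//   AOTCacheAccess::set_pointer(&table->_data, data);           // [4] pointers are stored via set_pointer()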
448 //
449 // This is always at the very beginning of the mmapped CDS "cc" (cached code) region
450 static CachedCodeDirectory* _aot_code_directory = nullptr;
451
452 CachedCodeDirectory* CachedCodeDirectory::create() {
453 assert(AOTCacheAccess::is_aot_code_region_empty(), "must be");
454 CachedCodeDirectory* dir = (CachedCodeDirectory*)AOTCacheAccess::allocate_aot_code_region(sizeof(CachedCodeDirectory));
455 dir->dumptime_init_internal();
456 return dir;
457 }
458
459 #define DATA_ALIGNMENT HeapWordSize
460
461 AOTCodeCache::AOTCodeCache(bool is_dumping, bool is_using) :
462 _load_header(nullptr),
463 _load_buffer(nullptr),
464 _store_buffer(nullptr),
465 _C_store_buffer(nullptr),
466 _write_position(0),
467 _load_size(0),
468 _store_size(0),
469 _for_use(is_using),
470 _for_dump(is_dumping),
471 _closing(false),
472 _failed(false),
473 _lookup_failed(false),
474 _for_preload(false),
475 _has_clinit_barriers(false),
476 _table(nullptr),
477 _load_entries(nullptr),
478 _search_entries(nullptr),
479 _store_entries(nullptr),
480 _C_strings_buf(nullptr),
481 _store_entries_cnt(0),
482 _compile_id(0),
483 _comp_level(0)
484 {
485 // Read the header at the beginning of the cache
486 if (_for_use) {
487 // Read cache
488 size_t load_size = AOTCacheAccess::get_aot_code_region_size();
489 ReservedSpace rs = MemoryReserver::reserve(load_size, mtCode);
490 if (!rs.is_reserved()) {
491 log_warning(aot, codecache, init)("Failed to reserve %u bytes of memory for mapping AOT code region into AOT Code Cache", (uint)load_size);
492 set_failed();
493 return;
494 }
495 if (!AOTCacheAccess::map_aot_code_region(rs)) {
496 log_warning(aot, codecache, init)("Failed to read/mmap cached code region into AOT Code Cache");
497 set_failed();
498 return;
499 }
500 _aot_code_directory = (CachedCodeDirectory*)rs.base();
501 _aot_code_directory->runtime_init_internal();
502
503 _load_size = _aot_code_directory->_aot_code_size;
504 _load_buffer = _aot_code_directory->_aot_code_data;
505 assert(is_aligned(_load_buffer, DATA_ALIGNMENT), "load_buffer is not aligned");
506 log_info(aot, codecache, init)("Mapped %u bytes at address " INTPTR_FORMAT " from AOT Code Cache", _load_size, p2i(_load_buffer));
507
508 _load_header = (Header*)addr(0);
509 if (!_load_header->verify(_load_size)) {
510 set_failed();
511 return;
512 }
513 log_info (aot, codecache, init)("Loaded %u AOT code entries from AOT Code Cache", _load_header->entries_count());
514 log_debug(aot, codecache, init)(" Adapters: total=%u", _load_header->adapters_count());
515 log_debug(aot, codecache, init)(" Shared Blobs: total=%u", _load_header->shared_blobs_count());
516 log_debug(aot, codecache, init)(" C1 Blobs: total=%u", _load_header->C1_blobs_count());
517 log_debug(aot, codecache, init)(" C2 Blobs: total=%u", _load_header->C2_blobs_count());
518 log_debug(aot, codecache, init)(" Stubs: total=%u", _load_header->stubs_count());
519 log_debug(aot, codecache, init)(" Nmethods: total=%u", _load_header->nmethods_count());
520 log_debug(aot, codecache, init)(" AOT code cache size: %u bytes", _load_header->cache_size());
521
522 // Read strings
523 load_strings();
524 }
525 if (_for_dump) {
526 _C_store_buffer = NEW_C_HEAP_ARRAY(char, max_aot_code_size() + DATA_ALIGNMENT, mtCode);
527 _store_buffer = align_up(_C_store_buffer, DATA_ALIGNMENT);
528 // Entries are allocated at the end of the buffer in reverse order (as on a stack).
529 _store_entries = (AOTCodeEntry*)align_up(_C_store_buffer + max_aot_code_size(), DATA_ALIGNMENT);
530 log_debug(aot, codecache, init)("Allocated store buffer at address " INTPTR_FORMAT " of size %u", p2i(_store_buffer), max_aot_code_size());
531 }
532 _table = new AOTCodeAddressTable();
533 }
534
535 void AOTCodeCache::invalidate(AOTCodeEntry* entry) {
536 // This can be executed concurrently
537 if (entry != nullptr && is_on()) { // Request could come after cache is closed.
538 _cache->invalidate_entry(entry);
539 }
540 }
541
542 bool AOTCodeCache::is_loaded(AOTCodeEntry* entry) {
543 if (is_on() && _cache->cache_buffer() != nullptr) {
544 return (uint)((char*)entry - _cache->cache_buffer()) < _cache->load_size();
545 }
546 return false;
547 }
548
549 void AOTCodeCache::init_early_stubs_table() {
550 AOTCodeAddressTable* table = addr_table();
551 if (table != nullptr) {
552 table->init_early_stubs();
553 }
554 }
555
556 void AOTCodeCache::init_shared_blobs_table() {
557 AOTCodeAddressTable* table = addr_table();
558 if (table != nullptr) {
559 table->init_shared_blobs();
560 }
561 }
562
563 void AOTCodeCache::init_stubs_table() {
564 AOTCodeAddressTable* table = addr_table();
565 if (table != nullptr) {
566 table->init_stubs();
567 }
568 }
569
570 void AOTCodeCache::init_early_c1_table() {
571 AOTCodeAddressTable* table = addr_table();
572 if (table != nullptr) {
573 table->init_early_c1();
574 }
575 }
576
577 void AOTCodeCache::init_c1_table() {
578 AOTCodeAddressTable* table = addr_table();
579 if (table != nullptr) {
580 table->init_c1();
581 }
582 }
583
584 void AOTCodeCache::init_c2_table() {
585 AOTCodeAddressTable* table = addr_table();
586 if (table != nullptr) {
587 table->init_c2();
588 }
589 }
590
591 AOTCodeCache::~AOTCodeCache() {
592 if (_closing) {
593 return; // Already closed
594 }
595 // Stop any further access to cache.
596 // Checked on entry to load_nmethod() and store_nmethod().
597 _closing = true;
598 if (_for_use) {
599 // Wait for all load_nmethod() calls to finish.
600 wait_for_no_nmethod_readers();
601 }
602 // Prevent writing code into cache while we are closing it.
603 // This lock is held by ciEnv::register_method(), which calls store_nmethod().
604 MutexLocker ml(Compile_lock);
605 if (for_dump()) { // Finalize cache
606 finish_write();
607 }
608 _load_buffer = nullptr;
609 if (_C_store_buffer != nullptr) {
610 FREE_C_HEAP_ARRAY(char, _C_store_buffer);
611 _C_store_buffer = nullptr;
612 _store_buffer = nullptr;
613 }
614 if (_table != nullptr) {
615 MutexLocker ml(AOTCodeCStrings_lock, Mutex::_no_safepoint_check_flag);
616 delete _table;
617 _table = nullptr;
618 }
619 }
620
621 void AOTCodeCache::Config::record() {
622 _flags = 0;
623 #ifdef ASSERT
624 _flags |= debugVM;
625 #endif
626 if (UseCompressedOops) {
627 _flags |= compressedOops;
628 }
629 if (UseCompressedClassPointers) {
630 _flags |= compressedClassPointers;
631 }
632 if (UseTLAB) {
633 _flags |= useTLAB;
634 }
635 if (JavaAssertions::systemClassDefault()) {
636 _flags |= systemClassAssertions;
637 }
638 if (JavaAssertions::userClassDefault()) {
639 _flags |= userClassAssertions;
640 }
641 if (EnableContended) {
642 _flags |= enableContendedPadding;
643 }
644 if (RestrictContended) {
645 _flags |= restrictContendedPadding;
646 }
647 if (PreserveFramePointer) {
648 _flags |= preserveFramePointer;
649 }
650 _codeCacheSize = pointer_delta(CodeCache::high_bound(), CodeCache::low_bound(), 1);
651 _compressedOopShift = CompressedOops::shift();
652 _compressedOopBase = CompressedOops::base();
653 _compressedKlassShift = CompressedKlassPointers::shift();
654 _compressedKlassBase = CompressedKlassPointers::base();
655 _contendedPaddingWidth = ContendedPaddingWidth;
656 _objectAlignment = ObjectAlignmentInBytes;
657 #if !defined(ZERO) && (defined(IA32) || defined(AMD64))
658 _useSSE = UseSSE;
659 _useAVX = UseAVX;
660 #endif
661 _gc = (uint)Universe::heap()->kind();
662 }
663
664 bool AOTCodeCache::Config::verify() const {
665 // First checks affect all cached AOT code
666 #ifdef ASSERT
667 if ((_flags & debugVM) == 0) {
668 log_debug(aot, codecache, init)("AOT Code Cache disabled: it was created by product VM, it can't be used by debug VM");
669 return false;
670 }
671 #else
672 if ((_flags & debugVM) != 0) {
673 log_debug(aot, codecache, init)("AOT Code Cache disabled: it was created by debug VM, it can't be used by product VM");
674 return false;
675 }
676 #endif
677
678 #if !defined(ZERO) && (defined(IA32) || defined(AMD64))
679 if (UseSSE < _useSSE) {
680 log_debug(aot, codecache, init)("AOT Code Cache disabled: it was created with UseSSE = %d vs current %d", _useSSE, UseSSE);
681 return false;
682 }
683 if (UseAVX < _useAVX) {
684 log_debug(aot, codecache, init)("AOT Code Cache disabled: it was created with UseAVX = %d vs current %d", _useAVX, UseAVX);
685 return false;
686 }
687 #endif
688
689 size_t codeCacheSize = pointer_delta(CodeCache::high_bound(), CodeCache::low_bound(), 1);
690 if (_codeCacheSize != codeCacheSize) {
691 log_debug(aot, codecache, init)("AOT Code Cache disabled: it was created with CodeCache size = %dKb vs current %dKb", (int)(_codeCacheSize/K), (int)(codeCacheSize/K));
692 return false;
693 }
694
695 CollectedHeap::Name aot_gc = (CollectedHeap::Name)_gc;
696 if (aot_gc != Universe::heap()->kind()) {
697 log_debug(aot, codecache, init)("AOT Code Cache disabled: it was created with different GC: %s vs current %s", GCConfig::hs_err_name(aot_gc), GCConfig::hs_err_name());
698 return false;
699 }
700
701 if (_objectAlignment != (uint)ObjectAlignmentInBytes) {
702 log_debug(aot, codecache, init)("AOT Code Cache disabled: it was created with ObjectAlignmentInBytes = %d vs current %d", _objectAlignment, ObjectAlignmentInBytes);
703 return false;
704 }
705
706 if (((_flags & enableContendedPadding) != 0) != EnableContended) {
707 log_debug(aot, codecache, init)("AOT Code Cache disabled: it was created with EnableContended = %s vs current %s", (EnableContended ? "false" : "true"), (EnableContended ? "true" : "false"));
708 return false;
709 }
710 if (((_flags & restrictContendedPadding) != 0) != RestrictContended) {
711 log_debug(aot, codecache, init)("AOT Code Cache disabled: it was created with RestrictContended = %s vs current %s", (RestrictContended ? "false" : "true"), (RestrictContended ? "true" : "false"));
712 return false;
713 }
714 if (_contendedPaddingWidth != (uint)ContendedPaddingWidth) {
715 log_debug(aot, codecache, init)("AOT Code Cache disabled: it was created with ContendedPaddingWidth = %d vs current %d", _contendedPaddingWidth, ContendedPaddingWidth);
716 return false;
717 }
718
719 if (((_flags & preserveFramePointer) != 0) != PreserveFramePointer) {
720 log_debug(aot, codecache, init)("AOT Code Cache disabled: it was created with PreserveFramePointer = %s vs current %s", (PreserveFramePointer ? "false" : "true"), (PreserveFramePointer ? "true" : "false"));
721 return false;
722 }
723
724 if (((_flags & compressedClassPointers) != 0) != UseCompressedClassPointers) {
725 log_debug(aot, codecache, init)("AOT Code Cache disabled: it was created with UseCompressedClassPointers = %s vs current %s", (UseCompressedClassPointers ? "false" : "true"), (UseCompressedClassPointers ? "true" : "false"));
726 return false;
727 }
728 if (_compressedKlassShift != (uint)CompressedKlassPointers::shift()) {
729 log_debug(aot, codecache, init)("AOT Code Cache disabled: it was created with CompressedKlassPointers::shift() = %d vs current %d", _compressedKlassShift, CompressedKlassPointers::shift());
730 return false;
731 }
732 if ((_compressedKlassBase == nullptr || CompressedKlassPointers::base() == nullptr) && (_compressedKlassBase != CompressedKlassPointers::base())) {
733 log_debug(aot, codecache, init)("AOT Code Cache disabled: incompatible CompressedKlassPointers::base(): %p vs current %p", _compressedKlassBase, CompressedKlassPointers::base());
734 return false;
735 }
736
737 if (((_flags & compressedOops) != 0) != UseCompressedOops) {
738 log_debug(aot, codecache, init)("AOT Code Cache disabled: it was created with UseCompressedOops = %s vs current %s", (UseCompressedOops ? "false" : "true"), (UseCompressedOops ? "true" : "false"));
739 return false;
740 }
741 if (_compressedOopShift != (uint)CompressedOops::shift()) {
742 log_debug(aot, codecache, init)("AOT Code Cache disabled: it was created with different CompressedOops::shift(): %d vs current %d", _compressedOopShift, CompressedOops::shift());
743 return false;
744 }
745 if ((_compressedOopBase == nullptr || CompressedOops::base() == nullptr) && (_compressedOopBase != CompressedOops::base())) {
746 log_debug(aot, codecache, init)("AOTStubCaching is disabled: incompatible CompressedOops::base(): %p vs current %p", _compressedOopBase, CompressedOops::base());
747 return false;
748 }
749
750 // Next affects only AOT nmethod
751 if (((_flags & systemClassAssertions) != 0) != JavaAssertions::systemClassDefault()) {
752 log_debug(aot, codecache, init)("AOT Code Cache disabled: it was created with JavaAssertions::systemClassDefault() = %s vs current %s", (JavaAssertions::systemClassDefault() ? "disabled" : "enabled"), (JavaAssertions::systemClassDefault() ? "enabled" : "disabled"));
753 FLAG_SET_ERGO(AOTCodeCaching, false);
754 }
755 if (((_flags & userClassAssertions) != 0) != JavaAssertions::userClassDefault()) {
756 log_debug(aot, codecache, init)("AOT Code Cache disabled: it was created with JavaAssertions::userClassDefault() = %s vs current %s", (JavaAssertions::userClassDefault() ? "disabled" : "enabled"), (JavaAssertions::userClassDefault() ? "enabled" : "disabled"));
757 FLAG_SET_ERGO(AOTCodeCaching, false);
758 }
759
760 return true;
761 }
762
763 bool AOTCodeCache::Header::verify(uint load_size) const {
764 if (_version != AOT_CODE_VERSION) {
765 log_debug(aot, codecache, init)("AOT Code Cache disabled: different AOT Code version %d vs %d recorded in AOT Code header", AOT_CODE_VERSION, _version);
766 return false;
767 }
768 if (load_size < _cache_size) {
769 log_debug(aot, codecache, init)("AOT Code Cache disabled: AOT Code Cache size %d < %d recorded in AOT Code header", load_size, _cache_size);
770 return false;
771 }
772 return true;
773 }
774
775 volatile int AOTCodeCache::_nmethod_readers = 0;
776
777 AOTCodeCache* AOTCodeCache::open_for_use() {
778 if (AOTCodeCache::is_on_for_use()) {
779 return AOTCodeCache::cache();
780 }
781 return nullptr;
782 }
783
784 AOTCodeCache* AOTCodeCache::open_for_dump() {
785 if (AOTCodeCache::is_on_for_dump()) {
786 AOTCodeCache* cache = AOTCodeCache::cache();
787 cache->clear_lookup_failed(); // Reset bit
788 return cache;
789 }
790 return nullptr;
791 }
792
793 bool AOTCodeCache::is_address_in_aot_cache(address p) {
794 AOTCodeCache* cache = open_for_use();
795 if (cache == nullptr) {
796 return false;
797 }
798 if ((p >= (address)cache->cache_buffer()) &&
799 (p < (address)(cache->cache_buffer() + cache->load_size()))) {
800 return true;
801 }
802 return false;
803 }
804
805 static void copy_bytes(const char* from, address to, uint size) {
806 assert((int)size > 0, "sanity");
807 memcpy(to, from, size);
808 log_trace(aot, codecache)("Copied %d bytes from " INTPTR_FORMAT " to " INTPTR_FORMAT, size, p2i(from), p2i(to));
809 }
810
811 AOTCodeReader::AOTCodeReader(AOTCodeCache* cache, AOTCodeEntry* entry, CompileTask* task) {
812 _cache = cache;
813 _entry = entry;
814 _load_buffer = cache->cache_buffer();
815 _read_position = 0;
816 if (task != nullptr) {
817 _compile_id = task->compile_id();
818 _comp_level = task->comp_level();
819 _preload = task->preload();
820 } else {
821 _compile_id = 0;
822 _comp_level = 0;
823 _preload = false;
824 }
825 _lookup_failed = false;
826 }
827
828 void AOTCodeReader::set_read_position(uint pos) {
829 if (pos == _read_position) {
830 return;
831 }
832 assert(pos < _cache->load_size(), "offset:%d >= file size:%d", pos, _cache->load_size());
833 _read_position = pos;
834 }
835
836 bool AOTCodeCache::set_write_position(uint pos) {
837 if (pos == _write_position) {
838 return true;
839 }
840 if (_store_size < _write_position) {
841 _store_size = _write_position; // Adjust during write
842 }
843 assert(pos < _store_size, "offset:%d >= file size:%d", pos, _store_size);
844 _write_position = pos;
887 if (nbytes == 0) {
888 return 0;
889 }
890 uint new_position = _write_position + nbytes;
891 if (new_position >= (uint)((char*)_store_entries - _store_buffer)) {
892 log_warning(aot, codecache)("Failed to write %d bytes at offset %d to AOT Code Cache. Increase AOTCodeMaxSize.",
893 nbytes, _write_position);
894 set_failed();
895 report_store_failure();
896 return 0;
897 }
898 copy_bytes((const char* )buffer, (address)(_store_buffer + _write_position), nbytes);
899 log_trace(aot, codecache)("Wrote %d bytes at offset %d to AOT Code Cache", nbytes, _write_position);
900 _write_position += nbytes;
901 if (_store_size < _write_position) {
902 _store_size = _write_position;
903 }
904 return nbytes;
905 }
906
907 AOTCodeEntry* AOTCodeCache::find_code_entry(const methodHandle& method, uint comp_level) {
908 assert(is_using_code(), "AOT code caching should be enabled");
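// DisableCachedCode is interpreted as a bit mask: bit 0 disables lookups for level 1 (simple),
// bit 1 for level 2 (limited profile), bit 2 for level 4 (full optimization).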
909 switch (comp_level) {
910 case CompLevel_simple:
911 if ((DisableCachedCode & (1 << 0)) != 0) {
912 return nullptr;
913 }
914 break;
915 case CompLevel_limited_profile:
916 if ((DisableCachedCode & (1 << 1)) != 0) {
917 return nullptr;
918 }
919 break;
920 case CompLevel_full_optimization:
921 if ((DisableCachedCode & (1 << 2)) != 0) {
922 return nullptr;
923 }
924 break;
925
926 default: return nullptr; // Level 1, 2, and 4 only
927 }
928 TraceTime t1("Total time to find AOT code", &_t_totalFind, enable_timers(), false);
929 if (is_on() && _cache->cache_buffer() != nullptr) {
930 ResourceMark rm;
931 const char* target_name = method->name_and_sig_as_C_string();
932 uint hash = java_lang_String::hash_code((const jbyte*)target_name, (int)strlen(target_name));
933 AOTCodeEntry* entry = _cache->find_entry(AOTCodeEntry::Code, hash, comp_level);
934 if (entry == nullptr) {
935 log_info(aot, codecache, nmethod)("Missing entry for '%s' (comp_level %d, hash: " UINT32_FORMAT_X_0 ")", target_name, (uint)comp_level, hash);
936 #ifdef ASSERT
937 } else {
938 uint name_offset = entry->offset() + entry->name_offset();
939 uint name_size = entry->name_size(); // Includes '\0'
940 const char* name = _cache->cache_buffer() + name_offset;
941 if (strncmp(target_name, name, name_size) != 0) {
942 assert(false, "AOTCodeCache: saved nmethod's name '%s' is different from '%s', hash: " UINT32_FORMAT_X_0, name, target_name, hash);
943 }
944 #endif
945 }
946
947 DirectiveSet* directives = DirectivesStack::getMatchingDirective(method, nullptr);
948 if (directives->IgnorePrecompiledOption) {
949 LogStreamHandle(Info, aot, codecache, compilation) log;
950 if (log.is_enabled()) {
951 log.print("Ignore cached code entry on level %d for ", comp_level);
952 method->print_value_on(&log);
953 }
954 return nullptr;
955 }
956
957 return entry;
958 }
959 return nullptr;
960 }
961
962 void* AOTCodeEntry::operator new(size_t x, AOTCodeCache* cache) {
963 return (void*)(cache->add_entry());
964 }
965
966 static bool check_entry(AOTCodeEntry::Kind kind, uint id, uint comp_level, AOTCodeEntry* entry) {
967 if (entry->kind() == kind) {
968 assert(entry->id() == id, "sanity");
969 if (kind != AOTCodeEntry::Code || (!entry->not_entrant() && !entry->has_clinit_barriers() &&
970 (entry->comp_level() == comp_level))) {
971 return true; // Found
972 }
973 }
974 return false;
975 }
976
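// Entries are located through a search table of [id, index] pairs sorted by id
// (written by finish_write()). We binary-search on id and then scan the neighbouring
// duplicates linearly, since several entries (e.g. different comp levels or clinit-barrier
// variants) may share the same id.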
977 AOTCodeEntry* AOTCodeCache::find_entry(AOTCodeEntry::Kind kind, uint id, uint comp_level) {
978 assert(_for_use, "sanity");
979 uint count = _load_header->entries_count();
980 if (_load_entries == nullptr) {
981 // Read it
982 _search_entries = (uint*)addr(_load_header->entries_offset()); // [id, index]
983 _load_entries = (AOTCodeEntry*)(_search_entries + 2 * count);
984 log_debug(aot, codecache, init)("Read %d entries table at offset %d from AOT Code Cache", count, _load_header->entries_offset());
985 }
986 // Binary search
987 int l = 0;
988 int h = count - 1;
989 while (l <= h) {
990 int mid = (l + h) >> 1;
991 int ix = mid * 2;
992 uint is = _search_entries[ix];
993 if (is == id) {
994 int index = _search_entries[ix + 1];
995 AOTCodeEntry* entry = &(_load_entries[index]);
996 if (check_entry(kind, id, comp_level, entry)) {
997 return entry; // Found
998 }
999 // Linear search around
1000 for (int i = mid - 1; i >= l; i--) { // search back
1001 ix = i * 2;
1002 is = _search_entries[ix];
1003 if (is != id) {
1004 break;
1005 }
1006 index = _search_entries[ix + 1];
1007 AOTCodeEntry* entry = &(_load_entries[index]);
1008 if (check_entry(kind, id, comp_level, entry)) {
1009 return entry; // Found
1010 }
1011 }
1012 for (int i = mid + 1; i <= h; i++) { // search forward
1013 ix = i * 2;
1014 is = _search_entries[ix];
1015 if (is != id) {
1016 break;
1017 }
1018 index = _search_entries[ix + 1];
1019 AOTCodeEntry* entry = &(_load_entries[index]);
1020 if (check_entry(kind, id, comp_level, entry)) {
1021 return entry; // Found
1022 }
1023 }
1024 break; // No match found
1025 } else if (is < id) {
1026 l = mid + 1;
1027 } else {
1028 h = mid - 1;
1029 }
1030 }
1031 return nullptr;
1032 }
1033
1034 void AOTCodeCache::invalidate_entry(AOTCodeEntry* entry) {
1035 assert(entry != nullptr, "all entries should be read already");
1036 if (entry->not_entrant()) {
1037 return; // Someone invalidated it already
1038 }
1039 #ifdef ASSERT
1040 bool found = false;
1041 if (_for_use) {
1042 uint count = _load_header->entries_count();
1043 uint i = 0;
1044 for(; i < count; i++) {
1045 if (entry == &(_load_entries[i])) {
1046 break;
1047 }
1048 }
1049 found = (i < count);
1050 }
1051 if (!found && _for_dump) {
1052 uint count = _store_entries_cnt;
1053 uint i = 0;
1054 for(; i < count; i++) {
1055 if (entry == &(_store_entries[i])) {
1056 break;
1057 }
1058 }
1059 found = (i < count);
1060 }
1061 assert(found, "entry should exist");
1062 #endif
1063 entry->set_not_entrant();
1064 {
1065 uint name_offset = entry->offset() + entry->name_offset();
1066 const char* name;
1067 if (AOTCodeCache::is_loaded(entry)) {
1068 name = _load_buffer + name_offset;
1069 } else {
1070 name = _store_buffer + name_offset;
1071 }
1072 uint level = entry->comp_level();
1073 uint comp_id = entry->comp_id();
1074 bool clinit_brs = entry->has_clinit_barriers();
1075 log_info(aot, codecache, nmethod)("Invalidated entry for '%s' (comp_id %d, comp_level %d, hash: " UINT32_FORMAT_X_0 "%s)",
1076 name, comp_id, level, entry->id(), (clinit_brs ? ", has clinit barriers" : ""));
1077 }
1078 if (entry->next() != nullptr) {
1079 entry = entry->next();
1080 assert(entry->has_clinit_barriers(), "expecting only such entries here");
1081 invalidate_entry(entry);
1082 }
1083 }
1084
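// Before the entry is written out, replace the raw Method* with a delta from the AOT cache
// base address so the stored entry does not contain a process-specific pointer.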
1085 void AOTCodeEntry::update_method_for_writing() {
1086 if (_method != nullptr) {
1087 _method_offset = AOTCacheAccess::delta_from_base_address((address)_method);
1088 _method = nullptr;
1089 }
1090 }
1091
1092 static int uint_cmp(const void *i, const void *j) {
1093 uint a = *(uint *)i;
1094 uint b = *(uint *)j;
1095 return a > b ? 1 : a < b ? -1 : 0;
1096 }
1097
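// Writes out the final AOT code region. The resulting layout, in allocation order, is:
//   Header | code of all kept entries (old, then new) | C strings | preload entry indices |
//   search table of [id, index] pairs sorted by id | array of AOTCodeEntry records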
1098 bool AOTCodeCache::finish_write() {
1099 if (!align_write()) {
1100 return false;
1101 }
1102 uint strings_offset = _write_position;
1103 int strings_count = store_strings();
1104 if (strings_count < 0) {
1105 return false;
1106 }
1107 if (!align_write()) {
1108 return false;
1109 }
1110 uint strings_size = _write_position - strings_offset;
1111
1112 uint entries_count = 0; // Number of entrant (useful) code entries
1113 uint entries_offset = _write_position;
1114
1115 uint store_count = _store_entries_cnt;
1116 if (store_count > 0) {
1117 _aot_code_directory = CachedCodeDirectory::create();
1118 assert(_aot_code_directory != nullptr, "Sanity check");
1119
1120 uint header_size = (uint)align_up(sizeof(AOTCodeCache::Header), DATA_ALIGNMENT);
1121 uint load_count = (_load_header != nullptr) ? _load_header->entries_count() : 0;
1122 uint code_count = store_count + load_count;
1123 uint search_count = code_count * 2;
1124 uint search_size = search_count * sizeof(uint);
1125 uint entries_size = (uint)align_up(code_count * sizeof(AOTCodeEntry), DATA_ALIGNMENT); // In bytes
1126 uint preload_entries_cnt = 0;
1127 uint* preload_entries = NEW_C_HEAP_ARRAY(uint, code_count, mtCode);
1128 uint preload_entries_size = code_count * sizeof(uint);
1129 // _write_position should include code and strings
1130 uint code_alignment = code_count * DATA_ALIGNMENT; // We align_up code size when storing it.
1131 uint total_size = _write_position + _load_size + header_size +
1132 code_alignment + search_size + preload_entries_size + entries_size;
1133 assert(total_size < max_aot_code_size(), "AOT Code size (" UINT32_FORMAT " bytes) is greater than AOTCodeMaxSize(" UINT32_FORMAT " bytes).", total_size, max_aot_code_size());
1134
1135
1136 // Create ordered search table for entries [id, index];
1137 uint* search = NEW_C_HEAP_ARRAY(uint, search_count, mtCode);
1138 // Allocate in AOT Cache buffer
1139 char* buffer = (char *)AOTCacheAccess::allocate_aot_code_region(total_size + DATA_ALIGNMENT);
1140 char* start = align_up(buffer, DATA_ALIGNMENT);
1141 char* current = start + header_size; // Skip header
1142
1143 AOTCodeEntry* entries_address = _store_entries; // Pointer to latest entry
1144 uint adapters_count = 0;
1145 uint shared_blobs_count = 0;
1146 uint C1_blobs_count = 0;
1147 uint C2_blobs_count = 0;
1148 uint stubs_count = 0;
1149 uint nmethods_count = 0;
1150 uint max_size = 0;
1151 // Add old entries first
1152 if (_for_use && (_load_header != nullptr)) {
1153 for(uint i = 0; i < load_count; i++) {
1154 AOTCodeEntry* entry = &(_load_entries[i]);
1155 if (entry->load_fail()) {
1156 continue;
1157 }
1158 if (entry->not_entrant()) {
1159 log_info(aot, codecache, exit)("Not entrant load entry id: %d, hash: " UINT32_FORMAT_X_0, i, entry->id());
1160 if (entry->for_preload()) {
1161 // Skip not entrant preload code:
1162 // we can't pre-load code which may have failing dependencies.
1163 continue;
1164 }
1165 entry->set_entrant(); // Reset
1166 } else if (entry->for_preload() && entry->method() != nullptr) {
1167 // record entrant first version code for pre-loading
1168 preload_entries[preload_entries_cnt++] = entries_count;
1169 }
1170 {
1171 uint size = align_up(entry->size(), DATA_ALIGNMENT);
1172 if (size > max_size) {
1173 max_size = size;
1174 }
1175 copy_bytes((_load_buffer + entry->offset()), (address)current, size);
1176 entry->set_offset(current - start); // New offset
1177 current += size;
1178 uint n = write_bytes(entry, sizeof(AOTCodeEntry));
1179 if (n != sizeof(AOTCodeEntry)) {
1180 FREE_C_HEAP_ARRAY(uint, search);
1181 return false;
1182 }
1183 search[entries_count*2 + 0] = entry->id();
1184 search[entries_count*2 + 1] = entries_count;
1185 entries_count++;
1186 AOTCodeEntry::Kind kind = entry->kind();
1187 if (kind == AOTCodeEntry::Adapter) {
1188 adapters_count++;
1189 } else if (kind == AOTCodeEntry::SharedBlob) {
1190 shared_blobs_count++;
1191 } else if (kind == AOTCodeEntry::C1Blob) {
1192 C1_blobs_count++;
1193 } else if (kind == AOTCodeEntry::C2Blob) {
1194 C2_blobs_count++;
1195 } else if (kind == AOTCodeEntry::Stub) {
1196 stubs_count++;
1197 } else {
1198 assert(kind == AOTCodeEntry::Code, "sanity");
1199 nmethods_count++;
1200 }
1201 }
1202 }
1203 }
1204 // AOTCodeEntry entries were allocated in reverse order in the store buffer.
1205 // Process them in reverse order so that the code stored first is cached first.
1206 for (int i = store_count - 1; i >= 0; i--) {
1207 AOTCodeEntry* entry = &entries_address[i];
1208 if (entry->load_fail()) {
1209 continue;
1210 }
1211 if (entry->not_entrant()) {
1212 log_info(aot, codecache, exit)("Not entrant new entry comp_id: %d, comp_level: %d, hash: " UINT32_FORMAT_X_0 "%s",
1213 entry->comp_id(), entry->comp_level(), entry->id(), (entry->has_clinit_barriers() ? ", has clinit barriers" : ""));
1214 if (entry->for_preload()) {
1215 // Skip not entrant preload code:
1216 // we can't pre-load code which may have failing dependencies.
1217 continue;
1218 }
1219 entry->set_entrant(); // Reset
1220 } else if (entry->for_preload() && entry->method() != nullptr) {
1221 // record entrant first version code for pre-loading
1222 preload_entries[preload_entries_cnt++] = entries_count;
1223 }
1224 {
1225 entry->set_next(nullptr); // clear pointers before storing data
1226 uint size = align_up(entry->size(), DATA_ALIGNMENT);
1227 if (size > max_size) {
1228 max_size = size;
1229 }
1230 copy_bytes((_store_buffer + entry->offset()), (address)current, size);
1231 entry->set_offset(current - start); // New offset
1232 entry->update_method_for_writing();
1233 current += size;
1234 uint n = write_bytes(entry, sizeof(AOTCodeEntry));
1235 if (n != sizeof(AOTCodeEntry)) {
1236 FREE_C_HEAP_ARRAY(uint, search);
1237 return false;
1238 }
1239 search[entries_count*2 + 0] = entry->id();
1240 search[entries_count*2 + 1] = entries_count;
1241 entries_count++;
1242 AOTCodeEntry::Kind kind = entry->kind();
1243 if (kind == AOTCodeEntry::Adapter) {
1244 adapters_count++;
1245 } else if (kind == AOTCodeEntry::SharedBlob) {
1246 shared_blobs_count++;
1247 } else if (kind == AOTCodeEntry::C1Blob) {
1248 C1_blobs_count++;
1249 } else if (kind == AOTCodeEntry::C2Blob) {
1250 C2_blobs_count++;
1251 } else if (kind == AOTCodeEntry::Stub) {
1252 stubs_count++;
1253 } else {
1254 assert(kind == AOTCodeEntry::Code, "sanity");
1255 nmethods_count++;
1256 }
1257 }
1258 }
1259
1260 if (entries_count == 0) {
1261 log_info(aot, codecache, exit)("AOT Code Cache was not created: no entries");
1262 FREE_C_HEAP_ARRAY(uint, search);
1263 return true; // Nothing to write
1264 }
1265 assert(entries_count <= (store_count + load_count), "%d > (%d + %d)", entries_count, store_count, load_count);
1266 // Write strings
1267 if (strings_count > 0) {
1268 copy_bytes((_store_buffer + strings_offset), (address)current, strings_size);
1269 strings_offset = (current - start); // New offset
1270 current += strings_size;
1271 }
1272 uint preload_entries_offset = (current - start);
1273 preload_entries_size = preload_entries_cnt * sizeof(uint);
1274 if (preload_entries_size > 0) {
1275 copy_bytes((const char*)preload_entries, (address)current, preload_entries_size);
1276 current += preload_entries_size;
1277 log_info(aot, codecache, exit)("Wrote %d preload entries to AOT Code Cache", preload_entries_cnt);
1278 }
1279 if (preload_entries != nullptr) {
1280 FREE_C_HEAP_ARRAY(uint, preload_entries);
1281 }
1282
1283 uint new_entries_offset = (current - start); // New offset
1284 // Sort and store search table
1285 qsort(search, entries_count, 2*sizeof(uint), uint_cmp);
1286 search_size = 2 * entries_count * sizeof(uint);
1287 copy_bytes((const char*)search, (address)current, search_size);
1288 FREE_C_HEAP_ARRAY(uint, search);
1289 current += search_size;
1290
1291 // Write entries
1292 entries_size = entries_count * sizeof(AOTCodeEntry); // New size
1293 copy_bytes((_store_buffer + entries_offset), (address)current, entries_size);
1294 current += entries_size;
1295
1296 log_stats_on_exit();
1297
1298 uint size = (current - start);
1299 assert(size <= total_size, "%d > %d", size , total_size);
1300 uint blobs_count = shared_blobs_count + C1_blobs_count + C2_blobs_count;
1301 assert(nmethods_count == (entries_count - (stubs_count + blobs_count + adapters_count)), "sanity");
1302 log_debug(aot, codecache, exit)(" Adapters: total=%u", adapters_count);
1303 log_debug(aot, codecache, exit)(" Shared Blobs: total=%u", shared_blobs_count);
1304 log_debug(aot, codecache, exit)(" C1 Blobs: total=%u", C1_blobs_count);
1305 log_debug(aot, codecache, exit)(" C2 Blobs: total=%u", C2_blobs_count);
1306 log_debug(aot, codecache, exit)(" Stubs: total=%u", stubs_count);
1307 log_debug(aot, codecache, exit)(" Nmethods: total=%u", nmethods_count);
1308 log_debug(aot, codecache, exit)(" AOT code cache size: %u bytes, max entry's size: %u bytes", size, max_size);
1309
1310 // Finalize header
1311 AOTCodeCache::Header* header = (AOTCodeCache::Header*)start;
1312 header->init(size, (uint)strings_count, strings_offset,
1313 entries_count, new_entries_offset,
1314 preload_entries_cnt, preload_entries_offset,
1315 adapters_count, shared_blobs_count,
1316 C1_blobs_count, C2_blobs_count, stubs_count);
1317
1318 log_info(aot, codecache, exit)("Wrote %d AOT code entries to AOT Code Cache", entries_count);
1319
1320 _aot_code_directory->set_aot_code_data(size, start);
1321 }
1322 return true;
1323 }
1324
1325 //------------------Store/Load AOT code ----------------------
1326
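// Per-blob record layout written below: name | archived CodeBlob | relocation data |
// oop maps (optional) | asm remarks and dbg strings (non-product) | relocation fixups |
// entry offset count followed by the offsets.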
1327 bool AOTCodeCache::store_code_blob(CodeBlob& blob, AOTCodeEntry::Kind entry_kind, uint id, const char* name, int entry_offset_count, int* entry_offsets) {
1328 AOTCodeCache* cache = open_for_dump();
1329 if (cache == nullptr) {
1330 return false;
1331 }
1332 assert(AOTCodeEntry::is_valid_entry_kind(entry_kind), "invalid entry_kind %d", entry_kind);
1333
1334 if (AOTCodeEntry::is_adapter(entry_kind) && !is_dumping_adapter()) {
1335 return false;
1336 }
1337 if (AOTCodeEntry::is_blob(entry_kind) && !is_dumping_stub()) {
1338 return false;
1339 }
1340 log_debug(aot, codecache, stubs)("Writing blob '%s' (id=%u, kind=%s) to AOT Code Cache", name, id, aot_code_entry_kind_name[entry_kind]);
1341
1342 #ifdef ASSERT
1343 LogStreamHandle(Trace, aot, codecache, stubs) log;
1344 if (log.is_enabled()) {
1345 FlagSetting fs(PrintRelocations, true);
1346 blob.print_on(&log);
1347 }
1348 #endif
1349 // We need to take a lock to prevent a race between compiler threads generating AOT code
1350 // and the main thread generating adapters.
1351 MutexLocker ml(Compile_lock);
1352 if (!is_on()) {
1353 return false; // AOT code cache was already dumped and closed.
1354 }
1355 if (!cache->align_write()) {
1356 return false;
1357 }
1358 uint entry_position = cache->_write_position;
1359
1360 // Write name
1361 uint name_offset = cache->_write_position - entry_position;
1362 uint name_size = (uint)strlen(name) + 1; // Includes '\0'
1363 uint n = cache->write_bytes(name, name_size);
1364 if (n != name_size) {
1365 return false;
1366 }
1367
1368 // Write CodeBlob
1369 if (!cache->align_write()) {
1370 return false;
1371 }
1372 uint blob_offset = cache->_write_position - entry_position;
1373 address archive_buffer = cache->reserve_bytes(blob.size());
1374 if (archive_buffer == nullptr) {
1375 return false;
1376 }
1377 CodeBlob::archive_blob(&blob, archive_buffer);
1378
1379 uint reloc_data_size = blob.relocation_size();
1380 n = cache->write_bytes((address)blob.relocation_begin(), reloc_data_size);
1381 if (n != reloc_data_size) {
1382 return false;
1383 }
1384
1385 bool has_oop_maps = false;
1386 if (blob.oop_maps() != nullptr) {
1387 if (!cache->write_oop_map_set(blob)) {
1388 return false;
1389 }
1390 has_oop_maps = true;
1391 }
1392
1393 #ifndef PRODUCT
1394 // Write asm remarks
1395 if (!cache->write_asm_remarks(blob.asm_remarks(), /* use_string_table */ true)) {
1396 return false;
1397 }
1398 if (!cache->write_dbg_strings(blob.dbg_strings(), /* use_string_table */ true)) {
1399 return false;
1400 }
1401 #endif /* PRODUCT */
1402
1403 if (!cache->write_relocations(blob)) {
1404 if (!cache->failed()) {
1405 // We may miss an address in AOT table - skip this code blob.
1406 cache->set_write_position(entry_position);
1407 }
1408 return false;
1409 }
1410
1411 // Write entries offsets
1412 n = cache->write_bytes(&entry_offset_count, sizeof(int));
1413 if (n != sizeof(int)) {
1414 return false;
1415 }
1416 for (int i = 0; i < entry_offset_count; i++) {
1417 uint32_t off = (uint32_t)entry_offsets[i];
1418 n = cache->write_bytes(&off, sizeof(uint32_t));
1419 if (n != sizeof(uint32_t)) {
1420 return false;
1421 }
1422 }
1423 uint entry_size = cache->_write_position - entry_position;
1424 AOTCodeEntry* entry = new(cache) AOTCodeEntry(entry_kind, encode_id(entry_kind, id),
1425 entry_position, entry_size, name_offset, name_size,
1426 blob_offset, has_oop_maps, blob.content_begin());
1427 log_debug(aot, codecache, stubs)("Wrote code blob '%s' (id=%u, kind=%s) to AOT Code Cache", name, id, aot_code_entry_kind_name[entry_kind]);
1428   return true;
1429 }
1430
1431 CodeBlob* AOTCodeCache::load_code_blob(AOTCodeEntry::Kind entry_kind, uint id, const char* name, int entry_offset_count, int* entry_offsets) {
1432 AOTCodeCache* cache = open_for_use();
1433 if (cache == nullptr) {
1434 return nullptr;
1435 }
1436 assert(AOTCodeEntry::is_valid_entry_kind(entry_kind), "invalid entry_kind %d", entry_kind);
1437
1438 if (AOTCodeEntry::is_adapter(entry_kind) && !is_using_adapter()) {
1439 return nullptr;
1440 }
1441 if (AOTCodeEntry::is_blob(entry_kind) && !is_using_stub()) {
1442 return nullptr;
1443 }
1444 log_debug(aot, codecache, stubs)("Reading blob '%s' (id=%u, kind=%s) from AOT Code Cache", name, id, aot_code_entry_kind_name[entry_kind]);
1445
1446 AOTCodeEntry* entry = cache->find_entry(entry_kind, encode_id(entry_kind, id));
1447 if (entry == nullptr) {
1448 return nullptr;
1449 }
1450 AOTCodeReader reader(cache, entry, nullptr);
1451 CodeBlob* blob = reader.compile_code_blob(name, entry_offset_count, entry_offsets);
1452
1453 log_debug(aot, codecache, stubs)("%sRead blob '%s' (id=%u, kind=%s) from AOT Code Cache",
1454 (blob == nullptr? "Failed to " : ""), name, id, aot_code_entry_kind_name[entry_kind]);
1455 return blob;
1456 }
1457
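// Reader counterpart of store_code_blob(): it consumes the entry in the same order it was
// written (name, archived CodeBlob, relocation data, oop maps, asm remarks/dbg strings,
// extra relocation data, entry offsets) and materializes a live CodeBlob in the CodeCache.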
1458 CodeBlob* AOTCodeReader::compile_code_blob(const char* name, int entry_offset_count, int* entry_offsets) {
1459 uint entry_position = _entry->offset();
1460
1461 // Read name
1462 uint name_offset = entry_position + _entry->name_offset();
1463   uint name_size = _entry->name_size(); // Includes terminating '\0'
1464 const char* stored_name = addr(name_offset);
1465
1466 if (strncmp(stored_name, name, (name_size - 1)) != 0) {
1467 log_warning(aot, codecache, stubs)("Saved blob's name '%s' is different from the expected name '%s'",
1468 stored_name, name);
1469 set_lookup_failed(); // Skip this blob
1470 return nullptr;
1471 }
1472
1473 // Read archived code blob
1474 uint offset = entry_position + _entry->code_offset();
1475 CodeBlob* archived_blob = (CodeBlob*)addr(offset);
1476 offset += archived_blob->size();
1477
1478 address reloc_data = (address)addr(offset);
1479 offset += archived_blob->relocation_size();
1480 set_read_position(offset);
1481
1482 ImmutableOopMapSet* oop_maps = nullptr;
1483 if (_entry->has_oop_maps()) {
1484 oop_maps = read_oop_map_set();
1485 }
1486
1487 CodeBlob* code_blob = CodeBlob::create(archived_blob,
1488 stored_name,
1489 reloc_data,
1490 oop_maps
1491 );
1492 if (code_blob == nullptr) { // no space left in CodeCache
1493 return nullptr;
1494 }
1495
1496 #ifndef PRODUCT
1497 code_blob->asm_remarks().init();
1498 read_asm_remarks(code_blob->asm_remarks(), /* use_string_table */ true);
1499 code_blob->dbg_strings().init();
1500 read_dbg_strings(code_blob->dbg_strings(), /* use_string_table */ true);
1501 #endif // PRODUCT
1502
1503 fix_relocations(code_blob);
1504
1505   // Read entry offsets
1506 offset = read_position();
1507 int stored_count = *(int*)addr(offset);
1508 assert(stored_count == entry_offset_count, "entry offset count mismatch, count in AOT code cache=%d, expected=%d", stored_count, entry_offset_count);
1509 offset += sizeof(int);
1510 set_read_position(offset);
1511 for (int i = 0; i < stored_count; i++) {
1512 uint32_t off = *(uint32_t*)addr(offset);
1513 offset += sizeof(uint32_t);
1514 const char* entry_name = (_entry->kind() == AOTCodeEntry::Adapter) ? AdapterHandlerEntry::entry_name(i) : "";
1515 log_trace(aot, codecache, stubs)("Reading adapter '%s:%s' (0x%x) offset: 0x%x from AOT Code Cache",
1516 stored_name, entry_name, _entry->id(), off);
1517 entry_offsets[i] = off;
1518 }
1519
1520 #ifdef ASSERT
1521 LogStreamHandle(Trace, aot, codecache, stubs) log;
1522 if (log.is_enabled()) {
1523 FlagSetting fs(PrintRelocations, true);
1524 code_blob->print_on(&log);
1525 }
1526 #endif
1527 return code_blob;
1528 }
1529
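// Stubs produced by a StubCodeGenerator are stored as raw code bytes plus the stub name.
// No relocations are expected in such a stub (the ASSERT block below checks this), so
// loading amounts to copying the bytes back into the live code buffer.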
1530 bool AOTCodeCache::store_stub(StubCodeGenerator* cgen, vmIntrinsicID id, const char* name, address start) {
1531 if (!is_dumping_stub()) {
1532 return false;
1533 }
1534 AOTCodeCache* cache = open_for_dump();
1535 if (cache == nullptr) {
1536 return false;
1537 }
1538 log_info(aot, codecache, stubs)("Writing stub '%s' id:%d to AOT Code Cache", name, (int)id);
1539 if (!cache->align_write()) {
1540 return false;
1541 }
1542 #ifdef ASSERT
1543 CodeSection* cs = cgen->assembler()->code_section();
1544 if (cs->has_locs()) {
1545 uint reloc_count = cs->locs_count();
1546 tty->print_cr("======== write stubs code section relocations [%d]:", reloc_count);
1547 // Collect additional data
1548 RelocIterator iter(cs);
1549 while (iter.next()) {
1550 switch (iter.type()) {
1551 case relocInfo::none:
1552 break;
1553 default: {
1554 iter.print_current_on(tty);
1555 fatal("stub's relocation %d unimplemented", (int)iter.type());
1556 break;
1557 }
1558 }
1559 }
1560 }
1561 #endif
1562 uint entry_position = cache->_write_position;
1563
1564 // Write code
1565 uint code_offset = 0;
1566 uint code_size = cgen->assembler()->pc() - start;
1567 uint n = cache->write_bytes(start, code_size);
1568 if (n != code_size) {
1569 return false;
1570 }
1571 // Write name
1572 uint name_offset = cache->_write_position - entry_position;
1573   uint name_size = (uint)strlen(name) + 1; // Includes terminating '\0'
1574 n = cache->write_bytes(name, name_size);
1575 if (n != name_size) {
1576 return false;
1577 }
1578 uint entry_size = cache->_write_position - entry_position;
1579 AOTCodeEntry* entry = new(cache) AOTCodeEntry(entry_position, entry_size, name_offset, name_size,
1580 code_offset, code_size,
1581 AOTCodeEntry::Stub, (uint32_t)id);
1582 log_info(aot, codecache, stubs)("Wrote stub '%s' id:%d to AOT Code Cache", name, (int)id);
1583 return true;
1584 }
1585
1586 bool AOTCodeCache::load_stub(StubCodeGenerator* cgen, vmIntrinsicID id, const char* name, address start) {
1587 if (!is_using_stub()) {
1588 return false;
1589 }
1590 assert(start == cgen->assembler()->pc(), "wrong buffer");
1591 AOTCodeCache* cache = open_for_use();
1592 if (cache == nullptr) {
1593 return false;
1594 }
1595 AOTCodeEntry* entry = cache->find_entry(AOTCodeEntry::Stub, (uint)id);
1596 if (entry == nullptr) {
1597 return false;
1598 }
1599 uint entry_position = entry->offset();
1600 // Read name
1601 uint name_offset = entry->name_offset() + entry_position;
1602   uint name_size = entry->name_size(); // Includes terminating '\0'
1603 const char* saved_name = cache->addr(name_offset);
1604 if (strncmp(name, saved_name, (name_size - 1)) != 0) {
1605 log_warning(aot, codecache)("Saved stub's name '%s' is different from '%s' for id:%d", saved_name, name, (int)id);
1606 cache->set_failed();
1607 report_load_failure();
1608 return false;
1609 }
1610 log_info(aot, codecache, stubs)("Reading stub '%s' id:%d from AOT Code Cache", name, (int)id);
1611 // Read code
1612 uint code_offset = entry->code_offset() + entry_position;
1613 uint code_size = entry->code_size();
1614 copy_bytes(cache->addr(code_offset), start, code_size);
1615 cgen->assembler()->code_section()->set_end(start + code_size);
1616 log_info(aot, codecache, stubs)("Read stub '%s' id:%d from AOT Code Cache", name, (int)id);
1617 return true;
1618 }
1619
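// Entry point for caching a freshly compiled nmethod. Only C1/C2 compilations of normal
// (non-OSR) entry points are stored, and tier-3 (C1 with full profiling) code is skipped;
// the actual serialization is done by write_nmethod() below.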
1620 AOTCodeEntry* AOTCodeCache::store_nmethod(nmethod* nm, AbstractCompiler* compiler, bool for_preload) {
1621 if (!is_dumping_code()) {
1622 return nullptr;
1623 }
1624 if (!CDSConfig::is_dumping_aot_code()) {
1625 return nullptr; // The metadata and heap in the CDS image haven't been finalized yet.
1626 }
1627 AOTCodeCache* cache = open_for_dump();
1628 if (cache == nullptr) {
1629 return nullptr; // Cache file is closed
1630 }
1631 if (nm->is_osr_method()) {
1632 return nullptr; // No OSR
1633 }
1634 if (!compiler->is_c1() && !compiler->is_c2()) {
1635 // Only c1 and c2 compilers
1636 return nullptr;
1637 }
1638 int comp_level = nm->comp_level();
1639 if (comp_level == CompLevel_full_profile) {
1640     // Do not cache C1 compilations with full profiling, i.e. tier 3
1641 return nullptr;
1642 }
1643 assert(comp_level == CompLevel_simple || comp_level == CompLevel_limited_profile || comp_level == CompLevel_full_optimization, "must be");
1644
1645 TraceTime t1("Total time to store AOT code", &_t_totalStore, enable_timers(), false);
1646 AOTCodeEntry* entry = nullptr;
1647 entry = cache->write_nmethod(nm, for_preload);
1648 if (entry == nullptr) {
1649 log_info(aot, codecache, nmethod)("%d (L%d): nmethod store attempt failed", nm->compile_id(), comp_level);
1650 }
1651 return entry;
1652 }
1653
1654 AOTCodeEntry* AOTCodeCache::write_nmethod(nmethod* nm, bool for_preload) {
1655 AOTCodeCache* cache = open_for_dump();
1656 assert(cache != nullptr, "sanity check");
1657 assert(!nm->has_clinit_barriers() || (ClassInitBarrierMode > 0), "sanity");
1658 uint comp_id = nm->compile_id();
1659 uint comp_level = nm->comp_level();
1660 Method* method = nm->method();
1661 bool method_in_cds = MetaspaceShared::is_in_shared_metaspace((address)method);
1662 InstanceKlass* holder = method->method_holder();
1663 bool klass_in_cds = holder->is_shared() && !holder->defined_by_other_loaders();
1664 bool builtin_loader = holder->class_loader_data()->is_builtin_class_loader_data();
1665 if (!builtin_loader) {
1666 ResourceMark rm;
1667 log_info(aot, codecache, nmethod)("%d (L%d): Skip method '%s' loaded by custom class loader %s", comp_id, (int)comp_level, method->name_and_sig_as_C_string(), holder->class_loader_data()->loader_name());
1668 return nullptr;
1669 }
1670 if (for_preload && !(method_in_cds && klass_in_cds)) {
1671 ResourceMark rm;
1672 log_info(aot, codecache, nmethod)("%d (L%d): Skip method '%s' for preload: not in CDS", comp_id, (int)comp_level, method->name_and_sig_as_C_string());
1673 return nullptr;
1674 }
1675 assert(!for_preload || method_in_cds, "sanity");
1676 _for_preload = for_preload;
1677 _has_clinit_barriers = nm->has_clinit_barriers();
1678
1679 if (!align_write()) {
1680 return nullptr;
1681 }
1682
1683 uint entry_position = _write_position;
1684
1685 // Write name
1686 uint name_offset = 0;
1687 uint name_size = 0;
1688 uint hash = 0;
1689 uint n;
1690 {
1691 ResourceMark rm;
1692 const char* name = method->name_and_sig_as_C_string();
1693     log_info(aot, codecache, nmethod)("%d (L%d): Writing nmethod '%s' (comp level: %d%s) to AOT Code Cache",
1694                                       comp_id, (int)comp_level, name, comp_level,
1695                                       (nm->has_clinit_barriers() ? ", has clinit barriers" : ""));
1696
1697 LogStreamHandle(Info, aot, codecache, loader) log;
1698 if (log.is_enabled()) {
1699 oop loader = holder->class_loader();
1700 oop domain = holder->protection_domain();
1701 log.print("Holder: ");
1702 holder->print_value_on(&log);
1703 log.print(" loader: ");
1704 if (loader == nullptr) {
1705 log.print("nullptr");
1706 } else {
1707 loader->print_value_on(&log);
1708 }
1709 log.print(" domain: ");
1710 if (domain == nullptr) {
1711 log.print("nullptr");
1712 } else {
1713 domain->print_value_on(&log);
1714 }
1715 log.cr();
1716 }
1717 name_offset = _write_position - entry_position;
1718     name_size = (uint)strlen(name) + 1; // Includes terminating '\0'
1719 n = write_bytes(name, name_size);
1720 if (n != name_size) {
1721 return nullptr;
1722 }
1723 hash = java_lang_String::hash_code((const jbyte*)name, (int)strlen(name));
1724 }
1725
1726 // Write CodeBlob
1727 if (!cache->align_write()) {
1728 return nullptr;
1729 }
1730 uint blob_offset = cache->_write_position - entry_position;
1731 address archive_buffer = cache->reserve_bytes(nm->size());
1732 if (archive_buffer == nullptr) {
1733 return nullptr;
1734 }
1735 CodeBlob::archive_blob(nm, archive_buffer);
1736
1737 uint reloc_data_size = nm->relocation_size();
1738 n = write_bytes((address)nm->relocation_begin(), reloc_data_size);
1739 if (n != reloc_data_size) {
1740 return nullptr;
1741 }
1742
1743 // Write oops and metadata present in the nmethod's data region
1744 if (!write_oops(nm)) {
1745 if (lookup_failed() && !failed()) {
1746 // Skip this method and reposition file
1747 set_write_position(entry_position);
1748 }
1749 return nullptr;
1750 }
1751 if (!write_metadata(nm)) {
1752 if (lookup_failed() && !failed()) {
1753 // Skip this method and reposition file
1754 set_write_position(entry_position);
1755 }
1756 return nullptr;
1757 }
1758
1759 bool has_oop_maps = false;
1760 if (nm->oop_maps() != nullptr) {
1761 if (!cache->write_oop_map_set(*nm)) {
1762 return nullptr;
1763 }
1764 has_oop_maps = true;
1765 }
1766
1767 uint immutable_data_size = nm->immutable_data_size();
1768 n = write_bytes(nm->immutable_data_begin(), immutable_data_size);
1769 if (n != immutable_data_size) {
1770 return nullptr;
1771 }
1772
1773 JavaThread* thread = JavaThread::current();
1774 HandleMark hm(thread);
1775 GrowableArray<Handle> oop_list;
1776 GrowableArray<Metadata*> metadata_list;
1777
1778 nm->create_reloc_immediates_list(thread, oop_list, metadata_list);
1779 if (!write_nmethod_reloc_immediates(oop_list, metadata_list)) {
1780 if (lookup_failed() && !failed()) {
1781 // Skip this method and reposition file
1782 set_write_position(entry_position);
1783 }
1784 return nullptr;
1785 }
1786
1787 if (!write_relocations(*nm, &oop_list, &metadata_list)) {
1788 return nullptr;
1789 }
1790
1791 #ifndef PRODUCT
1792 if (!cache->write_asm_remarks(nm->asm_remarks(), /* use_string_table */ false)) {
1793 return nullptr;
1794 }
1795 if (!cache->write_dbg_strings(nm->dbg_strings(), /* use_string_table */ false)) {
1796 return nullptr;
1797 }
1798 #endif /* PRODUCT */
1799
1800 uint entry_size = _write_position - entry_position;
1801 AOTCodeEntry* entry = new (this) AOTCodeEntry(AOTCodeEntry::Code, hash,
1802 entry_position, entry_size,
1803 name_offset, name_size,
1804 blob_offset, has_oop_maps,
1805 nm->content_begin(), comp_level, comp_id,
1806 nm->has_clinit_barriers(), for_preload);
1807 if (method_in_cds) {
1808 entry->set_method(method);
1809 }
1810 #ifdef ASSERT
1811 if (nm->has_clinit_barriers() || for_preload) {
1812 assert(for_preload, "sanity");
1813 assert(entry->method() != nullptr, "sanity");
1814 }
1815 #endif
1816 {
1817 ResourceMark rm;
1818 const char* name = nm->method()->name_and_sig_as_C_string();
1819 log_info(aot, codecache, nmethod)("%d (L%d): Wrote nmethod '%s'%s to AOT Code Cache",
1820 comp_id, (int)comp_level, name, (for_preload ? " (for preload)" : ""));
1821 }
1822 if (VerifyCachedCode) {
1823 return nullptr;
1824 }
1825 return entry;
1826 }
1827
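// Loads a previously stored nmethod for the current CompileTask. The AOTCodeEntry has
// already been looked up and attached to the task, so this mainly drives AOTCodeReader
// and records load timing and success/failure on the task and entry.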
1828 bool AOTCodeCache::load_nmethod(ciEnv* env, ciMethod* target, int entry_bci, AbstractCompiler* compiler, CompLevel comp_level) {
1829 if (!is_using_code()) {
1830 return false;
1831 }
1832 AOTCodeCache* cache = open_for_use();
1833 if (cache == nullptr) {
1834 return false;
1835 }
1836 assert(entry_bci == InvocationEntryBci, "unexpected entry_bci=%d", entry_bci);
1837 TraceTime t1("Total time to load AOT code", &_t_totalLoad, enable_timers(), false);
1838 CompileTask* task = env->task();
1839 task->mark_aot_load_start(os::elapsed_counter());
1840 AOTCodeEntry* entry = task->aot_code_entry();
1841 bool preload = task->preload();
1842 assert(entry != nullptr, "sanity");
1843 if (log_is_enabled(Info, aot, codecache, nmethod)) {
1844 VM_ENTRY_MARK;
1845 ResourceMark rm;
1846 methodHandle method(THREAD, target->get_Method());
1847 const char* target_name = method->name_and_sig_as_C_string();
1848 uint hash = java_lang_String::hash_code((const jbyte*)target_name, (int)strlen(target_name));
1849 bool clinit_brs = entry->has_clinit_barriers();
1850 log_info(aot, codecache, nmethod)("%d (L%d): %s nmethod '%s' (hash: " UINT32_FORMAT_X_0 "%s)",
1851 task->compile_id(), task->comp_level(), (preload ? "Preloading" : "Reading"),
1852 target_name, hash, (clinit_brs ? ", has clinit barriers" : ""));
1853 }
1854 ReadingMark rdmk;
1855 if (rdmk.failed()) {
1856 // Cache is closed, cannot touch anything.
1857 return false;
1858 }
1859
1860 AOTCodeReader reader(cache, entry, task);
1861 bool success = reader.compile_nmethod(env, target, compiler);
1862 if (success) {
1863 task->set_num_inlined_bytecodes(entry->num_inlined_bytecodes());
1864 } else {
1865 entry->set_load_fail();
1866 }
1867 task->mark_aot_load_finish(os::elapsed_counter());
1868 return success;
1869 }
1870
1871 bool AOTCodeReader::compile_nmethod(ciEnv* env, ciMethod* target, AbstractCompiler* compiler) {
1872 CompileTask* task = env->task();
1873 AOTCodeEntry* aot_code_entry = (AOTCodeEntry*)_entry;
1874 nmethod* nm = nullptr;
1875
1876 uint entry_position = aot_code_entry->offset();
1877 uint archived_nm_offset = entry_position + aot_code_entry->code_offset();
1878 nmethod* archived_nm = (nmethod*)addr(archived_nm_offset);
1879 set_read_position(archived_nm_offset + archived_nm->size());
1880
1881 OopRecorder* oop_recorder = new OopRecorder(env->arena());
1882 env->set_oop_recorder(oop_recorder);
1883
1884 uint offset;
1885
1886 offset = read_position();
1887 address reloc_data = (address)addr(offset);
1888 offset += archived_nm->relocation_size();
1889 set_read_position(offset);
1890
1891 // Read oops and metadata
1892 VM_ENTRY_MARK
1893 GrowableArray<Handle> oop_list;
1894 GrowableArray<Metadata*> metadata_list;
1895
1896 if (!read_oop_metadata_list(THREAD, target, oop_list, metadata_list, oop_recorder)) {
1897 return false;
1898 }
1899
1900 ImmutableOopMapSet* oopmaps = read_oop_map_set();
1901
1902 offset = read_position();
1903 address immutable_data = (address)addr(offset);
1904 offset += archived_nm->immutable_data_size();
1905 set_read_position(offset);
1906
1907 GrowableArray<Handle> reloc_immediate_oop_list;
1908 GrowableArray<Metadata*> reloc_immediate_metadata_list;
1909 if (!read_oop_metadata_list(THREAD, target, reloc_immediate_oop_list, reloc_immediate_metadata_list, nullptr)) {
1910 return false;
1911 }
1912
1913 // Read Dependencies (compressed already)
1914 Dependencies* dependencies = new Dependencies(env);
1915 dependencies->set_content(immutable_data, archived_nm->dependencies_size());
1916 env->set_dependencies(dependencies);
1917
1918 const char* name = addr(entry_position + aot_code_entry->name_offset());
1919
1920 if (VerifyCachedCode) {
1921 return false;
1922 }
1923
1924 TraceTime t1("Total time to register AOT nmethod", &_t_totalRegister, enable_timers(), false);
1925 nm = env->register_aot_method(THREAD,
1926 target,
1927 compiler,
1928 archived_nm,
1929 reloc_data,
1930 oop_list,
1931 metadata_list,
1932 oopmaps,
1933 immutable_data,
1934 reloc_immediate_oop_list,
1935 reloc_immediate_metadata_list,
1936 this);
1937 bool success = task->is_success();
1938 if (success) {
1939 aot_code_entry->set_loaded();
1940 log_info(aot, codecache, nmethod)("%d (L%d): Read nmethod '%s' from AOT Code Cache", compile_id(), comp_level(), name);
1941 #ifdef ASSERT
1942 LogStreamHandle(Debug, aot, codecache, nmethod) log;
1943 if (log.is_enabled()) {
1944 FlagSetting fs(PrintRelocations, true);
1945 nm->print_on(&log);
1946 nm->decode2(&log);
1947 }
1948 #endif
1949 }
1950
1951 return success;
1952 }
1953
1954 bool skip_preload(methodHandle mh) {
1955 if (!mh->method_holder()->is_loaded()) {
1956 return true;
1957 }
1958 DirectiveSet* directives = DirectivesStack::getMatchingDirective(mh, nullptr);
1959 if (directives->DontPreloadOption) {
1960 LogStreamHandle(Info, aot, codecache, init) log;
1961 if (log.is_enabled()) {
1962 log.print("Exclude preloading code for ");
1963 mh->print_value_on(&log);
1964 }
1965 return true;
1966 }
1967 return false;
1968 }
1969
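// Preload eligibility check: code is generated "for preload" only for normal entry points,
// when code dumping is enabled with class-init barriers, and when the method's metadata
// can be referenced from the AOT cache.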
1970 bool AOTCodeCache::gen_preload_code(ciMethod* m, int entry_bci) {
1971 VM_ENTRY_MARK;
1972 return (entry_bci == InvocationEntryBci) && is_dumping_code() && (ClassInitBarrierMode > 0) &&
1973 AOTCacheAccess::can_generate_aot_code(m->get_Method());
1974 }
1975
1976 void AOTCodeCache::preload_code(JavaThread* thread) {
1977 if (!is_using_code()) {
1978 return;
1979 }
1980 if ((DisableCachedCode & (1 << 3)) != 0) {
1981 return; // no preloaded code (level 5);
1982 }
1983 _cache->preload_startup_code(thread);
1984 }
1985
1986 void AOTCodeCache::preload_startup_code(TRAPS) {
1987 if (CompilationPolicy::compiler_count(CompLevel_full_optimization) == 0) {
1988     // Since we reuse the CompileBroker API to install cached code, we are required to have a JIT compiler for the
1989     // level we want (that is, CompLevel_full_optimization).
1990 return;
1991 }
1992 assert(_for_use, "sanity");
1993 uint count = _load_header->entries_count();
1994 if (_load_entries == nullptr) {
1995 // Read it
1996 _search_entries = (uint*)addr(_load_header->entries_offset()); // [id, index]
1997 _load_entries = (AOTCodeEntry*)(_search_entries + 2 * count);
1998 log_info(aot, codecache, init)("Read %d entries table at offset %d from AOT Code Cache", count, _load_header->entries_offset());
1999 }
2000 uint preload_entries_count = _load_header->preload_entries_count();
2001 if (preload_entries_count > 0) {
2002 uint* entries_index = (uint*)addr(_load_header->preload_entries_offset());
2003 log_info(aot, codecache, init)("Load %d preload entries from AOT Code Cache", preload_entries_count);
2004 uint count = MIN2(preload_entries_count, SCLoadStop);
2005 for (uint i = SCLoadStart; i < count; i++) {
2006 uint index = entries_index[i];
2007 AOTCodeEntry* entry = &(_load_entries[index]);
2008 if (entry->not_entrant()) {
2009 continue;
2010 }
2011 Method* m = AOTCacheAccess::convert_offset_to_method(entry->method_offset());
2012 entry->set_method(m);
2013 methodHandle mh(THREAD, entry->method());
2014 assert((mh.not_null() && MetaspaceShared::is_in_shared_metaspace((address)mh())), "sanity");
2015 if (skip_preload(mh)) {
2016 continue; // Exclude preloading for this method
2017 }
2018 assert(mh->method_holder()->is_loaded(), "");
2019 if (!mh->method_holder()->is_linked()) {
2020 assert(!HAS_PENDING_EXCEPTION, "");
2021 mh->method_holder()->link_class(THREAD);
2022 if (HAS_PENDING_EXCEPTION) {
2023 LogStreamHandle(Info, aot, codecache) log;
2024 if (log.is_enabled()) {
2025 ResourceMark rm;
2026 log.print("Linkage failed for %s: ", mh->method_holder()->external_name());
2027 THREAD->pending_exception()->print_value_on(&log);
2028 if (log_is_enabled(Debug, aot, codecache)) {
2029 THREAD->pending_exception()->print_on(&log);
2030 }
2031 }
2032 CLEAR_PENDING_EXCEPTION;
2033 }
2034 }
2035 if (mh->aot_code_entry() != nullptr) {
2036 // Second C2 compilation of the same method could happen for
2037         // different reasons without marking the first entry as not entrant.
2038 continue; // Keep old entry to avoid issues
2039 }
2040 mh->set_aot_code_entry(entry);
2041 CompileBroker::compile_method(mh, InvocationEntryBci, CompLevel_full_optimization, 0, false, CompileTask::Reason_Preload, CHECK);
2042 }
2043 }
2044 }
2045
2046 // ------------ process code and data --------------
2047
2048 // Can't use -1. It is a valid value for a jump-to-itself destination
2049 // used by the static call stub: see NativeJump::jump_destination().
2050 #define BAD_ADDRESS_ID -2
2051
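// For every relocation in the blob we record one extra uint of data: an AOTCodeAddressTable
// id for call/runtime/external targets, an index into the immediate oop/metadata lists for
// immediate oop/metadata relocations, or 0 when no extra data is needed. fix_relocations()
// below consumes these values in the same order at load time.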
2052 bool AOTCodeCache::write_relocations(CodeBlob& code_blob, GrowableArray<Handle>* oop_list, GrowableArray<Metadata*>* metadata_list) {
2053 GrowableArray<uint> reloc_data;
2054 RelocIterator iter(&code_blob);
2055 LogStreamHandle(Trace, aot, codecache, reloc) log;
2056 while (iter.next()) {
2057 int idx = reloc_data.append(0); // default value
2058 switch (iter.type()) {
2059 case relocInfo::none:
2060 break;
2061 case relocInfo::oop_type: {
2062 oop_Relocation* r = (oop_Relocation*)iter.reloc();
2063 if (r->oop_is_immediate()) {
2064 assert(oop_list != nullptr, "sanity check");
2065 // store index of oop in the reloc immediate oop list
2066 Handle h(JavaThread::current(), r->oop_value());
2067 int oop_idx = oop_list->find(h);
2068 assert(oop_idx != -1, "sanity check");
2069 reloc_data.at_put(idx, (uint)oop_idx);
2070 }
2071 break;
2072 }
2073 case relocInfo::metadata_type: {
2074 metadata_Relocation* r = (metadata_Relocation*)iter.reloc();
2075 if (r->metadata_is_immediate()) {
2076 assert(metadata_list != nullptr, "sanity check");
2077 // store index of metadata in the reloc immediate metadata list
2078 int metadata_idx = metadata_list->find(r->metadata_value());
2079 assert(metadata_idx != -1, "sanity check");
2080 reloc_data.at_put(idx, (uint)metadata_idx);
2081 }
2082 break;
2083 }
2084 case relocInfo::virtual_call_type: // Fall through. They all call resolve_*_call blobs.
2085 case relocInfo::opt_virtual_call_type:
2086 case relocInfo::static_call_type: {
2087 CallRelocation* r = (CallRelocation*)iter.reloc();
2088 address dest = r->destination();
2089 if (dest == r->addr()) { // possible call via trampoline on Aarch64
2090 dest = (address)-1; // do nothing in this case when loading this relocation
2091 }
2092 int id = _table->id_for_address(dest, iter, &code_blob);
2093 if (id == BAD_ADDRESS_ID) {
2094 return false;
2095 }
2096 reloc_data.at_put(idx, id);
2097 break;
2098 }
2099 case relocInfo::trampoline_stub_type: {
2100 address dest = ((trampoline_stub_Relocation*)iter.reloc())->destination();
2101 int id = _table->id_for_address(dest, iter, &code_blob);
2102 if (id == BAD_ADDRESS_ID) {
2103 return false;
2104 }
2105 reloc_data.at_put(idx, id);
2106 break;
2107 }
2108 case relocInfo::static_stub_type:
2109 break;
2110 case relocInfo::runtime_call_type: {
2111 // Record offset of runtime destination
2112 CallRelocation* r = (CallRelocation*)iter.reloc();
2113 address dest = r->destination();
2114 if (dest == r->addr()) { // possible call via trampoline on Aarch64
2115 dest = (address)-1; // do nothing in this case when loading this relocation
2116 }
2117 int id = _table->id_for_address(dest, iter, &code_blob);
2118 if (id == BAD_ADDRESS_ID) {
2119 return false;
2120 }
2121 reloc_data.at_put(idx, id);
2122 break;
2123 }
2124 case relocInfo::runtime_call_w_cp_type:
2125 log_debug(aot, codecache, reloc)("runtime_call_w_cp_type relocation is not implemented");
2126 return false;
2127 case relocInfo::external_word_type: {
2128 // Record offset of runtime target
2129 address target = ((external_word_Relocation*)iter.reloc())->target();
2130 int id = _table->id_for_address(target, iter, &code_blob);
2131 if (id == BAD_ADDRESS_ID) {
2132 return false;
2133 }
2134 reloc_data.at_put(idx, id);
2135 break;
2136 }
2137 case relocInfo::internal_word_type:
2138 break;
2139 case relocInfo::section_word_type:
2140 break;
2141 case relocInfo::poll_type:
2142 break;
2143 case relocInfo::poll_return_type:
2144 break;
2145 case relocInfo::post_call_nop_type:
2146 break;
2147 case relocInfo::entry_guard_type:
2148 break;
2149 default:
2150 log_debug(aot, codecache, reloc)("relocation %d unimplemented", (int)iter.type());
2151 return false;
2152 break;
2153 }
2154 if (log.is_enabled()) {
2155 iter.print_current_on(&log);
2156 }
2157 }
2158
2159 // Write additional relocation data: uint per relocation
2160 // Write the count first
2161 int count = reloc_data.length();
2162 write_bytes(&count, sizeof(int));
2163 for (GrowableArrayIterator<uint> iter = reloc_data.begin();
2164 iter != reloc_data.end(); ++iter) {
2165 uint value = *iter;
2166 int n = write_bytes(&value, sizeof(uint));
2167 if (n != sizeof(uint)) {
2168 return false;
2169 }
2170 }
2171 return true;
2172 }
2173
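// Walks the relocations of the freshly created blob and patches them using the extra data
// recorded by write_relocations(); the iteration order must match the writer exactly.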
2174 void AOTCodeReader::fix_relocations(CodeBlob* code_blob, GrowableArray<Handle>* oop_list, GrowableArray<Metadata*>* metadata_list) {
2175 LogStreamHandle(Trace, aot, reloc) log;
2176 uint offset = read_position();
2177 int count = *(int*)addr(offset);
2178 offset += sizeof(int);
2179 if (log.is_enabled()) {
2180 log.print_cr("======== extra relocations count=%d", count);
2181 }
2182 uint* reloc_data = (uint*)addr(offset);
2183 offset += (count * sizeof(uint));
2184 set_read_position(offset);
2185
2186 RelocIterator iter(code_blob);
2187 int j = 0;
2188 while (iter.next()) {
2189 switch (iter.type()) {
2190 case relocInfo::none:
2191 break;
2192 case relocInfo::oop_type: {
2193 assert(code_blob->is_nmethod(), "sanity check");
2194 oop_Relocation* r = (oop_Relocation*)iter.reloc();
2195 if (r->oop_is_immediate()) {
2196 assert(oop_list != nullptr, "sanity check");
2197 Handle h = oop_list->at(reloc_data[j]);
2198 r->set_value(cast_from_oop<address>(h()));
2199 } else {
2200 r->fix_oop_relocation();
2201 }
2202 break;
2203 }
2204 case relocInfo::metadata_type: {
2205 assert(code_blob->is_nmethod(), "sanity check");
2206 metadata_Relocation* r = (metadata_Relocation*)iter.reloc();
2207 Metadata* m;
2208 if (r->metadata_is_immediate()) {
2209 assert(metadata_list != nullptr, "sanity check");
2210 m = metadata_list->at(reloc_data[j]);
2211 } else {
2212 // Get already updated value from nmethod.
2213 int index = r->metadata_index();
2214 m = code_blob->as_nmethod()->metadata_at(index);
2215 }
2216 r->set_value((address)m);
2217 break;
2218 }
2219 case relocInfo::virtual_call_type: // Fall through. They all call resolve_*_call blobs.
2220 case relocInfo::opt_virtual_call_type:
2221 case relocInfo::static_call_type: {
2222 address dest = _cache->address_for_id(reloc_data[j]);
2223 if (dest != (address)-1) {
2224 ((CallRelocation*)iter.reloc())->set_destination(dest);
2225 }
2226 break;
2227 }
2228 case relocInfo::trampoline_stub_type: {
2229 address dest = _cache->address_for_id(reloc_data[j]);
2230 if (dest != (address)-1) {
2231 ((trampoline_stub_Relocation*)iter.reloc())->set_destination(dest);
2232 }
2233 break;
2234 }
2235 case relocInfo::static_stub_type:
2236 break;
2237 case relocInfo::runtime_call_type: {
2238 address dest = _cache->address_for_id(reloc_data[j]);
2239 if (dest != (address)-1) {
2240 ((CallRelocation*)iter.reloc())->set_destination(dest);
2241 }
2242 break;
2243 }
2244 case relocInfo::runtime_call_w_cp_type:
2245 // this relocation should not be in cache (see write_relocations)
2246 assert(false, "runtime_call_w_cp_type relocation is not implemented");
2247 break;
2248 case relocInfo::external_word_type: {
2249 address target = _cache->address_for_id(reloc_data[j]);
2250 // Add external address to global table
2251 int index = ExternalsRecorder::find_index(target);
2252 // Update index in relocation
2253 Relocation::add_jint(iter.data(), index);
2254 external_word_Relocation* reloc = (external_word_Relocation*)iter.reloc();
2255 assert(reloc->target() == target, "sanity");
2256 reloc->set_value(target); // Patch address in the code
2257 break;
2258 }
2259 case relocInfo::internal_word_type: {
2260 internal_word_Relocation* r = (internal_word_Relocation*)iter.reloc();
2261 r->fix_relocation_after_aot_load(aot_code_entry()->dumptime_content_start_addr(), code_blob->content_begin());
2262 break;
2263 }
2264 case relocInfo::section_word_type: {
2265 section_word_Relocation* r = (section_word_Relocation*)iter.reloc();
2266 r->fix_relocation_after_aot_load(aot_code_entry()->dumptime_content_start_addr(), code_blob->content_begin());
2267 break;
2268 }
2269 case relocInfo::poll_type:
2270 break;
2271 case relocInfo::poll_return_type:
2272 break;
2273 case relocInfo::post_call_nop_type:
2274 break;
2275 case relocInfo::entry_guard_type:
2276 break;
2277 default:
2278 assert(false,"relocation %d unimplemented", (int)iter.type());
2279 break;
2280 }
2281 if (log.is_enabled()) {
2282 iter.print_current_on(&log);
2283 }
2284 j++;
2285 }
2286 assert(j == count, "sanity");
2287 }
2288
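// Immediate oops and metadata referenced directly from relocations are written as two
// counted lists (oops first, then metadata); the reader rebuilds them with
// read_oop_metadata_list() before relocations are fixed.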
2289 bool AOTCodeCache::write_nmethod_reloc_immediates(GrowableArray<Handle>& oop_list, GrowableArray<Metadata*>& metadata_list) {
2290 int count = oop_list.length();
2291 if (!write_bytes(&count, sizeof(int))) {
2292 return false;
2293 }
2294 for (GrowableArrayIterator<Handle> iter = oop_list.begin();
2295 iter != oop_list.end(); ++iter) {
2296 Handle h = *iter;
2297 if (!write_oop(h())) {
2298 return false;
2299 }
2300 }
2301
2302 count = metadata_list.length();
2303 if (!write_bytes(&count, sizeof(int))) {
2304 return false;
2305 }
2306 for (GrowableArrayIterator<Metadata*> iter = metadata_list.begin();
2307 iter != metadata_list.end(); ++iter) {
2308 Metadata* m = *iter;
2309 if (!write_metadata(m)) {
2310 return false;
2311 }
2312 }
2313 return true;
2314 }
2315
2316 bool AOTCodeCache::write_metadata(nmethod* nm) {
2317 int count = nm->metadata_count()-1;
2318 if (!write_bytes(&count, sizeof(int))) {
2319 return false;
2320 }
2321 for (Metadata** p = nm->metadata_begin(); p < nm->metadata_end(); p++) {
2322 if (!write_metadata(*p)) {
2323 return false;
2324 }
2325 }
2326 return true;
2327 }
2328
2329 bool AOTCodeCache::write_metadata(Metadata* m) {
2330 uint n = 0;
2331 if (m == nullptr) {
2332 DataKind kind = DataKind::Null;
2333 n = write_bytes(&kind, sizeof(int));
2334 if (n != sizeof(int)) {
2335 return false;
2336 }
2337 } else if (m == (Metadata*)Universe::non_oop_word()) {
2338 DataKind kind = DataKind::No_Data;
2339 n = write_bytes(&kind, sizeof(int));
2340 if (n != sizeof(int)) {
2341 return false;
2342 }
2343 } else if (m->is_klass()) {
2344 if (!write_klass((Klass*)m)) {
2345 return false;
2346 }
2347 } else if (m->is_method()) {
2348 if (!write_method((Method*)m)) {
2349 return false;
2350 }
2351 } else if (m->is_methodCounters()) {
2352 DataKind kind = DataKind::MethodCnts;
2353 n = write_bytes(&kind, sizeof(int));
2354 if (n != sizeof(int)) {
2355 return false;
2356 }
2357 if (!write_method(((MethodCounters*)m)->method())) {
2358 return false;
2359 }
2360 log_debug(aot, codecache, metadata)("%d (L%d): Write MethodCounters : " INTPTR_FORMAT, compile_id(), comp_level(), p2i(m));
2361 } else { // Not supported
2362 fatal("metadata : " INTPTR_FORMAT " unimplemented", p2i(m));
2363 return false;
2364 }
2365 return true;
2366 }
2367
2368 Metadata* AOTCodeReader::read_metadata(const methodHandle& comp_method) {
2369 uint code_offset = read_position();
2370 Metadata* m = nullptr;
2371 DataKind kind = *(DataKind*)addr(code_offset);
2372 code_offset += sizeof(DataKind);
2373 set_read_position(code_offset);
2374 if (kind == DataKind::Null) {
2375 m = (Metadata*)nullptr;
2376 } else if (kind == DataKind::No_Data) {
2377 m = (Metadata*)Universe::non_oop_word();
2378 } else if (kind == DataKind::Klass) {
2379 m = (Metadata*)read_klass(comp_method);
2380 } else if (kind == DataKind::Method) {
2381 m = (Metadata*)read_method(comp_method);
2382 } else if (kind == DataKind::MethodCnts) {
2383 kind = *(DataKind*)addr(code_offset);
2384 code_offset += sizeof(DataKind);
2385 set_read_position(code_offset);
2386 m = (Metadata*)read_method(comp_method);
2387 if (m != nullptr) {
2388 Method* method = (Method*)m;
2389 m = method->get_method_counters(Thread::current());
2390 if (m == nullptr) {
2391 set_lookup_failed();
2392 log_debug(aot, codecache, metadata)("%d (L%d): Failed to get MethodCounters", compile_id(), comp_level());
2393 } else {
2394 log_debug(aot, codecache, metadata)("%d (L%d): Read MethodCounters : " INTPTR_FORMAT, compile_id(), comp_level(), p2i(m));
2395 }
2396 }
2397 } else {
2398 set_lookup_failed();
2399 log_debug(aot, codecache, metadata)("%d (L%d): Unknown metadata's kind: %d", compile_id(), comp_level(), (int)kind);
2400 }
2401 return m;
2402 }
2403
2404 bool AOTCodeCache::write_method(Method* method) {
2405   ResourceMark rm; // For printing the method's name
2406 if (AOTCacheAccess::can_generate_aot_code(method)) {
2407 DataKind kind = DataKind::Method;
2408 uint n = write_bytes(&kind, sizeof(int));
2409 if (n != sizeof(int)) {
2410 return false;
2411 }
2412 uint method_offset = AOTCacheAccess::delta_from_base_address((address)method);
2413 n = write_bytes(&method_offset, sizeof(uint));
2414 if (n != sizeof(uint)) {
2415 return false;
2416 }
2417 log_debug(aot, codecache, metadata)("%d (L%d): Wrote method: %s @ 0x%08x",
2418 compile_id(), comp_level(), method->name_and_sig_as_C_string(), method_offset);
2419 return true;
2420 }
2421 log_debug(aot, codecache, metadata)("%d (L%d): Method is not archived: %s",
2422 compile_id(), comp_level(), method->name_and_sig_as_C_string());
2423 set_lookup_failed();
2424 return false;
2425 }
2426
2427 Method* AOTCodeReader::read_method(const methodHandle& comp_method) {
2428 uint code_offset = read_position();
2429 uint method_offset = *(uint*)addr(code_offset);
2430 code_offset += sizeof(uint);
2431 set_read_position(code_offset);
2432 Method* m = AOTCacheAccess::convert_offset_to_method(method_offset);
2433 if (!MetaspaceShared::is_in_shared_metaspace((address)m)) {
2434 // Something changed in CDS
2435 set_lookup_failed();
2436 log_debug(aot, codecache, metadata)("Lookup failed for shared method: " INTPTR_FORMAT " is not in CDS ", p2i((address)m));
2437 return nullptr;
2438 }
2439 assert(m->is_method(), "sanity");
2440 ResourceMark rm;
2441 Klass* k = m->method_holder();
2442 if (!k->is_instance_klass()) {
2443 set_lookup_failed();
2444 log_debug(aot, codecache, metadata)("%d '%s' (L%d): Lookup failed for holder %s: not instance klass",
2445 compile_id(), comp_method->name_and_sig_as_C_string(), comp_level(), k->external_name());
2446 return nullptr;
2447 } else if (!MetaspaceShared::is_in_shared_metaspace((address)k)) {
2448 set_lookup_failed();
2449 log_debug(aot, codecache, metadata)("%d '%s' (L%d): Lookup failed for holder %s: not in CDS",
2450 compile_id(), comp_method->name_and_sig_as_C_string(), comp_level(), k->external_name());
2451 return nullptr;
2452 } else if (!InstanceKlass::cast(k)->is_loaded()) {
2453 set_lookup_failed();
2454 log_debug(aot, codecache, metadata)("%d '%s' (L%d): Lookup failed for holder %s: not loaded",
2455 compile_id(), comp_method->name_and_sig_as_C_string(), comp_level(), k->external_name());
2456 return nullptr;
2457 } else if (!InstanceKlass::cast(k)->is_linked()) {
2458 set_lookup_failed();
2459 log_debug(aot, codecache, metadata)("%d '%s' (L%d): Lookup failed for holder %s: not linked%s",
2460 compile_id(), comp_method->name_and_sig_as_C_string(), comp_level(), k->external_name(), (_preload ? " for code preload" : ""));
2461 return nullptr;
2462 }
2463 log_debug(aot, codecache, metadata)("%d (L%d): Shared method lookup: %s",
2464 compile_id(), comp_level(), m->name_and_sig_as_C_string());
2465 return m;
2466 }
2467
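// A klass reference is stored as a packed state word ((array_dim << 1) | initialized bit)
// followed by the klass' offset from the AOT cache base address
// (AOTCacheAccess::delta_from_base_address). Object-array klasses are reduced to their
// bottom klass here and reconstructed by dimension on the read side.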
2468 bool AOTCodeCache::write_klass(Klass* klass) {
2469 uint array_dim = 0;
2470 if (klass->is_objArray_klass()) {
2471 array_dim = ObjArrayKlass::cast(klass)->dimension();
2472 klass = ObjArrayKlass::cast(klass)->bottom_klass(); // overwrites klass
2473 }
2474 uint init_state = 0;
2475 bool can_write = true;
2476 if (klass->is_instance_klass()) {
2477 InstanceKlass* ik = InstanceKlass::cast(klass);
2478 init_state = (ik->is_initialized() ? 1 : 0);
2479 can_write = AOTCacheAccess::can_generate_aot_code_for(ik);
2480 } else {
2481 can_write = AOTCacheAccess::can_generate_aot_code(klass);
2482 }
2483 ResourceMark rm;
2484 uint state = (array_dim << 1) | (init_state & 1);
2485 if (can_write) {
2486 DataKind kind = DataKind::Klass;
2487 uint n = write_bytes(&kind, sizeof(int));
2488 if (n != sizeof(int)) {
2489 return false;
2490 }
2491     // Record state of instance klass initialization and array dimensions.
2492 n = write_bytes(&state, sizeof(int));
2493 if (n != sizeof(int)) {
2494 return false;
2495 }
2496 uint klass_offset = AOTCacheAccess::delta_from_base_address((address)klass);
2497 n = write_bytes(&klass_offset, sizeof(uint));
2498 if (n != sizeof(uint)) {
2499 return false;
2500 }
2501 log_debug(aot, codecache, metadata)("%d (L%d): Registered klass: %s%s%s @ 0x%08x",
2502 compile_id(), comp_level(), klass->external_name(),
2503 (!klass->is_instance_klass() ? "" : (init_state == 1 ? " (initialized)" : " (not-initialized)")),
2504 (array_dim > 0 ? " (object array)" : ""), klass_offset);
2505 return true;
2506 }
2507   log_debug(aot, codecache, metadata)("%d (L%d): Klass is not archived: %s%s%s",
2508 compile_id(), comp_level(), klass->external_name(),
2509 (!klass->is_instance_klass() ? "" : (init_state == 1 ? " (initialized)" : " (not-initialized)")),
2510 (array_dim > 0 ? " (object array)" : ""));
2511 set_lookup_failed();
2512 return false;
2513 }
2514
2515 Klass* AOTCodeReader::read_klass(const methodHandle& comp_method) {
2516 uint code_offset = read_position();
2517 uint state = *(uint*)addr(code_offset);
2518 uint init_state = (state & 1);
2519 uint array_dim = (state >> 1);
2520 code_offset += sizeof(int);
2521 uint klass_offset = *(uint*)addr(code_offset);
2522 code_offset += sizeof(uint);
2523 set_read_position(code_offset);
2524 Klass* k = AOTCacheAccess::convert_offset_to_klass(klass_offset);
2525 if (!MetaspaceShared::is_in_shared_metaspace((address)k)) {
2526 // Something changed in CDS
2527 set_lookup_failed();
2528 log_debug(aot, codecache, metadata)("Lookup failed for shared klass: " INTPTR_FORMAT " is not in CDS ", p2i((address)k));
2529 return nullptr;
2530 }
2531 assert(k->is_klass(), "sanity");
2532 ResourceMark rm;
2533 if (k->is_instance_klass() && !InstanceKlass::cast(k)->is_loaded()) {
2534 set_lookup_failed();
2535 log_debug(aot, codecache, metadata)("%d '%s' (L%d): Lookup failed for klass %s: not loaded",
2536 compile_id(), comp_method->name_and_sig_as_C_string(), comp_level(), k->external_name());
2537 return nullptr;
2538 } else
2539   // Allow a klass that is not initialized if it was also uninitialized during code caching, or when preloading
2540 if (k->is_instance_klass() && !InstanceKlass::cast(k)->is_initialized() && (init_state == 1) && !_preload) {
2541 set_lookup_failed();
2542 log_debug(aot, codecache, metadata)("%d '%s' (L%d): Lookup failed for klass %s: not initialized",
2543 compile_id(), comp_method->name_and_sig_as_C_string(), comp_level(), k->external_name());
2544 return nullptr;
2545 }
2546 if (array_dim > 0) {
2547 assert(k->is_instance_klass() || k->is_typeArray_klass(), "sanity check");
2548 Klass* ak = k->array_klass_or_null(array_dim);
2549 // FIXME: what would it take to create an array class on the fly?
2550 // Klass* ak = k->array_klass(dim, JavaThread::current());
2551 // guarantee(JavaThread::current()->pending_exception() == nullptr, "");
2552 if (ak == nullptr) {
2553 set_lookup_failed();
2554 log_debug(aot, codecache, metadata)("%d (L%d): %d-dimension array klass lookup failed: %s",
2555 compile_id(), comp_level(), array_dim, k->external_name());
2556 }
2557 log_debug(aot, codecache, metadata)("%d (L%d): Klass lookup: %s (object array)", compile_id(), comp_level(), k->external_name());
2558 return ak;
2559 } else {
2560 log_debug(aot, codecache, metadata)("%d (L%d): Shared klass lookup: %s",
2561 compile_id(), comp_level(), k->external_name());
2562 return k;
2563 }
2564 }
2565
2566 bool AOTCodeCache::write_oop(jobject& jo) {
2567 oop obj = JNIHandles::resolve(jo);
2568 return write_oop(obj);
2569 }
2570
2571 bool AOTCodeCache::write_oop(oop obj) {
2572 DataKind kind;
2573 uint n = 0;
2574 if (obj == nullptr) {
2575 kind = DataKind::Null;
2576 n = write_bytes(&kind, sizeof(int));
2577 if (n != sizeof(int)) {
2578 return false;
2579 }
2580 } else if (cast_from_oop<void *>(obj) == Universe::non_oop_word()) {
2581 kind = DataKind::No_Data;
2582 n = write_bytes(&kind, sizeof(int));
2583 if (n != sizeof(int)) {
2584 return false;
2585 }
2586 } else if (java_lang_Class::is_instance(obj)) {
2587 if (java_lang_Class::is_primitive(obj)) {
2588 int bt = (int)java_lang_Class::primitive_type(obj);
2589 kind = DataKind::Primitive;
2590 n = write_bytes(&kind, sizeof(int));
2591 if (n != sizeof(int)) {
2592 return false;
2593 }
2594 n = write_bytes(&bt, sizeof(int));
2595 if (n != sizeof(int)) {
2596 return false;
2597 }
2598 log_debug(aot, codecache, oops)("%d (L%d): Write primitive type klass: %s", compile_id(), comp_level(), type2name((BasicType)bt));
2599 } else {
2600 Klass* klass = java_lang_Class::as_Klass(obj);
2601 if (!write_klass(klass)) {
2602 return false;
2603 }
2604 }
2605   } else if (java_lang_String::is_instance(obj)) {
2606 int k = AOTCacheAccess::get_archived_object_permanent_index(obj); // k >= 0 means obj is a "permanent heap object"
2607 ResourceMark rm;
2608 size_t length_sz = 0;
2609 const char* string = java_lang_String::as_utf8_string(obj, length_sz);
2610 if (k >= 0) {
2611 kind = DataKind::String;
2612 n = write_bytes(&kind, sizeof(int));
2613 if (n != sizeof(int)) {
2614 return false;
2615 }
2616 n = write_bytes(&k, sizeof(int));
2617 if (n != sizeof(int)) {
2618 return false;
2619 }
2620 log_debug(aot, codecache, oops)("%d (L%d): Write String object: " PTR_FORMAT " : %s", compile_id(), comp_level(), p2i(obj), string);
2621 return true;
2622 }
2623 // Not archived String object - bailout
2624 set_lookup_failed();
2625 log_debug(aot, codecache, oops)("%d (L%d): Not archived String object: " PTR_FORMAT " : %s",
2626 compile_id(), comp_level(), p2i(obj), string);
2627 return false;
2628 } else if (java_lang_Module::is_instance(obj)) {
2629 fatal("Module object unimplemented");
2630 } else if (java_lang_ClassLoader::is_instance(obj)) {
2631 if (obj == SystemDictionary::java_system_loader()) {
2632 kind = DataKind::SysLoader;
2633 log_debug(aot, codecache, oops)("%d (L%d): Write ClassLoader: java_system_loader", compile_id(), comp_level());
2634 } else if (obj == SystemDictionary::java_platform_loader()) {
2635 kind = DataKind::PlaLoader;
2636 log_debug(aot, codecache, oops)("%d (L%d): Write ClassLoader: java_platform_loader", compile_id(), comp_level());
2637 } else {
2638 fatal("ClassLoader object unimplemented");
2639 return false;
2640 }
2641 n = write_bytes(&kind, sizeof(int));
2642 if (n != sizeof(int)) {
2643 return false;
2644 }
2645   } else {
2646 int k = AOTCacheAccess::get_archived_object_permanent_index(obj); // k >= 0 means obj is a "permanent heap object"
2647 if (k >= 0) {
2648 kind = DataKind::MH_Oop;
2649 n = write_bytes(&kind, sizeof(int));
2650 if (n != sizeof(int)) {
2651 return false;
2652 }
2653 n = write_bytes(&k, sizeof(int));
2654 if (n != sizeof(int)) {
2655 return false;
2656 }
2657 log_debug(aot, codecache, oops)("%d (L%d): Write MH object: " PTR_FORMAT " : %s",
2658 compile_id(), comp_level(), p2i(obj), obj->klass()->external_name());
2659 return true;
2660 }
2661 // Not archived Java object - bailout
2662 set_lookup_failed();
2663 log_debug(aot, codecache, oops)("%d (L%d): Not archived Java object: " PTR_FORMAT " : %s",
2664 compile_id(), comp_level(), p2i(obj), obj->klass()->external_name());
2665 return false;
2666 }
2667 return true;
2668 }
2669
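// Oops are identified by the DataKind tags written by write_oop(): null, the non-oop word,
// class mirrors (via their Klass), primitive mirrors, archived Strings, the two builtin
// class loaders, and other "permanent" archived heap objects referenced by index.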
2670 oop AOTCodeReader::read_oop(JavaThread* thread, const methodHandle& comp_method) {
2671 uint code_offset = read_position();
2672 oop obj = nullptr;
2673 DataKind kind = *(DataKind*)addr(code_offset);
2674 code_offset += sizeof(DataKind);
2675 set_read_position(code_offset);
2676 if (kind == DataKind::Null) {
2677 return nullptr;
2678 } else if (kind == DataKind::No_Data) {
2679 return cast_to_oop(Universe::non_oop_word());
2680 } else if (kind == DataKind::Klass) {
2681 Klass* k = read_klass(comp_method);
2682 if (k == nullptr) {
2683 return nullptr;
2684 }
2685 obj = k->java_mirror();
2686 if (obj == nullptr) {
2687 set_lookup_failed();
2688 log_debug(aot, codecache, oops)("Lookup failed for java_mirror of klass %s", k->external_name());
2689 return nullptr;
2690 }
2691 } else if (kind == DataKind::Primitive) {
2692 code_offset = read_position();
2693 int t = *(int*)addr(code_offset);
2694 code_offset += sizeof(int);
2695 set_read_position(code_offset);
2696 BasicType bt = (BasicType)t;
2697 obj = java_lang_Class::primitive_mirror(bt);
2698 log_debug(aot, codecache, oops)("%d (L%d): Read primitive type klass: %s", compile_id(), comp_level(), type2name(bt));
2699 } else if (kind == DataKind::String) {
2700 code_offset = read_position();
2701 int k = *(int*)addr(code_offset);
2702 code_offset += sizeof(int);
2703 set_read_position(code_offset);
2704 obj = AOTCacheAccess::get_archived_object(k);
2705 if (obj == nullptr) {
2706 set_lookup_failed();
2707 log_debug(aot, codecache, oops)("Lookup failed for String object");
2708 return nullptr;
2709 }
2710 assert(java_lang_String::is_instance(obj), "must be string");
2711
2712 ResourceMark rm;
2713 size_t length_sz = 0;
2714 const char* string = java_lang_String::as_utf8_string(obj, length_sz);
2715 log_debug(aot, codecache, oops)("%d (L%d): Read String object: %s", compile_id(), comp_level(), string);
2716 } else if (kind == DataKind::SysLoader) {
2717 obj = SystemDictionary::java_system_loader();
2718 log_debug(aot, codecache, oops)("%d (L%d): Read java_system_loader", compile_id(), comp_level());
2719 } else if (kind == DataKind::PlaLoader) {
2720 obj = SystemDictionary::java_platform_loader();
2721 log_debug(aot, codecache, oops)("%d (L%d): Read java_platform_loader", compile_id(), comp_level());
2722 } else if (kind == DataKind::MH_Oop) {
2723 code_offset = read_position();
2724 int k = *(int*)addr(code_offset);
2725 code_offset += sizeof(int);
2726 set_read_position(code_offset);
2727 obj = AOTCacheAccess::get_archived_object(k);
2728 if (obj == nullptr) {
2729 set_lookup_failed();
2730 log_debug(aot, codecache, oops)("Lookup failed for MH object");
2731 return nullptr;
2732 }
2733 log_debug(aot, codecache, oops)("%d (L%d): Read MH object: " PTR_FORMAT " : %s",
2734 compile_id(), comp_level(), p2i(obj), obj->klass()->external_name());
2735 } else {
2736 set_lookup_failed();
2737 log_debug(aot, codecache, oops)("%d (L%d): Unknown oop's kind: %d",
2738 compile_id(), comp_level(), (int)kind);
2739 return nullptr;
2740 }
2741 return obj;
2742 }
2743
2744 bool AOTCodeReader::read_oop_metadata_list(JavaThread* thread, ciMethod* target, GrowableArray<Handle> &oop_list, GrowableArray<Metadata*> &metadata_list, OopRecorder* oop_recorder) {
2745 methodHandle comp_method(JavaThread::current(), target->get_Method());
2746 JavaThread* current = JavaThread::current();
2747 uint offset = read_position();
2748 int count = *(int *)addr(offset);
2749 offset += sizeof(int);
2750 set_read_position(offset);
2751 for (int i = 0; i < count; i++) {
2752 oop obj = read_oop(current, comp_method);
2753 if (lookup_failed()) {
2754 return false;
2755 }
2756 Handle h(thread, obj);
2757 oop_list.append(h);
2758 if (oop_recorder != nullptr) {
2759 jobject jo = JNIHandles::make_local(thread, obj);
2760 if (oop_recorder->is_real(jo)) {
2761 oop_recorder->find_index(jo);
2762 } else {
2763 oop_recorder->allocate_oop_index(jo);
2764 }
2765 }
2766 LogStreamHandle(Debug, aot, codecache, oops) log;
2767 if (log.is_enabled()) {
2768 log.print("%d: " INTPTR_FORMAT " ", i, p2i(obj));
2769 if (obj == Universe::non_oop_word()) {
2770 log.print("non-oop word");
2771 } else if (obj == nullptr) {
2772 log.print("nullptr-oop");
2773 } else {
2774 obj->print_value_on(&log);
2775 }
2776 log.cr();
2777 }
2778 }
2779
2780 offset = read_position();
2781 count = *(int *)addr(offset);
2782 offset += sizeof(int);
2783 set_read_position(offset);
2784 for (int i = 0; i < count; i++) {
2785 Metadata* m = read_metadata(comp_method);
2786 if (lookup_failed()) {
2787 return false;
2788 }
2789 metadata_list.append(m);
2790 if (oop_recorder != nullptr) {
2791 if (oop_recorder->is_real(m)) {
2792 oop_recorder->find_index(m);
2793 } else {
2794 oop_recorder->allocate_metadata_index(m);
2795 }
2796 }
2797 LogTarget(Debug, aot, codecache, metadata) log;
2798 if (log.is_enabled()) {
2799 LogStream ls(log);
2800 ls.print("%d: " INTPTR_FORMAT " ", i, p2i(m));
2801 if (m == (Metadata*)Universe::non_oop_word()) {
2802 ls.print("non-metadata word");
2803 } else if (m == nullptr) {
2804 ls.print("nullptr-oop");
2805 } else {
2806 Metadata::print_value_on_maybe_null(&ls, m);
2807 }
2808 ls.cr();
2809 }
2810 }
2811 return true;
2812 }
2813
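// Oop map sets are stored verbatim: a size prefix followed by the raw ImmutableOopMapSet
// bytes. The reader simply returns a pointer into the mapped cache region.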
2814 bool AOTCodeCache::write_oop_map_set(CodeBlob& cb) {
2815 ImmutableOopMapSet* oopmaps = cb.oop_maps();
2816 int oopmaps_size = oopmaps->nr_of_bytes();
2817 if (!write_bytes(&oopmaps_size, sizeof(int))) {
2818 return false;
2819 }
2820 uint n = write_bytes(oopmaps, oopmaps->nr_of_bytes());
2821 if (n != (uint)oopmaps->nr_of_bytes()) {
2822 return false;
2823 }
2824 return true;
2825 }
2826
2827 ImmutableOopMapSet* AOTCodeReader::read_oop_map_set() {
2828 uint offset = read_position();
2829 int size = *(int *)addr(offset);
2830 offset += sizeof(int);
2831 ImmutableOopMapSet* oopmaps = (ImmutableOopMapSet *)addr(offset);
2832 offset += size;
2833 set_read_position(offset);
2834 return oopmaps;
2835 }
2836
2837 bool AOTCodeCache::write_oops(nmethod* nm) {
2838 int count = nm->oops_count()-1;
2839 if (!write_bytes(&count, sizeof(int))) {
2840 return false;
2841 }
2842 for (oop* p = nm->oops_begin(); p < nm->oops_end(); p++) {
2843 if (!write_oop(*p)) {
2844 return false;
2845 }
2846 }
2847 return true;
2848 }
2849
2850 #ifndef PRODUCT
2851 bool AOTCodeCache::write_asm_remarks(AsmRemarks& asm_remarks, bool use_string_table) {
2852 // Write asm remarks
2853 uint* count_ptr = (uint *)reserve_bytes(sizeof(uint));
2854 if (count_ptr == nullptr) {
2855 return false;
2856 }
2857 uint count = 0;
2858 bool result = asm_remarks.iterate([&] (uint offset, const char* str) -> bool {
2859 log_trace(aot, codecache, stubs)("asm remark offset=%d, str='%s'", offset, str);
2860 uint n = write_bytes(&offset, sizeof(uint));
2861 if (n != sizeof(uint)) {
2862 return false;
2863 }
2864 if (use_string_table) {
2865 const char* cstr = add_C_string(str);
2866 int id = _table->id_for_C_string((address)cstr);
2867 assert(id != -1, "asm remark string '%s' not found in AOTCodeAddressTable", str);
2868 n = write_bytes(&id, sizeof(int));
2869 if (n != sizeof(int)) {
2870 return false;
2871 }
2872 } else {
2873 n = write_bytes(str, (uint)strlen(str) + 1);
2874 if (n != strlen(str) + 1) {
2875 return false;
2876 }
2877 }
2878 count += 1;
2879 return true;
2880 });
2881 *count_ptr = count;
2882 return result;
2883 }
2884
2885 void AOTCodeReader::read_asm_remarks(AsmRemarks& asm_remarks, bool use_string_table) {
2886 // Read asm remarks
2887 uint offset = read_position();
2888 uint count = *(uint *)addr(offset);
2889 offset += sizeof(uint);
2890 for (uint i = 0; i < count; i++) {
2891 uint remark_offset = *(uint *)addr(offset);
2892 offset += sizeof(uint);
2893 const char* remark = nullptr;
2894 if (use_string_table) {
2895 int remark_string_id = *(uint *)addr(offset);
2896 offset += sizeof(int);
2897 remark = (const char*)_cache->address_for_C_string(remark_string_id);
2898 } else {
2899 remark = (const char*)addr(offset);
2900 offset += (uint)strlen(remark)+1;
2901 }
2902 asm_remarks.insert(remark_offset, remark);
2903 }
2904 set_read_position(offset);
2905 }
2906
2907 bool AOTCodeCache::write_dbg_strings(DbgStrings& dbg_strings, bool use_string_table) {
2908 // Write dbg strings
2909 uint* count_ptr = (uint *)reserve_bytes(sizeof(uint));
2910 if (count_ptr == nullptr) {
2911 return false;
2912 }
2913 uint count = 0;
2914 bool result = dbg_strings.iterate([&] (const char* str) -> bool {
2915 log_trace(aot, codecache, stubs)("dbg string=%s", str);
2916 if (use_string_table) {
2917 const char* cstr = add_C_string(str);
2918 int id = _table->id_for_C_string((address)cstr);
2919       assert(id != -1, "dbg string '%s' not found in AOTCodeAddressTable", str);
2920 uint n = write_bytes(&id, sizeof(int));
2921 if (n != sizeof(int)) {
2922 return false;
2923 }
2924 } else {
2925 uint n = write_bytes(str, (uint)strlen(str) + 1);
2926 if (n != strlen(str) + 1) {
2927 return false;
2928 }
2929 }
2930 count += 1;
2931 return true;
2932 });
2933 *count_ptr = count;
2934 return result;
2935 }
2936
2937 void AOTCodeReader::read_dbg_strings(DbgStrings& dbg_strings, bool use_string_table) {
2938 // Read dbg strings
2939 uint offset = read_position();
2940 uint count = *(uint *)addr(offset);
2941 offset += sizeof(uint);
2942 for (uint i = 0; i < count; i++) {
2943 const char* str = nullptr;
2944 if (use_string_table) {
2945 int string_id = *(uint *)addr(offset);
2946 offset += sizeof(int);
2947 str = (const char*)_cache->address_for_C_string(string_id);
2948 } else {
2949 str = (const char*)addr(offset);
2950 offset += (uint)strlen(str)+1;
2951 }
2952 dbg_strings.insert(str);
2953 }
2954 set_read_position(offset);
2955 }
2956 #endif // PRODUCT
2957
2958 //======================= AOTCodeAddressTable ===============
2959
2960 // address table ids for generated routines, external addresses and C
2961 // string addresses are partitioned into positive integer ranges
2962 // defined by the following positive base and max values
2963 // i.e. [_extrs_base, _extrs_base + _extrs_max -1],
2964 // [_stubs_base, _stubs_base + _stubs_max -1],
2965 // ...
2966 // [_c_str_base, _c_str_base + _c_str_max -1],
2967 #define _extrs_max 140
2968 #define _stubs_max 210
2969 #define _shared_blobs_max 25
2970 #define _C1_blobs_max 50
2971 #define _C2_blobs_max 25
2972 #define _blobs_max (_shared_blobs_max+_C1_blobs_max+_C2_blobs_max)
2973 #define _all_max (_extrs_max+_stubs_max+_blobs_max)
2974
2975 #define _extrs_base 0
2976 #define _stubs_base (_extrs_base + _extrs_max)
2977 #define _shared_blobs_base (_stubs_base + _stubs_max)
2978 #define _C1_blobs_base (_shared_blobs_base + _shared_blobs_max)
2979 #define _C2_blobs_base (_C1_blobs_base + _C1_blobs_max)
2980 #define _blobs_end (_shared_blobs_base + _blobs_max)
2981 #if (_C2_blobs_base >= _all_max)
2982 #error AOTCodeAddressTable ranges need adjusting
2983 #endif
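// With the values above, the id ranges work out to: external addresses [0, 139],
// stub entries [140, 349], shared blobs [350, 374], C1 blobs [375, 424],
// C2 blobs [425, 449] (so _all_max = 450); the C string id range described in the
// comment above follows these.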
2984
2985 #define SET_ADDRESS(type, addr) \
2986 { \
2987 type##_addr[type##_length++] = (address) (addr); \
2988 assert(type##_length <= type##_max, "increase size"); \
2989 }
2990
2991 static bool initializing_extrs = false;
2992
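// Record addresses of external VM runtime entry points and data that generated code
// may reference, so relocations can be encoded as stable table ids at dump time and
// resolved back to live addresses at load time (see id_for_address()/address_for_id()).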
2993 void AOTCodeAddressTable::init_extrs() {
2994 if (_extrs_complete || initializing_extrs) return; // Done already
2995
2996 assert(_blobs_end <= _all_max, "AOTCodeAddress table ranges need adjusting");
2997
2998 initializing_extrs = true;
2999 _extrs_addr = NEW_C_HEAP_ARRAY(address, _extrs_max, mtCode);
3000
3001 _extrs_length = 0;
3002
3003 // Record addresses of VM runtime methods
3004 SET_ADDRESS(_extrs, SharedRuntime::fixup_callers_callsite);
3005 SET_ADDRESS(_extrs, SharedRuntime::handle_wrong_method);
3006 SET_ADDRESS(_extrs, SharedRuntime::handle_wrong_method_abstract);
3007 SET_ADDRESS(_extrs, SharedRuntime::handle_wrong_method_ic_miss);
3008 #if defined(AARCH64) && !defined(ZERO)
3009 SET_ADDRESS(_extrs, JavaThread::aarch64_get_thread_helper);
3010 #endif
3011 {
3012 // Required by Shared blobs
3013 SET_ADDRESS(_extrs, Deoptimization::fetch_unroll_info);
3014 SET_ADDRESS(_extrs, Deoptimization::unpack_frames);
3015 SET_ADDRESS(_extrs, SafepointSynchronize::handle_polling_page_exception);
3016 SET_ADDRESS(_extrs, SharedRuntime::resolve_opt_virtual_call_C);
3017 SET_ADDRESS(_extrs, SharedRuntime::resolve_virtual_call_C);
3018 SET_ADDRESS(_extrs, SharedRuntime::resolve_static_call_C);
3019 SET_ADDRESS(_extrs, SharedRuntime::throw_delayed_StackOverflowError);
3020 SET_ADDRESS(_extrs, SharedRuntime::throw_AbstractMethodError);
3021 SET_ADDRESS(_extrs, SharedRuntime::throw_IncompatibleClassChangeError);
3022 SET_ADDRESS(_extrs, SharedRuntime::throw_NullPointerException_at_call);
3023 SET_ADDRESS(_extrs, CompressedOops::base_addr());
3024 SET_ADDRESS(_extrs, CompressedKlassPointers::base_addr());
3025
3026 }
3027
3028 #ifdef COMPILER1
3029 {
3030 // Required by C1 blobs
3031 SET_ADDRESS(_extrs, static_cast<int (*)(oopDesc*)>(SharedRuntime::dtrace_object_alloc));
3032 SET_ADDRESS(_extrs, SharedRuntime::exception_handler_for_return_address);
3033 SET_ADDRESS(_extrs, SharedRuntime::register_finalizer);
3034 SET_ADDRESS(_extrs, Runtime1::is_instance_of);
3035 SET_ADDRESS(_extrs, Runtime1::exception_handler_for_pc);
3036 SET_ADDRESS(_extrs, Runtime1::check_abort_on_vm_exception);
3037 SET_ADDRESS(_extrs, Runtime1::new_instance);
3038 SET_ADDRESS(_extrs, Runtime1::counter_overflow);
3039 SET_ADDRESS(_extrs, Runtime1::new_type_array);
3040 SET_ADDRESS(_extrs, Runtime1::new_object_array);
3041 SET_ADDRESS(_extrs, Runtime1::new_multi_array);
3042 SET_ADDRESS(_extrs, Runtime1::throw_range_check_exception);
3043 SET_ADDRESS(_extrs, Runtime1::throw_index_exception);
3044 SET_ADDRESS(_extrs, Runtime1::throw_div0_exception);
3045 SET_ADDRESS(_extrs, Runtime1::throw_null_pointer_exception);
3046 SET_ADDRESS(_extrs, Runtime1::throw_array_store_exception);
3047 SET_ADDRESS(_extrs, Runtime1::throw_class_cast_exception);
3048 SET_ADDRESS(_extrs, Runtime1::throw_incompatible_class_change_error);
3049 SET_ADDRESS(_extrs, Runtime1::monitorenter);
3050 SET_ADDRESS(_extrs, Runtime1::monitorexit);
3051 SET_ADDRESS(_extrs, Runtime1::deoptimize);
3052 SET_ADDRESS(_extrs, Runtime1::access_field_patching);
3053 SET_ADDRESS(_extrs, Runtime1::move_klass_patching);
3054 SET_ADDRESS(_extrs, Runtime1::move_mirror_patching);
3055 SET_ADDRESS(_extrs, Runtime1::move_appendix_patching);
3056 SET_ADDRESS(_extrs, Runtime1::predicate_failed_trap);
3057 SET_ADDRESS(_extrs, Runtime1::unimplemented_entry);
3058 SET_ADDRESS(_extrs, Runtime1::trace_block_entry);
3059 #ifndef PRODUCT
3060 SET_ADDRESS(_extrs, os::breakpoint);
3061 #endif
3062 }
3063 #endif // COMPILER1
3064
3065 #ifdef COMPILER2
3066 {
3067 // Required by C2 blobs
3068 SET_ADDRESS(_extrs, Deoptimization::uncommon_trap);
3069 SET_ADDRESS(_extrs, OptoRuntime::handle_exception_C);
3070 SET_ADDRESS(_extrs, OptoRuntime::new_instance_C);
3071 SET_ADDRESS(_extrs, OptoRuntime::new_array_C);
3072 SET_ADDRESS(_extrs, OptoRuntime::new_array_nozero_C);
3073 SET_ADDRESS(_extrs, OptoRuntime::multianewarray2_C);
3074 SET_ADDRESS(_extrs, OptoRuntime::multianewarray3_C);
3075 SET_ADDRESS(_extrs, OptoRuntime::multianewarray4_C);
3076 SET_ADDRESS(_extrs, OptoRuntime::multianewarray5_C);
3077 SET_ADDRESS(_extrs, OptoRuntime::multianewarrayN_C);
3078 #if INCLUDE_JVMTI
3079 SET_ADDRESS(_extrs, SharedRuntime::notify_jvmti_vthread_start);
3080 SET_ADDRESS(_extrs, SharedRuntime::notify_jvmti_vthread_end);
3081 SET_ADDRESS(_extrs, SharedRuntime::notify_jvmti_vthread_mount);
3082 SET_ADDRESS(_extrs, SharedRuntime::notify_jvmti_vthread_unmount);
3083 #endif
3084 SET_ADDRESS(_extrs, OptoRuntime::complete_monitor_locking_C);
3085 SET_ADDRESS(_extrs, OptoRuntime::monitor_notify_C);
3086 SET_ADDRESS(_extrs, OptoRuntime::monitor_notifyAll_C);
3087 SET_ADDRESS(_extrs, OptoRuntime::rethrow_C);
3088 SET_ADDRESS(_extrs, OptoRuntime::slow_arraycopy_C);
3089 SET_ADDRESS(_extrs, OptoRuntime::register_finalizer_C);
3090 SET_ADDRESS(_extrs, OptoRuntime::class_init_barrier_C);
3091 }
3092 #endif // COMPILER2
3093
3094 #if INCLUDE_G1GC
3095 SET_ADDRESS(_extrs, G1BarrierSetRuntime::write_ref_field_post_entry);
3096 SET_ADDRESS(_extrs, G1BarrierSetRuntime::write_ref_field_pre_entry);
3097 #endif
3098
3099 #if INCLUDE_SHENANDOAHGC
3100 SET_ADDRESS(_extrs, ShenandoahRuntime::arraycopy_barrier_oop);
3101 SET_ADDRESS(_extrs, ShenandoahRuntime::arraycopy_barrier_narrow_oop);
3102 SET_ADDRESS(_extrs, ShenandoahRuntime::write_ref_field_pre);
3103 SET_ADDRESS(_extrs, ShenandoahRuntime::clone_barrier);
3104 SET_ADDRESS(_extrs, ShenandoahRuntime::load_reference_barrier_strong);
3105 SET_ADDRESS(_extrs, ShenandoahRuntime::load_reference_barrier_strong_narrow);
3106 SET_ADDRESS(_extrs, ShenandoahRuntime::load_reference_barrier_weak);
3107 SET_ADDRESS(_extrs, ShenandoahRuntime::load_reference_barrier_weak_narrow);
3108 SET_ADDRESS(_extrs, ShenandoahRuntime::load_reference_barrier_phantom);
3109 SET_ADDRESS(_extrs, ShenandoahRuntime::load_reference_barrier_phantom_narrow);
3110 #endif
3111
3112 #if INCLUDE_ZGC
3113 SET_ADDRESS(_extrs, ZBarrierSetRuntime::load_barrier_on_phantom_oop_field_preloaded_addr());
3114 #if defined(AMD64)
3115 SET_ADDRESS(_extrs, &ZPointerLoadShift);
3116 #endif
3117 #endif // INCLUDE_ZGC
3118
3119 SET_ADDRESS(_extrs, SharedRuntime::log_jni_monitor_still_held);
3120 SET_ADDRESS(_extrs, SharedRuntime::rc_trace_method_entry);
3121 SET_ADDRESS(_extrs, SharedRuntime::reguard_yellow_pages);
3122 SET_ADDRESS(_extrs, SharedRuntime::dtrace_method_exit);
3123
3124 SET_ADDRESS(_extrs, SharedRuntime::complete_monitor_unlocking_C);
3125 SET_ADDRESS(_extrs, SharedRuntime::enable_stack_reserved_zone);
3126 #if defined(AMD64) && !defined(ZERO)
3127 SET_ADDRESS(_extrs, SharedRuntime::montgomery_multiply);
3128 SET_ADDRESS(_extrs, SharedRuntime::montgomery_square);
3129 #endif // AMD64
3130 SET_ADDRESS(_extrs, SharedRuntime::d2f);
3131 SET_ADDRESS(_extrs, SharedRuntime::d2i);
3132 SET_ADDRESS(_extrs, SharedRuntime::d2l);
3133 SET_ADDRESS(_extrs, SharedRuntime::dcos);
3134 SET_ADDRESS(_extrs, SharedRuntime::dexp);
3135 SET_ADDRESS(_extrs, SharedRuntime::dlog);
3136 SET_ADDRESS(_extrs, SharedRuntime::dlog10);
3137 SET_ADDRESS(_extrs, SharedRuntime::dpow);
3138 SET_ADDRESS(_extrs, SharedRuntime::dsin);
3139 SET_ADDRESS(_extrs, SharedRuntime::dtan);
3140 SET_ADDRESS(_extrs, SharedRuntime::f2i);
3141 SET_ADDRESS(_extrs, SharedRuntime::f2l);
3142 #ifndef ZERO
3143 SET_ADDRESS(_extrs, SharedRuntime::drem);
3144 SET_ADDRESS(_extrs, SharedRuntime::frem);
3145 #endif
3146 SET_ADDRESS(_extrs, SharedRuntime::l2d);
3147 SET_ADDRESS(_extrs, SharedRuntime::l2f);
3148 SET_ADDRESS(_extrs, SharedRuntime::ldiv);
3149 SET_ADDRESS(_extrs, SharedRuntime::lmul);
3150 SET_ADDRESS(_extrs, SharedRuntime::lrem);
3151 #if INCLUDE_JVMTI
3152 SET_ADDRESS(_extrs, &JvmtiExport::_should_notify_object_alloc);
3153 #endif /* INCLUDE_JVMTI */
3154 BarrierSet* bs = BarrierSet::barrier_set();
3155 if (bs->is_a(BarrierSet::CardTableBarrierSet)) {
3156 SET_ADDRESS(_extrs, ci_card_table_address_as<address>());
3157 }
3158 SET_ADDRESS(_extrs, ThreadIdentifier::unsafe_offset());
3159 SET_ADDRESS(_extrs, Thread::current);
3160
3161 SET_ADDRESS(_extrs, os::javaTimeMillis);
3162 SET_ADDRESS(_extrs, os::javaTimeNanos);
3163 // For JFR
3164 SET_ADDRESS(_extrs, os::elapsed_counter);
3165
3166 #if INCLUDE_JVMTI
3167 SET_ADDRESS(_extrs, &JvmtiVTMSTransitionDisabler::_VTMS_notify_jvmti_events);
3168 #endif /* INCLUDE_JVMTI */
3169 SET_ADDRESS(_extrs, StubRoutines::crc_table_addr());
3170 #ifndef PRODUCT
3171 SET_ADDRESS(_extrs, &SharedRuntime::_partial_subtype_ctr);
3172 SET_ADDRESS(_extrs, JavaThread::verify_cross_modify_fence_failure);
3173 #endif
3174
3175 #ifndef ZERO
3176 #if defined(AMD64) || defined(AARCH64) || defined(RISCV64)
3177 SET_ADDRESS(_extrs, MacroAssembler::debug64);
3178 #endif
3179 #if defined(AMD64)
3180 SET_ADDRESS(_extrs, StubRoutines::x86::arrays_hashcode_powers_of_31());
3181 #endif
3182 #endif // ZERO
3183
3184 #ifdef COMPILER1
3185 #ifdef X86
3186 SET_ADDRESS(_extrs, LIR_Assembler::float_signmask_pool);
3187 SET_ADDRESS(_extrs, LIR_Assembler::double_signmask_pool);
3188 SET_ADDRESS(_extrs, LIR_Assembler::float_signflip_pool);
3189 SET_ADDRESS(_extrs, LIR_Assembler::double_signflip_pool);
3190 #endif
3191 #endif
3192
3193 // addresses of fields in AOT runtime constants area
3194 address* p = AOTRuntimeConstants::field_addresses_list();
3195 while (*p != nullptr) {
3196 SET_ADDRESS(_extrs, *p++);
3197 }
3198
3199 _extrs_complete = true;
3200 log_info(aot, codecache, init)("External addresses recorded");
3201 }
3202
3203 static bool initializing_early_stubs = false;
3204
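// Record the few stub addresses that must be available before the full stub set is
// generated: forward_exception_entry plus a couple of x86 constants required by C1 blobs.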
3205 void AOTCodeAddressTable::init_early_stubs() {
3206 if (_complete || initializing_early_stubs) return; // Done already
3207 initializing_early_stubs = true;
3208 _stubs_addr = NEW_C_HEAP_ARRAY(address, _stubs_max, mtCode);
3209 _stubs_length = 0;
3210 SET_ADDRESS(_stubs, StubRoutines::forward_exception_entry());
3211
3212 {
3213 // Required by C1 blobs
3214 #if defined(AMD64) && !defined(ZERO)
3215 SET_ADDRESS(_stubs, StubRoutines::x86::double_sign_flip());
3216 SET_ADDRESS(_stubs, StubRoutines::x86::d2l_fixup());
3217 #endif // AMD64
3218 }
3219
3220 _early_stubs_complete = true;
3221 log_info(aot, codecache, init)("Early stubs recorded");
3222 }
3223
3224 static bool initializing_shared_blobs = false;
3225
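// Record entry points of the shared runtime blobs (deopt blob, resolve/ic-miss stubs,
// polling page handlers, throw stubs). The same heap array also reserves the slots for
// C1 and C2 blob addresses, which are filled in later by init_early_c1()/init_c1() and init_c2().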
3226 void AOTCodeAddressTable::init_shared_blobs() {
3227 if (_complete || initializing_shared_blobs) return; // Done already
3228 initializing_shared_blobs = true;
3229 address* blobs_addr = NEW_C_HEAP_ARRAY(address, _blobs_max, mtCode);
3230
3231 // Divide the blobs_addr array into chunks because they could be initialized in parallel
3232 _shared_blobs_addr = blobs_addr;
3233 _C1_blobs_addr = _shared_blobs_addr + _shared_blobs_max; // C1 blob addresses are stored after the shared blobs
3234 _C2_blobs_addr = _C1_blobs_addr + _C1_blobs_max;         // C2 blob addresses are stored after the C1 blobs
3235
3236 _shared_blobs_length = 0;
3237 _C1_blobs_length = 0;
3238 _C2_blobs_length = 0;
3239
3240 // clear the address table
3241 memset(blobs_addr, 0, sizeof(address)* _blobs_max);
3242
3243 // Record addresses of generated code blobs
3244 SET_ADDRESS(_shared_blobs, SharedRuntime::get_handle_wrong_method_stub());
3245 SET_ADDRESS(_shared_blobs, SharedRuntime::get_ic_miss_stub());
3246 SET_ADDRESS(_shared_blobs, SharedRuntime::deopt_blob()->unpack());
3247 SET_ADDRESS(_shared_blobs, SharedRuntime::deopt_blob()->unpack_with_exception());
3248 SET_ADDRESS(_shared_blobs, SharedRuntime::deopt_blob()->unpack_with_reexecution());
3249 SET_ADDRESS(_shared_blobs, SharedRuntime::deopt_blob()->unpack_with_exception_in_tls());
3250 SET_ADDRESS(_shared_blobs, SharedRuntime::get_resolve_opt_virtual_call_stub());
3251 SET_ADDRESS(_shared_blobs, SharedRuntime::get_resolve_virtual_call_stub());
3252 SET_ADDRESS(_shared_blobs, SharedRuntime::get_resolve_static_call_stub());
3253 SET_ADDRESS(_shared_blobs, SharedRuntime::deopt_blob()->entry_point());
3254 SET_ADDRESS(_shared_blobs, SharedRuntime::polling_page_safepoint_handler_blob()->entry_point());
3255 SET_ADDRESS(_shared_blobs, SharedRuntime::polling_page_return_handler_blob()->entry_point());
3256 #ifdef COMPILER2
3257 // polling_page_vectors_safepoint_handler_blob can be nullptr if AVX feature is not present or is disabled
3258 if (SharedRuntime::polling_page_vectors_safepoint_handler_blob() != nullptr) {
3259 SET_ADDRESS(_shared_blobs, SharedRuntime::polling_page_vectors_safepoint_handler_blob()->entry_point());
3260 }
3261 #endif
3262 #if INCLUDE_JVMCI
3263 if (EnableJVMCI) {
3264 SET_ADDRESS(_shared_blobs, SharedRuntime::deopt_blob()->uncommon_trap());
3265 SET_ADDRESS(_shared_blobs, SharedRuntime::deopt_blob()->implicit_exception_uncommon_trap());
3266 }
3267 #endif
3268 SET_ADDRESS(_shared_blobs, SharedRuntime::throw_AbstractMethodError_entry());
3269 SET_ADDRESS(_shared_blobs, SharedRuntime::throw_IncompatibleClassChangeError_entry());
3270 SET_ADDRESS(_shared_blobs, SharedRuntime::throw_NullPointerException_at_call_entry());
3271 SET_ADDRESS(_shared_blobs, SharedRuntime::throw_StackOverflowError_entry());
3272 SET_ADDRESS(_shared_blobs, SharedRuntime::throw_delayed_StackOverflowError_entry());
3273
3274 assert(_shared_blobs_length <= _shared_blobs_max, "increase _shared_blobs_max to %d", _shared_blobs_length);
3275 _shared_blobs_complete = true;
3276 log_info(aot, codecache, init)("All shared blobs recorded");
3277 }
3278
3279 static bool initializing_stubs = false;
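// Record the remaining StubRoutines entry points (atomics, continuations, arraycopy,
// fill, crypto, math and platform-specific stubs) referenced by generated code.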
3280 void AOTCodeAddressTable::init_stubs() {
3281 if (_complete || initializing_stubs) return; // Done already
3282 assert(_early_stubs_complete, "early stubs should be initialized");
3283 initializing_stubs = true;
3284
3285 // Stubs
3286 SET_ADDRESS(_stubs, StubRoutines::method_entry_barrier());
3287 SET_ADDRESS(_stubs, StubRoutines::atomic_xchg_entry());
3288 SET_ADDRESS(_stubs, StubRoutines::atomic_cmpxchg_entry());
3289 SET_ADDRESS(_stubs, StubRoutines::atomic_cmpxchg_long_entry());
3290 SET_ADDRESS(_stubs, StubRoutines::atomic_add_entry());
3291 SET_ADDRESS(_stubs, StubRoutines::fence_entry());
3292
3293 SET_ADDRESS(_stubs, StubRoutines::cont_thaw());
3294 SET_ADDRESS(_stubs, StubRoutines::cont_returnBarrier());
3295 SET_ADDRESS(_stubs, StubRoutines::cont_returnBarrierExc());
3296
3297 JFR_ONLY(SET_ADDRESS(_stubs, SharedRuntime::jfr_write_checkpoint());)
3298
3299 SET_ADDRESS(_stubs, StubRoutines::jbyte_arraycopy());
3300 SET_ADDRESS(_stubs, StubRoutines::jshort_arraycopy());
3301 SET_ADDRESS(_stubs, StubRoutines::jint_arraycopy());
3302 SET_ADDRESS(_stubs, StubRoutines::jlong_arraycopy());
3303 SET_ADDRESS(_stubs, StubRoutines::_oop_arraycopy);
3304 SET_ADDRESS(_stubs, StubRoutines::_oop_arraycopy_uninit);
3305
3306 SET_ADDRESS(_stubs, StubRoutines::jbyte_disjoint_arraycopy());
3307 SET_ADDRESS(_stubs, StubRoutines::jshort_disjoint_arraycopy());
3308 SET_ADDRESS(_stubs, StubRoutines::jint_disjoint_arraycopy());
3309 SET_ADDRESS(_stubs, StubRoutines::jlong_disjoint_arraycopy());
3310 SET_ADDRESS(_stubs, StubRoutines::_oop_disjoint_arraycopy);
3311 SET_ADDRESS(_stubs, StubRoutines::_oop_disjoint_arraycopy_uninit);
3312
3313 SET_ADDRESS(_stubs, StubRoutines::arrayof_jbyte_arraycopy());
3314 SET_ADDRESS(_stubs, StubRoutines::arrayof_jshort_arraycopy());
3315 SET_ADDRESS(_stubs, StubRoutines::arrayof_jint_arraycopy());
3316 SET_ADDRESS(_stubs, StubRoutines::arrayof_jlong_arraycopy());
3317 SET_ADDRESS(_stubs, StubRoutines::_arrayof_oop_arraycopy);
3318 SET_ADDRESS(_stubs, StubRoutines::_arrayof_oop_arraycopy_uninit);
3319
3320 SET_ADDRESS(_stubs, StubRoutines::arrayof_jbyte_disjoint_arraycopy());
3321 SET_ADDRESS(_stubs, StubRoutines::arrayof_jshort_disjoint_arraycopy());
3322 SET_ADDRESS(_stubs, StubRoutines::arrayof_jint_disjoint_arraycopy());
3323 SET_ADDRESS(_stubs, StubRoutines::arrayof_jlong_disjoint_arraycopy());
3324 SET_ADDRESS(_stubs, StubRoutines::_arrayof_oop_disjoint_arraycopy);
3325 SET_ADDRESS(_stubs, StubRoutines::_arrayof_oop_disjoint_arraycopy_uninit);
3326
3327 SET_ADDRESS(_stubs, StubRoutines::_checkcast_arraycopy);
3328 SET_ADDRESS(_stubs, StubRoutines::_checkcast_arraycopy_uninit);
3329
3330 SET_ADDRESS(_stubs, StubRoutines::unsafe_arraycopy());
3331 SET_ADDRESS(_stubs, StubRoutines::generic_arraycopy());
3332
3333 SET_ADDRESS(_stubs, StubRoutines::jbyte_fill());
3334 SET_ADDRESS(_stubs, StubRoutines::jshort_fill());
3335 SET_ADDRESS(_stubs, StubRoutines::jint_fill());
3336 SET_ADDRESS(_stubs, StubRoutines::arrayof_jbyte_fill());
3337 SET_ADDRESS(_stubs, StubRoutines::arrayof_jshort_fill());
3338 SET_ADDRESS(_stubs, StubRoutines::arrayof_jint_fill());
3339
3340 SET_ADDRESS(_stubs, StubRoutines::data_cache_writeback());
3341 SET_ADDRESS(_stubs, StubRoutines::data_cache_writeback_sync());
3342
3343 SET_ADDRESS(_stubs, StubRoutines::aescrypt_encryptBlock());
3344 SET_ADDRESS(_stubs, StubRoutines::aescrypt_decryptBlock());
3345 SET_ADDRESS(_stubs, StubRoutines::cipherBlockChaining_encryptAESCrypt());
3346 SET_ADDRESS(_stubs, StubRoutines::cipherBlockChaining_decryptAESCrypt());
3347 SET_ADDRESS(_stubs, StubRoutines::electronicCodeBook_encryptAESCrypt());
3348 SET_ADDRESS(_stubs, StubRoutines::electronicCodeBook_decryptAESCrypt());
3349 SET_ADDRESS(_stubs, StubRoutines::poly1305_processBlocks());
3350 SET_ADDRESS(_stubs, StubRoutines::counterMode_AESCrypt());
3351 SET_ADDRESS(_stubs, StubRoutines::ghash_processBlocks());
3352 SET_ADDRESS(_stubs, StubRoutines::chacha20Block());
3353 SET_ADDRESS(_stubs, StubRoutines::base64_encodeBlock());
3354 SET_ADDRESS(_stubs, StubRoutines::base64_decodeBlock());
3355 SET_ADDRESS(_stubs, StubRoutines::md5_implCompress());
3356 SET_ADDRESS(_stubs, StubRoutines::md5_implCompressMB());
3357 SET_ADDRESS(_stubs, StubRoutines::sha1_implCompress());
3358 SET_ADDRESS(_stubs, StubRoutines::sha1_implCompressMB());
3359 SET_ADDRESS(_stubs, StubRoutines::sha256_implCompress());
3360 SET_ADDRESS(_stubs, StubRoutines::sha256_implCompressMB());
3361 SET_ADDRESS(_stubs, StubRoutines::sha512_implCompress());
3362 SET_ADDRESS(_stubs, StubRoutines::sha512_implCompressMB());
3363 SET_ADDRESS(_stubs, StubRoutines::sha3_implCompress());
3364 SET_ADDRESS(_stubs, StubRoutines::sha3_implCompressMB());
3365 SET_ADDRESS(_stubs, StubRoutines::double_keccak());
3366 SET_ADDRESS(_stubs, StubRoutines::intpoly_assign());
3367 SET_ADDRESS(_stubs, StubRoutines::intpoly_montgomeryMult_P256());
3368 SET_ADDRESS(_stubs, StubRoutines::dilithiumAlmostNtt());
3369 SET_ADDRESS(_stubs, StubRoutines::dilithiumAlmostInverseNtt());
3370 SET_ADDRESS(_stubs, StubRoutines::dilithiumNttMult());
3371 SET_ADDRESS(_stubs, StubRoutines::dilithiumMontMulByConstant());
3372 SET_ADDRESS(_stubs, StubRoutines::dilithiumDecomposePoly());
3373
3374 SET_ADDRESS(_stubs, StubRoutines::updateBytesCRC32());
3375
3376 SET_ADDRESS(_stubs, StubRoutines::crc32c_table_addr());
3377 SET_ADDRESS(_stubs, StubRoutines::updateBytesCRC32C());
3378 SET_ADDRESS(_stubs, StubRoutines::updateBytesAdler32());
3379
3380 SET_ADDRESS(_stubs, StubRoutines::multiplyToLen());
3381 SET_ADDRESS(_stubs, StubRoutines::squareToLen());
3382 SET_ADDRESS(_stubs, StubRoutines::mulAdd());
3383 SET_ADDRESS(_stubs, StubRoutines::montgomeryMultiply());
3384 SET_ADDRESS(_stubs, StubRoutines::montgomerySquare());
3385 SET_ADDRESS(_stubs, StubRoutines::bigIntegerRightShift());
3386 SET_ADDRESS(_stubs, StubRoutines::bigIntegerLeftShift());
3387 SET_ADDRESS(_stubs, StubRoutines::galoisCounterMode_AESCrypt());
3388
3389 SET_ADDRESS(_stubs, StubRoutines::vectorizedMismatch());
3390
3391 SET_ADDRESS(_stubs, StubRoutines::unsafe_setmemory());
3392
3393 SET_ADDRESS(_stubs, StubRoutines::dexp());
3394 SET_ADDRESS(_stubs, StubRoutines::dlog());
3395 SET_ADDRESS(_stubs, StubRoutines::dlog10());
3396 SET_ADDRESS(_stubs, StubRoutines::dpow());
3397 SET_ADDRESS(_stubs, StubRoutines::dsin());
3398 SET_ADDRESS(_stubs, StubRoutines::dcos());
3399 SET_ADDRESS(_stubs, StubRoutines::dlibm_reduce_pi04l());
3400 SET_ADDRESS(_stubs, StubRoutines::dlibm_sin_cos_huge());
3401 SET_ADDRESS(_stubs, StubRoutines::dlibm_tan_cot_huge());
3402 SET_ADDRESS(_stubs, StubRoutines::dtan());
3403
3404 SET_ADDRESS(_stubs, StubRoutines::f2hf_adr());
3405 SET_ADDRESS(_stubs, StubRoutines::hf2f_adr());
3406
3407 for (int slot = 0; slot < Klass::SECONDARY_SUPERS_TABLE_SIZE; slot++) {
3408 SET_ADDRESS(_stubs, StubRoutines::lookup_secondary_supers_table_stub(slot));
3409 }
3410 SET_ADDRESS(_stubs, StubRoutines::lookup_secondary_supers_table_slow_path_stub());
3411
3412 #if defined(AMD64) && !defined(ZERO)
3413 SET_ADDRESS(_stubs, StubRoutines::x86::d2i_fixup());
3414 SET_ADDRESS(_stubs, StubRoutines::x86::f2i_fixup());
3415 SET_ADDRESS(_stubs, StubRoutines::x86::f2l_fixup());
3416 SET_ADDRESS(_stubs, StubRoutines::x86::float_sign_mask());
3417 SET_ADDRESS(_stubs, StubRoutines::x86::float_sign_flip());
3418 SET_ADDRESS(_stubs, StubRoutines::x86::double_sign_mask());
3419 SET_ADDRESS(_stubs, StubRoutines::x86::vector_popcount_lut());
3420 SET_ADDRESS(_stubs, StubRoutines::x86::vector_float_sign_mask());
3421 SET_ADDRESS(_stubs, StubRoutines::x86::vector_float_sign_flip());
3422 SET_ADDRESS(_stubs, StubRoutines::x86::vector_double_sign_mask());
3423 SET_ADDRESS(_stubs, StubRoutines::x86::vector_double_sign_flip());
3424 SET_ADDRESS(_stubs, StubRoutines::x86::vector_reverse_byte_perm_mask_int());
3425 SET_ADDRESS(_stubs, StubRoutines::x86::vector_reverse_byte_perm_mask_short());
3426 SET_ADDRESS(_stubs, StubRoutines::x86::vector_reverse_byte_perm_mask_long());
3427 // The iota indices are ordered by type B/S/I/L/F/D, and the offset between two types is 64.
3428 // See C2_MacroAssembler::load_iota_indices().
3429 for (int i = 0; i < 6; i++) {
3430 SET_ADDRESS(_stubs, StubRoutines::x86::vector_iota_indices() + i * 64);
3431 }
3432 #endif
3433 #if defined(AARCH64) && !defined(ZERO)
3434 SET_ADDRESS(_stubs, StubRoutines::aarch64::zero_blocks());
3435 SET_ADDRESS(_stubs, StubRoutines::aarch64::count_positives());
3436 SET_ADDRESS(_stubs, StubRoutines::aarch64::count_positives_long());
3437 SET_ADDRESS(_stubs, StubRoutines::aarch64::large_array_equals());
3438 SET_ADDRESS(_stubs, StubRoutines::aarch64::compare_long_string_LL());
3439 SET_ADDRESS(_stubs, StubRoutines::aarch64::compare_long_string_UU());
3440 SET_ADDRESS(_stubs, StubRoutines::aarch64::compare_long_string_LU());
3441 SET_ADDRESS(_stubs, StubRoutines::aarch64::compare_long_string_UL());
3442 SET_ADDRESS(_stubs, StubRoutines::aarch64::string_indexof_linear_ul());
3443 SET_ADDRESS(_stubs, StubRoutines::aarch64::string_indexof_linear_ll());
3444 SET_ADDRESS(_stubs, StubRoutines::aarch64::string_indexof_linear_uu());
3445 SET_ADDRESS(_stubs, StubRoutines::aarch64::large_byte_array_inflate());
3446 SET_ADDRESS(_stubs, StubRoutines::aarch64::spin_wait());
3447
3448 SET_ADDRESS(_stubs, StubRoutines::aarch64::large_arrays_hashcode(T_BOOLEAN));
3449 SET_ADDRESS(_stubs, StubRoutines::aarch64::large_arrays_hashcode(T_BYTE));
3450 SET_ADDRESS(_stubs, StubRoutines::aarch64::large_arrays_hashcode(T_SHORT));
3451 SET_ADDRESS(_stubs, StubRoutines::aarch64::large_arrays_hashcode(T_CHAR));
3452 SET_ADDRESS(_stubs, StubRoutines::aarch64::large_arrays_hashcode(T_INT));
3453 #endif
3454
3455 _complete = true;
3456 log_info(aot, codecache, init)("Stubs recorded");
3457 }
3458
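// Record entry points of the C1 runtime blobs that are generated early (stub ids up to
// and including forward_exception_id); the remaining C1 blobs and the GC barrier blobs
// are recorded by init_c1() below.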
3459 void AOTCodeAddressTable::init_early_c1() {
3460 #ifdef COMPILER1
3461 // Runtime1 Blobs
3462 for (int i = 0; i <= (int)C1StubId::forward_exception_id; i++) {
3463 C1StubId id = (C1StubId)i;
3464 if (Runtime1::blob_for(id) == nullptr) {
3465 log_info(aot, codecache, init)("C1 blob %s is missing", Runtime1::name_for(id));
3466 continue;
3467 }
3468 if (Runtime1::entry_for(id) == nullptr) {
3469 log_info(aot, codecache, init)("C1 blob %s is missing entry", Runtime1::name_for(id));
3470 continue;
3471 }
3472 address entry = Runtime1::entry_for(id);
3473 SET_ADDRESS(_C1_blobs, entry);
3474 }
3475 #endif // COMPILER1
3476 assert(_C1_blobs_length <= _C1_blobs_max, "increase _C1_blobs_max to %d", _C1_blobs_length);
3477 _early_c1_complete = true;
3478 }
3479
3480 void AOTCodeAddressTable::init_c1() {
3481 #ifdef COMPILER1
3482 // Runtime1 Blobs
3483 assert(_early_c1_complete, "early C1 blobs should be initialized");
3484 for (int i = (int)C1StubId::forward_exception_id + 1; i < (int)(C1StubId::NUM_STUBIDS); i++) {
3485 C1StubId id = (C1StubId)i;
3486 if (Runtime1::blob_for(id) == nullptr) {
3487 log_info(aot, codecache, init)("C1 blob %s is missing", Runtime1::name_for(id));
3488 continue;
3489 }
3490 if (Runtime1::entry_for(id) == nullptr) {
3491 log_info(aot, codecache, init)("C1 blob %s is missing entry", Runtime1::name_for(id));
3492 continue;
3493 }
3494 address entry = Runtime1::entry_for(id);
3495 SET_ADDRESS(_C1_blobs, entry);
3496 }
3497 #if INCLUDE_G1GC
3498 if (UseG1GC) {
3499 G1BarrierSetC1* bs = (G1BarrierSetC1*)BarrierSet::barrier_set()->barrier_set_c1();
3500 address entry = bs->pre_barrier_c1_runtime_code_blob()->code_begin();
3501 SET_ADDRESS(_C1_blobs, entry);
3502 entry = bs->post_barrier_c1_runtime_code_blob()->code_begin();
3503 SET_ADDRESS(_C1_blobs, entry);
3504 }
3505 #endif // INCLUDE_G1GC
3506 #if INCLUDE_ZGC
3507 if (UseZGC) {
3508 ZBarrierSetC1* bs = (ZBarrierSetC1*)BarrierSet::barrier_set()->barrier_set_c1();
3509 SET_ADDRESS(_C1_blobs, bs->_load_barrier_on_oop_field_preloaded_runtime_stub);
3510 SET_ADDRESS(_C1_blobs, bs->_load_barrier_on_weak_oop_field_preloaded_runtime_stub);
3511 SET_ADDRESS(_C1_blobs, bs->_store_barrier_on_oop_field_with_healing);
3512 SET_ADDRESS(_C1_blobs, bs->_store_barrier_on_oop_field_without_healing);
3513 }
3514 #endif // INCLUDE_ZGC
3515 #if INCLUDE_SHENANDOAHGC
3516 if (UseShenandoahGC) {
3517 ShenandoahBarrierSetC1* bs = (ShenandoahBarrierSetC1*)BarrierSet::barrier_set()->barrier_set_c1();
3518 SET_ADDRESS(_C1_blobs, bs->pre_barrier_c1_runtime_code_blob()->code_begin());
3519 SET_ADDRESS(_C1_blobs, bs->load_reference_barrier_strong_rt_code_blob()->code_begin());
3520 SET_ADDRESS(_C1_blobs, bs->load_reference_barrier_strong_native_rt_code_blob()->code_begin());
3521 SET_ADDRESS(_C1_blobs, bs->load_reference_barrier_weak_rt_code_blob()->code_begin());
3522 SET_ADDRESS(_C1_blobs, bs->load_reference_barrier_phantom_rt_code_blob()->code_begin());
3523 }
3524 #endif // INCLUDE_SHENANDOAHGC
3525 #endif // COMPILER1
3526
3527 assert(_C1_blobs_length <= _C1_blobs_max, "increase _C1_blobs_max to %d", _C1_blobs_length);
3528 _c1_complete = true;
3529 log_info(aot, codecache, init)("Runtime1 Blobs recorded");
3530 }
3531
3532 void AOTCodeAddressTable::init_c2() {
3533 #ifdef COMPILER2
3534 // OptoRuntime Blobs
3535 SET_ADDRESS(_C2_blobs, OptoRuntime::uncommon_trap_blob()->entry_point());
3536 SET_ADDRESS(_C2_blobs, OptoRuntime::exception_blob()->entry_point());
3537 SET_ADDRESS(_C2_blobs, OptoRuntime::new_instance_Java());
3538 SET_ADDRESS(_C2_blobs, OptoRuntime::new_array_Java());
3539 SET_ADDRESS(_C2_blobs, OptoRuntime::new_array_nozero_Java());
3540 SET_ADDRESS(_C2_blobs, OptoRuntime::multianewarray2_Java());
3541 SET_ADDRESS(_C2_blobs, OptoRuntime::multianewarray3_Java());
3542 SET_ADDRESS(_C2_blobs, OptoRuntime::multianewarray4_Java());
3543 SET_ADDRESS(_C2_blobs, OptoRuntime::multianewarray5_Java());
3544 SET_ADDRESS(_C2_blobs, OptoRuntime::multianewarrayN_Java());
3545 SET_ADDRESS(_C2_blobs, OptoRuntime::vtable_must_compile_stub());
3546 SET_ADDRESS(_C2_blobs, OptoRuntime::complete_monitor_locking_Java());
3547 SET_ADDRESS(_C2_blobs, OptoRuntime::monitor_notify_Java());
3548 SET_ADDRESS(_C2_blobs, OptoRuntime::monitor_notifyAll_Java());
3549 SET_ADDRESS(_C2_blobs, OptoRuntime::rethrow_stub());
3550 SET_ADDRESS(_C2_blobs, OptoRuntime::slow_arraycopy_Java());
3551 SET_ADDRESS(_C2_blobs, OptoRuntime::register_finalizer_Java());
3552 SET_ADDRESS(_C2_blobs, OptoRuntime::class_init_barrier_Java());
3553 #if INCLUDE_JVMTI
3554 SET_ADDRESS(_C2_blobs, OptoRuntime::notify_jvmti_vthread_start());
3555 SET_ADDRESS(_C2_blobs, OptoRuntime::notify_jvmti_vthread_end());
3556 SET_ADDRESS(_C2_blobs, OptoRuntime::notify_jvmti_vthread_mount());
3557 SET_ADDRESS(_C2_blobs, OptoRuntime::notify_jvmti_vthread_unmount());
3558 #endif /* INCLUDE_JVMTI */
3559 #endif
3560
3561 assert(_C2_blobs_length <= _C2_blobs_max, "increase _C2_blobs_max to %d", _C2_blobs_length);
3562 _c2_complete = true;
3563 log_info(aot, codecache, init)("OptoRuntime Blobs recorded");
3564 }
3565 #undef SET_ADDRESS
3566
3567 AOTCodeAddressTable::~AOTCodeAddressTable() {
3568 if (_extrs_addr != nullptr) {
3569 FREE_C_HEAP_ARRAY(address, _extrs_addr);
3570 }
3571 if (_stubs_addr != nullptr) {
3572 FREE_C_HEAP_ARRAY(address, _stubs_addr);
3573 }
3574 if (_shared_blobs_addr != nullptr) {
3575 FREE_C_HEAP_ARRAY(address, _shared_blobs_addr);
3576 }
3577 }
3578
3579 #ifdef PRODUCT
3580 #define MAX_STR_COUNT 200
3581 #else
3582 #define MAX_STR_COUNT 500
3583 #endif
3584 #define _c_str_max MAX_STR_COUNT
3585 static const int _c_str_base = _all_max;
3586
3587 static const char* _C_strings_in[MAX_STR_COUNT] = {nullptr}; // Incoming strings
3588 static const char* _C_strings[MAX_STR_COUNT] = {nullptr}; // Our duplicates
3589 static int _C_strings_count = 0;
3590 static int _C_strings_s[MAX_STR_COUNT] = {0};
3591 static int _C_strings_id[MAX_STR_COUNT] = {0};
3592 static int _C_strings_used = 0;
3593
3605 // still be executed on VM exit after _cache is freed.
3606 char* p = NEW_C_HEAP_ARRAY(char, strings_size+1, mtCode);
3607 memcpy(p, addr(strings_offset), strings_size);
3608 _C_strings_buf = p;
3609 assert(strings_count <= MAX_STR_COUNT, "sanity");
3610 for (uint i = 0; i < strings_count; i++) {
3611 _C_strings[i] = p;
3612 uint len = string_lengths[i];
3613 _C_strings_s[i] = i;
3614 _C_strings_id[i] = i;
3615 p += len;
3616 }
3617 assert((uint)(p - _C_strings_buf) <= strings_size, "(" INTPTR_FORMAT " - " INTPTR_FORMAT ") = %d > %d ", p2i(p), p2i(_C_strings_buf), (uint)(p - _C_strings_buf), strings_size);
3618 _C_strings_count = strings_count;
3619 _C_strings_used = strings_count;
3620 log_debug(aot, codecache, init)(" Loaded %d C strings of total length %d at offset %d from AOT Code Cache", _C_strings_count, strings_size, strings_offset);
3621 }
3622
3623 int AOTCodeCache::store_strings() {
3624 if (_C_strings_used > 0) {
3625 MutexLocker ml(AOTCodeCStrings_lock, Mutex::_no_safepoint_check_flag);
3626 uint offset = _write_position;
3627 uint length = 0;
3628 uint* lengths = (uint *)reserve_bytes(sizeof(uint) * _C_strings_used);
3629 if (lengths == nullptr) {
3630 return -1;
3631 }
3632 for (int i = 0; i < _C_strings_used; i++) {
3633 const char* str = _C_strings[_C_strings_s[i]];
3634 uint len = (uint)strlen(str) + 1;
3635 length += len;
3636 assert(len < 1000, "big string: %s", str);
3637 lengths[i] = len;
3638 uint n = write_bytes(str, len);
3639 if (n != len) {
3640 return -1;
3641 }
3642 }
3643 log_debug(aot, codecache, exit)(" Wrote %d C strings of total length %d at offset %d to AOT Code Cache",
3644 _C_strings_used, length, offset);
3645 }
3646 return _C_strings_used;
3647 }
3648
3649 const char* AOTCodeCache::add_C_string(const char* str) {
3650 if (is_on_for_dump() && str != nullptr) {
3651 MutexLocker ml(AOTCodeCStrings_lock, Mutex::_no_safepoint_check_flag);
3652 AOTCodeAddressTable* table = addr_table();
3653 if (table != nullptr) {
3654 return table->add_C_string(str);
3655 }
3656 }
3657 return str;
3658 }
3659
3660 const char* AOTCodeAddressTable::add_C_string(const char* str) {
3661 if (_extrs_complete) {
3662 // Check previous strings address
3663 for (int i = 0; i < _C_strings_count; i++) {
3664 if (_C_strings_in[i] == str) {
3665 return _C_strings[i]; // Found previous one - return our duplicate
3666 } else if (strcmp(_C_strings[i], str) == 0) {
3667 return _C_strings[i];
3668 }
3669 }
3670 // Add new one
3671 if (_C_strings_count < MAX_STR_COUNT) {
3672 // The passed-in string can be freed and its memory become inaccessible.
3673 // Keep the original address but duplicate the string for future comparisons.
3674 _C_strings_id[_C_strings_count] = -1; // Init
3675 _C_strings_in[_C_strings_count] = str;
3676 const char* dup = os::strdup(str);
3677 _C_strings[_C_strings_count++] = dup;
3678 log_trace(aot, codecache, stringtable)("add_C_string: [%d] " INTPTR_FORMAT " '%s'", _C_strings_count, p2i(dup), dup);
3679 return dup;
3680 } else {
3681 assert(false, "Number of C strings >= MAX_STR_COUNT");
3682 }
3683 }
3684 return str;
3685 }
3686
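// Maps a previously recorded C string address to its id within the C-string id range,
// assigning a new sequential id on first use; returns -1 if the address is not a
// recorded string.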
3687 int AOTCodeAddressTable::id_for_C_string(address str) {
3688 if (str == nullptr) {
3689 return -1;
3690 }
3691 MutexLocker ml(AOTCodeCStrings_lock, Mutex::_no_safepoint_check_flag);
3692 for (int i = 0; i < _C_strings_count; i++) {
3693 if (_C_strings[i] == (const char*)str) { // found
3694 int id = _C_strings_id[i];
3695 if (id >= 0) {
3696 assert(id < _C_strings_used, "%d >= %d", id , _C_strings_used);
3697 return id; // Found recorded
3698 }
3699 // Not found in recorded, add new
3700 id = _C_strings_used++;
3701 _C_strings_s[id] = i;
3702 _C_strings_id[i] = id;
3703 return id;
3704 }
3705 }
3706 return -1;
3707 }
3708
3709 address AOTCodeAddressTable::address_for_C_string(int idx) {
3710 assert(idx < _C_strings_count, "sanity");
3711 return (address)_C_strings[idx];
3712 }
3713
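// Linear search for 'addr' in the given address sub-table; returns its index within the
// table or BAD_ADDRESS_ID when it is not found.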
3714 static int search_address(address addr, address* table, uint length) {
3715 for (int i = 0; i < (int)length; i++) {
3716 if (table[i] == addr) {
3717 return i;
3718 }
3719 }
3720 return BAD_ADDRESS_ID;
3721 }
3722
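// Load-time mapping from an encoded id back to a live address. Ids fall into the
// _extrs/_stubs/blob/C-string ranges defined above; values beyond those ranges are
// interpreted as offsets from os::init (the encoding produced by id_for_address()).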
3723 address AOTCodeAddressTable::address_for_id(int idx) {
3724 assert(_extrs_complete, "AOT Code Cache VM runtime addresses table is not complete");
3725 if (idx == -1) {
3726 return (address)-1;
3727 }
3728 uint id = (uint)idx;
3729 // Special case: ids beyond the table ranges encode offsets relative to os::init
3730 if (id > (_c_str_base + _c_str_max)) {
3731 return (address)os::init + idx;
3732 }
3733 if (idx < 0) {
3734 fatal("Incorrect id %d for AOT Code Cache addresses table", id);
3735 return nullptr;
3736 }
3737 // no need to compare unsigned id against 0
3738 if (/* id >= _extrs_base && */ id < _extrs_length) {
3739 return _extrs_addr[id - _extrs_base];
3740 }
3741 if (id >= _stubs_base && id < _stubs_base + _stubs_length) {
3742 return _stubs_addr[id - _stubs_base];
3743 }
3747 if (id >= _shared_blobs_base && id < _shared_blobs_base + _shared_blobs_length) {
3748 return _shared_blobs_addr[id - _shared_blobs_base];
3749 }
3750 if (id >= _C1_blobs_base && id < _C1_blobs_base + _C1_blobs_length) {
3751 return _C1_blobs_addr[id - _C1_blobs_base];
3752 }
3756 if (id >= _C2_blobs_base && id < _C2_blobs_base + _C2_blobs_length) {
3757 return _C2_blobs_addr[id - _C2_blobs_base];
3758 }
3759 if (id >= _c_str_base && id < (_c_str_base + (uint)_C_strings_count)) {
3760 return address_for_C_string(id - _c_str_base);
3761 }
3762 fatal("Incorrect id %d for AOT Code Cache addresses table", id);
3763 return nullptr;
3764 }
3765
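// Dump-time mapping from an address found in a relocation to a stable id. The card
// table base and recorded C strings are checked first, then stubs, code blobs and
// external runtime entries; an address that is in none of the tables but resolves to a
// VM symbol with a non-zero offset is encoded as its distance from os::init.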
3766 int AOTCodeAddressTable::id_for_address(address addr, RelocIterator reloc, CodeBlob* blob) {
3767 assert(_extrs_complete, "AOT Code Cache VM runtime addresses table is not complete");
3768 int id = -1;
3769 if (addr == (address)-1) { // Static call stub has jump to itself
3770 return id;
3771 }
3772 // Check card_table_base address first since it can point to any address
3773 BarrierSet* bs = BarrierSet::barrier_set();
3774 if (bs->is_a(BarrierSet::CardTableBarrierSet)) {
3775 if (addr == ci_card_table_address_as<address>()) {
3776 id = search_address(addr, _extrs_addr, _extrs_length);
3777 assert(id > 0 && _extrs_addr[id - _extrs_base] == addr, "sanity");
3778 return id;
3779 }
3780 }
3781
3782 // Search for C string
3783 id = id_for_C_string(addr);
3784 if (id >= 0) {
3785 return id + _c_str_base;
3786 }
3787 if (StubRoutines::contains(addr)) {
3788 // Search in stubs
3789 id = search_address(addr, _stubs_addr, _stubs_length);
3790 if (id == BAD_ADDRESS_ID) {
3791 StubCodeDesc* desc = StubCodeDesc::desc_for(addr);
3792 if (desc == nullptr) {
3793 desc = StubCodeDesc::desc_for(addr + frame::pc_return_offset);
3794 }
3795 const char* stub_name = (desc != nullptr) ? desc->name() : "<unknown>";
3796 assert(false, "Address " INTPTR_FORMAT " for Stub:%s is missing in AOT Code Cache addresses table", p2i(addr), stub_name);
3797 } else {
3798 return _stubs_base + id;
3799 }
3800 } else {
3801 CodeBlob* cb = CodeCache::find_blob(addr);
3802 if (cb != nullptr) {
3803 int id_base = _shared_blobs_base;
3804 // Search in code blobs
3805 id = search_address(addr, _shared_blobs_addr, _shared_blobs_length);
3806 if (id == BAD_ADDRESS_ID) {
3807 id_base = _C1_blobs_base;
3808 // search C1 blobs
3809 id = search_address(addr, _C1_blobs_addr, _C1_blobs_length);
3810 }
3811 if (id == BAD_ADDRESS_ID) {
3812 id_base = _C2_blobs_base;
3813 // search C2 blobs
3814 id = search_address(addr, _C2_blobs_addr, _C2_blobs_length);
3815 }
3816 if (id == BAD_ADDRESS_ID) {
3817 assert(false, "Address " INTPTR_FORMAT " for Blob:%s is missing in AOT Code Cache addresses table", p2i(addr), cb->name());
3818 } else {
3819 return id_base + id;
3820 }
3821 } else {
3822 // Search in runtime functions
3823 id = search_address(addr, _extrs_addr, _extrs_length);
3824 if (id == BAD_ADDRESS_ID) {
3825 ResourceMark rm;
3826 const int buflen = 1024;
3827 char* func_name = NEW_RESOURCE_ARRAY(char, buflen);
3828 int offset = 0;
3829 if (os::dll_address_to_function_name(addr, func_name, buflen, &offset)) {
3830 if (offset > 0) {
3831 // Could be address of C string
3832 uint dist = (uint)pointer_delta(addr, (address)os::init, 1);
3833 CompileTask* task = ciEnv::current()->task();
3834 uint compile_id = 0;
3835 uint comp_level = 0;
3836 if (task != nullptr) { // this could be called from compiler runtime initialization (compiler blobs)
3837 compile_id = task->compile_id();
3838 comp_level = task->comp_level();
3839 }
3840 log_debug(aot, codecache)("%d (L%d): Address " INTPTR_FORMAT " (offset %d) for runtime target '%s' is missing in AOT Code Cache addresses table",
3841 compile_id, comp_level, p2i(addr), dist, (const char*)addr);
3842 assert(dist > (uint)(_all_max + MAX_STR_COUNT), "change encoding of distance");
3843 return dist;
3844 }
3845 reloc.print_current_on(tty);
3846 blob->print_on(tty);
3847 blob->print_code_on(tty);
3848 assert(false, "Address " INTPTR_FORMAT " for runtime target '%s+%d' is missing in AOT Code Cache addresses table", p2i(addr), func_name, offset);
3849 } else {
3850 reloc.print_current_on(tty);
3851 blob->print_on(tty);
3852 blob->print_code_on(tty);
3853 os::find(addr, tty);
3854 assert(false, "Address " INTPTR_FORMAT " for <unknown>/('%s') is missing in AOT Code Cache addresses table", p2i(addr), (const char*)addr);
3855 }
3856 } else {
3857 return _extrs_base + id;
3858 }
3859 }
3860 }
3861 return id;
3862 }
3863
3864 #undef _extrs_max
3865 #undef _stubs_max
3866 #undef _shared_blobs_max
3867 #undef _C1_blobs_max
3868 #undef _C2_blobs_max
3869 #undef _blobs_max
3870 #undef _extrs_base
3871 #undef _stubs_base
3872 #undef _shared_blobs_base
3873 #undef _C1_blobs_base
3874 #undef _C2_blobs_base
3875 #undef _blobs_end
3876
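// Capture card-table geometry (grain and card shifts) from the live barrier set; these
// fields are exposed via field_addresses_list() and recorded in the external address
// table so generated code can reference them.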
3877 void AOTRuntimeConstants::initialize_from_runtime() {
3878 BarrierSet* bs = BarrierSet::barrier_set();
3879 if (bs->is_a(BarrierSet::CardTableBarrierSet)) {
3880 CardTableBarrierSet* ctbs = ((CardTableBarrierSet*)bs);
3881 _aot_runtime_constants._grain_shift = ctbs->grain_shift();
3882 _aot_runtime_constants._card_shift = ctbs->card_shift();
3883 }
3884 }
3885
3886 AOTRuntimeConstants AOTRuntimeConstants::_aot_runtime_constants;
3887
3888 address AOTRuntimeConstants::_field_addresses_list[] = {
3889 grain_shift_address(),
3890 card_shift_address(),
3891 nullptr
3892 };
3893
3894
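// Readers of AOT nmethod data are tracked in _nmethod_readers: a non-negative value
// means the cache is open with that many active readers. Closing flips the counter to
// -(cur + 1) so no new readers can enter, then spins until the departing readers have
// counted it back up to -1 (see ReadingMark below).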
3895 void AOTCodeCache::wait_for_no_nmethod_readers() {
3896 while (true) {
3897 int cur = Atomic::load(&_nmethod_readers);
3898 int upd = -(cur + 1);
3899 if (cur >= 0 && Atomic::cmpxchg(&_nmethod_readers, cur, upd) == cur) {
3900 // Success, no new readers should appear.
3901 break;
3902 }
3903 }
3904
3905 // Now wait for all readers to leave.
3906 SpinYield w;
3907 while (Atomic::load(&_nmethod_readers) != -1) {
3908 w.wait();
3909 }
3910 }
3911
3912 AOTCodeCache::ReadingMark::ReadingMark() {
3913 while (true) {
3914 int cur = Atomic::load(&_nmethod_readers);
3915 if (cur < 0) {
3916 // Cache is already closed, cannot proceed.
3917 _failed = true;
3918 return;
3919 }
3920 if (Atomic::cmpxchg(&_nmethod_readers, cur, cur + 1) == cur) {
3921 // Successfully recorded ourselves as entered.
3922 _failed = false;
3923 return;
3924 }
3925 }
3926 }
3927
3928 AOTCodeCache::ReadingMark::~ReadingMark() {
3929 if (_failed) {
3930 return;
3931 }
3932 while (true) {
3933 int cur = Atomic::load(&_nmethod_readers);
3934 if (cur > 0) {
3935 // Cache is open, we are counting down towards 0.
3936 if (Atomic::cmpxchg(&_nmethod_readers, cur, cur - 1) == cur) {
3937 return;
3938 }
3939 } else {
3940 // Cache is closed, we are counting up towards -1.
3941 if (Atomic::cmpxchg(&_nmethod_readers, cur, cur + 1) == cur) {
3942 return;
3943 }
3944 }
3945 }
3946 }
3947
3948 void AOTCodeCache::print_timers_on(outputStream* st) {
3949 if (is_using_code()) {
3950 st->print_cr (" AOT Code Load Time: %7.3f s", _t_totalLoad.seconds());
3951 st->print_cr (" nmethod register: %7.3f s", _t_totalRegister.seconds());
3952 st->print_cr (" find cached code: %7.3f s", _t_totalFind.seconds());
3953 }
3954 if (is_dumping_code()) {
3955 st->print_cr (" AOT Code Store Time: %7.3f s", _t_totalStore.seconds());
3956 }
3957 }
3958
3959 AOTCodeStats AOTCodeStats::add_aot_code_stats(AOTCodeStats stats1, AOTCodeStats stats2) {
3960 AOTCodeStats result;
3961 for (int kind = AOTCodeEntry::None; kind < AOTCodeEntry::Kind_count; kind++) {
3962 result.ccstats._kind_cnt[kind] = stats1.entry_count(kind) + stats2.entry_count(kind);
3963 }
3964
3965 for (int lvl = CompLevel_none; lvl < AOTCompLevel_count; lvl++) {
3966 result.ccstats._nmethod_cnt[lvl] = stats1.nmethod_count(lvl) + stats2.nmethod_count(lvl);
3967 }
3968 result.ccstats._clinit_barriers_cnt = stats1.clinit_barriers_count() + stats2.clinit_barriers_count();
3969 return result;
3970 }
3971
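// When dumping, combine statistics for entries carried over from a previously loaded
// cache (_load_entries) with entries newly written in this run (_store_entries) and log
// a per-kind and per-tier summary on exit.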
3972 void AOTCodeCache::log_stats_on_exit() {
3973 LogStreamHandle(Debug, aot, codecache, exit) log;
3974 if (log.is_enabled()) {
3975 AOTCodeStats prev_stats;
3976 AOTCodeStats current_stats;
3977 AOTCodeStats total_stats;
3978 uint max_size = 0;
3979
3980 uint load_count = (_load_header != nullptr) ? _load_header->entries_count() : 0;
3981
3982 for (uint i = 0; i < load_count; i++) {
3983 prev_stats.collect_entry_stats(&_load_entries[i]);
3984 if (max_size < _load_entries[i].size()) {
3985 max_size = _load_entries[i].size();
3986 }
3987 }
3988 for (uint i = 0; i < _store_entries_cnt; i++) {
3989 current_stats.collect_entry_stats(&_store_entries[i]);
3990 if (max_size < _store_entries[i].size()) {
3991 max_size = _store_entries[i].size();
3992 }
3993 }
3994 total_stats = AOTCodeStats::add_aot_code_stats(prev_stats, current_stats);
3995
3996 log.print_cr("Wrote %d AOTCodeEntry entries (%u max size) to AOT Code Cache",
3997 total_stats.total_count(), max_size);
3998 for (uint kind = AOTCodeEntry::None; kind < AOTCodeEntry::Kind_count; kind++) {
3999 if (total_stats.entry_count(kind) > 0) {
4000 log.print_cr(" %s: total=%u(old=%u+new=%u)",
4001 aot_code_entry_kind_name[kind], total_stats.entry_count(kind), prev_stats.entry_count(kind), current_stats.entry_count(kind));
4002 if (kind == AOTCodeEntry::Code) {
4003 for (uint lvl = CompLevel_none; lvl < AOTCompLevel_count; lvl++) {
4004 if (total_stats.nmethod_count(lvl) > 0) {
4005 log.print_cr(" Tier %d: total=%u(old=%u+new=%u)",
4006 lvl, total_stats.nmethod_count(lvl), prev_stats.nmethod_count(lvl), current_stats.nmethod_count(lvl));
4007 }
4008 }
4009 }
4010 }
4011 }
4012 log.print_cr("Total=%u(old=%u+new=%u)", total_stats.total_count(), prev_stats.total_count(), current_stats.total_count());
4013 }
4014 }
4015
4016 static void print_helper1(outputStream* st, const char* name, int count) {
4017 if (count > 0) {
4018 st->print(" %s=%d", name, count);
4019 }
4020 }
4021
4022 void AOTCodeCache::print_statistics_on(outputStream* st) {
4023 AOTCodeCache* cache = open_for_use();
4024 if (cache != nullptr) {
4025 ReadingMark rdmk;
4026 if (rdmk.failed()) {
4027 // Cache is closed, cannot touch anything.
4028 return;
4029 }
4030
4031 uint count = cache->_load_header->entries_count();
4032 uint* search_entries = (uint*)cache->addr(cache->_load_header->entries_offset()); // [id, index]
4033 AOTCodeEntry* load_entries = (AOTCodeEntry*)(search_entries + 2 * count);
4034
4035 AOTCodeStats stats;
4036 for (uint i = 0; i < count; i++) {
4037 stats.collect_all_stats(&load_entries[i]);
4038 }
4039
4040 for (uint kind = AOTCodeEntry::None; kind < AOTCodeEntry::Kind_count; kind++) {
4041 if (stats.entry_count(kind) > 0) {
4042 st->print(" %s:", aot_code_entry_kind_name[kind]);
4043 print_helper1(st, "total", stats.entry_count(kind));
4044 print_helper1(st, "loaded", stats.entry_loaded_count(kind));
4045 print_helper1(st, "invalidated", stats.entry_invalidated_count(kind));
4046 print_helper1(st, "failed", stats.entry_load_failed_count(kind));
4047 st->cr();
4048 }
4049 if (kind == AOTCodeEntry::Code) {
4050 for (uint lvl = CompLevel_none; lvl < AOTCompLevel_count; lvl++) {
4051 if (stats.nmethod_count(lvl) > 0) {
4052 st->print(" AOT Code T%d", lvl);
4053 print_helper1(st, "total", stats.nmethod_count(lvl));
4054 print_helper1(st, "loaded", stats.nmethod_loaded_count(lvl));
4055 print_helper1(st, "invalidated", stats.nmethod_invalidated_count(lvl));
4056 print_helper1(st, "failed", stats.nmethod_load_failed_count(lvl));
4057 if (lvl == AOTCompLevel_count-1) {
4058 print_helper1(st, "has_clinit_barriers", stats.clinit_barriers_count());
4059 }
4060 st->cr();
4061 }
4062 }
4063 }
4064 }
4065 LogStreamHandle(Debug, aot, codecache, init) log;
4066 if (log.is_enabled()) {
4067 AOTCodeCache::print_unused_entries_on(&log);
4068 }
4069 LogStreamHandle(Trace, aot, codecache) aot_info;
4070 // need a lock to traverse the code cache
4071 if (aot_info.is_enabled()) {
4072 MutexLocker locker(CodeCache_lock, Mutex::_no_safepoint_check_flag);
4073 NMethodIterator iter(NMethodIterator::all);
4074 while (iter.next()) {
4075 nmethod* nm = iter.method();
4076 if (nm->is_in_use() && !nm->is_native_method() && !nm->is_osr_method()) {
4077 aot_info.print("%5d:%c%c%c%d:", nm->compile_id(),
4078 (nm->method()->is_shared() ? 'S' : ' '),
4079 (nm->is_aot() ? 'A' : ' '),
4080 (nm->preloaded() ? 'P' : ' '),
4081 nm->comp_level());
4082 print_helper(nm, &aot_info);
4083 aot_info.print(": ");
4084 CompileTask::print(&aot_info, nm, nullptr, true /*short_form*/);
4085 LogStreamHandle(Trace, aot, codecache) aot_debug;
4086 if (aot_debug.is_enabled()) {
4087 MethodTrainingData* mtd = MethodTrainingData::find(methodHandle(Thread::current(), nm->method()));
4088 if (mtd != nullptr) {
4089 mtd->iterate_compiles([&](CompileTrainingData* ctd) {
4090 aot_debug.print(" CTD: "); ctd->print_on(&aot_debug); aot_debug.cr();
4091 });
4092 }
4093 }
4094 }
4095 }
4096 }
4097 } else {
4098 st->print_cr("failed to map code cache");
4099 }
4100 }
4101
4102 void AOTCodeEntry::print(outputStream* st) const {
4103 st->print_cr(" AOT Code Cache entry " INTPTR_FORMAT " [kind: %d, id: " UINT32_FORMAT_X_0 ", offset: %d, size: %d, comp_level: %d, comp_id: %d, %s%s%s%s]",
4104 p2i(this), (int)_kind, _id, _offset, _size, _comp_level, _comp_id,
4105 (_not_entrant? "not_entrant" : "entrant"),
4106 (_loaded ? ", loaded" : ""),
4107 (_has_clinit_barriers ? ", has_clinit_barriers" : ""),
4108 (_for_preload ? ", for_preload" : ""));
4109 }
4110
4111 void AOTCodeCache::print_on(outputStream* st) {
4112 if (opened_cache != nullptr && opened_cache->for_use()) {
4113 ReadingMark rdmk;
4114 if (rdmk.failed()) {
4115 // Cache is closed, cannot touch anything.
4116 return;
4117 }
4118
4119 st->print_cr("\nAOT Code Cache");
4120 uint count = opened_cache->_load_header->entries_count();
4121 uint* search_entries = (uint*)opened_cache->addr(opened_cache->_load_header->entries_offset()); // [id, index]
4122 AOTCodeEntry* load_entries = (AOTCodeEntry*)(search_entries + 2 * count);
4123
4124 for (uint i = 0; i < count; i++) {
4125 int index = search_entries[2*i + 1];
4126 AOTCodeEntry* entry = &(load_entries[index]);
4127
4128 uint entry_position = entry->offset();
4129 uint name_offset = entry->name_offset() + entry_position;
4130 const char* saved_name = opened_cache->addr(name_offset);
4131
4132 st->print_cr("%4u: %10s idx:%4u Id:%u L%u size=%u '%s' %s%s%s%s",
4133 i, aot_code_entry_kind_name[entry->kind()], index, entry->id(), entry->comp_level(),
4134 entry->size(), saved_name,
4135 entry->has_clinit_barriers() ? " has_clinit_barriers" : "",
4136 entry->for_preload() ? " for_preload" : "",
4137 entry->is_loaded() ? " loaded" : "",
4138 entry->not_entrant() ? " not_entrant" : "");
4139
4140 st->print_raw(" ");
4141 AOTCodeReader reader(opened_cache, entry, nullptr);
4142 reader.print_on(st);
4143 }
4144 }
4145 }
4146
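// Report AOT code entries that were never loaded, using training data to skip methods
// that are already queued for compilation or have been compiled online at the same or a
// higher level.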
4147 void AOTCodeCache::print_unused_entries_on(outputStream* st) {
4148 LogStreamHandle(Info, aot, codecache, init) info;
4149 if (info.is_enabled()) {
4150 AOTCodeCache::iterate([&](AOTCodeEntry* entry) {
4151 if (entry->is_code() && !entry->is_loaded()) {
4152 MethodTrainingData* mtd = MethodTrainingData::find(methodHandle(Thread::current(), entry->method()));
4153 if (mtd != nullptr) {
4154 if (mtd->has_holder()) {
4155 if (mtd->holder()->method_holder()->is_initialized()) {
4156 ResourceMark rm;
4157 mtd->iterate_compiles([&](CompileTrainingData* ctd) {
4158 if ((uint)ctd->level() == entry->comp_level()) {
4159 if (ctd->init_deps_left() == 0) {
4160 nmethod* nm = mtd->holder()->code();
4161 if (nm == nullptr) {
4162 if (mtd->holder()->queued_for_compilation()) {
4163 return; // scheduled for compilation
4164 }
4165 } else if ((uint)nm->comp_level() >= entry->comp_level()) {
4166 return; // already compiled online and superseded by code at the same or a higher level
4167 }
4168 info.print("AOT Code Cache entry not loaded: ");
4169 ctd->print_on(&info);
4170 info.cr();
4171 }
4172 }
4173 });
4174 } else {
4175 // not yet initialized
4176 }
4177 } else {
4178 info.print("AOT Code Cache entry doesn't have a holder: ");
4179 mtd->print_on(&info);
4180 info.cr();
4181 }
4182 }
4183 }
4184 });
4185 }
4186 }
4187
4188 void AOTCodeReader::print_on(outputStream* st) {
4189 uint entry_position = _entry->offset();
4190 set_read_position(entry_position);
4191
4192 // Read name
4193 uint name_offset = entry_position + _entry->name_offset();
4194 uint name_size = _entry->name_size(); // Includes '\0'
4195 const char* name = addr(name_offset);
4196
4197 st->print_cr(" name: %s", name);
4198 }
4199
|