1 /*
2 * Copyright (c) 1997, 2025, Oracle and/or its affiliates. All rights reserved.
3 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
4 *
5 * This code is free software; you can redistribute it and/or modify it
6 * under the terms of the GNU General Public License version 2 only, as
7 * published by the Free Software Foundation.
8 *
9 * This code is distributed in the hope that it will be useful, but WITHOUT
10 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
11 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
12 * version 2 for more details (a copy is included in the LICENSE file that
13 * accompanied this code).
14 *
15 * You should have received a copy of the GNU General Public License version
16 * 2 along with this work; if not, write to the Free Software Foundation,
17 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
18 *
19 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
20 * or visit www.oracle.com if you need additional information or have any
21 * questions.
22 *
23 */
24
25 #include "asm/assembler.inline.hpp"
26 #include "cds/cdsConfig.hpp"
27 #include "code/aotCodeCache.hpp"
28 #include "code/codeCache.hpp"
29 #include "code/compiledIC.hpp"
30 #include "code/dependencies.hpp"
31 #include "code/nativeInst.hpp"
32 #include "code/nmethod.inline.hpp"
33 #include "code/scopeDesc.hpp"
34 #include "compiler/abstractCompiler.hpp"
35 #include "compiler/compilationLog.hpp"
36 #include "compiler/compileBroker.hpp"
37 #include "compiler/compileLog.hpp"
38 #include "compiler/compilerDirectives.hpp"
39 #include "compiler/compilerOracle.hpp"
40 #include "compiler/compileTask.hpp"
41 #include "compiler/directivesParser.hpp"
42 #include "compiler/disassembler.hpp"
43 #include "compiler/oopMap.inline.hpp"
44 #include "gc/shared/barrierSet.hpp"
45 #include "gc/shared/barrierSetNMethod.hpp"
46 #include "gc/shared/classUnloadingContext.hpp"
47 #include "gc/shared/collectedHeap.hpp"
48 #include "interpreter/bytecode.inline.hpp"
49 #include "jvm.h"
50 #include "logging/log.hpp"
51 #include "logging/logStream.hpp"
52 #include "memory/allocation.inline.hpp"
53 #include "memory/resourceArea.hpp"
54 #include "memory/universe.hpp"
55 #include "oops/access.inline.hpp"
56 #include "oops/klass.inline.hpp"
57 #include "oops/method.inline.hpp"
58 #include "oops/methodData.hpp"
59 #include "oops/oop.inline.hpp"
60 #include "oops/weakHandle.inline.hpp"
61 #include "prims/jvmtiImpl.hpp"
62 #include "prims/jvmtiThreadState.hpp"
63 #include "prims/methodHandles.hpp"
64 #include "runtime/atomicAccess.hpp"
65 #include "runtime/continuation.hpp"
66 #include "runtime/deoptimization.hpp"
67 #include "runtime/flags/flagSetting.hpp"
68 #include "runtime/frame.inline.hpp"
69 #include "runtime/handles.inline.hpp"
70 #include "runtime/jniHandles.inline.hpp"
71 #include "runtime/orderAccess.hpp"
72 #include "runtime/os.hpp"
73 #include "runtime/safepointVerifiers.hpp"
74 #include "runtime/serviceThread.hpp"
75 #include "runtime/sharedRuntime.hpp"
76 #include "runtime/signature.hpp"
77 #include "runtime/threadWXSetters.inline.hpp"
78 #include "runtime/vmThread.hpp"
79 #include "utilities/align.hpp"
80 #include "utilities/copy.hpp"
81 #include "utilities/dtrace.hpp"
82 #include "utilities/events.hpp"
83 #include "utilities/globalDefinitions.hpp"
84 #include "utilities/hashTable.hpp"
85 #include "utilities/xmlstream.hpp"
86 #if INCLUDE_JVMCI
87 #include "jvmci/jvmciRuntime.hpp"
88 #endif
89
90 #ifdef DTRACE_ENABLED
91
92 // Only bother with this argument setup if dtrace is available
93
94 #define DTRACE_METHOD_UNLOAD_PROBE(method) \
95 { \
96 Method* m = (method); \
97 if (m != nullptr) { \
98 Symbol* klass_name = m->klass_name(); \
99 Symbol* name = m->name(); \
100 Symbol* signature = m->signature(); \
101 HOTSPOT_COMPILED_METHOD_UNLOAD( \
102 (char *) klass_name->bytes(), klass_name->utf8_length(), \
103 (char *) name->bytes(), name->utf8_length(), \
104 (char *) signature->bytes(), signature->utf8_length()); \
105 } \
106 }
107
108 #else // ndef DTRACE_ENABLED
109
110 #define DTRACE_METHOD_UNLOAD_PROBE(method)
111
112 #endif
113
114 // Cast from int value to narrow type
115 #define CHECKED_CAST(result, T, thing) \
116 result = static_cast<T>(thing); \
117 guarantee(static_cast<int>(result) == thing, "failed: %d != %d", static_cast<int>(result), thing);
118
119 //---------------------------------------------------------------------------------
120 // NMethod statistics
121 // They are printed under various flags, including:
122 // PrintC1Statistics, PrintOptoStatistics, LogVMOutput, and LogCompilation.
123 // (In the latter two cases, they like other stats are printed to the log only.)
124
125 #ifndef PRODUCT
126 // These variables are put into one block to reduce relocations
127 // and make it simpler to print from the debugger.
128 struct java_nmethod_stats_struct {
129 uint nmethod_count;
130 uint total_nm_size;
131 uint total_immut_size;
132 uint total_mut_size;
133 uint relocation_size;
134 uint consts_size;
135 uint insts_size;
136 uint stub_size;
137 uint oops_size;
138 uint metadata_size;
139 uint dependencies_size;
140 uint nul_chk_table_size;
141 uint handler_table_size;
142 uint scopes_pcs_size;
143 uint scopes_data_size;
144 #if INCLUDE_JVMCI
145 uint speculations_size;
146 uint jvmci_data_size;
147 #endif
148
149 void note_nmethod(nmethod* nm) {
150 nmethod_count += 1;
151 total_nm_size += nm->size();
152 total_immut_size += nm->immutable_data_size();
153 total_mut_size += nm->mutable_data_size();
154 relocation_size += nm->relocation_size();
155 consts_size += nm->consts_size();
156 insts_size += nm->insts_size();
157 stub_size += nm->stub_size();
158 oops_size += nm->oops_size();
159 metadata_size += nm->metadata_size();
160 scopes_data_size += nm->scopes_data_size();
161 scopes_pcs_size += nm->scopes_pcs_size();
162 dependencies_size += nm->dependencies_size();
163 handler_table_size += nm->handler_table_size();
164 nul_chk_table_size += nm->nul_chk_table_size();
165 #if INCLUDE_JVMCI
166 speculations_size += nm->speculations_size();
167 jvmci_data_size += nm->jvmci_data_size();
168 #endif
169 }
170 void print_nmethod_stats(const char* name) {
171 if (nmethod_count == 0) return;
172 tty->print_cr("Statistics for %u bytecoded nmethods for %s:", nmethod_count, name);
173 uint total_size = total_nm_size + total_immut_size + total_mut_size;
174 if (total_nm_size != 0) {
175 tty->print_cr(" total size = %u (100%%)", total_size);
176 tty->print_cr(" in CodeCache = %u (%f%%)", total_nm_size, (total_nm_size * 100.0f)/total_size);
177 }
178 uint header_size = (uint)(nmethod_count * sizeof(nmethod));
179 if (nmethod_count != 0) {
180 tty->print_cr(" header = %u (%f%%)", header_size, (header_size * 100.0f)/total_nm_size);
181 }
182 if (consts_size != 0) {
183 tty->print_cr(" constants = %u (%f%%)", consts_size, (consts_size * 100.0f)/total_nm_size);
184 }
185 if (insts_size != 0) {
186 tty->print_cr(" main code = %u (%f%%)", insts_size, (insts_size * 100.0f)/total_nm_size);
187 }
188 if (stub_size != 0) {
189 tty->print_cr(" stub code = %u (%f%%)", stub_size, (stub_size * 100.0f)/total_nm_size);
190 }
191 if (oops_size != 0) {
192 tty->print_cr(" oops = %u (%f%%)", oops_size, (oops_size * 100.0f)/total_nm_size);
193 }
194 if (total_mut_size != 0) {
195 tty->print_cr(" mutable data = %u (%f%%)", total_mut_size, (total_mut_size * 100.0f)/total_size);
196 }
197 if (relocation_size != 0) {
198 tty->print_cr(" relocation = %u (%f%%)", relocation_size, (relocation_size * 100.0f)/total_mut_size);
199 }
200 if (metadata_size != 0) {
201 tty->print_cr(" metadata = %u (%f%%)", metadata_size, (metadata_size * 100.0f)/total_mut_size);
202 }
203 #if INCLUDE_JVMCI
204 if (jvmci_data_size != 0) {
205 tty->print_cr(" JVMCI data = %u (%f%%)", jvmci_data_size, (jvmci_data_size * 100.0f)/total_mut_size);
206 }
207 #endif
208 if (total_immut_size != 0) {
209 tty->print_cr(" immutable data = %u (%f%%)", total_immut_size, (total_immut_size * 100.0f)/total_size);
210 }
211 if (dependencies_size != 0) {
212 tty->print_cr(" dependencies = %u (%f%%)", dependencies_size, (dependencies_size * 100.0f)/total_immut_size);
213 }
214 if (nul_chk_table_size != 0) {
215 tty->print_cr(" nul chk table = %u (%f%%)", nul_chk_table_size, (nul_chk_table_size * 100.0f)/total_immut_size);
216 }
217 if (handler_table_size != 0) {
218 tty->print_cr(" handler table = %u (%f%%)", handler_table_size, (handler_table_size * 100.0f)/total_immut_size);
219 }
220 if (scopes_pcs_size != 0) {
221 tty->print_cr(" scopes pcs = %u (%f%%)", scopes_pcs_size, (scopes_pcs_size * 100.0f)/total_immut_size);
222 }
223 if (scopes_data_size != 0) {
224 tty->print_cr(" scopes data = %u (%f%%)", scopes_data_size, (scopes_data_size * 100.0f)/total_immut_size);
225 }
226 #if INCLUDE_JVMCI
227 if (speculations_size != 0) {
228 tty->print_cr(" speculations = %u (%f%%)", speculations_size, (speculations_size * 100.0f)/total_immut_size);
229 }
230 #endif
231 }
232 };
233
234 struct native_nmethod_stats_struct {
235 uint native_nmethod_count;
236 uint native_total_size;
237 uint native_relocation_size;
238 uint native_insts_size;
239 uint native_oops_size;
240 uint native_metadata_size;
241 void note_native_nmethod(nmethod* nm) {
242 native_nmethod_count += 1;
243 native_total_size += nm->size();
244 native_relocation_size += nm->relocation_size();
245 native_insts_size += nm->insts_size();
246 native_oops_size += nm->oops_size();
247 native_metadata_size += nm->metadata_size();
248 }
249 void print_native_nmethod_stats() {
250 if (native_nmethod_count == 0) return;
251 tty->print_cr("Statistics for %u native nmethods:", native_nmethod_count);
252 if (native_total_size != 0) tty->print_cr(" N. total size = %u", native_total_size);
253 if (native_relocation_size != 0) tty->print_cr(" N. relocation = %u", native_relocation_size);
254 if (native_insts_size != 0) tty->print_cr(" N. main code = %u", native_insts_size);
255 if (native_oops_size != 0) tty->print_cr(" N. oops = %u", native_oops_size);
256 if (native_metadata_size != 0) tty->print_cr(" N. metadata = %u", native_metadata_size);
257 }
258 };
259
260 struct pc_nmethod_stats_struct {
261 uint pc_desc_init; // number of initialization of cache (= number of caches)
262 uint pc_desc_queries; // queries to nmethod::find_pc_desc
263 uint pc_desc_approx; // number of those which have approximate true
264 uint pc_desc_repeats; // number of _pc_descs[0] hits
265 uint pc_desc_hits; // number of LRU cache hits
266 uint pc_desc_tests; // total number of PcDesc examinations
267 uint pc_desc_searches; // total number of quasi-binary search steps
268 uint pc_desc_adds; // number of LUR cache insertions
269
270 void print_pc_stats() {
271 tty->print_cr("PcDesc Statistics: %u queries, %.2f comparisons per query",
272 pc_desc_queries,
273 (double)(pc_desc_tests + pc_desc_searches)
274 / pc_desc_queries);
275 tty->print_cr(" caches=%d queries=%u/%u, hits=%u+%u, tests=%u+%u, adds=%u",
276 pc_desc_init,
277 pc_desc_queries, pc_desc_approx,
278 pc_desc_repeats, pc_desc_hits,
279 pc_desc_tests, pc_desc_searches, pc_desc_adds);
280 }
281 };
282
283 #ifdef COMPILER1
284 static java_nmethod_stats_struct c1_java_nmethod_stats;
285 #endif
286 #ifdef COMPILER2
287 static java_nmethod_stats_struct c2_java_nmethod_stats;
288 #endif
289 #if INCLUDE_JVMCI
290 static java_nmethod_stats_struct jvmci_java_nmethod_stats;
291 #endif
292 static java_nmethod_stats_struct unknown_java_nmethod_stats;
293
294 static native_nmethod_stats_struct native_nmethod_stats;
295 static pc_nmethod_stats_struct pc_nmethod_stats;
296
297 static void note_java_nmethod(nmethod* nm) {
298 #ifdef COMPILER1
299 if (nm->is_compiled_by_c1()) {
300 c1_java_nmethod_stats.note_nmethod(nm);
301 } else
302 #endif
303 #ifdef COMPILER2
304 if (nm->is_compiled_by_c2()) {
305 c2_java_nmethod_stats.note_nmethod(nm);
306 } else
307 #endif
308 #if INCLUDE_JVMCI
309 if (nm->is_compiled_by_jvmci()) {
310 jvmci_java_nmethod_stats.note_nmethod(nm);
311 } else
312 #endif
313 {
314 unknown_java_nmethod_stats.note_nmethod(nm);
315 }
316 }
317 #endif // !PRODUCT
318
319 //---------------------------------------------------------------------------------
320
321
322 ExceptionCache::ExceptionCache(Handle exception, address pc, address handler) {
323 assert(pc != nullptr, "Must be non null");
324 assert(exception.not_null(), "Must be non null");
325 assert(handler != nullptr, "Must be non null");
326
327 _count = 0;
328 _exception_type = exception->klass();
329 _next = nullptr;
330 _purge_list_next = nullptr;
331
332 add_address_and_handler(pc,handler);
333 }
334
335
336 address ExceptionCache::match(Handle exception, address pc) {
337 assert(pc != nullptr,"Must be non null");
338 assert(exception.not_null(),"Must be non null");
339 if (exception->klass() == exception_type()) {
340 return (test_address(pc));
341 }
342
343 return nullptr;
344 }
345
346
347 bool ExceptionCache::match_exception_with_space(Handle exception) {
348 assert(exception.not_null(),"Must be non null");
349 if (exception->klass() == exception_type() && count() < cache_size) {
350 return true;
351 }
352 return false;
353 }
354
355
356 address ExceptionCache::test_address(address addr) {
357 int limit = count();
358 for (int i = 0; i < limit; i++) {
359 if (pc_at(i) == addr) {
360 return handler_at(i);
361 }
362 }
363 return nullptr;
364 }
365
366
367 bool ExceptionCache::add_address_and_handler(address addr, address handler) {
368 if (test_address(addr) == handler) return true;
369
370 int index = count();
371 if (index < cache_size) {
372 set_pc_at(index, addr);
373 set_handler_at(index, handler);
374 increment_count();
375 return true;
376 }
377 return false;
378 }
379
380 ExceptionCache* ExceptionCache::next() {
381 return AtomicAccess::load(&_next);
382 }
383
384 void ExceptionCache::set_next(ExceptionCache *ec) {
385 AtomicAccess::store(&_next, ec);
386 }
387
388 //-----------------------------------------------------------------------------
389
390
391 // Helper used by both find_pc_desc methods.
392 static inline bool match_desc(PcDesc* pc, int pc_offset, bool approximate) {
393 NOT_PRODUCT(++pc_nmethod_stats.pc_desc_tests);
394 if (!approximate) {
395 return pc->pc_offset() == pc_offset;
396 } else {
397 // Do not look before the sentinel
398 assert(pc_offset > PcDesc::lower_offset_limit, "illegal pc_offset");
399 return pc_offset <= pc->pc_offset() && (pc-1)->pc_offset() < pc_offset;
400 }
401 }
402
403 void PcDescCache::init_to(PcDesc* initial_pc_desc) {
404 NOT_PRODUCT(++pc_nmethod_stats.pc_desc_init);
405 // initialize the cache by filling it with benign (non-null) values
406 assert(initial_pc_desc != nullptr && initial_pc_desc->pc_offset() == PcDesc::lower_offset_limit,
407 "must start with a sentinel");
408 for (int i = 0; i < cache_size; i++) {
409 _pc_descs[i] = initial_pc_desc;
410 }
411 }
412
413 PcDesc* PcDescCache::find_pc_desc(int pc_offset, bool approximate) {
414 // Note: one might think that caching the most recently
415 // read value separately would be a win, but one would be
416 // wrong. When many threads are updating it, the cache
417 // line it's in would bounce between caches, negating
418 // any benefit.
419
420 // In order to prevent race conditions do not load cache elements
421 // repeatedly, but use a local copy:
422 PcDesc* res;
423
424 // Step one: Check the most recently added value.
425 res = _pc_descs[0];
426 assert(res != nullptr, "PcDesc cache should be initialized already");
427
428 // Approximate only here since PcDescContainer::find_pc_desc() checked for exact case.
429 if (approximate && match_desc(res, pc_offset, approximate)) {
430 NOT_PRODUCT(++pc_nmethod_stats.pc_desc_repeats);
431 return res;
432 }
433
434 // Step two: Check the rest of the LRU cache.
435 for (int i = 1; i < cache_size; ++i) {
436 res = _pc_descs[i];
437 if (res->pc_offset() < 0) break; // optimization: skip empty cache
438 if (match_desc(res, pc_offset, approximate)) {
439 NOT_PRODUCT(++pc_nmethod_stats.pc_desc_hits);
440 return res;
441 }
442 }
443
444 // Report failure.
445 return nullptr;
446 }
447
448 void PcDescCache::add_pc_desc(PcDesc* pc_desc) {
449 NOT_PRODUCT(++pc_nmethod_stats.pc_desc_adds);
450 // Update the LRU cache by shifting pc_desc forward.
451 for (int i = 0; i < cache_size; i++) {
452 PcDesc* next = _pc_descs[i];
453 _pc_descs[i] = pc_desc;
454 pc_desc = next;
455 }
456 }
457
458 // adjust pcs_size so that it is a multiple of both oopSize and
459 // sizeof(PcDesc) (assumes that if sizeof(PcDesc) is not a multiple
460 // of oopSize, then 2*sizeof(PcDesc) is)
461 static int adjust_pcs_size(int pcs_size) {
462 int nsize = align_up(pcs_size, oopSize);
463 if ((nsize % sizeof(PcDesc)) != 0) {
464 nsize = pcs_size + sizeof(PcDesc);
465 }
466 assert((nsize % oopSize) == 0, "correct alignment");
467 return nsize;
468 }
469
470 // Returns a string version of the method state.
471 const char* nmethod::state() const {
472 int state = get_state();
473 switch (state) {
474 case not_installed:
475 return "not installed";
476 case in_use:
477 return "in use";
478 case not_entrant:
479 return "not_entrant";
480 default:
481 fatal("unexpected method state: %d", state);
482 return nullptr;
483 }
484 }
485
486 void nmethod::set_deoptimized_done() {
487 ConditionalMutexLocker ml(NMethodState_lock, !NMethodState_lock->owned_by_self(), Mutex::_no_safepoint_check_flag);
488 if (_deoptimization_status != deoptimize_done) { // can't go backwards
489 AtomicAccess::store(&_deoptimization_status, deoptimize_done);
490 }
491 }
492
493 ExceptionCache* nmethod::exception_cache_acquire() const {
494 return AtomicAccess::load_acquire(&_exception_cache);
495 }
496
497 void nmethod::add_exception_cache_entry(ExceptionCache* new_entry) {
498 assert(ExceptionCache_lock->owned_by_self(),"Must hold the ExceptionCache_lock");
499 assert(new_entry != nullptr,"Must be non null");
500 assert(new_entry->next() == nullptr, "Must be null");
501
502 for (;;) {
503 ExceptionCache *ec = exception_cache();
504 if (ec != nullptr) {
505 Klass* ex_klass = ec->exception_type();
506 if (!ex_klass->is_loader_alive()) {
507 // We must guarantee that entries are not inserted with new next pointer
508 // edges to ExceptionCache entries with dead klasses, due to bad interactions
509 // with concurrent ExceptionCache cleanup. Therefore, the inserts roll
510 // the head pointer forward to the first live ExceptionCache, so that the new
511 // next pointers always point at live ExceptionCaches, that are not removed due
512 // to concurrent ExceptionCache cleanup.
513 ExceptionCache* next = ec->next();
514 if (AtomicAccess::cmpxchg(&_exception_cache, ec, next) == ec) {
515 CodeCache::release_exception_cache(ec);
516 }
517 continue;
518 }
519 ec = exception_cache();
520 if (ec != nullptr) {
521 new_entry->set_next(ec);
522 }
523 }
524 if (AtomicAccess::cmpxchg(&_exception_cache, ec, new_entry) == ec) {
525 return;
526 }
527 }
528 }
529
530 void nmethod::clean_exception_cache() {
531 // For each nmethod, only a single thread may call this cleanup function
532 // at the same time, whether called in STW cleanup or concurrent cleanup.
533 // Note that if the GC is processing exception cache cleaning in a concurrent phase,
534 // then a single writer may contend with cleaning up the head pointer to the
535 // first ExceptionCache node that has a Klass* that is alive. That is fine,
536 // as long as there is no concurrent cleanup of next pointers from concurrent writers.
537 // And the concurrent writers do not clean up next pointers, only the head.
538 // Also note that concurrent readers will walk through Klass* pointers that are not
539 // alive. That does not cause ABA problems, because Klass* is deleted after
540 // a handshake with all threads, after all stale ExceptionCaches have been
541 // unlinked. That is also when the CodeCache::exception_cache_purge_list()
542 // is deleted, with all ExceptionCache entries that were cleaned concurrently.
543 // That similarly implies that CAS operations on ExceptionCache entries do not
544 // suffer from ABA problems as unlinking and deletion is separated by a global
545 // handshake operation.
546 ExceptionCache* prev = nullptr;
547 ExceptionCache* curr = exception_cache_acquire();
548
549 while (curr != nullptr) {
550 ExceptionCache* next = curr->next();
551
552 if (!curr->exception_type()->is_loader_alive()) {
553 if (prev == nullptr) {
554 // Try to clean head; this is contended by concurrent inserts, that
555 // both lazily clean the head, and insert entries at the head. If
556 // the CAS fails, the operation is restarted.
557 if (AtomicAccess::cmpxchg(&_exception_cache, curr, next) != curr) {
558 prev = nullptr;
559 curr = exception_cache_acquire();
560 continue;
561 }
562 } else {
563 // It is impossible to during cleanup connect the next pointer to
564 // an ExceptionCache that has not been published before a safepoint
565 // prior to the cleanup. Therefore, release is not required.
566 prev->set_next(next);
567 }
568 // prev stays the same.
569
570 CodeCache::release_exception_cache(curr);
571 } else {
572 prev = curr;
573 }
574
575 curr = next;
576 }
577 }
578
579 // public method for accessing the exception cache
580 // These are the public access methods.
581 address nmethod::handler_for_exception_and_pc(Handle exception, address pc) {
582 // We never grab a lock to read the exception cache, so we may
583 // have false negatives. This is okay, as it can only happen during
584 // the first few exception lookups for a given nmethod.
585 ExceptionCache* ec = exception_cache_acquire();
586 while (ec != nullptr) {
587 address ret_val;
588 if ((ret_val = ec->match(exception,pc)) != nullptr) {
589 return ret_val;
590 }
591 ec = ec->next();
592 }
593 return nullptr;
594 }
595
596 void nmethod::add_handler_for_exception_and_pc(Handle exception, address pc, address handler) {
597 // There are potential race conditions during exception cache updates, so we
598 // must own the ExceptionCache_lock before doing ANY modifications. Because
599 // we don't lock during reads, it is possible to have several threads attempt
600 // to update the cache with the same data. We need to check for already inserted
601 // copies of the current data before adding it.
602
603 MutexLocker ml(ExceptionCache_lock);
604 ExceptionCache* target_entry = exception_cache_entry_for_exception(exception);
605
606 if (target_entry == nullptr || !target_entry->add_address_and_handler(pc,handler)) {
607 target_entry = new ExceptionCache(exception,pc,handler);
608 add_exception_cache_entry(target_entry);
609 }
610 }
611
612 // private method for handling exception cache
613 // These methods are private, and used to manipulate the exception cache
614 // directly.
615 ExceptionCache* nmethod::exception_cache_entry_for_exception(Handle exception) {
616 ExceptionCache* ec = exception_cache_acquire();
617 while (ec != nullptr) {
618 if (ec->match_exception_with_space(exception)) {
619 return ec;
620 }
621 ec = ec->next();
622 }
623 return nullptr;
624 }
625
626 bool nmethod::is_at_poll_return(address pc) {
627 RelocIterator iter(this, pc, pc+1);
628 while (iter.next()) {
629 if (iter.type() == relocInfo::poll_return_type)
630 return true;
631 }
632 return false;
633 }
634
635
636 bool nmethod::is_at_poll_or_poll_return(address pc) {
637 RelocIterator iter(this, pc, pc+1);
638 while (iter.next()) {
639 relocInfo::relocType t = iter.type();
640 if (t == relocInfo::poll_return_type || t == relocInfo::poll_type)
641 return true;
642 }
643 return false;
644 }
645
646 void nmethod::verify_oop_relocations() {
647 // Ensure sure that the code matches the current oop values
648 RelocIterator iter(this, nullptr, nullptr);
649 while (iter.next()) {
650 if (iter.type() == relocInfo::oop_type) {
651 oop_Relocation* reloc = iter.oop_reloc();
652 if (!reloc->oop_is_immediate()) {
653 reloc->verify_oop_relocation();
654 }
655 }
656 }
657 }
658
659
660 ScopeDesc* nmethod::scope_desc_at(address pc) {
661 PcDesc* pd = pc_desc_at(pc);
662 guarantee(pd != nullptr, "scope must be present");
663 return new ScopeDesc(this, pd);
664 }
665
666 ScopeDesc* nmethod::scope_desc_near(address pc) {
667 PcDesc* pd = pc_desc_near(pc);
668 guarantee(pd != nullptr, "scope must be present");
669 return new ScopeDesc(this, pd);
670 }
671
672 address nmethod::oops_reloc_begin() const {
673 // If the method is not entrant then a JMP is plastered over the
674 // first few bytes. If an oop in the old code was there, that oop
675 // should not get GC'd. Skip the first few bytes of oops on
676 // not-entrant methods.
677 if (frame_complete_offset() != CodeOffsets::frame_never_safe &&
678 code_begin() + frame_complete_offset() >
679 verified_entry_point() + NativeJump::instruction_size)
680 {
681 // If we have a frame_complete_offset after the native jump, then there
682 // is no point trying to look for oops before that. This is a requirement
683 // for being allowed to scan oops concurrently.
684 return code_begin() + frame_complete_offset();
685 }
686
687 address low_boundary = verified_entry_point();
688 return low_boundary;
689 }
690
691 // Method that knows how to preserve outgoing arguments at call. This method must be
692 // called with a frame corresponding to a Java invoke
693 void nmethod::preserve_callee_argument_oops(frame fr, const RegisterMap *reg_map, OopClosure* f) {
694 if (method() == nullptr) {
695 return;
696 }
697
698 // handle the case of an anchor explicitly set in continuation code that doesn't have a callee
699 JavaThread* thread = reg_map->thread();
700 if ((thread->has_last_Java_frame() && fr.sp() == thread->last_Java_sp())
701 JVMTI_ONLY(|| (method()->is_continuation_enter_intrinsic() && thread->on_monitor_waited_event()))) {
702 return;
703 }
704
705 if (!method()->is_native()) {
706 address pc = fr.pc();
707 bool has_receiver, has_appendix;
708 Symbol* signature;
709
710 // The method attached by JIT-compilers should be used, if present.
711 // Bytecode can be inaccurate in such case.
712 Method* callee = attached_method_before_pc(pc);
713 if (callee != nullptr) {
714 has_receiver = !(callee->access_flags().is_static());
715 has_appendix = false;
716 signature = callee->signature();
717 } else {
718 SimpleScopeDesc ssd(this, pc);
719
720 Bytecode_invoke call(methodHandle(Thread::current(), ssd.method()), ssd.bci());
721 has_receiver = call.has_receiver();
722 has_appendix = call.has_appendix();
723 signature = call.signature();
724 }
725
726 fr.oops_compiled_arguments_do(signature, has_receiver, has_appendix, reg_map, f);
727 } else if (method()->is_continuation_enter_intrinsic()) {
728 // This method only calls Continuation.enter()
729 Symbol* signature = vmSymbols::continuationEnter_signature();
730 fr.oops_compiled_arguments_do(signature, false, false, reg_map, f);
731 }
732 }
733
734 Method* nmethod::attached_method(address call_instr) {
735 assert(code_contains(call_instr), "not part of the nmethod");
736 RelocIterator iter(this, call_instr, call_instr + 1);
737 while (iter.next()) {
738 if (iter.addr() == call_instr) {
739 switch(iter.type()) {
740 case relocInfo::static_call_type: return iter.static_call_reloc()->method_value();
741 case relocInfo::opt_virtual_call_type: return iter.opt_virtual_call_reloc()->method_value();
742 case relocInfo::virtual_call_type: return iter.virtual_call_reloc()->method_value();
743 default: break;
744 }
745 }
746 }
747 return nullptr; // not found
748 }
749
750 Method* nmethod::attached_method_before_pc(address pc) {
751 if (NativeCall::is_call_before(pc)) {
752 NativeCall* ncall = nativeCall_before(pc);
753 return attached_method(ncall->instruction_address());
754 }
755 return nullptr; // not a call
756 }
757
758 void nmethod::clear_inline_caches() {
759 assert(SafepointSynchronize::is_at_safepoint() || (NMethodState_lock->owned_by_self() && is_not_installed()), "clearing of IC's only allowed at safepoint or when not installed");
760 RelocIterator iter(this);
761 while (iter.next()) {
762 iter.reloc()->clear_inline_cache();
763 }
764 }
765
766 #ifdef ASSERT
767 // Check class_loader is alive for this bit of metadata.
768 class CheckClass : public MetadataClosure {
769 void do_metadata(Metadata* md) {
770 Klass* klass = nullptr;
771 if (md->is_klass()) {
772 klass = ((Klass*)md);
773 } else if (md->is_method()) {
774 klass = ((Method*)md)->method_holder();
775 } else if (md->is_methodData()) {
776 klass = ((MethodData*)md)->method()->method_holder();
777 } else if (md->is_methodCounters()) {
778 klass = ((MethodCounters*)md)->method()->method_holder();
779 } else {
780 md->print();
781 ShouldNotReachHere();
782 }
783 assert(klass->is_loader_alive(), "must be alive");
784 }
785 };
786 #endif // ASSERT
787
788 // Clean references to unloaded nmethods at addr from this one, which is not unloaded.
789 template <typename CallsiteT>
790 static void clean_if_nmethod_is_unloaded(CallsiteT* callsite, bool clean_all) {
791 CodeBlob* cb = CodeCache::find_blob(callsite->destination());
792 if (!cb->is_nmethod()) {
793 return;
794 }
795 nmethod* nm = cb->as_nmethod();
796 if (clean_all || !nm->is_in_use() || nm->is_unloading() || nm->method()->code() != nm) {
797 callsite->set_to_clean();
798 }
799 }
800
801 // Cleans caches in nmethods that point to either classes that are unloaded
802 // or nmethods that are unloaded.
803 //
804 // Can be called either in parallel by G1 currently or after all
805 // nmethods are unloaded. Return postponed=true in the parallel case for
806 // inline caches found that point to nmethods that are not yet visited during
807 // the do_unloading walk.
808 void nmethod::unload_nmethod_caches(bool unloading_occurred) {
809 ResourceMark rm;
810
811 // Exception cache only needs to be called if unloading occurred
812 if (unloading_occurred) {
813 clean_exception_cache();
814 }
815
816 cleanup_inline_caches_impl(unloading_occurred, false);
817
818 #ifdef ASSERT
819 // Check that the metadata embedded in the nmethod is alive
820 CheckClass check_class;
821 metadata_do(&check_class);
822 #endif
823 }
824
825 void nmethod::run_nmethod_entry_barrier() {
826 BarrierSetNMethod* bs_nm = BarrierSet::barrier_set()->barrier_set_nmethod();
827 if (bs_nm != nullptr) {
828 // We want to keep an invariant that nmethods found through iterations of a Thread's
829 // nmethods found in safepoints have gone through an entry barrier and are not armed.
830 // By calling this nmethod entry barrier, it plays along and acts
831 // like any other nmethod found on the stack of a thread (fewer surprises).
832 nmethod* nm = this;
833 bool alive = bs_nm->nmethod_entry_barrier(nm);
834 assert(alive, "should be alive");
835 }
836 }
837
838 // Only called by whitebox test
839 void nmethod::cleanup_inline_caches_whitebox() {
840 assert_locked_or_safepoint(CodeCache_lock);
841 CompiledICLocker ic_locker(this);
842 cleanup_inline_caches_impl(false /* unloading_occurred */, true /* clean_all */);
843 }
844
845 address* nmethod::orig_pc_addr(const frame* fr) {
846 return (address*) ((address)fr->unextended_sp() + orig_pc_offset());
847 }
848
849 // Called to clean up after class unloading for live nmethods
850 void nmethod::cleanup_inline_caches_impl(bool unloading_occurred, bool clean_all) {
851 assert(CompiledICLocker::is_safe(this), "mt unsafe call");
852 ResourceMark rm;
853
854 // Find all calls in an nmethod and clear the ones that point to bad nmethods.
855 RelocIterator iter(this, oops_reloc_begin());
856 bool is_in_static_stub = false;
857 while(iter.next()) {
858
859 switch (iter.type()) {
860
861 case relocInfo::virtual_call_type:
862 if (unloading_occurred) {
863 // If class unloading occurred we first clear ICs where the cached metadata
864 // is referring to an unloaded klass or method.
865 CompiledIC_at(&iter)->clean_metadata();
866 }
867
868 clean_if_nmethod_is_unloaded(CompiledIC_at(&iter), clean_all);
869 break;
870
871 case relocInfo::opt_virtual_call_type:
872 case relocInfo::static_call_type:
873 clean_if_nmethod_is_unloaded(CompiledDirectCall::at(iter.reloc()), clean_all);
874 break;
875
876 case relocInfo::static_stub_type: {
877 is_in_static_stub = true;
878 break;
879 }
880
881 case relocInfo::metadata_type: {
882 // Only the metadata relocations contained in static/opt virtual call stubs
883 // contains the Method* passed to c2i adapters. It is the only metadata
884 // relocation that needs to be walked, as it is the one metadata relocation
885 // that violates the invariant that all metadata relocations have an oop
886 // in the compiled method (due to deferred resolution and code patching).
887
888 // This causes dead metadata to remain in compiled methods that are not
889 // unloading. Unless these slippery metadata relocations of the static
890 // stubs are at least cleared, subsequent class redefinition operations
891 // will access potentially free memory, and JavaThread execution
892 // concurrent to class unloading may call c2i adapters with dead methods.
893 if (!is_in_static_stub) {
894 // The first metadata relocation after a static stub relocation is the
895 // metadata relocation of the static stub used to pass the Method* to
896 // c2i adapters.
897 continue;
898 }
899 is_in_static_stub = false;
900 if (is_unloading()) {
901 // If the nmethod itself is dying, then it may point at dead metadata.
902 // Nobody should follow that metadata; it is strictly unsafe.
903 continue;
904 }
905 metadata_Relocation* r = iter.metadata_reloc();
906 Metadata* md = r->metadata_value();
907 if (md != nullptr && md->is_method()) {
908 Method* method = static_cast<Method*>(md);
909 if (!method->method_holder()->is_loader_alive()) {
910 AtomicAccess::store(r->metadata_addr(), (Method*)nullptr);
911
912 if (!r->metadata_is_immediate()) {
913 r->fix_metadata_relocation();
914 }
915 }
916 }
917 break;
918 }
919
920 default:
921 break;
922 }
923 }
924 }
925
926 address nmethod::continuation_for_implicit_exception(address pc, bool for_div0_check) {
927 // Exception happened outside inline-cache check code => we are inside
928 // an active nmethod => use cpc to determine a return address
929 int exception_offset = int(pc - code_begin());
930 int cont_offset = ImplicitExceptionTable(this).continuation_offset( exception_offset );
931 #ifdef ASSERT
932 if (cont_offset == 0) {
933 Thread* thread = Thread::current();
934 ResourceMark rm(thread);
935 CodeBlob* cb = CodeCache::find_blob(pc);
936 assert(cb != nullptr && cb == this, "");
937
938 // Keep tty output consistent. To avoid ttyLocker, we buffer in stream, and print all at once.
939 stringStream ss;
940 ss.print_cr("implicit exception happened at " INTPTR_FORMAT, p2i(pc));
941 print_on(&ss);
942 method()->print_codes_on(&ss);
943 print_code_on(&ss);
944 print_pcs_on(&ss);
945 tty->print("%s", ss.as_string()); // print all at once
946 }
947 #endif
948 if (cont_offset == 0) {
949 // Let the normal error handling report the exception
950 return nullptr;
951 }
952 if (cont_offset == exception_offset) {
953 #if INCLUDE_JVMCI
954 Deoptimization::DeoptReason deopt_reason = for_div0_check ? Deoptimization::Reason_div0_check : Deoptimization::Reason_null_check;
955 JavaThread *thread = JavaThread::current();
956 thread->set_jvmci_implicit_exception_pc(pc);
957 thread->set_pending_deoptimization(Deoptimization::make_trap_request(deopt_reason,
958 Deoptimization::Action_reinterpret));
959 return (SharedRuntime::deopt_blob()->implicit_exception_uncommon_trap());
960 #else
961 ShouldNotReachHere();
962 #endif
963 }
964 return code_begin() + cont_offset;
965 }
966
967 class HasEvolDependency : public MetadataClosure {
968 bool _has_evol_dependency;
969 public:
970 HasEvolDependency() : _has_evol_dependency(false) {}
971 void do_metadata(Metadata* md) {
972 if (md->is_method()) {
973 Method* method = (Method*)md;
974 if (method->is_old()) {
975 _has_evol_dependency = true;
976 }
977 }
978 }
979 bool has_evol_dependency() const { return _has_evol_dependency; }
980 };
981
982 bool nmethod::has_evol_metadata() {
983 // Check the metadata in relocIter and CompiledIC and also deoptimize
984 // any nmethod that has reference to old methods.
985 HasEvolDependency check_evol;
986 metadata_do(&check_evol);
987 if (check_evol.has_evol_dependency() && log_is_enabled(Debug, redefine, class, nmethod)) {
988 ResourceMark rm;
989 log_debug(redefine, class, nmethod)
990 ("Found evol dependency of nmethod %s.%s(%s) compile_id=%d on in nmethod metadata",
991 _method->method_holder()->external_name(),
992 _method->name()->as_C_string(),
993 _method->signature()->as_C_string(),
994 compile_id());
995 }
996 return check_evol.has_evol_dependency();
997 }
998
999 int nmethod::total_size() const {
1000 return
1001 consts_size() +
1002 insts_size() +
1003 stub_size() +
1004 scopes_data_size() +
1005 scopes_pcs_size() +
1006 handler_table_size() +
1007 nul_chk_table_size();
1008 }
1009
1010 const char* nmethod::compile_kind() const {
1011 if (is_osr_method()) return "osr";
1012 if (preloaded()) return "AP";
1013 if (is_aot()) return "A";
1014
1015 if (method() != nullptr && is_native_method()) {
1016 if (method()->is_continuation_native_intrinsic()) {
1017 return "cnt";
1018 }
1019 return "c2n";
1020 }
1021 return nullptr;
1022 }
1023
1024 const char* nmethod::compiler_name() const {
1025 return compilertype2name(_compiler_type);
1026 }
1027
1028 #ifdef ASSERT
1029 class CheckForOopsClosure : public OopClosure {
1030 bool _found_oop = false;
1031 public:
1032 virtual void do_oop(oop* o) { _found_oop = true; }
1033 virtual void do_oop(narrowOop* o) { _found_oop = true; }
1034 bool found_oop() { return _found_oop; }
1035 };
1036 class CheckForMetadataClosure : public MetadataClosure {
1037 bool _found_metadata = false;
1038 Metadata* _ignore = nullptr;
1039 public:
1040 CheckForMetadataClosure(Metadata* ignore) : _ignore(ignore) {}
1041 virtual void do_metadata(Metadata* md) { if (md != _ignore) _found_metadata = true; }
1042 bool found_metadata() { return _found_metadata; }
1043 };
1044
1045 static void assert_no_oops_or_metadata(nmethod* nm) {
1046 if (nm == nullptr) return;
1047 assert(nm->oop_maps() == nullptr, "expectation");
1048
1049 CheckForOopsClosure cfo;
1050 nm->oops_do(&cfo);
1051 assert(!cfo.found_oop(), "no oops allowed");
1052
1053 // We allow an exception for the own Method, but require its class to be permanent.
1054 Method* own_method = nm->method();
1055 CheckForMetadataClosure cfm(/* ignore reference to own Method */ own_method);
1056 nm->metadata_do(&cfm);
1057 assert(!cfm.found_metadata(), "no metadata allowed");
1058
1059 assert(own_method->method_holder()->class_loader_data()->is_permanent_class_loader_data(),
1060 "Method's class needs to be permanent");
1061 }
1062 #endif
1063
1064 static int required_mutable_data_size(CodeBuffer* code_buffer,
1065 int jvmci_data_size = 0) {
1066 return align_up(code_buffer->total_relocation_size(), oopSize) +
1067 align_up(code_buffer->total_metadata_size(), oopSize) +
1068 align_up(jvmci_data_size, oopSize);
1069 }
1070
1071 nmethod* nmethod::new_native_nmethod(const methodHandle& method,
1072 int compile_id,
1073 CodeBuffer *code_buffer,
1074 int vep_offset,
1075 int frame_complete,
1076 int frame_size,
1077 ByteSize basic_lock_owner_sp_offset,
1078 ByteSize basic_lock_sp_offset,
1079 OopMapSet* oop_maps,
1080 int exception_handler) {
1081 code_buffer->finalize_oop_references(method);
1082 // create nmethod
1083 nmethod* nm = nullptr;
1084 int native_nmethod_size = CodeBlob::allocation_size(code_buffer, sizeof(nmethod));
1085 {
1086 MutexLocker mu(CodeCache_lock, Mutex::_no_safepoint_check_flag);
1087
1088 CodeOffsets offsets;
1089 offsets.set_value(CodeOffsets::Verified_Entry, vep_offset);
1090 offsets.set_value(CodeOffsets::Frame_Complete, frame_complete);
1091 if (exception_handler != -1) {
1092 offsets.set_value(CodeOffsets::Exceptions, exception_handler);
1093 }
1094
1095 int mutable_data_size = required_mutable_data_size(code_buffer);
1096
1097 // MH intrinsics are dispatch stubs which are compatible with NonNMethod space.
1098 // IsUnloadingBehaviour::is_unloading needs to handle them separately.
1099 bool allow_NonNMethod_space = method->can_be_allocated_in_NonNMethod_space();
1100 nm = new (native_nmethod_size, allow_NonNMethod_space)
1101 nmethod(method(), compiler_none, native_nmethod_size,
1102 compile_id, &offsets,
1103 code_buffer, frame_size,
1104 basic_lock_owner_sp_offset,
1105 basic_lock_sp_offset,
1106 oop_maps, mutable_data_size);
1107 DEBUG_ONLY( if (allow_NonNMethod_space) assert_no_oops_or_metadata(nm); )
1108 NOT_PRODUCT(if (nm != nullptr) native_nmethod_stats.note_native_nmethod(nm));
1109 }
1110
1111 if (nm != nullptr) {
1112 // verify nmethod
1113 DEBUG_ONLY(nm->verify();) // might block
1114
1115 nm->log_new_nmethod();
1116 }
1117 return nm;
1118 }
1119
1120 void nmethod::record_nmethod_dependency() {
1121 // To make dependency checking during class loading fast, record
1122 // the nmethod dependencies in the classes it is dependent on.
1123 // This allows the dependency checking code to simply walk the
1124 // class hierarchy above the loaded class, checking only nmethods
1125 // which are dependent on those classes. The slow way is to
1126 // check every nmethod for dependencies which makes it linear in
1127 // the number of methods compiled. For applications with a lot
1128 // classes the slow way is too slow.
1129 for (Dependencies::DepStream deps(this); deps.next(); ) {
1130 if (deps.type() == Dependencies::call_site_target_value) {
1131 // CallSite dependencies are managed on per-CallSite instance basis.
1132 oop call_site = deps.argument_oop(0);
1133 MethodHandles::add_dependent_nmethod(call_site, this);
1134 } else {
1135 InstanceKlass* ik = deps.context_type();
1136 if (ik == nullptr) {
1137 continue; // ignore things like evol_method
1138 }
1139 // record this nmethod as dependent on this klass
1140 ik->add_dependent_nmethod(this);
1141 }
1142 }
1143 }
1144
1145 nmethod* nmethod::new_nmethod(const methodHandle& method,
1146 int compile_id,
1147 int entry_bci,
1148 CodeOffsets* offsets,
1149 int orig_pc_offset,
1150 DebugInformationRecorder* debug_info,
1151 Dependencies* dependencies,
1152 CodeBuffer* code_buffer, int frame_size,
1153 OopMapSet* oop_maps,
1154 ExceptionHandlerTable* handler_table,
1155 ImplicitExceptionTable* nul_chk_table,
1156 AbstractCompiler* compiler,
1157 CompLevel comp_level
1158 #if INCLUDE_JVMCI
1159 , char* speculations,
1160 int speculations_len,
1161 JVMCINMethodData* jvmci_data
1162 #endif
1163 )
1164 {
1165 assert(debug_info->oop_recorder() == code_buffer->oop_recorder(), "shared OR");
1166 code_buffer->finalize_oop_references(method);
1167 // create nmethod
1168 nmethod* nm = nullptr;
1169 int nmethod_size = CodeBlob::allocation_size(code_buffer, sizeof(nmethod));
1170
1171 int immutable_data_size =
1172 adjust_pcs_size(debug_info->pcs_size())
1173 + align_up((int)dependencies->size_in_bytes(), oopSize)
1174 + align_up(handler_table->size_in_bytes() , oopSize)
1175 + align_up(nul_chk_table->size_in_bytes() , oopSize)
1176 #if INCLUDE_JVMCI
1177 + align_up(speculations_len , oopSize)
1178 #endif
1179 + align_up(debug_info->data_size() , oopSize);
1180
1181 // First, allocate space for immutable data in C heap.
1182 address immutable_data = nullptr;
1183 if (immutable_data_size > 0) {
1184 immutable_data_size += ImmutableDataRefCountSize;
1185 immutable_data = (address)os::malloc(immutable_data_size, mtCode);
1186 if (immutable_data == nullptr) {
1187 vm_exit_out_of_memory(immutable_data_size, OOM_MALLOC_ERROR, "nmethod: no space for immutable data");
1188 return nullptr;
1189 }
1190 }
1191
1192 int mutable_data_size = required_mutable_data_size(code_buffer
1193 JVMCI_ONLY(COMMA (compiler->is_jvmci() ? jvmci_data->size() : 0)));
1194
1195 {
1196 MutexLocker mu(CodeCache_lock, Mutex::_no_safepoint_check_flag);
1197
1198 nm = new (nmethod_size, comp_level)
1199 nmethod(method(), compiler->type(), nmethod_size, immutable_data_size, mutable_data_size,
1200 compile_id, entry_bci, immutable_data, offsets, orig_pc_offset,
1201 debug_info, dependencies, code_buffer, frame_size, oop_maps,
1202 handler_table, nul_chk_table, compiler, comp_level
1203 #if INCLUDE_JVMCI
1204 , speculations,
1205 speculations_len,
1206 jvmci_data
1207 #endif
1208 );
1209
1210 if (nm != nullptr) {
1211 nm->record_nmethod_dependency();
1212 NOT_PRODUCT(note_java_nmethod(nm));
1213 }
1214 }
1215 // Do verification and logging outside CodeCache_lock.
1216 if (nm != nullptr) {
1217
1218 #ifdef ASSERT
1219 LogTarget(Debug, aot, codecache, nmethod) log;
1220 if (log.is_enabled()) {
1221 LogStream out(log);
1222 out.print_cr("== new_nmethod 2");
1223 FlagSetting fs(PrintRelocations, true);
1224 nm->print_on_impl(&out);
1225 nm->decode(&out);
1226 }
1227 #endif
1228
1229 // Safepoints in nmethod::verify aren't allowed because nm hasn't been installed yet.
1230 DEBUG_ONLY(nm->verify();)
1231 nm->log_new_nmethod();
1232 }
1233 return nm;
1234 }
1235
1236 nmethod* nmethod::restore(address code_cache_buffer,
1237 const methodHandle& method,
1238 int compile_id,
1239 address reloc_data,
1240 GrowableArray<Handle>& oop_list,
1241 GrowableArray<Metadata*>& metadata_list,
1242 ImmutableOopMapSet* oop_maps,
1243 address immutable_data,
1244 GrowableArray<Handle>& reloc_imm_oop_list,
1245 GrowableArray<Metadata*>& reloc_imm_metadata_list,
1246 AOTCodeReader* aot_code_reader)
1247 {
1248 CodeBlob::restore(code_cache_buffer, "nmethod", reloc_data, oop_maps);
1249 nmethod* nm = (nmethod*)code_cache_buffer;
1250 nm->set_method(method());
1251 nm->_compile_id = compile_id;
1252 nm->set_immutable_data(immutable_data);
1253 nm->copy_values(&oop_list);
1254 nm->copy_values(&metadata_list);
1255
1256 aot_code_reader->fix_relocations(nm, &reloc_imm_oop_list, &reloc_imm_metadata_list);
1257
1258 #ifndef PRODUCT
1259 nm->asm_remarks().init();
1260 aot_code_reader->read_asm_remarks(nm->asm_remarks(), /* use_string_table */ false);
1261 nm->dbg_strings().init();
1262 aot_code_reader->read_dbg_strings(nm->dbg_strings(), /* use_string_table */ false);
1263 #endif
1264
1265 // Flush the code block
1266 ICache::invalidate_range(nm->code_begin(), nm->code_size());
1267
1268 // Create cache after PcDesc data is copied - it will be used to initialize cache
1269 nm->_pc_desc_container = new PcDescContainer(nm->scopes_pcs_begin());
1270
1271 nm->set_aot_code_entry(aot_code_reader->aot_code_entry());
1272
1273 nm->post_init();
1274 return nm;
1275 }
1276
1277 nmethod* nmethod::new_nmethod(nmethod* archived_nm,
1278 const methodHandle& method,
1279 AbstractCompiler* compiler,
1280 int compile_id,
1281 address reloc_data,
1282 GrowableArray<Handle>& oop_list,
1283 GrowableArray<Metadata*>& metadata_list,
1284 ImmutableOopMapSet* oop_maps,
1285 address immutable_data,
1286 GrowableArray<Handle>& reloc_imm_oop_list,
1287 GrowableArray<Metadata*>& reloc_imm_metadata_list,
1288 AOTCodeReader* aot_code_reader)
1289 {
1290 nmethod* nm = nullptr;
1291 int nmethod_size = archived_nm->size();
1292 // create nmethod
1293 {
1294 MutexLocker mu(CodeCache_lock, Mutex::_no_safepoint_check_flag);
1295 address code_cache_buffer = (address)CodeCache::allocate(nmethod_size, CodeCache::get_code_blob_type(archived_nm->comp_level()));
1296 if (code_cache_buffer != nullptr) {
1297 nm = archived_nm->restore(code_cache_buffer,
1298 method,
1299 compile_id,
1300 reloc_data,
1301 oop_list,
1302 metadata_list,
1303 oop_maps,
1304 immutable_data,
1305 reloc_imm_oop_list,
1306 reloc_imm_metadata_list,
1307 aot_code_reader);
1308 nm->record_nmethod_dependency();
1309 NOT_PRODUCT(note_java_nmethod(nm));
1310 }
1311 }
1312 // Do verification and logging outside CodeCache_lock.
1313 if (nm != nullptr) {
1314 #ifdef ASSERT
1315 LogTarget(Debug, aot, codecache, nmethod) log;
1316 if (log.is_enabled()) {
1317 LogStream out(log);
1318 out.print_cr("== new_nmethod 2");
1319 FlagSetting fs(PrintRelocations, true);
1320 nm->print_on_impl(&out);
1321 nm->decode(&out);
1322 }
1323 #endif
1324 // Safepoints in nmethod::verify aren't allowed because nm hasn't been installed yet.
1325 DEBUG_ONLY(nm->verify();)
1326 nm->log_new_nmethod();
1327 }
1328 return nm;
1329 }
1330
1331 // Fill in default values for various fields
1332 void nmethod::init_defaults(CodeBuffer *code_buffer, CodeOffsets* offsets) {
1333 // avoid uninitialized fields, even for short time periods
1334 _exception_cache = nullptr;
1335 _gc_data = nullptr;
1336 _oops_do_mark_link = nullptr;
1337 _compiled_ic_data = nullptr;
1338
1339 _is_unloading_state = 0;
1340 _state = not_installed;
1341
1342 _has_unsafe_access = 0;
1343 _has_wide_vectors = 0;
1344 _has_monitors = 0;
1345 _has_scoped_access = 0;
1346 _has_flushed_dependencies = 0;
1347 _is_unlinked = 0;
1348 _load_reported = 0; // jvmti state
1349 _preloaded = 0;
1350 _has_clinit_barriers = 0;
1351
1352 _used = false;
1353 _deoptimization_status = not_marked;
1354
1355 // SECT_CONSTS is first in code buffer so the offset should be 0.
1356 int consts_offset = code_buffer->total_offset_of(code_buffer->consts());
1357 assert(consts_offset == 0, "const_offset: %d", consts_offset);
1358
1359 _stub_offset = content_offset() + code_buffer->total_offset_of(code_buffer->stubs());
1360
1361 CHECKED_CAST(_entry_offset, uint16_t, (offsets->value(CodeOffsets::Entry)));
1362 CHECKED_CAST(_verified_entry_offset, uint16_t, (offsets->value(CodeOffsets::Verified_Entry)));
1363
1364 _skipped_instructions_size = code_buffer->total_skipped_instructions_size();
1365 }
1366
1367 // Post initialization
1368 void nmethod::post_init() {
1369 clear_unloading_state();
1370
1371 finalize_relocations();
1372
1373 Universe::heap()->register_nmethod(this);
1374 DEBUG_ONLY(Universe::heap()->verify_nmethod(this));
1375
1376 CodeCache::commit(this);
1377 }
1378
1379 // For native wrappers
1380 nmethod::nmethod(
1381 Method* method,
1382 CompilerType type,
1383 int nmethod_size,
1384 int compile_id,
1385 CodeOffsets* offsets,
1386 CodeBuffer* code_buffer,
1387 int frame_size,
1388 ByteSize basic_lock_owner_sp_offset,
1389 ByteSize basic_lock_sp_offset,
1390 OopMapSet* oop_maps,
1391 int mutable_data_size)
1392 : CodeBlob("native nmethod", CodeBlobKind::Nmethod, code_buffer, nmethod_size, sizeof(nmethod),
1393 offsets->value(CodeOffsets::Frame_Complete), frame_size, oop_maps, false, mutable_data_size),
1394 _deoptimization_generation(0),
1395 _gc_epoch(CodeCache::gc_epoch()),
1396 _method(method),
1397 _native_receiver_sp_offset(basic_lock_owner_sp_offset),
1398 _native_basic_lock_sp_offset(basic_lock_sp_offset)
1399 {
1400 {
1401 DEBUG_ONLY(NoSafepointVerifier nsv;)
1402 assert_locked_or_safepoint(CodeCache_lock);
1403
1404 init_defaults(code_buffer, offsets);
1405
1406 _osr_entry_point = nullptr;
1407 _pc_desc_container = nullptr;
1408 _entry_bci = InvocationEntryBci;
1409 _compile_id = compile_id;
1410 _comp_level = CompLevel_none;
1411 _compiler_type = type;
1412 _orig_pc_offset = 0;
1413 _num_stack_arg_slots = 0;
1414
1415 if (offsets->value(CodeOffsets::Exceptions) != -1) {
1416 // Continuation enter intrinsic
1417 _exception_offset = code_offset() + offsets->value(CodeOffsets::Exceptions);
1418 } else {
1419 _exception_offset = 0;
1420 }
1421 // Native wrappers do not have deopt handlers. Make the values
1422 // something that will never match a pc like the nmethod vtable entry
1423 _deopt_handler_entry_offset = 0;
1424 _aot_code_entry = nullptr;
1425 _method_profiling_count = 0;
1426 _unwind_handler_offset = 0;
1427
1428 CHECKED_CAST(_oops_size, uint16_t, align_up(code_buffer->total_oop_size(), oopSize));
1429 uint16_t metadata_size;
1430 CHECKED_CAST(metadata_size, uint16_t, align_up(code_buffer->total_metadata_size(), wordSize));
1431 JVMCI_ONLY( _metadata_size = metadata_size; )
1432 assert(_mutable_data_size == _relocation_size + metadata_size,
1433 "wrong mutable data size: %d != %d + %d",
1434 _mutable_data_size, _relocation_size, metadata_size);
1435
1436 // native wrapper does not have read-only data but we need unique not null address
1437 _immutable_data = blob_end();
1438 _immutable_data_size = 0;
1439 _nul_chk_table_offset = 0;
1440 _handler_table_offset = 0;
1441 _scopes_pcs_offset = 0;
1442 _scopes_data_offset = 0;
1443 #if INCLUDE_JVMCI
1444 _speculations_offset = 0;
1445 #endif
1446 _immutable_data_ref_count_offset = 0;
1447
1448 code_buffer->copy_code_and_locs_to(this);
1449 code_buffer->copy_values_to(this);
1450
1451 post_init();
1452 }
1453
1454 if (PrintNativeNMethods || PrintDebugInfo || PrintRelocations || PrintDependencies) {
1455 ttyLocker ttyl; // keep the following output all in one block
1456 // This output goes directly to the tty, not the compiler log.
1457 // To enable tools to match it up with the compilation activity,
1458 // be sure to tag this tty output with the compile ID.
1459 if (xtty != nullptr) {
1460 xtty->begin_head("print_native_nmethod");
1461 xtty->method(_method);
1462 xtty->stamp();
1463 xtty->end_head(" address='" INTPTR_FORMAT "'", (intptr_t) this);
1464 }
1465 // Print the header part, then print the requested information.
1466 // This is both handled in decode2(), called via print_code() -> decode()
1467 if (PrintNativeNMethods) {
1468 tty->print_cr("-------------------------- Assembly (native nmethod) ---------------------------");
1469 print_code();
1470 tty->print_cr("- - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - ");
1471 #if defined(SUPPORT_DATA_STRUCTS)
1472 if (AbstractDisassembler::show_structs()) {
1473 if (oop_maps != nullptr) {
1474 tty->print("oop maps:"); // oop_maps->print_on(tty) outputs a cr() at the beginning
1475 oop_maps->print_on(tty);
1476 tty->print_cr("- - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - ");
1477 }
1478 }
1479 #endif
1480 } else {
1481 print(); // print the header part only.
1482 }
1483 #if defined(SUPPORT_DATA_STRUCTS)
1484 if (AbstractDisassembler::show_structs()) {
1485 if (PrintRelocations) {
1486 print_relocations_on(tty);
1487 tty->print_cr("- - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - ");
1488 }
1489 }
1490 #endif
1491 if (xtty != nullptr) {
1492 xtty->tail("print_native_nmethod");
1493 }
1494 }
1495 }
1496
1497
1498 nmethod::nmethod(const nmethod &nm) : CodeBlob(nm._name, nm._kind, nm._size, nm._header_size)
1499 {
1500
1501 if (nm._oop_maps != nullptr) {
1502 _oop_maps = nm._oop_maps->clone();
1503 } else {
1504 _oop_maps = nullptr;
1505 }
1506
1507 _size = nm._size;
1508 _relocation_size = nm._relocation_size;
1509 _content_offset = nm._content_offset;
1510 _code_offset = nm._code_offset;
1511 _data_offset = nm._data_offset;
1512 _frame_size = nm._frame_size;
1513
1514 S390_ONLY( _ctable_offset = nm._ctable_offset; )
1515
1516 _header_size = nm._header_size;
1517 _frame_complete_offset = nm._frame_complete_offset;
1518
1519 _kind = nm._kind;
1520
1521 _caller_must_gc_arguments = nm._caller_must_gc_arguments;
1522
1523 #ifndef PRODUCT
1524 _asm_remarks.share(nm._asm_remarks);
1525 _dbg_strings.share(nm._dbg_strings);
1526 #endif
1527
1528 // Allocate memory and copy mutable data to C heap
1529 _mutable_data_size = nm._mutable_data_size;
1530 if (_mutable_data_size > 0) {
1531 _mutable_data = (address)os::malloc(_mutable_data_size, mtCode);
1532 if (_mutable_data == nullptr) {
1533 vm_exit_out_of_memory(_mutable_data_size, OOM_MALLOC_ERROR, "nmethod: no space for mutable data");
1534 }
1535 memcpy(mutable_data_begin(), nm.mutable_data_begin(), nm.mutable_data_size());
1536 } else {
1537 _mutable_data = nullptr;
1538 }
1539
1540 _deoptimization_generation = 0;
1541 _gc_epoch = CodeCache::gc_epoch();
1542 _method = nm._method;
1543 _osr_link = nullptr;
1544
1545 _exception_cache = nullptr;
1546 _gc_data = nullptr;
1547 _oops_do_mark_nmethods = nullptr;
1548 _oops_do_mark_link = nullptr;
1549 _compiled_ic_data = nullptr;
1550
1551 if (nm._osr_entry_point != nullptr) {
1552 _osr_entry_point = (nm._osr_entry_point - (address) &nm) + (address) this;
1553 } else {
1554 _osr_entry_point = nullptr;
1555 }
1556
1557 _entry_offset = nm._entry_offset;
1558 _verified_entry_offset = nm._verified_entry_offset;
1559 _entry_bci = nm._entry_bci;
1560 _immutable_data_size = nm._immutable_data_size;
1561
1562 _skipped_instructions_size = nm._skipped_instructions_size;
1563 _stub_offset = nm._stub_offset;
1564 _exception_offset = nm._exception_offset;
1565 _deopt_handler_entry_offset = nm._deopt_handler_entry_offset;
1566 _unwind_handler_offset = nm._unwind_handler_offset;
1567 _num_stack_arg_slots = nm._num_stack_arg_slots;
1568 _oops_size = nm._oops_size;
1569 #if INCLUDE_JVMCI
1570 _metadata_size = nm._metadata_size;
1571 #endif
1572 _nul_chk_table_offset = nm._nul_chk_table_offset;
1573 _handler_table_offset = nm._handler_table_offset;
1574 _scopes_pcs_offset = nm._scopes_pcs_offset;
1575 _scopes_data_offset = nm._scopes_data_offset;
1576 #if INCLUDE_JVMCI
1577 _speculations_offset = nm._speculations_offset;
1578 #endif
1579 _immutable_data_ref_count_offset = nm._immutable_data_ref_count_offset;
1580
1581 // Increment number of references to immutable data to share it between nmethods
1582 if (_immutable_data_size > 0) {
1583 _immutable_data = nm._immutable_data;
1584 inc_immutable_data_ref_count();
1585 } else {
1586 _immutable_data = blob_end();
1587 }
1588
1589 _orig_pc_offset = nm._orig_pc_offset;
1590 _compile_id = nm._compile_id;
1591 _comp_level = nm._comp_level;
1592 _compiler_type = nm._compiler_type;
1593 _is_unloading_state = nm._is_unloading_state;
1594 _state = not_installed;
1595
1596 _has_unsafe_access = nm._has_unsafe_access;
1597 _has_wide_vectors = nm._has_wide_vectors;
1598 _has_monitors = nm._has_monitors;
1599 _has_scoped_access = nm._has_scoped_access;
1600 _has_flushed_dependencies = nm._has_flushed_dependencies;
1601 _is_unlinked = nm._is_unlinked;
1602 _load_reported = nm._load_reported;
1603
1604 _deoptimization_status = nm._deoptimization_status;
1605
1606 if (nm._pc_desc_container != nullptr) {
1607 _pc_desc_container = new PcDescContainer(scopes_pcs_begin());
1608 } else {
1609 _pc_desc_container = nullptr;
1610 }
1611
1612 // Copy nmethod contents excluding header
1613 // - Constant part (doubles, longs and floats used in nmethod)
1614 // - Code part:
1615 // - Code body
1616 // - Exception handler
1617 // - Stub code
1618 // - OOP table
1619 memcpy(consts_begin(), nm.consts_begin(), nm.data_end() - nm.consts_begin());
1620
1621 // Fix relocation
1622 RelocIterator iter(this);
1623 CodeBuffer src(&nm);
1624 CodeBuffer dst(this);
1625 while (iter.next()) {
1626 #ifdef USE_TRAMPOLINE_STUB_FIX_OWNER
1627 // After an nmethod is moved, some direct call sites may end up out of range.
1628 // CallRelocation::fix_relocation_after_move() assumes the target is always
1629 // reachable and does not check branch range. Calling it without range checks
1630 // could cause us to write an offset too large for the instruction.
1631 //
1632 // If a call site has a trampoline, we skip the normal call relocation. The
1633 // associated trampoline_stub_Relocation will handle the call and the
1634 // trampoline, including range checks and updating the branch as needed.
1635 //
1636 // If no trampoline exists, we can assume the call target is always
1637 // reachable and therefore within direct branch range, so calling
1638 // CallRelocation::fix_relocation_after_move() is safe.
1639 if (iter.reloc()->is_call()) {
1640 address trampoline = trampoline_stub_Relocation::get_trampoline_for(iter.reloc()->addr(), this);
1641 if (trampoline != nullptr) {
1642 continue;
1643 }
1644 }
1645 #endif
1646
1647 iter.reloc()->fix_relocation_after_move(&src, &dst);
1648 }
1649
1650 {
1651 MutexLocker ml(NMethodState_lock, Mutex::_no_safepoint_check_flag);
1652 clear_inline_caches();
1653 }
1654
1655 post_init();
1656 }
1657
1658 nmethod* nmethod::relocate(CodeBlobType code_blob_type) {
1659 assert(NMethodRelocation, "must enable use of function");
1660
1661 // Locks required to be held by caller to ensure the nmethod
1662 // is not modified or purged from code cache during relocation
1663 assert_lock_strong(CodeCache_lock);
1664 assert_lock_strong(Compile_lock);
1665 assert(CompiledICLocker::is_safe(this), "mt unsafe call");
1666
1667 if (!is_relocatable()) {
1668 return nullptr;
1669 }
1670
1671 run_nmethod_entry_barrier();
1672 nmethod* nm_copy = new (size(), code_blob_type) nmethod(*this);
1673
1674 if (nm_copy == nullptr) {
1675 return nullptr;
1676 }
1677
1678 // To make dependency checking during class loading fast, record
1679 // the nmethod dependencies in the classes it is dependent on.
1680 // This allows the dependency checking code to simply walk the
1681 // class hierarchy above the loaded class, checking only nmethods
1682 // which are dependent on those classes. The slow way is to
1683 // check every nmethod for dependencies which makes it linear in
1684 // the number of methods compiled. For applications with a lot
1685 // classes the slow way is too slow.
1686 for (Dependencies::DepStream deps(nm_copy); deps.next(); ) {
1687 if (deps.type() == Dependencies::call_site_target_value) {
1688 // CallSite dependencies are managed on per-CallSite instance basis.
1689 oop call_site = deps.argument_oop(0);
1690 MethodHandles::add_dependent_nmethod(call_site, nm_copy);
1691 } else {
1692 InstanceKlass* ik = deps.context_type();
1693 if (ik == nullptr) {
1694 continue; // ignore things like evol_method
1695 }
1696 // record this nmethod as dependent on this klass
1697 ik->add_dependent_nmethod(nm_copy);
1698 }
1699 }
1700
1701 MutexLocker ml_NMethodState_lock(NMethodState_lock, Mutex::_no_safepoint_check_flag);
1702
1703 // Verify the nm we copied from is still valid
1704 if (!is_marked_for_deoptimization() && is_in_use()) {
1705 assert(method() != nullptr && method()->code() == this, "should be if is in use");
1706
1707 // Attempt to start using the copy
1708 if (nm_copy->make_in_use()) {
1709 ICache::invalidate_range(nm_copy->code_begin(), nm_copy->code_size());
1710
1711 methodHandle mh(Thread::current(), nm_copy->method());
1712 nm_copy->method()->set_code(mh, nm_copy);
1713
1714 make_not_entrant(InvalidationReason::RELOCATED);
1715
1716 nm_copy->post_compiled_method_load_event();
1717
1718 nm_copy->log_relocated_nmethod(this);
1719
1720 return nm_copy;
1721 }
1722 }
1723
1724 nm_copy->make_not_used();
1725
1726 return nullptr;
1727 }
1728
1729 bool nmethod::is_relocatable() {
1730 if (!is_java_method()) {
1731 return false;
1732 }
1733
1734 if (!is_in_use()) {
1735 return false;
1736 }
1737
1738 if (is_osr_method()) {
1739 return false;
1740 }
1741
1742 if (is_marked_for_deoptimization()) {
1743 return false;
1744 }
1745
1746 #if INCLUDE_JVMCI
1747 if (jvmci_nmethod_data() != nullptr && jvmci_nmethod_data()->has_mirror()) {
1748 return false;
1749 }
1750 #endif
1751
1752 if (is_unloading()) {
1753 return false;
1754 }
1755
1756 if (has_evol_metadata()) {
1757 return false;
1758 }
1759
1760 return true;
1761 }
1762
1763 void* nmethod::operator new(size_t size, int nmethod_size, int comp_level) throw () {
1764 return CodeCache::allocate(nmethod_size, CodeCache::get_code_blob_type(comp_level));
1765 }
1766
1767 void* nmethod::operator new(size_t size, int nmethod_size, CodeBlobType code_blob_type) throw () {
1768 return CodeCache::allocate(nmethod_size, code_blob_type);
1769 }
1770
1771 void* nmethod::operator new(size_t size, int nmethod_size, bool allow_NonNMethod_space) throw () {
1772 // Try MethodNonProfiled and MethodProfiled.
1773 void* return_value = CodeCache::allocate(nmethod_size, CodeBlobType::MethodNonProfiled);
1774 if (return_value != nullptr || !allow_NonNMethod_space) return return_value;
1775 // Try NonNMethod or give up.
1776 return CodeCache::allocate(nmethod_size, CodeBlobType::NonNMethod);
1777 }
1778
1779 // For normal JIT compiled code
1780 nmethod::nmethod(
1781 Method* method,
1782 CompilerType type,
1783 int nmethod_size,
1784 int immutable_data_size,
1785 int mutable_data_size,
1786 int compile_id,
1787 int entry_bci,
1788 address immutable_data,
1789 CodeOffsets* offsets,
1790 int orig_pc_offset,
1791 DebugInformationRecorder* debug_info,
1792 Dependencies* dependencies,
1793 CodeBuffer *code_buffer,
1794 int frame_size,
1795 OopMapSet* oop_maps,
1796 ExceptionHandlerTable* handler_table,
1797 ImplicitExceptionTable* nul_chk_table,
1798 AbstractCompiler* compiler,
1799 CompLevel comp_level
1800 #if INCLUDE_JVMCI
1801 , char* speculations,
1802 int speculations_len,
1803 JVMCINMethodData* jvmci_data
1804 #endif
1805 )
1806 : CodeBlob("nmethod", CodeBlobKind::Nmethod, code_buffer, nmethod_size, sizeof(nmethod),
1807 offsets->value(CodeOffsets::Frame_Complete), frame_size, oop_maps, false, mutable_data_size),
1808 _deoptimization_generation(0),
1809 _gc_epoch(CodeCache::gc_epoch()),
1810 _method(method),
1811 _osr_link(nullptr)
1812 {
1813 assert(debug_info->oop_recorder() == code_buffer->oop_recorder(), "shared OR");
1814 {
1815 DEBUG_ONLY(NoSafepointVerifier nsv;)
1816 assert_locked_or_safepoint(CodeCache_lock);
1817
1818 init_defaults(code_buffer, offsets);
1819 _aot_code_entry = nullptr; // runtime compiled nmethod does not have AOTCodeEntry
1820 _method_profiling_count = 0;
1821
1822 _osr_entry_point = code_begin() + offsets->value(CodeOffsets::OSR_Entry);
1823 _entry_bci = entry_bci;
1824 _compile_id = compile_id;
1825 _comp_level = comp_level;
1826 _compiler_type = type;
1827 _orig_pc_offset = orig_pc_offset;
1828
1829 _num_stack_arg_slots = entry_bci != InvocationEntryBci ? 0 : _method->constMethod()->num_stack_arg_slots();
1830
1831 set_ctable_begin(header_begin() + content_offset());
1832
1833 #if INCLUDE_JVMCI
1834 if (compiler->is_jvmci()) {
1835 // JVMCI might not produce any stub sections
1836 if (offsets->value(CodeOffsets::Exceptions) != -1) {
1837 _exception_offset = code_offset() + offsets->value(CodeOffsets::Exceptions);
1838 } else {
1839 _exception_offset = -1;
1840 }
1841 if (offsets->value(CodeOffsets::Deopt) != -1) {
1842 _deopt_handler_entry_offset = code_offset() + offsets->value(CodeOffsets::Deopt);
1843 } else {
1844 _deopt_handler_entry_offset = -1;
1845 }
1846 } else
1847 #endif
1848 {
1849 // Exception handler and deopt handler are in the stub section
1850 assert(offsets->value(CodeOffsets::Deopt ) != -1, "must be set");
1851
1852 bool has_exception_handler = (offsets->value(CodeOffsets::Exceptions) != -1);
1853 assert(has_exception_handler == (compiler->type() != compiler_c2),
1854 "C2 compiler doesn't provide exception handler stub code.");
1855 if (has_exception_handler) {
1856 _exception_offset = _stub_offset + offsets->value(CodeOffsets::Exceptions);
1857 } else {
1858 _exception_offset = -1;
1859 }
1860
1861 _deopt_handler_entry_offset = _stub_offset + offsets->value(CodeOffsets::Deopt);
1862 }
1863 if (offsets->value(CodeOffsets::UnwindHandler) != -1) {
1864 // C1 generates UnwindHandler at the end of instructions section.
1865 // Calculate positive offset as distance between the start of stubs section
1866 // (which is also the end of instructions section) and the start of the handler.
1867 int unwind_handler_offset = code_offset() + offsets->value(CodeOffsets::UnwindHandler);
1868 CHECKED_CAST(_unwind_handler_offset, int16_t, (_stub_offset - unwind_handler_offset));
1869 } else {
1870 _unwind_handler_offset = -1;
1871 }
1872
1873 CHECKED_CAST(_oops_size, uint16_t, align_up(code_buffer->total_oop_size(), oopSize));
1874 uint16_t metadata_size;
1875 CHECKED_CAST(metadata_size, uint16_t, align_up(code_buffer->total_metadata_size(), wordSize));
1876 JVMCI_ONLY( _metadata_size = metadata_size; )
1877 int jvmci_data_size = 0 JVMCI_ONLY( + align_up(compiler->is_jvmci() ? jvmci_data->size() : 0, oopSize));
1878 assert(_mutable_data_size == _relocation_size + metadata_size + jvmci_data_size,
1879 "wrong mutable data size: %d != %d + %d + %d",
1880 _mutable_data_size, _relocation_size, metadata_size, jvmci_data_size);
1881 assert(nmethod_size == data_end() - header_begin(), "wrong nmethod size: %d != %d",
1882 nmethod_size, (int)(code_end() - header_begin()));
1883
1884 _immutable_data_size = immutable_data_size;
1885 if (immutable_data_size > 0) {
1886 assert(immutable_data != nullptr, "required");
1887 _immutable_data = immutable_data;
1888 } else {
1889 // We need unique not null address
1890 _immutable_data = blob_end();
1891 }
1892 CHECKED_CAST(_nul_chk_table_offset, uint16_t, (align_up((int)dependencies->size_in_bytes(), oopSize)));
1893 CHECKED_CAST(_handler_table_offset, uint16_t, (_nul_chk_table_offset + align_up(nul_chk_table->size_in_bytes(), oopSize)));
1894 _scopes_pcs_offset = _handler_table_offset + align_up(handler_table->size_in_bytes(), oopSize);
1895 _scopes_data_offset = _scopes_pcs_offset + adjust_pcs_size(debug_info->pcs_size());
1896
1897 #if INCLUDE_JVMCI
1898 _speculations_offset = _scopes_data_offset + align_up(debug_info->data_size(), oopSize);
1899 _immutable_data_ref_count_offset = _speculations_offset + align_up(speculations_len, oopSize);
1900 #else
1901 _immutable_data_ref_count_offset = _scopes_data_offset + align_up(debug_info->data_size(), oopSize);
1902 #endif
1903 DEBUG_ONLY( int immutable_data_end_offset = _immutable_data_ref_count_offset + ImmutableDataRefCountSize; )
1904 assert(immutable_data_end_offset <= immutable_data_size, "wrong read-only data size: %d > %d",
1905 immutable_data_end_offset, immutable_data_size);
1906
1907 // Copy code and relocation info
1908 code_buffer->copy_code_and_locs_to(this);
1909 // Copy oops and metadata
1910 code_buffer->copy_values_to(this);
1911 dependencies->copy_to(this);
1912 // Copy PcDesc and ScopeDesc data
1913 debug_info->copy_to(this);
1914
1915 // Create cache after PcDesc data is copied - it will be used to initialize cache
1916 _pc_desc_container = new PcDescContainer(scopes_pcs_begin());
1917
1918 #if INCLUDE_JVMCI
1919 if (compiler->is_jvmci()) {
1920 // Initialize the JVMCINMethodData object inlined into nm
1921 jvmci_nmethod_data()->copy(jvmci_data);
1922 }
1923 #endif
1924
1925 // Copy contents of ExceptionHandlerTable to nmethod
1926 handler_table->copy_to(this);
1927 nul_chk_table->copy_to(this);
1928
1929 #if INCLUDE_JVMCI
1930 // Copy speculations to nmethod
1931 if (speculations_size() != 0) {
1932 memcpy(speculations_begin(), speculations, speculations_len);
1933 }
1934 #endif
1935 init_immutable_data_ref_count();
1936
1937 post_init();
1938
1939 // we use the information of entry points to find out if a method is
1940 // static or non static
1941 assert(compiler->is_c2() || compiler->is_jvmci() ||
1942 _method->is_static() == (entry_point() == verified_entry_point()),
1943 " entry points must be same for static methods and vice versa");
1944 }
1945 }
1946
1947 // Print a short set of xml attributes to identify this nmethod. The
1948 // output should be embedded in some other element.
1949 void nmethod::log_identity(xmlStream* log) const {
1950 assert(log->inside_attrs_or_error(), "printing attributes");
1951 log->print(" compile_id='%d'", compile_id());
1952 const char* nm_kind = compile_kind();
1953 if (nm_kind != nullptr) log->print(" compile_kind='%s'", nm_kind);
1954 log->print(" compiler='%s'", compiler_name());
1955 if (TieredCompilation) {
1956 log->print(" compile_level='%d'", comp_level());
1957 }
1958 #if INCLUDE_JVMCI
1959 if (jvmci_nmethod_data() != nullptr) {
1960 const char* jvmci_name = jvmci_nmethod_data()->name();
1961 if (jvmci_name != nullptr) {
1962 log->print(" jvmci_mirror_name='");
1963 log->text("%s", jvmci_name);
1964 log->print("'");
1965 }
1966 }
1967 #endif
1968 }
1969
1970
1971 #define LOG_OFFSET(log, name) \
1972 if (p2i(name##_end()) - p2i(name##_begin())) \
1973 log->print(" " XSTR(name) "_offset='%zd'" , \
1974 p2i(name##_begin()) - p2i(this))
1975
1976
1977 void nmethod::log_new_nmethod() const {
1978 if (LogCompilation && xtty != nullptr) {
1979 ttyLocker ttyl;
1980 xtty->begin_elem("nmethod");
1981 log_identity(xtty);
1982 xtty->print(" entry='" INTPTR_FORMAT "' size='%d'", p2i(code_begin()), size());
1983 xtty->print(" address='" INTPTR_FORMAT "'", p2i(this));
1984
1985 LOG_OFFSET(xtty, relocation);
1986 LOG_OFFSET(xtty, consts);
1987 LOG_OFFSET(xtty, insts);
1988 LOG_OFFSET(xtty, stub);
1989 LOG_OFFSET(xtty, scopes_data);
1990 LOG_OFFSET(xtty, scopes_pcs);
1991 LOG_OFFSET(xtty, dependencies);
1992 LOG_OFFSET(xtty, handler_table);
1993 LOG_OFFSET(xtty, nul_chk_table);
1994 LOG_OFFSET(xtty, oops);
1995 LOG_OFFSET(xtty, metadata);
1996
1997 xtty->method(method());
1998 xtty->stamp();
1999 xtty->end_elem();
2000 }
2001 }
2002
2003
2004 void nmethod::log_relocated_nmethod(nmethod* original) const {
2005 if (LogCompilation && xtty != nullptr) {
2006 ttyLocker ttyl;
2007 xtty->begin_elem("relocated nmethod");
2008 log_identity(xtty);
2009 xtty->print(" entry='" INTPTR_FORMAT "' size='%d'", p2i(code_begin()), size());
2010
2011 const char* original_code_heap_name = CodeCache::get_code_heap_name(CodeCache::get_code_blob_type(original));
2012 xtty->print(" original_address='" INTPTR_FORMAT "'", p2i(original));
2013 xtty->print(" original_code_heap='%s'", original_code_heap_name);
2014
2015 const char* new_code_heap_name = CodeCache::get_code_heap_name(CodeCache::get_code_blob_type(this));
2016 xtty->print(" new_address='" INTPTR_FORMAT "'", p2i(this));
2017 xtty->print(" new_code_heap='%s'", new_code_heap_name);
2018
2019 LOG_OFFSET(xtty, relocation);
2020 LOG_OFFSET(xtty, consts);
2021 LOG_OFFSET(xtty, insts);
2022 LOG_OFFSET(xtty, stub);
2023 LOG_OFFSET(xtty, scopes_data);
2024 LOG_OFFSET(xtty, scopes_pcs);
2025 LOG_OFFSET(xtty, dependencies);
2026 LOG_OFFSET(xtty, handler_table);
2027 LOG_OFFSET(xtty, nul_chk_table);
2028 LOG_OFFSET(xtty, oops);
2029 LOG_OFFSET(xtty, metadata);
2030
2031 xtty->method(method());
2032 xtty->stamp();
2033 xtty->end_elem();
2034 }
2035 }
2036
2037 #undef LOG_OFFSET
2038
2039
2040 // Print out more verbose output usually for a newly created nmethod.
2041 void nmethod::print_on_with_msg(outputStream* st, const char* msg) const {
2042 if (st != nullptr) {
2043 ttyLocker ttyl;
2044 if (WizardMode) {
2045 CompileTask::print(st, this, msg, /*short_form:*/ true);
2046 st->print_cr(" (" INTPTR_FORMAT ")", p2i(this));
2047 } else {
2048 CompileTask::print(st, this, msg, /*short_form:*/ false);
2049 }
2050 }
2051 }
2052
2053 void nmethod::maybe_print_nmethod(const DirectiveSet* directive) {
2054 bool printnmethods = directive->PrintAssemblyOption || directive->PrintNMethodsOption;
2055 if (printnmethods || PrintDebugInfo || PrintRelocations || PrintDependencies || PrintExceptionHandlers) {
2056 print_nmethod(printnmethods);
2057 }
2058 }
2059
2060 void nmethod::print_nmethod(bool printmethod) {
2061 ttyLocker ttyl; // keep the following output all in one block
2062 if (xtty != nullptr) {
2063 xtty->begin_head("print_nmethod");
2064 log_identity(xtty);
2065 xtty->stamp();
2066 xtty->end_head();
2067 }
2068 // Print the header part, then print the requested information.
2069 // This is both handled in decode2().
2070 if (printmethod) {
2071 ResourceMark m;
2072 if (is_compiled_by_c1()) {
2073 tty->cr();
2074 tty->print_cr("============================= C1-compiled nmethod ==============================");
2075 }
2076 if (is_compiled_by_jvmci()) {
2077 tty->cr();
2078 tty->print_cr("=========================== JVMCI-compiled nmethod =============================");
2079 }
2080 tty->print_cr("----------------------------------- Assembly -----------------------------------");
2081 decode2(tty);
2082 #if defined(SUPPORT_DATA_STRUCTS)
2083 if (AbstractDisassembler::show_structs()) {
2084 // Print the oops from the underlying CodeBlob as well.
2085 tty->print_cr("- - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - ");
2086 print_oops(tty);
2087 tty->print_cr("- - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - ");
2088 print_metadata(tty);
2089 tty->print_cr("- - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - ");
2090 print_pcs_on(tty);
2091 tty->print_cr("- - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - ");
2092 if (oop_maps() != nullptr) {
2093 tty->print("oop maps:"); // oop_maps()->print_on(tty) outputs a cr() at the beginning
2094 oop_maps()->print_on(tty);
2095 tty->print_cr("- - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - ");
2096 }
2097 }
2098 #endif
2099 } else {
2100 print(); // print the header part only.
2101 }
2102
2103 #if defined(SUPPORT_DATA_STRUCTS)
2104 if (AbstractDisassembler::show_structs()) {
2105 methodHandle mh(Thread::current(), _method);
2106 if (printmethod || PrintDebugInfo || CompilerOracle::has_option(mh, CompileCommandEnum::PrintDebugInfo)) {
2107 print_scopes();
2108 tty->print_cr("- - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - ");
2109 }
2110 if (printmethod || PrintRelocations || CompilerOracle::has_option(mh, CompileCommandEnum::PrintRelocations)) {
2111 print_relocations_on(tty);
2112 tty->print_cr("- - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - ");
2113 }
2114 if (printmethod || PrintDependencies || CompilerOracle::has_option(mh, CompileCommandEnum::PrintDependencies)) {
2115 print_dependencies_on(tty);
2116 tty->print_cr("- - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - ");
2117 }
2118 if (printmethod || PrintExceptionHandlers) {
2119 print_handler_table();
2120 tty->print_cr("- - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - ");
2121 print_nul_chk_table();
2122 tty->print_cr("- - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - ");
2123 }
2124
2125 if (printmethod) {
2126 print_recorded_oops();
2127 tty->print_cr("- - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - ");
2128 print_recorded_metadata();
2129 tty->print_cr("- - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - ");
2130 }
2131 }
2132 #endif
2133
2134 if (xtty != nullptr) {
2135 xtty->tail("print_nmethod");
2136 }
2137 }
2138
2139
2140 // Promote one word from an assembly-time handle to a live embedded oop.
2141 inline void nmethod::initialize_immediate_oop(oop* dest, jobject handle) {
2142 if (handle == nullptr ||
2143 // As a special case, IC oops are initialized to 1 or -1.
2144 handle == (jobject) Universe::non_oop_word()) {
2145 *(void**)dest = handle;
2146 } else {
2147 *dest = JNIHandles::resolve_non_null(handle);
2148 }
2149 }
2150
2151 void nmethod::copy_values(GrowableArray<Handle>* array) {
2152 int length = array->length();
2153 assert((address)(oops_begin() + length) <= (address)oops_end(), "oops big enough");
2154 oop* dest = oops_begin();
2155 for (int index = 0 ; index < length; index++) {
2156 dest[index] = array->at(index)();
2157 }
2158 }
2159
2160 // Have to have the same name because it's called by a template
2161 void nmethod::copy_values(GrowableArray<jobject>* array) {
2162 int length = array->length();
2163 assert((address)(oops_begin() + length) <= (address)oops_end(), "oops big enough");
2164 oop* dest = oops_begin();
2165 for (int index = 0 ; index < length; index++) {
2166 initialize_immediate_oop(&dest[index], array->at(index));
2167 }
2168
2169 // Now we can fix up all the oops in the code. We need to do this
2170 // in the code because the assembler uses jobjects as placeholders.
2171 // The code and relocations have already been initialized by the
2172 // CodeBlob constructor, so it is valid even at this early point to
2173 // iterate over relocations and patch the code.
2174 fix_oop_relocations(nullptr, nullptr, /*initialize_immediates=*/ true);
2175 }
2176
2177 void nmethod::copy_values(GrowableArray<Metadata*>* array) {
2178 int length = array->length();
2179 assert((address)(metadata_begin() + length) <= (address)metadata_end(), "big enough");
2180 Metadata** dest = metadata_begin();
2181 for (int index = 0 ; index < length; index++) {
2182 dest[index] = array->at(index);
2183 }
2184 }
2185
2186 void nmethod::fix_oop_relocations(address begin, address end, bool initialize_immediates) {
2187 // re-patch all oop-bearing instructions, just in case some oops moved
2188 RelocIterator iter(this, begin, end);
2189 while (iter.next()) {
2190 if (iter.type() == relocInfo::oop_type) {
2191 oop_Relocation* reloc = iter.oop_reloc();
2192 if (initialize_immediates && reloc->oop_is_immediate()) {
2193 oop* dest = reloc->oop_addr();
2194 jobject obj = *reinterpret_cast<jobject*>(dest);
2195 initialize_immediate_oop(dest, obj);
2196 }
2197 // Refresh the oop-related bits of this instruction.
2198 reloc->fix_oop_relocation();
2199 } else if (iter.type() == relocInfo::metadata_type) {
2200 metadata_Relocation* reloc = iter.metadata_reloc();
2201 reloc->fix_metadata_relocation();
2202 }
2203 }
2204 }
2205
2206 void nmethod::create_reloc_immediates_list(JavaThread* thread, GrowableArray<Handle>& oop_list, GrowableArray<Metadata*>& metadata_list) {
2207 RelocIterator iter(this);
2208 while (iter.next()) {
2209 if (iter.type() == relocInfo::oop_type) {
2210 oop_Relocation* reloc = iter.oop_reloc();
2211 if (reloc->oop_is_immediate()) {
2212 oop dest = reloc->oop_value();
2213 Handle h(thread, dest);
2214 oop_list.append(h);
2215 }
2216 } else if (iter.type() == relocInfo::metadata_type) {
2217 metadata_Relocation* reloc = iter.metadata_reloc();
2218 if (reloc->metadata_is_immediate()) {
2219 Metadata* m = reloc->metadata_value();
2220 metadata_list.append(m);
2221 }
2222 }
2223 }
2224 }
2225
2226 static void install_post_call_nop_displacement(nmethod* nm, address pc) {
2227 NativePostCallNop* nop = nativePostCallNop_at((address) pc);
2228 intptr_t cbaddr = (intptr_t) nm;
2229 intptr_t offset = ((intptr_t) pc) - cbaddr;
2230
2231 int oopmap_slot = nm->oop_maps()->find_slot_for_offset(int((intptr_t) pc - (intptr_t) nm->code_begin()));
2232 if (oopmap_slot < 0) { // this can happen at asynchronous (non-safepoint) stackwalks
2233 log_debug(codecache)("failed to find oopmap for cb: " INTPTR_FORMAT " offset: %d", cbaddr, (int) offset);
2234 } else if (!nop->patch(oopmap_slot, offset)) {
2235 log_debug(codecache)("failed to encode %d %d", oopmap_slot, (int) offset);
2236 }
2237 }
2238
2239 void nmethod::finalize_relocations() {
2240 NoSafepointVerifier nsv;
2241
2242 GrowableArray<NativeMovConstReg*> virtual_call_data;
2243
2244 // Make sure that post call nops fill in nmethod offsets eagerly so
2245 // we don't have to race with deoptimization
2246 RelocIterator iter(this);
2247 while (iter.next()) {
2248 if (iter.type() == relocInfo::virtual_call_type) {
2249 virtual_call_Relocation* r = iter.virtual_call_reloc();
2250 NativeMovConstReg* value = nativeMovConstReg_at(r->cached_value());
2251 virtual_call_data.append(value);
2252 } else if (iter.type() == relocInfo::post_call_nop_type) {
2253 post_call_nop_Relocation* const reloc = iter.post_call_nop_reloc();
2254 address pc = reloc->addr();
2255 install_post_call_nop_displacement(this, pc);
2256 }
2257 }
2258
2259 if (virtual_call_data.length() > 0) {
2260 // We allocate a block of CompiledICData per nmethod so the GC can purge this faster.
2261 _compiled_ic_data = new CompiledICData[virtual_call_data.length()];
2262 CompiledICData* next_data = _compiled_ic_data;
2263
2264 for (NativeMovConstReg* value : virtual_call_data) {
2265 value->set_data((intptr_t)next_data);
2266 next_data++;
2267 }
2268 }
2269 }
2270
2271 void nmethod::make_deoptimized() {
2272 if (!Continuations::enabled()) {
2273 // Don't deopt this again.
2274 set_deoptimized_done();
2275 return;
2276 }
2277
2278 assert(method() == nullptr || can_be_deoptimized(), "");
2279
2280 CompiledICLocker ml(this);
2281 assert(CompiledICLocker::is_safe(this), "mt unsafe call");
2282
2283 // If post call nops have been already patched, we can just bail-out.
2284 if (has_been_deoptimized()) {
2285 return;
2286 }
2287
2288 ResourceMark rm;
2289 RelocIterator iter(this, oops_reloc_begin());
2290
2291 while (iter.next()) {
2292
2293 switch (iter.type()) {
2294 case relocInfo::virtual_call_type: {
2295 CompiledIC *ic = CompiledIC_at(&iter);
2296 address pc = ic->end_of_call();
2297 NativePostCallNop* nop = nativePostCallNop_at(pc);
2298 if (nop != nullptr) {
2299 nop->make_deopt();
2300 }
2301 assert(NativeDeoptInstruction::is_deopt_at(pc), "check");
2302 break;
2303 }
2304 case relocInfo::static_call_type:
2305 case relocInfo::opt_virtual_call_type: {
2306 CompiledDirectCall *csc = CompiledDirectCall::at(iter.reloc());
2307 address pc = csc->end_of_call();
2308 NativePostCallNop* nop = nativePostCallNop_at(pc);
2309 //tty->print_cr(" - static pc %p", pc);
2310 if (nop != nullptr) {
2311 nop->make_deopt();
2312 }
2313 // We can't assert here, there are some calls to stubs / runtime
2314 // that have reloc data and doesn't have a post call NOP.
2315 //assert(NativeDeoptInstruction::is_deopt_at(pc), "check");
2316 break;
2317 }
2318 default:
2319 break;
2320 }
2321 }
2322 // Don't deopt this again.
2323 set_deoptimized_done();
2324 }
2325
2326 void nmethod::verify_clean_inline_caches() {
2327 assert(CompiledICLocker::is_safe(this), "mt unsafe call");
2328
2329 ResourceMark rm;
2330 RelocIterator iter(this, oops_reloc_begin());
2331 while(iter.next()) {
2332 switch(iter.type()) {
2333 case relocInfo::virtual_call_type: {
2334 CompiledIC *ic = CompiledIC_at(&iter);
2335 CodeBlob *cb = CodeCache::find_blob(ic->destination());
2336 assert(cb != nullptr, "destination not in CodeBlob?");
2337 nmethod* nm = cb->as_nmethod_or_null();
2338 if (nm != nullptr) {
2339 // Verify that inline caches pointing to bad nmethods are clean
2340 if (!nm->is_in_use() || nm->is_unloading()) {
2341 assert(ic->is_clean(), "IC should be clean");
2342 }
2343 }
2344 break;
2345 }
2346 case relocInfo::static_call_type:
2347 case relocInfo::opt_virtual_call_type: {
2348 CompiledDirectCall *cdc = CompiledDirectCall::at(iter.reloc());
2349 CodeBlob *cb = CodeCache::find_blob(cdc->destination());
2350 assert(cb != nullptr, "destination not in CodeBlob?");
2351 nmethod* nm = cb->as_nmethod_or_null();
2352 if (nm != nullptr) {
2353 // Verify that inline caches pointing to bad nmethods are clean
2354 if (!nm->is_in_use() || nm->is_unloading() || nm->method()->code() != nm) {
2355 assert(cdc->is_clean(), "IC should be clean");
2356 }
2357 }
2358 break;
2359 }
2360 default:
2361 break;
2362 }
2363 }
2364 }
2365
2366 void nmethod::mark_as_maybe_on_stack() {
2367 AtomicAccess::store(&_gc_epoch, CodeCache::gc_epoch());
2368 }
2369
2370 bool nmethod::is_maybe_on_stack() {
2371 // If the condition below is true, it means that the nmethod was found to
2372 // be alive the previous completed marking cycle.
2373 return AtomicAccess::load(&_gc_epoch) >= CodeCache::previous_completed_gc_marking_cycle();
2374 }
2375
2376 void nmethod::inc_decompile_count() {
2377 if (!is_compiled_by_c2() && !is_compiled_by_jvmci()) return;
2378 // Could be gated by ProfileTraps, but do not bother...
2379 #if INCLUDE_JVMCI
2380 if (jvmci_skip_profile_deopt()) {
2381 return;
2382 }
2383 #endif
2384 Method* m = method();
2385 if (m == nullptr) return;
2386 MethodData* mdo = m->method_data();
2387 if (mdo == nullptr) return;
2388 // There is a benign race here. See comments in methodData.hpp.
2389 mdo->inc_decompile_count();
2390 }
2391
2392 void nmethod::inc_method_profiling_count() {
2393 AtomicAccess::inc(&_method_profiling_count);
2394 }
2395
2396 uint64_t nmethod::method_profiling_count() {
2397 return _method_profiling_count;
2398 }
2399
2400 bool nmethod::try_transition(signed char new_state_int) {
2401 signed char new_state = new_state_int;
2402 assert_lock_strong(NMethodState_lock);
2403 signed char old_state = _state;
2404 if (old_state >= new_state) {
2405 // Ensure monotonicity of transitions.
2406 return false;
2407 }
2408 AtomicAccess::store(&_state, new_state);
2409 return true;
2410 }
2411
2412 void nmethod::invalidate_osr_method() {
2413 assert(_entry_bci != InvocationEntryBci, "wrong kind of nmethod");
2414 // Remove from list of active nmethods
2415 if (method() != nullptr) {
2416 method()->method_holder()->remove_osr_nmethod(this);
2417 }
2418 }
2419
2420 void nmethod::log_state_change(InvalidationReason invalidation_reason) const {
2421 if (LogCompilation) {
2422 if (xtty != nullptr) {
2423 ttyLocker ttyl; // keep the following output all in one block
2424 xtty->begin_elem("make_not_entrant thread='%zu' reason='%s'",
2425 os::current_thread_id(), invalidation_reason_to_string(invalidation_reason));
2426 log_identity(xtty);
2427 xtty->stamp();
2428 xtty->end_elem();
2429 }
2430 }
2431
2432 ResourceMark rm;
2433 stringStream ss(NEW_RESOURCE_ARRAY(char, 256), 256);
2434 ss.print("made not entrant: %s", invalidation_reason_to_string(invalidation_reason));
2435
2436 CompileTask::print_ul(this, ss.freeze());
2437 if (PrintCompilation) {
2438 print_on_with_msg(tty, ss.freeze());
2439 }
2440 }
2441
2442 void nmethod::unlink_from_method() {
2443 if (method() != nullptr) {
2444 method()->unlink_code(this);
2445 }
2446 }
2447
2448 // Invalidate code
2449 bool nmethod::make_not_entrant(InvalidationReason invalidation_reason, bool keep_aot_entry) {
2450 // This can be called while the system is already at a safepoint which is ok
2451 NoSafepointVerifier nsv;
2452
2453 if (is_unloading()) {
2454 // If the nmethod is unloading, then it is already not entrant through
2455 // the nmethod entry barriers. No need to do anything; GC will unload it.
2456 return false;
2457 }
2458
2459 if (AtomicAccess::load(&_state) == not_entrant) {
2460 // Avoid taking the lock if already in required state.
2461 // This is safe from races because the state is an end-state,
2462 // which the nmethod cannot back out of once entered.
2463 // No need for fencing either.
2464 return false;
2465 }
2466
2467 {
2468 // Enter critical section. Does not block for safepoint.
2469 ConditionalMutexLocker ml(NMethodState_lock, !NMethodState_lock->owned_by_self(), Mutex::_no_safepoint_check_flag);
2470
2471 if (AtomicAccess::load(&_state) == not_entrant) {
2472 // another thread already performed this transition so nothing
2473 // to do, but return false to indicate this.
2474 return false;
2475 }
2476
2477 if (is_osr_method()) {
2478 // This logic is equivalent to the logic below for patching the
2479 // verified entry point of regular methods.
2480 // this effectively makes the osr nmethod not entrant
2481 invalidate_osr_method();
2482 } else {
2483 // The caller can be calling the method statically or through an inline
2484 // cache call.
2485 BarrierSet::barrier_set()->barrier_set_nmethod()->make_not_entrant(this);
2486 }
2487
2488 if (update_recompile_counts()) {
2489 // Mark the method as decompiled.
2490 inc_decompile_count();
2491 }
2492
2493 BarrierSetNMethod* bs_nm = BarrierSet::barrier_set()->barrier_set_nmethod();
2494 if (bs_nm == nullptr || !bs_nm->supports_entry_barrier(this)) {
2495 // If nmethod entry barriers are not supported, we won't mark
2496 // nmethods as on-stack when they become on-stack. So we
2497 // degrade to a less accurate flushing strategy, for now.
2498 mark_as_maybe_on_stack();
2499 }
2500
2501 // Change state
2502 bool success = try_transition(not_entrant);
2503 assert(success, "Transition can't fail");
2504
2505 // Log the transition once
2506 log_state_change(invalidation_reason);
2507
2508 // Remove nmethod from method.
2509 unlink_from_method();
2510
2511 if (!keep_aot_entry) {
2512 // Keep AOT code if it was simply replaced
2513 // otherwise make it not entrant too.
2514 AOTCodeCache::invalidate(_aot_code_entry);
2515 }
2516
2517 CompileBroker::log_not_entrant(this);
2518 } // leave critical region under NMethodState_lock
2519
2520 #if INCLUDE_JVMCI
2521 // Invalidate can't occur while holding the NMethodState_lock
2522 JVMCINMethodData* nmethod_data = jvmci_nmethod_data();
2523 if (nmethod_data != nullptr) {
2524 nmethod_data->invalidate_nmethod_mirror(this, invalidation_reason);
2525 }
2526 #endif
2527
2528 #ifdef ASSERT
2529 if (is_osr_method() && method() != nullptr) {
2530 // Make sure osr nmethod is invalidated, i.e. not on the list
2531 bool found = method()->method_holder()->remove_osr_nmethod(this);
2532 assert(!found, "osr nmethod should have been invalidated");
2533 }
2534 #endif
2535
2536 return true;
2537 }
2538
2539 // For concurrent GCs, there must be a handshake between unlink and flush
2540 void nmethod::unlink() {
2541 if (is_unlinked()) {
2542 // Already unlinked.
2543 return;
2544 }
2545
2546 flush_dependencies();
2547
2548 // unlink_from_method will take the NMethodState_lock.
2549 // In this case we don't strictly need it when unlinking nmethods from
2550 // the Method, because it is only concurrently unlinked by
2551 // the entry barrier, which acquires the per nmethod lock.
2552 unlink_from_method();
2553
2554 if (is_osr_method()) {
2555 invalidate_osr_method();
2556 }
2557
2558 #if INCLUDE_JVMCI
2559 // Clear the link between this nmethod and a HotSpotNmethod mirror
2560 JVMCINMethodData* nmethod_data = jvmci_nmethod_data();
2561 if (nmethod_data != nullptr) {
2562 nmethod_data->invalidate_nmethod_mirror(this, is_cold() ?
2563 nmethod::InvalidationReason::UNLOADING_COLD :
2564 nmethod::InvalidationReason::UNLOADING);
2565 }
2566 #endif
2567
2568 // Post before flushing as jmethodID is being used
2569 post_compiled_method_unload();
2570
2571 // Register for flushing when it is safe. For concurrent class unloading,
2572 // that would be after the unloading handshake, and for STW class unloading
2573 // that would be when getting back to the VM thread.
2574 ClassUnloadingContext::context()->register_unlinked_nmethod(this);
2575 }
2576
2577 void nmethod::purge(bool unregister_nmethod) {
2578
2579 MutexLocker ml(CodeCache_lock, Mutex::_no_safepoint_check_flag);
2580
2581 // completely deallocate this method
2582 Events::log_nmethod_flush(Thread::current(), "flushing %s nmethod " INTPTR_FORMAT, compile_kind(), p2i(this));
2583
2584 LogTarget(Debug, codecache) lt;
2585 if (lt.is_enabled()) {
2586 ResourceMark rm;
2587 LogStream ls(lt);
2588 const char* method_name = method()->name()->as_C_string();
2589 const size_t codecache_capacity = CodeCache::capacity()/1024;
2590 const size_t codecache_free_space = CodeCache::unallocated_capacity(CodeCache::get_code_blob_type(this))/1024;
2591 ls.print("Flushing %s nmethod %6d/" INTPTR_FORMAT ", level=%d, cold=%d, epoch=" UINT64_FORMAT ", cold_count=" UINT64_FORMAT ". "
2592 "Cache capacity: %zuKb, free space: %zuKb. method %s (%s)",
2593 compile_kind(), _compile_id, p2i(this), _comp_level, is_cold(), _gc_epoch, CodeCache::cold_gc_count(),
2594 codecache_capacity, codecache_free_space, method_name, compiler_name());
2595 }
2596
2597 // We need to deallocate any ExceptionCache data.
2598 // Note that we do not need to grab the nmethod lock for this, it
2599 // better be thread safe if we're disposing of it!
2600 ExceptionCache* ec = exception_cache();
2601 while(ec != nullptr) {
2602 ExceptionCache* next = ec->next();
2603 delete ec;
2604 ec = next;
2605 }
2606 if (_pc_desc_container != nullptr) {
2607 delete _pc_desc_container;
2608 }
2609 if (_compiled_ic_data != nullptr) {
2610 delete[] _compiled_ic_data;
2611 }
2612
2613 if (_immutable_data != blob_end() && !AOTCodeCache::is_address_in_aot_cache((address)_oop_maps)) {
2614 // Free memory if this was the last nmethod referencing immutable data
2615 if (dec_immutable_data_ref_count() == 0) {
2616 os::free(_immutable_data);
2617 }
2618
2619 _immutable_data = blob_end(); // Valid not null address
2620 }
2621
2622 if (unregister_nmethod) {
2623 Universe::heap()->unregister_nmethod(this);
2624 }
2625 CodeCache::unregister_old_nmethod(this);
2626
2627 JVMCI_ONLY( _metadata_size = 0; )
2628 CodeBlob::purge();
2629 }
2630
2631 oop nmethod::oop_at(int index) const {
2632 if (index == 0) {
2633 return nullptr;
2634 }
2635
2636 BarrierSetNMethod* bs_nm = BarrierSet::barrier_set()->barrier_set_nmethod();
2637 return bs_nm->oop_load_no_keepalive(this, index);
2638 }
2639
2640 oop nmethod::oop_at_phantom(int index) const {
2641 if (index == 0) {
2642 return nullptr;
2643 }
2644
2645 BarrierSetNMethod* bs_nm = BarrierSet::barrier_set()->barrier_set_nmethod();
2646 return bs_nm->oop_load_phantom(this, index);
2647 }
2648
2649 //
2650 // Notify all classes this nmethod is dependent on that it is no
2651 // longer dependent.
2652
2653 void nmethod::flush_dependencies() {
2654 if (!has_flushed_dependencies()) {
2655 set_has_flushed_dependencies(true);
2656 for (Dependencies::DepStream deps(this); deps.next(); ) {
2657 if (deps.type() == Dependencies::call_site_target_value) {
2658 // CallSite dependencies are managed on per-CallSite instance basis.
2659 oop call_site = deps.argument_oop(0);
2660 MethodHandles::clean_dependency_context(call_site);
2661 } else {
2662 InstanceKlass* ik = deps.context_type();
2663 if (ik == nullptr) {
2664 continue; // ignore things like evol_method
2665 }
2666 // During GC liveness of dependee determines class that needs to be updated.
2667 // The GC may clean dependency contexts concurrently and in parallel.
2668 ik->clean_dependency_context();
2669 }
2670 }
2671 }
2672 }
2673
2674 void nmethod::post_compiled_method(CompileTask* task) {
2675 task->mark_success();
2676 task->set_nm_content_size(content_size());
2677 task->set_nm_insts_size(insts_size());
2678 task->set_nm_total_size(total_size());
2679
2680 // task->is_aot_load() is true only for loaded AOT code.
2681 // nmethod::_aot_code_entry is set for loaded and stored AOT code
2682 // to invalidate the entry when nmethod is deoptimized.
2683 // VerifyAOTCode is option to not store in archive AOT code.
2684 guarantee((_aot_code_entry != nullptr) || !task->is_aot_load() || VerifyAOTCode, "sanity");
2685
2686 // JVMTI -- compiled method notification (must be done outside lock)
2687 post_compiled_method_load_event();
2688
2689 if (CompilationLog::log() != nullptr) {
2690 CompilationLog::log()->log_nmethod(JavaThread::current(), this);
2691 }
2692
2693 const DirectiveSet* directive = task->directive();
2694 maybe_print_nmethod(directive);
2695 }
2696
2697 #if INCLUDE_CDS
2698 static GrowableArrayCHeap<nmethod*, mtClassShared>* _delayed_compiled_method_load_events = nullptr;
2699
2700 void nmethod::add_delayed_compiled_method_load_event(nmethod* nm) {
2701 precond(CDSConfig::is_using_aot_linked_classes());
2702 precond(!ServiceThread::has_started());
2703
2704 // We are still in single threaded stage of VM bootstrap. No need to lock.
2705 if (_delayed_compiled_method_load_events == nullptr) {
2706 _delayed_compiled_method_load_events = new GrowableArrayCHeap<nmethod*, mtClassShared>();
2707 }
2708 _delayed_compiled_method_load_events->append(nm);
2709 }
2710
2711 void nmethod::post_delayed_compiled_method_load_events() {
2712 precond(ServiceThread::has_started());
2713 if (_delayed_compiled_method_load_events != nullptr) {
2714 for (int i = 0; i < _delayed_compiled_method_load_events->length(); i++) {
2715 nmethod* nm = _delayed_compiled_method_load_events->at(i);
2716 nm->post_compiled_method_load_event();
2717 }
2718 delete _delayed_compiled_method_load_events;
2719 _delayed_compiled_method_load_events = nullptr;
2720 }
2721 }
2722 #endif
2723
2724 // ------------------------------------------------------------------
2725 // post_compiled_method_load_event
2726 // new method for install_code() path
2727 // Transfer information from compilation to jvmti
2728 void nmethod::post_compiled_method_load_event(JvmtiThreadState* state) {
2729 #if INCLUDE_CDS
2730 if (!ServiceThread::has_started()) {
2731 // With AOT-linked classes, we could compile wrappers for native methods before the
2732 // ServiceThread has been started, so we must delay the events to be posted later.
2733 assert(state == nullptr, "must be");
2734 add_delayed_compiled_method_load_event(this);
2735 return;
2736 }
2737 #endif
2738
2739 // This is a bad time for a safepoint. We don't want
2740 // this nmethod to get unloaded while we're queueing the event.
2741 NoSafepointVerifier nsv;
2742
2743 Method* m = method();
2744 HOTSPOT_COMPILED_METHOD_LOAD(
2745 (char *) m->klass_name()->bytes(),
2746 m->klass_name()->utf8_length(),
2747 (char *) m->name()->bytes(),
2748 m->name()->utf8_length(),
2749 (char *) m->signature()->bytes(),
2750 m->signature()->utf8_length(),
2751 insts_begin(), insts_size());
2752
2753
2754 if (JvmtiExport::should_post_compiled_method_load()) {
2755 // Only post unload events if load events are found.
2756 set_load_reported();
2757 // If a JavaThread hasn't been passed in, let the Service thread
2758 // (which is a real Java thread) post the event
2759 JvmtiDeferredEvent event = JvmtiDeferredEvent::compiled_method_load_event(this);
2760 if (state == nullptr) {
2761 // Execute any barrier code for this nmethod as if it's called, since
2762 // keeping it alive looks like stack walking.
2763 run_nmethod_entry_barrier();
2764 ServiceThread::enqueue_deferred_event(&event);
2765 } else {
2766 // This enters the nmethod barrier outside in the caller.
2767 state->enqueue_event(&event);
2768 }
2769 }
2770 }
2771
2772 void nmethod::post_compiled_method_unload() {
2773 assert(_method != nullptr, "just checking");
2774 DTRACE_METHOD_UNLOAD_PROBE(method());
2775
2776 // If a JVMTI agent has enabled the CompiledMethodUnload event then
2777 // post the event. The Method* will not be valid when this is freed.
2778
2779 // Don't bother posting the unload if the load event wasn't posted.
2780 if (load_reported() && JvmtiExport::should_post_compiled_method_unload()) {
2781 JvmtiDeferredEvent event =
2782 JvmtiDeferredEvent::compiled_method_unload_event(
2783 method()->jmethod_id(), insts_begin());
2784 ServiceThread::enqueue_deferred_event(&event);
2785 }
2786 }
2787
2788 // Iterate over metadata calling this function. Used by RedefineClasses
2789 void nmethod::metadata_do(MetadataClosure* f) {
2790 {
2791 // Visit all immediate references that are embedded in the instruction stream.
2792 RelocIterator iter(this, oops_reloc_begin());
2793 while (iter.next()) {
2794 if (iter.type() == relocInfo::metadata_type) {
2795 metadata_Relocation* r = iter.metadata_reloc();
2796 // In this metadata, we must only follow those metadatas directly embedded in
2797 // the code. Other metadatas (oop_index>0) are seen as part of
2798 // the metadata section below.
2799 assert(1 == (r->metadata_is_immediate()) +
2800 (r->metadata_addr() >= metadata_begin() && r->metadata_addr() < metadata_end()),
2801 "metadata must be found in exactly one place");
2802 if (r->metadata_is_immediate() && r->metadata_value() != nullptr) {
2803 Metadata* md = r->metadata_value();
2804 if (md != _method) f->do_metadata(md);
2805 }
2806 } else if (iter.type() == relocInfo::virtual_call_type) {
2807 // Check compiledIC holders associated with this nmethod
2808 ResourceMark rm;
2809 CompiledIC *ic = CompiledIC_at(&iter);
2810 ic->metadata_do(f);
2811 }
2812 }
2813 }
2814
2815 // Visit the metadata section
2816 for (Metadata** p = metadata_begin(); p < metadata_end(); p++) {
2817 if (*p == Universe::non_oop_word() || *p == nullptr) continue; // skip non-oops
2818 Metadata* md = *p;
2819 f->do_metadata(md);
2820 }
2821
2822 // Visit metadata not embedded in the other places.
2823 if (_method != nullptr) f->do_metadata(_method);
2824 }
2825
2826 // Heuristic for nuking nmethods even though their oops are live.
2827 // Main purpose is to reduce code cache pressure and get rid of
2828 // nmethods that don't seem to be all that relevant any longer.
2829 bool nmethod::is_cold() {
2830 if (!MethodFlushing || is_not_installed()) {
2831 // No heuristic unloading at all
2832 return false;
2833 }
2834
2835 if (!is_maybe_on_stack() && is_not_entrant()) {
2836 // Not entrant nmethods that are not on any stack can just
2837 // be removed
2838 return true;
2839 }
2840
2841 BarrierSetNMethod* bs_nm = BarrierSet::barrier_set()->barrier_set_nmethod();
2842 if (bs_nm == nullptr || !bs_nm->supports_entry_barrier(this)) {
2843 // On platforms that don't support nmethod entry barriers, we can't
2844 // trust the temporal aspect of the gc epochs. So we can't detect
2845 // cold nmethods on such platforms.
2846 return false;
2847 }
2848
2849 if (!UseCodeCacheFlushing) {
2850 // Bail out if we don't heuristically remove nmethods
2851 return false;
2852 }
2853
2854 // Other code can be phased out more gradually after N GCs
2855 return CodeCache::previous_completed_gc_marking_cycle() > _gc_epoch + 2 * CodeCache::cold_gc_count();
2856 }
2857
2858 // The _is_unloading_state encodes a tuple comprising the unloading cycle
2859 // and the result of IsUnloadingBehaviour::is_unloading() for that cycle.
2860 // This is the bit layout of the _is_unloading_state byte: 00000CCU
2861 // CC refers to the cycle, which has 2 bits, and U refers to the result of
2862 // IsUnloadingBehaviour::is_unloading() for that unloading cycle.
2863
2864 class IsUnloadingState: public AllStatic {
2865 static const uint8_t _is_unloading_mask = 1;
2866 static const uint8_t _is_unloading_shift = 0;
2867 static const uint8_t _unloading_cycle_mask = 6;
2868 static const uint8_t _unloading_cycle_shift = 1;
2869
2870 static uint8_t set_is_unloading(uint8_t state, bool value) {
2871 state &= (uint8_t)~_is_unloading_mask;
2872 if (value) {
2873 state |= 1 << _is_unloading_shift;
2874 }
2875 assert(is_unloading(state) == value, "unexpected unloading cycle overflow");
2876 return state;
2877 }
2878
2879 static uint8_t set_unloading_cycle(uint8_t state, uint8_t value) {
2880 state &= (uint8_t)~_unloading_cycle_mask;
2881 state |= (uint8_t)(value << _unloading_cycle_shift);
2882 assert(unloading_cycle(state) == value, "unexpected unloading cycle overflow");
2883 return state;
2884 }
2885
2886 public:
2887 static bool is_unloading(uint8_t state) { return (state & _is_unloading_mask) >> _is_unloading_shift == 1; }
2888 static uint8_t unloading_cycle(uint8_t state) { return (state & _unloading_cycle_mask) >> _unloading_cycle_shift; }
2889
2890 static uint8_t create(bool is_unloading, uint8_t unloading_cycle) {
2891 uint8_t state = 0;
2892 state = set_is_unloading(state, is_unloading);
2893 state = set_unloading_cycle(state, unloading_cycle);
2894 return state;
2895 }
2896 };
2897
2898 bool nmethod::is_unloading() {
2899 uint8_t state = AtomicAccess::load(&_is_unloading_state);
2900 bool state_is_unloading = IsUnloadingState::is_unloading(state);
2901 if (state_is_unloading) {
2902 return true;
2903 }
2904 uint8_t state_unloading_cycle = IsUnloadingState::unloading_cycle(state);
2905 uint8_t current_cycle = CodeCache::unloading_cycle();
2906 if (state_unloading_cycle == current_cycle) {
2907 return false;
2908 }
2909
2910 // The IsUnloadingBehaviour is responsible for calculating if the nmethod
2911 // should be unloaded. This can be either because there is a dead oop,
2912 // or because is_cold() heuristically determines it is time to unload.
2913 state_unloading_cycle = current_cycle;
2914 state_is_unloading = IsUnloadingBehaviour::is_unloading(this);
2915 uint8_t new_state = IsUnloadingState::create(state_is_unloading, state_unloading_cycle);
2916
2917 // Note that if an nmethod has dead oops, everyone will agree that the
2918 // nmethod is_unloading. However, the is_cold heuristics can yield
2919 // different outcomes, so we guard the computed result with a CAS
2920 // to ensure all threads have a shared view of whether an nmethod
2921 // is_unloading or not.
2922 uint8_t found_state = AtomicAccess::cmpxchg(&_is_unloading_state, state, new_state, memory_order_relaxed);
2923
2924 if (found_state == state) {
2925 // First to change state, we win
2926 return state_is_unloading;
2927 } else {
2928 // State already set, so use it
2929 return IsUnloadingState::is_unloading(found_state);
2930 }
2931 }
2932
2933 void nmethod::clear_unloading_state() {
2934 uint8_t state = IsUnloadingState::create(false, CodeCache::unloading_cycle());
2935 AtomicAccess::store(&_is_unloading_state, state);
2936 }
2937
2938
2939 // This is called at the end of the strong tracing/marking phase of a
2940 // GC to unload an nmethod if it contains otherwise unreachable
2941 // oops or is heuristically found to be not important.
2942 void nmethod::do_unloading(bool unloading_occurred) {
2943 // Make sure the oop's ready to receive visitors
2944 if (is_unloading()) {
2945 unlink();
2946 } else {
2947 unload_nmethod_caches(unloading_occurred);
2948 BarrierSetNMethod* bs_nm = BarrierSet::barrier_set()->barrier_set_nmethod();
2949 if (bs_nm != nullptr) {
2950 bs_nm->disarm(this);
2951 }
2952 }
2953 }
2954
2955 void nmethod::oops_do(OopClosure* f) {
2956 // Prevent extra code cache walk for platforms that don't have immediate oops.
2957 if (relocInfo::mustIterateImmediateOopsInCode()) {
2958 RelocIterator iter(this, oops_reloc_begin());
2959
2960 while (iter.next()) {
2961 if (iter.type() == relocInfo::oop_type ) {
2962 oop_Relocation* r = iter.oop_reloc();
2963 // In this loop, we must only follow those oops directly embedded in
2964 // the code. Other oops (oop_index>0) are seen as part of scopes_oops.
2965 assert(1 == (r->oop_is_immediate()) +
2966 (r->oop_addr() >= oops_begin() && r->oop_addr() < oops_end()),
2967 "oop must be found in exactly one place");
2968 if (r->oop_is_immediate() && r->oop_value() != nullptr) {
2969 f->do_oop(r->oop_addr());
2970 }
2971 }
2972 }
2973 }
2974
2975 // Scopes
2976 // This includes oop constants not inlined in the code stream.
2977 for (oop* p = oops_begin(); p < oops_end(); p++) {
2978 if (*p == Universe::non_oop_word()) continue; // skip non-oops
2979 f->do_oop(p);
2980 }
2981 }
2982
2983 void nmethod::follow_nmethod(OopIterateClosure* cl) {
2984 // Process oops in the nmethod
2985 oops_do(cl);
2986
2987 // CodeCache unloading support
2988 mark_as_maybe_on_stack();
2989
2990 BarrierSetNMethod* bs_nm = BarrierSet::barrier_set()->barrier_set_nmethod();
2991 bs_nm->disarm(this);
2992
2993 // There's an assumption made that this function is not used by GCs that
2994 // relocate objects, and therefore we don't call fix_oop_relocations.
2995 }
2996
2997 nmethod* volatile nmethod::_oops_do_mark_nmethods;
2998
2999 void nmethod::oops_do_log_change(const char* state) {
3000 LogTarget(Trace, gc, nmethod) lt;
3001 if (lt.is_enabled()) {
3002 LogStream ls(lt);
3003 CompileTask::print(&ls, this, state, true /* short_form */);
3004 }
3005 }
3006
3007 bool nmethod::oops_do_try_claim() {
3008 if (oops_do_try_claim_weak_request()) {
3009 nmethod* result = oops_do_try_add_to_list_as_weak_done();
3010 assert(result == nullptr, "adding to global list as weak done must always succeed.");
3011 return true;
3012 }
3013 return false;
3014 }
3015
3016 bool nmethod::oops_do_try_claim_weak_request() {
3017 assert(SafepointSynchronize::is_at_safepoint(), "only at safepoint");
3018
3019 if ((_oops_do_mark_link == nullptr) &&
3020 (AtomicAccess::replace_if_null(&_oops_do_mark_link, mark_link(this, claim_weak_request_tag)))) {
3021 oops_do_log_change("oops_do, mark weak request");
3022 return true;
3023 }
3024 return false;
3025 }
3026
3027 void nmethod::oops_do_set_strong_done(nmethod* old_head) {
3028 _oops_do_mark_link = mark_link(old_head, claim_strong_done_tag);
3029 }
3030
3031 nmethod::oops_do_mark_link* nmethod::oops_do_try_claim_strong_done() {
3032 assert(SafepointSynchronize::is_at_safepoint(), "only at safepoint");
3033
3034 oops_do_mark_link* old_next = AtomicAccess::cmpxchg(&_oops_do_mark_link, mark_link(nullptr, claim_weak_request_tag), mark_link(this, claim_strong_done_tag));
3035 if (old_next == nullptr) {
3036 oops_do_log_change("oops_do, mark strong done");
3037 }
3038 return old_next;
3039 }
3040
3041 nmethod::oops_do_mark_link* nmethod::oops_do_try_add_strong_request(nmethod::oops_do_mark_link* next) {
3042 assert(SafepointSynchronize::is_at_safepoint(), "only at safepoint");
3043 assert(next == mark_link(this, claim_weak_request_tag), "Should be claimed as weak");
3044
3045 oops_do_mark_link* old_next = AtomicAccess::cmpxchg(&_oops_do_mark_link, next, mark_link(this, claim_strong_request_tag));
3046 if (old_next == next) {
3047 oops_do_log_change("oops_do, mark strong request");
3048 }
3049 return old_next;
3050 }
3051
3052 bool nmethod::oops_do_try_claim_weak_done_as_strong_done(nmethod::oops_do_mark_link* next) {
3053 assert(SafepointSynchronize::is_at_safepoint(), "only at safepoint");
3054 assert(extract_state(next) == claim_weak_done_tag, "Should be claimed as weak done");
3055
3056 oops_do_mark_link* old_next = AtomicAccess::cmpxchg(&_oops_do_mark_link, next, mark_link(extract_nmethod(next), claim_strong_done_tag));
3057 if (old_next == next) {
3058 oops_do_log_change("oops_do, mark weak done -> mark strong done");
3059 return true;
3060 }
3061 return false;
3062 }
3063
3064 nmethod* nmethod::oops_do_try_add_to_list_as_weak_done() {
3065 assert(SafepointSynchronize::is_at_safepoint(), "only at safepoint");
3066
3067 assert(extract_state(_oops_do_mark_link) == claim_weak_request_tag ||
3068 extract_state(_oops_do_mark_link) == claim_strong_request_tag,
3069 "must be but is nmethod " PTR_FORMAT " %u", p2i(extract_nmethod(_oops_do_mark_link)), extract_state(_oops_do_mark_link));
3070
3071 nmethod* old_head = AtomicAccess::xchg(&_oops_do_mark_nmethods, this);
3072 // Self-loop if needed.
3073 if (old_head == nullptr) {
3074 old_head = this;
3075 }
3076 // Try to install end of list and weak done tag.
3077 if (AtomicAccess::cmpxchg(&_oops_do_mark_link, mark_link(this, claim_weak_request_tag), mark_link(old_head, claim_weak_done_tag)) == mark_link(this, claim_weak_request_tag)) {
3078 oops_do_log_change("oops_do, mark weak done");
3079 return nullptr;
3080 } else {
3081 return old_head;
3082 }
3083 }
3084
3085 void nmethod::oops_do_add_to_list_as_strong_done() {
3086 assert(SafepointSynchronize::is_at_safepoint(), "only at safepoint");
3087
3088 nmethod* old_head = AtomicAccess::xchg(&_oops_do_mark_nmethods, this);
3089 // Self-loop if needed.
3090 if (old_head == nullptr) {
3091 old_head = this;
3092 }
3093 assert(_oops_do_mark_link == mark_link(this, claim_strong_done_tag), "must be but is nmethod " PTR_FORMAT " state %u",
3094 p2i(extract_nmethod(_oops_do_mark_link)), extract_state(_oops_do_mark_link));
3095
3096 oops_do_set_strong_done(old_head);
3097 }
3098
3099 void nmethod::oops_do_process_weak(OopsDoProcessor* p) {
3100 if (!oops_do_try_claim_weak_request()) {
3101 // Failed to claim for weak processing.
3102 oops_do_log_change("oops_do, mark weak request fail");
3103 return;
3104 }
3105
3106 p->do_regular_processing(this);
3107
3108 nmethod* old_head = oops_do_try_add_to_list_as_weak_done();
3109 if (old_head == nullptr) {
3110 return;
3111 }
3112 oops_do_log_change("oops_do, mark weak done fail");
3113 // Adding to global list failed, another thread added a strong request.
3114 assert(extract_state(_oops_do_mark_link) == claim_strong_request_tag,
3115 "must be but is %u", extract_state(_oops_do_mark_link));
3116
3117 oops_do_log_change("oops_do, mark weak request -> mark strong done");
3118
3119 oops_do_set_strong_done(old_head);
3120 // Do missing strong processing.
3121 p->do_remaining_strong_processing(this);
3122 }
3123
3124 void nmethod::oops_do_process_strong(OopsDoProcessor* p) {
3125 oops_do_mark_link* next_raw = oops_do_try_claim_strong_done();
3126 if (next_raw == nullptr) {
3127 p->do_regular_processing(this);
3128 oops_do_add_to_list_as_strong_done();
3129 return;
3130 }
3131 // Claim failed. Figure out why and handle it.
3132 if (oops_do_has_weak_request(next_raw)) {
3133 oops_do_mark_link* old = next_raw;
3134 // Claim failed because being weak processed (state == "weak request").
3135 // Try to request deferred strong processing.
3136 next_raw = oops_do_try_add_strong_request(old);
3137 if (next_raw == old) {
3138 // Successfully requested deferred strong processing.
3139 return;
3140 }
3141 // Failed because of a concurrent transition. No longer in "weak request" state.
3142 }
3143 if (oops_do_has_any_strong_state(next_raw)) {
3144 // Already claimed for strong processing or requested for such.
3145 return;
3146 }
3147 if (oops_do_try_claim_weak_done_as_strong_done(next_raw)) {
3148 // Successfully claimed "weak done" as "strong done". Do the missing marking.
3149 p->do_remaining_strong_processing(this);
3150 return;
3151 }
3152 // Claim failed, some other thread got it.
3153 }
3154
3155 void nmethod::oops_do_marking_prologue() {
3156 assert_at_safepoint();
3157
3158 log_trace(gc, nmethod)("oops_do_marking_prologue");
3159 assert(_oops_do_mark_nmethods == nullptr, "must be empty");
3160 }
3161
3162 void nmethod::oops_do_marking_epilogue() {
3163 assert_at_safepoint();
3164
3165 nmethod* next = _oops_do_mark_nmethods;
3166 _oops_do_mark_nmethods = nullptr;
3167 if (next != nullptr) {
3168 nmethod* cur;
3169 do {
3170 cur = next;
3171 next = extract_nmethod(cur->_oops_do_mark_link);
3172 cur->_oops_do_mark_link = nullptr;
3173 DEBUG_ONLY(cur->verify_oop_relocations());
3174
3175 LogTarget(Trace, gc, nmethod) lt;
3176 if (lt.is_enabled()) {
3177 LogStream ls(lt);
3178 CompileTask::print(&ls, cur, "oops_do, unmark", /*short_form:*/ true);
3179 }
3180 // End if self-loop has been detected.
3181 } while (cur != next);
3182 }
3183 log_trace(gc, nmethod)("oops_do_marking_epilogue");
3184 }
3185
3186 inline bool includes(void* p, void* from, void* to) {
3187 return from <= p && p < to;
3188 }
3189
3190
3191 void nmethod::copy_scopes_pcs(PcDesc* pcs, int count) {
3192 assert(count >= 2, "must be sentinel values, at least");
3193
3194 #ifdef ASSERT
3195 // must be sorted and unique; we do a binary search in find_pc_desc()
3196 int prev_offset = pcs[0].pc_offset();
3197 assert(prev_offset == PcDesc::lower_offset_limit,
3198 "must start with a sentinel");
3199 for (int i = 1; i < count; i++) {
3200 int this_offset = pcs[i].pc_offset();
3201 assert(this_offset > prev_offset, "offsets must be sorted");
3202 prev_offset = this_offset;
3203 }
3204 assert(prev_offset == PcDesc::upper_offset_limit,
3205 "must end with a sentinel");
3206 #endif //ASSERT
3207
3208 int size = count * sizeof(PcDesc);
3209 assert(scopes_pcs_size() >= size, "oob");
3210 memcpy(scopes_pcs_begin(), pcs, size);
3211
3212 // Adjust the final sentinel downward.
3213 PcDesc* last_pc = &scopes_pcs_begin()[count-1];
3214 assert(last_pc->pc_offset() == PcDesc::upper_offset_limit, "sanity");
3215 last_pc->set_pc_offset(content_size() + 1);
3216 for (; last_pc + 1 < scopes_pcs_end(); last_pc += 1) {
3217 // Fill any rounding gaps with copies of the last record.
3218 last_pc[1] = last_pc[0];
3219 }
3220 // The following assert could fail if sizeof(PcDesc) is not
3221 // an integral multiple of oopSize (the rounding term).
3222 // If it fails, change the logic to always allocate a multiple
3223 // of sizeof(PcDesc), and fill unused words with copies of *last_pc.
3224 assert(last_pc + 1 == scopes_pcs_end(), "must match exactly");
3225 }
3226
3227 void nmethod::copy_scopes_data(u_char* buffer, int size) {
3228 assert(scopes_data_size() >= size, "oob");
3229 memcpy(scopes_data_begin(), buffer, size);
3230 }
3231
3232 #ifdef ASSERT
3233 static PcDesc* linear_search(int pc_offset, bool approximate, PcDesc* lower, PcDesc* upper) {
3234 PcDesc* res = nullptr;
3235 assert(lower != nullptr && lower->pc_offset() == PcDesc::lower_offset_limit,
3236 "must start with a sentinel");
3237 // lower + 1 to exclude initial sentinel
3238 for (PcDesc* p = lower + 1; p < upper; p++) {
3239 NOT_PRODUCT(--pc_nmethod_stats.pc_desc_tests); // don't count this call to match_desc
3240 if (match_desc(p, pc_offset, approximate)) {
3241 if (res == nullptr) {
3242 res = p;
3243 } else {
3244 res = (PcDesc*) badAddress;
3245 }
3246 }
3247 }
3248 return res;
3249 }
3250 #endif
3251
3252
3253 #ifndef PRODUCT
3254 // Version of method to collect statistic
3255 PcDesc* PcDescContainer::find_pc_desc(address pc, bool approximate, address code_begin,
3256 PcDesc* lower, PcDesc* upper) {
3257 ++pc_nmethod_stats.pc_desc_queries;
3258 if (approximate) ++pc_nmethod_stats.pc_desc_approx;
3259
3260 PcDesc* desc = _pc_desc_cache.last_pc_desc();
3261 assert(desc != nullptr, "PcDesc cache should be initialized already");
3262 if (desc->pc_offset() == (pc - code_begin)) {
3263 // Cached value matched
3264 ++pc_nmethod_stats.pc_desc_tests;
3265 ++pc_nmethod_stats.pc_desc_repeats;
3266 return desc;
3267 }
3268 return find_pc_desc_internal(pc, approximate, code_begin, lower, upper);
3269 }
3270 #endif
3271
3272 // Finds a PcDesc with real-pc equal to "pc"
3273 PcDesc* PcDescContainer::find_pc_desc_internal(address pc, bool approximate, address code_begin,
3274 PcDesc* lower_incl, PcDesc* upper_incl) {
3275 if ((pc < code_begin) ||
3276 (pc - code_begin) >= (ptrdiff_t) PcDesc::upper_offset_limit) {
3277 return nullptr; // PC is wildly out of range
3278 }
3279 int pc_offset = (int) (pc - code_begin);
3280
3281 // Check the PcDesc cache if it contains the desired PcDesc
3282 // (This as an almost 100% hit rate.)
3283 PcDesc* res = _pc_desc_cache.find_pc_desc(pc_offset, approximate);
3284 if (res != nullptr) {
3285 assert(res == linear_search(pc_offset, approximate, lower_incl, upper_incl), "cache ok");
3286 return res;
3287 }
3288
3289 // Fallback algorithm: quasi-linear search for the PcDesc
3290 // Find the last pc_offset less than the given offset.
3291 // The successor must be the required match, if there is a match at all.
3292 // (Use a fixed radix to avoid expensive affine pointer arithmetic.)
3293 PcDesc* lower = lower_incl; // this is initial sentinel
3294 PcDesc* upper = upper_incl - 1; // exclude final sentinel
3295 if (lower >= upper) return nullptr; // no PcDescs at all
3296
3297 #define assert_LU_OK \
3298 /* invariant on lower..upper during the following search: */ \
3299 assert(lower->pc_offset() < pc_offset, "sanity"); \
3300 assert(upper->pc_offset() >= pc_offset, "sanity")
3301 assert_LU_OK;
3302
3303 // Use the last successful return as a split point.
3304 PcDesc* mid = _pc_desc_cache.last_pc_desc();
3305 NOT_PRODUCT(++pc_nmethod_stats.pc_desc_searches);
3306 if (mid->pc_offset() < pc_offset) {
3307 lower = mid;
3308 } else {
3309 upper = mid;
3310 }
3311
3312 // Take giant steps at first (4096, then 256, then 16, then 1)
3313 const int LOG2_RADIX = 4 /*smaller steps in debug mode:*/ DEBUG_ONLY(-1);
3314 const int RADIX = (1 << LOG2_RADIX);
3315 for (int step = (1 << (LOG2_RADIX*3)); step > 1; step >>= LOG2_RADIX) {
3316 while ((mid = lower + step) < upper) {
3317 assert_LU_OK;
3318 NOT_PRODUCT(++pc_nmethod_stats.pc_desc_searches);
3319 if (mid->pc_offset() < pc_offset) {
3320 lower = mid;
3321 } else {
3322 upper = mid;
3323 break;
3324 }
3325 }
3326 assert_LU_OK;
3327 }
3328
3329 // Sneak up on the value with a linear search of length ~16.
3330 while (true) {
3331 assert_LU_OK;
3332 mid = lower + 1;
3333 NOT_PRODUCT(++pc_nmethod_stats.pc_desc_searches);
3334 if (mid->pc_offset() < pc_offset) {
3335 lower = mid;
3336 } else {
3337 upper = mid;
3338 break;
3339 }
3340 }
3341 #undef assert_LU_OK
3342
3343 if (match_desc(upper, pc_offset, approximate)) {
3344 assert(upper == linear_search(pc_offset, approximate, lower_incl, upper_incl), "search mismatch");
3345 if (!Thread::current_in_asgct()) {
3346 // we don't want to modify the cache if we're in ASGCT
3347 // which is typically called in a signal handler
3348 _pc_desc_cache.add_pc_desc(upper);
3349 }
3350 return upper;
3351 } else {
3352 assert(nullptr == linear_search(pc_offset, approximate, lower_incl, upper_incl), "search mismatch");
3353 return nullptr;
3354 }
3355 }
3356
3357 bool nmethod::check_dependency_on(DepChange& changes) {
3358 // What has happened:
3359 // 1) a new class dependee has been added
3360 // 2) dependee and all its super classes have been marked
3361 bool found_check = false; // set true if we are upset
3362 for (Dependencies::DepStream deps(this); deps.next(); ) {
3363 // Evaluate only relevant dependencies.
3364 if (deps.spot_check_dependency_at(changes) != nullptr) {
3365 found_check = true;
3366 NOT_DEBUG(break);
3367 }
3368 }
3369 return found_check;
3370 }
3371
3372 // Called from mark_for_deoptimization, when dependee is invalidated.
3373 bool nmethod::is_dependent_on_method(Method* dependee) {
3374 for (Dependencies::DepStream deps(this); deps.next(); ) {
3375 if (deps.type() != Dependencies::evol_method)
3376 continue;
3377 Method* method = deps.method_argument(0);
3378 if (method == dependee) return true;
3379 }
3380 return false;
3381 }
3382
3383 void nmethod_init() {
3384 // make sure you didn't forget to adjust the filler fields
3385 assert(sizeof(nmethod) % oopSize == 0, "nmethod size must be multiple of a word");
3386 }
3387
3388 // -----------------------------------------------------------------------------
3389 // Verification
3390
3391 class VerifyOopsClosure: public OopClosure {
3392 nmethod* _nm;
3393 bool _ok;
3394 public:
3395 VerifyOopsClosure(nmethod* nm) : _nm(nm), _ok(true) { }
3396 bool ok() { return _ok; }
3397 virtual void do_oop(oop* p) {
3398 if (oopDesc::is_oop_or_null(*p)) return;
3399 // Print diagnostic information before calling print_nmethod().
3400 // Assertions therein might prevent call from returning.
3401 tty->print_cr("*** non-oop " PTR_FORMAT " found at " PTR_FORMAT " (offset %d)",
3402 p2i(*p), p2i(p), (int)((intptr_t)p - (intptr_t)_nm));
3403 if (_ok) {
3404 _nm->print_nmethod(true);
3405 _ok = false;
3406 }
3407 }
3408 virtual void do_oop(narrowOop* p) { ShouldNotReachHere(); }
3409 };
3410
3411 class VerifyMetadataClosure: public MetadataClosure {
3412 public:
3413 void do_metadata(Metadata* md) {
3414 if (md->is_method()) {
3415 Method* method = (Method*)md;
3416 assert(!method->is_old(), "Should not be installing old methods");
3417 }
3418 }
3419 };
3420
3421
3422 void nmethod::verify() {
3423 if (is_not_entrant())
3424 return;
3425
3426 // assert(oopDesc::is_oop(method()), "must be valid");
3427
3428 ResourceMark rm;
3429
3430 if (!CodeCache::contains(this)) {
3431 fatal("nmethod at " INTPTR_FORMAT " not in zone", p2i(this));
3432 }
3433
3434 if(is_native_method() )
3435 return;
3436
3437 nmethod* nm = CodeCache::find_nmethod(verified_entry_point());
3438 if (nm != this) {
3439 fatal("find_nmethod did not find this nmethod (" INTPTR_FORMAT ")", p2i(this));
3440 }
3441
3442 // Verification can triggered during shutdown after AOTCodeCache is closed.
3443 // If the Scopes data is in the AOT code cache, then we should avoid verification during shutdown.
3444 if (!is_aot() || AOTCodeCache::is_on()) {
3445 for (PcDesc* p = scopes_pcs_begin(); p < scopes_pcs_end(); p++) {
3446 if (! p->verify(this)) {
3447 tty->print_cr("\t\tin nmethod at " INTPTR_FORMAT " (pcs)", p2i(this));
3448 }
3449 }
3450
3451 #ifdef ASSERT
3452 #if INCLUDE_JVMCI
3453 {
3454 // Verify that implicit exceptions that deoptimize have a PcDesc and OopMap
3455 ImmutableOopMapSet* oms = oop_maps();
3456 ImplicitExceptionTable implicit_table(this);
3457 for (uint i = 0; i < implicit_table.len(); i++) {
3458 int exec_offset = (int) implicit_table.get_exec_offset(i);
3459 if (implicit_table.get_exec_offset(i) == implicit_table.get_cont_offset(i)) {
3460 assert(pc_desc_at(code_begin() + exec_offset) != nullptr, "missing PcDesc");
3461 bool found = false;
3462 for (int i = 0, imax = oms->count(); i < imax; i++) {
3463 if (oms->pair_at(i)->pc_offset() == exec_offset) {
3464 found = true;
3465 break;
3466 }
3467 }
3468 assert(found, "missing oopmap");
3469 }
3470 }
3471 }
3472 #endif
3473 #endif
3474 }
3475
3476 VerifyOopsClosure voc(this);
3477 oops_do(&voc);
3478 assert(voc.ok(), "embedded oops must be OK");
3479 Universe::heap()->verify_nmethod(this);
3480
3481 assert(_oops_do_mark_link == nullptr, "_oops_do_mark_link for %s should be nullptr but is " PTR_FORMAT,
3482 nm->method()->external_name(), p2i(_oops_do_mark_link));
3483 if (!is_aot() || AOTCodeCache::is_on()) {
3484 verify_scopes();
3485 }
3486
3487 CompiledICLocker nm_verify(this);
3488 VerifyMetadataClosure vmc;
3489 metadata_do(&vmc);
3490 }
3491
3492
3493 void nmethod::verify_interrupt_point(address call_site, bool is_inline_cache) {
3494
3495 // Verify IC only when nmethod installation is finished.
3496 if (!is_not_installed()) {
3497 if (CompiledICLocker::is_safe(this)) {
3498 if (is_inline_cache) {
3499 CompiledIC_at(this, call_site);
3500 } else {
3501 CompiledDirectCall::at(call_site);
3502 }
3503 } else {
3504 CompiledICLocker ml_verify(this);
3505 if (is_inline_cache) {
3506 CompiledIC_at(this, call_site);
3507 } else {
3508 CompiledDirectCall::at(call_site);
3509 }
3510 }
3511 }
3512
3513 HandleMark hm(Thread::current());
3514
3515 PcDesc* pd = pc_desc_at(nativeCall_at(call_site)->return_address());
3516 assert(pd != nullptr, "PcDesc must exist");
3517 for (ScopeDesc* sd = new ScopeDesc(this, pd);
3518 !sd->is_top(); sd = sd->sender()) {
3519 sd->verify();
3520 }
3521 }
3522
3523 void nmethod::verify_scopes() {
3524 if( !method() ) return; // Runtime stubs have no scope
3525 if (method()->is_native()) return; // Ignore stub methods.
3526 // iterate through all interrupt point
3527 // and verify the debug information is valid.
3528 RelocIterator iter(this);
3529 while (iter.next()) {
3530 address stub = nullptr;
3531 switch (iter.type()) {
3532 case relocInfo::virtual_call_type:
3533 verify_interrupt_point(iter.addr(), true /* is_inline_cache */);
3534 break;
3535 case relocInfo::opt_virtual_call_type:
3536 stub = iter.opt_virtual_call_reloc()->static_stub();
3537 verify_interrupt_point(iter.addr(), false /* is_inline_cache */);
3538 break;
3539 case relocInfo::static_call_type:
3540 stub = iter.static_call_reloc()->static_stub();
3541 verify_interrupt_point(iter.addr(), false /* is_inline_cache */);
3542 break;
3543 case relocInfo::runtime_call_type:
3544 case relocInfo::runtime_call_w_cp_type: {
3545 address destination = iter.reloc()->value();
3546 // Right now there is no way to find out which entries support
3547 // an interrupt point. It would be nice if we had this
3548 // information in a table.
3549 break;
3550 }
3551 default:
3552 break;
3553 }
3554 assert(stub == nullptr || stub_contains(stub), "static call stub outside stub section");
3555 }
3556 }
3557
3558
3559 // -----------------------------------------------------------------------------
3560 // Printing operations
3561
3562 void nmethod::print_on_impl(outputStream* st) const {
3563 ResourceMark rm;
3564
3565 st->print("Compiled method ");
3566
3567 if (is_compiled_by_c1()) {
3568 st->print("(c1) ");
3569 } else if (is_compiled_by_c2()) {
3570 st->print("(c2) ");
3571 } else if (is_compiled_by_jvmci()) {
3572 st->print("(JVMCI) ");
3573 } else {
3574 st->print("(n/a) ");
3575 }
3576
3577 print_on_with_msg(st, nullptr);
3578
3579 if (WizardMode) {
3580 st->print("((nmethod*) " INTPTR_FORMAT ") ", p2i(this));
3581 st->print(" for method " INTPTR_FORMAT , p2i(method()));
3582 st->print(" { ");
3583 st->print_cr("%s ", state());
3584 st->print_cr("}:");
3585 }
3586 if (size () > 0) st->print_cr(" total in heap [" INTPTR_FORMAT "," INTPTR_FORMAT "] = %d",
3587 p2i(this),
3588 p2i(this) + size(),
3589 size());
3590 if (consts_size () > 0) st->print_cr(" constants [" INTPTR_FORMAT "," INTPTR_FORMAT "] = %d",
3591 p2i(consts_begin()),
3592 p2i(consts_end()),
3593 consts_size());
3594 if (insts_size () > 0) st->print_cr(" main code [" INTPTR_FORMAT "," INTPTR_FORMAT "] = %d",
3595 p2i(insts_begin()),
3596 p2i(insts_end()),
3597 insts_size());
3598 if (stub_size () > 0) st->print_cr(" stub code [" INTPTR_FORMAT "," INTPTR_FORMAT "] = %d",
3599 p2i(stub_begin()),
3600 p2i(stub_end()),
3601 stub_size());
3602 if (oops_size () > 0) st->print_cr(" oops [" INTPTR_FORMAT "," INTPTR_FORMAT "] = %d",
3603 p2i(oops_begin()),
3604 p2i(oops_end()),
3605 oops_size());
3606 if (mutable_data_size() > 0) st->print_cr(" mutable data [" INTPTR_FORMAT "," INTPTR_FORMAT "] = %d",
3607 p2i(mutable_data_begin()),
3608 p2i(mutable_data_end()),
3609 mutable_data_size());
3610 if (relocation_size() > 0) st->print_cr(" relocation [" INTPTR_FORMAT "," INTPTR_FORMAT "] = %d",
3611 p2i(relocation_begin()),
3612 p2i(relocation_end()),
3613 relocation_size());
3614 if (metadata_size () > 0) st->print_cr(" metadata [" INTPTR_FORMAT "," INTPTR_FORMAT "] = %d",
3615 p2i(metadata_begin()),
3616 p2i(metadata_end()),
3617 metadata_size());
3618 #if INCLUDE_JVMCI
3619 if (jvmci_data_size () > 0) st->print_cr(" JVMCI data [" INTPTR_FORMAT "," INTPTR_FORMAT "] = %d",
3620 p2i(jvmci_data_begin()),
3621 p2i(jvmci_data_end()),
3622 jvmci_data_size());
3623 #endif
3624 if (immutable_data_size() > 0) st->print_cr(" immutable data [" INTPTR_FORMAT "," INTPTR_FORMAT "] = %d",
3625 p2i(immutable_data_begin()),
3626 p2i(immutable_data_end()),
3627 immutable_data_size());
3628 if (dependencies_size () > 0) st->print_cr(" dependencies [" INTPTR_FORMAT "," INTPTR_FORMAT "] = %d",
3629 p2i(dependencies_begin()),
3630 p2i(dependencies_end()),
3631 dependencies_size());
3632 if (nul_chk_table_size() > 0) st->print_cr(" nul chk table [" INTPTR_FORMAT "," INTPTR_FORMAT "] = %d",
3633 p2i(nul_chk_table_begin()),
3634 p2i(nul_chk_table_end()),
3635 nul_chk_table_size());
3636 if (handler_table_size() > 0) st->print_cr(" handler table [" INTPTR_FORMAT "," INTPTR_FORMAT "] = %d",
3637 p2i(handler_table_begin()),
3638 p2i(handler_table_end()),
3639 handler_table_size());
3640 if (scopes_pcs_size () > 0) st->print_cr(" scopes pcs [" INTPTR_FORMAT "," INTPTR_FORMAT "] = %d",
3641 p2i(scopes_pcs_begin()),
3642 p2i(scopes_pcs_end()),
3643 scopes_pcs_size());
3644 if (scopes_data_size () > 0) st->print_cr(" scopes data [" INTPTR_FORMAT "," INTPTR_FORMAT "] = %d",
3645 p2i(scopes_data_begin()),
3646 p2i(scopes_data_end()),
3647 scopes_data_size());
3648 #if INCLUDE_JVMCI
3649 if (speculations_size () > 0) st->print_cr(" speculations [" INTPTR_FORMAT "," INTPTR_FORMAT "] = %d",
3650 p2i(speculations_begin()),
3651 p2i(speculations_end()),
3652 speculations_size());
3653 #endif
3654 if (AOTCodeCache::is_on() && _aot_code_entry != nullptr) {
3655 _aot_code_entry->print(st);
3656 }
3657 }
3658
3659 void nmethod::print_code() {
3660 ResourceMark m;
3661 ttyLocker ttyl;
3662 // Call the specialized decode method of this class.
3663 decode(tty);
3664 }
3665
3666 #ifndef PRODUCT // called InstanceKlass methods are available only then. Declared as PRODUCT_RETURN
3667
3668 void nmethod::print_dependencies_on(outputStream* out) {
3669 ResourceMark rm;
3670 stringStream st;
3671 st.print_cr("Dependencies:");
3672 for (Dependencies::DepStream deps(this); deps.next(); ) {
3673 deps.print_dependency(&st);
3674 InstanceKlass* ctxk = deps.context_type();
3675 if (ctxk != nullptr) {
3676 if (ctxk->is_dependent_nmethod(this)) {
3677 st.print_cr(" [nmethod<=klass]%s", ctxk->external_name());
3678 }
3679 }
3680 deps.log_dependency(); // put it into the xml log also
3681 }
3682 out->print_raw(st.as_string());
3683 }
3684 #endif
3685
3686 #if defined(SUPPORT_DATA_STRUCTS)
3687
3688 // Print the oops from the underlying CodeBlob.
3689 void nmethod::print_oops(outputStream* st) {
3690 ResourceMark m;
3691 st->print("Oops:");
3692 if (oops_begin() < oops_end()) {
3693 st->cr();
3694 for (oop* p = oops_begin(); p < oops_end(); p++) {
3695 Disassembler::print_location((unsigned char*)p, (unsigned char*)oops_begin(), (unsigned char*)oops_end(), st, true, false);
3696 st->print(PTR_FORMAT " ", *((uintptr_t*)p));
3697 if (Universe::contains_non_oop_word(p)) {
3698 st->print_cr("NON_OOP");
3699 continue; // skip non-oops
3700 }
3701 if (*p == nullptr) {
3702 st->print_cr("nullptr-oop");
3703 continue; // skip non-oops
3704 }
3705 (*p)->print_value_on(st);
3706 st->cr();
3707 }
3708 } else {
3709 st->print_cr(" <list empty>");
3710 }
3711 }
3712
3713 // Print metadata pool.
3714 void nmethod::print_metadata(outputStream* st) {
3715 ResourceMark m;
3716 st->print("Metadata:");
3717 if (metadata_begin() < metadata_end()) {
3718 st->cr();
3719 for (Metadata** p = metadata_begin(); p < metadata_end(); p++) {
3720 Disassembler::print_location((unsigned char*)p, (unsigned char*)metadata_begin(), (unsigned char*)metadata_end(), st, true, false);
3721 st->print(PTR_FORMAT " ", *((uintptr_t*)p));
3722 if (*p && *p != Universe::non_oop_word()) {
3723 (*p)->print_value_on(st);
3724 }
3725 st->cr();
3726 }
3727 } else {
3728 st->print_cr(" <list empty>");
3729 }
3730 }
3731
3732 #ifndef PRODUCT // ScopeDesc::print_on() is available only then. Declared as PRODUCT_RETURN
3733 void nmethod::print_scopes_on(outputStream* st) {
3734 // Find the first pc desc for all scopes in the code and print it.
3735 ResourceMark rm;
3736 st->print("scopes:");
3737 if (scopes_pcs_begin() < scopes_pcs_end()) {
3738 st->cr();
3739 for (PcDesc* p = scopes_pcs_begin(); p < scopes_pcs_end(); p++) {
3740 if (p->scope_decode_offset() == DebugInformationRecorder::serialized_null)
3741 continue;
3742
3743 ScopeDesc* sd = scope_desc_at(p->real_pc(this));
3744 while (sd != nullptr) {
3745 sd->print_on(st, p); // print output ends with a newline
3746 sd = sd->sender();
3747 }
3748 }
3749 } else {
3750 st->print_cr(" <list empty>");
3751 }
3752 }
3753 #endif
3754
3755 #ifndef PRODUCT // RelocIterator does support printing only then.
3756 void nmethod::print_relocations_on(outputStream* st) {
3757 ResourceMark m; // in case methods get printed via the debugger
3758 st->print_cr("relocations:");
3759 RelocIterator iter(this);
3760 iter.print_on(st);
3761 }
3762 #endif
3763
3764 void nmethod::print_pcs_on(outputStream* st) {
3765 ResourceMark m; // in case methods get printed via debugger
3766 st->print("pc-bytecode offsets:");
3767 if (scopes_pcs_begin() < scopes_pcs_end()) {
3768 st->cr();
3769 for (PcDesc* p = scopes_pcs_begin(); p < scopes_pcs_end(); p++) {
3770 p->print_on(st, this); // print output ends with a newline
3771 }
3772 } else {
3773 st->print_cr(" <list empty>");
3774 }
3775 }
3776
3777 void nmethod::print_handler_table() {
3778 ExceptionHandlerTable(this).print(code_begin());
3779 }
3780
3781 void nmethod::print_nul_chk_table() {
3782 ImplicitExceptionTable(this).print(code_begin());
3783 }
3784
3785 void nmethod::print_recorded_oop(int log_n, int i) {
3786 void* value;
3787
3788 if (i == 0) {
3789 value = nullptr;
3790 } else {
3791 // Be careful around non-oop words. Don't create an oop
3792 // with that value, or it will assert in verification code.
3793 if (Universe::contains_non_oop_word(oop_addr_at(i))) {
3794 value = Universe::non_oop_word();
3795 } else {
3796 value = oop_at(i);
3797 }
3798 }
3799
3800 tty->print("#%*d: " INTPTR_FORMAT " ", log_n, i, p2i(value));
3801
3802 if (value == Universe::non_oop_word()) {
3803 tty->print("non-oop word");
3804 } else {
3805 if (value == nullptr) {
3806 tty->print("nullptr-oop");
3807 } else {
3808 oop_at(i)->print_value_on(tty);
3809 }
3810 }
3811
3812 tty->cr();
3813 }
3814
3815 void nmethod::print_recorded_oops() {
3816 const int n = oops_count();
3817 const int log_n = (n<10) ? 1 : (n<100) ? 2 : (n<1000) ? 3 : (n<10000) ? 4 : 6;
3818 tty->print("Recorded oops:");
3819 if (n > 0) {
3820 tty->cr();
3821 for (int i = 0; i < n; i++) {
3822 print_recorded_oop(log_n, i);
3823 }
3824 } else {
3825 tty->print_cr(" <list empty>");
3826 }
3827 }
3828
3829 void nmethod::print_recorded_metadata() {
3830 const int n = metadata_count();
3831 const int log_n = (n<10) ? 1 : (n<100) ? 2 : (n<1000) ? 3 : (n<10000) ? 4 : 6;
3832 tty->print("Recorded metadata:");
3833 if (n > 0) {
3834 tty->cr();
3835 for (int i = 0; i < n; i++) {
3836 Metadata* m = metadata_at(i);
3837 tty->print("#%*d: " INTPTR_FORMAT " ", log_n, i, p2i(m));
3838 if (m == (Metadata*)Universe::non_oop_word()) {
3839 tty->print("non-metadata word");
3840 } else if (m == nullptr) {
3841 tty->print("nullptr-oop");
3842 } else {
3843 Metadata::print_value_on_maybe_null(tty, m);
3844 }
3845 tty->cr();
3846 }
3847 } else {
3848 tty->print_cr(" <list empty>");
3849 }
3850 }
3851 #endif
3852
3853 #if defined(SUPPORT_ASSEMBLY) || defined(SUPPORT_ABSTRACT_ASSEMBLY)
3854
3855 void nmethod::print_constant_pool(outputStream* st) {
3856 //-----------------------------------
3857 //---< Print the constant pool >---
3858 //-----------------------------------
3859 int consts_size = this->consts_size();
3860 if ( consts_size > 0 ) {
3861 unsigned char* cstart = this->consts_begin();
3862 unsigned char* cp = cstart;
3863 unsigned char* cend = cp + consts_size;
3864 unsigned int bytes_per_line = 4;
3865 unsigned int CP_alignment = 8;
3866 unsigned int n;
3867
3868 st->cr();
3869
3870 //---< print CP header to make clear what's printed >---
3871 if( ((uintptr_t)cp&(CP_alignment-1)) == 0 ) {
3872 n = bytes_per_line;
3873 st->print_cr("[Constant Pool]");
3874 Disassembler::print_location(cp, cstart, cend, st, true, true);
3875 Disassembler::print_hexdata(cp, n, st, true);
3876 st->cr();
3877 } else {
3878 n = (int)((uintptr_t)cp & (bytes_per_line-1));
3879 st->print_cr("[Constant Pool (unaligned)]");
3880 }
3881
3882 //---< print CP contents, bytes_per_line at a time >---
3883 while (cp < cend) {
3884 Disassembler::print_location(cp, cstart, cend, st, true, false);
3885 Disassembler::print_hexdata(cp, n, st, false);
3886 cp += n;
3887 n = bytes_per_line;
3888 st->cr();
3889 }
3890
3891 //---< Show potential alignment gap between constant pool and code >---
3892 cend = code_begin();
3893 if( cp < cend ) {
3894 n = 4;
3895 st->print_cr("[Code entry alignment]");
3896 while (cp < cend) {
3897 Disassembler::print_location(cp, cstart, cend, st, false, false);
3898 cp += n;
3899 st->cr();
3900 }
3901 }
3902 } else {
3903 st->print_cr("[Constant Pool (empty)]");
3904 }
3905 st->cr();
3906 }
3907
3908 #endif
3909
3910 // Disassemble this nmethod.
3911 // Print additional debug information, if requested. This could be code
3912 // comments, block comments, profiling counters, etc.
3913 // The undisassembled format is useful no disassembler library is available.
3914 // The resulting hex dump (with markers) can be disassembled later, or on
3915 // another system, when/where a disassembler library is available.
3916 void nmethod::decode2(outputStream* ost) const {
3917
3918 // Called from frame::back_trace_with_decode without ResourceMark.
3919 ResourceMark rm;
3920
3921 // Make sure we have a valid stream to print on.
3922 outputStream* st = ost ? ost : tty;
3923
3924 #if defined(SUPPORT_ABSTRACT_ASSEMBLY) && ! defined(SUPPORT_ASSEMBLY)
3925 const bool use_compressed_format = true;
3926 const bool compressed_with_comments = use_compressed_format && (AbstractDisassembler::show_comment() ||
3927 AbstractDisassembler::show_block_comment());
3928 #else
3929 const bool use_compressed_format = Disassembler::is_abstract();
3930 const bool compressed_with_comments = use_compressed_format && (AbstractDisassembler::show_comment() ||
3931 AbstractDisassembler::show_block_comment());
3932 #endif
3933
3934 st->cr();
3935 this->print_on(st);
3936 st->cr();
3937
3938 #if defined(SUPPORT_ASSEMBLY)
3939 //----------------------------------
3940 //---< Print real disassembly >---
3941 //----------------------------------
3942 if (! use_compressed_format) {
3943 st->print_cr("[Disassembly]");
3944 Disassembler::decode(const_cast<nmethod*>(this), st);
3945 st->bol();
3946 st->print_cr("[/Disassembly]");
3947 return;
3948 }
3949 #endif
3950
3951 #if defined(SUPPORT_ABSTRACT_ASSEMBLY)
3952
3953 // Compressed undisassembled disassembly format.
3954 // The following status values are defined/supported:
3955 // = 0 - currently at bol() position, nothing printed yet on current line.
3956 // = 1 - currently at position after print_location().
3957 // > 1 - in the midst of printing instruction stream bytes.
3958 int compressed_format_idx = 0;
3959 int code_comment_column = 0;
3960 const int instr_maxlen = Assembler::instr_maxlen();
3961 const uint tabspacing = 8;
3962 unsigned char* start = this->code_begin();
3963 unsigned char* p = this->code_begin();
3964 unsigned char* end = this->code_end();
3965 unsigned char* pss = p; // start of a code section (used for offsets)
3966
3967 if ((start == nullptr) || (end == nullptr)) {
3968 st->print_cr("PrintAssembly not possible due to uninitialized section pointers");
3969 return;
3970 }
3971 #endif
3972
3973 #if defined(SUPPORT_ABSTRACT_ASSEMBLY)
3974 //---< plain abstract disassembly, no comments or anything, just section headers >---
3975 if (use_compressed_format && ! compressed_with_comments) {
3976 const_cast<nmethod*>(this)->print_constant_pool(st);
3977
3978 st->bol();
3979 st->cr();
3980 st->print_cr("Loading hsdis library failed, undisassembled code is shown in MachCode section");
3981 //---< Open the output (Marker for post-mortem disassembler) >---
3982 st->print_cr("[MachCode]");
3983 const char* header = nullptr;
3984 address p0 = p;
3985 while (p < end) {
3986 address pp = p;
3987 while ((p < end) && (header == nullptr)) {
3988 header = nmethod_section_label(p);
3989 pp = p;
3990 p += Assembler::instr_len(p);
3991 }
3992 if (pp > p0) {
3993 AbstractDisassembler::decode_range_abstract(p0, pp, start, end, st, Assembler::instr_maxlen());
3994 p0 = pp;
3995 p = pp;
3996 header = nullptr;
3997 } else if (header != nullptr) {
3998 st->bol();
3999 st->print_cr("%s", header);
4000 header = nullptr;
4001 }
4002 }
4003 //---< Close the output (Marker for post-mortem disassembler) >---
4004 st->bol();
4005 st->print_cr("[/MachCode]");
4006 return;
4007 }
4008 #endif
4009
4010 #if defined(SUPPORT_ABSTRACT_ASSEMBLY)
4011 //---< abstract disassembly with comments and section headers merged in >---
4012 if (compressed_with_comments) {
4013 const_cast<nmethod*>(this)->print_constant_pool(st);
4014
4015 st->bol();
4016 st->cr();
4017 st->print_cr("Loading hsdis library failed, undisassembled code is shown in MachCode section");
4018 //---< Open the output (Marker for post-mortem disassembler) >---
4019 st->print_cr("[MachCode]");
4020 while ((p < end) && (p != nullptr)) {
4021 const int instruction_size_in_bytes = Assembler::instr_len(p);
4022
4023 //---< Block comments for nmethod. Interrupts instruction stream, if any. >---
4024 // Outputs a bol() before and a cr() after, but only if a comment is printed.
4025 // Prints nmethod_section_label as well.
4026 if (AbstractDisassembler::show_block_comment()) {
4027 print_block_comment(st, p);
4028 if (st->position() == 0) {
4029 compressed_format_idx = 0;
4030 }
4031 }
4032
4033 //---< New location information after line break >---
4034 if (compressed_format_idx == 0) {
4035 code_comment_column = Disassembler::print_location(p, pss, end, st, false, false);
4036 compressed_format_idx = 1;
4037 }
4038
4039 //---< Code comment for current instruction. Address range [p..(p+len)) >---
4040 unsigned char* p_end = p + (ssize_t)instruction_size_in_bytes;
4041 S390_ONLY(if (p_end > end) p_end = end;) // avoid getting past the end
4042
4043 if (AbstractDisassembler::show_comment() && const_cast<nmethod*>(this)->has_code_comment(p, p_end)) {
4044 //---< interrupt instruction byte stream for code comment >---
4045 if (compressed_format_idx > 1) {
4046 st->cr(); // interrupt byte stream
4047 st->cr(); // add an empty line
4048 code_comment_column = Disassembler::print_location(p, pss, end, st, false, false);
4049 }
4050 const_cast<nmethod*>(this)->print_code_comment_on(st, code_comment_column, p, p_end );
4051 st->bol();
4052 compressed_format_idx = 0;
4053 }
4054
4055 //---< New location information after line break >---
4056 if (compressed_format_idx == 0) {
4057 code_comment_column = Disassembler::print_location(p, pss, end, st, false, false);
4058 compressed_format_idx = 1;
4059 }
4060
4061 //---< Nicely align instructions for readability >---
4062 if (compressed_format_idx > 1) {
4063 Disassembler::print_delimiter(st);
4064 }
4065
4066 //---< Now, finally, print the actual instruction bytes >---
4067 unsigned char* p0 = p;
4068 p = Disassembler::decode_instruction_abstract(p, st, instruction_size_in_bytes, instr_maxlen);
4069 compressed_format_idx += (int)(p - p0);
4070
4071 if (Disassembler::start_newline(compressed_format_idx-1)) {
4072 st->cr();
4073 compressed_format_idx = 0;
4074 }
4075 }
4076 //---< Close the output (Marker for post-mortem disassembler) >---
4077 st->bol();
4078 st->print_cr("[/MachCode]");
4079 return;
4080 }
4081 #endif
4082 }
4083
4084 #if defined(SUPPORT_ASSEMBLY) || defined(SUPPORT_ABSTRACT_ASSEMBLY)
4085
4086 const char* nmethod::reloc_string_for(u_char* begin, u_char* end) {
4087 RelocIterator iter(this, begin, end);
4088 bool have_one = false;
4089 while (iter.next()) {
4090 have_one = true;
4091 switch (iter.type()) {
4092 case relocInfo::none: {
4093 // Skip it and check next
4094 break;
4095 }
4096 case relocInfo::oop_type: {
4097 // Get a non-resizable resource-allocated stringStream.
4098 // Our callees make use of (nested) ResourceMarks.
4099 stringStream st(NEW_RESOURCE_ARRAY(char, 1024), 1024);
4100 oop_Relocation* r = iter.oop_reloc();
4101 oop obj = r->oop_value();
4102 st.print("oop(");
4103 if (obj == nullptr) st.print("nullptr");
4104 else obj->print_value_on(&st);
4105 st.print(")");
4106 return st.as_string();
4107 }
4108 case relocInfo::metadata_type: {
4109 stringStream st;
4110 metadata_Relocation* r = iter.metadata_reloc();
4111 Metadata* obj = r->metadata_value();
4112 st.print("metadata(");
4113 if (obj == nullptr) st.print("nullptr");
4114 else obj->print_value_on(&st);
4115 st.print(")");
4116 return st.as_string();
4117 }
4118 case relocInfo::runtime_call_type:
4119 case relocInfo::runtime_call_w_cp_type: {
4120 stringStream st;
4121 st.print("runtime_call");
4122 CallRelocation* r = (CallRelocation*)iter.reloc();
4123 address dest = r->destination();
4124 if (StubRoutines::contains(dest)) {
4125 StubCodeDesc* desc = StubCodeDesc::desc_for(dest);
4126 if (desc == nullptr) {
4127 desc = StubCodeDesc::desc_for(dest + frame::pc_return_offset);
4128 }
4129 if (desc != nullptr) {
4130 st.print(" Stub::%s", desc->name());
4131 return st.as_string();
4132 }
4133 }
4134 CodeBlob* cb = CodeCache::find_blob(dest);
4135 if (cb != nullptr) {
4136 st.print(" %s", cb->name());
4137 } else {
4138 ResourceMark rm;
4139 const int buflen = 1024;
4140 char* buf = NEW_RESOURCE_ARRAY(char, buflen);
4141 int offset;
4142 if (os::dll_address_to_function_name(dest, buf, buflen, &offset)) {
4143 st.print(" %s", buf);
4144 if (offset != 0) {
4145 st.print("+%d", offset);
4146 }
4147 }
4148 }
4149 return st.as_string();
4150 }
4151 case relocInfo::virtual_call_type: {
4152 stringStream st;
4153 st.print_raw("virtual_call");
4154 virtual_call_Relocation* r = iter.virtual_call_reloc();
4155 Method* m = r->method_value();
4156 if (m != nullptr) {
4157 assert(m->is_method(), "");
4158 m->print_short_name(&st);
4159 }
4160 return st.as_string();
4161 }
4162 case relocInfo::opt_virtual_call_type: {
4163 stringStream st;
4164 st.print_raw("optimized virtual_call");
4165 opt_virtual_call_Relocation* r = iter.opt_virtual_call_reloc();
4166 Method* m = r->method_value();
4167 if (m != nullptr) {
4168 assert(m->is_method(), "");
4169 m->print_short_name(&st);
4170 }
4171 return st.as_string();
4172 }
4173 case relocInfo::static_call_type: {
4174 stringStream st;
4175 st.print_raw("static_call");
4176 static_call_Relocation* r = iter.static_call_reloc();
4177 Method* m = r->method_value();
4178 if (m != nullptr) {
4179 assert(m->is_method(), "");
4180 m->print_short_name(&st);
4181 }
4182 return st.as_string();
4183 }
4184 case relocInfo::static_stub_type: return "static_stub";
4185 case relocInfo::external_word_type: return "external_word";
4186 case relocInfo::internal_word_type: return "internal_word";
4187 case relocInfo::section_word_type: return "section_word";
4188 case relocInfo::poll_type: return "poll";
4189 case relocInfo::poll_return_type: return "poll_return";
4190 case relocInfo::trampoline_stub_type: return "trampoline_stub";
4191 case relocInfo::entry_guard_type: return "entry_guard";
4192 case relocInfo::post_call_nop_type: return "post_call_nop";
4193 case relocInfo::barrier_type: {
4194 barrier_Relocation* const reloc = iter.barrier_reloc();
4195 stringStream st;
4196 st.print("barrier format=%d", reloc->format());
4197 return st.as_string();
4198 }
4199
4200 case relocInfo::type_mask: return "type_bit_mask";
4201
4202 default: {
4203 stringStream st;
4204 st.print("unknown relocInfo=%d", (int) iter.type());
4205 return st.as_string();
4206 }
4207 }
4208 }
4209 return have_one ? "other" : nullptr;
4210 }
4211
4212 // Return the last scope in (begin..end]
4213 ScopeDesc* nmethod::scope_desc_in(address begin, address end) {
4214 PcDesc* p = pc_desc_near(begin+1);
4215 if (p != nullptr && p->real_pc(this) <= end) {
4216 return new ScopeDesc(this, p);
4217 }
4218 return nullptr;
4219 }
4220
4221 const char* nmethod::nmethod_section_label(address pos) const {
4222 const char* label = nullptr;
4223 if (pos == code_begin()) label = "[Instructions begin]";
4224 if (pos == entry_point()) label = "[Entry Point]";
4225 if (pos == verified_entry_point()) label = "[Verified Entry Point]";
4226 if (pos == consts_begin() && pos != insts_begin()) label = "[Constants]";
4227 // Check stub_code before checking exception_handler or deopt_handler.
4228 if (pos == this->stub_begin()) label = "[Stub Code]";
4229 if (JVMCI_ONLY(_exception_offset >= 0 &&) pos == exception_begin()) label = "[Exception Handler]";
4230 if (JVMCI_ONLY(_deopt_handler_entry_offset != -1 &&) pos == deopt_handler_entry()) label = "[Deopt Handler Entry Point]";
4231 return label;
4232 }
4233
4234 void nmethod::print_nmethod_labels(outputStream* stream, address block_begin, bool print_section_labels) const {
4235 if (print_section_labels) {
4236 const char* label = nmethod_section_label(block_begin);
4237 if (label != nullptr) {
4238 stream->bol();
4239 stream->print_cr("%s", label);
4240 }
4241 }
4242
4243 if (block_begin == entry_point()) {
4244 Method* m = method();
4245 if (m != nullptr) {
4246 stream->print(" # ");
4247 m->print_value_on(stream);
4248 stream->cr();
4249 }
4250 if (m != nullptr && !is_osr_method()) {
4251 ResourceMark rm;
4252 int sizeargs = m->size_of_parameters();
4253 BasicType* sig_bt = NEW_RESOURCE_ARRAY(BasicType, sizeargs);
4254 VMRegPair* regs = NEW_RESOURCE_ARRAY(VMRegPair, sizeargs);
4255 {
4256 int sig_index = 0;
4257 if (!m->is_static())
4258 sig_bt[sig_index++] = T_OBJECT; // 'this'
4259 for (SignatureStream ss(m->signature()); !ss.at_return_type(); ss.next()) {
4260 BasicType t = ss.type();
4261 sig_bt[sig_index++] = t;
4262 if (type2size[t] == 2) {
4263 sig_bt[sig_index++] = T_VOID;
4264 } else {
4265 assert(type2size[t] == 1, "size is 1 or 2");
4266 }
4267 }
4268 assert(sig_index == sizeargs, "");
4269 }
4270 const char* spname = "sp"; // make arch-specific?
4271 SharedRuntime::java_calling_convention(sig_bt, regs, sizeargs);
4272 int stack_slot_offset = this->frame_size() * wordSize;
4273 int tab1 = 14, tab2 = 24;
4274 int sig_index = 0;
4275 int arg_index = (m->is_static() ? 0 : -1);
4276 bool did_old_sp = false;
4277 for (SignatureStream ss(m->signature()); !ss.at_return_type(); ) {
4278 bool at_this = (arg_index == -1);
4279 bool at_old_sp = false;
4280 BasicType t = (at_this ? T_OBJECT : ss.type());
4281 assert(t == sig_bt[sig_index], "sigs in sync");
4282 if (at_this)
4283 stream->print(" # this: ");
4284 else
4285 stream->print(" # parm%d: ", arg_index);
4286 stream->move_to(tab1);
4287 VMReg fst = regs[sig_index].first();
4288 VMReg snd = regs[sig_index].second();
4289 if (fst->is_reg()) {
4290 stream->print("%s", fst->name());
4291 if (snd->is_valid()) {
4292 stream->print(":%s", snd->name());
4293 }
4294 } else if (fst->is_stack()) {
4295 int offset = fst->reg2stack() * VMRegImpl::stack_slot_size + stack_slot_offset;
4296 if (offset == stack_slot_offset) at_old_sp = true;
4297 stream->print("[%s+0x%x]", spname, offset);
4298 } else {
4299 stream->print("reg%d:%d??", (int)(intptr_t)fst, (int)(intptr_t)snd);
4300 }
4301 stream->print(" ");
4302 stream->move_to(tab2);
4303 stream->print("= ");
4304 if (at_this) {
4305 m->method_holder()->print_value_on(stream);
4306 } else {
4307 bool did_name = false;
4308 if (!at_this && ss.is_reference()) {
4309 Symbol* name = ss.as_symbol();
4310 name->print_value_on(stream);
4311 did_name = true;
4312 }
4313 if (!did_name)
4314 stream->print("%s", type2name(t));
4315 }
4316 if (at_old_sp) {
4317 stream->print(" (%s of caller)", spname);
4318 did_old_sp = true;
4319 }
4320 stream->cr();
4321 sig_index += type2size[t];
4322 arg_index += 1;
4323 if (!at_this) ss.next();
4324 }
4325 if (!did_old_sp) {
4326 stream->print(" # ");
4327 stream->move_to(tab1);
4328 stream->print("[%s+0x%x]", spname, stack_slot_offset);
4329 stream->print(" (%s of caller)", spname);
4330 stream->cr();
4331 }
4332 }
4333 }
4334 }
4335
4336 // Returns whether this nmethod has code comments.
4337 bool nmethod::has_code_comment(address begin, address end) {
4338 // scopes?
4339 ScopeDesc* sd = scope_desc_in(begin, end);
4340 if (sd != nullptr) return true;
4341
4342 // relocations?
4343 const char* str = reloc_string_for(begin, end);
4344 if (str != nullptr) return true;
4345
4346 // implicit exceptions?
4347 int cont_offset = ImplicitExceptionTable(this).continuation_offset((uint)(begin - code_begin()));
4348 if (cont_offset != 0) return true;
4349
4350 return false;
4351 }
4352
4353 void nmethod::print_code_comment_on(outputStream* st, int column, address begin, address end) {
4354 ImplicitExceptionTable implicit_table(this);
4355 int pc_offset = (int)(begin - code_begin());
4356 int cont_offset = implicit_table.continuation_offset(pc_offset);
4357 bool oop_map_required = false;
4358 if (cont_offset != 0) {
4359 st->move_to(column, 6, 0);
4360 if (pc_offset == cont_offset) {
4361 st->print("; implicit exception: deoptimizes");
4362 oop_map_required = true;
4363 } else {
4364 st->print("; implicit exception: dispatches to " INTPTR_FORMAT, p2i(code_begin() + cont_offset));
4365 }
4366 }
4367
4368 // Find an oopmap in (begin, end]. We use the odd half-closed
4369 // interval so that oop maps and scope descs which are tied to the
4370 // byte after a call are printed with the call itself. OopMaps
4371 // associated with implicit exceptions are printed with the implicit
4372 // instruction.
4373 address base = code_begin();
4374 ImmutableOopMapSet* oms = oop_maps();
4375 if (oms != nullptr) {
4376 for (int i = 0, imax = oms->count(); i < imax; i++) {
4377 const ImmutableOopMapPair* pair = oms->pair_at(i);
4378 const ImmutableOopMap* om = pair->get_from(oms);
4379 address pc = base + pair->pc_offset();
4380 if (pc >= begin) {
4381 #if INCLUDE_JVMCI
4382 bool is_implicit_deopt = implicit_table.continuation_offset(pair->pc_offset()) == (uint) pair->pc_offset();
4383 #else
4384 bool is_implicit_deopt = false;
4385 #endif
4386 if (is_implicit_deopt ? pc == begin : pc > begin && pc <= end) {
4387 st->move_to(column, 6, 0);
4388 st->print("; ");
4389 om->print_on(st);
4390 oop_map_required = false;
4391 }
4392 }
4393 if (pc > end) {
4394 break;
4395 }
4396 }
4397 }
4398 assert(!oop_map_required, "missed oopmap");
4399
4400 Thread* thread = Thread::current();
4401
4402 // Print any debug info present at this pc.
4403 ScopeDesc* sd = scope_desc_in(begin, end);
4404 if (sd != nullptr) {
4405 st->move_to(column, 6, 0);
4406 if (sd->bci() == SynchronizationEntryBCI) {
4407 st->print(";*synchronization entry");
4408 } else if (sd->bci() == AfterBci) {
4409 st->print(";* method exit (unlocked if synchronized)");
4410 } else if (sd->bci() == UnwindBci) {
4411 st->print(";* unwind (locked if synchronized)");
4412 } else if (sd->bci() == AfterExceptionBci) {
4413 st->print(";* unwind (unlocked if synchronized)");
4414 } else if (sd->bci() == UnknownBci) {
4415 st->print(";* unknown");
4416 } else if (sd->bci() == InvalidFrameStateBci) {
4417 st->print(";* invalid frame state");
4418 } else {
4419 if (sd->method() == nullptr) {
4420 st->print("method is nullptr");
4421 } else if (sd->method()->is_native()) {
4422 st->print("method is native");
4423 } else {
4424 Bytecodes::Code bc = sd->method()->java_code_at(sd->bci());
4425 st->print(";*%s", Bytecodes::name(bc));
4426 switch (bc) {
4427 case Bytecodes::_invokevirtual:
4428 case Bytecodes::_invokespecial:
4429 case Bytecodes::_invokestatic:
4430 case Bytecodes::_invokeinterface:
4431 {
4432 Bytecode_invoke invoke(methodHandle(thread, sd->method()), sd->bci());
4433 st->print(" ");
4434 if (invoke.name() != nullptr)
4435 invoke.name()->print_symbol_on(st);
4436 else
4437 st->print("<UNKNOWN>");
4438 break;
4439 }
4440 case Bytecodes::_getfield:
4441 case Bytecodes::_putfield:
4442 case Bytecodes::_getstatic:
4443 case Bytecodes::_putstatic:
4444 {
4445 Bytecode_field field(methodHandle(thread, sd->method()), sd->bci());
4446 st->print(" ");
4447 if (field.name() != nullptr)
4448 field.name()->print_symbol_on(st);
4449 else
4450 st->print("<UNKNOWN>");
4451 }
4452 default:
4453 break;
4454 }
4455 }
4456 st->print(" {reexecute=%d rethrow=%d return_oop=%d}", sd->should_reexecute(), sd->rethrow_exception(), sd->return_oop());
4457 }
4458
4459 // Print all scopes
4460 for (;sd != nullptr; sd = sd->sender()) {
4461 st->move_to(column, 6, 0);
4462 st->print("; -");
4463 if (sd->should_reexecute()) {
4464 st->print(" (reexecute)");
4465 }
4466 if (sd->method() == nullptr) {
4467 st->print("method is nullptr");
4468 } else {
4469 sd->method()->print_short_name(st);
4470 }
4471 int lineno = sd->method()->line_number_from_bci(sd->bci());
4472 if (lineno != -1) {
4473 st->print("@%d (line %d)", sd->bci(), lineno);
4474 } else {
4475 st->print("@%d", sd->bci());
4476 }
4477 st->cr();
4478 }
4479 }
4480
4481 // Print relocation information
4482 // Prevent memory leak: allocating without ResourceMark.
4483 ResourceMark rm;
4484 const char* str = reloc_string_for(begin, end);
4485 if (str != nullptr) {
4486 if (sd != nullptr) st->cr();
4487 st->move_to(column, 6, 0);
4488 st->print("; {%s}", str);
4489 }
4490 }
4491
4492 #endif
4493
4494 address nmethod::call_instruction_address(address pc) const {
4495 if (NativeCall::is_call_before(pc)) {
4496 NativeCall *ncall = nativeCall_before(pc);
4497 return ncall->instruction_address();
4498 }
4499 return nullptr;
4500 }
4501
4502 void nmethod::print_value_on_impl(outputStream* st) const {
4503 st->print_cr("nmethod");
4504 #if defined(SUPPORT_DATA_STRUCTS)
4505 print_on_with_msg(st, nullptr);
4506 #endif
4507 }
4508
4509 void nmethod::print_code_snippet(outputStream* st, address addr) const {
4510 if (entry_point() <= addr && addr < code_end()) {
4511 // Pointing into the nmethod's code. Try to disassemble some instructions around addr.
4512 // Determine conservative start and end points.
4513 address start;
4514 if (frame_complete_offset() != CodeOffsets::frame_never_safe &&
4515 addr >= code_begin() + frame_complete_offset()) {
4516 start = code_begin() + frame_complete_offset();
4517 } else {
4518 start = (addr < verified_entry_point()) ? entry_point() : verified_entry_point();
4519 }
4520 address start_for_hex_dump = start; // We can choose a different starting point for hex dump, below.
4521 address end = code_end();
4522
4523 // Try using relocations to find closer instruction start and end points.
4524 // (Some platforms have variable length instructions and can only
4525 // disassemble correctly at instruction start addresses.)
4526 RelocIterator iter((nmethod*)this, start);
4527 while (iter.next() && iter.addr() < addr) { // find relocation before addr
4528 // Note: There's a relocation which doesn't point to an instruction start:
4529 // ZBarrierRelocationFormatStoreGoodAfterMov with ZGC on x86_64
4530 // We could detect and skip it, but hex dump is still usable when
4531 // disassembler produces garbage in such a very rare case.
4532 start = iter.addr();
4533 // We want at least 64 Bytes ahead in hex dump.
4534 if (iter.addr() <= (addr - 64)) start_for_hex_dump = iter.addr();
4535 }
4536 if (iter.has_current()) {
4537 if (iter.addr() == addr) iter.next(); // find relocation after addr
4538 if (iter.has_current()) end = iter.addr();
4539 }
4540
4541 // Always print hex. Disassembler may still have problems when hitting an incorrect instruction start.
4542 os::print_hex_dump(st, start_for_hex_dump, end, 1, /* print_ascii=*/false);
4543 if (!Disassembler::is_abstract()) {
4544 Disassembler::decode(start, end, st);
4545 }
4546 }
4547 }
4548
4549 #ifndef PRODUCT
4550
4551 void nmethod::print_calls(outputStream* st) {
4552 RelocIterator iter(this);
4553 while (iter.next()) {
4554 switch (iter.type()) {
4555 case relocInfo::virtual_call_type: {
4556 CompiledICLocker ml_verify(this);
4557 CompiledIC_at(&iter)->print();
4558 break;
4559 }
4560 case relocInfo::static_call_type:
4561 case relocInfo::opt_virtual_call_type:
4562 st->print_cr("Direct call at " INTPTR_FORMAT, p2i(iter.reloc()->addr()));
4563 CompiledDirectCall::at(iter.reloc())->print();
4564 break;
4565 default:
4566 break;
4567 }
4568 }
4569 }
4570
4571 void nmethod::print_statistics() {
4572 ttyLocker ttyl;
4573 if (xtty != nullptr) xtty->head("statistics type='nmethod'");
4574 native_nmethod_stats.print_native_nmethod_stats();
4575 #ifdef COMPILER1
4576 c1_java_nmethod_stats.print_nmethod_stats("C1");
4577 #endif
4578 #ifdef COMPILER2
4579 c2_java_nmethod_stats.print_nmethod_stats("C2");
4580 #endif
4581 #if INCLUDE_JVMCI
4582 jvmci_java_nmethod_stats.print_nmethod_stats("JVMCI");
4583 #endif
4584 unknown_java_nmethod_stats.print_nmethod_stats("Unknown");
4585 DebugInformationRecorder::print_statistics();
4586 pc_nmethod_stats.print_pc_stats();
4587 Dependencies::print_statistics();
4588 ExternalsRecorder::print_statistics();
4589 if (xtty != nullptr) xtty->tail("statistics");
4590 }
4591
4592 #endif // !PRODUCT
4593
4594 #if INCLUDE_JVMCI
4595 void nmethod::update_speculation(JavaThread* thread) {
4596 jlong speculation = thread->pending_failed_speculation();
4597 if (speculation != 0) {
4598 guarantee(jvmci_nmethod_data() != nullptr, "failed speculation in nmethod without failed speculation list");
4599 jvmci_nmethod_data()->add_failed_speculation(this, speculation);
4600 thread->set_pending_failed_speculation(0);
4601 }
4602 }
4603
4604 const char* nmethod::jvmci_name() {
4605 if (jvmci_nmethod_data() != nullptr) {
4606 return jvmci_nmethod_data()->name();
4607 }
4608 return nullptr;
4609 }
4610
4611 bool nmethod::jvmci_skip_profile_deopt() const {
4612 return jvmci_nmethod_data() != nullptr && !jvmci_nmethod_data()->profile_deopt();
4613 }
4614 #endif
4615
4616 void nmethod::prepare_for_archiving_impl() {
4617 CodeBlob::prepare_for_archiving_impl();
4618 _deoptimization_generation = 0;
4619 _gc_epoch = 0;
4620 _method_profiling_count = 0;
4621 _osr_link = nullptr;
4622 _method = nullptr;
4623 _immutable_data = nullptr;
4624 _pc_desc_container = nullptr;
4625 _exception_cache = nullptr;
4626 _gc_data = nullptr;
4627 _oops_do_mark_link = nullptr;
4628 _compiled_ic_data = nullptr;
4629 _osr_entry_point = nullptr;
4630 _compile_id = -1;
4631 _deoptimization_status = not_marked;
4632 _is_unloading_state = 0;
4633 _state = not_installed;
4634 }