1 /*
2 * Copyright (c) 1997, 2025, Oracle and/or its affiliates. All rights reserved.
3 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
4 *
5 * This code is free software; you can redistribute it and/or modify it
6 * under the terms of the GNU General Public License version 2 only, as
7 * published by the Free Software Foundation.
8 *
9 * This code is distributed in the hope that it will be useful, but WITHOUT
10 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
11 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
12 * version 2 for more details (a copy is included in the LICENSE file that
13 * accompanied this code).
14 *
15 * You should have received a copy of the GNU General Public License version
16 * 2 along with this work; if not, write to the Free Software Foundation,
17 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
18 *
19 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
20 * or visit www.oracle.com if you need additional information or have any
21 * questions.
22 *
23 */
24
25 #include "asm/assembler.inline.hpp"
26 #include "code/codeCache.hpp"
27 #include "code/compiledIC.hpp"
28 #include "code/dependencies.hpp"
29 #include "code/nativeInst.hpp"
30 #include "code/nmethod.inline.hpp"
31 #include "code/scopeDesc.hpp"
32 #include "compiler/abstractCompiler.hpp"
33 #include "compiler/compilationLog.hpp"
34 #include "compiler/compileBroker.hpp"
35 #include "compiler/compileLog.hpp"
36 #include "compiler/compilerDirectives.hpp"
37 #include "compiler/compilerOracle.hpp"
38 #include "compiler/compileTask.hpp"
39 #include "compiler/directivesParser.hpp"
40 #include "compiler/disassembler.hpp"
41 #include "compiler/oopMap.inline.hpp"
42 #include "gc/shared/barrierSet.hpp"
43 #include "gc/shared/barrierSetNMethod.hpp"
44 #include "gc/shared/classUnloadingContext.hpp"
45 #include "gc/shared/collectedHeap.hpp"
46 #include "interpreter/bytecode.inline.hpp"
47 #include "jvm.h"
48 #include "logging/log.hpp"
49 #include "logging/logStream.hpp"
50 #include "memory/allocation.inline.hpp"
51 #include "memory/resourceArea.hpp"
52 #include "memory/universe.hpp"
53 #include "oops/access.inline.hpp"
54 #include "oops/klass.inline.hpp"
55 #include "oops/method.inline.hpp"
56 #include "oops/methodData.hpp"
57 #include "oops/oop.inline.hpp"
58 #include "oops/weakHandle.inline.hpp"
59 #include "prims/jvmtiImpl.hpp"
60 #include "prims/jvmtiThreadState.hpp"
61 #include "prims/methodHandles.hpp"
62 #include "runtime/atomicAccess.hpp"
63 #include "runtime/continuation.hpp"
64 #include "runtime/deoptimization.hpp"
65 #include "runtime/flags/flagSetting.hpp"
66 #include "runtime/frame.inline.hpp"
67 #include "runtime/handles.inline.hpp"
68 #include "runtime/jniHandles.inline.hpp"
69 #include "runtime/orderAccess.hpp"
70 #include "runtime/os.hpp"
71 #include "runtime/safepointVerifiers.hpp"
72 #include "runtime/serviceThread.hpp"
73 #include "runtime/sharedRuntime.hpp"
74 #include "runtime/signature.hpp"
75 #include "runtime/threadWXSetters.inline.hpp"
76 #include "runtime/vmThread.hpp"
77 #include "utilities/align.hpp"
78 #include "utilities/copy.hpp"
79 #include "utilities/dtrace.hpp"
80 #include "utilities/events.hpp"
81 #include "utilities/globalDefinitions.hpp"
82 #include "utilities/hashTable.hpp"
83 #include "utilities/xmlstream.hpp"
84 #if INCLUDE_JVMCI
85 #include "jvmci/jvmciRuntime.hpp"
86 #endif
87
88 #ifdef DTRACE_ENABLED
89
90 // Only bother with this argument setup if dtrace is available
91
92 #define DTRACE_METHOD_UNLOAD_PROBE(method) \
93 { \
94 Method* m = (method); \
95 if (m != nullptr) { \
96 Symbol* klass_name = m->klass_name(); \
97 Symbol* name = m->name(); \
98 Symbol* signature = m->signature(); \
99 HOTSPOT_COMPILED_METHOD_UNLOAD( \
100 (char *) klass_name->bytes(), klass_name->utf8_length(), \
101 (char *) name->bytes(), name->utf8_length(), \
102 (char *) signature->bytes(), signature->utf8_length()); \
103 } \
104 }
105
106 #else // ndef DTRACE_ENABLED
107
108 #define DTRACE_METHOD_UNLOAD_PROBE(method)
109
110 #endif
111
112 // Cast from int value to narrow type
113 #define CHECKED_CAST(result, T, thing) \
114 result = static_cast<T>(thing); \
115 guarantee(static_cast<int>(result) == thing, "failed: %d != %d", static_cast<int>(result), thing);
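//
// Example use (taken from nmethod::init_defaults below):
//   CHECKED_CAST(_entry_offset, uint16_t, (offsets->value(CodeOffsets::Entry)));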
116
117 //---------------------------------------------------------------------------------
118 // NMethod statistics
119 // They are printed under various flags, including:
120 // PrintC1Statistics, PrintOptoStatistics, LogVMOutput, and LogCompilation.
// (In the latter two cases, they, like other stats, are printed only to the log.)
122
123 #ifndef PRODUCT
124 // These variables are put into one block to reduce relocations
125 // and make it simpler to print from the debugger.
126 struct java_nmethod_stats_struct {
127 uint nmethod_count;
128 uint total_nm_size;
129 uint total_immut_size;
130 uint total_mut_size;
131 uint relocation_size;
132 uint consts_size;
133 uint insts_size;
134 uint stub_size;
135 uint oops_size;
136 uint metadata_size;
137 uint dependencies_size;
138 uint nul_chk_table_size;
139 uint handler_table_size;
140 uint scopes_pcs_size;
141 uint scopes_data_size;
142 #if INCLUDE_JVMCI
143 uint speculations_size;
144 uint jvmci_data_size;
145 #endif
146
147 void note_nmethod(nmethod* nm) {
148 nmethod_count += 1;
149 total_nm_size += nm->size();
150 total_immut_size += nm->immutable_data_size();
151 total_mut_size += nm->mutable_data_size();
152 relocation_size += nm->relocation_size();
153 consts_size += nm->consts_size();
154 insts_size += nm->insts_size();
155 stub_size += nm->stub_size();
156 oops_size += nm->oops_size();
157 metadata_size += nm->metadata_size();
158 scopes_data_size += nm->scopes_data_size();
159 scopes_pcs_size += nm->scopes_pcs_size();
160 dependencies_size += nm->dependencies_size();
161 handler_table_size += nm->handler_table_size();
162 nul_chk_table_size += nm->nul_chk_table_size();
163 #if INCLUDE_JVMCI
164 speculations_size += nm->speculations_size();
165 jvmci_data_size += nm->jvmci_data_size();
166 #endif
167 }
168 void print_nmethod_stats(const char* name) {
169 if (nmethod_count == 0) return;
170 tty->print_cr("Statistics for %u bytecoded nmethods for %s:", nmethod_count, name);
171 uint total_size = total_nm_size + total_immut_size + total_mut_size;
172 if (total_nm_size != 0) {
173 tty->print_cr(" total size = %u (100%%)", total_size);
174 tty->print_cr(" in CodeCache = %u (%f%%)", total_nm_size, (total_nm_size * 100.0f)/total_size);
175 }
176 uint header_size = (uint)(nmethod_count * sizeof(nmethod));
177 if (nmethod_count != 0) {
178 tty->print_cr(" header = %u (%f%%)", header_size, (header_size * 100.0f)/total_nm_size);
179 }
180 if (consts_size != 0) {
181 tty->print_cr(" constants = %u (%f%%)", consts_size, (consts_size * 100.0f)/total_nm_size);
182 }
183 if (insts_size != 0) {
184 tty->print_cr(" main code = %u (%f%%)", insts_size, (insts_size * 100.0f)/total_nm_size);
185 }
186 if (stub_size != 0) {
187 tty->print_cr(" stub code = %u (%f%%)", stub_size, (stub_size * 100.0f)/total_nm_size);
188 }
189 if (oops_size != 0) {
190 tty->print_cr(" oops = %u (%f%%)", oops_size, (oops_size * 100.0f)/total_nm_size);
191 }
192 if (total_mut_size != 0) {
193 tty->print_cr(" mutable data = %u (%f%%)", total_mut_size, (total_mut_size * 100.0f)/total_size);
194 }
195 if (relocation_size != 0) {
196 tty->print_cr(" relocation = %u (%f%%)", relocation_size, (relocation_size * 100.0f)/total_mut_size);
197 }
198 if (metadata_size != 0) {
199 tty->print_cr(" metadata = %u (%f%%)", metadata_size, (metadata_size * 100.0f)/total_mut_size);
200 }
201 #if INCLUDE_JVMCI
202 if (jvmci_data_size != 0) {
203 tty->print_cr(" JVMCI data = %u (%f%%)", jvmci_data_size, (jvmci_data_size * 100.0f)/total_mut_size);
204 }
205 #endif
206 if (total_immut_size != 0) {
207 tty->print_cr(" immutable data = %u (%f%%)", total_immut_size, (total_immut_size * 100.0f)/total_size);
208 }
209 if (dependencies_size != 0) {
210 tty->print_cr(" dependencies = %u (%f%%)", dependencies_size, (dependencies_size * 100.0f)/total_immut_size);
211 }
212 if (nul_chk_table_size != 0) {
213 tty->print_cr(" nul chk table = %u (%f%%)", nul_chk_table_size, (nul_chk_table_size * 100.0f)/total_immut_size);
214 }
215 if (handler_table_size != 0) {
216 tty->print_cr(" handler table = %u (%f%%)", handler_table_size, (handler_table_size * 100.0f)/total_immut_size);
217 }
218 if (scopes_pcs_size != 0) {
219 tty->print_cr(" scopes pcs = %u (%f%%)", scopes_pcs_size, (scopes_pcs_size * 100.0f)/total_immut_size);
220 }
221 if (scopes_data_size != 0) {
222 tty->print_cr(" scopes data = %u (%f%%)", scopes_data_size, (scopes_data_size * 100.0f)/total_immut_size);
223 }
224 #if INCLUDE_JVMCI
225 if (speculations_size != 0) {
226 tty->print_cr(" speculations = %u (%f%%)", speculations_size, (speculations_size * 100.0f)/total_immut_size);
227 }
228 #endif
229 }
230 };
231
232 struct native_nmethod_stats_struct {
233 uint native_nmethod_count;
234 uint native_total_size;
235 uint native_relocation_size;
236 uint native_insts_size;
237 uint native_oops_size;
238 uint native_metadata_size;
239 void note_native_nmethod(nmethod* nm) {
240 native_nmethod_count += 1;
241 native_total_size += nm->size();
242 native_relocation_size += nm->relocation_size();
243 native_insts_size += nm->insts_size();
244 native_oops_size += nm->oops_size();
245 native_metadata_size += nm->metadata_size();
246 }
247 void print_native_nmethod_stats() {
248 if (native_nmethod_count == 0) return;
249 tty->print_cr("Statistics for %u native nmethods:", native_nmethod_count);
250 if (native_total_size != 0) tty->print_cr(" N. total size = %u", native_total_size);
251 if (native_relocation_size != 0) tty->print_cr(" N. relocation = %u", native_relocation_size);
252 if (native_insts_size != 0) tty->print_cr(" N. main code = %u", native_insts_size);
253 if (native_oops_size != 0) tty->print_cr(" N. oops = %u", native_oops_size);
254 if (native_metadata_size != 0) tty->print_cr(" N. metadata = %u", native_metadata_size);
255 }
256 };
257
258 struct pc_nmethod_stats_struct {
  uint pc_desc_init;     // number of initializations of the cache (= number of caches)
260 uint pc_desc_queries; // queries to nmethod::find_pc_desc
261 uint pc_desc_approx; // number of those which have approximate true
262 uint pc_desc_repeats; // number of _pc_descs[0] hits
263 uint pc_desc_hits; // number of LRU cache hits
264 uint pc_desc_tests; // total number of PcDesc examinations
265 uint pc_desc_searches; // total number of quasi-binary search steps
  uint pc_desc_adds;     // number of LRU cache insertions
267
268 void print_pc_stats() {
269 tty->print_cr("PcDesc Statistics: %u queries, %.2f comparisons per query",
270 pc_desc_queries,
271 (double)(pc_desc_tests + pc_desc_searches)
272 / pc_desc_queries);
    tty->print_cr("  caches=%u queries=%u/%u, hits=%u+%u, tests=%u+%u, adds=%u",
274 pc_desc_init,
275 pc_desc_queries, pc_desc_approx,
276 pc_desc_repeats, pc_desc_hits,
277 pc_desc_tests, pc_desc_searches, pc_desc_adds);
278 }
279 };
280
281 #ifdef COMPILER1
282 static java_nmethod_stats_struct c1_java_nmethod_stats;
283 #endif
284 #ifdef COMPILER2
285 static java_nmethod_stats_struct c2_java_nmethod_stats;
286 #endif
287 #if INCLUDE_JVMCI
288 static java_nmethod_stats_struct jvmci_java_nmethod_stats;
289 #endif
290 static java_nmethod_stats_struct unknown_java_nmethod_stats;
291
292 static native_nmethod_stats_struct native_nmethod_stats;
293 static pc_nmethod_stats_struct pc_nmethod_stats;
294
295 static void note_java_nmethod(nmethod* nm) {
296 #ifdef COMPILER1
297 if (nm->is_compiled_by_c1()) {
298 c1_java_nmethod_stats.note_nmethod(nm);
299 } else
300 #endif
301 #ifdef COMPILER2
302 if (nm->is_compiled_by_c2()) {
303 c2_java_nmethod_stats.note_nmethod(nm);
304 } else
305 #endif
306 #if INCLUDE_JVMCI
307 if (nm->is_compiled_by_jvmci()) {
308 jvmci_java_nmethod_stats.note_nmethod(nm);
309 } else
310 #endif
311 {
312 unknown_java_nmethod_stats.note_nmethod(nm);
313 }
314 }
315 #endif // !PRODUCT
316
317 //---------------------------------------------------------------------------------
318
319
320 ExceptionCache::ExceptionCache(Handle exception, address pc, address handler) {
321 assert(pc != nullptr, "Must be non null");
322 assert(exception.not_null(), "Must be non null");
323 assert(handler != nullptr, "Must be non null");
324
325 _count = 0;
326 _exception_type = exception->klass();
327 _next = nullptr;
328 _purge_list_next = nullptr;
329
330 add_address_and_handler(pc,handler);
331 }
332
333
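// Returns the cached handler for pc if this entry's exception type matches
// the given exception, or nullptr otherwise.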
334 address ExceptionCache::match(Handle exception, address pc) {
335 assert(pc != nullptr,"Must be non null");
336 assert(exception.not_null(),"Must be non null");
337 if (exception->klass() == exception_type()) {
338 return (test_address(pc));
339 }
340
341 return nullptr;
342 }
343
344
345 bool ExceptionCache::match_exception_with_space(Handle exception) {
346 assert(exception.not_null(),"Must be non null");
347 if (exception->klass() == exception_type() && count() < cache_size) {
348 return true;
349 }
350 return false;
351 }
352
353
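// Linearly scans the recorded (pc, handler) pairs and returns the handler
// registered for addr, or nullptr if there is none.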
354 address ExceptionCache::test_address(address addr) {
355 int limit = count();
356 for (int i = 0; i < limit; i++) {
357 if (pc_at(i) == addr) {
358 return handler_at(i);
359 }
360 }
361 return nullptr;
362 }
363
364
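// Records the (addr, handler) pair in this entry. Returns true if the pair
// is already present or was added; returns false if the entry is full, in
// which case the caller allocates a new ExceptionCache.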
365 bool ExceptionCache::add_address_and_handler(address addr, address handler) {
366 if (test_address(addr) == handler) return true;
367
368 int index = count();
369 if (index < cache_size) {
370 set_pc_at(index, addr);
371 set_handler_at(index, handler);
372 increment_count();
373 return true;
374 }
375 return false;
376 }
377
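// The next pointers form a singly linked list that is read lock-free;
// insertion and unlinking are ordered by the CAS operations in
// add_exception_cache_entry() and clean_exception_cache().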
378 ExceptionCache* ExceptionCache::next() {
379 return AtomicAccess::load(&_next);
380 }
381
382 void ExceptionCache::set_next(ExceptionCache *ec) {
383 AtomicAccess::store(&_next, ec);
384 }
385
386 //-----------------------------------------------------------------------------
387
388
389 // Helper used by both find_pc_desc methods.
390 static inline bool match_desc(PcDesc* pc, int pc_offset, bool approximate) {
391 NOT_PRODUCT(++pc_nmethod_stats.pc_desc_tests);
392 if (!approximate) {
393 return pc->pc_offset() == pc_offset;
394 } else {
395 // Do not look before the sentinel
396 assert(pc_offset > PcDesc::lower_offset_limit, "illegal pc_offset");
397 return pc_offset <= pc->pc_offset() && (pc-1)->pc_offset() < pc_offset;
398 }
399 }
400
401 void PcDescCache::init_to(PcDesc* initial_pc_desc) {
402 NOT_PRODUCT(++pc_nmethod_stats.pc_desc_init);
403 // initialize the cache by filling it with benign (non-null) values
404 assert(initial_pc_desc != nullptr && initial_pc_desc->pc_offset() == PcDesc::lower_offset_limit,
405 "must start with a sentinel");
406 for (int i = 0; i < cache_size; i++) {
407 _pc_descs[i] = initial_pc_desc;
408 }
409 }
410
411 PcDesc* PcDescCache::find_pc_desc(int pc_offset, bool approximate) {
412 // Note: one might think that caching the most recently
413 // read value separately would be a win, but one would be
414 // wrong. When many threads are updating it, the cache
415 // line it's in would bounce between caches, negating
416 // any benefit.
417
  // To avoid race conditions, do not load cache elements repeatedly;
  // use a local copy instead:
420 PcDesc* res;
421
422 // Step one: Check the most recently added value.
423 res = _pc_descs[0];
424 assert(res != nullptr, "PcDesc cache should be initialized already");
425
426 // Approximate only here since PcDescContainer::find_pc_desc() checked for exact case.
427 if (approximate && match_desc(res, pc_offset, approximate)) {
428 NOT_PRODUCT(++pc_nmethod_stats.pc_desc_repeats);
429 return res;
430 }
431
432 // Step two: Check the rest of the LRU cache.
433 for (int i = 1; i < cache_size; ++i) {
434 res = _pc_descs[i];
435 if (res->pc_offset() < 0) break; // optimization: skip empty cache
436 if (match_desc(res, pc_offset, approximate)) {
437 NOT_PRODUCT(++pc_nmethod_stats.pc_desc_hits);
438 return res;
439 }
440 }
441
442 // Report failure.
443 return nullptr;
444 }
445
446 void PcDescCache::add_pc_desc(PcDesc* pc_desc) {
447 NOT_PRODUCT(++pc_nmethod_stats.pc_desc_adds);
448 // Update the LRU cache by shifting pc_desc forward.
449 for (int i = 0; i < cache_size; i++) {
450 PcDesc* next = _pc_descs[i];
451 _pc_descs[i] = pc_desc;
452 pc_desc = next;
453 }
454 }
455
456 // adjust pcs_size so that it is a multiple of both oopSize and
457 // sizeof(PcDesc) (assumes that if sizeof(PcDesc) is not a multiple
458 // of oopSize, then 2*sizeof(PcDesc) is)
459 static int adjust_pcs_size(int pcs_size) {
460 int nsize = align_up(pcs_size, oopSize);
461 if ((nsize % sizeof(PcDesc)) != 0) {
462 nsize = pcs_size + sizeof(PcDesc);
463 }
464 assert((nsize % oopSize) == 0, "correct alignment");
465 return nsize;
466 }
467
468 // Returns a string version of the method state.
469 const char* nmethod::state() const {
470 int state = get_state();
471 switch (state) {
472 case not_installed:
473 return "not installed";
474 case in_use:
475 return "in use";
476 case not_entrant:
477 return "not_entrant";
478 default:
479 fatal("unexpected method state: %d", state);
480 return nullptr;
481 }
482 }
483
484 void nmethod::set_deoptimized_done() {
485 ConditionalMutexLocker ml(NMethodState_lock, !NMethodState_lock->owned_by_self(), Mutex::_no_safepoint_check_flag);
486 if (_deoptimization_status != deoptimize_done) { // can't go backwards
487 AtomicAccess::store(&_deoptimization_status, deoptimize_done);
488 }
489 }
490
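// Acquire-load of the exception cache head, so that a fully constructed
// entry published by add_exception_cache_entry() is visible to readers.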
491 ExceptionCache* nmethod::exception_cache_acquire() const {
492 return AtomicAccess::load_acquire(&_exception_cache);
493 }
494
495 void nmethod::add_exception_cache_entry(ExceptionCache* new_entry) {
496 assert(ExceptionCache_lock->owned_by_self(),"Must hold the ExceptionCache_lock");
497 assert(new_entry != nullptr,"Must be non null");
498 assert(new_entry->next() == nullptr, "Must be null");
499
500 for (;;) {
501 ExceptionCache *ec = exception_cache();
502 if (ec != nullptr) {
503 Klass* ex_klass = ec->exception_type();
504 if (!ex_klass->is_loader_alive()) {
        // We must guarantee that entries are not inserted with new next pointer
        // edges to ExceptionCache entries with dead klasses, due to bad interactions
        // with concurrent ExceptionCache cleanup. Therefore, inserts roll the
        // head pointer forward to the first live ExceptionCache, so that new
        // next pointers always point at live ExceptionCaches that will not be
        // removed by concurrent ExceptionCache cleanup.
511 ExceptionCache* next = ec->next();
512 if (AtomicAccess::cmpxchg(&_exception_cache, ec, next) == ec) {
513 CodeCache::release_exception_cache(ec);
514 }
515 continue;
516 }
517 ec = exception_cache();
518 if (ec != nullptr) {
519 new_entry->set_next(ec);
520 }
521 }
522 if (AtomicAccess::cmpxchg(&_exception_cache, ec, new_entry) == ec) {
523 return;
524 }
525 }
526 }
527
528 void nmethod::clean_exception_cache() {
529 // For each nmethod, only a single thread may call this cleanup function
530 // at the same time, whether called in STW cleanup or concurrent cleanup.
531 // Note that if the GC is processing exception cache cleaning in a concurrent phase,
532 // then a single writer may contend with cleaning up the head pointer to the
533 // first ExceptionCache node that has a Klass* that is alive. That is fine,
534 // as long as there is no concurrent cleanup of next pointers from concurrent writers.
535 // And the concurrent writers do not clean up next pointers, only the head.
536 // Also note that concurrent readers will walk through Klass* pointers that are not
537 // alive. That does not cause ABA problems, because Klass* is deleted after
538 // a handshake with all threads, after all stale ExceptionCaches have been
539 // unlinked. That is also when the CodeCache::exception_cache_purge_list()
540 // is deleted, with all ExceptionCache entries that were cleaned concurrently.
541 // That similarly implies that CAS operations on ExceptionCache entries do not
542 // suffer from ABA problems as unlinking and deletion is separated by a global
543 // handshake operation.
544 ExceptionCache* prev = nullptr;
545 ExceptionCache* curr = exception_cache_acquire();
546
547 while (curr != nullptr) {
548 ExceptionCache* next = curr->next();
549
550 if (!curr->exception_type()->is_loader_alive()) {
551 if (prev == nullptr) {
552 // Try to clean head; this is contended by concurrent inserts, that
553 // both lazily clean the head, and insert entries at the head. If
554 // the CAS fails, the operation is restarted.
555 if (AtomicAccess::cmpxchg(&_exception_cache, curr, next) != curr) {
556 prev = nullptr;
557 curr = exception_cache_acquire();
558 continue;
559 }
560 } else {
        // During cleanup, it is impossible to connect the next pointer to an
        // ExceptionCache that was not already published before a safepoint
        // preceding the cleanup. Therefore, release ordering is not required.
564 prev->set_next(next);
565 }
566 // prev stays the same.
567
568 CodeCache::release_exception_cache(curr);
569 } else {
570 prev = curr;
571 }
572
573 curr = next;
574 }
575 }
576
// These are the public methods for accessing the exception cache.
579 address nmethod::handler_for_exception_and_pc(Handle exception, address pc) {
580 // We never grab a lock to read the exception cache, so we may
581 // have false negatives. This is okay, as it can only happen during
582 // the first few exception lookups for a given nmethod.
583 ExceptionCache* ec = exception_cache_acquire();
584 while (ec != nullptr) {
585 address ret_val;
586 if ((ret_val = ec->match(exception,pc)) != nullptr) {
587 return ret_val;
588 }
589 ec = ec->next();
590 }
591 return nullptr;
592 }
593
594 void nmethod::add_handler_for_exception_and_pc(Handle exception, address pc, address handler) {
595 // There are potential race conditions during exception cache updates, so we
596 // must own the ExceptionCache_lock before doing ANY modifications. Because
597 // we don't lock during reads, it is possible to have several threads attempt
598 // to update the cache with the same data. We need to check for already inserted
599 // copies of the current data before adding it.
600
601 MutexLocker ml(ExceptionCache_lock);
602 ExceptionCache* target_entry = exception_cache_entry_for_exception(exception);
603
604 if (target_entry == nullptr || !target_entry->add_address_and_handler(pc,handler)) {
605 target_entry = new ExceptionCache(exception,pc,handler);
606 add_exception_cache_entry(target_entry);
607 }
608 }
609
// These methods are private, and manipulate the exception cache directly.
613 ExceptionCache* nmethod::exception_cache_entry_for_exception(Handle exception) {
614 ExceptionCache* ec = exception_cache_acquire();
615 while (ec != nullptr) {
616 if (ec->match_exception_with_space(exception)) {
617 return ec;
618 }
619 ec = ec->next();
620 }
621 return nullptr;
622 }
623
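// Returns true if the relocation info records a poll_return at pc.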
624 bool nmethod::is_at_poll_return(address pc) {
625 RelocIterator iter(this, pc, pc+1);
626 while (iter.next()) {
627 if (iter.type() == relocInfo::poll_return_type)
628 return true;
629 }
630 return false;
631 }
632
633
634 bool nmethod::is_at_poll_or_poll_return(address pc) {
635 RelocIterator iter(this, pc, pc+1);
636 while (iter.next()) {
637 relocInfo::relocType t = iter.type();
638 if (t == relocInfo::poll_return_type || t == relocInfo::poll_type)
639 return true;
640 }
641 return false;
642 }
643
644 void nmethod::verify_oop_relocations() {
  // Ensure that the code matches the current oop values
646 RelocIterator iter(this, nullptr, nullptr);
647 while (iter.next()) {
648 if (iter.type() == relocInfo::oop_type) {
649 oop_Relocation* reloc = iter.oop_reloc();
650 if (!reloc->oop_is_immediate()) {
651 reloc->verify_oop_relocation();
652 }
653 }
654 }
655 }
656
657
658 ScopeDesc* nmethod::scope_desc_at(address pc) {
659 PcDesc* pd = pc_desc_at(pc);
660 guarantee(pd != nullptr, "scope must be present");
661 return new ScopeDesc(this, pd);
662 }
663
664 ScopeDesc* nmethod::scope_desc_near(address pc) {
665 PcDesc* pd = pc_desc_near(pc);
666 guarantee(pd != nullptr, "scope must be present");
667 return new ScopeDesc(this, pd);
668 }
669
670 address nmethod::oops_reloc_begin() const {
671 // If the method is not entrant then a JMP is plastered over the
672 // first few bytes. If an oop in the old code was there, that oop
673 // should not get GC'd. Skip the first few bytes of oops on
674 // not-entrant methods.
675 if (frame_complete_offset() != CodeOffsets::frame_never_safe &&
676 code_begin() + frame_complete_offset() >
677 verified_entry_point() + NativeJump::instruction_size)
678 {
679 // If we have a frame_complete_offset after the native jump, then there
680 // is no point trying to look for oops before that. This is a requirement
681 // for being allowed to scan oops concurrently.
682 return code_begin() + frame_complete_offset();
683 }
684
685 address low_boundary = verified_entry_point();
686 return low_boundary;
687 }
688
// Preserves the outgoing argument oops at a call site. This method must be
// called with a frame corresponding to a Java invoke.
691 void nmethod::preserve_callee_argument_oops(frame fr, const RegisterMap *reg_map, OopClosure* f) {
692 if (method() == nullptr) {
693 return;
694 }
695
696 // handle the case of an anchor explicitly set in continuation code that doesn't have a callee
697 JavaThread* thread = reg_map->thread();
698 if ((thread->has_last_Java_frame() && fr.sp() == thread->last_Java_sp())
699 JVMTI_ONLY(|| (method()->is_continuation_enter_intrinsic() && thread->on_monitor_waited_event()))) {
700 return;
701 }
702
703 if (!method()->is_native()) {
704 address pc = fr.pc();
705 bool has_receiver, has_appendix;
706 Symbol* signature;
707
708 // The method attached by JIT-compilers should be used, if present.
709 // Bytecode can be inaccurate in such case.
710 Method* callee = attached_method_before_pc(pc);
711 if (callee != nullptr) {
712 has_receiver = !(callee->access_flags().is_static());
713 has_appendix = false;
714 signature = callee->signature();
715 } else {
716 SimpleScopeDesc ssd(this, pc);
717
718 Bytecode_invoke call(methodHandle(Thread::current(), ssd.method()), ssd.bci());
719 has_receiver = call.has_receiver();
720 has_appendix = call.has_appendix();
721 signature = call.signature();
722 }
723
724 fr.oops_compiled_arguments_do(signature, has_receiver, has_appendix, reg_map, f);
725 } else if (method()->is_continuation_enter_intrinsic()) {
726 // This method only calls Continuation.enter()
727 Symbol* signature = vmSymbols::continuationEnter_signature();
728 fr.oops_compiled_arguments_do(signature, false, false, reg_map, f);
729 }
730 }
731
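// Returns the Method* that the JIT compiler attached to the call relocation
// at call_instr, or nullptr if no method is attached.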
732 Method* nmethod::attached_method(address call_instr) {
733 assert(code_contains(call_instr), "not part of the nmethod");
734 RelocIterator iter(this, call_instr, call_instr + 1);
735 while (iter.next()) {
736 if (iter.addr() == call_instr) {
737 switch(iter.type()) {
738 case relocInfo::static_call_type: return iter.static_call_reloc()->method_value();
739 case relocInfo::opt_virtual_call_type: return iter.opt_virtual_call_reloc()->method_value();
740 case relocInfo::virtual_call_type: return iter.virtual_call_reloc()->method_value();
741 default: break;
742 }
743 }
744 }
745 return nullptr; // not found
746 }
747
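// Like attached_method(), but pc is the address immediately following the
// call instruction.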
748 Method* nmethod::attached_method_before_pc(address pc) {
749 if (NativeCall::is_call_before(pc)) {
750 NativeCall* ncall = nativeCall_before(pc);
751 return attached_method(ncall->instruction_address());
752 }
753 return nullptr; // not a call
754 }
755
756 void nmethod::clear_inline_caches() {
757 assert(SafepointSynchronize::is_at_safepoint() || (NMethodState_lock->owned_by_self() && is_not_installed()), "clearing of IC's only allowed at safepoint or when not installed");
758 RelocIterator iter(this);
759 while (iter.next()) {
760 iter.reloc()->clear_inline_cache();
761 }
762 }
763
764 #ifdef ASSERT
765 // Check class_loader is alive for this bit of metadata.
766 class CheckClass : public MetadataClosure {
767 void do_metadata(Metadata* md) {
768 Klass* klass = nullptr;
769 if (md->is_klass()) {
770 klass = ((Klass*)md);
771 } else if (md->is_method()) {
772 klass = ((Method*)md)->method_holder();
773 } else if (md->is_methodData()) {
774 klass = ((MethodData*)md)->method()->method_holder();
775 } else if (md->is_methodCounters()) {
776 klass = ((MethodCounters*)md)->method()->method_holder();
777 } else {
778 md->print();
779 ShouldNotReachHere();
780 }
781 assert(klass->is_loader_alive(), "must be alive");
782 }
783 };
784 #endif // ASSERT
785
// Clean a callsite in this nmethod (which is not unloaded) if it points to
// an unloaded nmethod.
787 template <typename CallsiteT>
788 static void clean_if_nmethod_is_unloaded(CallsiteT* callsite, bool clean_all) {
789 CodeBlob* cb = CodeCache::find_blob(callsite->destination());
790 if (!cb->is_nmethod()) {
791 return;
792 }
793 nmethod* nm = cb->as_nmethod();
794 if (clean_all || !nm->is_in_use() || nm->is_unloading() || nm->method()->code() != nm) {
795 callsite->set_to_clean();
796 }
797 }
798
799 // Cleans caches in nmethods that point to either classes that are unloaded
800 // or nmethods that are unloaded.
801 //
// Can be called in parallel (currently only by G1), or after all nmethods are
// unloaded. In the parallel case, inline caches may be found that point to
// nmethods that have not yet been visited during the do_unloading walk.
806 void nmethod::unload_nmethod_caches(bool unloading_occurred) {
807 ResourceMark rm;
808
809 // Exception cache only needs to be called if unloading occurred
810 if (unloading_occurred) {
811 clean_exception_cache();
812 }
813
814 cleanup_inline_caches_impl(unloading_occurred, false);
815
816 #ifdef ASSERT
817 // Check that the metadata embedded in the nmethod is alive
818 CheckClass check_class;
819 metadata_do(&check_class);
820 #endif
821 }
822
823 void nmethod::run_nmethod_entry_barrier() {
824 BarrierSetNMethod* bs_nm = BarrierSet::barrier_set()->barrier_set_nmethod();
825 if (bs_nm != nullptr) {
    // We want to keep an invariant that nmethods found through iteration of a
    // Thread's nmethods in safepoints have gone through an entry barrier and
    // are not armed. By calling this nmethod entry barrier, this nmethod plays
    // along and acts like any other nmethod found on the stack of a thread
    // (fewer surprises).
830 nmethod* nm = this;
831 bool alive = bs_nm->nmethod_entry_barrier(nm);
832 assert(alive, "should be alive");
833 }
834 }
835
836 // Only called by whitebox test
837 void nmethod::cleanup_inline_caches_whitebox() {
838 assert_locked_or_safepoint(CodeCache_lock);
839 CompiledICLocker ic_locker(this);
840 cleanup_inline_caches_impl(false /* unloading_occurred */, true /* clean_all */);
841 }
842
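// Location within the frame where the original pc is saved when the frame
// is patched for deoptimization.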
843 address* nmethod::orig_pc_addr(const frame* fr) {
844 return (address*) ((address)fr->unextended_sp() + orig_pc_offset());
845 }
846
847 // Called to clean up after class unloading for live nmethods
848 void nmethod::cleanup_inline_caches_impl(bool unloading_occurred, bool clean_all) {
849 assert(CompiledICLocker::is_safe(this), "mt unsafe call");
850 ResourceMark rm;
851
852 // Find all calls in an nmethod and clear the ones that point to bad nmethods.
853 RelocIterator iter(this, oops_reloc_begin());
854 bool is_in_static_stub = false;
855 while(iter.next()) {
856
857 switch (iter.type()) {
858
859 case relocInfo::virtual_call_type:
860 if (unloading_occurred) {
861 // If class unloading occurred we first clear ICs where the cached metadata
862 // is referring to an unloaded klass or method.
863 CompiledIC_at(&iter)->clean_metadata();
864 }
865
866 clean_if_nmethod_is_unloaded(CompiledIC_at(&iter), clean_all);
867 break;
868
869 case relocInfo::opt_virtual_call_type:
870 case relocInfo::static_call_type:
871 clean_if_nmethod_is_unloaded(CompiledDirectCall::at(iter.reloc()), clean_all);
872 break;
873
874 case relocInfo::static_stub_type: {
875 is_in_static_stub = true;
876 break;
877 }
878
879 case relocInfo::metadata_type: {
      // Only the metadata relocations contained in static/opt virtual call stubs
      // contain the Method* passed to c2i adapters. It is the only metadata
      // relocation that needs to be walked, as it is the one metadata relocation
      // that violates the invariant that all metadata relocations have an oop
      // in the compiled method (due to deferred resolution and code patching).
885
886 // This causes dead metadata to remain in compiled methods that are not
887 // unloading. Unless these slippery metadata relocations of the static
888 // stubs are at least cleared, subsequent class redefinition operations
889 // will access potentially free memory, and JavaThread execution
890 // concurrent to class unloading may call c2i adapters with dead methods.
891 if (!is_in_static_stub) {
892 // The first metadata relocation after a static stub relocation is the
893 // metadata relocation of the static stub used to pass the Method* to
894 // c2i adapters.
895 continue;
896 }
897 is_in_static_stub = false;
898 if (is_unloading()) {
899 // If the nmethod itself is dying, then it may point at dead metadata.
900 // Nobody should follow that metadata; it is strictly unsafe.
901 continue;
902 }
903 metadata_Relocation* r = iter.metadata_reloc();
904 Metadata* md = r->metadata_value();
905 if (md != nullptr && md->is_method()) {
906 Method* method = static_cast<Method*>(md);
907 if (!method->method_holder()->is_loader_alive()) {
908 AtomicAccess::store(r->metadata_addr(), (Method*)nullptr);
909
910 if (!r->metadata_is_immediate()) {
911 r->fix_metadata_relocation();
912 }
913 }
914 }
915 break;
916 }
917
918 default:
919 break;
920 }
921 }
922 }
923
924 address nmethod::continuation_for_implicit_exception(address pc, bool for_div0_check) {
925 // Exception happened outside inline-cache check code => we are inside
926 // an active nmethod => use cpc to determine a return address
927 int exception_offset = int(pc - code_begin());
928 int cont_offset = ImplicitExceptionTable(this).continuation_offset( exception_offset );
929 #ifdef ASSERT
930 if (cont_offset == 0) {
931 Thread* thread = Thread::current();
932 ResourceMark rm(thread);
933 CodeBlob* cb = CodeCache::find_blob(pc);
934 assert(cb != nullptr && cb == this, "");
935
    // Keep tty output consistent. To avoid ttyLocker, we buffer into a stream and print all at once.
937 stringStream ss;
938 ss.print_cr("implicit exception happened at " INTPTR_FORMAT, p2i(pc));
939 print_on(&ss);
940 method()->print_codes_on(&ss);
941 print_code_on(&ss);
942 print_pcs_on(&ss);
943 tty->print("%s", ss.as_string()); // print all at once
944 }
945 #endif
946 if (cont_offset == 0) {
947 // Let the normal error handling report the exception
948 return nullptr;
949 }
950 if (cont_offset == exception_offset) {
951 #if INCLUDE_JVMCI
952 Deoptimization::DeoptReason deopt_reason = for_div0_check ? Deoptimization::Reason_div0_check : Deoptimization::Reason_null_check;
953 JavaThread *thread = JavaThread::current();
954 thread->set_jvmci_implicit_exception_pc(pc);
955 thread->set_pending_deoptimization(Deoptimization::make_trap_request(deopt_reason,
956 Deoptimization::Action_reinterpret));
957 return (SharedRuntime::deopt_blob()->implicit_exception_uncommon_trap());
958 #else
959 ShouldNotReachHere();
960 #endif
961 }
962 return code_begin() + cont_offset;
963 }
964
965 class HasEvolDependency : public MetadataClosure {
966 bool _has_evol_dependency;
967 public:
968 HasEvolDependency() : _has_evol_dependency(false) {}
969 void do_metadata(Metadata* md) {
970 if (md->is_method()) {
971 Method* method = (Method*)md;
972 if (method->is_old()) {
973 _has_evol_dependency = true;
974 }
975 }
976 }
977 bool has_evol_dependency() const { return _has_evol_dependency; }
978 };
979
980 bool nmethod::has_evol_metadata() {
981 // Check the metadata in relocIter and CompiledIC and also deoptimize
982 // any nmethod that has reference to old methods.
983 HasEvolDependency check_evol;
984 metadata_do(&check_evol);
985 if (check_evol.has_evol_dependency() && log_is_enabled(Debug, redefine, class, nmethod)) {
986 ResourceMark rm;
987 log_debug(redefine, class, nmethod)
            ("Found evol dependency of nmethod %s.%s(%s) compile_id=%d in nmethod metadata",
989 _method->method_holder()->external_name(),
990 _method->name()->as_C_string(),
991 _method->signature()->as_C_string(),
992 compile_id());
993 }
994 return check_evol.has_evol_dependency();
995 }
996
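// Sum of the sizes of the main nmethod data sections (code, constants,
// stubs, and the debug and exception tables).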
997 int nmethod::total_size() const {
998 return
999 consts_size() +
1000 insts_size() +
1001 stub_size() +
1002 scopes_data_size() +
1003 scopes_pcs_size() +
1004 handler_table_size() +
1005 nul_chk_table_size();
1006 }
1007
1008 const char* nmethod::compile_kind() const {
1009 if (is_osr_method()) return "osr";
1010 if (method() != nullptr && is_native_method()) {
1011 if (method()->is_continuation_native_intrinsic()) {
1012 return "cnt";
1013 }
1014 return "c2n";
1015 }
1016 return nullptr;
1017 }
1018
1019 const char* nmethod::compiler_name() const {
1020 return compilertype2name(_compiler_type);
1021 }
1022
1023 #ifdef ASSERT
1024 class CheckForOopsClosure : public OopClosure {
1025 bool _found_oop = false;
1026 public:
1027 virtual void do_oop(oop* o) { _found_oop = true; }
1028 virtual void do_oop(narrowOop* o) { _found_oop = true; }
1029 bool found_oop() { return _found_oop; }
1030 };
1031 class CheckForMetadataClosure : public MetadataClosure {
1032 bool _found_metadata = false;
1033 Metadata* _ignore = nullptr;
1034 public:
1035 CheckForMetadataClosure(Metadata* ignore) : _ignore(ignore) {}
1036 virtual void do_metadata(Metadata* md) { if (md != _ignore) _found_metadata = true; }
1037 bool found_metadata() { return _found_metadata; }
1038 };
1039
1040 static void assert_no_oops_or_metadata(nmethod* nm) {
1041 if (nm == nullptr) return;
1042 assert(nm->oop_maps() == nullptr, "expectation");
1043
1044 CheckForOopsClosure cfo;
1045 nm->oops_do(&cfo);
1046 assert(!cfo.found_oop(), "no oops allowed");
1047
  // We allow an exception for the nmethod's own Method, but require its class to be permanent.
1049 Method* own_method = nm->method();
1050 CheckForMetadataClosure cfm(/* ignore reference to own Method */ own_method);
1051 nm->metadata_do(&cfm);
1052 assert(!cfm.found_metadata(), "no metadata allowed");
1053
1054 assert(own_method->method_holder()->class_loader_data()->is_permanent_class_loader_data(),
1055 "Method's class needs to be permanent");
1056 }
1057 #endif
1058
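// Size of the mutable data section: relocations, metadata, and (for JVMCI
// compilations) JVMCI data, each aligned up to oopSize.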
1059 static int required_mutable_data_size(CodeBuffer* code_buffer,
1060 int jvmci_data_size = 0) {
1061 return align_up(code_buffer->total_relocation_size(), oopSize) +
1062 align_up(code_buffer->total_metadata_size(), oopSize) +
1063 align_up(jvmci_data_size, oopSize);
1064 }
1065
1066 nmethod* nmethod::new_native_nmethod(const methodHandle& method,
1067 int compile_id,
1068 CodeBuffer *code_buffer,
1069 int vep_offset,
1070 int frame_complete,
1071 int frame_size,
1072 ByteSize basic_lock_owner_sp_offset,
1073 ByteSize basic_lock_sp_offset,
1074 OopMapSet* oop_maps,
1075 int exception_handler) {
1076 code_buffer->finalize_oop_references(method);
1077 // create nmethod
1078 nmethod* nm = nullptr;
1079 int native_nmethod_size = CodeBlob::allocation_size(code_buffer, sizeof(nmethod));
1080 {
1081 MutexLocker mu(CodeCache_lock, Mutex::_no_safepoint_check_flag);
1082
1083 CodeOffsets offsets;
1084 offsets.set_value(CodeOffsets::Verified_Entry, vep_offset);
1085 offsets.set_value(CodeOffsets::Frame_Complete, frame_complete);
1086 if (exception_handler != -1) {
1087 offsets.set_value(CodeOffsets::Exceptions, exception_handler);
1088 }
1089
1090 int mutable_data_size = required_mutable_data_size(code_buffer);
1091
1092 // MH intrinsics are dispatch stubs which are compatible with NonNMethod space.
1093 // IsUnloadingBehaviour::is_unloading needs to handle them separately.
1094 bool allow_NonNMethod_space = method->can_be_allocated_in_NonNMethod_space();
1095 nm = new (native_nmethod_size, allow_NonNMethod_space)
1096 nmethod(method(), compiler_none, native_nmethod_size,
1097 compile_id, &offsets,
1098 code_buffer, frame_size,
1099 basic_lock_owner_sp_offset,
1100 basic_lock_sp_offset,
1101 oop_maps, mutable_data_size);
1102 DEBUG_ONLY( if (allow_NonNMethod_space) assert_no_oops_or_metadata(nm); )
1103 NOT_PRODUCT(if (nm != nullptr) native_nmethod_stats.note_native_nmethod(nm));
1104 }
1105
1106 if (nm != nullptr) {
1107 // verify nmethod
1108 DEBUG_ONLY(nm->verify();) // might block
1109
1110 nm->log_new_nmethod();
1111 }
1112 return nm;
1113 }
1114
1115 nmethod* nmethod::new_nmethod(const methodHandle& method,
1116 int compile_id,
1117 int entry_bci,
1118 CodeOffsets* offsets,
1119 int orig_pc_offset,
1120 DebugInformationRecorder* debug_info,
1121 Dependencies* dependencies,
1122 CodeBuffer* code_buffer, int frame_size,
1123 OopMapSet* oop_maps,
1124 ExceptionHandlerTable* handler_table,
1125 ImplicitExceptionTable* nul_chk_table,
1126 AbstractCompiler* compiler,
1127 CompLevel comp_level
1128 #if INCLUDE_JVMCI
1129 , char* speculations,
1130 int speculations_len,
1131 JVMCINMethodData* jvmci_data
1132 #endif
1133 )
1134 {
1135 assert(debug_info->oop_recorder() == code_buffer->oop_recorder(), "shared OR");
1136 code_buffer->finalize_oop_references(method);
1137 // create nmethod
1138 nmethod* nm = nullptr;
1139 int nmethod_size = CodeBlob::allocation_size(code_buffer, sizeof(nmethod));
1140
1141 int immutable_data_size =
1142 adjust_pcs_size(debug_info->pcs_size())
1143 + align_up((int)dependencies->size_in_bytes(), oopSize)
1144 + align_up(handler_table->size_in_bytes() , oopSize)
1145 + align_up(nul_chk_table->size_in_bytes() , oopSize)
1146 #if INCLUDE_JVMCI
1147 + align_up(speculations_len , oopSize)
1148 #endif
1149 + align_up(debug_info->data_size() , oopSize)
1150 + align_up(ImmutableDataReferencesCounterSize, oopSize);
1151
1152 // First, allocate space for immutable data in C heap.
1153 address immutable_data = nullptr;
1154 if (immutable_data_size > 0) {
1155 immutable_data = (address)os::malloc(immutable_data_size, mtCode);
1156 if (immutable_data == nullptr) {
1157 vm_exit_out_of_memory(immutable_data_size, OOM_MALLOC_ERROR, "nmethod: no space for immutable data");
1158 return nullptr;
1159 }
1160 }
1161
1162 int mutable_data_size = required_mutable_data_size(code_buffer
1163 JVMCI_ONLY(COMMA (compiler->is_jvmci() ? jvmci_data->size() : 0)));
1164
1165 {
1166 MutexLocker mu(CodeCache_lock, Mutex::_no_safepoint_check_flag);
1167
1168 nm = new (nmethod_size, comp_level)
1169 nmethod(method(), compiler->type(), nmethod_size, immutable_data_size, mutable_data_size,
1170 compile_id, entry_bci, immutable_data, offsets, orig_pc_offset,
1171 debug_info, dependencies, code_buffer, frame_size, oop_maps,
1172 handler_table, nul_chk_table, compiler, comp_level
1173 #if INCLUDE_JVMCI
1174 , speculations,
1175 speculations_len,
1176 jvmci_data
1177 #endif
1178 );
1179
1180 if (nm != nullptr) {
1181 // To make dependency checking during class loading fast, record
1182 // the nmethod dependencies in the classes it is dependent on.
1183 // This allows the dependency checking code to simply walk the
1184 // class hierarchy above the loaded class, checking only nmethods
1185 // which are dependent on those classes. The slow way is to
1186 // check every nmethod for dependencies which makes it linear in
      // the number of methods compiled. For applications with a lot of
      // classes the slow way is too slow.
1189 for (Dependencies::DepStream deps(nm); deps.next(); ) {
1190 if (deps.type() == Dependencies::call_site_target_value) {
1191 // CallSite dependencies are managed on per-CallSite instance basis.
1192 oop call_site = deps.argument_oop(0);
1193 MethodHandles::add_dependent_nmethod(call_site, nm);
1194 } else {
1195 InstanceKlass* ik = deps.context_type();
1196 if (ik == nullptr) {
1197 continue; // ignore things like evol_method
1198 }
1199 // record this nmethod as dependent on this klass
1200 ik->add_dependent_nmethod(nm);
1201 }
1202 }
1203 NOT_PRODUCT(if (nm != nullptr) note_java_nmethod(nm));
1204 }
1205 }
1206 // Do verification and logging outside CodeCache_lock.
1207 if (nm != nullptr) {
1208 // Safepoints in nmethod::verify aren't allowed because nm hasn't been installed yet.
1209 DEBUG_ONLY(nm->verify();)
1210 nm->log_new_nmethod();
1211 }
1212 return nm;
1213 }
1214
1215 // Fill in default values for various fields
1216 void nmethod::init_defaults(CodeBuffer *code_buffer, CodeOffsets* offsets) {
1217 // avoid uninitialized fields, even for short time periods
1218 _exception_cache = nullptr;
1219 _gc_data = nullptr;
1220 _oops_do_mark_link = nullptr;
1221 _compiled_ic_data = nullptr;
1222
1223 _is_unloading_state = 0;
1224 _state = not_installed;
1225
1226 _has_unsafe_access = 0;
1227 _has_wide_vectors = 0;
1228 _has_monitors = 0;
1229 _has_scoped_access = 0;
1230 _has_flushed_dependencies = 0;
1231 _is_unlinked = 0;
1232 _load_reported = 0; // jvmti state
1233
1234 _deoptimization_status = not_marked;
1235
1236 // SECT_CONSTS is first in code buffer so the offset should be 0.
1237 int consts_offset = code_buffer->total_offset_of(code_buffer->consts());
1238 assert(consts_offset == 0, "const_offset: %d", consts_offset);
1239
1240 _stub_offset = content_offset() + code_buffer->total_offset_of(code_buffer->stubs());
1241
1242 CHECKED_CAST(_entry_offset, uint16_t, (offsets->value(CodeOffsets::Entry)));
1243 CHECKED_CAST(_verified_entry_offset, uint16_t, (offsets->value(CodeOffsets::Verified_Entry)));
1244
1245 _skipped_instructions_size = code_buffer->total_skipped_instructions_size();
1246 }
1247
1248 // Post initialization
1249 void nmethod::post_init() {
1250 clear_unloading_state();
1251
1252 finalize_relocations();
1253
1254 Universe::heap()->register_nmethod(this);
1255 DEBUG_ONLY(Universe::heap()->verify_nmethod(this));
1256
1257 CodeCache::commit(this);
1258 }
1259
1260 // For native wrappers
1261 nmethod::nmethod(
1262 Method* method,
1263 CompilerType type,
1264 int nmethod_size,
1265 int compile_id,
1266 CodeOffsets* offsets,
1267 CodeBuffer* code_buffer,
1268 int frame_size,
1269 ByteSize basic_lock_owner_sp_offset,
1270 ByteSize basic_lock_sp_offset,
1271 OopMapSet* oop_maps,
1272 int mutable_data_size)
1273 : CodeBlob("native nmethod", CodeBlobKind::Nmethod, code_buffer, nmethod_size, sizeof(nmethod),
1274 offsets->value(CodeOffsets::Frame_Complete), frame_size, oop_maps, false, mutable_data_size),
1275 _deoptimization_generation(0),
1276 _gc_epoch(CodeCache::gc_epoch()),
1277 _method(method),
1278 _native_receiver_sp_offset(basic_lock_owner_sp_offset),
1279 _native_basic_lock_sp_offset(basic_lock_sp_offset)
1280 {
1281 {
1282 DEBUG_ONLY(NoSafepointVerifier nsv;)
1283 assert_locked_or_safepoint(CodeCache_lock);
1284
1285 init_defaults(code_buffer, offsets);
1286
1287 _osr_entry_point = nullptr;
1288 _pc_desc_container = nullptr;
1289 _entry_bci = InvocationEntryBci;
1290 _compile_id = compile_id;
1291 _comp_level = CompLevel_none;
1292 _compiler_type = type;
1293 _orig_pc_offset = 0;
1294 _num_stack_arg_slots = 0;
1295
1296 if (offsets->value(CodeOffsets::Exceptions) != -1) {
1297 // Continuation enter intrinsic
1298 _exception_offset = code_offset() + offsets->value(CodeOffsets::Exceptions);
1299 } else {
1300 _exception_offset = 0;
1301 }
    // Native wrappers do not have deopt handlers. Make the values
    // something that will never match a pc (like the nmethod vtable entry).
1304 _deopt_handler_offset = 0;
1305 _unwind_handler_offset = 0;
1306
1307 CHECKED_CAST(_oops_size, uint16_t, align_up(code_buffer->total_oop_size(), oopSize));
1308 uint16_t metadata_size;
1309 CHECKED_CAST(metadata_size, uint16_t, align_up(code_buffer->total_metadata_size(), wordSize));
1310 JVMCI_ONLY( _metadata_size = metadata_size; )
1311 assert(_mutable_data_size == _relocation_size + metadata_size,
1312 "wrong mutable data size: %d != %d + %d",
1313 _mutable_data_size, _relocation_size, metadata_size);
1314
    // A native wrapper does not have read-only data, but we need a unique, non-null address.
1316 _immutable_data = blob_end();
1317 _immutable_data_size = 0;
1318 _nul_chk_table_offset = 0;
1319 _handler_table_offset = 0;
1320 _scopes_pcs_offset = 0;
1321 _scopes_data_offset = 0;
1322 #if INCLUDE_JVMCI
1323 _speculations_offset = 0;
1324 #endif
1325
1326 code_buffer->copy_code_and_locs_to(this);
1327 code_buffer->copy_values_to(this);
1328
1329 post_init();
1330 }
1331
1332 if (PrintNativeNMethods || PrintDebugInfo || PrintRelocations || PrintDependencies) {
1333 ttyLocker ttyl; // keep the following output all in one block
1334 // This output goes directly to the tty, not the compiler log.
1335 // To enable tools to match it up with the compilation activity,
1336 // be sure to tag this tty output with the compile ID.
1337 if (xtty != nullptr) {
1338 xtty->begin_head("print_native_nmethod");
1339 xtty->method(_method);
1340 xtty->stamp();
1341 xtty->end_head(" address='" INTPTR_FORMAT "'", (intptr_t) this);
1342 }
    // Print the header part, then print the requested information.
    // Both are handled in decode2(), called via print_code() -> decode().
1345 if (PrintNativeNMethods) {
1346 tty->print_cr("-------------------------- Assembly (native nmethod) ---------------------------");
1347 print_code();
1348 tty->print_cr("- - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - ");
1349 #if defined(SUPPORT_DATA_STRUCTS)
1350 if (AbstractDisassembler::show_structs()) {
1351 if (oop_maps != nullptr) {
1352 tty->print("oop maps:"); // oop_maps->print_on(tty) outputs a cr() at the beginning
1353 oop_maps->print_on(tty);
1354 tty->print_cr("- - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - ");
1355 }
1356 }
1357 #endif
1358 } else {
1359 print(); // print the header part only.
1360 }
1361 #if defined(SUPPORT_DATA_STRUCTS)
1362 if (AbstractDisassembler::show_structs()) {
1363 if (PrintRelocations) {
1364 print_relocations();
1365 tty->print_cr("- - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - ");
1366 }
1367 }
1368 #endif
1369 if (xtty != nullptr) {
1370 xtty->tail("print_native_nmethod");
1371 }
1372 }
1373 }
1374
1375
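// Copy constructor, used by nmethod::relocate() to clone this nmethod into a
// newly allocated code cache location. Mutable data is duplicated into the
// C heap; immutable data is shared via reference counting.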
1376 nmethod::nmethod(const nmethod &nm) : CodeBlob(nm._name, nm._kind, nm._size, nm._header_size)
1377 {
1378
1379 if (nm._oop_maps != nullptr) {
1380 _oop_maps = nm._oop_maps->clone();
1381 } else {
1382 _oop_maps = nullptr;
1383 }
1384
1385 _size = nm._size;
1386 _relocation_size = nm._relocation_size;
1387 _content_offset = nm._content_offset;
1388 _code_offset = nm._code_offset;
1389 _data_offset = nm._data_offset;
1390 _frame_size = nm._frame_size;
1391
1392 S390_ONLY( _ctable_offset = nm._ctable_offset; )
1393
1394 _header_size = nm._header_size;
1395 _frame_complete_offset = nm._frame_complete_offset;
1396
1397 _kind = nm._kind;
1398
1399 _caller_must_gc_arguments = nm._caller_must_gc_arguments;
1400
1401 #ifndef PRODUCT
1402 _asm_remarks.share(nm._asm_remarks);
1403 _dbg_strings.share(nm._dbg_strings);
1404 #endif
1405
1406 // Allocate memory and copy mutable data to C heap
1407 _mutable_data_size = nm._mutable_data_size;
1408 if (_mutable_data_size > 0) {
1409 _mutable_data = (address)os::malloc(_mutable_data_size, mtCode);
1410 if (_mutable_data == nullptr) {
1411 vm_exit_out_of_memory(_mutable_data_size, OOM_MALLOC_ERROR, "nmethod: no space for mutable data");
1412 }
1413 memcpy(mutable_data_begin(), nm.mutable_data_begin(), nm.mutable_data_size());
1414 } else {
1415 _mutable_data = nullptr;
1416 }
1417
1418 _deoptimization_generation = 0;
1419 _gc_epoch = CodeCache::gc_epoch();
1420 _method = nm._method;
1421 _osr_link = nullptr;
1422
1423 // Increment number of references to immutable data to share it between nmethods
1424 _immutable_data_size = nm._immutable_data_size;
1425 if (_immutable_data_size > 0) {
1426 _immutable_data = nm._immutable_data;
1427 set_immutable_data_references_counter(get_immutable_data_references_counter() + 1);
1428 } else {
1429 _immutable_data = blob_end();
1430 }
1431
1432 _exception_cache = nullptr;
1433 _gc_data = nullptr;
1434 _oops_do_mark_nmethods = nullptr;
1435 _oops_do_mark_link = nullptr;
1436 _compiled_ic_data = nullptr;
1437
1438 if (nm._osr_entry_point != nullptr) {
1439 _osr_entry_point = (nm._osr_entry_point - (address) &nm) + (address) this;
1440 } else {
1441 _osr_entry_point = nullptr;
1442 }
1443
1444 _entry_offset = nm._entry_offset;
1445 _verified_entry_offset = nm._verified_entry_offset;
1446 _entry_bci = nm._entry_bci;
1447
1448 _skipped_instructions_size = nm._skipped_instructions_size;
1449 _stub_offset = nm._stub_offset;
1450 _exception_offset = nm._exception_offset;
1451 _deopt_handler_offset = nm._deopt_handler_offset;
1452 _unwind_handler_offset = nm._unwind_handler_offset;
1453 _num_stack_arg_slots = nm._num_stack_arg_slots;
1454 _oops_size = nm._oops_size;
1455 #if INCLUDE_JVMCI
1456 _metadata_size = nm._metadata_size;
1457 #endif
1458 _nul_chk_table_offset = nm._nul_chk_table_offset;
1459 _handler_table_offset = nm._handler_table_offset;
1460 _scopes_pcs_offset = nm._scopes_pcs_offset;
1461 _scopes_data_offset = nm._scopes_data_offset;
1462 #if INCLUDE_JVMCI
1463 _speculations_offset = nm._speculations_offset;
1464 #endif
1465
1466 _orig_pc_offset = nm._orig_pc_offset;
1467 _compile_id = nm._compile_id;
1468 _comp_level = nm._comp_level;
1469 _compiler_type = nm._compiler_type;
1470 _is_unloading_state = nm._is_unloading_state;
1471 _state = not_installed;
1472
1473 _has_unsafe_access = nm._has_unsafe_access;
1474 _has_wide_vectors = nm._has_wide_vectors;
1475 _has_monitors = nm._has_monitors;
1476 _has_scoped_access = nm._has_scoped_access;
1477 _has_flushed_dependencies = nm._has_flushed_dependencies;
1478 _is_unlinked = nm._is_unlinked;
1479 _load_reported = nm._load_reported;
1480
1481 _deoptimization_status = nm._deoptimization_status;
1482
1483 if (nm._pc_desc_container != nullptr) {
1484 _pc_desc_container = new PcDescContainer(scopes_pcs_begin());
1485 } else {
1486 _pc_desc_container = nullptr;
1487 }
1488
1489 // Copy nmethod contents excluding header
1490 // - Constant part (doubles, longs and floats used in nmethod)
1491 // - Code part:
1492 // - Code body
1493 // - Exception handler
1494 // - Stub code
1495 // - OOP table
1496 memcpy(consts_begin(), nm.consts_begin(), nm.data_end() - nm.consts_begin());
1497
1498 post_init();
1499 }
1500
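// Attempts to move this nmethod to the given code heap by cloning it and
// atomically switching the method over to the copy. Returns the copy on
// success, or nullptr if this nmethod is not relocatable or the switch fails.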
1501 nmethod* nmethod::relocate(CodeBlobType code_blob_type) {
  assert(NMethodRelocation, "NMethodRelocation must be enabled to use this function");
1503
  // The caller must hold these locks to ensure the nmethod is not modified
  // or purged from the code cache during relocation.
1506 assert_lock_strong(CodeCache_lock);
1507 assert_lock_strong(Compile_lock);
1508 assert(CompiledICLocker::is_safe(this), "mt unsafe call");
1509
1510 if (!is_relocatable()) {
1511 return nullptr;
1512 }
1513
1514 run_nmethod_entry_barrier();
1515 nmethod* nm_copy = new (size(), code_blob_type) nmethod(*this);
1516
1517 if (nm_copy == nullptr) {
1518 return nullptr;
1519 }
1520
  // Fix relocations
1522 RelocIterator iter(nm_copy);
1523 CodeBuffer src(this);
1524 CodeBuffer dst(nm_copy);
1525 while (iter.next()) {
1526 #ifdef USE_TRAMPOLINE_STUB_FIX_OWNER
1527 // Direct calls may no longer be in range and the use of a trampoline may now be required.
1528 // Instead, allow trampoline relocations to update their owners and perform the necessary checks.
1529 if (iter.reloc()->is_call()) {
1530 address trampoline = trampoline_stub_Relocation::get_trampoline_for(iter.reloc()->addr(), nm_copy);
1531 if (trampoline != nullptr) {
1532 continue;
1533 }
1534 }
1535 #endif
1536
1537 iter.reloc()->fix_relocation_after_move(&src, &dst);
1538 }
1539
1540 // To make dependency checking during class loading fast, record
1541 // the nmethod dependencies in the classes it is dependent on.
1542 // This allows the dependency checking code to simply walk the
1543 // class hierarchy above the loaded class, checking only nmethods
1544 // which are dependent on those classes. The slow way is to
1545 // check every nmethod for dependencies which makes it linear in
  // the number of methods compiled. For applications with a lot of
  // classes the slow way is too slow.
1548 for (Dependencies::DepStream deps(nm_copy); deps.next(); ) {
1549 if (deps.type() == Dependencies::call_site_target_value) {
1550 // CallSite dependencies are managed on per-CallSite instance basis.
1551 oop call_site = deps.argument_oop(0);
1552 MethodHandles::add_dependent_nmethod(call_site, nm_copy);
1553 } else {
1554 InstanceKlass* ik = deps.context_type();
1555 if (ik == nullptr) {
1556 continue; // ignore things like evol_method
1557 }
1558 // record this nmethod as dependent on this klass
1559 ik->add_dependent_nmethod(nm_copy);
1560 }
1561 }
1562
1563 MutexLocker ml_NMethodState_lock(NMethodState_lock, Mutex::_no_safepoint_check_flag);
1564
1565 // Verify the nm we copied from is still valid
1566 if (!is_marked_for_deoptimization() && is_in_use()) {
1567 assert(method() != nullptr && method()->code() == this, "should be if is in use");
1568
1569 nm_copy->clear_inline_caches();
1570
1571 // Attempt to start using the copy
1572 if (nm_copy->make_in_use()) {
1573 ICache::invalidate_range(nm_copy->code_begin(), nm_copy->code_size());
1574
1575 methodHandle mh(Thread::current(), nm_copy->method());
1576 nm_copy->method()->set_code(mh, nm_copy);
1577
1578 make_not_used();
1579
1580 nm_copy->post_compiled_method_load_event();
1581
1582 nm_copy->log_relocated_nmethod(this);
1583
1584 return nm_copy;
1585 }
1586 }
1587
1588 nm_copy->make_not_used();
1589
1590 return nullptr;
1591 }
1592
1593 bool nmethod::is_relocatable() {
1594 if (!is_java_method()) {
1595 return false;
1596 }
1597
1598 if (!is_in_use()) {
1599 return false;
1600 }
1601
1602 if (is_osr_method()) {
1603 return false;
1604 }
1605
1606 if (is_marked_for_deoptimization()) {
1607 return false;
1608 }
1609
1610 #if INCLUDE_JVMCI
1611 if (jvmci_nmethod_data() != nullptr && jvmci_nmethod_data()->has_mirror()) {
1612 return false;
1613 }
1614 #endif
1615
1616 if (is_unloading()) {
1617 return false;
1618 }
1619
1620 if (has_evol_metadata()) {
1621 return false;
1622 }
1623
1624 return true;
1625 }
1626
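// The operator new variants below allocate from the code cache; note the
// empty throw () specification - they return nullptr on allocation failure
// rather than throwing, so callers must check the result.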
1627 void* nmethod::operator new(size_t size, int nmethod_size, int comp_level) throw () {
1628 return CodeCache::allocate(nmethod_size, CodeCache::get_code_blob_type(comp_level));
1629 }
1630
1631 void* nmethod::operator new(size_t size, int nmethod_size, CodeBlobType code_blob_type) throw () {
1632 return CodeCache::allocate(nmethod_size, code_blob_type);
1633 }
1634
1635 void* nmethod::operator new(size_t size, int nmethod_size, bool allow_NonNMethod_space) throw () {
1636 // Try MethodNonProfiled and MethodProfiled.
1637 void* return_value = CodeCache::allocate(nmethod_size, CodeBlobType::MethodNonProfiled);
1638 if (return_value != nullptr || !allow_NonNMethod_space) return return_value;
1639 // Try NonNMethod or give up.
1640 return CodeCache::allocate(nmethod_size, CodeBlobType::NonNMethod);
1641 }
1642
1643 // For normal JIT compiled code
1644 nmethod::nmethod(
1645 Method* method,
1646 CompilerType type,
1647 int nmethod_size,
1648 int immutable_data_size,
1649 int mutable_data_size,
1650 int compile_id,
1651 int entry_bci,
1652 address immutable_data,
1653 CodeOffsets* offsets,
1654 int orig_pc_offset,
1655 DebugInformationRecorder* debug_info,
1656 Dependencies* dependencies,
1657 CodeBuffer *code_buffer,
1658 int frame_size,
1659 OopMapSet* oop_maps,
1660 ExceptionHandlerTable* handler_table,
1661 ImplicitExceptionTable* nul_chk_table,
1662 AbstractCompiler* compiler,
1663 CompLevel comp_level
1664 #if INCLUDE_JVMCI
1665 , char* speculations,
1666 int speculations_len,
1667 JVMCINMethodData* jvmci_data
1668 #endif
1669 )
1670 : CodeBlob("nmethod", CodeBlobKind::Nmethod, code_buffer, nmethod_size, sizeof(nmethod),
1671 offsets->value(CodeOffsets::Frame_Complete), frame_size, oop_maps, false, mutable_data_size),
1672 _deoptimization_generation(0),
1673 _gc_epoch(CodeCache::gc_epoch()),
1674 _method(method),
1675 _osr_link(nullptr)
1676 {
1677 assert(debug_info->oop_recorder() == code_buffer->oop_recorder(), "shared OR");
1678 {
1679 DEBUG_ONLY(NoSafepointVerifier nsv;)
1680 assert_locked_or_safepoint(CodeCache_lock);
1681
1682 init_defaults(code_buffer, offsets);
1683
1684 _osr_entry_point = code_begin() + offsets->value(CodeOffsets::OSR_Entry);
1685 _entry_bci = entry_bci;
1686 _compile_id = compile_id;
1687 _comp_level = comp_level;
1688 _compiler_type = type;
1689 _orig_pc_offset = orig_pc_offset;
1690
1691 _num_stack_arg_slots = entry_bci != InvocationEntryBci ? 0 : _method->constMethod()->num_stack_arg_slots();
1692
1693 set_ctable_begin(header_begin() + content_offset());
1694
1695 #if INCLUDE_JVMCI
1696 if (compiler->is_jvmci()) {
1697 // JVMCI might not produce any stub sections
1698 if (offsets->value(CodeOffsets::Exceptions) != -1) {
1699 _exception_offset = code_offset() + offsets->value(CodeOffsets::Exceptions);
1700 } else {
1701 _exception_offset = -1;
1702 }
1703 if (offsets->value(CodeOffsets::Deopt) != -1) {
1704 _deopt_handler_offset = code_offset() + offsets->value(CodeOffsets::Deopt);
1705 } else {
1706 _deopt_handler_offset = -1;
1707 }
1708 } else
1709 #endif
1710 {
1711 // Exception handler and deopt handler are in the stub section
1712 assert(offsets->value(CodeOffsets::Exceptions) != -1, "must be set");
1713 assert(offsets->value(CodeOffsets::Deopt ) != -1, "must be set");
1714
1715 _exception_offset = _stub_offset + offsets->value(CodeOffsets::Exceptions);
1716 _deopt_handler_offset = _stub_offset + offsets->value(CodeOffsets::Deopt);
1717 }
1718 if (offsets->value(CodeOffsets::UnwindHandler) != -1) {
1719 // C1 generates UnwindHandler at the end of instructions section.
1720 // Calculate positive offset as distance between the start of stubs section
1721 // (which is also the end of instructions section) and the start of the handler.
1722 int unwind_handler_offset = code_offset() + offsets->value(CodeOffsets::UnwindHandler);
1723 CHECKED_CAST(_unwind_handler_offset, int16_t, (_stub_offset - unwind_handler_offset));
1724 } else {
1725 _unwind_handler_offset = -1;
1726 }
1727
1728 CHECKED_CAST(_oops_size, uint16_t, align_up(code_buffer->total_oop_size(), oopSize));
1729 uint16_t metadata_size;
1730 CHECKED_CAST(metadata_size, uint16_t, align_up(code_buffer->total_metadata_size(), wordSize));
1731 JVMCI_ONLY( _metadata_size = metadata_size; )
1732 int jvmci_data_size = 0 JVMCI_ONLY( + align_up(compiler->is_jvmci() ? jvmci_data->size() : 0, oopSize));
1733 assert(_mutable_data_size == _relocation_size + metadata_size + jvmci_data_size,
1734 "wrong mutable data size: %d != %d + %d + %d",
1735 _mutable_data_size, _relocation_size, metadata_size, jvmci_data_size);
assert(nmethod_size == data_end() - header_begin(), "wrong nmethod size: %d != %d",
nmethod_size, (int)(data_end() - header_begin()));
1738
1739 _immutable_data_size = immutable_data_size;
1740 if (immutable_data_size > 0) {
1741 assert(immutable_data != nullptr, "required");
1742 _immutable_data = immutable_data;
1743 } else {
// We need a unique, non-null address
1745 _immutable_data = blob_end();
1746 }
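// Immutable data layout (each part aligned up to oopSize), as computed
// by the offsets below:
//   [dependencies][nul_chk_table][handler_table][scopes_pcs][scopes_data]
//   JVMCI_ONLY([speculations]) [immutable data references counter]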
1747 CHECKED_CAST(_nul_chk_table_offset, uint16_t, (align_up((int)dependencies->size_in_bytes(), oopSize)));
1748 CHECKED_CAST(_handler_table_offset, uint16_t, (_nul_chk_table_offset + align_up(nul_chk_table->size_in_bytes(), oopSize)));
1749 _scopes_pcs_offset = _handler_table_offset + align_up(handler_table->size_in_bytes(), oopSize);
1750 _scopes_data_offset = _scopes_pcs_offset + adjust_pcs_size(debug_info->pcs_size());
1751
1752 #if INCLUDE_JVMCI
1753 _speculations_offset = _scopes_data_offset + align_up(debug_info->data_size(), oopSize);
1754 DEBUG_ONLY( int immutable_data_end_offset = _speculations_offset + align_up(speculations_len, oopSize) + align_up(ImmutableDataReferencesCounterSize, oopSize); )
1755 #else
1756 DEBUG_ONLY( int immutable_data_end_offset = _scopes_data_offset + align_up(debug_info->data_size(), oopSize) + align_up(ImmutableDataReferencesCounterSize, oopSize); )
1757 #endif
1758 assert(immutable_data_end_offset <= immutable_data_size, "wrong read-only data size: %d > %d",
1759 immutable_data_end_offset, immutable_data_size);
1760
1761 // Copy code and relocation info
1762 code_buffer->copy_code_and_locs_to(this);
1763 // Copy oops and metadata
1764 code_buffer->copy_values_to(this);
1765 dependencies->copy_to(this);
1766 // Copy PcDesc and ScopeDesc data
1767 debug_info->copy_to(this);
1768
// Create the container after the PcDesc data is copied - the data is used to initialize the PcDesc cache
1770 _pc_desc_container = new PcDescContainer(scopes_pcs_begin());
1771
1772 #if INCLUDE_JVMCI
1773 if (compiler->is_jvmci()) {
1774 // Initialize the JVMCINMethodData object inlined into nm
1775 jvmci_nmethod_data()->copy(jvmci_data);
1776 }
1777 #endif
1778
1779 // Copy contents of ExceptionHandlerTable to nmethod
1780 handler_table->copy_to(this);
1781 nul_chk_table->copy_to(this);
1782
1783 #if INCLUDE_JVMCI
1784 // Copy speculations to nmethod
1785 if (speculations_size() != 0) {
1786 memcpy(speculations_begin(), speculations, speculations_len);
1787 }
1788 #endif
1789 set_immutable_data_references_counter(1);
1790
1791 post_init();
1792
// We use the entry point information to find out if a method is
// static or non-static.
assert(compiler->is_c2() || compiler->is_jvmci() ||
_method->is_static() == (entry_point() == verified_entry_point()),
"entry points must be the same for static methods and differ otherwise");
1798 }
1799 }
1800
1801 // Print a short set of xml attributes to identify this nmethod. The
1802 // output should be embedded in some other element.
1803 void nmethod::log_identity(xmlStream* log) const {
1804 log->print(" compile_id='%d'", compile_id());
1805 const char* nm_kind = compile_kind();
1806 if (nm_kind != nullptr) log->print(" compile_kind='%s'", nm_kind);
1807 log->print(" compiler='%s'", compiler_name());
1808 if (TieredCompilation) {
1809 log->print(" level='%d'", comp_level());
1810 }
1811 #if INCLUDE_JVMCI
1812 if (jvmci_nmethod_data() != nullptr) {
1813 const char* jvmci_name = jvmci_nmethod_data()->name();
1814 if (jvmci_name != nullptr) {
1815 log->print(" jvmci_mirror_name='");
1816 log->text("%s", jvmci_name);
1817 log->print("'");
1818 }
1819 }
1820 #endif
1821 }
1822
1823
1824 #define LOG_OFFSET(log, name) \
1825 if (p2i(name##_end()) - p2i(name##_begin())) \
1826 log->print(" " XSTR(name) "_offset='%zd'" , \
1827 p2i(name##_begin()) - p2i(this))
1828
1829
1830 void nmethod::log_new_nmethod() const {
1831 if (LogCompilation && xtty != nullptr) {
1832 ttyLocker ttyl;
1833 xtty->begin_elem("nmethod");
1834 log_identity(xtty);
1835 xtty->print(" entry='" INTPTR_FORMAT "' size='%d'", p2i(code_begin()), size());
1836 xtty->print(" address='" INTPTR_FORMAT "'", p2i(this));
1837
1838 LOG_OFFSET(xtty, relocation);
1839 LOG_OFFSET(xtty, consts);
1840 LOG_OFFSET(xtty, insts);
1841 LOG_OFFSET(xtty, stub);
1842 LOG_OFFSET(xtty, scopes_data);
1843 LOG_OFFSET(xtty, scopes_pcs);
1844 LOG_OFFSET(xtty, dependencies);
1845 LOG_OFFSET(xtty, handler_table);
1846 LOG_OFFSET(xtty, nul_chk_table);
1847 LOG_OFFSET(xtty, oops);
1848 LOG_OFFSET(xtty, metadata);
1849
1850 xtty->method(method());
1851 xtty->stamp();
1852 xtty->end_elem();
1853 }
1854 }
1855
1856
1857 void nmethod::log_relocated_nmethod(nmethod* original) const {
1858 if (LogCompilation && xtty != nullptr) {
1859 ttyLocker ttyl;
1860 xtty->begin_elem("relocated nmethod");
1861 log_identity(xtty);
1862 xtty->print(" entry='" INTPTR_FORMAT "' size='%d'", p2i(code_begin()), size());
1863
1864 const char* original_code_heap_name = CodeCache::get_code_heap_name(CodeCache::get_code_blob_type(original));
1865 xtty->print(" original_address='" INTPTR_FORMAT "'", p2i(original));
1866 xtty->print(" original_code_heap='%s'", original_code_heap_name);
1867
1868 const char* new_code_heap_name = CodeCache::get_code_heap_name(CodeCache::get_code_blob_type(this));
1869 xtty->print(" new_address='" INTPTR_FORMAT "'", p2i(this));
1870 xtty->print(" new_code_heap='%s'", new_code_heap_name);
1871
1872 LOG_OFFSET(xtty, relocation);
1873 LOG_OFFSET(xtty, consts);
1874 LOG_OFFSET(xtty, insts);
1875 LOG_OFFSET(xtty, stub);
1876 LOG_OFFSET(xtty, scopes_data);
1877 LOG_OFFSET(xtty, scopes_pcs);
1878 LOG_OFFSET(xtty, dependencies);
1879 LOG_OFFSET(xtty, handler_table);
1880 LOG_OFFSET(xtty, nul_chk_table);
1881 LOG_OFFSET(xtty, oops);
1882 LOG_OFFSET(xtty, metadata);
1883
1884 xtty->method(method());
1885 xtty->stamp();
1886 xtty->end_elem();
1887 }
1888 }
1889
1890 #undef LOG_OFFSET
1891
1892
1893 // Print out more verbose output usually for a newly created nmethod.
1894 void nmethod::print_on_with_msg(outputStream* st, const char* msg) const {
1895 if (st != nullptr) {
1896 ttyLocker ttyl;
1897 if (WizardMode) {
1898 CompileTask::print(st, this, msg, /*short_form:*/ true);
1899 st->print_cr(" (" INTPTR_FORMAT ")", p2i(this));
1900 } else {
1901 CompileTask::print(st, this, msg, /*short_form:*/ false);
1902 }
1903 }
1904 }
1905
1906 void nmethod::maybe_print_nmethod(const DirectiveSet* directive) {
1907 bool printnmethods = directive->PrintAssemblyOption || directive->PrintNMethodsOption;
1908 if (printnmethods || PrintDebugInfo || PrintRelocations || PrintDependencies || PrintExceptionHandlers) {
1909 print_nmethod(printnmethods);
1910 }
1911 }
1912
1913 void nmethod::print_nmethod(bool printmethod) {
1914 ttyLocker ttyl; // keep the following output all in one block
1915 if (xtty != nullptr) {
1916 xtty->begin_head("print_nmethod");
1917 log_identity(xtty);
1918 xtty->stamp();
1919 xtty->end_head();
1920 }
1921 // Print the header part, then print the requested information.
// Both are handled in decode2().
1923 if (printmethod) {
1924 ResourceMark m;
1925 if (is_compiled_by_c1()) {
1926 tty->cr();
1927 tty->print_cr("============================= C1-compiled nmethod ==============================");
1928 }
1929 if (is_compiled_by_jvmci()) {
1930 tty->cr();
1931 tty->print_cr("=========================== JVMCI-compiled nmethod =============================");
1932 }
1933 tty->print_cr("----------------------------------- Assembly -----------------------------------");
1934 decode2(tty);
1935 #if defined(SUPPORT_DATA_STRUCTS)
1936 if (AbstractDisassembler::show_structs()) {
1937 // Print the oops from the underlying CodeBlob as well.
1938 tty->print_cr("- - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - ");
1939 print_oops(tty);
1940 tty->print_cr("- - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - ");
1941 print_metadata(tty);
1942 tty->print_cr("- - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - ");
1943 print_pcs_on(tty);
1944 tty->print_cr("- - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - ");
1945 if (oop_maps() != nullptr) {
1946 tty->print("oop maps:"); // oop_maps()->print_on(tty) outputs a cr() at the beginning
1947 oop_maps()->print_on(tty);
1948 tty->print_cr("- - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - ");
1949 }
1950 }
1951 #endif
1952 } else {
1953 print(); // print the header part only.
1954 }
1955
1956 #if defined(SUPPORT_DATA_STRUCTS)
1957 if (AbstractDisassembler::show_structs()) {
1958 methodHandle mh(Thread::current(), _method);
1959 if (printmethod || PrintDebugInfo || CompilerOracle::has_option(mh, CompileCommandEnum::PrintDebugInfo)) {
1960 print_scopes();
1961 tty->print_cr("- - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - ");
1962 }
1963 if (printmethod || PrintRelocations || CompilerOracle::has_option(mh, CompileCommandEnum::PrintRelocations)) {
1964 print_relocations();
1965 tty->print_cr("- - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - ");
1966 }
1967 if (printmethod || PrintDependencies || CompilerOracle::has_option(mh, CompileCommandEnum::PrintDependencies)) {
1968 print_dependencies_on(tty);
1969 tty->print_cr("- - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - ");
1970 }
1971 if (printmethod || PrintExceptionHandlers) {
1972 print_handler_table();
1973 tty->print_cr("- - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - ");
1974 print_nul_chk_table();
1975 tty->print_cr("- - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - ");
1976 }
1977
1978 if (printmethod) {
1979 print_recorded_oops();
1980 tty->print_cr("- - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - ");
1981 print_recorded_metadata();
1982 tty->print_cr("- - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - ");
1983 }
1984 }
1985 #endif
1986
1987 if (xtty != nullptr) {
1988 xtty->tail("print_nmethod");
1989 }
1990 }
1991
1992
1993 // Promote one word from an assembly-time handle to a live embedded oop.
1994 inline void nmethod::initialize_immediate_oop(oop* dest, jobject handle) {
1995 if (handle == nullptr ||
1996 // As a special case, IC oops are initialized to 1 or -1.
1997 handle == (jobject) Universe::non_oop_word()) {
1998 *(void**)dest = handle;
1999 } else {
2000 *dest = JNIHandles::resolve_non_null(handle);
2001 }
2002 }
2003
2004
// Must share a name with the Metadata* overload below because it's called from a template
2006 void nmethod::copy_values(GrowableArray<jobject>* array) {
2007 int length = array->length();
2008 assert((address)(oops_begin() + length) <= (address)oops_end(), "oops big enough");
2009 oop* dest = oops_begin();
2010 for (int index = 0 ; index < length; index++) {
2011 initialize_immediate_oop(&dest[index], array->at(index));
2012 }
2013
2014 // Now we can fix up all the oops in the code. We need to do this
2015 // in the code because the assembler uses jobjects as placeholders.
2016 // The code and relocations have already been initialized by the
2017 // CodeBlob constructor, so it is valid even at this early point to
2018 // iterate over relocations and patch the code.
2019 fix_oop_relocations(nullptr, nullptr, /*initialize_immediates=*/ true);
2020 }
2021
2022 void nmethod::copy_values(GrowableArray<Metadata*>* array) {
2023 int length = array->length();
2024 assert((address)(metadata_begin() + length) <= (address)metadata_end(), "big enough");
2025 Metadata** dest = metadata_begin();
2026 for (int index = 0 ; index < length; index++) {
2027 dest[index] = array->at(index);
2028 }
2029 }
2030
2031 void nmethod::fix_oop_relocations(address begin, address end, bool initialize_immediates) {
2032 // re-patch all oop-bearing instructions, just in case some oops moved
2033 RelocIterator iter(this, begin, end);
2034 while (iter.next()) {
2035 if (iter.type() == relocInfo::oop_type) {
2036 oop_Relocation* reloc = iter.oop_reloc();
2037 if (initialize_immediates && reloc->oop_is_immediate()) {
2038 oop* dest = reloc->oop_addr();
2039 jobject obj = *reinterpret_cast<jobject*>(dest);
2040 initialize_immediate_oop(dest, obj);
2041 }
2042 // Refresh the oop-related bits of this instruction.
2043 reloc->fix_oop_relocation();
2044 } else if (iter.type() == relocInfo::metadata_type) {
2045 metadata_Relocation* reloc = iter.metadata_reloc();
2046 reloc->fix_metadata_relocation();
2047 }
2048 }
2049 }
2050
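// Patch the post-call nop so it encodes the oopmap slot and the pc's offset
// within this nmethod, which lets stack walkers locate the code blob and its
// oopmap quickly. Patching is best-effort and may fail below.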
2051 static void install_post_call_nop_displacement(nmethod* nm, address pc) {
2052 NativePostCallNop* nop = nativePostCallNop_at((address) pc);
2053 intptr_t cbaddr = (intptr_t) nm;
2054 intptr_t offset = ((intptr_t) pc) - cbaddr;
2055
2056 int oopmap_slot = nm->oop_maps()->find_slot_for_offset(int((intptr_t) pc - (intptr_t) nm->code_begin()));
2057 if (oopmap_slot < 0) { // this can happen at asynchronous (non-safepoint) stackwalks
2058 log_debug(codecache)("failed to find oopmap for cb: " INTPTR_FORMAT " offset: %d", cbaddr, (int) offset);
2059 } else if (!nop->patch(oopmap_slot, offset)) {
2060 log_debug(codecache)("failed to encode %d %d", oopmap_slot, (int) offset);
2061 }
2062 }
2063
2064 void nmethod::finalize_relocations() {
2065 NoSafepointVerifier nsv;
2066
2067 GrowableArray<NativeMovConstReg*> virtual_call_data;
2068
2069 // Make sure that post call nops fill in nmethod offsets eagerly so
2070 // we don't have to race with deoptimization
2071 RelocIterator iter(this);
2072 while (iter.next()) {
2073 if (iter.type() == relocInfo::virtual_call_type) {
2074 virtual_call_Relocation* r = iter.virtual_call_reloc();
2075 NativeMovConstReg* value = nativeMovConstReg_at(r->cached_value());
2076 virtual_call_data.append(value);
2077 } else if (iter.type() == relocInfo::post_call_nop_type) {
2078 post_call_nop_Relocation* const reloc = iter.post_call_nop_reloc();
2079 address pc = reloc->addr();
2080 install_post_call_nop_displacement(this, pc);
2081 }
2082 }
2083
2084 if (virtual_call_data.length() > 0) {
2085 // We allocate a block of CompiledICData per nmethod so the GC can purge this faster.
2086 _compiled_ic_data = new CompiledICData[virtual_call_data.length()];
2087 CompiledICData* next_data = _compiled_ic_data;
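// Hand each virtual call site a pointer to its own element of the block.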
2088
2089 for (NativeMovConstReg* value : virtual_call_data) {
2090 value->set_data((intptr_t)next_data);
2091 next_data++;
2092 }
2093 }
2094 }
2095
2096 void nmethod::make_deoptimized() {
2097 if (!Continuations::enabled()) {
2098 // Don't deopt this again.
2099 set_deoptimized_done();
2100 return;
2101 }
2102
2103 assert(method() == nullptr || can_be_deoptimized(), "");
2104
2105 CompiledICLocker ml(this);
2106 assert(CompiledICLocker::is_safe(this), "mt unsafe call");
2107
// If post call nops have already been patched, we can just bail out.
2109 if (has_been_deoptimized()) {
2110 return;
2111 }
2112
2113 ResourceMark rm;
2114 RelocIterator iter(this, oops_reloc_begin());
2115
2116 while (iter.next()) {
2117
2118 switch (iter.type()) {
2119 case relocInfo::virtual_call_type: {
2120 CompiledIC *ic = CompiledIC_at(&iter);
2121 address pc = ic->end_of_call();
2122 NativePostCallNop* nop = nativePostCallNop_at(pc);
2123 if (nop != nullptr) {
2124 nop->make_deopt();
2125 }
2126 assert(NativeDeoptInstruction::is_deopt_at(pc), "check");
2127 break;
2128 }
2129 case relocInfo::static_call_type:
2130 case relocInfo::opt_virtual_call_type: {
2131 CompiledDirectCall *csc = CompiledDirectCall::at(iter.reloc());
2132 address pc = csc->end_of_call();
2133 NativePostCallNop* nop = nativePostCallNop_at(pc);
2134 //tty->print_cr(" - static pc %p", pc);
2135 if (nop != nullptr) {
2136 nop->make_deopt();
2137 }
// We can't assert here: there are some calls to stubs / runtime
// that have reloc data but don't have a post call NOP.
2140 //assert(NativeDeoptInstruction::is_deopt_at(pc), "check");
2141 break;
2142 }
2143 default:
2144 break;
2145 }
2146 }
2147 // Don't deopt this again.
2148 set_deoptimized_done();
2149 }
2150
2151 void nmethod::verify_clean_inline_caches() {
2152 assert(CompiledICLocker::is_safe(this), "mt unsafe call");
2153
2154 ResourceMark rm;
2155 RelocIterator iter(this, oops_reloc_begin());
2156 while(iter.next()) {
2157 switch(iter.type()) {
2158 case relocInfo::virtual_call_type: {
2159 CompiledIC *ic = CompiledIC_at(&iter);
2160 CodeBlob *cb = CodeCache::find_blob(ic->destination());
2161 assert(cb != nullptr, "destination not in CodeBlob?");
2162 nmethod* nm = cb->as_nmethod_or_null();
2163 if (nm != nullptr) {
2164 // Verify that inline caches pointing to bad nmethods are clean
2165 if (!nm->is_in_use() || nm->is_unloading()) {
2166 assert(ic->is_clean(), "IC should be clean");
2167 }
2168 }
2169 break;
2170 }
2171 case relocInfo::static_call_type:
2172 case relocInfo::opt_virtual_call_type: {
2173 CompiledDirectCall *cdc = CompiledDirectCall::at(iter.reloc());
2174 CodeBlob *cb = CodeCache::find_blob(cdc->destination());
2175 assert(cb != nullptr, "destination not in CodeBlob?");
2176 nmethod* nm = cb->as_nmethod_or_null();
2177 if (nm != nullptr) {
2178 // Verify that inline caches pointing to bad nmethods are clean
2179 if (!nm->is_in_use() || nm->is_unloading() || nm->method()->code() != nm) {
2180 assert(cdc->is_clean(), "IC should be clean");
2181 }
2182 }
2183 break;
2184 }
2185 default:
2186 break;
2187 }
2188 }
2189 }
2190
2191 void nmethod::mark_as_maybe_on_stack() {
2192 AtomicAccess::store(&_gc_epoch, CodeCache::gc_epoch());
2193 }
2194
2195 bool nmethod::is_maybe_on_stack() {
// If the condition below is true, it means that the nmethod was found to
// be alive in the previous completed marking cycle.
2198 return AtomicAccess::load(&_gc_epoch) >= CodeCache::previous_completed_gc_marking_cycle();
2199 }
2200
2201 void nmethod::inc_decompile_count() {
2202 if (!is_compiled_by_c2() && !is_compiled_by_jvmci()) return;
2203 // Could be gated by ProfileTraps, but do not bother...
2204 #if INCLUDE_JVMCI
2205 if (jvmci_skip_profile_deopt()) {
2206 return;
2207 }
2208 #endif
2209 Method* m = method();
2210 if (m == nullptr) return;
2211 MethodData* mdo = m->method_data();
2212 if (mdo == nullptr) return;
2213 // There is a benign race here. See comments in methodData.hpp.
2214 mdo->inc_decompile_count();
2215 }
2216
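// State values are ordered so that every legal transition strictly increases
// _state (e.g. not_installed -> in_use -> not_entrant); try_transition()
// relies on this monotonic ordering.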
2217 bool nmethod::try_transition(signed char new_state_int) {
2218 signed char new_state = new_state_int;
2219 assert_lock_strong(NMethodState_lock);
2220 signed char old_state = _state;
2221 if (old_state >= new_state) {
2222 // Ensure monotonicity of transitions.
2223 return false;
2224 }
2225 AtomicAccess::store(&_state, new_state);
2226 return true;
2227 }
2228
2229 void nmethod::invalidate_osr_method() {
2230 assert(_entry_bci != InvocationEntryBci, "wrong kind of nmethod");
2231 // Remove from list of active nmethods
2232 if (method() != nullptr) {
2233 method()->method_holder()->remove_osr_nmethod(this);
2234 }
2235 }
2236
2237 void nmethod::log_state_change(InvalidationReason invalidation_reason) const {
2238 if (LogCompilation) {
2239 if (xtty != nullptr) {
2240 ttyLocker ttyl; // keep the following output all in one block
2241 xtty->begin_elem("make_not_entrant thread='%zu' reason='%s'",
2242 os::current_thread_id(), invalidation_reason_to_string(invalidation_reason));
2243 log_identity(xtty);
2244 xtty->stamp();
2245 xtty->end_elem();
2246 }
2247 }
2248
2249 ResourceMark rm;
2250 stringStream ss(NEW_RESOURCE_ARRAY(char, 256), 256);
2251 ss.print("made not entrant: %s", invalidation_reason_to_string(invalidation_reason));
2252
2253 CompileTask::print_ul(this, ss.freeze());
2254 if (PrintCompilation) {
2255 print_on_with_msg(tty, ss.freeze());
2256 }
2257 }
2258
2259 void nmethod::unlink_from_method() {
2260 if (method() != nullptr) {
2261 method()->unlink_code(this);
2262 }
2263 }
2264
2265 // Invalidate code
2266 bool nmethod::make_not_entrant(InvalidationReason invalidation_reason) {
2267 // This can be called while the system is already at a safepoint which is ok
2268 NoSafepointVerifier nsv;
2269
2270 if (is_unloading()) {
2271 // If the nmethod is unloading, then it is already not entrant through
2272 // the nmethod entry barriers. No need to do anything; GC will unload it.
2273 return false;
2274 }
2275
2276 if (AtomicAccess::load(&_state) == not_entrant) {
2277 // Avoid taking the lock if already in required state.
2278 // This is safe from races because the state is an end-state,
2279 // which the nmethod cannot back out of once entered.
2280 // No need for fencing either.
2281 return false;
2282 }
2283
2284 {
2285 // Enter critical section. Does not block for safepoint.
2286 ConditionalMutexLocker ml(NMethodState_lock, !NMethodState_lock->owned_by_self(), Mutex::_no_safepoint_check_flag);
2287
2288 if (AtomicAccess::load(&_state) == not_entrant) {
2289 // another thread already performed this transition so nothing
2290 // to do, but return false to indicate this.
2291 return false;
2292 }
2293
2294 if (is_osr_method()) {
2295 // This logic is equivalent to the logic below for patching the
2296 // verified entry point of regular methods.
// This effectively makes the OSR nmethod not entrant.
2298 invalidate_osr_method();
2299 } else {
2300 // The caller can be calling the method statically or through an inline
2301 // cache call.
2302 BarrierSet::barrier_set()->barrier_set_nmethod()->make_not_entrant(this);
2303 }
2304
2305 if (update_recompile_counts()) {
2306 // Mark the method as decompiled.
2307 inc_decompile_count();
2308 }
2309
2310 BarrierSetNMethod* bs_nm = BarrierSet::barrier_set()->barrier_set_nmethod();
2311 if (bs_nm == nullptr || !bs_nm->supports_entry_barrier(this)) {
2312 // If nmethod entry barriers are not supported, we won't mark
2313 // nmethods as on-stack when they become on-stack. So we
2314 // degrade to a less accurate flushing strategy, for now.
2315 mark_as_maybe_on_stack();
2316 }
2317
2318 // Change state
2319 bool success = try_transition(not_entrant);
2320 assert(success, "Transition can't fail");
2321
2322 // Log the transition once
2323 log_state_change(invalidation_reason);
2324
2325 // Remove nmethod from method.
2326 unlink_from_method();
2327
2328 } // leave critical region under NMethodState_lock
2329
2330 #if INCLUDE_JVMCI
2331 // Invalidate can't occur while holding the NMethodState_lock
2332 JVMCINMethodData* nmethod_data = jvmci_nmethod_data();
2333 if (nmethod_data != nullptr) {
2334 nmethod_data->invalidate_nmethod_mirror(this, invalidation_reason);
2335 }
2336 #endif
2337
2338 #ifdef ASSERT
2339 if (is_osr_method() && method() != nullptr) {
2340 // Make sure osr nmethod is invalidated, i.e. not on the list
2341 bool found = method()->method_holder()->remove_osr_nmethod(this);
2342 assert(!found, "osr nmethod should have been invalidated");
2343 }
2344 #endif
2345
2346 return true;
2347 }
2348
2349 // For concurrent GCs, there must be a handshake between unlink and flush
2350 void nmethod::unlink() {
2351 if (is_unlinked()) {
2352 // Already unlinked.
2353 return;
2354 }
2355
2356 flush_dependencies();
2357
2358 // unlink_from_method will take the NMethodState_lock.
2359 // In this case we don't strictly need it when unlinking nmethods from
2360 // the Method, because it is only concurrently unlinked by
2361 // the entry barrier, which acquires the per nmethod lock.
2362 unlink_from_method();
2363
2364 if (is_osr_method()) {
2365 invalidate_osr_method();
2366 }
2367
2368 #if INCLUDE_JVMCI
2369 // Clear the link between this nmethod and a HotSpotNmethod mirror
2370 JVMCINMethodData* nmethod_data = jvmci_nmethod_data();
2371 if (nmethod_data != nullptr) {
2372 nmethod_data->invalidate_nmethod_mirror(this, is_cold() ?
2373 nmethod::InvalidationReason::UNLOADING_COLD :
2374 nmethod::InvalidationReason::UNLOADING);
2375 }
2376 #endif
2377
2378 // Post before flushing as jmethodID is being used
2379 post_compiled_method_unload();
2380
2381 // Register for flushing when it is safe. For concurrent class unloading,
2382 // that would be after the unloading handshake, and for STW class unloading
2383 // that would be when getting back to the VM thread.
2384 ClassUnloadingContext::context()->register_unlinked_nmethod(this);
2385 }
2386
2387 void nmethod::purge(bool unregister_nmethod) {
2388
2389 MutexLocker ml(CodeCache_lock, Mutex::_no_safepoint_check_flag);
2390
// completely deallocate this nmethod
2392 Events::log_nmethod_flush(Thread::current(), "flushing %s nmethod " INTPTR_FORMAT, is_osr_method() ? "osr" : "", p2i(this));
2393
2394 LogTarget(Debug, codecache) lt;
2395 if (lt.is_enabled()) {
2396 ResourceMark rm;
2397 LogStream ls(lt);
2398 const char* method_name = method()->name()->as_C_string();
2399 const size_t codecache_capacity = CodeCache::capacity()/1024;
2400 const size_t codecache_free_space = CodeCache::unallocated_capacity(CodeCache::get_code_blob_type(this))/1024;
2401 ls.print("Flushing nmethod %6d/" INTPTR_FORMAT ", level=%d, osr=%d, cold=%d, epoch=" UINT64_FORMAT ", cold_count=" UINT64_FORMAT ". "
2402 "Cache capacity: %zuKb, free space: %zuKb. method %s (%s)",
2403 _compile_id, p2i(this), _comp_level, is_osr_method(), is_cold(), _gc_epoch, CodeCache::cold_gc_count(),
2404 codecache_capacity, codecache_free_space, method_name, compiler_name());
2405 }
2406
2407 // We need to deallocate any ExceptionCache data.
2408 // Note that we do not need to grab the nmethod lock for this, it
2409 // better be thread safe if we're disposing of it!
2410 ExceptionCache* ec = exception_cache();
2411 while(ec != nullptr) {
2412 ExceptionCache* next = ec->next();
2413 delete ec;
2414 ec = next;
2415 }
2416 if (_pc_desc_container != nullptr) {
2417 delete _pc_desc_container;
2418 }
2419 delete[] _compiled_ic_data;
2420
2421 if (_immutable_data != blob_end()) {
int reference_count = get_immutable_data_references_counter();
assert(reference_count > 0, "immutable data must have references");

set_immutable_data_references_counter(reference_count - 1);
// Free the memory if this was the last nmethod referencing the immutable data
if (reference_count == 1) {
os::free(_immutable_data);
}

_immutable_data = blob_end(); // Valid non-null address
2432 }
2433
2434 if (unregister_nmethod) {
2435 Universe::heap()->unregister_nmethod(this);
2436 }
2437 CodeCache::unregister_old_nmethod(this);
2438
2439 JVMCI_ONLY( _metadata_size = 0; )
2440 CodeBlob::purge();
2441 }
2442
2443 oop nmethod::oop_at(int index) const {
2444 if (index == 0) {
2445 return nullptr;
2446 }
2447
2448 BarrierSetNMethod* bs_nm = BarrierSet::barrier_set()->barrier_set_nmethod();
2449 return bs_nm->oop_load_no_keepalive(this, index);
2450 }
2451
2452 oop nmethod::oop_at_phantom(int index) const {
2453 if (index == 0) {
2454 return nullptr;
2455 }
2456
2457 BarrierSetNMethod* bs_nm = BarrierSet::barrier_set()->barrier_set_nmethod();
2458 return bs_nm->oop_load_phantom(this, index);
2459 }
2460
2461 //
2462 // Notify all classes this nmethod is dependent on that it is no
2463 // longer dependent.
2464
2465 void nmethod::flush_dependencies() {
2466 if (!has_flushed_dependencies()) {
2467 set_has_flushed_dependencies(true);
2468 for (Dependencies::DepStream deps(this); deps.next(); ) {
2469 if (deps.type() == Dependencies::call_site_target_value) {
// CallSite dependencies are managed on a per-CallSite instance basis.
2471 oop call_site = deps.argument_oop(0);
2472 MethodHandles::clean_dependency_context(call_site);
2473 } else {
2474 InstanceKlass* ik = deps.context_type();
2475 if (ik == nullptr) {
2476 continue; // ignore things like evol_method
2477 }
// During GC, the liveness of the dependee determines which classes need to be updated.
2479 // The GC may clean dependency contexts concurrently and in parallel.
2480 ik->clean_dependency_context();
2481 }
2482 }
2483 }
2484 }
2485
2486 void nmethod::post_compiled_method(CompileTask* task) {
2487 task->mark_success();
2488 task->set_nm_content_size(content_size());
2489 task->set_nm_insts_size(insts_size());
2490 task->set_nm_total_size(total_size());
2491
2492 // JVMTI -- compiled method notification (must be done outside lock)
2493 post_compiled_method_load_event();
2494
2495 if (CompilationLog::log() != nullptr) {
2496 CompilationLog::log()->log_nmethod(JavaThread::current(), this);
2497 }
2498
2499 const DirectiveSet* directive = task->directive();
2500 maybe_print_nmethod(directive);
2501 }
2502
2503 // ------------------------------------------------------------------
2504 // post_compiled_method_load_event
2505 // new method for install_code() path
2506 // Transfer information from compilation to jvmti
2507 void nmethod::post_compiled_method_load_event(JvmtiThreadState* state) {
2508 // This is a bad time for a safepoint. We don't want
2509 // this nmethod to get unloaded while we're queueing the event.
2510 NoSafepointVerifier nsv;
2511
2512 Method* m = method();
2513 HOTSPOT_COMPILED_METHOD_LOAD(
2514 (char *) m->klass_name()->bytes(),
2515 m->klass_name()->utf8_length(),
2516 (char *) m->name()->bytes(),
2517 m->name()->utf8_length(),
2518 (char *) m->signature()->bytes(),
2519 m->signature()->utf8_length(),
2520 insts_begin(), insts_size());
2521
2522
2523 if (JvmtiExport::should_post_compiled_method_load()) {
2524 // Only post unload events if load events are found.
2525 set_load_reported();
2526 // If a JavaThread hasn't been passed in, let the Service thread
2527 // (which is a real Java thread) post the event
2528 JvmtiDeferredEvent event = JvmtiDeferredEvent::compiled_method_load_event(this);
2529 if (state == nullptr) {
2530 // Execute any barrier code for this nmethod as if it's called, since
2531 // keeping it alive looks like stack walking.
2532 run_nmethod_entry_barrier();
2533 ServiceThread::enqueue_deferred_event(&event);
2534 } else {
2535 // This enters the nmethod barrier outside in the caller.
2536 state->enqueue_event(&event);
2537 }
2538 }
2539 }
2540
2541 void nmethod::post_compiled_method_unload() {
2542 assert(_method != nullptr, "just checking");
2543 DTRACE_METHOD_UNLOAD_PROBE(method());
2544
2545 // If a JVMTI agent has enabled the CompiledMethodUnload event then
2546 // post the event. The Method* will not be valid when this is freed.
2547
2548 // Don't bother posting the unload if the load event wasn't posted.
2549 if (load_reported() && JvmtiExport::should_post_compiled_method_unload()) {
2550 JvmtiDeferredEvent event =
2551 JvmtiDeferredEvent::compiled_method_unload_event(
2552 method()->jmethod_id(), insts_begin());
2553 ServiceThread::enqueue_deferred_event(&event);
2554 }
2555 }
2556
2557 // Iterate over metadata calling this function. Used by RedefineClasses
2558 void nmethod::metadata_do(MetadataClosure* f) {
2559 {
2560 // Visit all immediate references that are embedded in the instruction stream.
2561 RelocIterator iter(this, oops_reloc_begin());
2562 while (iter.next()) {
2563 if (iter.type() == relocInfo::metadata_type) {
2564 metadata_Relocation* r = iter.metadata_reloc();
// In this loop, we must only follow metadata directly embedded in
// the code. Other metadata (oop_index>0) is seen as part of
// the metadata section below.
2568 assert(1 == (r->metadata_is_immediate()) +
2569 (r->metadata_addr() >= metadata_begin() && r->metadata_addr() < metadata_end()),
2570 "metadata must be found in exactly one place");
2571 if (r->metadata_is_immediate() && r->metadata_value() != nullptr) {
2572 Metadata* md = r->metadata_value();
2573 if (md != _method) f->do_metadata(md);
2574 }
2575 } else if (iter.type() == relocInfo::virtual_call_type) {
2576 // Check compiledIC holders associated with this nmethod
2577 ResourceMark rm;
2578 CompiledIC *ic = CompiledIC_at(&iter);
2579 ic->metadata_do(f);
2580 }
2581 }
2582 }
2583
2584 // Visit the metadata section
2585 for (Metadata** p = metadata_begin(); p < metadata_end(); p++) {
2586 if (*p == Universe::non_oop_word() || *p == nullptr) continue; // skip non-oops
2587 Metadata* md = *p;
2588 f->do_metadata(md);
2589 }
2590
2591 // Visit metadata not embedded in the other places.
2592 if (_method != nullptr) f->do_metadata(_method);
2593 }
2594
2595 // Heuristic for nuking nmethods even though their oops are live.
2596 // Main purpose is to reduce code cache pressure and get rid of
2597 // nmethods that don't seem to be all that relevant any longer.
2598 bool nmethod::is_cold() {
2599 if (!MethodFlushing || is_native_method() || is_not_installed()) {
2600 // No heuristic unloading at all
2601 return false;
2602 }
2603
2604 if (!is_maybe_on_stack() && is_not_entrant()) {
2605 // Not entrant nmethods that are not on any stack can just
2606 // be removed
2607 return true;
2608 }
2609
2610 BarrierSetNMethod* bs_nm = BarrierSet::barrier_set()->barrier_set_nmethod();
2611 if (bs_nm == nullptr || !bs_nm->supports_entry_barrier(this)) {
2612 // On platforms that don't support nmethod entry barriers, we can't
2613 // trust the temporal aspect of the gc epochs. So we can't detect
2614 // cold nmethods on such platforms.
2615 return false;
2616 }
2617
2618 if (!UseCodeCacheFlushing) {
2619 // Bail out if we don't heuristically remove nmethods
2620 return false;
2621 }
2622
2623 // Other code can be phased out more gradually after N GCs
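// i.e. an nmethod is considered cold once more than two cold-GC intervals
// have passed since it was last observed on a stack (its _gc_epoch).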
2624 return CodeCache::previous_completed_gc_marking_cycle() > _gc_epoch + 2 * CodeCache::cold_gc_count();
2625 }
2626
2627 // The _is_unloading_state encodes a tuple comprising the unloading cycle
2628 // and the result of IsUnloadingBehaviour::is_unloading() for that cycle.
2629 // This is the bit layout of the _is_unloading_state byte: 00000CCU
2630 // CC refers to the cycle, which has 2 bits, and U refers to the result of
2631 // IsUnloadingBehaviour::is_unloading() for that unloading cycle.
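// For example, state 0b101 encodes unloading cycle 2 (CC == 0b10) with the
// U bit set, i.e. is_unloading() returned true for that cycle.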
2632
2633 class IsUnloadingState: public AllStatic {
2634 static const uint8_t _is_unloading_mask = 1;
2635 static const uint8_t _is_unloading_shift = 0;
2636 static const uint8_t _unloading_cycle_mask = 6;
2637 static const uint8_t _unloading_cycle_shift = 1;
2638
2639 static uint8_t set_is_unloading(uint8_t state, bool value) {
2640 state &= (uint8_t)~_is_unloading_mask;
2641 if (value) {
2642 state |= 1 << _is_unloading_shift;
2643 }
2644 assert(is_unloading(state) == value, "unexpected unloading cycle overflow");
2645 return state;
2646 }
2647
2648 static uint8_t set_unloading_cycle(uint8_t state, uint8_t value) {
2649 state &= (uint8_t)~_unloading_cycle_mask;
2650 state |= (uint8_t)(value << _unloading_cycle_shift);
2651 assert(unloading_cycle(state) == value, "unexpected unloading cycle overflow");
2652 return state;
2653 }
2654
2655 public:
2656 static bool is_unloading(uint8_t state) { return (state & _is_unloading_mask) >> _is_unloading_shift == 1; }
2657 static uint8_t unloading_cycle(uint8_t state) { return (state & _unloading_cycle_mask) >> _unloading_cycle_shift; }
2658
2659 static uint8_t create(bool is_unloading, uint8_t unloading_cycle) {
2660 uint8_t state = 0;
2661 state = set_is_unloading(state, is_unloading);
2662 state = set_unloading_cycle(state, unloading_cycle);
2663 return state;
2664 }
2665 };
2666
2667 bool nmethod::is_unloading() {
2668 uint8_t state = AtomicAccess::load(&_is_unloading_state);
2669 bool state_is_unloading = IsUnloadingState::is_unloading(state);
2670 if (state_is_unloading) {
2671 return true;
2672 }
2673 uint8_t state_unloading_cycle = IsUnloadingState::unloading_cycle(state);
2674 uint8_t current_cycle = CodeCache::unloading_cycle();
2675 if (state_unloading_cycle == current_cycle) {
2676 return false;
2677 }
2678
2679 // The IsUnloadingBehaviour is responsible for calculating if the nmethod
2680 // should be unloaded. This can be either because there is a dead oop,
2681 // or because is_cold() heuristically determines it is time to unload.
2682 state_unloading_cycle = current_cycle;
2683 state_is_unloading = IsUnloadingBehaviour::is_unloading(this);
2684 uint8_t new_state = IsUnloadingState::create(state_is_unloading, state_unloading_cycle);
2685
2686 // Note that if an nmethod has dead oops, everyone will agree that the
2687 // nmethod is_unloading. However, the is_cold heuristics can yield
2688 // different outcomes, so we guard the computed result with a CAS
2689 // to ensure all threads have a shared view of whether an nmethod
2690 // is_unloading or not.
2691 uint8_t found_state = AtomicAccess::cmpxchg(&_is_unloading_state, state, new_state, memory_order_relaxed);
2692
2693 if (found_state == state) {
2694 // First to change state, we win
2695 return state_is_unloading;
2696 } else {
2697 // State already set, so use it
2698 return IsUnloadingState::is_unloading(found_state);
2699 }
2700 }
2701
2702 void nmethod::clear_unloading_state() {
2703 uint8_t state = IsUnloadingState::create(false, CodeCache::unloading_cycle());
2704 AtomicAccess::store(&_is_unloading_state, state);
2705 }
2706
2707
2708 // This is called at the end of the strong tracing/marking phase of a
2709 // GC to unload an nmethod if it contains otherwise unreachable
2710 // oops or is heuristically found to be not important.
2711 void nmethod::do_unloading(bool unloading_occurred) {
// Make sure the oops are ready to receive visitors
2713 if (is_unloading()) {
2714 unlink();
2715 } else {
2716 unload_nmethod_caches(unloading_occurred);
2717 BarrierSetNMethod* bs_nm = BarrierSet::barrier_set()->barrier_set_nmethod();
2718 if (bs_nm != nullptr) {
2719 bs_nm->disarm(this);
2720 }
2721 }
2722 }
2723
2724 void nmethod::oops_do(OopClosure* f) {
2725 // Prevent extra code cache walk for platforms that don't have immediate oops.
2726 if (relocInfo::mustIterateImmediateOopsInCode()) {
2727 RelocIterator iter(this, oops_reloc_begin());
2728
2729 while (iter.next()) {
2730 if (iter.type() == relocInfo::oop_type ) {
2731 oop_Relocation* r = iter.oop_reloc();
2732 // In this loop, we must only follow those oops directly embedded in
2733 // the code. Other oops (oop_index>0) are seen as part of scopes_oops.
2734 assert(1 == (r->oop_is_immediate()) +
2735 (r->oop_addr() >= oops_begin() && r->oop_addr() < oops_end()),
2736 "oop must be found in exactly one place");
2737 if (r->oop_is_immediate() && r->oop_value() != nullptr) {
2738 f->do_oop(r->oop_addr());
2739 }
2740 }
2741 }
2742 }
2743
2744 // Scopes
2745 // This includes oop constants not inlined in the code stream.
2746 for (oop* p = oops_begin(); p < oops_end(); p++) {
2747 if (*p == Universe::non_oop_word()) continue; // skip non-oops
2748 f->do_oop(p);
2749 }
2750 }
2751
2752 void nmethod::follow_nmethod(OopIterateClosure* cl) {
2753 // Process oops in the nmethod
2754 oops_do(cl);
2755
2756 // CodeCache unloading support
2757 mark_as_maybe_on_stack();
2758
2759 BarrierSetNMethod* bs_nm = BarrierSet::barrier_set()->barrier_set_nmethod();
2760 bs_nm->disarm(this);
2761
2762 // There's an assumption made that this function is not used by GCs that
2763 // relocate objects, and therefore we don't call fix_oop_relocations.
2764 }
2765
2766 nmethod* volatile nmethod::_oops_do_mark_nmethods;
2767
2768 void nmethod::oops_do_log_change(const char* state) {
2769 LogTarget(Trace, gc, nmethod) lt;
2770 if (lt.is_enabled()) {
2771 LogStream ls(lt);
2772 CompileTask::print(&ls, this, state, true /* short_form */);
2773 }
2774 }
2775
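// Sketch of the claim protocol implemented by the functions below (the tags
// are stored in _oops_do_mark_link): unclaimed -> weak_request -> weak_done
// (pushed onto the global list), weak_request -> strong_request (deferred
// strong processing), weak_done -> strong_done, and unclaimed -> strong_done
// directly for strong-only processing.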
2776 bool nmethod::oops_do_try_claim() {
2777 if (oops_do_try_claim_weak_request()) {
2778 nmethod* result = oops_do_try_add_to_list_as_weak_done();
2779 assert(result == nullptr, "adding to global list as weak done must always succeed.");
2780 return true;
2781 }
2782 return false;
2783 }
2784
2785 bool nmethod::oops_do_try_claim_weak_request() {
2786 assert(SafepointSynchronize::is_at_safepoint(), "only at safepoint");
2787
2788 if ((_oops_do_mark_link == nullptr) &&
2789 (AtomicAccess::replace_if_null(&_oops_do_mark_link, mark_link(this, claim_weak_request_tag)))) {
2790 oops_do_log_change("oops_do, mark weak request");
2791 return true;
2792 }
2793 return false;
2794 }
2795
2796 void nmethod::oops_do_set_strong_done(nmethod* old_head) {
2797 _oops_do_mark_link = mark_link(old_head, claim_strong_done_tag);
2798 }
2799
2800 nmethod::oops_do_mark_link* nmethod::oops_do_try_claim_strong_done() {
2801 assert(SafepointSynchronize::is_at_safepoint(), "only at safepoint");
2802
2803 oops_do_mark_link* old_next = AtomicAccess::cmpxchg(&_oops_do_mark_link, mark_link(nullptr, claim_weak_request_tag), mark_link(this, claim_strong_done_tag));
2804 if (old_next == nullptr) {
2805 oops_do_log_change("oops_do, mark strong done");
2806 }
2807 return old_next;
2808 }
2809
2810 nmethod::oops_do_mark_link* nmethod::oops_do_try_add_strong_request(nmethod::oops_do_mark_link* next) {
2811 assert(SafepointSynchronize::is_at_safepoint(), "only at safepoint");
2812 assert(next == mark_link(this, claim_weak_request_tag), "Should be claimed as weak");
2813
2814 oops_do_mark_link* old_next = AtomicAccess::cmpxchg(&_oops_do_mark_link, next, mark_link(this, claim_strong_request_tag));
2815 if (old_next == next) {
2816 oops_do_log_change("oops_do, mark strong request");
2817 }
2818 return old_next;
2819 }
2820
2821 bool nmethod::oops_do_try_claim_weak_done_as_strong_done(nmethod::oops_do_mark_link* next) {
2822 assert(SafepointSynchronize::is_at_safepoint(), "only at safepoint");
2823 assert(extract_state(next) == claim_weak_done_tag, "Should be claimed as weak done");
2824
2825 oops_do_mark_link* old_next = AtomicAccess::cmpxchg(&_oops_do_mark_link, next, mark_link(extract_nmethod(next), claim_strong_done_tag));
2826 if (old_next == next) {
2827 oops_do_log_change("oops_do, mark weak done -> mark strong done");
2828 return true;
2829 }
2830 return false;
2831 }
2832
2833 nmethod* nmethod::oops_do_try_add_to_list_as_weak_done() {
2834 assert(SafepointSynchronize::is_at_safepoint(), "only at safepoint");
2835
2836 assert(extract_state(_oops_do_mark_link) == claim_weak_request_tag ||
2837 extract_state(_oops_do_mark_link) == claim_strong_request_tag,
2838 "must be but is nmethod " PTR_FORMAT " %u", p2i(extract_nmethod(_oops_do_mark_link)), extract_state(_oops_do_mark_link));
2839
2840 nmethod* old_head = AtomicAccess::xchg(&_oops_do_mark_nmethods, this);
2841 // Self-loop if needed.
2842 if (old_head == nullptr) {
2843 old_head = this;
2844 }
2845 // Try to install end of list and weak done tag.
2846 if (AtomicAccess::cmpxchg(&_oops_do_mark_link, mark_link(this, claim_weak_request_tag), mark_link(old_head, claim_weak_done_tag)) == mark_link(this, claim_weak_request_tag)) {
2847 oops_do_log_change("oops_do, mark weak done");
2848 return nullptr;
2849 } else {
2850 return old_head;
2851 }
2852 }
2853
2854 void nmethod::oops_do_add_to_list_as_strong_done() {
2855 assert(SafepointSynchronize::is_at_safepoint(), "only at safepoint");
2856
2857 nmethod* old_head = AtomicAccess::xchg(&_oops_do_mark_nmethods, this);
2858 // Self-loop if needed.
2859 if (old_head == nullptr) {
2860 old_head = this;
2861 }
2862 assert(_oops_do_mark_link == mark_link(this, claim_strong_done_tag), "must be but is nmethod " PTR_FORMAT " state %u",
2863 p2i(extract_nmethod(_oops_do_mark_link)), extract_state(_oops_do_mark_link));
2864
2865 oops_do_set_strong_done(old_head);
2866 }
2867
2868 void nmethod::oops_do_process_weak(OopsDoProcessor* p) {
2869 if (!oops_do_try_claim_weak_request()) {
2870 // Failed to claim for weak processing.
2871 oops_do_log_change("oops_do, mark weak request fail");
2872 return;
2873 }
2874
2875 p->do_regular_processing(this);
2876
2877 nmethod* old_head = oops_do_try_add_to_list_as_weak_done();
2878 if (old_head == nullptr) {
2879 return;
2880 }
2881 oops_do_log_change("oops_do, mark weak done fail");
2882 // Adding to global list failed, another thread added a strong request.
2883 assert(extract_state(_oops_do_mark_link) == claim_strong_request_tag,
2884 "must be but is %u", extract_state(_oops_do_mark_link));
2885
2886 oops_do_log_change("oops_do, mark weak request -> mark strong done");
2887
2888 oops_do_set_strong_done(old_head);
2889 // Do missing strong processing.
2890 p->do_remaining_strong_processing(this);
2891 }
2892
2893 void nmethod::oops_do_process_strong(OopsDoProcessor* p) {
2894 oops_do_mark_link* next_raw = oops_do_try_claim_strong_done();
2895 if (next_raw == nullptr) {
2896 p->do_regular_processing(this);
2897 oops_do_add_to_list_as_strong_done();
2898 return;
2899 }
2900 // Claim failed. Figure out why and handle it.
2901 if (oops_do_has_weak_request(next_raw)) {
2902 oops_do_mark_link* old = next_raw;
2903 // Claim failed because being weak processed (state == "weak request").
2904 // Try to request deferred strong processing.
2905 next_raw = oops_do_try_add_strong_request(old);
2906 if (next_raw == old) {
2907 // Successfully requested deferred strong processing.
2908 return;
2909 }
2910 // Failed because of a concurrent transition. No longer in "weak request" state.
2911 }
2912 if (oops_do_has_any_strong_state(next_raw)) {
2913 // Already claimed for strong processing or requested for such.
2914 return;
2915 }
2916 if (oops_do_try_claim_weak_done_as_strong_done(next_raw)) {
2917 // Successfully claimed "weak done" as "strong done". Do the missing marking.
2918 p->do_remaining_strong_processing(this);
2919 return;
2920 }
2921 // Claim failed, some other thread got it.
2922 }
2923
2924 void nmethod::oops_do_marking_prologue() {
2925 assert_at_safepoint();
2926
2927 log_trace(gc, nmethod)("oops_do_marking_prologue");
2928 assert(_oops_do_mark_nmethods == nullptr, "must be empty");
2929 }
2930
2931 void nmethod::oops_do_marking_epilogue() {
2932 assert_at_safepoint();
2933
2934 nmethod* next = _oops_do_mark_nmethods;
2935 _oops_do_mark_nmethods = nullptr;
2936 if (next != nullptr) {
2937 nmethod* cur;
2938 do {
2939 cur = next;
2940 next = extract_nmethod(cur->_oops_do_mark_link);
2941 cur->_oops_do_mark_link = nullptr;
2942 DEBUG_ONLY(cur->verify_oop_relocations());
2943
2944 LogTarget(Trace, gc, nmethod) lt;
2945 if (lt.is_enabled()) {
2946 LogStream ls(lt);
2947 CompileTask::print(&ls, cur, "oops_do, unmark", /*short_form:*/ true);
2948 }
2949 // End if self-loop has been detected.
2950 } while (cur != next);
2951 }
2952 log_trace(gc, nmethod)("oops_do_marking_epilogue");
2953 }
2954
2955 inline bool includes(void* p, void* from, void* to) {
2956 return from <= p && p < to;
2957 }
2958
2959
2960 void nmethod::copy_scopes_pcs(PcDesc* pcs, int count) {
2961 assert(count >= 2, "must be sentinel values, at least");
2962
2963 #ifdef ASSERT
2964 // must be sorted and unique; we do a binary search in find_pc_desc()
2965 int prev_offset = pcs[0].pc_offset();
2966 assert(prev_offset == PcDesc::lower_offset_limit,
2967 "must start with a sentinel");
2968 for (int i = 1; i < count; i++) {
2969 int this_offset = pcs[i].pc_offset();
2970 assert(this_offset > prev_offset, "offsets must be sorted");
2971 prev_offset = this_offset;
2972 }
2973 assert(prev_offset == PcDesc::upper_offset_limit,
2974 "must end with a sentinel");
2975 #endif //ASSERT
2976
2977 int size = count * sizeof(PcDesc);
2978 assert(scopes_pcs_size() >= size, "oob");
2979 memcpy(scopes_pcs_begin(), pcs, size);
2980
2981 // Adjust the final sentinel downward.
2982 PcDesc* last_pc = &scopes_pcs_begin()[count-1];
2983 assert(last_pc->pc_offset() == PcDesc::upper_offset_limit, "sanity");
2984 last_pc->set_pc_offset(content_size() + 1);
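// The adjusted sentinel keeps the table strictly sorted and still compares
// greater than any real pc offset, since all real offsets are < content_size().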
2985 for (; last_pc + 1 < scopes_pcs_end(); last_pc += 1) {
2986 // Fill any rounding gaps with copies of the last record.
2987 last_pc[1] = last_pc[0];
2988 }
2989 // The following assert could fail if sizeof(PcDesc) is not
2990 // an integral multiple of oopSize (the rounding term).
2991 // If it fails, change the logic to always allocate a multiple
2992 // of sizeof(PcDesc), and fill unused words with copies of *last_pc.
2993 assert(last_pc + 1 == scopes_pcs_end(), "must match exactly");
2994 }
2995
2996 void nmethod::copy_scopes_data(u_char* buffer, int size) {
2997 assert(scopes_data_size() >= size, "oob");
2998 memcpy(scopes_data_begin(), buffer, size);
2999 }
3000
3001 #ifdef ASSERT
3002 static PcDesc* linear_search(int pc_offset, bool approximate, PcDesc* lower, PcDesc* upper) {
3003 PcDesc* res = nullptr;
3004 assert(lower != nullptr && lower->pc_offset() == PcDesc::lower_offset_limit,
3005 "must start with a sentinel");
3006 // lower + 1 to exclude initial sentinel
3007 for (PcDesc* p = lower + 1; p < upper; p++) {
3008 NOT_PRODUCT(--pc_nmethod_stats.pc_desc_tests); // don't count this call to match_desc
3009 if (match_desc(p, pc_offset, approximate)) {
3010 if (res == nullptr) {
3011 res = p;
3012 } else {
3013 res = (PcDesc*) badAddress;
3014 }
3015 }
3016 }
3017 return res;
3018 }
3019 #endif
3020
3021
3022 #ifndef PRODUCT
// Version of the method that collects statistics
3024 PcDesc* PcDescContainer::find_pc_desc(address pc, bool approximate, address code_begin,
3025 PcDesc* lower, PcDesc* upper) {
3026 ++pc_nmethod_stats.pc_desc_queries;
3027 if (approximate) ++pc_nmethod_stats.pc_desc_approx;
3028
3029 PcDesc* desc = _pc_desc_cache.last_pc_desc();
3030 assert(desc != nullptr, "PcDesc cache should be initialized already");
3031 if (desc->pc_offset() == (pc - code_begin)) {
3032 // Cached value matched
3033 ++pc_nmethod_stats.pc_desc_tests;
3034 ++pc_nmethod_stats.pc_desc_repeats;
3035 return desc;
3036 }
3037 return find_pc_desc_internal(pc, approximate, code_begin, lower, upper);
3038 }
3039 #endif
3040
3041 // Finds a PcDesc with real-pc equal to "pc"
3042 PcDesc* PcDescContainer::find_pc_desc_internal(address pc, bool approximate, address code_begin,
3043 PcDesc* lower_incl, PcDesc* upper_incl) {
3044 if ((pc < code_begin) ||
3045 (pc - code_begin) >= (ptrdiff_t) PcDesc::upper_offset_limit) {
3046 return nullptr; // PC is wildly out of range
3047 }
3048 int pc_offset = (int) (pc - code_begin);
3049
3050 // Check the PcDesc cache if it contains the desired PcDesc
// (This has an almost 100% hit rate.)
3052 PcDesc* res = _pc_desc_cache.find_pc_desc(pc_offset, approximate);
3053 if (res != nullptr) {
3054 assert(res == linear_search(pc_offset, approximate, lower_incl, upper_incl), "cache ok");
3055 return res;
3056 }
3057
3058 // Fallback algorithm: quasi-linear search for the PcDesc
3059 // Find the last pc_offset less than the given offset.
3060 // The successor must be the required match, if there is a match at all.
3061 // (Use a fixed radix to avoid expensive affine pointer arithmetic.)
PcDesc* lower = lower_incl; // this is the initial sentinel
3063 PcDesc* upper = upper_incl - 1; // exclude final sentinel
3064 if (lower >= upper) return nullptr; // no PcDescs at all
3065
3066 #define assert_LU_OK \
3067 /* invariant on lower..upper during the following search: */ \
3068 assert(lower->pc_offset() < pc_offset, "sanity"); \
3069 assert(upper->pc_offset() >= pc_offset, "sanity")
3070 assert_LU_OK;
3071
3072 // Use the last successful return as a split point.
3073 PcDesc* mid = _pc_desc_cache.last_pc_desc();
3074 NOT_PRODUCT(++pc_nmethod_stats.pc_desc_searches);
3075 if (mid->pc_offset() < pc_offset) {
3076 lower = mid;
3077 } else {
3078 upper = mid;
3079 }
3080
3081 // Take giant steps at first (4096, then 256, then 16, then 1)
3082 const int LOG2_RADIX = 4 /*smaller steps in debug mode:*/ DEBUG_ONLY(-1);
3083 const int RADIX = (1 << LOG2_RADIX);
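  // Worked example of the step sequence (a sketch of the arithmetic only):
  // in product builds LOG2_RADIX == 4, so 'step' starts at 1 << 12 == 4096
  // and shrinks to 256, then 16, after which the loop exits and the final
  // linear phase below advances one PcDesc at a time. Each radix phase runs
  // at most RADIX iterations, so a table of several thousand PcDescs is
  // located in a few dozen probes. In debug builds LOG2_RADIX is 3, giving
  // steps of 512, 64 and 8, which exercises the loop structure harder.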
3084 for (int step = (1 << (LOG2_RADIX*3)); step > 1; step >>= LOG2_RADIX) {
3085 while ((mid = lower + step) < upper) {
3086 assert_LU_OK;
3087 NOT_PRODUCT(++pc_nmethod_stats.pc_desc_searches);
3088 if (mid->pc_offset() < pc_offset) {
3089 lower = mid;
3090 } else {
3091 upper = mid;
3092 break;
3093 }
3094 }
3095 assert_LU_OK;
3096 }
3097
3098 // Sneak up on the value with a linear search of length ~16.
3099 while (true) {
3100 assert_LU_OK;
3101 mid = lower + 1;
3102 NOT_PRODUCT(++pc_nmethod_stats.pc_desc_searches);
3103 if (mid->pc_offset() < pc_offset) {
3104 lower = mid;
3105 } else {
3106 upper = mid;
3107 break;
3108 }
3109 }
3110 #undef assert_LU_OK
3111
3112 if (match_desc(upper, pc_offset, approximate)) {
3113 assert(upper == linear_search(pc_offset, approximate, lower_incl, upper_incl), "search mismatch");
3114 if (!Thread::current_in_asgct()) {
      // We don't want to modify the cache if we're in ASGCT,
      // which is typically called from a signal handler.
3117 _pc_desc_cache.add_pc_desc(upper);
3118 }
3119 return upper;
3120 } else {
3121 assert(nullptr == linear_search(pc_offset, approximate, lower_incl, upper_incl), "search mismatch");
3122 return nullptr;
3123 }
3124 }
3125
3126 bool nmethod::check_dependency_on(DepChange& changes) {
3127 // What has happened:
3128 // 1) a new class dependee has been added
3129 // 2) dependee and all its super classes have been marked
3130 bool found_check = false; // set true if we are upset
3131 for (Dependencies::DepStream deps(this); deps.next(); ) {
3132 // Evaluate only relevant dependencies.
3133 if (deps.spot_check_dependency_at(changes) != nullptr) {
3134 found_check = true;
3135 NOT_DEBUG(break);
3136 }
3137 }
3138 return found_check;
3139 }
3140
3141 // Called from mark_for_deoptimization, when dependee is invalidated.
3142 bool nmethod::is_dependent_on_method(Method* dependee) {
3143 for (Dependencies::DepStream deps(this); deps.next(); ) {
3144 if (deps.type() != Dependencies::evol_method)
3145 continue;
3146 Method* method = deps.method_argument(0);
3147 if (method == dependee) return true;
3148 }
3149 return false;
3150 }
3151
3152 void nmethod_init() {
3153 // make sure you didn't forget to adjust the filler fields
3154 assert(sizeof(nmethod) % oopSize == 0, "nmethod size must be multiple of a word");
3155 }
3156
3157 // -----------------------------------------------------------------------------
3158 // Verification
3159
3160 class VerifyOopsClosure: public OopClosure {
3161 nmethod* _nm;
3162 bool _ok;
3163 public:
3164 VerifyOopsClosure(nmethod* nm) : _nm(nm), _ok(true) { }
3165 bool ok() { return _ok; }
3166 virtual void do_oop(oop* p) {
3167 if (oopDesc::is_oop_or_null(*p)) return;
3168 // Print diagnostic information before calling print_nmethod().
3169 // Assertions therein might prevent call from returning.
3170 tty->print_cr("*** non-oop " PTR_FORMAT " found at " PTR_FORMAT " (offset %d)",
3171 p2i(*p), p2i(p), (int)((intptr_t)p - (intptr_t)_nm));
3172 if (_ok) {
3173 _nm->print_nmethod(true);
3174 _ok = false;
3175 }
3176 }
3177 virtual void do_oop(narrowOop* p) { ShouldNotReachHere(); }
3178 };
3179
3180 class VerifyMetadataClosure: public MetadataClosure {
3181 public:
3182 void do_metadata(Metadata* md) {
3183 if (md->is_method()) {
3184 Method* method = (Method*)md;
3185 assert(!method->is_old(), "Should not be installing old methods");
3186 }
3187 }
3188 };
3189
3190
3191 void nmethod::verify() {
3192 if (is_not_entrant())
3193 return;
3194
3195 // assert(oopDesc::is_oop(method()), "must be valid");
3196
3197 ResourceMark rm;
3198
3199 if (!CodeCache::contains(this)) {
3200 fatal("nmethod at " INTPTR_FORMAT " not in zone", p2i(this));
3201 }
3202
  if (is_native_method())
3204 return;
3205
3206 nmethod* nm = CodeCache::find_nmethod(verified_entry_point());
3207 if (nm != this) {
3208 fatal("find_nmethod did not find this nmethod (" INTPTR_FORMAT ")", p2i(this));
3209 }
3210
3211 for (PcDesc* p = scopes_pcs_begin(); p < scopes_pcs_end(); p++) {
    if (!p->verify(this)) {
3213 tty->print_cr("\t\tin nmethod at " INTPTR_FORMAT " (pcs)", p2i(this));
3214 }
3215 }
3216
3217 #ifdef ASSERT
3218 #if INCLUDE_JVMCI
3219 {
3220 // Verify that implicit exceptions that deoptimize have a PcDesc and OopMap
3221 ImmutableOopMapSet* oms = oop_maps();
3222 ImplicitExceptionTable implicit_table(this);
3223 for (uint i = 0; i < implicit_table.len(); i++) {
3224 int exec_offset = (int) implicit_table.get_exec_offset(i);
3225 if (implicit_table.get_exec_offset(i) == implicit_table.get_cont_offset(i)) {
3226 assert(pc_desc_at(code_begin() + exec_offset) != nullptr, "missing PcDesc");
3227 bool found = false;
        for (int j = 0, jmax = oms->count(); j < jmax; j++) {
          if (oms->pair_at(j)->pc_offset() == exec_offset) {
3230 found = true;
3231 break;
3232 }
3233 }
3234 assert(found, "missing oopmap");
3235 }
3236 }
3237 }
3238 #endif
3239 #endif
3240
3241 VerifyOopsClosure voc(this);
3242 oops_do(&voc);
3243 assert(voc.ok(), "embedded oops must be OK");
3244 Universe::heap()->verify_nmethod(this);
3245
3246 assert(_oops_do_mark_link == nullptr, "_oops_do_mark_link for %s should be nullptr but is " PTR_FORMAT,
3247 nm->method()->external_name(), p2i(_oops_do_mark_link));
3248 verify_scopes();
3249
3250 CompiledICLocker nm_verify(this);
3251 VerifyMetadataClosure vmc;
3252 metadata_do(&vmc);
3253 }
3254
3255
3256 void nmethod::verify_interrupt_point(address call_site, bool is_inline_cache) {
3257
3258 // Verify IC only when nmethod installation is finished.
3259 if (!is_not_installed()) {
3260 if (CompiledICLocker::is_safe(this)) {
3261 if (is_inline_cache) {
3262 CompiledIC_at(this, call_site);
3263 } else {
3264 CompiledDirectCall::at(call_site);
3265 }
3266 } else {
3267 CompiledICLocker ml_verify(this);
3268 if (is_inline_cache) {
3269 CompiledIC_at(this, call_site);
3270 } else {
3271 CompiledDirectCall::at(call_site);
3272 }
3273 }
3274 }
3275
3276 HandleMark hm(Thread::current());
3277
3278 PcDesc* pd = pc_desc_at(nativeCall_at(call_site)->return_address());
3279 assert(pd != nullptr, "PcDesc must exist");
3280 for (ScopeDesc* sd = new ScopeDesc(this, pd);
3281 !sd->is_top(); sd = sd->sender()) {
3282 sd->verify();
3283 }
3284 }
3285
3286 void nmethod::verify_scopes() {
  if (method() == nullptr) return;   // Runtime stubs have no scope
  if (method()->is_native()) return; // Ignore stub methods.
  // Iterate through all interrupt points
  // and verify that the debug information is valid.
3291 RelocIterator iter(this);
3292 while (iter.next()) {
3293 address stub = nullptr;
3294 switch (iter.type()) {
3295 case relocInfo::virtual_call_type:
3296 verify_interrupt_point(iter.addr(), true /* is_inline_cache */);
3297 break;
3298 case relocInfo::opt_virtual_call_type:
3299 stub = iter.opt_virtual_call_reloc()->static_stub();
3300 verify_interrupt_point(iter.addr(), false /* is_inline_cache */);
3301 break;
3302 case relocInfo::static_call_type:
3303 stub = iter.static_call_reloc()->static_stub();
3304 verify_interrupt_point(iter.addr(), false /* is_inline_cache */);
3305 break;
3306 case relocInfo::runtime_call_type:
3307 case relocInfo::runtime_call_w_cp_type: {
3308 address destination = iter.reloc()->value();
3309 // Right now there is no way to find out which entries support
3310 // an interrupt point. It would be nice if we had this
3311 // information in a table.
3312 break;
3313 }
3314 default:
3315 break;
3316 }
3317 assert(stub == nullptr || stub_contains(stub), "static call stub outside stub section");
3318 }
3319 }
3320
3321
3322 // -----------------------------------------------------------------------------
3323 // Printing operations
3324
3325 void nmethod::print_on_impl(outputStream* st) const {
3326 ResourceMark rm;
3327
3328 st->print("Compiled method ");
3329
3330 if (is_compiled_by_c1()) {
3331 st->print("(c1) ");
3332 } else if (is_compiled_by_c2()) {
3333 st->print("(c2) ");
3334 } else if (is_compiled_by_jvmci()) {
3335 st->print("(JVMCI) ");
3336 } else {
3337 st->print("(n/a) ");
3338 }
3339
3340 print_on_with_msg(st, nullptr);
3341
3342 if (WizardMode) {
3343 st->print("((nmethod*) " INTPTR_FORMAT ") ", p2i(this));
3344 st->print(" for method " INTPTR_FORMAT , p2i(method()));
3345 st->print(" { ");
3346 st->print_cr("%s ", state());
3347 st->print_cr("}:");
3348 }
3349 if (size () > 0) st->print_cr(" total in heap [" INTPTR_FORMAT "," INTPTR_FORMAT "] = %d",
3350 p2i(this),
3351 p2i(this) + size(),
3352 size());
3353 if (consts_size () > 0) st->print_cr(" constants [" INTPTR_FORMAT "," INTPTR_FORMAT "] = %d",
3354 p2i(consts_begin()),
3355 p2i(consts_end()),
3356 consts_size());
3357 if (insts_size () > 0) st->print_cr(" main code [" INTPTR_FORMAT "," INTPTR_FORMAT "] = %d",
3358 p2i(insts_begin()),
3359 p2i(insts_end()),
3360 insts_size());
3361 if (stub_size () > 0) st->print_cr(" stub code [" INTPTR_FORMAT "," INTPTR_FORMAT "] = %d",
3362 p2i(stub_begin()),
3363 p2i(stub_end()),
3364 stub_size());
3365 if (oops_size () > 0) st->print_cr(" oops [" INTPTR_FORMAT "," INTPTR_FORMAT "] = %d",
3366 p2i(oops_begin()),
3367 p2i(oops_end()),
3368 oops_size());
3369 if (mutable_data_size() > 0) st->print_cr(" mutable data [" INTPTR_FORMAT "," INTPTR_FORMAT "] = %d",
3370 p2i(mutable_data_begin()),
3371 p2i(mutable_data_end()),
3372 mutable_data_size());
3373 if (relocation_size() > 0) st->print_cr(" relocation [" INTPTR_FORMAT "," INTPTR_FORMAT "] = %d",
3374 p2i(relocation_begin()),
3375 p2i(relocation_end()),
3376 relocation_size());
3377 if (metadata_size () > 0) st->print_cr(" metadata [" INTPTR_FORMAT "," INTPTR_FORMAT "] = %d",
3378 p2i(metadata_begin()),
3379 p2i(metadata_end()),
3380 metadata_size());
3381 #if INCLUDE_JVMCI
3382 if (jvmci_data_size () > 0) st->print_cr(" JVMCI data [" INTPTR_FORMAT "," INTPTR_FORMAT "] = %d",
3383 p2i(jvmci_data_begin()),
3384 p2i(jvmci_data_end()),
3385 jvmci_data_size());
3386 #endif
3387 if (immutable_data_size() > 0) st->print_cr(" immutable data [" INTPTR_FORMAT "," INTPTR_FORMAT "] = %d",
3388 p2i(immutable_data_begin()),
3389 p2i(immutable_data_end()),
3390 immutable_data_size());
3391 if (dependencies_size () > 0) st->print_cr(" dependencies [" INTPTR_FORMAT "," INTPTR_FORMAT "] = %d",
3392 p2i(dependencies_begin()),
3393 p2i(dependencies_end()),
3394 dependencies_size());
3395 if (nul_chk_table_size() > 0) st->print_cr(" nul chk table [" INTPTR_FORMAT "," INTPTR_FORMAT "] = %d",
3396 p2i(nul_chk_table_begin()),
3397 p2i(nul_chk_table_end()),
3398 nul_chk_table_size());
3399 if (handler_table_size() > 0) st->print_cr(" handler table [" INTPTR_FORMAT "," INTPTR_FORMAT "] = %d",
3400 p2i(handler_table_begin()),
3401 p2i(handler_table_end()),
3402 handler_table_size());
3403 if (scopes_pcs_size () > 0) st->print_cr(" scopes pcs [" INTPTR_FORMAT "," INTPTR_FORMAT "] = %d",
3404 p2i(scopes_pcs_begin()),
3405 p2i(scopes_pcs_end()),
3406 scopes_pcs_size());
3407 if (scopes_data_size () > 0) st->print_cr(" scopes data [" INTPTR_FORMAT "," INTPTR_FORMAT "] = %d",
3408 p2i(scopes_data_begin()),
3409 p2i(scopes_data_end()),
3410 scopes_data_size());
3411 #if INCLUDE_JVMCI
3412 if (speculations_size () > 0) st->print_cr(" speculations [" INTPTR_FORMAT "," INTPTR_FORMAT "] = %d",
3413 p2i(speculations_begin()),
3414 p2i(speculations_end()),
3415 speculations_size());
3416 #endif
3417 }
3418
3419 void nmethod::print_code() {
3420 ResourceMark m;
3421 ttyLocker ttyl;
3422 // Call the specialized decode method of this class.
3423 decode(tty);
3424 }
3425
#ifndef PRODUCT // the InstanceKlass methods called here are available only then. Declared as PRODUCT_RETURN
3427
3428 void nmethod::print_dependencies_on(outputStream* out) {
3429 ResourceMark rm;
3430 stringStream st;
3431 st.print_cr("Dependencies:");
3432 for (Dependencies::DepStream deps(this); deps.next(); ) {
3433 deps.print_dependency(&st);
3434 InstanceKlass* ctxk = deps.context_type();
3435 if (ctxk != nullptr) {
3436 if (ctxk->is_dependent_nmethod(this)) {
3437 st.print_cr(" [nmethod<=klass]%s", ctxk->external_name());
3438 }
3439 }
3440 deps.log_dependency(); // put it into the xml log also
3441 }
3442 out->print_raw(st.as_string());
3443 }
3444 #endif
3445
3446 #if defined(SUPPORT_DATA_STRUCTS)
3447
3448 // Print the oops from the underlying CodeBlob.
3449 void nmethod::print_oops(outputStream* st) {
3450 ResourceMark m;
3451 st->print("Oops:");
3452 if (oops_begin() < oops_end()) {
3453 st->cr();
3454 for (oop* p = oops_begin(); p < oops_end(); p++) {
3455 Disassembler::print_location((unsigned char*)p, (unsigned char*)oops_begin(), (unsigned char*)oops_end(), st, true, false);
3456 st->print(PTR_FORMAT " ", *((uintptr_t*)p));
3457 if (Universe::contains_non_oop_word(p)) {
3458 st->print_cr("NON_OOP");
3459 continue; // skip non-oops
3460 }
3461 if (*p == nullptr) {
3462 st->print_cr("nullptr-oop");
3463 continue; // skip non-oops
3464 }
3465 (*p)->print_value_on(st);
3466 st->cr();
3467 }
3468 } else {
3469 st->print_cr(" <list empty>");
3470 }
3471 }
3472
3473 // Print metadata pool.
3474 void nmethod::print_metadata(outputStream* st) {
3475 ResourceMark m;
3476 st->print("Metadata:");
3477 if (metadata_begin() < metadata_end()) {
3478 st->cr();
3479 for (Metadata** p = metadata_begin(); p < metadata_end(); p++) {
3480 Disassembler::print_location((unsigned char*)p, (unsigned char*)metadata_begin(), (unsigned char*)metadata_end(), st, true, false);
3481 st->print(PTR_FORMAT " ", *((uintptr_t*)p));
3482 if (*p && *p != Universe::non_oop_word()) {
3483 (*p)->print_value_on(st);
3484 }
3485 st->cr();
3486 }
3487 } else {
3488 st->print_cr(" <list empty>");
3489 }
3490 }
3491
3492 #ifndef PRODUCT // ScopeDesc::print_on() is available only then. Declared as PRODUCT_RETURN
3493 void nmethod::print_scopes_on(outputStream* st) {
  // For each PcDesc that carries debug information, print its scope chain.
3495 ResourceMark rm;
3496 st->print("scopes:");
3497 if (scopes_pcs_begin() < scopes_pcs_end()) {
3498 st->cr();
3499 for (PcDesc* p = scopes_pcs_begin(); p < scopes_pcs_end(); p++) {
3500 if (p->scope_decode_offset() == DebugInformationRecorder::serialized_null)
3501 continue;
3502
3503 ScopeDesc* sd = scope_desc_at(p->real_pc(this));
3504 while (sd != nullptr) {
3505 sd->print_on(st, p); // print output ends with a newline
3506 sd = sd->sender();
3507 }
3508 }
3509 } else {
3510 st->print_cr(" <list empty>");
3511 }
3512 }
3513 #endif
3514
#ifndef PRODUCT // RelocIterator supports printing only then.
3516 void nmethod::print_relocations() {
3517 ResourceMark m; // in case methods get printed via the debugger
3518 tty->print_cr("relocations:");
3519 RelocIterator iter(this);
3520 iter.print_on(tty);
3521 }
3522 #endif
3523
3524 void nmethod::print_pcs_on(outputStream* st) {
3525 ResourceMark m; // in case methods get printed via debugger
3526 st->print("pc-bytecode offsets:");
3527 if (scopes_pcs_begin() < scopes_pcs_end()) {
3528 st->cr();
3529 for (PcDesc* p = scopes_pcs_begin(); p < scopes_pcs_end(); p++) {
3530 p->print_on(st, this); // print output ends with a newline
3531 }
3532 } else {
3533 st->print_cr(" <list empty>");
3534 }
3535 }
3536
3537 void nmethod::print_handler_table() {
3538 ExceptionHandlerTable(this).print(code_begin());
3539 }
3540
3541 void nmethod::print_nul_chk_table() {
3542 ImplicitExceptionTable(this).print(code_begin());
3543 }
3544
3545 void nmethod::print_recorded_oop(int log_n, int i) {
3546 void* value;
3547
3548 if (i == 0) {
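    // Index 0 is reserved by the oop recorder for the implicit nullptr
    // entry, so there is no stored value to fetch for it.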
3549 value = nullptr;
3550 } else {
3551 // Be careful around non-oop words. Don't create an oop
3552 // with that value, or it will assert in verification code.
3553 if (Universe::contains_non_oop_word(oop_addr_at(i))) {
3554 value = Universe::non_oop_word();
3555 } else {
3556 value = oop_at(i);
3557 }
3558 }
3559
3560 tty->print("#%*d: " INTPTR_FORMAT " ", log_n, i, p2i(value));
3561
3562 if (value == Universe::non_oop_word()) {
3563 tty->print("non-oop word");
3564 } else {
3565 if (value == nullptr) {
3566 tty->print("nullptr-oop");
3567 } else {
3568 oop_at(i)->print_value_on(tty);
3569 }
3570 }
3571
3572 tty->cr();
3573 }
3574
3575 void nmethod::print_recorded_oops() {
3576 const int n = oops_count();
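  // Width of the printed index column (up to 6 digits).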
3577 const int log_n = (n<10) ? 1 : (n<100) ? 2 : (n<1000) ? 3 : (n<10000) ? 4 : 6;
3578 tty->print("Recorded oops:");
3579 if (n > 0) {
3580 tty->cr();
3581 for (int i = 0; i < n; i++) {
3582 print_recorded_oop(log_n, i);
3583 }
3584 } else {
3585 tty->print_cr(" <list empty>");
3586 }
3587 }
3588
3589 void nmethod::print_recorded_metadata() {
3590 const int n = metadata_count();
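  // Same index-column sizing as in print_recorded_oops().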
3591 const int log_n = (n<10) ? 1 : (n<100) ? 2 : (n<1000) ? 3 : (n<10000) ? 4 : 6;
3592 tty->print("Recorded metadata:");
3593 if (n > 0) {
3594 tty->cr();
3595 for (int i = 0; i < n; i++) {
3596 Metadata* m = metadata_at(i);
3597 tty->print("#%*d: " INTPTR_FORMAT " ", log_n, i, p2i(m));
3598 if (m == (Metadata*)Universe::non_oop_word()) {
3599 tty->print("non-metadata word");
3600 } else if (m == nullptr) {
3601 tty->print("nullptr-oop");
3602 } else {
3603 Metadata::print_value_on_maybe_null(tty, m);
3604 }
3605 tty->cr();
3606 }
3607 } else {
3608 tty->print_cr(" <list empty>");
3609 }
3610 }
3611 #endif
3612
3613 #if defined(SUPPORT_ASSEMBLY) || defined(SUPPORT_ABSTRACT_ASSEMBLY)
3614
3615 void nmethod::print_constant_pool(outputStream* st) {
3616 //-----------------------------------
3617 //---< Print the constant pool >---
3618 //-----------------------------------
3619 int consts_size = this->consts_size();
  if (consts_size > 0) {
3621 unsigned char* cstart = this->consts_begin();
3622 unsigned char* cp = cstart;
3623 unsigned char* cend = cp + consts_size;
3624 unsigned int bytes_per_line = 4;
3625 unsigned int CP_alignment = 8;
3626 unsigned int n;
3627
3628 st->cr();
3629
3630 //---< print CP header to make clear what's printed >---
    if (((uintptr_t)cp & (CP_alignment - 1)) == 0) {
3632 n = bytes_per_line;
3633 st->print_cr("[Constant Pool]");
3634 Disassembler::print_location(cp, cstart, cend, st, true, true);
3635 Disassembler::print_hexdata(cp, n, st, true);
3636 st->cr();
3637 } else {
3638 n = (int)((uintptr_t)cp & (bytes_per_line-1));
3639 st->print_cr("[Constant Pool (unaligned)]");
3640 }
3641
3642 //---< print CP contents, bytes_per_line at a time >---
3643 while (cp < cend) {
3644 Disassembler::print_location(cp, cstart, cend, st, true, false);
3645 Disassembler::print_hexdata(cp, n, st, false);
3646 cp += n;
3647 n = bytes_per_line;
3648 st->cr();
3649 }
3650
3651 //---< Show potential alignment gap between constant pool and code >---
3652 cend = code_begin();
    if (cp < cend) {
3654 n = 4;
3655 st->print_cr("[Code entry alignment]");
3656 while (cp < cend) {
3657 Disassembler::print_location(cp, cstart, cend, st, false, false);
3658 cp += n;
3659 st->cr();
3660 }
3661 }
3662 } else {
3663 st->print_cr("[Constant Pool (empty)]");
3664 }
3665 st->cr();
3666 }
3667
3668 #endif
3669
3670 // Disassemble this nmethod.
3671 // Print additional debug information, if requested. This could be code
3672 // comments, block comments, profiling counters, etc.
// The undisassembled format is useful when no disassembler library is available.
3674 // The resulting hex dump (with markers) can be disassembled later, or on
3675 // another system, when/where a disassembler library is available.
3676 void nmethod::decode2(outputStream* ost) const {
3677
  // Called from frame::back_trace_with_decode without a ResourceMark.
3679 ResourceMark rm;
3680
3681 // Make sure we have a valid stream to print on.
3682 outputStream* st = ost ? ost : tty;
3683
#if defined(SUPPORT_ABSTRACT_ASSEMBLY) && !defined(SUPPORT_ASSEMBLY)
3685 const bool use_compressed_format = true;
3686 const bool compressed_with_comments = use_compressed_format && (AbstractDisassembler::show_comment() ||
3687 AbstractDisassembler::show_block_comment());
3688 #else
3689 const bool use_compressed_format = Disassembler::is_abstract();
3690 const bool compressed_with_comments = use_compressed_format && (AbstractDisassembler::show_comment() ||
3691 AbstractDisassembler::show_block_comment());
3692 #endif
3693
3694 st->cr();
3695 this->print_on(st);
3696 st->cr();
3697
3698 #if defined(SUPPORT_ASSEMBLY)
3699 //----------------------------------
3700 //---< Print real disassembly >---
3701 //----------------------------------
  if (!use_compressed_format) {
3703 st->print_cr("[Disassembly]");
3704 Disassembler::decode(const_cast<nmethod*>(this), st);
3705 st->bol();
3706 st->print_cr("[/Disassembly]");
3707 return;
3708 }
3709 #endif
3710
3711 #if defined(SUPPORT_ABSTRACT_ASSEMBLY)
3712
3713 // Compressed undisassembled disassembly format.
3714 // The following status values are defined/supported:
3715 // = 0 - currently at bol() position, nothing printed yet on current line.
3716 // = 1 - currently at position after print_location().
3717 // > 1 - in the midst of printing instruction stream bytes.
3718 int compressed_format_idx = 0;
3719 int code_comment_column = 0;
3720 const int instr_maxlen = Assembler::instr_maxlen();
3721 const uint tabspacing = 8;
3722 unsigned char* start = this->code_begin();
3723 unsigned char* p = this->code_begin();
3724 unsigned char* end = this->code_end();
3725 unsigned char* pss = p; // start of a code section (used for offsets)
3726
3727 if ((start == nullptr) || (end == nullptr)) {
3728 st->print_cr("PrintAssembly not possible due to uninitialized section pointers");
3729 return;
3730 }
3731 #endif
3732
3733 #if defined(SUPPORT_ABSTRACT_ASSEMBLY)
3734 //---< plain abstract disassembly, no comments or anything, just section headers >---
  if (use_compressed_format && !compressed_with_comments) {
3736 const_cast<nmethod*>(this)->print_constant_pool(st);
3737
3738 st->bol();
3739 st->cr();
3740 st->print_cr("Loading hsdis library failed, undisassembled code is shown in MachCode section");
3741 //---< Open the output (Marker for post-mortem disassembler) >---
3742 st->print_cr("[MachCode]");
3743 const char* header = nullptr;
3744 address p0 = p;
3745 while (p < end) {
3746 address pp = p;
3747 while ((p < end) && (header == nullptr)) {
3748 header = nmethod_section_label(p);
3749 pp = p;
3750 p += Assembler::instr_len(p);
3751 }
3752 if (pp > p0) {
3753 AbstractDisassembler::decode_range_abstract(p0, pp, start, end, st, Assembler::instr_maxlen());
3754 p0 = pp;
3755 p = pp;
3756 header = nullptr;
3757 } else if (header != nullptr) {
3758 st->bol();
3759 st->print_cr("%s", header);
3760 header = nullptr;
3761 }
3762 }
3763 //---< Close the output (Marker for post-mortem disassembler) >---
3764 st->bol();
3765 st->print_cr("[/MachCode]");
3766 return;
3767 }
3768 #endif
3769
3770 #if defined(SUPPORT_ABSTRACT_ASSEMBLY)
3771 //---< abstract disassembly with comments and section headers merged in >---
3772 if (compressed_with_comments) {
3773 const_cast<nmethod*>(this)->print_constant_pool(st);
3774
3775 st->bol();
3776 st->cr();
3777 st->print_cr("Loading hsdis library failed, undisassembled code is shown in MachCode section");
3778 //---< Open the output (Marker for post-mortem disassembler) >---
3779 st->print_cr("[MachCode]");
3780 while ((p < end) && (p != nullptr)) {
3781 const int instruction_size_in_bytes = Assembler::instr_len(p);
3782
3783 //---< Block comments for nmethod. Interrupts instruction stream, if any. >---
3784 // Outputs a bol() before and a cr() after, but only if a comment is printed.
3785 // Prints nmethod_section_label as well.
3786 if (AbstractDisassembler::show_block_comment()) {
3787 print_block_comment(st, p);
3788 if (st->position() == 0) {
3789 compressed_format_idx = 0;
3790 }
3791 }
3792
3793 //---< New location information after line break >---
3794 if (compressed_format_idx == 0) {
3795 code_comment_column = Disassembler::print_location(p, pss, end, st, false, false);
3796 compressed_format_idx = 1;
3797 }
3798
3799 //---< Code comment for current instruction. Address range [p..(p+len)) >---
3800 unsigned char* p_end = p + (ssize_t)instruction_size_in_bytes;
3801 S390_ONLY(if (p_end > end) p_end = end;) // avoid getting past the end
3802
3803 if (AbstractDisassembler::show_comment() && const_cast<nmethod*>(this)->has_code_comment(p, p_end)) {
3804 //---< interrupt instruction byte stream for code comment >---
3805 if (compressed_format_idx > 1) {
3806 st->cr(); // interrupt byte stream
3807 st->cr(); // add an empty line
3808 code_comment_column = Disassembler::print_location(p, pss, end, st, false, false);
3809 }
      const_cast<nmethod*>(this)->print_code_comment_on(st, code_comment_column, p, p_end);
3811 st->bol();
3812 compressed_format_idx = 0;
3813 }
3814
3815 //---< New location information after line break >---
3816 if (compressed_format_idx == 0) {
3817 code_comment_column = Disassembler::print_location(p, pss, end, st, false, false);
3818 compressed_format_idx = 1;
3819 }
3820
3821 //---< Nicely align instructions for readability >---
3822 if (compressed_format_idx > 1) {
3823 Disassembler::print_delimiter(st);
3824 }
3825
3826 //---< Now, finally, print the actual instruction bytes >---
3827 unsigned char* p0 = p;
3828 p = Disassembler::decode_instruction_abstract(p, st, instruction_size_in_bytes, instr_maxlen);
3829 compressed_format_idx += (int)(p - p0);
3830
3831 if (Disassembler::start_newline(compressed_format_idx-1)) {
3832 st->cr();
3833 compressed_format_idx = 0;
3834 }
3835 }
3836 //---< Close the output (Marker for post-mortem disassembler) >---
3837 st->bol();
3838 st->print_cr("[/MachCode]");
3839 return;
3840 }
3841 #endif
3842 }
3843
3844 #if defined(SUPPORT_ASSEMBLY) || defined(SUPPORT_ABSTRACT_ASSEMBLY)
3845
3846 const char* nmethod::reloc_string_for(u_char* begin, u_char* end) {
3847 RelocIterator iter(this, begin, end);
3848 bool have_one = false;
3849 while (iter.next()) {
3850 have_one = true;
3851 switch (iter.type()) {
3852 case relocInfo::none: {
3853 // Skip it and check next
3854 break;
3855 }
3856 case relocInfo::oop_type: {
3857 // Get a non-resizable resource-allocated stringStream.
3858 // Our callees make use of (nested) ResourceMarks.
3859 stringStream st(NEW_RESOURCE_ARRAY(char, 1024), 1024);
3860 oop_Relocation* r = iter.oop_reloc();
3861 oop obj = r->oop_value();
3862 st.print("oop(");
3863 if (obj == nullptr) st.print("nullptr");
3864 else obj->print_value_on(&st);
3865 st.print(")");
3866 return st.as_string();
3867 }
3868 case relocInfo::metadata_type: {
3869 stringStream st;
3870 metadata_Relocation* r = iter.metadata_reloc();
3871 Metadata* obj = r->metadata_value();
3872 st.print("metadata(");
3873 if (obj == nullptr) st.print("nullptr");
3874 else obj->print_value_on(&st);
3875 st.print(")");
3876 return st.as_string();
3877 }
3878 case relocInfo::runtime_call_type:
3879 case relocInfo::runtime_call_w_cp_type: {
3880 stringStream st;
3881 st.print("runtime_call");
3882 CallRelocation* r = (CallRelocation*)iter.reloc();
3883 address dest = r->destination();
3884 if (StubRoutines::contains(dest)) {
3885 StubCodeDesc* desc = StubCodeDesc::desc_for(dest);
3886 if (desc == nullptr) {
3887 desc = StubCodeDesc::desc_for(dest + frame::pc_return_offset);
3888 }
3889 if (desc != nullptr) {
3890 st.print(" Stub::%s", desc->name());
3891 return st.as_string();
3892 }
3893 }
3894 CodeBlob* cb = CodeCache::find_blob(dest);
3895 if (cb != nullptr) {
3896 st.print(" %s", cb->name());
3897 } else {
3898 ResourceMark rm;
3899 const int buflen = 1024;
3900 char* buf = NEW_RESOURCE_ARRAY(char, buflen);
3901 int offset;
3902 if (os::dll_address_to_function_name(dest, buf, buflen, &offset)) {
3903 st.print(" %s", buf);
3904 if (offset != 0) {
3905 st.print("+%d", offset);
3906 }
3907 }
3908 }
3909 return st.as_string();
3910 }
3911 case relocInfo::virtual_call_type: {
3912 stringStream st;
3913 st.print_raw("virtual_call");
3914 virtual_call_Relocation* r = iter.virtual_call_reloc();
3915 Method* m = r->method_value();
3916 if (m != nullptr) {
3917 assert(m->is_method(), "");
3918 m->print_short_name(&st);
3919 }
3920 return st.as_string();
3921 }
3922 case relocInfo::opt_virtual_call_type: {
3923 stringStream st;
3924 st.print_raw("optimized virtual_call");
3925 opt_virtual_call_Relocation* r = iter.opt_virtual_call_reloc();
3926 Method* m = r->method_value();
3927 if (m != nullptr) {
3928 assert(m->is_method(), "");
3929 m->print_short_name(&st);
3930 }
3931 return st.as_string();
3932 }
3933 case relocInfo::static_call_type: {
3934 stringStream st;
3935 st.print_raw("static_call");
3936 static_call_Relocation* r = iter.static_call_reloc();
3937 Method* m = r->method_value();
3938 if (m != nullptr) {
3939 assert(m->is_method(), "");
3940 m->print_short_name(&st);
3941 }
3942 return st.as_string();
3943 }
3944 case relocInfo::static_stub_type: return "static_stub";
3945 case relocInfo::external_word_type: return "external_word";
3946 case relocInfo::internal_word_type: return "internal_word";
3947 case relocInfo::section_word_type: return "section_word";
3948 case relocInfo::poll_type: return "poll";
3949 case relocInfo::poll_return_type: return "poll_return";
3950 case relocInfo::trampoline_stub_type: return "trampoline_stub";
3951 case relocInfo::entry_guard_type: return "entry_guard";
3952 case relocInfo::post_call_nop_type: return "post_call_nop";
3953 case relocInfo::barrier_type: {
3954 barrier_Relocation* const reloc = iter.barrier_reloc();
3955 stringStream st;
3956 st.print("barrier format=%d", reloc->format());
3957 return st.as_string();
3958 }
3959
3960 case relocInfo::type_mask: return "type_bit_mask";
3961
3962 default: {
3963 stringStream st;
3964 st.print("unknown relocInfo=%d", (int) iter.type());
3965 return st.as_string();
3966 }
3967 }
3968 }
3969 return have_one ? "other" : nullptr;
3970 }
3971
3972 // Return the last scope in (begin..end]
3973 ScopeDesc* nmethod::scope_desc_in(address begin, address end) {
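  // pc_desc_near(begin + 1) makes the interval half-open on the left:
  // a PcDesc exactly at 'begin' is deliberately excluded.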
3974 PcDesc* p = pc_desc_near(begin+1);
3975 if (p != nullptr && p->real_pc(this) <= end) {
3976 return new ScopeDesc(this, p);
3977 }
3978 return nullptr;
3979 }
3980
3981 const char* nmethod::nmethod_section_label(address pos) const {
3982 const char* label = nullptr;
3983 if (pos == code_begin()) label = "[Instructions begin]";
3984 if (pos == entry_point()) label = "[Entry Point]";
3985 if (pos == verified_entry_point()) label = "[Verified Entry Point]";
3986 if (pos == consts_begin() && pos != insts_begin()) label = "[Constants]";
3987 // Check stub_code before checking exception_handler or deopt_handler.
3988 if (pos == this->stub_begin()) label = "[Stub Code]";
3989 if (JVMCI_ONLY(_exception_offset >= 0 &&) pos == exception_begin()) label = "[Exception Handler]";
3990 if (JVMCI_ONLY(_deopt_handler_offset != -1 &&) pos == deopt_handler_begin()) label = "[Deopt Handler Code]";
3991 return label;
3992 }
3993
3994 void nmethod::print_nmethod_labels(outputStream* stream, address block_begin, bool print_section_labels) const {
3995 if (print_section_labels) {
3996 const char* label = nmethod_section_label(block_begin);
3997 if (label != nullptr) {
3998 stream->bol();
3999 stream->print_cr("%s", label);
4000 }
4001 }
4002
4003 if (block_begin == entry_point()) {
4004 Method* m = method();
4005 if (m != nullptr) {
4006 stream->print(" # ");
4007 m->print_value_on(stream);
4008 stream->cr();
4009 }
4010 if (m != nullptr && !is_osr_method()) {
4011 ResourceMark rm;
4012 int sizeargs = m->size_of_parameters();
4013 BasicType* sig_bt = NEW_RESOURCE_ARRAY(BasicType, sizeargs);
4014 VMRegPair* regs = NEW_RESOURCE_ARRAY(VMRegPair, sizeargs);
4015 {
4016 int sig_index = 0;
4017 if (!m->is_static())
4018 sig_bt[sig_index++] = T_OBJECT; // 'this'
4019 for (SignatureStream ss(m->signature()); !ss.at_return_type(); ss.next()) {
4020 BasicType t = ss.type();
4021 sig_bt[sig_index++] = t;
4022 if (type2size[t] == 2) {
4023 sig_bt[sig_index++] = T_VOID;
4024 } else {
4025 assert(type2size[t] == 1, "size is 1 or 2");
4026 }
4027 }
4028 assert(sig_index == sizeargs, "");
4029 }
4030 const char* spname = "sp"; // make arch-specific?
4031 SharedRuntime::java_calling_convention(sig_bt, regs, sizeargs);
4032 int stack_slot_offset = this->frame_size() * wordSize;
4033 int tab1 = 14, tab2 = 24;
4034 int sig_index = 0;
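    // arg_index == -1 denotes the implicit 'this' argument of an instance
    // method; explicit parameters are numbered from 0.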
4035 int arg_index = (m->is_static() ? 0 : -1);
4036 bool did_old_sp = false;
4037 for (SignatureStream ss(m->signature()); !ss.at_return_type(); ) {
4038 bool at_this = (arg_index == -1);
4039 bool at_old_sp = false;
4040 BasicType t = (at_this ? T_OBJECT : ss.type());
4041 assert(t == sig_bt[sig_index], "sigs in sync");
4042 if (at_this)
4043 stream->print(" # this: ");
4044 else
4045 stream->print(" # parm%d: ", arg_index);
4046 stream->move_to(tab1);
4047 VMReg fst = regs[sig_index].first();
4048 VMReg snd = regs[sig_index].second();
4049 if (fst->is_reg()) {
4050 stream->print("%s", fst->name());
4051 if (snd->is_valid()) {
4052 stream->print(":%s", snd->name());
4053 }
4054 } else if (fst->is_stack()) {
4055 int offset = fst->reg2stack() * VMRegImpl::stack_slot_size + stack_slot_offset;
4056 if (offset == stack_slot_offset) at_old_sp = true;
4057 stream->print("[%s+0x%x]", spname, offset);
4058 } else {
4059 stream->print("reg%d:%d??", (int)(intptr_t)fst, (int)(intptr_t)snd);
4060 }
4061 stream->print(" ");
4062 stream->move_to(tab2);
4063 stream->print("= ");
4064 if (at_this) {
4065 m->method_holder()->print_value_on(stream);
4066 } else {
4067 bool did_name = false;
4068 if (!at_this && ss.is_reference()) {
4069 Symbol* name = ss.as_symbol();
4070 name->print_value_on(stream);
4071 did_name = true;
4072 }
4073 if (!did_name)
4074 stream->print("%s", type2name(t));
4075 }
4076 if (at_old_sp) {
4077 stream->print(" (%s of caller)", spname);
4078 did_old_sp = true;
4079 }
4080 stream->cr();
4081 sig_index += type2size[t];
4082 arg_index += 1;
4083 if (!at_this) ss.next();
4084 }
4085 if (!did_old_sp) {
4086 stream->print(" # ");
4087 stream->move_to(tab1);
4088 stream->print("[%s+0x%x]", spname, stack_slot_offset);
4089 stream->print(" (%s of caller)", spname);
4090 stream->cr();
4091 }
4092 }
4093 }
4094 }
4095
4096 // Returns whether this nmethod has code comments.
4097 bool nmethod::has_code_comment(address begin, address end) {
4098 // scopes?
4099 ScopeDesc* sd = scope_desc_in(begin, end);
4100 if (sd != nullptr) return true;
4101
4102 // relocations?
4103 const char* str = reloc_string_for(begin, end);
4104 if (str != nullptr) return true;
4105
4106 // implicit exceptions?
4107 int cont_offset = ImplicitExceptionTable(this).continuation_offset((uint)(begin - code_begin()));
4108 if (cont_offset != 0) return true;
4109
4110 return false;
4111 }
4112
4113 void nmethod::print_code_comment_on(outputStream* st, int column, address begin, address end) {
4114 ImplicitExceptionTable implicit_table(this);
4115 int pc_offset = (int)(begin - code_begin());
4116 int cont_offset = implicit_table.continuation_offset(pc_offset);
4117 bool oop_map_required = false;
4118 if (cont_offset != 0) {
4119 st->move_to(column, 6, 0);
4120 if (pc_offset == cont_offset) {
4121 st->print("; implicit exception: deoptimizes");
4122 oop_map_required = true;
4123 } else {
4124 st->print("; implicit exception: dispatches to " INTPTR_FORMAT, p2i(code_begin() + cont_offset));
4125 }
4126 }
4127
4128 // Find an oopmap in (begin, end]. We use the odd half-closed
4129 // interval so that oop maps and scope descs which are tied to the
4130 // byte after a call are printed with the call itself. OopMaps
4131 // associated with implicit exceptions are printed with the implicit
4132 // instruction.
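  // For example, an oop map recorded at the return address of a call
  // (pc == end) prints together with that call, while a JVMCI implicit
  // deoptimization (continuation offset equal to its own pc offset) has
  // its map printed at the faulting instruction itself (pc == begin).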
4133 address base = code_begin();
4134 ImmutableOopMapSet* oms = oop_maps();
4135 if (oms != nullptr) {
4136 for (int i = 0, imax = oms->count(); i < imax; i++) {
4137 const ImmutableOopMapPair* pair = oms->pair_at(i);
4138 const ImmutableOopMap* om = pair->get_from(oms);
4139 address pc = base + pair->pc_offset();
4140 if (pc >= begin) {
4141 #if INCLUDE_JVMCI
4142 bool is_implicit_deopt = implicit_table.continuation_offset(pair->pc_offset()) == (uint) pair->pc_offset();
4143 #else
4144 bool is_implicit_deopt = false;
4145 #endif
        if (is_implicit_deopt ? (pc == begin) : (pc > begin && pc <= end)) {
4147 st->move_to(column, 6, 0);
4148 st->print("; ");
4149 om->print_on(st);
4150 oop_map_required = false;
4151 }
4152 }
4153 if (pc > end) {
4154 break;
4155 }
4156 }
4157 }
4158 assert(!oop_map_required, "missed oopmap");
4159
4160 Thread* thread = Thread::current();
4161
4162 // Print any debug info present at this pc.
4163 ScopeDesc* sd = scope_desc_in(begin, end);
4164 if (sd != nullptr) {
4165 st->move_to(column, 6, 0);
4166 if (sd->bci() == SynchronizationEntryBCI) {
4167 st->print(";*synchronization entry");
4168 } else if (sd->bci() == AfterBci) {
4169 st->print(";* method exit (unlocked if synchronized)");
4170 } else if (sd->bci() == UnwindBci) {
4171 st->print(";* unwind (locked if synchronized)");
4172 } else if (sd->bci() == AfterExceptionBci) {
4173 st->print(";* unwind (unlocked if synchronized)");
4174 } else if (sd->bci() == UnknownBci) {
4175 st->print(";* unknown");
4176 } else if (sd->bci() == InvalidFrameStateBci) {
4177 st->print(";* invalid frame state");
4178 } else {
4179 if (sd->method() == nullptr) {
4180 st->print("method is nullptr");
4181 } else if (sd->method()->is_native()) {
4182 st->print("method is native");
4183 } else {
4184 Bytecodes::Code bc = sd->method()->java_code_at(sd->bci());
4185 st->print(";*%s", Bytecodes::name(bc));
4186 switch (bc) {
4187 case Bytecodes::_invokevirtual:
4188 case Bytecodes::_invokespecial:
4189 case Bytecodes::_invokestatic:
4190 case Bytecodes::_invokeinterface:
4191 {
4192 Bytecode_invoke invoke(methodHandle(thread, sd->method()), sd->bci());
4193 st->print(" ");
4194 if (invoke.name() != nullptr)
4195 invoke.name()->print_symbol_on(st);
4196 else
4197 st->print("<UNKNOWN>");
4198 break;
4199 }
4200 case Bytecodes::_getfield:
4201 case Bytecodes::_putfield:
4202 case Bytecodes::_getstatic:
4203 case Bytecodes::_putstatic:
4204 {
4205 Bytecode_field field(methodHandle(thread, sd->method()), sd->bci());
4206 st->print(" ");
4207 if (field.name() != nullptr)
4208 field.name()->print_symbol_on(st);
4209 else
4210 st->print("<UNKNOWN>");
4211 }
4212 default:
4213 break;
4214 }
4215 }
4216 st->print(" {reexecute=%d rethrow=%d return_oop=%d}", sd->should_reexecute(), sd->rethrow_exception(), sd->return_oop());
4217 }
4218
4219 // Print all scopes
4220 for (;sd != nullptr; sd = sd->sender()) {
4221 st->move_to(column, 6, 0);
4222 st->print("; -");
4223 if (sd->should_reexecute()) {
4224 st->print(" (reexecute)");
4225 }
        if (sd->method() == nullptr) {
          st->print("method is nullptr");
        } else {
          sd->method()->print_short_name(st);
          // Only query the line number when the method is present.
          int lineno = sd->method()->line_number_from_bci(sd->bci());
          if (lineno != -1) {
            st->print("@%d (line %d)", sd->bci(), lineno);
          } else {
            st->print("@%d", sd->bci());
          }
        }
        st->cr();
4238 }
4239 }
4240
4241 // Print relocation information
4242 // Prevent memory leak: allocating without ResourceMark.
4243 ResourceMark rm;
4244 const char* str = reloc_string_for(begin, end);
4245 if (str != nullptr) {
4246 if (sd != nullptr) st->cr();
4247 st->move_to(column, 6, 0);
4248 st->print("; {%s}", str);
4249 }
4250 }
4251
4252 #endif
4253
4254 address nmethod::call_instruction_address(address pc) const {
4255 if (NativeCall::is_call_before(pc)) {
4256 NativeCall *ncall = nativeCall_before(pc);
4257 return ncall->instruction_address();
4258 }
4259 return nullptr;
4260 }
4261
4262 void nmethod::print_value_on_impl(outputStream* st) const {
4263 st->print_cr("nmethod");
4264 #if defined(SUPPORT_DATA_STRUCTS)
4265 print_on_with_msg(st, nullptr);
4266 #endif
4267 }
4268
4269 #ifndef PRODUCT
4270
4271 void nmethod::print_calls(outputStream* st) {
4272 RelocIterator iter(this);
4273 while (iter.next()) {
4274 switch (iter.type()) {
4275 case relocInfo::virtual_call_type: {
4276 CompiledICLocker ml_verify(this);
4277 CompiledIC_at(&iter)->print();
4278 break;
4279 }
4280 case relocInfo::static_call_type:
4281 case relocInfo::opt_virtual_call_type:
4282 st->print_cr("Direct call at " INTPTR_FORMAT, p2i(iter.reloc()->addr()));
4283 CompiledDirectCall::at(iter.reloc())->print();
4284 break;
4285 default:
4286 break;
4287 }
4288 }
4289 }
4290
4291 void nmethod::print_statistics() {
4292 ttyLocker ttyl;
4293 if (xtty != nullptr) xtty->head("statistics type='nmethod'");
4294 native_nmethod_stats.print_native_nmethod_stats();
4295 #ifdef COMPILER1
4296 c1_java_nmethod_stats.print_nmethod_stats("C1");
4297 #endif
4298 #ifdef COMPILER2
4299 c2_java_nmethod_stats.print_nmethod_stats("C2");
4300 #endif
4301 #if INCLUDE_JVMCI
4302 jvmci_java_nmethod_stats.print_nmethod_stats("JVMCI");
4303 #endif
4304 unknown_java_nmethod_stats.print_nmethod_stats("Unknown");
4305 DebugInformationRecorder::print_statistics();
4306 pc_nmethod_stats.print_pc_stats();
4307 Dependencies::print_statistics();
4308 ExternalsRecorder::print_statistics();
4309 if (xtty != nullptr) xtty->tail("statistics");
4310 }
4311
4312 #endif // !PRODUCT
4313
4314 #if INCLUDE_JVMCI
4315 void nmethod::update_speculation(JavaThread* thread) {
4316 jlong speculation = thread->pending_failed_speculation();
4317 if (speculation != 0) {
4318 guarantee(jvmci_nmethod_data() != nullptr, "failed speculation in nmethod without failed speculation list");
4319 jvmci_nmethod_data()->add_failed_speculation(this, speculation);
4320 thread->set_pending_failed_speculation(0);
4321 }
4322 }
4323
4324 const char* nmethod::jvmci_name() {
4325 if (jvmci_nmethod_data() != nullptr) {
4326 return jvmci_nmethod_data()->name();
4327 }
4328 return nullptr;
4329 }
4330
4331 bool nmethod::jvmci_skip_profile_deopt() const {
4332 return jvmci_nmethod_data() != nullptr && !jvmci_nmethod_data()->profile_deopt();
4333 }
4334 #endif