1 /*
2 * Copyright (c) 1997, 2025, Oracle and/or its affiliates. All rights reserved.
3 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
4 *
5 * This code is free software; you can redistribute it and/or modify it
6 * under the terms of the GNU General Public License version 2 only, as
7 * published by the Free Software Foundation.
8 *
9 * This code is distributed in the hope that it will be useful, but WITHOUT
10 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
11 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
12 * version 2 for more details (a copy is included in the LICENSE file that
13 * accompanied this code).
14 *
15 * You should have received a copy of the GNU General Public License version
16 * 2 along with this work; if not, write to the Free Software Foundation,
17 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
18 *
19 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
20 * or visit www.oracle.com if you need additional information or have any
21 * questions.
22 *
23 */
24
25 #include "asm/assembler.inline.hpp"
26 #include "cds/cdsConfig.hpp"
27 #include "code/codeCache.hpp"
28 #include "code/compiledIC.hpp"
29 #include "code/dependencies.hpp"
30 #include "code/nativeInst.hpp"
31 #include "code/nmethod.inline.hpp"
32 #include "code/scopeDesc.hpp"
33 #include "compiler/abstractCompiler.hpp"
34 #include "compiler/compilationLog.hpp"
35 #include "compiler/compileBroker.hpp"
36 #include "compiler/compileLog.hpp"
37 #include "compiler/compilerDirectives.hpp"
38 #include "compiler/compilerOracle.hpp"
39 #include "compiler/compileTask.hpp"
40 #include "compiler/directivesParser.hpp"
41 #include "compiler/disassembler.hpp"
42 #include "compiler/oopMap.inline.hpp"
43 #include "gc/shared/barrierSet.hpp"
44 #include "gc/shared/barrierSetNMethod.hpp"
45 #include "gc/shared/classUnloadingContext.hpp"
46 #include "gc/shared/collectedHeap.hpp"
47 #include "interpreter/bytecode.inline.hpp"
48 #include "jvm.h"
49 #include "logging/log.hpp"
50 #include "logging/logStream.hpp"
51 #include "memory/allocation.inline.hpp"
52 #include "memory/resourceArea.hpp"
53 #include "memory/universe.hpp"
54 #include "oops/access.inline.hpp"
55 #include "oops/klass.inline.hpp"
56 #include "oops/method.inline.hpp"
57 #include "oops/methodData.hpp"
58 #include "oops/oop.inline.hpp"
59 #include "oops/weakHandle.inline.hpp"
60 #include "prims/jvmtiImpl.hpp"
61 #include "prims/jvmtiThreadState.hpp"
62 #include "prims/methodHandles.hpp"
63 #include "runtime/atomicAccess.hpp"
64 #include "runtime/continuation.hpp"
65 #include "runtime/deoptimization.hpp"
66 #include "runtime/flags/flagSetting.hpp"
67 #include "runtime/frame.inline.hpp"
68 #include "runtime/handles.inline.hpp"
69 #include "runtime/jniHandles.inline.hpp"
70 #include "runtime/orderAccess.hpp"
71 #include "runtime/os.hpp"
72 #include "runtime/safepointVerifiers.hpp"
73 #include "runtime/serviceThread.hpp"
74 #include "runtime/sharedRuntime.hpp"
75 #include "runtime/signature.hpp"
76 #include "runtime/threadWXSetters.inline.hpp"
77 #include "runtime/vmThread.hpp"
78 #include "utilities/align.hpp"
79 #include "utilities/copy.hpp"
80 #include "utilities/dtrace.hpp"
81 #include "utilities/events.hpp"
82 #include "utilities/globalDefinitions.hpp"
83 #include "utilities/hashTable.hpp"
84 #include "utilities/xmlstream.hpp"
85 #if INCLUDE_JVMCI
86 #include "jvmci/jvmciRuntime.hpp"
87 #endif
88
89 #ifdef DTRACE_ENABLED
90
91 // Only bother with this argument setup if dtrace is available
92
93 #define DTRACE_METHOD_UNLOAD_PROBE(method) \
94 { \
95 Method* m = (method); \
96 if (m != nullptr) { \
97 Symbol* klass_name = m->klass_name(); \
98 Symbol* name = m->name(); \
99 Symbol* signature = m->signature(); \
100 HOTSPOT_COMPILED_METHOD_UNLOAD( \
101 (char *) klass_name->bytes(), klass_name->utf8_length(), \
102 (char *) name->bytes(), name->utf8_length(), \
103 (char *) signature->bytes(), signature->utf8_length()); \
104 } \
105 }
106
107 #else // ndef DTRACE_ENABLED
108
109 #define DTRACE_METHOD_UNLOAD_PROBE(method)
110
111 #endif
112
113 // Cast from int value to narrow type
114 #define CHECKED_CAST(result, T, thing) \
115 result = static_cast<T>(thing); \
116 guarantee(static_cast<int>(result) == thing, "failed: %d != %d", static_cast<int>(result), thing);
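// Example (as used for the oops section size in the native-wrapper constructor
// below):
//   CHECKED_CAST(_oops_size, uint16_t, align_up(code_buffer->total_oop_size(), oopSize));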
117
118 //---------------------------------------------------------------------------------
119 // NMethod statistics
120 // They are printed under various flags, including:
121 // PrintC1Statistics, PrintOptoStatistics, LogVMOutput, and LogCompilation.
// (In the latter two cases, they, like other stats, are printed to the log only.)
123
124 #ifndef PRODUCT
125 // These variables are put into one block to reduce relocations
126 // and make it simpler to print from the debugger.
127 struct java_nmethod_stats_struct {
128 uint nmethod_count;
129 uint total_nm_size;
130 uint total_immut_size;
131 uint total_mut_size;
132 uint relocation_size;
133 uint consts_size;
134 uint insts_size;
135 uint stub_size;
136 uint oops_size;
137 uint metadata_size;
138 uint dependencies_size;
139 uint nul_chk_table_size;
140 uint handler_table_size;
141 uint scopes_pcs_size;
142 uint scopes_data_size;
143 #if INCLUDE_JVMCI
144 uint speculations_size;
145 uint jvmci_data_size;
146 #endif
147
148 void note_nmethod(nmethod* nm) {
149 nmethod_count += 1;
150 total_nm_size += nm->size();
151 total_immut_size += nm->immutable_data_size();
152 total_mut_size += nm->mutable_data_size();
153 relocation_size += nm->relocation_size();
154 consts_size += nm->consts_size();
155 insts_size += nm->insts_size();
156 stub_size += nm->stub_size();
157 oops_size += nm->oops_size();
158 metadata_size += nm->metadata_size();
159 scopes_data_size += nm->scopes_data_size();
160 scopes_pcs_size += nm->scopes_pcs_size();
161 dependencies_size += nm->dependencies_size();
162 handler_table_size += nm->handler_table_size();
163 nul_chk_table_size += nm->nul_chk_table_size();
164 #if INCLUDE_JVMCI
165 speculations_size += nm->speculations_size();
166 jvmci_data_size += nm->jvmci_data_size();
167 #endif
168 }
169 void print_nmethod_stats(const char* name) {
170 if (nmethod_count == 0) return;
171 tty->print_cr("Statistics for %u bytecoded nmethods for %s:", nmethod_count, name);
172 uint total_size = total_nm_size + total_immut_size + total_mut_size;
173 if (total_nm_size != 0) {
174 tty->print_cr(" total size = %u (100%%)", total_size);
175 tty->print_cr(" in CodeCache = %u (%f%%)", total_nm_size, (total_nm_size * 100.0f)/total_size);
176 }
177 uint header_size = (uint)(nmethod_count * sizeof(nmethod));
178 if (nmethod_count != 0) {
179 tty->print_cr(" header = %u (%f%%)", header_size, (header_size * 100.0f)/total_nm_size);
180 }
181 if (consts_size != 0) {
182 tty->print_cr(" constants = %u (%f%%)", consts_size, (consts_size * 100.0f)/total_nm_size);
183 }
184 if (insts_size != 0) {
185 tty->print_cr(" main code = %u (%f%%)", insts_size, (insts_size * 100.0f)/total_nm_size);
186 }
187 if (stub_size != 0) {
188 tty->print_cr(" stub code = %u (%f%%)", stub_size, (stub_size * 100.0f)/total_nm_size);
189 }
190 if (oops_size != 0) {
191 tty->print_cr(" oops = %u (%f%%)", oops_size, (oops_size * 100.0f)/total_nm_size);
192 }
193 if (total_mut_size != 0) {
194 tty->print_cr(" mutable data = %u (%f%%)", total_mut_size, (total_mut_size * 100.0f)/total_size);
195 }
196 if (relocation_size != 0) {
197 tty->print_cr(" relocation = %u (%f%%)", relocation_size, (relocation_size * 100.0f)/total_mut_size);
198 }
199 if (metadata_size != 0) {
200 tty->print_cr(" metadata = %u (%f%%)", metadata_size, (metadata_size * 100.0f)/total_mut_size);
201 }
202 #if INCLUDE_JVMCI
203 if (jvmci_data_size != 0) {
204 tty->print_cr(" JVMCI data = %u (%f%%)", jvmci_data_size, (jvmci_data_size * 100.0f)/total_mut_size);
205 }
206 #endif
207 if (total_immut_size != 0) {
208 tty->print_cr(" immutable data = %u (%f%%)", total_immut_size, (total_immut_size * 100.0f)/total_size);
209 }
210 if (dependencies_size != 0) {
211 tty->print_cr(" dependencies = %u (%f%%)", dependencies_size, (dependencies_size * 100.0f)/total_immut_size);
212 }
213 if (nul_chk_table_size != 0) {
214 tty->print_cr(" nul chk table = %u (%f%%)", nul_chk_table_size, (nul_chk_table_size * 100.0f)/total_immut_size);
215 }
216 if (handler_table_size != 0) {
217 tty->print_cr(" handler table = %u (%f%%)", handler_table_size, (handler_table_size * 100.0f)/total_immut_size);
218 }
219 if (scopes_pcs_size != 0) {
220 tty->print_cr(" scopes pcs = %u (%f%%)", scopes_pcs_size, (scopes_pcs_size * 100.0f)/total_immut_size);
221 }
222 if (scopes_data_size != 0) {
223 tty->print_cr(" scopes data = %u (%f%%)", scopes_data_size, (scopes_data_size * 100.0f)/total_immut_size);
224 }
225 #if INCLUDE_JVMCI
226 if (speculations_size != 0) {
227 tty->print_cr(" speculations = %u (%f%%)", speculations_size, (speculations_size * 100.0f)/total_immut_size);
228 }
229 #endif
230 }
231 };
232
233 struct native_nmethod_stats_struct {
234 uint native_nmethod_count;
235 uint native_total_size;
236 uint native_relocation_size;
237 uint native_insts_size;
238 uint native_oops_size;
239 uint native_metadata_size;
240 void note_native_nmethod(nmethod* nm) {
241 native_nmethod_count += 1;
242 native_total_size += nm->size();
243 native_relocation_size += nm->relocation_size();
244 native_insts_size += nm->insts_size();
245 native_oops_size += nm->oops_size();
246 native_metadata_size += nm->metadata_size();
247 }
248 void print_native_nmethod_stats() {
249 if (native_nmethod_count == 0) return;
250 tty->print_cr("Statistics for %u native nmethods:", native_nmethod_count);
251 if (native_total_size != 0) tty->print_cr(" N. total size = %u", native_total_size);
252 if (native_relocation_size != 0) tty->print_cr(" N. relocation = %u", native_relocation_size);
253 if (native_insts_size != 0) tty->print_cr(" N. main code = %u", native_insts_size);
254 if (native_oops_size != 0) tty->print_cr(" N. oops = %u", native_oops_size);
255 if (native_metadata_size != 0) tty->print_cr(" N. metadata = %u", native_metadata_size);
256 }
257 };
258
259 struct pc_nmethod_stats_struct {
  uint pc_desc_init;    // number of initializations of the cache (= number of caches)
261 uint pc_desc_queries; // queries to nmethod::find_pc_desc
262 uint pc_desc_approx; // number of those which have approximate true
263 uint pc_desc_repeats; // number of _pc_descs[0] hits
264 uint pc_desc_hits; // number of LRU cache hits
265 uint pc_desc_tests; // total number of PcDesc examinations
266 uint pc_desc_searches; // total number of quasi-binary search steps
  uint pc_desc_adds;    // number of LRU cache insertions
268
269 void print_pc_stats() {
270 tty->print_cr("PcDesc Statistics: %u queries, %.2f comparisons per query",
271 pc_desc_queries,
272 (double)(pc_desc_tests + pc_desc_searches)
273 / pc_desc_queries);
274 tty->print_cr(" caches=%d queries=%u/%u, hits=%u+%u, tests=%u+%u, adds=%u",
275 pc_desc_init,
276 pc_desc_queries, pc_desc_approx,
277 pc_desc_repeats, pc_desc_hits,
278 pc_desc_tests, pc_desc_searches, pc_desc_adds);
279 }
280 };
281
282 #ifdef COMPILER1
283 static java_nmethod_stats_struct c1_java_nmethod_stats;
284 #endif
285 #ifdef COMPILER2
286 static java_nmethod_stats_struct c2_java_nmethod_stats;
287 #endif
288 #if INCLUDE_JVMCI
289 static java_nmethod_stats_struct jvmci_java_nmethod_stats;
290 #endif
291 static java_nmethod_stats_struct unknown_java_nmethod_stats;
292
293 static native_nmethod_stats_struct native_nmethod_stats;
294 static pc_nmethod_stats_struct pc_nmethod_stats;
295
296 static void note_java_nmethod(nmethod* nm) {
297 #ifdef COMPILER1
298 if (nm->is_compiled_by_c1()) {
299 c1_java_nmethod_stats.note_nmethod(nm);
300 } else
301 #endif
302 #ifdef COMPILER2
303 if (nm->is_compiled_by_c2()) {
304 c2_java_nmethod_stats.note_nmethod(nm);
305 } else
306 #endif
307 #if INCLUDE_JVMCI
308 if (nm->is_compiled_by_jvmci()) {
309 jvmci_java_nmethod_stats.note_nmethod(nm);
310 } else
311 #endif
312 {
313 unknown_java_nmethod_stats.note_nmethod(nm);
314 }
315 }
316 #endif // !PRODUCT
317
318 //---------------------------------------------------------------------------------
319
320
321 ExceptionCache::ExceptionCache(Handle exception, address pc, address handler) {
322 assert(pc != nullptr, "Must be non null");
323 assert(exception.not_null(), "Must be non null");
324 assert(handler != nullptr, "Must be non null");
325
326 _count = 0;
327 _exception_type = exception->klass();
328 _next = nullptr;
329 _purge_list_next = nullptr;
330
331 add_address_and_handler(pc,handler);
332 }
333
334
335 address ExceptionCache::match(Handle exception, address pc) {
336 assert(pc != nullptr,"Must be non null");
337 assert(exception.not_null(),"Must be non null");
338 if (exception->klass() == exception_type()) {
339 return (test_address(pc));
340 }
341
342 return nullptr;
343 }
344
345
bool ExceptionCache::match_exception_with_space(Handle exception) {
  assert(exception.not_null(), "Must be non null");
  return exception->klass() == exception_type() && count() < cache_size;
}
353
354
355 address ExceptionCache::test_address(address addr) {
356 int limit = count();
357 for (int i = 0; i < limit; i++) {
358 if (pc_at(i) == addr) {
359 return handler_at(i);
360 }
361 }
362 return nullptr;
363 }
364
365
366 bool ExceptionCache::add_address_and_handler(address addr, address handler) {
367 if (test_address(addr) == handler) return true;
368
369 int index = count();
370 if (index < cache_size) {
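    // Readers scan the entries lock-free (see test_address), so the new entry
    // is published by storing pc and handler first and incrementing the count
    // last.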
371 set_pc_at(index, addr);
372 set_handler_at(index, handler);
373 increment_count();
374 return true;
375 }
376 return false;
377 }
378
379 ExceptionCache* ExceptionCache::next() {
380 return AtomicAccess::load(&_next);
381 }
382
383 void ExceptionCache::set_next(ExceptionCache *ec) {
384 AtomicAccess::store(&_next, ec);
385 }
386
387 //-----------------------------------------------------------------------------
388
389
390 // Helper used by both find_pc_desc methods.
391 static inline bool match_desc(PcDesc* pc, int pc_offset, bool approximate) {
392 NOT_PRODUCT(++pc_nmethod_stats.pc_desc_tests);
393 if (!approximate) {
394 return pc->pc_offset() == pc_offset;
395 } else {
396 // Do not look before the sentinel
397 assert(pc_offset > PcDesc::lower_offset_limit, "illegal pc_offset");
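    // An approximate match succeeds when pc_offset falls within
    // ((pc-1)->pc_offset(), pc->pc_offset()]. PcDescs are sorted by pc_offset
    // and preceded by a lower_offset_limit sentinel, so reading (pc-1) here is
    // always safe.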
398 return pc_offset <= pc->pc_offset() && (pc-1)->pc_offset() < pc_offset;
399 }
400 }
401
402 void PcDescCache::init_to(PcDesc* initial_pc_desc) {
403 NOT_PRODUCT(++pc_nmethod_stats.pc_desc_init);
404 // initialize the cache by filling it with benign (non-null) values
405 assert(initial_pc_desc != nullptr && initial_pc_desc->pc_offset() == PcDesc::lower_offset_limit,
406 "must start with a sentinel");
407 for (int i = 0; i < cache_size; i++) {
408 _pc_descs[i] = initial_pc_desc;
409 }
410 }
411
412 PcDesc* PcDescCache::find_pc_desc(int pc_offset, bool approximate) {
413 // Note: one might think that caching the most recently
414 // read value separately would be a win, but one would be
415 // wrong. When many threads are updating it, the cache
416 // line it's in would bounce between caches, negating
417 // any benefit.
418
  // To prevent race conditions, do not load cache elements repeatedly;
  // use a local copy instead:
421 PcDesc* res;
422
423 // Step one: Check the most recently added value.
424 res = _pc_descs[0];
425 assert(res != nullptr, "PcDesc cache should be initialized already");
426
427 // Approximate only here since PcDescContainer::find_pc_desc() checked for exact case.
428 if (approximate && match_desc(res, pc_offset, approximate)) {
429 NOT_PRODUCT(++pc_nmethod_stats.pc_desc_repeats);
430 return res;
431 }
432
433 // Step two: Check the rest of the LRU cache.
434 for (int i = 1; i < cache_size; ++i) {
435 res = _pc_descs[i];
436 if (res->pc_offset() < 0) break; // optimization: skip empty cache
437 if (match_desc(res, pc_offset, approximate)) {
438 NOT_PRODUCT(++pc_nmethod_stats.pc_desc_hits);
439 return res;
440 }
441 }
442
443 // Report failure.
444 return nullptr;
445 }
446
447 void PcDescCache::add_pc_desc(PcDesc* pc_desc) {
448 NOT_PRODUCT(++pc_nmethod_stats.pc_desc_adds);
449 // Update the LRU cache by shifting pc_desc forward.
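  // The new entry becomes _pc_descs[0]; each existing entry moves one slot
  // toward the end, and the previous last entry drops out.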
450 for (int i = 0; i < cache_size; i++) {
451 PcDesc* next = _pc_descs[i];
452 _pc_descs[i] = pc_desc;
453 pc_desc = next;
454 }
455 }
456
457 // adjust pcs_size so that it is a multiple of both oopSize and
458 // sizeof(PcDesc) (assumes that if sizeof(PcDesc) is not a multiple
459 // of oopSize, then 2*sizeof(PcDesc) is)
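// For example (hypothetical sizes): with oopSize == 8 and sizeof(PcDesc) == 20,
// a pcs_size of 60 aligns up to 64, which is not a multiple of 20, so we
// return 60 + 20 == 80, a multiple of both 8 and 20.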
460 static int adjust_pcs_size(int pcs_size) {
461 int nsize = align_up(pcs_size, oopSize);
462 if ((nsize % sizeof(PcDesc)) != 0) {
463 nsize = pcs_size + sizeof(PcDesc);
464 }
465 assert((nsize % oopSize) == 0, "correct alignment");
466 return nsize;
467 }
468
469 // Returns a string version of the method state.
470 const char* nmethod::state() const {
471 int state = get_state();
472 switch (state) {
473 case not_installed:
474 return "not installed";
475 case in_use:
476 return "in use";
477 case not_entrant:
478 return "not_entrant";
479 default:
480 fatal("unexpected method state: %d", state);
481 return nullptr;
482 }
483 }
484
485 void nmethod::set_deoptimized_done() {
486 ConditionalMutexLocker ml(NMethodState_lock, !NMethodState_lock->owned_by_self(), Mutex::_no_safepoint_check_flag);
487 if (_deoptimization_status != deoptimize_done) { // can't go backwards
488 AtomicAccess::store(&_deoptimization_status, deoptimize_done);
489 }
490 }
491
492 ExceptionCache* nmethod::exception_cache_acquire() const {
493 return AtomicAccess::load_acquire(&_exception_cache);
494 }
495
496 void nmethod::add_exception_cache_entry(ExceptionCache* new_entry) {
497 assert(ExceptionCache_lock->owned_by_self(),"Must hold the ExceptionCache_lock");
498 assert(new_entry != nullptr,"Must be non null");
499 assert(new_entry->next() == nullptr, "Must be null");
500
501 for (;;) {
502 ExceptionCache *ec = exception_cache();
503 if (ec != nullptr) {
504 Klass* ex_klass = ec->exception_type();
505 if (!ex_klass->is_loader_alive()) {
506 // We must guarantee that entries are not inserted with new next pointer
507 // edges to ExceptionCache entries with dead klasses, due to bad interactions
508 // with concurrent ExceptionCache cleanup. Therefore, the inserts roll
509 // the head pointer forward to the first live ExceptionCache, so that the new
510 // next pointers always point at live ExceptionCaches, that are not removed due
511 // to concurrent ExceptionCache cleanup.
512 ExceptionCache* next = ec->next();
513 if (AtomicAccess::cmpxchg(&_exception_cache, ec, next) == ec) {
514 CodeCache::release_exception_cache(ec);
515 }
516 continue;
517 }
518 ec = exception_cache();
519 if (ec != nullptr) {
520 new_entry->set_next(ec);
521 }
522 }
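    // Publish new_entry as the new head. If another thread changed the head
    // concurrently, the CAS fails and the insertion is retried.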
523 if (AtomicAccess::cmpxchg(&_exception_cache, ec, new_entry) == ec) {
524 return;
525 }
526 }
527 }
528
529 void nmethod::clean_exception_cache() {
  // For each nmethod, only a single thread may call this cleanup function
  // at a time, whether in STW cleanup or concurrent cleanup.
532 // Note that if the GC is processing exception cache cleaning in a concurrent phase,
533 // then a single writer may contend with cleaning up the head pointer to the
534 // first ExceptionCache node that has a Klass* that is alive. That is fine,
535 // as long as there is no concurrent cleanup of next pointers from concurrent writers.
536 // And the concurrent writers do not clean up next pointers, only the head.
537 // Also note that concurrent readers will walk through Klass* pointers that are not
538 // alive. That does not cause ABA problems, because Klass* is deleted after
539 // a handshake with all threads, after all stale ExceptionCaches have been
540 // unlinked. That is also when the CodeCache::exception_cache_purge_list()
541 // is deleted, with all ExceptionCache entries that were cleaned concurrently.
542 // That similarly implies that CAS operations on ExceptionCache entries do not
543 // suffer from ABA problems as unlinking and deletion is separated by a global
544 // handshake operation.
545 ExceptionCache* prev = nullptr;
546 ExceptionCache* curr = exception_cache_acquire();
547
548 while (curr != nullptr) {
549 ExceptionCache* next = curr->next();
550
551 if (!curr->exception_type()->is_loader_alive()) {
552 if (prev == nullptr) {
        // Try to clean the head; this is contended by concurrent inserts, which
        // both lazily clean the head and insert entries at the head. If
555 // the CAS fails, the operation is restarted.
556 if (AtomicAccess::cmpxchg(&_exception_cache, curr, next) != curr) {
557 prev = nullptr;
558 curr = exception_cache_acquire();
559 continue;
560 }
561 } else {
        // During cleanup it is impossible to connect the next pointer to an
        // ExceptionCache that was not already published before a safepoint
        // prior to the cleanup. Therefore, a releasing store is not required.
565 prev->set_next(next);
566 }
567 // prev stays the same.
568
569 CodeCache::release_exception_cache(curr);
570 } else {
571 prev = curr;
572 }
573
574 curr = next;
575 }
576 }
577
// These are the public methods for accessing the exception cache.
580 address nmethod::handler_for_exception_and_pc(Handle exception, address pc) {
581 // We never grab a lock to read the exception cache, so we may
582 // have false negatives. This is okay, as it can only happen during
583 // the first few exception lookups for a given nmethod.
584 ExceptionCache* ec = exception_cache_acquire();
585 while (ec != nullptr) {
586 address ret_val;
587 if ((ret_val = ec->match(exception,pc)) != nullptr) {
588 return ret_val;
589 }
590 ec = ec->next();
591 }
592 return nullptr;
593 }
594
595 void nmethod::add_handler_for_exception_and_pc(Handle exception, address pc, address handler) {
596 // There are potential race conditions during exception cache updates, so we
597 // must own the ExceptionCache_lock before doing ANY modifications. Because
598 // we don't lock during reads, it is possible to have several threads attempt
599 // to update the cache with the same data. We need to check for already inserted
600 // copies of the current data before adding it.
601
602 MutexLocker ml(ExceptionCache_lock);
603 ExceptionCache* target_entry = exception_cache_entry_for_exception(exception);
604
605 if (target_entry == nullptr || !target_entry->add_address_and_handler(pc,handler)) {
606 target_entry = new ExceptionCache(exception,pc,handler);
607 add_exception_cache_entry(target_entry);
608 }
609 }
610
// These methods are private and manipulate the exception cache directly.
614 ExceptionCache* nmethod::exception_cache_entry_for_exception(Handle exception) {
615 ExceptionCache* ec = exception_cache_acquire();
616 while (ec != nullptr) {
617 if (ec->match_exception_with_space(exception)) {
618 return ec;
619 }
620 ec = ec->next();
621 }
622 return nullptr;
623 }
624
625 bool nmethod::is_at_poll_return(address pc) {
626 RelocIterator iter(this, pc, pc+1);
627 while (iter.next()) {
628 if (iter.type() == relocInfo::poll_return_type)
629 return true;
630 }
631 return false;
632 }
633
634
635 bool nmethod::is_at_poll_or_poll_return(address pc) {
636 RelocIterator iter(this, pc, pc+1);
637 while (iter.next()) {
638 relocInfo::relocType t = iter.type();
639 if (t == relocInfo::poll_return_type || t == relocInfo::poll_type)
640 return true;
641 }
642 return false;
643 }
644
645 void nmethod::verify_oop_relocations() {
  // Ensure that the code matches the current oop values
647 RelocIterator iter(this, nullptr, nullptr);
648 while (iter.next()) {
649 if (iter.type() == relocInfo::oop_type) {
650 oop_Relocation* reloc = iter.oop_reloc();
651 if (!reloc->oop_is_immediate()) {
652 reloc->verify_oop_relocation();
653 }
654 }
655 }
656 }
657
658
659 ScopeDesc* nmethod::scope_desc_at(address pc) {
660 PcDesc* pd = pc_desc_at(pc);
661 guarantee(pd != nullptr, "scope must be present");
662 return new ScopeDesc(this, pd);
663 }
664
665 ScopeDesc* nmethod::scope_desc_near(address pc) {
666 PcDesc* pd = pc_desc_near(pc);
667 guarantee(pd != nullptr, "scope must be present");
668 return new ScopeDesc(this, pd);
669 }
670
671 address nmethod::oops_reloc_begin() const {
672 // If the method is not entrant then a JMP is plastered over the
673 // first few bytes. If an oop in the old code was there, that oop
674 // should not get GC'd. Skip the first few bytes of oops on
675 // not-entrant methods.
676 if (frame_complete_offset() != CodeOffsets::frame_never_safe &&
677 code_begin() + frame_complete_offset() >
678 verified_entry_point() + NativeJump::instruction_size)
679 {
680 // If we have a frame_complete_offset after the native jump, then there
681 // is no point trying to look for oops before that. This is a requirement
682 // for being allowed to scan oops concurrently.
683 return code_begin() + frame_complete_offset();
684 }
685
686 address low_boundary = verified_entry_point();
687 return low_boundary;
688 }
689
// Method that knows how to preserve outgoing arguments at a call. It must be
// called with a frame corresponding to a Java invoke.
692 void nmethod::preserve_callee_argument_oops(frame fr, const RegisterMap *reg_map, OopClosure* f) {
693 if (method() == nullptr) {
694 return;
695 }
696
697 // handle the case of an anchor explicitly set in continuation code that doesn't have a callee
698 JavaThread* thread = reg_map->thread();
699 if ((thread->has_last_Java_frame() && fr.sp() == thread->last_Java_sp())
700 JVMTI_ONLY(|| (method()->is_continuation_enter_intrinsic() && thread->on_monitor_waited_event()))) {
701 return;
702 }
703
704 if (!method()->is_native()) {
705 address pc = fr.pc();
706 bool has_receiver, has_appendix;
707 Symbol* signature;
708
    // The method attached by JIT compilers should be used, if present.
    // Bytecode can be inaccurate in that case.
711 Method* callee = attached_method_before_pc(pc);
712 if (callee != nullptr) {
713 has_receiver = !(callee->access_flags().is_static());
714 has_appendix = false;
715 signature = callee->signature();
716
717 // If inline types are passed as fields, use the extended signature
718 // which contains the types of all (oop) fields of the inline type.
719 if (is_compiled_by_c2() && callee->has_scalarized_args()) {
720 const GrowableArray<SigEntry>* sig = callee->adapter()->get_sig_cc();
721 assert(sig != nullptr, "sig should never be null");
722 TempNewSymbol tmp_sig = SigEntry::create_symbol(sig);
723 has_receiver = false; // The extended signature contains the receiver type
724 fr.oops_compiled_arguments_do(tmp_sig, has_receiver, has_appendix, reg_map, f);
725 return;
726 }
727 } else {
728 SimpleScopeDesc ssd(this, pc);
729
730 Bytecode_invoke call(methodHandle(Thread::current(), ssd.method()), ssd.bci());
731 has_receiver = call.has_receiver();
732 has_appendix = call.has_appendix();
733 signature = call.signature();
734 }
735
736 fr.oops_compiled_arguments_do(signature, has_receiver, has_appendix, reg_map, f);
737 } else if (method()->is_continuation_enter_intrinsic()) {
738 // This method only calls Continuation.enter()
739 Symbol* signature = vmSymbols::continuationEnter_signature();
740 fr.oops_compiled_arguments_do(signature, false, false, reg_map, f);
741 }
742 }
743
744 Method* nmethod::attached_method(address call_instr) {
745 assert(code_contains(call_instr), "not part of the nmethod");
746 RelocIterator iter(this, call_instr, call_instr + 1);
747 while (iter.next()) {
748 if (iter.addr() == call_instr) {
749 switch(iter.type()) {
750 case relocInfo::static_call_type: return iter.static_call_reloc()->method_value();
751 case relocInfo::opt_virtual_call_type: return iter.opt_virtual_call_reloc()->method_value();
752 case relocInfo::virtual_call_type: return iter.virtual_call_reloc()->method_value();
753 default: break;
754 }
755 }
756 }
757 return nullptr; // not found
758 }
759
760 Method* nmethod::attached_method_before_pc(address pc) {
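  // pc is typically a return address; the attached method, if any, belongs to
  // the call instruction immediately preceding it.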
761 if (NativeCall::is_call_before(pc)) {
762 NativeCall* ncall = nativeCall_before(pc);
763 return attached_method(ncall->instruction_address());
764 }
765 return nullptr; // not a call
766 }
767
768 void nmethod::clear_inline_caches() {
769 assert(SafepointSynchronize::is_at_safepoint() || (NMethodState_lock->owned_by_self() && is_not_installed()), "clearing of IC's only allowed at safepoint or when not installed");
770 RelocIterator iter(this);
771 while (iter.next()) {
772 iter.reloc()->clear_inline_cache();
773 }
774 }
775
776 #ifdef ASSERT
777 // Check class_loader is alive for this bit of metadata.
778 class CheckClass : public MetadataClosure {
779 void do_metadata(Metadata* md) {
780 Klass* klass = nullptr;
781 if (md->is_klass()) {
782 klass = ((Klass*)md);
783 } else if (md->is_method()) {
784 klass = ((Method*)md)->method_holder();
785 } else if (md->is_methodData()) {
786 klass = ((MethodData*)md)->method()->method_holder();
787 } else if (md->is_methodCounters()) {
788 klass = ((MethodCounters*)md)->method()->method_holder();
789 } else {
790 md->print();
791 ShouldNotReachHere();
792 }
793 assert(klass->is_loader_alive(), "must be alive");
794 }
795 };
796 #endif // ASSERT
797
// Clean a call site in this nmethod (which is not unloaded) when it points to
// an unloaded or otherwise stale nmethod.
799 template <typename CallsiteT>
800 static void clean_if_nmethod_is_unloaded(CallsiteT* callsite, bool clean_all) {
801 CodeBlob* cb = CodeCache::find_blob(callsite->destination());
802 if (!cb->is_nmethod()) {
803 return;
804 }
805 nmethod* nm = cb->as_nmethod();
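  // The target is stale if we are cleaning everything, if it is no longer in
  // use or is unloading, or if it is no longer the current code for its method.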
806 if (clean_all || !nm->is_in_use() || nm->is_unloading() || nm->method()->code() != nm) {
807 callsite->set_to_clean();
808 }
809 }
810
811 // Cleans caches in nmethods that point to either classes that are unloaded
812 // or nmethods that are unloaded.
813 //
// Can be called in parallel (currently by G1) or after all nmethods are
// unloaded.
818 void nmethod::unload_nmethod_caches(bool unloading_occurred) {
819 ResourceMark rm;
820
  // Exception cache cleaning only needs to be done if unloading occurred
822 if (unloading_occurred) {
823 clean_exception_cache();
824 }
825
826 cleanup_inline_caches_impl(unloading_occurred, false);
827
828 #ifdef ASSERT
829 // Check that the metadata embedded in the nmethod is alive
830 CheckClass check_class;
831 metadata_do(&check_class);
832 #endif
833 }
834
835 void nmethod::run_nmethod_entry_barrier() {
836 BarrierSetNMethod* bs_nm = BarrierSet::barrier_set()->barrier_set_nmethod();
837 if (bs_nm != nullptr) {
    // We want to keep the invariant that nmethods found by iterating a thread's
    // nmethods at safepoints have gone through an entry barrier and are not armed.
840 // By calling this nmethod entry barrier, it plays along and acts
841 // like any other nmethod found on the stack of a thread (fewer surprises).
842 nmethod* nm = this;
843 bool alive = bs_nm->nmethod_entry_barrier(nm);
844 assert(alive, "should be alive");
845 }
846 }
847
848 // Only called by whitebox test
849 void nmethod::cleanup_inline_caches_whitebox() {
850 assert_locked_or_safepoint(CodeCache_lock);
851 CompiledICLocker ic_locker(this);
852 cleanup_inline_caches_impl(false /* unloading_occurred */, true /* clean_all */);
853 }
854
855 address* nmethod::orig_pc_addr(const frame* fr) {
856 return (address*) ((address)fr->unextended_sp() + orig_pc_offset());
857 }
858
859 // Called to clean up after class unloading for live nmethods
860 void nmethod::cleanup_inline_caches_impl(bool unloading_occurred, bool clean_all) {
861 assert(CompiledICLocker::is_safe(this), "mt unsafe call");
862 ResourceMark rm;
863
864 // Find all calls in an nmethod and clear the ones that point to bad nmethods.
865 RelocIterator iter(this, oops_reloc_begin());
866 bool is_in_static_stub = false;
867 while(iter.next()) {
868
869 switch (iter.type()) {
870
871 case relocInfo::virtual_call_type:
872 if (unloading_occurred) {
873 // If class unloading occurred we first clear ICs where the cached metadata
874 // is referring to an unloaded klass or method.
875 CompiledIC_at(&iter)->clean_metadata();
876 }
877
878 clean_if_nmethod_is_unloaded(CompiledIC_at(&iter), clean_all);
879 break;
880
881 case relocInfo::opt_virtual_call_type:
882 case relocInfo::static_call_type:
883 clean_if_nmethod_is_unloaded(CompiledDirectCall::at(iter.reloc()), clean_all);
884 break;
885
886 case relocInfo::static_stub_type: {
887 is_in_static_stub = true;
888 break;
889 }
890
891 case relocInfo::metadata_type: {
        // Only the metadata relocations contained in static/opt virtual call stubs
        // contain the Method* passed to c2i adapters. It is the only metadata
894 // relocation that needs to be walked, as it is the one metadata relocation
895 // that violates the invariant that all metadata relocations have an oop
896 // in the compiled method (due to deferred resolution and code patching).
897
898 // This causes dead metadata to remain in compiled methods that are not
899 // unloading. Unless these slippery metadata relocations of the static
900 // stubs are at least cleared, subsequent class redefinition operations
901 // will access potentially free memory, and JavaThread execution
902 // concurrent to class unloading may call c2i adapters with dead methods.
903 if (!is_in_static_stub) {
904 // The first metadata relocation after a static stub relocation is the
905 // metadata relocation of the static stub used to pass the Method* to
906 // c2i adapters.
907 continue;
908 }
909 is_in_static_stub = false;
910 if (is_unloading()) {
911 // If the nmethod itself is dying, then it may point at dead metadata.
912 // Nobody should follow that metadata; it is strictly unsafe.
913 continue;
914 }
915 metadata_Relocation* r = iter.metadata_reloc();
916 Metadata* md = r->metadata_value();
917 if (md != nullptr && md->is_method()) {
918 Method* method = static_cast<Method*>(md);
919 if (!method->method_holder()->is_loader_alive()) {
920 AtomicAccess::store(r->metadata_addr(), (Method*)nullptr);
921
922 if (!r->metadata_is_immediate()) {
923 r->fix_metadata_relocation();
924 }
925 }
926 }
927 break;
928 }
929
930 default:
931 break;
932 }
933 }
934 }
935
936 address nmethod::continuation_for_implicit_exception(address pc, bool for_div0_check) {
  // Exception happened outside inline-cache check code => we are inside
  // an active nmethod => use the implicit exception table to determine a
  // continuation (return) address.
939 int exception_offset = int(pc - code_begin());
940 int cont_offset = ImplicitExceptionTable(this).continuation_offset( exception_offset );
941 #ifdef ASSERT
942 if (cont_offset == 0) {
943 Thread* thread = Thread::current();
944 ResourceMark rm(thread);
945 CodeBlob* cb = CodeCache::find_blob(pc);
    assert(cb != nullptr && cb == this, "pc must map to this nmethod");
947
    // Keep tty output consistent. To avoid ttyLocker, we buffer in a stream and print all at once.
949 stringStream ss;
950 ss.print_cr("implicit exception happened at " INTPTR_FORMAT, p2i(pc));
951 print_on(&ss);
952 method()->print_codes_on(&ss);
953 print_code_on(&ss);
954 print_pcs_on(&ss);
955 tty->print("%s", ss.as_string()); // print all at once
956 }
957 #endif
958 if (cont_offset == 0) {
959 // Let the normal error handling report the exception
960 return nullptr;
961 }
962 if (cont_offset == exception_offset) {
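    // A continuation that points back at the faulting pc is how JVMCI-compiled
    // code requests deoptimization for an implicit exception; without JVMCI
    // this cannot happen.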
963 #if INCLUDE_JVMCI
964 Deoptimization::DeoptReason deopt_reason = for_div0_check ? Deoptimization::Reason_div0_check : Deoptimization::Reason_null_check;
965 JavaThread *thread = JavaThread::current();
966 thread->set_jvmci_implicit_exception_pc(pc);
967 thread->set_pending_deoptimization(Deoptimization::make_trap_request(deopt_reason,
968 Deoptimization::Action_reinterpret));
969 return (SharedRuntime::deopt_blob()->implicit_exception_uncommon_trap());
970 #else
971 ShouldNotReachHere();
972 #endif
973 }
974 return code_begin() + cont_offset;
975 }
976
977 class HasEvolDependency : public MetadataClosure {
978 bool _has_evol_dependency;
979 public:
980 HasEvolDependency() : _has_evol_dependency(false) {}
981 void do_metadata(Metadata* md) {
982 if (md->is_method()) {
983 Method* method = (Method*)md;
984 if (method->is_old()) {
985 _has_evol_dependency = true;
986 }
987 }
988 }
989 bool has_evol_dependency() const { return _has_evol_dependency; }
990 };
991
992 bool nmethod::has_evol_metadata() {
993 // Check the metadata in relocIter and CompiledIC and also deoptimize
994 // any nmethod that has reference to old methods.
995 HasEvolDependency check_evol;
996 metadata_do(&check_evol);
997 if (check_evol.has_evol_dependency() && log_is_enabled(Debug, redefine, class, nmethod)) {
998 ResourceMark rm;
999 log_debug(redefine, class, nmethod)
      ("Found evol dependency of nmethod %s.%s(%s) compile_id=%d in nmethod metadata",
1001 _method->method_holder()->external_name(),
1002 _method->name()->as_C_string(),
1003 _method->signature()->as_C_string(),
1004 compile_id());
1005 }
1006 return check_evol.has_evol_dependency();
1007 }
1008
1009 int nmethod::total_size() const {
1010 return
1011 consts_size() +
1012 insts_size() +
1013 stub_size() +
1014 scopes_data_size() +
1015 scopes_pcs_size() +
1016 handler_table_size() +
1017 nul_chk_table_size();
1018 }
1019
1020 const char* nmethod::compile_kind() const {
1021 if (is_osr_method()) return "osr";
1022 if (method() != nullptr && is_native_method()) {
1023 if (method()->is_continuation_native_intrinsic()) {
1024 return "cnt";
1025 }
1026 return "c2n";
1027 }
1028 return nullptr;
1029 }
1030
1031 const char* nmethod::compiler_name() const {
1032 return compilertype2name(_compiler_type);
1033 }
1034
1035 #ifdef ASSERT
1036 class CheckForOopsClosure : public OopClosure {
1037 bool _found_oop = false;
1038 public:
1039 virtual void do_oop(oop* o) { _found_oop = true; }
1040 virtual void do_oop(narrowOop* o) { _found_oop = true; }
1041 bool found_oop() { return _found_oop; }
1042 };
1043 class CheckForMetadataClosure : public MetadataClosure {
1044 bool _found_metadata = false;
1045 Metadata* _ignore = nullptr;
1046 public:
1047 CheckForMetadataClosure(Metadata* ignore) : _ignore(ignore) {}
1048 virtual void do_metadata(Metadata* md) { if (md != _ignore) _found_metadata = true; }
1049 bool found_metadata() { return _found_metadata; }
1050 };
1051
1052 static void assert_no_oops_or_metadata(nmethod* nm) {
1053 if (nm == nullptr) return;
1054 assert(nm->oop_maps() == nullptr, "expectation");
1055
1056 CheckForOopsClosure cfo;
1057 nm->oops_do(&cfo);
1058 assert(!cfo.found_oop(), "no oops allowed");
1059
  // We allow an exception for the nmethod's own Method, but require its class to be permanent.
1061 Method* own_method = nm->method();
1062 CheckForMetadataClosure cfm(/* ignore reference to own Method */ own_method);
1063 nm->metadata_do(&cfm);
1064 assert(!cfm.found_metadata(), "no metadata allowed");
1065
1066 assert(own_method->method_holder()->class_loader_data()->is_permanent_class_loader_data(),
1067 "Method's class needs to be permanent");
1068 }
1069 #endif
1070
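// An nmethod's mutable data holds its relocation records and metadata array
// and, for JVMCI compiles, the JVMCI data; each part is padded to a multiple
// of oopSize.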
1071 static int required_mutable_data_size(CodeBuffer* code_buffer,
1072 int jvmci_data_size = 0) {
1073 return align_up(code_buffer->total_relocation_size(), oopSize) +
1074 align_up(code_buffer->total_metadata_size(), oopSize) +
1075 align_up(jvmci_data_size, oopSize);
1076 }
1077
1078 nmethod* nmethod::new_native_nmethod(const methodHandle& method,
1079 int compile_id,
1080 CodeBuffer *code_buffer,
1081 int vep_offset,
1082 int frame_complete,
1083 int frame_size,
1084 ByteSize basic_lock_owner_sp_offset,
1085 ByteSize basic_lock_sp_offset,
1086 OopMapSet* oop_maps,
1087 int exception_handler) {
1088 code_buffer->finalize_oop_references(method);
1089 // create nmethod
1090 nmethod* nm = nullptr;
1091 int native_nmethod_size = CodeBlob::allocation_size(code_buffer, sizeof(nmethod));
1092 {
1093 MutexLocker mu(CodeCache_lock, Mutex::_no_safepoint_check_flag);
1094
1095 CodeOffsets offsets;
1096 offsets.set_value(CodeOffsets::Verified_Entry, vep_offset);
1097 offsets.set_value(CodeOffsets::Frame_Complete, frame_complete);
1098 if (exception_handler != -1) {
1099 offsets.set_value(CodeOffsets::Exceptions, exception_handler);
1100 }
1101
1102 int mutable_data_size = required_mutable_data_size(code_buffer);
1103
1104 // MH intrinsics are dispatch stubs which are compatible with NonNMethod space.
1105 // IsUnloadingBehaviour::is_unloading needs to handle them separately.
1106 bool allow_NonNMethod_space = method->can_be_allocated_in_NonNMethod_space();
1107 nm = new (native_nmethod_size, allow_NonNMethod_space)
1108 nmethod(method(), compiler_none, native_nmethod_size,
1109 compile_id, &offsets,
1110 code_buffer, frame_size,
1111 basic_lock_owner_sp_offset,
1112 basic_lock_sp_offset,
1113 oop_maps, mutable_data_size);
1114 DEBUG_ONLY( if (allow_NonNMethod_space) assert_no_oops_or_metadata(nm); )
1115 NOT_PRODUCT(if (nm != nullptr) native_nmethod_stats.note_native_nmethod(nm));
1116 }
1117
1118 if (nm != nullptr) {
1119 // verify nmethod
1120 DEBUG_ONLY(nm->verify();) // might block
1121
1122 nm->log_new_nmethod();
1123 }
1124 return nm;
1125 }
1126
1127 nmethod* nmethod::new_nmethod(const methodHandle& method,
1128 int compile_id,
1129 int entry_bci,
1130 CodeOffsets* offsets,
1131 int orig_pc_offset,
1132 DebugInformationRecorder* debug_info,
1133 Dependencies* dependencies,
1134 CodeBuffer* code_buffer, int frame_size,
1135 OopMapSet* oop_maps,
1136 ExceptionHandlerTable* handler_table,
1137 ImplicitExceptionTable* nul_chk_table,
1138 AbstractCompiler* compiler,
1139 CompLevel comp_level
1140 #if INCLUDE_JVMCI
1141 , char* speculations,
1142 int speculations_len,
1143 JVMCINMethodData* jvmci_data
1144 #endif
1145 )
1146 {
1147 assert(debug_info->oop_recorder() == code_buffer->oop_recorder(), "shared OR");
1148 code_buffer->finalize_oop_references(method);
1149 // create nmethod
1150 nmethod* nm = nullptr;
1151 int nmethod_size = CodeBlob::allocation_size(code_buffer, sizeof(nmethod));
1152
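  // Immutable data holds everything that never changes after the nmethod is
  // created: PcDescs, dependencies, handler and null-check tables, scopes data
  // (and JVMCI speculations). It is allocated in C heap and can be shared
  // between copies of an nmethod, hence the reference count appended below.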
1153 int immutable_data_size =
1154 adjust_pcs_size(debug_info->pcs_size())
1155 + align_up((int)dependencies->size_in_bytes(), oopSize)
1156 + align_up(handler_table->size_in_bytes() , oopSize)
1157 + align_up(nul_chk_table->size_in_bytes() , oopSize)
1158 #if INCLUDE_JVMCI
1159 + align_up(speculations_len , oopSize)
1160 #endif
1161 + align_up(debug_info->data_size() , oopSize);
1162
1163 // First, allocate space for immutable data in C heap.
1164 address immutable_data = nullptr;
1165 if (immutable_data_size > 0) {
1166 immutable_data_size += ImmutableDataRefCountSize;
1167 immutable_data = (address)os::malloc(immutable_data_size, mtCode);
1168 if (immutable_data == nullptr) {
1169 vm_exit_out_of_memory(immutable_data_size, OOM_MALLOC_ERROR, "nmethod: no space for immutable data");
1170 return nullptr;
1171 }
1172 }
1173
1174 int mutable_data_size = required_mutable_data_size(code_buffer
1175 JVMCI_ONLY(COMMA (compiler->is_jvmci() ? jvmci_data->size() : 0)));
1176
1177 {
1178 MutexLocker mu(CodeCache_lock, Mutex::_no_safepoint_check_flag);
1179
1180 nm = new (nmethod_size, comp_level)
1181 nmethod(method(), compiler->type(), nmethod_size, immutable_data_size, mutable_data_size,
1182 compile_id, entry_bci, immutable_data, offsets, orig_pc_offset,
1183 debug_info, dependencies, code_buffer, frame_size, oop_maps,
1184 handler_table, nul_chk_table, compiler, comp_level
1185 #if INCLUDE_JVMCI
1186 , speculations,
1187 speculations_len,
1188 jvmci_data
1189 #endif
1190 );
1191
1192 if (nm != nullptr) {
1193 // To make dependency checking during class loading fast, record
1194 // the nmethod dependencies in the classes it is dependent on.
1195 // This allows the dependency checking code to simply walk the
1196 // class hierarchy above the loaded class, checking only nmethods
1197 // which are dependent on those classes. The slow way is to
1198 // check every nmethod for dependencies which makes it linear in
      // the number of methods compiled. For applications with a lot of
      // classes the slow way is too slow.
1201 for (Dependencies::DepStream deps(nm); deps.next(); ) {
1202 if (deps.type() == Dependencies::call_site_target_value) {
          // CallSite dependencies are managed on a per-CallSite-instance basis.
1204 oop call_site = deps.argument_oop(0);
1205 MethodHandles::add_dependent_nmethod(call_site, nm);
1206 } else {
1207 InstanceKlass* ik = deps.context_type();
1208 if (ik == nullptr) {
1209 continue; // ignore things like evol_method
1210 }
1211 // record this nmethod as dependent on this klass
1212 ik->add_dependent_nmethod(nm);
1213 }
1214 }
1215 NOT_PRODUCT(if (nm != nullptr) note_java_nmethod(nm));
1216 }
1217 }
1218 // Do verification and logging outside CodeCache_lock.
1219 if (nm != nullptr) {
1220 // Safepoints in nmethod::verify aren't allowed because nm hasn't been installed yet.
1221 DEBUG_ONLY(nm->verify();)
1222 nm->log_new_nmethod();
1223 }
1224 return nm;
1225 }
1226
1227 // Fill in default values for various fields
1228 void nmethod::init_defaults(CodeBuffer *code_buffer, CodeOffsets* offsets) {
1229 // avoid uninitialized fields, even for short time periods
1230 _exception_cache = nullptr;
1231 _gc_data = nullptr;
1232 _oops_do_mark_link = nullptr;
1233 _compiled_ic_data = nullptr;
1234
1235 _is_unloading_state = 0;
1236 _state = not_installed;
1237
1238 _has_unsafe_access = 0;
1239 _has_wide_vectors = 0;
1240 _has_monitors = 0;
1241 _has_scoped_access = 0;
1242 _has_flushed_dependencies = 0;
1243 _is_unlinked = 0;
1244 _load_reported = 0; // jvmti state
1245
1246 _deoptimization_status = not_marked;
1247
1248 // SECT_CONSTS is first in code buffer so the offset should be 0.
1249 int consts_offset = code_buffer->total_offset_of(code_buffer->consts());
1250 assert(consts_offset == 0, "const_offset: %d", consts_offset);
1251
1252 _stub_offset = content_offset() + code_buffer->total_offset_of(code_buffer->stubs());
1253
1254 CHECKED_CAST(_entry_offset, uint16_t, (offsets->value(CodeOffsets::Entry)));
1255 CHECKED_CAST(_verified_entry_offset, uint16_t, (offsets->value(CodeOffsets::Verified_Entry)));
1256
1257 _inline_entry_point = entry_point();
1258 _verified_inline_entry_point = verified_entry_point();
1259 _verified_inline_ro_entry_point = verified_entry_point();
1260
1261 _skipped_instructions_size = code_buffer->total_skipped_instructions_size();
1262 }
1263
1264 // Post initialization
1265 void nmethod::post_init() {
1266 clear_unloading_state();
1267
1268 finalize_relocations();
1269
1270 Universe::heap()->register_nmethod(this);
1271 DEBUG_ONLY(Universe::heap()->verify_nmethod(this));
1272
1273 CodeCache::commit(this);
1274 }
1275
1276 // For native wrappers
1277 nmethod::nmethod(
1278 Method* method,
1279 CompilerType type,
1280 int nmethod_size,
1281 int compile_id,
1282 CodeOffsets* offsets,
1283 CodeBuffer* code_buffer,
1284 int frame_size,
1285 ByteSize basic_lock_owner_sp_offset,
1286 ByteSize basic_lock_sp_offset,
1287 OopMapSet* oop_maps,
1288 int mutable_data_size)
1289 : CodeBlob("native nmethod", CodeBlobKind::Nmethod, code_buffer, nmethod_size, sizeof(nmethod),
1290 offsets->value(CodeOffsets::Frame_Complete), frame_size, oop_maps, false, mutable_data_size),
1291 _deoptimization_generation(0),
1292 _gc_epoch(CodeCache::gc_epoch()),
1293 _method(method),
1294 _native_receiver_sp_offset(basic_lock_owner_sp_offset),
1295 _native_basic_lock_sp_offset(basic_lock_sp_offset)
1296 {
1297 {
1298 DEBUG_ONLY(NoSafepointVerifier nsv;)
1299 assert_locked_or_safepoint(CodeCache_lock);
1300 assert(!method->has_scalarized_args(), "scalarized native wrappers not supported yet");
1301 init_defaults(code_buffer, offsets);
1302
1303 _osr_entry_point = nullptr;
1304 _pc_desc_container = nullptr;
1305 _entry_bci = InvocationEntryBci;
1306 _compile_id = compile_id;
1307 _comp_level = CompLevel_none;
1308 _compiler_type = type;
1309 _orig_pc_offset = 0;
1310 _num_stack_arg_slots = 0;
1311
1312 if (offsets->value(CodeOffsets::Exceptions) != -1) {
1313 // Continuation enter intrinsic
1314 _exception_offset = code_offset() + offsets->value(CodeOffsets::Exceptions);
1315 } else {
1316 _exception_offset = 0;
1317 }
    // Native wrappers do not have deopt handlers. Make the values
    // something that will never match a pc, like the nmethod vtable entry.
1320 _deopt_handler_entry_offset = 0;
1321 _unwind_handler_offset = 0;
1322
1323 CHECKED_CAST(_oops_size, uint16_t, align_up(code_buffer->total_oop_size(), oopSize));
1324 uint16_t metadata_size;
1325 CHECKED_CAST(metadata_size, uint16_t, align_up(code_buffer->total_metadata_size(), wordSize));
1326 JVMCI_ONLY( _metadata_size = metadata_size; )
1327 assert(_mutable_data_size == _relocation_size + metadata_size,
1328 "wrong mutable data size: %d != %d + %d",
1329 _mutable_data_size, _relocation_size, metadata_size);
1330
    // A native wrapper does not have read-only data, but we need a unique non-null address
1332 _immutable_data = blob_end();
1333 _immutable_data_size = 0;
1334 _nul_chk_table_offset = 0;
1335 _handler_table_offset = 0;
1336 _scopes_pcs_offset = 0;
1337 _scopes_data_offset = 0;
1338 #if INCLUDE_JVMCI
1339 _speculations_offset = 0;
1340 #endif
1341 _immutable_data_ref_count_offset = 0;
1342
1343 code_buffer->copy_code_and_locs_to(this);
1344 code_buffer->copy_values_to(this);
1345
1346 post_init();
1347 }
1348
1349 if (PrintNativeNMethods || PrintDebugInfo || PrintRelocations || PrintDependencies) {
1350 ttyLocker ttyl; // keep the following output all in one block
1351 // This output goes directly to the tty, not the compiler log.
1352 // To enable tools to match it up with the compilation activity,
1353 // be sure to tag this tty output with the compile ID.
1354 if (xtty != nullptr) {
1355 xtty->begin_head("print_native_nmethod");
1356 xtty->method(_method);
1357 xtty->stamp();
1358 xtty->end_head(" address='" INTPTR_FORMAT "'", (intptr_t) this);
1359 }
    // Print the header part, then print the requested information.
    // Both are handled in decode2(), called via print_code() -> decode().
1362 if (PrintNativeNMethods) {
1363 tty->print_cr("-------------------------- Assembly (native nmethod) ---------------------------");
1364 print_code();
1365 tty->print_cr("- - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - ");
1366 #if defined(SUPPORT_DATA_STRUCTS)
1367 if (AbstractDisassembler::show_structs()) {
1368 if (oop_maps != nullptr) {
1369 tty->print("oop maps:"); // oop_maps->print_on(tty) outputs a cr() at the beginning
1370 oop_maps->print_on(tty);
1371 tty->print_cr("- - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - ");
1372 }
1373 }
1374 #endif
1375 } else {
1376 print(); // print the header part only.
1377 }
1378 #if defined(SUPPORT_DATA_STRUCTS)
1379 if (AbstractDisassembler::show_structs()) {
1380 if (PrintRelocations) {
1381 print_relocations();
1382 tty->print_cr("- - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - ");
1383 }
1384 }
1385 #endif
1386 if (xtty != nullptr) {
1387 xtty->tail("print_native_nmethod");
1388 }
1389 }
1390 }
1391
1392
1393 nmethod::nmethod(const nmethod &nm) : CodeBlob(nm._name, nm._kind, nm._size, nm._header_size)
1394 {
1395
1396 if (nm._oop_maps != nullptr) {
1397 _oop_maps = nm._oop_maps->clone();
1398 } else {
1399 _oop_maps = nullptr;
1400 }
1401
1402 _size = nm._size;
1403 _relocation_size = nm._relocation_size;
1404 _content_offset = nm._content_offset;
1405 _code_offset = nm._code_offset;
1406 _data_offset = nm._data_offset;
1407 _frame_size = nm._frame_size;
1408
1409 S390_ONLY( _ctable_offset = nm._ctable_offset; )
1410
1411 _header_size = nm._header_size;
1412 _frame_complete_offset = nm._frame_complete_offset;
1413
1414 _kind = nm._kind;
1415
1416 _caller_must_gc_arguments = nm._caller_must_gc_arguments;
1417
1418 #ifndef PRODUCT
1419 _asm_remarks.share(nm._asm_remarks);
1420 _dbg_strings.share(nm._dbg_strings);
1421 #endif
1422
1423 // Allocate memory and copy mutable data to C heap
1424 _mutable_data_size = nm._mutable_data_size;
1425 if (_mutable_data_size > 0) {
1426 _mutable_data = (address)os::malloc(_mutable_data_size, mtCode);
1427 if (_mutable_data == nullptr) {
1428 vm_exit_out_of_memory(_mutable_data_size, OOM_MALLOC_ERROR, "nmethod: no space for mutable data");
1429 }
1430 memcpy(mutable_data_begin(), nm.mutable_data_begin(), nm.mutable_data_size());
1431 } else {
1432 _mutable_data = nullptr;
1433 }
1434
1435 _deoptimization_generation = 0;
1436 _gc_epoch = CodeCache::gc_epoch();
1437 _method = nm._method;
1438 _osr_link = nullptr;
1439
1440 _exception_cache = nullptr;
1441 _gc_data = nullptr;
1442 _oops_do_mark_nmethods = nullptr;
1443 _oops_do_mark_link = nullptr;
1444 _compiled_ic_data = nullptr;
1445
1446 if (nm._osr_entry_point != nullptr) {
1447 _osr_entry_point = (nm._osr_entry_point - (address) &nm) + (address) this;
1448 } else {
1449 _osr_entry_point = nullptr;
1450 }
1451
1452 _entry_offset = nm._entry_offset;
1453 _verified_entry_offset = nm._verified_entry_offset;
1454 _entry_bci = nm._entry_bci;
1455 _immutable_data_size = nm._immutable_data_size;
1456
1457 _skipped_instructions_size = nm._skipped_instructions_size;
1458 _stub_offset = nm._stub_offset;
1459 _exception_offset = nm._exception_offset;
1460 _deopt_handler_entry_offset = nm._deopt_handler_entry_offset;
1461 _unwind_handler_offset = nm._unwind_handler_offset;
1462 _num_stack_arg_slots = nm._num_stack_arg_slots;
1463 _oops_size = nm._oops_size;
1464 #if INCLUDE_JVMCI
1465 _metadata_size = nm._metadata_size;
1466 #endif
1467 _nul_chk_table_offset = nm._nul_chk_table_offset;
1468 _handler_table_offset = nm._handler_table_offset;
1469 _scopes_pcs_offset = nm._scopes_pcs_offset;
1470 _scopes_data_offset = nm._scopes_data_offset;
1471 #if INCLUDE_JVMCI
1472 _speculations_offset = nm._speculations_offset;
1473 #endif
1474 _immutable_data_ref_count_offset = nm._immutable_data_ref_count_offset;
1475
1476 // Increment number of references to immutable data to share it between nmethods
1477 if (_immutable_data_size > 0) {
1478 _immutable_data = nm._immutable_data;
1479 inc_immutable_data_ref_count();
1480 } else {
1481 _immutable_data = blob_end();
1482 }
1483
1484 _orig_pc_offset = nm._orig_pc_offset;
1485 _compile_id = nm._compile_id;
1486 _comp_level = nm._comp_level;
1487 _compiler_type = nm._compiler_type;
1488 _is_unloading_state = nm._is_unloading_state;
1489 _state = not_installed;
1490
1491 _has_unsafe_access = nm._has_unsafe_access;
1492 _has_wide_vectors = nm._has_wide_vectors;
1493 _has_monitors = nm._has_monitors;
1494 _has_scoped_access = nm._has_scoped_access;
1495 _has_flushed_dependencies = nm._has_flushed_dependencies;
1496 _is_unlinked = nm._is_unlinked;
1497 _load_reported = nm._load_reported;
1498
1499 _deoptimization_status = nm._deoptimization_status;
1500
1501 if (nm._pc_desc_container != nullptr) {
1502 _pc_desc_container = new PcDescContainer(scopes_pcs_begin());
1503 } else {
1504 _pc_desc_container = nullptr;
1505 }
1506
1507 // Copy nmethod contents excluding header
1508 // - Constant part (doubles, longs and floats used in nmethod)
1509 // - Code part:
1510 // - Code body
1511 // - Exception handler
1512 // - Stub code
1513 // - OOP table
1514 memcpy(consts_begin(), nm.consts_begin(), nm.data_end() - nm.consts_begin());
1515
1516 // Fix relocation
1517 RelocIterator iter(this);
1518 CodeBuffer src(&nm);
1519 CodeBuffer dst(this);
1520 while (iter.next()) {
1521 #ifdef USE_TRAMPOLINE_STUB_FIX_OWNER
1522 // After an nmethod is moved, some direct call sites may end up out of range.
1523 // CallRelocation::fix_relocation_after_move() assumes the target is always
1524 // reachable and does not check branch range. Calling it without range checks
1525 // could cause us to write an offset too large for the instruction.
1526 //
1527 // If a call site has a trampoline, we skip the normal call relocation. The
1528 // associated trampoline_stub_Relocation will handle the call and the
1529 // trampoline, including range checks and updating the branch as needed.
1530 //
1531 // If no trampoline exists, we can assume the call target is always
1532 // reachable and therefore within direct branch range, so calling
1533 // CallRelocation::fix_relocation_after_move() is safe.
1534 if (iter.reloc()->is_call()) {
1535 address trampoline = trampoline_stub_Relocation::get_trampoline_for(iter.reloc()->addr(), this);
1536 if (trampoline != nullptr) {
1537 continue;
1538 }
1539 }
1540 #endif
1541
1542 iter.reloc()->fix_relocation_after_move(&src, &dst);
1543 }
1544
1545 {
1546 MutexLocker ml(NMethodState_lock, Mutex::_no_safepoint_check_flag);
1547 clear_inline_caches();
1548 }
1549
1550 post_init();
1551 }
1552
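// Move this nmethod into the given code heap: allocate a copy there, fix it
// up, and switch execution over to it. On success the copy is made in_use,
// the Method* is pointed at it, and this nmethod is made not entrant.
// Returns the copy, or nullptr if the nmethod is not relocatable or the
// copy could not be installed.
//
// Illustrative use (hypothetical call site; the required locks are spelled
// out by the asserts below):
//
//   nmethod* moved = nm->relocate(CodeBlobType::MethodNonProfiled);
//   if (moved != nullptr) {
//     // execution continues in the copy; 'nm' is now not entrant
//   }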
1553 nmethod* nmethod::relocate(CodeBlobType code_blob_type) {
1554 assert(NMethodRelocation, "must enable use of function");
1555
// The caller must hold the following locks to ensure the nmethod
// is not modified or purged from the code cache during relocation.
1558 assert_lock_strong(CodeCache_lock);
1559 assert_lock_strong(Compile_lock);
1560 assert(CompiledICLocker::is_safe(this), "mt unsafe call");
1561
1562 if (!is_relocatable()) {
1563 return nullptr;
1564 }
1565
1566 run_nmethod_entry_barrier();
1567 nmethod* nm_copy = new (size(), code_blob_type) nmethod(*this);
1568
1569 if (nm_copy == nullptr) {
1570 return nullptr;
1571 }
1572
1573 // To make dependency checking during class loading fast, record
1574 // the nmethod dependencies in the classes it is dependent on.
1575 // This allows the dependency checking code to simply walk the
1576 // class hierarchy above the loaded class, checking only nmethods
1577 // which are dependent on those classes. The slow way is to
1578 // check every nmethod for dependencies which makes it linear in
// the number of methods compiled. For applications with a lot of
// classes the slow way is too slow.
1581 for (Dependencies::DepStream deps(nm_copy); deps.next(); ) {
1582 if (deps.type() == Dependencies::call_site_target_value) {
1583 // CallSite dependencies are managed on per-CallSite instance basis.
1584 oop call_site = deps.argument_oop(0);
1585 MethodHandles::add_dependent_nmethod(call_site, nm_copy);
1586 } else {
1587 InstanceKlass* ik = deps.context_type();
1588 if (ik == nullptr) {
1589 continue; // ignore things like evol_method
1590 }
1591 // record this nmethod as dependent on this klass
1592 ik->add_dependent_nmethod(nm_copy);
1593 }
1594 }
1595
1596 MutexLocker ml_NMethodState_lock(NMethodState_lock, Mutex::_no_safepoint_check_flag);
1597
1598 // Verify the nm we copied from is still valid
1599 if (!is_marked_for_deoptimization() && is_in_use()) {
1600 assert(method() != nullptr && method()->code() == this, "should be if is in use");
1601
1602 // Attempt to start using the copy
1603 if (nm_copy->make_in_use()) {
1604 ICache::invalidate_range(nm_copy->code_begin(), nm_copy->code_size());
1605
1606 methodHandle mh(Thread::current(), nm_copy->method());
1607 nm_copy->method()->set_code(mh, nm_copy);
1608
1609 make_not_entrant(InvalidationReason::RELOCATED);
1610
1611 nm_copy->post_compiled_method_load_event();
1612
1613 nm_copy->log_relocated_nmethod(this);
1614
1615 return nm_copy;
1616 }
1617 }
1618
1619 nm_copy->make_not_used();
1620
1621 return nullptr;
1622 }
1623
1624 bool nmethod::is_relocatable() {
1625 if (!is_java_method()) {
1626 return false;
1627 }
1628
1629 if (!is_in_use()) {
1630 return false;
1631 }
1632
1633 if (is_osr_method()) {
1634 return false;
1635 }
1636
1637 if (is_marked_for_deoptimization()) {
1638 return false;
1639 }
1640
1641 #if INCLUDE_JVMCI
1642 if (jvmci_nmethod_data() != nullptr && jvmci_nmethod_data()->has_mirror()) {
1643 return false;
1644 }
1645 #endif
1646
1647 if (is_unloading()) {
1648 return false;
1649 }
1650
1651 if (has_evol_metadata()) {
1652 return false;
1653 }
1654
1655 return true;
1656 }
1657
1658 void* nmethod::operator new(size_t size, int nmethod_size, int comp_level) throw () {
1659 return CodeCache::allocate(nmethod_size, CodeCache::get_code_blob_type(comp_level));
1660 }
1661
1662 void* nmethod::operator new(size_t size, int nmethod_size, CodeBlobType code_blob_type) throw () {
1663 return CodeCache::allocate(nmethod_size, code_blob_type);
1664 }
1665
1666 void* nmethod::operator new(size_t size, int nmethod_size, bool allow_NonNMethod_space) throw () {
1667 // Try MethodNonProfiled and MethodProfiled.
1668 void* return_value = CodeCache::allocate(nmethod_size, CodeBlobType::MethodNonProfiled);
1669 if (return_value != nullptr || !allow_NonNMethod_space) return return_value;
1670 // Try NonNMethod or give up.
1671 return CodeCache::allocate(nmethod_size, CodeBlobType::NonNMethod);
1672 }
1673
1674 // For normal JIT compiled code
1675 nmethod::nmethod(
1676 Method* method,
1677 CompilerType type,
1678 int nmethod_size,
1679 int immutable_data_size,
1680 int mutable_data_size,
1681 int compile_id,
1682 int entry_bci,
1683 address immutable_data,
1684 CodeOffsets* offsets,
1685 int orig_pc_offset,
1686 DebugInformationRecorder* debug_info,
1687 Dependencies* dependencies,
1688 CodeBuffer *code_buffer,
1689 int frame_size,
1690 OopMapSet* oop_maps,
1691 ExceptionHandlerTable* handler_table,
1692 ImplicitExceptionTable* nul_chk_table,
1693 AbstractCompiler* compiler,
1694 CompLevel comp_level
1695 #if INCLUDE_JVMCI
1696 , char* speculations,
1697 int speculations_len,
1698 JVMCINMethodData* jvmci_data
1699 #endif
1700 )
1701 : CodeBlob("nmethod", CodeBlobKind::Nmethod, code_buffer, nmethod_size, sizeof(nmethod),
1702 offsets->value(CodeOffsets::Frame_Complete), frame_size, oop_maps, false, mutable_data_size),
1703 _deoptimization_generation(0),
1704 _gc_epoch(CodeCache::gc_epoch()),
1705 _method(method),
1706 _osr_link(nullptr)
1707 {
1708 assert(debug_info->oop_recorder() == code_buffer->oop_recorder(), "shared OR");
1709 {
1710 DEBUG_ONLY(NoSafepointVerifier nsv;)
1711 assert_locked_or_safepoint(CodeCache_lock);
1712
1713 init_defaults(code_buffer, offsets);
1714
1715 _osr_entry_point = code_begin() + offsets->value(CodeOffsets::OSR_Entry);
1716 _entry_bci = entry_bci;
1717 _compile_id = compile_id;
1718 _comp_level = comp_level;
1719 _compiler_type = type;
1720 _orig_pc_offset = orig_pc_offset;
1721
1722 _num_stack_arg_slots = entry_bci != InvocationEntryBci ? 0 : _method->constMethod()->num_stack_arg_slots();
1723
1724 set_ctable_begin(header_begin() + content_offset());
1725
1726 #if INCLUDE_JVMCI
1727 if (compiler->is_jvmci()) {
1728 // JVMCI might not produce any stub sections
1729 if (offsets->value(CodeOffsets::Exceptions) != -1) {
1730 _exception_offset = code_offset() + offsets->value(CodeOffsets::Exceptions);
1731 } else {
1732 _exception_offset = -1;
1733 }
1734 if (offsets->value(CodeOffsets::Deopt) != -1) {
1735 _deopt_handler_entry_offset = code_offset() + offsets->value(CodeOffsets::Deopt);
1736 } else {
1737 _deopt_handler_entry_offset = -1;
1738 }
1739 } else
1740 #endif
1741 {
1742 // Exception handler and deopt handler are in the stub section
1743 assert(offsets->value(CodeOffsets::Deopt ) != -1, "must be set");
1744
1745 bool has_exception_handler = (offsets->value(CodeOffsets::Exceptions) != -1);
1746 assert(has_exception_handler == (compiler->type() != compiler_c2),
1747 "C2 compiler doesn't provide exception handler stub code.");
1748 if (has_exception_handler) {
1749 _exception_offset = _stub_offset + offsets->value(CodeOffsets::Exceptions);
1750 } else {
1751 _exception_offset = -1;
1752 }
1753
1754 _deopt_handler_entry_offset = _stub_offset + offsets->value(CodeOffsets::Deopt);
1755 }
1756 if (offsets->value(CodeOffsets::UnwindHandler) != -1) {
1757 // C1 generates UnwindHandler at the end of instructions section.
1758 // Calculate positive offset as distance between the start of stubs section
1759 // (which is also the end of instructions section) and the start of the handler.
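// For example (illustrative numbers only): if the handler starts 0x40 bytes
// before the stub section, the stored offset is 0x40.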
1760 int unwind_handler_offset = code_offset() + offsets->value(CodeOffsets::UnwindHandler);
1761 CHECKED_CAST(_unwind_handler_offset, int16_t, (_stub_offset - unwind_handler_offset));
1762 } else {
1763 _unwind_handler_offset = -1;
1764 }
1765
1766 CHECKED_CAST(_oops_size, uint16_t, align_up(code_buffer->total_oop_size(), oopSize));
1767 uint16_t metadata_size;
1768 CHECKED_CAST(metadata_size, uint16_t, align_up(code_buffer->total_metadata_size(), wordSize));
1769 JVMCI_ONLY( _metadata_size = metadata_size; )
1770 int jvmci_data_size = 0 JVMCI_ONLY( + align_up(compiler->is_jvmci() ? jvmci_data->size() : 0, oopSize));
1771 _inline_entry_point = code_begin() + offsets->value(CodeOffsets::Inline_Entry);
1772 _verified_inline_entry_point = code_begin() + offsets->value(CodeOffsets::Verified_Inline_Entry);
1773 _verified_inline_ro_entry_point = code_begin() + offsets->value(CodeOffsets::Verified_Inline_Entry_RO);
1774
1775 assert(_mutable_data_size == _relocation_size + metadata_size + jvmci_data_size,
1776 "wrong mutable data size: %d != %d + %d + %d",
1777 _mutable_data_size, _relocation_size, metadata_size, jvmci_data_size);
assert(nmethod_size == data_end() - header_begin(), "wrong nmethod size: %d != %d",
nmethod_size, (int)(data_end() - header_begin()));
1780
1781 _immutable_data_size = immutable_data_size;
1782 if (immutable_data_size > 0) {
1783 assert(immutable_data != nullptr, "required");
1784 _immutable_data = immutable_data;
1785 } else {
// We need a unique non-null address
1787 _immutable_data = blob_end();
1788 }
1789 CHECKED_CAST(_nul_chk_table_offset, uint16_t, (align_up((int)dependencies->size_in_bytes(), oopSize)));
1790 CHECKED_CAST(_handler_table_offset, uint16_t, (_nul_chk_table_offset + align_up(nul_chk_table->size_in_bytes(), oopSize)));
1791 _scopes_pcs_offset = _handler_table_offset + align_up(handler_table->size_in_bytes(), oopSize);
1792 _scopes_data_offset = _scopes_pcs_offset + adjust_pcs_size(debug_info->pcs_size());
1793
1794 #if INCLUDE_JVMCI
1795 _speculations_offset = _scopes_data_offset + align_up(debug_info->data_size(), oopSize);
1796 _immutable_data_ref_count_offset = _speculations_offset + align_up(speculations_len, oopSize);
1797 #else
1798 _immutable_data_ref_count_offset = _scopes_data_offset + align_up(debug_info->data_size(), oopSize);
1799 #endif
1800 DEBUG_ONLY( int immutable_data_end_offset = _immutable_data_ref_count_offset + ImmutableDataRefCountSize; )
1801 assert(immutable_data_end_offset <= immutable_data_size, "wrong read-only data size: %d > %d",
1802 immutable_data_end_offset, immutable_data_size);
1803
1804 // Copy code and relocation info
1805 code_buffer->copy_code_and_locs_to(this);
1806 // Copy oops and metadata
1807 code_buffer->copy_values_to(this);
1808 dependencies->copy_to(this);
1809 // Copy PcDesc and ScopeDesc data
1810 debug_info->copy_to(this);
1811
// Create the PcDesc cache after the PcDesc data is copied - the data is used to initialize the cache
1813 _pc_desc_container = new PcDescContainer(scopes_pcs_begin());
1814
1815 #if INCLUDE_JVMCI
1816 if (compiler->is_jvmci()) {
1817 // Initialize the JVMCINMethodData object inlined into nm
1818 jvmci_nmethod_data()->copy(jvmci_data);
1819 }
1820 #endif
1821
1822 // Copy contents of ExceptionHandlerTable to nmethod
1823 handler_table->copy_to(this);
1824 nul_chk_table->copy_to(this);
1825
1826 #if INCLUDE_JVMCI
1827 // Copy speculations to nmethod
1828 if (speculations_size() != 0) {
1829 memcpy(speculations_begin(), speculations, speculations_len);
1830 }
1831 #endif
1832 init_immutable_data_ref_count();
1833
1834 post_init();
1835
1836 // we use the information of entry points to find out if a method is
1837 // static or non static
1838 assert(compiler->is_c2() || compiler->is_jvmci() ||
1839 _method->is_static() == (entry_point() == verified_entry_point()),
1840 " entry points must be same for static methods and vice versa");
1841 }
1842 }
1843
1844 // Print a short set of xml attributes to identify this nmethod. The
1845 // output should be embedded in some other element.
1846 void nmethod::log_identity(xmlStream* log) const {
1847 log->print(" compile_id='%d'", compile_id());
1848 const char* nm_kind = compile_kind();
1849 if (nm_kind != nullptr) log->print(" compile_kind='%s'", nm_kind);
1850 log->print(" compiler='%s'", compiler_name());
1851 if (TieredCompilation) {
1852 log->print(" level='%d'", comp_level());
1853 }
1854 #if INCLUDE_JVMCI
1855 if (jvmci_nmethod_data() != nullptr) {
1856 const char* jvmci_name = jvmci_nmethod_data()->name();
1857 if (jvmci_name != nullptr) {
1858 log->print(" jvmci_mirror_name='");
1859 log->text("%s", jvmci_name);
1860 log->print("'");
1861 }
1862 }
1863 #endif
1864 }
1865
1866
1867 #define LOG_OFFSET(log, name) \
1868 if (p2i(name##_end()) - p2i(name##_begin())) \
1869 log->print(" " XSTR(name) "_offset='%zd'" , \
1870 p2i(name##_begin()) - p2i(this))
1871
1872
1873 void nmethod::log_new_nmethod() const {
1874 if (LogCompilation && xtty != nullptr) {
1875 ttyLocker ttyl;
1876 xtty->begin_elem("nmethod");
1877 log_identity(xtty);
1878 xtty->print(" entry='" INTPTR_FORMAT "' size='%d'", p2i(code_begin()), size());
1879 xtty->print(" address='" INTPTR_FORMAT "'", p2i(this));
1880
1881 LOG_OFFSET(xtty, relocation);
1882 LOG_OFFSET(xtty, consts);
1883 LOG_OFFSET(xtty, insts);
1884 LOG_OFFSET(xtty, stub);
1885 LOG_OFFSET(xtty, scopes_data);
1886 LOG_OFFSET(xtty, scopes_pcs);
1887 LOG_OFFSET(xtty, dependencies);
1888 LOG_OFFSET(xtty, handler_table);
1889 LOG_OFFSET(xtty, nul_chk_table);
1890 LOG_OFFSET(xtty, oops);
1891 LOG_OFFSET(xtty, metadata);
1892
1893 xtty->method(method());
1894 xtty->stamp();
1895 xtty->end_elem();
1896 }
1897 }
1898
1899
1900 void nmethod::log_relocated_nmethod(nmethod* original) const {
1901 if (LogCompilation && xtty != nullptr) {
1902 ttyLocker ttyl;
xtty->begin_elem("relocated_nmethod");
1904 log_identity(xtty);
1905 xtty->print(" entry='" INTPTR_FORMAT "' size='%d'", p2i(code_begin()), size());
1906
1907 const char* original_code_heap_name = CodeCache::get_code_heap_name(CodeCache::get_code_blob_type(original));
1908 xtty->print(" original_address='" INTPTR_FORMAT "'", p2i(original));
1909 xtty->print(" original_code_heap='%s'", original_code_heap_name);
1910
1911 const char* new_code_heap_name = CodeCache::get_code_heap_name(CodeCache::get_code_blob_type(this));
1912 xtty->print(" new_address='" INTPTR_FORMAT "'", p2i(this));
1913 xtty->print(" new_code_heap='%s'", new_code_heap_name);
1914
1915 LOG_OFFSET(xtty, relocation);
1916 LOG_OFFSET(xtty, consts);
1917 LOG_OFFSET(xtty, insts);
1918 LOG_OFFSET(xtty, stub);
1919 LOG_OFFSET(xtty, scopes_data);
1920 LOG_OFFSET(xtty, scopes_pcs);
1921 LOG_OFFSET(xtty, dependencies);
1922 LOG_OFFSET(xtty, handler_table);
1923 LOG_OFFSET(xtty, nul_chk_table);
1924 LOG_OFFSET(xtty, oops);
1925 LOG_OFFSET(xtty, metadata);
1926
1927 xtty->method(method());
1928 xtty->stamp();
1929 xtty->end_elem();
1930 }
1931 }
1932
1933 #undef LOG_OFFSET
1934
1935
1936 // Print out more verbose output usually for a newly created nmethod.
1937 void nmethod::print_on_with_msg(outputStream* st, const char* msg) const {
1938 if (st != nullptr) {
1939 ttyLocker ttyl;
1940 if (WizardMode) {
1941 CompileTask::print(st, this, msg, /*short_form:*/ true);
1942 st->print_cr(" (" INTPTR_FORMAT ")", p2i(this));
1943 } else {
1944 CompileTask::print(st, this, msg, /*short_form:*/ false);
1945 }
1946 }
1947 }
1948
1949 void nmethod::maybe_print_nmethod(const DirectiveSet* directive) {
1950 bool printnmethods = directive->PrintAssemblyOption || directive->PrintNMethodsOption;
1951 if (printnmethods || PrintDebugInfo || PrintRelocations || PrintDependencies || PrintExceptionHandlers) {
1952 print_nmethod(printnmethods);
1953 }
1954 }
1955
1956 void nmethod::print_nmethod(bool printmethod) {
1957 ttyLocker ttyl; // keep the following output all in one block
1958 if (xtty != nullptr) {
1959 xtty->begin_head("print_nmethod");
1960 log_identity(xtty);
1961 xtty->stamp();
1962 xtty->end_head();
1963 }
// Print the header part, then print the requested information.
// Both are handled in decode2().
1966 if (printmethod) {
1967 ResourceMark m;
1968 if (is_compiled_by_c1()) {
1969 tty->cr();
1970 tty->print_cr("============================= C1-compiled nmethod ==============================");
1971 }
1972 if (is_compiled_by_jvmci()) {
1973 tty->cr();
1974 tty->print_cr("=========================== JVMCI-compiled nmethod =============================");
1975 }
1976 tty->print_cr("----------------------------------- Assembly -----------------------------------");
1977 decode2(tty);
1978 #if defined(SUPPORT_DATA_STRUCTS)
1979 if (AbstractDisassembler::show_structs()) {
1980 // Print the oops from the underlying CodeBlob as well.
1981 tty->print_cr("- - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - ");
1982 print_oops(tty);
1983 tty->print_cr("- - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - ");
1984 print_metadata(tty);
1985 tty->print_cr("- - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - ");
1986 print_pcs_on(tty);
1987 tty->print_cr("- - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - ");
1988 if (oop_maps() != nullptr) {
1989 tty->print("oop maps:"); // oop_maps()->print_on(tty) outputs a cr() at the beginning
1990 oop_maps()->print_on(tty);
1991 tty->print_cr("- - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - ");
1992 }
1993 }
1994 #endif
1995 } else {
1996 print(); // print the header part only.
1997 }
1998
1999 #if defined(SUPPORT_DATA_STRUCTS)
2000 if (AbstractDisassembler::show_structs()) {
2001 methodHandle mh(Thread::current(), _method);
2002 if (printmethod || PrintDebugInfo || CompilerOracle::has_option(mh, CompileCommandEnum::PrintDebugInfo)) {
2003 print_scopes();
2004 tty->print_cr("- - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - ");
2005 }
2006 if (printmethod || PrintRelocations || CompilerOracle::has_option(mh, CompileCommandEnum::PrintRelocations)) {
2007 print_relocations();
2008 tty->print_cr("- - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - ");
2009 }
2010 if (printmethod || PrintDependencies || CompilerOracle::has_option(mh, CompileCommandEnum::PrintDependencies)) {
2011 print_dependencies_on(tty);
2012 tty->print_cr("- - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - ");
2013 }
2014 if (printmethod || PrintExceptionHandlers) {
2015 print_handler_table();
2016 tty->print_cr("- - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - ");
2017 print_nul_chk_table();
2018 tty->print_cr("- - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - ");
2019 }
2020
2021 if (printmethod) {
2022 print_recorded_oops();
2023 tty->print_cr("- - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - ");
2024 print_recorded_metadata();
2025 tty->print_cr("- - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - ");
2026 }
2027 }
2028 #endif
2029
2030 if (xtty != nullptr) {
2031 xtty->tail("print_nmethod");
2032 }
2033 }
2034
2035
2036 // Promote one word from an assembly-time handle to a live embedded oop.
2037 inline void nmethod::initialize_immediate_oop(oop* dest, jobject handle) {
2038 if (handle == nullptr ||
2039 // As a special case, IC oops are initialized to 1 or -1.
2040 handle == (jobject) Universe::non_oop_word()) {
2041 *(void**)dest = handle;
2042 } else {
2043 *dest = JNIHandles::resolve_non_null(handle);
2044 }
2045 }
2046
2047
2048 // Have to have the same name because it's called by a template
2049 void nmethod::copy_values(GrowableArray<jobject>* array) {
2050 int length = array->length();
2051 assert((address)(oops_begin() + length) <= (address)oops_end(), "oops big enough");
2052 oop* dest = oops_begin();
2053 for (int index = 0 ; index < length; index++) {
2054 initialize_immediate_oop(&dest[index], array->at(index));
2055 }
2056
2057 // Now we can fix up all the oops in the code. We need to do this
2058 // in the code because the assembler uses jobjects as placeholders.
2059 // The code and relocations have already been initialized by the
2060 // CodeBlob constructor, so it is valid even at this early point to
2061 // iterate over relocations and patch the code.
2062 fix_oop_relocations(nullptr, nullptr, /*initialize_immediates=*/ true);
2063 }
2064
2065 void nmethod::copy_values(GrowableArray<Metadata*>* array) {
2066 int length = array->length();
2067 assert((address)(metadata_begin() + length) <= (address)metadata_end(), "big enough");
2068 Metadata** dest = metadata_begin();
2069 for (int index = 0 ; index < length; index++) {
2070 dest[index] = array->at(index);
2071 }
2072 }
2073
2074 void nmethod::fix_oop_relocations(address begin, address end, bool initialize_immediates) {
2075 // re-patch all oop-bearing instructions, just in case some oops moved
2076 RelocIterator iter(this, begin, end);
2077 while (iter.next()) {
2078 if (iter.type() == relocInfo::oop_type) {
2079 oop_Relocation* reloc = iter.oop_reloc();
2080 if (initialize_immediates && reloc->oop_is_immediate()) {
2081 oop* dest = reloc->oop_addr();
2082 jobject obj = *reinterpret_cast<jobject*>(dest);
2083 initialize_immediate_oop(dest, obj);
2084 }
2085 // Refresh the oop-related bits of this instruction.
2086 reloc->fix_oop_relocation();
2087 } else if (iter.type() == relocInfo::metadata_type) {
2088 metadata_Relocation* reloc = iter.metadata_reloc();
2089 reloc->fix_metadata_relocation();
2090 }
2091 }
2092 }
2093
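// Patch the post-call nop that follows a call site so it encodes the oopmap
// slot for the return pc and the pc's offset from the start of the nmethod.
// With that data a stack walker can recover the owning nmethod and its
// oopmap directly from the instruction stream, without a code cache lookup.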
2094 static void install_post_call_nop_displacement(nmethod* nm, address pc) {
2095 NativePostCallNop* nop = nativePostCallNop_at((address) pc);
2096 intptr_t cbaddr = (intptr_t) nm;
2097 intptr_t offset = ((intptr_t) pc) - cbaddr;
2098
2099 int oopmap_slot = nm->oop_maps()->find_slot_for_offset(int((intptr_t) pc - (intptr_t) nm->code_begin()));
2100 if (oopmap_slot < 0) { // this can happen at asynchronous (non-safepoint) stackwalks
2101 log_debug(codecache)("failed to find oopmap for cb: " INTPTR_FORMAT " offset: %d", cbaddr, (int) offset);
2102 } else if (!nop->patch(oopmap_slot, offset)) {
2103 log_debug(codecache)("failed to encode %d %d", oopmap_slot, (int) offset);
2104 }
2105 }
2106
2107 void nmethod::finalize_relocations() {
2108 NoSafepointVerifier nsv;
2109
2110 GrowableArray<NativeMovConstReg*> virtual_call_data;
2111
2112 // Make sure that post call nops fill in nmethod offsets eagerly so
2113 // we don't have to race with deoptimization
2114 RelocIterator iter(this);
2115 while (iter.next()) {
2116 if (iter.type() == relocInfo::virtual_call_type) {
2117 virtual_call_Relocation* r = iter.virtual_call_reloc();
2118 NativeMovConstReg* value = nativeMovConstReg_at(r->cached_value());
2119 virtual_call_data.append(value);
2120 } else if (iter.type() == relocInfo::post_call_nop_type) {
2121 post_call_nop_Relocation* const reloc = iter.post_call_nop_reloc();
2122 address pc = reloc->addr();
2123 install_post_call_nop_displacement(this, pc);
2124 }
2125 }
2126
2127 if (virtual_call_data.length() > 0) {
2128 // We allocate a block of CompiledICData per nmethod so the GC can purge this faster.
2129 _compiled_ic_data = new CompiledICData[virtual_call_data.length()];
2130 CompiledICData* next_data = _compiled_ic_data;
2131
2132 for (NativeMovConstReg* value : virtual_call_data) {
2133 value->set_data((intptr_t)next_data);
2134 next_data++;
2135 }
2136 }
2137 }
2138
2139 void nmethod::make_deoptimized() {
2140 if (!Continuations::enabled()) {
2141 // Don't deopt this again.
2142 set_deoptimized_done();
2143 return;
2144 }
2145
2146 assert(method() == nullptr || can_be_deoptimized(), "");
2147
2148 CompiledICLocker ml(this);
2149 assert(CompiledICLocker::is_safe(this), "mt unsafe call");
2150
// If the post call nops have already been patched, we can just bail out.
2152 if (has_been_deoptimized()) {
2153 return;
2154 }
2155
2156 ResourceMark rm;
2157 RelocIterator iter(this, oops_reloc_begin());
2158
2159 while (iter.next()) {
2160
2161 switch (iter.type()) {
2162 case relocInfo::virtual_call_type: {
2163 CompiledIC *ic = CompiledIC_at(&iter);
2164 address pc = ic->end_of_call();
2165 NativePostCallNop* nop = nativePostCallNop_at(pc);
2166 if (nop != nullptr) {
2167 nop->make_deopt();
2168 }
2169 assert(NativeDeoptInstruction::is_deopt_at(pc), "check");
2170 break;
2171 }
2172 case relocInfo::static_call_type:
2173 case relocInfo::opt_virtual_call_type: {
2174 CompiledDirectCall *csc = CompiledDirectCall::at(iter.reloc());
2175 address pc = csc->end_of_call();
2176 NativePostCallNop* nop = nativePostCallNop_at(pc);
2177 //tty->print_cr(" - static pc %p", pc);
2178 if (nop != nullptr) {
2179 nop->make_deopt();
2180 }
// We can't assert here: there are some calls to stubs / runtime
// that have reloc data but don't have a post call NOP.
2183 //assert(NativeDeoptInstruction::is_deopt_at(pc), "check");
2184 break;
2185 }
2186 default:
2187 break;
2188 }
2189 }
2190 // Don't deopt this again.
2191 set_deoptimized_done();
2192 }
2193
2194 void nmethod::verify_clean_inline_caches() {
2195 assert(CompiledICLocker::is_safe(this), "mt unsafe call");
2196
2197 ResourceMark rm;
2198 RelocIterator iter(this, oops_reloc_begin());
2199 while(iter.next()) {
2200 switch(iter.type()) {
2201 case relocInfo::virtual_call_type: {
2202 CompiledIC *ic = CompiledIC_at(&iter);
2203 CodeBlob *cb = CodeCache::find_blob(ic->destination());
2204 assert(cb != nullptr, "destination not in CodeBlob?");
2205 nmethod* nm = cb->as_nmethod_or_null();
2206 if (nm != nullptr) {
2207 // Verify that inline caches pointing to bad nmethods are clean
2208 if (!nm->is_in_use() || nm->is_unloading()) {
2209 assert(ic->is_clean(), "IC should be clean");
2210 }
2211 }
2212 break;
2213 }
2214 case relocInfo::static_call_type:
2215 case relocInfo::opt_virtual_call_type: {
2216 CompiledDirectCall *cdc = CompiledDirectCall::at(iter.reloc());
2217 CodeBlob *cb = CodeCache::find_blob(cdc->destination());
2218 assert(cb != nullptr, "destination not in CodeBlob?");
2219 nmethod* nm = cb->as_nmethod_or_null();
2220 if (nm != nullptr) {
2221 // Verify that inline caches pointing to bad nmethods are clean
2222 if (!nm->is_in_use() || nm->is_unloading() || nm->method()->code() != nm) {
2223 assert(cdc->is_clean(), "IC should be clean");
2224 }
2225 }
2226 break;
2227 }
2228 default:
2229 break;
2230 }
2231 }
2232 }
2233
2234 void nmethod::mark_as_maybe_on_stack() {
2235 AtomicAccess::store(&_gc_epoch, CodeCache::gc_epoch());
2236 }
2237
2238 bool nmethod::is_maybe_on_stack() {
// If the condition below is true, it means that the nmethod was found to
// be alive during the previous completed marking cycle.
2241 return AtomicAccess::load(&_gc_epoch) >= CodeCache::previous_completed_gc_marking_cycle();
2242 }
2243
2244 void nmethod::inc_decompile_count() {
2245 if (!is_compiled_by_c2() && !is_compiled_by_jvmci()) return;
2246 // Could be gated by ProfileTraps, but do not bother...
2247 #if INCLUDE_JVMCI
2248 if (jvmci_skip_profile_deopt()) {
2249 return;
2250 }
2251 #endif
2252 Method* m = method();
2253 if (m == nullptr) return;
2254 MethodData* mdo = m->method_data();
2255 if (mdo == nullptr) return;
2256 // There is a benign race here. See comments in methodData.hpp.
2257 mdo->inc_decompile_count();
2258 }
2259
2260 bool nmethod::try_transition(signed char new_state_int) {
2261 signed char new_state = new_state_int;
2262 assert_lock_strong(NMethodState_lock);
2263 signed char old_state = _state;
2264 if (old_state >= new_state) {
2265 // Ensure monotonicity of transitions.
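// For example, assuming the ordering not_installed < in_use < not_entrant,
// a transition from in_use to not_entrant succeeds, while not_entrant back
// to in_use (or repeating the current state) is rejected.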
2266 return false;
2267 }
2268 AtomicAccess::store(&_state, new_state);
2269 return true;
2270 }
2271
2272 void nmethod::invalidate_osr_method() {
2273 assert(_entry_bci != InvocationEntryBci, "wrong kind of nmethod");
2274 // Remove from list of active nmethods
2275 if (method() != nullptr) {
2276 method()->method_holder()->remove_osr_nmethod(this);
2277 }
2278 }
2279
2280 void nmethod::log_state_change(InvalidationReason invalidation_reason) const {
2281 if (LogCompilation) {
2282 if (xtty != nullptr) {
2283 ttyLocker ttyl; // keep the following output all in one block
2284 xtty->begin_elem("make_not_entrant thread='%zu' reason='%s'",
2285 os::current_thread_id(), invalidation_reason_to_string(invalidation_reason));
2286 log_identity(xtty);
2287 xtty->stamp();
2288 xtty->end_elem();
2289 }
2290 }
2291
2292 ResourceMark rm;
2293 stringStream ss(NEW_RESOURCE_ARRAY(char, 256), 256);
2294 ss.print("made not entrant: %s", invalidation_reason_to_string(invalidation_reason));
2295
2296 CompileTask::print_ul(this, ss.freeze());
2297 if (PrintCompilation) {
2298 print_on_with_msg(tty, ss.freeze());
2299 }
2300 }
2301
2302 void nmethod::unlink_from_method() {
2303 if (method() != nullptr) {
2304 method()->unlink_code(this);
2305 }
2306 }
2307
2308 // Invalidate code
2309 bool nmethod::make_not_entrant(InvalidationReason invalidation_reason) {
// This can be called while the system is already at a safepoint, which is ok
2311 NoSafepointVerifier nsv;
2312
2313 if (is_unloading()) {
2314 // If the nmethod is unloading, then it is already not entrant through
2315 // the nmethod entry barriers. No need to do anything; GC will unload it.
2316 return false;
2317 }
2318
2319 if (AtomicAccess::load(&_state) == not_entrant) {
2320 // Avoid taking the lock if already in required state.
2321 // This is safe from races because the state is an end-state,
2322 // which the nmethod cannot back out of once entered.
2323 // No need for fencing either.
2324 return false;
2325 }
2326
2327 {
2328 // Enter critical section. Does not block for safepoint.
2329 ConditionalMutexLocker ml(NMethodState_lock, !NMethodState_lock->owned_by_self(), Mutex::_no_safepoint_check_flag);
2330
2331 if (AtomicAccess::load(&_state) == not_entrant) {
2332 // another thread already performed this transition so nothing
2333 // to do, but return false to indicate this.
2334 return false;
2335 }
2336
2337 if (is_osr_method()) {
2338 // This logic is equivalent to the logic below for patching the
2339 // verified entry point of regular methods.
2340 // this effectively makes the osr nmethod not entrant
2341 invalidate_osr_method();
2342 } else {
2343 // The caller can be calling the method statically or through an inline
2344 // cache call.
2345 BarrierSet::barrier_set()->barrier_set_nmethod()->make_not_entrant(this);
2346 }
2347
2348 if (update_recompile_counts()) {
2349 // Mark the method as decompiled.
2350 inc_decompile_count();
2351 }
2352
2353 BarrierSetNMethod* bs_nm = BarrierSet::barrier_set()->barrier_set_nmethod();
2354 if (bs_nm == nullptr || !bs_nm->supports_entry_barrier(this)) {
2355 // If nmethod entry barriers are not supported, we won't mark
2356 // nmethods as on-stack when they become on-stack. So we
2357 // degrade to a less accurate flushing strategy, for now.
2358 mark_as_maybe_on_stack();
2359 }
2360
2361 // Change state
2362 bool success = try_transition(not_entrant);
2363 assert(success, "Transition can't fail");
2364
2365 // Log the transition once
2366 log_state_change(invalidation_reason);
2367
2368 // Remove nmethod from method.
2369 unlink_from_method();
2370
2371 } // leave critical region under NMethodState_lock
2372
2373 #if INCLUDE_JVMCI
2374 // Invalidate can't occur while holding the NMethodState_lock
2375 JVMCINMethodData* nmethod_data = jvmci_nmethod_data();
2376 if (nmethod_data != nullptr) {
2377 nmethod_data->invalidate_nmethod_mirror(this, invalidation_reason);
2378 }
2379 #endif
2380
2381 #ifdef ASSERT
2382 if (is_osr_method() && method() != nullptr) {
2383 // Make sure osr nmethod is invalidated, i.e. not on the list
2384 bool found = method()->method_holder()->remove_osr_nmethod(this);
2385 assert(!found, "osr nmethod should have been invalidated");
2386 }
2387 #endif
2388
2389 return true;
2390 }
2391
2392 // For concurrent GCs, there must be a handshake between unlink and flush
2393 void nmethod::unlink() {
2394 if (is_unlinked()) {
2395 // Already unlinked.
2396 return;
2397 }
2398
2399 flush_dependencies();
2400
2401 // unlink_from_method will take the NMethodState_lock.
2402 // In this case we don't strictly need it when unlinking nmethods from
2403 // the Method, because it is only concurrently unlinked by
2404 // the entry barrier, which acquires the per nmethod lock.
2405 unlink_from_method();
2406
2407 if (is_osr_method()) {
2408 invalidate_osr_method();
2409 }
2410
2411 #if INCLUDE_JVMCI
2412 // Clear the link between this nmethod and a HotSpotNmethod mirror
2413 JVMCINMethodData* nmethod_data = jvmci_nmethod_data();
2414 if (nmethod_data != nullptr) {
2415 nmethod_data->invalidate_nmethod_mirror(this, is_cold() ?
2416 nmethod::InvalidationReason::UNLOADING_COLD :
2417 nmethod::InvalidationReason::UNLOADING);
2418 }
2419 #endif
2420
2421 // Post before flushing as jmethodID is being used
2422 post_compiled_method_unload();
2423
2424 // Register for flushing when it is safe. For concurrent class unloading,
2425 // that would be after the unloading handshake, and for STW class unloading
2426 // that would be when getting back to the VM thread.
2427 ClassUnloadingContext::context()->register_unlinked_nmethod(this);
2428 }
2429
2430 void nmethod::purge(bool unregister_nmethod) {
2431
2432 MutexLocker ml(CodeCache_lock, Mutex::_no_safepoint_check_flag);
2433
2434 // completely deallocate this method
2435 Events::log_nmethod_flush(Thread::current(), "flushing %s nmethod " INTPTR_FORMAT, is_osr_method() ? "osr" : "", p2i(this));
2436
2437 LogTarget(Debug, codecache) lt;
2438 if (lt.is_enabled()) {
2439 ResourceMark rm;
2440 LogStream ls(lt);
2441 const char* method_name = method()->name()->as_C_string();
2442 const size_t codecache_capacity = CodeCache::capacity()/1024;
2443 const size_t codecache_free_space = CodeCache::unallocated_capacity(CodeCache::get_code_blob_type(this))/1024;
2444 ls.print("Flushing nmethod %6d/" INTPTR_FORMAT ", level=%d, osr=%d, cold=%d, epoch=" UINT64_FORMAT ", cold_count=" UINT64_FORMAT ". "
2445 "Cache capacity: %zuKb, free space: %zuKb. method %s (%s)",
2446 _compile_id, p2i(this), _comp_level, is_osr_method(), is_cold(), _gc_epoch, CodeCache::cold_gc_count(),
2447 codecache_capacity, codecache_free_space, method_name, compiler_name());
2448 }
2449
2450 // We need to deallocate any ExceptionCache data.
2451 // Note that we do not need to grab the nmethod lock for this, it
2452 // better be thread safe if we're disposing of it!
2453 ExceptionCache* ec = exception_cache();
2454 while(ec != nullptr) {
2455 ExceptionCache* next = ec->next();
2456 delete ec;
2457 ec = next;
2458 }
2459 if (_pc_desc_container != nullptr) {
2460 delete _pc_desc_container;
2461 }
2462 delete[] _compiled_ic_data;
2463
2464 if (_immutable_data != blob_end()) {
2465 // Free memory if this was the last nmethod referencing immutable data
2466 if (dec_immutable_data_ref_count() == 0) {
2467 os::free(_immutable_data);
2468 }
2469
_immutable_data = blob_end(); // Valid non-null address
2471 }
2472
2473 if (unregister_nmethod) {
2474 Universe::heap()->unregister_nmethod(this);
2475 }
2476 CodeCache::unregister_old_nmethod(this);
2477
2478 JVMCI_ONLY( _metadata_size = 0; )
2479 CodeBlob::purge();
2480 }
2481
2482 oop nmethod::oop_at(int index) const {
2483 if (index == 0) {
2484 return nullptr;
2485 }
2486
2487 BarrierSetNMethod* bs_nm = BarrierSet::barrier_set()->barrier_set_nmethod();
2488 return bs_nm->oop_load_no_keepalive(this, index);
2489 }
2490
2491 oop nmethod::oop_at_phantom(int index) const {
2492 if (index == 0) {
2493 return nullptr;
2494 }
2495
2496 BarrierSetNMethod* bs_nm = BarrierSet::barrier_set()->barrier_set_nmethod();
2497 return bs_nm->oop_load_phantom(this, index);
2498 }
2499
// Notify all classes this nmethod is dependent on that it is no
// longer dependent.
2503
2504 void nmethod::flush_dependencies() {
2505 if (!has_flushed_dependencies()) {
2506 set_has_flushed_dependencies(true);
2507 for (Dependencies::DepStream deps(this); deps.next(); ) {
2508 if (deps.type() == Dependencies::call_site_target_value) {
2509 // CallSite dependencies are managed on per-CallSite instance basis.
2510 oop call_site = deps.argument_oop(0);
2511 MethodHandles::clean_dependency_context(call_site);
2512 } else {
2513 InstanceKlass* ik = deps.context_type();
2514 if (ik == nullptr) {
2515 continue; // ignore things like evol_method
2516 }
2517 // During GC liveness of dependee determines class that needs to be updated.
2518 // The GC may clean dependency contexts concurrently and in parallel.
2519 ik->clean_dependency_context();
2520 }
2521 }
2522 }
2523 }
2524
2525 void nmethod::post_compiled_method(CompileTask* task) {
2526 task->mark_success();
2527 task->set_nm_content_size(content_size());
2528 task->set_nm_insts_size(insts_size());
2529 task->set_nm_total_size(total_size());
2530
2531 // JVMTI -- compiled method notification (must be done outside lock)
2532 post_compiled_method_load_event();
2533
2534 if (CompilationLog::log() != nullptr) {
2535 CompilationLog::log()->log_nmethod(JavaThread::current(), this);
2536 }
2537
2538 const DirectiveSet* directive = task->directive();
2539 maybe_print_nmethod(directive);
2540 }
2541
2542 #if INCLUDE_CDS
2543 static GrowableArrayCHeap<nmethod*, mtClassShared>* _delayed_compiled_method_load_events = nullptr;
2544
2545 void nmethod::add_delayed_compiled_method_load_event(nmethod* nm) {
2546 precond(CDSConfig::is_using_aot_linked_classes());
2547 precond(!ServiceThread::has_started());
2548
// We are still in the single-threaded stage of VM bootstrap. No need to lock.
2550 if (_delayed_compiled_method_load_events == nullptr) {
2551 _delayed_compiled_method_load_events = new GrowableArrayCHeap<nmethod*, mtClassShared>();
2552 }
2553 _delayed_compiled_method_load_events->append(nm);
2554 }
2555
2556 void nmethod::post_delayed_compiled_method_load_events() {
2557 precond(ServiceThread::has_started());
2558 if (_delayed_compiled_method_load_events != nullptr) {
2559 for (int i = 0; i < _delayed_compiled_method_load_events->length(); i++) {
2560 nmethod* nm = _delayed_compiled_method_load_events->at(i);
2561 nm->post_compiled_method_load_event();
2562 }
2563 delete _delayed_compiled_method_load_events;
2564 _delayed_compiled_method_load_events = nullptr;
2565 }
2566 }
2567 #endif
2568
2569 // ------------------------------------------------------------------
2570 // post_compiled_method_load_event
2571 // new method for install_code() path
2572 // Transfer information from compilation to jvmti
2573 void nmethod::post_compiled_method_load_event(JvmtiThreadState* state) {
2574 #if INCLUDE_CDS
2575 if (!ServiceThread::has_started()) {
2576 // With AOT-linked classes, we could compile wrappers for native methods before the
2577 // ServiceThread has been started, so we must delay the events to be posted later.
2578 assert(state == nullptr, "must be");
2579 add_delayed_compiled_method_load_event(this);
2580 return;
2581 }
2582 #endif
2583
2584 // This is a bad time for a safepoint. We don't want
2585 // this nmethod to get unloaded while we're queueing the event.
2586 NoSafepointVerifier nsv;
2587
2588 Method* m = method();
2589 HOTSPOT_COMPILED_METHOD_LOAD(
2590 (char *) m->klass_name()->bytes(),
2591 m->klass_name()->utf8_length(),
2592 (char *) m->name()->bytes(),
2593 m->name()->utf8_length(),
2594 (char *) m->signature()->bytes(),
2595 m->signature()->utf8_length(),
2596 insts_begin(), insts_size());
2597
2598
2599 if (JvmtiExport::should_post_compiled_method_load()) {
2600 // Only post unload events if load events are found.
2601 set_load_reported();
2602 // If a JavaThread hasn't been passed in, let the Service thread
2603 // (which is a real Java thread) post the event
2604 JvmtiDeferredEvent event = JvmtiDeferredEvent::compiled_method_load_event(this);
2605 if (state == nullptr) {
2606 // Execute any barrier code for this nmethod as if it's called, since
2607 // keeping it alive looks like stack walking.
2608 run_nmethod_entry_barrier();
2609 ServiceThread::enqueue_deferred_event(&event);
2610 } else {
2611 // This enters the nmethod barrier outside in the caller.
2612 state->enqueue_event(&event);
2613 }
2614 }
2615 }
2616
2617 void nmethod::post_compiled_method_unload() {
2618 assert(_method != nullptr, "just checking");
2619 DTRACE_METHOD_UNLOAD_PROBE(method());
2620
2621 // If a JVMTI agent has enabled the CompiledMethodUnload event then
2622 // post the event. The Method* will not be valid when this is freed.
2623
2624 // Don't bother posting the unload if the load event wasn't posted.
2625 if (load_reported() && JvmtiExport::should_post_compiled_method_unload()) {
2626 JvmtiDeferredEvent event =
2627 JvmtiDeferredEvent::compiled_method_unload_event(
2628 method()->jmethod_id(), insts_begin());
2629 ServiceThread::enqueue_deferred_event(&event);
2630 }
2631 }
2632
2633 // Iterate over metadata calling this function. Used by RedefineClasses
2634 void nmethod::metadata_do(MetadataClosure* f) {
2635 {
2636 // Visit all immediate references that are embedded in the instruction stream.
2637 RelocIterator iter(this, oops_reloc_begin());
2638 while (iter.next()) {
2639 if (iter.type() == relocInfo::metadata_type) {
2640 metadata_Relocation* r = iter.metadata_reloc();
// In this loop, we must only follow metadata directly embedded in
// the code. Other metadata entries (metadata_index > 0) are visited as part
// of the metadata section below.
2644 assert(1 == (r->metadata_is_immediate()) +
2645 (r->metadata_addr() >= metadata_begin() && r->metadata_addr() < metadata_end()),
2646 "metadata must be found in exactly one place");
2647 if (r->metadata_is_immediate() && r->metadata_value() != nullptr) {
2648 Metadata* md = r->metadata_value();
2649 if (md != _method) f->do_metadata(md);
2650 }
2651 } else if (iter.type() == relocInfo::virtual_call_type) {
2652 // Check compiledIC holders associated with this nmethod
2653 ResourceMark rm;
2654 CompiledIC *ic = CompiledIC_at(&iter);
2655 ic->metadata_do(f);
2656 }
2657 }
2658 }
2659
2660 // Visit the metadata section
2661 for (Metadata** p = metadata_begin(); p < metadata_end(); p++) {
if (*p == Universe::non_oop_word() || *p == nullptr) continue; // skip sentinels and nulls
2663 Metadata* md = *p;
2664 f->do_metadata(md);
2665 }
2666
2667 // Visit metadata not embedded in the other places.
2668 if (_method != nullptr) f->do_metadata(_method);
2669 }
2670
2671 // Heuristic for nuking nmethods even though their oops are live.
2672 // Main purpose is to reduce code cache pressure and get rid of
2673 // nmethods that don't seem to be all that relevant any longer.
2674 bool nmethod::is_cold() {
2675 if (!MethodFlushing || is_not_installed()) {
2676 // No heuristic unloading at all
2677 return false;
2678 }
2679
2680 if (!is_maybe_on_stack() && is_not_entrant()) {
2681 // Not entrant nmethods that are not on any stack can just
2682 // be removed
2683 return true;
2684 }
2685
2686 BarrierSetNMethod* bs_nm = BarrierSet::barrier_set()->barrier_set_nmethod();
2687 if (bs_nm == nullptr || !bs_nm->supports_entry_barrier(this)) {
2688 // On platforms that don't support nmethod entry barriers, we can't
2689 // trust the temporal aspect of the gc epochs. So we can't detect
2690 // cold nmethods on such platforms.
2691 return false;
2692 }
2693
2694 if (!UseCodeCacheFlushing) {
2695 // Bail out if we don't heuristically remove nmethods
2696 return false;
2697 }
2698
2699 // Other code can be phased out more gradually after N GCs
2700 return CodeCache::previous_completed_gc_marking_cycle() > _gc_epoch + 2 * CodeCache::cold_gc_count();
2701 }
2702
2703 // The _is_unloading_state encodes a tuple comprising the unloading cycle
2704 // and the result of IsUnloadingBehaviour::is_unloading() for that cycle.
2705 // This is the bit layout of the _is_unloading_state byte: 00000CCU
2706 // CC refers to the cycle, which has 2 bits, and U refers to the result of
2707 // IsUnloadingBehaviour::is_unloading() for that unloading cycle.
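// For example, the (illustrative) state value 0b101 decodes as U = 1, i.e.
// is_unloading() was true, computed during unloading cycle CC = 0b10 = 2.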
2708
2709 class IsUnloadingState: public AllStatic {
2710 static const uint8_t _is_unloading_mask = 1;
2711 static const uint8_t _is_unloading_shift = 0;
2712 static const uint8_t _unloading_cycle_mask = 6;
2713 static const uint8_t _unloading_cycle_shift = 1;
2714
2715 static uint8_t set_is_unloading(uint8_t state, bool value) {
2716 state &= (uint8_t)~_is_unloading_mask;
2717 if (value) {
2718 state |= 1 << _is_unloading_shift;
2719 }
2720 assert(is_unloading(state) == value, "unexpected unloading cycle overflow");
2721 return state;
2722 }
2723
2724 static uint8_t set_unloading_cycle(uint8_t state, uint8_t value) {
2725 state &= (uint8_t)~_unloading_cycle_mask;
2726 state |= (uint8_t)(value << _unloading_cycle_shift);
2727 assert(unloading_cycle(state) == value, "unexpected unloading cycle overflow");
2728 return state;
2729 }
2730
2731 public:
2732 static bool is_unloading(uint8_t state) { return (state & _is_unloading_mask) >> _is_unloading_shift == 1; }
2733 static uint8_t unloading_cycle(uint8_t state) { return (state & _unloading_cycle_mask) >> _unloading_cycle_shift; }
2734
2735 static uint8_t create(bool is_unloading, uint8_t unloading_cycle) {
2736 uint8_t state = 0;
2737 state = set_is_unloading(state, is_unloading);
2738 state = set_unloading_cycle(state, unloading_cycle);
2739 return state;
2740 }
2741 };
2742
2743 bool nmethod::is_unloading() {
2744 uint8_t state = AtomicAccess::load(&_is_unloading_state);
2745 bool state_is_unloading = IsUnloadingState::is_unloading(state);
2746 if (state_is_unloading) {
2747 return true;
2748 }
2749 uint8_t state_unloading_cycle = IsUnloadingState::unloading_cycle(state);
2750 uint8_t current_cycle = CodeCache::unloading_cycle();
2751 if (state_unloading_cycle == current_cycle) {
2752 return false;
2753 }
2754
2755 // The IsUnloadingBehaviour is responsible for calculating if the nmethod
2756 // should be unloaded. This can be either because there is a dead oop,
2757 // or because is_cold() heuristically determines it is time to unload.
2758 state_unloading_cycle = current_cycle;
2759 state_is_unloading = IsUnloadingBehaviour::is_unloading(this);
2760 uint8_t new_state = IsUnloadingState::create(state_is_unloading, state_unloading_cycle);
2761
2762 // Note that if an nmethod has dead oops, everyone will agree that the
2763 // nmethod is_unloading. However, the is_cold heuristics can yield
2764 // different outcomes, so we guard the computed result with a CAS
2765 // to ensure all threads have a shared view of whether an nmethod
2766 // is_unloading or not.
2767 uint8_t found_state = AtomicAccess::cmpxchg(&_is_unloading_state, state, new_state, memory_order_relaxed);
2768
2769 if (found_state == state) {
2770 // First to change state, we win
2771 return state_is_unloading;
2772 } else {
2773 // State already set, so use it
2774 return IsUnloadingState::is_unloading(found_state);
2775 }
2776 }
2777
2778 void nmethod::clear_unloading_state() {
2779 uint8_t state = IsUnloadingState::create(false, CodeCache::unloading_cycle());
2780 AtomicAccess::store(&_is_unloading_state, state);
2781 }
2782
2783
2784 // This is called at the end of the strong tracing/marking phase of a
2785 // GC to unload an nmethod if it contains otherwise unreachable
2786 // oops or is heuristically found to be not important.
2787 void nmethod::do_unloading(bool unloading_occurred) {
2788 // Make sure the oop's ready to receive visitors
2789 if (is_unloading()) {
2790 unlink();
2791 } else {
2792 unload_nmethod_caches(unloading_occurred);
2793 BarrierSetNMethod* bs_nm = BarrierSet::barrier_set()->barrier_set_nmethod();
2794 if (bs_nm != nullptr) {
2795 bs_nm->disarm(this);
2796 }
2797 }
2798 }
2799
2800 void nmethod::oops_do(OopClosure* f) {
2801 // Prevent extra code cache walk for platforms that don't have immediate oops.
2802 if (relocInfo::mustIterateImmediateOopsInCode()) {
2803 RelocIterator iter(this, oops_reloc_begin());
2804
2805 while (iter.next()) {
2806 if (iter.type() == relocInfo::oop_type ) {
2807 oop_Relocation* r = iter.oop_reloc();
2808 // In this loop, we must only follow those oops directly embedded in
2809 // the code. Other oops (oop_index>0) are seen as part of scopes_oops.
2810 assert(1 == (r->oop_is_immediate()) +
2811 (r->oop_addr() >= oops_begin() && r->oop_addr() < oops_end()),
2812 "oop must be found in exactly one place");
2813 if (r->oop_is_immediate() && r->oop_value() != nullptr) {
2814 f->do_oop(r->oop_addr());
2815 }
2816 }
2817 }
2818 }
2819
2820 // Scopes
2821 // This includes oop constants not inlined in the code stream.
2822 for (oop* p = oops_begin(); p < oops_end(); p++) {
2823 if (*p == Universe::non_oop_word()) continue; // skip non-oops
2824 f->do_oop(p);
2825 }
2826 }
2827
2828 void nmethod::follow_nmethod(OopIterateClosure* cl) {
2829 // Process oops in the nmethod
2830 oops_do(cl);
2831
2832 // CodeCache unloading support
2833 mark_as_maybe_on_stack();
2834
2835 BarrierSetNMethod* bs_nm = BarrierSet::barrier_set()->barrier_set_nmethod();
2836 bs_nm->disarm(this);
2837
2838 // There's an assumption made that this function is not used by GCs that
2839 // relocate objects, and therefore we don't call fix_oop_relocations.
2840 }
2841
2842 nmethod* volatile nmethod::_oops_do_mark_nmethods;
2843
2844 void nmethod::oops_do_log_change(const char* state) {
2845 LogTarget(Trace, gc, nmethod) lt;
2846 if (lt.is_enabled()) {
2847 LogStream ls(lt);
2848 CompileTask::print(&ls, this, state, true /* short_form */);
2849 }
2850 }
2851
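// Claiming protocol sketch (informal): an nmethod is first claimed with a
// "weak request" tag; after weak processing it is pushed onto the global
// _oops_do_mark_nmethods list with a "weak done" tag. Strong processing
// either claims directly ("strong done") or upgrades a weak claim via
// "strong request". The list is torn down in oops_do_marking_epilogue().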
2852 bool nmethod::oops_do_try_claim() {
2853 if (oops_do_try_claim_weak_request()) {
2854 nmethod* result = oops_do_try_add_to_list_as_weak_done();
2855 assert(result == nullptr, "adding to global list as weak done must always succeed.");
2856 return true;
2857 }
2858 return false;
2859 }
2860
2861 bool nmethod::oops_do_try_claim_weak_request() {
2862 assert(SafepointSynchronize::is_at_safepoint(), "only at safepoint");
2863
2864 if ((_oops_do_mark_link == nullptr) &&
2865 (AtomicAccess::replace_if_null(&_oops_do_mark_link, mark_link(this, claim_weak_request_tag)))) {
2866 oops_do_log_change("oops_do, mark weak request");
2867 return true;
2868 }
2869 return false;
2870 }
2871
2872 void nmethod::oops_do_set_strong_done(nmethod* old_head) {
2873 _oops_do_mark_link = mark_link(old_head, claim_strong_done_tag);
2874 }
2875
2876 nmethod::oops_do_mark_link* nmethod::oops_do_try_claim_strong_done() {
2877 assert(SafepointSynchronize::is_at_safepoint(), "only at safepoint");
2878
2879 oops_do_mark_link* old_next = AtomicAccess::cmpxchg(&_oops_do_mark_link, mark_link(nullptr, claim_weak_request_tag), mark_link(this, claim_strong_done_tag));
2880 if (old_next == nullptr) {
2881 oops_do_log_change("oops_do, mark strong done");
2882 }
2883 return old_next;
2884 }
2885
2886 nmethod::oops_do_mark_link* nmethod::oops_do_try_add_strong_request(nmethod::oops_do_mark_link* next) {
2887 assert(SafepointSynchronize::is_at_safepoint(), "only at safepoint");
2888 assert(next == mark_link(this, claim_weak_request_tag), "Should be claimed as weak");
2889
2890 oops_do_mark_link* old_next = AtomicAccess::cmpxchg(&_oops_do_mark_link, next, mark_link(this, claim_strong_request_tag));
2891 if (old_next == next) {
2892 oops_do_log_change("oops_do, mark strong request");
2893 }
2894 return old_next;
2895 }
2896
2897 bool nmethod::oops_do_try_claim_weak_done_as_strong_done(nmethod::oops_do_mark_link* next) {
2898 assert(SafepointSynchronize::is_at_safepoint(), "only at safepoint");
2899 assert(extract_state(next) == claim_weak_done_tag, "Should be claimed as weak done");
2900
2901 oops_do_mark_link* old_next = AtomicAccess::cmpxchg(&_oops_do_mark_link, next, mark_link(extract_nmethod(next), claim_strong_done_tag));
2902 if (old_next == next) {
2903 oops_do_log_change("oops_do, mark weak done -> mark strong done");
2904 return true;
2905 }
2906 return false;
2907 }
2908
2909 nmethod* nmethod::oops_do_try_add_to_list_as_weak_done() {
2910 assert(SafepointSynchronize::is_at_safepoint(), "only at safepoint");
2911
2912 assert(extract_state(_oops_do_mark_link) == claim_weak_request_tag ||
2913 extract_state(_oops_do_mark_link) == claim_strong_request_tag,
2914 "must be but is nmethod " PTR_FORMAT " %u", p2i(extract_nmethod(_oops_do_mark_link)), extract_state(_oops_do_mark_link));
2915
2916 nmethod* old_head = AtomicAccess::xchg(&_oops_do_mark_nmethods, this);
2917 // Self-loop if needed.
2918 if (old_head == nullptr) {
2919 old_head = this;
2920 }
2921 // Try to install end of list and weak done tag.
2922 if (AtomicAccess::cmpxchg(&_oops_do_mark_link, mark_link(this, claim_weak_request_tag), mark_link(old_head, claim_weak_done_tag)) == mark_link(this, claim_weak_request_tag)) {
2923 oops_do_log_change("oops_do, mark weak done");
2924 return nullptr;
2925 } else {
2926 return old_head;
2927 }
2928 }
2929
2930 void nmethod::oops_do_add_to_list_as_strong_done() {
2931 assert(SafepointSynchronize::is_at_safepoint(), "only at safepoint");
2932
2933 nmethod* old_head = AtomicAccess::xchg(&_oops_do_mark_nmethods, this);
2934 // Self-loop if needed.
2935 if (old_head == nullptr) {
2936 old_head = this;
2937 }
2938 assert(_oops_do_mark_link == mark_link(this, claim_strong_done_tag), "must be but is nmethod " PTR_FORMAT " state %u",
2939 p2i(extract_nmethod(_oops_do_mark_link)), extract_state(_oops_do_mark_link));
2940
2941 oops_do_set_strong_done(old_head);
2942 }
2943
2944 void nmethod::oops_do_process_weak(OopsDoProcessor* p) {
2945 if (!oops_do_try_claim_weak_request()) {
2946 // Failed to claim for weak processing.
2947 oops_do_log_change("oops_do, mark weak request fail");
2948 return;
2949 }
2950
2951 p->do_regular_processing(this);
2952
2953 nmethod* old_head = oops_do_try_add_to_list_as_weak_done();
2954 if (old_head == nullptr) {
2955 return;
2956 }
2957 oops_do_log_change("oops_do, mark weak done fail");
2958 // Adding to global list failed, another thread added a strong request.
2959 assert(extract_state(_oops_do_mark_link) == claim_strong_request_tag,
2960 "must be but is %u", extract_state(_oops_do_mark_link));
2961
2962 oops_do_log_change("oops_do, mark weak request -> mark strong done");
2963
2964 oops_do_set_strong_done(old_head);
2965 // Do missing strong processing.
2966 p->do_remaining_strong_processing(this);
2967 }
2968
2969 void nmethod::oops_do_process_strong(OopsDoProcessor* p) {
2970 oops_do_mark_link* next_raw = oops_do_try_claim_strong_done();
2971 if (next_raw == nullptr) {
2972 p->do_regular_processing(this);
2973 oops_do_add_to_list_as_strong_done();
2974 return;
2975 }
2976 // Claim failed. Figure out why and handle it.
2977 if (oops_do_has_weak_request(next_raw)) {
2978 oops_do_mark_link* old = next_raw;
2979 // Claim failed because being weak processed (state == "weak request").
2980 // Try to request deferred strong processing.
2981 next_raw = oops_do_try_add_strong_request(old);
2982 if (next_raw == old) {
2983 // Successfully requested deferred strong processing.
2984 return;
2985 }
2986 // Failed because of a concurrent transition. No longer in "weak request" state.
2987 }
2988 if (oops_do_has_any_strong_state(next_raw)) {
2989 // Already claimed for strong processing or requested for such.
2990 return;
2991 }
2992 if (oops_do_try_claim_weak_done_as_strong_done(next_raw)) {
2993 // Successfully claimed "weak done" as "strong done". Do the missing marking.
2994 p->do_remaining_strong_processing(this);
2995 return;
2996 }
2997 // Claim failed, some other thread got it.
2998 }
2999
3000 void nmethod::oops_do_marking_prologue() {
3001 assert_at_safepoint();
3002
3003 log_trace(gc, nmethod)("oops_do_marking_prologue");
3004 assert(_oops_do_mark_nmethods == nullptr, "must be empty");
3005 }
3006
3007 void nmethod::oops_do_marking_epilogue() {
3008 assert_at_safepoint();
3009
3010 nmethod* next = _oops_do_mark_nmethods;
3011 _oops_do_mark_nmethods = nullptr;
3012 if (next != nullptr) {
3013 nmethod* cur;
3014 do {
3015 cur = next;
3016 next = extract_nmethod(cur->_oops_do_mark_link);
3017 cur->_oops_do_mark_link = nullptr;
3018 DEBUG_ONLY(cur->verify_oop_relocations());
3019
3020 LogTarget(Trace, gc, nmethod) lt;
3021 if (lt.is_enabled()) {
3022 LogStream ls(lt);
3023 CompileTask::print(&ls, cur, "oops_do, unmark", /*short_form:*/ true);
3024 }
3025 // End if self-loop has been detected.
3026 } while (cur != next);
3027 }
3028 log_trace(gc, nmethod)("oops_do_marking_epilogue");
3029 }
3030
3031 inline bool includes(void* p, void* from, void* to) {
3032 return from <= p && p < to;
3033 }
3034
3035
3036 void nmethod::copy_scopes_pcs(PcDesc* pcs, int count) {
3037 assert(count >= 2, "must be sentinel values, at least");
3038
3039 #ifdef ASSERT
3040 // must be sorted and unique; we do a binary search in find_pc_desc()
3041 int prev_offset = pcs[0].pc_offset();
3042 assert(prev_offset == PcDesc::lower_offset_limit,
3043 "must start with a sentinel");
3044 for (int i = 1; i < count; i++) {
3045 int this_offset = pcs[i].pc_offset();
3046 assert(this_offset > prev_offset, "offsets must be sorted");
3047 prev_offset = this_offset;
3048 }
3049 assert(prev_offset == PcDesc::upper_offset_limit,
3050 "must end with a sentinel");
3051 #endif //ASSERT
3052
3053 int size = count * sizeof(PcDesc);
3054 assert(scopes_pcs_size() >= size, "oob");
3055 memcpy(scopes_pcs_begin(), pcs, size);
3056
3057 // Adjust the final sentinel downward.
3058 PcDesc* last_pc = &scopes_pcs_begin()[count-1];
3059 assert(last_pc->pc_offset() == PcDesc::upper_offset_limit, "sanity");
3060 last_pc->set_pc_offset(content_size() + 1);
3061 for (; last_pc + 1 < scopes_pcs_end(); last_pc += 1) {
3062 // Fill any rounding gaps with copies of the last record.
3063 last_pc[1] = last_pc[0];
3064 }
3065 // The following assert could fail if sizeof(PcDesc) is not
3066 // an integral multiple of oopSize (the rounding term).
3067 // If it fails, change the logic to always allocate a multiple
3068 // of sizeof(PcDesc), and fill unused words with copies of *last_pc.
3069 assert(last_pc + 1 == scopes_pcs_end(), "must match exactly");
3070 }
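
// Resulting layout, sketched for illustration (count = n):
//
//   [lower sentinel][pc_desc]...[pc_desc][adjusted upper sentinel][copies...]
//
// The trailing sentinel is pulled down to content_size() + 1 so lookups stay
// within the code, and any rounding gap up to scopes_pcs_end() is filled with
// copies of that last record, keeping the table sorted and fully initialized.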
3071
3072 void nmethod::copy_scopes_data(u_char* buffer, int size) {
3073 assert(scopes_data_size() >= size, "oob");
3074 memcpy(scopes_data_begin(), buffer, size);
3075 }
3076
3077 #ifdef ASSERT
3078 static PcDesc* linear_search(int pc_offset, bool approximate, PcDesc* lower, PcDesc* upper) {
3079 PcDesc* res = nullptr;
3080 assert(lower != nullptr && lower->pc_offset() == PcDesc::lower_offset_limit,
3081 "must start with a sentinel");
3082 // lower + 1 to exclude initial sentinel
3083 for (PcDesc* p = lower + 1; p < upper; p++) {
3084 NOT_PRODUCT(--pc_nmethod_stats.pc_desc_tests); // don't count this call to match_desc
3085 if (match_desc(p, pc_offset, approximate)) {
3086 if (res == nullptr) {
3087 res = p;
3088 } else {
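        // More than one match: poison the result so that the asserts
        // comparing against this linear search detect the ambiguity.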
3089 res = (PcDesc*) badAddress;
3090 }
3091 }
3092 }
3093 return res;
3094 }
3095 #endif
3096
3097
3098 #ifndef PRODUCT
// Version of the method that also collects statistics.
3100 PcDesc* PcDescContainer::find_pc_desc(address pc, bool approximate, address code_begin,
3101 PcDesc* lower, PcDesc* upper) {
3102 ++pc_nmethod_stats.pc_desc_queries;
3103 if (approximate) ++pc_nmethod_stats.pc_desc_approx;
3104
3105 PcDesc* desc = _pc_desc_cache.last_pc_desc();
3106 assert(desc != nullptr, "PcDesc cache should be initialized already");
3107 if (desc->pc_offset() == (pc - code_begin)) {
3108 // Cached value matched
3109 ++pc_nmethod_stats.pc_desc_tests;
3110 ++pc_nmethod_stats.pc_desc_repeats;
3111 return desc;
3112 }
3113 return find_pc_desc_internal(pc, approximate, code_begin, lower, upper);
3114 }
3115 #endif
3116
3117 // Finds a PcDesc with real-pc equal to "pc"
3118 PcDesc* PcDescContainer::find_pc_desc_internal(address pc, bool approximate, address code_begin,
3119 PcDesc* lower_incl, PcDesc* upper_incl) {
3120 if ((pc < code_begin) ||
3121 (pc - code_begin) >= (ptrdiff_t) PcDesc::upper_offset_limit) {
3122 return nullptr; // PC is wildly out of range
3123 }
3124 int pc_offset = (int) (pc - code_begin);
3125
3126 // Check the PcDesc cache if it contains the desired PcDesc
  // (This has an almost 100% hit rate.)
3128 PcDesc* res = _pc_desc_cache.find_pc_desc(pc_offset, approximate);
3129 if (res != nullptr) {
3130 assert(res == linear_search(pc_offset, approximate, lower_incl, upper_incl), "cache ok");
3131 return res;
3132 }
3133
3134 // Fallback algorithm: quasi-linear search for the PcDesc
3135 // Find the last pc_offset less than the given offset.
3136 // The successor must be the required match, if there is a match at all.
3137 // (Use a fixed radix to avoid expensive affine pointer arithmetic.)
3138 PcDesc* lower = lower_incl; // this is initial sentinel
3139 PcDesc* upper = upper_incl - 1; // exclude final sentinel
3140 if (lower >= upper) return nullptr; // no PcDescs at all
3141
3142 #define assert_LU_OK \
3143 /* invariant on lower..upper during the following search: */ \
3144 assert(lower->pc_offset() < pc_offset, "sanity"); \
3145 assert(upper->pc_offset() >= pc_offset, "sanity")
3146 assert_LU_OK;
3147
3148 // Use the last successful return as a split point.
3149 PcDesc* mid = _pc_desc_cache.last_pc_desc();
3150 NOT_PRODUCT(++pc_nmethod_stats.pc_desc_searches);
3151 if (mid->pc_offset() < pc_offset) {
3152 lower = mid;
3153 } else {
3154 upper = mid;
3155 }
3156
3157 // Take giant steps at first (4096, then 256, then 16, then 1)
3158 const int LOG2_RADIX = 4 /*smaller steps in debug mode:*/ DEBUG_ONLY(-1);
3159 const int RADIX = (1 << LOG2_RADIX);
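  // In debug builds LOG2_RADIX is 3, so the step sequence becomes
  // 512, 64, 8, 1, exercising the loop structure more thoroughly.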
3160 for (int step = (1 << (LOG2_RADIX*3)); step > 1; step >>= LOG2_RADIX) {
3161 while ((mid = lower + step) < upper) {
3162 assert_LU_OK;
3163 NOT_PRODUCT(++pc_nmethod_stats.pc_desc_searches);
3164 if (mid->pc_offset() < pc_offset) {
3165 lower = mid;
3166 } else {
3167 upper = mid;
3168 break;
3169 }
3170 }
3171 assert_LU_OK;
3172 }
3173
3174 // Sneak up on the value with a linear search of length ~16.
3175 while (true) {
3176 assert_LU_OK;
3177 mid = lower + 1;
3178 NOT_PRODUCT(++pc_nmethod_stats.pc_desc_searches);
3179 if (mid->pc_offset() < pc_offset) {
3180 lower = mid;
3181 } else {
3182 upper = mid;
3183 break;
3184 }
3185 }
3186 #undef assert_LU_OK
3187
3188 if (match_desc(upper, pc_offset, approximate)) {
3189 assert(upper == linear_search(pc_offset, approximate, lower_incl, upper_incl), "search mismatch");
3190 if (!Thread::current_in_asgct()) {
3191 // we don't want to modify the cache if we're in ASGCT
3192 // which is typically called in a signal handler
3193 _pc_desc_cache.add_pc_desc(upper);
3194 }
3195 return upper;
3196 } else {
3197 assert(nullptr == linear_search(pc_offset, approximate, lower_incl, upper_incl), "search mismatch");
3198 return nullptr;
3199 }
3200 }
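
// Worked example of the search above: with LOG2_RADIX == 4 and a pc_offset
// near the end of a large table, the first pass advances lower in steps of
// 4096 until it would overshoot, the following passes refine in steps of 256
// and 16, and the final linear loop closes a remaining gap of at most 16
// entries. Every step preserves the assert_LU_OK invariant:
// lower->pc_offset() < pc_offset <= upper->pc_offset().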
3201
3202 bool nmethod::check_dependency_on(DepChange& changes) {
3203 // What has happened:
3204 // 1) a new class dependee has been added
3205 // 2) dependee and all its super classes have been marked
3206 bool found_check = false; // set true if we are upset
3207 for (Dependencies::DepStream deps(this); deps.next(); ) {
3208 // Evaluate only relevant dependencies.
3209 if (deps.spot_check_dependency_at(changes) != nullptr) {
3210 found_check = true;
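      // In debug builds, keep scanning so that all failing dependencies
      // are found and reported, not just the first one.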
3211 NOT_DEBUG(break);
3212 }
3213 }
3214 return found_check;
3215 }
3216
3217 // Called from mark_for_deoptimization, when dependee is invalidated.
3218 bool nmethod::is_dependent_on_method(Method* dependee) {
3219 for (Dependencies::DepStream deps(this); deps.next(); ) {
3220 if (Dependencies::has_method_dep(deps.type())) {
3221 Method* method = deps.method_argument(0);
3222 if (method == dependee) return true;
3223 }
3224 }
3225 return false;
3226 }
3227
3228 void nmethod_init() {
3229 // make sure you didn't forget to adjust the filler fields
3230 assert(sizeof(nmethod) % oopSize == 0, "nmethod size must be multiple of a word");
3231 }
3232
3233 // -----------------------------------------------------------------------------
3234 // Verification
3235
3236 class VerifyOopsClosure: public OopClosure {
3237 nmethod* _nm;
3238 bool _ok;
3239 public:
3240 VerifyOopsClosure(nmethod* nm) : _nm(nm), _ok(true) { }
3241 bool ok() { return _ok; }
3242 virtual void do_oop(oop* p) {
3243 if (oopDesc::is_oop_or_null(*p)) return;
    // Print diagnostic information before calling print_nmethod().
    // Assertions therein might prevent the call from returning.
3246 tty->print_cr("*** non-oop " PTR_FORMAT " found at " PTR_FORMAT " (offset %d)",
3247 p2i(*p), p2i(p), (int)((intptr_t)p - (intptr_t)_nm));
3248 if (_ok) {
3249 _nm->print_nmethod(true);
3250 _ok = false;
3251 }
3252 }
3253 virtual void do_oop(narrowOop* p) { ShouldNotReachHere(); }
3254 };
3255
3256 class VerifyMetadataClosure: public MetadataClosure {
3257 public:
3258 void do_metadata(Metadata* md) {
3259 if (md->is_method()) {
3260 Method* method = (Method*)md;
3261 assert(!method->is_old(), "Should not be installing old methods");
3262 }
3263 }
3264 };
3265
3266
3267 void nmethod::verify() {
3268 if (is_not_entrant())
3269 return;
3270
3271 // assert(oopDesc::is_oop(method()), "must be valid");
3272
3273 ResourceMark rm;
3274
3275 if (!CodeCache::contains(this)) {
3276 fatal("nmethod at " INTPTR_FORMAT " not in zone", p2i(this));
3277 }
3278
  if (is_native_method())
    return;
3281
3282 nmethod* nm = CodeCache::find_nmethod(verified_entry_point());
3283 if (nm != this) {
3284 fatal("find_nmethod did not find this nmethod (" INTPTR_FORMAT ")", p2i(this));
3285 }
3286
3287 for (PcDesc* p = scopes_pcs_begin(); p < scopes_pcs_end(); p++) {
    if (!p->verify(this)) {
3289 tty->print_cr("\t\tin nmethod at " INTPTR_FORMAT " (pcs)", p2i(this));
3290 }
3291 }
3292
3293 #ifdef ASSERT
3294 #if INCLUDE_JVMCI
3295 {
3296 // Verify that implicit exceptions that deoptimize have a PcDesc and OopMap
3297 ImmutableOopMapSet* oms = oop_maps();
3298 ImplicitExceptionTable implicit_table(this);
3299 for (uint i = 0; i < implicit_table.len(); i++) {
3300 int exec_offset = (int) implicit_table.get_exec_offset(i);
3301 if (implicit_table.get_exec_offset(i) == implicit_table.get_cont_offset(i)) {
3302 assert(pc_desc_at(code_begin() + exec_offset) != nullptr, "missing PcDesc");
3303 bool found = false;
3304 for (int i = 0, imax = oms->count(); i < imax; i++) {
3305 if (oms->pair_at(i)->pc_offset() == exec_offset) {
3306 found = true;
3307 break;
3308 }
3309 }
3310 assert(found, "missing oopmap");
3311 }
3312 }
3313 }
3314 #endif
3315 #endif
3316
3317 VerifyOopsClosure voc(this);
3318 oops_do(&voc);
3319 assert(voc.ok(), "embedded oops must be OK");
3320 Universe::heap()->verify_nmethod(this);
3321
3322 assert(_oops_do_mark_link == nullptr, "_oops_do_mark_link for %s should be nullptr but is " PTR_FORMAT,
3323 nm->method()->external_name(), p2i(_oops_do_mark_link));
3324 verify_scopes();
3325
3326 CompiledICLocker nm_verify(this);
3327 VerifyMetadataClosure vmc;
3328 metadata_do(&vmc);
3329 }
3330
3331
3332 void nmethod::verify_interrupt_point(address call_site, bool is_inline_cache) {
3333
3334 // Verify IC only when nmethod installation is finished.
3335 if (!is_not_installed()) {
3336 if (CompiledICLocker::is_safe(this)) {
3337 if (is_inline_cache) {
3338 CompiledIC_at(this, call_site);
3339 } else {
3340 CompiledDirectCall::at(call_site);
3341 }
3342 } else {
3343 CompiledICLocker ml_verify(this);
3344 if (is_inline_cache) {
3345 CompiledIC_at(this, call_site);
3346 } else {
3347 CompiledDirectCall::at(call_site);
3348 }
3349 }
3350 }
3351
3352 HandleMark hm(Thread::current());
3353
3354 PcDesc* pd = pc_desc_at(nativeCall_at(call_site)->return_address());
3355 assert(pd != nullptr, "PcDesc must exist");
3356 for (ScopeDesc* sd = new ScopeDesc(this, pd);
3357 !sd->is_top(); sd = sd->sender()) {
3358 sd->verify();
3359 }
3360 }
3361
3362 void nmethod::verify_scopes() {
  if (method() == nullptr) return; // Runtime stubs have no scope
  if (method()->is_native()) return; // Ignore stub methods.
  // Iterate through all interrupt points
  // and verify that the debug information is valid.
3367 RelocIterator iter(this);
3368 while (iter.next()) {
3369 address stub = nullptr;
3370 switch (iter.type()) {
3371 case relocInfo::virtual_call_type:
3372 verify_interrupt_point(iter.addr(), true /* is_inline_cache */);
3373 break;
3374 case relocInfo::opt_virtual_call_type:
3375 stub = iter.opt_virtual_call_reloc()->static_stub();
3376 verify_interrupt_point(iter.addr(), false /* is_inline_cache */);
3377 break;
3378 case relocInfo::static_call_type:
3379 stub = iter.static_call_reloc()->static_stub();
3380 verify_interrupt_point(iter.addr(), false /* is_inline_cache */);
3381 break;
3382 case relocInfo::runtime_call_type:
3383 case relocInfo::runtime_call_w_cp_type: {
3384 address destination = iter.reloc()->value();
3385 // Right now there is no way to find out which entries support
3386 // an interrupt point. It would be nice if we had this
3387 // information in a table.
3388 break;
3389 }
3390 default:
3391 break;
3392 }
3393 assert(stub == nullptr || stub_contains(stub), "static call stub outside stub section");
3394 }
3395 }
3396
3397
3398 // -----------------------------------------------------------------------------
3399 // Printing operations
3400
3401 void nmethod::print_on_impl(outputStream* st) const {
3402 ResourceMark rm;
3403
3404 st->print("Compiled method ");
3405
3406 if (is_compiled_by_c1()) {
3407 st->print("(c1) ");
3408 } else if (is_compiled_by_c2()) {
3409 st->print("(c2) ");
3410 } else if (is_compiled_by_jvmci()) {
3411 st->print("(JVMCI) ");
3412 } else {
3413 st->print("(n/a) ");
3414 }
3415
3416 print_on_with_msg(st, nullptr);
3417
3418 if (WizardMode) {
3419 st->print("((nmethod*) " INTPTR_FORMAT ") ", p2i(this));
3420 st->print(" for method " INTPTR_FORMAT , p2i(method()));
3421 st->print(" { ");
3422 st->print_cr("%s ", state());
3423 st->print_cr("}:");
3424 }
3425 if (size () > 0) st->print_cr(" total in heap [" INTPTR_FORMAT "," INTPTR_FORMAT "] = %d",
3426 p2i(this),
3427 p2i(this) + size(),
3428 size());
3429 if (consts_size () > 0) st->print_cr(" constants [" INTPTR_FORMAT "," INTPTR_FORMAT "] = %d",
3430 p2i(consts_begin()),
3431 p2i(consts_end()),
3432 consts_size());
3433 if (insts_size () > 0) st->print_cr(" main code [" INTPTR_FORMAT "," INTPTR_FORMAT "] = %d",
3434 p2i(insts_begin()),
3435 p2i(insts_end()),
3436 insts_size());
3437 if (stub_size () > 0) st->print_cr(" stub code [" INTPTR_FORMAT "," INTPTR_FORMAT "] = %d",
3438 p2i(stub_begin()),
3439 p2i(stub_end()),
3440 stub_size());
3441 if (oops_size () > 0) st->print_cr(" oops [" INTPTR_FORMAT "," INTPTR_FORMAT "] = %d",
3442 p2i(oops_begin()),
3443 p2i(oops_end()),
3444 oops_size());
3445 if (mutable_data_size() > 0) st->print_cr(" mutable data [" INTPTR_FORMAT "," INTPTR_FORMAT "] = %d",
3446 p2i(mutable_data_begin()),
3447 p2i(mutable_data_end()),
3448 mutable_data_size());
3449 if (relocation_size() > 0) st->print_cr(" relocation [" INTPTR_FORMAT "," INTPTR_FORMAT "] = %d",
3450 p2i(relocation_begin()),
3451 p2i(relocation_end()),
3452 relocation_size());
3453 if (metadata_size () > 0) st->print_cr(" metadata [" INTPTR_FORMAT "," INTPTR_FORMAT "] = %d",
3454 p2i(metadata_begin()),
3455 p2i(metadata_end()),
3456 metadata_size());
3457 #if INCLUDE_JVMCI
3458 if (jvmci_data_size () > 0) st->print_cr(" JVMCI data [" INTPTR_FORMAT "," INTPTR_FORMAT "] = %d",
3459 p2i(jvmci_data_begin()),
3460 p2i(jvmci_data_end()),
3461 jvmci_data_size());
3462 #endif
3463 if (immutable_data_size() > 0) st->print_cr(" immutable data [" INTPTR_FORMAT "," INTPTR_FORMAT "] = %d",
3464 p2i(immutable_data_begin()),
3465 p2i(immutable_data_end()),
3466 immutable_data_size());
3467 if (dependencies_size () > 0) st->print_cr(" dependencies [" INTPTR_FORMAT "," INTPTR_FORMAT "] = %d",
3468 p2i(dependencies_begin()),
3469 p2i(dependencies_end()),
3470 dependencies_size());
3471 if (nul_chk_table_size() > 0) st->print_cr(" nul chk table [" INTPTR_FORMAT "," INTPTR_FORMAT "] = %d",
3472 p2i(nul_chk_table_begin()),
3473 p2i(nul_chk_table_end()),
3474 nul_chk_table_size());
3475 if (handler_table_size() > 0) st->print_cr(" handler table [" INTPTR_FORMAT "," INTPTR_FORMAT "] = %d",
3476 p2i(handler_table_begin()),
3477 p2i(handler_table_end()),
3478 handler_table_size());
3479 if (scopes_pcs_size () > 0) st->print_cr(" scopes pcs [" INTPTR_FORMAT "," INTPTR_FORMAT "] = %d",
3480 p2i(scopes_pcs_begin()),
3481 p2i(scopes_pcs_end()),
3482 scopes_pcs_size());
3483 if (scopes_data_size () > 0) st->print_cr(" scopes data [" INTPTR_FORMAT "," INTPTR_FORMAT "] = %d",
3484 p2i(scopes_data_begin()),
3485 p2i(scopes_data_end()),
3486 scopes_data_size());
3487 #if INCLUDE_JVMCI
3488 if (speculations_size () > 0) st->print_cr(" speculations [" INTPTR_FORMAT "," INTPTR_FORMAT "] = %d",
3489 p2i(speculations_begin()),
3490 p2i(speculations_end()),
3491 speculations_size());
3492 #endif
3493 }
3494
3495 void nmethod::print_code() {
3496 ResourceMark m;
3497 ttyLocker ttyl;
3498 // Call the specialized decode method of this class.
3499 decode(tty);
3500 }
3501
#ifndef PRODUCT // the called InstanceKlass methods are available only then. Declared as PRODUCT_RETURN
3503
3504 void nmethod::print_dependencies_on(outputStream* out) {
3505 ResourceMark rm;
3506 stringStream st;
3507 st.print_cr("Dependencies:");
3508 for (Dependencies::DepStream deps(this); deps.next(); ) {
3509 deps.print_dependency(&st);
3510 InstanceKlass* ctxk = deps.context_type();
3511 if (ctxk != nullptr) {
3512 if (ctxk->is_dependent_nmethod(this)) {
3513 st.print_cr(" [nmethod<=klass]%s", ctxk->external_name());
3514 }
3515 }
3516 deps.log_dependency(); // put it into the xml log also
3517 }
3518 out->print_raw(st.as_string());
3519 }
3520 #endif
3521
3522 #if defined(SUPPORT_DATA_STRUCTS)
3523
3524 // Print the oops from the underlying CodeBlob.
3525 void nmethod::print_oops(outputStream* st) {
3526 ResourceMark m;
3527 st->print("Oops:");
3528 if (oops_begin() < oops_end()) {
3529 st->cr();
3530 for (oop* p = oops_begin(); p < oops_end(); p++) {
3531 Disassembler::print_location((unsigned char*)p, (unsigned char*)oops_begin(), (unsigned char*)oops_end(), st, true, false);
3532 st->print(PTR_FORMAT " ", *((uintptr_t*)p));
3533 if (Universe::contains_non_oop_word(p)) {
3534 st->print_cr("NON_OOP");
3535 continue; // skip non-oops
3536 }
3537 if (*p == nullptr) {
3538 st->print_cr("nullptr-oop");
3539 continue; // skip non-oops
3540 }
3541 (*p)->print_value_on(st);
3542 st->cr();
3543 }
3544 } else {
3545 st->print_cr(" <list empty>");
3546 }
3547 }
3548
3549 // Print metadata pool.
3550 void nmethod::print_metadata(outputStream* st) {
3551 ResourceMark m;
3552 st->print("Metadata:");
3553 if (metadata_begin() < metadata_end()) {
3554 st->cr();
3555 for (Metadata** p = metadata_begin(); p < metadata_end(); p++) {
3556 Disassembler::print_location((unsigned char*)p, (unsigned char*)metadata_begin(), (unsigned char*)metadata_end(), st, true, false);
3557 st->print(PTR_FORMAT " ", *((uintptr_t*)p));
3558 if (*p && *p != Universe::non_oop_word()) {
3559 (*p)->print_value_on(st);
3560 }
3561 st->cr();
3562 }
3563 } else {
3564 st->print_cr(" <list empty>");
3565 }
3566 }
3567
3568 #ifndef PRODUCT // ScopeDesc::print_on() is available only then. Declared as PRODUCT_RETURN
3569 void nmethod::print_scopes_on(outputStream* st) {
3570 // Find the first pc desc for all scopes in the code and print it.
3571 ResourceMark rm;
3572 st->print("scopes:");
3573 if (scopes_pcs_begin() < scopes_pcs_end()) {
3574 st->cr();
3575 for (PcDesc* p = scopes_pcs_begin(); p < scopes_pcs_end(); p++) {
3576 if (p->scope_decode_offset() == DebugInformationRecorder::serialized_null)
3577 continue;
3578
3579 ScopeDesc* sd = scope_desc_at(p->real_pc(this));
3580 while (sd != nullptr) {
3581 sd->print_on(st, p); // print output ends with a newline
3582 sd = sd->sender();
3583 }
3584 }
3585 } else {
3586 st->print_cr(" <list empty>");
3587 }
3588 }
3589 #endif
3590
#ifndef PRODUCT // RelocIterator supports printing only then.
3592 void nmethod::print_relocations() {
3593 ResourceMark m; // in case methods get printed via the debugger
3594 tty->print_cr("relocations:");
3595 RelocIterator iter(this);
3596 iter.print_on(tty);
3597 }
3598 #endif
3599
3600 void nmethod::print_pcs_on(outputStream* st) {
3601 ResourceMark m; // in case methods get printed via debugger
3602 st->print("pc-bytecode offsets:");
3603 if (scopes_pcs_begin() < scopes_pcs_end()) {
3604 st->cr();
3605 for (PcDesc* p = scopes_pcs_begin(); p < scopes_pcs_end(); p++) {
3606 p->print_on(st, this); // print output ends with a newline
3607 }
3608 } else {
3609 st->print_cr(" <list empty>");
3610 }
3611 }
3612
3613 void nmethod::print_handler_table() {
3614 ExceptionHandlerTable(this).print(code_begin());
3615 }
3616
3617 void nmethod::print_nul_chk_table() {
3618 ImplicitExceptionTable(this).print(code_begin());
3619 }
3620
3621 void nmethod::print_recorded_oop(int log_n, int i) {
3622 void* value;
3623
3624 if (i == 0) {
3625 value = nullptr;
3626 } else {
3627 // Be careful around non-oop words. Don't create an oop
3628 // with that value, or it will assert in verification code.
3629 if (Universe::contains_non_oop_word(oop_addr_at(i))) {
3630 value = Universe::non_oop_word();
3631 } else {
3632 value = oop_at(i);
3633 }
3634 }
3635
3636 tty->print("#%*d: " INTPTR_FORMAT " ", log_n, i, p2i(value));
3637
3638 if (value == Universe::non_oop_word()) {
3639 tty->print("non-oop word");
3640 } else {
3641 if (value == nullptr) {
3642 tty->print("nullptr-oop");
3643 } else {
3644 oop_at(i)->print_value_on(tty);
3645 }
3646 }
3647
3648 tty->cr();
3649 }
3650
3651 void nmethod::print_recorded_oops() {
3652 const int n = oops_count();
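  // Field width for the index column, derived from the number of entries.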
3653 const int log_n = (n<10) ? 1 : (n<100) ? 2 : (n<1000) ? 3 : (n<10000) ? 4 : 6;
3654 tty->print("Recorded oops:");
3655 if (n > 0) {
3656 tty->cr();
3657 for (int i = 0; i < n; i++) {
3658 print_recorded_oop(log_n, i);
3659 }
3660 } else {
3661 tty->print_cr(" <list empty>");
3662 }
3663 }
3664
3665 void nmethod::print_recorded_metadata() {
3666 const int n = metadata_count();
3667 const int log_n = (n<10) ? 1 : (n<100) ? 2 : (n<1000) ? 3 : (n<10000) ? 4 : 6;
3668 tty->print("Recorded metadata:");
3669 if (n > 0) {
3670 tty->cr();
3671 for (int i = 0; i < n; i++) {
3672 Metadata* m = metadata_at(i);
3673 tty->print("#%*d: " INTPTR_FORMAT " ", log_n, i, p2i(m));
3674 if (m == (Metadata*)Universe::non_oop_word()) {
3675 tty->print("non-metadata word");
3676 } else if (m == nullptr) {
3677 tty->print("nullptr-oop");
3678 } else {
3679 Metadata::print_value_on_maybe_null(tty, m);
3680 }
3681 tty->cr();
3682 }
3683 } else {
3684 tty->print_cr(" <list empty>");
3685 }
3686 }
3687 #endif
3688
3689 #if defined(SUPPORT_ASSEMBLY) || defined(SUPPORT_ABSTRACT_ASSEMBLY)
3690
3691 void nmethod::print_constant_pool(outputStream* st) {
3692 //-----------------------------------
3693 //---< Print the constant pool >---
3694 //-----------------------------------
3695 int consts_size = this->consts_size();
  if (consts_size > 0) {
3697 unsigned char* cstart = this->consts_begin();
3698 unsigned char* cp = cstart;
3699 unsigned char* cend = cp + consts_size;
3700 unsigned int bytes_per_line = 4;
3701 unsigned int CP_alignment = 8;
3702 unsigned int n;
3703
3704 st->cr();
3705
3706 //---< print CP header to make clear what's printed >---
    if (((uintptr_t)cp & (CP_alignment - 1)) == 0) {
3708 n = bytes_per_line;
3709 st->print_cr("[Constant Pool]");
3710 Disassembler::print_location(cp, cstart, cend, st, true, true);
3711 Disassembler::print_hexdata(cp, n, st, true);
3712 st->cr();
3713 } else {
3714 n = (int)((uintptr_t)cp & (bytes_per_line-1));
3715 st->print_cr("[Constant Pool (unaligned)]");
3716 }
3717
3718 //---< print CP contents, bytes_per_line at a time >---
3719 while (cp < cend) {
3720 Disassembler::print_location(cp, cstart, cend, st, true, false);
3721 Disassembler::print_hexdata(cp, n, st, false);
3722 cp += n;
3723 n = bytes_per_line;
3724 st->cr();
3725 }
3726
3727 //---< Show potential alignment gap between constant pool and code >---
3728 cend = code_begin();
    if (cp < cend) {
3730 n = 4;
3731 st->print_cr("[Code entry alignment]");
3732 while (cp < cend) {
3733 Disassembler::print_location(cp, cstart, cend, st, false, false);
3734 cp += n;
3735 st->cr();
3736 }
3737 }
3738 } else {
3739 st->print_cr("[Constant Pool (empty)]");
3740 }
3741 st->cr();
3742 }
3743
3744 #endif
3745
3746 // Disassemble this nmethod.
3747 // Print additional debug information, if requested. This could be code
3748 // comments, block comments, profiling counters, etc.
// The undisassembled format is useful when no disassembler library is available.
3750 // The resulting hex dump (with markers) can be disassembled later, or on
3751 // another system, when/where a disassembler library is available.
3752 void nmethod::decode2(outputStream* ost) const {
3753
3754 // Called from frame::back_trace_with_decode without ResourceMark.
3755 ResourceMark rm;
3756
3757 // Make sure we have a valid stream to print on.
3758 outputStream* st = ost ? ost : tty;
3759
#if defined(SUPPORT_ABSTRACT_ASSEMBLY) && !defined(SUPPORT_ASSEMBLY)
3761 const bool use_compressed_format = true;
3762 const bool compressed_with_comments = use_compressed_format && (AbstractDisassembler::show_comment() ||
3763 AbstractDisassembler::show_block_comment());
3764 #else
3765 const bool use_compressed_format = Disassembler::is_abstract();
3766 const bool compressed_with_comments = use_compressed_format && (AbstractDisassembler::show_comment() ||
3767 AbstractDisassembler::show_block_comment());
3768 #endif
3769
3770 st->cr();
3771 this->print_on(st);
3772 st->cr();
3773
3774 #if defined(SUPPORT_ASSEMBLY)
3775 //----------------------------------
3776 //---< Print real disassembly >---
3777 //----------------------------------
  if (!use_compressed_format) {
3779 st->print_cr("[Disassembly]");
3780 Disassembler::decode(const_cast<nmethod*>(this), st);
3781 st->bol();
3782 st->print_cr("[/Disassembly]");
3783 return;
3784 }
3785 #endif
3786
3787 #if defined(SUPPORT_ABSTRACT_ASSEMBLY)
3788
3789 // Compressed undisassembled disassembly format.
3790 // The following status values are defined/supported:
3791 // = 0 - currently at bol() position, nothing printed yet on current line.
3792 // = 1 - currently at position after print_location().
3793 // > 1 - in the midst of printing instruction stream bytes.
3794 int compressed_format_idx = 0;
3795 int code_comment_column = 0;
3796 const int instr_maxlen = Assembler::instr_maxlen();
3797 const uint tabspacing = 8;
3798 unsigned char* start = this->code_begin();
3799 unsigned char* p = this->code_begin();
3800 unsigned char* end = this->code_end();
3801 unsigned char* pss = p; // start of a code section (used for offsets)
3802
3803 if ((start == nullptr) || (end == nullptr)) {
3804 st->print_cr("PrintAssembly not possible due to uninitialized section pointers");
3805 return;
3806 }
3807 #endif
3808
3809 #if defined(SUPPORT_ABSTRACT_ASSEMBLY)
3810 //---< plain abstract disassembly, no comments or anything, just section headers >---
  if (use_compressed_format && !compressed_with_comments) {
3812 const_cast<nmethod*>(this)->print_constant_pool(st);
3813
3814 st->bol();
3815 st->cr();
3816 st->print_cr("Loading hsdis library failed, undisassembled code is shown in MachCode section");
3817 //---< Open the output (Marker for post-mortem disassembler) >---
3818 st->print_cr("[MachCode]");
3819 const char* header = nullptr;
3820 address p0 = p;
3821 while (p < end) {
3822 address pp = p;
3823 while ((p < end) && (header == nullptr)) {
3824 header = nmethod_section_label(p);
3825 pp = p;
3826 p += Assembler::instr_len(p);
3827 }
3828 if (pp > p0) {
3829 AbstractDisassembler::decode_range_abstract(p0, pp, start, end, st, Assembler::instr_maxlen());
3830 p0 = pp;
3831 p = pp;
3832 header = nullptr;
3833 } else if (header != nullptr) {
3834 st->bol();
3835 st->print_cr("%s", header);
3836 header = nullptr;
3837 }
3838 }
3839 //---< Close the output (Marker for post-mortem disassembler) >---
3840 st->bol();
3841 st->print_cr("[/MachCode]");
3842 return;
3843 }
3844 #endif
3845
3846 #if defined(SUPPORT_ABSTRACT_ASSEMBLY)
3847 //---< abstract disassembly with comments and section headers merged in >---
3848 if (compressed_with_comments) {
3849 const_cast<nmethod*>(this)->print_constant_pool(st);
3850
3851 st->bol();
3852 st->cr();
3853 st->print_cr("Loading hsdis library failed, undisassembled code is shown in MachCode section");
3854 //---< Open the output (Marker for post-mortem disassembler) >---
3855 st->print_cr("[MachCode]");
3856 while ((p < end) && (p != nullptr)) {
3857 const int instruction_size_in_bytes = Assembler::instr_len(p);
3858
3859 //---< Block comments for nmethod. Interrupts instruction stream, if any. >---
3860 // Outputs a bol() before and a cr() after, but only if a comment is printed.
3861 // Prints nmethod_section_label as well.
3862 if (AbstractDisassembler::show_block_comment()) {
3863 print_block_comment(st, p);
3864 if (st->position() == 0) {
3865 compressed_format_idx = 0;
3866 }
3867 }
3868
3869 //---< New location information after line break >---
3870 if (compressed_format_idx == 0) {
3871 code_comment_column = Disassembler::print_location(p, pss, end, st, false, false);
3872 compressed_format_idx = 1;
3873 }
3874
3875 //---< Code comment for current instruction. Address range [p..(p+len)) >---
3876 unsigned char* p_end = p + (ssize_t)instruction_size_in_bytes;
3877 S390_ONLY(if (p_end > end) p_end = end;) // avoid getting past the end
3878
3879 if (AbstractDisassembler::show_comment() && const_cast<nmethod*>(this)->has_code_comment(p, p_end)) {
3880 //---< interrupt instruction byte stream for code comment >---
3881 if (compressed_format_idx > 1) {
3882 st->cr(); // interrupt byte stream
3883 st->cr(); // add an empty line
3884 code_comment_column = Disassembler::print_location(p, pss, end, st, false, false);
3885 }
3886 const_cast<nmethod*>(this)->print_code_comment_on(st, code_comment_column, p, p_end );
3887 st->bol();
3888 compressed_format_idx = 0;
3889 }
3890
3891 //---< New location information after line break >---
3892 if (compressed_format_idx == 0) {
3893 code_comment_column = Disassembler::print_location(p, pss, end, st, false, false);
3894 compressed_format_idx = 1;
3895 }
3896
3897 //---< Nicely align instructions for readability >---
3898 if (compressed_format_idx > 1) {
3899 Disassembler::print_delimiter(st);
3900 }
3901
3902 //---< Now, finally, print the actual instruction bytes >---
3903 unsigned char* p0 = p;
3904 p = Disassembler::decode_instruction_abstract(p, st, instruction_size_in_bytes, instr_maxlen);
3905 compressed_format_idx += (int)(p - p0);
3906
3907 if (Disassembler::start_newline(compressed_format_idx-1)) {
3908 st->cr();
3909 compressed_format_idx = 0;
3910 }
3911 }
3912 //---< Close the output (Marker for post-mortem disassembler) >---
3913 st->bol();
3914 st->print_cr("[/MachCode]");
3915 return;
3916 }
3917 #endif
3918 }
3919
3920 #if defined(SUPPORT_ASSEMBLY) || defined(SUPPORT_ABSTRACT_ASSEMBLY)
3921
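// Returns a resource-allocated description of the first interesting relocation
// in [begin, end), "other" if only relocations without a dedicated description
// were found, or nullptr if there are none. Callers must provide a ResourceMark.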
3922 const char* nmethod::reloc_string_for(u_char* begin, u_char* end) {
3923 RelocIterator iter(this, begin, end);
3924 bool have_one = false;
3925 while (iter.next()) {
3926 have_one = true;
3927 switch (iter.type()) {
3928 case relocInfo::none: {
3929 // Skip it and check next
3930 break;
3931 }
3932 case relocInfo::oop_type: {
3933 // Get a non-resizable resource-allocated stringStream.
3934 // Our callees make use of (nested) ResourceMarks.
3935 stringStream st(NEW_RESOURCE_ARRAY(char, 1024), 1024);
3936 oop_Relocation* r = iter.oop_reloc();
3937 oop obj = r->oop_value();
3938 st.print("oop(");
3939 if (obj == nullptr) st.print("nullptr");
3940 else obj->print_value_on(&st);
3941 st.print(")");
3942 return st.as_string();
3943 }
3944 case relocInfo::metadata_type: {
3945 stringStream st;
3946 metadata_Relocation* r = iter.metadata_reloc();
3947 Metadata* obj = r->metadata_value();
3948 st.print("metadata(");
3949 if (obj == nullptr) st.print("nullptr");
3950 else obj->print_value_on(&st);
3951 st.print(")");
3952 return st.as_string();
3953 }
3954 case relocInfo::runtime_call_type:
3955 case relocInfo::runtime_call_w_cp_type: {
3956 stringStream st;
3957 st.print("runtime_call");
3958 CallRelocation* r = (CallRelocation*)iter.reloc();
3959 address dest = r->destination();
3960 if (StubRoutines::contains(dest)) {
3961 StubCodeDesc* desc = StubCodeDesc::desc_for(dest);
3962 if (desc == nullptr) {
3963 desc = StubCodeDesc::desc_for(dest + frame::pc_return_offset);
3964 }
3965 if (desc != nullptr) {
3966 st.print(" Stub::%s", desc->name());
3967 return st.as_string();
3968 }
3969 }
3970 CodeBlob* cb = CodeCache::find_blob(dest);
3971 if (cb != nullptr) {
3972 st.print(" %s", cb->name());
3973 } else {
3974 ResourceMark rm;
3975 const int buflen = 1024;
3976 char* buf = NEW_RESOURCE_ARRAY(char, buflen);
3977 int offset;
3978 if (os::dll_address_to_function_name(dest, buf, buflen, &offset)) {
3979 st.print(" %s", buf);
3980 if (offset != 0) {
3981 st.print("+%d", offset);
3982 }
3983 }
3984 }
3985 return st.as_string();
3986 }
3987 case relocInfo::virtual_call_type: {
3988 stringStream st;
3989 st.print_raw("virtual_call");
3990 virtual_call_Relocation* r = iter.virtual_call_reloc();
3991 Method* m = r->method_value();
3992 if (m != nullptr) {
3993 assert(m->is_method(), "");
3994 m->print_short_name(&st);
3995 }
3996 return st.as_string();
3997 }
3998 case relocInfo::opt_virtual_call_type: {
3999 stringStream st;
4000 st.print_raw("optimized virtual_call");
4001 opt_virtual_call_Relocation* r = iter.opt_virtual_call_reloc();
4002 Method* m = r->method_value();
4003 if (m != nullptr) {
4004 assert(m->is_method(), "");
4005 m->print_short_name(&st);
4006 }
4007 return st.as_string();
4008 }
4009 case relocInfo::static_call_type: {
4010 stringStream st;
4011 st.print_raw("static_call");
4012 static_call_Relocation* r = iter.static_call_reloc();
4013 Method* m = r->method_value();
4014 if (m != nullptr) {
4015 assert(m->is_method(), "");
4016 m->print_short_name(&st);
4017 }
4018 return st.as_string();
4019 }
4020 case relocInfo::static_stub_type: return "static_stub";
4021 case relocInfo::external_word_type: return "external_word";
4022 case relocInfo::internal_word_type: return "internal_word";
4023 case relocInfo::section_word_type: return "section_word";
4024 case relocInfo::poll_type: return "poll";
4025 case relocInfo::poll_return_type: return "poll_return";
4026 case relocInfo::trampoline_stub_type: return "trampoline_stub";
4027 case relocInfo::entry_guard_type: return "entry_guard";
4028 case relocInfo::post_call_nop_type: return "post_call_nop";
4029 case relocInfo::barrier_type: {
4030 barrier_Relocation* const reloc = iter.barrier_reloc();
4031 stringStream st;
4032 st.print("barrier format=%d", reloc->format());
4033 return st.as_string();
4034 }
4035
4036 case relocInfo::type_mask: return "type_bit_mask";
4037
4038 default: {
4039 stringStream st;
4040 st.print("unknown relocInfo=%d", (int) iter.type());
4041 return st.as_string();
4042 }
4043 }
4044 }
4045 return have_one ? "other" : nullptr;
4046 }
4047
4048 // Return the last scope in (begin..end]
4049 ScopeDesc* nmethod::scope_desc_in(address begin, address end) {
4050 PcDesc* p = pc_desc_near(begin+1);
4051 if (p != nullptr && p->real_pc(this) <= end) {
4052 return new ScopeDesc(this, p);
4053 }
4054 return nullptr;
4055 }
4056
4057 const char* nmethod::nmethod_section_label(address pos) const {
4058 const char* label = nullptr;
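  // The checks below are ordered so that, when several positions coincide,
  // the most specific label wins: a later match overwrites an earlier one.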
4059 if (pos == code_begin()) label = "[Instructions begin]";
4060 if (pos == entry_point()) label = "[Entry Point]";
4061 if (pos == inline_entry_point()) label = "[Inline Entry Point]";
4062 if (pos == verified_entry_point()) label = "[Verified Entry Point]";
4063 if (pos == verified_inline_entry_point()) label = "[Verified Inline Entry Point]";
4064 if (pos == verified_inline_ro_entry_point()) label = "[Verified Inline Entry Point (RO)]";
4065 if (pos == consts_begin() && pos != insts_begin()) label = "[Constants]";
4066 // Check stub_code before checking exception_handler or deopt_handler.
4067 if (pos == this->stub_begin()) label = "[Stub Code]";
4068 if (JVMCI_ONLY(_exception_offset >= 0 &&) pos == exception_begin()) label = "[Exception Handler]";
4069 if (JVMCI_ONLY(_deopt_handler_entry_offset != -1 &&) pos == deopt_handler_entry()) label = "[Deopt Handler Entry Point]";
4070 return label;
4071 }
4072
4073 static int maybe_print_entry_label(outputStream* stream, address pos, address entry, const char* label) {
4074 if (pos == entry) {
4075 stream->bol();
4076 stream->print_cr("%s", label);
4077 return 1;
4078 } else {
4079 return 0;
4080 }
4081 }
4082
4083 void nmethod::print_nmethod_labels(outputStream* stream, address block_begin, bool print_section_labels) const {
4084 if (print_section_labels) {
4085 int n = 0;
4086 // Multiple entry points may be at the same position. Print them all.
4087 n += maybe_print_entry_label(stream, block_begin, entry_point(), "[Entry Point]");
4088 n += maybe_print_entry_label(stream, block_begin, inline_entry_point(), "[Inline Entry Point]");
4089 n += maybe_print_entry_label(stream, block_begin, verified_entry_point(), "[Verified Entry Point]");
4090 n += maybe_print_entry_label(stream, block_begin, verified_inline_entry_point(), "[Verified Inline Entry Point]");
4091 n += maybe_print_entry_label(stream, block_begin, verified_inline_ro_entry_point(), "[Verified Inline Entry Point (RO)]");
4092 if (n == 0) {
4093 const char* label = nmethod_section_label(block_begin);
4094 if (label != nullptr) {
4095 stream->bol();
4096 stream->print_cr("%s", label);
4097 }
4098 }
4099 }
4100
4101 Method* m = method();
4102 if (m == nullptr || is_osr_method()) {
4103 return;
4104 }
4105
4106 // Print the name of the method (only once)
4107 address low = MIN3(entry_point(),
4108 verified_entry_point(),
4109 inline_entry_point());
  // The verified inline entry point and the verified inline RO entry point are
  // not always used. When they are unused, CodeOffsets::Verified_Inline_Entry(_RO)
  // is -1 and hence the calculated entry point is smaller than the block it
  // offsets into.
4113 if (verified_inline_entry_point() >= block_begin) {
4114 low = MIN2(low, verified_inline_entry_point());
4115 }
4116 if (verified_inline_ro_entry_point() >= block_begin) {
4117 low = MIN2(low, verified_inline_ro_entry_point());
4118 }
4119 assert(low != 0, "sanity");
4120 if (block_begin == low) {
4121 stream->print(" # ");
4122 m->print_value_on(stream);
4123 stream->cr();
4124 }
4125
4126 // Print the arguments for the 3 types of verified entry points
4127 CompiledEntrySignature ces(m);
4128 ces.compute_calling_conventions(false);
4129 const GrowableArray<SigEntry>* sig_cc;
4130 const VMRegPair* regs;
4131 if (block_begin == verified_entry_point()) {
4132 sig_cc = ces.sig_cc();
4133 regs = ces.regs_cc();
4134 } else if (block_begin == verified_inline_entry_point()) {
4135 sig_cc = ces.sig();
4136 regs = ces.regs();
4137 } else if (block_begin == verified_inline_ro_entry_point()) {
4138 sig_cc = ces.sig_cc_ro();
4139 regs = ces.regs_cc_ro();
4140 } else {
4141 return;
4142 }
4143
4144 bool has_this = !m->is_static();
4145 if (ces.has_inline_recv() && block_begin == verified_entry_point()) {
4146 // <this> argument is scalarized for verified_entry_point()
4147 has_this = false;
4148 }
4149 const char* spname = "sp"; // make arch-specific?
4150 int stack_slot_offset = this->frame_size() * wordSize;
4151 int tab1 = 14, tab2 = 24;
4152 int sig_index = 0;
4153 int arg_index = has_this ? -1 : 0;
4154 bool did_old_sp = false;
4155 for (ExtendedSignature sig = ExtendedSignature(sig_cc, SigEntryFilter()); !sig.at_end(); ++sig) {
4156 bool at_this = (arg_index == -1);
4157 bool at_old_sp = false;
4158 BasicType t = (*sig)._bt;
4159 if (at_this) {
4160 stream->print(" # this: ");
4161 } else {
4162 stream->print(" # parm%d: ", arg_index);
4163 }
4164 stream->move_to(tab1);
4165 VMReg fst = regs[sig_index].first();
4166 VMReg snd = regs[sig_index].second();
4167 if (fst->is_reg()) {
4168 stream->print("%s", fst->name());
4169 if (snd->is_valid()) {
4170 stream->print(":%s", snd->name());
4171 }
4172 } else if (fst->is_stack()) {
4173 int offset = fst->reg2stack() * VMRegImpl::stack_slot_size + stack_slot_offset;
4174 if (offset == stack_slot_offset) at_old_sp = true;
4175 stream->print("[%s+0x%x]", spname, offset);
4176 } else {
4177 stream->print("reg%d:%d??", (int)(intptr_t)fst, (int)(intptr_t)snd);
4178 }
4179 stream->print(" ");
4180 stream->move_to(tab2);
4181 stream->print("= ");
4182 if (at_this) {
4183 m->method_holder()->print_value_on(stream);
4184 } else {
4185 bool did_name = false;
4186 if (is_reference_type(t)) {
4187 Symbol* name = (*sig)._name;
4188 name->print_value_on(stream);
4189 did_name = true;
4190 }
4191 if (!did_name)
4192 stream->print("%s", type2name(t));
4193 if ((*sig)._null_marker) {
4194 stream->print(" (null marker)");
4195 }
4196 }
4197 if (at_old_sp) {
4198 stream->print(" (%s of caller)", spname);
4199 did_old_sp = true;
4200 }
4201 stream->cr();
4202 sig_index += type2size[t];
4203 arg_index += 1;
4204 }
4205 if (!did_old_sp) {
4206 stream->print(" # ");
4207 stream->move_to(tab1);
4208 stream->print("[%s+0x%x]", spname, stack_slot_offset);
4209 stream->print(" (%s of caller)", spname);
4210 stream->cr();
4211 }
4212 }
4213
4214 // Returns whether this nmethod has code comments.
4215 bool nmethod::has_code_comment(address begin, address end) {
4216 // scopes?
4217 ScopeDesc* sd = scope_desc_in(begin, end);
4218 if (sd != nullptr) return true;
4219
4220 // relocations?
4221 const char* str = reloc_string_for(begin, end);
4222 if (str != nullptr) return true;
4223
4224 // implicit exceptions?
4225 int cont_offset = ImplicitExceptionTable(this).continuation_offset((uint)(begin - code_begin()));
4226 if (cont_offset != 0) return true;
4227
4228 return false;
4229 }
4230
4231 void nmethod::print_code_comment_on(outputStream* st, int column, address begin, address end) {
4232 ImplicitExceptionTable implicit_table(this);
4233 int pc_offset = (int)(begin - code_begin());
4234 int cont_offset = implicit_table.continuation_offset(pc_offset);
4235 bool oop_map_required = false;
4236 if (cont_offset != 0) {
4237 st->move_to(column, 6, 0);
4238 if (pc_offset == cont_offset) {
4239 st->print("; implicit exception: deoptimizes");
4240 oop_map_required = true;
4241 } else {
4242 st->print("; implicit exception: dispatches to " INTPTR_FORMAT, p2i(code_begin() + cont_offset));
4243 }
4244 }
4245
4246 // Find an oopmap in (begin, end]. We use the odd half-closed
4247 // interval so that oop maps and scope descs which are tied to the
4248 // byte after a call are printed with the call itself. OopMaps
4249 // associated with implicit exceptions are printed with the implicit
4250 // instruction.
4251 address base = code_begin();
4252 ImmutableOopMapSet* oms = oop_maps();
4253 if (oms != nullptr) {
4254 for (int i = 0, imax = oms->count(); i < imax; i++) {
4255 const ImmutableOopMapPair* pair = oms->pair_at(i);
4256 const ImmutableOopMap* om = pair->get_from(oms);
4257 address pc = base + pair->pc_offset();
4258 if (pc >= begin) {
4259 #if INCLUDE_JVMCI
4260 bool is_implicit_deopt = implicit_table.continuation_offset(pair->pc_offset()) == (uint) pair->pc_offset();
4261 #else
4262 bool is_implicit_deopt = false;
4263 #endif
4264 if (is_implicit_deopt ? pc == begin : pc > begin && pc <= end) {
4265 st->move_to(column, 6, 0);
4266 st->print("; ");
4267 om->print_on(st);
4268 oop_map_required = false;
4269 }
4270 }
4271 if (pc > end) {
4272 break;
4273 }
4274 }
4275 }
4276 assert(!oop_map_required, "missed oopmap");
4277
4278 Thread* thread = Thread::current();
4279
4280 // Print any debug info present at this pc.
4281 ScopeDesc* sd = scope_desc_in(begin, end);
4282 if (sd != nullptr) {
4283 st->move_to(column, 6, 0);
4284 if (sd->bci() == SynchronizationEntryBCI) {
4285 st->print(";*synchronization entry");
4286 } else if (sd->bci() == AfterBci) {
4287 st->print(";* method exit (unlocked if synchronized)");
4288 } else if (sd->bci() == UnwindBci) {
4289 st->print(";* unwind (locked if synchronized)");
4290 } else if (sd->bci() == AfterExceptionBci) {
4291 st->print(";* unwind (unlocked if synchronized)");
4292 } else if (sd->bci() == UnknownBci) {
4293 st->print(";* unknown");
4294 } else if (sd->bci() == InvalidFrameStateBci) {
4295 st->print(";* invalid frame state");
4296 } else {
4297 if (sd->method() == nullptr) {
4298 st->print("method is nullptr");
4299 } else if (sd->method()->is_native()) {
4300 st->print("method is native");
4301 } else {
4302 Bytecodes::Code bc = sd->method()->java_code_at(sd->bci());
4303 st->print(";*%s", Bytecodes::name(bc));
4304 switch (bc) {
4305 case Bytecodes::_invokevirtual:
4306 case Bytecodes::_invokespecial:
4307 case Bytecodes::_invokestatic:
4308 case Bytecodes::_invokeinterface:
4309 {
4310 Bytecode_invoke invoke(methodHandle(thread, sd->method()), sd->bci());
4311 st->print(" ");
4312 if (invoke.name() != nullptr)
4313 invoke.name()->print_symbol_on(st);
4314 else
4315 st->print("<UNKNOWN>");
4316 break;
4317 }
4318 case Bytecodes::_getfield:
4319 case Bytecodes::_putfield:
4320 case Bytecodes::_getstatic:
4321 case Bytecodes::_putstatic:
4322 {
4323 Bytecode_field field(methodHandle(thread, sd->method()), sd->bci());
4324 st->print(" ");
4325 if (field.name() != nullptr)
4326 field.name()->print_symbol_on(st);
4327 else
4328 st->print("<UNKNOWN>");
          }
          break;
4330 default:
4331 break;
4332 }
4333 }
4334 st->print(" {reexecute=%d rethrow=%d return_oop=%d return_scalarized=%d}", sd->should_reexecute(), sd->rethrow_exception(), sd->return_oop(), sd->return_scalarized());
4335 }
4336
4337 // Print all scopes
4338 for (;sd != nullptr; sd = sd->sender()) {
4339 st->move_to(column, 6, 0);
4340 st->print("; -");
4341 if (sd->should_reexecute()) {
4342 st->print(" (reexecute)");
4343 }
      if (sd->method() == nullptr) {
        st->print("method is nullptr");
      } else {
        sd->method()->print_short_name(st);
        int lineno = sd->method()->line_number_from_bci(sd->bci());
        if (lineno != -1) {
          st->print("@%d (line %d)", sd->bci(), lineno);
        } else {
          st->print("@%d", sd->bci());
        }
      }
4355 st->cr();
4356 }
4357 }
4358
4359 // Print relocation information
4360 // Prevent memory leak: allocating without ResourceMark.
4361 ResourceMark rm;
4362 const char* str = reloc_string_for(begin, end);
4363 if (str != nullptr) {
4364 if (sd != nullptr) st->cr();
4365 st->move_to(column, 6, 0);
4366 st->print("; {%s}", str);
4367 }
4368 }
4369
4370 #endif
4371
4372 address nmethod::call_instruction_address(address pc) const {
4373 if (NativeCall::is_call_before(pc)) {
4374 NativeCall *ncall = nativeCall_before(pc);
4375 return ncall->instruction_address();
4376 }
4377 return nullptr;
4378 }
4379
4380 void nmethod::print_value_on_impl(outputStream* st) const {
4381 st->print_cr("nmethod");
4382 #if defined(SUPPORT_DATA_STRUCTS)
4383 print_on_with_msg(st, nullptr);
4384 #endif
4385 }
4386
4387 void nmethod::print_code_snippet(outputStream* st, address addr) const {
4388 if (entry_point() <= addr && addr < code_end()) {
4389 // Pointing into the nmethod's code. Try to disassemble some instructions around addr.
4390 // Determine conservative start and end points.
4391 address start;
4392 if (frame_complete_offset() != CodeOffsets::frame_never_safe &&
4393 addr >= code_begin() + frame_complete_offset()) {
4394 start = code_begin() + frame_complete_offset();
4395 } else {
4396 start = (addr < verified_entry_point()) ? entry_point() : verified_entry_point();
4397 }
4398 address start_for_hex_dump = start; // We can choose a different starting point for hex dump, below.
4399 address end = code_end();
4400
4401 // Try using relocations to find closer instruction start and end points.
4402 // (Some platforms have variable length instructions and can only
4403 // disassemble correctly at instruction start addresses.)
4404 RelocIterator iter((nmethod*)this, start);
4405 while (iter.next() && iter.addr() < addr) { // find relocation before addr
4406 // Note: There's a relocation which doesn't point to an instruction start:
4407 // ZBarrierRelocationFormatStoreGoodAfterMov with ZGC on x86_64
4408 // We could detect and skip it, but hex dump is still usable when
4409 // disassembler produces garbage in such a very rare case.
4410 start = iter.addr();
      // We want at least 64 bytes ahead in the hex dump.
4412 if (iter.addr() <= (addr - 64)) start_for_hex_dump = iter.addr();
4413 }
4414 if (iter.has_current()) {
4415 if (iter.addr() == addr) iter.next(); // find relocation after addr
4416 if (iter.has_current()) end = iter.addr();
4417 }
4418
4419 // Always print hex. Disassembler may still have problems when hitting an incorrect instruction start.
4420 os::print_hex_dump(st, start_for_hex_dump, end, 1, /* print_ascii=*/false);
4421 if (!Disassembler::is_abstract()) {
4422 Disassembler::decode(start, end, st);
4423 }
4424 }
4425 }
4426
4427 #ifndef PRODUCT
4428
4429 void nmethod::print_calls(outputStream* st) {
4430 RelocIterator iter(this);
4431 while (iter.next()) {
4432 switch (iter.type()) {
4433 case relocInfo::virtual_call_type: {
4434 CompiledICLocker ml_verify(this);
4435 CompiledIC_at(&iter)->print();
4436 break;
4437 }
4438 case relocInfo::static_call_type:
4439 case relocInfo::opt_virtual_call_type:
4440 st->print_cr("Direct call at " INTPTR_FORMAT, p2i(iter.reloc()->addr()));
4441 CompiledDirectCall::at(iter.reloc())->print();
4442 break;
4443 default:
4444 break;
4445 }
4446 }
4447 }
4448
4449 void nmethod::print_statistics() {
4450 ttyLocker ttyl;
4451 if (xtty != nullptr) xtty->head("statistics type='nmethod'");
4452 native_nmethod_stats.print_native_nmethod_stats();
4453 #ifdef COMPILER1
4454 c1_java_nmethod_stats.print_nmethod_stats("C1");
4455 #endif
4456 #ifdef COMPILER2
4457 c2_java_nmethod_stats.print_nmethod_stats("C2");
4458 #endif
4459 #if INCLUDE_JVMCI
4460 jvmci_java_nmethod_stats.print_nmethod_stats("JVMCI");
4461 #endif
4462 unknown_java_nmethod_stats.print_nmethod_stats("Unknown");
4463 DebugInformationRecorder::print_statistics();
4464 pc_nmethod_stats.print_pc_stats();
4465 Dependencies::print_statistics();
4466 ExternalsRecorder::print_statistics();
4467 if (xtty != nullptr) xtty->tail("statistics");
4468 }
4469
4470 #endif // !PRODUCT
4471
4472 #if INCLUDE_JVMCI
4473 void nmethod::update_speculation(JavaThread* thread) {
4474 jlong speculation = thread->pending_failed_speculation();
4475 if (speculation != 0) {
4476 guarantee(jvmci_nmethod_data() != nullptr, "failed speculation in nmethod without failed speculation list");
4477 jvmci_nmethod_data()->add_failed_speculation(this, speculation);
4478 thread->set_pending_failed_speculation(0);
4479 }
4480 }
4481
4482 const char* nmethod::jvmci_name() {
4483 if (jvmci_nmethod_data() != nullptr) {
4484 return jvmci_nmethod_data()->name();
4485 }
4486 return nullptr;
4487 }
4488
4489 bool nmethod::jvmci_skip_profile_deopt() const {
4490 return jvmci_nmethod_data() != nullptr && !jvmci_nmethod_data()->profile_deopt();
4491 }
4492 #endif