1 /*
2 * Copyright (c) 1997, 2025, Oracle and/or its affiliates. All rights reserved.
3 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
4 *
5 * This code is free software; you can redistribute it and/or modify it
6 * under the terms of the GNU General Public License version 2 only, as
7 * published by the Free Software Foundation.
8 *
9 * This code is distributed in the hope that it will be useful, but WITHOUT
10 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
11 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
12 * version 2 for more details (a copy is included in the LICENSE file that
13 * accompanied this code).
14 *
15 * You should have received a copy of the GNU General Public License version
16 * 2 along with this work; if not, write to the Free Software Foundation,
17 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
18 *
19 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
20 * or visit www.oracle.com if you need additional information or have any
21 * questions.
22 *
23 */
24
25 #include "asm/assembler.inline.hpp"
26 #include "code/aotCodeCache.hpp"
27 #include "code/codeCache.hpp"
28 #include "code/compiledIC.hpp"
29 #include "code/dependencies.hpp"
30 #include "code/nativeInst.hpp"
31 #include "code/nmethod.inline.hpp"
32 #include "code/scopeDesc.hpp"
33 #include "compiler/abstractCompiler.hpp"
34 #include "compiler/compilationLog.hpp"
35 #include "compiler/compileBroker.hpp"
36 #include "compiler/compileLog.hpp"
37 #include "compiler/compilerDirectives.hpp"
38 #include "compiler/compilerOracle.hpp"
39 #include "compiler/compileTask.hpp"
40 #include "compiler/directivesParser.hpp"
41 #include "compiler/disassembler.hpp"
42 #include "compiler/oopMap.inline.hpp"
43 #include "gc/shared/barrierSet.hpp"
44 #include "gc/shared/barrierSetNMethod.hpp"
45 #include "gc/shared/classUnloadingContext.hpp"
46 #include "gc/shared/collectedHeap.hpp"
47 #include "interpreter/bytecode.inline.hpp"
48 #include "jvm.h"
49 #include "logging/log.hpp"
50 #include "logging/logStream.hpp"
51 #include "memory/allocation.inline.hpp"
52 #include "memory/resourceArea.hpp"
53 #include "memory/universe.hpp"
54 #include "oops/access.inline.hpp"
55 #include "oops/klass.inline.hpp"
56 #include "oops/method.inline.hpp"
57 #include "oops/methodData.hpp"
58 #include "oops/oop.inline.hpp"
59 #include "oops/weakHandle.inline.hpp"
60 #include "prims/jvmtiImpl.hpp"
61 #include "prims/jvmtiThreadState.hpp"
62 #include "prims/methodHandles.hpp"
63 #include "runtime/atomicAccess.hpp"
64 #include "runtime/continuation.hpp"
65 #include "runtime/deoptimization.hpp"
66 #include "runtime/flags/flagSetting.hpp"
67 #include "runtime/frame.inline.hpp"
68 #include "runtime/handles.inline.hpp"
69 #include "runtime/jniHandles.inline.hpp"
70 #include "runtime/orderAccess.hpp"
71 #include "runtime/os.hpp"
72 #include "runtime/safepointVerifiers.hpp"
73 #include "runtime/serviceThread.hpp"
74 #include "runtime/sharedRuntime.hpp"
75 #include "runtime/signature.hpp"
76 #include "runtime/threadWXSetters.inline.hpp"
77 #include "runtime/vmThread.hpp"
78 #include "utilities/align.hpp"
79 #include "utilities/copy.hpp"
80 #include "utilities/dtrace.hpp"
81 #include "utilities/events.hpp"
82 #include "utilities/globalDefinitions.hpp"
83 #include "utilities/hashTable.hpp"
84 #include "utilities/xmlstream.hpp"
85 #if INCLUDE_JVMCI
86 #include "jvmci/jvmciRuntime.hpp"
87 #endif
88
89 #ifdef DTRACE_ENABLED
90
91 // Only bother with this argument setup if dtrace is available
92
93 #define DTRACE_METHOD_UNLOAD_PROBE(method) \
94 { \
95 Method* m = (method); \
96 if (m != nullptr) { \
97 Symbol* klass_name = m->klass_name(); \
98 Symbol* name = m->name(); \
99 Symbol* signature = m->signature(); \
100 HOTSPOT_COMPILED_METHOD_UNLOAD( \
101 (char *) klass_name->bytes(), klass_name->utf8_length(), \
102 (char *) name->bytes(), name->utf8_length(), \
103 (char *) signature->bytes(), signature->utf8_length()); \
104 } \
105 }
106
107 #else // ndef DTRACE_ENABLED
108
109 #define DTRACE_METHOD_UNLOAD_PROBE(method)
110
111 #endif
112
113 // Cast from int value to narrow type
114 #define CHECKED_CAST(result, T, thing) \
115 result = static_cast<T>(thing); \
116 guarantee(static_cast<int>(result) == thing, "failed: %d != %d", static_cast<int>(result), thing);
117
118 //---------------------------------------------------------------------------------
119 // NMethod statistics
120 // They are printed under various flags, including:
121 // PrintC1Statistics, PrintOptoStatistics, LogVMOutput, and LogCompilation.
122 // (In the latter two cases, they like other stats are printed to the log only.)
123
124 #ifndef PRODUCT
125 // These variables are put into one block to reduce relocations
126 // and make it simpler to print from the debugger.
127 struct java_nmethod_stats_struct {
128 uint nmethod_count;
129 uint total_nm_size;
130 uint total_immut_size;
131 uint total_mut_size;
132 uint relocation_size;
133 uint consts_size;
134 uint insts_size;
135 uint stub_size;
136 uint oops_size;
137 uint metadata_size;
138 uint dependencies_size;
139 uint nul_chk_table_size;
140 uint handler_table_size;
141 uint scopes_pcs_size;
142 uint scopes_data_size;
143 #if INCLUDE_JVMCI
144 uint speculations_size;
145 uint jvmci_data_size;
146 #endif
147
148 void note_nmethod(nmethod* nm) {
149 nmethod_count += 1;
150 total_nm_size += nm->size();
151 total_immut_size += nm->immutable_data_size();
152 total_mut_size += nm->mutable_data_size();
153 relocation_size += nm->relocation_size();
154 consts_size += nm->consts_size();
155 insts_size += nm->insts_size();
156 stub_size += nm->stub_size();
157 oops_size += nm->oops_size();
158 metadata_size += nm->metadata_size();
159 scopes_data_size += nm->scopes_data_size();
160 scopes_pcs_size += nm->scopes_pcs_size();
161 dependencies_size += nm->dependencies_size();
162 handler_table_size += nm->handler_table_size();
163 nul_chk_table_size += nm->nul_chk_table_size();
164 #if INCLUDE_JVMCI
165 speculations_size += nm->speculations_size();
166 jvmci_data_size += nm->jvmci_data_size();
167 #endif
168 }
169 void print_nmethod_stats(const char* name) {
170 if (nmethod_count == 0) return;
171 tty->print_cr("Statistics for %u bytecoded nmethods for %s:", nmethod_count, name);
172 uint total_size = total_nm_size + total_immut_size + total_mut_size;
173 if (total_nm_size != 0) {
174 tty->print_cr(" total size = %u (100%%)", total_size);
175 tty->print_cr(" in CodeCache = %u (%f%%)", total_nm_size, (total_nm_size * 100.0f)/total_size);
176 }
177 uint header_size = (uint)(nmethod_count * sizeof(nmethod));
178 if (nmethod_count != 0) {
179 tty->print_cr(" header = %u (%f%%)", header_size, (header_size * 100.0f)/total_nm_size);
180 }
181 if (consts_size != 0) {
182 tty->print_cr(" constants = %u (%f%%)", consts_size, (consts_size * 100.0f)/total_nm_size);
183 }
184 if (insts_size != 0) {
185 tty->print_cr(" main code = %u (%f%%)", insts_size, (insts_size * 100.0f)/total_nm_size);
186 }
187 if (stub_size != 0) {
188 tty->print_cr(" stub code = %u (%f%%)", stub_size, (stub_size * 100.0f)/total_nm_size);
189 }
190 if (oops_size != 0) {
191 tty->print_cr(" oops = %u (%f%%)", oops_size, (oops_size * 100.0f)/total_nm_size);
192 }
193 if (total_mut_size != 0) {
194 tty->print_cr(" mutable data = %u (%f%%)", total_mut_size, (total_mut_size * 100.0f)/total_size);
195 }
196 if (relocation_size != 0) {
197 tty->print_cr(" relocation = %u (%f%%)", relocation_size, (relocation_size * 100.0f)/total_mut_size);
198 }
199 if (metadata_size != 0) {
200 tty->print_cr(" metadata = %u (%f%%)", metadata_size, (metadata_size * 100.0f)/total_mut_size);
201 }
202 #if INCLUDE_JVMCI
203 if (jvmci_data_size != 0) {
204 tty->print_cr(" JVMCI data = %u (%f%%)", jvmci_data_size, (jvmci_data_size * 100.0f)/total_mut_size);
205 }
206 #endif
207 if (total_immut_size != 0) {
208 tty->print_cr(" immutable data = %u (%f%%)", total_immut_size, (total_immut_size * 100.0f)/total_size);
209 }
210 if (dependencies_size != 0) {
211 tty->print_cr(" dependencies = %u (%f%%)", dependencies_size, (dependencies_size * 100.0f)/total_immut_size);
212 }
213 if (nul_chk_table_size != 0) {
214 tty->print_cr(" nul chk table = %u (%f%%)", nul_chk_table_size, (nul_chk_table_size * 100.0f)/total_immut_size);
215 }
216 if (handler_table_size != 0) {
217 tty->print_cr(" handler table = %u (%f%%)", handler_table_size, (handler_table_size * 100.0f)/total_immut_size);
218 }
219 if (scopes_pcs_size != 0) {
220 tty->print_cr(" scopes pcs = %u (%f%%)", scopes_pcs_size, (scopes_pcs_size * 100.0f)/total_immut_size);
221 }
222 if (scopes_data_size != 0) {
223 tty->print_cr(" scopes data = %u (%f%%)", scopes_data_size, (scopes_data_size * 100.0f)/total_immut_size);
224 }
225 #if INCLUDE_JVMCI
226 if (speculations_size != 0) {
227 tty->print_cr(" speculations = %u (%f%%)", speculations_size, (speculations_size * 100.0f)/total_immut_size);
228 }
229 #endif
230 }
231 };
232
233 struct native_nmethod_stats_struct {
234 uint native_nmethod_count;
235 uint native_total_size;
236 uint native_relocation_size;
237 uint native_insts_size;
238 uint native_oops_size;
239 uint native_metadata_size;
240 void note_native_nmethod(nmethod* nm) {
241 native_nmethod_count += 1;
242 native_total_size += nm->size();
243 native_relocation_size += nm->relocation_size();
244 native_insts_size += nm->insts_size();
245 native_oops_size += nm->oops_size();
246 native_metadata_size += nm->metadata_size();
247 }
248 void print_native_nmethod_stats() {
249 if (native_nmethod_count == 0) return;
250 tty->print_cr("Statistics for %u native nmethods:", native_nmethod_count);
251 if (native_total_size != 0) tty->print_cr(" N. total size = %u", native_total_size);
252 if (native_relocation_size != 0) tty->print_cr(" N. relocation = %u", native_relocation_size);
253 if (native_insts_size != 0) tty->print_cr(" N. main code = %u", native_insts_size);
254 if (native_oops_size != 0) tty->print_cr(" N. oops = %u", native_oops_size);
255 if (native_metadata_size != 0) tty->print_cr(" N. metadata = %u", native_metadata_size);
256 }
257 };
258
259 struct pc_nmethod_stats_struct {
260 uint pc_desc_init; // number of initialization of cache (= number of caches)
261 uint pc_desc_queries; // queries to nmethod::find_pc_desc
262 uint pc_desc_approx; // number of those which have approximate true
263 uint pc_desc_repeats; // number of _pc_descs[0] hits
264 uint pc_desc_hits; // number of LRU cache hits
265 uint pc_desc_tests; // total number of PcDesc examinations
266 uint pc_desc_searches; // total number of quasi-binary search steps
267 uint pc_desc_adds; // number of LUR cache insertions
268
269 void print_pc_stats() {
270 tty->print_cr("PcDesc Statistics: %u queries, %.2f comparisons per query",
271 pc_desc_queries,
272 (double)(pc_desc_tests + pc_desc_searches)
273 / pc_desc_queries);
274 tty->print_cr(" caches=%d queries=%u/%u, hits=%u+%u, tests=%u+%u, adds=%u",
275 pc_desc_init,
276 pc_desc_queries, pc_desc_approx,
277 pc_desc_repeats, pc_desc_hits,
278 pc_desc_tests, pc_desc_searches, pc_desc_adds);
279 }
280 };
281
282 #ifdef COMPILER1
283 static java_nmethod_stats_struct c1_java_nmethod_stats;
284 #endif
285 #ifdef COMPILER2
286 static java_nmethod_stats_struct c2_java_nmethod_stats;
287 #endif
288 #if INCLUDE_JVMCI
289 static java_nmethod_stats_struct jvmci_java_nmethod_stats;
290 #endif
291 static java_nmethod_stats_struct unknown_java_nmethod_stats;
292
293 static native_nmethod_stats_struct native_nmethod_stats;
294 static pc_nmethod_stats_struct pc_nmethod_stats;
295
296 static void note_java_nmethod(nmethod* nm) {
297 #ifdef COMPILER1
298 if (nm->is_compiled_by_c1()) {
299 c1_java_nmethod_stats.note_nmethod(nm);
300 } else
301 #endif
302 #ifdef COMPILER2
303 if (nm->is_compiled_by_c2()) {
304 c2_java_nmethod_stats.note_nmethod(nm);
305 } else
306 #endif
307 #if INCLUDE_JVMCI
308 if (nm->is_compiled_by_jvmci()) {
309 jvmci_java_nmethod_stats.note_nmethod(nm);
310 } else
311 #endif
312 {
313 unknown_java_nmethod_stats.note_nmethod(nm);
314 }
315 }
316 #endif // !PRODUCT
317
318 //---------------------------------------------------------------------------------
319
320
321 ExceptionCache::ExceptionCache(Handle exception, address pc, address handler) {
322 assert(pc != nullptr, "Must be non null");
323 assert(exception.not_null(), "Must be non null");
324 assert(handler != nullptr, "Must be non null");
325
326 _count = 0;
327 _exception_type = exception->klass();
328 _next = nullptr;
329 _purge_list_next = nullptr;
330
331 add_address_and_handler(pc,handler);
332 }
333
334
335 address ExceptionCache::match(Handle exception, address pc) {
336 assert(pc != nullptr,"Must be non null");
337 assert(exception.not_null(),"Must be non null");
338 if (exception->klass() == exception_type()) {
339 return (test_address(pc));
340 }
341
342 return nullptr;
343 }
344
345
346 bool ExceptionCache::match_exception_with_space(Handle exception) {
347 assert(exception.not_null(),"Must be non null");
348 if (exception->klass() == exception_type() && count() < cache_size) {
349 return true;
350 }
351 return false;
352 }
353
354
355 address ExceptionCache::test_address(address addr) {
356 int limit = count();
357 for (int i = 0; i < limit; i++) {
358 if (pc_at(i) == addr) {
359 return handler_at(i);
360 }
361 }
362 return nullptr;
363 }
364
365
366 bool ExceptionCache::add_address_and_handler(address addr, address handler) {
367 if (test_address(addr) == handler) return true;
368
369 int index = count();
370 if (index < cache_size) {
371 set_pc_at(index, addr);
372 set_handler_at(index, handler);
373 increment_count();
374 return true;
375 }
376 return false;
377 }
378
379 ExceptionCache* ExceptionCache::next() {
380 return AtomicAccess::load(&_next);
381 }
382
383 void ExceptionCache::set_next(ExceptionCache *ec) {
384 AtomicAccess::store(&_next, ec);
385 }
386
387 //-----------------------------------------------------------------------------
388
389
390 // Helper used by both find_pc_desc methods.
391 static inline bool match_desc(PcDesc* pc, int pc_offset, bool approximate) {
392 NOT_PRODUCT(++pc_nmethod_stats.pc_desc_tests);
393 if (!approximate) {
394 return pc->pc_offset() == pc_offset;
395 } else {
396 // Do not look before the sentinel
397 assert(pc_offset > PcDesc::lower_offset_limit, "illegal pc_offset");
398 return pc_offset <= pc->pc_offset() && (pc-1)->pc_offset() < pc_offset;
399 }
400 }
401
402 void PcDescCache::init_to(PcDesc* initial_pc_desc) {
403 NOT_PRODUCT(++pc_nmethod_stats.pc_desc_init);
404 // initialize the cache by filling it with benign (non-null) values
405 assert(initial_pc_desc != nullptr && initial_pc_desc->pc_offset() == PcDesc::lower_offset_limit,
406 "must start with a sentinel");
407 for (int i = 0; i < cache_size; i++) {
408 _pc_descs[i] = initial_pc_desc;
409 }
410 }
411
412 PcDesc* PcDescCache::find_pc_desc(int pc_offset, bool approximate) {
413 // Note: one might think that caching the most recently
414 // read value separately would be a win, but one would be
415 // wrong. When many threads are updating it, the cache
416 // line it's in would bounce between caches, negating
417 // any benefit.
418
419 // In order to prevent race conditions do not load cache elements
420 // repeatedly, but use a local copy:
421 PcDesc* res;
422
423 // Step one: Check the most recently added value.
424 res = _pc_descs[0];
425 assert(res != nullptr, "PcDesc cache should be initialized already");
426
427 // Approximate only here since PcDescContainer::find_pc_desc() checked for exact case.
428 if (approximate && match_desc(res, pc_offset, approximate)) {
429 NOT_PRODUCT(++pc_nmethod_stats.pc_desc_repeats);
430 return res;
431 }
432
433 // Step two: Check the rest of the LRU cache.
434 for (int i = 1; i < cache_size; ++i) {
435 res = _pc_descs[i];
436 if (res->pc_offset() < 0) break; // optimization: skip empty cache
437 if (match_desc(res, pc_offset, approximate)) {
438 NOT_PRODUCT(++pc_nmethod_stats.pc_desc_hits);
439 return res;
440 }
441 }
442
443 // Report failure.
444 return nullptr;
445 }
446
447 void PcDescCache::add_pc_desc(PcDesc* pc_desc) {
448 NOT_PRODUCT(++pc_nmethod_stats.pc_desc_adds);
449 // Update the LRU cache by shifting pc_desc forward.
450 for (int i = 0; i < cache_size; i++) {
451 PcDesc* next = _pc_descs[i];
452 _pc_descs[i] = pc_desc;
453 pc_desc = next;
454 }
455 }
456
457 // adjust pcs_size so that it is a multiple of both oopSize and
458 // sizeof(PcDesc) (assumes that if sizeof(PcDesc) is not a multiple
459 // of oopSize, then 2*sizeof(PcDesc) is)
460 static int adjust_pcs_size(int pcs_size) {
461 int nsize = align_up(pcs_size, oopSize);
462 if ((nsize % sizeof(PcDesc)) != 0) {
463 nsize = pcs_size + sizeof(PcDesc);
464 }
465 assert((nsize % oopSize) == 0, "correct alignment");
466 return nsize;
467 }
468
469 // Returns a string version of the method state.
470 const char* nmethod::state() const {
471 int state = get_state();
472 switch (state) {
473 case not_installed:
474 return "not installed";
475 case in_use:
476 return "in use";
477 case not_entrant:
478 return "not_entrant";
479 default:
480 fatal("unexpected method state: %d", state);
481 return nullptr;
482 }
483 }
484
485 void nmethod::set_deoptimized_done() {
486 ConditionalMutexLocker ml(NMethodState_lock, !NMethodState_lock->owned_by_self(), Mutex::_no_safepoint_check_flag);
487 if (_deoptimization_status != deoptimize_done) { // can't go backwards
488 AtomicAccess::store(&_deoptimization_status, deoptimize_done);
489 }
490 }
491
492 ExceptionCache* nmethod::exception_cache_acquire() const {
493 return AtomicAccess::load_acquire(&_exception_cache);
494 }
495
496 void nmethod::add_exception_cache_entry(ExceptionCache* new_entry) {
497 assert(ExceptionCache_lock->owned_by_self(),"Must hold the ExceptionCache_lock");
498 assert(new_entry != nullptr,"Must be non null");
499 assert(new_entry->next() == nullptr, "Must be null");
500
501 for (;;) {
502 ExceptionCache *ec = exception_cache();
503 if (ec != nullptr) {
504 Klass* ex_klass = ec->exception_type();
505 if (!ex_klass->is_loader_alive()) {
506 // We must guarantee that entries are not inserted with new next pointer
507 // edges to ExceptionCache entries with dead klasses, due to bad interactions
508 // with concurrent ExceptionCache cleanup. Therefore, the inserts roll
509 // the head pointer forward to the first live ExceptionCache, so that the new
510 // next pointers always point at live ExceptionCaches, that are not removed due
511 // to concurrent ExceptionCache cleanup.
512 ExceptionCache* next = ec->next();
513 if (AtomicAccess::cmpxchg(&_exception_cache, ec, next) == ec) {
514 CodeCache::release_exception_cache(ec);
515 }
516 continue;
517 }
518 ec = exception_cache();
519 if (ec != nullptr) {
520 new_entry->set_next(ec);
521 }
522 }
523 if (AtomicAccess::cmpxchg(&_exception_cache, ec, new_entry) == ec) {
524 return;
525 }
526 }
527 }
528
529 void nmethod::clean_exception_cache() {
530 // For each nmethod, only a single thread may call this cleanup function
531 // at the same time, whether called in STW cleanup or concurrent cleanup.
532 // Note that if the GC is processing exception cache cleaning in a concurrent phase,
533 // then a single writer may contend with cleaning up the head pointer to the
534 // first ExceptionCache node that has a Klass* that is alive. That is fine,
535 // as long as there is no concurrent cleanup of next pointers from concurrent writers.
536 // And the concurrent writers do not clean up next pointers, only the head.
537 // Also note that concurrent readers will walk through Klass* pointers that are not
538 // alive. That does not cause ABA problems, because Klass* is deleted after
539 // a handshake with all threads, after all stale ExceptionCaches have been
540 // unlinked. That is also when the CodeCache::exception_cache_purge_list()
541 // is deleted, with all ExceptionCache entries that were cleaned concurrently.
542 // That similarly implies that CAS operations on ExceptionCache entries do not
543 // suffer from ABA problems as unlinking and deletion is separated by a global
544 // handshake operation.
545 ExceptionCache* prev = nullptr;
546 ExceptionCache* curr = exception_cache_acquire();
547
548 while (curr != nullptr) {
549 ExceptionCache* next = curr->next();
550
551 if (!curr->exception_type()->is_loader_alive()) {
552 if (prev == nullptr) {
553 // Try to clean head; this is contended by concurrent inserts, that
554 // both lazily clean the head, and insert entries at the head. If
555 // the CAS fails, the operation is restarted.
556 if (AtomicAccess::cmpxchg(&_exception_cache, curr, next) != curr) {
557 prev = nullptr;
558 curr = exception_cache_acquire();
559 continue;
560 }
561 } else {
562 // It is impossible to during cleanup connect the next pointer to
563 // an ExceptionCache that has not been published before a safepoint
564 // prior to the cleanup. Therefore, release is not required.
565 prev->set_next(next);
566 }
567 // prev stays the same.
568
569 CodeCache::release_exception_cache(curr);
570 } else {
571 prev = curr;
572 }
573
574 curr = next;
575 }
576 }
577
578 // public method for accessing the exception cache
579 // These are the public access methods.
580 address nmethod::handler_for_exception_and_pc(Handle exception, address pc) {
581 // We never grab a lock to read the exception cache, so we may
582 // have false negatives. This is okay, as it can only happen during
583 // the first few exception lookups for a given nmethod.
584 ExceptionCache* ec = exception_cache_acquire();
585 while (ec != nullptr) {
586 address ret_val;
587 if ((ret_val = ec->match(exception,pc)) != nullptr) {
588 return ret_val;
589 }
590 ec = ec->next();
591 }
592 return nullptr;
593 }
594
595 void nmethod::add_handler_for_exception_and_pc(Handle exception, address pc, address handler) {
596 // There are potential race conditions during exception cache updates, so we
597 // must own the ExceptionCache_lock before doing ANY modifications. Because
598 // we don't lock during reads, it is possible to have several threads attempt
599 // to update the cache with the same data. We need to check for already inserted
600 // copies of the current data before adding it.
601
602 MutexLocker ml(ExceptionCache_lock);
603 ExceptionCache* target_entry = exception_cache_entry_for_exception(exception);
604
605 if (target_entry == nullptr || !target_entry->add_address_and_handler(pc,handler)) {
606 target_entry = new ExceptionCache(exception,pc,handler);
607 add_exception_cache_entry(target_entry);
608 }
609 }
610
611 // private method for handling exception cache
612 // These methods are private, and used to manipulate the exception cache
613 // directly.
614 ExceptionCache* nmethod::exception_cache_entry_for_exception(Handle exception) {
615 ExceptionCache* ec = exception_cache_acquire();
616 while (ec != nullptr) {
617 if (ec->match_exception_with_space(exception)) {
618 return ec;
619 }
620 ec = ec->next();
621 }
622 return nullptr;
623 }
624
625 bool nmethod::is_at_poll_return(address pc) {
626 RelocIterator iter(this, pc, pc+1);
627 while (iter.next()) {
628 if (iter.type() == relocInfo::poll_return_type)
629 return true;
630 }
631 return false;
632 }
633
634
635 bool nmethod::is_at_poll_or_poll_return(address pc) {
636 RelocIterator iter(this, pc, pc+1);
637 while (iter.next()) {
638 relocInfo::relocType t = iter.type();
639 if (t == relocInfo::poll_return_type || t == relocInfo::poll_type)
640 return true;
641 }
642 return false;
643 }
644
645 void nmethod::verify_oop_relocations() {
646 // Ensure sure that the code matches the current oop values
647 RelocIterator iter(this, nullptr, nullptr);
648 while (iter.next()) {
649 if (iter.type() == relocInfo::oop_type) {
650 oop_Relocation* reloc = iter.oop_reloc();
651 if (!reloc->oop_is_immediate()) {
652 reloc->verify_oop_relocation();
653 }
654 }
655 }
656 }
657
658
659 ScopeDesc* nmethod::scope_desc_at(address pc) {
660 PcDesc* pd = pc_desc_at(pc);
661 guarantee(pd != nullptr, "scope must be present");
662 return new ScopeDesc(this, pd);
663 }
664
665 ScopeDesc* nmethod::scope_desc_near(address pc) {
666 PcDesc* pd = pc_desc_near(pc);
667 guarantee(pd != nullptr, "scope must be present");
668 return new ScopeDesc(this, pd);
669 }
670
671 address nmethod::oops_reloc_begin() const {
672 // If the method is not entrant then a JMP is plastered over the
673 // first few bytes. If an oop in the old code was there, that oop
674 // should not get GC'd. Skip the first few bytes of oops on
675 // not-entrant methods.
676 if (frame_complete_offset() != CodeOffsets::frame_never_safe &&
677 code_begin() + frame_complete_offset() >
678 verified_entry_point() + NativeJump::instruction_size)
679 {
680 // If we have a frame_complete_offset after the native jump, then there
681 // is no point trying to look for oops before that. This is a requirement
682 // for being allowed to scan oops concurrently.
683 return code_begin() + frame_complete_offset();
684 }
685
686 address low_boundary = verified_entry_point();
687 return low_boundary;
688 }
689
690 // Method that knows how to preserve outgoing arguments at call. This method must be
691 // called with a frame corresponding to a Java invoke
692 void nmethod::preserve_callee_argument_oops(frame fr, const RegisterMap *reg_map, OopClosure* f) {
693 if (method() == nullptr) {
694 return;
695 }
696
697 // handle the case of an anchor explicitly set in continuation code that doesn't have a callee
698 JavaThread* thread = reg_map->thread();
699 if ((thread->has_last_Java_frame() && fr.sp() == thread->last_Java_sp())
700 JVMTI_ONLY(|| (method()->is_continuation_enter_intrinsic() && thread->on_monitor_waited_event()))) {
701 return;
702 }
703
704 if (!method()->is_native()) {
705 address pc = fr.pc();
706 bool has_receiver, has_appendix;
707 Symbol* signature;
708
709 // The method attached by JIT-compilers should be used, if present.
710 // Bytecode can be inaccurate in such case.
711 Method* callee = attached_method_before_pc(pc);
712 if (callee != nullptr) {
713 has_receiver = !(callee->access_flags().is_static());
714 has_appendix = false;
715 signature = callee->signature();
716 } else {
717 SimpleScopeDesc ssd(this, pc);
718
719 Bytecode_invoke call(methodHandle(Thread::current(), ssd.method()), ssd.bci());
720 has_receiver = call.has_receiver();
721 has_appendix = call.has_appendix();
722 signature = call.signature();
723 }
724
725 fr.oops_compiled_arguments_do(signature, has_receiver, has_appendix, reg_map, f);
726 } else if (method()->is_continuation_enter_intrinsic()) {
727 // This method only calls Continuation.enter()
728 Symbol* signature = vmSymbols::continuationEnter_signature();
729 fr.oops_compiled_arguments_do(signature, false, false, reg_map, f);
730 }
731 }
732
733 Method* nmethod::attached_method(address call_instr) {
734 assert(code_contains(call_instr), "not part of the nmethod");
735 RelocIterator iter(this, call_instr, call_instr + 1);
736 while (iter.next()) {
737 if (iter.addr() == call_instr) {
738 switch(iter.type()) {
739 case relocInfo::static_call_type: return iter.static_call_reloc()->method_value();
740 case relocInfo::opt_virtual_call_type: return iter.opt_virtual_call_reloc()->method_value();
741 case relocInfo::virtual_call_type: return iter.virtual_call_reloc()->method_value();
742 default: break;
743 }
744 }
745 }
746 return nullptr; // not found
747 }
748
749 Method* nmethod::attached_method_before_pc(address pc) {
750 if (NativeCall::is_call_before(pc)) {
751 NativeCall* ncall = nativeCall_before(pc);
752 return attached_method(ncall->instruction_address());
753 }
754 return nullptr; // not a call
755 }
756
757 void nmethod::clear_inline_caches() {
758 assert(SafepointSynchronize::is_at_safepoint() || (NMethodState_lock->owned_by_self() && is_not_installed()), "clearing of IC's only allowed at safepoint or when not installed");
759 RelocIterator iter(this);
760 while (iter.next()) {
761 iter.reloc()->clear_inline_cache();
762 }
763 }
764
765 #ifdef ASSERT
766 // Check class_loader is alive for this bit of metadata.
767 class CheckClass : public MetadataClosure {
768 void do_metadata(Metadata* md) {
769 Klass* klass = nullptr;
770 if (md->is_klass()) {
771 klass = ((Klass*)md);
772 } else if (md->is_method()) {
773 klass = ((Method*)md)->method_holder();
774 } else if (md->is_methodData()) {
775 klass = ((MethodData*)md)->method()->method_holder();
776 } else if (md->is_methodCounters()) {
777 klass = ((MethodCounters*)md)->method()->method_holder();
778 } else {
779 md->print();
780 ShouldNotReachHere();
781 }
782 assert(klass->is_loader_alive(), "must be alive");
783 }
784 };
785 #endif // ASSERT
786
787 // Clean references to unloaded nmethods at addr from this one, which is not unloaded.
788 template <typename CallsiteT>
789 static void clean_if_nmethod_is_unloaded(CallsiteT* callsite, bool clean_all) {
790 CodeBlob* cb = CodeCache::find_blob(callsite->destination());
791 if (!cb->is_nmethod()) {
792 return;
793 }
794 nmethod* nm = cb->as_nmethod();
795 if (clean_all || !nm->is_in_use() || nm->is_unloading() || nm->method()->code() != nm) {
796 callsite->set_to_clean();
797 }
798 }
799
800 // Cleans caches in nmethods that point to either classes that are unloaded
801 // or nmethods that are unloaded.
802 //
803 // Can be called either in parallel by G1 currently or after all
804 // nmethods are unloaded. Return postponed=true in the parallel case for
805 // inline caches found that point to nmethods that are not yet visited during
806 // the do_unloading walk.
807 void nmethod::unload_nmethod_caches(bool unloading_occurred) {
808 ResourceMark rm;
809
810 // Exception cache only needs to be called if unloading occurred
811 if (unloading_occurred) {
812 clean_exception_cache();
813 }
814
815 cleanup_inline_caches_impl(unloading_occurred, false);
816
817 #ifdef ASSERT
818 // Check that the metadata embedded in the nmethod is alive
819 CheckClass check_class;
820 metadata_do(&check_class);
821 #endif
822 }
823
824 void nmethod::run_nmethod_entry_barrier() {
825 BarrierSetNMethod* bs_nm = BarrierSet::barrier_set()->barrier_set_nmethod();
826 if (bs_nm != nullptr) {
827 // We want to keep an invariant that nmethods found through iterations of a Thread's
828 // nmethods found in safepoints have gone through an entry barrier and are not armed.
829 // By calling this nmethod entry barrier, it plays along and acts
830 // like any other nmethod found on the stack of a thread (fewer surprises).
831 nmethod* nm = this;
832 bool alive = bs_nm->nmethod_entry_barrier(nm);
833 assert(alive, "should be alive");
834 }
835 }
836
837 // Only called by whitebox test
838 void nmethod::cleanup_inline_caches_whitebox() {
839 assert_locked_or_safepoint(CodeCache_lock);
840 CompiledICLocker ic_locker(this);
841 cleanup_inline_caches_impl(false /* unloading_occurred */, true /* clean_all */);
842 }
843
844 address* nmethod::orig_pc_addr(const frame* fr) {
845 return (address*) ((address)fr->unextended_sp() + orig_pc_offset());
846 }
847
848 // Called to clean up after class unloading for live nmethods
849 void nmethod::cleanup_inline_caches_impl(bool unloading_occurred, bool clean_all) {
850 assert(CompiledICLocker::is_safe(this), "mt unsafe call");
851 ResourceMark rm;
852
853 // Find all calls in an nmethod and clear the ones that point to bad nmethods.
854 RelocIterator iter(this, oops_reloc_begin());
855 bool is_in_static_stub = false;
856 while(iter.next()) {
857
858 switch (iter.type()) {
859
860 case relocInfo::virtual_call_type:
861 if (unloading_occurred) {
862 // If class unloading occurred we first clear ICs where the cached metadata
863 // is referring to an unloaded klass or method.
864 CompiledIC_at(&iter)->clean_metadata();
865 }
866
867 clean_if_nmethod_is_unloaded(CompiledIC_at(&iter), clean_all);
868 break;
869
870 case relocInfo::opt_virtual_call_type:
871 case relocInfo::static_call_type:
872 clean_if_nmethod_is_unloaded(CompiledDirectCall::at(iter.reloc()), clean_all);
873 break;
874
875 case relocInfo::static_stub_type: {
876 is_in_static_stub = true;
877 break;
878 }
879
880 case relocInfo::metadata_type: {
881 // Only the metadata relocations contained in static/opt virtual call stubs
882 // contains the Method* passed to c2i adapters. It is the only metadata
883 // relocation that needs to be walked, as it is the one metadata relocation
884 // that violates the invariant that all metadata relocations have an oop
885 // in the compiled method (due to deferred resolution and code patching).
886
887 // This causes dead metadata to remain in compiled methods that are not
888 // unloading. Unless these slippery metadata relocations of the static
889 // stubs are at least cleared, subsequent class redefinition operations
890 // will access potentially free memory, and JavaThread execution
891 // concurrent to class unloading may call c2i adapters with dead methods.
892 if (!is_in_static_stub) {
893 // The first metadata relocation after a static stub relocation is the
894 // metadata relocation of the static stub used to pass the Method* to
895 // c2i adapters.
896 continue;
897 }
898 is_in_static_stub = false;
899 if (is_unloading()) {
900 // If the nmethod itself is dying, then it may point at dead metadata.
901 // Nobody should follow that metadata; it is strictly unsafe.
902 continue;
903 }
904 metadata_Relocation* r = iter.metadata_reloc();
905 Metadata* md = r->metadata_value();
906 if (md != nullptr && md->is_method()) {
907 Method* method = static_cast<Method*>(md);
908 if (!method->method_holder()->is_loader_alive()) {
909 AtomicAccess::store(r->metadata_addr(), (Method*)nullptr);
910
911 if (!r->metadata_is_immediate()) {
912 r->fix_metadata_relocation();
913 }
914 }
915 }
916 break;
917 }
918
919 default:
920 break;
921 }
922 }
923 }
924
925 address nmethod::continuation_for_implicit_exception(address pc, bool for_div0_check) {
926 // Exception happened outside inline-cache check code => we are inside
927 // an active nmethod => use cpc to determine a return address
928 int exception_offset = int(pc - code_begin());
929 int cont_offset = ImplicitExceptionTable(this).continuation_offset( exception_offset );
930 #ifdef ASSERT
931 if (cont_offset == 0) {
932 Thread* thread = Thread::current();
933 ResourceMark rm(thread);
934 CodeBlob* cb = CodeCache::find_blob(pc);
935 assert(cb != nullptr && cb == this, "");
936
937 // Keep tty output consistent. To avoid ttyLocker, we buffer in stream, and print all at once.
938 stringStream ss;
939 ss.print_cr("implicit exception happened at " INTPTR_FORMAT, p2i(pc));
940 print_on(&ss);
941 method()->print_codes_on(&ss);
942 print_code_on(&ss);
943 print_pcs_on(&ss);
944 tty->print("%s", ss.as_string()); // print all at once
945 }
946 #endif
947 if (cont_offset == 0) {
948 // Let the normal error handling report the exception
949 return nullptr;
950 }
951 if (cont_offset == exception_offset) {
952 #if INCLUDE_JVMCI
953 Deoptimization::DeoptReason deopt_reason = for_div0_check ? Deoptimization::Reason_div0_check : Deoptimization::Reason_null_check;
954 JavaThread *thread = JavaThread::current();
955 thread->set_jvmci_implicit_exception_pc(pc);
956 thread->set_pending_deoptimization(Deoptimization::make_trap_request(deopt_reason,
957 Deoptimization::Action_reinterpret));
958 return (SharedRuntime::deopt_blob()->implicit_exception_uncommon_trap());
959 #else
960 ShouldNotReachHere();
961 #endif
962 }
963 return code_begin() + cont_offset;
964 }
965
966 class HasEvolDependency : public MetadataClosure {
967 bool _has_evol_dependency;
968 public:
969 HasEvolDependency() : _has_evol_dependency(false) {}
970 void do_metadata(Metadata* md) {
971 if (md->is_method()) {
972 Method* method = (Method*)md;
973 if (method->is_old()) {
974 _has_evol_dependency = true;
975 }
976 }
977 }
978 bool has_evol_dependency() const { return _has_evol_dependency; }
979 };
980
981 bool nmethod::has_evol_metadata() {
982 // Check the metadata in relocIter and CompiledIC and also deoptimize
983 // any nmethod that has reference to old methods.
984 HasEvolDependency check_evol;
985 metadata_do(&check_evol);
986 if (check_evol.has_evol_dependency() && log_is_enabled(Debug, redefine, class, nmethod)) {
987 ResourceMark rm;
988 log_debug(redefine, class, nmethod)
989 ("Found evol dependency of nmethod %s.%s(%s) compile_id=%d on in nmethod metadata",
990 _method->method_holder()->external_name(),
991 _method->name()->as_C_string(),
992 _method->signature()->as_C_string(),
993 compile_id());
994 }
995 return check_evol.has_evol_dependency();
996 }
997
998 int nmethod::total_size() const {
999 return
1000 consts_size() +
1001 insts_size() +
1002 stub_size() +
1003 scopes_data_size() +
1004 scopes_pcs_size() +
1005 handler_table_size() +
1006 nul_chk_table_size();
1007 }
1008
1009 const char* nmethod::compile_kind() const {
1010 if (is_osr_method()) return "osr";
1011 if (preloaded()) return "AP";
1012 if (is_aot()) return "A";
1013
1014 if (method() != nullptr && is_native_method()) {
1015 if (method()->is_continuation_native_intrinsic()) {
1016 return "cnt";
1017 }
1018 return "c2n";
1019 }
1020 return nullptr;
1021 }
1022
1023 const char* nmethod::compiler_name() const {
1024 return compilertype2name(_compiler_type);
1025 }
1026
1027 #ifdef ASSERT
1028 class CheckForOopsClosure : public OopClosure {
1029 bool _found_oop = false;
1030 public:
1031 virtual void do_oop(oop* o) { _found_oop = true; }
1032 virtual void do_oop(narrowOop* o) { _found_oop = true; }
1033 bool found_oop() { return _found_oop; }
1034 };
1035 class CheckForMetadataClosure : public MetadataClosure {
1036 bool _found_metadata = false;
1037 Metadata* _ignore = nullptr;
1038 public:
1039 CheckForMetadataClosure(Metadata* ignore) : _ignore(ignore) {}
1040 virtual void do_metadata(Metadata* md) { if (md != _ignore) _found_metadata = true; }
1041 bool found_metadata() { return _found_metadata; }
1042 };
1043
1044 static void assert_no_oops_or_metadata(nmethod* nm) {
1045 if (nm == nullptr) return;
1046 assert(nm->oop_maps() == nullptr, "expectation");
1047
1048 CheckForOopsClosure cfo;
1049 nm->oops_do(&cfo);
1050 assert(!cfo.found_oop(), "no oops allowed");
1051
1052 // We allow an exception for the own Method, but require its class to be permanent.
1053 Method* own_method = nm->method();
1054 CheckForMetadataClosure cfm(/* ignore reference to own Method */ own_method);
1055 nm->metadata_do(&cfm);
1056 assert(!cfm.found_metadata(), "no metadata allowed");
1057
1058 assert(own_method->method_holder()->class_loader_data()->is_permanent_class_loader_data(),
1059 "Method's class needs to be permanent");
1060 }
1061 #endif
1062
1063 static int required_mutable_data_size(CodeBuffer* code_buffer,
1064 int jvmci_data_size = 0) {
1065 return align_up(code_buffer->total_relocation_size(), oopSize) +
1066 align_up(code_buffer->total_metadata_size(), oopSize) +
1067 align_up(jvmci_data_size, oopSize);
1068 }
1069
1070 nmethod* nmethod::new_native_nmethod(const methodHandle& method,
1071 int compile_id,
1072 CodeBuffer *code_buffer,
1073 int vep_offset,
1074 int frame_complete,
1075 int frame_size,
1076 ByteSize basic_lock_owner_sp_offset,
1077 ByteSize basic_lock_sp_offset,
1078 OopMapSet* oop_maps,
1079 int exception_handler) {
1080 code_buffer->finalize_oop_references(method);
1081 // create nmethod
1082 nmethod* nm = nullptr;
1083 int native_nmethod_size = CodeBlob::allocation_size(code_buffer, sizeof(nmethod));
1084 {
1085 MutexLocker mu(CodeCache_lock, Mutex::_no_safepoint_check_flag);
1086
1087 CodeOffsets offsets;
1088 offsets.set_value(CodeOffsets::Verified_Entry, vep_offset);
1089 offsets.set_value(CodeOffsets::Frame_Complete, frame_complete);
1090 if (exception_handler != -1) {
1091 offsets.set_value(CodeOffsets::Exceptions, exception_handler);
1092 }
1093
1094 int mutable_data_size = required_mutable_data_size(code_buffer);
1095
1096 // MH intrinsics are dispatch stubs which are compatible with NonNMethod space.
1097 // IsUnloadingBehaviour::is_unloading needs to handle them separately.
1098 bool allow_NonNMethod_space = method->can_be_allocated_in_NonNMethod_space();
1099 nm = new (native_nmethod_size, allow_NonNMethod_space)
1100 nmethod(method(), compiler_none, native_nmethod_size,
1101 compile_id, &offsets,
1102 code_buffer, frame_size,
1103 basic_lock_owner_sp_offset,
1104 basic_lock_sp_offset,
1105 oop_maps, mutable_data_size);
1106 DEBUG_ONLY( if (allow_NonNMethod_space) assert_no_oops_or_metadata(nm); )
1107 NOT_PRODUCT(if (nm != nullptr) native_nmethod_stats.note_native_nmethod(nm));
1108 }
1109
1110 if (nm != nullptr) {
1111 // verify nmethod
1112 DEBUG_ONLY(nm->verify();) // might block
1113
1114 nm->log_new_nmethod();
1115 }
1116 return nm;
1117 }
1118
1119 void nmethod::record_nmethod_dependency() {
1120 // To make dependency checking during class loading fast, record
1121 // the nmethod dependencies in the classes it is dependent on.
1122 // This allows the dependency checking code to simply walk the
1123 // class hierarchy above the loaded class, checking only nmethods
1124 // which are dependent on those classes. The slow way is to
1125 // check every nmethod for dependencies which makes it linear in
1126 // the number of methods compiled. For applications with a lot
1127 // classes the slow way is too slow.
1128 for (Dependencies::DepStream deps(this); deps.next(); ) {
1129 if (deps.type() == Dependencies::call_site_target_value) {
1130 // CallSite dependencies are managed on per-CallSite instance basis.
1131 oop call_site = deps.argument_oop(0);
1132 MethodHandles::add_dependent_nmethod(call_site, this);
1133 } else {
1134 InstanceKlass* ik = deps.context_type();
1135 if (ik == nullptr) {
1136 continue; // ignore things like evol_method
1137 }
1138 // record this nmethod as dependent on this klass
1139 ik->add_dependent_nmethod(this);
1140 }
1141 }
1142 }
1143
1144 nmethod* nmethod::new_nmethod(const methodHandle& method,
1145 int compile_id,
1146 int entry_bci,
1147 CodeOffsets* offsets,
1148 int orig_pc_offset,
1149 DebugInformationRecorder* debug_info,
1150 Dependencies* dependencies,
1151 CodeBuffer* code_buffer, int frame_size,
1152 OopMapSet* oop_maps,
1153 ExceptionHandlerTable* handler_table,
1154 ImplicitExceptionTable* nul_chk_table,
1155 AbstractCompiler* compiler,
1156 CompLevel comp_level
1157 #if INCLUDE_JVMCI
1158 , char* speculations,
1159 int speculations_len,
1160 JVMCINMethodData* jvmci_data
1161 #endif
1162 )
1163 {
1164 assert(debug_info->oop_recorder() == code_buffer->oop_recorder(), "shared OR");
1165 code_buffer->finalize_oop_references(method);
1166 // create nmethod
1167 nmethod* nm = nullptr;
1168 int nmethod_size = CodeBlob::allocation_size(code_buffer, sizeof(nmethod));
1169
1170 int immutable_data_size =
1171 adjust_pcs_size(debug_info->pcs_size())
1172 + align_up((int)dependencies->size_in_bytes(), oopSize)
1173 + align_up(handler_table->size_in_bytes() , oopSize)
1174 + align_up(nul_chk_table->size_in_bytes() , oopSize)
1175 #if INCLUDE_JVMCI
1176 + align_up(speculations_len , oopSize)
1177 #endif
1178 + align_up(debug_info->data_size() , oopSize)
1179 + align_up(ImmutableDataReferencesCounterSize, oopSize);
1180
1181 // First, allocate space for immutable data in C heap.
1182 address immutable_data = nullptr;
1183 if (immutable_data_size > 0) {
1184 immutable_data = (address)os::malloc(immutable_data_size, mtCode);
1185 if (immutable_data == nullptr) {
1186 vm_exit_out_of_memory(immutable_data_size, OOM_MALLOC_ERROR, "nmethod: no space for immutable data");
1187 return nullptr;
1188 }
1189 }
1190
1191 int mutable_data_size = required_mutable_data_size(code_buffer
1192 JVMCI_ONLY(COMMA (compiler->is_jvmci() ? jvmci_data->size() : 0)));
1193
1194 {
1195 MutexLocker mu(CodeCache_lock, Mutex::_no_safepoint_check_flag);
1196
1197 nm = new (nmethod_size, comp_level)
1198 nmethod(method(), compiler->type(), nmethod_size, immutable_data_size, mutable_data_size,
1199 compile_id, entry_bci, immutable_data, offsets, orig_pc_offset,
1200 debug_info, dependencies, code_buffer, frame_size, oop_maps,
1201 handler_table, nul_chk_table, compiler, comp_level
1202 #if INCLUDE_JVMCI
1203 , speculations,
1204 speculations_len,
1205 jvmci_data
1206 #endif
1207 );
1208
1209 if (nm != nullptr) {
1210 nm->record_nmethod_dependency();
1211 NOT_PRODUCT(note_java_nmethod(nm));
1212 }
1213 }
1214 // Do verification and logging outside CodeCache_lock.
1215 if (nm != nullptr) {
1216
1217 #ifdef ASSERT
1218 LogTarget(Debug, aot, codecache, nmethod) log;
1219 if (log.is_enabled()) {
1220 LogStream out(log);
1221 out.print_cr("== new_nmethod 2");
1222 FlagSetting fs(PrintRelocations, true);
1223 nm->print_on_impl(&out);
1224 nm->decode(&out);
1225 }
1226 #endif
1227
1228 // Safepoints in nmethod::verify aren't allowed because nm hasn't been installed yet.
1229 DEBUG_ONLY(nm->verify();)
1230 nm->log_new_nmethod();
1231 }
1232 return nm;
1233 }
1234
1235 nmethod* nmethod::restore(address code_cache_buffer,
1236 const methodHandle& method,
1237 int compile_id,
1238 address reloc_data,
1239 GrowableArray<Handle>& oop_list,
1240 GrowableArray<Metadata*>& metadata_list,
1241 ImmutableOopMapSet* oop_maps,
1242 address immutable_data,
1243 GrowableArray<Handle>& reloc_imm_oop_list,
1244 GrowableArray<Metadata*>& reloc_imm_metadata_list,
1245 AOTCodeReader* aot_code_reader)
1246 {
1247 CodeBlob::restore(code_cache_buffer, "nmethod", reloc_data, oop_maps);
1248 nmethod* nm = (nmethod*)code_cache_buffer;
1249 nm->set_method(method());
1250 nm->_compile_id = compile_id;
1251 nm->set_immutable_data(immutable_data);
1252 nm->copy_values(&oop_list);
1253 nm->copy_values(&metadata_list);
1254
1255 aot_code_reader->fix_relocations(nm, &reloc_imm_oop_list, &reloc_imm_metadata_list);
1256
1257 #ifndef PRODUCT
1258 nm->asm_remarks().init();
1259 aot_code_reader->read_asm_remarks(nm->asm_remarks(), /* use_string_table */ false);
1260 nm->dbg_strings().init();
1261 aot_code_reader->read_dbg_strings(nm->dbg_strings(), /* use_string_table */ false);
1262 #endif
1263
1264 // Flush the code block
1265 ICache::invalidate_range(nm->code_begin(), nm->code_size());
1266
1267 // Create cache after PcDesc data is copied - it will be used to initialize cache
1268 nm->_pc_desc_container = new PcDescContainer(nm->scopes_pcs_begin());
1269
1270 nm->set_aot_code_entry(aot_code_reader->aot_code_entry());
1271
1272 nm->post_init();
1273 return nm;
1274 }
1275
1276 nmethod* nmethod::new_nmethod(nmethod* archived_nm,
1277 const methodHandle& method,
1278 AbstractCompiler* compiler,
1279 int compile_id,
1280 address reloc_data,
1281 GrowableArray<Handle>& oop_list,
1282 GrowableArray<Metadata*>& metadata_list,
1283 ImmutableOopMapSet* oop_maps,
1284 address immutable_data,
1285 GrowableArray<Handle>& reloc_imm_oop_list,
1286 GrowableArray<Metadata*>& reloc_imm_metadata_list,
1287 AOTCodeReader* aot_code_reader)
1288 {
1289 nmethod* nm = nullptr;
1290 int nmethod_size = archived_nm->size();
1291 // create nmethod
1292 {
1293 MutexLocker mu(CodeCache_lock, Mutex::_no_safepoint_check_flag);
1294 address code_cache_buffer = (address)CodeCache::allocate(nmethod_size, CodeCache::get_code_blob_type(archived_nm->comp_level()));
1295 if (code_cache_buffer != nullptr) {
1296 nm = archived_nm->restore(code_cache_buffer,
1297 method,
1298 compile_id,
1299 reloc_data,
1300 oop_list,
1301 metadata_list,
1302 oop_maps,
1303 immutable_data,
1304 reloc_imm_oop_list,
1305 reloc_imm_metadata_list,
1306 aot_code_reader);
1307 nm->record_nmethod_dependency();
1308 NOT_PRODUCT(note_java_nmethod(nm));
1309 }
1310 }
1311 // Do verification and logging outside CodeCache_lock.
1312 if (nm != nullptr) {
1313 #ifdef ASSERT
1314 LogTarget(Debug, aot, codecache, nmethod) log;
1315 if (log.is_enabled()) {
1316 LogStream out(log);
1317 out.print_cr("== new_nmethod 2");
1318 FlagSetting fs(PrintRelocations, true);
1319 nm->print_on_impl(&out);
1320 nm->decode(&out);
1321 }
1322 #endif
1323 // Safepoints in nmethod::verify aren't allowed because nm hasn't been installed yet.
1324 DEBUG_ONLY(nm->verify();)
1325 nm->log_new_nmethod();
1326 }
1327 return nm;
1328 }
1329
1330 // Fill in default values for various fields
1331 void nmethod::init_defaults(CodeBuffer *code_buffer, CodeOffsets* offsets) {
1332 // avoid uninitialized fields, even for short time periods
1333 _exception_cache = nullptr;
1334 _gc_data = nullptr;
1335 _oops_do_mark_link = nullptr;
1336 _compiled_ic_data = nullptr;
1337
1338 _is_unloading_state = 0;
1339 _state = not_installed;
1340
1341 _has_unsafe_access = 0;
1342 _has_wide_vectors = 0;
1343 _has_monitors = 0;
1344 _has_scoped_access = 0;
1345 _has_flushed_dependencies = 0;
1346 _is_unlinked = 0;
1347 _load_reported = 0; // jvmti state
1348 _preloaded = 0;
1349 _has_clinit_barriers = 0;
1350
1351 _used = false;
1352 _deoptimization_status = not_marked;
1353
1354 // SECT_CONSTS is first in code buffer so the offset should be 0.
1355 int consts_offset = code_buffer->total_offset_of(code_buffer->consts());
1356 assert(consts_offset == 0, "const_offset: %d", consts_offset);
1357
1358 _stub_offset = content_offset() + code_buffer->total_offset_of(code_buffer->stubs());
1359
1360 CHECKED_CAST(_entry_offset, uint16_t, (offsets->value(CodeOffsets::Entry)));
1361 CHECKED_CAST(_verified_entry_offset, uint16_t, (offsets->value(CodeOffsets::Verified_Entry)));
1362
1363 _skipped_instructions_size = code_buffer->total_skipped_instructions_size();
1364 }
1365
1366 // Post initialization
1367 void nmethod::post_init() {
1368 clear_unloading_state();
1369
1370 finalize_relocations();
1371
1372 Universe::heap()->register_nmethod(this);
1373 DEBUG_ONLY(Universe::heap()->verify_nmethod(this));
1374
1375 CodeCache::commit(this);
1376 }
1377
1378 // For native wrappers
1379 nmethod::nmethod(
1380 Method* method,
1381 CompilerType type,
1382 int nmethod_size,
1383 int compile_id,
1384 CodeOffsets* offsets,
1385 CodeBuffer* code_buffer,
1386 int frame_size,
1387 ByteSize basic_lock_owner_sp_offset,
1388 ByteSize basic_lock_sp_offset,
1389 OopMapSet* oop_maps,
1390 int mutable_data_size)
1391 : CodeBlob("native nmethod", CodeBlobKind::Nmethod, code_buffer, nmethod_size, sizeof(nmethod),
1392 offsets->value(CodeOffsets::Frame_Complete), frame_size, oop_maps, false, mutable_data_size),
1393 _deoptimization_generation(0),
1394 _gc_epoch(CodeCache::gc_epoch()),
1395 _method(method),
1396 _native_receiver_sp_offset(basic_lock_owner_sp_offset),
1397 _native_basic_lock_sp_offset(basic_lock_sp_offset)
1398 {
1399 {
1400 DEBUG_ONLY(NoSafepointVerifier nsv;)
1401 assert_locked_or_safepoint(CodeCache_lock);
1402
1403 init_defaults(code_buffer, offsets);
1404
1405 _osr_entry_point = nullptr;
1406 _pc_desc_container = nullptr;
1407 _entry_bci = InvocationEntryBci;
1408 _compile_id = compile_id;
1409 _comp_level = CompLevel_none;
1410 _compiler_type = type;
1411 _orig_pc_offset = 0;
1412 _num_stack_arg_slots = 0;
1413
1414 if (offsets->value(CodeOffsets::Exceptions) != -1) {
1415 // Continuation enter intrinsic
1416 _exception_offset = code_offset() + offsets->value(CodeOffsets::Exceptions);
1417 } else {
1418 _exception_offset = 0;
1419 }
1420 // Native wrappers do not have deopt handlers. Make the values
1421 // something that will never match a pc like the nmethod vtable entry
1422 _deopt_handler_offset = 0;
1423 _aot_code_entry = nullptr;
1424 _method_profiling_count = 0;
1425 _unwind_handler_offset = 0;
1426
1427 CHECKED_CAST(_oops_size, uint16_t, align_up(code_buffer->total_oop_size(), oopSize));
1428 uint16_t metadata_size;
1429 CHECKED_CAST(metadata_size, uint16_t, align_up(code_buffer->total_metadata_size(), wordSize));
1430 JVMCI_ONLY( _metadata_size = metadata_size; )
1431 assert(_mutable_data_size == _relocation_size + metadata_size,
1432 "wrong mutable data size: %d != %d + %d",
1433 _mutable_data_size, _relocation_size, metadata_size);
1434
1435 // native wrapper does not have read-only data but we need unique not null address
1436 _immutable_data = blob_end();
1437 _immutable_data_size = 0;
1438 _nul_chk_table_offset = 0;
1439 _handler_table_offset = 0;
1440 _scopes_pcs_offset = 0;
1441 _scopes_data_offset = 0;
1442 #if INCLUDE_JVMCI
1443 _speculations_offset = 0;
1444 #endif
1445
1446 code_buffer->copy_code_and_locs_to(this);
1447 code_buffer->copy_values_to(this);
1448
1449 post_init();
1450 }
1451
1452 if (PrintNativeNMethods || PrintDebugInfo || PrintRelocations || PrintDependencies) {
1453 ttyLocker ttyl; // keep the following output all in one block
1454 // This output goes directly to the tty, not the compiler log.
1455 // To enable tools to match it up with the compilation activity,
1456 // be sure to tag this tty output with the compile ID.
1457 if (xtty != nullptr) {
1458 xtty->begin_head("print_native_nmethod");
1459 xtty->method(_method);
1460 xtty->stamp();
1461 xtty->end_head(" address='" INTPTR_FORMAT "'", (intptr_t) this);
1462 }
1463 // Print the header part, then print the requested information.
1464 // This is both handled in decode2(), called via print_code() -> decode()
1465 if (PrintNativeNMethods) {
1466 tty->print_cr("-------------------------- Assembly (native nmethod) ---------------------------");
1467 print_code();
1468 tty->print_cr("- - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - ");
1469 #if defined(SUPPORT_DATA_STRUCTS)
1470 if (AbstractDisassembler::show_structs()) {
1471 if (oop_maps != nullptr) {
1472 tty->print("oop maps:"); // oop_maps->print_on(tty) outputs a cr() at the beginning
1473 oop_maps->print_on(tty);
1474 tty->print_cr("- - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - ");
1475 }
1476 }
1477 #endif
1478 } else {
1479 print(); // print the header part only.
1480 }
1481 #if defined(SUPPORT_DATA_STRUCTS)
1482 if (AbstractDisassembler::show_structs()) {
1483 if (PrintRelocations) {
1484 print_relocations_on(tty);
1485 tty->print_cr("- - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - ");
1486 }
1487 }
1488 #endif
1489 if (xtty != nullptr) {
1490 xtty->tail("print_native_nmethod");
1491 }
1492 }
1493 }
1494
1495
1496 nmethod::nmethod(const nmethod &nm) : CodeBlob(nm._name, nm._kind, nm._size, nm._header_size)
1497 {
1498
1499 if (nm._oop_maps != nullptr) {
1500 _oop_maps = nm._oop_maps->clone();
1501 } else {
1502 _oop_maps = nullptr;
1503 }
1504
1505 _size = nm._size;
1506 _relocation_size = nm._relocation_size;
1507 _content_offset = nm._content_offset;
1508 _code_offset = nm._code_offset;
1509 _data_offset = nm._data_offset;
1510 _frame_size = nm._frame_size;
1511
1512 S390_ONLY( _ctable_offset = nm._ctable_offset; )
1513
1514 _header_size = nm._header_size;
1515 _frame_complete_offset = nm._frame_complete_offset;
1516
1517 _kind = nm._kind;
1518
1519 _caller_must_gc_arguments = nm._caller_must_gc_arguments;
1520
1521 #ifndef PRODUCT
1522 _asm_remarks.share(nm._asm_remarks);
1523 _dbg_strings.share(nm._dbg_strings);
1524 #endif
1525
1526 // Allocate memory and copy mutable data to C heap
1527 _mutable_data_size = nm._mutable_data_size;
1528 if (_mutable_data_size > 0) {
1529 _mutable_data = (address)os::malloc(_mutable_data_size, mtCode);
1530 if (_mutable_data == nullptr) {
1531 vm_exit_out_of_memory(_mutable_data_size, OOM_MALLOC_ERROR, "nmethod: no space for mutable data");
1532 }
1533 memcpy(mutable_data_begin(), nm.mutable_data_begin(), nm.mutable_data_size());
1534 } else {
1535 _mutable_data = nullptr;
1536 }
1537
1538 _deoptimization_generation = 0;
1539 _gc_epoch = CodeCache::gc_epoch();
1540 _method = nm._method;
1541 _osr_link = nullptr;
1542
1543 // Increment number of references to immutable data to share it between nmethods
1544 _immutable_data_size = nm._immutable_data_size;
1545 if (_immutable_data_size > 0) {
1546 _immutable_data = nm._immutable_data;
1547 set_immutable_data_references_counter(get_immutable_data_references_counter() + 1);
1548 } else {
1549 _immutable_data = blob_end();
1550 }
1551
1552 _exception_cache = nullptr;
1553 _gc_data = nullptr;
1554 _oops_do_mark_nmethods = nullptr;
1555 _oops_do_mark_link = nullptr;
1556 _compiled_ic_data = nullptr;
1557
1558 if (nm._osr_entry_point != nullptr) {
1559 _osr_entry_point = (nm._osr_entry_point - (address) &nm) + (address) this;
1560 } else {
1561 _osr_entry_point = nullptr;
1562 }
1563
1564 _entry_offset = nm._entry_offset;
1565 _verified_entry_offset = nm._verified_entry_offset;
1566 _entry_bci = nm._entry_bci;
1567
1568 _skipped_instructions_size = nm._skipped_instructions_size;
1569 _stub_offset = nm._stub_offset;
1570 _exception_offset = nm._exception_offset;
1571 _deopt_handler_offset = nm._deopt_handler_offset;
1572 _unwind_handler_offset = nm._unwind_handler_offset;
1573 _num_stack_arg_slots = nm._num_stack_arg_slots;
1574 _oops_size = nm._oops_size;
1575 #if INCLUDE_JVMCI
1576 _metadata_size = nm._metadata_size;
1577 #endif
1578 _nul_chk_table_offset = nm._nul_chk_table_offset;
1579 _handler_table_offset = nm._handler_table_offset;
1580 _scopes_pcs_offset = nm._scopes_pcs_offset;
1581 _scopes_data_offset = nm._scopes_data_offset;
1582 #if INCLUDE_JVMCI
1583 _speculations_offset = nm._speculations_offset;
1584 #endif
1585
1586 _orig_pc_offset = nm._orig_pc_offset;
1587 _compile_id = nm._compile_id;
1588 _comp_level = nm._comp_level;
1589 _compiler_type = nm._compiler_type;
1590 _is_unloading_state = nm._is_unloading_state;
1591 _state = not_installed;
1592
1593 _has_unsafe_access = nm._has_unsafe_access;
1594 _has_wide_vectors = nm._has_wide_vectors;
1595 _has_monitors = nm._has_monitors;
1596 _has_scoped_access = nm._has_scoped_access;
1597 _has_flushed_dependencies = nm._has_flushed_dependencies;
1598 _is_unlinked = nm._is_unlinked;
1599 _load_reported = nm._load_reported;
1600
1601 _deoptimization_status = nm._deoptimization_status;
1602
1603 if (nm._pc_desc_container != nullptr) {
1604 _pc_desc_container = new PcDescContainer(scopes_pcs_begin());
1605 } else {
1606 _pc_desc_container = nullptr;
1607 }
1608
1609 // Copy nmethod contents excluding header
1610 // - Constant part (doubles, longs and floats used in nmethod)
1611 // - Code part:
1612 // - Code body
1613 // - Exception handler
1614 // - Stub code
1615 // - OOP table
1616 memcpy(consts_begin(), nm.consts_begin(), nm.data_end() - nm.consts_begin());
1617
1618 post_init();
1619 }
1620
1621 nmethod* nmethod::relocate(CodeBlobType code_blob_type) {
1622 assert(NMethodRelocation, "must enable use of function");
1623
1624 // Locks required to be held by caller to ensure the nmethod
1625 // is not modified or purged from code cache during relocation
1626 assert_lock_strong(CodeCache_lock);
1627 assert_lock_strong(Compile_lock);
1628 assert(CompiledICLocker::is_safe(this), "mt unsafe call");
1629
1630 if (!is_relocatable()) {
1631 return nullptr;
1632 }
1633
1634 run_nmethod_entry_barrier();
1635 nmethod* nm_copy = new (size(), code_blob_type) nmethod(*this);
1636
1637 if (nm_copy == nullptr) {
1638 return nullptr;
1639 }
1640
1641 // Fix relocation
1642 RelocIterator iter(nm_copy);
1643 CodeBuffer src(this);
1644 CodeBuffer dst(nm_copy);
1645 while (iter.next()) {
1646 #ifdef USE_TRAMPOLINE_STUB_FIX_OWNER
1647 // Direct calls may no longer be in range and the use of a trampoline may now be required.
1648 // Instead, allow trampoline relocations to update their owners and perform the necessary checks.
1649 if (iter.reloc()->is_call()) {
1650 address trampoline = trampoline_stub_Relocation::get_trampoline_for(iter.reloc()->addr(), nm_copy);
1651 if (trampoline != nullptr) {
1652 continue;
1653 }
1654 }
1655 #endif
1656
1657 iter.reloc()->fix_relocation_after_move(&src, &dst);
1658 }
1659
1660 // To make dependency checking during class loading fast, record
1661 // the nmethod dependencies in the classes it is dependent on.
1662 // This allows the dependency checking code to simply walk the
1663 // class hierarchy above the loaded class, checking only nmethods
1664 // which are dependent on those classes. The slow way is to
1665 // check every nmethod for dependencies which makes it linear in
1666 // the number of methods compiled. For applications with a lot
1667 // classes the slow way is too slow.
1668 for (Dependencies::DepStream deps(nm_copy); deps.next(); ) {
1669 if (deps.type() == Dependencies::call_site_target_value) {
1670 // CallSite dependencies are managed on per-CallSite instance basis.
1671 oop call_site = deps.argument_oop(0);
1672 MethodHandles::add_dependent_nmethod(call_site, nm_copy);
1673 } else {
1674 InstanceKlass* ik = deps.context_type();
1675 if (ik == nullptr) {
1676 continue; // ignore things like evol_method
1677 }
1678 // record this nmethod as dependent on this klass
1679 ik->add_dependent_nmethod(nm_copy);
1680 }
1681 }
1682
1683 MutexLocker ml_NMethodState_lock(NMethodState_lock, Mutex::_no_safepoint_check_flag);
1684
1685 // Verify the nm we copied from is still valid
1686 if (!is_marked_for_deoptimization() && is_in_use()) {
1687 assert(method() != nullptr && method()->code() == this, "should be if is in use");
1688
1689 nm_copy->clear_inline_caches();
1690
1691 // Attempt to start using the copy
1692 if (nm_copy->make_in_use()) {
1693 ICache::invalidate_range(nm_copy->code_begin(), nm_copy->code_size());
1694
1695 methodHandle mh(Thread::current(), nm_copy->method());
1696 nm_copy->method()->set_code(mh, nm_copy);
1697
1698 make_not_used();
1699
1700 nm_copy->post_compiled_method_load_event();
1701
1702 nm_copy->log_relocated_nmethod(this);
1703
1704 return nm_copy;
1705 }
1706 }
1707
1708 nm_copy->make_not_used();
1709
1710 return nullptr;
1711 }
1712
1713 bool nmethod::is_relocatable() {
1714 if (!is_java_method()) {
1715 return false;
1716 }
1717
1718 if (!is_in_use()) {
1719 return false;
1720 }
1721
1722 if (is_osr_method()) {
1723 return false;
1724 }
1725
1726 if (is_marked_for_deoptimization()) {
1727 return false;
1728 }
1729
1730 #if INCLUDE_JVMCI
1731 if (jvmci_nmethod_data() != nullptr && jvmci_nmethod_data()->has_mirror()) {
1732 return false;
1733 }
1734 #endif
1735
1736 if (is_unloading()) {
1737 return false;
1738 }
1739
1740 if (has_evol_metadata()) {
1741 return false;
1742 }
1743
1744 return true;
1745 }
1746
1747 void* nmethod::operator new(size_t size, int nmethod_size, int comp_level) throw () {
1748 return CodeCache::allocate(nmethod_size, CodeCache::get_code_blob_type(comp_level));
1749 }
1750
1751 void* nmethod::operator new(size_t size, int nmethod_size, CodeBlobType code_blob_type) throw () {
1752 return CodeCache::allocate(nmethod_size, code_blob_type);
1753 }
1754
1755 void* nmethod::operator new(size_t size, int nmethod_size, bool allow_NonNMethod_space) throw () {
1756 // Try MethodNonProfiled and MethodProfiled.
1757 void* return_value = CodeCache::allocate(nmethod_size, CodeBlobType::MethodNonProfiled);
1758 if (return_value != nullptr || !allow_NonNMethod_space) return return_value;
1759 // Try NonNMethod or give up.
1760 return CodeCache::allocate(nmethod_size, CodeBlobType::NonNMethod);
1761 }
1762
1763 // For normal JIT compiled code
1764 nmethod::nmethod(
1765 Method* method,
1766 CompilerType type,
1767 int nmethod_size,
1768 int immutable_data_size,
1769 int mutable_data_size,
1770 int compile_id,
1771 int entry_bci,
1772 address immutable_data,
1773 CodeOffsets* offsets,
1774 int orig_pc_offset,
1775 DebugInformationRecorder* debug_info,
1776 Dependencies* dependencies,
1777 CodeBuffer *code_buffer,
1778 int frame_size,
1779 OopMapSet* oop_maps,
1780 ExceptionHandlerTable* handler_table,
1781 ImplicitExceptionTable* nul_chk_table,
1782 AbstractCompiler* compiler,
1783 CompLevel comp_level
1784 #if INCLUDE_JVMCI
1785 , char* speculations,
1786 int speculations_len,
1787 JVMCINMethodData* jvmci_data
1788 #endif
1789 )
1790 : CodeBlob("nmethod", CodeBlobKind::Nmethod, code_buffer, nmethod_size, sizeof(nmethod),
1791 offsets->value(CodeOffsets::Frame_Complete), frame_size, oop_maps, false, mutable_data_size),
1792 _deoptimization_generation(0),
1793 _gc_epoch(CodeCache::gc_epoch()),
1794 _method(method),
1795 _osr_link(nullptr)
1796 {
1797 assert(debug_info->oop_recorder() == code_buffer->oop_recorder(), "shared OR");
1798 {
1799 DEBUG_ONLY(NoSafepointVerifier nsv;)
1800 assert_locked_or_safepoint(CodeCache_lock);
1801
1802 init_defaults(code_buffer, offsets);
1803 _aot_code_entry = nullptr; // runtime compiled nmethod does not have AOTCodeEntry
1804 _method_profiling_count = 0;
1805
1806 _osr_entry_point = code_begin() + offsets->value(CodeOffsets::OSR_Entry);
1807 _entry_bci = entry_bci;
1808 _compile_id = compile_id;
1809 _comp_level = comp_level;
1810 _compiler_type = type;
1811 _orig_pc_offset = orig_pc_offset;
1812
1813 _num_stack_arg_slots = entry_bci != InvocationEntryBci ? 0 : _method->constMethod()->num_stack_arg_slots();
1814
1815 set_ctable_begin(header_begin() + content_offset());
1816
1817 #if INCLUDE_JVMCI
1818 if (compiler->is_jvmci()) {
1819 // JVMCI might not produce any stub sections
1820 if (offsets->value(CodeOffsets::Exceptions) != -1) {
1821 _exception_offset = code_offset() + offsets->value(CodeOffsets::Exceptions);
1822 } else {
1823 _exception_offset = -1;
1824 }
1825 if (offsets->value(CodeOffsets::Deopt) != -1) {
1826 _deopt_handler_offset = code_offset() + offsets->value(CodeOffsets::Deopt);
1827 } else {
1828 _deopt_handler_offset = -1;
1829 }
1830 } else
1831 #endif
1832 {
1833 // Exception handler and deopt handler are in the stub section
1834 assert(offsets->value(CodeOffsets::Exceptions) != -1, "must be set");
1835 assert(offsets->value(CodeOffsets::Deopt ) != -1, "must be set");
1836
1837 _exception_offset = _stub_offset + offsets->value(CodeOffsets::Exceptions);
1838 _deopt_handler_offset = _stub_offset + offsets->value(CodeOffsets::Deopt);
1839 }
1840 if (offsets->value(CodeOffsets::UnwindHandler) != -1) {
1841 // C1 generates UnwindHandler at the end of instructions section.
1842 // Calculate positive offset as distance between the start of stubs section
1843 // (which is also the end of instructions section) and the start of the handler.
1844 int unwind_handler_offset = code_offset() + offsets->value(CodeOffsets::UnwindHandler);
1845 CHECKED_CAST(_unwind_handler_offset, int16_t, (_stub_offset - unwind_handler_offset));
1846 } else {
1847 _unwind_handler_offset = -1;
1848 }
1849
1850 CHECKED_CAST(_oops_size, uint16_t, align_up(code_buffer->total_oop_size(), oopSize));
1851 uint16_t metadata_size;
1852 CHECKED_CAST(metadata_size, uint16_t, align_up(code_buffer->total_metadata_size(), wordSize));
1853 JVMCI_ONLY( _metadata_size = metadata_size; )
1854 int jvmci_data_size = 0 JVMCI_ONLY( + align_up(compiler->is_jvmci() ? jvmci_data->size() : 0, oopSize));
1855 assert(_mutable_data_size == _relocation_size + metadata_size + jvmci_data_size,
1856 "wrong mutable data size: %d != %d + %d + %d",
1857 _mutable_data_size, _relocation_size, metadata_size, jvmci_data_size);
1858 assert(nmethod_size == data_end() - header_begin(), "wrong nmethod size: %d != %d",
1859 nmethod_size, (int)(code_end() - header_begin()));
1860
1861 _immutable_data_size = immutable_data_size;
1862 if (immutable_data_size > 0) {
1863 assert(immutable_data != nullptr, "required");
1864 _immutable_data = immutable_data;
1865 } else {
1866 // We need unique not null address
1867 _immutable_data = blob_end();
1868 }
1869 CHECKED_CAST(_nul_chk_table_offset, uint16_t, (align_up((int)dependencies->size_in_bytes(), oopSize)));
1870 CHECKED_CAST(_handler_table_offset, uint16_t, (_nul_chk_table_offset + align_up(nul_chk_table->size_in_bytes(), oopSize)));
1871 _scopes_pcs_offset = _handler_table_offset + align_up(handler_table->size_in_bytes(), oopSize);
1872 _scopes_data_offset = _scopes_pcs_offset + adjust_pcs_size(debug_info->pcs_size());
1873
1874 #if INCLUDE_JVMCI
1875 _speculations_offset = _scopes_data_offset + align_up(debug_info->data_size(), oopSize);
1876 DEBUG_ONLY( int immutable_data_end_offset = _speculations_offset + align_up(speculations_len, oopSize) + align_up(ImmutableDataReferencesCounterSize, oopSize); )
1877 #else
1878 DEBUG_ONLY( int immutable_data_end_offset = _scopes_data_offset + align_up(debug_info->data_size(), oopSize) + align_up(ImmutableDataReferencesCounterSize, oopSize); )
1879 #endif
1880 assert(immutable_data_end_offset <= immutable_data_size, "wrong read-only data size: %d > %d",
1881 immutable_data_end_offset, immutable_data_size);
1882
1883 // Copy code and relocation info
1884 code_buffer->copy_code_and_locs_to(this);
1885 // Copy oops and metadata
1886 code_buffer->copy_values_to(this);
1887 dependencies->copy_to(this);
1888 // Copy PcDesc and ScopeDesc data
1889 debug_info->copy_to(this);
1890
1891 // Create cache after PcDesc data is copied - it will be used to initialize cache
1892 _pc_desc_container = new PcDescContainer(scopes_pcs_begin());
1893
1894 #if INCLUDE_JVMCI
1895 if (compiler->is_jvmci()) {
1896 // Initialize the JVMCINMethodData object inlined into nm
1897 jvmci_nmethod_data()->copy(jvmci_data);
1898 }
1899 #endif
1900
1901 // Copy contents of ExceptionHandlerTable to nmethod
1902 handler_table->copy_to(this);
1903 nul_chk_table->copy_to(this);
1904
1905 #if INCLUDE_JVMCI
1906 // Copy speculations to nmethod
1907 if (speculations_size() != 0) {
1908 memcpy(speculations_begin(), speculations, speculations_len);
1909 }
1910 #endif
1911 set_immutable_data_references_counter(1);
1912
1913 post_init();
1914
1915 // we use the information of entry points to find out if a method is
1916 // static or non static
1917 assert(compiler->is_c2() || compiler->is_jvmci() ||
1918 _method->is_static() == (entry_point() == verified_entry_point()),
1919 " entry points must be same for static methods and vice versa");
1920 }
1921 }
1922
1923 // Print a short set of xml attributes to identify this nmethod. The
1924 // output should be embedded in some other element.
1925 void nmethod::log_identity(xmlStream* log) const {
1926 assert(log->inside_attrs_or_error(), "printing attributes");
1927 log->print(" compile_id='%d'", compile_id());
1928 const char* nm_kind = compile_kind();
1929 if (nm_kind != nullptr) log->print(" compile_kind='%s'", nm_kind);
1930 log->print(" compiler='%s'", compiler_name());
1931 if (TieredCompilation) {
1932 log->print(" compile_level='%d'", comp_level());
1933 }
1934 #if INCLUDE_JVMCI
1935 if (jvmci_nmethod_data() != nullptr) {
1936 const char* jvmci_name = jvmci_nmethod_data()->name();
1937 if (jvmci_name != nullptr) {
1938 log->print(" jvmci_mirror_name='");
1939 log->text("%s", jvmci_name);
1940 log->print("'");
1941 }
1942 }
1943 #endif
1944 }
1945
1946
1947 #define LOG_OFFSET(log, name) \
1948 if (p2i(name##_end()) - p2i(name##_begin())) \
1949 log->print(" " XSTR(name) "_offset='%zd'" , \
1950 p2i(name##_begin()) - p2i(this))
1951
1952
1953 void nmethod::log_new_nmethod() const {
1954 if (LogCompilation && xtty != nullptr) {
1955 ttyLocker ttyl;
1956 xtty->begin_elem("nmethod");
1957 log_identity(xtty);
1958 xtty->print(" entry='" INTPTR_FORMAT "' size='%d'", p2i(code_begin()), size());
1959 xtty->print(" address='" INTPTR_FORMAT "'", p2i(this));
1960
1961 LOG_OFFSET(xtty, relocation);
1962 LOG_OFFSET(xtty, consts);
1963 LOG_OFFSET(xtty, insts);
1964 LOG_OFFSET(xtty, stub);
1965 LOG_OFFSET(xtty, scopes_data);
1966 LOG_OFFSET(xtty, scopes_pcs);
1967 LOG_OFFSET(xtty, dependencies);
1968 LOG_OFFSET(xtty, handler_table);
1969 LOG_OFFSET(xtty, nul_chk_table);
1970 LOG_OFFSET(xtty, oops);
1971 LOG_OFFSET(xtty, metadata);
1972
1973 xtty->method(method());
1974 xtty->stamp();
1975 xtty->end_elem();
1976 }
1977 }
1978
1979
1980 void nmethod::log_relocated_nmethod(nmethod* original) const {
1981 if (LogCompilation && xtty != nullptr) {
1982 ttyLocker ttyl;
1983 xtty->begin_elem("relocated nmethod");
1984 log_identity(xtty);
1985 xtty->print(" entry='" INTPTR_FORMAT "' size='%d'", p2i(code_begin()), size());
1986
1987 const char* original_code_heap_name = CodeCache::get_code_heap_name(CodeCache::get_code_blob_type(original));
1988 xtty->print(" original_address='" INTPTR_FORMAT "'", p2i(original));
1989 xtty->print(" original_code_heap='%s'", original_code_heap_name);
1990
1991 const char* new_code_heap_name = CodeCache::get_code_heap_name(CodeCache::get_code_blob_type(this));
1992 xtty->print(" new_address='" INTPTR_FORMAT "'", p2i(this));
1993 xtty->print(" new_code_heap='%s'", new_code_heap_name);
1994
1995 LOG_OFFSET(xtty, relocation);
1996 LOG_OFFSET(xtty, consts);
1997 LOG_OFFSET(xtty, insts);
1998 LOG_OFFSET(xtty, stub);
1999 LOG_OFFSET(xtty, scopes_data);
2000 LOG_OFFSET(xtty, scopes_pcs);
2001 LOG_OFFSET(xtty, dependencies);
2002 LOG_OFFSET(xtty, handler_table);
2003 LOG_OFFSET(xtty, nul_chk_table);
2004 LOG_OFFSET(xtty, oops);
2005 LOG_OFFSET(xtty, metadata);
2006
2007 xtty->method(method());
2008 xtty->stamp();
2009 xtty->end_elem();
2010 }
2011 }
2012
2013 #undef LOG_OFFSET
2014
2015
2016 // Print out more verbose output usually for a newly created nmethod.
2017 void nmethod::print_on_with_msg(outputStream* st, const char* msg) const {
2018 if (st != nullptr) {
2019 ttyLocker ttyl;
2020 if (WizardMode) {
2021 CompileTask::print(st, this, msg, /*short_form:*/ true);
2022 st->print_cr(" (" INTPTR_FORMAT ")", p2i(this));
2023 } else {
2024 CompileTask::print(st, this, msg, /*short_form:*/ false);
2025 }
2026 }
2027 }
2028
2029 void nmethod::maybe_print_nmethod(const DirectiveSet* directive) {
2030 bool printnmethods = directive->PrintAssemblyOption || directive->PrintNMethodsOption;
2031 if (printnmethods || PrintDebugInfo || PrintRelocations || PrintDependencies || PrintExceptionHandlers) {
2032 print_nmethod(printnmethods);
2033 }
2034 }
2035
2036 void nmethod::print_nmethod(bool printmethod) {
2037 ttyLocker ttyl; // keep the following output all in one block
2038 if (xtty != nullptr) {
2039 xtty->begin_head("print_nmethod");
2040 log_identity(xtty);
2041 xtty->stamp();
2042 xtty->end_head();
2043 }
2044 // Print the header part, then print the requested information.
2045 // This is both handled in decode2().
2046 if (printmethod) {
2047 ResourceMark m;
2048 if (is_compiled_by_c1()) {
2049 tty->cr();
2050 tty->print_cr("============================= C1-compiled nmethod ==============================");
2051 }
2052 if (is_compiled_by_jvmci()) {
2053 tty->cr();
2054 tty->print_cr("=========================== JVMCI-compiled nmethod =============================");
2055 }
2056 tty->print_cr("----------------------------------- Assembly -----------------------------------");
2057 decode2(tty);
2058 #if defined(SUPPORT_DATA_STRUCTS)
2059 if (AbstractDisassembler::show_structs()) {
2060 // Print the oops from the underlying CodeBlob as well.
2061 tty->print_cr("- - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - ");
2062 print_oops(tty);
2063 tty->print_cr("- - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - ");
2064 print_metadata(tty);
2065 tty->print_cr("- - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - ");
2066 print_pcs_on(tty);
2067 tty->print_cr("- - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - ");
2068 if (oop_maps() != nullptr) {
2069 tty->print("oop maps:"); // oop_maps()->print_on(tty) outputs a cr() at the beginning
2070 oop_maps()->print_on(tty);
2071 tty->print_cr("- - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - ");
2072 }
2073 }
2074 #endif
2075 } else {
2076 print(); // print the header part only.
2077 }
2078
2079 #if defined(SUPPORT_DATA_STRUCTS)
2080 if (AbstractDisassembler::show_structs()) {
2081 methodHandle mh(Thread::current(), _method);
2082 if (printmethod || PrintDebugInfo || CompilerOracle::has_option(mh, CompileCommandEnum::PrintDebugInfo)) {
2083 print_scopes();
2084 tty->print_cr("- - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - ");
2085 }
2086 if (printmethod || PrintRelocations || CompilerOracle::has_option(mh, CompileCommandEnum::PrintRelocations)) {
2087 print_relocations_on(tty);
2088 tty->print_cr("- - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - ");
2089 }
2090 if (printmethod || PrintDependencies || CompilerOracle::has_option(mh, CompileCommandEnum::PrintDependencies)) {
2091 print_dependencies_on(tty);
2092 tty->print_cr("- - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - ");
2093 }
2094 if (printmethod || PrintExceptionHandlers) {
2095 print_handler_table();
2096 tty->print_cr("- - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - ");
2097 print_nul_chk_table();
2098 tty->print_cr("- - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - ");
2099 }
2100
2101 if (printmethod) {
2102 print_recorded_oops();
2103 tty->print_cr("- - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - ");
2104 print_recorded_metadata();
2105 tty->print_cr("- - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - ");
2106 }
2107 }
2108 #endif
2109
2110 if (xtty != nullptr) {
2111 xtty->tail("print_nmethod");
2112 }
2113 }
2114
2115
2116 // Promote one word from an assembly-time handle to a live embedded oop.
2117 inline void nmethod::initialize_immediate_oop(oop* dest, jobject handle) {
2118 if (handle == nullptr ||
2119 // As a special case, IC oops are initialized to 1 or -1.
2120 handle == (jobject) Universe::non_oop_word()) {
2121 *(void**)dest = handle;
2122 } else {
2123 *dest = JNIHandles::resolve_non_null(handle);
2124 }
2125 }
2126
2127 void nmethod::copy_values(GrowableArray<Handle>* array) {
2128 int length = array->length();
2129 assert((address)(oops_begin() + length) <= (address)oops_end(), "oops big enough");
2130 oop* dest = oops_begin();
2131 for (int index = 0 ; index < length; index++) {
2132 dest[index] = array->at(index)();
2133 }
2134 }
2135
2136 // Have to have the same name because it's called by a template
2137 void nmethod::copy_values(GrowableArray<jobject>* array) {
2138 int length = array->length();
2139 assert((address)(oops_begin() + length) <= (address)oops_end(), "oops big enough");
2140 oop* dest = oops_begin();
2141 for (int index = 0 ; index < length; index++) {
2142 initialize_immediate_oop(&dest[index], array->at(index));
2143 }
2144
2145 // Now we can fix up all the oops in the code. We need to do this
2146 // in the code because the assembler uses jobjects as placeholders.
2147 // The code and relocations have already been initialized by the
2148 // CodeBlob constructor, so it is valid even at this early point to
2149 // iterate over relocations and patch the code.
2150 fix_oop_relocations(nullptr, nullptr, /*initialize_immediates=*/ true);
2151 }
2152
2153 void nmethod::copy_values(GrowableArray<Metadata*>* array) {
2154 int length = array->length();
2155 assert((address)(metadata_begin() + length) <= (address)metadata_end(), "big enough");
2156 Metadata** dest = metadata_begin();
2157 for (int index = 0 ; index < length; index++) {
2158 dest[index] = array->at(index);
2159 }
2160 }
2161
2162 void nmethod::fix_oop_relocations(address begin, address end, bool initialize_immediates) {
2163 // re-patch all oop-bearing instructions, just in case some oops moved
2164 RelocIterator iter(this, begin, end);
2165 while (iter.next()) {
2166 if (iter.type() == relocInfo::oop_type) {
2167 oop_Relocation* reloc = iter.oop_reloc();
2168 if (initialize_immediates && reloc->oop_is_immediate()) {
2169 oop* dest = reloc->oop_addr();
2170 jobject obj = *reinterpret_cast<jobject*>(dest);
2171 initialize_immediate_oop(dest, obj);
2172 }
2173 // Refresh the oop-related bits of this instruction.
2174 reloc->fix_oop_relocation();
2175 } else if (iter.type() == relocInfo::metadata_type) {
2176 metadata_Relocation* reloc = iter.metadata_reloc();
2177 reloc->fix_metadata_relocation();
2178 }
2179 }
2180 }
2181
2182 void nmethod::create_reloc_immediates_list(JavaThread* thread, GrowableArray<Handle>& oop_list, GrowableArray<Metadata*>& metadata_list) {
2183 RelocIterator iter(this);
2184 while (iter.next()) {
2185 if (iter.type() == relocInfo::oop_type) {
2186 oop_Relocation* reloc = iter.oop_reloc();
2187 if (reloc->oop_is_immediate()) {
2188 oop dest = reloc->oop_value();
2189 Handle h(thread, dest);
2190 oop_list.append(h);
2191 }
2192 } else if (iter.type() == relocInfo::metadata_type) {
2193 metadata_Relocation* reloc = iter.metadata_reloc();
2194 if (reloc->metadata_is_immediate()) {
2195 Metadata* m = reloc->metadata_value();
2196 metadata_list.append(m);
2197 }
2198 }
2199 }
2200 }
2201
2202 static void install_post_call_nop_displacement(nmethod* nm, address pc) {
2203 NativePostCallNop* nop = nativePostCallNop_at((address) pc);
2204 intptr_t cbaddr = (intptr_t) nm;
2205 intptr_t offset = ((intptr_t) pc) - cbaddr;
2206
2207 int oopmap_slot = nm->oop_maps()->find_slot_for_offset(int((intptr_t) pc - (intptr_t) nm->code_begin()));
2208 if (oopmap_slot < 0) { // this can happen at asynchronous (non-safepoint) stackwalks
2209 log_debug(codecache)("failed to find oopmap for cb: " INTPTR_FORMAT " offset: %d", cbaddr, (int) offset);
2210 } else if (!nop->patch(oopmap_slot, offset)) {
2211 log_debug(codecache)("failed to encode %d %d", oopmap_slot, (int) offset);
2212 }
2213 }
2214
2215 void nmethod::finalize_relocations() {
2216 NoSafepointVerifier nsv;
2217
2218 GrowableArray<NativeMovConstReg*> virtual_call_data;
2219
2220 // Make sure that post call nops fill in nmethod offsets eagerly so
2221 // we don't have to race with deoptimization
2222 RelocIterator iter(this);
2223 while (iter.next()) {
2224 if (iter.type() == relocInfo::virtual_call_type) {
2225 virtual_call_Relocation* r = iter.virtual_call_reloc();
2226 NativeMovConstReg* value = nativeMovConstReg_at(r->cached_value());
2227 virtual_call_data.append(value);
2228 } else if (iter.type() == relocInfo::post_call_nop_type) {
2229 post_call_nop_Relocation* const reloc = iter.post_call_nop_reloc();
2230 address pc = reloc->addr();
2231 install_post_call_nop_displacement(this, pc);
2232 }
2233 }
2234
2235 if (virtual_call_data.length() > 0) {
2236 // We allocate a block of CompiledICData per nmethod so the GC can purge this faster.
2237 _compiled_ic_data = new CompiledICData[virtual_call_data.length()];
2238 CompiledICData* next_data = _compiled_ic_data;
2239
2240 for (NativeMovConstReg* value : virtual_call_data) {
2241 value->set_data((intptr_t)next_data);
2242 next_data++;
2243 }
2244 }
2245 }
2246
2247 void nmethod::make_deoptimized() {
2248 if (!Continuations::enabled()) {
2249 // Don't deopt this again.
2250 set_deoptimized_done();
2251 return;
2252 }
2253
2254 assert(method() == nullptr || can_be_deoptimized(), "");
2255
2256 CompiledICLocker ml(this);
2257 assert(CompiledICLocker::is_safe(this), "mt unsafe call");
2258
2259 // If post call nops have been already patched, we can just bail-out.
2260 if (has_been_deoptimized()) {
2261 return;
2262 }
2263
2264 ResourceMark rm;
2265 RelocIterator iter(this, oops_reloc_begin());
2266
2267 while (iter.next()) {
2268
2269 switch (iter.type()) {
2270 case relocInfo::virtual_call_type: {
2271 CompiledIC *ic = CompiledIC_at(&iter);
2272 address pc = ic->end_of_call();
2273 NativePostCallNop* nop = nativePostCallNop_at(pc);
2274 if (nop != nullptr) {
2275 nop->make_deopt();
2276 }
2277 assert(NativeDeoptInstruction::is_deopt_at(pc), "check");
2278 break;
2279 }
2280 case relocInfo::static_call_type:
2281 case relocInfo::opt_virtual_call_type: {
2282 CompiledDirectCall *csc = CompiledDirectCall::at(iter.reloc());
2283 address pc = csc->end_of_call();
2284 NativePostCallNop* nop = nativePostCallNop_at(pc);
2285 //tty->print_cr(" - static pc %p", pc);
2286 if (nop != nullptr) {
2287 nop->make_deopt();
2288 }
2289 // We can't assert here, there are some calls to stubs / runtime
2290 // that have reloc data and doesn't have a post call NOP.
2291 //assert(NativeDeoptInstruction::is_deopt_at(pc), "check");
2292 break;
2293 }
2294 default:
2295 break;
2296 }
2297 }
2298 // Don't deopt this again.
2299 set_deoptimized_done();
2300 }
2301
2302 void nmethod::verify_clean_inline_caches() {
2303 assert(CompiledICLocker::is_safe(this), "mt unsafe call");
2304
2305 ResourceMark rm;
2306 RelocIterator iter(this, oops_reloc_begin());
2307 while(iter.next()) {
2308 switch(iter.type()) {
2309 case relocInfo::virtual_call_type: {
2310 CompiledIC *ic = CompiledIC_at(&iter);
2311 CodeBlob *cb = CodeCache::find_blob(ic->destination());
2312 assert(cb != nullptr, "destination not in CodeBlob?");
2313 nmethod* nm = cb->as_nmethod_or_null();
2314 if (nm != nullptr) {
2315 // Verify that inline caches pointing to bad nmethods are clean
2316 if (!nm->is_in_use() || nm->is_unloading()) {
2317 assert(ic->is_clean(), "IC should be clean");
2318 }
2319 }
2320 break;
2321 }
2322 case relocInfo::static_call_type:
2323 case relocInfo::opt_virtual_call_type: {
2324 CompiledDirectCall *cdc = CompiledDirectCall::at(iter.reloc());
2325 CodeBlob *cb = CodeCache::find_blob(cdc->destination());
2326 assert(cb != nullptr, "destination not in CodeBlob?");
2327 nmethod* nm = cb->as_nmethod_or_null();
2328 if (nm != nullptr) {
2329 // Verify that inline caches pointing to bad nmethods are clean
2330 if (!nm->is_in_use() || nm->is_unloading() || nm->method()->code() != nm) {
2331 assert(cdc->is_clean(), "IC should be clean");
2332 }
2333 }
2334 break;
2335 }
2336 default:
2337 break;
2338 }
2339 }
2340 }
2341
2342 void nmethod::mark_as_maybe_on_stack() {
2343 AtomicAccess::store(&_gc_epoch, CodeCache::gc_epoch());
2344 }
2345
2346 bool nmethod::is_maybe_on_stack() {
2347 // If the condition below is true, it means that the nmethod was found to
2348 // be alive the previous completed marking cycle.
2349 return AtomicAccess::load(&_gc_epoch) >= CodeCache::previous_completed_gc_marking_cycle();
2350 }
2351
2352 void nmethod::inc_decompile_count() {
2353 if (!is_compiled_by_c2() && !is_compiled_by_jvmci()) return;
2354 // Could be gated by ProfileTraps, but do not bother...
2355 #if INCLUDE_JVMCI
2356 if (jvmci_skip_profile_deopt()) {
2357 return;
2358 }
2359 #endif
2360 Method* m = method();
2361 if (m == nullptr) return;
2362 MethodData* mdo = m->method_data();
2363 if (mdo == nullptr) return;
2364 // There is a benign race here. See comments in methodData.hpp.
2365 mdo->inc_decompile_count();
2366 }
2367
2368 void nmethod::inc_method_profiling_count() {
2369 AtomicAccess::inc(&_method_profiling_count);
2370 }
2371
2372 uint64_t nmethod::method_profiling_count() {
2373 return _method_profiling_count;
2374 }
2375
2376 bool nmethod::try_transition(signed char new_state_int) {
2377 signed char new_state = new_state_int;
2378 assert_lock_strong(NMethodState_lock);
2379 signed char old_state = _state;
2380 if (old_state >= new_state) {
2381 // Ensure monotonicity of transitions.
2382 return false;
2383 }
2384 AtomicAccess::store(&_state, new_state);
2385 return true;
2386 }
2387
2388 void nmethod::invalidate_osr_method() {
2389 assert(_entry_bci != InvocationEntryBci, "wrong kind of nmethod");
2390 // Remove from list of active nmethods
2391 if (method() != nullptr) {
2392 method()->method_holder()->remove_osr_nmethod(this);
2393 }
2394 }
2395
2396 void nmethod::log_state_change(InvalidationReason invalidation_reason) const {
2397 if (LogCompilation) {
2398 if (xtty != nullptr) {
2399 ttyLocker ttyl; // keep the following output all in one block
2400 xtty->begin_elem("make_not_entrant thread='%zu' reason='%s'",
2401 os::current_thread_id(), invalidation_reason_to_string(invalidation_reason));
2402 log_identity(xtty);
2403 xtty->stamp();
2404 xtty->end_elem();
2405 }
2406 }
2407
2408 ResourceMark rm;
2409 stringStream ss(NEW_RESOURCE_ARRAY(char, 256), 256);
2410 ss.print("made not entrant: %s", invalidation_reason_to_string(invalidation_reason));
2411
2412 CompileTask::print_ul(this, ss.freeze());
2413 if (PrintCompilation) {
2414 print_on_with_msg(tty, ss.freeze());
2415 }
2416 }
2417
2418 void nmethod::unlink_from_method() {
2419 if (method() != nullptr) {
2420 method()->unlink_code(this);
2421 }
2422 }
2423
2424 // Invalidate code
2425 bool nmethod::make_not_entrant(InvalidationReason invalidation_reason, bool keep_aot_entry) {
2426 // This can be called while the system is already at a safepoint which is ok
2427 NoSafepointVerifier nsv;
2428
2429 if (is_unloading()) {
2430 // If the nmethod is unloading, then it is already not entrant through
2431 // the nmethod entry barriers. No need to do anything; GC will unload it.
2432 return false;
2433 }
2434
2435 if (AtomicAccess::load(&_state) == not_entrant) {
2436 // Avoid taking the lock if already in required state.
2437 // This is safe from races because the state is an end-state,
2438 // which the nmethod cannot back out of once entered.
2439 // No need for fencing either.
2440 return false;
2441 }
2442
2443 {
2444 // Enter critical section. Does not block for safepoint.
2445 ConditionalMutexLocker ml(NMethodState_lock, !NMethodState_lock->owned_by_self(), Mutex::_no_safepoint_check_flag);
2446
2447 if (AtomicAccess::load(&_state) == not_entrant) {
2448 // another thread already performed this transition so nothing
2449 // to do, but return false to indicate this.
2450 return false;
2451 }
2452
2453 if (is_osr_method()) {
2454 // This logic is equivalent to the logic below for patching the
2455 // verified entry point of regular methods.
2456 // this effectively makes the osr nmethod not entrant
2457 invalidate_osr_method();
2458 } else {
2459 // The caller can be calling the method statically or through an inline
2460 // cache call.
2461 BarrierSet::barrier_set()->barrier_set_nmethod()->make_not_entrant(this);
2462 }
2463
2464 if (update_recompile_counts()) {
2465 // Mark the method as decompiled.
2466 inc_decompile_count();
2467 }
2468
2469 BarrierSetNMethod* bs_nm = BarrierSet::barrier_set()->barrier_set_nmethod();
2470 if (bs_nm == nullptr || !bs_nm->supports_entry_barrier(this)) {
2471 // If nmethod entry barriers are not supported, we won't mark
2472 // nmethods as on-stack when they become on-stack. So we
2473 // degrade to a less accurate flushing strategy, for now.
2474 mark_as_maybe_on_stack();
2475 }
2476
2477 // Change state
2478 bool success = try_transition(not_entrant);
2479 assert(success, "Transition can't fail");
2480
2481 // Log the transition once
2482 log_state_change(invalidation_reason);
2483
2484 // Remove nmethod from method.
2485 unlink_from_method();
2486
2487 if (!keep_aot_entry) {
2488 // Keep AOT code if it was simply replaced
2489 // otherwise make it not entrant too.
2490 AOTCodeCache::invalidate(_aot_code_entry);
2491 }
2492
2493 CompileBroker::log_not_entrant(this);
2494 } // leave critical region under NMethodState_lock
2495
2496 #if INCLUDE_JVMCI
2497 // Invalidate can't occur while holding the NMethodState_lock
2498 JVMCINMethodData* nmethod_data = jvmci_nmethod_data();
2499 if (nmethod_data != nullptr) {
2500 nmethod_data->invalidate_nmethod_mirror(this, invalidation_reason);
2501 }
2502 #endif
2503
2504 #ifdef ASSERT
2505 if (is_osr_method() && method() != nullptr) {
2506 // Make sure osr nmethod is invalidated, i.e. not on the list
2507 bool found = method()->method_holder()->remove_osr_nmethod(this);
2508 assert(!found, "osr nmethod should have been invalidated");
2509 }
2510 #endif
2511
2512 return true;
2513 }
2514
2515 // For concurrent GCs, there must be a handshake between unlink and flush
2516 void nmethod::unlink() {
2517 if (is_unlinked()) {
2518 // Already unlinked.
2519 return;
2520 }
2521
2522 flush_dependencies();
2523
2524 // unlink_from_method will take the NMethodState_lock.
2525 // In this case we don't strictly need it when unlinking nmethods from
2526 // the Method, because it is only concurrently unlinked by
2527 // the entry barrier, which acquires the per nmethod lock.
2528 unlink_from_method();
2529
2530 if (is_osr_method()) {
2531 invalidate_osr_method();
2532 }
2533
2534 #if INCLUDE_JVMCI
2535 // Clear the link between this nmethod and a HotSpotNmethod mirror
2536 JVMCINMethodData* nmethod_data = jvmci_nmethod_data();
2537 if (nmethod_data != nullptr) {
2538 nmethod_data->invalidate_nmethod_mirror(this, is_cold() ?
2539 nmethod::InvalidationReason::UNLOADING_COLD :
2540 nmethod::InvalidationReason::UNLOADING);
2541 }
2542 #endif
2543
2544 // Post before flushing as jmethodID is being used
2545 post_compiled_method_unload();
2546
2547 // Register for flushing when it is safe. For concurrent class unloading,
2548 // that would be after the unloading handshake, and for STW class unloading
2549 // that would be when getting back to the VM thread.
2550 ClassUnloadingContext::context()->register_unlinked_nmethod(this);
2551 }
2552
2553 void nmethod::purge(bool unregister_nmethod) {
2554
2555 MutexLocker ml(CodeCache_lock, Mutex::_no_safepoint_check_flag);
2556
2557 // completely deallocate this method
2558 Events::log_nmethod_flush(Thread::current(), "flushing %s nmethod " INTPTR_FORMAT, compile_kind(), p2i(this));
2559
2560 LogTarget(Debug, codecache) lt;
2561 if (lt.is_enabled()) {
2562 ResourceMark rm;
2563 LogStream ls(lt);
2564 const char* method_name = method()->name()->as_C_string();
2565 const size_t codecache_capacity = CodeCache::capacity()/1024;
2566 const size_t codecache_free_space = CodeCache::unallocated_capacity(CodeCache::get_code_blob_type(this))/1024;
2567 ls.print("Flushing %s nmethod %6d/" INTPTR_FORMAT ", level=%d, cold=%d, epoch=" UINT64_FORMAT ", cold_count=" UINT64_FORMAT ". "
2568 "Cache capacity: %zuKb, free space: %zuKb. method %s (%s)",
2569 compile_kind(), _compile_id, p2i(this), _comp_level, is_cold(), _gc_epoch, CodeCache::cold_gc_count(),
2570 codecache_capacity, codecache_free_space, method_name, compiler_name());
2571 }
2572
2573 // We need to deallocate any ExceptionCache data.
2574 // Note that we do not need to grab the nmethod lock for this, it
2575 // better be thread safe if we're disposing of it!
2576 ExceptionCache* ec = exception_cache();
2577 while(ec != nullptr) {
2578 ExceptionCache* next = ec->next();
2579 delete ec;
2580 ec = next;
2581 }
2582 if (_pc_desc_container != nullptr) {
2583 delete _pc_desc_container;
2584 }
2585 if (_compiled_ic_data != nullptr) {
2586 delete[] _compiled_ic_data;
2587 }
2588
2589 if (_immutable_data != blob_end() && !AOTCodeCache::is_address_in_aot_cache((address)_oop_maps)) {
2590 int reference_count = get_immutable_data_references_counter();
2591 assert(reference_count > 0, "immutable data has no references");
2592
2593 set_immutable_data_references_counter(reference_count - 1);
2594 // Free memory if this is the last nmethod referencing immutable data
2595 if (reference_count == 0) {
2596 os::free(_immutable_data);
2597 }
2598
2599 _immutable_data = blob_end(); // Valid not null address
2600 }
2601
2602 if (unregister_nmethod) {
2603 Universe::heap()->unregister_nmethod(this);
2604 }
2605 CodeCache::unregister_old_nmethod(this);
2606
2607 JVMCI_ONLY( _metadata_size = 0; )
2608 CodeBlob::purge();
2609 }
2610
2611 oop nmethod::oop_at(int index) const {
2612 if (index == 0) {
2613 return nullptr;
2614 }
2615
2616 BarrierSetNMethod* bs_nm = BarrierSet::barrier_set()->barrier_set_nmethod();
2617 return bs_nm->oop_load_no_keepalive(this, index);
2618 }
2619
2620 oop nmethod::oop_at_phantom(int index) const {
2621 if (index == 0) {
2622 return nullptr;
2623 }
2624
2625 BarrierSetNMethod* bs_nm = BarrierSet::barrier_set()->barrier_set_nmethod();
2626 return bs_nm->oop_load_phantom(this, index);
2627 }
2628
2629 //
2630 // Notify all classes this nmethod is dependent on that it is no
2631 // longer dependent.
2632
2633 void nmethod::flush_dependencies() {
2634 if (!has_flushed_dependencies()) {
2635 set_has_flushed_dependencies(true);
2636 for (Dependencies::DepStream deps(this); deps.next(); ) {
2637 if (deps.type() == Dependencies::call_site_target_value) {
2638 // CallSite dependencies are managed on per-CallSite instance basis.
2639 oop call_site = deps.argument_oop(0);
2640 MethodHandles::clean_dependency_context(call_site);
2641 } else {
2642 InstanceKlass* ik = deps.context_type();
2643 if (ik == nullptr) {
2644 continue; // ignore things like evol_method
2645 }
2646 // During GC liveness of dependee determines class that needs to be updated.
2647 // The GC may clean dependency contexts concurrently and in parallel.
2648 ik->clean_dependency_context();
2649 }
2650 }
2651 }
2652 }
2653
2654 void nmethod::post_compiled_method(CompileTask* task) {
2655 task->mark_success();
2656 task->set_nm_content_size(content_size());
2657 task->set_nm_insts_size(insts_size());
2658 task->set_nm_total_size(total_size());
2659
2660 // task->is_aot_load() is true only for loaded AOT code.
2661 // nmethod::_aot_code_entry is set for loaded and stored AOT code
2662 // to invalidate the entry when nmethod is deoptimized.
2663 // VerifyAOTCode is option to not store in archive AOT code.
2664 guarantee((_aot_code_entry != nullptr) || !task->is_aot_load() || VerifyAOTCode, "sanity");
2665
2666 // JVMTI -- compiled method notification (must be done outside lock)
2667 post_compiled_method_load_event();
2668
2669 if (CompilationLog::log() != nullptr) {
2670 CompilationLog::log()->log_nmethod(JavaThread::current(), this);
2671 }
2672
2673 const DirectiveSet* directive = task->directive();
2674 maybe_print_nmethod(directive);
2675 }
2676
2677 // ------------------------------------------------------------------
2678 // post_compiled_method_load_event
2679 // new method for install_code() path
2680 // Transfer information from compilation to jvmti
2681 void nmethod::post_compiled_method_load_event(JvmtiThreadState* state) {
2682 // This is a bad time for a safepoint. We don't want
2683 // this nmethod to get unloaded while we're queueing the event.
2684 NoSafepointVerifier nsv;
2685
2686 Method* m = method();
2687 HOTSPOT_COMPILED_METHOD_LOAD(
2688 (char *) m->klass_name()->bytes(),
2689 m->klass_name()->utf8_length(),
2690 (char *) m->name()->bytes(),
2691 m->name()->utf8_length(),
2692 (char *) m->signature()->bytes(),
2693 m->signature()->utf8_length(),
2694 insts_begin(), insts_size());
2695
2696
2697 if (JvmtiExport::should_post_compiled_method_load()) {
2698 // Only post unload events if load events are found.
2699 set_load_reported();
2700 // If a JavaThread hasn't been passed in, let the Service thread
2701 // (which is a real Java thread) post the event
2702 JvmtiDeferredEvent event = JvmtiDeferredEvent::compiled_method_load_event(this);
2703 if (state == nullptr) {
2704 // Execute any barrier code for this nmethod as if it's called, since
2705 // keeping it alive looks like stack walking.
2706 run_nmethod_entry_barrier();
2707 ServiceThread::enqueue_deferred_event(&event);
2708 } else {
2709 // This enters the nmethod barrier outside in the caller.
2710 state->enqueue_event(&event);
2711 }
2712 }
2713 }
2714
2715 void nmethod::post_compiled_method_unload() {
2716 assert(_method != nullptr, "just checking");
2717 DTRACE_METHOD_UNLOAD_PROBE(method());
2718
2719 // If a JVMTI agent has enabled the CompiledMethodUnload event then
2720 // post the event. The Method* will not be valid when this is freed.
2721
2722 // Don't bother posting the unload if the load event wasn't posted.
2723 if (load_reported() && JvmtiExport::should_post_compiled_method_unload()) {
2724 JvmtiDeferredEvent event =
2725 JvmtiDeferredEvent::compiled_method_unload_event(
2726 method()->jmethod_id(), insts_begin());
2727 ServiceThread::enqueue_deferred_event(&event);
2728 }
2729 }
2730
2731 // Iterate over metadata calling this function. Used by RedefineClasses
2732 void nmethod::metadata_do(MetadataClosure* f) {
2733 {
2734 // Visit all immediate references that are embedded in the instruction stream.
2735 RelocIterator iter(this, oops_reloc_begin());
2736 while (iter.next()) {
2737 if (iter.type() == relocInfo::metadata_type) {
2738 metadata_Relocation* r = iter.metadata_reloc();
2739 // In this metadata, we must only follow those metadatas directly embedded in
2740 // the code. Other metadatas (oop_index>0) are seen as part of
2741 // the metadata section below.
2742 assert(1 == (r->metadata_is_immediate()) +
2743 (r->metadata_addr() >= metadata_begin() && r->metadata_addr() < metadata_end()),
2744 "metadata must be found in exactly one place");
2745 if (r->metadata_is_immediate() && r->metadata_value() != nullptr) {
2746 Metadata* md = r->metadata_value();
2747 if (md != _method) f->do_metadata(md);
2748 }
2749 } else if (iter.type() == relocInfo::virtual_call_type) {
2750 // Check compiledIC holders associated with this nmethod
2751 ResourceMark rm;
2752 CompiledIC *ic = CompiledIC_at(&iter);
2753 ic->metadata_do(f);
2754 }
2755 }
2756 }
2757
2758 // Visit the metadata section
2759 for (Metadata** p = metadata_begin(); p < metadata_end(); p++) {
2760 if (*p == Universe::non_oop_word() || *p == nullptr) continue; // skip non-oops
2761 Metadata* md = *p;
2762 f->do_metadata(md);
2763 }
2764
2765 // Visit metadata not embedded in the other places.
2766 if (_method != nullptr) f->do_metadata(_method);
2767 }
2768
2769 // Heuristic for nuking nmethods even though their oops are live.
2770 // Main purpose is to reduce code cache pressure and get rid of
2771 // nmethods that don't seem to be all that relevant any longer.
2772 bool nmethod::is_cold() {
2773 if (!MethodFlushing || is_native_method() || is_not_installed()) {
2774 // No heuristic unloading at all
2775 return false;
2776 }
2777
2778 if (!is_maybe_on_stack() && is_not_entrant()) {
2779 // Not entrant nmethods that are not on any stack can just
2780 // be removed
2781 return true;
2782 }
2783
2784 BarrierSetNMethod* bs_nm = BarrierSet::barrier_set()->barrier_set_nmethod();
2785 if (bs_nm == nullptr || !bs_nm->supports_entry_barrier(this)) {
2786 // On platforms that don't support nmethod entry barriers, we can't
2787 // trust the temporal aspect of the gc epochs. So we can't detect
2788 // cold nmethods on such platforms.
2789 return false;
2790 }
2791
2792 if (!UseCodeCacheFlushing) {
2793 // Bail out if we don't heuristically remove nmethods
2794 return false;
2795 }
2796
2797 // Other code can be phased out more gradually after N GCs
2798 return CodeCache::previous_completed_gc_marking_cycle() > _gc_epoch + 2 * CodeCache::cold_gc_count();
2799 }
2800
2801 // The _is_unloading_state encodes a tuple comprising the unloading cycle
2802 // and the result of IsUnloadingBehaviour::is_unloading() for that cycle.
2803 // This is the bit layout of the _is_unloading_state byte: 00000CCU
2804 // CC refers to the cycle, which has 2 bits, and U refers to the result of
2805 // IsUnloadingBehaviour::is_unloading() for that unloading cycle.
2806
2807 class IsUnloadingState: public AllStatic {
2808 static const uint8_t _is_unloading_mask = 1;
2809 static const uint8_t _is_unloading_shift = 0;
2810 static const uint8_t _unloading_cycle_mask = 6;
2811 static const uint8_t _unloading_cycle_shift = 1;
2812
2813 static uint8_t set_is_unloading(uint8_t state, bool value) {
2814 state &= (uint8_t)~_is_unloading_mask;
2815 if (value) {
2816 state |= 1 << _is_unloading_shift;
2817 }
2818 assert(is_unloading(state) == value, "unexpected unloading cycle overflow");
2819 return state;
2820 }
2821
2822 static uint8_t set_unloading_cycle(uint8_t state, uint8_t value) {
2823 state &= (uint8_t)~_unloading_cycle_mask;
2824 state |= (uint8_t)(value << _unloading_cycle_shift);
2825 assert(unloading_cycle(state) == value, "unexpected unloading cycle overflow");
2826 return state;
2827 }
2828
2829 public:
2830 static bool is_unloading(uint8_t state) { return (state & _is_unloading_mask) >> _is_unloading_shift == 1; }
2831 static uint8_t unloading_cycle(uint8_t state) { return (state & _unloading_cycle_mask) >> _unloading_cycle_shift; }
2832
2833 static uint8_t create(bool is_unloading, uint8_t unloading_cycle) {
2834 uint8_t state = 0;
2835 state = set_is_unloading(state, is_unloading);
2836 state = set_unloading_cycle(state, unloading_cycle);
2837 return state;
2838 }
2839 };
2840
2841 bool nmethod::is_unloading() {
2842 uint8_t state = AtomicAccess::load(&_is_unloading_state);
2843 bool state_is_unloading = IsUnloadingState::is_unloading(state);
2844 if (state_is_unloading) {
2845 return true;
2846 }
2847 uint8_t state_unloading_cycle = IsUnloadingState::unloading_cycle(state);
2848 uint8_t current_cycle = CodeCache::unloading_cycle();
2849 if (state_unloading_cycle == current_cycle) {
2850 return false;
2851 }
2852
2853 // The IsUnloadingBehaviour is responsible for calculating if the nmethod
2854 // should be unloaded. This can be either because there is a dead oop,
2855 // or because is_cold() heuristically determines it is time to unload.
2856 state_unloading_cycle = current_cycle;
2857 state_is_unloading = IsUnloadingBehaviour::is_unloading(this);
2858 uint8_t new_state = IsUnloadingState::create(state_is_unloading, state_unloading_cycle);
2859
2860 // Note that if an nmethod has dead oops, everyone will agree that the
2861 // nmethod is_unloading. However, the is_cold heuristics can yield
2862 // different outcomes, so we guard the computed result with a CAS
2863 // to ensure all threads have a shared view of whether an nmethod
2864 // is_unloading or not.
2865 uint8_t found_state = AtomicAccess::cmpxchg(&_is_unloading_state, state, new_state, memory_order_relaxed);
2866
2867 if (found_state == state) {
2868 // First to change state, we win
2869 return state_is_unloading;
2870 } else {
2871 // State already set, so use it
2872 return IsUnloadingState::is_unloading(found_state);
2873 }
2874 }
2875
2876 void nmethod::clear_unloading_state() {
2877 uint8_t state = IsUnloadingState::create(false, CodeCache::unloading_cycle());
2878 AtomicAccess::store(&_is_unloading_state, state);
2879 }
2880
2881
2882 // This is called at the end of the strong tracing/marking phase of a
2883 // GC to unload an nmethod if it contains otherwise unreachable
2884 // oops or is heuristically found to be not important.
2885 void nmethod::do_unloading(bool unloading_occurred) {
2886 // Make sure the oop's ready to receive visitors
2887 if (is_unloading()) {
2888 unlink();
2889 } else {
2890 unload_nmethod_caches(unloading_occurred);
2891 BarrierSetNMethod* bs_nm = BarrierSet::barrier_set()->barrier_set_nmethod();
2892 if (bs_nm != nullptr) {
2893 bs_nm->disarm(this);
2894 }
2895 }
2896 }
2897
2898 void nmethod::oops_do(OopClosure* f) {
2899 // Prevent extra code cache walk for platforms that don't have immediate oops.
2900 if (relocInfo::mustIterateImmediateOopsInCode()) {
2901 RelocIterator iter(this, oops_reloc_begin());
2902
2903 while (iter.next()) {
2904 if (iter.type() == relocInfo::oop_type ) {
2905 oop_Relocation* r = iter.oop_reloc();
2906 // In this loop, we must only follow those oops directly embedded in
2907 // the code. Other oops (oop_index>0) are seen as part of scopes_oops.
2908 assert(1 == (r->oop_is_immediate()) +
2909 (r->oop_addr() >= oops_begin() && r->oop_addr() < oops_end()),
2910 "oop must be found in exactly one place");
2911 if (r->oop_is_immediate() && r->oop_value() != nullptr) {
2912 f->do_oop(r->oop_addr());
2913 }
2914 }
2915 }
2916 }
2917
2918 // Scopes
2919 // This includes oop constants not inlined in the code stream.
2920 for (oop* p = oops_begin(); p < oops_end(); p++) {
2921 if (*p == Universe::non_oop_word()) continue; // skip non-oops
2922 f->do_oop(p);
2923 }
2924 }
2925
2926 void nmethod::follow_nmethod(OopIterateClosure* cl) {
2927 // Process oops in the nmethod
2928 oops_do(cl);
2929
2930 // CodeCache unloading support
2931 mark_as_maybe_on_stack();
2932
2933 BarrierSetNMethod* bs_nm = BarrierSet::barrier_set()->barrier_set_nmethod();
2934 bs_nm->disarm(this);
2935
2936 // There's an assumption made that this function is not used by GCs that
2937 // relocate objects, and therefore we don't call fix_oop_relocations.
2938 }
2939
2940 nmethod* volatile nmethod::_oops_do_mark_nmethods;
2941
2942 void nmethod::oops_do_log_change(const char* state) {
2943 LogTarget(Trace, gc, nmethod) lt;
2944 if (lt.is_enabled()) {
2945 LogStream ls(lt);
2946 CompileTask::print(&ls, this, state, true /* short_form */);
2947 }
2948 }
2949
2950 bool nmethod::oops_do_try_claim() {
2951 if (oops_do_try_claim_weak_request()) {
2952 nmethod* result = oops_do_try_add_to_list_as_weak_done();
2953 assert(result == nullptr, "adding to global list as weak done must always succeed.");
2954 return true;
2955 }
2956 return false;
2957 }
2958
2959 bool nmethod::oops_do_try_claim_weak_request() {
2960 assert(SafepointSynchronize::is_at_safepoint(), "only at safepoint");
2961
2962 if ((_oops_do_mark_link == nullptr) &&
2963 (AtomicAccess::replace_if_null(&_oops_do_mark_link, mark_link(this, claim_weak_request_tag)))) {
2964 oops_do_log_change("oops_do, mark weak request");
2965 return true;
2966 }
2967 return false;
2968 }
2969
2970 void nmethod::oops_do_set_strong_done(nmethod* old_head) {
2971 _oops_do_mark_link = mark_link(old_head, claim_strong_done_tag);
2972 }
2973
2974 nmethod::oops_do_mark_link* nmethod::oops_do_try_claim_strong_done() {
2975 assert(SafepointSynchronize::is_at_safepoint(), "only at safepoint");
2976
2977 oops_do_mark_link* old_next = AtomicAccess::cmpxchg(&_oops_do_mark_link, mark_link(nullptr, claim_weak_request_tag), mark_link(this, claim_strong_done_tag));
2978 if (old_next == nullptr) {
2979 oops_do_log_change("oops_do, mark strong done");
2980 }
2981 return old_next;
2982 }
2983
2984 nmethod::oops_do_mark_link* nmethod::oops_do_try_add_strong_request(nmethod::oops_do_mark_link* next) {
2985 assert(SafepointSynchronize::is_at_safepoint(), "only at safepoint");
2986 assert(next == mark_link(this, claim_weak_request_tag), "Should be claimed as weak");
2987
2988 oops_do_mark_link* old_next = AtomicAccess::cmpxchg(&_oops_do_mark_link, next, mark_link(this, claim_strong_request_tag));
2989 if (old_next == next) {
2990 oops_do_log_change("oops_do, mark strong request");
2991 }
2992 return old_next;
2993 }
2994
2995 bool nmethod::oops_do_try_claim_weak_done_as_strong_done(nmethod::oops_do_mark_link* next) {
2996 assert(SafepointSynchronize::is_at_safepoint(), "only at safepoint");
2997 assert(extract_state(next) == claim_weak_done_tag, "Should be claimed as weak done");
2998
2999 oops_do_mark_link* old_next = AtomicAccess::cmpxchg(&_oops_do_mark_link, next, mark_link(extract_nmethod(next), claim_strong_done_tag));
3000 if (old_next == next) {
3001 oops_do_log_change("oops_do, mark weak done -> mark strong done");
3002 return true;
3003 }
3004 return false;
3005 }
3006
3007 nmethod* nmethod::oops_do_try_add_to_list_as_weak_done() {
3008 assert(SafepointSynchronize::is_at_safepoint(), "only at safepoint");
3009
3010 assert(extract_state(_oops_do_mark_link) == claim_weak_request_tag ||
3011 extract_state(_oops_do_mark_link) == claim_strong_request_tag,
3012 "must be but is nmethod " PTR_FORMAT " %u", p2i(extract_nmethod(_oops_do_mark_link)), extract_state(_oops_do_mark_link));
3013
3014 nmethod* old_head = AtomicAccess::xchg(&_oops_do_mark_nmethods, this);
3015 // Self-loop if needed.
3016 if (old_head == nullptr) {
3017 old_head = this;
3018 }
3019 // Try to install end of list and weak done tag.
3020 if (AtomicAccess::cmpxchg(&_oops_do_mark_link, mark_link(this, claim_weak_request_tag), mark_link(old_head, claim_weak_done_tag)) == mark_link(this, claim_weak_request_tag)) {
3021 oops_do_log_change("oops_do, mark weak done");
3022 return nullptr;
3023 } else {
3024 return old_head;
3025 }
3026 }
3027
3028 void nmethod::oops_do_add_to_list_as_strong_done() {
3029 assert(SafepointSynchronize::is_at_safepoint(), "only at safepoint");
3030
3031 nmethod* old_head = AtomicAccess::xchg(&_oops_do_mark_nmethods, this);
3032 // Self-loop if needed.
3033 if (old_head == nullptr) {
3034 old_head = this;
3035 }
3036 assert(_oops_do_mark_link == mark_link(this, claim_strong_done_tag), "must be but is nmethod " PTR_FORMAT " state %u",
3037 p2i(extract_nmethod(_oops_do_mark_link)), extract_state(_oops_do_mark_link));
3038
3039 oops_do_set_strong_done(old_head);
3040 }
3041
3042 void nmethod::oops_do_process_weak(OopsDoProcessor* p) {
3043 if (!oops_do_try_claim_weak_request()) {
3044 // Failed to claim for weak processing.
3045 oops_do_log_change("oops_do, mark weak request fail");
3046 return;
3047 }
3048
3049 p->do_regular_processing(this);
3050
3051 nmethod* old_head = oops_do_try_add_to_list_as_weak_done();
3052 if (old_head == nullptr) {
3053 return;
3054 }
3055 oops_do_log_change("oops_do, mark weak done fail");
3056 // Adding to global list failed, another thread added a strong request.
3057 assert(extract_state(_oops_do_mark_link) == claim_strong_request_tag,
3058 "must be but is %u", extract_state(_oops_do_mark_link));
3059
3060 oops_do_log_change("oops_do, mark weak request -> mark strong done");
3061
3062 oops_do_set_strong_done(old_head);
3063 // Do missing strong processing.
3064 p->do_remaining_strong_processing(this);
3065 }
3066
3067 void nmethod::oops_do_process_strong(OopsDoProcessor* p) {
3068 oops_do_mark_link* next_raw = oops_do_try_claim_strong_done();
3069 if (next_raw == nullptr) {
3070 p->do_regular_processing(this);
3071 oops_do_add_to_list_as_strong_done();
3072 return;
3073 }
3074 // Claim failed. Figure out why and handle it.
3075 if (oops_do_has_weak_request(next_raw)) {
3076 oops_do_mark_link* old = next_raw;
3077 // Claim failed because being weak processed (state == "weak request").
3078 // Try to request deferred strong processing.
3079 next_raw = oops_do_try_add_strong_request(old);
3080 if (next_raw == old) {
3081 // Successfully requested deferred strong processing.
3082 return;
3083 }
3084 // Failed because of a concurrent transition. No longer in "weak request" state.
3085 }
3086 if (oops_do_has_any_strong_state(next_raw)) {
3087 // Already claimed for strong processing or requested for such.
3088 return;
3089 }
3090 if (oops_do_try_claim_weak_done_as_strong_done(next_raw)) {
3091 // Successfully claimed "weak done" as "strong done". Do the missing marking.
3092 p->do_remaining_strong_processing(this);
3093 return;
3094 }
3095 // Claim failed, some other thread got it.
3096 }
3097
3098 void nmethod::oops_do_marking_prologue() {
3099 assert_at_safepoint();
3100
3101 log_trace(gc, nmethod)("oops_do_marking_prologue");
3102 assert(_oops_do_mark_nmethods == nullptr, "must be empty");
3103 }
3104
3105 void nmethod::oops_do_marking_epilogue() {
3106 assert_at_safepoint();
3107
3108 nmethod* next = _oops_do_mark_nmethods;
3109 _oops_do_mark_nmethods = nullptr;
3110 if (next != nullptr) {
3111 nmethod* cur;
3112 do {
3113 cur = next;
3114 next = extract_nmethod(cur->_oops_do_mark_link);
3115 cur->_oops_do_mark_link = nullptr;
3116 DEBUG_ONLY(cur->verify_oop_relocations());
3117
3118 LogTarget(Trace, gc, nmethod) lt;
3119 if (lt.is_enabled()) {
3120 LogStream ls(lt);
3121 CompileTask::print(&ls, cur, "oops_do, unmark", /*short_form:*/ true);
3122 }
3123 // End if self-loop has been detected.
3124 } while (cur != next);
3125 }
3126 log_trace(gc, nmethod)("oops_do_marking_epilogue");
3127 }
3128
3129 inline bool includes(void* p, void* from, void* to) {
3130 return from <= p && p < to;
3131 }
3132
3133
3134 void nmethod::copy_scopes_pcs(PcDesc* pcs, int count) {
3135 assert(count >= 2, "must be sentinel values, at least");
3136
3137 #ifdef ASSERT
3138 // must be sorted and unique; we do a binary search in find_pc_desc()
3139 int prev_offset = pcs[0].pc_offset();
3140 assert(prev_offset == PcDesc::lower_offset_limit,
3141 "must start with a sentinel");
3142 for (int i = 1; i < count; i++) {
3143 int this_offset = pcs[i].pc_offset();
3144 assert(this_offset > prev_offset, "offsets must be sorted");
3145 prev_offset = this_offset;
3146 }
3147 assert(prev_offset == PcDesc::upper_offset_limit,
3148 "must end with a sentinel");
3149 #endif //ASSERT
3150
3151 int size = count * sizeof(PcDesc);
3152 assert(scopes_pcs_size() >= size, "oob");
3153 memcpy(scopes_pcs_begin(), pcs, size);
3154
3155 // Adjust the final sentinel downward.
3156 PcDesc* last_pc = &scopes_pcs_begin()[count-1];
3157 assert(last_pc->pc_offset() == PcDesc::upper_offset_limit, "sanity");
3158 last_pc->set_pc_offset(content_size() + 1);
3159 for (; last_pc + 1 < scopes_pcs_end(); last_pc += 1) {
3160 // Fill any rounding gaps with copies of the last record.
3161 last_pc[1] = last_pc[0];
3162 }
3163 // The following assert could fail if sizeof(PcDesc) is not
3164 // an integral multiple of oopSize (the rounding term).
3165 // If it fails, change the logic to always allocate a multiple
3166 // of sizeof(PcDesc), and fill unused words with copies of *last_pc.
3167 assert(last_pc + 1 == scopes_pcs_end(), "must match exactly");
3168 }
3169
3170 void nmethod::copy_scopes_data(u_char* buffer, int size) {
3171 assert(scopes_data_size() >= size, "oob");
3172 memcpy(scopes_data_begin(), buffer, size);
3173 }
3174
3175 #ifdef ASSERT
3176 static PcDesc* linear_search(int pc_offset, bool approximate, PcDesc* lower, PcDesc* upper) {
3177 PcDesc* res = nullptr;
3178 assert(lower != nullptr && lower->pc_offset() == PcDesc::lower_offset_limit,
3179 "must start with a sentinel");
3180 // lower + 1 to exclude initial sentinel
3181 for (PcDesc* p = lower + 1; p < upper; p++) {
3182 NOT_PRODUCT(--pc_nmethod_stats.pc_desc_tests); // don't count this call to match_desc
3183 if (match_desc(p, pc_offset, approximate)) {
3184 if (res == nullptr) {
3185 res = p;
3186 } else {
3187 res = (PcDesc*) badAddress;
3188 }
3189 }
3190 }
3191 return res;
3192 }
3193 #endif
3194
3195
3196 #ifndef PRODUCT
3197 // Version of method to collect statistic
3198 PcDesc* PcDescContainer::find_pc_desc(address pc, bool approximate, address code_begin,
3199 PcDesc* lower, PcDesc* upper) {
3200 ++pc_nmethod_stats.pc_desc_queries;
3201 if (approximate) ++pc_nmethod_stats.pc_desc_approx;
3202
3203 PcDesc* desc = _pc_desc_cache.last_pc_desc();
3204 assert(desc != nullptr, "PcDesc cache should be initialized already");
3205 if (desc->pc_offset() == (pc - code_begin)) {
3206 // Cached value matched
3207 ++pc_nmethod_stats.pc_desc_tests;
3208 ++pc_nmethod_stats.pc_desc_repeats;
3209 return desc;
3210 }
3211 return find_pc_desc_internal(pc, approximate, code_begin, lower, upper);
3212 }
3213 #endif
3214
3215 // Finds a PcDesc with real-pc equal to "pc"
3216 PcDesc* PcDescContainer::find_pc_desc_internal(address pc, bool approximate, address code_begin,
3217 PcDesc* lower_incl, PcDesc* upper_incl) {
3218 if ((pc < code_begin) ||
3219 (pc - code_begin) >= (ptrdiff_t) PcDesc::upper_offset_limit) {
3220 return nullptr; // PC is wildly out of range
3221 }
3222 int pc_offset = (int) (pc - code_begin);
3223
3224 // Check the PcDesc cache if it contains the desired PcDesc
3225 // (This as an almost 100% hit rate.)
3226 PcDesc* res = _pc_desc_cache.find_pc_desc(pc_offset, approximate);
3227 if (res != nullptr) {
3228 assert(res == linear_search(pc_offset, approximate, lower_incl, upper_incl), "cache ok");
3229 return res;
3230 }
3231
3232 // Fallback algorithm: quasi-linear search for the PcDesc
3233 // Find the last pc_offset less than the given offset.
3234 // The successor must be the required match, if there is a match at all.
3235 // (Use a fixed radix to avoid expensive affine pointer arithmetic.)
3236 PcDesc* lower = lower_incl; // this is initial sentinel
3237 PcDesc* upper = upper_incl - 1; // exclude final sentinel
3238 if (lower >= upper) return nullptr; // no PcDescs at all
3239
3240 #define assert_LU_OK \
3241 /* invariant on lower..upper during the following search: */ \
3242 assert(lower->pc_offset() < pc_offset, "sanity"); \
3243 assert(upper->pc_offset() >= pc_offset, "sanity")
3244 assert_LU_OK;
3245
3246 // Use the last successful return as a split point.
3247 PcDesc* mid = _pc_desc_cache.last_pc_desc();
3248 NOT_PRODUCT(++pc_nmethod_stats.pc_desc_searches);
3249 if (mid->pc_offset() < pc_offset) {
3250 lower = mid;
3251 } else {
3252 upper = mid;
3253 }
3254
3255 // Take giant steps at first (4096, then 256, then 16, then 1)
3256 const int LOG2_RADIX = 4 /*smaller steps in debug mode:*/ DEBUG_ONLY(-1);
3257 const int RADIX = (1 << LOG2_RADIX);
3258 for (int step = (1 << (LOG2_RADIX*3)); step > 1; step >>= LOG2_RADIX) {
3259 while ((mid = lower + step) < upper) {
3260 assert_LU_OK;
3261 NOT_PRODUCT(++pc_nmethod_stats.pc_desc_searches);
3262 if (mid->pc_offset() < pc_offset) {
3263 lower = mid;
3264 } else {
3265 upper = mid;
3266 break;
3267 }
3268 }
3269 assert_LU_OK;
3270 }
3271
3272 // Sneak up on the value with a linear search of length ~16.
3273 while (true) {
3274 assert_LU_OK;
3275 mid = lower + 1;
3276 NOT_PRODUCT(++pc_nmethod_stats.pc_desc_searches);
3277 if (mid->pc_offset() < pc_offset) {
3278 lower = mid;
3279 } else {
3280 upper = mid;
3281 break;
3282 }
3283 }
3284 #undef assert_LU_OK
3285
3286 if (match_desc(upper, pc_offset, approximate)) {
3287 assert(upper == linear_search(pc_offset, approximate, lower_incl, upper_incl), "search mismatch");
3288 if (!Thread::current_in_asgct()) {
3289 // we don't want to modify the cache if we're in ASGCT
3290 // which is typically called in a signal handler
3291 _pc_desc_cache.add_pc_desc(upper);
3292 }
3293 return upper;
3294 } else {
3295 assert(nullptr == linear_search(pc_offset, approximate, lower_incl, upper_incl), "search mismatch");
3296 return nullptr;
3297 }
3298 }
3299
3300 bool nmethod::check_dependency_on(DepChange& changes) {
3301 // What has happened:
3302 // 1) a new class dependee has been added
3303 // 2) dependee and all its super classes have been marked
3304 bool found_check = false; // set true if we are upset
3305 for (Dependencies::DepStream deps(this); deps.next(); ) {
3306 // Evaluate only relevant dependencies.
3307 if (deps.spot_check_dependency_at(changes) != nullptr) {
3308 found_check = true;
3309 NOT_DEBUG(break);
3310 }
3311 }
3312 return found_check;
3313 }
3314
3315 // Called from mark_for_deoptimization, when dependee is invalidated.
3316 bool nmethod::is_dependent_on_method(Method* dependee) {
3317 for (Dependencies::DepStream deps(this); deps.next(); ) {
3318 if (deps.type() != Dependencies::evol_method)
3319 continue;
3320 Method* method = deps.method_argument(0);
3321 if (method == dependee) return true;
3322 }
3323 return false;
3324 }
3325
3326 void nmethod_init() {
3327 // make sure you didn't forget to adjust the filler fields
3328 assert(sizeof(nmethod) % oopSize == 0, "nmethod size must be multiple of a word");
3329 }
3330
3331 // -----------------------------------------------------------------------------
3332 // Verification
3333
3334 class VerifyOopsClosure: public OopClosure {
3335 nmethod* _nm;
3336 bool _ok;
3337 public:
3338 VerifyOopsClosure(nmethod* nm) : _nm(nm), _ok(true) { }
3339 bool ok() { return _ok; }
3340 virtual void do_oop(oop* p) {
3341 if (oopDesc::is_oop_or_null(*p)) return;
3342 // Print diagnostic information before calling print_nmethod().
3343 // Assertions therein might prevent call from returning.
3344 tty->print_cr("*** non-oop " PTR_FORMAT " found at " PTR_FORMAT " (offset %d)",
3345 p2i(*p), p2i(p), (int)((intptr_t)p - (intptr_t)_nm));
3346 if (_ok) {
3347 _nm->print_nmethod(true);
3348 _ok = false;
3349 }
3350 }
3351 virtual void do_oop(narrowOop* p) { ShouldNotReachHere(); }
3352 };
3353
3354 class VerifyMetadataClosure: public MetadataClosure {
3355 public:
3356 void do_metadata(Metadata* md) {
3357 if (md->is_method()) {
3358 Method* method = (Method*)md;
3359 assert(!method->is_old(), "Should not be installing old methods");
3360 }
3361 }
3362 };
3363
3364
3365 void nmethod::verify() {
3366 if (is_not_entrant())
3367 return;
3368
3369 // assert(oopDesc::is_oop(method()), "must be valid");
3370
3371 ResourceMark rm;
3372
3373 if (!CodeCache::contains(this)) {
3374 fatal("nmethod at " INTPTR_FORMAT " not in zone", p2i(this));
3375 }
3376
3377 if(is_native_method() )
3378 return;
3379
3380 nmethod* nm = CodeCache::find_nmethod(verified_entry_point());
3381 if (nm != this) {
3382 fatal("find_nmethod did not find this nmethod (" INTPTR_FORMAT ")", p2i(this));
3383 }
3384
3385 // Verification can triggered during shutdown after AOTCodeCache is closed.
3386 // If the Scopes data is in the AOT code cache, then we should avoid verification during shutdown.
3387 if (!is_aot() || AOTCodeCache::is_on()) {
3388 for (PcDesc* p = scopes_pcs_begin(); p < scopes_pcs_end(); p++) {
3389 if (! p->verify(this)) {
3390 tty->print_cr("\t\tin nmethod at " INTPTR_FORMAT " (pcs)", p2i(this));
3391 }
3392 }
3393
3394 #ifdef ASSERT
3395 #if INCLUDE_JVMCI
3396 {
3397 // Verify that implicit exceptions that deoptimize have a PcDesc and OopMap
3398 ImmutableOopMapSet* oms = oop_maps();
3399 ImplicitExceptionTable implicit_table(this);
3400 for (uint i = 0; i < implicit_table.len(); i++) {
3401 int exec_offset = (int) implicit_table.get_exec_offset(i);
3402 if (implicit_table.get_exec_offset(i) == implicit_table.get_cont_offset(i)) {
3403 assert(pc_desc_at(code_begin() + exec_offset) != nullptr, "missing PcDesc");
3404 bool found = false;
3405 for (int i = 0, imax = oms->count(); i < imax; i++) {
3406 if (oms->pair_at(i)->pc_offset() == exec_offset) {
3407 found = true;
3408 break;
3409 }
3410 }
3411 assert(found, "missing oopmap");
3412 }
3413 }
3414 }
3415 #endif
3416 #endif
3417 }
3418
3419 VerifyOopsClosure voc(this);
3420 oops_do(&voc);
3421 assert(voc.ok(), "embedded oops must be OK");
3422 Universe::heap()->verify_nmethod(this);
3423
3424 assert(_oops_do_mark_link == nullptr, "_oops_do_mark_link for %s should be nullptr but is " PTR_FORMAT,
3425 nm->method()->external_name(), p2i(_oops_do_mark_link));
3426 if (!is_aot() || AOTCodeCache::is_on()) {
3427 verify_scopes();
3428 }
3429
3430 CompiledICLocker nm_verify(this);
3431 VerifyMetadataClosure vmc;
3432 metadata_do(&vmc);
3433 }
3434
3435
3436 void nmethod::verify_interrupt_point(address call_site, bool is_inline_cache) {
3437
3438 // Verify IC only when nmethod installation is finished.
3439 if (!is_not_installed()) {
3440 if (CompiledICLocker::is_safe(this)) {
3441 if (is_inline_cache) {
3442 CompiledIC_at(this, call_site);
3443 } else {
3444 CompiledDirectCall::at(call_site);
3445 }
3446 } else {
3447 CompiledICLocker ml_verify(this);
3448 if (is_inline_cache) {
3449 CompiledIC_at(this, call_site);
3450 } else {
3451 CompiledDirectCall::at(call_site);
3452 }
3453 }
3454 }
3455
3456 HandleMark hm(Thread::current());
3457
3458 PcDesc* pd = pc_desc_at(nativeCall_at(call_site)->return_address());
3459 assert(pd != nullptr, "PcDesc must exist");
3460 for (ScopeDesc* sd = new ScopeDesc(this, pd);
3461 !sd->is_top(); sd = sd->sender()) {
3462 sd->verify();
3463 }
3464 }
3465
3466 void nmethod::verify_scopes() {
3467 if( !method() ) return; // Runtime stubs have no scope
3468 if (method()->is_native()) return; // Ignore stub methods.
3469 // iterate through all interrupt point
3470 // and verify the debug information is valid.
3471 RelocIterator iter(this);
3472 while (iter.next()) {
3473 address stub = nullptr;
3474 switch (iter.type()) {
3475 case relocInfo::virtual_call_type:
3476 verify_interrupt_point(iter.addr(), true /* is_inline_cache */);
3477 break;
3478 case relocInfo::opt_virtual_call_type:
3479 stub = iter.opt_virtual_call_reloc()->static_stub();
3480 verify_interrupt_point(iter.addr(), false /* is_inline_cache */);
3481 break;
3482 case relocInfo::static_call_type:
3483 stub = iter.static_call_reloc()->static_stub();
3484 verify_interrupt_point(iter.addr(), false /* is_inline_cache */);
3485 break;
3486 case relocInfo::runtime_call_type:
3487 case relocInfo::runtime_call_w_cp_type: {
3488 address destination = iter.reloc()->value();
3489 // Right now there is no way to find out which entries support
3490 // an interrupt point. It would be nice if we had this
3491 // information in a table.
3492 break;
3493 }
3494 default:
3495 break;
3496 }
3497 assert(stub == nullptr || stub_contains(stub), "static call stub outside stub section");
3498 }
3499 }
3500
3501
3502 // -----------------------------------------------------------------------------
3503 // Printing operations
3504
3505 void nmethod::print_on_impl(outputStream* st) const {
3506 ResourceMark rm;
3507
3508 st->print("Compiled method ");
3509
3510 if (is_compiled_by_c1()) {
3511 st->print("(c1) ");
3512 } else if (is_compiled_by_c2()) {
3513 st->print("(c2) ");
3514 } else if (is_compiled_by_jvmci()) {
3515 st->print("(JVMCI) ");
3516 } else {
3517 st->print("(n/a) ");
3518 }
3519
3520 print_on_with_msg(st, nullptr);
3521
3522 if (WizardMode) {
3523 st->print("((nmethod*) " INTPTR_FORMAT ") ", p2i(this));
3524 st->print(" for method " INTPTR_FORMAT , p2i(method()));
3525 st->print(" { ");
3526 st->print_cr("%s ", state());
3527 st->print_cr("}:");
3528 }
3529 if (size () > 0) st->print_cr(" total in heap [" INTPTR_FORMAT "," INTPTR_FORMAT "] = %d",
3530 p2i(this),
3531 p2i(this) + size(),
3532 size());
3533 if (consts_size () > 0) st->print_cr(" constants [" INTPTR_FORMAT "," INTPTR_FORMAT "] = %d",
3534 p2i(consts_begin()),
3535 p2i(consts_end()),
3536 consts_size());
3537 if (insts_size () > 0) st->print_cr(" main code [" INTPTR_FORMAT "," INTPTR_FORMAT "] = %d",
3538 p2i(insts_begin()),
3539 p2i(insts_end()),
3540 insts_size());
3541 if (stub_size () > 0) st->print_cr(" stub code [" INTPTR_FORMAT "," INTPTR_FORMAT "] = %d",
3542 p2i(stub_begin()),
3543 p2i(stub_end()),
3544 stub_size());
3545 if (oops_size () > 0) st->print_cr(" oops [" INTPTR_FORMAT "," INTPTR_FORMAT "] = %d",
3546 p2i(oops_begin()),
3547 p2i(oops_end()),
3548 oops_size());
3549 if (mutable_data_size() > 0) st->print_cr(" mutable data [" INTPTR_FORMAT "," INTPTR_FORMAT "] = %d",
3550 p2i(mutable_data_begin()),
3551 p2i(mutable_data_end()),
3552 mutable_data_size());
3553 if (relocation_size() > 0) st->print_cr(" relocation [" INTPTR_FORMAT "," INTPTR_FORMAT "] = %d",
3554 p2i(relocation_begin()),
3555 p2i(relocation_end()),
3556 relocation_size());
3557 if (metadata_size () > 0) st->print_cr(" metadata [" INTPTR_FORMAT "," INTPTR_FORMAT "] = %d",
3558 p2i(metadata_begin()),
3559 p2i(metadata_end()),
3560 metadata_size());
3561 #if INCLUDE_JVMCI
3562 if (jvmci_data_size () > 0) st->print_cr(" JVMCI data [" INTPTR_FORMAT "," INTPTR_FORMAT "] = %d",
3563 p2i(jvmci_data_begin()),
3564 p2i(jvmci_data_end()),
3565 jvmci_data_size());
3566 #endif
3567 if (immutable_data_size() > 0) st->print_cr(" immutable data [" INTPTR_FORMAT "," INTPTR_FORMAT "] = %d",
3568 p2i(immutable_data_begin()),
3569 p2i(immutable_data_end()),
3570 immutable_data_size());
3571 if (dependencies_size () > 0) st->print_cr(" dependencies [" INTPTR_FORMAT "," INTPTR_FORMAT "] = %d",
3572 p2i(dependencies_begin()),
3573 p2i(dependencies_end()),
3574 dependencies_size());
3575 if (nul_chk_table_size() > 0) st->print_cr(" nul chk table [" INTPTR_FORMAT "," INTPTR_FORMAT "] = %d",
3576 p2i(nul_chk_table_begin()),
3577 p2i(nul_chk_table_end()),
3578 nul_chk_table_size());
3579 if (handler_table_size() > 0) st->print_cr(" handler table [" INTPTR_FORMAT "," INTPTR_FORMAT "] = %d",
3580 p2i(handler_table_begin()),
3581 p2i(handler_table_end()),
3582 handler_table_size());
3583 if (scopes_pcs_size () > 0) st->print_cr(" scopes pcs [" INTPTR_FORMAT "," INTPTR_FORMAT "] = %d",
3584 p2i(scopes_pcs_begin()),
3585 p2i(scopes_pcs_end()),
3586 scopes_pcs_size());
3587 if (scopes_data_size () > 0) st->print_cr(" scopes data [" INTPTR_FORMAT "," INTPTR_FORMAT "] = %d",
3588 p2i(scopes_data_begin()),
3589 p2i(scopes_data_end()),
3590 scopes_data_size());
3591 #if INCLUDE_JVMCI
3592 if (speculations_size () > 0) st->print_cr(" speculations [" INTPTR_FORMAT "," INTPTR_FORMAT "] = %d",
3593 p2i(speculations_begin()),
3594 p2i(speculations_end()),
3595 speculations_size());
3596 #endif
3597 if (AOTCodeCache::is_on() && _aot_code_entry != nullptr) {
3598 _aot_code_entry->print(st);
3599 }
3600 }
3601
3602 void nmethod::print_code() {
3603 ResourceMark m;
3604 ttyLocker ttyl;
3605 // Call the specialized decode method of this class.
3606 decode(tty);
3607 }
3608
3609 #ifndef PRODUCT // called InstanceKlass methods are available only then. Declared as PRODUCT_RETURN
3610
3611 void nmethod::print_dependencies_on(outputStream* out) {
3612 ResourceMark rm;
3613 stringStream st;
3614 st.print_cr("Dependencies:");
3615 for (Dependencies::DepStream deps(this); deps.next(); ) {
3616 deps.print_dependency(&st);
3617 InstanceKlass* ctxk = deps.context_type();
3618 if (ctxk != nullptr) {
3619 if (ctxk->is_dependent_nmethod(this)) {
3620 st.print_cr(" [nmethod<=klass]%s", ctxk->external_name());
3621 }
3622 }
3623 deps.log_dependency(); // put it into the xml log also
3624 }
3625 out->print_raw(st.as_string());
3626 }
3627 #endif
3628
3629 #if defined(SUPPORT_DATA_STRUCTS)
3630
3631 // Print the oops from the underlying CodeBlob.
3632 void nmethod::print_oops(outputStream* st) {
3633 ResourceMark m;
3634 st->print("Oops:");
3635 if (oops_begin() < oops_end()) {
3636 st->cr();
3637 for (oop* p = oops_begin(); p < oops_end(); p++) {
3638 Disassembler::print_location((unsigned char*)p, (unsigned char*)oops_begin(), (unsigned char*)oops_end(), st, true, false);
3639 st->print(PTR_FORMAT " ", *((uintptr_t*)p));
3640 if (Universe::contains_non_oop_word(p)) {
3641 st->print_cr("NON_OOP");
3642 continue; // skip non-oops
3643 }
3644 if (*p == nullptr) {
3645 st->print_cr("nullptr-oop");
3646 continue; // skip non-oops
3647 }
3648 (*p)->print_value_on(st);
3649 st->cr();
3650 }
3651 } else {
3652 st->print_cr(" <list empty>");
3653 }
3654 }
3655
3656 // Print metadata pool.
3657 void nmethod::print_metadata(outputStream* st) {
3658 ResourceMark m;
3659 st->print("Metadata:");
3660 if (metadata_begin() < metadata_end()) {
3661 st->cr();
3662 for (Metadata** p = metadata_begin(); p < metadata_end(); p++) {
3663 Disassembler::print_location((unsigned char*)p, (unsigned char*)metadata_begin(), (unsigned char*)metadata_end(), st, true, false);
3664 st->print(PTR_FORMAT " ", *((uintptr_t*)p));
3665 if (*p && *p != Universe::non_oop_word()) {
3666 (*p)->print_value_on(st);
3667 }
3668 st->cr();
3669 }
3670 } else {
3671 st->print_cr(" <list empty>");
3672 }
3673 }
3674
3675 #ifndef PRODUCT // ScopeDesc::print_on() is available only then. Declared as PRODUCT_RETURN
3676 void nmethod::print_scopes_on(outputStream* st) {
3677 // Find the first pc desc for all scopes in the code and print it.
3678 ResourceMark rm;
3679 st->print("scopes:");
3680 if (scopes_pcs_begin() < scopes_pcs_end()) {
3681 st->cr();
3682 for (PcDesc* p = scopes_pcs_begin(); p < scopes_pcs_end(); p++) {
3683 if (p->scope_decode_offset() == DebugInformationRecorder::serialized_null)
3684 continue;
3685
3686 ScopeDesc* sd = scope_desc_at(p->real_pc(this));
3687 while (sd != nullptr) {
3688 sd->print_on(st, p); // print output ends with a newline
3689 sd = sd->sender();
3690 }
3691 }
3692 } else {
3693 st->print_cr(" <list empty>");
3694 }
3695 }
3696 #endif
3697
3698 #ifndef PRODUCT // RelocIterator does support printing only then.
3699 void nmethod::print_relocations_on(outputStream* st) {
3700 ResourceMark m; // in case methods get printed via the debugger
3701 st->print_cr("relocations:");
3702 RelocIterator iter(this);
3703 iter.print_on(st);
3704 }
3705 #endif
3706
3707 void nmethod::print_pcs_on(outputStream* st) {
3708 ResourceMark m; // in case methods get printed via debugger
3709 st->print("pc-bytecode offsets:");
3710 if (scopes_pcs_begin() < scopes_pcs_end()) {
3711 st->cr();
3712 for (PcDesc* p = scopes_pcs_begin(); p < scopes_pcs_end(); p++) {
3713 p->print_on(st, this); // print output ends with a newline
3714 }
3715 } else {
3716 st->print_cr(" <list empty>");
3717 }
3718 }
3719
3720 void nmethod::print_handler_table() {
3721 ExceptionHandlerTable(this).print(code_begin());
3722 }
3723
3724 void nmethod::print_nul_chk_table() {
3725 ImplicitExceptionTable(this).print(code_begin());
3726 }
3727
3728 void nmethod::print_recorded_oop(int log_n, int i) {
3729 void* value;
3730
3731 if (i == 0) {
3732 value = nullptr;
3733 } else {
3734 // Be careful around non-oop words. Don't create an oop
3735 // with that value, or it will assert in verification code.
3736 if (Universe::contains_non_oop_word(oop_addr_at(i))) {
3737 value = Universe::non_oop_word();
3738 } else {
3739 value = oop_at(i);
3740 }
3741 }
3742
3743 tty->print("#%*d: " INTPTR_FORMAT " ", log_n, i, p2i(value));
3744
3745 if (value == Universe::non_oop_word()) {
3746 tty->print("non-oop word");
3747 } else {
3748 if (value == nullptr) {
3749 tty->print("nullptr-oop");
3750 } else {
3751 oop_at(i)->print_value_on(tty);
3752 }
3753 }
3754
3755 tty->cr();
3756 }
3757
3758 void nmethod::print_recorded_oops() {
3759 const int n = oops_count();
3760 const int log_n = (n<10) ? 1 : (n<100) ? 2 : (n<1000) ? 3 : (n<10000) ? 4 : 6;
3761 tty->print("Recorded oops:");
3762 if (n > 0) {
3763 tty->cr();
3764 for (int i = 0; i < n; i++) {
3765 print_recorded_oop(log_n, i);
3766 }
3767 } else {
3768 tty->print_cr(" <list empty>");
3769 }
3770 }
3771
3772 void nmethod::print_recorded_metadata() {
3773 const int n = metadata_count();
3774 const int log_n = (n<10) ? 1 : (n<100) ? 2 : (n<1000) ? 3 : (n<10000) ? 4 : 6;
3775 tty->print("Recorded metadata:");
3776 if (n > 0) {
3777 tty->cr();
3778 for (int i = 0; i < n; i++) {
3779 Metadata* m = metadata_at(i);
3780 tty->print("#%*d: " INTPTR_FORMAT " ", log_n, i, p2i(m));
3781 if (m == (Metadata*)Universe::non_oop_word()) {
3782 tty->print("non-metadata word");
3783 } else if (m == nullptr) {
3784 tty->print("nullptr-oop");
3785 } else {
3786 Metadata::print_value_on_maybe_null(tty, m);
3787 }
3788 tty->cr();
3789 }
3790 } else {
3791 tty->print_cr(" <list empty>");
3792 }
3793 }
3794 #endif
3795
3796 #if defined(SUPPORT_ASSEMBLY) || defined(SUPPORT_ABSTRACT_ASSEMBLY)
3797
3798 void nmethod::print_constant_pool(outputStream* st) {
3799 //-----------------------------------
3800 //---< Print the constant pool >---
3801 //-----------------------------------
3802 int consts_size = this->consts_size();
3803 if ( consts_size > 0 ) {
3804 unsigned char* cstart = this->consts_begin();
3805 unsigned char* cp = cstart;
3806 unsigned char* cend = cp + consts_size;
3807 unsigned int bytes_per_line = 4;
3808 unsigned int CP_alignment = 8;
3809 unsigned int n;
3810
3811 st->cr();
3812
3813 //---< print CP header to make clear what's printed >---
3814 if( ((uintptr_t)cp&(CP_alignment-1)) == 0 ) {
3815 n = bytes_per_line;
3816 st->print_cr("[Constant Pool]");
3817 Disassembler::print_location(cp, cstart, cend, st, true, true);
3818 Disassembler::print_hexdata(cp, n, st, true);
3819 st->cr();
3820 } else {
3821 n = (int)((uintptr_t)cp & (bytes_per_line-1));
3822 st->print_cr("[Constant Pool (unaligned)]");
3823 }
3824
3825 //---< print CP contents, bytes_per_line at a time >---
3826 while (cp < cend) {
3827 Disassembler::print_location(cp, cstart, cend, st, true, false);
3828 Disassembler::print_hexdata(cp, n, st, false);
3829 cp += n;
3830 n = bytes_per_line;
3831 st->cr();
3832 }
3833
3834 //---< Show potential alignment gap between constant pool and code >---
3835 cend = code_begin();
3836 if( cp < cend ) {
3837 n = 4;
3838 st->print_cr("[Code entry alignment]");
3839 while (cp < cend) {
3840 Disassembler::print_location(cp, cstart, cend, st, false, false);
3841 cp += n;
3842 st->cr();
3843 }
3844 }
3845 } else {
3846 st->print_cr("[Constant Pool (empty)]");
3847 }
3848 st->cr();
3849 }
3850
3851 #endif
3852
3853 // Disassemble this nmethod.
3854 // Print additional debug information, if requested. This could be code
3855 // comments, block comments, profiling counters, etc.
3856 // The undisassembled format is useful no disassembler library is available.
3857 // The resulting hex dump (with markers) can be disassembled later, or on
3858 // another system, when/where a disassembler library is available.
3859 void nmethod::decode2(outputStream* ost) const {
3860
3861 // Called from frame::back_trace_with_decode without ResourceMark.
3862 ResourceMark rm;
3863
3864 // Make sure we have a valid stream to print on.
3865 outputStream* st = ost ? ost : tty;
3866
3867 #if defined(SUPPORT_ABSTRACT_ASSEMBLY) && ! defined(SUPPORT_ASSEMBLY)
3868 const bool use_compressed_format = true;
3869 const bool compressed_with_comments = use_compressed_format && (AbstractDisassembler::show_comment() ||
3870 AbstractDisassembler::show_block_comment());
3871 #else
3872 const bool use_compressed_format = Disassembler::is_abstract();
3873 const bool compressed_with_comments = use_compressed_format && (AbstractDisassembler::show_comment() ||
3874 AbstractDisassembler::show_block_comment());
3875 #endif
3876
3877 st->cr();
3878 this->print_on(st);
3879 st->cr();
3880
3881 #if defined(SUPPORT_ASSEMBLY)
3882 //----------------------------------
3883 //---< Print real disassembly >---
3884 //----------------------------------
3885 if (! use_compressed_format) {
3886 st->print_cr("[Disassembly]");
3887 Disassembler::decode(const_cast<nmethod*>(this), st);
3888 st->bol();
3889 st->print_cr("[/Disassembly]");
3890 return;
3891 }
3892 #endif
3893
3894 #if defined(SUPPORT_ABSTRACT_ASSEMBLY)
3895
3896 // Compressed undisassembled disassembly format.
3897 // The following status values are defined/supported:
3898 // = 0 - currently at bol() position, nothing printed yet on current line.
3899 // = 1 - currently at position after print_location().
3900 // > 1 - in the midst of printing instruction stream bytes.
3901 int compressed_format_idx = 0;
3902 int code_comment_column = 0;
3903 const int instr_maxlen = Assembler::instr_maxlen();
3904 const uint tabspacing = 8;
3905 unsigned char* start = this->code_begin();
3906 unsigned char* p = this->code_begin();
3907 unsigned char* end = this->code_end();
3908 unsigned char* pss = p; // start of a code section (used for offsets)
3909
3910 if ((start == nullptr) || (end == nullptr)) {
3911 st->print_cr("PrintAssembly not possible due to uninitialized section pointers");
3912 return;
3913 }
3914 #endif
3915
3916 #if defined(SUPPORT_ABSTRACT_ASSEMBLY)
3917 //---< plain abstract disassembly, no comments or anything, just section headers >---
3918 if (use_compressed_format && ! compressed_with_comments) {
3919 const_cast<nmethod*>(this)->print_constant_pool(st);
3920
3921 st->bol();
3922 st->cr();
3923 st->print_cr("Loading hsdis library failed, undisassembled code is shown in MachCode section");
3924 //---< Open the output (Marker for post-mortem disassembler) >---
3925 st->print_cr("[MachCode]");
3926 const char* header = nullptr;
3927 address p0 = p;
3928 while (p < end) {
3929 address pp = p;
3930 while ((p < end) && (header == nullptr)) {
3931 header = nmethod_section_label(p);
3932 pp = p;
3933 p += Assembler::instr_len(p);
3934 }
3935 if (pp > p0) {
3936 AbstractDisassembler::decode_range_abstract(p0, pp, start, end, st, Assembler::instr_maxlen());
3937 p0 = pp;
3938 p = pp;
3939 header = nullptr;
3940 } else if (header != nullptr) {
3941 st->bol();
3942 st->print_cr("%s", header);
3943 header = nullptr;
3944 }
3945 }
3946 //---< Close the output (Marker for post-mortem disassembler) >---
3947 st->bol();
3948 st->print_cr("[/MachCode]");
3949 return;
3950 }
3951 #endif
3952
3953 #if defined(SUPPORT_ABSTRACT_ASSEMBLY)
3954 //---< abstract disassembly with comments and section headers merged in >---
3955 if (compressed_with_comments) {
3956 const_cast<nmethod*>(this)->print_constant_pool(st);
3957
3958 st->bol();
3959 st->cr();
3960 st->print_cr("Loading hsdis library failed, undisassembled code is shown in MachCode section");
3961 //---< Open the output (Marker for post-mortem disassembler) >---
3962 st->print_cr("[MachCode]");
3963 while ((p < end) && (p != nullptr)) {
3964 const int instruction_size_in_bytes = Assembler::instr_len(p);
3965
3966 //---< Block comments for nmethod. Interrupts instruction stream, if any. >---
3967 // Outputs a bol() before and a cr() after, but only if a comment is printed.
3968 // Prints nmethod_section_label as well.
3969 if (AbstractDisassembler::show_block_comment()) {
3970 print_block_comment(st, p);
3971 if (st->position() == 0) {
3972 compressed_format_idx = 0;
3973 }
3974 }
3975
3976 //---< New location information after line break >---
3977 if (compressed_format_idx == 0) {
3978 code_comment_column = Disassembler::print_location(p, pss, end, st, false, false);
3979 compressed_format_idx = 1;
3980 }
3981
3982 //---< Code comment for current instruction. Address range [p..(p+len)) >---
3983 unsigned char* p_end = p + (ssize_t)instruction_size_in_bytes;
3984 S390_ONLY(if (p_end > end) p_end = end;) // avoid getting past the end
3985
3986 if (AbstractDisassembler::show_comment() && const_cast<nmethod*>(this)->has_code_comment(p, p_end)) {
3987 //---< interrupt instruction byte stream for code comment >---
3988 if (compressed_format_idx > 1) {
3989 st->cr(); // interrupt byte stream
3990 st->cr(); // add an empty line
3991 code_comment_column = Disassembler::print_location(p, pss, end, st, false, false);
3992 }
3993 const_cast<nmethod*>(this)->print_code_comment_on(st, code_comment_column, p, p_end );
3994 st->bol();
3995 compressed_format_idx = 0;
3996 }
3997
3998 //---< New location information after line break >---
3999 if (compressed_format_idx == 0) {
4000 code_comment_column = Disassembler::print_location(p, pss, end, st, false, false);
4001 compressed_format_idx = 1;
4002 }
4003
4004 //---< Nicely align instructions for readability >---
4005 if (compressed_format_idx > 1) {
4006 Disassembler::print_delimiter(st);
4007 }
4008
4009 //---< Now, finally, print the actual instruction bytes >---
4010 unsigned char* p0 = p;
4011 p = Disassembler::decode_instruction_abstract(p, st, instruction_size_in_bytes, instr_maxlen);
4012 compressed_format_idx += (int)(p - p0);
4013
4014 if (Disassembler::start_newline(compressed_format_idx-1)) {
4015 st->cr();
4016 compressed_format_idx = 0;
4017 }
4018 }
4019 //---< Close the output (Marker for post-mortem disassembler) >---
4020 st->bol();
4021 st->print_cr("[/MachCode]");
4022 return;
4023 }
4024 #endif
4025 }
4026
4027 #if defined(SUPPORT_ASSEMBLY) || defined(SUPPORT_ABSTRACT_ASSEMBLY)
4028
4029 const char* nmethod::reloc_string_for(u_char* begin, u_char* end) {
4030 RelocIterator iter(this, begin, end);
4031 bool have_one = false;
4032 while (iter.next()) {
4033 have_one = true;
4034 switch (iter.type()) {
4035 case relocInfo::none: {
4036 // Skip it and check next
4037 break;
4038 }
4039 case relocInfo::oop_type: {
4040 // Get a non-resizable resource-allocated stringStream.
4041 // Our callees make use of (nested) ResourceMarks.
4042 stringStream st(NEW_RESOURCE_ARRAY(char, 1024), 1024);
4043 oop_Relocation* r = iter.oop_reloc();
4044 oop obj = r->oop_value();
4045 st.print("oop(");
4046 if (obj == nullptr) st.print("nullptr");
4047 else obj->print_value_on(&st);
4048 st.print(")");
4049 return st.as_string();
4050 }
4051 case relocInfo::metadata_type: {
4052 stringStream st;
4053 metadata_Relocation* r = iter.metadata_reloc();
4054 Metadata* obj = r->metadata_value();
4055 st.print("metadata(");
4056 if (obj == nullptr) st.print("nullptr");
4057 else obj->print_value_on(&st);
4058 st.print(")");
4059 return st.as_string();
4060 }
4061 case relocInfo::runtime_call_type:
4062 case relocInfo::runtime_call_w_cp_type: {
4063 stringStream st;
4064 st.print("runtime_call");
4065 CallRelocation* r = (CallRelocation*)iter.reloc();
4066 address dest = r->destination();
4067 if (StubRoutines::contains(dest)) {
4068 StubCodeDesc* desc = StubCodeDesc::desc_for(dest);
4069 if (desc == nullptr) {
4070 desc = StubCodeDesc::desc_for(dest + frame::pc_return_offset);
4071 }
4072 if (desc != nullptr) {
4073 st.print(" Stub::%s", desc->name());
4074 return st.as_string();
4075 }
4076 }
4077 CodeBlob* cb = CodeCache::find_blob(dest);
4078 if (cb != nullptr) {
4079 st.print(" %s", cb->name());
4080 } else {
4081 ResourceMark rm;
4082 const int buflen = 1024;
4083 char* buf = NEW_RESOURCE_ARRAY(char, buflen);
4084 int offset;
4085 if (os::dll_address_to_function_name(dest, buf, buflen, &offset)) {
4086 st.print(" %s", buf);
4087 if (offset != 0) {
4088 st.print("+%d", offset);
4089 }
4090 }
4091 }
4092 return st.as_string();
4093 }
4094 case relocInfo::virtual_call_type: {
4095 stringStream st;
4096 st.print_raw("virtual_call");
4097 virtual_call_Relocation* r = iter.virtual_call_reloc();
4098 Method* m = r->method_value();
4099 if (m != nullptr) {
4100 assert(m->is_method(), "");
4101 m->print_short_name(&st);
4102 }
4103 return st.as_string();
4104 }
4105 case relocInfo::opt_virtual_call_type: {
4106 stringStream st;
4107 st.print_raw("optimized virtual_call");
4108 opt_virtual_call_Relocation* r = iter.opt_virtual_call_reloc();
4109 Method* m = r->method_value();
4110 if (m != nullptr) {
4111 assert(m->is_method(), "");
4112 m->print_short_name(&st);
4113 }
4114 return st.as_string();
4115 }
4116 case relocInfo::static_call_type: {
4117 stringStream st;
4118 st.print_raw("static_call");
4119 static_call_Relocation* r = iter.static_call_reloc();
4120 Method* m = r->method_value();
4121 if (m != nullptr) {
4122 assert(m->is_method(), "");
4123 m->print_short_name(&st);
4124 }
4125 return st.as_string();
4126 }
4127 case relocInfo::static_stub_type: return "static_stub";
4128 case relocInfo::external_word_type: return "external_word";
4129 case relocInfo::internal_word_type: return "internal_word";
4130 case relocInfo::section_word_type: return "section_word";
4131 case relocInfo::poll_type: return "poll";
4132 case relocInfo::poll_return_type: return "poll_return";
4133 case relocInfo::trampoline_stub_type: return "trampoline_stub";
4134 case relocInfo::entry_guard_type: return "entry_guard";
4135 case relocInfo::post_call_nop_type: return "post_call_nop";
4136 case relocInfo::barrier_type: {
4137 barrier_Relocation* const reloc = iter.barrier_reloc();
4138 stringStream st;
4139 st.print("barrier format=%d", reloc->format());
4140 return st.as_string();
4141 }
4142
4143 case relocInfo::type_mask: return "type_bit_mask";
4144
4145 default: {
4146 stringStream st;
4147 st.print("unknown relocInfo=%d", (int) iter.type());
4148 return st.as_string();
4149 }
4150 }
4151 }
4152 return have_one ? "other" : nullptr;
4153 }
4154
4155 // Return the last scope in (begin..end]
4156 ScopeDesc* nmethod::scope_desc_in(address begin, address end) {
4157 PcDesc* p = pc_desc_near(begin+1);
4158 if (p != nullptr && p->real_pc(this) <= end) {
4159 return new ScopeDesc(this, p);
4160 }
4161 return nullptr;
4162 }
4163
4164 const char* nmethod::nmethod_section_label(address pos) const {
4165 const char* label = nullptr;
4166 if (pos == code_begin()) label = "[Instructions begin]";
4167 if (pos == entry_point()) label = "[Entry Point]";
4168 if (pos == verified_entry_point()) label = "[Verified Entry Point]";
4169 if (pos == consts_begin() && pos != insts_begin()) label = "[Constants]";
4170 // Check stub_code before checking exception_handler or deopt_handler.
4171 if (pos == this->stub_begin()) label = "[Stub Code]";
4172 if (JVMCI_ONLY(_exception_offset >= 0 &&) pos == exception_begin()) label = "[Exception Handler]";
4173 if (JVMCI_ONLY(_deopt_handler_offset != -1 &&) pos == deopt_handler_begin()) label = "[Deopt Handler Code]";
4174 return label;
4175 }
4176
4177 void nmethod::print_nmethod_labels(outputStream* stream, address block_begin, bool print_section_labels) const {
4178 if (print_section_labels) {
4179 const char* label = nmethod_section_label(block_begin);
4180 if (label != nullptr) {
4181 stream->bol();
4182 stream->print_cr("%s", label);
4183 }
4184 }
4185
4186 if (block_begin == entry_point()) {
4187 Method* m = method();
4188 if (m != nullptr) {
4189 stream->print(" # ");
4190 m->print_value_on(stream);
4191 stream->cr();
4192 }
4193 if (m != nullptr && !is_osr_method()) {
4194 ResourceMark rm;
4195 int sizeargs = m->size_of_parameters();
4196 BasicType* sig_bt = NEW_RESOURCE_ARRAY(BasicType, sizeargs);
4197 VMRegPair* regs = NEW_RESOURCE_ARRAY(VMRegPair, sizeargs);
4198 {
4199 int sig_index = 0;
4200 if (!m->is_static())
4201 sig_bt[sig_index++] = T_OBJECT; // 'this'
4202 for (SignatureStream ss(m->signature()); !ss.at_return_type(); ss.next()) {
4203 BasicType t = ss.type();
4204 sig_bt[sig_index++] = t;
4205 if (type2size[t] == 2) {
4206 sig_bt[sig_index++] = T_VOID;
4207 } else {
4208 assert(type2size[t] == 1, "size is 1 or 2");
4209 }
4210 }
4211 assert(sig_index == sizeargs, "");
4212 }
4213 const char* spname = "sp"; // make arch-specific?
4214 SharedRuntime::java_calling_convention(sig_bt, regs, sizeargs);
4215 int stack_slot_offset = this->frame_size() * wordSize;
4216 int tab1 = 14, tab2 = 24;
4217 int sig_index = 0;
4218 int arg_index = (m->is_static() ? 0 : -1);
4219 bool did_old_sp = false;
4220 for (SignatureStream ss(m->signature()); !ss.at_return_type(); ) {
4221 bool at_this = (arg_index == -1);
4222 bool at_old_sp = false;
4223 BasicType t = (at_this ? T_OBJECT : ss.type());
4224 assert(t == sig_bt[sig_index], "sigs in sync");
4225 if (at_this)
4226 stream->print(" # this: ");
4227 else
4228 stream->print(" # parm%d: ", arg_index);
4229 stream->move_to(tab1);
4230 VMReg fst = regs[sig_index].first();
4231 VMReg snd = regs[sig_index].second();
4232 if (fst->is_reg()) {
4233 stream->print("%s", fst->name());
4234 if (snd->is_valid()) {
4235 stream->print(":%s", snd->name());
4236 }
4237 } else if (fst->is_stack()) {
4238 int offset = fst->reg2stack() * VMRegImpl::stack_slot_size + stack_slot_offset;
4239 if (offset == stack_slot_offset) at_old_sp = true;
4240 stream->print("[%s+0x%x]", spname, offset);
4241 } else {
4242 stream->print("reg%d:%d??", (int)(intptr_t)fst, (int)(intptr_t)snd);
4243 }
4244 stream->print(" ");
4245 stream->move_to(tab2);
4246 stream->print("= ");
4247 if (at_this) {
4248 m->method_holder()->print_value_on(stream);
4249 } else {
4250 bool did_name = false;
4251 if (!at_this && ss.is_reference()) {
4252 Symbol* name = ss.as_symbol();
4253 name->print_value_on(stream);
4254 did_name = true;
4255 }
4256 if (!did_name)
4257 stream->print("%s", type2name(t));
4258 }
4259 if (at_old_sp) {
4260 stream->print(" (%s of caller)", spname);
4261 did_old_sp = true;
4262 }
4263 stream->cr();
4264 sig_index += type2size[t];
4265 arg_index += 1;
4266 if (!at_this) ss.next();
4267 }
4268 if (!did_old_sp) {
4269 stream->print(" # ");
4270 stream->move_to(tab1);
4271 stream->print("[%s+0x%x]", spname, stack_slot_offset);
4272 stream->print(" (%s of caller)", spname);
4273 stream->cr();
4274 }
4275 }
4276 }
4277 }
4278
4279 // Returns whether this nmethod has code comments.
4280 bool nmethod::has_code_comment(address begin, address end) {
4281 // scopes?
4282 ScopeDesc* sd = scope_desc_in(begin, end);
4283 if (sd != nullptr) return true;
4284
4285 // relocations?
4286 const char* str = reloc_string_for(begin, end);
4287 if (str != nullptr) return true;
4288
4289 // implicit exceptions?
4290 int cont_offset = ImplicitExceptionTable(this).continuation_offset((uint)(begin - code_begin()));
4291 if (cont_offset != 0) return true;
4292
4293 return false;
4294 }
4295
4296 void nmethod::print_code_comment_on(outputStream* st, int column, address begin, address end) {
4297 ImplicitExceptionTable implicit_table(this);
4298 int pc_offset = (int)(begin - code_begin());
4299 int cont_offset = implicit_table.continuation_offset(pc_offset);
4300 bool oop_map_required = false;
4301 if (cont_offset != 0) {
4302 st->move_to(column, 6, 0);
4303 if (pc_offset == cont_offset) {
4304 st->print("; implicit exception: deoptimizes");
4305 oop_map_required = true;
4306 } else {
4307 st->print("; implicit exception: dispatches to " INTPTR_FORMAT, p2i(code_begin() + cont_offset));
4308 }
4309 }
4310
4311 // Find an oopmap in (begin, end]. We use the odd half-closed
4312 // interval so that oop maps and scope descs which are tied to the
4313 // byte after a call are printed with the call itself. OopMaps
4314 // associated with implicit exceptions are printed with the implicit
4315 // instruction.
4316 address base = code_begin();
4317 ImmutableOopMapSet* oms = oop_maps();
4318 if (oms != nullptr) {
4319 for (int i = 0, imax = oms->count(); i < imax; i++) {
4320 const ImmutableOopMapPair* pair = oms->pair_at(i);
4321 const ImmutableOopMap* om = pair->get_from(oms);
4322 address pc = base + pair->pc_offset();
4323 if (pc >= begin) {
4324 #if INCLUDE_JVMCI
4325 bool is_implicit_deopt = implicit_table.continuation_offset(pair->pc_offset()) == (uint) pair->pc_offset();
4326 #else
4327 bool is_implicit_deopt = false;
4328 #endif
4329 if (is_implicit_deopt ? pc == begin : pc > begin && pc <= end) {
4330 st->move_to(column, 6, 0);
4331 st->print("; ");
4332 om->print_on(st);
4333 oop_map_required = false;
4334 }
4335 }
4336 if (pc > end) {
4337 break;
4338 }
4339 }
4340 }
4341 assert(!oop_map_required, "missed oopmap");
4342
4343 Thread* thread = Thread::current();
4344
4345 // Print any debug info present at this pc.
4346 ScopeDesc* sd = scope_desc_in(begin, end);
4347 if (sd != nullptr) {
4348 st->move_to(column, 6, 0);
4349 if (sd->bci() == SynchronizationEntryBCI) {
4350 st->print(";*synchronization entry");
4351 } else if (sd->bci() == AfterBci) {
4352 st->print(";* method exit (unlocked if synchronized)");
4353 } else if (sd->bci() == UnwindBci) {
4354 st->print(";* unwind (locked if synchronized)");
4355 } else if (sd->bci() == AfterExceptionBci) {
4356 st->print(";* unwind (unlocked if synchronized)");
4357 } else if (sd->bci() == UnknownBci) {
4358 st->print(";* unknown");
4359 } else if (sd->bci() == InvalidFrameStateBci) {
4360 st->print(";* invalid frame state");
4361 } else {
4362 if (sd->method() == nullptr) {
4363 st->print("method is nullptr");
4364 } else if (sd->method()->is_native()) {
4365 st->print("method is native");
4366 } else {
4367 Bytecodes::Code bc = sd->method()->java_code_at(sd->bci());
4368 st->print(";*%s", Bytecodes::name(bc));
4369 switch (bc) {
4370 case Bytecodes::_invokevirtual:
4371 case Bytecodes::_invokespecial:
4372 case Bytecodes::_invokestatic:
4373 case Bytecodes::_invokeinterface:
4374 {
4375 Bytecode_invoke invoke(methodHandle(thread, sd->method()), sd->bci());
4376 st->print(" ");
4377 if (invoke.name() != nullptr)
4378 invoke.name()->print_symbol_on(st);
4379 else
4380 st->print("<UNKNOWN>");
4381 break;
4382 }
4383 case Bytecodes::_getfield:
4384 case Bytecodes::_putfield:
4385 case Bytecodes::_getstatic:
4386 case Bytecodes::_putstatic:
4387 {
4388 Bytecode_field field(methodHandle(thread, sd->method()), sd->bci());
4389 st->print(" ");
4390 if (field.name() != nullptr)
4391 field.name()->print_symbol_on(st);
4392 else
4393 st->print("<UNKNOWN>");
4394 }
4395 default:
4396 break;
4397 }
4398 }
4399 st->print(" {reexecute=%d rethrow=%d return_oop=%d}", sd->should_reexecute(), sd->rethrow_exception(), sd->return_oop());
4400 }
4401
4402 // Print all scopes
4403 for (;sd != nullptr; sd = sd->sender()) {
4404 st->move_to(column, 6, 0);
4405 st->print("; -");
4406 if (sd->should_reexecute()) {
4407 st->print(" (reexecute)");
4408 }
4409 if (sd->method() == nullptr) {
4410 st->print("method is nullptr");
4411 } else {
4412 sd->method()->print_short_name(st);
4413 }
4414 int lineno = sd->method()->line_number_from_bci(sd->bci());
4415 if (lineno != -1) {
4416 st->print("@%d (line %d)", sd->bci(), lineno);
4417 } else {
4418 st->print("@%d", sd->bci());
4419 }
4420 st->cr();
4421 }
4422 }
4423
4424 // Print relocation information
4425 // Prevent memory leak: allocating without ResourceMark.
4426 ResourceMark rm;
4427 const char* str = reloc_string_for(begin, end);
4428 if (str != nullptr) {
4429 if (sd != nullptr) st->cr();
4430 st->move_to(column, 6, 0);
4431 st->print("; {%s}", str);
4432 }
4433 }
4434
4435 #endif
4436
4437 address nmethod::call_instruction_address(address pc) const {
4438 if (NativeCall::is_call_before(pc)) {
4439 NativeCall *ncall = nativeCall_before(pc);
4440 return ncall->instruction_address();
4441 }
4442 return nullptr;
4443 }
4444
4445 void nmethod::print_value_on_impl(outputStream* st) const {
4446 st->print_cr("nmethod");
4447 #if defined(SUPPORT_DATA_STRUCTS)
4448 print_on_with_msg(st, nullptr);
4449 #endif
4450 }
4451
4452 #ifndef PRODUCT
4453
4454 void nmethod::print_calls(outputStream* st) {
4455 RelocIterator iter(this);
4456 while (iter.next()) {
4457 switch (iter.type()) {
4458 case relocInfo::virtual_call_type: {
4459 CompiledICLocker ml_verify(this);
4460 CompiledIC_at(&iter)->print();
4461 break;
4462 }
4463 case relocInfo::static_call_type:
4464 case relocInfo::opt_virtual_call_type:
4465 st->print_cr("Direct call at " INTPTR_FORMAT, p2i(iter.reloc()->addr()));
4466 CompiledDirectCall::at(iter.reloc())->print();
4467 break;
4468 default:
4469 break;
4470 }
4471 }
4472 }
4473
4474 void nmethod::print_statistics() {
4475 ttyLocker ttyl;
4476 if (xtty != nullptr) xtty->head("statistics type='nmethod'");
4477 native_nmethod_stats.print_native_nmethod_stats();
4478 #ifdef COMPILER1
4479 c1_java_nmethod_stats.print_nmethod_stats("C1");
4480 #endif
4481 #ifdef COMPILER2
4482 c2_java_nmethod_stats.print_nmethod_stats("C2");
4483 #endif
4484 #if INCLUDE_JVMCI
4485 jvmci_java_nmethod_stats.print_nmethod_stats("JVMCI");
4486 #endif
4487 unknown_java_nmethod_stats.print_nmethod_stats("Unknown");
4488 DebugInformationRecorder::print_statistics();
4489 pc_nmethod_stats.print_pc_stats();
4490 Dependencies::print_statistics();
4491 ExternalsRecorder::print_statistics();
4492 if (xtty != nullptr) xtty->tail("statistics");
4493 }
4494
4495 #endif // !PRODUCT
4496
4497 #if INCLUDE_JVMCI
4498 void nmethod::update_speculation(JavaThread* thread) {
4499 jlong speculation = thread->pending_failed_speculation();
4500 if (speculation != 0) {
4501 guarantee(jvmci_nmethod_data() != nullptr, "failed speculation in nmethod without failed speculation list");
4502 jvmci_nmethod_data()->add_failed_speculation(this, speculation);
4503 thread->set_pending_failed_speculation(0);
4504 }
4505 }
4506
4507 const char* nmethod::jvmci_name() {
4508 if (jvmci_nmethod_data() != nullptr) {
4509 return jvmci_nmethod_data()->name();
4510 }
4511 return nullptr;
4512 }
4513
4514 bool nmethod::jvmci_skip_profile_deopt() const {
4515 return jvmci_nmethod_data() != nullptr && !jvmci_nmethod_data()->profile_deopt();
4516 }
4517 #endif
4518
4519 void nmethod::prepare_for_archiving_impl() {
4520 CodeBlob::prepare_for_archiving_impl();
4521 _deoptimization_generation = 0;
4522 _gc_epoch = 0;
4523 _method_profiling_count = 0;
4524 _osr_link = nullptr;
4525 _method = nullptr;
4526 _immutable_data = nullptr;
4527 _pc_desc_container = nullptr;
4528 _exception_cache = nullptr;
4529 _gc_data = nullptr;
4530 _oops_do_mark_link = nullptr;
4531 _compiled_ic_data = nullptr;
4532 _osr_entry_point = nullptr;
4533 _compile_id = -1;
4534 _deoptimization_status = not_marked;
4535 _is_unloading_state = 0;
4536 _state = not_installed;
4537 }