1 /*
2 * Copyright (c) 1997, 2025, Oracle and/or its affiliates. All rights reserved.
3 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
4 *
5 * This code is free software; you can redistribute it and/or modify it
6 * under the terms of the GNU General Public License version 2 only, as
7 * published by the Free Software Foundation.
8 *
9 * This code is distributed in the hope that it will be useful, but WITHOUT
10 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
11 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
12 * version 2 for more details (a copy is included in the LICENSE file that
13 * accompanied this code).
14 *
15 * You should have received a copy of the GNU General Public License version
16 * 2 along with this work; if not, write to the Free Software Foundation,
17 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
18 *
19 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
20 * or visit www.oracle.com if you need additional information or have any
21 * questions.
22 *
23 */
24
25 #include "asm/assembler.inline.hpp"
26 #include "code/codeCache.hpp"
27 #include "code/compiledIC.hpp"
28 #include "code/dependencies.hpp"
29 #include "code/nativeInst.hpp"
30 #include "code/nmethod.inline.hpp"
31 #include "code/scopeDesc.hpp"
32 #include "compiler/abstractCompiler.hpp"
33 #include "compiler/compilationLog.hpp"
34 #include "compiler/compileBroker.hpp"
35 #include "compiler/compileLog.hpp"
36 #include "compiler/compileTask.hpp"
37 #include "compiler/compilerDirectives.hpp"
38 #include "compiler/compilerOracle.hpp"
39 #include "compiler/directivesParser.hpp"
40 #include "compiler/disassembler.hpp"
41 #include "compiler/oopMap.inline.hpp"
42 #include "gc/shared/barrierSet.hpp"
43 #include "gc/shared/barrierSetNMethod.hpp"
44 #include "gc/shared/classUnloadingContext.hpp"
45 #include "gc/shared/collectedHeap.hpp"
46 #include "interpreter/bytecode.inline.hpp"
47 #include "jvm.h"
48 #include "logging/log.hpp"
49 #include "logging/logStream.hpp"
50 #include "memory/allocation.inline.hpp"
51 #include "memory/resourceArea.hpp"
52 #include "memory/universe.hpp"
53 #include "oops/access.inline.hpp"
54 #include "oops/klass.inline.hpp"
55 #include "oops/method.inline.hpp"
56 #include "oops/methodData.hpp"
57 #include "oops/oop.inline.hpp"
58 #include "oops/weakHandle.inline.hpp"
59 #include "prims/jvmtiImpl.hpp"
60 #include "prims/jvmtiThreadState.hpp"
61 #include "prims/methodHandles.hpp"
62 #include "runtime/continuation.hpp"
63 #include "runtime/atomic.hpp"
64 #include "runtime/deoptimization.hpp"
65 #include "runtime/flags/flagSetting.hpp"
66 #include "runtime/frame.inline.hpp"
67 #include "runtime/handles.inline.hpp"
68 #include "runtime/jniHandles.inline.hpp"
69 #include "runtime/orderAccess.hpp"
70 #include "runtime/os.hpp"
71 #include "runtime/safepointVerifiers.hpp"
72 #include "runtime/serviceThread.hpp"
73 #include "runtime/sharedRuntime.hpp"
74 #include "runtime/signature.hpp"
75 #include "runtime/threadWXSetters.inline.hpp"
76 #include "runtime/vmThread.hpp"
77 #include "utilities/align.hpp"
78 #include "utilities/copy.hpp"
79 #include "utilities/dtrace.hpp"
80 #include "utilities/events.hpp"
81 #include "utilities/globalDefinitions.hpp"
82 #include "utilities/resourceHash.hpp"
83 #include "utilities/xmlstream.hpp"
84 #if INCLUDE_JVMCI
85 #include "jvmci/jvmciRuntime.hpp"
86 #endif
87
88 #ifdef DTRACE_ENABLED
89
90 // Only bother with this argument setup if dtrace is available
91
92 #define DTRACE_METHOD_UNLOAD_PROBE(method) \
93 { \
94 Method* m = (method); \
95 if (m != nullptr) { \
96 Symbol* klass_name = m->klass_name(); \
97 Symbol* name = m->name(); \
98 Symbol* signature = m->signature(); \
99 HOTSPOT_COMPILED_METHOD_UNLOAD( \
100 (char *) klass_name->bytes(), klass_name->utf8_length(), \
101 (char *) name->bytes(), name->utf8_length(), \
102 (char *) signature->bytes(), signature->utf8_length()); \
103 } \
104 }
105
106 #else // ndef DTRACE_ENABLED
107
108 #define DTRACE_METHOD_UNLOAD_PROBE(method)
109
110 #endif
111
112 // Cast from int value to narrow type
113 #define CHECKED_CAST(result, T, thing) \
114 result = static_cast<T>(thing); \
115 guarantee(static_cast<int>(result) == thing, "failed: %d != %d", static_cast<int>(result), thing);
116
117 //---------------------------------------------------------------------------------
118 // NMethod statistics
119 // They are printed under various flags, including:
120 // PrintC1Statistics, PrintOptoStatistics, LogVMOutput, and LogCompilation.
121 // (In the latter two cases, they like other stats are printed to the log only.)
122
123 #ifndef PRODUCT
124 // These variables are put into one block to reduce relocations
125 // and make it simpler to print from the debugger.
126 struct java_nmethod_stats_struct {
127 uint nmethod_count;
128 uint total_nm_size;
129 uint total_immut_size;
130 uint total_mut_size;
131 uint relocation_size;
132 uint consts_size;
133 uint insts_size;
134 uint stub_size;
135 uint oops_size;
136 uint metadata_size;
137 uint dependencies_size;
138 uint nul_chk_table_size;
139 uint handler_table_size;
140 uint scopes_pcs_size;
141 uint scopes_data_size;
142 #if INCLUDE_JVMCI
143 uint speculations_size;
144 uint jvmci_data_size;
145 #endif
146
147 void note_nmethod(nmethod* nm) {
148 nmethod_count += 1;
149 total_nm_size += nm->size();
150 total_immut_size += nm->immutable_data_size();
151 total_mut_size += nm->mutable_data_size();
152 relocation_size += nm->relocation_size();
153 consts_size += nm->consts_size();
154 insts_size += nm->insts_size();
155 stub_size += nm->stub_size();
156 oops_size += nm->oops_size();
157 metadata_size += nm->metadata_size();
158 scopes_data_size += nm->scopes_data_size();
159 scopes_pcs_size += nm->scopes_pcs_size();
160 dependencies_size += nm->dependencies_size();
161 handler_table_size += nm->handler_table_size();
162 nul_chk_table_size += nm->nul_chk_table_size();
163 #if INCLUDE_JVMCI
164 speculations_size += nm->speculations_size();
165 jvmci_data_size += nm->jvmci_data_size();
166 #endif
167 }
168 void print_nmethod_stats(const char* name) {
169 if (nmethod_count == 0) return;
170 tty->print_cr("Statistics for %u bytecoded nmethods for %s:", nmethod_count, name);
171 uint total_size = total_nm_size + total_immut_size + total_mut_size;
172 if (total_nm_size != 0) {
173 tty->print_cr(" total size = %u (100%%)", total_size);
174 tty->print_cr(" in CodeCache = %u (%f%%)", total_nm_size, (total_nm_size * 100.0f)/total_size);
175 }
176 uint header_size = (uint)(nmethod_count * sizeof(nmethod));
177 if (nmethod_count != 0) {
178 tty->print_cr(" header = %u (%f%%)", header_size, (header_size * 100.0f)/total_nm_size);
179 }
180 if (consts_size != 0) {
181 tty->print_cr(" constants = %u (%f%%)", consts_size, (consts_size * 100.0f)/total_nm_size);
182 }
183 if (insts_size != 0) {
184 tty->print_cr(" main code = %u (%f%%)", insts_size, (insts_size * 100.0f)/total_nm_size);
185 }
186 if (stub_size != 0) {
187 tty->print_cr(" stub code = %u (%f%%)", stub_size, (stub_size * 100.0f)/total_nm_size);
188 }
189 if (oops_size != 0) {
190 tty->print_cr(" oops = %u (%f%%)", oops_size, (oops_size * 100.0f)/total_nm_size);
191 }
192 if (total_mut_size != 0) {
193 tty->print_cr(" mutable data = %u (%f%%)", total_mut_size, (total_mut_size * 100.0f)/total_size);
194 }
195 if (relocation_size != 0) {
196 tty->print_cr(" relocation = %u (%f%%)", relocation_size, (relocation_size * 100.0f)/total_mut_size);
197 }
198 if (metadata_size != 0) {
199 tty->print_cr(" metadata = %u (%f%%)", metadata_size, (metadata_size * 100.0f)/total_mut_size);
200 }
201 #if INCLUDE_JVMCI
202 if (jvmci_data_size != 0) {
203 tty->print_cr(" JVMCI data = %u (%f%%)", jvmci_data_size, (jvmci_data_size * 100.0f)/total_mut_size);
204 }
205 #endif
206 if (total_immut_size != 0) {
207 tty->print_cr(" immutable data = %u (%f%%)", total_immut_size, (total_immut_size * 100.0f)/total_size);
208 }
209 if (dependencies_size != 0) {
210 tty->print_cr(" dependencies = %u (%f%%)", dependencies_size, (dependencies_size * 100.0f)/total_immut_size);
211 }
212 if (nul_chk_table_size != 0) {
213 tty->print_cr(" nul chk table = %u (%f%%)", nul_chk_table_size, (nul_chk_table_size * 100.0f)/total_immut_size);
214 }
215 if (handler_table_size != 0) {
216 tty->print_cr(" handler table = %u (%f%%)", handler_table_size, (handler_table_size * 100.0f)/total_immut_size);
217 }
218 if (scopes_pcs_size != 0) {
219 tty->print_cr(" scopes pcs = %u (%f%%)", scopes_pcs_size, (scopes_pcs_size * 100.0f)/total_immut_size);
220 }
221 if (scopes_data_size != 0) {
222 tty->print_cr(" scopes data = %u (%f%%)", scopes_data_size, (scopes_data_size * 100.0f)/total_immut_size);
223 }
224 #if INCLUDE_JVMCI
225 if (speculations_size != 0) {
226 tty->print_cr(" speculations = %u (%f%%)", speculations_size, (speculations_size * 100.0f)/total_immut_size);
227 }
228 #endif
229 }
230 };
231
232 struct native_nmethod_stats_struct {
233 uint native_nmethod_count;
234 uint native_total_size;
235 uint native_relocation_size;
236 uint native_insts_size;
237 uint native_oops_size;
238 uint native_metadata_size;
239 void note_native_nmethod(nmethod* nm) {
240 native_nmethod_count += 1;
241 native_total_size += nm->size();
242 native_relocation_size += nm->relocation_size();
243 native_insts_size += nm->insts_size();
244 native_oops_size += nm->oops_size();
245 native_metadata_size += nm->metadata_size();
246 }
247 void print_native_nmethod_stats() {
248 if (native_nmethod_count == 0) return;
249 tty->print_cr("Statistics for %u native nmethods:", native_nmethod_count);
250 if (native_total_size != 0) tty->print_cr(" N. total size = %u", native_total_size);
251 if (native_relocation_size != 0) tty->print_cr(" N. relocation = %u", native_relocation_size);
252 if (native_insts_size != 0) tty->print_cr(" N. main code = %u", native_insts_size);
253 if (native_oops_size != 0) tty->print_cr(" N. oops = %u", native_oops_size);
254 if (native_metadata_size != 0) tty->print_cr(" N. metadata = %u", native_metadata_size);
255 }
256 };
257
258 struct pc_nmethod_stats_struct {
259 uint pc_desc_init; // number of initialization of cache (= number of caches)
260 uint pc_desc_queries; // queries to nmethod::find_pc_desc
261 uint pc_desc_approx; // number of those which have approximate true
262 uint pc_desc_repeats; // number of _pc_descs[0] hits
263 uint pc_desc_hits; // number of LRU cache hits
264 uint pc_desc_tests; // total number of PcDesc examinations
265 uint pc_desc_searches; // total number of quasi-binary search steps
266 uint pc_desc_adds; // number of LUR cache insertions
267
268 void print_pc_stats() {
269 tty->print_cr("PcDesc Statistics: %u queries, %.2f comparisons per query",
270 pc_desc_queries,
271 (double)(pc_desc_tests + pc_desc_searches)
272 / pc_desc_queries);
273 tty->print_cr(" caches=%d queries=%u/%u, hits=%u+%u, tests=%u+%u, adds=%u",
274 pc_desc_init,
275 pc_desc_queries, pc_desc_approx,
276 pc_desc_repeats, pc_desc_hits,
277 pc_desc_tests, pc_desc_searches, pc_desc_adds);
278 }
279 };
280
281 #ifdef COMPILER1
282 static java_nmethod_stats_struct c1_java_nmethod_stats;
283 #endif
284 #ifdef COMPILER2
285 static java_nmethod_stats_struct c2_java_nmethod_stats;
286 #endif
287 #if INCLUDE_JVMCI
288 static java_nmethod_stats_struct jvmci_java_nmethod_stats;
289 #endif
290 static java_nmethod_stats_struct unknown_java_nmethod_stats;
291
292 static native_nmethod_stats_struct native_nmethod_stats;
293 static pc_nmethod_stats_struct pc_nmethod_stats;
294
295 static void note_java_nmethod(nmethod* nm) {
296 #ifdef COMPILER1
297 if (nm->is_compiled_by_c1()) {
298 c1_java_nmethod_stats.note_nmethod(nm);
299 } else
300 #endif
301 #ifdef COMPILER2
302 if (nm->is_compiled_by_c2()) {
303 c2_java_nmethod_stats.note_nmethod(nm);
304 } else
305 #endif
306 #if INCLUDE_JVMCI
307 if (nm->is_compiled_by_jvmci()) {
308 jvmci_java_nmethod_stats.note_nmethod(nm);
309 } else
310 #endif
311 {
312 unknown_java_nmethod_stats.note_nmethod(nm);
313 }
314 }
315 #endif // !PRODUCT
316
317 //---------------------------------------------------------------------------------
318
319
320 ExceptionCache::ExceptionCache(Handle exception, address pc, address handler) {
321 assert(pc != nullptr, "Must be non null");
322 assert(exception.not_null(), "Must be non null");
323 assert(handler != nullptr, "Must be non null");
324
325 _count = 0;
326 _exception_type = exception->klass();
327 _next = nullptr;
328 _purge_list_next = nullptr;
329
330 add_address_and_handler(pc,handler);
331 }
332
333
334 address ExceptionCache::match(Handle exception, address pc) {
335 assert(pc != nullptr,"Must be non null");
336 assert(exception.not_null(),"Must be non null");
337 if (exception->klass() == exception_type()) {
338 return (test_address(pc));
339 }
340
341 return nullptr;
342 }
343
344
345 bool ExceptionCache::match_exception_with_space(Handle exception) {
346 assert(exception.not_null(),"Must be non null");
347 if (exception->klass() == exception_type() && count() < cache_size) {
348 return true;
349 }
350 return false;
351 }
352
353
354 address ExceptionCache::test_address(address addr) {
355 int limit = count();
356 for (int i = 0; i < limit; i++) {
357 if (pc_at(i) == addr) {
358 return handler_at(i);
359 }
360 }
361 return nullptr;
362 }
363
364
365 bool ExceptionCache::add_address_and_handler(address addr, address handler) {
366 if (test_address(addr) == handler) return true;
367
368 int index = count();
369 if (index < cache_size) {
370 set_pc_at(index, addr);
371 set_handler_at(index, handler);
372 increment_count();
373 return true;
374 }
375 return false;
376 }
377
378 ExceptionCache* ExceptionCache::next() {
379 return Atomic::load(&_next);
380 }
381
382 void ExceptionCache::set_next(ExceptionCache *ec) {
383 Atomic::store(&_next, ec);
384 }
385
386 //-----------------------------------------------------------------------------
387
388
389 // Helper used by both find_pc_desc methods.
390 static inline bool match_desc(PcDesc* pc, int pc_offset, bool approximate) {
391 NOT_PRODUCT(++pc_nmethod_stats.pc_desc_tests);
392 if (!approximate) {
393 return pc->pc_offset() == pc_offset;
394 } else {
395 // Do not look before the sentinel
396 assert(pc_offset > PcDesc::lower_offset_limit, "illegal pc_offset");
397 return pc_offset <= pc->pc_offset() && (pc-1)->pc_offset() < pc_offset;
398 }
399 }
400
401 void PcDescCache::init_to(PcDesc* initial_pc_desc) {
402 NOT_PRODUCT(++pc_nmethod_stats.pc_desc_init);
403 // initialize the cache by filling it with benign (non-null) values
404 assert(initial_pc_desc != nullptr && initial_pc_desc->pc_offset() == PcDesc::lower_offset_limit,
405 "must start with a sentinel");
406 for (int i = 0; i < cache_size; i++) {
407 _pc_descs[i] = initial_pc_desc;
408 }
409 }
410
411 PcDesc* PcDescCache::find_pc_desc(int pc_offset, bool approximate) {
412 // Note: one might think that caching the most recently
413 // read value separately would be a win, but one would be
414 // wrong. When many threads are updating it, the cache
415 // line it's in would bounce between caches, negating
416 // any benefit.
417
418 // In order to prevent race conditions do not load cache elements
419 // repeatedly, but use a local copy:
420 PcDesc* res;
421
422 // Step one: Check the most recently added value.
423 res = _pc_descs[0];
424 assert(res != nullptr, "PcDesc cache should be initialized already");
425
426 // Approximate only here since PcDescContainer::find_pc_desc() checked for exact case.
427 if (approximate && match_desc(res, pc_offset, approximate)) {
428 NOT_PRODUCT(++pc_nmethod_stats.pc_desc_repeats);
429 return res;
430 }
431
432 // Step two: Check the rest of the LRU cache.
433 for (int i = 1; i < cache_size; ++i) {
434 res = _pc_descs[i];
435 if (res->pc_offset() < 0) break; // optimization: skip empty cache
436 if (match_desc(res, pc_offset, approximate)) {
437 NOT_PRODUCT(++pc_nmethod_stats.pc_desc_hits);
438 return res;
439 }
440 }
441
442 // Report failure.
443 return nullptr;
444 }
445
446 void PcDescCache::add_pc_desc(PcDesc* pc_desc) {
447 NOT_PRODUCT(++pc_nmethod_stats.pc_desc_adds);
448 // Update the LRU cache by shifting pc_desc forward.
449 for (int i = 0; i < cache_size; i++) {
450 PcDesc* next = _pc_descs[i];
451 _pc_descs[i] = pc_desc;
452 pc_desc = next;
453 }
454 }
455
456 // adjust pcs_size so that it is a multiple of both oopSize and
457 // sizeof(PcDesc) (assumes that if sizeof(PcDesc) is not a multiple
458 // of oopSize, then 2*sizeof(PcDesc) is)
459 static int adjust_pcs_size(int pcs_size) {
460 int nsize = align_up(pcs_size, oopSize);
461 if ((nsize % sizeof(PcDesc)) != 0) {
462 nsize = pcs_size + sizeof(PcDesc);
463 }
464 assert((nsize % oopSize) == 0, "correct alignment");
465 return nsize;
466 }
467
468 bool nmethod::is_method_handle_return(address return_pc) {
469 if (!has_method_handle_invokes()) return false;
470 PcDesc* pd = pc_desc_at(return_pc);
471 if (pd == nullptr)
472 return false;
473 return pd->is_method_handle_invoke();
474 }
475
476 // Returns a string version of the method state.
477 const char* nmethod::state() const {
478 int state = get_state();
479 switch (state) {
480 case not_installed:
481 return "not installed";
482 case in_use:
483 return "in use";
484 case not_entrant:
485 return "not_entrant";
486 default:
487 fatal("unexpected method state: %d", state);
488 return nullptr;
489 }
490 }
491
492 void nmethod::set_deoptimized_done() {
493 ConditionalMutexLocker ml(NMethodState_lock, !NMethodState_lock->owned_by_self(), Mutex::_no_safepoint_check_flag);
494 if (_deoptimization_status != deoptimize_done) { // can't go backwards
495 Atomic::store(&_deoptimization_status, deoptimize_done);
496 }
497 }
498
499 ExceptionCache* nmethod::exception_cache_acquire() const {
500 return Atomic::load_acquire(&_exception_cache);
501 }
502
503 void nmethod::add_exception_cache_entry(ExceptionCache* new_entry) {
504 assert(ExceptionCache_lock->owned_by_self(),"Must hold the ExceptionCache_lock");
505 assert(new_entry != nullptr,"Must be non null");
506 assert(new_entry->next() == nullptr, "Must be null");
507
508 for (;;) {
509 ExceptionCache *ec = exception_cache();
510 if (ec != nullptr) {
511 Klass* ex_klass = ec->exception_type();
512 if (!ex_klass->is_loader_alive()) {
513 // We must guarantee that entries are not inserted with new next pointer
514 // edges to ExceptionCache entries with dead klasses, due to bad interactions
515 // with concurrent ExceptionCache cleanup. Therefore, the inserts roll
516 // the head pointer forward to the first live ExceptionCache, so that the new
517 // next pointers always point at live ExceptionCaches, that are not removed due
518 // to concurrent ExceptionCache cleanup.
519 ExceptionCache* next = ec->next();
520 if (Atomic::cmpxchg(&_exception_cache, ec, next) == ec) {
521 CodeCache::release_exception_cache(ec);
522 }
523 continue;
524 }
525 ec = exception_cache();
526 if (ec != nullptr) {
527 new_entry->set_next(ec);
528 }
529 }
530 if (Atomic::cmpxchg(&_exception_cache, ec, new_entry) == ec) {
531 return;
532 }
533 }
534 }
535
536 void nmethod::clean_exception_cache() {
537 // For each nmethod, only a single thread may call this cleanup function
538 // at the same time, whether called in STW cleanup or concurrent cleanup.
539 // Note that if the GC is processing exception cache cleaning in a concurrent phase,
540 // then a single writer may contend with cleaning up the head pointer to the
541 // first ExceptionCache node that has a Klass* that is alive. That is fine,
542 // as long as there is no concurrent cleanup of next pointers from concurrent writers.
543 // And the concurrent writers do not clean up next pointers, only the head.
544 // Also note that concurrent readers will walk through Klass* pointers that are not
545 // alive. That does not cause ABA problems, because Klass* is deleted after
546 // a handshake with all threads, after all stale ExceptionCaches have been
547 // unlinked. That is also when the CodeCache::exception_cache_purge_list()
548 // is deleted, with all ExceptionCache entries that were cleaned concurrently.
549 // That similarly implies that CAS operations on ExceptionCache entries do not
550 // suffer from ABA problems as unlinking and deletion is separated by a global
551 // handshake operation.
552 ExceptionCache* prev = nullptr;
553 ExceptionCache* curr = exception_cache_acquire();
554
555 while (curr != nullptr) {
556 ExceptionCache* next = curr->next();
557
558 if (!curr->exception_type()->is_loader_alive()) {
559 if (prev == nullptr) {
560 // Try to clean head; this is contended by concurrent inserts, that
561 // both lazily clean the head, and insert entries at the head. If
562 // the CAS fails, the operation is restarted.
563 if (Atomic::cmpxchg(&_exception_cache, curr, next) != curr) {
564 prev = nullptr;
565 curr = exception_cache_acquire();
566 continue;
567 }
568 } else {
569 // It is impossible to during cleanup connect the next pointer to
570 // an ExceptionCache that has not been published before a safepoint
571 // prior to the cleanup. Therefore, release is not required.
572 prev->set_next(next);
573 }
574 // prev stays the same.
575
576 CodeCache::release_exception_cache(curr);
577 } else {
578 prev = curr;
579 }
580
581 curr = next;
582 }
583 }
584
585 // public method for accessing the exception cache
586 // These are the public access methods.
587 address nmethod::handler_for_exception_and_pc(Handle exception, address pc) {
588 // We never grab a lock to read the exception cache, so we may
589 // have false negatives. This is okay, as it can only happen during
590 // the first few exception lookups for a given nmethod.
591 ExceptionCache* ec = exception_cache_acquire();
592 while (ec != nullptr) {
593 address ret_val;
594 if ((ret_val = ec->match(exception,pc)) != nullptr) {
595 return ret_val;
596 }
597 ec = ec->next();
598 }
599 return nullptr;
600 }
601
602 void nmethod::add_handler_for_exception_and_pc(Handle exception, address pc, address handler) {
603 // There are potential race conditions during exception cache updates, so we
604 // must own the ExceptionCache_lock before doing ANY modifications. Because
605 // we don't lock during reads, it is possible to have several threads attempt
606 // to update the cache with the same data. We need to check for already inserted
607 // copies of the current data before adding it.
608
609 MutexLocker ml(ExceptionCache_lock);
610 ExceptionCache* target_entry = exception_cache_entry_for_exception(exception);
611
612 if (target_entry == nullptr || !target_entry->add_address_and_handler(pc,handler)) {
613 target_entry = new ExceptionCache(exception,pc,handler);
614 add_exception_cache_entry(target_entry);
615 }
616 }
617
618 // private method for handling exception cache
619 // These methods are private, and used to manipulate the exception cache
620 // directly.
621 ExceptionCache* nmethod::exception_cache_entry_for_exception(Handle exception) {
622 ExceptionCache* ec = exception_cache_acquire();
623 while (ec != nullptr) {
624 if (ec->match_exception_with_space(exception)) {
625 return ec;
626 }
627 ec = ec->next();
628 }
629 return nullptr;
630 }
631
632 bool nmethod::is_at_poll_return(address pc) {
633 RelocIterator iter(this, pc, pc+1);
634 while (iter.next()) {
635 if (iter.type() == relocInfo::poll_return_type)
636 return true;
637 }
638 return false;
639 }
640
641
642 bool nmethod::is_at_poll_or_poll_return(address pc) {
643 RelocIterator iter(this, pc, pc+1);
644 while (iter.next()) {
645 relocInfo::relocType t = iter.type();
646 if (t == relocInfo::poll_return_type || t == relocInfo::poll_type)
647 return true;
648 }
649 return false;
650 }
651
652 void nmethod::verify_oop_relocations() {
653 // Ensure sure that the code matches the current oop values
654 RelocIterator iter(this, nullptr, nullptr);
655 while (iter.next()) {
656 if (iter.type() == relocInfo::oop_type) {
657 oop_Relocation* reloc = iter.oop_reloc();
658 if (!reloc->oop_is_immediate()) {
659 reloc->verify_oop_relocation();
660 }
661 }
662 }
663 }
664
665
666 ScopeDesc* nmethod::scope_desc_at(address pc) {
667 PcDesc* pd = pc_desc_at(pc);
668 guarantee(pd != nullptr, "scope must be present");
669 return new ScopeDesc(this, pd);
670 }
671
672 ScopeDesc* nmethod::scope_desc_near(address pc) {
673 PcDesc* pd = pc_desc_near(pc);
674 guarantee(pd != nullptr, "scope must be present");
675 return new ScopeDesc(this, pd);
676 }
677
678 address nmethod::oops_reloc_begin() const {
679 // If the method is not entrant then a JMP is plastered over the
680 // first few bytes. If an oop in the old code was there, that oop
681 // should not get GC'd. Skip the first few bytes of oops on
682 // not-entrant methods.
683 if (frame_complete_offset() != CodeOffsets::frame_never_safe &&
684 code_begin() + frame_complete_offset() >
685 verified_entry_point() + NativeJump::instruction_size)
686 {
687 // If we have a frame_complete_offset after the native jump, then there
688 // is no point trying to look for oops before that. This is a requirement
689 // for being allowed to scan oops concurrently.
690 return code_begin() + frame_complete_offset();
691 }
692
693 address low_boundary = verified_entry_point();
694 if (!is_in_use()) {
695 low_boundary += NativeJump::instruction_size;
696 // %%% Note: On SPARC we patch only a 4-byte trap, not a full NativeJump.
697 // This means that the low_boundary is going to be a little too high.
698 // This shouldn't matter, since oops of non-entrant methods are never used.
699 // In fact, why are we bothering to look at oops in a non-entrant method??
700 }
701 return low_boundary;
702 }
703
704 // Method that knows how to preserve outgoing arguments at call. This method must be
705 // called with a frame corresponding to a Java invoke
706 void nmethod::preserve_callee_argument_oops(frame fr, const RegisterMap *reg_map, OopClosure* f) {
707 if (method() == nullptr) {
708 return;
709 }
710
711 // handle the case of an anchor explicitly set in continuation code that doesn't have a callee
712 JavaThread* thread = reg_map->thread();
713 if ((thread->has_last_Java_frame() && fr.sp() == thread->last_Java_sp())
714 JVMTI_ONLY(|| (method()->is_continuation_enter_intrinsic() && thread->on_monitor_waited_event()))) {
715 return;
716 }
717
718 if (!method()->is_native()) {
719 address pc = fr.pc();
720 bool has_receiver, has_appendix;
721 Symbol* signature;
722
723 // The method attached by JIT-compilers should be used, if present.
724 // Bytecode can be inaccurate in such case.
725 Method* callee = attached_method_before_pc(pc);
726 if (callee != nullptr) {
727 has_receiver = !(callee->access_flags().is_static());
728 has_appendix = false;
729 signature = callee->signature();
730 } else {
731 SimpleScopeDesc ssd(this, pc);
732
733 Bytecode_invoke call(methodHandle(Thread::current(), ssd.method()), ssd.bci());
734 has_receiver = call.has_receiver();
735 has_appendix = call.has_appendix();
736 signature = call.signature();
737 }
738
739 fr.oops_compiled_arguments_do(signature, has_receiver, has_appendix, reg_map, f);
740 } else if (method()->is_continuation_enter_intrinsic()) {
741 // This method only calls Continuation.enter()
742 Symbol* signature = vmSymbols::continuationEnter_signature();
743 fr.oops_compiled_arguments_do(signature, false, false, reg_map, f);
744 }
745 }
746
747 Method* nmethod::attached_method(address call_instr) {
748 assert(code_contains(call_instr), "not part of the nmethod");
749 RelocIterator iter(this, call_instr, call_instr + 1);
750 while (iter.next()) {
751 if (iter.addr() == call_instr) {
752 switch(iter.type()) {
753 case relocInfo::static_call_type: return iter.static_call_reloc()->method_value();
754 case relocInfo::opt_virtual_call_type: return iter.opt_virtual_call_reloc()->method_value();
755 case relocInfo::virtual_call_type: return iter.virtual_call_reloc()->method_value();
756 default: break;
757 }
758 }
759 }
760 return nullptr; // not found
761 }
762
763 Method* nmethod::attached_method_before_pc(address pc) {
764 if (NativeCall::is_call_before(pc)) {
765 NativeCall* ncall = nativeCall_before(pc);
766 return attached_method(ncall->instruction_address());
767 }
768 return nullptr; // not a call
769 }
770
771 void nmethod::clear_inline_caches() {
772 assert(SafepointSynchronize::is_at_safepoint(), "clearing of IC's only allowed at safepoint");
773 RelocIterator iter(this);
774 while (iter.next()) {
775 iter.reloc()->clear_inline_cache();
776 }
777 }
778
779 #ifdef ASSERT
780 // Check class_loader is alive for this bit of metadata.
781 class CheckClass : public MetadataClosure {
782 void do_metadata(Metadata* md) {
783 Klass* klass = nullptr;
784 if (md->is_klass()) {
785 klass = ((Klass*)md);
786 } else if (md->is_method()) {
787 klass = ((Method*)md)->method_holder();
788 } else if (md->is_methodData()) {
789 klass = ((MethodData*)md)->method()->method_holder();
790 } else if (md->is_methodCounters()) {
791 klass = ((MethodCounters*)md)->method()->method_holder();
792 } else {
793 md->print();
794 ShouldNotReachHere();
795 }
796 assert(klass->is_loader_alive(), "must be alive");
797 }
798 };
799 #endif // ASSERT
800
801
802 static void clean_ic_if_metadata_is_dead(CompiledIC *ic) {
803 ic->clean_metadata();
804 }
805
806 // Clean references to unloaded nmethods at addr from this one, which is not unloaded.
807 template <typename CallsiteT>
808 static void clean_if_nmethod_is_unloaded(CallsiteT* callsite, nmethod* from,
809 bool clean_all) {
810 CodeBlob* cb = CodeCache::find_blob(callsite->destination());
811 if (!cb->is_nmethod()) {
812 return;
813 }
814 nmethod* nm = cb->as_nmethod();
815 if (clean_all || !nm->is_in_use() || nm->is_unloading() || nm->method()->code() != nm) {
816 callsite->set_to_clean();
817 }
818 }
819
820 // Cleans caches in nmethods that point to either classes that are unloaded
821 // or nmethods that are unloaded.
822 //
823 // Can be called either in parallel by G1 currently or after all
824 // nmethods are unloaded. Return postponed=true in the parallel case for
825 // inline caches found that point to nmethods that are not yet visited during
826 // the do_unloading walk.
827 void nmethod::unload_nmethod_caches(bool unloading_occurred) {
828 ResourceMark rm;
829
830 // Exception cache only needs to be called if unloading occurred
831 if (unloading_occurred) {
832 clean_exception_cache();
833 }
834
835 cleanup_inline_caches_impl(unloading_occurred, false);
836
837 #ifdef ASSERT
838 // Check that the metadata embedded in the nmethod is alive
839 CheckClass check_class;
840 metadata_do(&check_class);
841 #endif
842 }
843
844 void nmethod::run_nmethod_entry_barrier() {
845 BarrierSetNMethod* bs_nm = BarrierSet::barrier_set()->barrier_set_nmethod();
846 if (bs_nm != nullptr) {
847 // We want to keep an invariant that nmethods found through iterations of a Thread's
848 // nmethods found in safepoints have gone through an entry barrier and are not armed.
849 // By calling this nmethod entry barrier, it plays along and acts
850 // like any other nmethod found on the stack of a thread (fewer surprises).
851 nmethod* nm = this;
852 bool alive = bs_nm->nmethod_entry_barrier(nm);
853 assert(alive, "should be alive");
854 }
855 }
856
857 // Only called by whitebox test
858 void nmethod::cleanup_inline_caches_whitebox() {
859 assert_locked_or_safepoint(CodeCache_lock);
860 CompiledICLocker ic_locker(this);
861 cleanup_inline_caches_impl(false /* unloading_occurred */, true /* clean_all */);
862 }
863
864 address* nmethod::orig_pc_addr(const frame* fr) {
865 return (address*) ((address)fr->unextended_sp() + orig_pc_offset());
866 }
867
868 // Called to clean up after class unloading for live nmethods
869 void nmethod::cleanup_inline_caches_impl(bool unloading_occurred, bool clean_all) {
870 assert(CompiledICLocker::is_safe(this), "mt unsafe call");
871 ResourceMark rm;
872
873 // Find all calls in an nmethod and clear the ones that point to bad nmethods.
874 RelocIterator iter(this, oops_reloc_begin());
875 bool is_in_static_stub = false;
876 while(iter.next()) {
877
878 switch (iter.type()) {
879
880 case relocInfo::virtual_call_type:
881 if (unloading_occurred) {
882 // If class unloading occurred we first clear ICs where the cached metadata
883 // is referring to an unloaded klass or method.
884 clean_ic_if_metadata_is_dead(CompiledIC_at(&iter));
885 }
886
887 clean_if_nmethod_is_unloaded(CompiledIC_at(&iter), this, clean_all);
888 break;
889
890 case relocInfo::opt_virtual_call_type:
891 case relocInfo::static_call_type:
892 clean_if_nmethod_is_unloaded(CompiledDirectCall::at(iter.reloc()), this, clean_all);
893 break;
894
895 case relocInfo::static_stub_type: {
896 is_in_static_stub = true;
897 break;
898 }
899
900 case relocInfo::metadata_type: {
901 // Only the metadata relocations contained in static/opt virtual call stubs
902 // contains the Method* passed to c2i adapters. It is the only metadata
903 // relocation that needs to be walked, as it is the one metadata relocation
904 // that violates the invariant that all metadata relocations have an oop
905 // in the compiled method (due to deferred resolution and code patching).
906
907 // This causes dead metadata to remain in compiled methods that are not
908 // unloading. Unless these slippery metadata relocations of the static
909 // stubs are at least cleared, subsequent class redefinition operations
910 // will access potentially free memory, and JavaThread execution
911 // concurrent to class unloading may call c2i adapters with dead methods.
912 if (!is_in_static_stub) {
913 // The first metadata relocation after a static stub relocation is the
914 // metadata relocation of the static stub used to pass the Method* to
915 // c2i adapters.
916 continue;
917 }
918 is_in_static_stub = false;
919 if (is_unloading()) {
920 // If the nmethod itself is dying, then it may point at dead metadata.
921 // Nobody should follow that metadata; it is strictly unsafe.
922 continue;
923 }
924 metadata_Relocation* r = iter.metadata_reloc();
925 Metadata* md = r->metadata_value();
926 if (md != nullptr && md->is_method()) {
927 Method* method = static_cast<Method*>(md);
928 if (!method->method_holder()->is_loader_alive()) {
929 Atomic::store(r->metadata_addr(), (Method*)nullptr);
930
931 if (!r->metadata_is_immediate()) {
932 r->fix_metadata_relocation();
933 }
934 }
935 }
936 break;
937 }
938
939 default:
940 break;
941 }
942 }
943 }
944
945 address nmethod::continuation_for_implicit_exception(address pc, bool for_div0_check) {
946 // Exception happened outside inline-cache check code => we are inside
947 // an active nmethod => use cpc to determine a return address
948 int exception_offset = int(pc - code_begin());
949 int cont_offset = ImplicitExceptionTable(this).continuation_offset( exception_offset );
950 #ifdef ASSERT
951 if (cont_offset == 0) {
952 Thread* thread = Thread::current();
953 ResourceMark rm(thread);
954 CodeBlob* cb = CodeCache::find_blob(pc);
955 assert(cb != nullptr && cb == this, "");
956
957 // Keep tty output consistent. To avoid ttyLocker, we buffer in stream, and print all at once.
958 stringStream ss;
959 ss.print_cr("implicit exception happened at " INTPTR_FORMAT, p2i(pc));
960 print_on(&ss);
961 method()->print_codes_on(&ss);
962 print_code_on(&ss);
963 print_pcs_on(&ss);
964 tty->print("%s", ss.as_string()); // print all at once
965 }
966 #endif
967 if (cont_offset == 0) {
968 // Let the normal error handling report the exception
969 return nullptr;
970 }
971 if (cont_offset == exception_offset) {
972 #if INCLUDE_JVMCI
973 Deoptimization::DeoptReason deopt_reason = for_div0_check ? Deoptimization::Reason_div0_check : Deoptimization::Reason_null_check;
974 JavaThread *thread = JavaThread::current();
975 thread->set_jvmci_implicit_exception_pc(pc);
976 thread->set_pending_deoptimization(Deoptimization::make_trap_request(deopt_reason,
977 Deoptimization::Action_reinterpret));
978 return (SharedRuntime::deopt_blob()->implicit_exception_uncommon_trap());
979 #else
980 ShouldNotReachHere();
981 #endif
982 }
983 return code_begin() + cont_offset;
984 }
985
986 class HasEvolDependency : public MetadataClosure {
987 bool _has_evol_dependency;
988 public:
989 HasEvolDependency() : _has_evol_dependency(false) {}
990 void do_metadata(Metadata* md) {
991 if (md->is_method()) {
992 Method* method = (Method*)md;
993 if (method->is_old()) {
994 _has_evol_dependency = true;
995 }
996 }
997 }
998 bool has_evol_dependency() const { return _has_evol_dependency; }
999 };
1000
1001 bool nmethod::has_evol_metadata() {
1002 // Check the metadata in relocIter and CompiledIC and also deoptimize
1003 // any nmethod that has reference to old methods.
1004 HasEvolDependency check_evol;
1005 metadata_do(&check_evol);
1006 if (check_evol.has_evol_dependency() && log_is_enabled(Debug, redefine, class, nmethod)) {
1007 ResourceMark rm;
1008 log_debug(redefine, class, nmethod)
1009 ("Found evol dependency of nmethod %s.%s(%s) compile_id=%d on in nmethod metadata",
1010 _method->method_holder()->external_name(),
1011 _method->name()->as_C_string(),
1012 _method->signature()->as_C_string(),
1013 compile_id());
1014 }
1015 return check_evol.has_evol_dependency();
1016 }
1017
1018 int nmethod::total_size() const {
1019 return
1020 consts_size() +
1021 insts_size() +
1022 stub_size() +
1023 scopes_data_size() +
1024 scopes_pcs_size() +
1025 handler_table_size() +
1026 nul_chk_table_size();
1027 }
1028
1029 const char* nmethod::compile_kind() const {
1030 if (is_osr_method()) return "osr";
1031 if (method() != nullptr && is_native_method()) {
1032 if (method()->is_continuation_native_intrinsic()) {
1033 return "cnt";
1034 }
1035 return "c2n";
1036 }
1037 return nullptr;
1038 }
1039
1040 const char* nmethod::compiler_name() const {
1041 return compilertype2name(_compiler_type);
1042 }
1043
1044 #ifdef ASSERT
1045 class CheckForOopsClosure : public OopClosure {
1046 bool _found_oop = false;
1047 public:
1048 virtual void do_oop(oop* o) { _found_oop = true; }
1049 virtual void do_oop(narrowOop* o) { _found_oop = true; }
1050 bool found_oop() { return _found_oop; }
1051 };
1052 class CheckForMetadataClosure : public MetadataClosure {
1053 bool _found_metadata = false;
1054 Metadata* _ignore = nullptr;
1055 public:
1056 CheckForMetadataClosure(Metadata* ignore) : _ignore(ignore) {}
1057 virtual void do_metadata(Metadata* md) { if (md != _ignore) _found_metadata = true; }
1058 bool found_metadata() { return _found_metadata; }
1059 };
1060
1061 static void assert_no_oops_or_metadata(nmethod* nm) {
1062 if (nm == nullptr) return;
1063 assert(nm->oop_maps() == nullptr, "expectation");
1064
1065 CheckForOopsClosure cfo;
1066 nm->oops_do(&cfo);
1067 assert(!cfo.found_oop(), "no oops allowed");
1068
1069 // We allow an exception for the own Method, but require its class to be permanent.
1070 Method* own_method = nm->method();
1071 CheckForMetadataClosure cfm(/* ignore reference to own Method */ own_method);
1072 nm->metadata_do(&cfm);
1073 assert(!cfm.found_metadata(), "no metadata allowed");
1074
1075 assert(own_method->method_holder()->class_loader_data()->is_permanent_class_loader_data(),
1076 "Method's class needs to be permanent");
1077 }
1078 #endif
1079
1080 static int required_mutable_data_size(CodeBuffer* code_buffer,
1081 int jvmci_data_size = 0) {
1082 return align_up(code_buffer->total_relocation_size(), oopSize) +
1083 align_up(code_buffer->total_metadata_size(), oopSize) +
1084 align_up(jvmci_data_size, oopSize);
1085 }
1086
1087 nmethod* nmethod::new_native_nmethod(const methodHandle& method,
1088 int compile_id,
1089 CodeBuffer *code_buffer,
1090 int vep_offset,
1091 int frame_complete,
1092 int frame_size,
1093 ByteSize basic_lock_owner_sp_offset,
1094 ByteSize basic_lock_sp_offset,
1095 OopMapSet* oop_maps,
1096 int exception_handler) {
1097 code_buffer->finalize_oop_references(method);
1098 // create nmethod
1099 nmethod* nm = nullptr;
1100 int native_nmethod_size = CodeBlob::allocation_size(code_buffer, sizeof(nmethod));
1101 {
1102 MutexLocker mu(CodeCache_lock, Mutex::_no_safepoint_check_flag);
1103
1104 CodeOffsets offsets;
1105 offsets.set_value(CodeOffsets::Verified_Entry, vep_offset);
1106 offsets.set_value(CodeOffsets::Frame_Complete, frame_complete);
1107 if (exception_handler != -1) {
1108 offsets.set_value(CodeOffsets::Exceptions, exception_handler);
1109 }
1110
1111 int mutable_data_size = required_mutable_data_size(code_buffer);
1112
1113 // MH intrinsics are dispatch stubs which are compatible with NonNMethod space.
1114 // IsUnloadingBehaviour::is_unloading needs to handle them separately.
1115 bool allow_NonNMethod_space = method->can_be_allocated_in_NonNMethod_space();
1116 nm = new (native_nmethod_size, allow_NonNMethod_space)
1117 nmethod(method(), compiler_none, native_nmethod_size,
1118 compile_id, &offsets,
1119 code_buffer, frame_size,
1120 basic_lock_owner_sp_offset,
1121 basic_lock_sp_offset,
1122 oop_maps, mutable_data_size);
1123 DEBUG_ONLY( if (allow_NonNMethod_space) assert_no_oops_or_metadata(nm); )
1124 NOT_PRODUCT(if (nm != nullptr) native_nmethod_stats.note_native_nmethod(nm));
1125 }
1126
1127 if (nm != nullptr) {
1128 // verify nmethod
1129 DEBUG_ONLY(nm->verify();) // might block
1130
1131 nm->log_new_nmethod();
1132 }
1133 return nm;
1134 }
1135
1136 nmethod* nmethod::new_nmethod(const methodHandle& method,
1137 int compile_id,
1138 int entry_bci,
1139 CodeOffsets* offsets,
1140 int orig_pc_offset,
1141 DebugInformationRecorder* debug_info,
1142 Dependencies* dependencies,
1143 CodeBuffer* code_buffer, int frame_size,
1144 OopMapSet* oop_maps,
1145 ExceptionHandlerTable* handler_table,
1146 ImplicitExceptionTable* nul_chk_table,
1147 AbstractCompiler* compiler,
1148 CompLevel comp_level
1149 #if INCLUDE_JVMCI
1150 , char* speculations,
1151 int speculations_len,
1152 JVMCINMethodData* jvmci_data
1153 #endif
1154 )
1155 {
1156 assert(debug_info->oop_recorder() == code_buffer->oop_recorder(), "shared OR");
1157 code_buffer->finalize_oop_references(method);
1158 // create nmethod
1159 nmethod* nm = nullptr;
1160 int nmethod_size = CodeBlob::allocation_size(code_buffer, sizeof(nmethod));
1161
1162 int immutable_data_size =
1163 adjust_pcs_size(debug_info->pcs_size())
1164 + align_up((int)dependencies->size_in_bytes(), oopSize)
1165 + align_up(handler_table->size_in_bytes() , oopSize)
1166 + align_up(nul_chk_table->size_in_bytes() , oopSize)
1167 #if INCLUDE_JVMCI
1168 + align_up(speculations_len , oopSize)
1169 #endif
1170 + align_up(debug_info->data_size() , oopSize);
1171
1172 // First, allocate space for immutable data in C heap.
1173 address immutable_data = nullptr;
1174 if (immutable_data_size > 0) {
1175 immutable_data = (address)os::malloc(immutable_data_size, mtCode);
1176 if (immutable_data == nullptr) {
1177 vm_exit_out_of_memory(immutable_data_size, OOM_MALLOC_ERROR, "nmethod: no space for immutable data");
1178 return nullptr;
1179 }
1180 }
1181
1182 int mutable_data_size = required_mutable_data_size(code_buffer
1183 JVMCI_ONLY(COMMA (compiler->is_jvmci() ? jvmci_data->size() : 0)));
1184
1185 {
1186 MutexLocker mu(CodeCache_lock, Mutex::_no_safepoint_check_flag);
1187
1188 nm = new (nmethod_size, comp_level)
1189 nmethod(method(), compiler->type(), nmethod_size, immutable_data_size, mutable_data_size,
1190 compile_id, entry_bci, immutable_data, offsets, orig_pc_offset,
1191 debug_info, dependencies, code_buffer, frame_size, oop_maps,
1192 handler_table, nul_chk_table, compiler, comp_level
1193 #if INCLUDE_JVMCI
1194 , speculations,
1195 speculations_len,
1196 jvmci_data
1197 #endif
1198 );
1199
1200 if (nm != nullptr) {
1201 // To make dependency checking during class loading fast, record
1202 // the nmethod dependencies in the classes it is dependent on.
1203 // This allows the dependency checking code to simply walk the
1204 // class hierarchy above the loaded class, checking only nmethods
1205 // which are dependent on those classes. The slow way is to
1206 // check every nmethod for dependencies which makes it linear in
1207 // the number of methods compiled. For applications with a lot
1208 // classes the slow way is too slow.
1209 for (Dependencies::DepStream deps(nm); deps.next(); ) {
1210 if (deps.type() == Dependencies::call_site_target_value) {
1211 // CallSite dependencies are managed on per-CallSite instance basis.
1212 oop call_site = deps.argument_oop(0);
1213 MethodHandles::add_dependent_nmethod(call_site, nm);
1214 } else {
1215 InstanceKlass* ik = deps.context_type();
1216 if (ik == nullptr) {
1217 continue; // ignore things like evol_method
1218 }
1219 // record this nmethod as dependent on this klass
1220 ik->add_dependent_nmethod(nm);
1221 }
1222 }
1223 NOT_PRODUCT(if (nm != nullptr) note_java_nmethod(nm));
1224 }
1225 }
1226 // Do verification and logging outside CodeCache_lock.
1227 if (nm != nullptr) {
1228 // Safepoints in nmethod::verify aren't allowed because nm hasn't been installed yet.
1229 DEBUG_ONLY(nm->verify();)
1230 nm->log_new_nmethod();
1231 }
1232 return nm;
1233 }
1234
1235 // Fill in default values for various fields
1236 void nmethod::init_defaults(CodeBuffer *code_buffer, CodeOffsets* offsets) {
1237 // avoid uninitialized fields, even for short time periods
1238 _exception_cache = nullptr;
1239 _gc_data = nullptr;
1240 _oops_do_mark_link = nullptr;
1241 _compiled_ic_data = nullptr;
1242
1243 _is_unloading_state = 0;
1244 _state = not_installed;
1245
1246 _has_unsafe_access = 0;
1247 _has_method_handle_invokes = 0;
1248 _has_wide_vectors = 0;
1249 _has_monitors = 0;
1250 _has_scoped_access = 0;
1251 _has_flushed_dependencies = 0;
1252 _is_unlinked = 0;
1253 _load_reported = 0; // jvmti state
1254
1255 _deoptimization_status = not_marked;
1256
1257 // SECT_CONSTS is first in code buffer so the offset should be 0.
1258 int consts_offset = code_buffer->total_offset_of(code_buffer->consts());
1259 assert(consts_offset == 0, "const_offset: %d", consts_offset);
1260
1261 _stub_offset = content_offset() + code_buffer->total_offset_of(code_buffer->stubs());
1262
1263 CHECKED_CAST(_entry_offset, uint16_t, (offsets->value(CodeOffsets::Entry)));
1264 CHECKED_CAST(_verified_entry_offset, uint16_t, (offsets->value(CodeOffsets::Verified_Entry)));
1265
1266 _skipped_instructions_size = code_buffer->total_skipped_instructions_size();
1267 }
1268
1269 // Post initialization
1270 void nmethod::post_init() {
1271 clear_unloading_state();
1272
1273 finalize_relocations();
1274
1275 Universe::heap()->register_nmethod(this);
1276 DEBUG_ONLY(Universe::heap()->verify_nmethod(this));
1277
1278 CodeCache::commit(this);
1279 }
1280
1281 // For native wrappers
1282 nmethod::nmethod(
1283 Method* method,
1284 CompilerType type,
1285 int nmethod_size,
1286 int compile_id,
1287 CodeOffsets* offsets,
1288 CodeBuffer* code_buffer,
1289 int frame_size,
1290 ByteSize basic_lock_owner_sp_offset,
1291 ByteSize basic_lock_sp_offset,
1292 OopMapSet* oop_maps,
1293 int mutable_data_size)
1294 : CodeBlob("native nmethod", CodeBlobKind::Nmethod, code_buffer, nmethod_size, sizeof(nmethod),
1295 offsets->value(CodeOffsets::Frame_Complete), frame_size, oop_maps, false, mutable_data_size),
1296 _deoptimization_generation(0),
1297 _gc_epoch(CodeCache::gc_epoch()),
1298 _method(method),
1299 _native_receiver_sp_offset(basic_lock_owner_sp_offset),
1300 _native_basic_lock_sp_offset(basic_lock_sp_offset)
1301 {
1302 {
1303 DEBUG_ONLY(NoSafepointVerifier nsv;)
1304 assert_locked_or_safepoint(CodeCache_lock);
1305
1306 init_defaults(code_buffer, offsets);
1307
1308 _osr_entry_point = nullptr;
1309 _pc_desc_container = nullptr;
1310 _entry_bci = InvocationEntryBci;
1311 _compile_id = compile_id;
1312 _comp_level = CompLevel_none;
1313 _compiler_type = type;
1314 _orig_pc_offset = 0;
1315 _num_stack_arg_slots = 0;
1316
1317 if (offsets->value(CodeOffsets::Exceptions) != -1) {
1318 // Continuation enter intrinsic
1319 _exception_offset = code_offset() + offsets->value(CodeOffsets::Exceptions);
1320 } else {
1321 _exception_offset = 0;
1322 }
1323 // Native wrappers do not have deopt handlers. Make the values
1324 // something that will never match a pc like the nmethod vtable entry
1325 _deopt_handler_offset = 0;
1326 _deopt_mh_handler_offset = 0;
1327 _unwind_handler_offset = 0;
1328
1329 CHECKED_CAST(_oops_size, uint16_t, align_up(code_buffer->total_oop_size(), oopSize));
1330 uint16_t metadata_size;
1331 CHECKED_CAST(metadata_size, uint16_t, align_up(code_buffer->total_metadata_size(), wordSize));
1332 JVMCI_ONLY( _metadata_size = metadata_size; )
1333 assert(_mutable_data_size == _relocation_size + metadata_size,
1334 "wrong mutable data size: %d != %d + %d",
1335 _mutable_data_size, _relocation_size, metadata_size);
1336
1337 // native wrapper does not have read-only data but we need unique not null address
1338 _immutable_data = blob_end();
1339 _immutable_data_size = 0;
1340 _nul_chk_table_offset = 0;
1341 _handler_table_offset = 0;
1342 _scopes_pcs_offset = 0;
1343 _scopes_data_offset = 0;
1344 #if INCLUDE_JVMCI
1345 _speculations_offset = 0;
1346 #endif
1347
1348 code_buffer->copy_code_and_locs_to(this);
1349 code_buffer->copy_values_to(this);
1350
1351 post_init();
1352 }
1353
1354 if (PrintNativeNMethods || PrintDebugInfo || PrintRelocations || PrintDependencies) {
1355 ttyLocker ttyl; // keep the following output all in one block
1356 // This output goes directly to the tty, not the compiler log.
1357 // To enable tools to match it up with the compilation activity,
1358 // be sure to tag this tty output with the compile ID.
1359 if (xtty != nullptr) {
1360 xtty->begin_head("print_native_nmethod");
1361 xtty->method(_method);
1362 xtty->stamp();
1363 xtty->end_head(" address='" INTPTR_FORMAT "'", (intptr_t) this);
1364 }
1365 // Print the header part, then print the requested information.
1366 // This is both handled in decode2(), called via print_code() -> decode()
1367 if (PrintNativeNMethods) {
1368 tty->print_cr("-------------------------- Assembly (native nmethod) ---------------------------");
1369 print_code();
1370 tty->print_cr("- - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - ");
1371 #if defined(SUPPORT_DATA_STRUCTS)
1372 if (AbstractDisassembler::show_structs()) {
1373 if (oop_maps != nullptr) {
1374 tty->print("oop maps:"); // oop_maps->print_on(tty) outputs a cr() at the beginning
1375 oop_maps->print_on(tty);
1376 tty->print_cr("- - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - ");
1377 }
1378 }
1379 #endif
1380 } else {
1381 print(); // print the header part only.
1382 }
1383 #if defined(SUPPORT_DATA_STRUCTS)
1384 if (AbstractDisassembler::show_structs()) {
1385 if (PrintRelocations) {
1386 print_relocations();
1387 tty->print_cr("- - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - ");
1388 }
1389 }
1390 #endif
1391 if (xtty != nullptr) {
1392 xtty->tail("print_native_nmethod");
1393 }
1394 }
1395 }
1396
1397 void* nmethod::operator new(size_t size, int nmethod_size, int comp_level) throw () {
1398 return CodeCache::allocate(nmethod_size, CodeCache::get_code_blob_type(comp_level));
1399 }
1400
1401 void* nmethod::operator new(size_t size, int nmethod_size, bool allow_NonNMethod_space) throw () {
1402 // Try MethodNonProfiled and MethodProfiled.
1403 void* return_value = CodeCache::allocate(nmethod_size, CodeBlobType::MethodNonProfiled);
1404 if (return_value != nullptr || !allow_NonNMethod_space) return return_value;
1405 // Try NonNMethod or give up.
1406 return CodeCache::allocate(nmethod_size, CodeBlobType::NonNMethod);
1407 }
1408
1409 // For normal JIT compiled code
1410 nmethod::nmethod(
1411 Method* method,
1412 CompilerType type,
1413 int nmethod_size,
1414 int immutable_data_size,
1415 int mutable_data_size,
1416 int compile_id,
1417 int entry_bci,
1418 address immutable_data,
1419 CodeOffsets* offsets,
1420 int orig_pc_offset,
1421 DebugInformationRecorder* debug_info,
1422 Dependencies* dependencies,
1423 CodeBuffer *code_buffer,
1424 int frame_size,
1425 OopMapSet* oop_maps,
1426 ExceptionHandlerTable* handler_table,
1427 ImplicitExceptionTable* nul_chk_table,
1428 AbstractCompiler* compiler,
1429 CompLevel comp_level
1430 #if INCLUDE_JVMCI
1431 , char* speculations,
1432 int speculations_len,
1433 JVMCINMethodData* jvmci_data
1434 #endif
1435 )
1436 : CodeBlob("nmethod", CodeBlobKind::Nmethod, code_buffer, nmethod_size, sizeof(nmethod),
1437 offsets->value(CodeOffsets::Frame_Complete), frame_size, oop_maps, false, mutable_data_size),
1438 _deoptimization_generation(0),
1439 _gc_epoch(CodeCache::gc_epoch()),
1440 _method(method),
1441 _osr_link(nullptr)
1442 {
1443 assert(debug_info->oop_recorder() == code_buffer->oop_recorder(), "shared OR");
1444 {
1445 DEBUG_ONLY(NoSafepointVerifier nsv;)
1446 assert_locked_or_safepoint(CodeCache_lock);
1447
1448 init_defaults(code_buffer, offsets);
1449
1450 _osr_entry_point = code_begin() + offsets->value(CodeOffsets::OSR_Entry);
1451 _entry_bci = entry_bci;
1452 _compile_id = compile_id;
1453 _comp_level = comp_level;
1454 _compiler_type = type;
1455 _orig_pc_offset = orig_pc_offset;
1456
1457 _num_stack_arg_slots = entry_bci != InvocationEntryBci ? 0 : _method->constMethod()->num_stack_arg_slots();
1458
1459 set_ctable_begin(header_begin() + content_offset());
1460
1461 #if INCLUDE_JVMCI
1462 if (compiler->is_jvmci()) {
1463 // JVMCI might not produce any stub sections
1464 if (offsets->value(CodeOffsets::Exceptions) != -1) {
1465 _exception_offset = code_offset() + offsets->value(CodeOffsets::Exceptions);
1466 } else {
1467 _exception_offset = -1;
1468 }
1469 if (offsets->value(CodeOffsets::Deopt) != -1) {
1470 _deopt_handler_offset = code_offset() + offsets->value(CodeOffsets::Deopt);
1471 } else {
1472 _deopt_handler_offset = -1;
1473 }
1474 if (offsets->value(CodeOffsets::DeoptMH) != -1) {
1475 _deopt_mh_handler_offset = code_offset() + offsets->value(CodeOffsets::DeoptMH);
1476 } else {
1477 _deopt_mh_handler_offset = -1;
1478 }
1479 } else
1480 #endif
1481 {
1482 // Exception handler and deopt handler are in the stub section
1483 assert(offsets->value(CodeOffsets::Exceptions) != -1, "must be set");
1484 assert(offsets->value(CodeOffsets::Deopt ) != -1, "must be set");
1485
1486 _exception_offset = _stub_offset + offsets->value(CodeOffsets::Exceptions);
1487 _deopt_handler_offset = _stub_offset + offsets->value(CodeOffsets::Deopt);
1488 if (offsets->value(CodeOffsets::DeoptMH) != -1) {
1489 _deopt_mh_handler_offset = _stub_offset + offsets->value(CodeOffsets::DeoptMH);
1490 } else {
1491 _deopt_mh_handler_offset = -1;
1492 }
1493 }
1494 if (offsets->value(CodeOffsets::UnwindHandler) != -1) {
1495 // C1 generates UnwindHandler at the end of instructions section.
1496 // Calculate positive offset as distance between the start of stubs section
1497 // (which is also the end of instructions section) and the start of the handler.
1498 int unwind_handler_offset = code_offset() + offsets->value(CodeOffsets::UnwindHandler);
1499 CHECKED_CAST(_unwind_handler_offset, int16_t, (_stub_offset - unwind_handler_offset));
1500 } else {
1501 _unwind_handler_offset = -1;
1502 }
1503
1504 CHECKED_CAST(_oops_size, uint16_t, align_up(code_buffer->total_oop_size(), oopSize));
1505 uint16_t metadata_size;
1506 CHECKED_CAST(metadata_size, uint16_t, align_up(code_buffer->total_metadata_size(), wordSize));
1507 JVMCI_ONLY( _metadata_size = metadata_size; )
1508 int jvmci_data_size = 0 JVMCI_ONLY( + align_up(compiler->is_jvmci() ? jvmci_data->size() : 0, oopSize));
1509 assert(_mutable_data_size == _relocation_size + metadata_size + jvmci_data_size,
1510 "wrong mutable data size: %d != %d + %d + %d",
1511 _mutable_data_size, _relocation_size, metadata_size, jvmci_data_size);
1512 assert(nmethod_size == data_end() - header_begin(), "wrong nmethod size: %d != %d",
1513 nmethod_size, (int)(code_end() - header_begin()));
1514
1515 _immutable_data_size = immutable_data_size;
1516 if (immutable_data_size > 0) {
1517 assert(immutable_data != nullptr, "required");
1518 _immutable_data = immutable_data;
1519 } else {
1520 // We need unique not null address
1521 _immutable_data = blob_end();
1522 }
1523 CHECKED_CAST(_nul_chk_table_offset, uint16_t, (align_up((int)dependencies->size_in_bytes(), oopSize)));
1524 CHECKED_CAST(_handler_table_offset, uint16_t, (_nul_chk_table_offset + align_up(nul_chk_table->size_in_bytes(), oopSize)));
1525 _scopes_pcs_offset = _handler_table_offset + align_up(handler_table->size_in_bytes(), oopSize);
1526 _scopes_data_offset = _scopes_pcs_offset + adjust_pcs_size(debug_info->pcs_size());
1527
1528 #if INCLUDE_JVMCI
1529 _speculations_offset = _scopes_data_offset + align_up(debug_info->data_size(), oopSize);
1530 DEBUG_ONLY( int immutable_data_end_offset = _speculations_offset + align_up(speculations_len, oopSize); )
1531 #else
1532 DEBUG_ONLY( int immutable_data_end_offset = _scopes_data_offset + align_up(debug_info->data_size(), oopSize); )
1533 #endif
1534 assert(immutable_data_end_offset <= immutable_data_size, "wrong read-only data size: %d > %d",
1535 immutable_data_end_offset, immutable_data_size);
1536
1537 // Copy code and relocation info
1538 code_buffer->copy_code_and_locs_to(this);
1539 // Copy oops and metadata
1540 code_buffer->copy_values_to(this);
1541 dependencies->copy_to(this);
1542 // Copy PcDesc and ScopeDesc data
1543 debug_info->copy_to(this);
1544
1545 // Create cache after PcDesc data is copied - it will be used to initialize cache
1546 _pc_desc_container = new PcDescContainer(scopes_pcs_begin());
1547
1548 #if INCLUDE_JVMCI
1549 if (compiler->is_jvmci()) {
1550 // Initialize the JVMCINMethodData object inlined into nm
1551 jvmci_nmethod_data()->copy(jvmci_data);
1552 }
1553 #endif
1554
1555 // Copy contents of ExceptionHandlerTable to nmethod
1556 handler_table->copy_to(this);
1557 nul_chk_table->copy_to(this);
1558
1559 #if INCLUDE_JVMCI
1560 // Copy speculations to nmethod
1561 if (speculations_size() != 0) {
1562 memcpy(speculations_begin(), speculations, speculations_len);
1563 }
1564 #endif
1565
1566 post_init();
1567
1568 // we use the information of entry points to find out if a method is
1569 // static or non static
1570 assert(compiler->is_c2() || compiler->is_jvmci() ||
1571 _method->is_static() == (entry_point() == verified_entry_point()),
1572 " entry points must be same for static methods and vice versa");
1573 }
1574 }
1575
1576 // Print a short set of xml attributes to identify this nmethod. The
1577 // output should be embedded in some other element.
1578 void nmethod::log_identity(xmlStream* log) const {
1579 log->print(" compile_id='%d'", compile_id());
1580 const char* nm_kind = compile_kind();
1581 if (nm_kind != nullptr) log->print(" compile_kind='%s'", nm_kind);
1582 log->print(" compiler='%s'", compiler_name());
1583 if (TieredCompilation) {
1584 log->print(" level='%d'", comp_level());
1585 }
1586 #if INCLUDE_JVMCI
1587 if (jvmci_nmethod_data() != nullptr) {
1588 const char* jvmci_name = jvmci_nmethod_data()->name();
1589 if (jvmci_name != nullptr) {
1590 log->print(" jvmci_mirror_name='");
1591 log->text("%s", jvmci_name);
1592 log->print("'");
1593 }
1594 }
1595 #endif
1596 }
1597
1598
1599 #define LOG_OFFSET(log, name) \
1600 if (p2i(name##_end()) - p2i(name##_begin())) \
1601 log->print(" " XSTR(name) "_offset='%zd'" , \
1602 p2i(name##_begin()) - p2i(this))
1603
1604
1605 void nmethod::log_new_nmethod() const {
1606 if (LogCompilation && xtty != nullptr) {
1607 ttyLocker ttyl;
1608 xtty->begin_elem("nmethod");
1609 log_identity(xtty);
1610 xtty->print(" entry='" INTPTR_FORMAT "' size='%d'", p2i(code_begin()), size());
1611 xtty->print(" address='" INTPTR_FORMAT "'", p2i(this));
1612
1613 LOG_OFFSET(xtty, relocation);
1614 LOG_OFFSET(xtty, consts);
1615 LOG_OFFSET(xtty, insts);
1616 LOG_OFFSET(xtty, stub);
1617 LOG_OFFSET(xtty, scopes_data);
1618 LOG_OFFSET(xtty, scopes_pcs);
1619 LOG_OFFSET(xtty, dependencies);
1620 LOG_OFFSET(xtty, handler_table);
1621 LOG_OFFSET(xtty, nul_chk_table);
1622 LOG_OFFSET(xtty, oops);
1623 LOG_OFFSET(xtty, metadata);
1624
1625 xtty->method(method());
1626 xtty->stamp();
1627 xtty->end_elem();
1628 }
1629 }
1630
1631 #undef LOG_OFFSET
1632
1633
1634 // Print out more verbose output usually for a newly created nmethod.
1635 void nmethod::print_on_with_msg(outputStream* st, const char* msg) const {
1636 if (st != nullptr) {
1637 ttyLocker ttyl;
1638 if (WizardMode) {
1639 CompileTask::print(st, this, msg, /*short_form:*/ true);
1640 st->print_cr(" (" INTPTR_FORMAT ")", p2i(this));
1641 } else {
1642 CompileTask::print(st, this, msg, /*short_form:*/ false);
1643 }
1644 }
1645 }
1646
1647 void nmethod::maybe_print_nmethod(const DirectiveSet* directive) {
1648 bool printnmethods = directive->PrintAssemblyOption || directive->PrintNMethodsOption;
1649 if (printnmethods || PrintDebugInfo || PrintRelocations || PrintDependencies || PrintExceptionHandlers) {
1650 print_nmethod(printnmethods);
1651 }
1652 }
1653
1654 void nmethod::print_nmethod(bool printmethod) {
1655 ttyLocker ttyl; // keep the following output all in one block
1656 if (xtty != nullptr) {
1657 xtty->begin_head("print_nmethod");
1658 log_identity(xtty);
1659 xtty->stamp();
1660 xtty->end_head();
1661 }
1662 // Print the header part, then print the requested information.
1663 // This is both handled in decode2().
1664 if (printmethod) {
1665 ResourceMark m;
1666 if (is_compiled_by_c1()) {
1667 tty->cr();
1668 tty->print_cr("============================= C1-compiled nmethod ==============================");
1669 }
1670 if (is_compiled_by_jvmci()) {
1671 tty->cr();
1672 tty->print_cr("=========================== JVMCI-compiled nmethod =============================");
1673 }
1674 tty->print_cr("----------------------------------- Assembly -----------------------------------");
1675 decode2(tty);
1676 #if defined(SUPPORT_DATA_STRUCTS)
1677 if (AbstractDisassembler::show_structs()) {
1678 // Print the oops from the underlying CodeBlob as well.
1679 tty->print_cr("- - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - ");
1680 print_oops(tty);
1681 tty->print_cr("- - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - ");
1682 print_metadata(tty);
1683 tty->print_cr("- - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - ");
1684 print_pcs_on(tty);
1685 tty->print_cr("- - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - ");
1686 if (oop_maps() != nullptr) {
1687 tty->print("oop maps:"); // oop_maps()->print_on(tty) outputs a cr() at the beginning
1688 oop_maps()->print_on(tty);
1689 tty->print_cr("- - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - ");
1690 }
1691 }
1692 #endif
1693 } else {
1694 print(); // print the header part only.
1695 }
1696
1697 #if defined(SUPPORT_DATA_STRUCTS)
1698 if (AbstractDisassembler::show_structs()) {
1699 methodHandle mh(Thread::current(), _method);
1700 if (printmethod || PrintDebugInfo || CompilerOracle::has_option(mh, CompileCommandEnum::PrintDebugInfo)) {
1701 print_scopes();
1702 tty->print_cr("- - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - ");
1703 }
1704 if (printmethod || PrintRelocations || CompilerOracle::has_option(mh, CompileCommandEnum::PrintRelocations)) {
1705 print_relocations();
1706 tty->print_cr("- - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - ");
1707 }
1708 if (printmethod || PrintDependencies || CompilerOracle::has_option(mh, CompileCommandEnum::PrintDependencies)) {
1709 print_dependencies_on(tty);
1710 tty->print_cr("- - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - ");
1711 }
1712 if (printmethod || PrintExceptionHandlers) {
1713 print_handler_table();
1714 tty->print_cr("- - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - ");
1715 print_nul_chk_table();
1716 tty->print_cr("- - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - ");
1717 }
1718
1719 if (printmethod) {
1720 print_recorded_oops();
1721 tty->print_cr("- - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - ");
1722 print_recorded_metadata();
1723 tty->print_cr("- - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - ");
1724 }
1725 }
1726 #endif
1727
1728 if (xtty != nullptr) {
1729 xtty->tail("print_nmethod");
1730 }
1731 }
1732
1733
1734 // Promote one word from an assembly-time handle to a live embedded oop.
1735 inline void nmethod::initialize_immediate_oop(oop* dest, jobject handle) {
1736 if (handle == nullptr ||
1737 // As a special case, IC oops are initialized to 1 or -1.
1738 handle == (jobject) Universe::non_oop_word()) {
1739 *(void**)dest = handle;
1740 } else {
1741 *dest = JNIHandles::resolve_non_null(handle);
1742 }
1743 }
1744
1745
1746 // Have to have the same name because it's called by a template
1747 void nmethod::copy_values(GrowableArray<jobject>* array) {
1748 int length = array->length();
1749 assert((address)(oops_begin() + length) <= (address)oops_end(), "oops big enough");
1750 oop* dest = oops_begin();
1751 for (int index = 0 ; index < length; index++) {
1752 initialize_immediate_oop(&dest[index], array->at(index));
1753 }
1754
1755 // Now we can fix up all the oops in the code. We need to do this
1756 // in the code because the assembler uses jobjects as placeholders.
1757 // The code and relocations have already been initialized by the
1758 // CodeBlob constructor, so it is valid even at this early point to
1759 // iterate over relocations and patch the code.
1760 fix_oop_relocations(nullptr, nullptr, /*initialize_immediates=*/ true);
1761 }
1762
1763 void nmethod::copy_values(GrowableArray<Metadata*>* array) {
1764 int length = array->length();
1765 assert((address)(metadata_begin() + length) <= (address)metadata_end(), "big enough");
1766 Metadata** dest = metadata_begin();
1767 for (int index = 0 ; index < length; index++) {
1768 dest[index] = array->at(index);
1769 }
1770 }
1771
1772 void nmethod::fix_oop_relocations(address begin, address end, bool initialize_immediates) {
1773 // re-patch all oop-bearing instructions, just in case some oops moved
1774 RelocIterator iter(this, begin, end);
1775 while (iter.next()) {
1776 if (iter.type() == relocInfo::oop_type) {
1777 oop_Relocation* reloc = iter.oop_reloc();
1778 if (initialize_immediates && reloc->oop_is_immediate()) {
1779 oop* dest = reloc->oop_addr();
1780 jobject obj = *reinterpret_cast<jobject*>(dest);
1781 initialize_immediate_oop(dest, obj);
1782 }
1783 // Refresh the oop-related bits of this instruction.
1784 reloc->fix_oop_relocation();
1785 } else if (iter.type() == relocInfo::metadata_type) {
1786 metadata_Relocation* reloc = iter.metadata_reloc();
1787 reloc->fix_metadata_relocation();
1788 }
1789 }
1790 }
1791
1792 static void install_post_call_nop_displacement(nmethod* nm, address pc) {
1793 NativePostCallNop* nop = nativePostCallNop_at((address) pc);
1794 intptr_t cbaddr = (intptr_t) nm;
1795 intptr_t offset = ((intptr_t) pc) - cbaddr;
1796
1797 int oopmap_slot = nm->oop_maps()->find_slot_for_offset(int((intptr_t) pc - (intptr_t) nm->code_begin()));
1798 if (oopmap_slot < 0) { // this can happen at asynchronous (non-safepoint) stackwalks
1799 log_debug(codecache)("failed to find oopmap for cb: " INTPTR_FORMAT " offset: %d", cbaddr, (int) offset);
1800 } else if (!nop->patch(oopmap_slot, offset)) {
1801 log_debug(codecache)("failed to encode %d %d", oopmap_slot, (int) offset);
1802 }
1803 }
1804
1805 void nmethod::finalize_relocations() {
1806 NoSafepointVerifier nsv;
1807
1808 GrowableArray<NativeMovConstReg*> virtual_call_data;
1809
1810 // Make sure that post call nops fill in nmethod offsets eagerly so
1811 // we don't have to race with deoptimization
1812 RelocIterator iter(this);
1813 while (iter.next()) {
1814 if (iter.type() == relocInfo::virtual_call_type) {
1815 virtual_call_Relocation* r = iter.virtual_call_reloc();
1816 NativeMovConstReg* value = nativeMovConstReg_at(r->cached_value());
1817 virtual_call_data.append(value);
1818 } else if (iter.type() == relocInfo::post_call_nop_type) {
1819 post_call_nop_Relocation* const reloc = iter.post_call_nop_reloc();
1820 address pc = reloc->addr();
1821 install_post_call_nop_displacement(this, pc);
1822 }
1823 }
1824
1825 if (virtual_call_data.length() > 0) {
1826 // We allocate a block of CompiledICData per nmethod so the GC can purge this faster.
1827 _compiled_ic_data = new CompiledICData[virtual_call_data.length()];
1828 CompiledICData* next_data = _compiled_ic_data;
1829
1830 for (NativeMovConstReg* value : virtual_call_data) {
1831 value->set_data((intptr_t)next_data);
1832 next_data++;
1833 }
1834 }
1835 }
1836
1837 void nmethod::make_deoptimized() {
1838 if (!Continuations::enabled()) {
1839 // Don't deopt this again.
1840 set_deoptimized_done();
1841 return;
1842 }
1843
1844 assert(method() == nullptr || can_be_deoptimized(), "");
1845
1846 CompiledICLocker ml(this);
1847 assert(CompiledICLocker::is_safe(this), "mt unsafe call");
1848
1849 // If post call nops have been already patched, we can just bail-out.
1850 if (has_been_deoptimized()) {
1851 return;
1852 }
1853
1854 ResourceMark rm;
1855 RelocIterator iter(this, oops_reloc_begin());
1856
1857 while (iter.next()) {
1858
1859 switch (iter.type()) {
1860 case relocInfo::virtual_call_type: {
1861 CompiledIC *ic = CompiledIC_at(&iter);
1862 address pc = ic->end_of_call();
1863 NativePostCallNop* nop = nativePostCallNop_at(pc);
1864 if (nop != nullptr) {
1865 nop->make_deopt();
1866 }
1867 assert(NativeDeoptInstruction::is_deopt_at(pc), "check");
1868 break;
1869 }
1870 case relocInfo::static_call_type:
1871 case relocInfo::opt_virtual_call_type: {
1872 CompiledDirectCall *csc = CompiledDirectCall::at(iter.reloc());
1873 address pc = csc->end_of_call();
1874 NativePostCallNop* nop = nativePostCallNop_at(pc);
1875 //tty->print_cr(" - static pc %p", pc);
1876 if (nop != nullptr) {
1877 nop->make_deopt();
1878 }
1879 // We can't assert here, there are some calls to stubs / runtime
1880 // that have reloc data and doesn't have a post call NOP.
1881 //assert(NativeDeoptInstruction::is_deopt_at(pc), "check");
1882 break;
1883 }
1884 default:
1885 break;
1886 }
1887 }
1888 // Don't deopt this again.
1889 set_deoptimized_done();
1890 }
1891
1892 void nmethod::verify_clean_inline_caches() {
1893 assert(CompiledICLocker::is_safe(this), "mt unsafe call");
1894
1895 ResourceMark rm;
1896 RelocIterator iter(this, oops_reloc_begin());
1897 while(iter.next()) {
1898 switch(iter.type()) {
1899 case relocInfo::virtual_call_type: {
1900 CompiledIC *ic = CompiledIC_at(&iter);
1901 CodeBlob *cb = CodeCache::find_blob(ic->destination());
1902 assert(cb != nullptr, "destination not in CodeBlob?");
1903 nmethod* nm = cb->as_nmethod_or_null();
1904 if (nm != nullptr) {
1905 // Verify that inline caches pointing to bad nmethods are clean
1906 if (!nm->is_in_use() || nm->is_unloading()) {
1907 assert(ic->is_clean(), "IC should be clean");
1908 }
1909 }
1910 break;
1911 }
1912 case relocInfo::static_call_type:
1913 case relocInfo::opt_virtual_call_type: {
1914 CompiledDirectCall *cdc = CompiledDirectCall::at(iter.reloc());
1915 CodeBlob *cb = CodeCache::find_blob(cdc->destination());
1916 assert(cb != nullptr, "destination not in CodeBlob?");
1917 nmethod* nm = cb->as_nmethod_or_null();
1918 if (nm != nullptr) {
1919 // Verify that inline caches pointing to bad nmethods are clean
1920 if (!nm->is_in_use() || nm->is_unloading() || nm->method()->code() != nm) {
1921 assert(cdc->is_clean(), "IC should be clean");
1922 }
1923 }
1924 break;
1925 }
1926 default:
1927 break;
1928 }
1929 }
1930 }
1931
1932 void nmethod::mark_as_maybe_on_stack() {
1933 Atomic::store(&_gc_epoch, CodeCache::gc_epoch());
1934 }
1935
1936 bool nmethod::is_maybe_on_stack() {
1937 // If the condition below is true, it means that the nmethod was found to
1938 // be alive the previous completed marking cycle.
1939 return Atomic::load(&_gc_epoch) >= CodeCache::previous_completed_gc_marking_cycle();
1940 }
1941
1942 void nmethod::inc_decompile_count() {
1943 if (!is_compiled_by_c2() && !is_compiled_by_jvmci()) return;
1944 // Could be gated by ProfileTraps, but do not bother...
1945 Method* m = method();
1946 if (m == nullptr) return;
1947 MethodData* mdo = m->method_data();
1948 if (mdo == nullptr) return;
1949 // There is a benign race here. See comments in methodData.hpp.
1950 mdo->inc_decompile_count();
1951 }
1952
1953 bool nmethod::try_transition(signed char new_state_int) {
1954 signed char new_state = new_state_int;
1955 assert_lock_strong(NMethodState_lock);
1956 signed char old_state = _state;
1957 if (old_state >= new_state) {
1958 // Ensure monotonicity of transitions.
1959 return false;
1960 }
1961 Atomic::store(&_state, new_state);
1962 return true;
1963 }
1964
1965 void nmethod::invalidate_osr_method() {
1966 assert(_entry_bci != InvocationEntryBci, "wrong kind of nmethod");
1967 // Remove from list of active nmethods
1968 if (method() != nullptr) {
1969 method()->method_holder()->remove_osr_nmethod(this);
1970 }
1971 }
1972
1973 void nmethod::log_state_change(InvalidationReason invalidation_reason) const {
1974 if (LogCompilation) {
1975 if (xtty != nullptr) {
1976 ttyLocker ttyl; // keep the following output all in one block
1977 xtty->begin_elem("make_not_entrant thread='%zu' reason='%s'",
1978 os::current_thread_id(), invalidation_reason_to_string(invalidation_reason));
1979 log_identity(xtty);
1980 xtty->stamp();
1981 xtty->end_elem();
1982 }
1983 }
1984
1985 ResourceMark rm;
1986 stringStream ss(NEW_RESOURCE_ARRAY(char, 256), 256);
1987 ss.print("made not entrant: %s", invalidation_reason_to_string(invalidation_reason));
1988
1989 CompileTask::print_ul(this, ss.freeze());
1990 if (PrintCompilation) {
1991 print_on_with_msg(tty, ss.freeze());
1992 }
1993 }
1994
1995 void nmethod::unlink_from_method() {
1996 if (method() != nullptr) {
1997 method()->unlink_code(this);
1998 }
1999 }
2000
2001 // Invalidate code
2002 bool nmethod::make_not_entrant(InvalidationReason invalidation_reason) {
2003 // This can be called while the system is already at a safepoint which is ok
2004 NoSafepointVerifier nsv;
2005
2006 if (is_unloading()) {
2007 // If the nmethod is unloading, then it is already not entrant through
2008 // the nmethod entry barriers. No need to do anything; GC will unload it.
2009 return false;
2010 }
2011
2012 if (Atomic::load(&_state) == not_entrant) {
2013 // Avoid taking the lock if already in required state.
2014 // This is safe from races because the state is an end-state,
2015 // which the nmethod cannot back out of once entered.
2016 // No need for fencing either.
2017 return false;
2018 }
2019
2020 {
2021 // Enter critical section. Does not block for safepoint.
2022 ConditionalMutexLocker ml(NMethodState_lock, !NMethodState_lock->owned_by_self(), Mutex::_no_safepoint_check_flag);
2023
2024 if (Atomic::load(&_state) == not_entrant) {
2025 // another thread already performed this transition so nothing
2026 // to do, but return false to indicate this.
2027 return false;
2028 }
2029
2030 if (is_osr_method()) {
2031 // This logic is equivalent to the logic below for patching the
2032 // verified entry point of regular methods.
2033 // this effectively makes the osr nmethod not entrant
2034 invalidate_osr_method();
2035 } else {
2036 // The caller can be calling the method statically or through an inline
2037 // cache call.
2038 NativeJump::patch_verified_entry(entry_point(), verified_entry_point(),
2039 SharedRuntime::get_handle_wrong_method_stub());
2040 }
2041
2042 if (update_recompile_counts()) {
2043 // Mark the method as decompiled.
2044 inc_decompile_count();
2045 }
2046
2047 BarrierSetNMethod* bs_nm = BarrierSet::barrier_set()->barrier_set_nmethod();
2048 if (bs_nm == nullptr || !bs_nm->supports_entry_barrier(this)) {
2049 // If nmethod entry barriers are not supported, we won't mark
2050 // nmethods as on-stack when they become on-stack. So we
2051 // degrade to a less accurate flushing strategy, for now.
2052 mark_as_maybe_on_stack();
2053 }
2054
2055 // Change state
2056 bool success = try_transition(not_entrant);
2057 assert(success, "Transition can't fail");
2058
2059 // Log the transition once
2060 log_state_change(invalidation_reason);
2061
2062 // Remove nmethod from method.
2063 unlink_from_method();
2064
2065 } // leave critical region under NMethodState_lock
2066
2067 #if INCLUDE_JVMCI
2068 // Invalidate can't occur while holding the NMethodState_lock
2069 JVMCINMethodData* nmethod_data = jvmci_nmethod_data();
2070 if (nmethod_data != nullptr) {
2071 nmethod_data->invalidate_nmethod_mirror(this, invalidation_reason);
2072 }
2073 #endif
2074
2075 #ifdef ASSERT
2076 if (is_osr_method() && method() != nullptr) {
2077 // Make sure osr nmethod is invalidated, i.e. not on the list
2078 bool found = method()->method_holder()->remove_osr_nmethod(this);
2079 assert(!found, "osr nmethod should have been invalidated");
2080 }
2081 #endif
2082
2083 return true;
2084 }
2085
2086 // For concurrent GCs, there must be a handshake between unlink and flush
2087 void nmethod::unlink() {
2088 if (is_unlinked()) {
2089 // Already unlinked.
2090 return;
2091 }
2092
2093 flush_dependencies();
2094
2095 // unlink_from_method will take the NMethodState_lock.
2096 // In this case we don't strictly need it when unlinking nmethods from
2097 // the Method, because it is only concurrently unlinked by
2098 // the entry barrier, which acquires the per nmethod lock.
2099 unlink_from_method();
2100
2101 if (is_osr_method()) {
2102 invalidate_osr_method();
2103 }
2104
2105 #if INCLUDE_JVMCI
2106 // Clear the link between this nmethod and a HotSpotNmethod mirror
2107 JVMCINMethodData* nmethod_data = jvmci_nmethod_data();
2108 if (nmethod_data != nullptr) {
2109 nmethod_data->invalidate_nmethod_mirror(this, is_cold() ?
2110 nmethod::InvalidationReason::UNLOADING_COLD :
2111 nmethod::InvalidationReason::UNLOADING);
2112 }
2113 #endif
2114
2115 // Post before flushing as jmethodID is being used
2116 post_compiled_method_unload();
2117
2118 // Register for flushing when it is safe. For concurrent class unloading,
2119 // that would be after the unloading handshake, and for STW class unloading
2120 // that would be when getting back to the VM thread.
2121 ClassUnloadingContext::context()->register_unlinked_nmethod(this);
2122 }
2123
2124 void nmethod::purge(bool unregister_nmethod) {
2125
2126 MutexLocker ml(CodeCache_lock, Mutex::_no_safepoint_check_flag);
2127
2128 // completely deallocate this method
2129 Events::log_nmethod_flush(Thread::current(), "flushing %s nmethod " INTPTR_FORMAT, is_osr_method() ? "osr" : "", p2i(this));
2130
2131 LogTarget(Debug, codecache) lt;
2132 if (lt.is_enabled()) {
2133 ResourceMark rm;
2134 LogStream ls(lt);
2135 const char* method_name = method()->name()->as_C_string();
2136 const size_t codecache_capacity = CodeCache::capacity()/1024;
2137 const size_t codecache_free_space = CodeCache::unallocated_capacity(CodeCache::get_code_blob_type(this))/1024;
2138 ls.print("Flushing nmethod %6d/" INTPTR_FORMAT ", level=%d, osr=%d, cold=%d, epoch=" UINT64_FORMAT ", cold_count=" UINT64_FORMAT ". "
2139 "Cache capacity: %zuKb, free space: %zuKb. method %s (%s)",
2140 _compile_id, p2i(this), _comp_level, is_osr_method(), is_cold(), _gc_epoch, CodeCache::cold_gc_count(),
2141 codecache_capacity, codecache_free_space, method_name, compiler_name());
2142 }
2143
2144 // We need to deallocate any ExceptionCache data.
2145 // Note that we do not need to grab the nmethod lock for this, it
2146 // better be thread safe if we're disposing of it!
2147 ExceptionCache* ec = exception_cache();
2148 while(ec != nullptr) {
2149 ExceptionCache* next = ec->next();
2150 delete ec;
2151 ec = next;
2152 }
2153 if (_pc_desc_container != nullptr) {
2154 delete _pc_desc_container;
2155 }
2156 delete[] _compiled_ic_data;
2157
2158 if (_immutable_data != blob_end()) {
2159 os::free(_immutable_data);
2160 _immutable_data = blob_end(); // Valid not null address
2161 }
2162 if (unregister_nmethod) {
2163 Universe::heap()->unregister_nmethod(this);
2164 }
2165 CodeCache::unregister_old_nmethod(this);
2166
2167 JVMCI_ONLY( _metadata_size = 0; )
2168 CodeBlob::purge();
2169 }
2170
2171 oop nmethod::oop_at(int index) const {
2172 if (index == 0) {
2173 return nullptr;
2174 }
2175
2176 BarrierSetNMethod* bs_nm = BarrierSet::barrier_set()->barrier_set_nmethod();
2177 return bs_nm->oop_load_no_keepalive(this, index);
2178 }
2179
2180 oop nmethod::oop_at_phantom(int index) const {
2181 if (index == 0) {
2182 return nullptr;
2183 }
2184
2185 BarrierSetNMethod* bs_nm = BarrierSet::barrier_set()->barrier_set_nmethod();
2186 return bs_nm->oop_load_phantom(this, index);
2187 }
2188
2189 //
2190 // Notify all classes this nmethod is dependent on that it is no
2191 // longer dependent.
2192
2193 void nmethod::flush_dependencies() {
2194 if (!has_flushed_dependencies()) {
2195 set_has_flushed_dependencies(true);
2196 for (Dependencies::DepStream deps(this); deps.next(); ) {
2197 if (deps.type() == Dependencies::call_site_target_value) {
2198 // CallSite dependencies are managed on per-CallSite instance basis.
2199 oop call_site = deps.argument_oop(0);
2200 MethodHandles::clean_dependency_context(call_site);
2201 } else {
2202 InstanceKlass* ik = deps.context_type();
2203 if (ik == nullptr) {
2204 continue; // ignore things like evol_method
2205 }
2206 // During GC liveness of dependee determines class that needs to be updated.
2207 // The GC may clean dependency contexts concurrently and in parallel.
2208 ik->clean_dependency_context();
2209 }
2210 }
2211 }
2212 }
2213
2214 void nmethod::post_compiled_method(CompileTask* task) {
2215 task->mark_success();
2216 task->set_nm_content_size(content_size());
2217 task->set_nm_insts_size(insts_size());
2218 task->set_nm_total_size(total_size());
2219
2220 // JVMTI -- compiled method notification (must be done outside lock)
2221 post_compiled_method_load_event();
2222
2223 if (CompilationLog::log() != nullptr) {
2224 CompilationLog::log()->log_nmethod(JavaThread::current(), this);
2225 }
2226
2227 const DirectiveSet* directive = task->directive();
2228 maybe_print_nmethod(directive);
2229 }
2230
2231 // ------------------------------------------------------------------
2232 // post_compiled_method_load_event
2233 // new method for install_code() path
2234 // Transfer information from compilation to jvmti
2235 void nmethod::post_compiled_method_load_event(JvmtiThreadState* state) {
2236 // This is a bad time for a safepoint. We don't want
2237 // this nmethod to get unloaded while we're queueing the event.
2238 NoSafepointVerifier nsv;
2239
2240 Method* m = method();
2241 HOTSPOT_COMPILED_METHOD_LOAD(
2242 (char *) m->klass_name()->bytes(),
2243 m->klass_name()->utf8_length(),
2244 (char *) m->name()->bytes(),
2245 m->name()->utf8_length(),
2246 (char *) m->signature()->bytes(),
2247 m->signature()->utf8_length(),
2248 insts_begin(), insts_size());
2249
2250
2251 if (JvmtiExport::should_post_compiled_method_load()) {
2252 // Only post unload events if load events are found.
2253 set_load_reported();
2254 // If a JavaThread hasn't been passed in, let the Service thread
2255 // (which is a real Java thread) post the event
2256 JvmtiDeferredEvent event = JvmtiDeferredEvent::compiled_method_load_event(this);
2257 if (state == nullptr) {
2258 // Execute any barrier code for this nmethod as if it's called, since
2259 // keeping it alive looks like stack walking.
2260 run_nmethod_entry_barrier();
2261 ServiceThread::enqueue_deferred_event(&event);
2262 } else {
2263 // This enters the nmethod barrier outside in the caller.
2264 state->enqueue_event(&event);
2265 }
2266 }
2267 }
2268
2269 void nmethod::post_compiled_method_unload() {
2270 assert(_method != nullptr, "just checking");
2271 DTRACE_METHOD_UNLOAD_PROBE(method());
2272
2273 // If a JVMTI agent has enabled the CompiledMethodUnload event then
2274 // post the event. The Method* will not be valid when this is freed.
2275
2276 // Don't bother posting the unload if the load event wasn't posted.
2277 if (load_reported() && JvmtiExport::should_post_compiled_method_unload()) {
2278 JvmtiDeferredEvent event =
2279 JvmtiDeferredEvent::compiled_method_unload_event(
2280 method()->jmethod_id(), insts_begin());
2281 ServiceThread::enqueue_deferred_event(&event);
2282 }
2283 }
2284
2285 // Iterate over metadata calling this function. Used by RedefineClasses
2286 void nmethod::metadata_do(MetadataClosure* f) {
2287 {
2288 // Visit all immediate references that are embedded in the instruction stream.
2289 RelocIterator iter(this, oops_reloc_begin());
2290 while (iter.next()) {
2291 if (iter.type() == relocInfo::metadata_type) {
2292 metadata_Relocation* r = iter.metadata_reloc();
2293 // In this metadata, we must only follow those metadatas directly embedded in
2294 // the code. Other metadatas (oop_index>0) are seen as part of
2295 // the metadata section below.
2296 assert(1 == (r->metadata_is_immediate()) +
2297 (r->metadata_addr() >= metadata_begin() && r->metadata_addr() < metadata_end()),
2298 "metadata must be found in exactly one place");
2299 if (r->metadata_is_immediate() && r->metadata_value() != nullptr) {
2300 Metadata* md = r->metadata_value();
2301 if (md != _method) f->do_metadata(md);
2302 }
2303 } else if (iter.type() == relocInfo::virtual_call_type) {
2304 // Check compiledIC holders associated with this nmethod
2305 ResourceMark rm;
2306 CompiledIC *ic = CompiledIC_at(&iter);
2307 ic->metadata_do(f);
2308 }
2309 }
2310 }
2311
2312 // Visit the metadata section
2313 for (Metadata** p = metadata_begin(); p < metadata_end(); p++) {
2314 if (*p == Universe::non_oop_word() || *p == nullptr) continue; // skip non-oops
2315 Metadata* md = *p;
2316 f->do_metadata(md);
2317 }
2318
2319 // Visit metadata not embedded in the other places.
2320 if (_method != nullptr) f->do_metadata(_method);
2321 }
2322
2323 // Heuristic for nuking nmethods even though their oops are live.
2324 // Main purpose is to reduce code cache pressure and get rid of
2325 // nmethods that don't seem to be all that relevant any longer.
2326 bool nmethod::is_cold() {
2327 if (!MethodFlushing || is_native_method() || is_not_installed()) {
2328 // No heuristic unloading at all
2329 return false;
2330 }
2331
2332 if (!is_maybe_on_stack() && is_not_entrant()) {
2333 // Not entrant nmethods that are not on any stack can just
2334 // be removed
2335 return true;
2336 }
2337
2338 BarrierSetNMethod* bs_nm = BarrierSet::barrier_set()->barrier_set_nmethod();
2339 if (bs_nm == nullptr || !bs_nm->supports_entry_barrier(this)) {
2340 // On platforms that don't support nmethod entry barriers, we can't
2341 // trust the temporal aspect of the gc epochs. So we can't detect
2342 // cold nmethods on such platforms.
2343 return false;
2344 }
2345
2346 if (!UseCodeCacheFlushing) {
2347 // Bail out if we don't heuristically remove nmethods
2348 return false;
2349 }
2350
2351 // Other code can be phased out more gradually after N GCs
2352 return CodeCache::previous_completed_gc_marking_cycle() > _gc_epoch + 2 * CodeCache::cold_gc_count();
2353 }
2354
2355 // The _is_unloading_state encodes a tuple comprising the unloading cycle
2356 // and the result of IsUnloadingBehaviour::is_unloading() for that cycle.
2357 // This is the bit layout of the _is_unloading_state byte: 00000CCU
2358 // CC refers to the cycle, which has 2 bits, and U refers to the result of
2359 // IsUnloadingBehaviour::is_unloading() for that unloading cycle.
2360
2361 class IsUnloadingState: public AllStatic {
2362 static const uint8_t _is_unloading_mask = 1;
2363 static const uint8_t _is_unloading_shift = 0;
2364 static const uint8_t _unloading_cycle_mask = 6;
2365 static const uint8_t _unloading_cycle_shift = 1;
2366
2367 static uint8_t set_is_unloading(uint8_t state, bool value) {
2368 state &= (uint8_t)~_is_unloading_mask;
2369 if (value) {
2370 state |= 1 << _is_unloading_shift;
2371 }
2372 assert(is_unloading(state) == value, "unexpected unloading cycle overflow");
2373 return state;
2374 }
2375
2376 static uint8_t set_unloading_cycle(uint8_t state, uint8_t value) {
2377 state &= (uint8_t)~_unloading_cycle_mask;
2378 state |= (uint8_t)(value << _unloading_cycle_shift);
2379 assert(unloading_cycle(state) == value, "unexpected unloading cycle overflow");
2380 return state;
2381 }
2382
2383 public:
2384 static bool is_unloading(uint8_t state) { return (state & _is_unloading_mask) >> _is_unloading_shift == 1; }
2385 static uint8_t unloading_cycle(uint8_t state) { return (state & _unloading_cycle_mask) >> _unloading_cycle_shift; }
2386
2387 static uint8_t create(bool is_unloading, uint8_t unloading_cycle) {
2388 uint8_t state = 0;
2389 state = set_is_unloading(state, is_unloading);
2390 state = set_unloading_cycle(state, unloading_cycle);
2391 return state;
2392 }
2393 };
2394
2395 bool nmethod::is_unloading() {
2396 uint8_t state = Atomic::load(&_is_unloading_state);
2397 bool state_is_unloading = IsUnloadingState::is_unloading(state);
2398 if (state_is_unloading) {
2399 return true;
2400 }
2401 uint8_t state_unloading_cycle = IsUnloadingState::unloading_cycle(state);
2402 uint8_t current_cycle = CodeCache::unloading_cycle();
2403 if (state_unloading_cycle == current_cycle) {
2404 return false;
2405 }
2406
2407 // The IsUnloadingBehaviour is responsible for calculating if the nmethod
2408 // should be unloaded. This can be either because there is a dead oop,
2409 // or because is_cold() heuristically determines it is time to unload.
2410 state_unloading_cycle = current_cycle;
2411 state_is_unloading = IsUnloadingBehaviour::is_unloading(this);
2412 uint8_t new_state = IsUnloadingState::create(state_is_unloading, state_unloading_cycle);
2413
2414 // Note that if an nmethod has dead oops, everyone will agree that the
2415 // nmethod is_unloading. However, the is_cold heuristics can yield
2416 // different outcomes, so we guard the computed result with a CAS
2417 // to ensure all threads have a shared view of whether an nmethod
2418 // is_unloading or not.
2419 uint8_t found_state = Atomic::cmpxchg(&_is_unloading_state, state, new_state, memory_order_relaxed);
2420
2421 if (found_state == state) {
2422 // First to change state, we win
2423 return state_is_unloading;
2424 } else {
2425 // State already set, so use it
2426 return IsUnloadingState::is_unloading(found_state);
2427 }
2428 }
2429
2430 void nmethod::clear_unloading_state() {
2431 uint8_t state = IsUnloadingState::create(false, CodeCache::unloading_cycle());
2432 Atomic::store(&_is_unloading_state, state);
2433 }
2434
2435
2436 // This is called at the end of the strong tracing/marking phase of a
2437 // GC to unload an nmethod if it contains otherwise unreachable
2438 // oops or is heuristically found to be not important.
2439 void nmethod::do_unloading(bool unloading_occurred) {
2440 // Make sure the oop's ready to receive visitors
2441 if (is_unloading()) {
2442 unlink();
2443 } else {
2444 unload_nmethod_caches(unloading_occurred);
2445 BarrierSetNMethod* bs_nm = BarrierSet::barrier_set()->barrier_set_nmethod();
2446 if (bs_nm != nullptr) {
2447 bs_nm->disarm(this);
2448 }
2449 }
2450 }
2451
2452 void nmethod::oops_do(OopClosure* f, bool allow_dead) {
2453 // Prevent extra code cache walk for platforms that don't have immediate oops.
2454 if (relocInfo::mustIterateImmediateOopsInCode()) {
2455 RelocIterator iter(this, oops_reloc_begin());
2456
2457 while (iter.next()) {
2458 if (iter.type() == relocInfo::oop_type ) {
2459 oop_Relocation* r = iter.oop_reloc();
2460 // In this loop, we must only follow those oops directly embedded in
2461 // the code. Other oops (oop_index>0) are seen as part of scopes_oops.
2462 assert(1 == (r->oop_is_immediate()) +
2463 (r->oop_addr() >= oops_begin() && r->oop_addr() < oops_end()),
2464 "oop must be found in exactly one place");
2465 if (r->oop_is_immediate() && r->oop_value() != nullptr) {
2466 f->do_oop(r->oop_addr());
2467 }
2468 }
2469 }
2470 }
2471
2472 // Scopes
2473 // This includes oop constants not inlined in the code stream.
2474 for (oop* p = oops_begin(); p < oops_end(); p++) {
2475 if (*p == Universe::non_oop_word()) continue; // skip non-oops
2476 f->do_oop(p);
2477 }
2478 }
2479
2480 void nmethod::follow_nmethod(OopIterateClosure* cl) {
2481 // Process oops in the nmethod
2482 oops_do(cl);
2483
2484 // CodeCache unloading support
2485 mark_as_maybe_on_stack();
2486
2487 BarrierSetNMethod* bs_nm = BarrierSet::barrier_set()->barrier_set_nmethod();
2488 bs_nm->disarm(this);
2489
2490 // There's an assumption made that this function is not used by GCs that
2491 // relocate objects, and therefore we don't call fix_oop_relocations.
2492 }
2493
2494 nmethod* volatile nmethod::_oops_do_mark_nmethods;
2495
2496 void nmethod::oops_do_log_change(const char* state) {
2497 LogTarget(Trace, gc, nmethod) lt;
2498 if (lt.is_enabled()) {
2499 LogStream ls(lt);
2500 CompileTask::print(&ls, this, state, true /* short_form */);
2501 }
2502 }
2503
2504 bool nmethod::oops_do_try_claim() {
2505 if (oops_do_try_claim_weak_request()) {
2506 nmethod* result = oops_do_try_add_to_list_as_weak_done();
2507 assert(result == nullptr, "adding to global list as weak done must always succeed.");
2508 return true;
2509 }
2510 return false;
2511 }
2512
2513 bool nmethod::oops_do_try_claim_weak_request() {
2514 assert(SafepointSynchronize::is_at_safepoint(), "only at safepoint");
2515
2516 if ((_oops_do_mark_link == nullptr) &&
2517 (Atomic::replace_if_null(&_oops_do_mark_link, mark_link(this, claim_weak_request_tag)))) {
2518 oops_do_log_change("oops_do, mark weak request");
2519 return true;
2520 }
2521 return false;
2522 }
2523
2524 void nmethod::oops_do_set_strong_done(nmethod* old_head) {
2525 _oops_do_mark_link = mark_link(old_head, claim_strong_done_tag);
2526 }
2527
2528 nmethod::oops_do_mark_link* nmethod::oops_do_try_claim_strong_done() {
2529 assert(SafepointSynchronize::is_at_safepoint(), "only at safepoint");
2530
2531 oops_do_mark_link* old_next = Atomic::cmpxchg(&_oops_do_mark_link, mark_link(nullptr, claim_weak_request_tag), mark_link(this, claim_strong_done_tag));
2532 if (old_next == nullptr) {
2533 oops_do_log_change("oops_do, mark strong done");
2534 }
2535 return old_next;
2536 }
2537
2538 nmethod::oops_do_mark_link* nmethod::oops_do_try_add_strong_request(nmethod::oops_do_mark_link* next) {
2539 assert(SafepointSynchronize::is_at_safepoint(), "only at safepoint");
2540 assert(next == mark_link(this, claim_weak_request_tag), "Should be claimed as weak");
2541
2542 oops_do_mark_link* old_next = Atomic::cmpxchg(&_oops_do_mark_link, next, mark_link(this, claim_strong_request_tag));
2543 if (old_next == next) {
2544 oops_do_log_change("oops_do, mark strong request");
2545 }
2546 return old_next;
2547 }
2548
2549 bool nmethod::oops_do_try_claim_weak_done_as_strong_done(nmethod::oops_do_mark_link* next) {
2550 assert(SafepointSynchronize::is_at_safepoint(), "only at safepoint");
2551 assert(extract_state(next) == claim_weak_done_tag, "Should be claimed as weak done");
2552
2553 oops_do_mark_link* old_next = Atomic::cmpxchg(&_oops_do_mark_link, next, mark_link(extract_nmethod(next), claim_strong_done_tag));
2554 if (old_next == next) {
2555 oops_do_log_change("oops_do, mark weak done -> mark strong done");
2556 return true;
2557 }
2558 return false;
2559 }
2560
2561 nmethod* nmethod::oops_do_try_add_to_list_as_weak_done() {
2562 assert(SafepointSynchronize::is_at_safepoint(), "only at safepoint");
2563
2564 assert(extract_state(_oops_do_mark_link) == claim_weak_request_tag ||
2565 extract_state(_oops_do_mark_link) == claim_strong_request_tag,
2566 "must be but is nmethod " PTR_FORMAT " %u", p2i(extract_nmethod(_oops_do_mark_link)), extract_state(_oops_do_mark_link));
2567
2568 nmethod* old_head = Atomic::xchg(&_oops_do_mark_nmethods, this);
2569 // Self-loop if needed.
2570 if (old_head == nullptr) {
2571 old_head = this;
2572 }
2573 // Try to install end of list and weak done tag.
2574 if (Atomic::cmpxchg(&_oops_do_mark_link, mark_link(this, claim_weak_request_tag), mark_link(old_head, claim_weak_done_tag)) == mark_link(this, claim_weak_request_tag)) {
2575 oops_do_log_change("oops_do, mark weak done");
2576 return nullptr;
2577 } else {
2578 return old_head;
2579 }
2580 }
2581
2582 void nmethod::oops_do_add_to_list_as_strong_done() {
2583 assert(SafepointSynchronize::is_at_safepoint(), "only at safepoint");
2584
2585 nmethod* old_head = Atomic::xchg(&_oops_do_mark_nmethods, this);
2586 // Self-loop if needed.
2587 if (old_head == nullptr) {
2588 old_head = this;
2589 }
2590 assert(_oops_do_mark_link == mark_link(this, claim_strong_done_tag), "must be but is nmethod " PTR_FORMAT " state %u",
2591 p2i(extract_nmethod(_oops_do_mark_link)), extract_state(_oops_do_mark_link));
2592
2593 oops_do_set_strong_done(old_head);
2594 }
2595
2596 void nmethod::oops_do_process_weak(OopsDoProcessor* p) {
2597 if (!oops_do_try_claim_weak_request()) {
2598 // Failed to claim for weak processing.
2599 oops_do_log_change("oops_do, mark weak request fail");
2600 return;
2601 }
2602
2603 p->do_regular_processing(this);
2604
2605 nmethod* old_head = oops_do_try_add_to_list_as_weak_done();
2606 if (old_head == nullptr) {
2607 return;
2608 }
2609 oops_do_log_change("oops_do, mark weak done fail");
2610 // Adding to global list failed, another thread added a strong request.
2611 assert(extract_state(_oops_do_mark_link) == claim_strong_request_tag,
2612 "must be but is %u", extract_state(_oops_do_mark_link));
2613
2614 oops_do_log_change("oops_do, mark weak request -> mark strong done");
2615
2616 oops_do_set_strong_done(old_head);
2617 // Do missing strong processing.
2618 p->do_remaining_strong_processing(this);
2619 }
2620
2621 void nmethod::oops_do_process_strong(OopsDoProcessor* p) {
2622 oops_do_mark_link* next_raw = oops_do_try_claim_strong_done();
2623 if (next_raw == nullptr) {
2624 p->do_regular_processing(this);
2625 oops_do_add_to_list_as_strong_done();
2626 return;
2627 }
2628 // Claim failed. Figure out why and handle it.
2629 if (oops_do_has_weak_request(next_raw)) {
2630 oops_do_mark_link* old = next_raw;
2631 // Claim failed because being weak processed (state == "weak request").
2632 // Try to request deferred strong processing.
2633 next_raw = oops_do_try_add_strong_request(old);
2634 if (next_raw == old) {
2635 // Successfully requested deferred strong processing.
2636 return;
2637 }
2638 // Failed because of a concurrent transition. No longer in "weak request" state.
2639 }
2640 if (oops_do_has_any_strong_state(next_raw)) {
2641 // Already claimed for strong processing or requested for such.
2642 return;
2643 }
2644 if (oops_do_try_claim_weak_done_as_strong_done(next_raw)) {
2645 // Successfully claimed "weak done" as "strong done". Do the missing marking.
2646 p->do_remaining_strong_processing(this);
2647 return;
2648 }
2649 // Claim failed, some other thread got it.
2650 }
2651
2652 void nmethod::oops_do_marking_prologue() {
2653 assert_at_safepoint();
2654
2655 log_trace(gc, nmethod)("oops_do_marking_prologue");
2656 assert(_oops_do_mark_nmethods == nullptr, "must be empty");
2657 }
2658
2659 void nmethod::oops_do_marking_epilogue() {
2660 assert_at_safepoint();
2661
2662 nmethod* next = _oops_do_mark_nmethods;
2663 _oops_do_mark_nmethods = nullptr;
2664 if (next != nullptr) {
2665 nmethod* cur;
2666 do {
2667 cur = next;
2668 next = extract_nmethod(cur->_oops_do_mark_link);
2669 cur->_oops_do_mark_link = nullptr;
2670 DEBUG_ONLY(cur->verify_oop_relocations());
2671
2672 LogTarget(Trace, gc, nmethod) lt;
2673 if (lt.is_enabled()) {
2674 LogStream ls(lt);
2675 CompileTask::print(&ls, cur, "oops_do, unmark", /*short_form:*/ true);
2676 }
2677 // End if self-loop has been detected.
2678 } while (cur != next);
2679 }
2680 log_trace(gc, nmethod)("oops_do_marking_epilogue");
2681 }
2682
2683 inline bool includes(void* p, void* from, void* to) {
2684 return from <= p && p < to;
2685 }
2686
2687
2688 void nmethod::copy_scopes_pcs(PcDesc* pcs, int count) {
2689 assert(count >= 2, "must be sentinel values, at least");
2690
2691 #ifdef ASSERT
2692 // must be sorted and unique; we do a binary search in find_pc_desc()
2693 int prev_offset = pcs[0].pc_offset();
2694 assert(prev_offset == PcDesc::lower_offset_limit,
2695 "must start with a sentinel");
2696 for (int i = 1; i < count; i++) {
2697 int this_offset = pcs[i].pc_offset();
2698 assert(this_offset > prev_offset, "offsets must be sorted");
2699 prev_offset = this_offset;
2700 }
2701 assert(prev_offset == PcDesc::upper_offset_limit,
2702 "must end with a sentinel");
2703 #endif //ASSERT
2704
2705 // Search for MethodHandle invokes and tag the nmethod.
2706 for (int i = 0; i < count; i++) {
2707 if (pcs[i].is_method_handle_invoke()) {
2708 set_has_method_handle_invokes(true);
2709 break;
2710 }
2711 }
2712 assert(has_method_handle_invokes() == (_deopt_mh_handler_offset != -1), "must have deopt mh handler");
2713
2714 int size = count * sizeof(PcDesc);
2715 assert(scopes_pcs_size() >= size, "oob");
2716 memcpy(scopes_pcs_begin(), pcs, size);
2717
2718 // Adjust the final sentinel downward.
2719 PcDesc* last_pc = &scopes_pcs_begin()[count-1];
2720 assert(last_pc->pc_offset() == PcDesc::upper_offset_limit, "sanity");
2721 last_pc->set_pc_offset(content_size() + 1);
2722 for (; last_pc + 1 < scopes_pcs_end(); last_pc += 1) {
2723 // Fill any rounding gaps with copies of the last record.
2724 last_pc[1] = last_pc[0];
2725 }
2726 // The following assert could fail if sizeof(PcDesc) is not
2727 // an integral multiple of oopSize (the rounding term).
2728 // If it fails, change the logic to always allocate a multiple
2729 // of sizeof(PcDesc), and fill unused words with copies of *last_pc.
2730 assert(last_pc + 1 == scopes_pcs_end(), "must match exactly");
2731 }
2732
2733 void nmethod::copy_scopes_data(u_char* buffer, int size) {
2734 assert(scopes_data_size() >= size, "oob");
2735 memcpy(scopes_data_begin(), buffer, size);
2736 }
2737
2738 #ifdef ASSERT
2739 static PcDesc* linear_search(int pc_offset, bool approximate, PcDesc* lower, PcDesc* upper) {
2740 PcDesc* res = nullptr;
2741 assert(lower != nullptr && lower->pc_offset() == PcDesc::lower_offset_limit,
2742 "must start with a sentinel");
2743 // lower + 1 to exclude initial sentinel
2744 for (PcDesc* p = lower + 1; p < upper; p++) {
2745 NOT_PRODUCT(--pc_nmethod_stats.pc_desc_tests); // don't count this call to match_desc
2746 if (match_desc(p, pc_offset, approximate)) {
2747 if (res == nullptr) {
2748 res = p;
2749 } else {
2750 res = (PcDesc*) badAddress;
2751 }
2752 }
2753 }
2754 return res;
2755 }
2756 #endif
2757
2758
2759 #ifndef PRODUCT
2760 // Version of method to collect statistic
2761 PcDesc* PcDescContainer::find_pc_desc(address pc, bool approximate, address code_begin,
2762 PcDesc* lower, PcDesc* upper) {
2763 ++pc_nmethod_stats.pc_desc_queries;
2764 if (approximate) ++pc_nmethod_stats.pc_desc_approx;
2765
2766 PcDesc* desc = _pc_desc_cache.last_pc_desc();
2767 assert(desc != nullptr, "PcDesc cache should be initialized already");
2768 if (desc->pc_offset() == (pc - code_begin)) {
2769 // Cached value matched
2770 ++pc_nmethod_stats.pc_desc_tests;
2771 ++pc_nmethod_stats.pc_desc_repeats;
2772 return desc;
2773 }
2774 return find_pc_desc_internal(pc, approximate, code_begin, lower, upper);
2775 }
2776 #endif
2777
2778 // Finds a PcDesc with real-pc equal to "pc"
2779 PcDesc* PcDescContainer::find_pc_desc_internal(address pc, bool approximate, address code_begin,
2780 PcDesc* lower_incl, PcDesc* upper_incl) {
2781 if ((pc < code_begin) ||
2782 (pc - code_begin) >= (ptrdiff_t) PcDesc::upper_offset_limit) {
2783 return nullptr; // PC is wildly out of range
2784 }
2785 int pc_offset = (int) (pc - code_begin);
2786
2787 // Check the PcDesc cache if it contains the desired PcDesc
2788 // (This as an almost 100% hit rate.)
2789 PcDesc* res = _pc_desc_cache.find_pc_desc(pc_offset, approximate);
2790 if (res != nullptr) {
2791 assert(res == linear_search(pc_offset, approximate, lower_incl, upper_incl), "cache ok");
2792 return res;
2793 }
2794
2795 // Fallback algorithm: quasi-linear search for the PcDesc
2796 // Find the last pc_offset less than the given offset.
2797 // The successor must be the required match, if there is a match at all.
2798 // (Use a fixed radix to avoid expensive affine pointer arithmetic.)
2799 PcDesc* lower = lower_incl; // this is initial sentinel
2800 PcDesc* upper = upper_incl - 1; // exclude final sentinel
2801 if (lower >= upper) return nullptr; // no PcDescs at all
2802
2803 #define assert_LU_OK \
2804 /* invariant on lower..upper during the following search: */ \
2805 assert(lower->pc_offset() < pc_offset, "sanity"); \
2806 assert(upper->pc_offset() >= pc_offset, "sanity")
2807 assert_LU_OK;
2808
2809 // Use the last successful return as a split point.
2810 PcDesc* mid = _pc_desc_cache.last_pc_desc();
2811 NOT_PRODUCT(++pc_nmethod_stats.pc_desc_searches);
2812 if (mid->pc_offset() < pc_offset) {
2813 lower = mid;
2814 } else {
2815 upper = mid;
2816 }
2817
2818 // Take giant steps at first (4096, then 256, then 16, then 1)
2819 const int LOG2_RADIX = 4 /*smaller steps in debug mode:*/ DEBUG_ONLY(-1);
2820 const int RADIX = (1 << LOG2_RADIX);
2821 for (int step = (1 << (LOG2_RADIX*3)); step > 1; step >>= LOG2_RADIX) {
2822 while ((mid = lower + step) < upper) {
2823 assert_LU_OK;
2824 NOT_PRODUCT(++pc_nmethod_stats.pc_desc_searches);
2825 if (mid->pc_offset() < pc_offset) {
2826 lower = mid;
2827 } else {
2828 upper = mid;
2829 break;
2830 }
2831 }
2832 assert_LU_OK;
2833 }
2834
2835 // Sneak up on the value with a linear search of length ~16.
2836 while (true) {
2837 assert_LU_OK;
2838 mid = lower + 1;
2839 NOT_PRODUCT(++pc_nmethod_stats.pc_desc_searches);
2840 if (mid->pc_offset() < pc_offset) {
2841 lower = mid;
2842 } else {
2843 upper = mid;
2844 break;
2845 }
2846 }
2847 #undef assert_LU_OK
2848
2849 if (match_desc(upper, pc_offset, approximate)) {
2850 assert(upper == linear_search(pc_offset, approximate, lower_incl, upper_incl), "search mismatch");
2851 if (!Thread::current_in_asgct()) {
2852 // we don't want to modify the cache if we're in ASGCT
2853 // which is typically called in a signal handler
2854 _pc_desc_cache.add_pc_desc(upper);
2855 }
2856 return upper;
2857 } else {
2858 assert(nullptr == linear_search(pc_offset, approximate, lower_incl, upper_incl), "search mismatch");
2859 return nullptr;
2860 }
2861 }
2862
2863 bool nmethod::check_dependency_on(DepChange& changes) {
2864 // What has happened:
2865 // 1) a new class dependee has been added
2866 // 2) dependee and all its super classes have been marked
2867 bool found_check = false; // set true if we are upset
2868 for (Dependencies::DepStream deps(this); deps.next(); ) {
2869 // Evaluate only relevant dependencies.
2870 if (deps.spot_check_dependency_at(changes) != nullptr) {
2871 found_check = true;
2872 NOT_DEBUG(break);
2873 }
2874 }
2875 return found_check;
2876 }
2877
2878 // Called from mark_for_deoptimization, when dependee is invalidated.
2879 bool nmethod::is_dependent_on_method(Method* dependee) {
2880 for (Dependencies::DepStream deps(this); deps.next(); ) {
2881 if (deps.type() != Dependencies::evol_method)
2882 continue;
2883 Method* method = deps.method_argument(0);
2884 if (method == dependee) return true;
2885 }
2886 return false;
2887 }
2888
2889 void nmethod_init() {
2890 // make sure you didn't forget to adjust the filler fields
2891 assert(sizeof(nmethod) % oopSize == 0, "nmethod size must be multiple of a word");
2892 }
2893
2894 // -----------------------------------------------------------------------------
2895 // Verification
2896
2897 class VerifyOopsClosure: public OopClosure {
2898 nmethod* _nm;
2899 bool _ok;
2900 public:
2901 VerifyOopsClosure(nmethod* nm) : _nm(nm), _ok(true) { }
2902 bool ok() { return _ok; }
2903 virtual void do_oop(oop* p) {
2904 if (oopDesc::is_oop_or_null(*p)) return;
2905 // Print diagnostic information before calling print_nmethod().
2906 // Assertions therein might prevent call from returning.
2907 tty->print_cr("*** non-oop " PTR_FORMAT " found at " PTR_FORMAT " (offset %d)",
2908 p2i(*p), p2i(p), (int)((intptr_t)p - (intptr_t)_nm));
2909 if (_ok) {
2910 _nm->print_nmethod(true);
2911 _ok = false;
2912 }
2913 }
2914 virtual void do_oop(narrowOop* p) { ShouldNotReachHere(); }
2915 };
2916
2917 class VerifyMetadataClosure: public MetadataClosure {
2918 public:
2919 void do_metadata(Metadata* md) {
2920 if (md->is_method()) {
2921 Method* method = (Method*)md;
2922 assert(!method->is_old(), "Should not be installing old methods");
2923 }
2924 }
2925 };
2926
2927
2928 void nmethod::verify() {
2929 if (is_not_entrant())
2930 return;
2931
2932 // Make sure all the entry points are correctly aligned for patching.
2933 NativeJump::check_verified_entry_alignment(entry_point(), verified_entry_point());
2934
2935 // assert(oopDesc::is_oop(method()), "must be valid");
2936
2937 ResourceMark rm;
2938
2939 if (!CodeCache::contains(this)) {
2940 fatal("nmethod at " INTPTR_FORMAT " not in zone", p2i(this));
2941 }
2942
2943 if(is_native_method() )
2944 return;
2945
2946 nmethod* nm = CodeCache::find_nmethod(verified_entry_point());
2947 if (nm != this) {
2948 fatal("find_nmethod did not find this nmethod (" INTPTR_FORMAT ")", p2i(this));
2949 }
2950
2951 for (PcDesc* p = scopes_pcs_begin(); p < scopes_pcs_end(); p++) {
2952 if (! p->verify(this)) {
2953 tty->print_cr("\t\tin nmethod at " INTPTR_FORMAT " (pcs)", p2i(this));
2954 }
2955 }
2956
2957 #ifdef ASSERT
2958 #if INCLUDE_JVMCI
2959 {
2960 // Verify that implicit exceptions that deoptimize have a PcDesc and OopMap
2961 ImmutableOopMapSet* oms = oop_maps();
2962 ImplicitExceptionTable implicit_table(this);
2963 for (uint i = 0; i < implicit_table.len(); i++) {
2964 int exec_offset = (int) implicit_table.get_exec_offset(i);
2965 if (implicit_table.get_exec_offset(i) == implicit_table.get_cont_offset(i)) {
2966 assert(pc_desc_at(code_begin() + exec_offset) != nullptr, "missing PcDesc");
2967 bool found = false;
2968 for (int i = 0, imax = oms->count(); i < imax; i++) {
2969 if (oms->pair_at(i)->pc_offset() == exec_offset) {
2970 found = true;
2971 break;
2972 }
2973 }
2974 assert(found, "missing oopmap");
2975 }
2976 }
2977 }
2978 #endif
2979 #endif
2980
2981 VerifyOopsClosure voc(this);
2982 oops_do(&voc);
2983 assert(voc.ok(), "embedded oops must be OK");
2984 Universe::heap()->verify_nmethod(this);
2985
2986 assert(_oops_do_mark_link == nullptr, "_oops_do_mark_link for %s should be nullptr but is " PTR_FORMAT,
2987 nm->method()->external_name(), p2i(_oops_do_mark_link));
2988 verify_scopes();
2989
2990 CompiledICLocker nm_verify(this);
2991 VerifyMetadataClosure vmc;
2992 metadata_do(&vmc);
2993 }
2994
2995
2996 void nmethod::verify_interrupt_point(address call_site, bool is_inline_cache) {
2997
2998 // Verify IC only when nmethod installation is finished.
2999 if (!is_not_installed()) {
3000 if (CompiledICLocker::is_safe(this)) {
3001 if (is_inline_cache) {
3002 CompiledIC_at(this, call_site);
3003 } else {
3004 CompiledDirectCall::at(call_site);
3005 }
3006 } else {
3007 CompiledICLocker ml_verify(this);
3008 if (is_inline_cache) {
3009 CompiledIC_at(this, call_site);
3010 } else {
3011 CompiledDirectCall::at(call_site);
3012 }
3013 }
3014 }
3015
3016 HandleMark hm(Thread::current());
3017
3018 PcDesc* pd = pc_desc_at(nativeCall_at(call_site)->return_address());
3019 assert(pd != nullptr, "PcDesc must exist");
3020 for (ScopeDesc* sd = new ScopeDesc(this, pd);
3021 !sd->is_top(); sd = sd->sender()) {
3022 sd->verify();
3023 }
3024 }
3025
3026 void nmethod::verify_scopes() {
3027 if( !method() ) return; // Runtime stubs have no scope
3028 if (method()->is_native()) return; // Ignore stub methods.
3029 // iterate through all interrupt point
3030 // and verify the debug information is valid.
3031 RelocIterator iter(this);
3032 while (iter.next()) {
3033 address stub = nullptr;
3034 switch (iter.type()) {
3035 case relocInfo::virtual_call_type:
3036 verify_interrupt_point(iter.addr(), true /* is_inline_cache */);
3037 break;
3038 case relocInfo::opt_virtual_call_type:
3039 stub = iter.opt_virtual_call_reloc()->static_stub();
3040 verify_interrupt_point(iter.addr(), false /* is_inline_cache */);
3041 break;
3042 case relocInfo::static_call_type:
3043 stub = iter.static_call_reloc()->static_stub();
3044 verify_interrupt_point(iter.addr(), false /* is_inline_cache */);
3045 break;
3046 case relocInfo::runtime_call_type:
3047 case relocInfo::runtime_call_w_cp_type: {
3048 address destination = iter.reloc()->value();
3049 // Right now there is no way to find out which entries support
3050 // an interrupt point. It would be nice if we had this
3051 // information in a table.
3052 break;
3053 }
3054 default:
3055 break;
3056 }
3057 assert(stub == nullptr || stub_contains(stub), "static call stub outside stub section");
3058 }
3059 }
3060
3061
3062 // -----------------------------------------------------------------------------
3063 // Printing operations
3064
3065 void nmethod::print_on_impl(outputStream* st) const {
3066 ResourceMark rm;
3067
3068 st->print("Compiled method ");
3069
3070 if (is_compiled_by_c1()) {
3071 st->print("(c1) ");
3072 } else if (is_compiled_by_c2()) {
3073 st->print("(c2) ");
3074 } else if (is_compiled_by_jvmci()) {
3075 st->print("(JVMCI) ");
3076 } else {
3077 st->print("(n/a) ");
3078 }
3079
3080 print_on_with_msg(st, nullptr);
3081
3082 if (WizardMode) {
3083 st->print("((nmethod*) " INTPTR_FORMAT ") ", p2i(this));
3084 st->print(" for method " INTPTR_FORMAT , p2i(method()));
3085 st->print(" { ");
3086 st->print_cr("%s ", state());
3087 st->print_cr("}:");
3088 }
3089 if (size () > 0) st->print_cr(" total in heap [" INTPTR_FORMAT "," INTPTR_FORMAT "] = %d",
3090 p2i(this),
3091 p2i(this) + size(),
3092 size());
3093 if (consts_size () > 0) st->print_cr(" constants [" INTPTR_FORMAT "," INTPTR_FORMAT "] = %d",
3094 p2i(consts_begin()),
3095 p2i(consts_end()),
3096 consts_size());
3097 if (insts_size () > 0) st->print_cr(" main code [" INTPTR_FORMAT "," INTPTR_FORMAT "] = %d",
3098 p2i(insts_begin()),
3099 p2i(insts_end()),
3100 insts_size());
3101 if (stub_size () > 0) st->print_cr(" stub code [" INTPTR_FORMAT "," INTPTR_FORMAT "] = %d",
3102 p2i(stub_begin()),
3103 p2i(stub_end()),
3104 stub_size());
3105 if (oops_size () > 0) st->print_cr(" oops [" INTPTR_FORMAT "," INTPTR_FORMAT "] = %d",
3106 p2i(oops_begin()),
3107 p2i(oops_end()),
3108 oops_size());
3109 if (mutable_data_size() > 0) st->print_cr(" mutable data [" INTPTR_FORMAT "," INTPTR_FORMAT "] = %d",
3110 p2i(mutable_data_begin()),
3111 p2i(mutable_data_end()),
3112 mutable_data_size());
3113 if (relocation_size() > 0) st->print_cr(" relocation [" INTPTR_FORMAT "," INTPTR_FORMAT "] = %d",
3114 p2i(relocation_begin()),
3115 p2i(relocation_end()),
3116 relocation_size());
3117 if (metadata_size () > 0) st->print_cr(" metadata [" INTPTR_FORMAT "," INTPTR_FORMAT "] = %d",
3118 p2i(metadata_begin()),
3119 p2i(metadata_end()),
3120 metadata_size());
3121 #if INCLUDE_JVMCI
3122 if (jvmci_data_size () > 0) st->print_cr(" JVMCI data [" INTPTR_FORMAT "," INTPTR_FORMAT "] = %d",
3123 p2i(jvmci_data_begin()),
3124 p2i(jvmci_data_end()),
3125 jvmci_data_size());
3126 #endif
3127 if (immutable_data_size() > 0) st->print_cr(" immutable data [" INTPTR_FORMAT "," INTPTR_FORMAT "] = %d",
3128 p2i(immutable_data_begin()),
3129 p2i(immutable_data_end()),
3130 immutable_data_size());
3131 if (dependencies_size () > 0) st->print_cr(" dependencies [" INTPTR_FORMAT "," INTPTR_FORMAT "] = %d",
3132 p2i(dependencies_begin()),
3133 p2i(dependencies_end()),
3134 dependencies_size());
3135 if (nul_chk_table_size() > 0) st->print_cr(" nul chk table [" INTPTR_FORMAT "," INTPTR_FORMAT "] = %d",
3136 p2i(nul_chk_table_begin()),
3137 p2i(nul_chk_table_end()),
3138 nul_chk_table_size());
3139 if (handler_table_size() > 0) st->print_cr(" handler table [" INTPTR_FORMAT "," INTPTR_FORMAT "] = %d",
3140 p2i(handler_table_begin()),
3141 p2i(handler_table_end()),
3142 handler_table_size());
3143 if (scopes_pcs_size () > 0) st->print_cr(" scopes pcs [" INTPTR_FORMAT "," INTPTR_FORMAT "] = %d",
3144 p2i(scopes_pcs_begin()),
3145 p2i(scopes_pcs_end()),
3146 scopes_pcs_size());
3147 if (scopes_data_size () > 0) st->print_cr(" scopes data [" INTPTR_FORMAT "," INTPTR_FORMAT "] = %d",
3148 p2i(scopes_data_begin()),
3149 p2i(scopes_data_end()),
3150 scopes_data_size());
3151 #if INCLUDE_JVMCI
3152 if (speculations_size () > 0) st->print_cr(" speculations [" INTPTR_FORMAT "," INTPTR_FORMAT "] = %d",
3153 p2i(speculations_begin()),
3154 p2i(speculations_end()),
3155 speculations_size());
3156 #endif
3157 }
3158
3159 void nmethod::print_code() {
3160 ResourceMark m;
3161 ttyLocker ttyl;
3162 // Call the specialized decode method of this class.
3163 decode(tty);
3164 }
3165
3166 #ifndef PRODUCT // called InstanceKlass methods are available only then. Declared as PRODUCT_RETURN
3167
3168 void nmethod::print_dependencies_on(outputStream* out) {
3169 ResourceMark rm;
3170 stringStream st;
3171 st.print_cr("Dependencies:");
3172 for (Dependencies::DepStream deps(this); deps.next(); ) {
3173 deps.print_dependency(&st);
3174 InstanceKlass* ctxk = deps.context_type();
3175 if (ctxk != nullptr) {
3176 if (ctxk->is_dependent_nmethod(this)) {
3177 st.print_cr(" [nmethod<=klass]%s", ctxk->external_name());
3178 }
3179 }
3180 deps.log_dependency(); // put it into the xml log also
3181 }
3182 out->print_raw(st.as_string());
3183 }
3184 #endif
3185
3186 #if defined(SUPPORT_DATA_STRUCTS)
3187
3188 // Print the oops from the underlying CodeBlob.
3189 void nmethod::print_oops(outputStream* st) {
3190 ResourceMark m;
3191 st->print("Oops:");
3192 if (oops_begin() < oops_end()) {
3193 st->cr();
3194 for (oop* p = oops_begin(); p < oops_end(); p++) {
3195 Disassembler::print_location((unsigned char*)p, (unsigned char*)oops_begin(), (unsigned char*)oops_end(), st, true, false);
3196 st->print(PTR_FORMAT " ", *((uintptr_t*)p));
3197 if (Universe::contains_non_oop_word(p)) {
3198 st->print_cr("NON_OOP");
3199 continue; // skip non-oops
3200 }
3201 if (*p == nullptr) {
3202 st->print_cr("nullptr-oop");
3203 continue; // skip non-oops
3204 }
3205 (*p)->print_value_on(st);
3206 st->cr();
3207 }
3208 } else {
3209 st->print_cr(" <list empty>");
3210 }
3211 }
3212
3213 // Print metadata pool.
3214 void nmethod::print_metadata(outputStream* st) {
3215 ResourceMark m;
3216 st->print("Metadata:");
3217 if (metadata_begin() < metadata_end()) {
3218 st->cr();
3219 for (Metadata** p = metadata_begin(); p < metadata_end(); p++) {
3220 Disassembler::print_location((unsigned char*)p, (unsigned char*)metadata_begin(), (unsigned char*)metadata_end(), st, true, false);
3221 st->print(PTR_FORMAT " ", *((uintptr_t*)p));
3222 if (*p && *p != Universe::non_oop_word()) {
3223 (*p)->print_value_on(st);
3224 }
3225 st->cr();
3226 }
3227 } else {
3228 st->print_cr(" <list empty>");
3229 }
3230 }
3231
3232 #ifndef PRODUCT // ScopeDesc::print_on() is available only then. Declared as PRODUCT_RETURN
3233 void nmethod::print_scopes_on(outputStream* st) {
3234 // Find the first pc desc for all scopes in the code and print it.
3235 ResourceMark rm;
3236 st->print("scopes:");
3237 if (scopes_pcs_begin() < scopes_pcs_end()) {
3238 st->cr();
3239 for (PcDesc* p = scopes_pcs_begin(); p < scopes_pcs_end(); p++) {
3240 if (p->scope_decode_offset() == DebugInformationRecorder::serialized_null)
3241 continue;
3242
3243 ScopeDesc* sd = scope_desc_at(p->real_pc(this));
3244 while (sd != nullptr) {
3245 sd->print_on(st, p); // print output ends with a newline
3246 sd = sd->sender();
3247 }
3248 }
3249 } else {
3250 st->print_cr(" <list empty>");
3251 }
3252 }
3253 #endif
3254
3255 #ifndef PRODUCT // RelocIterator does support printing only then.
3256 void nmethod::print_relocations() {
3257 ResourceMark m; // in case methods get printed via the debugger
3258 tty->print_cr("relocations:");
3259 RelocIterator iter(this);
3260 iter.print_on(tty);
3261 }
3262 #endif
3263
3264 void nmethod::print_pcs_on(outputStream* st) {
3265 ResourceMark m; // in case methods get printed via debugger
3266 st->print("pc-bytecode offsets:");
3267 if (scopes_pcs_begin() < scopes_pcs_end()) {
3268 st->cr();
3269 for (PcDesc* p = scopes_pcs_begin(); p < scopes_pcs_end(); p++) {
3270 p->print_on(st, this); // print output ends with a newline
3271 }
3272 } else {
3273 st->print_cr(" <list empty>");
3274 }
3275 }
3276
3277 void nmethod::print_handler_table() {
3278 ExceptionHandlerTable(this).print(code_begin());
3279 }
3280
3281 void nmethod::print_nul_chk_table() {
3282 ImplicitExceptionTable(this).print(code_begin());
3283 }
3284
3285 void nmethod::print_recorded_oop(int log_n, int i) {
3286 void* value;
3287
3288 if (i == 0) {
3289 value = nullptr;
3290 } else {
3291 // Be careful around non-oop words. Don't create an oop
3292 // with that value, or it will assert in verification code.
3293 if (Universe::contains_non_oop_word(oop_addr_at(i))) {
3294 value = Universe::non_oop_word();
3295 } else {
3296 value = oop_at(i);
3297 }
3298 }
3299
3300 tty->print("#%*d: " INTPTR_FORMAT " ", log_n, i, p2i(value));
3301
3302 if (value == Universe::non_oop_word()) {
3303 tty->print("non-oop word");
3304 } else {
3305 if (value == nullptr) {
3306 tty->print("nullptr-oop");
3307 } else {
3308 oop_at(i)->print_value_on(tty);
3309 }
3310 }
3311
3312 tty->cr();
3313 }
3314
3315 void nmethod::print_recorded_oops() {
3316 const int n = oops_count();
3317 const int log_n = (n<10) ? 1 : (n<100) ? 2 : (n<1000) ? 3 : (n<10000) ? 4 : 6;
3318 tty->print("Recorded oops:");
3319 if (n > 0) {
3320 tty->cr();
3321 for (int i = 0; i < n; i++) {
3322 print_recorded_oop(log_n, i);
3323 }
3324 } else {
3325 tty->print_cr(" <list empty>");
3326 }
3327 }
3328
3329 void nmethod::print_recorded_metadata() {
3330 const int n = metadata_count();
3331 const int log_n = (n<10) ? 1 : (n<100) ? 2 : (n<1000) ? 3 : (n<10000) ? 4 : 6;
3332 tty->print("Recorded metadata:");
3333 if (n > 0) {
3334 tty->cr();
3335 for (int i = 0; i < n; i++) {
3336 Metadata* m = metadata_at(i);
3337 tty->print("#%*d: " INTPTR_FORMAT " ", log_n, i, p2i(m));
3338 if (m == (Metadata*)Universe::non_oop_word()) {
3339 tty->print("non-metadata word");
3340 } else if (m == nullptr) {
3341 tty->print("nullptr-oop");
3342 } else {
3343 Metadata::print_value_on_maybe_null(tty, m);
3344 }
3345 tty->cr();
3346 }
3347 } else {
3348 tty->print_cr(" <list empty>");
3349 }
3350 }
3351 #endif
3352
3353 #if defined(SUPPORT_ASSEMBLY) || defined(SUPPORT_ABSTRACT_ASSEMBLY)
3354
3355 void nmethod::print_constant_pool(outputStream* st) {
3356 //-----------------------------------
3357 //---< Print the constant pool >---
3358 //-----------------------------------
3359 int consts_size = this->consts_size();
3360 if ( consts_size > 0 ) {
3361 unsigned char* cstart = this->consts_begin();
3362 unsigned char* cp = cstart;
3363 unsigned char* cend = cp + consts_size;
3364 unsigned int bytes_per_line = 4;
3365 unsigned int CP_alignment = 8;
3366 unsigned int n;
3367
3368 st->cr();
3369
3370 //---< print CP header to make clear what's printed >---
3371 if( ((uintptr_t)cp&(CP_alignment-1)) == 0 ) {
3372 n = bytes_per_line;
3373 st->print_cr("[Constant Pool]");
3374 Disassembler::print_location(cp, cstart, cend, st, true, true);
3375 Disassembler::print_hexdata(cp, n, st, true);
3376 st->cr();
3377 } else {
3378 n = (int)((uintptr_t)cp & (bytes_per_line-1));
3379 st->print_cr("[Constant Pool (unaligned)]");
3380 }
3381
3382 //---< print CP contents, bytes_per_line at a time >---
3383 while (cp < cend) {
3384 Disassembler::print_location(cp, cstart, cend, st, true, false);
3385 Disassembler::print_hexdata(cp, n, st, false);
3386 cp += n;
3387 n = bytes_per_line;
3388 st->cr();
3389 }
3390
3391 //---< Show potential alignment gap between constant pool and code >---
3392 cend = code_begin();
3393 if( cp < cend ) {
3394 n = 4;
3395 st->print_cr("[Code entry alignment]");
3396 while (cp < cend) {
3397 Disassembler::print_location(cp, cstart, cend, st, false, false);
3398 cp += n;
3399 st->cr();
3400 }
3401 }
3402 } else {
3403 st->print_cr("[Constant Pool (empty)]");
3404 }
3405 st->cr();
3406 }
3407
3408 #endif
3409
3410 // Disassemble this nmethod.
3411 // Print additional debug information, if requested. This could be code
3412 // comments, block comments, profiling counters, etc.
3413 // The undisassembled format is useful no disassembler library is available.
3414 // The resulting hex dump (with markers) can be disassembled later, or on
3415 // another system, when/where a disassembler library is available.
3416 void nmethod::decode2(outputStream* ost) const {
3417
3418 // Called from frame::back_trace_with_decode without ResourceMark.
3419 ResourceMark rm;
3420
3421 // Make sure we have a valid stream to print on.
3422 outputStream* st = ost ? ost : tty;
3423
3424 #if defined(SUPPORT_ABSTRACT_ASSEMBLY) && ! defined(SUPPORT_ASSEMBLY)
3425 const bool use_compressed_format = true;
3426 const bool compressed_with_comments = use_compressed_format && (AbstractDisassembler::show_comment() ||
3427 AbstractDisassembler::show_block_comment());
3428 #else
3429 const bool use_compressed_format = Disassembler::is_abstract();
3430 const bool compressed_with_comments = use_compressed_format && (AbstractDisassembler::show_comment() ||
3431 AbstractDisassembler::show_block_comment());
3432 #endif
3433
3434 st->cr();
3435 this->print_on(st);
3436 st->cr();
3437
3438 #if defined(SUPPORT_ASSEMBLY)
3439 //----------------------------------
3440 //---< Print real disassembly >---
3441 //----------------------------------
3442 if (! use_compressed_format) {
3443 st->print_cr("[Disassembly]");
3444 Disassembler::decode(const_cast<nmethod*>(this), st);
3445 st->bol();
3446 st->print_cr("[/Disassembly]");
3447 return;
3448 }
3449 #endif
3450
3451 #if defined(SUPPORT_ABSTRACT_ASSEMBLY)
3452
3453 // Compressed undisassembled disassembly format.
3454 // The following status values are defined/supported:
3455 // = 0 - currently at bol() position, nothing printed yet on current line.
3456 // = 1 - currently at position after print_location().
3457 // > 1 - in the midst of printing instruction stream bytes.
3458 int compressed_format_idx = 0;
3459 int code_comment_column = 0;
3460 const int instr_maxlen = Assembler::instr_maxlen();
3461 const uint tabspacing = 8;
3462 unsigned char* start = this->code_begin();
3463 unsigned char* p = this->code_begin();
3464 unsigned char* end = this->code_end();
3465 unsigned char* pss = p; // start of a code section (used for offsets)
3466
3467 if ((start == nullptr) || (end == nullptr)) {
3468 st->print_cr("PrintAssembly not possible due to uninitialized section pointers");
3469 return;
3470 }
3471 #endif
3472
3473 #if defined(SUPPORT_ABSTRACT_ASSEMBLY)
3474 //---< plain abstract disassembly, no comments or anything, just section headers >---
3475 if (use_compressed_format && ! compressed_with_comments) {
3476 const_cast<nmethod*>(this)->print_constant_pool(st);
3477
3478 //---< Open the output (Marker for post-mortem disassembler) >---
3479 st->print_cr("[MachCode]");
3480 const char* header = nullptr;
3481 address p0 = p;
3482 while (p < end) {
3483 address pp = p;
3484 while ((p < end) && (header == nullptr)) {
3485 header = nmethod_section_label(p);
3486 pp = p;
3487 p += Assembler::instr_len(p);
3488 }
3489 if (pp > p0) {
3490 AbstractDisassembler::decode_range_abstract(p0, pp, start, end, st, Assembler::instr_maxlen());
3491 p0 = pp;
3492 p = pp;
3493 header = nullptr;
3494 } else if (header != nullptr) {
3495 st->bol();
3496 st->print_cr("%s", header);
3497 header = nullptr;
3498 }
3499 }
3500 //---< Close the output (Marker for post-mortem disassembler) >---
3501 st->bol();
3502 st->print_cr("[/MachCode]");
3503 return;
3504 }
3505 #endif
3506
3507 #if defined(SUPPORT_ABSTRACT_ASSEMBLY)
3508 //---< abstract disassembly with comments and section headers merged in >---
3509 if (compressed_with_comments) {
3510 const_cast<nmethod*>(this)->print_constant_pool(st);
3511
3512 //---< Open the output (Marker for post-mortem disassembler) >---
3513 st->print_cr("[MachCode]");
3514 while ((p < end) && (p != nullptr)) {
3515 const int instruction_size_in_bytes = Assembler::instr_len(p);
3516
3517 //---< Block comments for nmethod. Interrupts instruction stream, if any. >---
3518 // Outputs a bol() before and a cr() after, but only if a comment is printed.
3519 // Prints nmethod_section_label as well.
3520 if (AbstractDisassembler::show_block_comment()) {
3521 print_block_comment(st, p);
3522 if (st->position() == 0) {
3523 compressed_format_idx = 0;
3524 }
3525 }
3526
3527 //---< New location information after line break >---
3528 if (compressed_format_idx == 0) {
3529 code_comment_column = Disassembler::print_location(p, pss, end, st, false, false);
3530 compressed_format_idx = 1;
3531 }
3532
3533 //---< Code comment for current instruction. Address range [p..(p+len)) >---
3534 unsigned char* p_end = p + (ssize_t)instruction_size_in_bytes;
3535 S390_ONLY(if (p_end > end) p_end = end;) // avoid getting past the end
3536
3537 if (AbstractDisassembler::show_comment() && const_cast<nmethod*>(this)->has_code_comment(p, p_end)) {
3538 //---< interrupt instruction byte stream for code comment >---
3539 if (compressed_format_idx > 1) {
3540 st->cr(); // interrupt byte stream
3541 st->cr(); // add an empty line
3542 code_comment_column = Disassembler::print_location(p, pss, end, st, false, false);
3543 }
3544 const_cast<nmethod*>(this)->print_code_comment_on(st, code_comment_column, p, p_end );
3545 st->bol();
3546 compressed_format_idx = 0;
3547 }
3548
3549 //---< New location information after line break >---
3550 if (compressed_format_idx == 0) {
3551 code_comment_column = Disassembler::print_location(p, pss, end, st, false, false);
3552 compressed_format_idx = 1;
3553 }
3554
3555 //---< Nicely align instructions for readability >---
3556 if (compressed_format_idx > 1) {
3557 Disassembler::print_delimiter(st);
3558 }
3559
3560 //---< Now, finally, print the actual instruction bytes >---
3561 unsigned char* p0 = p;
3562 p = Disassembler::decode_instruction_abstract(p, st, instruction_size_in_bytes, instr_maxlen);
3563 compressed_format_idx += (int)(p - p0);
3564
3565 if (Disassembler::start_newline(compressed_format_idx-1)) {
3566 st->cr();
3567 compressed_format_idx = 0;
3568 }
3569 }
3570 //---< Close the output (Marker for post-mortem disassembler) >---
3571 st->bol();
3572 st->print_cr("[/MachCode]");
3573 return;
3574 }
3575 #endif
3576 }
3577
3578 #if defined(SUPPORT_ASSEMBLY) || defined(SUPPORT_ABSTRACT_ASSEMBLY)
3579
3580 const char* nmethod::reloc_string_for(u_char* begin, u_char* end) {
3581 RelocIterator iter(this, begin, end);
3582 bool have_one = false;
3583 while (iter.next()) {
3584 have_one = true;
3585 switch (iter.type()) {
3586 case relocInfo::none: {
3587 // Skip it and check next
3588 break;
3589 }
3590 case relocInfo::oop_type: {
3591 // Get a non-resizable resource-allocated stringStream.
3592 // Our callees make use of (nested) ResourceMarks.
3593 stringStream st(NEW_RESOURCE_ARRAY(char, 1024), 1024);
3594 oop_Relocation* r = iter.oop_reloc();
3595 oop obj = r->oop_value();
3596 st.print("oop(");
3597 if (obj == nullptr) st.print("nullptr");
3598 else obj->print_value_on(&st);
3599 st.print(")");
3600 return st.as_string();
3601 }
3602 case relocInfo::metadata_type: {
3603 stringStream st;
3604 metadata_Relocation* r = iter.metadata_reloc();
3605 Metadata* obj = r->metadata_value();
3606 st.print("metadata(");
3607 if (obj == nullptr) st.print("nullptr");
3608 else obj->print_value_on(&st);
3609 st.print(")");
3610 return st.as_string();
3611 }
3612 case relocInfo::runtime_call_type:
3613 case relocInfo::runtime_call_w_cp_type: {
3614 stringStream st;
3615 st.print("runtime_call");
3616 CallRelocation* r = (CallRelocation*)iter.reloc();
3617 address dest = r->destination();
3618 if (StubRoutines::contains(dest)) {
3619 StubCodeDesc* desc = StubCodeDesc::desc_for(dest);
3620 if (desc == nullptr) {
3621 desc = StubCodeDesc::desc_for(dest + frame::pc_return_offset);
3622 }
3623 if (desc != nullptr) {
3624 st.print(" Stub::%s", desc->name());
3625 return st.as_string();
3626 }
3627 }
3628 CodeBlob* cb = CodeCache::find_blob(dest);
3629 if (cb != nullptr) {
3630 st.print(" %s", cb->name());
3631 } else {
3632 ResourceMark rm;
3633 const int buflen = 1024;
3634 char* buf = NEW_RESOURCE_ARRAY(char, buflen);
3635 int offset;
3636 if (os::dll_address_to_function_name(dest, buf, buflen, &offset)) {
3637 st.print(" %s", buf);
3638 if (offset != 0) {
3639 st.print("+%d", offset);
3640 }
3641 }
3642 }
3643 return st.as_string();
3644 }
3645 case relocInfo::virtual_call_type: {
3646 stringStream st;
3647 st.print_raw("virtual_call");
3648 virtual_call_Relocation* r = iter.virtual_call_reloc();
3649 Method* m = r->method_value();
3650 if (m != nullptr) {
3651 assert(m->is_method(), "");
3652 m->print_short_name(&st);
3653 }
3654 return st.as_string();
3655 }
3656 case relocInfo::opt_virtual_call_type: {
3657 stringStream st;
3658 st.print_raw("optimized virtual_call");
3659 opt_virtual_call_Relocation* r = iter.opt_virtual_call_reloc();
3660 Method* m = r->method_value();
3661 if (m != nullptr) {
3662 assert(m->is_method(), "");
3663 m->print_short_name(&st);
3664 }
3665 return st.as_string();
3666 }
3667 case relocInfo::static_call_type: {
3668 stringStream st;
3669 st.print_raw("static_call");
3670 static_call_Relocation* r = iter.static_call_reloc();
3671 Method* m = r->method_value();
3672 if (m != nullptr) {
3673 assert(m->is_method(), "");
3674 m->print_short_name(&st);
3675 }
3676 return st.as_string();
3677 }
3678 case relocInfo::static_stub_type: return "static_stub";
3679 case relocInfo::external_word_type: return "external_word";
3680 case relocInfo::internal_word_type: return "internal_word";
3681 case relocInfo::section_word_type: return "section_word";
3682 case relocInfo::poll_type: return "poll";
3683 case relocInfo::poll_return_type: return "poll_return";
3684 case relocInfo::trampoline_stub_type: return "trampoline_stub";
3685 case relocInfo::entry_guard_type: return "entry_guard";
3686 case relocInfo::post_call_nop_type: return "post_call_nop";
3687 case relocInfo::barrier_type: {
3688 barrier_Relocation* const reloc = iter.barrier_reloc();
3689 stringStream st;
3690 st.print("barrier format=%d", reloc->format());
3691 return st.as_string();
3692 }
3693
3694 case relocInfo::type_mask: return "type_bit_mask";
3695
3696 default: {
3697 stringStream st;
3698 st.print("unknown relocInfo=%d", (int) iter.type());
3699 return st.as_string();
3700 }
3701 }
3702 }
3703 return have_one ? "other" : nullptr;
3704 }
3705
3706 // Return the last scope in (begin..end]
3707 ScopeDesc* nmethod::scope_desc_in(address begin, address end) {
3708 PcDesc* p = pc_desc_near(begin+1);
3709 if (p != nullptr && p->real_pc(this) <= end) {
3710 return new ScopeDesc(this, p);
3711 }
3712 return nullptr;
3713 }
3714
3715 const char* nmethod::nmethod_section_label(address pos) const {
3716 const char* label = nullptr;
3717 if (pos == code_begin()) label = "[Instructions begin]";
3718 if (pos == entry_point()) label = "[Entry Point]";
3719 if (pos == verified_entry_point()) label = "[Verified Entry Point]";
3720 if (has_method_handle_invokes() && (pos == deopt_mh_handler_begin())) label = "[Deopt MH Handler Code]";
3721 if (pos == consts_begin() && pos != insts_begin()) label = "[Constants]";
3722 // Check stub_code before checking exception_handler or deopt_handler.
3723 if (pos == this->stub_begin()) label = "[Stub Code]";
3724 if (JVMCI_ONLY(_exception_offset >= 0 &&) pos == exception_begin()) label = "[Exception Handler]";
3725 if (JVMCI_ONLY(_deopt_handler_offset != -1 &&) pos == deopt_handler_begin()) label = "[Deopt Handler Code]";
3726 return label;
3727 }
3728
3729 void nmethod::print_nmethod_labels(outputStream* stream, address block_begin, bool print_section_labels) const {
3730 if (print_section_labels) {
3731 const char* label = nmethod_section_label(block_begin);
3732 if (label != nullptr) {
3733 stream->bol();
3734 stream->print_cr("%s", label);
3735 }
3736 }
3737
3738 if (block_begin == entry_point()) {
3739 Method* m = method();
3740 if (m != nullptr) {
3741 stream->print(" # ");
3742 m->print_value_on(stream);
3743 stream->cr();
3744 }
3745 if (m != nullptr && !is_osr_method()) {
3746 ResourceMark rm;
3747 int sizeargs = m->size_of_parameters();
3748 BasicType* sig_bt = NEW_RESOURCE_ARRAY(BasicType, sizeargs);
3749 VMRegPair* regs = NEW_RESOURCE_ARRAY(VMRegPair, sizeargs);
3750 {
3751 int sig_index = 0;
3752 if (!m->is_static())
3753 sig_bt[sig_index++] = T_OBJECT; // 'this'
3754 for (SignatureStream ss(m->signature()); !ss.at_return_type(); ss.next()) {
3755 BasicType t = ss.type();
3756 sig_bt[sig_index++] = t;
3757 if (type2size[t] == 2) {
3758 sig_bt[sig_index++] = T_VOID;
3759 } else {
3760 assert(type2size[t] == 1, "size is 1 or 2");
3761 }
3762 }
3763 assert(sig_index == sizeargs, "");
3764 }
3765 const char* spname = "sp"; // make arch-specific?
3766 SharedRuntime::java_calling_convention(sig_bt, regs, sizeargs);
3767 int stack_slot_offset = this->frame_size() * wordSize;
3768 int tab1 = 14, tab2 = 24;
3769 int sig_index = 0;
3770 int arg_index = (m->is_static() ? 0 : -1);
3771 bool did_old_sp = false;
3772 for (SignatureStream ss(m->signature()); !ss.at_return_type(); ) {
3773 bool at_this = (arg_index == -1);
3774 bool at_old_sp = false;
3775 BasicType t = (at_this ? T_OBJECT : ss.type());
3776 assert(t == sig_bt[sig_index], "sigs in sync");
3777 if (at_this)
3778 stream->print(" # this: ");
3779 else
3780 stream->print(" # parm%d: ", arg_index);
3781 stream->move_to(tab1);
3782 VMReg fst = regs[sig_index].first();
3783 VMReg snd = regs[sig_index].second();
3784 if (fst->is_reg()) {
3785 stream->print("%s", fst->name());
3786 if (snd->is_valid()) {
3787 stream->print(":%s", snd->name());
3788 }
3789 } else if (fst->is_stack()) {
3790 int offset = fst->reg2stack() * VMRegImpl::stack_slot_size + stack_slot_offset;
3791 if (offset == stack_slot_offset) at_old_sp = true;
3792 stream->print("[%s+0x%x]", spname, offset);
3793 } else {
3794 stream->print("reg%d:%d??", (int)(intptr_t)fst, (int)(intptr_t)snd);
3795 }
3796 stream->print(" ");
3797 stream->move_to(tab2);
3798 stream->print("= ");
3799 if (at_this) {
3800 m->method_holder()->print_value_on(stream);
3801 } else {
3802 bool did_name = false;
3803 if (!at_this && ss.is_reference()) {
3804 Symbol* name = ss.as_symbol();
3805 name->print_value_on(stream);
3806 did_name = true;
3807 }
3808 if (!did_name)
3809 stream->print("%s", type2name(t));
3810 }
3811 if (at_old_sp) {
3812 stream->print(" (%s of caller)", spname);
3813 did_old_sp = true;
3814 }
3815 stream->cr();
3816 sig_index += type2size[t];
3817 arg_index += 1;
3818 if (!at_this) ss.next();
3819 }
3820 if (!did_old_sp) {
3821 stream->print(" # ");
3822 stream->move_to(tab1);
3823 stream->print("[%s+0x%x]", spname, stack_slot_offset);
3824 stream->print(" (%s of caller)", spname);
3825 stream->cr();
3826 }
3827 }
3828 }
3829 }
3830
3831 // Returns whether this nmethod has code comments.
3832 bool nmethod::has_code_comment(address begin, address end) {
3833 // scopes?
3834 ScopeDesc* sd = scope_desc_in(begin, end);
3835 if (sd != nullptr) return true;
3836
3837 // relocations?
3838 const char* str = reloc_string_for(begin, end);
3839 if (str != nullptr) return true;
3840
3841 // implicit exceptions?
3842 int cont_offset = ImplicitExceptionTable(this).continuation_offset((uint)(begin - code_begin()));
3843 if (cont_offset != 0) return true;
3844
3845 return false;
3846 }
3847
3848 void nmethod::print_code_comment_on(outputStream* st, int column, address begin, address end) {
3849 ImplicitExceptionTable implicit_table(this);
3850 int pc_offset = (int)(begin - code_begin());
3851 int cont_offset = implicit_table.continuation_offset(pc_offset);
3852 bool oop_map_required = false;
3853 if (cont_offset != 0) {
3854 st->move_to(column, 6, 0);
3855 if (pc_offset == cont_offset) {
3856 st->print("; implicit exception: deoptimizes");
3857 oop_map_required = true;
3858 } else {
3859 st->print("; implicit exception: dispatches to " INTPTR_FORMAT, p2i(code_begin() + cont_offset));
3860 }
3861 }
3862
3863 // Find an oopmap in (begin, end]. We use the odd half-closed
3864 // interval so that oop maps and scope descs which are tied to the
3865 // byte after a call are printed with the call itself. OopMaps
3866 // associated with implicit exceptions are printed with the implicit
3867 // instruction.
3868 address base = code_begin();
3869 ImmutableOopMapSet* oms = oop_maps();
3870 if (oms != nullptr) {
3871 for (int i = 0, imax = oms->count(); i < imax; i++) {
3872 const ImmutableOopMapPair* pair = oms->pair_at(i);
3873 const ImmutableOopMap* om = pair->get_from(oms);
3874 address pc = base + pair->pc_offset();
3875 if (pc >= begin) {
3876 #if INCLUDE_JVMCI
3877 bool is_implicit_deopt = implicit_table.continuation_offset(pair->pc_offset()) == (uint) pair->pc_offset();
3878 #else
3879 bool is_implicit_deopt = false;
3880 #endif
3881 if (is_implicit_deopt ? pc == begin : pc > begin && pc <= end) {
3882 st->move_to(column, 6, 0);
3883 st->print("; ");
3884 om->print_on(st);
3885 oop_map_required = false;
3886 }
3887 }
3888 if (pc > end) {
3889 break;
3890 }
3891 }
3892 }
3893 assert(!oop_map_required, "missed oopmap");
3894
3895 Thread* thread = Thread::current();
3896
3897 // Print any debug info present at this pc.
3898 ScopeDesc* sd = scope_desc_in(begin, end);
3899 if (sd != nullptr) {
3900 st->move_to(column, 6, 0);
3901 if (sd->bci() == SynchronizationEntryBCI) {
3902 st->print(";*synchronization entry");
3903 } else if (sd->bci() == AfterBci) {
3904 st->print(";* method exit (unlocked if synchronized)");
3905 } else if (sd->bci() == UnwindBci) {
3906 st->print(";* unwind (locked if synchronized)");
3907 } else if (sd->bci() == AfterExceptionBci) {
3908 st->print(";* unwind (unlocked if synchronized)");
3909 } else if (sd->bci() == UnknownBci) {
3910 st->print(";* unknown");
3911 } else if (sd->bci() == InvalidFrameStateBci) {
3912 st->print(";* invalid frame state");
3913 } else {
3914 if (sd->method() == nullptr) {
3915 st->print("method is nullptr");
3916 } else if (sd->method()->is_native()) {
3917 st->print("method is native");
3918 } else {
3919 Bytecodes::Code bc = sd->method()->java_code_at(sd->bci());
3920 st->print(";*%s", Bytecodes::name(bc));
3921 switch (bc) {
3922 case Bytecodes::_invokevirtual:
3923 case Bytecodes::_invokespecial:
3924 case Bytecodes::_invokestatic:
3925 case Bytecodes::_invokeinterface:
3926 {
3927 Bytecode_invoke invoke(methodHandle(thread, sd->method()), sd->bci());
3928 st->print(" ");
3929 if (invoke.name() != nullptr)
3930 invoke.name()->print_symbol_on(st);
3931 else
3932 st->print("<UNKNOWN>");
3933 break;
3934 }
3935 case Bytecodes::_getfield:
3936 case Bytecodes::_putfield:
3937 case Bytecodes::_getstatic:
3938 case Bytecodes::_putstatic:
3939 {
3940 Bytecode_field field(methodHandle(thread, sd->method()), sd->bci());
3941 st->print(" ");
3942 if (field.name() != nullptr)
3943 field.name()->print_symbol_on(st);
3944 else
3945 st->print("<UNKNOWN>");
3946 }
3947 default:
3948 break;
3949 }
3950 }
3951 st->print(" {reexecute=%d rethrow=%d return_oop=%d}", sd->should_reexecute(), sd->rethrow_exception(), sd->return_oop());
3952 }
3953
3954 // Print all scopes
3955 for (;sd != nullptr; sd = sd->sender()) {
3956 st->move_to(column, 6, 0);
3957 st->print("; -");
3958 if (sd->should_reexecute()) {
3959 st->print(" (reexecute)");
3960 }
3961 if (sd->method() == nullptr) {
3962 st->print("method is nullptr");
3963 } else {
3964 sd->method()->print_short_name(st);
3965 }
3966 int lineno = sd->method()->line_number_from_bci(sd->bci());
3967 if (lineno != -1) {
3968 st->print("@%d (line %d)", sd->bci(), lineno);
3969 } else {
3970 st->print("@%d", sd->bci());
3971 }
3972 st->cr();
3973 }
3974 }
3975
3976 // Print relocation information
3977 // Prevent memory leak: allocating without ResourceMark.
3978 ResourceMark rm;
3979 const char* str = reloc_string_for(begin, end);
3980 if (str != nullptr) {
3981 if (sd != nullptr) st->cr();
3982 st->move_to(column, 6, 0);
3983 st->print("; {%s}", str);
3984 }
3985 }
3986
3987 #endif
3988
3989 address nmethod::call_instruction_address(address pc) const {
3990 if (NativeCall::is_call_before(pc)) {
3991 NativeCall *ncall = nativeCall_before(pc);
3992 return ncall->instruction_address();
3993 }
3994 return nullptr;
3995 }
3996
3997 void nmethod::print_value_on_impl(outputStream* st) const {
3998 st->print_cr("nmethod");
3999 #if defined(SUPPORT_DATA_STRUCTS)
4000 print_on_with_msg(st, nullptr);
4001 #endif
4002 }
4003
4004 #ifndef PRODUCT
4005
4006 void nmethod::print_calls(outputStream* st) {
4007 RelocIterator iter(this);
4008 while (iter.next()) {
4009 switch (iter.type()) {
4010 case relocInfo::virtual_call_type: {
4011 CompiledICLocker ml_verify(this);
4012 CompiledIC_at(&iter)->print();
4013 break;
4014 }
4015 case relocInfo::static_call_type:
4016 case relocInfo::opt_virtual_call_type:
4017 st->print_cr("Direct call at " INTPTR_FORMAT, p2i(iter.reloc()->addr()));
4018 CompiledDirectCall::at(iter.reloc())->print();
4019 break;
4020 default:
4021 break;
4022 }
4023 }
4024 }
4025
4026 void nmethod::print_statistics() {
4027 ttyLocker ttyl;
4028 if (xtty != nullptr) xtty->head("statistics type='nmethod'");
4029 native_nmethod_stats.print_native_nmethod_stats();
4030 #ifdef COMPILER1
4031 c1_java_nmethod_stats.print_nmethod_stats("C1");
4032 #endif
4033 #ifdef COMPILER2
4034 c2_java_nmethod_stats.print_nmethod_stats("C2");
4035 #endif
4036 #if INCLUDE_JVMCI
4037 jvmci_java_nmethod_stats.print_nmethod_stats("JVMCI");
4038 #endif
4039 unknown_java_nmethod_stats.print_nmethod_stats("Unknown");
4040 DebugInformationRecorder::print_statistics();
4041 pc_nmethod_stats.print_pc_stats();
4042 Dependencies::print_statistics();
4043 ExternalsRecorder::print_statistics();
4044 if (xtty != nullptr) xtty->tail("statistics");
4045 }
4046
4047 #endif // !PRODUCT
4048
4049 #if INCLUDE_JVMCI
4050 void nmethod::update_speculation(JavaThread* thread) {
4051 jlong speculation = thread->pending_failed_speculation();
4052 if (speculation != 0) {
4053 guarantee(jvmci_nmethod_data() != nullptr, "failed speculation in nmethod without failed speculation list");
4054 jvmci_nmethod_data()->add_failed_speculation(this, speculation);
4055 thread->set_pending_failed_speculation(0);
4056 }
4057 }
4058
4059 const char* nmethod::jvmci_name() {
4060 if (jvmci_nmethod_data() != nullptr) {
4061 return jvmci_nmethod_data()->name();
4062 }
4063 return nullptr;
4064 }
4065 #endif