/*
 * Copyright (c) 2015, 2024, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#include "precompiled.hpp"
#include "code/compiledIC.hpp"
#include "code/compiledMethod.inline.hpp"
#include "code/exceptionHandlerTable.hpp"
#include "code/scopeDesc.hpp"
#include "code/codeCache.hpp"
#include "gc/shared/barrierSet.hpp"
#include "gc/shared/barrierSetNMethod.hpp"
#include "gc/shared/gcBehaviours.hpp"
#include "interpreter/bytecode.inline.hpp"
#include "logging/log.hpp"
#include "logging/logTag.hpp"
#include "memory/resourceArea.hpp"
#include "oops/klass.inline.hpp"
#include "oops/methodData.hpp"
#include "oops/method.inline.hpp"
#include "oops/weakHandle.inline.hpp"
#include "prims/methodHandles.hpp"
#include "runtime/atomic.hpp"
#include "runtime/deoptimization.hpp"
#include "runtime/frame.inline.hpp"
#include "runtime/jniHandles.inline.hpp"
#include "runtime/handles.inline.hpp"
#include "runtime/mutexLocker.hpp"
#include "runtime/sharedRuntime.hpp"

CompiledMethod::CompiledMethod(Method* method, const char* name, CompilerType type, const CodeBlobLayout& layout,
                               int frame_complete_offset, int frame_size, ImmutableOopMapSet* oop_maps,
                               bool caller_must_gc_arguments, bool compiled)
  : CodeBlob(name, type, layout, frame_complete_offset, frame_size, oop_maps, caller_must_gc_arguments, compiled),
    _deoptimization_status(not_marked),
    _deoptimization_generation(0),
    _method(method),
    _gc_data(nullptr)
{
  init_defaults();
}

CompiledMethod::CompiledMethod(Method* method, const char* name, CompilerType type, int size,
                               int header_size, CodeBuffer* cb, int frame_complete_offset, int frame_size,
                               OopMapSet* oop_maps, bool caller_must_gc_arguments, bool compiled)
  : CodeBlob(name, type, CodeBlobLayout((address) this, size, header_size, cb), cb,
             frame_complete_offset, frame_size, oop_maps, caller_must_gc_arguments, compiled),
    _deoptimization_status(not_marked),
    _deoptimization_generation(0),
    _method(method),
    _gc_data(nullptr)
{
  init_defaults();
}

void CompiledMethod::init_defaults() {
  { // avoid uninitialized fields, even for short time periods
    _scopes_data_begin      = nullptr;
    _deopt_handler_begin    = nullptr;
    _deopt_mh_handler_begin = nullptr;
    _exception_cache        = nullptr;
  }
  _has_unsafe_access         = 0;
  _has_method_handle_invokes = 0;
  _has_wide_vectors          = 0;
  _has_monitors              = 0;
}

bool CompiledMethod::is_method_handle_return(address return_pc) {
  if (!has_method_handle_invokes())  return false;
  PcDesc* pd = pc_desc_at(return_pc);
  if (pd == nullptr)
    return false;
  return pd->is_method_handle_invoke();
}

// Returns a string version of the method state.
const char* CompiledMethod::state() const {
  int state = get_state();
  switch (state) {
  case not_installed:
    return "not installed";
  case in_use:
    return "in use";
  case not_used:
    return "not_used";
  case not_entrant:
    return "not_entrant";
  default:
    fatal("unexpected method state: %d", state);
    return nullptr;
  }
}

//-----------------------------------------------------------------------------
void CompiledMethod::set_deoptimized_done() {
  ConditionalMutexLocker ml(CompiledMethod_lock, !CompiledMethod_lock->owned_by_self(), Mutex::_no_safepoint_check_flag);
  if (_deoptimization_status != deoptimize_done) { // can't go backwards
    Atomic::store(&_deoptimization_status, deoptimize_done);
  }
}

//-----------------------------------------------------------------------------

ExceptionCache* CompiledMethod::exception_cache_acquire() const {
  return Atomic::load_acquire(&_exception_cache);
}

void CompiledMethod::add_exception_cache_entry(ExceptionCache* new_entry) {
  assert(ExceptionCache_lock->owned_by_self(), "Must hold the ExceptionCache_lock");
  assert(new_entry != nullptr, "Must be non null");
  assert(new_entry->next() == nullptr, "Must be null");

  for (;;) {
    ExceptionCache *ec = exception_cache();
    if (ec != nullptr) {
      Klass* ex_klass = ec->exception_type();
      if (!ex_klass->is_loader_alive()) {
        // We must guarantee that entries are not inserted with new next pointer
        // edges to ExceptionCache entries with dead klasses, due to bad interactions
        // with concurrent ExceptionCache cleanup. Therefore, the inserts roll
        // the head pointer forward to the first live ExceptionCache, so that the new
        // next pointers always point at live ExceptionCaches, that are not removed due
        // to concurrent ExceptionCache cleanup.
        ExceptionCache* next = ec->next();
        if (Atomic::cmpxchg(&_exception_cache, ec, next) == ec) {
          CodeCache::release_exception_cache(ec);
        }
        continue;
      }
      ec = exception_cache();
      if (ec != nullptr) {
        new_entry->set_next(ec);
      }
    }
    if (Atomic::cmpxchg(&_exception_cache, ec, new_entry) == ec) {
      return;
    }
  }
}
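
// Illustration only, not VM code: the loop above is the classic lock-free
// push-to-head pattern, restarted whenever the CAS observes that another
// thread changed the head. A minimal standalone sketch of the same pattern,
// with hypothetical names, looks like this:
//
//   struct Node { Node* next; };
//
//   void push_head(Node* volatile* head, Node* n) {
//     for (;;) {
//       Node* old = *head;
//       n->next = old;                              // link before publishing
//       if (Atomic::cmpxchg(head, old, n) == old) { // publish with a CAS
//         return;                                   // success
//       }
//       // CAS failed: another thread moved the head; retry.
//     }
//   }
//
// On top of that pattern, the insert above first rolls the head past entries
// whose Klass* is dead, so a new entry is never linked to an ExceptionCache
// that concurrent cleanup may unlink.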

void CompiledMethod::clean_exception_cache() {
  // For each nmethod, only a single thread may call this cleanup function
  // at the same time, whether called in STW cleanup or concurrent cleanup.
  // Note that if the GC is processing exception cache cleaning in a concurrent phase,
  // then a single writer may contend with cleaning up the head pointer to the
  // first ExceptionCache node that has a Klass* that is alive. That is fine,
  // as long as there is no concurrent cleanup of next pointers from concurrent writers.
  // And the concurrent writers do not clean up next pointers, only the head.
  // Also note that concurrent readers will walk through Klass* pointers that are not
  // alive. That does not cause ABA problems, because Klass* is deleted after
  // a handshake with all threads, after all stale ExceptionCaches have been
  // unlinked. That is also when the CodeCache::exception_cache_purge_list()
  // is deleted, with all ExceptionCache entries that were cleaned concurrently.
  // That similarly implies that CAS operations on ExceptionCache entries do not
  // suffer from ABA problems as unlinking and deletion is separated by a global
  // handshake operation.
  ExceptionCache* prev = nullptr;
  ExceptionCache* curr = exception_cache_acquire();

  while (curr != nullptr) {
    ExceptionCache* next = curr->next();

    if (!curr->exception_type()->is_loader_alive()) {
      if (prev == nullptr) {
        // Try to clean head; this is contended by concurrent inserts, that
        // both lazily clean the head, and insert entries at the head. If
        // the CAS fails, the operation is restarted.
        if (Atomic::cmpxchg(&_exception_cache, curr, next) != curr) {
          prev = nullptr;
          curr = exception_cache_acquire();
          continue;
        }
      } else {
        // During cleanup, it is impossible to link the next pointer to an
        // ExceptionCache that was not already published before a safepoint
        // prior to the cleanup. Therefore, a releasing store is not required.
        prev->set_next(next);
      }
      // prev stays the same.

      CodeCache::release_exception_cache(curr);
    } else {
      prev = curr;
    }

    curr = next;
  }
}
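
// Illustration only, not VM code: the two unlink cases in clean_exception_cache()
// above, where (dead) marks an entry whose Klass* is no longer alive:
//
//   head -> A(dead) -> B -> ...        unlink A: CAS head from A to B
//                                      (contended with inserts; retry on failure)
//   head -> A -> B(dead) -> C -> ...   unlink B: plain store A->next = C
//                                      (only the single cleaner touches next pointers)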

// Public methods for accessing the exception cache.
address CompiledMethod::handler_for_exception_and_pc(Handle exception, address pc) {
  // We never grab a lock to read the exception cache, so we may
  // have false negatives. This is okay, as it can only happen during
  // the first few exception lookups for a given nmethod.
  ExceptionCache* ec = exception_cache_acquire();
  while (ec != nullptr) {
    address ret_val;
    if ((ret_val = ec->match(exception, pc)) != nullptr) {
      return ret_val;
    }
    ec = ec->next();
  }
  return nullptr;
}
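
// Illustration only (hypothetical caller, not VM code): the intended usage is
// lookup first, then compute and record a handler only on a miss. Because
// reads take no lock, several threads may race to record the same pair;
// duplicates are tolerated by design (see add_handler_for_exception_and_pc
// below).
//
//   address handler = cm->handler_for_exception_and_pc(exception, pc);
//   if (handler == nullptr) {
//     handler = /* compute via the exception handler table */;
//     cm->add_handler_for_exception_and_pc(exception, pc, handler);
//   }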

void CompiledMethod::add_handler_for_exception_and_pc(Handle exception, address pc, address handler) {
  // There are potential race conditions during exception cache updates, so we
  // must own the ExceptionCache_lock before doing ANY modifications. Because
  // we don't lock during reads, it is possible to have several threads attempt
  // to update the cache with the same data. We need to check for already inserted
  // copies of the current data before adding it.

  MutexLocker ml(ExceptionCache_lock);
  ExceptionCache* target_entry = exception_cache_entry_for_exception(exception);

  if (target_entry == nullptr || !target_entry->add_address_and_handler(pc, handler)) {
    target_entry = new ExceptionCache(exception, pc, handler);
    add_exception_cache_entry(target_entry);
  }
}

// Private methods used to manipulate the exception cache directly.
ExceptionCache* CompiledMethod::exception_cache_entry_for_exception(Handle exception) {
  ExceptionCache* ec = exception_cache_acquire();
  while (ec != nullptr) {
    if (ec->match_exception_with_space(exception)) {
      return ec;
    }
    ec = ec->next();
  }
  return nullptr;
}

//-------------end of code for ExceptionCache--------------

bool CompiledMethod::is_at_poll_return(address pc) {
  RelocIterator iter(this, pc, pc+1);
  while (iter.next()) {
    if (iter.type() == relocInfo::poll_return_type)
      return true;
  }
  return false;
}


bool CompiledMethod::is_at_poll_or_poll_return(address pc) {
  RelocIterator iter(this, pc, pc+1);
  while (iter.next()) {
    relocInfo::relocType t = iter.type();
    if (t == relocInfo::poll_return_type || t == relocInfo::poll_type)
      return true;
  }
  return false;
}
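
// Note on the [pc, pc+1) window used above: RelocIterator only visits
// relocations whose address falls inside the given range, so a one-byte
// window selects exactly the relocation records attached to the instruction
// at pc. A hedged sketch of the same idiom for an arbitrary relocation type
// (has_reloc_at is hypothetical, not a VM function):
//
//   static bool has_reloc_at(CompiledMethod* cm, address pc, relocInfo::relocType t) {
//     RelocIterator iter(cm, pc, pc + 1);
//     while (iter.next()) {
//       if (iter.type() == t) {
//         return true;
//       }
//     }
//     return false;
//   }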

void CompiledMethod::verify_oop_relocations() {
  // Ensure that the code matches the current oop values
  RelocIterator iter(this, nullptr, nullptr);
  while (iter.next()) {
    if (iter.type() == relocInfo::oop_type) {
      oop_Relocation* reloc = iter.oop_reloc();
      if (!reloc->oop_is_immediate()) {
        reloc->verify_oop_relocation();
      }
    }
  }
}


ScopeDesc* CompiledMethod::scope_desc_at(address pc) {
  PcDesc* pd = pc_desc_at(pc);
  guarantee(pd != nullptr, "scope must be present");
  return new ScopeDesc(this, pd);
}

ScopeDesc* CompiledMethod::scope_desc_near(address pc) {
  PcDesc* pd = pc_desc_near(pc);
  guarantee(pd != nullptr, "scope must be present");
  return new ScopeDesc(this, pd);
}

address CompiledMethod::oops_reloc_begin() const {
  // If the method is not entrant then a JMP is plastered over the
  // first few bytes. If an oop in the old code was there, that oop
  // should not get GC'd. Skip the first few bytes of oops on
  // not-entrant methods.
  if (frame_complete_offset() != CodeOffsets::frame_never_safe &&
      code_begin() + frame_complete_offset() >
      verified_entry_point() + NativeJump::instruction_size)
  {
    // If we have a frame_complete_offset after the native jump, then there
    // is no point trying to look for oops before that. This is a requirement
    // for being allowed to scan oops concurrently.
    return code_begin() + frame_complete_offset();
  }

  // It is not safe to read oops concurrently using entry barriers, if their
  // location depends on whether the nmethod is entrant or not.
  // assert(BarrierSet::barrier_set()->barrier_set_nmethod() == nullptr, "Not safe oop scan");

  address low_boundary = verified_entry_point();
  if (!is_in_use() && is_nmethod()) {
    low_boundary += NativeJump::instruction_size;
    // %%% Note: On SPARC we patch only a 4-byte trap, not a full NativeJump.
    // This means that the low_boundary is going to be a little too high.
    // This shouldn't matter, since oops of non-entrant methods are never used.
    // In fact, why are we bothering to look at oops in a non-entrant method??
  }
  return low_boundary;
}
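
// Illustration only (an assumption about typical use, not an exhaustive list
// of callers): oop and metadata scans start their relocation walk at
// oops_reloc_begin(), so the bytes that may be overwritten by the not-entrant
// jump are never misread as relocations:
//
//   RelocIterator iter(this, oops_reloc_begin());
//   while (iter.next()) {
//     if (iter.type() == relocInfo::oop_type) {
//       // process iter.oop_reloc() ...
//     }
//   }
//
// cleanup_inline_caches_impl() below starts its walk at exactly this point.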

// Method that knows how to preserve outgoing arguments at call. This method must be
// called with a frame corresponding to a Java invoke
void CompiledMethod::preserve_callee_argument_oops(frame fr, const RegisterMap *reg_map, OopClosure* f) {
  if (method() == nullptr) {
    return;
  }

  // handle the case of an anchor explicitly set in continuation code that doesn't have a callee
  JavaThread* thread = reg_map->thread();
  if (thread->has_last_Java_frame() && fr.sp() == thread->last_Java_sp()) {
    return;
  }

  if (!method()->is_native()) {
    address pc = fr.pc();
    bool has_receiver, has_appendix;
    Symbol* signature;

    // The method attached by JIT-compilers should be used, if present.
    // The bytecode can be inaccurate in that case.
    Method* callee = attached_method_before_pc(pc);
    if (callee != nullptr) {
      has_receiver = !(callee->access_flags().is_static());
      has_appendix = false;
      signature    = callee->signature();
    } else {
      SimpleScopeDesc ssd(this, pc);

      Bytecode_invoke call(methodHandle(Thread::current(), ssd.method()), ssd.bci());
      has_receiver = call.has_receiver();
      has_appendix = call.has_appendix();
      signature    = call.signature();
    }

    fr.oops_compiled_arguments_do(signature, has_receiver, has_appendix, reg_map, f);
  } else if (method()->is_continuation_enter_intrinsic()) {
    // This method only calls Continuation.enter()
    Symbol* signature = vmSymbols::continuationEnter_signature();
    fr.oops_compiled_arguments_do(signature, false, false, reg_map, f);
  }
}

Method* CompiledMethod::attached_method(address call_instr) {
  assert(code_contains(call_instr), "not part of the nmethod");
  RelocIterator iter(this, call_instr, call_instr + 1);
  while (iter.next()) {
    if (iter.addr() == call_instr) {
      switch (iter.type()) {
        case relocInfo::static_call_type:      return iter.static_call_reloc()->method_value();
        case relocInfo::opt_virtual_call_type: return iter.opt_virtual_call_reloc()->method_value();
        case relocInfo::virtual_call_type:     return iter.virtual_call_reloc()->method_value();
        default:                               break;
      }
    }
  }
  return nullptr; // not found
}

Method* CompiledMethod::attached_method_before_pc(address pc) {
  if (NativeCall::is_call_before(pc)) {
    NativeCall* ncall = nativeCall_before(pc);
    return attached_method(ncall->instruction_address());
  }
  return nullptr; // not a call
}

void CompiledMethod::clear_inline_caches() {
  assert(SafepointSynchronize::is_at_safepoint(), "clearing of IC's only allowed at safepoint");
  RelocIterator iter(this);
  while (iter.next()) {
    iter.reloc()->clear_inline_cache();
  }
}

#ifdef ASSERT
// Check class_loader is alive for this bit of metadata.
class CheckClass : public MetadataClosure {
  void do_metadata(Metadata* md) {
    Klass* klass = nullptr;
    if (md->is_klass()) {
      klass = ((Klass*)md);
    } else if (md->is_method()) {
      klass = ((Method*)md)->method_holder();
    } else if (md->is_methodData()) {
      klass = ((MethodData*)md)->method()->method_holder();
    } else {
      md->print();
      ShouldNotReachHere();
    }
    assert(klass->is_loader_alive(), "must be alive");
  }
};
#endif // ASSERT


static void clean_ic_if_metadata_is_dead(CompiledIC *ic) {
  ic->clean_metadata();
}

// Clean a callsite in this compiled method (which is itself not unloaded) if
// it references an unloaded or stale nmethod.
template <typename CallsiteT>
static void clean_if_nmethod_is_unloaded(CallsiteT* callsite, CompiledMethod* from,
                                         bool clean_all) {
  CodeBlob* cb = CodeCache::find_blob(callsite->destination());
  if (!cb->is_compiled()) {
    return;
  }
  CompiledMethod* cm = cb->as_compiled_method();
  if (clean_all || !cm->is_in_use() || cm->is_unloading() || cm->method()->code() != cm) {
    callsite->set_to_clean();
  }
}
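
// The staleness predicate above, restated for illustration (not new policy):
// a callsite is reset to the clean state when cleaning is unconditional
// (clean_all), or when its target nmethod is no longer in use, is unloading,
// or has been replaced as its Method's current code (method()->code() != cm).
// set_to_clean() makes the callsite re-resolve on the next invocation.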

// Cleans caches in nmethods that point to either classes that are unloaded
// or nmethods that are unloaded.
//
// Can be called in parallel (currently by G1) or after all nmethods have been
// unloaded. In the parallel case, inline caches may be encountered that point
// to nmethods not yet visited by the do_unloading walk.
void CompiledMethod::unload_nmethod_caches(bool unloading_occurred) {
  ResourceMark rm;

  // The exception cache only needs to be cleaned if unloading occurred
  if (unloading_occurred) {
    clean_exception_cache();
  }

  cleanup_inline_caches_impl(unloading_occurred, false);

#ifdef ASSERT
  // Check that the metadata embedded in the nmethod is alive
  CheckClass check_class;
  metadata_do(&check_class);
#endif
}

void CompiledMethod::run_nmethod_entry_barrier() {
  BarrierSetNMethod* bs_nm = BarrierSet::barrier_set()->barrier_set_nmethod();
  if (bs_nm != nullptr) {
    // We want to keep an invariant that nmethods found through iterations of a
    // Thread's nmethods in safepoints have gone through an entry barrier and
    // are not armed. By calling this nmethod entry barrier, it plays along and
    // acts like any other nmethod found on the stack of a thread (fewer surprises).
    nmethod* nm = as_nmethod_or_null();
    if (nm != nullptr && bs_nm->is_armed(nm)) {
      bool alive = bs_nm->nmethod_entry_barrier(nm);
      assert(alive, "should be alive");
    }
  }
}
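
// Illustration only (hypothetical caller, not VM code): code that hands an
// nmethod to a thread outside the normal call path runs the barrier first,
// so the nmethod behaves as if it had just been found during a stack walk:
//
//   CompiledMethod* cm = ...;
//   cm->run_nmethod_entry_barrier(); // no-op if no barrier set, or not armed
//   // cm can now be used like any nmethod discovered in a safepoint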

// Only called by whitebox test
void CompiledMethod::cleanup_inline_caches_whitebox() {
  assert_locked_or_safepoint(CodeCache_lock);
  CompiledICLocker ic_locker(this);
  cleanup_inline_caches_impl(false /* unloading_occurred */, true /* clean_all */);
}

address* CompiledMethod::orig_pc_addr(const frame* fr) {
  return (address*) ((address)fr->unextended_sp() + orig_pc_offset());
}

// Called to clean up after class unloading for live nmethods
void CompiledMethod::cleanup_inline_caches_impl(bool unloading_occurred, bool clean_all) {
  assert(CompiledICLocker::is_safe(this), "mt unsafe call");
  ResourceMark rm;

  // Find all calls in an nmethod and clear the ones that point to bad nmethods.
  RelocIterator iter(this, oops_reloc_begin());
  bool is_in_static_stub = false;
  while (iter.next()) {

    switch (iter.type()) {

    case relocInfo::virtual_call_type:
      if (unloading_occurred) {
        // If class unloading occurred we first clear ICs where the cached metadata
        // is referring to an unloaded klass or method.
        clean_ic_if_metadata_is_dead(CompiledIC_at(&iter));
      }

      clean_if_nmethod_is_unloaded(CompiledIC_at(&iter), this, clean_all);
      break;

    case relocInfo::opt_virtual_call_type:
    case relocInfo::static_call_type:
      clean_if_nmethod_is_unloaded(CompiledDirectCall::at(iter.reloc()), this, clean_all);
      break;

    case relocInfo::static_stub_type: {
      is_in_static_stub = true;
      break;
    }

    case relocInfo::metadata_type: {
      // Only the metadata relocations contained in static/opt virtual call stubs
      // contain the Method* passed to c2i adapters. It is the only metadata
      // relocation that needs to be walked, as it is the one metadata relocation
      // that violates the invariant that all metadata relocations have an oop
      // in the compiled method (due to deferred resolution and code patching).

      // This causes dead metadata to remain in compiled methods that are not
      // unloading. Unless these slippery metadata relocations of the static
      // stubs are at least cleared, subsequent class redefinition operations
      // will access potentially free memory, and JavaThread execution
      // concurrent to class unloading may call c2i adapters with dead methods.
      if (!is_in_static_stub) {
        // The first metadata relocation after a static stub relocation is the
        // metadata relocation of the static stub used to pass the Method* to
        // c2i adapters.
        continue;
      }
      is_in_static_stub = false;
      if (is_unloading()) {
        // If the nmethod itself is dying, then it may point at dead metadata.
        // Nobody should follow that metadata; it is strictly unsafe.
        continue;
      }
      metadata_Relocation* r = iter.metadata_reloc();
      Metadata* md = r->metadata_value();
      if (md != nullptr && md->is_method()) {
        Method* method = static_cast<Method*>(md);
        if (!method->method_holder()->is_loader_alive()) {
          Atomic::store(r->metadata_addr(), (Method*)nullptr);

          if (!r->metadata_is_immediate()) {
            r->fix_metadata_relocation();
          }
        }
      }
      break;
    }

    default:
      break;
    }
  }
}
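
// Illustrative summary (restating the loop above, not new behavior): the
// static-stub handling is a small two-state machine over the relocation
// stream. A static_stub_type relocation arms a flag, and only the first
// metadata_type relocation that follows is treated as the stub's Method*
// slot; every other metadata relocation is skipped:
//
//   static_stub_type -> is_in_static_stub = true
//   metadata_type    -> if is_in_static_stub: maybe clear a dead Method*,
//                       then disarm the flag; otherwise: skip
//   other types      -> flag unchanged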

address CompiledMethod::continuation_for_implicit_exception(address pc, bool for_div0_check) {
  // Exception happened outside inline-cache check code => we are inside
  // an active nmethod => use cpc to determine a return address
  int exception_offset = int(pc - code_begin());
  int cont_offset = ImplicitExceptionTable(this).continuation_offset(exception_offset);
#ifdef ASSERT
  if (cont_offset == 0) {
    Thread* thread = Thread::current();
    ResourceMark rm(thread);
    CodeBlob* cb = CodeCache::find_blob(pc);
    assert(cb != nullptr && cb == this, "");

    // Keep tty output consistent. To avoid ttyLocker, we buffer in a stream and print all at once.
    stringStream ss;
    ss.print_cr("implicit exception happened at " INTPTR_FORMAT, p2i(pc));
    print_on(&ss);
    method()->print_codes_on(&ss);
    print_code_on(&ss);
    print_pcs_on(&ss);
    tty->print("%s", ss.as_string()); // print all at once
  }
#endif
  if (cont_offset == 0) {
    // Let the normal error handling report the exception
    return nullptr;
  }
  if (cont_offset == exception_offset) {
#if INCLUDE_JVMCI
    Deoptimization::DeoptReason deopt_reason = for_div0_check ? Deoptimization::Reason_div0_check : Deoptimization::Reason_null_check;
    JavaThread *thread = JavaThread::current();
    thread->set_jvmci_implicit_exception_pc(pc);
    thread->set_pending_deoptimization(Deoptimization::make_trap_request(deopt_reason,
                                                                         Deoptimization::Action_reinterpret));
    return (SharedRuntime::deopt_blob()->implicit_exception_uncommon_trap());
#else
    ShouldNotReachHere();
#endif
  }
  return code_begin() + cont_offset;
}
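
// Illustrative summary of the lookup above (restating the code; names are from
// this file): the implicit exception table maps the offset of a faulting
// instruction to the offset of its continuation.
//
//   cont_offset == 0                : no entry; report the exception normally
//   cont_offset == exception_offset : JVMCI deoptimizing entry; take an
//                                     uncommon trap instead of continuing
//   otherwise                       : resume at code_begin() + cont_offset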

class HasEvolDependency : public MetadataClosure {
  bool _has_evol_dependency;
 public:
  HasEvolDependency() : _has_evol_dependency(false) {}
  void do_metadata(Metadata* md) {
    if (md->is_method()) {
      Method* method = (Method*)md;
      if (method->is_old()) {
        _has_evol_dependency = true;
      }
    }
  }
  bool has_evol_dependency() const { return _has_evol_dependency; }
};

bool CompiledMethod::has_evol_metadata() {
  // Check the metadata in relocIter and CompiledIC and also deoptimize
  // any nmethod that has a reference to old methods.
  HasEvolDependency check_evol;
  metadata_do(&check_evol);
  if (check_evol.has_evol_dependency() && log_is_enabled(Debug, redefine, class, nmethod)) {
    ResourceMark rm;
    log_debug(redefine, class, nmethod)
640 ("Found evol dependency of nmethod %s.%s(%s) compile_id=%d on in nmethod metadata",
             _method->method_holder()->external_name(),
             _method->name()->as_C_string(),
             _method->signature()->as_C_string(),
             compile_id());
  }
  return check_evol.has_evol_dependency();
}