1 /*
2 * Copyright (c) 1997, 2025, Oracle and/or its affiliates. All rights reserved.
3 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
4 *
5 * This code is free software; you can redistribute it and/or modify it
6 * under the terms of the GNU General Public License version 2 only, as
7 * published by the Free Software Foundation.
8 *
9 * This code is distributed in the hope that it will be useful, but WITHOUT
10 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
11 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
12 * version 2 for more details (a copy is included in the LICENSE file that
13 * accompanied this code).
14 *
15 * You should have received a copy of the GNU General Public License version
16 * 2 along with this work; if not, write to the Free Software Foundation,
17 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
18 *
19 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
20 * or visit www.oracle.com if you need additional information or have any
21 * questions.
22 *
23 */
24
25 #ifndef SHARE_CODE_NMETHOD_HPP
26 #define SHARE_CODE_NMETHOD_HPP
27
28 #include "code/codeBlob.hpp"
29 #include "code/pcDesc.hpp"
30 #include "oops/metadata.hpp"
31 #include "oops/method.hpp"
32 #include "runtime/mutexLocker.hpp"
33
34 class AbstractCompiler;
35 class CompiledDirectCall;
36 class CompiledIC;
37 class CompiledICData;
38 class CompileTask;
39 class DepChange;
40 class Dependencies;
41 class DirectiveSet;
42 class DebugInformationRecorder;
43 class ExceptionHandlerTable;
44 class ImplicitExceptionTable;
45 class JvmtiThreadState;
46 class MetadataClosure;
47 class NativeCallWrapper;
48 class OopIterateClosure;
49 class AOTCodeReader;
50 class AOTCodeEntry;
51 class ScopeDesc;
52 class xmlStream;
53
// This class is used internally by nmethods to cache
// exception/pc/handler information.
56
57 class ExceptionCache : public CHeapObj<mtCode> {
58 friend class VMStructs;
59 private:
60 enum { cache_size = 16 };
61 Klass* _exception_type;
62 address _pc[cache_size];
63 address _handler[cache_size];
64 volatile int _count;
65 ExceptionCache* volatile _next;
66 ExceptionCache* _purge_list_next;
67
68 inline address pc_at(int index);
69 void set_pc_at(int index, address a) { assert(index >= 0 && index < cache_size,""); _pc[index] = a; }
70
71 inline address handler_at(int index);
72 void set_handler_at(int index, address a) { assert(index >= 0 && index < cache_size,""); _handler[index] = a; }
73
74 inline int count();
75 // increment_count is only called under lock, but there may be concurrent readers.
76 void increment_count();
77
78 public:
79
80 ExceptionCache(Handle exception, address pc, address handler);
81
82 Klass* exception_type() { return _exception_type; }
83 ExceptionCache* next();
84 void set_next(ExceptionCache *ec);
85 ExceptionCache* purge_list_next() { return _purge_list_next; }
86 void set_purge_list_next(ExceptionCache *ec) { _purge_list_next = ec; }
87
88 address match(Handle exception, address pc);
  bool match_exception_with_space(Handle exception);
  address test_address(address addr);
  bool add_address_and_handler(address addr, address handler);
92 };
93
94 // cache pc descs found in earlier inquiries
95 class PcDescCache {
96 private:
97 enum { cache_size = 4 };
  // The array elements MUST be volatile! Several threads may modify
  // and read from the cache concurrently. Without volatile, a C++
  // compiler (namely xlC12) may duplicate field accesses, and
  // find_pc_desc_internal has been seen to return wrong results.
102 typedef PcDesc* PcDescPtr;
103 volatile PcDescPtr _pc_descs[cache_size]; // last cache_size pc_descs found
104 public:
105 PcDescCache() { DEBUG_ONLY(_pc_descs[0] = nullptr); }
106 void init_to(PcDesc* initial_pc_desc);
107 PcDesc* find_pc_desc(int pc_offset, bool approximate);
108 void add_pc_desc(PcDesc* pc_desc);
109 PcDesc* last_pc_desc() { return _pc_descs[0]; }
110 };
111
112 class PcDescContainer : public CHeapObj<mtCode> {
113 private:
114 PcDescCache _pc_desc_cache;
115 public:
116 PcDescContainer(PcDesc* initial_pc_desc) { _pc_desc_cache.init_to(initial_pc_desc); }
117
118 PcDesc* find_pc_desc_internal(address pc, bool approximate, address code_begin,
119 PcDesc* lower, PcDesc* upper);
120
121 PcDesc* find_pc_desc(address pc, bool approximate, address code_begin, PcDesc* lower, PcDesc* upper)
122 #ifdef PRODUCT
123 {
124 PcDesc* desc = _pc_desc_cache.last_pc_desc();
125 assert(desc != nullptr, "PcDesc cache should be initialized already");
126 if (desc->pc_offset() == (pc - code_begin)) {
127 // Cached value matched
128 return desc;
129 }
130 return find_pc_desc_internal(pc, approximate, code_begin, lower, upper);
131 }
132 #endif
133 ;
134 };
135
136 // nmethods (native methods) are the compiled code versions of Java methods.
137 //
138 // An nmethod contains:
139 // - Header (the nmethod structure)
140 // - Constant part (doubles, longs and floats used in nmethod)
141 // - Code part:
142 // - Code body
143 // - Exception handler
144 // - Stub code
145 // - OOP table
146 //
147 // As a CodeBlob, an nmethod references [mutable data] allocated on the C heap:
148 // - CodeBlob relocation data
149 // - Metainfo
150 // - JVMCI data
151 //
// An nmethod references [immutable data] allocated on the C heap:
153 // - Dependency assertions data
154 // - Implicit null table array
155 // - Handler entry point array
156 // - Debugging information:
157 // - Scopes data array
158 // - Scopes pcs array
159 // - JVMCI speculations array
// - Immutable data reference counter
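//
// A rough memory-layout sketch (illustrative only; the exact boundaries are
// defined by the offset fields and the *_begin()/*_end() accessors below):
//
//   nmethod (code cache)   : header | consts | insts | stubs | oops
//   mutable data (C heap)  : relocation | metadata | JVMCI data (if any)
//   immutable data (C heap): dependencies | nul_chk table | handler table |
//                            scopes pcs | scopes data | speculations (JVMCI) |
//                            immutable data ref count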
161
162 #if INCLUDE_JVMCI
163 class FailedSpeculation;
164 class JVMCINMethodData;
165 #endif
166
167 class nmethod : public CodeBlob {
168 friend class VMStructs;
169 friend class JVMCIVMStructs;
170 friend class CodeCache; // scavengable oops
171 friend class JVMCINMethodData;
172 friend class DeoptimizationScope;
173
174 #define ImmutableDataRefCountSize ((int)sizeof(int))
175
176 private:
177
  // Used to track the deoptimization handshake in which this nmethod will be deoptimized.
179 uint64_t _deoptimization_generation;
180
181 uint64_t _gc_epoch;
182
  // Profiling counter used to identify the hottest nmethods to record into CDS
184 volatile uint64_t _method_profiling_count;
185
186 Method* _method;
187
  // To reduce the header size, union fields whose usages do not overlap.
189 union {
190 // To support simple linked-list chaining of nmethods:
191 nmethod* _osr_link; // from InstanceKlass::osr_nmethods_head
192 struct {
193 // These are used for compiled synchronized native methods to
194 // locate the owner and stack slot for the BasicLock. They are
195 // needed because there is no debug information for compiled native
196 // wrappers and the oop maps are insufficient to allow
197 // frame::retrieve_receiver() to work. Currently they are expected
198 // to be byte offsets from the Java stack pointer for maximum code
199 // sharing between platforms. JVMTI's GetLocalInstance() uses these
200 // offsets to find the receiver for non-static native wrapper frames.
201 ByteSize _native_receiver_sp_offset;
202 ByteSize _native_basic_lock_sp_offset;
203 };
204 };
205
206 // nmethod's read-only data
207 address _immutable_data;
208
209 PcDescContainer* _pc_desc_container;
210 ExceptionCache* volatile _exception_cache;
211
212 void* _gc_data;
213
214 struct oops_do_mark_link; // Opaque data type.
215 static nmethod* volatile _oops_do_mark_nmethods;
216 oops_do_mark_link* volatile _oops_do_mark_link;
217
218 CompiledICData* _compiled_ic_data;
219
220 // offsets for entry points
221 address _osr_entry_point; // entry point for on stack replacement
222 uint16_t _entry_offset; // entry point with class check
223 uint16_t _verified_entry_offset; // entry point without class check
224 int _entry_bci; // != InvocationEntryBci if this nmethod is an on-stack replacement method
225 int _immutable_data_size;
226
227 // _consts_offset == _content_offset because SECT_CONSTS is first in code buffer
228
229 int _skipped_instructions_size;
230
231 int _stub_offset;
232
233 // Offsets for different stubs section parts
234 int _exception_offset;
  // All deoptees will resume execution at the location described by
  // this offset.
237 int _deopt_handler_offset;
238 // Offset (from insts_end) of the unwind handler if it exists
239 int16_t _unwind_handler_offset;
240 // Number of arguments passed on the stack
241 uint16_t _num_stack_arg_slots;
242
243 uint16_t _oops_size;
244 #if INCLUDE_JVMCI
245 // _metadata_size is not specific to JVMCI. In the non-JVMCI case, it can be derived as:
246 // _metadata_size = mutable_data_size - relocation_size
247 uint16_t _metadata_size;
248 #endif
249
250 // Offset in immutable data section
251 // _dependencies_offset == 0
252 uint16_t _nul_chk_table_offset;
253 uint16_t _handler_table_offset; // This table could be big in C1 code
254 int _scopes_pcs_offset;
255 int _scopes_data_offset;
256 #if INCLUDE_JVMCI
257 int _speculations_offset;
258 #endif
259 int _immutable_data_ref_count_offset;
260
261 // location in frame (offset for sp) that deopt can store the original
262 // pc during a deopt.
263 int _orig_pc_offset;
264
265 int _compile_id; // which compilation made this nmethod
266 CompLevel _comp_level; // compilation level (s1)
267 CompilerType _compiler_type; // which compiler made this nmethod (u1)
268
269 AOTCodeEntry* _aot_code_entry;
270
271 bool _used; // has this nmethod ever been invoked?
272
273 // Local state used to keep track of whether unloading is happening or not
274 volatile uint8_t _is_unloading_state;
275
276 // Protected by NMethodState_lock
277 volatile signed char _state; // {not_installed, in_use, not_entrant}
278
279 // set during construction
280 uint8_t _has_unsafe_access:1, // May fault due to unsafe access.
281 _has_wide_vectors:1, // Preserve wide vectors at safepoints
282 _has_monitors:1, // Fastpath monitor detection for continuations
          _has_scoped_access:1, // used for shared scope closure (scopedMemoryAccess.cpp)
284 _has_flushed_dependencies:1, // Used for maintenance of dependencies (under CodeCache_lock)
285 _is_unlinked:1, // mark during class unloading
286 _load_reported:1, // used by jvmti to track if an event has been posted for this nmethod
287 _preloaded:1,
288 _has_clinit_barriers:1;
289
290 enum DeoptimizationStatus : u1 {
291 not_marked,
292 deoptimize,
293 deoptimize_noupdate,
294 deoptimize_done
295 };
296
297 volatile DeoptimizationStatus _deoptimization_status; // Used for stack deoptimization
298
299 DeoptimizationStatus deoptimization_status() const {
300 return AtomicAccess::load(&_deoptimization_status);
301 }
302
303 // Initialize fields to their default values
304 void init_defaults(CodeBuffer *code_buffer, CodeOffsets* offsets);
305
306 // Post initialization
307 void post_init();
308
309 // For native wrappers
310 nmethod(Method* method,
311 CompilerType type,
312 int nmethod_size,
313 int compile_id,
314 CodeOffsets* offsets,
315 CodeBuffer *code_buffer,
316 int frame_size,
317 ByteSize basic_lock_owner_sp_offset, /* synchronized natives only */
318 ByteSize basic_lock_sp_offset, /* synchronized natives only */
319 OopMapSet* oop_maps,
320 int mutable_data_size);
321
322 // For normal JIT compiled code
323 nmethod(Method* method,
324 CompilerType type,
325 int nmethod_size,
326 int immutable_data_size,
327 int mutable_data_size,
328 int compile_id,
329 int entry_bci,
330 address immutable_data,
331 CodeOffsets* offsets,
332 int orig_pc_offset,
333 DebugInformationRecorder *recorder,
334 Dependencies* dependencies,
335 CodeBuffer *code_buffer,
336 int frame_size,
337 OopMapSet* oop_maps,
338 ExceptionHandlerTable* handler_table,
339 ImplicitExceptionTable* nul_chk_table,
340 AbstractCompiler* compiler,
341 CompLevel comp_level
342 #if INCLUDE_JVMCI
343 , char* speculations = nullptr,
344 int speculations_len = 0,
345 JVMCINMethodData* jvmci_data = nullptr
346 #endif
347 );
348
349 nmethod(const nmethod &nm);
350
351 // helper methods
352 void* operator new(size_t size, int nmethod_size, int comp_level) throw();
353 void* operator new(size_t size, int nmethod_size, CodeBlobType code_blob_type) throw();
354
355 // For method handle intrinsics: Try MethodNonProfiled, MethodProfiled and NonNMethod.
356 // Attention: Only allow NonNMethod space for special nmethods which don't need to be
357 // findable by nmethod iterators! In particular, they must not contain oops!
358 void* operator new(size_t size, int nmethod_size, bool allow_NonNMethod_space) throw();
359
360 const char* reloc_string_for(u_char* begin, u_char* end);
361
362 bool try_transition(signed char new_state);
363
364 // Returns true if this thread changed the state of the nmethod or
365 // false if another thread performed the transition.
366 bool make_entrant() { Unimplemented(); return false; }
367 void inc_decompile_count();
368
369 // Inform external interfaces that a compiled method has been unloaded
370 void post_compiled_method_unload();
371
372 PcDesc* find_pc_desc(address pc, bool approximate) {
373 if (_pc_desc_container == nullptr) return nullptr; // native method
374 return _pc_desc_container->find_pc_desc(pc, approximate, code_begin(), scopes_pcs_begin(), scopes_pcs_end());
375 }
376
377 // STW two-phase nmethod root processing helpers.
378 //
  // When determining the liveness of a given nmethod for code cache unloading,
  // some collectors need to do different things depending on whether the nmethod
  // must absolutely be kept alive during root processing; "strong"ly reachable
  // nmethods are known to be kept alive at root processing, but the liveness of
  // "weak"ly reachable ones is only determined later.
384 //
385 // We want to allow strong and weak processing of nmethods by different threads
386 // at the same time without heavy synchronization. Additional constraints are
  // to make sure that every nmethod is processed only a minimal number of times, and
388 // nmethods themselves are always iterated at most once at a particular time.
389 //
390 // Note that strong processing work must be a superset of weak processing work
391 // for this code to work.
392 //
393 // We store state and claim information in the _oops_do_mark_link member, using
394 // the two LSBs for the state and the remaining upper bits for linking together
395 // nmethods that were already visited.
396 // The last element is self-looped, i.e. points to itself to avoid some special
397 // "end-of-list" sentinel value.
398 //
399 // _oops_do_mark_link special values:
400 //
401 // _oops_do_mark_link == nullptr: the nmethod has not been visited at all yet, i.e.
402 // is Unclaimed.
403 //
404 // For other values, its lowest two bits indicate the following states of the nmethod:
405 //
406 // weak_request (WR): the nmethod has been claimed by a thread for weak processing
407 // weak_done (WD): weak processing has been completed for this nmethod.
408 // strong_request (SR): the nmethod has been found to need strong processing while
409 // being weak processed.
  // strong_done (SD): strong processing has been completed for this nmethod.
411 //
412 // The following shows the _only_ possible progressions of the _oops_do_mark_link
413 // pointer.
414 //
415 // Given
416 // N as the nmethod
417 // X the current next value of _oops_do_mark_link
418 //
419 // Unclaimed (C)-> N|WR (C)-> X|WD: the nmethod has been processed weakly by
420 // a single thread.
421 // Unclaimed (C)-> N|WR (C)-> X|WD (O)-> X|SD: after weak processing has been
422 // completed (as above) another thread found that the nmethod needs strong
423 // processing after all.
424 // Unclaimed (C)-> N|WR (O)-> N|SR (C)-> X|SD: during weak processing another
425 // thread finds that the nmethod needs strong processing, marks it as such and
426 // terminates. The original thread completes strong processing.
427 // Unclaimed (C)-> N|SD (C)-> X|SD: the nmethod has been processed strongly from
428 // the beginning by a single thread.
429 //
430 // "|" describes the concatenation of bits in _oops_do_mark_link.
431 //
432 // The diagram also describes the threads responsible for changing the nmethod to
433 // the next state by marking the _transition_ with (C) and (O), which mean "current"
434 // and "other" thread respectively.
435 //
436
437 // States used for claiming nmethods during root processing.
438 static const uint claim_weak_request_tag = 0;
439 static const uint claim_weak_done_tag = 1;
440 static const uint claim_strong_request_tag = 2;
441 static const uint claim_strong_done_tag = 3;
442
443 static oops_do_mark_link* mark_link(nmethod* nm, uint tag) {
444 assert(tag <= claim_strong_done_tag, "invalid tag %u", tag);
    assert(is_aligned(nm, 4), "nmethod pointer must have its two lowest bits clear");
446 return (oops_do_mark_link*)(((uintptr_t)nm & ~0x3) | tag);
447 }
448
449 static uint extract_state(oops_do_mark_link* link) {
450 return (uint)((uintptr_t)link & 0x3);
451 }
452
453 static nmethod* extract_nmethod(oops_do_mark_link* link) {
454 return (nmethod*)((uintptr_t)link & ~0x3);
455 }
456
457 void oops_do_log_change(const char* state);
458
459 static bool oops_do_has_weak_request(oops_do_mark_link* next) {
460 return extract_state(next) == claim_weak_request_tag;
461 }
462
463 static bool oops_do_has_any_strong_state(oops_do_mark_link* next) {
464 return extract_state(next) >= claim_strong_request_tag;
465 }
466
467 // Attempt Unclaimed -> N|WR transition. Returns true if successful.
468 bool oops_do_try_claim_weak_request();
469
470 // Attempt Unclaimed -> N|SD transition. Returns the current link.
471 oops_do_mark_link* oops_do_try_claim_strong_done();
472 // Attempt N|WR -> X|WD transition. Returns nullptr if successful, X otherwise.
473 nmethod* oops_do_try_add_to_list_as_weak_done();
474
475 // Attempt X|WD -> N|SR transition. Returns the current link.
476 oops_do_mark_link* oops_do_try_add_strong_request(oops_do_mark_link* next);
477 // Attempt X|WD -> X|SD transition. Returns true if successful.
478 bool oops_do_try_claim_weak_done_as_strong_done(oops_do_mark_link* next);
479
480 // Do the N|SD -> X|SD transition.
481 void oops_do_add_to_list_as_strong_done();
482
483 // Sets this nmethod as strongly claimed (as part of N|SD -> X|SD and N|SR -> X|SD
484 // transitions).
485 void oops_do_set_strong_done(nmethod* old_head);
486
487 void record_nmethod_dependency();
488
489 nmethod* restore(address code_cache_buffer,
490 const methodHandle& method,
491 int compile_id,
492 address reloc_data,
493 GrowableArray<Handle>& oop_list,
494 GrowableArray<Metadata*>& metadata_list,
495 ImmutableOopMapSet* oop_maps,
496 address immutable_data,
497 GrowableArray<Handle>& reloc_imm_oop_list,
498 GrowableArray<Metadata*>& reloc_imm_metadata_list,
499 AOTCodeReader* aot_code_reader);
500
501 public:
502 // create nmethod using archived nmethod from AOT code cache
503 static nmethod* new_nmethod(nmethod* archived_nm,
504 const methodHandle& method,
505 AbstractCompiler* compiler,
506 int compile_id,
507 address reloc_data,
508 GrowableArray<Handle>& oop_list,
509 GrowableArray<Metadata*>& metadata_list,
510 ImmutableOopMapSet* oop_maps,
511 address immutable_data,
512 GrowableArray<Handle>& reloc_imm_oop_list,
513 GrowableArray<Metadata*>& reloc_imm_metadata_list,
514 AOTCodeReader* aot_code_reader);
515
516 // If you change anything in this enum please patch
517 // vmStructs_jvmci.cpp accordingly.
518 enum class InvalidationReason : s1 {
519 NOT_INVALIDATED = -1,
520 C1_CODEPATCH,
521 C1_DEOPTIMIZE,
522 C1_DEOPTIMIZE_FOR_PATCHING,
523 C1_PREDICATE_FAILED_TRAP,
524 CI_REPLAY,
525 UNLOADING,
526 UNLOADING_COLD,
527 JVMCI_INVALIDATE,
528 JVMCI_MATERIALIZE_VIRTUAL_OBJECT,
529 JVMCI_REPLACED_WITH_NEW_CODE,
530 JVMCI_REPROFILE,
531 MARKED_FOR_DEOPTIMIZATION,
532 MISSING_EXCEPTION_HANDLER,
533 NOT_USED,
534 OSR_INVALIDATION_BACK_BRANCH,
535 OSR_INVALIDATION_FOR_COMPILING_WITH_C1,
536 OSR_INVALIDATION_OF_LOWER_LEVEL,
537 SET_NATIVE_FUNCTION,
538 UNCOMMON_TRAP,
539 WHITEBOX_DEOPTIMIZATION,
540 ZOMBIE,
541 INVALIDATION_REASONS_COUNT
542 };
543
544
545 static const char* invalidation_reason_to_string(InvalidationReason invalidation_reason) {
546 switch (invalidation_reason) {
547 case InvalidationReason::C1_CODEPATCH:
548 return "C1 code patch";
549 case InvalidationReason::C1_DEOPTIMIZE:
550 return "C1 deoptimized";
551 case InvalidationReason::C1_DEOPTIMIZE_FOR_PATCHING:
552 return "C1 deoptimize for patching";
553 case InvalidationReason::C1_PREDICATE_FAILED_TRAP:
554 return "C1 predicate failed trap";
555 case InvalidationReason::CI_REPLAY:
556 return "CI replay";
557 case InvalidationReason::JVMCI_INVALIDATE:
558 return "JVMCI invalidate";
559 case InvalidationReason::JVMCI_MATERIALIZE_VIRTUAL_OBJECT:
560 return "JVMCI materialize virtual object";
561 case InvalidationReason::JVMCI_REPLACED_WITH_NEW_CODE:
562 return "JVMCI replaced with new code";
563 case InvalidationReason::JVMCI_REPROFILE:
564 return "JVMCI reprofile";
565 case InvalidationReason::MARKED_FOR_DEOPTIMIZATION:
566 return "marked for deoptimization";
567 case InvalidationReason::MISSING_EXCEPTION_HANDLER:
568 return "missing exception handler";
569 case InvalidationReason::NOT_USED:
570 return "not used";
571 case InvalidationReason::OSR_INVALIDATION_BACK_BRANCH:
572 return "OSR invalidation back branch";
573 case InvalidationReason::OSR_INVALIDATION_FOR_COMPILING_WITH_C1:
574 return "OSR invalidation for compiling with C1";
575 case InvalidationReason::OSR_INVALIDATION_OF_LOWER_LEVEL:
576 return "OSR invalidation of lower level";
577 case InvalidationReason::SET_NATIVE_FUNCTION:
578 return "set native function";
      case InvalidationReason::UNCOMMON_TRAP:
        return "uncommon trap";
      case InvalidationReason::UNLOADING:
        return "unloading";
      case InvalidationReason::UNLOADING_COLD:
        return "unloading cold";
581 case InvalidationReason::WHITEBOX_DEOPTIMIZATION:
582 return "whitebox deoptimization";
583 case InvalidationReason::ZOMBIE:
584 return "zombie";
585 default: {
586 assert(false, "Unhandled reason");
587 return "Unknown";
588 }
589 }
590 }
591
592 // create nmethod with entry_bci
593 static nmethod* new_nmethod(const methodHandle& method,
594 int compile_id,
595 int entry_bci,
596 CodeOffsets* offsets,
597 int orig_pc_offset,
598 DebugInformationRecorder* recorder,
599 Dependencies* dependencies,
600 CodeBuffer *code_buffer,
601 int frame_size,
602 OopMapSet* oop_maps,
603 ExceptionHandlerTable* handler_table,
604 ImplicitExceptionTable* nul_chk_table,
605 AbstractCompiler* compiler,
606 CompLevel comp_level
607 #if INCLUDE_JVMCI
608 , char* speculations = nullptr,
609 int speculations_len = 0,
610 JVMCINMethodData* jvmci_data = nullptr
611 #endif
612 );
613
614 // Relocate the nmethod to the code heap identified by code_blob_type.
615 // Returns nullptr if the code heap does not have enough space, the
616 // nmethod is unrelocatable, or the nmethod is invalidated during relocation,
617 // otherwise the relocated nmethod. The original nmethod will be marked not entrant.
618 nmethod* relocate(CodeBlobType code_blob_type);
619
620 static nmethod* new_native_nmethod(const methodHandle& method,
621 int compile_id,
622 CodeBuffer *code_buffer,
623 int vep_offset,
624 int frame_complete,
625 int frame_size,
626 ByteSize receiver_sp_offset,
627 ByteSize basic_lock_sp_offset,
628 OopMapSet* oop_maps,
629 int exception_handler = -1);
630
631 Method* method () const { return _method; }
  int       entry_bci () const { return _entry_bci; }
633 bool is_native_method() const { return _method != nullptr && _method->is_native(); }
634 bool is_java_method () const { return _method != nullptr && !_method->is_native(); }
635 bool is_osr_method () const { return _entry_bci != InvocationEntryBci; }
636
637 int orig_pc_offset() { return _orig_pc_offset; }
638 bool is_relocatable();
639
640 // Compiler task identification. Note that all OSR methods
641 // are numbered in an independent sequence if CICountOSR is true,
642 // and native method wrappers are also numbered independently if
643 // CICountNative is true.
644 int compile_id() const { return _compile_id; }
645 int comp_level() const { return _comp_level; }
646 const char* compile_kind() const;
647
648 inline bool is_compiled_by_c1 () const { return _compiler_type == compiler_c1; }
649 inline bool is_compiled_by_c2 () const { return _compiler_type == compiler_c2; }
650 inline bool is_compiled_by_jvmci() const { return _compiler_type == compiler_jvmci; }
651 CompilerType compiler_type () const { return _compiler_type; }
652 const char* compiler_name () const;
653
654 // boundaries for different parts
655 address consts_begin () const { return content_begin(); }
656 address consts_end () const { return code_begin() ; }
657 address insts_begin () const { return code_begin() ; }
658 address insts_end () const { return header_begin() + _stub_offset ; }
659 address stub_begin () const { return header_begin() + _stub_offset ; }
660 address stub_end () const { return code_end() ; }
661 address exception_begin () const { return header_begin() + _exception_offset ; }
662 address deopt_handler_begin () const { return header_begin() + _deopt_handler_offset ; }
663 address unwind_handler_begin () const { return _unwind_handler_offset != -1 ? (insts_end() - _unwind_handler_offset) : nullptr; }
664 oop* oops_begin () const { return (oop*) data_begin(); }
665 oop* oops_end () const { return (oop*) data_end(); }
666
667 // mutable data
668 Metadata** metadata_begin () const { return (Metadata**) (mutable_data_begin() + _relocation_size); }
669 #if INCLUDE_JVMCI
670 Metadata** metadata_end () const { return (Metadata**) (mutable_data_begin() + _relocation_size + _metadata_size); }
671 address jvmci_data_begin () const { return mutable_data_begin() + _relocation_size + _metadata_size; }
672 address jvmci_data_end () const { return mutable_data_end(); }
673 #else
674 Metadata** metadata_end () const { return (Metadata**) mutable_data_end(); }
675 #endif
676
677 // immutable data
678 void set_immutable_data(address data) { _immutable_data = data; }
679 address immutable_data_begin () const { return _immutable_data; }
680 address immutable_data_end () const { return _immutable_data + _immutable_data_size ; }
681 address dependencies_begin () const { return _immutable_data; }
682 address dependencies_end () const { return _immutable_data + _nul_chk_table_offset; }
683 address nul_chk_table_begin () const { return _immutable_data + _nul_chk_table_offset; }
684 address nul_chk_table_end () const { return _immutable_data + _handler_table_offset; }
685 address handler_table_begin () const { return _immutable_data + _handler_table_offset; }
686 address handler_table_end () const { return _immutable_data + _scopes_pcs_offset ; }
687 PcDesc* scopes_pcs_begin () const { return (PcDesc*)(_immutable_data + _scopes_pcs_offset) ; }
688 PcDesc* scopes_pcs_end () const { return (PcDesc*)(_immutable_data + _scopes_data_offset) ; }
689 address scopes_data_begin () const { return _immutable_data + _scopes_data_offset ; }
690
691 #if INCLUDE_JVMCI
692 address scopes_data_end () const { return _immutable_data + _speculations_offset ; }
693 address speculations_begin () const { return _immutable_data + _speculations_offset ; }
694 address speculations_end () const { return _immutable_data + _immutable_data_ref_count_offset ; }
695 #else
696 address scopes_data_end () const { return _immutable_data + _immutable_data_ref_count_offset ; }
697 #endif
698 address immutable_data_ref_count_begin () const { return _immutable_data + _immutable_data_ref_count_offset ; }
699
700 // Sizes
701 int immutable_data_size() const { return _immutable_data_size; }
702 int consts_size () const { return int( consts_end () - consts_begin ()); }
703 int insts_size () const { return int( insts_end () - insts_begin ()); }
704 int stub_size () const { return int( stub_end () - stub_begin ()); }
705 int oops_size () const { return int((address) oops_end () - (address) oops_begin ()); }
706 int metadata_size () const { return int((address) metadata_end () - (address) metadata_begin ()); }
707 int scopes_data_size () const { return int( scopes_data_end () - scopes_data_begin ()); }
708 int scopes_pcs_size () const { return int((intptr_t)scopes_pcs_end () - (intptr_t)scopes_pcs_begin ()); }
709 int dependencies_size () const { return int( dependencies_end () - dependencies_begin ()); }
710 int handler_table_size () const { return int( handler_table_end() - handler_table_begin()); }
711 int nul_chk_table_size () const { return int( nul_chk_table_end() - nul_chk_table_begin()); }
712 #if INCLUDE_JVMCI
713 int speculations_size () const { return int( speculations_end () - speculations_begin ()); }
714 int jvmci_data_size () const { return int( jvmci_data_end () - jvmci_data_begin ()); }
715 #endif
716
717 int oops_count() const { assert(oops_size() % oopSize == 0, ""); return (oops_size() / oopSize) + 1; }
718 int metadata_count() const { assert(metadata_size() % wordSize == 0, ""); return (metadata_size() / wordSize) + 1; }
719
720 int skipped_instructions_size () const { return _skipped_instructions_size; }
721 int total_size() const;
722
723 // Containment
724 bool consts_contains (address addr) const { return consts_begin () <= addr && addr < consts_end (); }
725 // Returns true if a given address is in the 'insts' section. The method
726 // insts_contains_inclusive() is end-inclusive.
727 bool insts_contains (address addr) const { return insts_begin () <= addr && addr < insts_end (); }
728 bool insts_contains_inclusive(address addr) const { return insts_begin () <= addr && addr <= insts_end (); }
729 bool stub_contains (address addr) const { return stub_begin () <= addr && addr < stub_end (); }
730 bool oops_contains (oop* addr) const { return oops_begin () <= addr && addr < oops_end (); }
731 bool metadata_contains (Metadata** addr) const { return metadata_begin () <= addr && addr < metadata_end (); }
732 bool scopes_data_contains (address addr) const { return scopes_data_begin () <= addr && addr < scopes_data_end (); }
733 bool scopes_pcs_contains (PcDesc* addr) const { return scopes_pcs_begin () <= addr && addr < scopes_pcs_end (); }
734 bool handler_table_contains (address addr) const { return handler_table_begin() <= addr && addr < handler_table_end(); }
735 bool nul_chk_table_contains (address addr) const { return nul_chk_table_begin() <= addr && addr < nul_chk_table_end(); }
736
737 // entry points
738 address entry_point() const { return code_begin() + _entry_offset; } // normal entry point
739 address verified_entry_point() const { return code_begin() + _verified_entry_offset; } // if klass is correct
740
741 enum : signed char { not_installed = -1, // in construction, only the owner doing the construction is
742 // allowed to advance state
743 in_use = 0, // executable nmethod
744 not_entrant = 1 // marked for deoptimization but activations may still exist
745 };
746
747 // flag accessing and manipulation
748 bool is_not_installed() const { return _state == not_installed; }
749 bool is_in_use() const { return _state <= in_use; }
750 bool is_not_entrant() const { return _state == not_entrant; }
751 int get_state() const { return _state; }
752
753 void clear_unloading_state();
  // Heuristically deduce that this nmethod isn't worth keeping around
755 bool is_cold();
756 bool is_unloading();
757 void do_unloading(bool unloading_occurred);
758
759 void inc_method_profiling_count();
760 uint64_t method_profiling_count();
761
762 bool make_in_use() {
763 return try_transition(in_use);
764 }
765 // Make the nmethod non entrant. The nmethod will continue to be
766 // alive. It is used when an uncommon trap happens. Returns true
767 // if this thread changed the state of the nmethod or false if
768 // another thread performed the transition.
769 bool make_not_entrant(InvalidationReason invalidation_reason, bool keep_aot_entry = false);
770 bool make_not_used() { return make_not_entrant(InvalidationReason::NOT_USED, true /* keep AOT entry */); }
771
772 bool is_marked_for_deoptimization() const { return deoptimization_status() != not_marked; }
773 bool has_been_deoptimized() const { return deoptimization_status() == deoptimize_done; }
774 void set_deoptimized_done();
775
776 bool update_recompile_counts() const {
777 // Update recompile counts when either the update is explicitly requested (deoptimize)
778 // or the nmethod is not marked for deoptimization at all (not_marked).
779 // The latter happens during uncommon traps when deoptimized nmethod is made not entrant.
780 DeoptimizationStatus status = deoptimization_status();
781 return status != deoptimize_noupdate && status != deoptimize_done;
782 }
783
784 // tells whether frames described by this nmethod can be deoptimized
785 // note: native wrappers cannot be deoptimized.
786 bool can_be_deoptimized() const { return is_java_method(); }
787
788 bool has_dependencies() { return dependencies_size() != 0; }
789 void print_dependencies_on(outputStream* out) PRODUCT_RETURN;
790 void flush_dependencies();
791
792 template<typename T>
793 T* gc_data() const { return reinterpret_cast<T*>(_gc_data); }
794 template<typename T>
795 void set_gc_data(T* gc_data) { _gc_data = reinterpret_cast<void*>(gc_data); }
796
797 bool has_unsafe_access() const { return _has_unsafe_access; }
798 void set_has_unsafe_access(bool z) { _has_unsafe_access = z; }
799
800 bool has_monitors() const { return _has_monitors; }
801 void set_has_monitors(bool z) { _has_monitors = z; }
802
803 bool has_scoped_access() const { return _has_scoped_access; }
804 void set_has_scoped_access(bool z) { _has_scoped_access = z; }
805
806 bool has_wide_vectors() const { return _has_wide_vectors; }
807 void set_has_wide_vectors(bool z) { _has_wide_vectors = z; }
808
809 bool has_clinit_barriers() const { return _has_clinit_barriers; }
810 void set_has_clinit_barriers(bool z) { _has_clinit_barriers = z; }
811
812 bool preloaded() const { return _preloaded; }
813 void set_preloaded(bool z) { _preloaded = z; }
814
815 bool has_flushed_dependencies() const { return _has_flushed_dependencies; }
816 void set_has_flushed_dependencies(bool z) {
817 assert(!has_flushed_dependencies(), "should only happen once");
818 _has_flushed_dependencies = z;
819 }
820
821 bool is_unlinked() const { return _is_unlinked; }
822 void set_is_unlinked() {
823 assert(!_is_unlinked, "already unlinked");
824 _is_unlinked = true;
825 }
826
827 bool used() const { return _used; }
828 void set_used() { _used = true; }
829
830 bool is_aot() const { return _aot_code_entry != nullptr; }
831 void set_aot_code_entry(AOTCodeEntry* entry) { _aot_code_entry = entry; }
832 AOTCodeEntry* aot_code_entry() const { return _aot_code_entry; }
833
834 // Support for oops in scopes and relocs:
835 // Note: index 0 is reserved for null.
836 oop oop_at(int index) const;
837 oop oop_at_phantom(int index) const; // phantom reference
838 oop* oop_addr_at(int index) const { // for GC
839 // relocation indexes are biased by 1 (because 0 is reserved)
840 assert(index > 0 && index <= oops_count(), "must be a valid non-zero index");
841 return &oops_begin()[index - 1];
842 }
843
844 // Support for meta data in scopes and relocs:
845 // Note: index 0 is reserved for null.
846 Metadata* metadata_at(int index) const { return index == 0 ? nullptr: *metadata_addr_at(index); }
847 Metadata** metadata_addr_at(int index) const { // for GC
848 // relocation indexes are biased by 1 (because 0 is reserved)
849 assert(index > 0 && index <= metadata_count(), "must be a valid non-zero index");
850 return &metadata_begin()[index - 1];
851 }
852
853 void copy_values(GrowableArray<Handle>* array);
854 void copy_values(GrowableArray<jobject>* oops);
855 void copy_values(GrowableArray<Metadata*>* metadata);
856 void copy_values(GrowableArray<address>* metadata) {} // Nothing to do
857
858 // Relocation support
859 private:
860 void fix_oop_relocations(address begin, address end, bool initialize_immediates);
861 inline void initialize_immediate_oop(oop* dest, jobject handle);
862
863 protected:
864 address oops_reloc_begin() const;
865
866 public:
867 void fix_oop_relocations(address begin, address end) { fix_oop_relocations(begin, end, false); }
868 void fix_oop_relocations() { fix_oop_relocations(nullptr, nullptr, false); }
869
870 void create_reloc_immediates_list(JavaThread* thread, GrowableArray<Handle>& oop_list, GrowableArray<Metadata*>& metadata_list);
871
872 bool is_at_poll_return(address pc);
873 bool is_at_poll_or_poll_return(address pc);
874
875 protected:
876 // Exception cache support
877 // Note: _exception_cache may be read and cleaned concurrently.
878 ExceptionCache* exception_cache() const { return _exception_cache; }
879 ExceptionCache* exception_cache_acquire() const;
880
881 public:
882 address handler_for_exception_and_pc(Handle exception, address pc);
883 void add_handler_for_exception_and_pc(Handle exception, address pc, address handler);
884 void clean_exception_cache();
885
886 void add_exception_cache_entry(ExceptionCache* new_entry);
887 ExceptionCache* exception_cache_entry_for_exception(Handle exception);
888
889
890 // Deopt
  // Return true if the PC is one we would expect if the frame is being deopted.
892 inline bool is_deopt_pc(address pc);
893 inline bool is_deopt_entry(address pc);
894
895 // Accessor/mutator for the original pc of a frame before a frame was deopted.
896 address get_original_pc(const frame* fr) { return *orig_pc_addr(fr); }
897 void set_original_pc(const frame* fr, address pc) { *orig_pc_addr(fr) = pc; }
898
899 const char* state() const;
900
901 bool inlinecache_check_contains(address addr) const {
902 return (addr >= code_begin() && addr < verified_entry_point());
903 }
904
905 void preserve_callee_argument_oops(frame fr, const RegisterMap *reg_map, OopClosure* f);
906
907 // implicit exceptions support
908 address continuation_for_implicit_div0_exception(address pc) { return continuation_for_implicit_exception(pc, true); }
909 address continuation_for_implicit_null_exception(address pc) { return continuation_for_implicit_exception(pc, false); }
910
911 // Inline cache support for class unloading and nmethod unloading
912 private:
913 void cleanup_inline_caches_impl(bool unloading_occurred, bool clean_all);
914
915 address continuation_for_implicit_exception(address pc, bool for_div0_check);
916
917 public:
918 // Serial version used by whitebox test
919 void cleanup_inline_caches_whitebox();
920
921 void clear_inline_caches();
922
923 // Execute nmethod barrier code, as if entering through nmethod call.
924 void run_nmethod_entry_barrier();
925
926 void verify_oop_relocations();
927
928 bool has_evol_metadata();
929
930 Method* attached_method(address call_pc);
931 Method* attached_method_before_pc(address pc);
932
933 // GC unloading support
934 // Cleans unloaded klasses and unloaded nmethods in inline caches
935
936 void unload_nmethod_caches(bool class_unloading_occurred);
937
938 void unlink_from_method();
939
940 // On-stack replacement support
941 int osr_entry_bci() const { assert(is_osr_method(), "wrong kind of nmethod"); return _entry_bci; }
942 address osr_entry() const { assert(is_osr_method(), "wrong kind of nmethod"); return _osr_entry_point; }
943 nmethod* osr_link() const { return _osr_link; }
944 void set_osr_link(nmethod *n) { _osr_link = n; }
945 void invalidate_osr_method();
946
947 int num_stack_arg_slots(bool rounded = true) const {
948 return rounded ? align_up(_num_stack_arg_slots, 2) : _num_stack_arg_slots;
949 }
950
951 // Verify calls to dead methods have been cleaned.
952 void verify_clean_inline_caches();
953
954 // Unlink this nmethod from the system
955 void unlink();
956
957 // Deallocate this nmethod - called by the GC
958 void purge(bool unregister_nmethod);
959
960 // See comment at definition of _last_seen_on_stack
961 void mark_as_maybe_on_stack();
962 bool is_maybe_on_stack();
963
964 // Evolution support. We make old (discarded) compiled methods point to new Method*s.
965 void set_method(Method* method) { _method = method; }
966
967 #if INCLUDE_JVMCI
968 // Gets the JVMCI name of this nmethod.
969 const char* jvmci_name();
970
971 // Records the pending failed speculation in the
972 // JVMCI speculation log associated with this nmethod.
973 void update_speculation(JavaThread* thread);
974
975 // Gets the data specific to a JVMCI compiled method.
976 // This returns a non-nullptr value iff this nmethod was
977 // compiled by the JVMCI compiler.
978 JVMCINMethodData* jvmci_nmethod_data() const {
979 return jvmci_data_size() == 0 ? nullptr : (JVMCINMethodData*) jvmci_data_begin();
980 }
981
982 // Returns true if the runtime should NOT collect deoptimization profile for a JVMCI
983 // compiled method
984 bool jvmci_skip_profile_deopt() const;
985 #endif
986
987 void oops_do(OopClosure* f);
988
989 // All-in-one claiming of nmethods: returns true if the caller successfully claimed that
990 // nmethod.
991 bool oops_do_try_claim();
992
993 // Loom support for following nmethods on the stack
994 void follow_nmethod(OopIterateClosure* cl);
995
996 // Class containing callbacks for the oops_do_process_weak/strong() methods
997 // below.
998 class OopsDoProcessor {
999 public:
1000 // Process the oops of the given nmethod based on whether it has been called
1001 // in a weak or strong processing context, i.e. apply either weak or strong
1002 // work on it.
1003 virtual void do_regular_processing(nmethod* nm) = 0;
    // Assuming that the given nmethod has already had its weak processing
    // applied to its oops, apply the remaining strong processing part.
1006 virtual void do_remaining_strong_processing(nmethod* nm) = 0;
1007 };
1008
1009 // The following two methods do the work corresponding to weak/strong nmethod
1010 // processing.
1011 void oops_do_process_weak(OopsDoProcessor* p);
1012 void oops_do_process_strong(OopsDoProcessor* p);
1013
1014 static void oops_do_marking_prologue();
1015 static void oops_do_marking_epilogue();
1016
1017 private:
1018 ScopeDesc* scope_desc_in(address begin, address end);
1019
1020 address* orig_pc_addr(const frame* fr);
1021
  // used by jvmti to track if the load event has been reported
1023 bool load_reported() const { return _load_reported; }
1024 void set_load_reported() { _load_reported = true; }
1025
1026 inline void init_immutable_data_ref_count() {
1027 assert(is_not_installed(), "should be called in nmethod constructor");
1028 *((int*)immutable_data_ref_count_begin()) = 1;
1029 }
1030
1031 inline int inc_immutable_data_ref_count() {
1032 assert_lock_strong(CodeCache_lock);
1033 int* ref_count = (int*)immutable_data_ref_count_begin();
1034 assert(*ref_count > 0, "Must be positive");
1035 return ++(*ref_count);
1036 }
1037
1038 inline int dec_immutable_data_ref_count() {
1039 assert_lock_strong(CodeCache_lock);
1040 int* ref_count = (int*)immutable_data_ref_count_begin();
1041 assert(*ref_count > 0, "Must be positive");
1042 return --(*ref_count);
1043 }
1044
1045 static void add_delayed_compiled_method_load_event(nmethod* nm) NOT_CDS_RETURN;
1046
1047 public:
1048 // ScopeDesc retrieval operation
1049 PcDesc* pc_desc_at(address pc) { return find_pc_desc(pc, false); }
1050 // pc_desc_near returns the first PcDesc at or after the given pc.
1051 PcDesc* pc_desc_near(address pc) { return find_pc_desc(pc, true); }
1052
1053 // ScopeDesc for an instruction
1054 ScopeDesc* scope_desc_at(address pc);
1055 ScopeDesc* scope_desc_near(address pc);
1056
1057 // copying of debugging information
1058 void copy_scopes_pcs(PcDesc* pcs, int count);
1059 void copy_scopes_data(address buffer, int size);
1060
1061 // Post successful compilation
1062 void post_compiled_method(CompileTask* task);
1063
1064 // jvmti support:
1065 void post_compiled_method_load_event(JvmtiThreadState* state = nullptr);
1066
1067 // verify operations
1068 void verify();
1069 void verify_scopes();
1070 void verify_interrupt_point(address interrupt_point, bool is_inline_cache);
1071
1072 // Disassemble this nmethod with additional debug information, e.g. information about blocks.
1073 void decode2(outputStream* st) const;
1074 void print_constant_pool(outputStream* st);
1075
1076 // Avoid hiding of parent's 'decode(outputStream*)' method.
1077 void decode(outputStream* st) const { decode2(st); } // just delegate here.
1078
1079 // AOT cache support
1080 static void post_delayed_compiled_method_load_events() NOT_CDS_RETURN;
1081
1082 // printing support
1083 void print_on_impl(outputStream* st) const;
1084 void print_code();
1085 void print_value_on_impl(outputStream* st) const;
1086 void print_code_snippet(outputStream* st, address addr) const;
1087
1088 #if defined(SUPPORT_DATA_STRUCTS)
1089 // print output in opt build for disassembler library
1090 void print_relocations_on(outputStream* st) PRODUCT_RETURN;
1091 void print_pcs_on(outputStream* st);
1092 void print_scopes() { print_scopes_on(tty); }
1093 void print_scopes_on(outputStream* st) PRODUCT_RETURN;
1094 void print_handler_table();
1095 void print_nul_chk_table();
1096 void print_recorded_oop(int log_n, int index);
1097 void print_recorded_oops();
1098 void print_recorded_metadata();
1099
1100 void print_oops(outputStream* st); // oops from the underlying CodeBlob.
1101 void print_metadata(outputStream* st); // metadata in metadata pool.
1102 #else
1103 void print_pcs_on(outputStream* st) { return; }
1104 #endif
1105
1106 void print_calls(outputStream* st) PRODUCT_RETURN;
1107 static void print_statistics() PRODUCT_RETURN;
1108
1109 void maybe_print_nmethod(const DirectiveSet* directive);
1110 void print_nmethod(bool print_code);
1111
1112 void print_on_with_msg(outputStream* st, const char* msg) const;
1113
1114 // Logging
1115 void log_identity(xmlStream* log) const;
1116 void log_new_nmethod() const;
1117 void log_relocated_nmethod(nmethod* original) const;
1118 void log_state_change(InvalidationReason invalidation_reason) const;
1119
1120 // Prints block-level comments, including nmethod specific block labels:
1121 void print_nmethod_labels(outputStream* stream, address block_begin, bool print_section_labels=true) const;
1122 const char* nmethod_section_label(address pos) const;
1123
1124 // returns whether this nmethod has code comments.
1125 bool has_code_comment(address begin, address end);
1126 // Prints a comment for one native instruction (reloc info, pc desc)
1127 void print_code_comment_on(outputStream* st, int column, address begin, address end);
1128
1129 // tells if this compiled method is dependent on the given changes,
1130 // and the changes have invalidated it
1131 bool check_dependency_on(DepChange& changes);
1132
1133 // Fast breakpoint support. Tells if this compiled method is
1134 // dependent on the given method. Returns true if this nmethod
1135 // corresponds to the given method as well.
1136 bool is_dependent_on_method(Method* dependee);
1137
1138 // JVMTI's GetLocalInstance() support
1139 ByteSize native_receiver_sp_offset() {
1140 assert(is_native_method(), "sanity");
1141 return _native_receiver_sp_offset;
1142 }
1143 ByteSize native_basic_lock_sp_offset() {
1144 assert(is_native_method(), "sanity");
1145 return _native_basic_lock_sp_offset;
1146 }
1147
1148 // support for code generation
1149 static ByteSize osr_entry_point_offset() { return byte_offset_of(nmethod, _osr_entry_point); }
1150 static ByteSize state_offset() { return byte_offset_of(nmethod, _state); }
1151
1152 void metadata_do(MetadataClosure* f);
1153
1154 address call_instruction_address(address pc) const;
1155
1156 void make_deoptimized();
1157 void finalize_relocations();
1158
1159 void prepare_for_archiving_impl();
1160
1161 class Vptr : public CodeBlob::Vptr {
1162 void print_on(const CodeBlob* instance, outputStream* st) const override {
1163 ttyLocker ttyl;
1164 instance->as_nmethod()->print_on_impl(st);
1165 }
1166 void print_value_on(const CodeBlob* instance, outputStream* st) const override {
1167 instance->as_nmethod()->print_value_on_impl(st);
1168 }
1169 void prepare_for_archiving(CodeBlob* instance) const override {
1170 ((nmethod*)instance)->prepare_for_archiving_impl();
1171 };
1172 };
1173
1174 static const Vptr _vpntr;
1175 };
1176
1177 struct NMethodMarkingScope : StackObj {
1178 NMethodMarkingScope() {
1179 nmethod::oops_do_marking_prologue();
1180 }
1181 ~NMethodMarkingScope() {
1182 nmethod::oops_do_marking_epilogue();
1183 }
1184 };
1185
1186 #endif // SHARE_CODE_NMETHOD_HPP