1 /*
2 * Copyright (c) 1997, 2025, Oracle and/or its affiliates. All rights reserved.
3 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
4 *
5 * This code is free software; you can redistribute it and/or modify it
6 * under the terms of the GNU General Public License version 2 only, as
7 * published by the Free Software Foundation.
8 *
9 * This code is distributed in the hope that it will be useful, but WITHOUT
10 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
11 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
12 * version 2 for more details (a copy is included in the LICENSE file that
13 * accompanied this code).
14 *
15 * You should have received a copy of the GNU General Public License version
16 * 2 along with this work; if not, write to the Free Software Foundation,
17 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
18 *
19 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
20 * or visit www.oracle.com if you need additional information or have any
21 * questions.
22 *
23 */
24
25 #ifndef SHARE_CODE_NMETHOD_HPP
26 #define SHARE_CODE_NMETHOD_HPP
27
28 #include "code/codeBlob.hpp"
29 #include "code/pcDesc.hpp"
30 #include "oops/metadata.hpp"
31 #include "oops/method.hpp"
32
33 class AbstractCompiler;
34 class CompiledDirectCall;
35 class CompiledIC;
36 class CompiledICData;
37 class CompileTask;
38 class DepChange;
39 class Dependencies;
40 class DirectiveSet;
41 class DebugInformationRecorder;
42 class ExceptionHandlerTable;
43 class ImplicitExceptionTable;
44 class JvmtiThreadState;
45 class MetadataClosure;
46 class NativeCallWrapper;
47 class OopIterateClosure;
48 class ScopeDesc;
49 class xmlStream;
50
51 // This class is used internally by nmethods, to cache
52 // exception/pc/handler information.
53
54 class ExceptionCache : public CHeapObj<mtCode> {
55 friend class VMStructs;
56 private:
57 enum { cache_size = 16 };
58 Klass* _exception_type;
59 address _pc[cache_size];
60 address _handler[cache_size];
61 volatile int _count;
62 ExceptionCache* volatile _next;
63 ExceptionCache* _purge_list_next;
64
65 inline address pc_at(int index);
66 void set_pc_at(int index, address a) { assert(index >= 0 && index < cache_size,""); _pc[index] = a; }
67
68 inline address handler_at(int index);
69 void set_handler_at(int index, address a) { assert(index >= 0 && index < cache_size,""); _handler[index] = a; }
70
71 inline int count();
72 // increment_count is only called under lock, but there may be concurrent readers.
73 void increment_count();
74
75 public:
76
77 ExceptionCache(Handle exception, address pc, address handler);
78
79 Klass* exception_type() { return _exception_type; }
80 ExceptionCache* next();
81 void set_next(ExceptionCache *ec);
82 ExceptionCache* purge_list_next() { return _purge_list_next; }
83 void set_purge_list_next(ExceptionCache *ec) { _purge_list_next = ec; }
84
85 address match(Handle exception, address pc);
  bool    match_exception_with_space(Handle exception);
87 address test_address(address addr);
  bool    add_address_and_handler(address addr, address handler);
89 };
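
// Usage sketch (illustrative, not the actual implementation): an nmethod keeps a
// linked list of ExceptionCache nodes, each caching pc->handler pairs for a single
// exception type. A lookup compares exception_type() first and then probes the
// cached pairs, e.g.
//
//   address handler = ec->match(exception, pc);    // nullptr on a cache miss
//   if (handler == nullptr) {
//     handler = /* compute the handler */;
//     ec->add_address_and_handler(pc, handler);    // may fail if the cache is full
//   }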
90
91 // cache pc descs found in earlier inquiries
92 class PcDescCache {
93 friend class VMStructs;
94 private:
95 enum { cache_size = 4 };
  // The array elements MUST be volatile! Several threads may modify
  // and read from the cache concurrently; if the elements are not
  // volatile, a C++ compiler (namely xlC12) may duplicate field
  // accesses, and find_pc_desc_internal has been observed to return
  // wrong results.
100 typedef PcDesc* PcDescPtr;
101 volatile PcDescPtr _pc_descs[cache_size]; // last cache_size pc_descs found
102 public:
103 PcDescCache() { DEBUG_ONLY(_pc_descs[0] = nullptr); }
104 void init_to(PcDesc* initial_pc_desc);
105 PcDesc* find_pc_desc(int pc_offset, bool approximate);
106 void add_pc_desc(PcDesc* pc_desc);
107 PcDesc* last_pc_desc() { return _pc_descs[0]; }
108 };
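
// Usage sketch (illustrative): lookups probe the cache first and fall back to a
// full search of the scopes_pcs table only on a miss, e.g.
//
//   PcDesc* desc = _pc_desc_cache.find_pc_desc(pc_offset, approximate);
//   if (desc == nullptr) {
//     desc = /* search the scopes_pcs table */;
//     _pc_desc_cache.add_pc_desc(desc);            // remember it for next time
//   }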
109
110 class PcDescContainer : public CHeapObj<mtCode> {
111 private:
112 PcDescCache _pc_desc_cache;
113 public:
114 PcDescContainer(PcDesc* initial_pc_desc) { _pc_desc_cache.init_to(initial_pc_desc); }
115
116 PcDesc* find_pc_desc_internal(address pc, bool approximate, address code_begin,
117 PcDesc* lower, PcDesc* upper);
118
119 PcDesc* find_pc_desc(address pc, bool approximate, address code_begin, PcDesc* lower, PcDesc* upper)
120 #ifdef PRODUCT
121 {
122 PcDesc* desc = _pc_desc_cache.last_pc_desc();
123 assert(desc != nullptr, "PcDesc cache should be initialized already");
124 if (desc->pc_offset() == (pc - code_begin)) {
125 // Cached value matched
126 return desc;
127 }
128 return find_pc_desc_internal(pc, approximate, code_begin, lower, upper);
129 }
130 #endif
131 ;
132 };
133
134 // nmethods (native methods) are the compiled code versions of Java methods.
135 //
136 // An nmethod contains:
137 // - Header (the nmethod structure)
138 // - Constant part (doubles, longs and floats used in nmethod)
139 // - Code part:
140 // - Code body
141 // - Exception handler
142 // - Stub code
143 // - OOP table
144 //
145 // As a CodeBlob, an nmethod references [mutable data] allocated on the C heap:
146 // - CodeBlob relocation data
147 // - Metainfo
148 // - JVMCI data
149 //
// An nmethod references [immutable data] allocated on the C heap:
// - Dependency assertions data
// - Implicit null check table array
153 // - Handler entry point array
154 // - Debugging information:
155 // - Scopes data array
156 // - Scopes pcs array
157 // - JVMCI speculations array
158
159 #if INCLUDE_JVMCI
160 class FailedSpeculation;
161 class JVMCINMethodData;
162 #endif
163
164 class nmethod : public CodeBlob {
165 friend class VMStructs;
166 friend class JVMCIVMStructs;
167 friend class CodeCache; // scavengable oops
168 friend class JVMCINMethodData;
169 friend class DeoptimizationScope;
170
171 private:
172
173 // Used to track in which deoptimize handshake this method will be deoptimized.
174 uint64_t _deoptimization_generation;
175
176 uint64_t _gc_epoch;
177
178 Method* _method;
179
  // To reduce header size, fields whose usages do not overlap are placed in a union.
181 union {
182 // To support simple linked-list chaining of nmethods:
183 nmethod* _osr_link; // from InstanceKlass::osr_nmethods_head
184 struct {
185 // These are used for compiled synchronized native methods to
186 // locate the owner and stack slot for the BasicLock. They are
187 // needed because there is no debug information for compiled native
188 // wrappers and the oop maps are insufficient to allow
189 // frame::retrieve_receiver() to work. Currently they are expected
190 // to be byte offsets from the Java stack pointer for maximum code
191 // sharing between platforms. JVMTI's GetLocalInstance() uses these
192 // offsets to find the receiver for non-static native wrapper frames.
193 ByteSize _native_receiver_sp_offset;
194 ByteSize _native_basic_lock_sp_offset;
195 };
196 };
197
198 // nmethod's read-only data
199 address _immutable_data;
200
201 PcDescContainer* _pc_desc_container;
202 ExceptionCache* volatile _exception_cache;
203
204 void* _gc_data;
205
206 struct oops_do_mark_link; // Opaque data type.
207 static nmethod* volatile _oops_do_mark_nmethods;
208 oops_do_mark_link* volatile _oops_do_mark_link;
209
210 CompiledICData* _compiled_ic_data;
211
212 // offsets for entry points
213 address _osr_entry_point; // entry point for on stack replacement
214 uint16_t _entry_offset; // entry point with class check
215 uint16_t _verified_entry_offset; // entry point without class check
216 int _entry_bci; // != InvocationEntryBci if this nmethod is an on-stack replacement method
217 int _immutable_data_size;
218
219 // _consts_offset == _content_offset because SECT_CONSTS is first in code buffer
220
221 int _skipped_instructions_size;
222
223 int _stub_offset;
224
225 // Offsets for different stubs section parts
226 int _exception_offset;
  // All deoptees will resume execution at the location described by
  // this offset.
  int _deopt_handler_offset;
  // All deoptees at a MethodHandle call site will resume execution
  // at the location described by this offset.
232 int _deopt_mh_handler_offset;
233 // Offset (from insts_end) of the unwind handler if it exists
234 int16_t _unwind_handler_offset;
235 // Number of arguments passed on the stack
236 uint16_t _num_stack_arg_slots;
237
238 uint16_t _oops_size;
239 #if INCLUDE_JVMCI
240 // _metadata_size is not specific to JVMCI. In the non-JVMCI case, it can be derived as:
241 // _metadata_size = mutable_data_size - relocation_size
242 uint16_t _metadata_size;
243 #endif
244
245 // Offset in immutable data section
246 // _dependencies_offset == 0
247 uint16_t _nul_chk_table_offset;
248 uint16_t _handler_table_offset; // This table could be big in C1 code
249 int _scopes_pcs_offset;
250 int _scopes_data_offset;
251 #if INCLUDE_JVMCI
252 int _speculations_offset;
253 #endif
254
  // Location in frame (offset from sp) where deopt can store the original
  // pc during a deopt.
257 int _orig_pc_offset;
258
259 int _compile_id; // which compilation made this nmethod
260 CompLevel _comp_level; // compilation level (s1)
261 CompilerType _compiler_type; // which compiler made this nmethod (u1)
262
263 // Local state used to keep track of whether unloading is happening or not
264 volatile uint8_t _is_unloading_state;
265
266 // Protected by NMethodState_lock
267 volatile signed char _state; // {not_installed, in_use, not_entrant}
268
269 // set during construction
270 uint8_t _has_unsafe_access:1, // May fault due to unsafe access.
271 _has_method_handle_invokes:1,// Has this method MethodHandle invokes?
272 _has_wide_vectors:1, // Preserve wide vectors at safepoints
273 _has_monitors:1, // Fastpath monitor detection for continuations
          _has_scoped_access:1,        // used for shared scope closure (scopedMemoryAccess.cpp)
275 _has_flushed_dependencies:1, // Used for maintenance of dependencies (under CodeCache_lock)
276 _is_unlinked:1, // mark during class unloading
277 _load_reported:1; // used by jvmti to track if an event has been posted for this nmethod
278
279 enum DeoptimizationStatus : u1 {
280 not_marked,
281 deoptimize,
282 deoptimize_noupdate,
283 deoptimize_done
284 };
285
286 volatile DeoptimizationStatus _deoptimization_status; // Used for stack deoptimization
287
288 DeoptimizationStatus deoptimization_status() const {
289 return Atomic::load(&_deoptimization_status);
290 }
291
292 // Initialize fields to their default values
293 void init_defaults(CodeBuffer *code_buffer, CodeOffsets* offsets);
294
295 // Post initialization
296 void post_init();
297
298 // For native wrappers
299 nmethod(Method* method,
300 CompilerType type,
301 int nmethod_size,
302 int compile_id,
303 CodeOffsets* offsets,
304 CodeBuffer *code_buffer,
305 int frame_size,
306 ByteSize basic_lock_owner_sp_offset, /* synchronized natives only */
307 ByteSize basic_lock_sp_offset, /* synchronized natives only */
308 OopMapSet* oop_maps,
309 int mutable_data_size);
310
311 // For normal JIT compiled code
312 nmethod(Method* method,
313 CompilerType type,
314 int nmethod_size,
315 int immutable_data_size,
316 int mutable_data_size,
317 int compile_id,
318 int entry_bci,
319 address immutable_data,
320 CodeOffsets* offsets,
321 int orig_pc_offset,
322 DebugInformationRecorder *recorder,
323 Dependencies* dependencies,
324 CodeBuffer *code_buffer,
325 int frame_size,
326 OopMapSet* oop_maps,
327 ExceptionHandlerTable* handler_table,
328 ImplicitExceptionTable* nul_chk_table,
329 AbstractCompiler* compiler,
330 CompLevel comp_level
331 #if INCLUDE_JVMCI
332 , char* speculations = nullptr,
333 int speculations_len = 0,
334 JVMCINMethodData* jvmci_data = nullptr
335 #endif
336 );
337
338 // helper methods
339 void* operator new(size_t size, int nmethod_size, int comp_level) throw();
340
341 // For method handle intrinsics: Try MethodNonProfiled, MethodProfiled and NonNMethod.
342 // Attention: Only allow NonNMethod space for special nmethods which don't need to be
343 // findable by nmethod iterators! In particular, they must not contain oops!
344 void* operator new(size_t size, int nmethod_size, bool allow_NonNMethod_space) throw();
345
346 const char* reloc_string_for(u_char* begin, u_char* end);
347
348 bool try_transition(signed char new_state);
349
350 // Returns true if this thread changed the state of the nmethod or
351 // false if another thread performed the transition.
352 bool make_entrant() { Unimplemented(); return false; }
353 void inc_decompile_count();
354
355 // Inform external interfaces that a compiled method has been unloaded
356 void post_compiled_method_unload();
357
358 PcDesc* find_pc_desc(address pc, bool approximate) {
359 if (_pc_desc_container == nullptr) return nullptr; // native method
360 return _pc_desc_container->find_pc_desc(pc, approximate, code_begin(), scopes_pcs_begin(), scopes_pcs_end());
361 }
362
363 // STW two-phase nmethod root processing helpers.
364 //
  // When determining liveness of a given nmethod to do code cache unloading,
  // some collectors need to do different things depending on whether the nmethod
  // absolutely needs to be kept alive during root processing; "strong"ly reachable
  // nmethods are known to be kept alive at root processing, but the liveness of
  // "weak"ly reachable ones is to be determined later.
370 //
371 // We want to allow strong and weak processing of nmethods by different threads
372 // at the same time without heavy synchronization. Additional constraints are
373 // to make sure that every nmethod is processed a minimal amount of time, and
374 // nmethods themselves are always iterated at most once at a particular time.
375 //
376 // Note that strong processing work must be a superset of weak processing work
377 // for this code to work.
378 //
379 // We store state and claim information in the _oops_do_mark_link member, using
380 // the two LSBs for the state and the remaining upper bits for linking together
381 // nmethods that were already visited.
382 // The last element is self-looped, i.e. points to itself to avoid some special
383 // "end-of-list" sentinel value.
384 //
385 // _oops_do_mark_link special values:
386 //
387 // _oops_do_mark_link == nullptr: the nmethod has not been visited at all yet, i.e.
388 // is Unclaimed.
389 //
390 // For other values, its lowest two bits indicate the following states of the nmethod:
391 //
392 // weak_request (WR): the nmethod has been claimed by a thread for weak processing
393 // weak_done (WD): weak processing has been completed for this nmethod.
394 // strong_request (SR): the nmethod has been found to need strong processing while
395 // being weak processed.
  // strong_done (SD): strong processing has been completed for this nmethod.
397 //
398 // The following shows the _only_ possible progressions of the _oops_do_mark_link
399 // pointer.
400 //
401 // Given
402 // N as the nmethod
403 // X the current next value of _oops_do_mark_link
404 //
405 // Unclaimed (C)-> N|WR (C)-> X|WD: the nmethod has been processed weakly by
406 // a single thread.
407 // Unclaimed (C)-> N|WR (C)-> X|WD (O)-> X|SD: after weak processing has been
408 // completed (as above) another thread found that the nmethod needs strong
409 // processing after all.
410 // Unclaimed (C)-> N|WR (O)-> N|SR (C)-> X|SD: during weak processing another
411 // thread finds that the nmethod needs strong processing, marks it as such and
412 // terminates. The original thread completes strong processing.
413 // Unclaimed (C)-> N|SD (C)-> X|SD: the nmethod has been processed strongly from
414 // the beginning by a single thread.
415 //
416 // "|" describes the concatenation of bits in _oops_do_mark_link.
417 //
418 // The diagram also describes the threads responsible for changing the nmethod to
419 // the next state by marking the _transition_ with (C) and (O), which mean "current"
420 // and "other" thread respectively.
421 //
422
423 // States used for claiming nmethods during root processing.
424 static const uint claim_weak_request_tag = 0;
425 static const uint claim_weak_done_tag = 1;
426 static const uint claim_strong_request_tag = 2;
427 static const uint claim_strong_done_tag = 3;
428
429 static oops_do_mark_link* mark_link(nmethod* nm, uint tag) {
430 assert(tag <= claim_strong_done_tag, "invalid tag %u", tag);
    assert(is_aligned(nm, 4), "lower two bits of nmethod pointer must be zero");
432 return (oops_do_mark_link*)(((uintptr_t)nm & ~0x3) | tag);
433 }
434
435 static uint extract_state(oops_do_mark_link* link) {
436 return (uint)((uintptr_t)link & 0x3);
437 }
438
439 static nmethod* extract_nmethod(oops_do_mark_link* link) {
440 return (nmethod*)((uintptr_t)link & ~0x3);
441 }
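
  // Worked example (illustrative): for an nmethod nm at address 0x1000 (4-byte
  // aligned), mark_link(nm, claim_strong_done_tag) encodes (0x1000 & ~0x3) | 3 ==
  // 0x1003; extract_state() then yields 3 (claim_strong_done_tag), and
  // extract_nmethod() masks the tag bits off again, yielding 0x1000.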
442
443 void oops_do_log_change(const char* state);
444
445 static bool oops_do_has_weak_request(oops_do_mark_link* next) {
446 return extract_state(next) == claim_weak_request_tag;
447 }
448
449 static bool oops_do_has_any_strong_state(oops_do_mark_link* next) {
450 return extract_state(next) >= claim_strong_request_tag;
451 }
452
453 // Attempt Unclaimed -> N|WR transition. Returns true if successful.
454 bool oops_do_try_claim_weak_request();
455
456 // Attempt Unclaimed -> N|SD transition. Returns the current link.
457 oops_do_mark_link* oops_do_try_claim_strong_done();
458 // Attempt N|WR -> X|WD transition. Returns nullptr if successful, X otherwise.
459 nmethod* oops_do_try_add_to_list_as_weak_done();
460
461 // Attempt X|WD -> N|SR transition. Returns the current link.
462 oops_do_mark_link* oops_do_try_add_strong_request(oops_do_mark_link* next);
463 // Attempt X|WD -> X|SD transition. Returns true if successful.
464 bool oops_do_try_claim_weak_done_as_strong_done(oops_do_mark_link* next);
465
466 // Do the N|SD -> X|SD transition.
467 void oops_do_add_to_list_as_strong_done();
468
469 // Sets this nmethod as strongly claimed (as part of N|SD -> X|SD and N|SR -> X|SD
470 // transitions).
471 void oops_do_set_strong_done(nmethod* old_head);
472
473 public:
474 // If you change anything in this enum please patch
475 // vmStructs_jvmci.cpp accordingly.
476 enum class InvalidationReason : s1 {
477 NOT_INVALIDATED = -1,
478 C1_CODEPATCH,
479 C1_DEOPTIMIZE,
480 C1_DEOPTIMIZE_FOR_PATCHING,
481 C1_PREDICATE_FAILED_TRAP,
482 CI_REPLAY,
483 UNLOADING,
484 UNLOADING_COLD,
485 JVMCI_INVALIDATE,
486 JVMCI_MATERIALIZE_VIRTUAL_OBJECT,
487 JVMCI_REPLACED_WITH_NEW_CODE,
488 JVMCI_REPROFILE,
489 MARKED_FOR_DEOPTIMIZATION,
490 MISSING_EXCEPTION_HANDLER,
491 NOT_USED,
492 OSR_INVALIDATION_BACK_BRANCH,
493 OSR_INVALIDATION_FOR_COMPILING_WITH_C1,
494 OSR_INVALIDATION_OF_LOWER_LEVEL,
495 SET_NATIVE_FUNCTION,
496 UNCOMMON_TRAP,
497 WHITEBOX_DEOPTIMIZATION,
498 ZOMBIE,
499 INVALIDATION_REASONS_COUNT
500 };
501
502
503 static const char* invalidation_reason_to_string(InvalidationReason invalidation_reason) {
504 switch (invalidation_reason) {
505 case InvalidationReason::C1_CODEPATCH:
506 return "C1 code patch";
507 case InvalidationReason::C1_DEOPTIMIZE:
508 return "C1 deoptimized";
509 case InvalidationReason::C1_DEOPTIMIZE_FOR_PATCHING:
510 return "C1 deoptimize for patching";
511 case InvalidationReason::C1_PREDICATE_FAILED_TRAP:
512 return "C1 predicate failed trap";
      case InvalidationReason::CI_REPLAY:
        return "CI replay";
      case InvalidationReason::UNLOADING:
        return "unloading";
      case InvalidationReason::UNLOADING_COLD:
        return "unloading cold";
515 case InvalidationReason::JVMCI_INVALIDATE:
516 return "JVMCI invalidate";
517 case InvalidationReason::JVMCI_MATERIALIZE_VIRTUAL_OBJECT:
518 return "JVMCI materialize virtual object";
519 case InvalidationReason::JVMCI_REPLACED_WITH_NEW_CODE:
520 return "JVMCI replaced with new code";
521 case InvalidationReason::JVMCI_REPROFILE:
522 return "JVMCI reprofile";
523 case InvalidationReason::MARKED_FOR_DEOPTIMIZATION:
524 return "marked for deoptimization";
525 case InvalidationReason::MISSING_EXCEPTION_HANDLER:
526 return "missing exception handler";
527 case InvalidationReason::NOT_USED:
528 return "not used";
529 case InvalidationReason::OSR_INVALIDATION_BACK_BRANCH:
530 return "OSR invalidation back branch";
531 case InvalidationReason::OSR_INVALIDATION_FOR_COMPILING_WITH_C1:
532 return "OSR invalidation for compiling with C1";
533 case InvalidationReason::OSR_INVALIDATION_OF_LOWER_LEVEL:
534 return "OSR invalidation of lower level";
535 case InvalidationReason::SET_NATIVE_FUNCTION:
536 return "set native function";
537 case InvalidationReason::UNCOMMON_TRAP:
538 return "uncommon trap";
539 case InvalidationReason::WHITEBOX_DEOPTIMIZATION:
540 return "whitebox deoptimization";
541 case InvalidationReason::ZOMBIE:
542 return "zombie";
543 default: {
544 assert(false, "Unhandled reason");
545 return "Unknown";
546 }
547 }
548 }
549
550 // create nmethod with entry_bci
551 static nmethod* new_nmethod(const methodHandle& method,
552 int compile_id,
553 int entry_bci,
554 CodeOffsets* offsets,
555 int orig_pc_offset,
556 DebugInformationRecorder* recorder,
557 Dependencies* dependencies,
558 CodeBuffer *code_buffer,
559 int frame_size,
560 OopMapSet* oop_maps,
561 ExceptionHandlerTable* handler_table,
562 ImplicitExceptionTable* nul_chk_table,
563 AbstractCompiler* compiler,
564 CompLevel comp_level
565 #if INCLUDE_JVMCI
566 , char* speculations = nullptr,
567 int speculations_len = 0,
568 JVMCINMethodData* jvmci_data = nullptr
569 #endif
570 );
571
572 static nmethod* new_native_nmethod(const methodHandle& method,
573 int compile_id,
574 CodeBuffer *code_buffer,
575 int vep_offset,
576 int frame_complete,
577 int frame_size,
578 ByteSize receiver_sp_offset,
579 ByteSize basic_lock_sp_offset,
580 OopMapSet* oop_maps,
581 int exception_handler = -1);
582
583 Method* method () const { return _method; }
584 bool is_native_method() const { return _method != nullptr && _method->is_native(); }
585 bool is_java_method () const { return _method != nullptr && !_method->is_native(); }
586 bool is_osr_method () const { return _entry_bci != InvocationEntryBci; }
587
588 // Compiler task identification. Note that all OSR methods
589 // are numbered in an independent sequence if CICountOSR is true,
590 // and native method wrappers are also numbered independently if
591 // CICountNative is true.
592 int compile_id() const { return _compile_id; }
593 const char* compile_kind() const;
594
595 inline bool is_compiled_by_c1 () const { return _compiler_type == compiler_c1; }
596 inline bool is_compiled_by_c2 () const { return _compiler_type == compiler_c2; }
597 inline bool is_compiled_by_jvmci() const { return _compiler_type == compiler_jvmci; }
598 CompilerType compiler_type () const { return _compiler_type; }
599 const char* compiler_name () const;
600
601 // boundaries for different parts
602 address consts_begin () const { return content_begin(); }
603 address consts_end () const { return code_begin() ; }
604 address insts_begin () const { return code_begin() ; }
605 address insts_end () const { return header_begin() + _stub_offset ; }
606 address stub_begin () const { return header_begin() + _stub_offset ; }
607 address stub_end () const { return code_end() ; }
608 address exception_begin () const { return header_begin() + _exception_offset ; }
609 address deopt_handler_begin () const { return header_begin() + _deopt_handler_offset ; }
610 address deopt_mh_handler_begin() const { return header_begin() + _deopt_mh_handler_offset ; }
611 address unwind_handler_begin () const { return _unwind_handler_offset != -1 ? (insts_end() - _unwind_handler_offset) : nullptr; }
612 oop* oops_begin () const { return (oop*) data_begin(); }
613 oop* oops_end () const { return (oop*) data_end(); }
614
615 // mutable data
616 Metadata** metadata_begin () const { return (Metadata**) (mutable_data_begin() + _relocation_size); }
617 #if INCLUDE_JVMCI
618 Metadata** metadata_end () const { return (Metadata**) (mutable_data_begin() + _relocation_size + _metadata_size); }
619 address jvmci_data_begin () const { return mutable_data_begin() + _relocation_size + _metadata_size; }
620 address jvmci_data_end () const { return mutable_data_end(); }
621 #else
622 Metadata** metadata_end () const { return (Metadata**) mutable_data_end(); }
623 #endif
624
625 // immutable data
626 address immutable_data_begin () const { return _immutable_data; }
627 address immutable_data_end () const { return _immutable_data + _immutable_data_size ; }
628 address dependencies_begin () const { return _immutable_data; }
629 address dependencies_end () const { return _immutable_data + _nul_chk_table_offset; }
630 address nul_chk_table_begin () const { return _immutable_data + _nul_chk_table_offset; }
631 address nul_chk_table_end () const { return _immutable_data + _handler_table_offset; }
632 address handler_table_begin () const { return _immutable_data + _handler_table_offset; }
633 address handler_table_end () const { return _immutable_data + _scopes_pcs_offset ; }
634 PcDesc* scopes_pcs_begin () const { return (PcDesc*)(_immutable_data + _scopes_pcs_offset) ; }
635 PcDesc* scopes_pcs_end () const { return (PcDesc*)(_immutable_data + _scopes_data_offset) ; }
636 address scopes_data_begin () const { return _immutable_data + _scopes_data_offset ; }
637
638 #if INCLUDE_JVMCI
639 address scopes_data_end () const { return _immutable_data + _speculations_offset ; }
640 address speculations_begin () const { return _immutable_data + _speculations_offset ; }
641 address speculations_end () const { return immutable_data_end(); }
642 #else
643 address scopes_data_end () const { return immutable_data_end(); }
644 #endif
645
646 // Sizes
647 int immutable_data_size() const { return _immutable_data_size; }
648 int consts_size () const { return int( consts_end () - consts_begin ()); }
649 int insts_size () const { return int( insts_end () - insts_begin ()); }
650 int stub_size () const { return int( stub_end () - stub_begin ()); }
651 int oops_size () const { return int((address) oops_end () - (address) oops_begin ()); }
652 int metadata_size () const { return int((address) metadata_end () - (address) metadata_begin ()); }
653 int scopes_data_size () const { return int( scopes_data_end () - scopes_data_begin ()); }
654 int scopes_pcs_size () const { return int((intptr_t)scopes_pcs_end () - (intptr_t)scopes_pcs_begin ()); }
655 int dependencies_size () const { return int( dependencies_end () - dependencies_begin ()); }
656 int handler_table_size () const { return int( handler_table_end() - handler_table_begin()); }
657 int nul_chk_table_size () const { return int( nul_chk_table_end() - nul_chk_table_begin()); }
658 #if INCLUDE_JVMCI
659 int speculations_size () const { return int( speculations_end () - speculations_begin ()); }
660 int jvmci_data_size () const { return int( jvmci_data_end () - jvmci_data_begin ()); }
661 #endif
662
663 int oops_count() const { assert(oops_size() % oopSize == 0, ""); return (oops_size() / oopSize) + 1; }
664 int metadata_count() const { assert(metadata_size() % wordSize == 0, ""); return (metadata_size() / wordSize) + 1; }
665
666 int skipped_instructions_size () const { return _skipped_instructions_size; }
667 int total_size() const;
668
669 // Containment
670 bool consts_contains (address addr) const { return consts_begin () <= addr && addr < consts_end (); }
671 // Returns true if a given address is in the 'insts' section. The method
672 // insts_contains_inclusive() is end-inclusive.
673 bool insts_contains (address addr) const { return insts_begin () <= addr && addr < insts_end (); }
674 bool insts_contains_inclusive(address addr) const { return insts_begin () <= addr && addr <= insts_end (); }
675 bool stub_contains (address addr) const { return stub_begin () <= addr && addr < stub_end (); }
676 bool oops_contains (oop* addr) const { return oops_begin () <= addr && addr < oops_end (); }
677 bool metadata_contains (Metadata** addr) const { return metadata_begin () <= addr && addr < metadata_end (); }
678 bool scopes_data_contains (address addr) const { return scopes_data_begin () <= addr && addr < scopes_data_end (); }
679 bool scopes_pcs_contains (PcDesc* addr) const { return scopes_pcs_begin () <= addr && addr < scopes_pcs_end (); }
680 bool handler_table_contains (address addr) const { return handler_table_begin() <= addr && addr < handler_table_end(); }
681 bool nul_chk_table_contains (address addr) const { return nul_chk_table_begin() <= addr && addr < nul_chk_table_end(); }
682
683 // entry points
684 address entry_point() const { return code_begin() + _entry_offset; } // normal entry point
685 address verified_entry_point() const { return code_begin() + _verified_entry_offset; } // if klass is correct
686
687 enum : signed char { not_installed = -1, // in construction, only the owner doing the construction is
688 // allowed to advance state
689 in_use = 0, // executable nmethod
690 not_entrant = 1 // marked for deoptimization but activations may still exist
691 };
692
693 // flag accessing and manipulation
694 bool is_not_installed() const { return _state == not_installed; }
695 bool is_in_use() const { return _state <= in_use; }
696 bool is_not_entrant() const { return _state == not_entrant; }
697 int get_state() const { return _state; }
698
699 void clear_unloading_state();
  // Heuristically deduce whether this nmethod isn't worth keeping around
701 bool is_cold();
702 bool is_unloading();
703 void do_unloading(bool unloading_occurred);
704
705 bool make_in_use() {
706 return try_transition(in_use);
707 }
  // Make the nmethod not entrant. The nmethod will continue to be
709 // alive. It is used when an uncommon trap happens. Returns true
710 // if this thread changed the state of the nmethod or false if
711 // another thread performed the transition.
712 bool make_not_entrant(InvalidationReason invalidation_reason);
713 bool make_not_used() { return make_not_entrant(InvalidationReason::NOT_USED); }
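
  // Usage sketch (illustrative): deoptimization code invalidates an nmethod and
  // records why, e.g.
  //
  //   if (nm->make_not_entrant(nmethod::InvalidationReason::UNCOMMON_TRAP)) {
  //     // this thread performed the transition; activations may still be on stack
  //   }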
714
715 bool is_marked_for_deoptimization() const { return deoptimization_status() != not_marked; }
716 bool has_been_deoptimized() const { return deoptimization_status() == deoptimize_done; }
717 void set_deoptimized_done();
718
719 bool update_recompile_counts() const {
720 // Update recompile counts when either the update is explicitly requested (deoptimize)
721 // or the nmethod is not marked for deoptimization at all (not_marked).
    // The latter happens during uncommon traps when a deoptimized nmethod is made not entrant.
723 DeoptimizationStatus status = deoptimization_status();
724 return status != deoptimize_noupdate && status != deoptimize_done;
725 }
726
727 // tells whether frames described by this nmethod can be deoptimized
728 // note: native wrappers cannot be deoptimized.
729 bool can_be_deoptimized() const { return is_java_method(); }
730
731 bool has_dependencies() { return dependencies_size() != 0; }
732 void print_dependencies_on(outputStream* out) PRODUCT_RETURN;
733 void flush_dependencies();
734
735 template<typename T>
736 T* gc_data() const { return reinterpret_cast<T*>(_gc_data); }
737 template<typename T>
738 void set_gc_data(T* gc_data) { _gc_data = reinterpret_cast<void*>(gc_data); }
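
  // Usage sketch (illustrative; MyGCNMethodData is a hypothetical GC-specific type):
  //
  //   nm->set_gc_data<MyGCNMethodData>(new MyGCNMethodData());
  //   MyGCNMethodData* data = nm->gc_data<MyGCNMethodData>();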
739
740 bool has_unsafe_access() const { return _has_unsafe_access; }
741 void set_has_unsafe_access(bool z) { _has_unsafe_access = z; }
742
743 bool has_monitors() const { return _has_monitors; }
744 void set_has_monitors(bool z) { _has_monitors = z; }
745
746 bool has_scoped_access() const { return _has_scoped_access; }
747 void set_has_scoped_access(bool z) { _has_scoped_access = z; }
748
749 bool has_method_handle_invokes() const { return _has_method_handle_invokes; }
750 void set_has_method_handle_invokes(bool z) { _has_method_handle_invokes = z; }
751
752 bool has_wide_vectors() const { return _has_wide_vectors; }
753 void set_has_wide_vectors(bool z) { _has_wide_vectors = z; }
754
755 bool has_flushed_dependencies() const { return _has_flushed_dependencies; }
756 void set_has_flushed_dependencies(bool z) {
757 assert(!has_flushed_dependencies(), "should only happen once");
758 _has_flushed_dependencies = z;
759 }
760
761 bool is_unlinked() const { return _is_unlinked; }
762 void set_is_unlinked() {
763 assert(!_is_unlinked, "already unlinked");
764 _is_unlinked = true;
765 }
766
767 int comp_level() const { return _comp_level; }
768
769 // Support for oops in scopes and relocs:
770 // Note: index 0 is reserved for null.
771 oop oop_at(int index) const;
772 oop oop_at_phantom(int index) const; // phantom reference
773 oop* oop_addr_at(int index) const { // for GC
774 // relocation indexes are biased by 1 (because 0 is reserved)
775 assert(index > 0 && index <= oops_count(), "must be a valid non-zero index");
776 return &oops_begin()[index - 1];
777 }
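
  // Worked example (illustrative): with the 1-biased indexing above, oop_addr_at(1)
  // returns &oops_begin()[0]; index 0 stands for null and never reaches the array.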
778
779 // Support for meta data in scopes and relocs:
780 // Note: index 0 is reserved for null.
781 Metadata* metadata_at(int index) const { return index == 0 ? nullptr: *metadata_addr_at(index); }
782 Metadata** metadata_addr_at(int index) const { // for GC
783 // relocation indexes are biased by 1 (because 0 is reserved)
784 assert(index > 0 && index <= metadata_count(), "must be a valid non-zero index");
785 return &metadata_begin()[index - 1];
786 }
787
788 void copy_values(GrowableArray<jobject>* oops);
789 void copy_values(GrowableArray<Metadata*>* metadata);
790 void copy_values(GrowableArray<address>* metadata) {} // Nothing to do
791
792 // Relocation support
793 private:
794 void fix_oop_relocations(address begin, address end, bool initialize_immediates);
795 inline void initialize_immediate_oop(oop* dest, jobject handle);
796
797 protected:
798 address oops_reloc_begin() const;
799
800 public:
801 void fix_oop_relocations(address begin, address end) { fix_oop_relocations(begin, end, false); }
802 void fix_oop_relocations() { fix_oop_relocations(nullptr, nullptr, false); }
803
804 bool is_at_poll_return(address pc);
805 bool is_at_poll_or_poll_return(address pc);
806
807 protected:
808 // Exception cache support
809 // Note: _exception_cache may be read and cleaned concurrently.
810 ExceptionCache* exception_cache() const { return _exception_cache; }
811 ExceptionCache* exception_cache_acquire() const;
812
813 public:
814 address handler_for_exception_and_pc(Handle exception, address pc);
815 void add_handler_for_exception_and_pc(Handle exception, address pc, address handler);
816 void clean_exception_cache();
817
818 void add_exception_cache_entry(ExceptionCache* new_entry);
819 ExceptionCache* exception_cache_entry_for_exception(Handle exception);
820
821
822 // MethodHandle
823 bool is_method_handle_return(address return_pc);
824 // Deopt
  // Returns true if the PC is one we would expect if the frame is being deopted.
826 inline bool is_deopt_pc(address pc);
827 inline bool is_deopt_mh_entry(address pc);
828 inline bool is_deopt_entry(address pc);
829
  // Accessor/mutator for the original pc of a frame before the frame was deopted.
831 address get_original_pc(const frame* fr) { return *orig_pc_addr(fr); }
832 void set_original_pc(const frame* fr, address pc) { *orig_pc_addr(fr) = pc; }
833
834 const char* state() const;
835
836 bool inlinecache_check_contains(address addr) const {
837 return (addr >= code_begin() && addr < verified_entry_point());
838 }
839
840 void preserve_callee_argument_oops(frame fr, const RegisterMap *reg_map, OopClosure* f);
841
842 // implicit exceptions support
843 address continuation_for_implicit_div0_exception(address pc) { return continuation_for_implicit_exception(pc, true); }
844 address continuation_for_implicit_null_exception(address pc) { return continuation_for_implicit_exception(pc, false); }
845
846 // Inline cache support for class unloading and nmethod unloading
847 private:
848 void cleanup_inline_caches_impl(bool unloading_occurred, bool clean_all);
849
850 address continuation_for_implicit_exception(address pc, bool for_div0_check);
851
852 public:
853 // Serial version used by whitebox test
854 void cleanup_inline_caches_whitebox();
855
856 void clear_inline_caches();
857
858 // Execute nmethod barrier code, as if entering through nmethod call.
859 void run_nmethod_entry_barrier();
860
861 void verify_oop_relocations();
862
863 bool has_evol_metadata();
864
865 Method* attached_method(address call_pc);
866 Method* attached_method_before_pc(address pc);
867
868 // GC unloading support
869 // Cleans unloaded klasses and unloaded nmethods in inline caches
870
871 void unload_nmethod_caches(bool class_unloading_occurred);
872
873 void unlink_from_method();
874
875 // On-stack replacement support
876 int osr_entry_bci() const { assert(is_osr_method(), "wrong kind of nmethod"); return _entry_bci; }
877 address osr_entry() const { assert(is_osr_method(), "wrong kind of nmethod"); return _osr_entry_point; }
878 nmethod* osr_link() const { return _osr_link; }
879 void set_osr_link(nmethod *n) { _osr_link = n; }
880 void invalidate_osr_method();
881
882 int num_stack_arg_slots(bool rounded = true) const {
883 return rounded ? align_up(_num_stack_arg_slots, 2) : _num_stack_arg_slots;
884 }
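
  // Worked example (illustrative): with _num_stack_arg_slots == 3,
  // num_stack_arg_slots() returns 4 (aligned up to an even slot count) while
  // num_stack_arg_slots(false) returns the raw value 3.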
885
886 // Verify calls to dead methods have been cleaned.
887 void verify_clean_inline_caches();
888
889 // Unlink this nmethod from the system
890 void unlink();
891
892 // Deallocate this nmethod - called by the GC
893 void purge(bool unregister_nmethod);
894
895 // See comment at definition of _last_seen_on_stack
896 void mark_as_maybe_on_stack();
897 bool is_maybe_on_stack();
898
899 // Evolution support. We make old (discarded) compiled methods point to new Method*s.
900 void set_method(Method* method) { _method = method; }
901
902 #if INCLUDE_JVMCI
903 // Gets the JVMCI name of this nmethod.
904 const char* jvmci_name();
905
906 // Records the pending failed speculation in the
907 // JVMCI speculation log associated with this nmethod.
908 void update_speculation(JavaThread* thread);
909
910 // Gets the data specific to a JVMCI compiled method.
911 // This returns a non-nullptr value iff this nmethod was
912 // compiled by the JVMCI compiler.
913 JVMCINMethodData* jvmci_nmethod_data() const {
914 return jvmci_data_size() == 0 ? nullptr : (JVMCINMethodData*) jvmci_data_begin();
915 }
916 #endif
917
918 void oops_do(OopClosure* f) { oops_do(f, false); }
919 void oops_do(OopClosure* f, bool allow_dead);
920
921 // All-in-one claiming of nmethods: returns true if the caller successfully claimed that
922 // nmethod.
923 bool oops_do_try_claim();
924
925 // Loom support for following nmethods on the stack
926 void follow_nmethod(OopIterateClosure* cl);
927
928 // Class containing callbacks for the oops_do_process_weak/strong() methods
929 // below.
930 class OopsDoProcessor {
931 public:
932 // Process the oops of the given nmethod based on whether it has been called
933 // in a weak or strong processing context, i.e. apply either weak or strong
934 // work on it.
935 virtual void do_regular_processing(nmethod* nm) = 0;
    // Assuming that the oops of the given nmethod have already had weak
    // processing applied to them, apply the remaining strong processing part.
938 virtual void do_remaining_strong_processing(nmethod* nm) = 0;
939 };
940
941 // The following two methods do the work corresponding to weak/strong nmethod
942 // processing.
943 void oops_do_process_weak(OopsDoProcessor* p);
944 void oops_do_process_strong(OopsDoProcessor* p);
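
  // Implementation sketch (illustrative; the processor and closure names are hypothetical):
  //
  //   class MyProcessor : public nmethod::OopsDoProcessor {
  //    public:
  //     void do_regular_processing(nmethod* nm) override {
  //       nm->oops_do(&_my_oop_closure);   // full weak or strong work
  //     }
  //     void do_remaining_strong_processing(nmethod* nm) override {
  //       // apply only the strong work not already covered by weak processing
  //     }
  //   };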
945
946 static void oops_do_marking_prologue();
947 static void oops_do_marking_epilogue();
948
949 private:
950 ScopeDesc* scope_desc_in(address begin, address end);
951
952 address* orig_pc_addr(const frame* fr);
953
  // used by jvmti to track if the load event has been reported
955 bool load_reported() const { return _load_reported; }
956 void set_load_reported() { _load_reported = true; }
957
958 public:
959 // ScopeDesc retrieval operation
960 PcDesc* pc_desc_at(address pc) { return find_pc_desc(pc, false); }
961 // pc_desc_near returns the first PcDesc at or after the given pc.
962 PcDesc* pc_desc_near(address pc) { return find_pc_desc(pc, true); }
963
964 // ScopeDesc for an instruction
965 ScopeDesc* scope_desc_at(address pc);
966 ScopeDesc* scope_desc_near(address pc);
967
968 // copying of debugging information
969 void copy_scopes_pcs(PcDesc* pcs, int count);
970 void copy_scopes_data(address buffer, int size);
971
972 int orig_pc_offset() { return _orig_pc_offset; }
973
974 // Post successful compilation
975 void post_compiled_method(CompileTask* task);
976
977 // jvmti support:
978 void post_compiled_method_load_event(JvmtiThreadState* state = nullptr);
979
980 // verify operations
981 void verify();
982 void verify_scopes();
983 void verify_interrupt_point(address interrupt_point, bool is_inline_cache);
984
985 // Disassemble this nmethod with additional debug information, e.g. information about blocks.
986 void decode2(outputStream* st) const;
987 void print_constant_pool(outputStream* st);
988
989 // Avoid hiding of parent's 'decode(outputStream*)' method.
990 void decode(outputStream* st) const { decode2(st); } // just delegate here.
991
992 // printing support
993 void print_on_impl(outputStream* st) const;
994 void print_code();
995 void print_value_on_impl(outputStream* st) const;
996
997 #if defined(SUPPORT_DATA_STRUCTS)
998 // print output in opt build for disassembler library
999 void print_relocations() PRODUCT_RETURN;
1000 void print_pcs_on(outputStream* st);
1001 void print_scopes() { print_scopes_on(tty); }
1002 void print_scopes_on(outputStream* st) PRODUCT_RETURN;
1003 void print_handler_table();
1004 void print_nul_chk_table();
1005 void print_recorded_oop(int log_n, int index);
1006 void print_recorded_oops();
1007 void print_recorded_metadata();
1008
1009 void print_oops(outputStream* st); // oops from the underlying CodeBlob.
1010 void print_metadata(outputStream* st); // metadata in metadata pool.
1011 #else
1012 void print_pcs_on(outputStream* st) { return; }
1013 #endif
1014
1015 void print_calls(outputStream* st) PRODUCT_RETURN;
1016 static void print_statistics() PRODUCT_RETURN;
1017
1018 void maybe_print_nmethod(const DirectiveSet* directive);
1019 void print_nmethod(bool print_code);
1020
1021 void print_on_with_msg(outputStream* st, const char* msg) const;
1022
1023 // Logging
1024 void log_identity(xmlStream* log) const;
1025 void log_new_nmethod() const;
1026 void log_state_change(InvalidationReason invalidation_reason) const;
1027
1028 // Prints block-level comments, including nmethod specific block labels:
1029 void print_nmethod_labels(outputStream* stream, address block_begin, bool print_section_labels=true) const;
1030 const char* nmethod_section_label(address pos) const;
1031
1032 // returns whether this nmethod has code comments.
1033 bool has_code_comment(address begin, address end);
1034 // Prints a comment for one native instruction (reloc info, pc desc)
1035 void print_code_comment_on(outputStream* st, int column, address begin, address end);
1036
1037 // tells if this compiled method is dependent on the given changes,
1038 // and the changes have invalidated it
1039 bool check_dependency_on(DepChange& changes);
1040
1041 // Fast breakpoint support. Tells if this compiled method is
1042 // dependent on the given method. Returns true if this nmethod
1043 // corresponds to the given method as well.
1044 bool is_dependent_on_method(Method* dependee);
1045
1046 // JVMTI's GetLocalInstance() support
1047 ByteSize native_receiver_sp_offset() {
1048 assert(is_native_method(), "sanity");
1049 return _native_receiver_sp_offset;
1050 }
1051 ByteSize native_basic_lock_sp_offset() {
1052 assert(is_native_method(), "sanity");
1053 return _native_basic_lock_sp_offset;
1054 }
1055
1056 // support for code generation
1057 static ByteSize osr_entry_point_offset() { return byte_offset_of(nmethod, _osr_entry_point); }
1058 static ByteSize state_offset() { return byte_offset_of(nmethod, _state); }
1059
1060 void metadata_do(MetadataClosure* f);
1061
1062 address call_instruction_address(address pc) const;
1063
1064 void make_deoptimized();
1065 void finalize_relocations();
1066
1067 class Vptr : public CodeBlob::Vptr {
1068 void print_on(const CodeBlob* instance, outputStream* st) const override {
1069 ttyLocker ttyl;
1070 instance->as_nmethod()->print_on_impl(st);
1071 }
1072 void print_value_on(const CodeBlob* instance, outputStream* st) const override {
1073 instance->as_nmethod()->print_value_on_impl(st);
1074 }
1075 };
1076
1077 static const Vptr _vpntr;
1078 };
1079
1080 #endif // SHARE_CODE_NMETHOD_HPP