1 /*
2 * Copyright (c) 1997, 2025, Oracle and/or its affiliates. All rights reserved.
3 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
4 *
5 * This code is free software; you can redistribute it and/or modify it
6 * under the terms of the GNU General Public License version 2 only, as
7 * published by the Free Software Foundation.
8 *
9 * This code is distributed in the hope that it will be useful, but WITHOUT
10 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
11 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
12 * version 2 for more details (a copy is included in the LICENSE file that
13 * accompanied this code).
14 *
15 * You should have received a copy of the GNU General Public License version
16 * 2 along with this work; if not, write to the Free Software Foundation,
17 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
18 *
19 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
20 * or visit www.oracle.com if you need additional information or have any
21 * questions.
22 *
23 */
24
25 #ifndef SHARE_CODE_NMETHOD_HPP
26 #define SHARE_CODE_NMETHOD_HPP
27
28 #include "code/codeBlob.hpp"
29 #include "code/pcDesc.hpp"
30 #include "oops/metadata.hpp"
31 #include "oops/method.hpp"
32
33 class AbstractCompiler;
34 class CompiledDirectCall;
35 class CompiledIC;
36 class CompiledICData;
37 class CompileTask;
38 class DepChange;
39 class Dependencies;
40 class DirectiveSet;
41 class DebugInformationRecorder;
42 class ExceptionHandlerTable;
43 class ImplicitExceptionTable;
44 class JvmtiThreadState;
45 class MetadataClosure;
46 class NativeCallWrapper;
47 class OopIterateClosure;
48 class ScopeDesc;
49 class xmlStream;
50
51 // This class is used internally by nmethods, to cache
52 // exception/pc/handler information.
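//
// A rough usage sketch (illustrative only; nm, exception and pc are placeholders,
// and the authoritative flow lives in nmethod::handler_for_exception_and_pc() and
// add_handler_for_exception_and_pc(), declared further below):
//
//   ExceptionCache* ec = nm->exception_cache_entry_for_exception(exception);
//   address handler = (ec != nullptr) ? ec->match(exception, pc) : nullptr;
//   if (handler == nullptr) {
//     handler = /* slow path: consult the exception handler table */;
//     nm->add_handler_for_exception_and_pc(exception, pc, handler);
//   }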
53
54 class ExceptionCache : public CHeapObj<mtCode> {
55 friend class VMStructs;
56 private:
57 enum { cache_size = 16 };
58 Klass* _exception_type;
59 address _pc[cache_size];
60 address _handler[cache_size];
61 volatile int _count;
62 ExceptionCache* volatile _next;
63 ExceptionCache* _purge_list_next;
64
65 inline address pc_at(int index);
66 void set_pc_at(int index, address a) { assert(index >= 0 && index < cache_size,""); _pc[index] = a; }
67
68 inline address handler_at(int index);
69 void set_handler_at(int index, address a) { assert(index >= 0 && index < cache_size,""); _handler[index] = a; }
70
71 inline int count();
72 // increment_count is only called under lock, but there may be concurrent readers.
73 void increment_count();
74
75 public:
76
77 ExceptionCache(Handle exception, address pc, address handler);
78
79 Klass* exception_type() { return _exception_type; }
80 ExceptionCache* next();
81 void set_next(ExceptionCache *ec);
82 ExceptionCache* purge_list_next() { return _purge_list_next; }
83 void set_purge_list_next(ExceptionCache *ec) { _purge_list_next = ec; }
84
85 address match(Handle exception, address pc);
86 bool match_exception_with_space(Handle exception) ;
87 address test_address(address addr);
88 bool add_address_and_handler(address addr, address handler) ;
89 };
90
91 // cache pc descs found in earlier inquiries
92 class PcDescCache {
93 friend class VMStructs;
94 private:
95 enum { cache_size = 4 };
  // The array elements MUST be volatile! Several threads may modify
  // and read from the cache concurrently. Without volatile, a C++
  // compiler (namely xlC12) may duplicate field accesses, and
  // find_pc_desc_internal has been observed to return wrong results.
100 typedef PcDesc* PcDescPtr;
101 volatile PcDescPtr _pc_descs[cache_size]; // last cache_size pc_descs found
102 public:
103 PcDescCache() { DEBUG_ONLY(_pc_descs[0] = nullptr); }
104 void init_to(PcDesc* initial_pc_desc);
105 PcDesc* find_pc_desc(int pc_offset, bool approximate);
106 void add_pc_desc(PcDesc* pc_desc);
107 PcDesc* last_pc_desc() { return _pc_descs[0]; }
108 };
109
110 class PcDescContainer : public CHeapObj<mtCode> {
111 private:
112 PcDescCache _pc_desc_cache;
113 public:
114 PcDescContainer(PcDesc* initial_pc_desc) { _pc_desc_cache.init_to(initial_pc_desc); }
115
116 PcDesc* find_pc_desc_internal(address pc, bool approximate, address code_begin,
117 PcDesc* lower, PcDesc* upper);
118
119 PcDesc* find_pc_desc(address pc, bool approximate, address code_begin, PcDesc* lower, PcDesc* upper)
120 #ifdef PRODUCT
121 {
122 PcDesc* desc = _pc_desc_cache.last_pc_desc();
123 assert(desc != nullptr, "PcDesc cache should be initialized already");
124 if (desc->pc_offset() == (pc - code_begin)) {
125 // Cached value matched
126 return desc;
127 }
128 return find_pc_desc_internal(pc, approximate, code_begin, lower, upper);
129 }
130 #endif
131 ;
132 };
133
134 // nmethods (native methods) are the compiled code versions of Java methods.
135 //
136 // An nmethod contains:
137 // - Header (the nmethod structure)
138 // - Constant part (doubles, longs and floats used in nmethod)
139 // - Code part:
140 // - Code body
141 // - Exception handler
142 // - Stub code
143 // - OOP table
144 //
145 // As a CodeBlob, an nmethod references [mutable data] allocated on the C heap:
146 // - CodeBlob relocation data
147 // - Metainfo
148 // - JVMCI data
149 //
// An nmethod references [immutable data] allocated on the C heap:
151 // - Dependency assertions data
152 // - Implicit null table array
153 // - Handler entry point array
154 // - Debugging information:
155 // - Scopes data array
156 // - Scopes pcs array
157 // - JVMCI speculations array
158
159 #if INCLUDE_JVMCI
160 class FailedSpeculation;
161 class JVMCINMethodData;
162 #endif
163
164 class nmethod : public CodeBlob {
165 friend class VMStructs;
166 friend class JVMCIVMStructs;
167 friend class CodeCache; // scavengable oops
168 friend class JVMCINMethodData;
169 friend class DeoptimizationScope;
170
171 private:
172
  // Used to track in which deoptimization handshake this method will be deoptimized.
174 uint64_t _deoptimization_generation;
175
176 uint64_t _gc_epoch;
177
178 Method* _method;
179
  // To reduce header size, fields whose usages do not overlap are placed in a union.
181 union {
182 // To support simple linked-list chaining of nmethods:
183 nmethod* _osr_link; // from InstanceKlass::osr_nmethods_head
184 struct {
185 // These are used for compiled synchronized native methods to
186 // locate the owner and stack slot for the BasicLock. They are
187 // needed because there is no debug information for compiled native
188 // wrappers and the oop maps are insufficient to allow
189 // frame::retrieve_receiver() to work. Currently they are expected
190 // to be byte offsets from the Java stack pointer for maximum code
191 // sharing between platforms. JVMTI's GetLocalInstance() uses these
192 // offsets to find the receiver for non-static native wrapper frames.
193 ByteSize _native_receiver_sp_offset;
194 ByteSize _native_basic_lock_sp_offset;
195 };
196 };
197
198 // nmethod's read-only data
199 address _immutable_data;
200
201 PcDescContainer* _pc_desc_container;
202 ExceptionCache* volatile _exception_cache;
203
204 void* _gc_data;
205
206 struct oops_do_mark_link; // Opaque data type.
207 static nmethod* volatile _oops_do_mark_nmethods;
208 oops_do_mark_link* volatile _oops_do_mark_link;
209
210 CompiledICData* _compiled_ic_data;
211
212 // offsets for entry points
213 address _osr_entry_point; // entry point for on stack replacement
214 uint16_t _entry_offset; // entry point with class check
215 uint16_t _verified_entry_offset; // entry point without class check
216 int _entry_bci; // != InvocationEntryBci if this nmethod is an on-stack replacement method
217 int _immutable_data_size;
218
219 // _consts_offset == _content_offset because SECT_CONSTS is first in code buffer
220
221 int _skipped_instructions_size;
222
223 int _stub_offset;
224
225 // Offsets for different stubs section parts
226 int _exception_offset;
  // All deoptees will resume execution at the location described by
  // this offset.
  int _deopt_handler_offset;
  // All deoptees at a MethodHandle call site will resume execution
  // at the location described by this offset.
  int _deopt_mh_handler_offset;
233 // Offset (from insts_end) of the unwind handler if it exists
234 int16_t _unwind_handler_offset;
235 // Number of arguments passed on the stack
236 uint16_t _num_stack_arg_slots;
237
238 uint16_t _oops_size;
239 #if INCLUDE_JVMCI
240 // _metadata_size is not specific to JVMCI. In the non-JVMCI case, it can be derived as:
241 // _metadata_size = mutable_data_size - relocation_size
242 uint16_t _metadata_size;
243 #endif
244
245 // Offset in immutable data section
246 // _dependencies_offset == 0
247 uint16_t _nul_chk_table_offset;
248 uint16_t _handler_table_offset; // This table could be big in C1 code
249 int _scopes_pcs_offset;
250 int _scopes_data_offset;
251 #if INCLUDE_JVMCI
252 int _speculations_offset;
253 #endif
254
  // Location in frame (offset from sp) where deopt can store the original
  // pc during a deopt.
257 int _orig_pc_offset;
258
259 int _compile_id; // which compilation made this nmethod
260 CompLevel _comp_level; // compilation level (s1)
261 CompilerType _compiler_type; // which compiler made this nmethod (u1)
262
263 // Local state used to keep track of whether unloading is happening or not
264 volatile uint8_t _is_unloading_state;
265
266 // Protected by NMethodState_lock
267 volatile signed char _state; // {not_installed, in_use, not_entrant}
268
269 // set during construction
270 uint8_t _has_unsafe_access:1, // May fault due to unsafe access.
          _has_method_handle_invokes:1,// Does this method have MethodHandle invokes?
272 _has_wide_vectors:1, // Preserve wide vectors at safepoints
273 _has_monitors:1, // Fastpath monitor detection for continuations
          _has_scoped_access:1,        // used for shared scope closure (scopedMemoryAccess.cpp)
275 _has_flushed_dependencies:1, // Used for maintenance of dependencies (under CodeCache_lock)
276 _is_unlinked:1, // mark during class unloading
277 _load_reported:1; // used by jvmti to track if an event has been posted for this nmethod
278
279 enum DeoptimizationStatus : u1 {
280 not_marked,
281 deoptimize,
282 deoptimize_noupdate,
283 deoptimize_done
284 };
285
286 volatile DeoptimizationStatus _deoptimization_status; // Used for stack deoptimization
287
288 DeoptimizationStatus deoptimization_status() const {
289 return Atomic::load(&_deoptimization_status);
290 }
291
292 // Initialize fields to their default values
293 void init_defaults(CodeBuffer *code_buffer, CodeOffsets* offsets);
294
295 // Post initialization
296 void post_init();
297
298 // For native wrappers
299 nmethod(Method* method,
300 CompilerType type,
301 int nmethod_size,
302 int compile_id,
303 CodeOffsets* offsets,
304 CodeBuffer *code_buffer,
305 int frame_size,
306 ByteSize basic_lock_owner_sp_offset, /* synchronized natives only */
307 ByteSize basic_lock_sp_offset, /* synchronized natives only */
308 OopMapSet* oop_maps,
309 int mutable_data_size);
310
311 // For normal JIT compiled code
312 nmethod(Method* method,
313 CompilerType type,
314 int nmethod_size,
315 int immutable_data_size,
316 int mutable_data_size,
317 int compile_id,
318 int entry_bci,
319 address immutable_data,
320 CodeOffsets* offsets,
321 int orig_pc_offset,
322 DebugInformationRecorder *recorder,
323 Dependencies* dependencies,
324 CodeBuffer *code_buffer,
325 int frame_size,
326 OopMapSet* oop_maps,
327 ExceptionHandlerTable* handler_table,
328 ImplicitExceptionTable* nul_chk_table,
329 AbstractCompiler* compiler,
330 CompLevel comp_level
331 #if INCLUDE_JVMCI
332 , char* speculations = nullptr,
333 int speculations_len = 0,
334 JVMCINMethodData* jvmci_data = nullptr
335 #endif
336 );
337
338 // helper methods
339 void* operator new(size_t size, int nmethod_size, int comp_level) throw();
340
341 // For method handle intrinsics: Try MethodNonProfiled, MethodProfiled and NonNMethod.
342 // Attention: Only allow NonNMethod space for special nmethods which don't need to be
343 // findable by nmethod iterators! In particular, they must not contain oops!
344 void* operator new(size_t size, int nmethod_size, bool allow_NonNMethod_space) throw();
345
346 const char* reloc_string_for(u_char* begin, u_char* end);
347
348 bool try_transition(signed char new_state);
349
350 // Returns true if this thread changed the state of the nmethod or
351 // false if another thread performed the transition.
352 bool make_entrant() { Unimplemented(); return false; }
353 void inc_decompile_count();
354
355 // Inform external interfaces that a compiled method has been unloaded
356 void post_compiled_method_unload();
357
358 PcDesc* find_pc_desc(address pc, bool approximate) {
359 if (_pc_desc_container == nullptr) return nullptr; // native method
360 return _pc_desc_container->find_pc_desc(pc, approximate, code_begin(), scopes_pcs_begin(), scopes_pcs_end());
361 }
362
363 // STW two-phase nmethod root processing helpers.
364 //
  // When determining liveness of a given nmethod for code cache unloading,
  // some collectors need to do different things depending on whether the nmethod
  // must absolutely be kept alive during root processing: "strong"ly reachable
  // nmethods are known to be kept alive at root processing, but the liveness of
  // "weak"ly reachable ones is determined later.
  //
  // We want to allow strong and weak processing of nmethods by different threads
  // at the same time without heavy synchronization. Additional constraints are
  // that every nmethod is processed only a minimal number of times, and that
  // nmethods themselves are iterated at most once at any particular time.
375 //
376 // Note that strong processing work must be a superset of weak processing work
377 // for this code to work.
378 //
379 // We store state and claim information in the _oops_do_mark_link member, using
380 // the two LSBs for the state and the remaining upper bits for linking together
381 // nmethods that were already visited.
382 // The last element is self-looped, i.e. points to itself to avoid some special
383 // "end-of-list" sentinel value.
384 //
385 // _oops_do_mark_link special values:
386 //
387 // _oops_do_mark_link == nullptr: the nmethod has not been visited at all yet, i.e.
388 // is Unclaimed.
389 //
390 // For other values, its lowest two bits indicate the following states of the nmethod:
391 //
392 // weak_request (WR): the nmethod has been claimed by a thread for weak processing
393 // weak_done (WD): weak processing has been completed for this nmethod.
394 // strong_request (SR): the nmethod has been found to need strong processing while
395 // being weak processed.
  //   strong_done (SD): strong processing has been completed for this nmethod.
397 //
398 // The following shows the _only_ possible progressions of the _oops_do_mark_link
399 // pointer.
400 //
401 // Given
402 // N as the nmethod
  //   X as the current next value of _oops_do_mark_link
404 //
405 // Unclaimed (C)-> N|WR (C)-> X|WD: the nmethod has been processed weakly by
406 // a single thread.
407 // Unclaimed (C)-> N|WR (C)-> X|WD (O)-> X|SD: after weak processing has been
408 // completed (as above) another thread found that the nmethod needs strong
409 // processing after all.
410 // Unclaimed (C)-> N|WR (O)-> N|SR (C)-> X|SD: during weak processing another
411 // thread finds that the nmethod needs strong processing, marks it as such and
412 // terminates. The original thread completes strong processing.
413 // Unclaimed (C)-> N|SD (C)-> X|SD: the nmethod has been processed strongly from
414 // the beginning by a single thread.
415 //
416 // "|" describes the concatenation of bits in _oops_do_mark_link.
417 //
418 // The diagram also describes the threads responsible for changing the nmethod to
419 // the next state by marking the _transition_ with (C) and (O), which mean "current"
420 // and "other" thread respectively.
421 //
422
423 // States used for claiming nmethods during root processing.
424 static const uint claim_weak_request_tag = 0;
425 static const uint claim_weak_done_tag = 1;
426 static const uint claim_strong_request_tag = 2;
427 static const uint claim_strong_done_tag = 3;
428
429 static oops_do_mark_link* mark_link(nmethod* nm, uint tag) {
430 assert(tag <= claim_strong_done_tag, "invalid tag %u", tag);
    assert(is_aligned(nm, 4), "nmethod pointer must have its lower two bits clear");
432 return (oops_do_mark_link*)(((uintptr_t)nm & ~0x3) | tag);
433 }
434
435 static uint extract_state(oops_do_mark_link* link) {
436 return (uint)((uintptr_t)link & 0x3);
437 }
438
439 static nmethod* extract_nmethod(oops_do_mark_link* link) {
440 return (nmethod*)((uintptr_t)link & ~0x3);
441 }
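
  // Round-trip sketch (illustrative; nm is a placeholder and must be at least
  // 4-byte aligned, as asserted in mark_link() above):
  //
  //   oops_do_mark_link* link = mark_link(nm, claim_weak_done_tag);
  //   extract_nmethod(link);  // == nm: the upper bits recover the nmethod
  //   extract_state(link);    // == claim_weak_done_tag: the lower two bits carry the claim state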
442
443 void oops_do_log_change(const char* state);
444
445 static bool oops_do_has_weak_request(oops_do_mark_link* next) {
446 return extract_state(next) == claim_weak_request_tag;
447 }
448
449 static bool oops_do_has_any_strong_state(oops_do_mark_link* next) {
450 return extract_state(next) >= claim_strong_request_tag;
451 }
452
453 // Attempt Unclaimed -> N|WR transition. Returns true if successful.
454 bool oops_do_try_claim_weak_request();
455
456 // Attempt Unclaimed -> N|SD transition. Returns the current link.
457 oops_do_mark_link* oops_do_try_claim_strong_done();
458 // Attempt N|WR -> X|WD transition. Returns nullptr if successful, X otherwise.
459 nmethod* oops_do_try_add_to_list_as_weak_done();
460
461 // Attempt X|WD -> N|SR transition. Returns the current link.
462 oops_do_mark_link* oops_do_try_add_strong_request(oops_do_mark_link* next);
463 // Attempt X|WD -> X|SD transition. Returns true if successful.
464 bool oops_do_try_claim_weak_done_as_strong_done(oops_do_mark_link* next);
465
466 // Do the N|SD -> X|SD transition.
467 void oops_do_add_to_list_as_strong_done();
468
469 // Sets this nmethod as strongly claimed (as part of N|SD -> X|SD and N|SR -> X|SD
470 // transitions).
471 void oops_do_set_strong_done(nmethod* old_head);
472
473 public:
474 enum class ChangeReason : u1 {
475 C1_codepatch,
476 C1_deoptimize,
477 C1_deoptimize_for_patching,
478 C1_predicate_failed_trap,
479 CI_replay,
480 JVMCI_invalidate_nmethod,
481 JVMCI_invalidate_nmethod_mirror,
482 JVMCI_materialize_virtual_object,
483 JVMCI_new_installation,
484 JVMCI_register_method,
485 JVMCI_replacing_with_new_code,
486 JVMCI_reprofile,
487 marked_for_deoptimization,
488 missing_exception_handler,
489 not_used,
490 OSR_invalidation_back_branch,
491 OSR_invalidation_for_compiling_with_C1,
492 OSR_invalidation_of_lower_level,
493 set_native_function,
494 uncommon_trap,
495 whitebox_deoptimization,
496 zombie,
497 };
498
499
500 static const char* change_reason_to_string(ChangeReason change_reason) {
501 switch (change_reason) {
502 case ChangeReason::C1_codepatch:
503 return "C1 code patch";
504 case ChangeReason::C1_deoptimize:
505 return "C1 deoptimized";
506 case ChangeReason::C1_deoptimize_for_patching:
507 return "C1 deoptimize for patching";
508 case ChangeReason::C1_predicate_failed_trap:
509 return "C1 predicate failed trap";
510 case ChangeReason::CI_replay:
511 return "CI replay";
512 case ChangeReason::JVMCI_invalidate_nmethod:
513 return "JVMCI invalidate nmethod";
514 case ChangeReason::JVMCI_invalidate_nmethod_mirror:
515 return "JVMCI invalidate nmethod mirror";
516 case ChangeReason::JVMCI_materialize_virtual_object:
517 return "JVMCI materialize virtual object";
518 case ChangeReason::JVMCI_new_installation:
519 return "JVMCI new installation";
520 case ChangeReason::JVMCI_register_method:
521 return "JVMCI register method";
522 case ChangeReason::JVMCI_replacing_with_new_code:
523 return "JVMCI replacing with new code";
524 case ChangeReason::JVMCI_reprofile:
525 return "JVMCI reprofile";
526 case ChangeReason::marked_for_deoptimization:
527 return "marked for deoptimization";
528 case ChangeReason::missing_exception_handler:
529 return "missing exception handler";
530 case ChangeReason::not_used:
531 return "not used";
532 case ChangeReason::OSR_invalidation_back_branch:
533 return "OSR invalidation back branch";
534 case ChangeReason::OSR_invalidation_for_compiling_with_C1:
535 return "OSR invalidation for compiling with C1";
536 case ChangeReason::OSR_invalidation_of_lower_level:
537 return "OSR invalidation of lower level";
538 case ChangeReason::set_native_function:
539 return "set native function";
540 case ChangeReason::uncommon_trap:
541 return "uncommon trap";
542 case ChangeReason::whitebox_deoptimization:
543 return "whitebox deoptimization";
544 case ChangeReason::zombie:
545 return "zombie";
546 default: {
547 assert(false, "Unhandled reason");
548 return "Unknown";
549 }
550 }
551 }
552
553 // create nmethod with entry_bci
554 static nmethod* new_nmethod(const methodHandle& method,
555 int compile_id,
556 int entry_bci,
557 CodeOffsets* offsets,
558 int orig_pc_offset,
559 DebugInformationRecorder* recorder,
560 Dependencies* dependencies,
561 CodeBuffer *code_buffer,
562 int frame_size,
563 OopMapSet* oop_maps,
564 ExceptionHandlerTable* handler_table,
565 ImplicitExceptionTable* nul_chk_table,
566 AbstractCompiler* compiler,
567 CompLevel comp_level
568 #if INCLUDE_JVMCI
569 , char* speculations = nullptr,
570 int speculations_len = 0,
571 JVMCINMethodData* jvmci_data = nullptr
572 #endif
573 );
574
575 static nmethod* new_native_nmethod(const methodHandle& method,
576 int compile_id,
577 CodeBuffer *code_buffer,
578 int vep_offset,
579 int frame_complete,
580 int frame_size,
581 ByteSize receiver_sp_offset,
582 ByteSize basic_lock_sp_offset,
583 OopMapSet* oop_maps,
584 int exception_handler = -1);
585
586 Method* method () const { return _method; }
587 bool is_native_method() const { return _method != nullptr && _method->is_native(); }
588 bool is_java_method () const { return _method != nullptr && !_method->is_native(); }
589 bool is_osr_method () const { return _entry_bci != InvocationEntryBci; }
590
591 // Compiler task identification. Note that all OSR methods
592 // are numbered in an independent sequence if CICountOSR is true,
593 // and native method wrappers are also numbered independently if
594 // CICountNative is true.
595 int compile_id() const { return _compile_id; }
596 const char* compile_kind() const;
597
598 inline bool is_compiled_by_c1 () const { return _compiler_type == compiler_c1; }
599 inline bool is_compiled_by_c2 () const { return _compiler_type == compiler_c2; }
600 inline bool is_compiled_by_jvmci() const { return _compiler_type == compiler_jvmci; }
601 CompilerType compiler_type () const { return _compiler_type; }
602 const char* compiler_name () const;
603
604 // boundaries for different parts
605 address consts_begin () const { return content_begin(); }
606 address consts_end () const { return code_begin() ; }
607 address insts_begin () const { return code_begin() ; }
608 address insts_end () const { return header_begin() + _stub_offset ; }
609 address stub_begin () const { return header_begin() + _stub_offset ; }
610 address stub_end () const { return code_end() ; }
611 address exception_begin () const { return header_begin() + _exception_offset ; }
612 address deopt_handler_begin () const { return header_begin() + _deopt_handler_offset ; }
613 address deopt_mh_handler_begin() const { return header_begin() + _deopt_mh_handler_offset ; }
614 address unwind_handler_begin () const { return _unwind_handler_offset != -1 ? (insts_end() - _unwind_handler_offset) : nullptr; }
615 oop* oops_begin () const { return (oop*) data_begin(); }
616 oop* oops_end () const { return (oop*) data_end(); }
617
618 // mutable data
619 Metadata** metadata_begin () const { return (Metadata**) (mutable_data_begin() + _relocation_size); }
620 #if INCLUDE_JVMCI
621 Metadata** metadata_end () const { return (Metadata**) (mutable_data_begin() + _relocation_size + _metadata_size); }
622 address jvmci_data_begin () const { return mutable_data_begin() + _relocation_size + _metadata_size; }
623 address jvmci_data_end () const { return mutable_data_end(); }
624 #else
625 Metadata** metadata_end () const { return (Metadata**) mutable_data_end(); }
626 #endif
627
628 // immutable data
629 address immutable_data_begin () const { return _immutable_data; }
630 address immutable_data_end () const { return _immutable_data + _immutable_data_size ; }
631 address dependencies_begin () const { return _immutable_data; }
632 address dependencies_end () const { return _immutable_data + _nul_chk_table_offset; }
633 address nul_chk_table_begin () const { return _immutable_data + _nul_chk_table_offset; }
634 address nul_chk_table_end () const { return _immutable_data + _handler_table_offset; }
635 address handler_table_begin () const { return _immutable_data + _handler_table_offset; }
636 address handler_table_end () const { return _immutable_data + _scopes_pcs_offset ; }
637 PcDesc* scopes_pcs_begin () const { return (PcDesc*)(_immutable_data + _scopes_pcs_offset) ; }
638 PcDesc* scopes_pcs_end () const { return (PcDesc*)(_immutable_data + _scopes_data_offset) ; }
639 address scopes_data_begin () const { return _immutable_data + _scopes_data_offset ; }
640
641 #if INCLUDE_JVMCI
642 address scopes_data_end () const { return _immutable_data + _speculations_offset ; }
643 address speculations_begin () const { return _immutable_data + _speculations_offset ; }
644 address speculations_end () const { return immutable_data_end(); }
645 #else
646 address scopes_data_end () const { return immutable_data_end(); }
647 #endif
648
649 // Sizes
650 int immutable_data_size() const { return _immutable_data_size; }
651 int consts_size () const { return int( consts_end () - consts_begin ()); }
652 int insts_size () const { return int( insts_end () - insts_begin ()); }
653 int stub_size () const { return int( stub_end () - stub_begin ()); }
654 int oops_size () const { return int((address) oops_end () - (address) oops_begin ()); }
655 int metadata_size () const { return int((address) metadata_end () - (address) metadata_begin ()); }
656 int scopes_data_size () const { return int( scopes_data_end () - scopes_data_begin ()); }
657 int scopes_pcs_size () const { return int((intptr_t)scopes_pcs_end () - (intptr_t)scopes_pcs_begin ()); }
658 int dependencies_size () const { return int( dependencies_end () - dependencies_begin ()); }
659 int handler_table_size () const { return int( handler_table_end() - handler_table_begin()); }
660 int nul_chk_table_size () const { return int( nul_chk_table_end() - nul_chk_table_begin()); }
661 #if INCLUDE_JVMCI
662 int speculations_size () const { return int( speculations_end () - speculations_begin ()); }
663 int jvmci_data_size () const { return int( jvmci_data_end () - jvmci_data_begin ()); }
664 #endif
665
666 int oops_count() const { assert(oops_size() % oopSize == 0, ""); return (oops_size() / oopSize) + 1; }
667 int metadata_count() const { assert(metadata_size() % wordSize == 0, ""); return (metadata_size() / wordSize) + 1; }
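  // Note: oop and metadata indexes are 1-based; index 0 is reserved for null
  // (see oop_at() and metadata_at() below).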
668
669 int skipped_instructions_size () const { return _skipped_instructions_size; }
670 int total_size() const;
671
672 // Containment
673 bool consts_contains (address addr) const { return consts_begin () <= addr && addr < consts_end (); }
674 // Returns true if a given address is in the 'insts' section. The method
675 // insts_contains_inclusive() is end-inclusive.
676 bool insts_contains (address addr) const { return insts_begin () <= addr && addr < insts_end (); }
677 bool insts_contains_inclusive(address addr) const { return insts_begin () <= addr && addr <= insts_end (); }
678 bool stub_contains (address addr) const { return stub_begin () <= addr && addr < stub_end (); }
679 bool oops_contains (oop* addr) const { return oops_begin () <= addr && addr < oops_end (); }
680 bool metadata_contains (Metadata** addr) const { return metadata_begin () <= addr && addr < metadata_end (); }
681 bool scopes_data_contains (address addr) const { return scopes_data_begin () <= addr && addr < scopes_data_end (); }
682 bool scopes_pcs_contains (PcDesc* addr) const { return scopes_pcs_begin () <= addr && addr < scopes_pcs_end (); }
683 bool handler_table_contains (address addr) const { return handler_table_begin() <= addr && addr < handler_table_end(); }
684 bool nul_chk_table_contains (address addr) const { return nul_chk_table_begin() <= addr && addr < nul_chk_table_end(); }
685
686 // entry points
687 address entry_point() const { return code_begin() + _entry_offset; } // normal entry point
688 address verified_entry_point() const { return code_begin() + _verified_entry_offset; } // if klass is correct
689
690 enum : signed char { not_installed = -1, // in construction, only the owner doing the construction is
691 // allowed to advance state
692 in_use = 0, // executable nmethod
693 not_entrant = 1 // marked for deoptimization but activations may still exist
694 };
695
696 // flag accessing and manipulation
697 bool is_not_installed() const { return _state == not_installed; }
698 bool is_in_use() const { return _state <= in_use; }
699 bool is_not_entrant() const { return _state == not_entrant; }
700 int get_state() const { return _state; }
701
702 void clear_unloading_state();
  // Heuristically deduce whether an nmethod is no longer worth keeping around
704 bool is_cold();
705 bool is_unloading();
706 void do_unloading(bool unloading_occurred);
707
708 bool make_in_use() {
709 return try_transition(in_use);
710 }
  // Make the nmethod not entrant. The nmethod will continue to be
712 // alive. It is used when an uncommon trap happens. Returns true
713 // if this thread changed the state of the nmethod or false if
714 // another thread performed the transition.
715 bool make_not_entrant(ChangeReason change_reason);
716 bool make_not_used() { return make_not_entrant(ChangeReason::not_used); }
717
718 bool is_marked_for_deoptimization() const { return deoptimization_status() != not_marked; }
719 bool has_been_deoptimized() const { return deoptimization_status() == deoptimize_done; }
720 void set_deoptimized_done();
721
722 bool update_recompile_counts() const {
723 // Update recompile counts when either the update is explicitly requested (deoptimize)
724 // or the nmethod is not marked for deoptimization at all (not_marked).
    // The latter happens during uncommon traps, when a deoptimized nmethod is made not entrant.
726 DeoptimizationStatus status = deoptimization_status();
727 return status != deoptimize_noupdate && status != deoptimize_done;
728 }
729
730 // tells whether frames described by this nmethod can be deoptimized
731 // note: native wrappers cannot be deoptimized.
732 bool can_be_deoptimized() const { return is_java_method(); }
733
734 bool has_dependencies() { return dependencies_size() != 0; }
735 void print_dependencies_on(outputStream* out) PRODUCT_RETURN;
736 void flush_dependencies();
737
738 template<typename T>
739 T* gc_data() const { return reinterpret_cast<T*>(_gc_data); }
740 template<typename T>
741 void set_gc_data(T* gc_data) { _gc_data = reinterpret_cast<void*>(gc_data); }
742
743 bool has_unsafe_access() const { return _has_unsafe_access; }
744 void set_has_unsafe_access(bool z) { _has_unsafe_access = z; }
745
746 bool has_monitors() const { return _has_monitors; }
747 void set_has_monitors(bool z) { _has_monitors = z; }
748
749 bool has_scoped_access() const { return _has_scoped_access; }
750 void set_has_scoped_access(bool z) { _has_scoped_access = z; }
751
752 bool has_method_handle_invokes() const { return _has_method_handle_invokes; }
753 void set_has_method_handle_invokes(bool z) { _has_method_handle_invokes = z; }
754
755 bool has_wide_vectors() const { return _has_wide_vectors; }
756 void set_has_wide_vectors(bool z) { _has_wide_vectors = z; }
757
758 bool has_flushed_dependencies() const { return _has_flushed_dependencies; }
759 void set_has_flushed_dependencies(bool z) {
760 assert(!has_flushed_dependencies(), "should only happen once");
761 _has_flushed_dependencies = z;
762 }
763
764 bool is_unlinked() const { return _is_unlinked; }
765 void set_is_unlinked() {
766 assert(!_is_unlinked, "already unlinked");
767 _is_unlinked = true;
768 }
769
770 int comp_level() const { return _comp_level; }
771
772 // Support for oops in scopes and relocs:
773 // Note: index 0 is reserved for null.
774 oop oop_at(int index) const;
775 oop oop_at_phantom(int index) const; // phantom reference
776 oop* oop_addr_at(int index) const { // for GC
777 // relocation indexes are biased by 1 (because 0 is reserved)
778 assert(index > 0 && index <= oops_count(), "must be a valid non-zero index");
779 return &oops_begin()[index - 1];
780 }
781
782 // Support for meta data in scopes and relocs:
783 // Note: index 0 is reserved for null.
784 Metadata* metadata_at(int index) const { return index == 0 ? nullptr: *metadata_addr_at(index); }
785 Metadata** metadata_addr_at(int index) const { // for GC
786 // relocation indexes are biased by 1 (because 0 is reserved)
787 assert(index > 0 && index <= metadata_count(), "must be a valid non-zero index");
788 return &metadata_begin()[index - 1];
789 }
790
791 void copy_values(GrowableArray<jobject>* oops);
792 void copy_values(GrowableArray<Metadata*>* metadata);
793 void copy_values(GrowableArray<address>* metadata) {} // Nothing to do
794
795 // Relocation support
796 private:
797 void fix_oop_relocations(address begin, address end, bool initialize_immediates);
798 inline void initialize_immediate_oop(oop* dest, jobject handle);
799
800 protected:
801 address oops_reloc_begin() const;
802
803 public:
804 void fix_oop_relocations(address begin, address end) { fix_oop_relocations(begin, end, false); }
805 void fix_oop_relocations() { fix_oop_relocations(nullptr, nullptr, false); }
806
807 bool is_at_poll_return(address pc);
808 bool is_at_poll_or_poll_return(address pc);
809
810 protected:
811 // Exception cache support
812 // Note: _exception_cache may be read and cleaned concurrently.
813 ExceptionCache* exception_cache() const { return _exception_cache; }
814 ExceptionCache* exception_cache_acquire() const;
815
816 public:
817 address handler_for_exception_and_pc(Handle exception, address pc);
818 void add_handler_for_exception_and_pc(Handle exception, address pc, address handler);
819 void clean_exception_cache();
820
821 void add_exception_cache_entry(ExceptionCache* new_entry);
822 ExceptionCache* exception_cache_entry_for_exception(Handle exception);
823
824
825 // MethodHandle
826 bool is_method_handle_return(address return_pc);
827 // Deopt
  // Return true if the PC is one we would expect if the frame is being deopted.
829 inline bool is_deopt_pc(address pc);
830 inline bool is_deopt_mh_entry(address pc);
831 inline bool is_deopt_entry(address pc);
832
  // Accessor/mutator for the original pc of a frame before it was deopted.
834 address get_original_pc(const frame* fr) { return *orig_pc_addr(fr); }
835 void set_original_pc(const frame* fr, address pc) { *orig_pc_addr(fr) = pc; }
836
837 const char* state() const;
838
839 bool inlinecache_check_contains(address addr) const {
840 return (addr >= code_begin() && addr < verified_entry_point());
841 }
842
843 void preserve_callee_argument_oops(frame fr, const RegisterMap *reg_map, OopClosure* f);
844
845 // implicit exceptions support
846 address continuation_for_implicit_div0_exception(address pc) { return continuation_for_implicit_exception(pc, true); }
847 address continuation_for_implicit_null_exception(address pc) { return continuation_for_implicit_exception(pc, false); }
848
849 // Inline cache support for class unloading and nmethod unloading
850 private:
851 void cleanup_inline_caches_impl(bool unloading_occurred, bool clean_all);
852
853 address continuation_for_implicit_exception(address pc, bool for_div0_check);
854
855 public:
856 // Serial version used by whitebox test
857 void cleanup_inline_caches_whitebox();
858
859 void clear_inline_caches();
860
  // Execute nmethod barrier code, as if entering through an nmethod call.
862 void run_nmethod_entry_barrier();
863
864 void verify_oop_relocations();
865
866 bool has_evol_metadata();
867
868 Method* attached_method(address call_pc);
869 Method* attached_method_before_pc(address pc);
870
871 // GC unloading support
872 // Cleans unloaded klasses and unloaded nmethods in inline caches
873
874 void unload_nmethod_caches(bool class_unloading_occurred);
875
876 void unlink_from_method();
877
878 // On-stack replacement support
879 int osr_entry_bci() const { assert(is_osr_method(), "wrong kind of nmethod"); return _entry_bci; }
880 address osr_entry() const { assert(is_osr_method(), "wrong kind of nmethod"); return _osr_entry_point; }
881 nmethod* osr_link() const { return _osr_link; }
882 void set_osr_link(nmethod *n) { _osr_link = n; }
883 void invalidate_osr_method();
884
885 int num_stack_arg_slots(bool rounded = true) const {
886 return rounded ? align_up(_num_stack_arg_slots, 2) : _num_stack_arg_slots;
887 }
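  // For example, with _num_stack_arg_slots == 3 this returns 4 by default
  // (rounded up to an even number of slots) and 3 when rounded == false.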
888
889 // Verify calls to dead methods have been cleaned.
890 void verify_clean_inline_caches();
891
892 // Unlink this nmethod from the system
893 void unlink();
894
895 // Deallocate this nmethod - called by the GC
896 void purge(bool unregister_nmethod);
897
898 // See comment at definition of _last_seen_on_stack
899 void mark_as_maybe_on_stack();
900 bool is_maybe_on_stack();
901
902 // Evolution support. We make old (discarded) compiled methods point to new Method*s.
903 void set_method(Method* method) { _method = method; }
904
905 #if INCLUDE_JVMCI
906 // Gets the JVMCI name of this nmethod.
907 const char* jvmci_name();
908
909 // Records the pending failed speculation in the
910 // JVMCI speculation log associated with this nmethod.
911 void update_speculation(JavaThread* thread);
912
913 // Gets the data specific to a JVMCI compiled method.
914 // This returns a non-nullptr value iff this nmethod was
915 // compiled by the JVMCI compiler.
916 JVMCINMethodData* jvmci_nmethod_data() const {
917 return jvmci_data_size() == 0 ? nullptr : (JVMCINMethodData*) jvmci_data_begin();
918 }
919 #endif
920
921 void oops_do(OopClosure* f) { oops_do(f, false); }
922 void oops_do(OopClosure* f, bool allow_dead);
923
924 // All-in-one claiming of nmethods: returns true if the caller successfully claimed that
925 // nmethod.
926 bool oops_do_try_claim();
927
928 // Loom support for following nmethods on the stack
929 void follow_nmethod(OopIterateClosure* cl);
930
931 // Class containing callbacks for the oops_do_process_weak/strong() methods
932 // below.
933 class OopsDoProcessor {
934 public:
    // Process the oops of the given nmethod based on whether this processor is
    // being called in a weak or strong processing context, i.e. apply either
    // weak or strong work to it.
    virtual void do_regular_processing(nmethod* nm) = 0;
    // Assuming that weak processing has already been applied to the oops of the
    // given nmethod, apply the remaining strong processing part.
    virtual void do_remaining_strong_processing(nmethod* nm) = 0;
942 };
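
  // A minimal sketch of a processor (illustrative only; ExampleProcessor and _cl
  // are placeholders, and real implementations live in the GC code):
  //
  //   class ExampleProcessor : public nmethod::OopsDoProcessor {
  //     OopClosure* _cl;
  //   public:
  //     void do_regular_processing(nmethod* nm) override          { nm->oops_do(_cl); }
  //     void do_remaining_strong_processing(nmethod* nm) override { nm->oops_do(_cl); }
  //   };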
943
944 // The following two methods do the work corresponding to weak/strong nmethod
945 // processing.
946 void oops_do_process_weak(OopsDoProcessor* p);
947 void oops_do_process_strong(OopsDoProcessor* p);
948
949 static void oops_do_marking_prologue();
950 static void oops_do_marking_epilogue();
951
952 private:
953 ScopeDesc* scope_desc_in(address begin, address end);
954
955 address* orig_pc_addr(const frame* fr);
956
  // used by jvmti to track if the load event has been reported
958 bool load_reported() const { return _load_reported; }
959 void set_load_reported() { _load_reported = true; }
960
961 public:
962 // ScopeDesc retrieval operation
963 PcDesc* pc_desc_at(address pc) { return find_pc_desc(pc, false); }
964 // pc_desc_near returns the first PcDesc at or after the given pc.
965 PcDesc* pc_desc_near(address pc) { return find_pc_desc(pc, true); }
966
967 // ScopeDesc for an instruction
968 ScopeDesc* scope_desc_at(address pc);
969 ScopeDesc* scope_desc_near(address pc);
970
971 // copying of debugging information
972 void copy_scopes_pcs(PcDesc* pcs, int count);
973 void copy_scopes_data(address buffer, int size);
974
975 int orig_pc_offset() { return _orig_pc_offset; }
976
977 // Post successful compilation
978 void post_compiled_method(CompileTask* task);
979
980 // jvmti support:
981 void post_compiled_method_load_event(JvmtiThreadState* state = nullptr);
982
983 // verify operations
984 void verify();
985 void verify_scopes();
986 void verify_interrupt_point(address interrupt_point, bool is_inline_cache);
987
988 // Disassemble this nmethod with additional debug information, e.g. information about blocks.
989 void decode2(outputStream* st) const;
990 void print_constant_pool(outputStream* st);
991
992 // Avoid hiding of parent's 'decode(outputStream*)' method.
993 void decode(outputStream* st) const { decode2(st); } // just delegate here.
994
995 // printing support
996 void print_on_impl(outputStream* st) const;
997 void print_code();
998 void print_value_on_impl(outputStream* st) const;
999
1000 #if defined(SUPPORT_DATA_STRUCTS)
1001 // print output in opt build for disassembler library
1002 void print_relocations() PRODUCT_RETURN;
1003 void print_pcs_on(outputStream* st);
1004 void print_scopes() { print_scopes_on(tty); }
1005 void print_scopes_on(outputStream* st) PRODUCT_RETURN;
1006 void print_handler_table();
1007 void print_nul_chk_table();
1008 void print_recorded_oop(int log_n, int index);
1009 void print_recorded_oops();
1010 void print_recorded_metadata();
1011
1012 void print_oops(outputStream* st); // oops from the underlying CodeBlob.
1013 void print_metadata(outputStream* st); // metadata in metadata pool.
1014 #else
1015 void print_pcs_on(outputStream* st) { return; }
1016 #endif
1017
1018 void print_calls(outputStream* st) PRODUCT_RETURN;
1019 static void print_statistics() PRODUCT_RETURN;
1020
1021 void maybe_print_nmethod(const DirectiveSet* directive);
1022 void print_nmethod(bool print_code);
1023
1024 void print_on_with_msg(outputStream* st, const char* msg) const;
1025
1026 // Logging
1027 void log_identity(xmlStream* log) const;
1028 void log_new_nmethod() const;
1029 void log_state_change(ChangeReason change_reason) const;
1030
1031 // Prints block-level comments, including nmethod specific block labels:
1032 void print_nmethod_labels(outputStream* stream, address block_begin, bool print_section_labels=true) const;
1033 const char* nmethod_section_label(address pos) const;
1034
1035 // returns whether this nmethod has code comments.
1036 bool has_code_comment(address begin, address end);
1037 // Prints a comment for one native instruction (reloc info, pc desc)
1038 void print_code_comment_on(outputStream* st, int column, address begin, address end);
1039
1040 // tells if this compiled method is dependent on the given changes,
1041 // and the changes have invalidated it
1042 bool check_dependency_on(DepChange& changes);
1043
1044 // Fast breakpoint support. Tells if this compiled method is
1045 // dependent on the given method. Returns true if this nmethod
1046 // corresponds to the given method as well.
1047 bool is_dependent_on_method(Method* dependee);
1048
1049 // JVMTI's GetLocalInstance() support
1050 ByteSize native_receiver_sp_offset() {
1051 assert(is_native_method(), "sanity");
1052 return _native_receiver_sp_offset;
1053 }
1054 ByteSize native_basic_lock_sp_offset() {
1055 assert(is_native_method(), "sanity");
1056 return _native_basic_lock_sp_offset;
1057 }
1058
1059 // support for code generation
1060 static ByteSize osr_entry_point_offset() { return byte_offset_of(nmethod, _osr_entry_point); }
1061 static ByteSize state_offset() { return byte_offset_of(nmethod, _state); }
1062
1063 void metadata_do(MetadataClosure* f);
1064
1065 address call_instruction_address(address pc) const;
1066
1067 void make_deoptimized();
1068 void finalize_relocations();
1069
1070 class Vptr : public CodeBlob::Vptr {
1071 void print_on(const CodeBlob* instance, outputStream* st) const override {
1072 ttyLocker ttyl;
1073 instance->as_nmethod()->print_on_impl(st);
1074 }
1075 void print_value_on(const CodeBlob* instance, outputStream* st) const override {
1076 instance->as_nmethod()->print_value_on_impl(st);
1077 }
1078 };
1079
1080 static const Vptr _vpntr;
1081 };
1082
1083 #endif // SHARE_CODE_NMETHOD_HPP