1 /* 2 * Copyright (c) 1997, 2023, Oracle and/or its affiliates. All rights reserved. 3 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. 4 * 5 * This code is free software; you can redistribute it and/or modify it 6 * under the terms of the GNU General Public License version 2 only, as 7 * published by the Free Software Foundation. 8 * 9 * This code is distributed in the hope that it will be useful, but WITHOUT 10 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or 11 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License 12 * version 2 for more details (a copy is included in the LICENSE file that 13 * accompanied this code). 14 * 15 * You should have received a copy of the GNU General Public License version 16 * 2 along with this work; if not, write to the Free Software Foundation, 17 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. 18 * 19 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA 20 * or visit www.oracle.com if you need additional information or have any 21 * questions. 22 * 23 */ 24 25 #ifndef SHARE_CODE_NMETHOD_HPP 26 #define SHARE_CODE_NMETHOD_HPP 27 28 #include "code/compiledMethod.hpp" 29 #include "compiler/compilerDefinitions.hpp" 30 31 class CompileTask; 32 class DepChange; 33 class DirectiveSet; 34 class DebugInformationRecorder; 35 class JvmtiThreadState; 36 class OopIterateClosure; 37 38 // nmethods (native methods) are the compiled code versions of Java methods. 
//
// An nmethod contains:
//  - header                 (the nmethod structure)
//  [Relocation]
//  - relocation information
//  - constant part          (doubles, longs and floats used in nmethod)
//  - oop table
//  [Code]
//  - code body
//  - exception handler
//  - stub code
//  [Debugging information]
//  - oop array
//  - data array
//  - pcs
//  [Exception handler table]
//  - handler entry point array
//  [Implicit Null Pointer exception table]
//  - implicit null table array
//  [Speculations]
//  - encoded speculations array
//  [JVMCINMethodData]
//  - meta data for JVMCI compiled nmethod

#if INCLUDE_JVMCI
class FailedSpeculation;
class JVMCINMethodData;
#endif

class nmethod : public CompiledMethod {
  friend class VMStructs;
  friend class JVMCIVMStructs;
  friend class CodeCache;  // scavengable oops
  friend class JVMCINMethodData;

 private:

  // GC epoch bookkeeping. NOTE(review): presumably stamped when the GC last
  // observed this nmethod and consulted by the is_cold() heuristic below —
  // confirm against the CodeCache/GC implementation.
  uint64_t  _gc_epoch;

  // To support simple linked-list chaining of nmethods:
  nmethod*  _osr_link;         // from InstanceKlass::osr_nmethods_head

  // STW two-phase nmethod root processing helpers.
  //
  // When determining liveness of a given nmethod to do code cache unloading,
  // some collectors need to do different things depending on whether the nmethods
  // need to absolutely be kept alive during root processing; "strong"ly reachable
  // nmethods are known to be kept alive at root processing, but the liveness of
  // "weak"ly reachable ones is to be determined later.
  //
  // We want to allow strong and weak processing of nmethods by different threads
  // at the same time without heavy synchronization. Additional constraints are
  // to make sure that every nmethod is processed a minimal amount of time, and
  // nmethods themselves are always iterated at most once at a particular time.
  //
  // Note that strong processing work must be a superset of weak processing work
  // for this code to work.
  //
  // We store state and claim information in the _oops_do_mark_link member, using
  // the two LSBs for the state and the remaining upper bits for linking together
  // nmethods that were already visited.
  // The last element is self-looped, i.e. points to itself to avoid some special
  // "end-of-list" sentinel value.
  //
  // _oops_do_mark_link special values:
  //
  //   _oops_do_mark_link == nullptr: the nmethod has not been visited at all yet, i.e.
  //      is Unclaimed.
  //
  // For other values, its lowest two bits indicate the following states of the nmethod:
  //
  //   weak_request (WR): the nmethod has been claimed by a thread for weak processing
  //   weak_done (WD): weak processing has been completed for this nmethod.
  //   strong_request (SR): the nmethod has been found to need strong processing while
  //      being weak processed.
  //   strong_done (SD): strong processing has been completed for this nmethod.
  //
  // The following shows the _only_ possible progressions of the _oops_do_mark_link
  // pointer.
  //
  // Given
  //   N as the nmethod
  //   X the current next value of _oops_do_mark_link
  //
  // Unclaimed (C)-> N|WR (C)-> X|WD: the nmethod has been processed weakly by
  //   a single thread.
  // Unclaimed (C)-> N|WR (C)-> X|WD (O)-> X|SD: after weak processing has been
  //   completed (as above) another thread found that the nmethod needs strong
  //   processing after all.
  // Unclaimed (C)-> N|WR (O)-> N|SR (C)-> X|SD: during weak processing another
  //   thread finds that the nmethod needs strong processing, marks it as such and
  //   terminates. The original thread completes strong processing.
  // Unclaimed (C)-> N|SD (C)-> X|SD: the nmethod has been processed strongly from
  //   the beginning by a single thread.
  //
  // "|" describes the concatenation of bits in _oops_do_mark_link.
  //
  // The diagram also describes the threads responsible for changing the nmethod to
  // the next state by marking the _transition_ with (C) and (O), which mean "current"
  // and "other" thread respectively.
  //
  struct oops_do_mark_link; // Opaque data type.

  // States used for claiming nmethods during root processing.
  static const uint claim_weak_request_tag = 0;
  static const uint claim_weak_done_tag = 1;
  static const uint claim_strong_request_tag = 2;
  static const uint claim_strong_done_tag = 3;

  // Encode an (nmethod, state tag) pair into a single link value, storing the
  // tag in the two LSBs freed up by nmethod alignment (see comment above).
  static oops_do_mark_link* mark_link(nmethod* nm, uint tag) {
    assert(tag <= claim_strong_done_tag, "invalid tag %u", tag);
    assert(is_aligned(nm, 4), "nmethod pointer must have zero lower two LSB");
    return (oops_do_mark_link*)(((uintptr_t)nm & ~0x3) | tag);
  }

  // Decode the claim-state tag from a link value.
  static uint extract_state(oops_do_mark_link* link) {
    return (uint)((uintptr_t)link & 0x3);
  }

  // Decode the nmethod pointer from a link value (tag bits masked off).
  static nmethod* extract_nmethod(oops_do_mark_link* link) {
    return (nmethod*)((uintptr_t)link & ~0x3);
  }

  void oops_do_log_change(const char* state);

  static bool oops_do_has_weak_request(oops_do_mark_link* next) {
    return extract_state(next) == claim_weak_request_tag;
  }

  static bool oops_do_has_any_strong_state(oops_do_mark_link* next) {
    return extract_state(next) >= claim_strong_request_tag;
  }

  // Attempt Unclaimed -> N|WR transition. Returns true if successful.
  bool oops_do_try_claim_weak_request();

  // Attempt Unclaimed -> N|SD transition. Returns the current link.
  oops_do_mark_link* oops_do_try_claim_strong_done();
  // Attempt N|WR -> X|WD transition. Returns nullptr if successful, X otherwise.
  nmethod* oops_do_try_add_to_list_as_weak_done();

  // Attempt X|WD -> N|SR transition. Returns the current link.
  oops_do_mark_link* oops_do_try_add_strong_request(oops_do_mark_link* next);
  // Attempt X|WD -> X|SD transition. Returns true if successful.
  bool oops_do_try_claim_weak_done_as_strong_done(oops_do_mark_link* next);

  // Do the N|SD -> X|SD transition.
  void oops_do_add_to_list_as_strong_done();

  // Sets this nmethod as strongly claimed (as part of N|SD -> X|SD and N|SR -> X|SD
  // transitions).
  void oops_do_set_strong_done(nmethod* old_head);

  // Head of the global list of already-visited nmethods built via the
  // _oops_do_mark_link chaining described above.
  static nmethod* volatile _oops_do_mark_nmethods;
  oops_do_mark_link* volatile _oops_do_mark_link;

  // offsets for entry points
  address _entry_point;                    // entry point with class check
  address _verified_entry_point;           // entry point without class check
  address _inline_entry_point;             // inline type entry point (unpack all inline type args) with class check
  address _verified_inline_entry_point;    // inline type entry point (unpack all inline type args) without class check
  address _verified_inline_ro_entry_point; // inline type entry point (unpack receiver only) without class check
  address _osr_entry_point;                // entry point for on stack replacement

  // Link in the list of unlinked nmethods (see unlink()/set_unlinked_next()).
  nmethod* _unlinked_next;

  // Shared fields for all nmethod's
  int _entry_bci;      // != InvocationEntryBci if this nmethod is an on-stack replacement method

  // Offsets for different nmethod parts
  int  _exception_offset;
  // Offset of the unwind handler if it exists
  int _unwind_handler_offset;

  int _consts_offset;
  int _stub_offset;
  int _oops_offset;                       // offset to where embedded oop table begins (inside data)
  int _metadata_offset;                   // embedded meta data table
  int _scopes_data_offset;
  int _scopes_pcs_offset;
  int _dependencies_offset;
  int _handler_table_offset;
  int _nul_chk_table_offset;
#if INCLUDE_JVMCI
  int _speculations_offset;
  int _jvmci_data_offset;
#endif
  // End of the last section; used by the *_end() accessors below.
  int _nmethod_end_offset;

  int code_offset() const { return int(code_begin() - header_begin()); }

  // location in frame (offset for sp) that deopt can store the original
  // pc during a deopt.
  int _orig_pc_offset;

  int _compile_id;            // which compilation made this nmethod

#if INCLUDE_RTM_OPT
  // RTM state at compile time. Used during deoptimization to decide
  // whether to restart collecting RTM locking abort statistic again.
  RTMState _rtm_state;
#endif

  // These are used for compiled synchronized native methods to
  // locate the owner and stack slot for the BasicLock. They are
  // needed because there is no debug information for compiled native
  // wrappers and the oop maps are insufficient to allow
  // frame::retrieve_receiver() to work. Currently they are expected
  // to be byte offsets from the Java stack pointer for maximum code
  // sharing between platforms. JVMTI's GetLocalInstance() uses these
  // offsets to find the receiver for non-static native wrapper frames.
  ByteSize _native_receiver_sp_offset;
  ByteSize _native_basic_lock_sp_offset;

  CompLevel _comp_level;               // compilation level

  // Local state used to keep track of whether unloading is happening or not
  volatile uint8_t _is_unloading_state;

  // protected by CodeCache_lock
  bool _has_flushed_dependencies;      // Used for maintenance of dependencies (CodeCache_lock)

  // used by jvmti to track if an event has been posted for this nmethod.
  bool _load_reported;

  // Protected by CompiledMethod_lock
  volatile signed char _state;         // {not_installed, in_use, not_used, not_entrant}

  int _skipped_instructions_size;

  // For native wrappers
  nmethod(Method* method,
          CompilerType type,
          int nmethod_size,
          int compile_id,
          CodeOffsets* offsets,
          CodeBuffer *code_buffer,
          int frame_size,
          ByteSize basic_lock_owner_sp_offset, /* synchronized natives only */
          ByteSize basic_lock_sp_offset,       /* synchronized natives only */
          OopMapSet* oop_maps);

  // Creation support
  nmethod(Method* method,
          CompilerType type,
          int nmethod_size,
          int compile_id,
          int entry_bci,
          CodeOffsets* offsets,
          int orig_pc_offset,
          DebugInformationRecorder *recorder,
          Dependencies* dependencies,
          CodeBuffer *code_buffer,
          int frame_size,
          OopMapSet* oop_maps,
          ExceptionHandlerTable* handler_table,
          ImplicitExceptionTable* nul_chk_table,
          AbstractCompiler* compiler,
          CompLevel comp_level
#if INCLUDE_JVMCI
          , char* speculations = nullptr,
          int speculations_len = 0,
          JVMCINMethodData* jvmci_data = nullptr
#endif
          );

  // helper methods
  void* operator new(size_t size, int nmethod_size, int comp_level) throw();
  // For method handle intrinsics: Try MethodNonProfiled, MethodProfiled and NonNMethod.
  // Attention: Only allow NonNMethod space for special nmethods which don't need to be
  // findable by nmethod iterators! In particular, they must not contain oops!
  void* operator new(size_t size, int nmethod_size, bool allow_NonNMethod_space) throw();

  const char* reloc_string_for(u_char* begin, u_char* end);

  bool try_transition(signed char new_state);

  // Returns true if this thread changed the state of the nmethod or
  // false if another thread performed the transition.
  bool make_entrant() { Unimplemented(); return false; }
  void inc_decompile_count();

  // Inform external interfaces that a compiled method has been unloaded
  void post_compiled_method_unload();

  // Initialize fields to their default values
  void init_defaults();

  // Offsets
  int content_offset() const { return int(content_begin() - header_begin()); }
  int data_offset() const { return _data_offset; }

  address header_end() const { return (address) header_begin() + header_size(); }

 public:
  // create nmethod with entry_bci
  static nmethod* new_nmethod(const methodHandle& method,
                              int compile_id,
                              int entry_bci,
                              CodeOffsets* offsets,
                              int orig_pc_offset,
                              DebugInformationRecorder* recorder,
                              Dependencies* dependencies,
                              CodeBuffer *code_buffer,
                              int frame_size,
                              OopMapSet* oop_maps,
                              ExceptionHandlerTable* handler_table,
                              ImplicitExceptionTable* nul_chk_table,
                              AbstractCompiler* compiler,
                              CompLevel comp_level
#if INCLUDE_JVMCI
                              , char* speculations = nullptr,
                              int speculations_len = 0,
                              JVMCINMethodData* jvmci_data = nullptr
#endif
  );

  // Only used for unit tests.
  nmethod()
    : CompiledMethod(),
      _native_receiver_sp_offset(in_ByteSize(-1)),
      _native_basic_lock_sp_offset(in_ByteSize(-1)),
      _is_unloading_state(0) {}


  static nmethod* new_native_nmethod(const methodHandle& method,
                                     int compile_id,
                                     CodeBuffer *code_buffer,
                                     int vep_offset,
                                     int frame_complete,
                                     int frame_size,
                                     ByteSize receiver_sp_offset,
                                     ByteSize basic_lock_sp_offset,
                                     OopMapSet* oop_maps,
                                     int exception_handler = -1);

  // type info
  bool is_nmethod() const                         { return true; }
  bool is_osr_method() const                      { return _entry_bci != InvocationEntryBci; }

  // boundaries for different parts
  address consts_begin         () const { return header_begin() + _consts_offset; }
  address consts_end           () const { return code_begin(); }
  address stub_begin           () const { return header_begin() + _stub_offset; }
  address stub_end             () const { return header_begin() + _oops_offset; }
  address exception_begin      () const { return header_begin() + _exception_offset; }
  // nullptr when no unwind handler was generated (_unwind_handler_offset == -1).
  address unwind_handler_begin () const { return _unwind_handler_offset != -1 ? (header_begin() + _unwind_handler_offset) : nullptr; }
  oop*    oops_begin           () const { return (oop*) (header_begin() + _oops_offset); }
  oop*    oops_end             () const { return (oop*) (header_begin() + _metadata_offset); }

  Metadata** metadata_begin    () const { return (Metadata**) (header_begin() + _metadata_offset); }
  Metadata** metadata_end      () const { return (Metadata**) _scopes_data_begin; }

  address scopes_data_end      () const { return header_begin() + _scopes_pcs_offset; }
  PcDesc* scopes_pcs_begin     () const { return (PcDesc*)(header_begin() + _scopes_pcs_offset); }
  PcDesc* scopes_pcs_end       () const { return (PcDesc*)(header_begin() + _dependencies_offset); }
  address dependencies_begin   () const { return header_begin() + _dependencies_offset; }
  address dependencies_end     () const { return header_begin() + _handler_table_offset; }
  address handler_table_begin  () const { return header_begin() + _handler_table_offset; }
  address handler_table_end    () const { return header_begin() + _nul_chk_table_offset; }
  address nul_chk_table_begin  () const { return header_begin() + _nul_chk_table_offset; }

  int skipped_instructions_size () const { return _skipped_instructions_size; }

#if INCLUDE_JVMCI
  address nul_chk_table_end    () const { return header_begin() + _speculations_offset; }
  address speculations_begin   () const { return header_begin() + _speculations_offset; }
  address speculations_end     () const { return header_begin() + _jvmci_data_offset; }
  address jvmci_data_begin     () const { return header_begin() + _jvmci_data_offset; }
  address jvmci_data_end       () const { return header_begin() + _nmethod_end_offset; }
#else
  address nul_chk_table_end    () const { return header_begin() + _nmethod_end_offset; }
#endif

  // Sizes
  int oops_size         () const { return int((address) oops_end() - (address) oops_begin()); }
  int metadata_size     () const { return int((address) metadata_end() - (address) metadata_begin()); }
  int dependencies_size () const { return int(dependencies_end() - dependencies_begin()); }
#if INCLUDE_JVMCI
  int speculations_size () const { return int(speculations_end() - speculations_begin()); }
  int jvmci_data_size   () const { return int(jvmci_data_end() - jvmci_data_begin()); }
#endif

  // Counts are biased by 1 because index 0 is reserved for null (see oop_at()).
  int     oops_count() const { assert(oops_size() % oopSize == 0, ""); return (oops_size() / oopSize) + 1; }
  int metadata_count() const { assert(metadata_size() % wordSize == 0, ""); return (metadata_size() / wordSize) + 1; }

  int total_size        () const;

  // Containment
  bool oops_contains          (oop*       addr) const { return oops_begin() <= addr && addr < oops_end(); }
  bool metadata_contains      (Metadata** addr) const { return metadata_begin() <= addr && addr < metadata_end(); }
  bool scopes_data_contains   (address    addr) const { return scopes_data_begin() <= addr && addr < scopes_data_end(); }
  bool scopes_pcs_contains    (PcDesc*    addr) const { return scopes_pcs_begin() <= addr && addr < scopes_pcs_end(); }

  // entry points
  address entry_point() const                    { return _entry_point; }                      // normal entry point
  address verified_entry_point() const           { return _verified_entry_point; }             // normal entry point without class check
  address inline_entry_point() const             { return _inline_entry_point; }               // inline type entry point (unpack all inline type args)
  address verified_inline_entry_point() const    { return _verified_inline_entry_point; }      // inline type entry point (unpack all inline type args) without class check
  address verified_inline_ro_entry_point() const { return _verified_inline_ro_entry_point; }   // inline type entry point (only unpack receiver) without class check

  // flag accessing and manipulation
  bool  is_not_installed() const                 { return _state == not_installed; }
  bool  is_in_use() const                        { return _state <= in_use; }
  bool  is_not_entrant() const                   { return _state == not_entrant; }

  void clear_unloading_state();
  // Heuristically deduce an nmethod isn't worth keeping around
  bool is_cold();
  virtual bool is_unloading();
  virtual void do_unloading(bool unloading_occurred);

  nmethod* unlinked_next() const                 { return _unlinked_next; }
  void set_unlinked_next(nmethod* next)          { _unlinked_next = next; }

#if INCLUDE_RTM_OPT
  // rtm state accessing and manipulating
  RTMState  rtm_state() const                    { return _rtm_state; }
  void set_rtm_state(RTMState state)             { _rtm_state = state; }
#endif

  bool make_in_use() {
    return try_transition(in_use);
  }
  // Make the nmethod non entrant. The nmethod will continue to be
  // alive. It is used when an uncommon trap happens. Returns true
  // if this thread changed the state of the nmethod or false if
  // another thread performed the transition.
  bool  make_not_entrant();
  bool  make_not_used()    { return make_not_entrant(); }

  int get_state() const {
    return _state;
  }

  bool has_dependencies()                        { return dependencies_size() != 0; }
  void print_dependencies_on(outputStream* out) PRODUCT_RETURN;
  void flush_dependencies();
  bool has_flushed_dependencies()                { return _has_flushed_dependencies; }
  void set_has_flushed_dependencies()            {
    assert(!has_flushed_dependencies(), "should only happen once");
    _has_flushed_dependencies = 1;
  }

  int   comp_level() const                       { return _comp_level; }

  void unlink_from_method();

  // Support for oops in scopes and relocs:
  // Note: index 0 is reserved for null.
  oop   oop_at(int index) const;
  oop   oop_at_phantom(int index) const; // phantom reference
  oop*  oop_addr_at(int index) const {  // for GC
    // relocation indexes are biased by 1 (because 0 is reserved)
    assert(index > 0 && index <= oops_count(), "must be a valid non-zero index");
    return &oops_begin()[index - 1];
  }

  // Support for meta data in scopes and relocs:
  // Note: index 0 is reserved for null.
  Metadata*   metadata_at(int index) const       { return index == 0 ? nullptr: *metadata_addr_at(index); }
  Metadata**  metadata_addr_at(int index) const {  // for GC
    // relocation indexes are biased by 1 (because 0 is reserved)
    assert(index > 0 && index <= metadata_count(), "must be a valid non-zero index");
    return &metadata_begin()[index - 1];
  }

  void copy_values(GrowableArray<jobject>* oops);
  void copy_values(GrowableArray<Metadata*>* metadata);

  // Relocation support
private:
  void fix_oop_relocations(address begin, address end, bool initialize_immediates);
  inline void initialize_immediate_oop(oop* dest, jobject handle);

public:
  void fix_oop_relocations(address begin, address end) { fix_oop_relocations(begin, end, false); }
  void fix_oop_relocations()                           { fix_oop_relocations(nullptr, nullptr, false); }

  // On-stack replacement support
  int   osr_entry_bci() const                    { assert(is_osr_method(), "wrong kind of nmethod"); return _entry_bci; }
  address  osr_entry() const                     { assert(is_osr_method(), "wrong kind of nmethod"); return _osr_entry_point; }
  void  invalidate_osr_method();
  nmethod* osr_link() const                      { return _osr_link; }
  void     set_osr_link(nmethod *n)              { _osr_link = n; }

  // Verify calls to dead methods have been cleaned.
  void verify_clean_inline_caches();

  // Unlink this nmethod from the system
  void unlink();

  // Deallocate this nmethod - called by the GC
  void flush();

  // See comment at definition of _last_seen_on_stack
  void mark_as_maybe_on_stack();
  bool is_maybe_on_stack();

  // Evolution support. We make old (discarded) compiled methods point to new Method*s.
  void set_method(Method* method) { _method = method; }

#if INCLUDE_JVMCI
  // Gets the JVMCI name of this nmethod.
  const char* jvmci_name();

  // Records the pending failed speculation in the
  // JVMCI speculation log associated with this nmethod.
  void update_speculation(JavaThread* thread);

  // Gets the data specific to a JVMCI compiled method.
  // This returns a non-nullptr value iff this nmethod was
  // compiled by the JVMCI compiler.
  JVMCINMethodData* jvmci_nmethod_data() const {
    return jvmci_data_size() == 0 ? nullptr : (JVMCINMethodData*) jvmci_data_begin();
  }
#endif

 public:
  void oops_do(OopClosure* f) { oops_do(f, false); }
  void oops_do(OopClosure* f, bool allow_dead);

  // All-in-one claiming of nmethods: returns true if the caller successfully claimed that
  // nmethod.
  bool oops_do_try_claim();

  // Loom support for following nmethods on the stack
  void follow_nmethod(OopIterateClosure* cl);

  // Class containing callbacks for the oops_do_process_weak/strong() methods
  // below.
  class OopsDoProcessor {
  public:
    // Process the oops of the given nmethod based on whether it has been called
    // in a weak or strong processing context, i.e. apply either weak or strong
    // work on it.
    virtual void do_regular_processing(nmethod* nm) = 0;
    // Assuming that the oops of the given nmethod has already been its weak
    // processing applied, apply the remaining strong processing part.
    virtual void do_remaining_strong_processing(nmethod* nm) = 0;
  };

  // The following two methods do the work corresponding to weak/strong nmethod
  // processing.
  void oops_do_process_weak(OopsDoProcessor* p);
  void oops_do_process_strong(OopsDoProcessor* p);

  static void oops_do_marking_prologue();
  static void oops_do_marking_epilogue();

 private:
  ScopeDesc* scope_desc_in(address begin, address end);

  address* orig_pc_addr(const frame* fr);

  // used by jvmti to track if the load events has been reported
  bool  load_reported() const                    { return _load_reported; }
  void  set_load_reported()                      { _load_reported = true; }

 public:
  // copying of debugging information
  void copy_scopes_pcs(PcDesc* pcs, int count);
  void copy_scopes_data(address buffer, int size);

  int orig_pc_offset() { return _orig_pc_offset; }

  // Post successful compilation
  void post_compiled_method(CompileTask* task);

  // jvmti support:
  void post_compiled_method_load_event(JvmtiThreadState* state = nullptr);

  // verify operations
  void verify();
  void verify_scopes();
  void verify_interrupt_point(address interrupt_point);

  // Disassemble this nmethod with additional debug information, e.g. information about blocks.
  void decode2(outputStream* st) const;
  void print_constant_pool(outputStream* st);

  // Avoid hiding of parent's 'decode(outputStream*)' method.
  void decode(outputStream* st) const { decode2(st); } // just delegate here.

  // printing support
  void print()                 const;
  void print(outputStream* st) const;
  void print_code();

#if defined(SUPPORT_DATA_STRUCTS)
  // print output in opt build for disassembler library
  void print_relocations()                       PRODUCT_RETURN;
  void print_pcs() { print_pcs_on(tty); }
  void print_pcs_on(outputStream* st);
  void print_scopes() { print_scopes_on(tty); }
  void print_scopes_on(outputStream* st)         PRODUCT_RETURN;
  void print_value_on(outputStream* st) const;
  void print_handler_table();
  void print_nul_chk_table();
  void print_recorded_oop(int log_n, int index);
  void print_recorded_oops();
  void print_recorded_metadata();

  void print_oops(outputStream* st);     // oops from the underlying CodeBlob.
  void print_metadata(outputStream* st); // metadata in metadata pool.
#else
  // void print_pcs()                    PRODUCT_RETURN;
  void print_pcs()                               { return; }
#endif

  void print_calls(outputStream* st)             PRODUCT_RETURN;
  static void print_statistics()                 PRODUCT_RETURN;

  void maybe_print_nmethod(const DirectiveSet* directive);
  void print_nmethod(bool print_code);

  // need to re-define this from CodeBlob else the overload hides it
  virtual void print_on(outputStream* st) const { CodeBlob::print_on(st); }
  void print_on(outputStream* st, const char* msg) const;

  // Logging
  void log_identity(xmlStream* log) const;
  void log_new_nmethod() const;
  void log_state_change() const;

  // Prints block-level comments, including nmethod specific block labels:
  virtual void print_block_comment(outputStream* stream, address block_begin) const {
#if defined(SUPPORT_ASSEMBLY) || defined(SUPPORT_ABSTRACT_ASSEMBLY)
    print_nmethod_labels(stream, block_begin);
    CodeBlob::print_block_comment(stream, block_begin);
#endif
  }

  void print_nmethod_labels(outputStream* stream, address block_begin, bool print_section_labels=true) const;
  const char* nmethod_section_label(address pos) const;

  // returns whether this nmethod has code comments.
  bool has_code_comment(address begin, address end);
  // Prints a comment for one native instruction (reloc info, pc desc)
  void print_code_comment_on(outputStream* st, int column, address begin, address end);

  // Compiler task identification. Note that all OSR methods
  // are numbered in an independent sequence if CICountOSR is true,
  // and native method wrappers are also numbered independently if
  // CICountNative is true.
  virtual int compile_id() const { return _compile_id; }
  const char* compile_kind() const;

  // tells if any of this method's dependencies have been invalidated
  // (this is expensive!)
  static void check_all_dependencies(DepChange& changes);

  // tells if this compiled method is dependent on the given changes,
  // and the changes have invalidated it
  bool check_dependency_on(DepChange& changes);

  // Fast breakpoint support. Tells if this compiled method is
  // dependent on the given method. Returns true if this nmethod
  // corresponds to the given method as well.
  virtual bool is_dependent_on_method(Method* dependee);

  // JVMTI's GetLocalInstance() support
  ByteSize native_receiver_sp_offset() {
    return _native_receiver_sp_offset;
  }
  ByteSize native_basic_lock_sp_offset() {
    return _native_basic_lock_sp_offset;
  }

  // support for code generation
  static ByteSize verified_entry_point_offset() { return byte_offset_of(nmethod, _verified_entry_point); }
  static ByteSize osr_entry_point_offset()      { return byte_offset_of(nmethod, _osr_entry_point); }
  static ByteSize state_offset()                { return byte_offset_of(nmethod, _state); }

  virtual void metadata_do(MetadataClosure* f);

  NativeCallWrapper* call_wrapper_at(address call) const;
  NativeCallWrapper* call_wrapper_before(address return_pc) const;
  address call_instruction_address(address pc) const;

  virtual CompiledStaticCall* compiledStaticCall_at(Relocation* call_site) const;
  virtual CompiledStaticCall* compiledStaticCall_at(address addr) const;
  virtual CompiledStaticCall* compiledStaticCall_before(address addr) const;

  virtual void  make_deoptimized();
  void finalize_relocations();
};

#endif // SHARE_CODE_NMETHOD_HPP