1 /* 2 * Copyright (c) 1997, 2022, Oracle and/or its affiliates. All rights reserved. 3 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. 4 * 5 * This code is free software; you can redistribute it and/or modify it 6 * under the terms of the GNU General Public License version 2 only, as 7 * published by the Free Software Foundation. 8 * 9 * This code is distributed in the hope that it will be useful, but WITHOUT 10 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or 11 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License 12 * version 2 for more details (a copy is included in the LICENSE file that 13 * accompanied this code). 14 * 15 * You should have received a copy of the GNU General Public License version 16 * 2 along with this work; if not, write to the Free Software Foundation, 17 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. 18 * 19 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA 20 * or visit www.oracle.com if you need additional information or have any 21 * questions. 22 * 23 */ 24 25 #ifndef SHARE_CODE_NMETHOD_HPP 26 #define SHARE_CODE_NMETHOD_HPP 27 28 #include "code/compiledMethod.hpp" 29 #include "compiler/compilerDefinitions.hpp" 30 31 class CompileTask; 32 class DepChange; 33 class DirectiveSet; 34 class DebugInformationRecorder; 35 class JvmtiThreadState; 36 class OopIterateClosure; 37 38 // nmethods (native methods) are the compiled code versions of Java methods. 
//
// An nmethod contains:
// - header                 (the nmethod structure)
// [Relocation]
// - relocation information
// - constant part          (doubles, longs and floats used in nmethod)
// - oop table
// [Code]
// - code body
// - exception handler
// - stub code
// [Debugging information]
// - oop array
// - data array
// - pcs
// [Exception handler table]
// - handler entry point array
// [Implicit Null Pointer exception table]
// - implicit null table array
// [Speculations]
// - encoded speculations array
// [JVMCINMethodData]
// - meta data for JVMCI compiled nmethod

#if INCLUDE_JVMCI
class FailedSpeculation;
class JVMCINMethodData;
#endif

class nmethod : public CompiledMethod {
  friend class VMStructs;
  friend class JVMCIVMStructs;
  friend class CodeCache;  // scavengable oops
  friend class JVMCINMethodData;

 private:

  // GC liveness heartbeat for this nmethod.
  // NOTE(review): the exact update/read protocol is defined in nmethod.cpp —
  // presumably via mark_as_maybe_on_stack()/is_maybe_on_stack() below; confirm there.
  uint64_t _gc_epoch;

  // To support simple linked-list chaining of nmethods:
  nmethod*  _osr_link;         // from InstanceKlass::osr_nmethods_head

  // STW two-phase nmethod root processing helpers.
  //
  // When determining liveness of a given nmethod to do code cache unloading,
  // some collectors need to do different things depending on whether the nmethods
  // need to absolutely be kept alive during root processing; "strong"ly reachable
  // nmethods are known to be kept alive at root processing, but the liveness of
  // "weak"ly reachable ones is to be determined later.
  //
  // We want to allow strong and weak processing of nmethods by different threads
  // at the same time without heavy synchronization. Additional constraints are
  // to make sure that every nmethod is processed a minimal amount of time, and
  // nmethods themselves are always iterated at most once at a particular time.
  //
  // Note that strong processing work must be a superset of weak processing work
  // for this code to work.
  //
  // We store state and claim information in the _oops_do_mark_link member, using
  // the two LSBs for the state and the remaining upper bits for linking together
  // nmethods that were already visited.
  // The last element is self-looped, i.e. points to itself to avoid some special
  // "end-of-list" sentinel value.
  //
  // _oops_do_mark_link special values:
  //
  //   _oops_do_mark_link == NULL: the nmethod has not been visited at all yet, i.e.
  //      is Unclaimed.
  //
  // For other values, its lowest two bits indicate the following states of the nmethod:
  //
  //   weak_request (WR): the nmethod has been claimed by a thread for weak processing
  //   weak_done (WD): weak processing has been completed for this nmethod.
  //   strong_request (SR): the nmethod has been found to need strong processing while
  //       being weak processed.
  //   strong_done (SD): strong processing has been completed for this nmethod.
  //
  // The following shows the _only_ possible progressions of the _oops_do_mark_link
  // pointer.
  //
  // Given
  //   N as the nmethod
  //   X the current next value of _oops_do_mark_link
  //
  // Unclaimed (C)-> N|WR (C)-> X|WD: the nmethod has been processed weakly by
  //   a single thread.
  // Unclaimed (C)-> N|WR (C)-> X|WD (O)-> X|SD: after weak processing has been
  //   completed (as above) another thread found that the nmethod needs strong
  //   processing after all.
  // Unclaimed (C)-> N|WR (O)-> N|SR (C)-> X|SD: during weak processing another
  //   thread finds that the nmethod needs strong processing, marks it as such and
  //   terminates. The original thread completes strong processing.
  // Unclaimed (C)-> N|SD (C)-> X|SD: the nmethod has been processed strongly from
  //   the beginning by a single thread.
  //
  // "|" describes the concatenation of bits in _oops_do_mark_link.
  //
  // The diagram also describes the threads responsible for changing the nmethod to
  // the next state by marking the _transition_ with (C) and (O), which mean "current"
  // and "other" thread respectively.
  //
  struct oops_do_mark_link; // Opaque data type.

  // States used for claiming nmethods during root processing.
  static const uint claim_weak_request_tag = 0;
  static const uint claim_weak_done_tag = 1;
  static const uint claim_strong_request_tag = 2;
  static const uint claim_strong_done_tag = 3;

  // Packs an nmethod pointer and a claim tag into a single link word.
  // Relies on nmethods being at least 4-byte aligned so the two LSBs are free.
  static oops_do_mark_link* mark_link(nmethod* nm, uint tag) {
    assert(tag <= claim_strong_done_tag, "invalid tag %u", tag);
    assert(is_aligned(nm, 4), "nmethod pointer must have zero lower two LSB");
    return (oops_do_mark_link*)(((uintptr_t)nm & ~0x3) | tag);
  }

  // Extracts the claim tag (lowest two bits) from a link word.
  static uint extract_state(oops_do_mark_link* link) {
    return (uint)((uintptr_t)link & 0x3);
  }

  // Extracts the nmethod pointer (tag bits masked off) from a link word.
  static nmethod* extract_nmethod(oops_do_mark_link* link) {
    return (nmethod*)((uintptr_t)link & ~0x3);
  }

  void oops_do_log_change(const char* state);

  static bool oops_do_has_weak_request(oops_do_mark_link* next) {
    return extract_state(next) == claim_weak_request_tag;
  }

  static bool oops_do_has_any_strong_state(oops_do_mark_link* next) {
    return extract_state(next) >= claim_strong_request_tag;
  }

  // Attempt Unclaimed -> N|WR transition. Returns true if successful.
  bool oops_do_try_claim_weak_request();

  // Attempt Unclaimed -> N|SD transition. Returns the current link.
  oops_do_mark_link* oops_do_try_claim_strong_done();
  // Attempt N|WR -> X|WD transition. Returns NULL if successful, X otherwise.
  nmethod* oops_do_try_add_to_list_as_weak_done();

  // Attempt X|WD -> N|SR transition. Returns the current link.
  oops_do_mark_link* oops_do_try_add_strong_request(oops_do_mark_link* next);
  // Attempt X|WD -> X|SD transition. Returns true if successful.
  bool oops_do_try_claim_weak_done_as_strong_done(oops_do_mark_link* next);

  // Do the N|SD -> X|SD transition.
  void oops_do_add_to_list_as_strong_done();

  // Sets this nmethod as strongly claimed (as part of N|SD -> X|SD and N|SR -> X|SD
  // transitions).
  void oops_do_set_strong_done(nmethod* old_head);

  static nmethod* volatile _oops_do_mark_nmethods;
  oops_do_mark_link* volatile _oops_do_mark_link;

  // offsets for entry points
  address _entry_point;                      // entry point with class check
  address _verified_entry_point;             // entry point without class check
  address _inline_entry_point;               // inline type entry point (unpack all inline type args) with class check
  address _verified_inline_entry_point;      // inline type entry point (unpack all inline type args) without class check
  address _verified_inline_ro_entry_point;   // inline type entry point (unpack receiver only) without class check
  address _osr_entry_point;                  // entry point for on stack replacement

  // Next element in the list of unlinked nmethods, maintained via
  // unlinked_next()/set_unlinked_next() below; consumed after unlink().
  nmethod* _unlinked_next;

  // Shared fields for all nmethods
  int _entry_bci;      // != InvocationEntryBci if this nmethod is an on-stack replacement method

  // Offsets for different nmethod parts (all relative to header_begin())
  int  _exception_offset;
  // Offset of the unwind handler if it exists
  int _unwind_handler_offset;

  int _consts_offset;
  int _stub_offset;
  int _oops_offset;                       // offset to where embedded oop table begins (inside data)
  int _metadata_offset;                   // embedded meta data table
  int _scopes_data_offset;
  int _scopes_pcs_offset;
  int _dependencies_offset;
  int _handler_table_offset;
  int _nul_chk_table_offset;
#if INCLUDE_JVMCI
  int _speculations_offset;
  int _jvmci_data_offset;
#endif
  int _nmethod_end_offset;

  int code_offset() const { return (address) code_begin() - header_begin(); }

  // location in frame (offset for sp) that deopt can store the original
  // pc during a deopt.
  int _orig_pc_offset;

  int _compile_id;                           // which compilation made this nmethod

#if INCLUDE_RTM_OPT
  // RTM state at compile time. Used during deoptimization to decide
  // whether to restart collecting RTM locking abort statistic again.
  RTMState _rtm_state;
#endif

  // These are used for compiled synchronized native methods to
  // locate the owner and stack slot for the BasicLock. They are
  // needed because there is no debug information for compiled native
  // wrappers and the oop maps are insufficient to allow
  // frame::retrieve_receiver() to work. Currently they are expected
  // to be byte offsets from the Java stack pointer for maximum code
  // sharing between platforms. JVMTI's GetLocalInstance() uses these
  // offsets to find the receiver for non-static native wrapper frames.
  ByteSize _native_receiver_sp_offset;
  ByteSize _native_basic_lock_sp_offset;

  CompLevel _comp_level;               // compilation level

  // Local state used to keep track of whether unloading is happening or not
  volatile uint8_t _is_unloading_state;

  // protected by CodeCache_lock
  bool _has_flushed_dependencies;      // Used for maintenance of dependencies (CodeCache_lock)

  // used by jvmti to track if an event has been posted for this nmethod.
  bool _load_reported;

  // Protected by CompiledMethod_lock
  volatile signed char _state;         // {not_installed, in_use, not_used, not_entrant}

  // For native wrappers
  nmethod(Method* method,
          CompilerType type,
          int nmethod_size,
          int compile_id,
          CodeOffsets* offsets,
          CodeBuffer *code_buffer,
          int frame_size,
          ByteSize basic_lock_owner_sp_offset, /* synchronized natives only */
          ByteSize basic_lock_sp_offset,       /* synchronized natives only */
          OopMapSet* oop_maps);

  // Creation support
  nmethod(Method* method,
          CompilerType type,
          int nmethod_size,
          int compile_id,
          int entry_bci,
          CodeOffsets* offsets,
          int orig_pc_offset,
          DebugInformationRecorder *recorder,
          Dependencies* dependencies,
          CodeBuffer *code_buffer,
          int frame_size,
          OopMapSet* oop_maps,
          ExceptionHandlerTable* handler_table,
          ImplicitExceptionTable* nul_chk_table,
          AbstractCompiler* compiler,
          CompLevel comp_level
#if INCLUDE_JVMCI
          , char* speculations,
          int speculations_len,
          int jvmci_data_size
#endif
          );

  // helper methods
  void* operator new(size_t size, int nmethod_size, int comp_level) throw();
  // For method handle intrinsics: Try MethodNonProfiled, MethodProfiled and NonNMethod.
  // Attention: Only allow NonNMethod space for special nmethods which don't need to be
  // findable by nmethod iterators! In particular, they must not contain oops!
  void* operator new(size_t size, int nmethod_size, bool allow_NonNMethod_space) throw();

  const char* reloc_string_for(u_char* begin, u_char* end);

  bool try_transition(int new_state);

  // Returns true if this thread changed the state of the nmethod or
  // false if another thread performed the transition.
  bool make_entrant() { Unimplemented(); return false; }
  void inc_decompile_count();

  // Inform external interfaces that a compiled method has been unloaded
  void post_compiled_method_unload();

  // Initialize fields to their default values
  void init_defaults();

  // Offsets
  int content_offset() const      { return content_begin() - header_begin(); }
  int data_offset() const         { return _data_offset; }

  address header_end() const      { return (address)    header_begin() + header_size(); }

 public:
  // create nmethod with entry_bci
  static nmethod* new_nmethod(const methodHandle& method,
                              int compile_id,
                              int entry_bci,
                              CodeOffsets* offsets,
                              int orig_pc_offset,
                              DebugInformationRecorder* recorder,
                              Dependencies* dependencies,
                              CodeBuffer *code_buffer,
                              int frame_size,
                              OopMapSet* oop_maps,
                              ExceptionHandlerTable* handler_table,
                              ImplicitExceptionTable* nul_chk_table,
                              AbstractCompiler* compiler,
                              CompLevel comp_level
#if INCLUDE_JVMCI
                              , char* speculations = NULL,
                              int speculations_len = 0,
                              int nmethod_mirror_index = -1,
                              const char* nmethod_mirror_name = NULL,
                              FailedSpeculation** failed_speculations = NULL
#endif
                              );

  // Only used for unit tests.
  nmethod()
    : CompiledMethod(),
      _native_receiver_sp_offset(in_ByteSize(-1)),
      _native_basic_lock_sp_offset(in_ByteSize(-1)),
      _is_unloading_state(0) {}


  static nmethod* new_native_nmethod(const methodHandle& method,
                                     int compile_id,
                                     CodeBuffer *code_buffer,
                                     int vep_offset,
                                     int frame_complete,
                                     int frame_size,
                                     ByteSize receiver_sp_offset,
                                     ByteSize basic_lock_sp_offset,
                                     OopMapSet* oop_maps,
                                     int exception_handler = -1);

  // type info
  bool is_nmethod() const                         { return true; }
  bool is_osr_method() const                      { return _entry_bci != InvocationEntryBci; }

  // boundaries for different parts
  address consts_begin          () const          { return           header_begin() + _consts_offset        ; }
  address consts_end            () const          { return           code_begin()                           ; }
  address stub_begin            () const          { return           header_begin() + _stub_offset          ; }
  address stub_end              () const          { return           header_begin() + _oops_offset          ; }
  address exception_begin       () const          { return           header_begin() + _exception_offset     ; }
  address unwind_handler_begin  () const          { return _unwind_handler_offset != -1 ? (header_begin() + _unwind_handler_offset) : NULL; }
  oop*    oops_begin            () const          { return (oop*)   (header_begin() + _oops_offset)         ; }
  oop*    oops_end              () const          { return (oop*)   (header_begin() + _metadata_offset)     ; }

  Metadata** metadata_begin     () const          { return (Metadata**)  (header_begin() + _metadata_offset) ; }
  Metadata** metadata_end       () const          { return (Metadata**)  _scopes_data_begin; }

  address scopes_data_end       () const          { return           header_begin() + _scopes_pcs_offset    ; }
  PcDesc* scopes_pcs_begin      () const          { return (PcDesc*)(header_begin() + _scopes_pcs_offset   ); }
  PcDesc* scopes_pcs_end        () const          { return (PcDesc*)(header_begin() + _dependencies_offset) ; }
  address dependencies_begin    () const          { return           header_begin() + _dependencies_offset  ; }
  address dependencies_end      () const          { return           header_begin() + _handler_table_offset ; }
  address handler_table_begin   () const          { return           header_begin() + _handler_table_offset ; }
  address handler_table_end     () const          { return           header_begin() + _nul_chk_table_offset ; }
  address nul_chk_table_begin   () const          { return           header_begin() + _nul_chk_table_offset ; }
#if INCLUDE_JVMCI
  address nul_chk_table_end     () const          { return           header_begin() + _speculations_offset  ; }
  address speculations_begin    () const          { return           header_begin() + _speculations_offset  ; }
  address speculations_end      () const          { return           header_begin() + _jvmci_data_offset    ; }
  address jvmci_data_begin      () const          { return           header_begin() + _jvmci_data_offset    ; }
  address jvmci_data_end        () const          { return           header_begin() + _nmethod_end_offset   ; }
#else
  address nul_chk_table_end     () const          { return           header_begin() + _nmethod_end_offset   ; }
#endif

  // Sizes
  int oops_size         () const                  { return (address)  oops_end         () - (address)  oops_begin         (); }
  int metadata_size     () const                  { return (address)  metadata_end     () - (address)  metadata_begin     (); }
  int dependencies_size () const                  { return            dependencies_end () -            dependencies_begin (); }
#if INCLUDE_JVMCI
  int speculations_size () const                  { return            speculations_end () -            speculations_begin (); }
  int jvmci_data_size   () const                  { return            jvmci_data_end   () -            jvmci_data_begin   (); }
#endif

  // Counts are biased by 1 because relocation index 0 is reserved for null.
  int oops_count() const { assert(oops_size() % oopSize == 0, ""); return (oops_size() / oopSize) + 1; }
  int metadata_count() const { assert(metadata_size() % wordSize == 0, ""); return (metadata_size() / wordSize) + 1; }

  int total_size        () const;

  // Containment
  bool oops_contains         (oop*    addr) const { return oops_begin         () <= addr && addr < oops_end         (); }
  bool metadata_contains     (Metadata** addr) const { return metadata_begin  () <= addr && addr < metadata_end     (); }
  bool scopes_data_contains  (address addr) const { return scopes_data_begin  () <= addr && addr < scopes_data_end  (); }
  bool scopes_pcs_contains   (PcDesc* addr) const { return scopes_pcs_begin   () <= addr && addr < scopes_pcs_end   (); }

  // entry points
  address entry_point() const                     { return _entry_point;             } // normal entry point
  address verified_entry_point() const            { return _verified_entry_point;    } // normal entry point without class check
  address inline_entry_point() const              { return _inline_entry_point; }             // inline type entry point (unpack all inline type args)
  address verified_inline_entry_point() const     { return _verified_inline_entry_point; }    // inline type entry point (unpack all inline type args) without class check
  address verified_inline_ro_entry_point() const  { return _verified_inline_ro_entry_point; } // inline type entry point (only unpack receiver) without class check

  // flag accessing and manipulation
  bool is_not_installed() const                   { return _state == not_installed; }
  // Relies on the state values being ordered so that everything up to and
  // including in_use counts as "in use".
  bool is_in_use() const                          { return _state <= in_use; }
  bool is_not_entrant() const                     { return _state == not_entrant; }

  void clear_unloading_state();
  // Heuristically deduce an nmethod isn't worth keeping around
  bool is_cold();
  virtual bool is_unloading();
  virtual void do_unloading(bool unloading_occurred);

  nmethod* unlinked_next() const                  { return _unlinked_next; }
  void set_unlinked_next(nmethod* next)           { _unlinked_next = next; }

#if INCLUDE_RTM_OPT
  // rtm state accessing and manipulating
  RTMState  rtm_state() const                     { return _rtm_state; }
  void set_rtm_state(RTMState state)              { _rtm_state = state; }
#endif

  bool make_in_use() {
    return try_transition(in_use);
  }
  // Make the nmethod non entrant. The nmethod will continue to be
  // alive. It is used when an uncommon trap happens. Returns true
  // if this thread changed the state of the nmethod or false if
  // another thread performed the transition.
  bool  make_not_entrant();
  bool  make_not_used()    { return make_not_entrant(); }

  int get_state() const {
    return _state;
  }

  bool has_dependencies()                         { return dependencies_size() != 0; }
  void print_dependencies()                       PRODUCT_RETURN;
  void flush_dependencies();
  bool has_flushed_dependencies()                 { return _has_flushed_dependencies; }
  void set_has_flushed_dependencies()             {
    assert(!has_flushed_dependencies(), "should only happen once");
    _has_flushed_dependencies = 1;
  }

  int   comp_level() const                        { return _comp_level; }

  void unlink_from_method();

  // Support for oops in scopes and relocs:
  // Note: index 0 is reserved for null.
  oop   oop_at(int index) const;
  oop   oop_at_phantom(int index) const; // phantom reference
  oop*  oop_addr_at(int index) const {  // for GC
    // relocation indexes are biased by 1 (because 0 is reserved)
    assert(index > 0 && index <= oops_count(), "must be a valid non-zero index");
    return &oops_begin()[index - 1];
  }

  // Support for meta data in scopes and relocs:
  // Note: index 0 is reserved for null.
  Metadata*     metadata_at(int index) const      { return index == 0 ? NULL: *metadata_addr_at(index); }
  Metadata**  metadata_addr_at(int index) const {  // for GC
    // relocation indexes are biased by 1 (because 0 is reserved)
    assert(index > 0 && index <= metadata_count(), "must be a valid non-zero index");
    return &metadata_begin()[index - 1];
  }

  void copy_values(GrowableArray<jobject>* oops);
  void copy_values(GrowableArray<Metadata*>* metadata);

  // Relocation support
private:
  void fix_oop_relocations(address begin, address end, bool initialize_immediates);
  inline void initialize_immediate_oop(oop* dest, jobject handle);

public:
  void fix_oop_relocations(address begin, address end) { fix_oop_relocations(begin, end, false); }
  void fix_oop_relocations()                           { fix_oop_relocations(NULL, NULL, false); }

  // On-stack replacement support
  int   osr_entry_bci() const                     { assert(is_osr_method(), "wrong kind of nmethod"); return _entry_bci; }
  address  osr_entry() const                      { assert(is_osr_method(), "wrong kind of nmethod"); return _osr_entry_point; }
  void  invalidate_osr_method();
  nmethod* osr_link() const                       { return _osr_link; }
  void     set_osr_link(nmethod *n)               { _osr_link = n; }

  // Verify calls to dead methods have been cleaned.
  void verify_clean_inline_caches();

  // Unlink this nmethod from the system
  void unlink();

  // Deallocate this nmethod - called by the GC
  void flush();

  // See comment at definition of _last_seen_on_stack
  void mark_as_maybe_on_stack();
  bool is_maybe_on_stack();

  // Evolution support. We make old (discarded) compiled methods point to new Method*s.
  void set_method(Method* method) { _method = method; }

#if INCLUDE_JVMCI
  // Gets the JVMCI name of this nmethod.
  const char* jvmci_name();

  // Records the pending failed speculation in the
  // JVMCI speculation log associated with this nmethod.
  void update_speculation(JavaThread* thread);

  // Gets the data specific to a JVMCI compiled method.
  // This returns a non-NULL value iff this nmethod was
  // compiled by the JVMCI compiler.
  JVMCINMethodData* jvmci_nmethod_data() const {
    return jvmci_data_size() == 0 ? NULL : (JVMCINMethodData*) jvmci_data_begin();
  }
#endif

 public:
  void oops_do(OopClosure* f) { oops_do(f, false); }
  void oops_do(OopClosure* f, bool allow_dead);

  // All-in-one claiming of nmethods: returns true if the caller successfully claimed that
  // nmethod.
  bool oops_do_try_claim();

  // Loom support for following nmethods on the stack
  void follow_nmethod(OopIterateClosure* cl);

  // Class containing callbacks for the oops_do_process_weak/strong() methods
  // below.
  class OopsDoProcessor {
  public:
    // Process the oops of the given nmethod based on whether it has been called
    // in a weak or strong processing context, i.e. apply either weak or strong
    // work on it.
    virtual void do_regular_processing(nmethod* nm) = 0;
    // Assuming that weak processing has already been applied to the oops of the
    // given nmethod, apply the remaining strong processing part.
    virtual void do_remaining_strong_processing(nmethod* nm) = 0;
  };

  // The following two methods do the work corresponding to weak/strong nmethod
  // processing.
  void oops_do_process_weak(OopsDoProcessor* p);
  void oops_do_process_strong(OopsDoProcessor* p);

  static void oops_do_marking_prologue();
  static void oops_do_marking_epilogue();

 private:
  ScopeDesc* scope_desc_in(address begin, address end);

  address* orig_pc_addr(const frame* fr);

  // used by jvmti to track if the load event has been reported
  bool  load_reported() const                     { return _load_reported; }
  void  set_load_reported()                       { _load_reported = true; }

 public:
  // copying of debugging information
  void copy_scopes_pcs(PcDesc* pcs, int count);
  void copy_scopes_data(address buffer, int size);

  int orig_pc_offset() { return _orig_pc_offset; }

  // Post successful compilation
  void post_compiled_method(CompileTask* task);

  // jvmti support:
  void post_compiled_method_load_event(JvmtiThreadState* state = NULL);

  // verify operations
  void verify();
  void verify_scopes();
  void verify_interrupt_point(address interrupt_point);

  // Disassemble this nmethod with additional debug information, e.g. information about blocks.
  void decode2(outputStream* st) const;
  void print_constant_pool(outputStream* st);

  // Avoid hiding of parent's 'decode(outputStream*)' method.
  void decode(outputStream* st) const { decode2(st); } // just delegate here.

  // printing support
  void print()                 const;
  void print(outputStream* st) const;
  void print_code();

#if defined(SUPPORT_DATA_STRUCTS)
  // print output in opt build for disassembler library
  void print_relocations()                        PRODUCT_RETURN;
  void print_pcs() { print_pcs_on(tty); }
  void print_pcs_on(outputStream* st);
  void print_scopes() { print_scopes_on(tty); }
  void print_scopes_on(outputStream* st)          PRODUCT_RETURN;
  void print_value_on(outputStream* st) const;
  void print_handler_table();
  void print_nul_chk_table();
  void print_recorded_oop(int log_n, int index);
  void print_recorded_oops();
  void print_recorded_metadata();

  void print_oops(outputStream* st);     // oops from the underlying CodeBlob.
  void print_metadata(outputStream* st); // metadata in metadata pool.
#else
  // void print_pcs()                              PRODUCT_RETURN;
  void print_pcs() { return; }
#endif

  void print_calls(outputStream* st)              PRODUCT_RETURN;
  static void print_statistics()                  PRODUCT_RETURN;

  void maybe_print_nmethod(const DirectiveSet* directive);
  void print_nmethod(bool print_code);

  // need to re-define this from CodeBlob else the overload hides it
  virtual void print_on(outputStream* st) const { CodeBlob::print_on(st); }
  void print_on(outputStream* st, const char* msg) const;

  // Logging
  void log_identity(xmlStream* log) const;
  void log_new_nmethod() const;
  void log_state_change() const;

  // Prints block-level comments, including nmethod specific block labels:
  virtual void print_block_comment(outputStream* stream, address block_begin) const {
#if defined(SUPPORT_ASSEMBLY) || defined(SUPPORT_ABSTRACT_ASSEMBLY)
    print_nmethod_labels(stream, block_begin);
    CodeBlob::print_block_comment(stream, block_begin);
#endif
  }

  void print_nmethod_labels(outputStream* stream, address block_begin, bool print_section_labels=true) const;
  const char* nmethod_section_label(address pos) const;

  // returns whether this nmethod has code comments.
  bool has_code_comment(address begin, address end);
  // Prints a comment for one native instruction (reloc info, pc desc)
  void print_code_comment_on(outputStream* st, int column, address begin, address end);

  // Compiler task identification. Note that all OSR methods
  // are numbered in an independent sequence if CICountOSR is true,
  // and native method wrappers are also numbered independently if
  // CICountNative is true.
  virtual int compile_id() const { return _compile_id; }
  const char* compile_kind() const;

  // tells if any of this method's dependencies have been invalidated
  // (this is expensive!)
  static void check_all_dependencies(DepChange& changes);

  // tells if this compiled method is dependent on the given changes,
  // and the changes have invalidated it
  bool check_dependency_on(DepChange& changes);

  // Fast breakpoint support. Tells if this compiled method is
  // dependent on the given method. Returns true if this nmethod
  // corresponds to the given method as well.
  virtual bool is_dependent_on_method(Method* dependee);

  // JVMTI's GetLocalInstance() support
  ByteSize native_receiver_sp_offset() {
    return _native_receiver_sp_offset;
  }
  ByteSize native_basic_lock_sp_offset() {
    return _native_basic_lock_sp_offset;
  }

  // support for code generation
  static int verified_entry_point_offset()        { return offset_of(nmethod, _verified_entry_point); }
  static int osr_entry_point_offset()             { return offset_of(nmethod, _osr_entry_point); }
  static int state_offset()                       { return offset_of(nmethod, _state); }

  virtual void metadata_do(MetadataClosure* f);

  NativeCallWrapper* call_wrapper_at(address call) const;
  NativeCallWrapper* call_wrapper_before(address return_pc) const;
  address call_instruction_address(address pc) const;

  virtual CompiledStaticCall* compiledStaticCall_at(Relocation* call_site) const;
  virtual CompiledStaticCall* compiledStaticCall_at(address addr) const;
  virtual CompiledStaticCall* compiledStaticCall_before(address addr) const;

  virtual void make_deoptimized();
  void finalize_relocations();
};

#endif // SHARE_CODE_NMETHOD_HPP