src/hotspot/share/code/nmethod.cpp

*** 36,55 ****
--- 36,57 ----
  #include "compiler/compileBroker.hpp"
  #include "compiler/compileLog.hpp"
  #include "compiler/compilerDirectives.hpp"
  #include "compiler/directivesParser.hpp"
  #include "compiler/disassembler.hpp"
+ #include "compiler/oopMap.inline.hpp"
  #include "interpreter/bytecode.hpp"
  #include "logging/log.hpp"
  #include "logging/logStream.hpp"
  #include "memory/allocation.inline.hpp"
  #include "memory/resourceArea.hpp"
  #include "memory/universe.hpp"
  #include "oops/access.inline.hpp"
  #include "oops/method.inline.hpp"
  #include "oops/methodData.hpp"
  #include "oops/oop.inline.hpp"
+ #include "oops/weakHandle.inline.hpp"
  #include "prims/jvmtiImpl.hpp"
  #include "runtime/atomic.hpp"
  #include "runtime/flags/flagSetting.hpp"
  #include "runtime/frame.inline.hpp"
  #include "runtime/handles.inline.hpp"
*** 409,422 ****
            scopes_pcs_size()    +
            handler_table_size() +
            nul_chk_table_size();
  }
  
- address* nmethod::orig_pc_addr(const frame* fr) {
-   return (address*) ((address)fr->unextended_sp() + _orig_pc_offset);
- }
- 
  const char* nmethod::compile_kind() const {
    if (is_osr_method())     return "osr";
    if (method() != NULL && is_native_method())  return "c2n";
    return NULL;
  }
--- 411,420 ----
*** 577,586 ****
--- 575,596 ----
      nm->log_new_nmethod();
    }
    return nm;
  }
  
+ class CountOops : public OopClosure {
+ private:
+   int _nr_oops;
+ public:
+   CountOops() : _nr_oops(0) {}
+   int nr_oops() const { return _nr_oops; }
+ 
+ 
+   virtual void do_oop(oop* o) { _nr_oops++; }
+   virtual void do_oop(narrowOop* o) { _nr_oops++; }
+ };
+ 
  // For native wrappers
  nmethod::nmethod(
    Method* method,
    CompilerType type,
    int nmethod_size,
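
The CountOops closure added here uses HotSpot's standard visitor idiom: oops_do() invokes do_oop() once per oop slot, so a closure that merely increments a counter yields the number of slots. A minimal standalone sketch of that idiom (plain C++ stand-ins, not HotSpot code; all names below are hypothetical):

    #include <cstdio>
    #include <vector>

    struct Oop {};                         // stand-in for HotSpot's oop

    struct OopClosureModel {               // stand-in for OopClosure
      virtual ~OopClosureModel() = default;
      virtual void do_oop(Oop** slot) = 0;
    };

    struct CountOopsModel : OopClosureModel {
      int nr_oops = 0;
      void do_oop(Oop**) override { ++nr_oops; }  // count every visited slot
    };

    struct OopHolder {                     // stand-in for an nmethod's oop section
      std::vector<Oop*> slots;
      void oops_do(OopClosureModel* cl) {
        for (Oop*& s : slots) cl->do_oop(&s);
      }
    };

    int main() {
      Oop a, b;
      OopHolder h{{&a, &b, nullptr}};
      CountOopsModel count;
      h.oops_do(&count);
      printf("oops: %d\n", count.nr_oops); // 3: every slot is visited, even null
    }
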
*** 589,599 ****
    CodeBuffer* code_buffer,
    int frame_size,
    ByteSize basic_lock_owner_sp_offset,
    ByteSize basic_lock_sp_offset,
    OopMapSet* oop_maps )
!   : CompiledMethod(method, "native nmethod", type, nmethod_size, sizeof(nmethod), code_buffer, offsets->value(CodeOffsets::Frame_Complete), frame_size, oop_maps, false),
    _is_unloading_state(0),
    _native_receiver_sp_offset(basic_lock_owner_sp_offset),
    _native_basic_lock_sp_offset(basic_lock_sp_offset)
  {
    {
--- 599,609 ----
    CodeBuffer* code_buffer,
    int frame_size,
    ByteSize basic_lock_owner_sp_offset,
    ByteSize basic_lock_sp_offset,
    OopMapSet* oop_maps )
!   : CompiledMethod(method, "native nmethod", type, nmethod_size, sizeof(nmethod), code_buffer, offsets->value(CodeOffsets::Frame_Complete), frame_size, oop_maps, false, true),
    _is_unloading_state(0),
    _native_receiver_sp_offset(basic_lock_owner_sp_offset),
    _native_basic_lock_sp_offset(basic_lock_sp_offset)
  {
    {
*** 719,729 ****
    , char* speculations,
    int speculations_len,
    int jvmci_data_size
  #endif
    )
!   : CompiledMethod(method, "nmethod", type, nmethod_size, sizeof(nmethod), code_buffer, offsets->value(CodeOffsets::Frame_Complete), frame_size, oop_maps, false),
    _is_unloading_state(0),
    _native_receiver_sp_offset(in_ByteSize(-1)),
    _native_basic_lock_sp_offset(in_ByteSize(-1))
  {
    assert(debug_info->oop_recorder() == code_buffer->oop_recorder(), "shared OR");
--- 729,739 ----
    , char* speculations,
    int speculations_len,
    int jvmci_data_size
  #endif
    )
!   : CompiledMethod(method, "nmethod", type, nmethod_size, sizeof(nmethod), code_buffer, offsets->value(CodeOffsets::Frame_Complete), frame_size, oop_maps, false, true),
    _is_unloading_state(0),
    _native_receiver_sp_offset(in_ByteSize(-1)),
    _native_basic_lock_sp_offset(in_ByteSize(-1))
  {
    assert(debug_info->oop_recorder() == code_buffer->oop_recorder(), "shared OR");
*** 834,846 ****
--- 844,868 ----
      // we use the information of entry points to find out if a method is
      // static or non static
      assert(compiler->is_c2() || compiler->is_jvmci() ||
             _method->is_static() == (entry_point() == _verified_entry_point),
             " entry points must be same for static methods and vice versa");
+ 
+     {
+       CountOops count;
+       this->oops_do(&count, false, true);
+       _nr_oops = count.nr_oops();
+     }
    }
  }
  
+ int nmethod::count_oops() {
+   CountOops count;
+   this->oops_do(&count, false, true);
+   return count.nr_oops();
+ }
+ 
  // Print a short set of xml attributes to identify this nmethod.  The
  // output should be embedded in some other element.
  void nmethod::log_identity(xmlStream* log) const {
    log->print(" compile_id='%d'", compile_id());
    const char* nm_kind = compile_kind();
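
Two things are worth noting in this hunk. The constructor caches the result in the new _nr_oops field, while count_oops() recomputes the same value on demand; and both calls pass allow_dead = false and allow_null = true to the extended oops_do() (see the hunk at old lines 1801-1814 below), so null immediate-oop slots are included in the count. A hypothetical cross-check a caller could make, assuming an nr_oops() accessor for the cached field (such an accessor is not shown in this file):

    // hypothetical debug check against a live nmethod* nm:
    assert(nm->count_oops() == nm->nr_oops(), "cached oop count is stale");
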
*** 1119,1130 ****
    // count can be greater than the stack traversal count before it hits the
    // nmethod for the second time.
    // If an is_unloading() nmethod is still not_entrant, then it is not safe to
    // convert it to zombie due to GC unloading interactions. However, if it
    // has become unloaded, then it is okay to convert such nmethods to zombie.
!   return stack_traversal_mark() + 1 < NMethodSweeper::traversal_count() &&
!          !is_locked_by_vm() && (!is_unloading() || is_unloaded());
  }
  
  void nmethod::inc_decompile_count() {
    if (!is_compiled_by_c2() && !is_compiled_by_jvmci()) return;
    // Could be gated by ProfileTraps, but do not bother...
--- 1141,1152 ----
    // count can be greater than the stack traversal count before it hits the
    // nmethod for the second time.
    // If an is_unloading() nmethod is still not_entrant, then it is not safe to
    // convert it to zombie due to GC unloading interactions. However, if it
    // has become unloaded, then it is okay to convert such nmethods to zombie.
!   return stack_traversal_mark()+1 < NMethodSweeper::traversal_count() && !is_on_continuation_stack() &&
!          !is_locked_by_vm() && (!is_unloading() || is_unloaded());
  }
  
  void nmethod::inc_decompile_count() {
    if (!is_compiled_by_c2() && !is_compiled_by_jvmci()) return;
    // Could be gated by ProfileTraps, but do not bother...
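
The surrounding comment describes the staleness rule behind the changed return: a not-entrant nmethod may only become a zombie once at least one full stack traversal has passed without it being seen on any stack, and this patch additionally requires that it not be on a continuation stack. A standalone model of just the traversal arithmetic (not HotSpot code):

    #include <cstdio>

    // mark:  the traversal in which the nmethod was last seen on some stack.
    // count: the sweeper's current traversal number.
    static bool stale_for_one_full_traversal(long mark, long count) {
      // mark + 1 < count  <=>  a whole traversal passed without a sighting
      return mark + 1 < count;
    }

    int main() {
      printf("%d\n", stale_for_one_full_traversal(5, 6)); // 0: seen last sweep
      printf("%d\n", stale_for_one_full_traversal(5, 7)); // 1: safely stale
    }
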
*** 1149,1158 ****
--- 1171,1182 ----
        }
      }
  }
  
  void nmethod::make_unloaded() {
+   assert(!is_on_continuation_stack(), "can't be on continuation stack");
+ 
    post_compiled_method_unload();
  
    // This nmethod is being unloaded, make sure that dependencies
    // recorded in instanceKlasses get flushed.
    // Since this work is being done during a GC, defer deleting dependencies from the
*** 1801,1814 ****
      guarantee(unload_nmethod_caches(unloading_occurred),
                "Should not need transition stubs");
    }
  }
  
! void nmethod::oops_do(OopClosure* f, bool allow_dead) {
    // make sure the oops ready to receive visitors
    assert(allow_dead || is_alive(), "should not call follow on dead nmethod");
  
    // Prevent extra code cache walk for platforms that don't have immediate oops.
    if (relocInfo::mustIterateImmediateOopsInCode()) {
      RelocIterator iter(this, oops_reloc_begin());
      while (iter.next()) {
--- 1825,1847 ----
      guarantee(unload_nmethod_caches(unloading_occurred),
                "Should not need transition stubs");
    }
  }
  
! void nmethod::oops_do(OopClosure* f, bool allow_dead, bool allow_null, bool keepalive_is_strong) {
    // make sure the oops ready to receive visitors
    assert(allow_dead || is_alive(), "should not call follow on dead nmethod");
  
+   if (keepalive_is_strong) {
+     if (_keepalive != NULL) {
+       WeakHandle<vm_nmethod_keepalive_data> wh = WeakHandle<vm_nmethod_keepalive_data>::from_raw(_keepalive);
+       if (wh.resolve() != NULL) {
+         f->do_oop(_keepalive);
+       }
+     }
+   }
+ 
    // Prevent extra code cache walk for platforms that don't have immediate oops.
    if (relocInfo::mustIterateImmediateOopsInCode()) {
      RelocIterator iter(this, oops_reloc_begin());
      while (iter.next()) {
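
The keepalive block treats a weak handle as a strong root on request, but only after resolving it: a handle whose referent the GC has already cleared must not be handed to the strong closure. A simplified standalone model of that resolve-then-visit pattern (plain C++, hypothetical names, not the VM's WeakHandle API):

    #include <cstdio>

    struct Oop {};

    struct OopVisitor {
      virtual ~OopVisitor() = default;
      virtual void do_oop(Oop** slot) = 0;
    };

    // Minimal weak-handle model: resolve() returns null once a collector
    // has cleared the referent.
    struct WeakSlot {
      Oop* referent;
      Oop* resolve() const { return referent; }
    };

    struct Holder {                   // stand-in for an nmethod
      WeakSlot* keepalive = nullptr;  // stand-in for _keepalive

      void visit_roots(OopVisitor* f, bool keepalive_is_strong) {
        if (keepalive_is_strong && keepalive != nullptr) {
          // Resolve first: a cleared handle must not be resurrected by
          // reporting its slot as a strong root.
          if (keepalive->resolve() != nullptr) {
            f->do_oop(&keepalive->referent);
          }
        }
        // ... regular embedded oops would be visited here ...
      }
    };

    struct PrintVisitor : OopVisitor {
      void do_oop(Oop** slot) override { printf("strong root at %p\n", (void*)slot); }
    };

    int main() {
      Oop obj;
      WeakSlot ws{&obj};
      Holder h{&ws};
      PrintVisitor pv;
      h.visit_roots(&pv, true);  // visits the slot: referent still live
      ws.referent = nullptr;     // simulate GC clearing the weak referent
      h.visit_roots(&pv, true);  // no visit: handle no longer resolves
    }
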
*** 1817,1827 ****
        // In this loop, we must only follow those oops directly embedded in
        // the code.  Other oops (oop_index>0) are seen as part of scopes_oops.
        assert(1 == (r->oop_is_immediate()) +
               (r->oop_addr() >= oops_begin() && r->oop_addr() < oops_end()),
               "oop must be found in exactly one place");
!       if (r->oop_is_immediate() && r->oop_value() != NULL) {
          f->do_oop(r->oop_addr());
        }
      }
    }
  }
--- 1850,1860 ----
        // In this loop, we must only follow those oops directly embedded in
        // the code.  Other oops (oop_index>0) are seen as part of scopes_oops.
        assert(1 == (r->oop_is_immediate()) +
               (r->oop_addr() >= oops_begin() && r->oop_addr() < oops_end()),
               "oop must be found in exactly one place");
!       if (r->oop_is_immediate() && (r->oop_value() != NULL || allow_null)) {
          f->do_oop(r->oop_addr());
        }
      }
    }
  }