src/hotspot/share/code/nmethod.cpp

*** 37,10 ***
--- 37,12 ---
  #include "compiler/compileLog.hpp"
  #include "compiler/compilerDirectives.hpp"
  #include "compiler/directivesParser.hpp"
  #include "compiler/disassembler.hpp"
  #include "compiler/oopMap.hpp"
+ #include "gc/shared/barrierSet.hpp"
+ #include "gc/shared/barrierSetNMethod.hpp"
  #include "gc/shared/collectedHeap.hpp"
  #include "interpreter/bytecode.hpp"
  #include "logging/log.hpp"
  #include "logging/logStream.hpp"
  #include "memory/allocation.inline.hpp"

*** 49,10 ***
--- 51,11 ---
  #include "oops/access.inline.hpp"
  #include "oops/klass.inline.hpp"
  #include "oops/method.inline.hpp"
  #include "oops/methodData.hpp"
  #include "oops/oop.inline.hpp"
+ #include "oops/weakHandle.inline.hpp"
  #include "prims/jvmtiImpl.hpp"
  #include "prims/jvmtiThreadState.hpp"
  #include "prims/methodHandles.hpp"
  #include "runtime/atomic.hpp"
  #include "runtime/deoptimization.hpp"

*** 420,17 ***
      scopes_pcs_size()    +
      handler_table_size() +
      nul_chk_table_size();
  }
  
- address* nmethod::orig_pc_addr(const frame* fr) {
-   return (address*) ((address)fr->unextended_sp() + _orig_pc_offset);
- }
- 
  const char* nmethod::compile_kind() const {
    if (is_osr_method())     return "osr";
!   if (method() != NULL && is_native_method())  return "c2n";
    return NULL;
  }
  
  // Fill in default values for various flag fields
  void nmethod::init_defaults() {
--- 423,18 ---
      scopes_pcs_size()    +
      handler_table_size() +
      nul_chk_table_size();
  }
  
  const char* nmethod::compile_kind() const {
    if (is_osr_method())     return "osr";
!   if (method() != NULL && is_native_method()) {
+     if (method()->is_continuation_enter_intrinsic()) {
+       return "cnt";
+     }
+     return "c2n";
+   }
    return NULL;
  }
  
  // Fill in default values for various flag fields
  void nmethod::init_defaults() {

*** 458,21 ***
    int vep_offset,
    int frame_complete,
    int frame_size,
    ByteSize basic_lock_owner_sp_offset,
    ByteSize basic_lock_sp_offset,
!   OopMapSet* oop_maps) {
    code_buffer->finalize_oop_references(method);
    // create nmethod
    nmethod* nm = NULL;
    {
      MutexLocker mu(CodeCache_lock, Mutex::_no_safepoint_check_flag);
      int native_nmethod_size = CodeBlob::allocation_size(code_buffer, sizeof(nmethod));
  
      CodeOffsets offsets;
      offsets.set_value(CodeOffsets::Verified_Entry, vep_offset);
      offsets.set_value(CodeOffsets::Frame_Complete, frame_complete);
      nm = new (native_nmethod_size, CompLevel_none)
      nmethod(method(), compiler_none, native_nmethod_size,
              compile_id, &offsets,
              code_buffer, frame_size,
              basic_lock_owner_sp_offset,
--- 462,25 ---
    int vep_offset,
    int frame_complete,
    int frame_size,
    ByteSize basic_lock_owner_sp_offset,
    ByteSize basic_lock_sp_offset,
!   OopMapSet* oop_maps,
+   int exception_handler) {
    code_buffer->finalize_oop_references(method);
    // create nmethod
    nmethod* nm = NULL;
    {
      MutexLocker mu(CodeCache_lock, Mutex::_no_safepoint_check_flag);
      int native_nmethod_size = CodeBlob::allocation_size(code_buffer, sizeof(nmethod));
  
      CodeOffsets offsets;
      offsets.set_value(CodeOffsets::Verified_Entry, vep_offset);
      offsets.set_value(CodeOffsets::Frame_Complete, frame_complete);
+     if (exception_handler != -1) {
+       offsets.set_value(CodeOffsets::Exceptions, exception_handler);
+     }
      nm = new (native_nmethod_size, CompLevel_none)
      nmethod(method(), compiler_none, native_nmethod_size,
              compile_id, &offsets,
              code_buffer, frame_size,
              basic_lock_owner_sp_offset,

*** 589,10 ***
--- 597,22 ---
      nm->log_new_nmethod();
    }
    return nm;
  }
  
+   class CountOops : public OopClosure {
+   private:
+     int _nr_oops;
+   public:
+     CountOops() : _nr_oops(0) {}
+     int nr_oops() const { return _nr_oops; }
+ 
+ 
+     virtual void do_oop(oop* o) { _nr_oops++; }
+     virtual void do_oop(narrowOop* o) { _nr_oops++; }
+   };
+ 
  // For native wrappers
  nmethod::nmethod(
    Method* method,
    CompilerType type,
    int nmethod_size,

*** 601,11 ***
    CodeBuffer* code_buffer,
    int frame_size,
    ByteSize basic_lock_owner_sp_offset,
    ByteSize basic_lock_sp_offset,
    OopMapSet* oop_maps )
!   : CompiledMethod(method, "native nmethod", type, nmethod_size, sizeof(nmethod), code_buffer, offsets->value(CodeOffsets::Frame_Complete), frame_size, oop_maps, false),
    _is_unloading_state(0),
    _native_receiver_sp_offset(basic_lock_owner_sp_offset),
    _native_basic_lock_sp_offset(basic_lock_sp_offset)
  {
    {
--- 621,11 ---
    CodeBuffer* code_buffer,
    int frame_size,
    ByteSize basic_lock_owner_sp_offset,
    ByteSize basic_lock_sp_offset,
    OopMapSet* oop_maps )
!   : CompiledMethod(method, "native nmethod", type, nmethod_size, sizeof(nmethod), code_buffer, offsets->value(CodeOffsets::Frame_Complete), frame_size, oop_maps, false, true),
    _is_unloading_state(0),
    _native_receiver_sp_offset(basic_lock_owner_sp_offset),
    _native_basic_lock_sp_offset(basic_lock_sp_offset)
  {
    {

*** 620,13 ***
      _entry_bci               = InvocationEntryBci;
      // We have no exception handler or deopt handler make the
      // values something that will never match a pc like the nmethod vtable entry
      _exception_offset        = 0;
      _orig_pc_offset          = 0;
  
      _consts_offset           = data_offset();
!     _stub_offset             = data_offset();
      _oops_offset             = data_offset();
      _metadata_offset         = _oops_offset         + align_up(code_buffer->total_oop_size(), oopSize);
      scopes_data_offset       = _metadata_offset     + align_up(code_buffer->total_metadata_size(), wordSize);
      _scopes_pcs_offset       = scopes_data_offset;
      _dependencies_offset     = _scopes_pcs_offset;
--- 640,14 ---
      _entry_bci               = InvocationEntryBci;
      // We have no exception handler or deopt handler make the
      // values something that will never match a pc like the nmethod vtable entry
      _exception_offset        = 0;
      _orig_pc_offset          = 0;
+     _marking_cycle           = 0;
  
      _consts_offset           = data_offset();
!     _stub_offset             = content_offset()      + code_buffer->total_offset_of(code_buffer->stubs());
      _oops_offset             = data_offset();
      _metadata_offset         = _oops_offset         + align_up(code_buffer->total_oop_size(), oopSize);
      scopes_data_offset       = _metadata_offset     + align_up(code_buffer->total_metadata_size(), wordSize);
      _scopes_pcs_offset       = scopes_data_offset;
      _dependencies_offset     = _scopes_pcs_offset;

*** 647,10 ***
--- 668,12 ---
      _osr_entry_point         = NULL;
      _exception_cache         = NULL;
      _pc_desc_container.reset_to(NULL);
      _hotness_counter         = NMethodSweeper::hotness_counter_reset_val();
  
+     _exception_offset        = code_offset()          + offsets->value(CodeOffsets::Exceptions);
+ 
      _scopes_data_begin = (address) this + scopes_data_offset;
      _deopt_handler_begin = (address) this + deoptimize_offset;
      _deopt_mh_handler_begin = (address) this + deoptimize_mh_offset;
  
      code_buffer->copy_code_and_locs_to(this);

*** 733,11 ***
    , char* speculations,
    int speculations_len,
    int jvmci_data_size
  #endif
    )
!   : CompiledMethod(method, "nmethod", type, nmethod_size, sizeof(nmethod), code_buffer, offsets->value(CodeOffsets::Frame_Complete), frame_size, oop_maps, false),
    _is_unloading_state(0),
    _native_receiver_sp_offset(in_ByteSize(-1)),
    _native_basic_lock_sp_offset(in_ByteSize(-1))
  {
    assert(debug_info->oop_recorder() == code_buffer->oop_recorder(), "shared OR");
--- 756,11 ---
    , char* speculations,
    int speculations_len,
    int jvmci_data_size
  #endif
    )
!   : CompiledMethod(method, "nmethod", type, nmethod_size, sizeof(nmethod), code_buffer, offsets->value(CodeOffsets::Frame_Complete), frame_size, oop_maps, false, true),
    _is_unloading_state(0),
    _native_receiver_sp_offset(in_ByteSize(-1)),
    _native_basic_lock_sp_offset(in_ByteSize(-1))
  {
    assert(debug_info->oop_recorder() == code_buffer->oop_recorder(), "shared OR");

*** 853,13 ***
--- 876,25 ---
      // we use the information of entry points to find out if a method is
      // static or non static
      assert(compiler->is_c2() || compiler->is_jvmci() ||
             _method->is_static() == (entry_point() == _verified_entry_point),
             " entry points must be same for static methods and vice versa");
+ 
+     {
+       CountOops count;
+       this->oops_do(&count, false, true);
+       _nr_oops = count.nr_oops();
+     }
    }
  }
  
+ int nmethod::count_oops() {
+   CountOops count;
+   this->oops_do(&count, false, true);
+   return count.nr_oops();
+ }
+ 
  // Print a short set of xml attributes to identify this nmethod.  The
  // output should be embedded in some other element.
  void nmethod::log_identity(xmlStream* log) const {
    log->print(" compile_id='%d'", compile_id());
    const char* nm_kind = compile_kind();

*** 1084,10 ***
--- 1119,51 ---
      }
    }
  }
  
  
+ void nmethod::make_deoptimized() {
+   assert(method() == NULL || can_be_deoptimized(), "");
+ 
+   CompiledICLocker ml(this);
+   assert(CompiledICLocker::is_safe(this), "mt unsafe call");
+   ResourceMark rm;
+   RelocIterator iter(this, oops_reloc_begin());
+ 
+   while (iter.next()) {
+ 
+     switch (iter.type()) {
+       case relocInfo::virtual_call_type:
+       case relocInfo::opt_virtual_call_type: {
+         CompiledIC *ic = CompiledIC_at(&iter);
+         address pc = ic->end_of_call();
+         NativePostCallNop* nop = nativePostCallNop_at(pc);
+         if (nop != NULL) {
+           nop->make_deopt();
+         }
+         assert(NativeDeoptInstruction::is_deopt_at(pc), "check");
+         break;
+       }
+       case relocInfo::static_call_type: {
+         CompiledStaticCall *csc = compiledStaticCall_at(iter.reloc());
+         address pc = csc->end_of_call();
+         NativePostCallNop* nop = nativePostCallNop_at(pc);
+         //tty->print_cr(" - static pc %p", pc);
+         if (nop != NULL) {
+           nop->make_deopt();
+         }
+         // We can't assert here; there are some calls to stubs / runtime
+         // that have reloc data and don't have a post-call nop.
+         //assert(NativeDeoptInstruction::is_deopt_at(pc), "check");
+         break;
+       }
+       default:
+         break;
+     }
+   }
+ }
+ 
  void nmethod::verify_clean_inline_caches() {
    assert(CompiledICLocker::is_safe(this), "mt unsafe call");
  
    ResourceMark rm;
    RelocIterator iter(this, oops_reloc_begin());

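A note for readers unfamiliar with the mechanism used by make_deoptimized() above: each compiled call site is followed by a NativePostCallNop, and patching that nop into a deopt instruction means any frame that later returns into this nmethod traps straight into the deoptimization handler instead of resuming invalidated code. The stand-alone C++ sketch below is only a toy model of that idea; the names, the lookup table, and return_to() are made up for illustration and stand in for the real in-place instruction patching.

#include <cstdint>
#include <cstdio>
#include <unordered_map>

enum class PostCallSlot { Nop, Deopt };   // state of the instruction right after a call

struct ToyNMethod {
  // Hypothetical map from call-site return pc to the patchable post-call slot.
  std::unordered_map<uintptr_t, PostCallSlot> post_call_slots;

  // Analogue of nmethod::make_deoptimized(): patch every post-call nop into a trap.
  void make_deoptimized() {
    for (auto& slot : post_call_slots) {
      slot.second = PostCallSlot::Deopt;
    }
  }

  // Analogue of a frame returning into the nmethod at ret_pc.
  void return_to(uintptr_t ret_pc) const {
    if (post_call_slots.at(ret_pc) == PostCallSlot::Deopt) {
      std::printf("0x%llx: divert into the deoptimization handler\n", (unsigned long long)ret_pc);
    } else {
      std::printf("0x%llx: continue in compiled code\n", (unsigned long long)ret_pc);
    }
  }
};

int main() {
  ToyNMethod nm;
  nm.post_call_slots[0x1000] = PostCallSlot::Nop;   // two hypothetical call sites
  nm.post_call_slots[0x1040] = PostCallSlot::Nop;

  nm.return_to(0x1000);   // before: execution continues normally
  nm.make_deoptimized();
  nm.return_to(0x1000);   // after: the patched slot sends the frame to the deopt handler
  return 0;
}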
*** 1133,10 ***
--- 1209,26 ---
    // Set the traversal mark to ensure that the sweeper does 2
    // cleaning passes before moving to zombie.
    set_stack_traversal_mark(NMethodSweeper::traversal_count());
  }
  
+ void nmethod::mark_as_maybe_on_continuation() {
+   assert(is_alive(), "Must be an alive method");
+   _marking_cycle = CodeCache::marking_cycle();
+ }
+ 
+ bool nmethod::is_not_on_continuation_stack() {
+   // Odd marking cycles are found during concurrent marking. Even numbers are found
+   // in nmethods that are marked when GC is inactive (e.g. nmethod entry barriers during
+   // normal execution). Therefore we align up by 2 so that nmethods encountered during
+   // concurrent marking are treated as if they were encountered in the inactive phase
+   // after that concurrent GC. Each GC increments the marking cycle twice - once when
+   // it starts and once when it ends. So we can only be sure an nmethod is not on any
+   // continuation stack when it has not been encountered from before a GC until after that GC.
+   return CodeCache::marking_cycle() >= align_up(_marking_cycle, 2) + 2;
+ }
+ 
  // Tell if a non-entrant method can be converted to a zombie (i.e.,
  // there are no activations on the stack, not in use by the VM,
  // and not in use by the ServiceThread)
  bool nmethod::can_convert_to_zombie() {
    // Note that this is called when the sweeper has observed the nmethod to be

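To make the arithmetic in is_not_on_continuation_stack() above concrete, here is a stand-alone sketch of the parity rule. The align_up helper is a simplified power-of-two version assumed to behave like HotSpot's, safe_to_zombie is a made-up name for the return expression, and the cycle numbers are illustrative.

#include <cstdint>
#include <cstdio>

// Simplified align_up for a power-of-two alignment (assumed to match HotSpot's here).
static uint64_t align_up(uint64_t value, uint64_t alignment) {
  return (value + alignment - 1) & ~(alignment - 1);
}

// Mirrors the return expression of nmethod::is_not_on_continuation_stack():
// a full GC (two cycle increments) must complete after the nmethod was last observed.
static bool safe_to_zombie(uint64_t current_cycle, uint64_t observed_cycle) {
  return current_cycle >= align_up(observed_cycle, 2) + 2;
}

int main() {
  // Observed during concurrent marking (odd cycle 5): rounded up to 6, safe once the
  // global cycle reaches 8, i.e. after the next GC has both started and finished.
  std::printf("%d\n", safe_to_zombie(7, 5));   // prints 0: not safe yet
  std::printf("%d\n", safe_to_zombie(8, 5));   // prints 1: safe
  // Observed while GC was inactive (even cycle 6): also safe from cycle 8 onward.
  std::printf("%d\n", safe_to_zombie(8, 6));   // prints 1
  return 0;
}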
*** 1151,12 ***
    // count can be greater than the stack traversal count before it hits the
    // nmethod for the second time.
    // If an is_unloading() nmethod is still not_entrant, then it is not safe to
    // convert it to zombie due to GC unloading interactions. However, if it
    // has become unloaded, then it is okay to convert such nmethods to zombie.
!   return stack_traversal_mark() + 1 < NMethodSweeper::traversal_count() &&
!          !is_locked_by_vm() && (!is_unloading() || is_unloaded());
  }
  
  void nmethod::inc_decompile_count() {
    if (!is_compiled_by_c2() && !is_compiled_by_jvmci()) return;
    // Could be gated by ProfileTraps, but do not bother...
--- 1243,12 ---
    // count can be greater than the stack traversal count before it hits the
    // nmethod for the second time.
    // If an is_unloading() nmethod is still not_entrant, then it is not safe to
    // convert it to zombie due to GC unloading interactions. However, if it
    // has become unloaded, then it is okay to convert such nmethods to zombie.
!   return stack_traversal_mark() + 1 < NMethodSweeper::traversal_count() && is_not_on_continuation_stack() &&
!          !is_locked_by_vm() && (!is_unloading() || is_unloaded());
  }
  
  void nmethod::inc_decompile_count() {
    if (!is_compiled_by_c2() && !is_compiled_by_jvmci()) return;
    // Could be gated by ProfileTraps, but do not bother...

*** 1844,14 ***
    if (is_unloading()) {
      make_unloaded();
    } else {
      guarantee(unload_nmethod_caches(unloading_occurred),
                "Should not need transition stubs");
    }
  }
  
! void nmethod::oops_do(OopClosure* f, bool allow_dead) {
    // make sure the oops ready to receive visitors
    assert(allow_dead || is_alive(), "should not call follow on dead nmethod");
  
    // Prevent extra code cache walk for platforms that don't have immediate oops.
    if (relocInfo::mustIterateImmediateOopsInCode()) {
--- 1936,18 ---
    if (is_unloading()) {
      make_unloaded();
    } else {
      guarantee(unload_nmethod_caches(unloading_occurred),
                "Should not need transition stubs");
+     BarrierSetNMethod* bs_nm = BarrierSet::barrier_set()->barrier_set_nmethod();
+     if (bs_nm != NULL) {
+       bs_nm->disarm(this);
+     }
    }
  }
  
! void nmethod::oops_do(OopClosure* f, bool allow_dead, bool allow_null) {
    // make sure the oops ready to receive visitors
    assert(allow_dead || is_alive(), "should not call follow on dead nmethod");
  
    // Prevent extra code cache walk for platforms that don't have immediate oops.
    if (relocInfo::mustIterateImmediateOopsInCode()) {

*** 1863,11 ***
          // In this loop, we must only follow those oops directly embedded in
          // the code.  Other oops (oop_index>0) are seen as part of scopes_oops.
          assert(1 == (r->oop_is_immediate()) +
                 (r->oop_addr() >= oops_begin() && r->oop_addr() < oops_end()),
                 "oop must be found in exactly one place");
!         if (r->oop_is_immediate() && r->oop_value() != NULL) {
            f->do_oop(r->oop_addr());
          }
        }
      }
    }
--- 1959,11 ---
          // In this loop, we must only follow those oops directly embedded in
          // the code.  Other oops (oop_index>0) are seen as part of scopes_oops.
          assert(1 == (r->oop_is_immediate()) +
                 (r->oop_addr() >= oops_begin() && r->oop_addr() < oops_end()),
                 "oop must be found in exactly one place");
!         if (r->oop_is_immediate() && (r->oop_value() != NULL || allow_null)) {
            f->do_oop(r->oop_addr());
          }
        }
      }
    }

*** 1878,10 ***
--- 1974,17 ---
      if (*p == Universe::non_oop_word())  continue;  // skip non-oops
      f->do_oop(p);
    }
  }
  
+ void nmethod::follow_nmethod(OopIterateClosure* cl) {
+   oops_do(cl);
+   mark_as_maybe_on_continuation();
+   BarrierSetNMethod* bs_nm = BarrierSet::barrier_set()->barrier_set_nmethod();
+   bs_nm->disarm(this);
+ }
+ 
  nmethod* volatile nmethod::_oops_do_mark_nmethods;
  
  void nmethod::oops_do_log_change(const char* state) {
    LogTarget(Trace, gc, nmethod) lt;
    if (lt.is_enabled()) {