< prev index next >

src/hotspot/share/oops/methodData.cpp

Print this page
*** 20,11 ***
--- 20,13 ---
   * or visit www.oracle.com if you need additional information or have any
   * questions.
   *
   */
  
+ #include "cds/cdsConfig.hpp"
  #include "ci/ciMethodData.hpp"
+ #include "classfile/systemDictionaryShared.hpp"
  #include "classfile/vmSymbols.hpp"
  #include "compiler/compilationPolicy.hpp"
  #include "compiler/compilerDefinitions.inline.hpp"
  #include "compiler/compilerOracle.hpp"
  #include "interpreter/bytecode.hpp"

*** 317,28 ***
      assert(is_reference_type(inv.result_type()), "room for a ret type but doesn't return obj?");
      _ret.post_initialize();
    }
  }
  
  void TypeStackSlotEntries::clean_weak_klass_links(bool always_clean) {
    for (int i = 0; i < _number_of_entries; i++) {
      intptr_t p = type(i);
      Klass* k = (Klass*)klass_part(p);
!     if (k != nullptr && (always_clean || !k->is_loader_alive())) {
!       set_type(i, with_status((Klass*)nullptr, p));
      }
    }
  }
  
  void ReturnTypeEntry::clean_weak_klass_links(bool always_clean) {
    intptr_t p = type();
    Klass* k = (Klass*)klass_part(p);
!   if (k != nullptr && (always_clean || !k->is_loader_alive())) {
!     set_type(with_status((Klass*)nullptr, p));
    }
  }
  
  bool TypeEntriesAtCall::return_profiling_enabled() {
    return MethodData::profile_return();
  }
  
  bool TypeEntriesAtCall::arguments_profiling_enabled() {
--- 319,72 ---
      assert(is_reference_type(inv.result_type()), "room for a ret type but doesn't return obj?");
      _ret.post_initialize();
    }
  }
  
+ static bool is_excluded(Klass* k) {
+ #if INCLUDE_CDS
+   if (SafepointSynchronize::is_at_safepoint() &&
+       CDSConfig::is_dumping_archive() &&
+       CDSConfig::current_thread_is_vm_or_dumper()) {
+     if (k->is_instance_klass() && !InstanceKlass::cast(k)->is_loaded()) {
+       log_debug(cds)("Purged %s from MDO: unloaded class", k->name()->as_C_string());
+       return true;
+     } else if (CDSConfig::is_dumping_dynamic_archive() && k->is_shared()) {
+       return false;
+     } else {
+       bool excluded = SystemDictionaryShared::should_be_excluded(k);
+       if (excluded) {
+         log_debug(cds)("Purged %s from MDO: excluded class", k->name()->as_C_string());
+       }
+       return excluded;
+     }
+   }
+ #endif
+   return false;
+ }
+ 
  void TypeStackSlotEntries::clean_weak_klass_links(bool always_clean) {
    for (int i = 0; i < _number_of_entries; i++) {
      intptr_t p = type(i);
      Klass* k = (Klass*)klass_part(p);
!     if (k != nullptr) {
!       if (!always_clean && k->is_instance_klass() && InstanceKlass::cast(k)->is_not_initialized()) {
+         continue; // skip not-yet-initialized classes // TODO: maybe clear the slot instead?
+       }
+       if (always_clean || !k->is_loader_alive() || is_excluded(k)) {
+         set_type(i, with_status((Klass*)nullptr, p));
+       }
      }
    }
  }
  
+ void TypeStackSlotEntries::metaspace_pointers_do(MetaspaceClosure* it) {
+   for (int i = 0; i < _number_of_entries; i++) {
+     Klass** k = (Klass**)type_adr(i); // tagged
+     it->push(k);
+   }
+ }
+ 
  void ReturnTypeEntry::clean_weak_klass_links(bool always_clean) {
    intptr_t p = type();
    Klass* k = (Klass*)klass_part(p);
!   if (k != nullptr) {
!     if (!always_clean && k->is_instance_klass() && InstanceKlass::cast(k)->is_not_initialized()) {
+       return; // skip not-yet-initialized classes // TODO: maybe clear the slot instead?
+     }
+     if (always_clean || !k->is_loader_alive() || is_excluded(k)) {
+       set_type(with_status((Klass*)nullptr, p));
+     }
    }
  }
  
+ void ReturnTypeEntry::metaspace_pointers_do(MetaspaceClosure* it) {
+   Klass** k = (Klass**)type_adr(); // tagged
+   it->push(k);
+ }
+ 
  bool TypeEntriesAtCall::return_profiling_enabled() {
    return MethodData::profile_return();
  }
  
  bool TypeEntriesAtCall::arguments_profiling_enabled() {

*** 410,16 ***
  // which are used to store a type profile for the receiver of the check.
  
  void ReceiverTypeData::clean_weak_klass_links(bool always_clean) {
      for (uint row = 0; row < row_limit(); row++) {
      Klass* p = receiver(row);
!     if (p != nullptr && (always_clean || !p->is_loader_alive())) {
!       clear_row(row);
      }
    }
  }
  
  void ReceiverTypeData::print_receiver_data_on(outputStream* st) const {
    uint row;
    int entries = 0;
    for (row = 0; row < row_limit(); row++) {
      if (receiver(row) != nullptr)  entries++;
--- 456,28 ---
  // which are used to store a type profile for the receiver of the check.
  
  void ReceiverTypeData::clean_weak_klass_links(bool always_clean) {
      for (uint row = 0; row < row_limit(); row++) {
      Klass* p = receiver(row);
!     if (p != nullptr) {
!       if (!always_clean && p->is_instance_klass() && InstanceKlass::cast(p)->is_not_initialized()) {
+         continue; // skip not-yet-initialized classes // TODO: maybe clear the slot instead?
+       }
+       if (always_clean || !p->is_loader_alive() || is_excluded(p)) {
+         clear_row(row);
+       }
      }
    }
  }
  
+ void ReceiverTypeData::metaspace_pointers_do(MetaspaceClosure *it) {
+   for (uint row = 0; row < row_limit(); row++) {
+     Klass** recv = (Klass**)intptr_at_adr(receiver_cell_index(row));
+     it->push(recv);
+   }
+ }
+ 
  void ReceiverTypeData::print_receiver_data_on(outputStream* st) const {
    uint row;
    int entries = 0;
    for (row = 0; row < row_limit(); row++) {
      if (receiver(row) != nullptr)  entries++;

*** 644,10 ***
--- 702,15 ---
    tab(st);
    _parameters.print_data_on(st);
    st->cr();
  }
  
+ void SpeculativeTrapData::metaspace_pointers_do(MetaspaceClosure* it) {
+   Method** m = (Method**)intptr_at_adr(speculative_trap_method);
+   it->push(m);
+ }
+ 
  void SpeculativeTrapData::print_data_on(outputStream* st, const char* extra) const {
    print_shared(st, "SpeculativeTrapData", extra);
    tab(st);
    method()->print_short_name(st);
    st->cr();

*** 1221,14 ***
  
  // Initialize the MethodData* corresponding to a given method.
  MethodData::MethodData(const methodHandle& method)
    : _method(method()),
      // Holds Compile_lock
-     _extra_data_lock(Mutex::nosafepoint, "MDOExtraData_lock"),
      _compiler_counters(),
      _parameters_type_data_di(parameters_uninitialized) {
!   initialize();
  }
  
  // Reinitialize the storage of an existing MDO at a safepoint.  Doing it this way will ensure it's
  // not being accessed while the contents are being rewritten.
  class VM_ReinitializeMDO: public VM_Operation {
--- 1284,18 ---
  
  // Initialize the MethodData* corresponding to a given method.
  MethodData::MethodData(const methodHandle& method)
    : _method(method()),
      // Holds Compile_lock
      _compiler_counters(),
      _parameters_type_data_di(parameters_uninitialized) {
!     _extra_data_lock = nullptr;
+     initialize();
+ }
+ 
+ MethodData::MethodData() {
+   assert(CDSConfig::is_dumping_static_archive() || UseSharedSpaces, "only for CDS");
  }
  
  // Reinitialize the storage of an existing MDO at a safepoint.  Doing it this way will ensure it's
  // not being accessed while the contents are being rewritten.
  class VM_ReinitializeMDO: public VM_Operation {

*** 1362,11 ***
    // Initialize escape flags.
    clear_escape_info();
  }
  
  bool MethodData::is_mature() const {
!   return CompilationPolicy::is_mature(_method);
  }
  
  // Translate a bci to its corresponding data index (di).
  address MethodData::bci_to_dp(int bci) {
    ResourceMark rm;
--- 1429,11 ---
    // Initialize escape flags.
    clear_escape_info();
  }
  
  bool MethodData::is_mature() const {
!   return CompilationPolicy::is_mature((MethodData*)this);
  }
  
  // Translate a bci to its corresponding data index (di).
  address MethodData::bci_to_dp(int bci) {
    ResourceMark rm;

*** 1550,11 ***
    st->print("method data for ");
    method()->print_value_on(st);
  }
  
  void MethodData::print_data_on(outputStream* st) const {
!   ConditionalMutexLocker ml(extra_data_lock(), !extra_data_lock()->owned_by_self(),
                              Mutex::_no_safepoint_check_flag);
    ResourceMark rm;
    ProfileData* data = first_data();
    if (_parameters_type_data_di != no_parameters) {
      parameters_type_data()->print_data_on(st);
--- 1617,12 ---
    st->print("method data for ");
    method()->print_value_on(st);
  }
  
  void MethodData::print_data_on(outputStream* st) const {
!   Mutex* lock = const_cast<MethodData*>(this)->extra_data_lock();
+   ConditionalMutexLocker ml(lock, !lock->owned_by_self(),
                              Mutex::_no_safepoint_check_flag);
    ResourceMark rm;
    ProfileData* data = first_data();
    if (_parameters_type_data_di != no_parameters) {
      parameters_type_data()->print_data_on(st);

*** 1723,12 ***
    assert(profile_parameters_jsr292_only(), "inconsistent");
    return m->is_compiled_lambda_form();
  }
  
  void MethodData::metaspace_pointers_do(MetaspaceClosure* it) {
!   log_trace(cds)("Iter(MethodData): %p", this);
    it->push(&_method);
  }
  
  void MethodData::clean_extra_data_helper(DataLayout* dp, int shift, bool reset) {
    check_extra_data_locked();
  
--- 1791,30 ---
    assert(profile_parameters_jsr292_only(), "inconsistent");
    return m->is_compiled_lambda_form();
  }
  
  void MethodData::metaspace_pointers_do(MetaspaceClosure* it) {
!   log_trace(cds)("Iter(MethodData): %p for %p %s", this, _method, _method->name_and_sig_as_C_string());
    it->push(&_method);
+   if (_parameters_type_data_di != no_parameters) {
+     parameters_type_data()->metaspace_pointers_do(it);
+   }
+   for (ProfileData* data = first_data(); is_valid(data); data = next_data(data)) {
+     data->metaspace_pointers_do(it);
+   }
+   for (DataLayout* dp = extra_data_base();
+                    dp < extra_data_limit();
+                    dp = MethodData::next_extra(dp)) {
+     if (dp->tag() == DataLayout::speculative_trap_data_tag) {
+       ResourceMark rm;
+       SpeculativeTrapData* data = new SpeculativeTrapData(dp);
+       data->metaspace_pointers_do(it);
+     } else if (dp->tag() == DataLayout::no_tag ||
+                dp->tag() == DataLayout::arg_info_data_tag) {
+       break;
+     }
+   }
  }
  
  void MethodData::clean_extra_data_helper(DataLayout* dp, int shift, bool reset) {
    check_extra_data_locked();
  

*** 1756,10 ***
--- 1842,13 ---
  class CleanExtraDataKlassClosure : public CleanExtraDataClosure {
    bool _always_clean;
  public:
    CleanExtraDataKlassClosure(bool always_clean) : _always_clean(always_clean) {}
    bool is_live(Method* m) {
+     if (!_always_clean && m->method_holder()->is_instance_klass() && InstanceKlass::cast(m->method_holder())->is_not_initialized()) {
+       return true; // TODO: treat as unloaded instead?
+     }
      return !(_always_clean) && m->method_holder()->is_loader_alive();
    }
  };
  
  // Check for entries that reference a redefined method

*** 1767,10 ***
--- 1856,24 ---
  public:
    CleanExtraDataMethodClosure() {}
    bool is_live(Method* m) { return !m->is_old(); }
  };
  
+ Mutex* MethodData::extra_data_lock() {
+   Mutex* lock = Atomic::load(&_extra_data_lock);
+   if (lock == nullptr) {
+     // This lock could be acquired while we are holding DumpTimeTable_lock/nosafepoint
+     lock = new Mutex(Mutex::nosafepoint-1, "MDOExtraData_lock");
+     Mutex* old = Atomic::cmpxchg(&_extra_data_lock, (Mutex*)nullptr, lock);
+     if (old != nullptr) {
+       // Another thread created the lock before us. Use that lock instead.
+       delete lock;
+       return old;
+     }
+   }
+   return lock;
+ }
  
  // Remove SpeculativeTrapData entries that reference an unloaded or
  // redefined method
  void MethodData::clean_extra_data(CleanExtraDataClosure* cl) {
    check_extra_data_locked();

*** 1783,11 ***
      switch(dp->tag()) {
      case DataLayout::speculative_trap_data_tag: {
        SpeculativeTrapData* data = new SpeculativeTrapData(dp);
        Method* m = data->method();
        assert(m != nullptr, "should have a method");
!       if (!cl->is_live(m)) {
          // "shift" accumulates the number of cells for dead
          // SpeculativeTrapData entries that have been seen so
          // far. Following entries must be shifted left by that many
          // cells to remove the dead SpeculativeTrapData entries.
          shift += (int)((intptr_t*)next_extra(dp) - (intptr_t*)dp);
--- 1886,11 ---
      switch(dp->tag()) {
      case DataLayout::speculative_trap_data_tag: {
        SpeculativeTrapData* data = new SpeculativeTrapData(dp);
        Method* m = data->method();
        assert(m != nullptr, "should have a method");
!       if (is_excluded(m->method_holder()) || !cl->is_live(m)) {
          // "shift" accumulates the number of cells for dead
          // SpeculativeTrapData entries that have been seen so
          // far. Following entries must be shifted left by that many
          // cells to remove the dead SpeculativeTrapData entries.
          shift += (int)((intptr_t*)next_extra(dp) - (intptr_t*)dp);

*** 1887,17 ***
  #if INCLUDE_JVMCI
    FailedSpeculation::free_failed_speculations(get_failed_speculations_address());
  #endif
  }
  
  #ifdef ASSERT
  void MethodData::check_extra_data_locked() const {
      // Cast const away, just to be able to verify the lock
      // Usually we only want non-const accesses on the lock,
      // so this here is an exception.
      MethodData* self = (MethodData*)this;
!     assert(self->extra_data_lock()->owned_by_self(), "must have lock");
      assert(!Thread::current()->is_Java_thread() ||
             JavaThread::current()->is_in_no_safepoint_scope(),
             "JavaThread must have NoSafepointVerifier inside lock scope");
  }
  #endif
--- 1990,27 ---
  #if INCLUDE_JVMCI
    FailedSpeculation::free_failed_speculations(get_failed_speculations_address());
  #endif
  }
  
+ #if INCLUDE_CDS
+ void MethodData::remove_unshareable_info() {
+   _extra_data_lock = nullptr;
+ }
+ 
+ void MethodData::restore_unshareable_info(TRAPS) {
+   //_extra_data_lock = new Mutex(Mutex::nosafepoint, "MDOExtraData_lock");
+ }
+ #endif // INCLUDE_CDS
+ 
  #ifdef ASSERT
  void MethodData::check_extra_data_locked() const {
      // Cast const away, just to be able to verify the lock
      // Usually we only want non-const accesses on the lock,
      // so this here is an exception.
      MethodData* self = (MethodData*)this;
!     assert(self->extra_data_lock()->owned_by_self() || CDSConfig::is_dumping_archive(), "must have lock");
      assert(!Thread::current()->is_Java_thread() ||
             JavaThread::current()->is_in_no_safepoint_scope(),
             "JavaThread must have NoSafepointVerifier inside lock scope");
  }
  #endif
< prev index next >