< prev index next >

src/hotspot/share/code/codeCache.cpp

Print this page
*** 21,18 ***
--- 21,20 ---
   * questions.
   *
   */
  
  #include "precompiled.hpp"
+ #include "cds/cdsAccess.hpp"
  #include "code/codeBlob.hpp"
  #include "code/codeCache.hpp"
  #include "code/codeHeapState.hpp"
  #include "code/compiledIC.hpp"
  #include "code/dependencies.hpp"
  #include "code/dependencyContext.hpp"
  #include "code/nmethod.hpp"
  #include "code/pcDesc.hpp"
+ #include "code/SCCache.hpp"
  #include "compiler/compilationPolicy.hpp"
  #include "compiler/compileBroker.hpp"
  #include "compiler/compilerDefinitions.inline.hpp"
  #include "compiler/oopMap.hpp"
  #include "gc/shared/barrierSetNMethod.hpp"

*** 169,10 ***
--- 171,12 ---
  address CodeCache::_low_bound = 0;
  address CodeCache::_high_bound = 0;
  volatile int CodeCache::_number_of_nmethods_with_dependencies = 0;
  ExceptionCache* volatile CodeCache::_exception_cache_purge_list = nullptr;
  
+ static ReservedSpace _cds_code_space;
+ 
  // Initialize arrays of CodeHeap subsets
  GrowableArray<CodeHeap*>* CodeCache::_heaps = new(mtCode) GrowableArray<CodeHeap*> (static_cast<int>(CodeBlobType::All), mtCode);
  GrowableArray<CodeHeap*>* CodeCache::_compiled_heaps = new(mtCode) GrowableArray<CodeHeap*> (static_cast<int>(CodeBlobType::All), mtCode);
  GrowableArray<CodeHeap*>* CodeCache::_nmethod_heaps = new(mtCode) GrowableArray<CodeHeap*> (static_cast<int>(CodeBlobType::All), mtCode);
  GrowableArray<CodeHeap*>* CodeCache::_allocable_heaps = new(mtCode) GrowableArray<CodeHeap*> (static_cast<int>(CodeBlobType::All), mtCode);

*** 200,11 ***
    bool non_nmethod_set      = FLAG_IS_CMDLINE(NonNMethodCodeHeapSize);
    bool profiled_set         = FLAG_IS_CMDLINE(ProfiledCodeHeapSize);
    bool non_profiled_set     = FLAG_IS_CMDLINE(NonProfiledCodeHeapSize);
    const size_t ps           = page_size(false, 8);
    const size_t min_size     = MAX2(os::vm_allocation_granularity(), ps);
!   const size_t cache_size   = ReservedCodeCacheSize;
    size_t non_nmethod_size   = NonNMethodCodeHeapSize;
    size_t profiled_size      = ProfiledCodeHeapSize;
    size_t non_profiled_size  = NonProfiledCodeHeapSize;
    // Check if total size set via command line flags exceeds the reserved size
    check_heap_sizes((non_nmethod_set  ? non_nmethod_size  : min_size),
--- 204,11 ---
    bool non_nmethod_set      = FLAG_IS_CMDLINE(NonNMethodCodeHeapSize);
    bool profiled_set         = FLAG_IS_CMDLINE(ProfiledCodeHeapSize);
    bool non_profiled_set     = FLAG_IS_CMDLINE(NonProfiledCodeHeapSize);
    const size_t ps           = page_size(false, 8);
    const size_t min_size     = MAX2(os::vm_allocation_granularity(), ps);
!   size_t cache_size         = ReservedCodeCacheSize;
    size_t non_nmethod_size   = NonNMethodCodeHeapSize;
    size_t profiled_size      = ProfiledCodeHeapSize;
    size_t non_profiled_size  = NonProfiledCodeHeapSize;
    // Check if total size set via command line flags exceeds the reserved size
    check_heap_sizes((non_nmethod_set  ? non_nmethod_size  : min_size),

*** 220,11 ***
    const int c1_count = CompilationPolicy::c1_count();
    code_buffers_size += c1_count * Compiler::code_buffer_size();
  #endif
  #ifdef COMPILER2
    // C2 scratch buffers (see Compile::init_scratch_buffer_blob())
!   const int c2_count = CompilationPolicy::c2_count();
    // Initial size of constant table (this may be increased if a compiled method needs more space)
    code_buffers_size += c2_count * C2Compiler::initial_code_buffer_size();
  #endif
  
    // Increase default non_nmethod_size to account for compiler buffers
--- 224,11 ---
    const int c1_count = CompilationPolicy::c1_count();
    code_buffers_size += c1_count * Compiler::code_buffer_size();
  #endif
  #ifdef COMPILER2
    // C2 scratch buffers (see Compile::init_scratch_buffer_blob())
!   const int c2_count = CompilationPolicy::c2_count() + CompilationPolicy::c3_count();
    // Initial size of constant table (this may be increased if a compiled method needs more space)
    code_buffers_size += c2_count * C2Compiler::initial_code_buffer_size();
  #endif
  
    // Increase default non_nmethod_size to account for compiler buffers

*** 327,22 ***
    // page size. This ensures that the code cache is covered by large pages.
    non_nmethod_size = align_up(non_nmethod_size, min_size);
    profiled_size    = align_down(profiled_size, min_size);
    non_profiled_size = align_down(non_profiled_size, min_size);
  
    // Reserve one continuous chunk of memory for CodeHeaps and split it into
    // parts for the individual heaps. The memory layout looks like this:
    // ---------- high -----------
    //    Non-profiled nmethods
    //         Non-nmethods
    //      Profiled nmethods
    // ---------- low ------------
    ReservedCodeSpace rs = reserve_heap_memory(cache_size, ps);
!   ReservedSpace profiled_space      = rs.first_part(profiled_size);
!   ReservedSpace rest                = rs.last_part(profiled_size);
!   ReservedSpace non_method_space    = rest.first_part(non_nmethod_size);
!   ReservedSpace non_profiled_space  = rest.last_part(non_nmethod_size);
  
    // Register CodeHeaps with LSan as we sometimes embed pointers to malloc memory.
    LSAN_REGISTER_ROOT_REGION(rs.base(), rs.size());
  
    // Non-nmethods (stubs, adapters, ...)
--- 331,27 ---
    // page size. This ensures that the code cache is covered by large pages.
    non_nmethod_size = align_up(non_nmethod_size, min_size);
    profiled_size    = align_down(profiled_size, min_size);
    non_profiled_size = align_down(non_profiled_size, min_size);
  
+   const size_t cds_code_size = align_up(CDSAccess::get_cached_code_size(), min_size);
+   cache_size += cds_code_size;
+ 
    // Reserve one continuous chunk of memory for CodeHeaps and split it into
    // parts for the individual heaps. The memory layout looks like this:
    // ---------- high -----------
    //    Non-profiled nmethods
    //         Non-nmethods
    //      Profiled nmethods
    // ---------- low ------------
    ReservedCodeSpace rs = reserve_heap_memory(cache_size, ps);
!   _cds_code_space                   = rs.first_part(cds_code_size);
!   ReservedSpace rest                = rs.last_part(cds_code_size);
!   ReservedSpace profiled_space      = rest.first_part(profiled_size);
!   ReservedSpace rest2               = rest.last_part(profiled_size);
+   ReservedSpace non_method_space    = rest2.first_part(non_nmethod_size);
+   ReservedSpace non_profiled_space  = rest2.last_part(non_nmethod_size);
  
    // Register CodeHeaps with LSan as we sometimes embed pointers to malloc memory.
    LSAN_REGISTER_ROOT_REGION(rs.base(), rs.size());
  
    // Non-nmethods (stubs, adapters, ...)

*** 351,10 ***
--- 360,18 ---
    add_heap(profiled_space, "CodeHeap 'profiled nmethods'", CodeBlobType::MethodProfiled);
    // Tier 1 and tier 4 (non-profiled) methods and native methods
    add_heap(non_profiled_space, "CodeHeap 'non-profiled nmethods'", CodeBlobType::MethodNonProfiled);
  }
  
+ // Map the archived (CDS) code into the dedicated _cds_code_space that was
+ // carved out of the front of the reserved code cache in initialize_heaps().
+ // Returns the base address of the mapped region on success, or nullptr when
+ // no cached-code space was reserved (size 0) or CDSAccess fails to map it.
+ // NOTE(review): relies on CDSAccess::map_cached_code() mapping in place over
+ // the reservation — confirm it does not remap elsewhere.
+ void* CodeCache::map_cached_code() {
+   if (_cds_code_space.size() > 0 && CDSAccess::map_cached_code(_cds_code_space)) {
+     return _cds_code_space.base();
+   } else {
+     return nullptr;
+   }
+ }
+ 
  size_t CodeCache::page_size(bool aligned, size_t min_pages) {
    return aligned ? os::page_size_for_region_aligned(ReservedCodeCacheSize, min_pages) :
                     os::page_size_for_region_unaligned(ReservedCodeCacheSize, min_pages);
  }
  

*** 1224,15 ***
          DependencySignature* current_sig = new DependencySignature(deps);
  
          // Determine if dependency is already checked. table->put(...) returns
          // 'true' if the dependency is added (i.e., was not in the hashtable).
          if (table->put(*current_sig, 1)) {
!           if (deps.check_dependency() != nullptr) {
              // Dependency checking failed. Print out information about the failed
              // dependency and finally fail with an assert. We can fail here, since
              // dependency checking is never done in a product build.
!             tty->print_cr("Failed dependency:");
              changes.print();
              nm->print();
              nm->print_dependencies_on(tty);
              assert(false, "Should have been marked for deoptimization");
            }
--- 1241,16 ---
          DependencySignature* current_sig = new DependencySignature(deps);
  
          // Determine if dependency is already checked. table->put(...) returns
          // 'true' if the dependency is added (i.e., was not in the hashtable).
          if (table->put(*current_sig, 1)) {
!           Klass* witness = deps.check_dependency();
+           if (witness != nullptr) {
              // Dependency checking failed. Print out information about the failed
              // dependency and finally fail with an assert. We can fail here, since
              // dependency checking is never done in a product build.
!             deps.print_dependency(tty, witness, true);
              changes.print();
              nm->print();
              nm->print_dependencies_on(tty);
              assert(false, "Should have been marked for deoptimization");
            }

*** 1254,10 ***
--- 1272,17 ---
    // nmethod::check_all_dependencies works only correctly, if no safepoint
    // can happen
    NoSafepointVerifier nsv;
    for (DepChange::ContextStream str(changes, nsv); str.next(); ) {
      InstanceKlass* d = str.klass();
+     {
+       LogStreamHandle(Trace, dependencies) log;
+       if (log.is_enabled()) {
+         log.print("Processing context ");
+         d->name()->print_value_on(&log);
+       }
+     }
      d->mark_dependent_nmethods(deopt_scope, changes);
    }
  
  #ifndef PRODUCT
    if (VerifyDependencies) {

*** 1518,10 ***
--- 1543,89 ---
    tty->print_cr("Allocated in freelist:          " SSIZE_FORMAT "kB",  bytes_allocated_in_freelists()/K);
    tty->print_cr("Unused bytes in CodeBlobs:      " SSIZE_FORMAT "kB",  (wasted_bytes/K));
    tty->print_cr("Segment map size:               " SSIZE_FORMAT "kB",  allocated_segments()/K); // 1 byte per segment
  }
  
+ // Print one "<prefix> <total> nmethods: <not_entrant> not_entrant, <used> used (<pct>%)"
+ // fragment on a single line (no newline; the caller terminates the line).
+ // Prints nothing when total == 0, which also guards the percentage division.
+ static void print_helper1(outputStream* st, const char* prefix, int total, int not_entrant, int used) {
+   if (total > 0) {
+     double ratio = (100.0 * used) / total;
+     st->print("%s %3d nmethods: %3d not_entrant, %d used (%2.1f%%)", prefix, total, not_entrant, used, ratio);
+   }
+ }
+ 
+ // Print a summary of all nmethods in the code cache broken down by
+ // compilation tier, entrant/not-entrant/other state, OSR-ness, and whether
+ // the method came from the startup code cache (SCC) or was preloaded.
+ void CodeCache::print_nmethod_statistics_on(outputStream* st) {
+   // Index scheme: [is_scc][comp_level (+1 when preloaded)][state][is_osr]
+   // where state is 0 = in_use, 1 = not_entrant, 2 = other.
+   // Empty braces zero-initialize the whole aggregate without relying on
+   // brace elision (avoids -Wmissing-braces on the 4-D arrays).
+   int stats     [2][6][3][2] = {};
+   int stats_used[2][6][3][2] = {};
+ 
+   int total_osr = 0;
+   int total_entrant = 0;
+   int total_non_entrant = 0;
+   int total_other = 0;
+   int total_used = 0;
+ 
+   NMethodIterator iter(NMethodIterator::all_blobs);
+   while (iter.next()) {
+     nmethod* nm = iter.method();
+     if (nm->is_in_use()) {
+       ++total_entrant;
+     } else if (nm->is_not_entrant()) {
+       ++total_non_entrant;
+     } else {
+       ++total_other;
+     }
+     if (nm->is_osr_method()) {
+       ++total_osr;
+     }
+     if (nm->used()) {
+       ++total_used;
+     }
+     // Preloaded methods are only produced at the top tier; idx2 below relies
+     // on that to map them past CompLevel_full_optimization.
+     assert(!nm->preloaded() || nm->comp_level() == CompLevel_full_optimization, "");
+ 
+     int idx1 = nm->is_scc() ? 1 : 0;
+     int idx2 = nm->comp_level() + (nm->preloaded() ? 1 : 0);
+     int idx3 = (nm->is_in_use()      ? 0 :
+                (nm->is_not_entrant() ? 1 :
+                                        2));
+     int idx4 = (nm->is_osr_method() ? 1 : 0);
+     stats[idx1][idx2][idx3][idx4] += 1;
+     if (nm->used()) {
+       stats_used[idx1][idx2][idx3][idx4] += 1;
+     }
+   }
+ 
+   st->print("Total: %d methods (%d entrant / %d not_entrant; osr: %d ",
+                total_entrant + total_non_entrant + total_other,
+                total_entrant, total_non_entrant, total_osr);
+   if (total_other > 0) {
+     st->print("; %d other", total_other);
+   }
+   st->print_cr(")");
+ 
+   // Per-tier breakdown of normal (non-SCC) methods. Loop-local totals are
+   // named level_* so they do not shadow the function-level total_osr above.
+   // Note: the "used" counts sum only the in_use and not_entrant states.
+   for (int i = CompLevel_simple; i <= CompLevel_full_optimization; i++) {
+     int level_normal = stats[0][i][0][0] + stats[0][i][1][0] + stats[0][i][2][0];
+     int level_osr    = stats[0][i][0][1] + stats[0][i][1][1] + stats[0][i][2][1];
+     if (level_normal + level_osr > 0) {
+       st->print("  Tier%d:", i);
+       print_helper1(st,      "", level_normal, stats[0][i][1][0], stats_used[0][i][0][0] + stats_used[0][i][1][0]);
+       print_helper1(st, "; osr:", level_osr,    stats[0][i][1][1], stats_used[0][i][0][1] + stats_used[0][i][1][1]);
+       st->cr();
+     }
+   }
+   st->cr();
+   // SCC methods; the extra +1 level slot holds preloaded top-tier methods.
+   for (int i = CompLevel_simple; i <= CompLevel_full_optimization + 1; i++) {
+     int level_normal = stats[1][i][0][0] + stats[1][i][1][0] + stats[1][i][2][0];
+     int level_osr    = stats[1][i][0][1] + stats[1][i][1][1] + stats[1][i][2][1];
+     assert(level_osr == 0, "sanity"); // SCC methods are never OSR-compiled
+     if (level_normal + level_osr > 0) {
+       st->print("  SC T%d:", i);
+       print_helper1(st,      "", level_normal, stats[1][i][1][0], stats_used[1][i][0][0] + stats_used[1][i][1][0]);
+       print_helper1(st, "; osr:", level_osr,    stats[1][i][1][1], stats_used[1][i][0][1] + stats_used[1][i][1][1]);
+       st->cr();
+     }
+   }
+ }
+ 
  //------------------------------------------------------------------------------------------------
  // Non-product version
  
  #ifndef PRODUCT
  

*** 1551,24 ***
    int max_nm_size = 0;
    ResourceMark rm;
  
    int i = 0;
    FOR_ALL_ALLOCABLE_HEAPS(heap) {
!     if ((_nmethod_heaps->length() >= 1) && Verbose) {
!       tty->print_cr("-- %s --", (*heap)->name());
-     }
      FOR_ALL_BLOBS(cb, *heap) {
        total++;
        if (cb->is_nmethod()) {
          nmethod* nm = (nmethod*)cb;
  
!         if (Verbose && nm->method() != nullptr) {
!           ResourceMark rm;
-           char *method_name = nm->method()->name_and_sig_as_C_string();
-           tty->print("%s", method_name);
-           if(nm->is_not_entrant()) { tty->print_cr(" not-entrant"); }
-         }
  
          nmethodCount++;
  
          if(nm->is_not_entrant()) { nmethodNotEntrant++; }
          if(nm->method() != nullptr && nm->is_native_method()) { nmethodNative++; }
--- 1655,20 ---
    int max_nm_size = 0;
    ResourceMark rm;
  
    int i = 0;
    FOR_ALL_ALLOCABLE_HEAPS(heap) {
!     int heap_total = 0;
!     tty->print_cr("-- %s --", (*heap)->name());
      FOR_ALL_BLOBS(cb, *heap) {
        total++;
+       heap_total++;
        if (cb->is_nmethod()) {
          nmethod* nm = (nmethod*)cb;
  
!         tty->print("%4d: ", heap_total);
!         CompileTask::print(tty, nm, (nm->is_not_entrant() ? "non-entrant" : ""), true, true);
  
          nmethodCount++;
  
          if(nm->is_not_entrant()) { nmethodNotEntrant++; }
          if(nm->method() != nullptr && nm->is_native_method()) { nmethodNative++; }

*** 1726,10 ***
--- 1826,29 ---
    }
  
  #endif // !PRODUCT
  }
  
+ // Print a one-line-per-nmethod listing for every allocable code heap,
+ // flagging not-entrant methods with a trailing 'N'.
+ // NOTE(review): 'i' counts every blob (not just nmethods), so printed
+ // indices may have gaps; this matches heap_total in print_internals —
+ // presumably intentional (blob index within the heap). Confirm.
+ void CodeCache::print_nmethods_on(outputStream* st) {
+   ResourceMark rm;
+   int i = 0;
+   FOR_ALL_ALLOCABLE_HEAPS(heap) {
+     st->print_cr("-- %s --", (*heap)->name());
+     FOR_ALL_BLOBS(cb, *heap) {
+       i++;
+       if (cb->is_nmethod()) {
+         nmethod* nm = (nmethod*)cb;
+         st->print("%4d: ", i);
+         CompileTask::print(st, nm, nullptr, true, false);
+ 
+         const char non_entrant_char = (nm->is_not_entrant() ? 'N' : ' ');
+         st->print_cr(" %c", non_entrant_char);
+       }
+     }
+   }
+ }
+ 
  void CodeCache::print_summary(outputStream* st, bool detailed) {
    int full_count = 0;
    julong total_used = 0;
    julong total_max_used = 0;
    julong total_free = 0;
< prev index next >