/*
 * Copyright (c) 1997, 2025, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#include "cds/cdsAccess.hpp"
#include "code/codeBlob.hpp"
#include "code/codeCache.hpp"
#include "code/codeHeapState.hpp"
#include "code/compiledIC.hpp"
#include "code/dependencies.hpp"
#include "code/dependencyContext.hpp"
#include "code/nmethod.hpp"
#include "code/pcDesc.hpp"
#include "code/SCCache.hpp"
#include "compiler/compilationPolicy.hpp"
#include "compiler/compileBroker.hpp"
#include "compiler/compilerDefinitions.inline.hpp"
#include "compiler/oopMap.hpp"
#include "gc/shared/barrierSetNMethod.hpp"
#include "gc/shared/classUnloadingContext.hpp"
#include "gc/shared/collectedHeap.hpp"
#include "jfr/jfrEvents.hpp"
#include "jvm_io.h"
#include "logging/log.hpp"
#include "logging/logStream.hpp"
#include "memory/allocation.inline.hpp"
#include "memory/iterator.hpp"
#include "memory/memoryReserver.hpp"
#include "memory/resourceArea.hpp"
#include "memory/universe.hpp"
#include "oops/method.inline.hpp"
#include "oops/objArrayOop.hpp"
#include "oops/oop.inline.hpp"
#include "oops/verifyOopClosure.hpp"
#include "runtime/arguments.hpp"
#include "runtime/atomic.hpp"
#include "runtime/deoptimization.hpp"
#include "runtime/globals_extension.hpp"
#include "runtime/handles.inline.hpp"
#include "runtime/icache.hpp"
#include "runtime/init.hpp"
#include "runtime/java.hpp"
#include "runtime/mutexLocker.hpp"
#include "runtime/os.inline.hpp"
#include "runtime/safepointVerifiers.hpp"
#include "runtime/vmThread.hpp"
#include "sanitizers/leak.hpp"
#include "services/memoryService.hpp"
#include "utilities/align.hpp"
#include "utilities/vmError.hpp"
#include "utilities/xmlstream.hpp"
#ifdef COMPILER1
#include "c1/c1_Compilation.hpp"
#include "c1/c1_Compiler.hpp"
#endif
#ifdef COMPILER2
#include "opto/c2compiler.hpp"
#include "opto/compile.hpp"
#include "opto/node.hpp"
#endif

// Helper class for printing in CodeCache
class CodeBlob_sizes {
 private:
  int count;
  int total_size;
  int header_size;
  int code_size;
  int stub_size;
  int relocation_size;
  int scopes_oop_size;
  int scopes_metadata_size;
  int scopes_data_size;
  int scopes_pcs_size;

 public:
  CodeBlob_sizes() {
    count                = 0;
    total_size           = 0;
    header_size          = 0;
    code_size            = 0;
    stub_size            = 0;
    relocation_size      = 0;
    scopes_oop_size      = 0;
    scopes_metadata_size = 0;
    scopes_data_size     = 0;
    scopes_pcs_size      = 0;
  }

  int total() const                              { return total_size; }
  bool is_empty() const                          { return count == 0; }

  void print(const char* title) const {
    if (is_empty()) {
      tty->print_cr(" #%d %s = %dK",
                    count,
                    title,
                    total()                 / (int)K);
    } else {
      tty->print_cr(" #%d %s = %dK (hdr %dK %d%%, loc %dK %d%%, code %dK %d%%, stub %dK %d%%, [oops %dK %d%%, metadata %dK %d%%, data %dK %d%%, pcs %dK %d%%])",
                    count,
                    title,
                    total()                 / (int)K,
                    header_size             / (int)K,
                    header_size             * 100 / total_size,
                    relocation_size         / (int)K,
                    relocation_size         * 100 / total_size,
                    code_size               / (int)K,
                    code_size               * 100 / total_size,
                    stub_size               / (int)K,
                    stub_size               * 100 / total_size,
                    scopes_oop_size         / (int)K,
                    scopes_oop_size         * 100 / total_size,
                    scopes_metadata_size    / (int)K,
                    scopes_metadata_size    * 100 / total_size,
                    scopes_data_size        / (int)K,
                    scopes_data_size        * 100 / total_size,
                    scopes_pcs_size         / (int)K,
                    scopes_pcs_size         * 100 / total_size);
    }
  }

  void add(CodeBlob* cb) {
    count++;
    total_size       += cb->size();
    header_size      += cb->header_size();
    relocation_size  += cb->relocation_size();
    if (cb->is_nmethod()) {
      nmethod* nm = cb->as_nmethod_or_null();
      code_size        += nm->insts_size();
      stub_size        += nm->stub_size();

      scopes_oop_size      += nm->oops_size();
      scopes_metadata_size += nm->metadata_size();
      scopes_data_size     += nm->scopes_data_size();
      scopes_pcs_size      += nm->scopes_pcs_size();
    } else {
      code_size        += cb->code_size();
    }
  }
};
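
// Illustrative use (a sketch, not a specific call site in this file): walk one
// heap with the FOR_ALL_BLOBS macro defined below and print its aggregate
// statistics.
//
//   CodeBlob_sizes sizes;
//   FOR_ALL_BLOBS(cb, heap) {
//     sizes.add(cb);
//   }
//   sizes.print(heap->name());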

// Iterate over all CodeHeaps
#define FOR_ALL_HEAPS(heap) for (GrowableArrayIterator<CodeHeap*> heap = _heaps->begin(); heap != _heaps->end(); ++heap)
#define FOR_ALL_ALLOCABLE_HEAPS(heap) for (GrowableArrayIterator<CodeHeap*> heap = _allocable_heaps->begin(); heap != _allocable_heaps->end(); ++heap)

// Iterate over all CodeBlobs (cb) on the given CodeHeap
#define FOR_ALL_BLOBS(cb, heap) for (CodeBlob* cb = first_blob(heap); cb != nullptr; cb = next_blob(heap, cb))

address CodeCache::_low_bound = nullptr;
address CodeCache::_high_bound = nullptr;
volatile int CodeCache::_number_of_nmethods_with_dependencies = 0;
ExceptionCache* volatile CodeCache::_exception_cache_purge_list = nullptr;

static ReservedSpace _cds_code_space;

// Initialize arrays of CodeHeap subsets
GrowableArray<CodeHeap*>* CodeCache::_heaps = new(mtCode) GrowableArray<CodeHeap*> (static_cast<int>(CodeBlobType::All), mtCode);
GrowableArray<CodeHeap*>* CodeCache::_nmethod_heaps = new(mtCode) GrowableArray<CodeHeap*> (static_cast<int>(CodeBlobType::All), mtCode);
GrowableArray<CodeHeap*>* CodeCache::_allocable_heaps = new(mtCode) GrowableArray<CodeHeap*> (static_cast<int>(CodeBlobType::All), mtCode);

static void check_min_size(const char* codeheap, size_t size, size_t required_size) {
  if (size < required_size) {
    log_debug(codecache)("Code heap (%s) size %zuK below required minimal size %zuK",
                         codeheap, size/K, required_size/K);
    err_msg title("Not enough space in %s to run VM", codeheap);
    err_msg message("%zuK < %zuK", size/K, required_size/K);
    vm_exit_during_initialization(title, message);
  }
}

struct CodeHeapInfo {
  size_t size;
  bool set;
  bool enabled;
};

static void set_size_of_unset_code_heap(CodeHeapInfo* heap, size_t available_size, size_t used_size, size_t min_size) {
  assert(!heap->set, "sanity");
  heap->size = (available_size > (used_size + min_size)) ? (available_size - used_size) : min_size;
}
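
// Example: with available_size = 240M, used_size = 200M and min_size = 8M, the
// unset heap receives 240M - 200M = 40M; had available_size not exceeded
// used_size + min_size, the heap would be clamped to min_size instead.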

void CodeCache::initialize_heaps() {
  CodeHeapInfo non_nmethod = {NonNMethodCodeHeapSize, FLAG_IS_CMDLINE(NonNMethodCodeHeapSize), true};
  CodeHeapInfo profiled = {ProfiledCodeHeapSize, FLAG_IS_CMDLINE(ProfiledCodeHeapSize), true};
  CodeHeapInfo non_profiled = {NonProfiledCodeHeapSize, FLAG_IS_CMDLINE(NonProfiledCodeHeapSize), true};

  const bool cache_size_set   = FLAG_IS_CMDLINE(ReservedCodeCacheSize);
  const size_t ps             = page_size(false, 8);
  const size_t min_size       = MAX2(os::vm_allocation_granularity(), ps);
  const size_t min_cache_size = CodeCacheMinimumUseSpace DEBUG_ONLY(* 3); // Make sure we have enough space for VM internal code
  size_t cache_size           = align_up(ReservedCodeCacheSize, min_size);

  // Prerequisites
  if (!heap_available(CodeBlobType::MethodProfiled)) {
    // For compatibility reasons, disabled tiered compilation overrides
    // segment size even if it is set explicitly.
    non_profiled.size += profiled.size;
    // Profiled code heap is not available, forcibly set size to 0
    profiled.size = 0;
    profiled.set = true;
    profiled.enabled = false;
  }

  assert(heap_available(CodeBlobType::MethodNonProfiled), "MethodNonProfiled heap is always available for segmented code heap");

  size_t compiler_buffer_size = 0;
  COMPILER1_PRESENT(compiler_buffer_size += CompilationPolicy::c1_count() * Compiler::code_buffer_size());
  COMPILER2_PRESENT(compiler_buffer_size += (CompilationPolicy::c2_count() + CompilationPolicy::c3_count()) * C2Compiler::initial_code_buffer_size());

  if (!non_nmethod.set) {
    non_nmethod.size += compiler_buffer_size;
    // Further down, just before FLAG_SET_ERGO(), all segment sizes are
    // aligned down to the next lower multiple of min_size. For large page
    // sizes, this may result in (non_nmethod.size == 0) which is not acceptable.
    // Therefore, force non_nmethod.size to at least min_size.
    non_nmethod.size = MAX2(non_nmethod.size, min_size);
  }

  if (!profiled.set && !non_profiled.set) {
    non_profiled.size = profiled.size = (cache_size > non_nmethod.size + 2 * min_size) ?
                                        (cache_size - non_nmethod.size) / 2 : min_size;
  }

  if (profiled.set && !non_profiled.set) {
    set_size_of_unset_code_heap(&non_profiled, cache_size, non_nmethod.size + profiled.size, min_size);
  }

  if (!profiled.set && non_profiled.set) {
    set_size_of_unset_code_heap(&profiled, cache_size, non_nmethod.size + non_profiled.size, min_size);
  }

  // Compatibility.
  size_t non_nmethod_min_size = min_cache_size + compiler_buffer_size;
  if (!non_nmethod.set && profiled.set && non_profiled.set) {
    set_size_of_unset_code_heap(&non_nmethod, cache_size, profiled.size + non_profiled.size, non_nmethod_min_size);
  }

  size_t total = non_nmethod.size + profiled.size + non_profiled.size;
  if (total != cache_size && !cache_size_set) {
    log_info(codecache)("ReservedCodeCache size %zuK changed to total segments size NonNMethod "
                        "%zuK NonProfiled %zuK Profiled %zuK = %zuK",
                        cache_size/K, non_nmethod.size/K, non_profiled.size/K, profiled.size/K, total/K);
    // Adjust ReservedCodeCacheSize as necessary because it was not set explicitly
    cache_size = total;
  }

  log_debug(codecache)("Initializing code heaps ReservedCodeCache %zuK NonNMethod %zuK"
                       " NonProfiled %zuK Profiled %zuK",
                       cache_size/K, non_nmethod.size/K, non_profiled.size/K, profiled.size/K);

  // Validation
  // Check minimal required sizes
  check_min_size("non-nmethod code heap", non_nmethod.size, non_nmethod_min_size);
  if (profiled.enabled) {
    check_min_size("profiled code heap", profiled.size, min_size);
  }
  if (non_profiled.enabled) { // non_profiled.enabled is always ON for segmented code heap, leave it checked for clarity
    check_min_size("non-profiled code heap", non_profiled.size, min_size);
  }
  if (cache_size_set) {
    check_min_size("reserved code cache", cache_size, min_cache_size);
  }

  // ReservedCodeCacheSize was set explicitly, so report an error and abort if it doesn't match the segment sizes
  if (total != cache_size && cache_size_set) {
    err_msg message("NonNMethodCodeHeapSize (%zuK)", non_nmethod.size/K);
    if (profiled.enabled) {
      message.append(" + ProfiledCodeHeapSize (%zuK)", profiled.size/K);
    }
    if (non_profiled.enabled) {
      message.append(" + NonProfiledCodeHeapSize (%zuK)", non_profiled.size/K);
    }
    message.append(" = %zuK", total/K);
    message.append((total > cache_size) ? " is greater than " : " is less than ");
    message.append("ReservedCodeCacheSize (%zuK).", cache_size/K);

    vm_exit_during_initialization("Invalid code heap sizes", message);
  }

  // Compatibility. Print warning if using large pages but not able to use the size given
  if (UseLargePages) {
    const size_t lg_ps = page_size(false, 1);
    if (ps < lg_ps) {
      log_warning(codecache)("Code cache size too small for " PROPERFMT " pages. "
                             "Reverting to smaller page size (" PROPERFMT ").",
                             PROPERFMTARGS(lg_ps), PROPERFMTARGS(ps));
    }
  }

  // Note: if large page support is enabled, min_size is at least the large
  // page size. This ensures that the code cache is covered by large pages.
  non_profiled.size += non_nmethod.size & alignment_mask(min_size);
  non_profiled.size += profiled.size & alignment_mask(min_size);
  non_nmethod.size = align_down(non_nmethod.size, min_size);
  profiled.size = align_down(profiled.size, min_size);
  non_profiled.size = align_down(non_profiled.size, min_size);

  FLAG_SET_ERGO(NonNMethodCodeHeapSize, non_nmethod.size);
  FLAG_SET_ERGO(ProfiledCodeHeapSize, profiled.size);
  FLAG_SET_ERGO(NonProfiledCodeHeapSize, non_profiled.size);
  FLAG_SET_ERGO(ReservedCodeCacheSize, cache_size);

  const size_t cds_code_size = align_up(CDSAccess::get_cached_code_size(), min_size);
  cache_size += cds_code_size;

  ReservedSpace rs = reserve_heap_memory(cache_size, ps);

  // Register CodeHeaps with LSan as we sometimes embed pointers to malloc memory.
  LSAN_REGISTER_ROOT_REGION(rs.base(), rs.size());

  size_t offset = 0;
  if (cds_code_size > 0) {
    // FIXME: use CodeHeapInfo for this hack ...
    _cds_code_space = rs.partition(offset, cds_code_size);
    offset += cds_code_size;
  }

  if (profiled.enabled) {
    ReservedSpace profiled_space = rs.partition(offset, profiled.size);
    offset += profiled.size;
    // Tier 2 and tier 3 (profiled) methods
    add_heap(profiled_space, "CodeHeap 'profiled nmethods'", CodeBlobType::MethodProfiled);
  }

  ReservedSpace non_method_space = rs.partition(offset, non_nmethod.size);
  offset += non_nmethod.size;
  // Non-nmethods (stubs, adapters, ...)
  add_heap(non_method_space, "CodeHeap 'non-nmethods'", CodeBlobType::NonNMethod);

  if (non_profiled.enabled) {
    ReservedSpace non_profiled_space = rs.partition(offset, non_profiled.size);
    // Tier 1 and tier 4 (non-profiled) methods and native methods
    add_heap(non_profiled_space, "CodeHeap 'non-profiled nmethods'", CodeBlobType::MethodNonProfiled);
  }
}
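
// The reserved region above is carved up in ascending address order as
//   [ cached code (CDS, optional) | profiled | non-nmethod | non-profiled ]
// Keeping the non-nmethod heap between the two method heaps bounds the
// distance from any nmethod to non-nmethod code (see
// max_distance_to_non_nmethod()).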

void* CodeCache::map_cached_code() {
  if (_cds_code_space.size() > 0 && CDSAccess::map_cached_code(_cds_code_space)) {
    return _cds_code_space.base();
  } else {
    return nullptr;
  }
}

size_t CodeCache::page_size(bool aligned, size_t min_pages) {
  return aligned ? os::page_size_for_region_aligned(ReservedCodeCacheSize, min_pages) :
                   os::page_size_for_region_unaligned(ReservedCodeCacheSize, min_pages);
}

ReservedSpace CodeCache::reserve_heap_memory(size_t size, size_t rs_ps) {
  // Align and reserve space for code cache
  const size_t rs_align = MAX2(rs_ps, os::vm_allocation_granularity());
  const size_t rs_size = align_up(size, rs_align);

  ReservedSpace rs = CodeMemoryReserver::reserve(rs_size, rs_align, rs_ps);
  if (!rs.is_reserved()) {
    vm_exit_during_initialization(err_msg("Could not reserve enough space for code cache (%zuK)",
                                          rs_size/K));
  }

  // Initialize bounds
  _low_bound = (address)rs.base();
  _high_bound = _low_bound + rs.size();
  return rs;
}

// Heaps available for allocation
bool CodeCache::heap_available(CodeBlobType code_blob_type) {
  if (!SegmentedCodeCache) {
    // No segmentation: use a single code heap
    return (code_blob_type == CodeBlobType::All);
  } else if (CompilerConfig::is_interpreter_only()) {
    // Interpreter only: we don't need any method code heaps
    return (code_blob_type == CodeBlobType::NonNMethod);
  } else if (CompilerConfig::is_c1_profiling()) {
    // Tiered compilation: use all code heaps
    return (code_blob_type < CodeBlobType::All);
  } else {
    // No TieredCompilation: we only need the non-nmethod and non-profiled code heap
    return (code_blob_type == CodeBlobType::NonNMethod) ||
           (code_blob_type == CodeBlobType::MethodNonProfiled);
  }
}
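
// In summary, the heaps available per configuration are:
//   !SegmentedCodeCache     -> All (single heap)
//   interpreter only        -> NonNMethod
//   C1 profiling (tiered)   -> NonNMethod, MethodProfiled, MethodNonProfiled
//   otherwise               -> NonNMethod, MethodNonProfiled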

const char* CodeCache::get_code_heap_flag_name(CodeBlobType code_blob_type) {
  switch(code_blob_type) {
  case CodeBlobType::NonNMethod:
    return "NonNMethodCodeHeapSize";
  case CodeBlobType::MethodNonProfiled:
    return "NonProfiledCodeHeapSize";
  case CodeBlobType::MethodProfiled:
    return "ProfiledCodeHeapSize";
  default:
    ShouldNotReachHere();
    return nullptr;
  }
}

int CodeCache::code_heap_compare(CodeHeap* const &lhs, CodeHeap* const &rhs) {
  if (lhs->code_blob_type() == rhs->code_blob_type()) {
    return (lhs > rhs) ? 1 : ((lhs < rhs) ? -1 : 0);
  } else {
    return static_cast<int>(lhs->code_blob_type()) - static_cast<int>(rhs->code_blob_type());
  }
}

void CodeCache::add_heap(CodeHeap* heap) {
  assert(!Universe::is_fully_initialized(), "late heap addition?");

  _heaps->insert_sorted<code_heap_compare>(heap);

  CodeBlobType type = heap->code_blob_type();
  if (code_blob_type_accepts_nmethod(type)) {
    _nmethod_heaps->insert_sorted<code_heap_compare>(heap);
  }
  if (code_blob_type_accepts_allocable(type)) {
    _allocable_heaps->insert_sorted<code_heap_compare>(heap);
  }
}

void CodeCache::add_heap(ReservedSpace rs, const char* name, CodeBlobType code_blob_type) {
  // Check if heap is needed
  if (!heap_available(code_blob_type)) {
    return;
  }

  // Create CodeHeap
  CodeHeap* heap = new CodeHeap(name, code_blob_type);
  add_heap(heap);

  // Reserve Space
  size_t size_initial = MIN2((size_t)InitialCodeCacheSize, rs.size());
  size_initial = align_up(size_initial, rs.page_size());
  if (!heap->reserve(rs, size_initial, CodeCacheSegmentSize)) {
    vm_exit_during_initialization(err_msg("Could not reserve enough space in %s (%zuK)",
                                          heap->name(), size_initial/K));
  }

  // Register the CodeHeap
  MemoryService::add_code_heap_memory_pool(heap, name);
}

CodeHeap* CodeCache::get_code_heap_containing(void* start) {
  FOR_ALL_HEAPS(heap) {
    if ((*heap)->contains(start)) {
      return *heap;
    }
  }
  return nullptr;
}

CodeHeap* CodeCache::get_code_heap(const void* cb) {
  assert(cb != nullptr, "CodeBlob is null");
  FOR_ALL_HEAPS(heap) {
    if ((*heap)->contains(cb)) {
      return *heap;
    }
  }
  ShouldNotReachHere();
  return nullptr;
}

CodeHeap* CodeCache::get_code_heap(CodeBlobType code_blob_type) {
  FOR_ALL_HEAPS(heap) {
    if ((*heap)->accepts(code_blob_type)) {
      return *heap;
    }
  }
  return nullptr;
}

CodeBlob* CodeCache::first_blob(CodeHeap* heap) {
  assert_locked_or_safepoint(CodeCache_lock);
  assert(heap != nullptr, "heap is null");
  return (CodeBlob*)heap->first();
}

CodeBlob* CodeCache::first_blob(CodeBlobType code_blob_type) {
  if (heap_available(code_blob_type)) {
    return first_blob(get_code_heap(code_blob_type));
  } else {
    return nullptr;
  }
}

CodeBlob* CodeCache::next_blob(CodeHeap* heap, CodeBlob* cb) {
  assert_locked_or_safepoint(CodeCache_lock);
  assert(heap != nullptr, "heap is null");
  return (CodeBlob*)heap->next(cb);
}

/**
 * Do not seize the CodeCache lock here--if the caller has not
 * already done so, we are going to lose bigtime, since the code
 * cache will contain a garbage CodeBlob until the caller can
 * run the constructor for the CodeBlob subclass it is busy
 * instantiating.
 */
CodeBlob* CodeCache::allocate(uint size, CodeBlobType code_blob_type, bool handle_alloc_failure, CodeBlobType orig_code_blob_type) {
  assert_locked_or_safepoint(CodeCache_lock);
  assert(size > 0, "Code cache allocation request must be > 0");
  if (size == 0) {
    return nullptr;
  }
  CodeBlob* cb = nullptr;

  // Get CodeHeap for the given CodeBlobType
  CodeHeap* heap = get_code_heap(code_blob_type);
  assert(heap != nullptr, "heap is null");

  while (true) {
    cb = (CodeBlob*)heap->allocate(size);
    if (cb != nullptr) break;
    if (!heap->expand_by(CodeCacheExpansionSize)) {
      // Save original type for error reporting
      if (orig_code_blob_type == CodeBlobType::All) {
        orig_code_blob_type = code_blob_type;
      }
      // Expansion failed
      if (SegmentedCodeCache) {
        // Fallback solution: Try to store code in another code heap.
        // NonNMethod -> MethodNonProfiled -> MethodProfiled (-> MethodNonProfiled)
        CodeBlobType type = code_blob_type;
        switch (type) {
        case CodeBlobType::NonNMethod:
          type = CodeBlobType::MethodNonProfiled;
          break;
        case CodeBlobType::MethodNonProfiled:
          type = CodeBlobType::MethodProfiled;
          break;
        case CodeBlobType::MethodProfiled:
          // Avoid loop if we already tried that code heap
          if (type == orig_code_blob_type) {
            type = CodeBlobType::MethodNonProfiled;
          }
          break;
        default:
          break;
        }
        if (type != code_blob_type && type != orig_code_blob_type && heap_available(type)) {
          if (PrintCodeCacheExtension) {
            tty->print_cr("Extension of %s failed. Trying to allocate in %s.",
                          heap->name(), get_code_heap(type)->name());
          }
          return allocate(size, type, handle_alloc_failure, orig_code_blob_type);
        }
      }
      if (handle_alloc_failure) {
        MutexUnlocker mu(CodeCache_lock, Mutex::_no_safepoint_check_flag);
        CompileBroker::handle_full_code_cache(orig_code_blob_type);
      }
      return nullptr;
    } else {
      OrderAccess::release(); // ensure heap expansion is visible to an asynchronous observer (e.g. CodeHeapPool::get_memory_usage())
    }
    if (PrintCodeCacheExtension) {
      ResourceMark rm;
      if (_nmethod_heaps->length() >= 1) {
        tty->print("%s", heap->name());
      } else {
        tty->print("CodeCache");
      }
      tty->print_cr(" extended to [" INTPTR_FORMAT ", " INTPTR_FORMAT "] (%zd bytes)",
                    (intptr_t)heap->low_boundary(), (intptr_t)heap->high(),
                    (address)heap->high() - (address)heap->low_boundary());
    }
  }
  print_trace("allocation", cb, size);
  return cb;
}

void CodeCache::free(CodeBlob* cb) {
  assert_locked_or_safepoint(CodeCache_lock);
  CodeHeap* heap = get_code_heap(cb);
  print_trace("free", cb);
  if (cb->is_nmethod()) {
    heap->set_nmethod_count(heap->nmethod_count() - 1);
    if (((nmethod *)cb)->has_dependencies()) {
      Atomic::dec(&_number_of_nmethods_with_dependencies);
    }
  }
  if (cb->is_adapter_blob()) {
    heap->set_adapter_count(heap->adapter_count() - 1);
  }

  cb->~CodeBlob();
  // Get heap for given CodeBlob and deallocate
  heap->deallocate(cb);

  assert(heap->blob_count() >= 0, "sanity check");
}

void CodeCache::free_unused_tail(CodeBlob* cb, size_t used) {
  assert_locked_or_safepoint(CodeCache_lock);
  guarantee(cb->is_buffer_blob() && strncmp("Interpreter", cb->name(), 11) == 0, "Only possible for interpreter!");
  print_trace("free_unused_tail", cb);

  // We also have to account for the extra space (i.e. header) used by the CodeBlob
  // which provides the memory (see BufferBlob::create() in codeBlob.cpp).
  used += CodeBlob::align_code_offset(cb->header_size());

  // Get heap for given CodeBlob and deallocate its unused tail
  get_code_heap(cb)->deallocate_tail(cb, used);
  // Adjust the sizes of the CodeBlob
  cb->adjust_size(used);
}

void CodeCache::commit(CodeBlob* cb) {
  // this is called by nmethod::nmethod, which must already own CodeCache_lock
  assert_locked_or_safepoint(CodeCache_lock);
  CodeHeap* heap = get_code_heap(cb);
  if (cb->is_nmethod()) {
    heap->set_nmethod_count(heap->nmethod_count() + 1);
    if (((nmethod *)cb)->has_dependencies()) {
      Atomic::inc(&_number_of_nmethods_with_dependencies);
    }
  }
  if (cb->is_adapter_blob()) {
    heap->set_adapter_count(heap->adapter_count() + 1);
  }
}

bool CodeCache::contains(void *p) {
  // S390 uses contains() in current_frame(), which is used before
  // code cache initialization if NativeMemoryTracking=detail is set.
  S390_ONLY(if (_heaps == nullptr) return false;)
  // It should be ok to call contains without holding a lock.
  FOR_ALL_HEAPS(heap) {
    if ((*heap)->contains(p)) {
      return true;
    }
  }
  return false;
}

bool CodeCache::contains(nmethod *nm) {
  return contains((void *)nm);
}

// This method is safe to call without holding the CodeCache_lock. It only depends on the _segmap to contain
// valid indices, which it will always do, as long as the CodeBlob is not in the process of being recycled.
CodeBlob* CodeCache::find_blob(void* start) {
  // NMT can walk the stack before code cache is created
  if (_heaps != nullptr) {
    CodeHeap* heap = get_code_heap_containing(start);
    if (heap != nullptr) {
      return heap->find_blob(start);
    }
  }
  return nullptr;
}

nmethod* CodeCache::find_nmethod(void* start) {
  CodeBlob* cb = find_blob(start);
  assert(cb == nullptr || cb->is_nmethod(), "did not find an nmethod");
  return (nmethod*)cb;
}

void CodeCache::blobs_do(void f(CodeBlob* nm)) {
  assert_locked_or_safepoint(CodeCache_lock);
  FOR_ALL_HEAPS(heap) {
    FOR_ALL_BLOBS(cb, *heap) {
      f(cb);
    }
  }
}

void CodeCache::nmethods_do(void f(nmethod* nm)) {
  assert_locked_or_safepoint(CodeCache_lock);
  NMethodIterator iter(NMethodIterator::all);
  while(iter.next()) {
    f(iter.method());
  }
}

void CodeCache::nmethods_do(NMethodClosure* cl) {
  assert_locked_or_safepoint(CodeCache_lock);
  NMethodIterator iter(NMethodIterator::all);
  while(iter.next()) {
    cl->do_nmethod(iter.method());
  }
}

void CodeCache::metadata_do(MetadataClosure* f) {
  assert_locked_or_safepoint(CodeCache_lock);
  NMethodIterator iter(NMethodIterator::all);
  while(iter.next()) {
    iter.method()->metadata_do(f);
  }
}

// Calculate the number of GCs for which an nmethod must have gone unused
// before it is classed as cold.
void CodeCache::update_cold_gc_count() {
  if (!MethodFlushing || !UseCodeCacheFlushing || NmethodSweepActivity == 0) {
    // No aging
    return;
  }

  size_t last_used = _last_unloading_used;
  double last_time = _last_unloading_time;

  double time = os::elapsedTime();

  size_t free = unallocated_capacity();
  size_t max = max_capacity();
  size_t used = max - free;
  double gc_interval = time - last_time;

  _unloading_threshold_gc_requested = false;
  _last_unloading_time = time;
  _last_unloading_used = used;

  if (last_time == 0.0) {
    // The first GC doesn't have enough information to make good
    // decisions, so just keep everything afloat
    log_info(codecache)("Unknown code cache pressure; don't age code");
    return;
  }

  if (gc_interval <= 0.0 || last_used >= used) {
    // Dodge corner cases where there is no pressure or negative pressure
    // on the code cache. Just don't unload when this happens.
    _cold_gc_count = INT_MAX;
    log_info(codecache)("No code cache pressure; don't age code");
    return;
  }

  double allocation_rate = (used - last_used) / gc_interval;

  _unloading_allocation_rates.add(allocation_rate);
  _unloading_gc_intervals.add(gc_interval);

  size_t aggressive_sweeping_free_threshold = StartAggressiveSweepingAt / 100.0 * max;
  if (free < aggressive_sweeping_free_threshold) {
    // We are already in the red zone; be very aggressive to avoid disaster.
    // But no more aggressive than 2: this ensures that an nmethod must
    // have gone unused for at least two GCs to be considered cold.
    _cold_gc_count = 2;
    log_info(codecache)("Code cache critically low; use aggressive aging");
    return;
  }

  // The code cache has an expected time for cold nmethods to "time out"
  // when they have not been used. The time for nmethods to time out
  // depends on how long we expect we can keep allocating code until
  // aggressive sweeping starts, based on sampled allocation rates.
  double average_gc_interval = _unloading_gc_intervals.avg();
  double average_allocation_rate = _unloading_allocation_rates.avg();
  double time_to_aggressive = ((double)(free - aggressive_sweeping_free_threshold)) / average_allocation_rate;
  double cold_timeout = time_to_aggressive / NmethodSweepActivity;

  // Convert time to GC cycles, and crop at INT_MAX. The reason for that is
  // that the _cold_gc_count will be added to an epoch number, and that
  // addition must not overflow, or we can crash the VM. The lower bound of 2
  // again ensures that an nmethod must have gone unused for at least two GCs
  // to be considered cold.
  _cold_gc_count = MAX2(MIN2((uint64_t)(cold_timeout / average_gc_interval), (uint64_t)INT_MAX), (uint64_t)2);

  double used_ratio = double(used) / double(max);
  double last_used_ratio = double(last_used) / double(max);
  log_info(codecache)("Allocation rate: %.3f KB/s, time to aggressive unloading: %.3f s, cold timeout: %.3f s, cold gc count: " UINT64_FORMAT
                      ", used: %.3f MB (%.3f%%), last used: %.3f MB (%.3f%%), gc interval: %.3f s",
                      average_allocation_rate / K, time_to_aggressive, cold_timeout, _cold_gc_count,
                      double(used) / M, used_ratio * 100.0, double(last_used) / M, last_used_ratio * 100.0, average_gc_interval);
}
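
// Worked example for update_cold_gc_count() (illustrative numbers only):
// suppose 120 MB are free, the aggressive sweeping threshold sits at 20 MB
// free, and the sampled average allocation rate is 1 MB/s. Then
// time_to_aggressive is (120 - 20) / 1 = 100 s. With NmethodSweepActivity == 10
// the cold timeout is 10 s, and with an average GC interval of 2 s this yields
// _cold_gc_count == 5: an nmethod not used for 5 GCs is considered cold.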

uint64_t CodeCache::cold_gc_count() {
  return _cold_gc_count;
}

void CodeCache::gc_on_allocation() {
  if (!is_init_completed()) {
    // Let's not heuristically trigger GCs before the JVM is ready for GCs, no matter what
    return;
  }

  size_t free = unallocated_capacity();
  size_t max = max_capacity();
  size_t used = max - free;
  double free_ratio = double(free) / double(max);
  if (free_ratio <= StartAggressiveSweepingAt / 100.0) {
    // In case the GC is concurrent, we make sure only one thread requests the GC.
    if (Atomic::cmpxchg(&_unloading_threshold_gc_requested, false, true) == false) {
      log_info(codecache)("Triggering aggressive GC due to having only %.3f%% free memory", free_ratio * 100.0);
      Universe::heap()->collect(GCCause::_codecache_GC_aggressive);
    }
    return;
  }

  size_t last_used = _last_unloading_used;
  if (last_used >= used) {
    // No increase since last GC; no need to sweep yet
    return;
  }
  size_t allocated_since_last = used - last_used;
  double allocated_since_last_ratio = double(allocated_since_last) / double(max);
  double threshold = SweeperThreshold / 100.0;
  double used_ratio = double(used) / double(max);
  double last_used_ratio = double(last_used) / double(max);
  if (used_ratio > threshold) {
    // After threshold is reached, scale it by free_ratio so that more aggressive
    // GC is triggered as we approach code cache exhaustion
    threshold *= free_ratio;
  }
  // If code cache has been allocated without any GC at all, let's make sure
  // it is eventually invoked to avoid trouble.
  if (allocated_since_last_ratio > threshold) {
    // In case the GC is concurrent, we make sure only one thread requests the GC.
    if (Atomic::cmpxchg(&_unloading_threshold_gc_requested, false, true) == false) {
      log_info(codecache)("Triggering threshold (%.3f%%) GC due to allocating %.3f%% since last unloading (%.3f%% used -> %.3f%% used)",
                          threshold * 100.0, allocated_since_last_ratio * 100.0, last_used_ratio * 100.0, used_ratio * 100.0);
      Universe::heap()->collect(GCCause::_codecache_GC_threshold);
    }
  }
}
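
// Example of the threshold scaling in gc_on_allocation() (illustrative
// numbers): with SweeperThreshold == 0.5, the base threshold is 0.5% of the
// cache. Once more than 0.5% is in use, the threshold is multiplied by
// free_ratio, so at 20% free the effective threshold drops to 0.1% and
// unloading GCs are triggered more eagerly as the cache fills up.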

// We initialize _gc_epoch to 2, because previous_completed_gc_marking_cycle
// subtracts 2 from it and the type is unsigned; starting at 2 avoids underflow.
//
// Odd values mean that marking is in progress, and even values mean that no
// marking is currently active.
uint64_t CodeCache::_gc_epoch = 2;

// After how many GCs without use do we consider an nmethod cold?
uint64_t CodeCache::_cold_gc_count = INT_MAX;

double CodeCache::_last_unloading_time = 0.0;
size_t CodeCache::_last_unloading_used = 0;
volatile bool CodeCache::_unloading_threshold_gc_requested = false;
TruncatedSeq CodeCache::_unloading_gc_intervals(10 /* samples */);
TruncatedSeq CodeCache::_unloading_allocation_rates(10 /* samples */);

uint64_t CodeCache::gc_epoch() {
  return _gc_epoch;
}

bool CodeCache::is_gc_marking_cycle_active() {
  // Odd means that marking is active
  return (_gc_epoch % 2) == 1;
}

uint64_t CodeCache::previous_completed_gc_marking_cycle() {
  if (is_gc_marking_cycle_active()) {
    return _gc_epoch - 2;
  } else {
    return _gc_epoch - 1;
  }
}
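
// Example: at _gc_epoch == 7 a marking cycle is active (odd value), so the
// previously completed cycle is the one that started at epoch 5. At
// _gc_epoch == 8 no marking is active (even value), and the cycle that just
// finished started at epoch 7.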

void CodeCache::on_gc_marking_cycle_start() {
  assert(!is_gc_marking_cycle_active(), "Previous marking cycle never ended");
  ++_gc_epoch;
}

// Once started, the code cache marking cycle must only be finished after marking of
// the Java heap is complete. Otherwise nmethods could appear to be not on stack even
// if they have frames in continuation StackChunks that were not yet visited.
void CodeCache::on_gc_marking_cycle_finish() {
  assert(is_gc_marking_cycle_active(), "Marking cycle started before last one finished");
  ++_gc_epoch;
  update_cold_gc_count();
}

void CodeCache::arm_all_nmethods() {
  BarrierSet::barrier_set()->barrier_set_nmethod()->arm_all_nmethods();
}

// Mark nmethods for unloading if they contain otherwise unreachable oops.
void CodeCache::do_unloading(bool unloading_occurred) {
  assert_locked_or_safepoint(CodeCache_lock);
  NMethodIterator iter(NMethodIterator::all);
  while(iter.next()) {
    iter.method()->do_unloading(unloading_occurred);
  }
}

void CodeCache::verify_clean_inline_caches() {
#ifdef ASSERT
  NMethodIterator iter(NMethodIterator::not_unloading);
  while(iter.next()) {
    nmethod* nm = iter.method();
    nm->verify_clean_inline_caches();
    nm->verify();
  }
#endif
}

// Defer freeing of concurrently cleaned ExceptionCache entries until
// after a global handshake operation.
void CodeCache::release_exception_cache(ExceptionCache* entry) {
  if (SafepointSynchronize::is_at_safepoint()) {
    delete entry;
  } else {
    for (;;) {
      ExceptionCache* purge_list_head = Atomic::load(&_exception_cache_purge_list);
      entry->set_purge_list_next(purge_list_head);
      if (Atomic::cmpxchg(&_exception_cache_purge_list, purge_list_head, entry) == purge_list_head) {
        break;
      }
    }
  }
}
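
// The CAS loop above is a conventional lock-free stack push: concurrent
// cleaners only ever push entries onto _exception_cache_purge_list, and the
// list is only drained (in purge_exception_caches()) after the global
// handshake, so the pushes never race with the removal of entries.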

// Delete exception caches that have been concurrently unlinked,
// followed by a global handshake operation.
void CodeCache::purge_exception_caches() {
  ExceptionCache* curr = _exception_cache_purge_list;
  while (curr != nullptr) {
    ExceptionCache* next = curr->purge_list_next();
    delete curr;
    curr = next;
  }
  _exception_cache_purge_list = nullptr;
}

// Restart the compiler if possible and required.
void CodeCache::maybe_restart_compiler(size_t freed_memory) {
  // Try to start the compiler again if we freed any memory
  if (!CompileBroker::should_compile_new_jobs() && freed_memory != 0) {
    CompileBroker::set_should_compile_new_jobs(CompileBroker::run_compilation);
    log_info(codecache)("Restarting compiler");
    EventJITRestart event;
    event.set_freedMemory(freed_memory);
    event.set_codeCacheMaxCapacity(CodeCache::max_capacity());
    event.commit();
  }
}

uint8_t CodeCache::_unloading_cycle = 1;

void CodeCache::increment_unloading_cycle() {
  // 2-bit value (see IsUnloadingState in nmethod.cpp for details)
  // 0 is reserved for new methods.
  _unloading_cycle = (_unloading_cycle + 1) % 4;
  if (_unloading_cycle == 0) {
    _unloading_cycle = 1;
  }
}
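
// The cycle value therefore progresses 1 -> 2 -> 3 -> 1 -> ..., never
// revisiting 0, which nmethods reserve to mean "new" (see IsUnloadingState
// in nmethod.cpp).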

CodeCache::UnlinkingScope::UnlinkingScope(BoolObjectClosure* is_alive)
  : _is_unloading_behaviour(is_alive)
{
  _saved_behaviour = IsUnloadingBehaviour::current();
  IsUnloadingBehaviour::set_current(&_is_unloading_behaviour);
  increment_unloading_cycle();
  DependencyContext::cleaning_start();
}

CodeCache::UnlinkingScope::~UnlinkingScope() {
  IsUnloadingBehaviour::set_current(_saved_behaviour);
  DependencyContext::cleaning_end();
}

void CodeCache::verify_oops() {
  MutexLocker mu(CodeCache_lock, Mutex::_no_safepoint_check_flag);
  VerifyOopClosure voc;
  NMethodIterator iter(NMethodIterator::not_unloading);
  while(iter.next()) {
    nmethod* nm = iter.method();
    nm->oops_do(&voc);
    nm->verify_oop_relocations();
  }
}

int CodeCache::blob_count(CodeBlobType code_blob_type) {
  CodeHeap* heap = get_code_heap(code_blob_type);
  return (heap != nullptr) ? heap->blob_count() : 0;
}

int CodeCache::blob_count() {
  int count = 0;
  FOR_ALL_HEAPS(heap) {
    count += (*heap)->blob_count();
  }
  return count;
}

int CodeCache::nmethod_count(CodeBlobType code_blob_type) {
  CodeHeap* heap = get_code_heap(code_blob_type);
  return (heap != nullptr) ? heap->nmethod_count() : 0;
}

int CodeCache::nmethod_count() {
  int count = 0;
  for (CodeHeap* heap : *_nmethod_heaps) {
    count += heap->nmethod_count();
  }
  return count;
}

int CodeCache::adapter_count(CodeBlobType code_blob_type) {
  CodeHeap* heap = get_code_heap(code_blob_type);
  return (heap != nullptr) ? heap->adapter_count() : 0;
}

int CodeCache::adapter_count() {
  int count = 0;
  FOR_ALL_HEAPS(heap) {
    count += (*heap)->adapter_count();
  }
  return count;
}

address CodeCache::low_bound(CodeBlobType code_blob_type) {
  CodeHeap* heap = get_code_heap(code_blob_type);
  return (heap != nullptr) ? (address)heap->low_boundary() : nullptr;
}

address CodeCache::high_bound(CodeBlobType code_blob_type) {
  CodeHeap* heap = get_code_heap(code_blob_type);
  return (heap != nullptr) ? (address)heap->high_boundary() : nullptr;
}

size_t CodeCache::capacity() {
  size_t cap = 0;
  FOR_ALL_ALLOCABLE_HEAPS(heap) {
    cap += (*heap)->capacity();
  }
  return cap;
}

size_t CodeCache::unallocated_capacity(CodeBlobType code_blob_type) {
  CodeHeap* heap = get_code_heap(code_blob_type);
  return (heap != nullptr) ? heap->unallocated_capacity() : 0;
}

size_t CodeCache::unallocated_capacity() {
  size_t unallocated_cap = 0;
  FOR_ALL_ALLOCABLE_HEAPS(heap) {
    unallocated_cap += (*heap)->unallocated_capacity();
  }
  return unallocated_cap;
}

size_t CodeCache::max_capacity() {
  size_t max_cap = 0;
  FOR_ALL_ALLOCABLE_HEAPS(heap) {
    max_cap += (*heap)->max_capacity();
  }
  return max_cap;
}

bool CodeCache::is_non_nmethod(address addr) {
  CodeHeap* heap = get_code_heap(CodeBlobType::NonNMethod);
  return heap->contains(addr);
}

size_t CodeCache::max_distance_to_non_nmethod() {
  if (!SegmentedCodeCache) {
    return ReservedCodeCacheSize;
  } else {
    CodeHeap* heap = get_code_heap(CodeBlobType::NonNMethod);
    // the max distance is minimized by placing the NonNMethod segment
    // in between MethodProfiled and MethodNonProfiled segments
    size_t dist1 = (size_t)heap->high() - (size_t)_low_bound;
    size_t dist2 = (size_t)_high_bound - (size_t)heap->low();
    return dist1 > dist2 ? dist1 : dist2;
  }
}
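
// Example: with the segmented layout [profiled | non-nmethod | non-profiled]
// created in initialize_heaps(), dist1 spans the profiled plus non-nmethod
// heaps and dist2 spans the non-nmethod plus non-profiled heaps, so the
// returned maximum is well below the full reserved cache size.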

// Returns the reverse free ratio. E.g., if 25% (1/4) of the code cache
// is free, reverse_free_ratio() returns 4.
// Since the code heap for each code blob type can fall back to the next
// heap type when full, return the reverse free ratio for the entire
// code cache.
double CodeCache::reverse_free_ratio() {
  double unallocated = MAX2((double)unallocated_capacity(), 1.0); // Avoid division by 0
  double max = (double)max_capacity();
  double result = max / unallocated;
  assert(max >= unallocated, "Must be");
  assert(result >= 1.0, "reverse_free_ratio must be at least 1. It is %f", result);
  return result;
}

size_t CodeCache::bytes_allocated_in_freelists() {
  size_t allocated_bytes = 0;
  FOR_ALL_ALLOCABLE_HEAPS(heap) {
    allocated_bytes += (*heap)->allocated_in_freelist();
  }
  return allocated_bytes;
}

int CodeCache::allocated_segments() {
  int number_of_segments = 0;
  FOR_ALL_ALLOCABLE_HEAPS(heap) {
    number_of_segments += (*heap)->allocated_segments();
  }
  return number_of_segments;
}

size_t CodeCache::freelists_length() {
  size_t length = 0;
  FOR_ALL_ALLOCABLE_HEAPS(heap) {
    length += (*heap)->freelist_length();
  }
  return length;
}

void icache_init();

void CodeCache::initialize() {
  assert(CodeCacheSegmentSize >= (uintx)CodeEntryAlignment, "CodeCacheSegmentSize must be large enough to align entry points");
#ifdef COMPILER2
  assert(CodeCacheSegmentSize >= (uintx)OptoLoopAlignment,  "CodeCacheSegmentSize must be large enough to align inner loops");
#endif
  assert(CodeCacheSegmentSize >= sizeof(jdouble),    "CodeCacheSegmentSize must be large enough to align constants");
  // This was originally just an alignment check that caused a failure;
  // instead, round the expansion size up to the page size. In particular,
  // Solaris moved to a larger default page size.
  CodeCacheExpansionSize = align_up(CodeCacheExpansionSize, os::vm_page_size());

  if (SegmentedCodeCache) {
    // Use multiple code heaps
    initialize_heaps();
  } else {
    // Use a single code heap
    FLAG_SET_ERGO(NonNMethodCodeHeapSize, (uintx)os::vm_page_size());
    FLAG_SET_ERGO(ProfiledCodeHeapSize, 0);
    FLAG_SET_ERGO(NonProfiledCodeHeapSize, 0);

    // If InitialCodeCacheSize is equal to ReservedCodeCacheSize, then it's more likely
    // users want to use the largest available page.
    const size_t min_pages = (InitialCodeCacheSize == ReservedCodeCacheSize) ? 1 : 8;
    ReservedSpace rs = reserve_heap_memory(ReservedCodeCacheSize, page_size(false, min_pages));
    // Register CodeHeaps with LSan as we sometimes embed pointers to malloc memory.
    LSAN_REGISTER_ROOT_REGION(rs.base(), rs.size());
    add_heap(rs, "CodeCache", CodeBlobType::All);
  }

  // Initialize ICache flush mechanism
  // This service is needed for os::register_code_area
  icache_init();

  // Give OS a chance to register generated code area.
  // This is used on Windows 64 bit platforms to register
  // Structured Exception Handlers for our generated code.
  os::register_code_area((char*)low_bound(), (char*)high_bound());
}

void codeCache_init() {
  CodeCache::initialize();
}

//------------------------------------------------------------------------------------------------

bool CodeCache::has_nmethods_with_dependencies() {
  return Atomic::load_acquire(&_number_of_nmethods_with_dependencies) != 0;
}

void CodeCache::clear_inline_caches() {
  assert_locked_or_safepoint(CodeCache_lock);
  NMethodIterator iter(NMethodIterator::not_unloading);
  while(iter.next()) {
    iter.method()->clear_inline_caches();
  }
}

// Only used by whitebox API
void CodeCache::cleanup_inline_caches_whitebox() {
  assert_locked_or_safepoint(CodeCache_lock);
  NMethodIterator iter(NMethodIterator::not_unloading);
  while(iter.next()) {
    iter.method()->cleanup_inline_caches_whitebox();
  }
}

// Keeps track of time spent for checking dependencies
NOT_PRODUCT(static elapsedTimer dependentCheckTime;)

#ifndef PRODUCT
// Check if any of the live nmethods' dependencies have been invalidated.
// (This is expensive!)
static void check_live_nmethods_dependencies(DepChange& changes) {
  // Checked dependencies are allocated into this ResourceMark
  ResourceMark rm;

  // Turn off dependency tracing while actually testing dependencies.
  FlagSetting fs(Dependencies::_verify_in_progress, true);

  typedef ResourceHashtable<DependencySignature, int, 11027,
                            AnyObj::RESOURCE_AREA, mtInternal,
                            &DependencySignature::hash,
                            &DependencySignature::equals> DepTable;

  DepTable* table = new DepTable();

  // Iterate over live nmethods and check dependencies of all nmethods that are not
  // marked for deoptimization. A particular dependency is only checked once.
  NMethodIterator iter(NMethodIterator::not_unloading);
  while(iter.next()) {
    nmethod* nm = iter.method();
    // Only notify for live nmethods
    if (!nm->is_marked_for_deoptimization()) {
      for (Dependencies::DepStream deps(nm); deps.next(); ) {
        // Construct abstraction of a dependency.
        DependencySignature* current_sig = new DependencySignature(deps);

        // Determine if dependency is already checked. table->put(...) returns
        // 'true' if the dependency is added (i.e., was not in the hashtable).
        if (table->put(*current_sig, 1)) {
          Klass* witness = deps.check_dependency();
          if (witness != nullptr) {
            // Dependency checking failed. Print out information about the failed
            // dependency and finally fail with an assert. We can fail here, since
            // dependency checking is never done in a product build.
            deps.print_dependency(tty, witness, true);
            changes.print();
            nm->print();
            nm->print_dependencies_on(tty);
            assert(false, "Should have been marked for deoptimization");
          }
        }
      }
    }
  }
}
#endif

void CodeCache::mark_for_deoptimization(DeoptimizationScope* deopt_scope, KlassDepChange& changes) {
  MutexLocker mu(CodeCache_lock, Mutex::_no_safepoint_check_flag);

  // Search the hierarchy looking for nmethods which are affected by the loading of this class.

  // Then search the interfaces this class implements looking for nmethods
  // which might be dependent on the fact that an interface only had one
  // implementor.
  // nmethod::check_all_dependencies works correctly only if no safepoint
  // can happen.
  NoSafepointVerifier nsv;
  for (DepChange::ContextStream str(changes, nsv); str.next(); ) {
    InstanceKlass* d = str.klass();
    {
      LogStreamHandle(Trace, dependencies) log;
      if (log.is_enabled()) {
        log.print("Processing context ");
        d->name()->print_value_on(&log);
      }
    }
    d->mark_dependent_nmethods(deopt_scope, changes);
  }

#ifndef PRODUCT
  if (VerifyDependencies) {
    // Object pointers are used as unique identifiers for dependency arguments. This
    // is only possible if no safepoint, i.e., GC occurs during the verification code.
    dependentCheckTime.start();
    check_live_nmethods_dependencies(changes);
    dependentCheckTime.stop();
  }
#endif
}

#if INCLUDE_JVMTI
// RedefineClasses support for saving nmethods that are dependent on "old" methods.
// We don't really expect this table to grow very large.  If it does, it can become a hashtable.
static GrowableArray<nmethod*>* old_nmethod_table = nullptr;

static void add_to_old_table(nmethod* c) {
  if (old_nmethod_table == nullptr) {
    old_nmethod_table = new (mtCode) GrowableArray<nmethod*>(100, mtCode);
  }
  old_nmethod_table->push(c);
}

static void reset_old_method_table() {
  if (old_nmethod_table != nullptr) {
    delete old_nmethod_table;
    old_nmethod_table = nullptr;
  }
}

// Remove this nmethod from the table when it is flushed.
void CodeCache::unregister_old_nmethod(nmethod* c) {
  assert_lock_strong(CodeCache_lock);
  if (old_nmethod_table != nullptr) {
    int index = old_nmethod_table->find(c);
    if (index != -1) {
      old_nmethod_table->delete_at(index);
    }
  }
}

void CodeCache::old_nmethods_do(MetadataClosure* f) {
  // Walk old method table and mark those on stack.
  int length = 0;
  if (old_nmethod_table != nullptr) {
    length = old_nmethod_table->length();
    for (int i = 0; i < length; i++) {
      // Walk all methods saved on the last pass.  Concurrent class unloading may
      // also be looking at this method's metadata, so don't delete it yet if
      // it is marked as unloaded.
      old_nmethod_table->at(i)->metadata_do(f);
    }
  }
  log_debug(redefine, class, nmethod)("Walked %d nmethods for mark_on_stack", length);
}
1325 
1326 // Walk compiled methods and mark dependent methods for deoptimization.
1327 void CodeCache::mark_dependents_for_evol_deoptimization(DeoptimizationScope* deopt_scope) {
1328   assert(SafepointSynchronize::is_at_safepoint(), "Can only do this at a safepoint!");
1329   // Each redefinition creates a new set of nmethods that have references to "old" Methods
1330   // So delete old method table and create a new one.
1331   reset_old_method_table();
1332 
1333   NMethodIterator iter(NMethodIterator::all);
1334   while(iter.next()) {
1335     nmethod* nm = iter.method();
1336     // Walk all alive nmethods to check for old Methods.
1337     // This includes methods whose inline caches point to old methods, so
1338     // inline cache clearing is unnecessary.
1339     if (nm->has_evol_metadata()) {
1340       deopt_scope->mark(nm);
1341       add_to_old_table(nm);
1342     }
1343   }
1344 }
1345 
1346 void CodeCache::mark_all_nmethods_for_evol_deoptimization(DeoptimizationScope* deopt_scope) {
1347   assert(SafepointSynchronize::is_at_safepoint(), "Can only do this at a safepoint!");
1348   NMethodIterator iter(NMethodIterator::all);
1349   while(iter.next()) {
1350     nmethod* nm = iter.method();
1351     if (!nm->method()->is_method_handle_intrinsic()) {
1352       if (nm->can_be_deoptimized()) {
1353         deopt_scope->mark(nm);
1354       }
1355       if (nm->has_evol_metadata()) {
1356         add_to_old_table(nm);
1357       }
1358     }
1359   }
1360 }
1361 
1362 #endif // INCLUDE_JVMTI
1363 
1364 // Mark methods for deopt (if safe or possible).
1365 void CodeCache::mark_all_nmethods_for_deoptimization(DeoptimizationScope* deopt_scope) {
1366   MutexLocker mu(CodeCache_lock, Mutex::_no_safepoint_check_flag);
1367   NMethodIterator iter(NMethodIterator::not_unloading);
1368   while(iter.next()) {
1369     nmethod* nm = iter.method();
1370     if (!nm->is_native_method()) {
1371       deopt_scope->mark(nm);
1372     }
1373   }
1374 }
1375 
1376 void CodeCache::mark_for_deoptimization(DeoptimizationScope* deopt_scope, Method* dependee) {
1377   MutexLocker mu(CodeCache_lock, Mutex::_no_safepoint_check_flag);
1378 
1379   NMethodIterator iter(NMethodIterator::not_unloading);
1380   while (iter.next()) {
1381     nmethod* nm = iter.method();
1382     if (nm->is_dependent_on_method(dependee)) {
1383       deopt_scope->mark(nm);
1384     }
1385   }
1386 }
1387 
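     // Make every nmethod that was marked for deoptimization not entrant and patch it for
     // deoptimization, skipping nmethods that were already deoptimized or cannot be.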
1388 void CodeCache::make_marked_nmethods_deoptimized() {
1389   RelaxedNMethodIterator iter(RelaxedNMethodIterator::not_unloading);
1390   while (iter.next()) {
1391     nmethod* nm = iter.method();
1392     if (nm->is_marked_for_deoptimization() && !nm->has_been_deoptimized() && nm->can_be_deoptimized()) {
1393       nm->make_not_entrant("marked for deoptimization");
1394       nm->make_deoptimized();
1395     }
1396   }
1397 }
1398 
1399 // Marks compiled methods dependent on dependee.
1400 void CodeCache::mark_dependents_on(DeoptimizationScope* deopt_scope, InstanceKlass* dependee) {
1401   assert_lock_strong(Compile_lock);
1402 
1403   if (!has_nmethods_with_dependencies()) {
1404     return;
1405   }
1406 
1407   if (dependee->is_linked()) {
1408     // Class initialization state change.
1409     KlassInitDepChange changes(dependee);
1410     mark_for_deoptimization(deopt_scope, changes);
1411   } else {
1412     // New class is loaded.
1413     NewKlassDepChange changes(dependee);
1414     mark_for_deoptimization(deopt_scope, changes);
1415   }
1416 }
1417 
1418 // Marks and deoptimizes compiled methods dependent on the given method (e.g. when a breakpoint is set).
1419 void CodeCache::mark_dependents_on_method_for_breakpoint(const methodHandle& m_h) {
1420   assert(SafepointSynchronize::is_at_safepoint(), "invariant");
1421 
1422   DeoptimizationScope deopt_scope;
1423   // Compute the dependent nmethods
1424   mark_for_deoptimization(&deopt_scope, m_h());
1425   deopt_scope.deoptimize_marked();
1426 }
1427 
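     // Verify the structure of each code heap and of every blob it contains.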
1428 void CodeCache::verify() {
1429   assert_locked_or_safepoint(CodeCache_lock);
1430   FOR_ALL_HEAPS(heap) {
1431     (*heap)->verify();
1432     FOR_ALL_BLOBS(cb, *heap) {
1433       cb->verify();
1434     }
1435   }
1436 }
1437 
1438 // A CodeHeap is full. Print out warning and report event.
1439 PRAGMA_DIAG_PUSH
1440 PRAGMA_FORMAT_NONLITERAL_IGNORED
1441 void CodeCache::report_codemem_full(CodeBlobType code_blob_type, bool print) {
1442   // Get nmethod heap for the given CodeBlobType and build CodeCacheFull event
1443   CodeHeap* heap = get_code_heap(code_blob_type);
1444   assert(heap != nullptr, "heap is null");
1445 
1446   int full_count = heap->report_full();
1447 
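       // heap->report_full() returns the number of times this heap has run full, so the
       // detailed warning below is printed only on the first failure (unless 'print' forces it).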
1448   if ((full_count == 1) || print) {
1449     // Not yet reported for this heap; report it now.
1450     if (SegmentedCodeCache) {
1451       ResourceMark rm;
1452       stringStream msg1_stream, msg2_stream;
1453       msg1_stream.print("%s is full. Compiler has been disabled.",
1454                         get_code_heap_name(code_blob_type));
1455       msg2_stream.print("Try increasing the code heap size using -XX:%s=",
1456                         get_code_heap_flag_name(code_blob_type));
1457       const char *msg1 = msg1_stream.as_string();
1458       const char *msg2 = msg2_stream.as_string();
1459 
1460       log_warning(codecache)("%s", msg1);
1461       log_warning(codecache)("%s", msg2);
1462       warning("%s", msg1);
1463       warning("%s", msg2);
1464     } else {
1465       const char *msg1 = "CodeCache is full. Compiler has been disabled.";
1466       const char *msg2 = "Try increasing the code cache size using -XX:ReservedCodeCacheSize=";
1467 
1468       log_warning(codecache)("%s", msg1);
1469       log_warning(codecache)("%s", msg2);
1470       warning("%s", msg1);
1471       warning("%s", msg2);
1472     }
1473     stringStream s;
1474     // Dump code cache into a buffer before locking the tty.
1475     {
1476       MutexLocker mu(CodeCache_lock, Mutex::_no_safepoint_check_flag);
1477       print_summary(&s);
1478     }
1479     {
1480       ttyLocker ttyl;
1481       tty->print("%s", s.freeze());
1482     }
1483 
1484     if (full_count == 1) {
1485       if (PrintCodeHeapAnalytics) {
1486         CompileBroker::print_heapinfo(tty, "all", 4096); // details, may be a lot!
1487       }
1488     }
1489   }
1490 
1491   EventCodeCacheFull event;
1492   if (event.should_commit()) {
1493     event.set_codeBlobType((u1)code_blob_type);
1494     event.set_startAddress((u8)heap->low_boundary());
1495     event.set_commitedTopAddress((u8)heap->high());
1496     event.set_reservedTopAddress((u8)heap->high_boundary());
1497     event.set_entryCount(heap->blob_count());
1498     event.set_methodCount(heap->nmethod_count());
1499     event.set_adaptorCount(heap->adapter_count());
1500     event.set_unallocatedCapacity(heap->unallocated_capacity());
1501     event.set_fullCount(heap->full_count());
1502     event.set_codeCacheMaxCapacity(CodeCache::max_capacity());
1503     event.commit();
1504   }
1505 }
1506 PRAGMA_DIAG_POP
1507 
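     // Report allocation slack: every CodeBlob occupies a whole number of code heap segments,
     // so its HeapBlock length (in segments) times CodeCacheSegmentSize, minus the blob's
     // actual size, is wasted space.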
1508 void CodeCache::print_memory_overhead() {
1509   size_t wasted_bytes = 0;
1510   FOR_ALL_ALLOCABLE_HEAPS(heap) {
1511       CodeHeap* curr_heap = *heap;
1512       for (CodeBlob* cb = (CodeBlob*)curr_heap->first(); cb != nullptr; cb = (CodeBlob*)curr_heap->next(cb)) {
1513         HeapBlock* heap_block = ((HeapBlock*)cb) - 1;
1514         wasted_bytes += heap_block->length() * CodeCacheSegmentSize - cb->size();
1515       }
1516   }
1517   // Print free-list statistics and the per-blob waste computed above.
1518   ttyLocker ttl;
1519   tty->print_cr("Number of elements in freelist: %zd", freelists_length());
1520   tty->print_cr("Allocated in freelist:          %zdkB", bytes_allocated_in_freelists()/K);
1521   tty->print_cr("Unused bytes in CodeBlobs:      %zdkB", wasted_bytes/K);
1522   tty->print_cr("Segment map size:               %zdkB", allocated_segments()/K); // 1 byte per segment
1523 }
1524 
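     // Print one statistics fragment; e.g. (illustrative): "  120 nmethods:   7 not_entrant, 95 used (79.2%)".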
1525 static void print_helper1(outputStream* st, const char* prefix, int total, int not_entrant, int used) {
1526   if (total > 0) {
1527     double ratio = (100.0 * used) / total;
1528     st->print("%s %3d nmethods: %3d not_entrant, %d used (%2.1f%%)", prefix, total, not_entrant, used, ratio);
1529   }
1530 }
1531 
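     // Dimensions of the stats arrays below:
     //   [0] 0 = normal, 1 = loaded from the startup code cache (is_scc())
     //   [1] compilation level, shifted up by one for preloaded nmethods
     //   [2] 0 = in use, 1 = not entrant, 2 = other
     //   [3] 0 = normal compile, 1 = OSR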
1532 void CodeCache::print_nmethod_statistics_on(outputStream* st) {
1533   int stats     [2][6][3][2] = {0};
1534   int stats_used[2][6][3][2] = {0};
1535 
1536   int total_osr = 0;
1537   int total_entrant = 0;
1538   int total_non_entrant = 0;
1539   int total_other = 0;
1540   int total_used = 0;
1541 
1542   NMethodIterator iter(NMethodIterator::all);
1543   while (iter.next()) {
1544     nmethod* nm = iter.method();
1545     if (nm->is_in_use()) {
1546       ++total_entrant;
1547     } else if (nm->is_not_entrant()) {
1548       ++total_non_entrant;
1549     } else {
1550       ++total_other;
1551     }
1552     if (nm->is_osr_method()) {
1553       ++total_osr;
1554     }
1555     if (nm->used()) {
1556       ++total_used;
1557     }
1558     assert(!nm->preloaded() || nm->comp_level() == CompLevel_full_optimization, "preloaded nmethods must be compiled at the highest tier");
1559 
1560     int idx1 = nm->is_scc() ? 1 : 0;
1561     int idx2 = nm->comp_level() + (nm->preloaded() ? 1 : 0);
1562     int idx3 = (nm->is_in_use()      ? 0 :
1563                (nm->is_not_entrant() ? 1 :
1564                                        2));
1565     int idx4 = (nm->is_osr_method() ? 1 : 0);
1566     stats[idx1][idx2][idx3][idx4] += 1;
1567     if (nm->used()) {
1568       stats_used[idx1][idx2][idx3][idx4] += 1;
1569     }
1570   }
1571 
1572   st->print("Total: %d methods (%d entrant / %d not_entrant; osr: %d ",
1573                total_entrant + total_non_entrant + total_other,
1574                total_entrant, total_non_entrant, total_osr);
1575   if (total_other > 0) {
1576     st->print("; %d other", total_other);
1577   }
1578   st->print_cr(")");
1579 
1580   for (int i = CompLevel_simple; i <= CompLevel_full_optimization; i++) {
1581     int total_normal = stats[0][i][0][0] + stats[0][i][1][0] + stats[0][i][2][0];
1582     int total_osr    = stats[0][i][0][1] + stats[0][i][1][1] + stats[0][i][2][1];
1583     if (total_normal + total_osr > 0) {
1584       st->print("  Tier%d:", i);
1585       print_helper1(st,      "", total_normal, stats[0][i][1][0], stats_used[0][i][0][0] + stats_used[0][i][1][0]);
1586       print_helper1(st, "; osr:", total_osr,    stats[0][i][1][1], stats_used[0][i][0][1] + stats_used[0][i][1][1]);
1587       st->cr();
1588     }
1589   }
1590   st->cr();
1591   for (int i = CompLevel_simple; i <= CompLevel_full_optimization + 1; i++) {
1592     int total_normal = stats[1][i][0][0] + stats[1][i][1][0] + stats[1][i][2][0];
1593     int total_osr    = stats[1][i][0][1] + stats[1][i][1][1] + stats[1][i][2][1];
1594     assert(total_osr == 0, "sanity");
1595     if (total_normal + total_osr > 0) {
1596       st->print("  SC T%d:", i);
1597       print_helper1(st,      "", total_normal, stats[1][i][1][0], stats_used[1][i][0][0] + stats_used[1][i][1][0]);
1598       print_helper1(st, "; osr:", total_osr,    stats[1][i][1][1], stats_used[1][i][0][1] + stats_used[1][i][1][1]);
1599       st->cr();
1600     }
1601   }
1602 }
1603 
1604 //------------------------------------------------------------------------------------------------
1605 // Non-product version
1606 
1607 #ifndef PRODUCT
1608 
1609 void CodeCache::print_trace(const char* event, CodeBlob* cb, uint size) {
1610   if (PrintCodeCache2) {  // Need to add a new flag
1611     ResourceMark rm;
1612     if (size == 0) {
1613       int s = cb->size();
1614       assert(s >= 0, "CodeBlob size is negative: %d", s);
1615       size = (uint) s;
1616     }
1617     tty->print_cr("CodeCache %s:  addr: " INTPTR_FORMAT ", size: 0x%x", event, p2i(cb), size);
1618   }
1619 }
1620 
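     // Print every nmethod in the allocable heaps, count all blobs by kind, and report a
     // histogram of Java nmethod sizes in 512-byte buckets, plus the allocation overhead.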
1621 void CodeCache::print_internals() {
1622   int nmethodCount = 0;
1623   int runtimeStubCount = 0;
1624   int upcallStubCount = 0;
1625   int adapterCount = 0;
1626   int mhAdapterCount = 0;
1627   int vtableBlobCount = 0;
1628   int deoptimizationStubCount = 0;
1629   int uncommonTrapStubCount = 0;
1630   int exceptionStubCount = 0;
1631   int safepointStubCount = 0;
1632   int bufferBlobCount = 0;
1633   int total = 0;
1634   int nmethodNotEntrant = 0;
1635   int nmethodJava = 0;
1636   int nmethodNative = 0;
1637   int max_nm_size = 0;
1638   ResourceMark rm;
1639 
1641   FOR_ALL_ALLOCABLE_HEAPS(heap) {
1642     int heap_total = 0;
1643     tty->print_cr("-- %s --", (*heap)->name());
1644     FOR_ALL_BLOBS(cb, *heap) {
1645       total++;
1646       heap_total++;
1647       if (cb->is_nmethod()) {
1648         nmethod* nm = cb->as_nmethod();
1649 
1650         tty->print("%4d: ", heap_total);
1651         CompileTask::print(tty, nm, (nm->is_not_entrant() ? "non-entrant" : ""), true, true);
1652 
1653         nmethodCount++;
1654 
1655         if (nm->is_not_entrant()) { nmethodNotEntrant++; }
1656         if (nm->method() != nullptr && nm->is_native_method()) { nmethodNative++; }
1657 
1658         if (nm->method() != nullptr && nm->is_java_method()) {
1659           nmethodJava++;
1660           max_nm_size = MAX2(max_nm_size, nm->size());
1661         }
1662       } else if (cb->is_runtime_stub()) {
1663         runtimeStubCount++;
1664       } else if (cb->is_upcall_stub()) {
1665         upcallStubCount++;
1666       } else if (cb->is_deoptimization_stub()) {
1667         deoptimizationStubCount++;
1668       } else if (cb->is_uncommon_trap_stub()) {
1669         uncommonTrapStubCount++;
1670       } else if (cb->is_exception_stub()) {
1671         exceptionStubCount++;
1672       } else if (cb->is_safepoint_stub()) {
1673         safepointStubCount++;
1674       } else if (cb->is_adapter_blob()) {
1675         adapterCount++;
1676       } else if (cb->is_method_handles_adapter_blob()) {
1677         mhAdapterCount++;
1678       } else if (cb->is_vtable_blob()) {
1679         vtableBlobCount++;
1680       } else if (cb->is_buffer_blob()) {
1681         bufferBlobCount++;
1682       }
1683     }
1684   }
1685 
1686   int bucketSize = 512;
1687   int bucketLimit = max_nm_size / bucketSize + 1;
1688   int *buckets = NEW_C_HEAP_ARRAY(int, bucketLimit, mtCode);
1689   memset(buckets, 0, sizeof(int) * bucketLimit);
1690 
1691   NMethodIterator iter(NMethodIterator::all);
1692   while (iter.next()) {
1693     nmethod* nm = iter.method();
1694     if (nm->method() != nullptr && nm->is_java_method()) {
1695       buckets[nm->size() / bucketSize]++;
1696     }
1697   }
1698 
1699   tty->print_cr("Code Cache Entries (total of %d)",total);
1700   tty->print_cr("-------------------------------------------------");
1701   tty->print_cr("nmethods: %d",nmethodCount);
1702   tty->print_cr("\tnot_entrant: %d",nmethodNotEntrant);
1703   tty->print_cr("\tjava: %d",nmethodJava);
1704   tty->print_cr("\tnative: %d",nmethodNative);
1705   tty->print_cr("runtime_stubs: %d",runtimeStubCount);
1706   tty->print_cr("upcall_stubs: %d",upcallStubCount);
1707   tty->print_cr("adapters: %d",adapterCount);
1708   tty->print_cr("MH adapters: %d",mhAdapterCount);
1709   tty->print_cr("VTables: %d",vtableBlobCount);
1710   tty->print_cr("buffer blobs: %d",bufferBlobCount);
1711   tty->print_cr("deoptimization_stubs: %d",deoptimizationStubCount);
1712   tty->print_cr("uncommon_traps: %d",uncommonTrapStubCount);
1713   tty->print_cr("exception_stubs: %d",exceptionStubCount);
1714   tty->print_cr("safepoint_stubs: %d",safepointStubCount);
1715   tty->print_cr("\nnmethod size distribution");
1716   tty->print_cr("-------------------------------------------------");
1717 
1718   for (int i = 0; i < bucketLimit; i++) {
1719     if (buckets[i] != 0) {
1720       tty->print("%d - %d bytes", i * bucketSize, (i + 1) * bucketSize);
1721       tty->fill_to(40);
1722       tty->print_cr("%d", buckets[i]);
1723     }
1724   }
1725 
1726   FREE_C_HEAP_ARRAY(int, buckets);
1727   print_memory_overhead();
1728 }
1729 
1730 #endif // !PRODUCT
1731 
1732 void CodeCache::print() {
1733   print_summary(tty);
1734 
1735 #ifndef PRODUCT
1736   if (!Verbose) return;
1737 
1738   CodeBlob_sizes live[CompLevel_full_optimization + 1];
1739   CodeBlob_sizes runtimeStub;
1740   CodeBlob_sizes upcallStub;
1741   CodeBlob_sizes uncommonTrapStub;
1742   CodeBlob_sizes deoptimizationStub;
1743   CodeBlob_sizes exceptionStub;
1744   CodeBlob_sizes safepointStub;
1745   CodeBlob_sizes adapter;
1746   CodeBlob_sizes mhAdapter;
1747   CodeBlob_sizes vtableBlob;
1748   CodeBlob_sizes bufferBlob;
1749   CodeBlob_sizes other;
1750 
1751   FOR_ALL_ALLOCABLE_HEAPS(heap) {
1752     FOR_ALL_BLOBS(cb, *heap) {
1753       if (cb->is_nmethod()) {
1754         const int level = cb->as_nmethod()->comp_level();
1755         assert(0 <= level && level <= CompLevel_full_optimization, "Invalid compilation level");
1756         live[level].add(cb);
1757       } else if (cb->is_runtime_stub()) {
1758         runtimeStub.add(cb);
1759       } else if (cb->is_upcall_stub()) {
1760         upcallStub.add(cb);
1761       } else if (cb->is_deoptimization_stub()) {
1762         deoptimizationStub.add(cb);
1763       } else if (cb->is_uncommon_trap_stub()) {
1764         uncommonTrapStub.add(cb);
1765       } else if (cb->is_exception_stub()) {
1766         exceptionStub.add(cb);
1767       } else if (cb->is_safepoint_stub()) {
1768         safepointStub.add(cb);
1769       } else if (cb->is_adapter_blob()) {
1770         adapter.add(cb);
1771       } else if (cb->is_method_handles_adapter_blob()) {
1772         mhAdapter.add(cb);
1773       } else if (cb->is_vtable_blob()) {
1774         vtableBlob.add(cb);
1775       } else if (cb->is_buffer_blob()) {
1776         bufferBlob.add(cb);
1777       } else {
1778         other.add(cb);
1779       }
1780     }
1781   }
1782 
1783   tty->print_cr("nmethod dependency checking time %fs", dependentCheckTime.seconds());
1784 
1785   tty->print_cr("nmethod blobs per compilation level:");
1786   for (int i = 0; i <= CompLevel_full_optimization; i++) {
1787     const char *level_name;
1788     switch (i) {
1789     case CompLevel_none:              level_name = "none";              break;
1790     case CompLevel_simple:            level_name = "simple";            break;
1791     case CompLevel_limited_profile:   level_name = "limited profile";   break;
1792     case CompLevel_full_profile:      level_name = "full profile";      break;
1793     case CompLevel_full_optimization: level_name = "full optimization"; break;
1794     default: level_name = "invalid"; assert(false, "invalid compilation level");
1795     }
1796     tty->print_cr("%s:", level_name);
1797     live[i].print("live");
1798   }
1799 
1800   struct {
1801     const char* name;
1802     const CodeBlob_sizes* sizes;
1803   } non_nmethod_blobs[] = {
1804     { "runtime",        &runtimeStub },
1805     { "upcall",         &upcallStub },
1806     { "uncommon trap",  &uncommonTrapStub },
1807     { "deoptimization", &deoptimizationStub },
1808     { "exception",      &exceptionStub },
1809     { "safepoint",      &safepointStub },
1810     { "adapter",        &adapter },
1811     { "mh_adapter",     &mhAdapter },
1812     { "vtable",         &vtableBlob },
1813     { "buffer blob",    &bufferBlob },
1814     { "other",          &other },
1815   };
1816   tty->print_cr("Non-nmethod blobs:");
1817   for (auto& blob: non_nmethod_blobs) {
1818     blob.sizes->print(blob.name);
1819   }
1820 
1821   if (WizardMode) {
1822     // Print oop map usage.
1823     int code_size = 0;
1824     int number_of_blobs = 0;
1825     int number_of_oop_maps = 0;
1826     int map_size = 0;
1827     FOR_ALL_ALLOCABLE_HEAPS(heap) {
1828       FOR_ALL_BLOBS(cb, *heap) {
1829         number_of_blobs++;
1830         code_size += cb->code_size();
1831         ImmutableOopMapSet* set = cb->oop_maps();
1832         if (set != nullptr) {
1833           number_of_oop_maps += set->count();
1834           map_size           += set->nr_of_bytes();
1835         }
1836       }
1837     }
1838     tty->print_cr("OopMaps");
1839     tty->print_cr("  #blobs    = %d", number_of_blobs);
1840     tty->print_cr("  code size = %d", code_size);
1841     tty->print_cr("  #oop_maps = %d", number_of_oop_maps);
1842     tty->print_cr("  map size  = %d", map_size);
1843   }
1844 
1845 #endif // !PRODUCT
1846 }
1847 
1848 void CodeCache::print_nmethods_on(outputStream* st) {
1849   ResourceMark rm;
1850   int i = 0;
1851   FOR_ALL_ALLOCABLE_HEAPS(heap) {
1852     st->print_cr("-- %s --", (*heap)->name());
1853     FOR_ALL_BLOBS(cb, *heap) {
1854       i++;
1855       if (cb->is_nmethod()) {
1856         nmethod* nm = cb->as_nmethod();
1857         st->print("%4d: ", i);
1858         CompileTask::print(st, nm, nullptr, true, false);
1859 
1860         const char non_entrant_char = (nm->is_not_entrant() ? 'N' : ' ');
1861         st->print_cr(" %c", non_entrant_char);
1862       }
1863     }
1864   }
1865 }
1866 
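     // Print one usage line per code heap; with 'detailed', also print heap bounds, overall
     // totals (for a segmented cache), and the current state of the compiler.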
1867 void CodeCache::print_summary(outputStream* st, bool detailed) {
1868   int full_count = 0;
1869   julong total_used = 0;
1870   julong total_max_used = 0;
1871   julong total_free = 0;
1872   julong total_size = 0;
1873   FOR_ALL_HEAPS(heap_iterator) {
1874     CodeHeap* heap = (*heap_iterator);
1875     size_t total = (heap->high_boundary() - heap->low_boundary());
1876     if (_heaps->length() >= 1) {
1877       st->print("%s:", heap->name());
1878     } else {
1879       st->print("CodeCache:");
1880     }
1881     size_t size = total/K;
1882     size_t used = (total - heap->unallocated_capacity())/K;
1883     size_t max_used = heap->max_allocated_capacity()/K;
1884     size_t free = heap->unallocated_capacity()/K;
1885     total_size += size;
1886     total_used += used;
1887     total_max_used += max_used;
1888     total_free += free;
1889     st->print_cr(" size=%zuKb used=%zu"
1890                  "Kb max_used=%zuKb free=%zuKb",
1891                  size, used, max_used, free);
1892 
1893     if (detailed) {
1894       st->print_cr(" bounds [" INTPTR_FORMAT ", " INTPTR_FORMAT ", " INTPTR_FORMAT "]",
1895                    p2i(heap->low_boundary()),
1896                    p2i(heap->high()),
1897                    p2i(heap->high_boundary()));
1898 
1899       full_count += get_codemem_full_count(heap->code_blob_type());
1900     }
1901   }
1902 
1903   if (detailed) {
1904     if (SegmentedCodeCache) {
1905       st->print("CodeCache:");
1906       st->print_cr(" size=" JULONG_FORMAT "Kb, used=" JULONG_FORMAT
1907                    "Kb, max_used=" JULONG_FORMAT "Kb, free=" JULONG_FORMAT "Kb",
1908                    total_size, total_used, total_max_used, total_free);
1909     }
1910     st->print_cr(" total_blobs=" UINT32_FORMAT ", nmethods=" UINT32_FORMAT
1911                  ", adapters=" UINT32_FORMAT ", full_count=" UINT32_FORMAT,
1912                  blob_count(), nmethod_count(), adapter_count(), full_count);
1913     st->print_cr("Compilation: %s, stopped_count=%d, restarted_count=%d",
1914                  CompileBroker::should_compile_new_jobs() ?
1915                  "enabled" : Arguments::mode() == Arguments::_int ?
1916                  "disabled (interpreter mode)" :
1917                  "disabled (not enough contiguous free space left)",
1918                  CompileBroker::get_total_compiler_stopped_count(),
1919                  CompileBroker::get_total_compiler_restarted_count());
1920   }
1921 }
1922 
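     // Print one line per not-unloading nmethod:
     //   <compile_id> <comp_level> <state> <name>[ jvmci_name=<name>] [<header>, <code_begin> - <code_end>]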
1923 void CodeCache::print_codelist(outputStream* st) {
1924   MutexLocker mu(CodeCache_lock, Mutex::_no_safepoint_check_flag);
1925 
1926   NMethodIterator iter(NMethodIterator::not_unloading);
1927   while (iter.next()) {
1928     nmethod* nm = iter.method();
1929     ResourceMark rm;
1930     char* method_name = nm->method()->name_and_sig_as_C_string();
1931     const char* jvmci_name = nullptr;
1932 #if INCLUDE_JVMCI
1933     jvmci_name = nm->jvmci_name();
1934 #endif
1935     st->print_cr("%d %d %d %s%s%s [" INTPTR_FORMAT ", " INTPTR_FORMAT " - " INTPTR_FORMAT "]",
1936                  nm->compile_id(), nm->comp_level(), nm->get_state(),
1937                  method_name, jvmci_name ? " jvmci_name=" : "", jvmci_name ? jvmci_name : "",
1938                  (intptr_t)nm->header_begin(), (intptr_t)nm->code_begin(), (intptr_t)nm->code_end());
1939   }
1940 }
1941 
1942 void CodeCache::print_layout(outputStream* st) {
1943   MutexLocker mu(CodeCache_lock, Mutex::_no_safepoint_check_flag);
1944   ResourceMark rm;
1945   print_summary(st, true);
1946 }
1947 
1948 void CodeCache::log_state(outputStream* st) {
1949   st->print(" total_blobs='" UINT32_FORMAT "' nmethods='" UINT32_FORMAT "'"
1950             " adapters='" UINT32_FORMAT "' free_code_cache='%zu'",
1951             blob_count(), nmethod_count(), adapter_count(),
1952             unallocated_capacity());
1953 }
1954 
1955 #ifdef LINUX
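     // Write a /tmp/perf-<pid>.map style file for the Linux 'perf' tool: one
     // "START SIZE name" line (hex values) per code blob, so perf can symbolize JIT code.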
1956 void CodeCache::write_perf_map(const char* filename, outputStream* st) {
1957   MutexLocker mu(CodeCache_lock, Mutex::_no_safepoint_check_flag);
1958   char fname[JVM_MAXPATHLEN];
1959   if (filename == nullptr) {
1960     // Invocation outside of jcmd requires pid substitution.
1961     if (!Arguments::copy_expand_pid(DEFAULT_PERFMAP_FILENAME,
1962                                     strlen(DEFAULT_PERFMAP_FILENAME),
1963                                     fname, JVM_MAXPATHLEN)) {
1964       st->print_cr("Warning: Not writing perf map as pid substitution failed.");
1965       return;
1966     }
1967     filename = fname;
1968   }
1969   fileStream fs(filename, "w");
1970   if (!fs.is_open()) {
1971     st->print_cr("Warning: Failed to create %s for perf map", filename);
1972     return;
1973   }
1974 
1975   AllCodeBlobsIterator iter(AllCodeBlobsIterator::not_unloading);
1976   while (iter.next()) {
1977     CodeBlob *cb = iter.method();
1978     ResourceMark rm;
1979     const char* method_name = nullptr;
1980     const char* jvmci_name = nullptr;
1981     if (cb->is_nmethod()) {
1982       nmethod* nm = cb->as_nmethod();
1983       method_name = nm->method()->external_name();
1984 #if INCLUDE_JVMCI
1985       jvmci_name = nm->jvmci_name();
1986 #endif
1987     } else {
1988       method_name = cb->name();
1989     }
1990     fs.print_cr(INTPTR_FORMAT " " INTPTR_FORMAT " %s%s%s",
1991                 (intptr_t)cb->code_begin(), (intptr_t)cb->code_size(),
1992                 method_name, jvmci_name ? " jvmci_name=" : "", jvmci_name ? jvmci_name : "");
1993   }
1994 }
1995 #endif // LINUX
1996 
1997 //---<  BEGIN  >--- CodeHeap State Analytics.
1998 
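     // The functions below back the jcmd Compiler.CodeHeap_Analytics command: aggregate()
     // takes a per-heap snapshot at the given granularity, the print_* functions report
     // from that snapshot, and discard() releases it.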
1999 void CodeCache::aggregate(outputStream *out, size_t granularity) {
2000   FOR_ALL_ALLOCABLE_HEAPS(heap) {
2001     CodeHeapState::aggregate(out, (*heap), granularity);
2002   }
2003 }
2004 
2005 void CodeCache::discard(outputStream *out) {
2006   FOR_ALL_ALLOCABLE_HEAPS(heap) {
2007     CodeHeapState::discard(out, (*heap));
2008   }
2009 }
2010 
2011 void CodeCache::print_usedSpace(outputStream *out) {
2012   FOR_ALL_ALLOCABLE_HEAPS(heap) {
2013     CodeHeapState::print_usedSpace(out, (*heap));
2014   }
2015 }
2016 
2017 void CodeCache::print_freeSpace(outputStream *out) {
2018   FOR_ALL_ALLOCABLE_HEAPS(heap) {
2019     CodeHeapState::print_freeSpace(out, (*heap));
2020   }
2021 }
2022 
2023 void CodeCache::print_count(outputStream *out) {
2024   FOR_ALL_ALLOCABLE_HEAPS(heap) {
2025     CodeHeapState::print_count(out, (*heap));
2026   }
2027 }
2028 
2029 void CodeCache::print_space(outputStream *out) {
2030   FOR_ALL_ALLOCABLE_HEAPS(heap) {
2031     CodeHeapState::print_space(out, (*heap));
2032   }
2033 }
2034 
2035 void CodeCache::print_age(outputStream *out) {
2036   FOR_ALL_ALLOCABLE_HEAPS(heap) {
2037     CodeHeapState::print_age(out, (*heap));
2038   }
2039 }
2040 
2041 void CodeCache::print_names(outputStream *out) {
2042   FOR_ALL_ALLOCABLE_HEAPS(heap) {
2043     CodeHeapState::print_names(out, (*heap));
2044   }
2045 }
2046 //---<  END  >--- CodeHeap State Analytics.