1 /*
   2  * Copyright (c) 1997, 2025, Oracle and/or its affiliates. All rights reserved.
   3  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
   4  *
   5  * This code is free software; you can redistribute it and/or modify it
   6  * under the terms of the GNU General Public License version 2 only, as
   7  * published by the Free Software Foundation.
   8  *
   9  * This code is distributed in the hope that it will be useful, but WITHOUT
  10  * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
  11  * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
  12  * version 2 for more details (a copy is included in the LICENSE file that
  13  * accompanied this code).
  14  *
  15  * You should have received a copy of the GNU General Public License version
  16  * 2 along with this work; if not, write to the Free Software Foundation,
  17  * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
  18  *
  19  * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
  20  * or visit www.oracle.com if you need additional information or have any
  21  * questions.
  22  *
  23  */
  24 
  25 #include "precompiled.hpp"
  26 #include "cds/cdsAccess.hpp"
  27 #include "code/codeBlob.hpp"
  28 #include "code/codeCache.hpp"
  29 #include "code/codeHeapState.hpp"
  30 #include "code/compiledIC.hpp"
  31 #include "code/dependencies.hpp"
  32 #include "code/dependencyContext.hpp"
  33 #include "code/nmethod.hpp"
  34 #include "code/pcDesc.hpp"
  35 #include "code/SCCache.hpp"
  36 #include "compiler/compilationPolicy.hpp"
  37 #include "compiler/compileBroker.hpp"
  38 #include "compiler/compilerDefinitions.inline.hpp"
  39 #include "compiler/oopMap.hpp"
  40 #include "gc/shared/barrierSetNMethod.hpp"
  41 #include "gc/shared/classUnloadingContext.hpp"
  42 #include "gc/shared/collectedHeap.hpp"
  43 #include "jfr/jfrEvents.hpp"
  44 #include "jvm_io.h"
  45 #include "logging/log.hpp"
  46 #include "logging/logStream.hpp"
  47 #include "memory/allocation.inline.hpp"
  48 #include "memory/iterator.hpp"
  49 #include "memory/memoryReserver.hpp"
  50 #include "memory/resourceArea.hpp"
  51 #include "memory/universe.hpp"
  52 #include "oops/method.inline.hpp"
  53 #include "oops/objArrayOop.hpp"
  54 #include "oops/oop.inline.hpp"
  55 #include "oops/verifyOopClosure.hpp"
  56 #include "runtime/arguments.hpp"
  57 #include "runtime/atomic.hpp"
  58 #include "runtime/deoptimization.hpp"
  59 #include "runtime/globals_extension.hpp"
  60 #include "runtime/handles.inline.hpp"
  61 #include "runtime/icache.hpp"
  62 #include "runtime/init.hpp"
  63 #include "runtime/java.hpp"
  64 #include "runtime/mutexLocker.hpp"
  65 #include "runtime/os.inline.hpp"
  66 #include "runtime/safepointVerifiers.hpp"
  67 #include "runtime/vmThread.hpp"
  68 #include "sanitizers/leak.hpp"
  69 #include "services/memoryService.hpp"
  70 #include "utilities/align.hpp"
  71 #include "utilities/vmError.hpp"
  72 #include "utilities/xmlstream.hpp"
  73 #ifdef COMPILER1
  74 #include "c1/c1_Compilation.hpp"
  75 #include "c1/c1_Compiler.hpp"
  76 #endif
  77 #ifdef COMPILER2
  78 #include "opto/c2compiler.hpp"
  79 #include "opto/compile.hpp"
  80 #include "opto/node.hpp"
  81 #endif
  82 
  83 // Helper class for printing in CodeCache
  84 class CodeBlob_sizes {
  85  private:
  86   int count;
  87   int total_size;
  88   int header_size;
  89   int code_size;
  90   int stub_size;
  91   int relocation_size;
  92   int scopes_oop_size;
  93   int scopes_metadata_size;
  94   int scopes_data_size;
  95   int scopes_pcs_size;
  96 
  97  public:
  98   CodeBlob_sizes() {
  99     count            = 0;
 100     total_size       = 0;
 101     header_size      = 0;
 102     code_size        = 0;
 103     stub_size        = 0;
 104     relocation_size  = 0;
 105     scopes_oop_size  = 0;
 106     scopes_metadata_size  = 0;
 107     scopes_data_size = 0;
 108     scopes_pcs_size  = 0;
 109   }
 110 
 111   int total() const                              { return total_size; }
 112   bool is_empty() const                          { return count == 0; }
 113 
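       // Prints a one-line summary for this bucket; e.g. (illustrative values only):
       //   #25 nmethods = 1024K (hdr 40K 3%, loc 80K 7%, code 600K 58%, stub 100K 9%, ...)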
 114   void print(const char* title) const {
 115     if (is_empty()) {
 116       tty->print_cr(" #%d %s = %dK",
 117                     count,
 118                     title,
 119                     total()                 / (int)K);
 120     } else {
 121       tty->print_cr(" #%d %s = %dK (hdr %dK %d%%, loc %dK %d%%, code %dK %d%%, stub %dK %d%%, [oops %dK %d%%, metadata %dK %d%%, data %dK %d%%, pcs %dK %d%%])",
 122                     count,
 123                     title,
 124                     total()                 / (int)K,
 125                     header_size             / (int)K,
 126                     header_size             * 100 / total_size,
 127                     relocation_size         / (int)K,
 128                     relocation_size         * 100 / total_size,
 129                     code_size               / (int)K,
 130                     code_size               * 100 / total_size,
 131                     stub_size               / (int)K,
 132                     stub_size               * 100 / total_size,
 133                     scopes_oop_size         / (int)K,
 134                     scopes_oop_size         * 100 / total_size,
 135                     scopes_metadata_size    / (int)K,
 136                     scopes_metadata_size    * 100 / total_size,
 137                     scopes_data_size        / (int)K,
 138                     scopes_data_size        * 100 / total_size,
 139                     scopes_pcs_size         / (int)K,
 140                     scopes_pcs_size         * 100 / total_size);
 141     }
 142   }
 143 
 144   void add(CodeBlob* cb) {
 145     count++;
 146     total_size       += cb->size();
 147     header_size      += cb->header_size();
 148     relocation_size  += cb->relocation_size();
 149     if (cb->is_nmethod()) {
 150       nmethod* nm = cb->as_nmethod_or_null();
 151       code_size        += nm->insts_size();
 152       stub_size        += nm->stub_size();
 153 
 154       scopes_oop_size  += nm->oops_size();
 155       scopes_metadata_size  += nm->metadata_size();
 156       scopes_data_size += nm->scopes_data_size();
 157       scopes_pcs_size  += nm->scopes_pcs_size();
 158     } else {
 159       code_size        += cb->code_size();
 160     }
 161   }
 162 };
 163 
 164 // Iterate over all CodeHeaps
 165 #define FOR_ALL_HEAPS(heap) for (GrowableArrayIterator<CodeHeap*> heap = _heaps->begin(); heap != _heaps->end(); ++heap)
 166 #define FOR_ALL_ALLOCABLE_HEAPS(heap) for (GrowableArrayIterator<CodeHeap*> heap = _allocable_heaps->begin(); heap != _allocable_heaps->end(); ++heap)
 167 
 168 // Iterate over all CodeBlobs (cb) on the given CodeHeap
 169 #define FOR_ALL_BLOBS(cb, heap) for (CodeBlob* cb = first_blob(heap); cb != nullptr; cb = next_blob(heap, cb))
 170 
 171 address CodeCache::_low_bound = nullptr;
 172 address CodeCache::_high_bound = nullptr;
 173 volatile int CodeCache::_number_of_nmethods_with_dependencies = 0;
 174 ExceptionCache* volatile CodeCache::_exception_cache_purge_list = nullptr;
 175 
 176 static ReservedSpace _cds_code_space;
 177 
 178 // Initialize arrays of CodeHeap subsets
 179 GrowableArray<CodeHeap*>* CodeCache::_heaps = new(mtCode) GrowableArray<CodeHeap*> (static_cast<int>(CodeBlobType::All), mtCode);
 180 GrowableArray<CodeHeap*>* CodeCache::_nmethod_heaps = new(mtCode) GrowableArray<CodeHeap*> (static_cast<int>(CodeBlobType::All), mtCode);
 181 GrowableArray<CodeHeap*>* CodeCache::_allocable_heaps = new(mtCode) GrowableArray<CodeHeap*> (static_cast<int>(CodeBlobType::All), mtCode);
 182 
 183 static void check_min_size(const char* codeheap, size_t size, size_t required_size) {
 184   if (size < required_size) {
 185     log_debug(codecache)("Code heap (%s) size " SIZE_FORMAT "K below required minimal size " SIZE_FORMAT "K",
 186                          codeheap, size/K, required_size/K);
 187     err_msg title("Not enough space in %s to run VM", codeheap);
 188     err_msg message(SIZE_FORMAT "K < " SIZE_FORMAT "K", size/K, required_size/K);
 189     vm_exit_during_initialization(title, message);
 190   }
 191 }
 192 
 193 struct CodeHeapInfo {
 194   size_t size;
 195   bool set;
 196   bool enabled;
 197 };
 198 
 199 static void set_size_of_unset_code_heap(CodeHeapInfo* heap, size_t available_size, size_t used_size, size_t min_size) {
 200   assert(!heap->set, "sanity");
 201   heap->size = (available_size > (used_size + min_size)) ? (available_size - used_size) : min_size;
 202 }
 203 
 204 void CodeCache::initialize_heaps() {
 205   CodeHeapInfo non_nmethod = {NonNMethodCodeHeapSize, FLAG_IS_CMDLINE(NonNMethodCodeHeapSize), true};
 206   CodeHeapInfo profiled = {ProfiledCodeHeapSize, FLAG_IS_CMDLINE(ProfiledCodeHeapSize), true};
 207   CodeHeapInfo non_profiled = {NonProfiledCodeHeapSize, FLAG_IS_CMDLINE(NonProfiledCodeHeapSize), true};
 208 
 209   const bool cache_size_set   = FLAG_IS_CMDLINE(ReservedCodeCacheSize);
 210   const size_t ps             = page_size(false, 8);
 211   const size_t min_size       = MAX2(os::vm_allocation_granularity(), ps);
 212   const size_t min_cache_size = CodeCacheMinimumUseSpace DEBUG_ONLY(* 3); // Make sure we have enough space for VM internal code
 213   size_t cache_size           = align_up(ReservedCodeCacheSize, min_size);
 214 
 215   // Prerequisites
 216   if (!heap_available(CodeBlobType::MethodProfiled)) {
 217     // For compatibility reasons, disabled tiered compilation overrides
 218     // segment size even if it is set explicitly.
 219     non_profiled.size += profiled.size;
 220     // Profiled code heap is not available, forcibly set size to 0
 221     profiled.size = 0;
 222     profiled.set = true;
 223     profiled.enabled = false;
 224   }
 225 
 226   assert(heap_available(CodeBlobType::MethodNonProfiled), "MethodNonProfiled heap is always available for segmented code heap");
 227 
 228   size_t compiler_buffer_size = 0;
 229   COMPILER1_PRESENT(compiler_buffer_size += CompilationPolicy::c1_count() * Compiler::code_buffer_size());
 231   COMPILER2_PRESENT(compiler_buffer_size += (CompilationPolicy::c2_count() + CompilationPolicy::c3_count()) * C2Compiler::initial_code_buffer_size());
 232 
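       // Unless NonNMethodCodeHeapSize was set on the command line, grow the non-nmethod
       // segment so it can also hold the compilers' temporary code buffers.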
 233   if (!non_nmethod.set) {
 234     non_nmethod.size += compiler_buffer_size;
 235     // Further down, just before FLAG_SET_ERGO(), all segment sizes are
 236     // aligned down to the next lower multiple of min_size. For large page
 237     // sizes, this may result in (non_nmethod.size == 0) which is not acceptable.
 238     // Therefore, force non_nmethod.size to at least min_size.
 239     non_nmethod.size = MAX2(non_nmethod.size, min_size);
 240   }
 241 
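       // Segment sizing when sizes were not given explicitly: whatever is left after the
       // non-nmethod segment is split evenly between the profiled and non-profiled heaps;
       // if only one of the two was given, the other receives the remainder (but never
       // less than min_size).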
 242   if (!profiled.set && !non_profiled.set) {
 243     non_profiled.size = profiled.size = (cache_size > non_nmethod.size + 2 * min_size) ?
 244                                         (cache_size - non_nmethod.size) / 2 : min_size;
 245   }
 246 
 247   if (profiled.set && !non_profiled.set) {
 248     set_size_of_unset_code_heap(&non_profiled, cache_size, non_nmethod.size + profiled.size, min_size);
 249   }
 250 
 251   if (!profiled.set && non_profiled.set) {
 252     set_size_of_unset_code_heap(&profiled, cache_size, non_nmethod.size + non_profiled.size, min_size);
 253   }
 254 
 255   // Compatibility.
 256   size_t non_nmethod_min_size = min_cache_size + compiler_buffer_size;
 257   if (!non_nmethod.set && profiled.set && non_profiled.set) {
 258     set_size_of_unset_code_heap(&non_nmethod, cache_size, profiled.size + non_profiled.size, non_nmethod_min_size);
 259   }
 260 
 261   size_t total = non_nmethod.size + profiled.size + non_profiled.size;
 262   if (total != cache_size && !cache_size_set) {
 263     log_info(codecache)("ReservedCodeCache size " SIZE_FORMAT "K changed to total segments size NonNMethod "
 264                         SIZE_FORMAT "K NonProfiled " SIZE_FORMAT "K Profiled " SIZE_FORMAT "K = " SIZE_FORMAT "K",
 265                         cache_size/K, non_nmethod.size/K, non_profiled.size/K, profiled.size/K, total/K);
 266     // Adjust ReservedCodeCacheSize as necessary because it was not set explicitly
 267     cache_size = total;
 268   }
 269 
 270   log_debug(codecache)("Initializing code heaps ReservedCodeCache " SIZE_FORMAT "K NonNMethod " SIZE_FORMAT "K"
 271                        " NonProfiled " SIZE_FORMAT "K Profiled " SIZE_FORMAT "K",
 272                        cache_size/K, non_nmethod.size/K, non_profiled.size/K, profiled.size/K);
 273 
 274   // Validation
 275   // Check minimal required sizes
 276   check_min_size("non-nmethod code heap", non_nmethod.size, non_nmethod_min_size);
 277   if (profiled.enabled) {
 278     check_min_size("profiled code heap", profiled.size, min_size);
 279   }
 280   if (non_profiled.enabled) { // non_profiled.enabled is always ON for segmented code heap, leave it checked for clarity
 281     check_min_size("non-profiled code heap", non_profiled.size, min_size);
 282   }
 283   if (cache_size_set) {
 284     check_min_size("reserved code cache", cache_size, min_cache_size);
 285   }
 286 
 287   // ReservedCodeCacheSize was set explicitly, so report an error and abort if it doesn't match the segment sizes
 288   if (total != cache_size && cache_size_set) {
 289     err_msg message("NonNMethodCodeHeapSize (" SIZE_FORMAT "K)", non_nmethod.size/K);
 290     if (profiled.enabled) {
 291       message.append(" + ProfiledCodeHeapSize (" SIZE_FORMAT "K)", profiled.size/K);
 292     }
 293     if (non_profiled.enabled) {
 294       message.append(" + NonProfiledCodeHeapSize (" SIZE_FORMAT "K)", non_profiled.size/K);
 295     }
 296     message.append(" = " SIZE_FORMAT "K", total/K);
 297     message.append((total > cache_size) ? " is greater than " : " is less than ");
 298     message.append("ReservedCodeCacheSize (" SIZE_FORMAT "K).", cache_size/K);
 299 
 300     vm_exit_during_initialization("Invalid code heap sizes", message);
 301   }
 302 
 303   // Compatibility. Print warning if using large pages but not able to use the size given
 304   if (UseLargePages) {
 305     const size_t lg_ps = page_size(false, 1);
 306     if (ps < lg_ps) {
 307       log_warning(codecache)("Code cache size too small for " PROPERFMT " pages. "
 308                              "Reverting to smaller page size (" PROPERFMT ").",
 309                              PROPERFMTARGS(lg_ps), PROPERFMTARGS(ps));
 310     }
 311   }
 312 
 313   // Note: if large page support is enabled, min_size is at least the large
 314   // page size. This ensures that the code cache is covered by large pages.
 315   non_profiled.size += non_nmethod.size & alignment_mask(min_size);
 316   non_profiled.size += profiled.size & alignment_mask(min_size);
 317   non_nmethod.size = align_down(non_nmethod.size, min_size);
 318   profiled.size = align_down(profiled.size, min_size);
 319   non_profiled.size = align_down(non_profiled.size, min_size);
 320 
 321   FLAG_SET_ERGO(NonNMethodCodeHeapSize, non_nmethod.size);
 322   FLAG_SET_ERGO(ProfiledCodeHeapSize, profiled.size);
 323   FLAG_SET_ERGO(NonProfiledCodeHeapSize, non_profiled.size);
 324   FLAG_SET_ERGO(ReservedCodeCacheSize, cache_size);
 325 
 326   const size_t cds_code_size = align_up(CDSAccess::get_cached_code_size(), min_size);
 327   cache_size += cds_code_size;
 328 
 329   ReservedSpace rs = reserve_heap_memory(cache_size, ps);
 330 
 331   // Register CodeHeaps with LSan as we sometimes embed pointers to malloc memory.
 332   LSAN_REGISTER_ROOT_REGION(rs.base(), rs.size());
 333 
 334   size_t offset = 0;
 335   if (cds_code_size > 0) {
 336     // FIXME: use CodeHeapInfo for this hack ...
 337     _cds_code_space = rs.partition(offset, cds_code_size);
 338     offset += cds_code_size;
 339   }
 340 
 341   if (profiled.enabled) {
 342     ReservedSpace profiled_space = rs.partition(offset, profiled.size);
 343     offset += profiled.size;
 344     // Tier 2 and tier 3 (profiled) methods
 345     add_heap(profiled_space, "CodeHeap 'profiled nmethods'", CodeBlobType::MethodProfiled);
 346   }
 347 
 348   ReservedSpace non_method_space = rs.partition(offset, non_nmethod.size);
 349   offset += non_nmethod.size;
 350   // Non-nmethods (stubs, adapters, ...)
 351   add_heap(non_method_space, "CodeHeap 'non-nmethods'", CodeBlobType::NonNMethod);
 352 
 353   if (non_profiled.enabled) {
 354     ReservedSpace non_profiled_space  = rs.partition(offset, non_profiled.size);
 355     // Tier 1 and tier 4 (non-profiled) methods and native methods
 356     add_heap(non_profiled_space, "CodeHeap 'non-profiled nmethods'", CodeBlobType::MethodNonProfiled);
 357   }
 358 }
 359 
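     // Map the archived (CDS) code into the space reserved for it above. Returns the base
     // address of that space, or nullptr if there is nothing to map or mapping fails.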
 360 void* CodeCache::map_cached_code() {
 361   if (_cds_code_space.size() > 0 && CDSAccess::map_cached_code(_cds_code_space)) {
 362     return _cds_code_space.base();
 363   } else {
 364     return nullptr;
 365   }
 366 }
 367 
 368 size_t CodeCache::page_size(bool aligned, size_t min_pages) {
 369   return aligned ? os::page_size_for_region_aligned(ReservedCodeCacheSize, min_pages) :
 370                    os::page_size_for_region_unaligned(ReservedCodeCacheSize, min_pages);
 371 }
 372 
 373 ReservedSpace CodeCache::reserve_heap_memory(size_t size, size_t rs_ps) {
 374   // Align and reserve space for code cache
 375   const size_t rs_align = MAX2(rs_ps, os::vm_allocation_granularity());
 376   const size_t rs_size = align_up(size, rs_align);
 377 
 378   ReservedSpace rs = CodeMemoryReserver::reserve(rs_size, rs_align, rs_ps);
 379   if (!rs.is_reserved()) {
 380     vm_exit_during_initialization(err_msg("Could not reserve enough space for code cache (" SIZE_FORMAT "K)",
 381                                           rs_size/K));
 382   }
 383 
 384   // Initialize bounds
 385   _low_bound = (address)rs.base();
 386   _high_bound = _low_bound + rs.size();
 387   return rs;
 388 }
 389 
 390 // Heaps available for allocation
 391 bool CodeCache::heap_available(CodeBlobType code_blob_type) {
 392   if (!SegmentedCodeCache) {
 393     // No segmentation: use a single code heap
 394     return (code_blob_type == CodeBlobType::All);
 395   } else if (CompilerConfig::is_interpreter_only()) {
 396     // Interpreter only: we don't need any method code heaps
 397     return (code_blob_type == CodeBlobType::NonNMethod);
 398   } else if (CompilerConfig::is_c1_profiling()) {
 399     // Tiered compilation: use all code heaps
 400     return (code_blob_type < CodeBlobType::All);
 401   } else {
 402     // No TieredCompilation: we only need the non-nmethod and non-profiled code heap
 403     return (code_blob_type == CodeBlobType::NonNMethod) ||
 404            (code_blob_type == CodeBlobType::MethodNonProfiled);
 405   }
 406 }
 407 
 408 const char* CodeCache::get_code_heap_flag_name(CodeBlobType code_blob_type) {
 409   switch(code_blob_type) {
 410   case CodeBlobType::NonNMethod:
 411     return "NonNMethodCodeHeapSize";
 412     break;
 413   case CodeBlobType::MethodNonProfiled:
 414     return "NonProfiledCodeHeapSize";
 415     break;
 416   case CodeBlobType::MethodProfiled:
 417     return "ProfiledCodeHeapSize";
 418     break;
 419   default:
 420     ShouldNotReachHere();
 421     return nullptr;
 422   }
 423 }
 424 
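     // Sort helper for the heap lists: order by CodeBlobType first, breaking ties by
     // address, so insert_sorted() has a total order over heaps.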
 425 int CodeCache::code_heap_compare(CodeHeap* const &lhs, CodeHeap* const &rhs) {
 426   if (lhs->code_blob_type() == rhs->code_blob_type()) {
 427     return (lhs > rhs) ? 1 : ((lhs < rhs) ? -1 : 0);
 428   } else {
 429     return static_cast<int>(lhs->code_blob_type()) - static_cast<int>(rhs->code_blob_type());
 430   }
 431 }
 432 
 433 void CodeCache::add_heap(CodeHeap* heap) {
 434   assert(!Universe::is_fully_initialized(), "late heap addition?");
 435 
 436   _heaps->insert_sorted<code_heap_compare>(heap);
 437 
 438   CodeBlobType type = heap->code_blob_type();
 439   if (code_blob_type_accepts_nmethod(type)) {
 440     _nmethod_heaps->insert_sorted<code_heap_compare>(heap);
 441   }
 442   if (code_blob_type_accepts_allocable(type)) {
 443     _allocable_heaps->insert_sorted<code_heap_compare>(heap);
 444   }
 445 }
 446 
 447 void CodeCache::add_heap(ReservedSpace rs, const char* name, CodeBlobType code_blob_type) {
 448   // Check if heap is needed
 449   if (!heap_available(code_blob_type)) {
 450     return;
 451   }
 452 
 453   // Create CodeHeap
 454   CodeHeap* heap = new CodeHeap(name, code_blob_type);
 455   add_heap(heap);
 456 
 457   // Reserve Space
 458   size_t size_initial = MIN2((size_t)InitialCodeCacheSize, rs.size());
 459   size_initial = align_up(size_initial, rs.page_size());
 460   if (!heap->reserve(rs, size_initial, CodeCacheSegmentSize)) {
 461     vm_exit_during_initialization(err_msg("Could not reserve enough space in %s (" SIZE_FORMAT "K)",
 462                                           heap->name(), size_initial/K));
 463   }
 464 
 465   // Register the CodeHeap
 466   MemoryService::add_code_heap_memory_pool(heap, name);
 467 }
 468 
 469 CodeHeap* CodeCache::get_code_heap_containing(void* start) {
 470   FOR_ALL_HEAPS(heap) {
 471     if ((*heap)->contains(start)) {
 472       return *heap;
 473     }
 474   }
 475   return nullptr;
 476 }
 477 
 478 CodeHeap* CodeCache::get_code_heap(const void* cb) {
 479   assert(cb != nullptr, "CodeBlob is null");
 480   FOR_ALL_HEAPS(heap) {
 481     if ((*heap)->contains(cb)) {
 482       return *heap;
 483     }
 484   }
 485   ShouldNotReachHere();
 486   return nullptr;
 487 }
 488 
 489 CodeHeap* CodeCache::get_code_heap(CodeBlobType code_blob_type) {
 490   FOR_ALL_HEAPS(heap) {
 491     if ((*heap)->accepts(code_blob_type)) {
 492       return *heap;
 493     }
 494   }
 495   return nullptr;
 496 }
 497 
 498 CodeBlob* CodeCache::first_blob(CodeHeap* heap) {
 499   assert_locked_or_safepoint(CodeCache_lock);
 500   assert(heap != nullptr, "heap is null");
 501   return (CodeBlob*)heap->first();
 502 }
 503 
 504 CodeBlob* CodeCache::first_blob(CodeBlobType code_blob_type) {
 505   if (heap_available(code_blob_type)) {
 506     return first_blob(get_code_heap(code_blob_type));
 507   } else {
 508     return nullptr;
 509   }
 510 }
 511 
 512 CodeBlob* CodeCache::next_blob(CodeHeap* heap, CodeBlob* cb) {
 513   assert_locked_or_safepoint(CodeCache_lock);
 514   assert(heap != nullptr, "heap is null");
 515   return (CodeBlob*)heap->next(cb);
 516 }
 517 
 518 /**
 519  * Do not seize the CodeCache lock here--if the caller has not
 520  * already done so, we are going to lose bigtime, since the code
 521  * cache will contain a garbage CodeBlob until the caller can
 522  * run the constructor for the CodeBlob subclass he is busy
 523  * instantiating.
 524  */
 525 CodeBlob* CodeCache::allocate(uint size, CodeBlobType code_blob_type, bool handle_alloc_failure, CodeBlobType orig_code_blob_type) {
 526   assert_locked_or_safepoint(CodeCache_lock);
 527   assert(size > 0, "Code cache allocation request must be > 0");
 528   if (size == 0) {
 529     return nullptr;
 530   }
 531   CodeBlob* cb = nullptr;
 532 
 533   // Get CodeHeap for the given CodeBlobType
 534   CodeHeap* heap = get_code_heap(code_blob_type);
 535   assert(heap != nullptr, "heap is null");
 536 
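       // Allocation loop: try to allocate from the target heap; on failure, expand the heap
       // by CodeCacheExpansionSize and retry. If expansion fails, optionally fall through to
       // another heap (segmented cache only) or, if requested, report a full code cache.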
 537   while (true) {
 538     cb = (CodeBlob*)heap->allocate(size);
 539     if (cb != nullptr) break;
 540     if (!heap->expand_by(CodeCacheExpansionSize)) {
 541       // Save original type for error reporting
 542       if (orig_code_blob_type == CodeBlobType::All) {
 543         orig_code_blob_type = code_blob_type;
 544       }
 545       // Expansion failed
 546       if (SegmentedCodeCache) {
 547         // Fallback solution: Try to store code in another code heap.
 548         // NonNMethod -> MethodNonProfiled -> MethodProfiled (-> MethodNonProfiled)
 549         CodeBlobType type = code_blob_type;
 550         switch (type) {
 551         case CodeBlobType::NonNMethod:
 552           type = CodeBlobType::MethodNonProfiled;
 553           break;
 554         case CodeBlobType::MethodNonProfiled:
 555           type = CodeBlobType::MethodProfiled;
 556           break;
 557         case CodeBlobType::MethodProfiled:
 558           // Avoid loop if we already tried that code heap
 559           if (type == orig_code_blob_type) {
 560             type = CodeBlobType::MethodNonProfiled;
 561           }
 562           break;
 563         default:
 564           break;
 565         }
 566         if (type != code_blob_type && type != orig_code_blob_type && heap_available(type)) {
 567           if (PrintCodeCacheExtension) {
 568             tty->print_cr("Extension of %s failed. Trying to allocate in %s.",
 569                           heap->name(), get_code_heap(type)->name());
 570           }
 571           return allocate(size, type, handle_alloc_failure, orig_code_blob_type);
 572         }
 573       }
 574       if (handle_alloc_failure) {
 575         MutexUnlocker mu(CodeCache_lock, Mutex::_no_safepoint_check_flag);
 576         CompileBroker::handle_full_code_cache(orig_code_blob_type);
 577       }
 578       return nullptr;
 579     } else {
 580       OrderAccess::release(); // ensure heap expansion is visible to an asynchronous observer (e.g. CodeHeapPool::get_memory_usage())
 581     }
 582     if (PrintCodeCacheExtension) {
 583       ResourceMark rm;
 584       if (_nmethod_heaps->length() >= 1) {
 585         tty->print("%s", heap->name());
 586       } else {
 587         tty->print("CodeCache");
 588       }
 589       tty->print_cr(" extended to [" INTPTR_FORMAT ", " INTPTR_FORMAT "] (%zd bytes)",
 590                     (intptr_t)heap->low_boundary(), (intptr_t)heap->high(),
 591                     (address)heap->high() - (address)heap->low_boundary());
 592     }
 593   }
 594   print_trace("allocation", cb, size);
 595   return cb;
 596 }
 597 
 598 void CodeCache::free(CodeBlob* cb) {
 599   assert_locked_or_safepoint(CodeCache_lock);
 600   CodeHeap* heap = get_code_heap(cb);
 601   print_trace("free", cb);
 602   if (cb->is_nmethod()) {
 603     heap->set_nmethod_count(heap->nmethod_count() - 1);
 604     if (((nmethod *)cb)->has_dependencies()) {
 605       Atomic::dec(&_number_of_nmethods_with_dependencies);
 606     }
 607   }
 608   if (cb->is_adapter_blob()) {
 609     heap->set_adapter_count(heap->adapter_count() - 1);
 610   }
 611 
 612   cb->~CodeBlob();
 613   // Get heap for given CodeBlob and deallocate
 614   heap->deallocate(cb);
 615 
 616   assert(heap->blob_count() >= 0, "sanity check");
 617 }
 618 
 619 void CodeCache::free_unused_tail(CodeBlob* cb, size_t used) {
 620   assert_locked_or_safepoint(CodeCache_lock);
 621   guarantee(cb->is_buffer_blob() && strncmp("Interpreter", cb->name(), 11) == 0, "Only possible for interpreter!");
 622   print_trace("free_unused_tail", cb);
 623 
 624   // We also have to account for the extra space (i.e. header) used by the CodeBlob
 625   // which provides the memory (see BufferBlob::create() in codeBlob.cpp).
 626   used += CodeBlob::align_code_offset(cb->header_size());
 627 
 628   // Get heap for given CodeBlob and deallocate its unused tail
 629   get_code_heap(cb)->deallocate_tail(cb, used);
 630   // Adjust the sizes of the CodeBlob
 631   cb->adjust_size(used);
 632 }
 633 
 634 void CodeCache::commit(CodeBlob* cb) {
 635   // this is called by nmethod::nmethod, which must already own CodeCache_lock
 636   assert_locked_or_safepoint(CodeCache_lock);
 637   CodeHeap* heap = get_code_heap(cb);
 638   if (cb->is_nmethod()) {
 639     heap->set_nmethod_count(heap->nmethod_count() + 1);
 640     if (((nmethod *)cb)->has_dependencies()) {
 641       Atomic::inc(&_number_of_nmethods_with_dependencies);
 642     }
 643   }
 644   if (cb->is_adapter_blob()) {
 645     heap->set_adapter_count(heap->adapter_count() + 1);
 646   }
 647 }
 648 
 649 bool CodeCache::contains(void *p) {
 650   // S390 uses contains() in current_frame(), which is used before
 651   // code cache initialization if NativeMemoryTracking=detail is set.
 652   S390_ONLY(if (_heaps == nullptr) return false;)
 653   // It should be ok to call contains without holding a lock.
 654   FOR_ALL_HEAPS(heap) {
 655     if ((*heap)->contains(p)) {
 656       return true;
 657     }
 658   }
 659   return false;
 660 }
 661 
 662 bool CodeCache::contains(nmethod *nm) {
 663   return contains((void *)nm);
 664 }
 665 
 666 // This method is safe to call without holding the CodeCache_lock. It only depends on the _segmap to contain
 667 // valid indices, which it will always do, as long as the CodeBlob is not in the process of being recycled.
 668 CodeBlob* CodeCache::find_blob(void* start) {
 669   // NMT can walk the stack before code cache is created
 670   if (_heaps != nullptr) {
 671     CodeHeap* heap = get_code_heap_containing(start);
 672     if (heap != nullptr) {
 673       return heap->find_blob(start);
 674     }
 675   }
 676   return nullptr;
 677 }
 678 
 679 nmethod* CodeCache::find_nmethod(void* start) {
 680   CodeBlob* cb = find_blob(start);
 681   assert(cb == nullptr || cb->is_nmethod(), "did not find an nmethod");
 682   return (nmethod*)cb;
 683 }
 684 
 685 void CodeCache::blobs_do(void f(CodeBlob* nm)) {
 686   assert_locked_or_safepoint(CodeCache_lock);
 687   FOR_ALL_HEAPS(heap) {
 688     FOR_ALL_BLOBS(cb, *heap) {
 689       f(cb);
 690     }
 691   }
 692 }
 693 
 694 void CodeCache::nmethods_do(void f(nmethod* nm)) {
 695   assert_locked_or_safepoint(CodeCache_lock);
 696   NMethodIterator iter(NMethodIterator::all);
 697   while(iter.next()) {
 698     f(iter.method());
 699   }
 700 }
 701 
 702 void CodeCache::nmethods_do(NMethodClosure* cl) {
 703   assert_locked_or_safepoint(CodeCache_lock);
 704   NMethodIterator iter(NMethodIterator::all);
 705   while(iter.next()) {
 706     cl->do_nmethod(iter.method());
 707   }
 708 }
 709 
 710 void CodeCache::metadata_do(MetadataClosure* f) {
 711   assert_locked_or_safepoint(CodeCache_lock);
 712   NMethodIterator iter(NMethodIterator::all);
 713   while(iter.next()) {
 714     iter.method()->metadata_do(f);
 715   }
 716 }
 717 
 718 // Calculate the number of GCs after which an nmethod is expected to have been
 719 // used in order to not be classed as cold.
 720 void CodeCache::update_cold_gc_count() {
 721   if (!MethodFlushing || !UseCodeCacheFlushing || NmethodSweepActivity == 0) {
 722     // No aging
 723     return;
 724   }
 725 
 726   size_t last_used = _last_unloading_used;
 727   double last_time = _last_unloading_time;
 728 
 729   double time = os::elapsedTime();
 730 
 731   size_t free = unallocated_capacity();
 732   size_t max = max_capacity();
 733   size_t used = max - free;
 734   double gc_interval = time - last_time;
 735 
 736   _unloading_threshold_gc_requested = false;
 737   _last_unloading_time = time;
 738   _last_unloading_used = used;
 739 
 740   if (last_time == 0.0) {
 741     // The first GC doesn't have enough information to make good
 742     // decisions, so just keep everything afloat
 743     log_info(codecache)("Unknown code cache pressure; don't age code");
 744     return;
 745   }
 746 
 747   if (gc_interval <= 0.0 || last_used >= used) {
 748     // Dodge corner cases where there is no pressure or negative pressure
 749     // on the code cache. Just don't unload when this happens.
 750     _cold_gc_count = INT_MAX;
 751     log_info(codecache)("No code cache pressure; don't age code");
 752     return;
 753   }
 754 
 755   double allocation_rate = (used - last_used) / gc_interval;
 756 
 757   _unloading_allocation_rates.add(allocation_rate);
 758   _unloading_gc_intervals.add(gc_interval);
 759 
 760   size_t aggressive_sweeping_free_threshold = StartAggressiveSweepingAt / 100.0 * max;
 761   if (free < aggressive_sweeping_free_threshold) {
 762     // We are already in the red zone; be very aggressive to avoid disaster
 763     // But not more aggressive than 2. This ensures that an nmethod must
 764     // have been unused at least between two GCs to be considered cold still.
 765     _cold_gc_count = 2;
 766     log_info(codecache)("Code cache critically low; use aggressive aging");
 767     return;
 768   }
 769 
 770   // The code cache has an expected time for cold nmethods to "time out"
 771   // when they have not been used. The time for nmethods to time out
 772   // depends on how long we expect we can keep allocating code until
 773   // aggressive sweeping starts, based on sampled allocation rates.
 774   double average_gc_interval = _unloading_gc_intervals.avg();
 775   double average_allocation_rate = _unloading_allocation_rates.avg();
 776   double time_to_aggressive = ((double)(free - aggressive_sweeping_free_threshold)) / average_allocation_rate;
 777   double cold_timeout = time_to_aggressive / NmethodSweepActivity;
 778 
 779   // Convert time to GC cycles, and crop at INT_MAX. The reason for
 780   // that is that the _cold_gc_count will be added to an epoch number
 781   // and that addition must not overflow, or we can crash the VM.
 782   // But not more aggressive than 2. This ensures that an nmethod must
 783   // have been unused at least between two GCs to be considered cold still.
 784   _cold_gc_count = MAX2(MIN2((uint64_t)(cold_timeout / average_gc_interval), (uint64_t)INT_MAX), (uint64_t)2);
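       // Example (illustrative numbers only): 100M free above the aggressive-sweeping
       // threshold at an allocation rate of 1M/s gives time_to_aggressive = 100s; with
       // NmethodSweepActivity = 10 the cold timeout is 10s, and with an average GC interval
       // of 2s nmethods are considered cold after 5 GCs.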
 785 
 786   double used_ratio = double(used) / double(max);
 787   double last_used_ratio = double(last_used) / double(max);
 788   log_info(codecache)("Allocation rate: %.3f KB/s, time to aggressive unloading: %.3f s, cold timeout: %.3f s, cold gc count: " UINT64_FORMAT
 789                       ", used: %.3f MB (%.3f%%), last used: %.3f MB (%.3f%%), gc interval: %.3f s",
 790                       average_allocation_rate / K, time_to_aggressive, cold_timeout, _cold_gc_count,
 791                       double(used) / M, used_ratio * 100.0, double(last_used) / M, last_used_ratio * 100.0, average_gc_interval);
 792 
 793 }
 794 
 795 uint64_t CodeCache::cold_gc_count() {
 796   return _cold_gc_count;
 797 }
 798 
 799 void CodeCache::gc_on_allocation() {
 800   if (!is_init_completed()) {
 801     // Let's not heuristically trigger GCs before the JVM is ready for GCs, no matter what
 802     return;
 803   }
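       // Two heuristics below: trigger an aggressive GC when the free ratio drops to
       // StartAggressiveSweepingAt percent or below, or a threshold GC when the fraction of
       // the cache allocated since the last unloading exceeds SweeperThreshold percent
       // (that threshold is scaled down by the free ratio once overall usage passes it).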
 804 
 805   size_t free = unallocated_capacity();
 806   size_t max = max_capacity();
 807   size_t used = max - free;
 808   double free_ratio = double(free) / double(max);
 809   if (free_ratio <= StartAggressiveSweepingAt / 100.0)  {
 810     // In case the GC is concurrent, we make sure only one thread requests the GC.
 811     if (Atomic::cmpxchg(&_unloading_threshold_gc_requested, false, true) == false) {
 812       log_info(codecache)("Triggering aggressive GC due to having only %.3f%% free memory", free_ratio * 100.0);
 813       Universe::heap()->collect(GCCause::_codecache_GC_aggressive);
 814     }
 815     return;
 816   }
 817 
 818   size_t last_used = _last_unloading_used;
 819   if (last_used >= used) {
 820     // No increase since last GC; no need to sweep yet
 821     return;
 822   }
 823   size_t allocated_since_last = used - last_used;
 824   double allocated_since_last_ratio = double(allocated_since_last) / double(max);
 825   double threshold = SweeperThreshold / 100.0;
 826   double used_ratio = double(used) / double(max);
 827   double last_used_ratio = double(last_used) / double(max);
 828   if (used_ratio > threshold) {
 829     // After threshold is reached, scale it by free_ratio so that more aggressive
 830     // GC is triggered as we approach code cache exhaustion
 831     threshold *= free_ratio;
 832   }
 833   // If code cache has been allocated without any GC at all, let's make sure
 834   // it is eventually invoked to avoid trouble.
 835   if (allocated_since_last_ratio > threshold) {
 836     // In case the GC is concurrent, we make sure only one thread requests the GC.
 837     if (Atomic::cmpxchg(&_unloading_threshold_gc_requested, false, true) == false) {
 838       log_info(codecache)("Triggering threshold (%.3f%%) GC due to allocating %.3f%% since last unloading (%.3f%% used -> %.3f%% used)",
 839                           threshold * 100.0, allocated_since_last_ratio * 100.0, last_used_ratio * 100.0, used_ratio * 100.0);
 840       Universe::heap()->collect(GCCause::_codecache_GC_threshold);
 841     }
 842   }
 843 }
 844 
 845 // We initialize the _gc_epoch to 2, because previous_completed_gc_marking_cycle
 846 // subtracts 2 from the value, and the type is unsigned. We don't want underflow.
 847 //
 848 // Odd values mean that marking is in progress, and even values mean that no
 849 // marking is currently active.
 850 uint64_t CodeCache::_gc_epoch = 2;
 851 
 852 // After how many GCs of not being used do we consider an nmethod cold?
 853 uint64_t CodeCache::_cold_gc_count = INT_MAX;
 854 
 855 double CodeCache::_last_unloading_time = 0.0;
 856 size_t CodeCache::_last_unloading_used = 0;
 857 volatile bool CodeCache::_unloading_threshold_gc_requested = false;
 858 TruncatedSeq CodeCache::_unloading_gc_intervals(10 /* samples */);
 859 TruncatedSeq CodeCache::_unloading_allocation_rates(10 /* samples */);
 860 
 861 uint64_t CodeCache::gc_epoch() {
 862   return _gc_epoch;
 863 }
 864 
 865 bool CodeCache::is_gc_marking_cycle_active() {
 866   // Odd means that marking is active
 867   return (_gc_epoch % 2) == 1;
 868 }
 869 
 870 uint64_t CodeCache::previous_completed_gc_marking_cycle() {
 871   if (is_gc_marking_cycle_active()) {
 872     return _gc_epoch - 2;
 873   } else {
 874     return _gc_epoch - 1;
 875   }
 876 }
 877 
 878 void CodeCache::on_gc_marking_cycle_start() {
 879   assert(!is_gc_marking_cycle_active(), "Previous marking cycle never ended");
 880   ++_gc_epoch;
 881 }
 882 
 883 // Once started the code cache marking cycle must only be finished after marking of
 884 // the java heap is complete. Otherwise nmethods could appear to be not on stack even
 885 // if they have frames in continuation StackChunks that were not yet visited.
 886 void CodeCache::on_gc_marking_cycle_finish() {
 887   assert(is_gc_marking_cycle_active(), "Marking cycle started before last one finished");
 888   ++_gc_epoch;
 889   update_cold_gc_count();
 890 }
 891 
 892 void CodeCache::arm_all_nmethods() {
 893   BarrierSetNMethod* bs_nm = BarrierSet::barrier_set()->barrier_set_nmethod();
 894   if (bs_nm != nullptr) {
 895     bs_nm->arm_all_nmethods();
 896   }
 897 }
 898 
 899 // Mark nmethods for unloading if they contain otherwise unreachable oops.
 900 void CodeCache::do_unloading(bool unloading_occurred) {
 901   assert_locked_or_safepoint(CodeCache_lock);
 902   NMethodIterator iter(NMethodIterator::all);
 903   while(iter.next()) {
 904     iter.method()->do_unloading(unloading_occurred);
 905   }
 906 }
 907 
 908 void CodeCache::verify_clean_inline_caches() {
 909 #ifdef ASSERT
 910   NMethodIterator iter(NMethodIterator::not_unloading);
 911   while(iter.next()) {
 912     nmethod* nm = iter.method();
 913     nm->verify_clean_inline_caches();
 914     nm->verify();
 915   }
 916 #endif
 917 }
 918 
 919 // Defer freeing of concurrently cleaned ExceptionCache entries until
 920 // after a global handshake operation.
 921 void CodeCache::release_exception_cache(ExceptionCache* entry) {
 922   if (SafepointSynchronize::is_at_safepoint()) {
 923     delete entry;
 924   } else {
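         // Not at a safepoint: push the entry onto the purge list with a lock-free CAS loop;
         // it is deleted later in purge_exception_caches() after a global handshake.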
 925     for (;;) {
 926       ExceptionCache* purge_list_head = Atomic::load(&_exception_cache_purge_list);
 927       entry->set_purge_list_next(purge_list_head);
 928       if (Atomic::cmpxchg(&_exception_cache_purge_list, purge_list_head, entry) == purge_list_head) {
 929         break;
 930       }
 931     }
 932   }
 933 }
 934 
 935 // Delete exception caches that have been concurrently unlinked,
 936 // followed by a global handshake operation.
 937 void CodeCache::purge_exception_caches() {
 938   ExceptionCache* curr = _exception_cache_purge_list;
 939   while (curr != nullptr) {
 940     ExceptionCache* next = curr->purge_list_next();
 941     delete curr;
 942     curr = next;
 943   }
 944   _exception_cache_purge_list = nullptr;
 945 }
 946 
 947 // Restart the compiler if possible and required.
 948 void CodeCache::maybe_restart_compiler(size_t freed_memory) {
 949 
 950   // Try to start the compiler again if we freed any memory
 951   if (!CompileBroker::should_compile_new_jobs() && freed_memory != 0) {
 952     CompileBroker::set_should_compile_new_jobs(CompileBroker::run_compilation);
 953     log_info(codecache)("Restarting compiler");
 954     EventJITRestart event;
 955     event.set_freedMemory(freed_memory);
 956     event.set_codeCacheMaxCapacity(CodeCache::max_capacity());
 957     event.commit();
 958   }
 959 }
 960 
 961 uint8_t CodeCache::_unloading_cycle = 1;
 962 
 963 void CodeCache::increment_unloading_cycle() {
 964   // 2-bit value (see IsUnloadingState in nmethod.cpp for details)
 965   // 0 is reserved for new methods.
 966   _unloading_cycle = (_unloading_cycle + 1) % 4;
 967   if (_unloading_cycle == 0) {
 968     _unloading_cycle = 1;
 969   }
 970 }
 971 
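     // RAII scope for a code cache unlinking pass: installs the unloading behaviour derived
     // from 'is_alive', bumps the unloading cycle, and brackets dependency-context cleaning
     // (cleaning_start() here, cleaning_end() in the destructor).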
 972 CodeCache::UnlinkingScope::UnlinkingScope(BoolObjectClosure* is_alive)
 973   : _is_unloading_behaviour(is_alive)
 974 {
 975   _saved_behaviour = IsUnloadingBehaviour::current();
 976   IsUnloadingBehaviour::set_current(&_is_unloading_behaviour);
 977   increment_unloading_cycle();
 978   DependencyContext::cleaning_start();
 979 }
 980 
 981 CodeCache::UnlinkingScope::~UnlinkingScope() {
 982   IsUnloadingBehaviour::set_current(_saved_behaviour);
 983   DependencyContext::cleaning_end();
 984 }
 985 
 986 void CodeCache::verify_oops() {
 987   MutexLocker mu(CodeCache_lock, Mutex::_no_safepoint_check_flag);
 988   VerifyOopClosure voc;
 989   NMethodIterator iter(NMethodIterator::not_unloading);
 990   while(iter.next()) {
 991     nmethod* nm = iter.method();
 992     nm->oops_do(&voc);
 993     nm->verify_oop_relocations();
 994   }
 995 }
 996 
 997 int CodeCache::blob_count(CodeBlobType code_blob_type) {
 998   CodeHeap* heap = get_code_heap(code_blob_type);
 999   return (heap != nullptr) ? heap->blob_count() : 0;
1000 }
1001 
1002 int CodeCache::blob_count() {
1003   int count = 0;
1004   FOR_ALL_HEAPS(heap) {
1005     count += (*heap)->blob_count();
1006   }
1007   return count;
1008 }
1009 
1010 int CodeCache::nmethod_count(CodeBlobType code_blob_type) {
1011   CodeHeap* heap = get_code_heap(code_blob_type);
1012   return (heap != nullptr) ? heap->nmethod_count() : 0;
1013 }
1014 
1015 int CodeCache::nmethod_count() {
1016   int count = 0;
1017   for (CodeHeap* heap : *_nmethod_heaps) {
1018     count += heap->nmethod_count();
1019   }
1020   return count;
1021 }
1022 
1023 int CodeCache::adapter_count(CodeBlobType code_blob_type) {
1024   CodeHeap* heap = get_code_heap(code_blob_type);
1025   return (heap != nullptr) ? heap->adapter_count() : 0;
1026 }
1027 
1028 int CodeCache::adapter_count() {
1029   int count = 0;
1030   FOR_ALL_HEAPS(heap) {
1031     count += (*heap)->adapter_count();
1032   }
1033   return count;
1034 }
1035 
1036 address CodeCache::low_bound(CodeBlobType code_blob_type) {
1037   CodeHeap* heap = get_code_heap(code_blob_type);
1038   return (heap != nullptr) ? (address)heap->low_boundary() : nullptr;
1039 }
1040 
1041 address CodeCache::high_bound(CodeBlobType code_blob_type) {
1042   CodeHeap* heap = get_code_heap(code_blob_type);
1043   return (heap != nullptr) ? (address)heap->high_boundary() : nullptr;
1044 }
1045 
1046 size_t CodeCache::capacity() {
1047   size_t cap = 0;
1048   FOR_ALL_ALLOCABLE_HEAPS(heap) {
1049     cap += (*heap)->capacity();
1050   }
1051   return cap;
1052 }
1053 
1054 size_t CodeCache::unallocated_capacity(CodeBlobType code_blob_type) {
1055   CodeHeap* heap = get_code_heap(code_blob_type);
1056   return (heap != nullptr) ? heap->unallocated_capacity() : 0;
1057 }
1058 
1059 size_t CodeCache::unallocated_capacity() {
1060   size_t unallocated_cap = 0;
1061   FOR_ALL_ALLOCABLE_HEAPS(heap) {
1062     unallocated_cap += (*heap)->unallocated_capacity();
1063   }
1064   return unallocated_cap;
1065 }
1066 
1067 size_t CodeCache::max_capacity() {
1068   size_t max_cap = 0;
1069   FOR_ALL_ALLOCABLE_HEAPS(heap) {
1070     max_cap += (*heap)->max_capacity();
1071   }
1072   return max_cap;
1073 }
1074 
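     // Returns true if 'addr' lies within the non-nmethod code heap (stubs, adapters, ...).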
1075 bool CodeCache::is_non_nmethod(address addr) {
1076   CodeHeap* blob = get_code_heap(CodeBlobType::NonNMethod);
1077   return blob->contains(addr);
1078 }
1079 
1080 size_t CodeCache::max_distance_to_non_nmethod() {
1081   if (!SegmentedCodeCache) {
1082     return ReservedCodeCacheSize;
1083   } else {
1084     CodeHeap* blob = get_code_heap(CodeBlobType::NonNMethod);
1085     // the max distance is minimized by placing the NonNMethod segment
1086     // in between MethodProfiled and MethodNonProfiled segments
1087     size_t dist1 = (size_t)blob->high() - (size_t)_low_bound;
1088     size_t dist2 = (size_t)_high_bound - (size_t)blob->low();
1089     return dist1 > dist2 ? dist1 : dist2;
1090   }
1091 }
1092 
1093 // Returns the reverse free ratio. E.g., if 25% (1/4) of the code cache
1094 // is free, reverse_free_ratio() returns 4.
1095 // Since the code heap for each type of code blob falls forward to the next
1096 // type of code heap, return the reverse free ratio for the entire
1097 // code cache.
1098 double CodeCache::reverse_free_ratio() {
1099   double unallocated = MAX2((double)unallocated_capacity(), 1.0); // Avoid division by 0;
1100   double max = (double)max_capacity();
1101   double result = max / unallocated;
1102   assert (max >= unallocated, "Must be");
1103   assert (result >= 1.0, "reverse_free_ratio must be at least 1. It is %f", result);
1104   return result;
1105 }
1106 
1107 size_t CodeCache::bytes_allocated_in_freelists() {
1108   size_t allocated_bytes = 0;
1109   FOR_ALL_ALLOCABLE_HEAPS(heap) {
1110     allocated_bytes += (*heap)->allocated_in_freelist();
1111   }
1112   return allocated_bytes;
1113 }
1114 
1115 int CodeCache::allocated_segments() {
1116   int number_of_segments = 0;
1117   FOR_ALL_ALLOCABLE_HEAPS(heap) {
1118     number_of_segments += (*heap)->allocated_segments();
1119   }
1120   return number_of_segments;
1121 }
1122 
1123 size_t CodeCache::freelists_length() {
1124   size_t length = 0;
1125   FOR_ALL_ALLOCABLE_HEAPS(heap) {
1126     length += (*heap)->freelist_length();
1127   }
1128   return length;
1129 }
1130 
1131 void icache_init();
1132 
1133 void CodeCache::initialize() {
1134   assert(CodeCacheSegmentSize >= (uintx)CodeEntryAlignment, "CodeCacheSegmentSize must be large enough to align entry points");
1135 #ifdef COMPILER2
1136   assert(CodeCacheSegmentSize >= (uintx)OptoLoopAlignment,  "CodeCacheSegmentSize must be large enough to align inner loops");
1137 #endif
1138   assert(CodeCacheSegmentSize >= sizeof(jdouble),    "CodeCacheSegmentSize must be large enough to align constants");
1139   // Originally this was just an alignment check that caused a failure. Instead, round
1140   // CodeCacheExpansionSize up to the page size. In particular, Solaris is moving to a
1141   // larger default page size.
1142   CodeCacheExpansionSize = align_up(CodeCacheExpansionSize, os::vm_page_size());
1143 
1144   if (SegmentedCodeCache) {
1145     // Use multiple code heaps
1146     initialize_heaps();
1147   } else {
1148     // Use a single code heap
1149     FLAG_SET_ERGO(NonNMethodCodeHeapSize, (uintx)os::vm_page_size());
1150     FLAG_SET_ERGO(ProfiledCodeHeapSize, 0);
1151     FLAG_SET_ERGO(NonProfiledCodeHeapSize, 0);
1152 
1153     // If InitialCodeCacheSize is equal to ReservedCodeCacheSize, then it's more likely
1154     // users want to use the largest available page.
1155     const size_t min_pages = (InitialCodeCacheSize == ReservedCodeCacheSize) ? 1 : 8;
1156     ReservedSpace rs = reserve_heap_memory(ReservedCodeCacheSize, page_size(false, min_pages));
1157     // Register CodeHeaps with LSan as we sometimes embed pointers to malloc memory.
1158     LSAN_REGISTER_ROOT_REGION(rs.base(), rs.size());
1159     add_heap(rs, "CodeCache", CodeBlobType::All);
1160   }
1161 
1162   // Initialize ICache flush mechanism
1163   // This service is needed for os::register_code_area
1164   icache_init();
1165 
1166   // Give OS a chance to register generated code area.
1167   // This is used on Windows 64 bit platforms to register
1168   // Structured Exception Handlers for our generated code.
1169   os::register_code_area((char*)low_bound(), (char*)high_bound());
1170 }
1171 
1172 void codeCache_init() {
1173   CodeCache::initialize();
1174 }
1175 
1176 //------------------------------------------------------------------------------------------------
1177 
1178 bool CodeCache::has_nmethods_with_dependencies() {
1179   return Atomic::load_acquire(&_number_of_nmethods_with_dependencies) != 0;
1180 }
1181 
1182 void CodeCache::clear_inline_caches() {
1183   assert_locked_or_safepoint(CodeCache_lock);
1184   NMethodIterator iter(NMethodIterator::not_unloading);
1185   while(iter.next()) {
1186     iter.method()->clear_inline_caches();
1187   }
1188 }
1189 
1190 // Only used by whitebox API
1191 void CodeCache::cleanup_inline_caches_whitebox() {
1192   assert_locked_or_safepoint(CodeCache_lock);
1193   NMethodIterator iter(NMethodIterator::not_unloading);
1194   while(iter.next()) {
1195     iter.method()->cleanup_inline_caches_whitebox();
1196   }
1197 }
1198 
1199 // Keeps track of time spent for checking dependencies
1200 NOT_PRODUCT(static elapsedTimer dependentCheckTime;)
1201 
1202 #ifndef PRODUCT
1203 // Check if any of the live methods' dependencies have been invalidated.
1204 // (this is expensive!)
1205 static void check_live_nmethods_dependencies(DepChange& changes) {
1206   // Checked dependencies are allocated into this ResourceMark
1207   ResourceMark rm;
1208 
1209   // Turn off dependency tracing while actually testing dependencies.
1210   FlagSetting fs(Dependencies::_verify_in_progress, true);
1211 
1212   typedef ResourceHashtable<DependencySignature, int, 11027,
1213                             AnyObj::RESOURCE_AREA, mtInternal,
1214                             &DependencySignature::hash,
1215                             &DependencySignature::equals> DepTable;
1216 
1217   DepTable* table = new DepTable();
1218 
1219   // Iterate over live nmethods and check dependencies of all nmethods that are not
1220   // marked for deoptimization. A particular dependency is only checked once.
1221   NMethodIterator iter(NMethodIterator::not_unloading);
1222   while(iter.next()) {
1223     nmethod* nm = iter.method();
1224     // Only notify for live nmethods
1225     if (!nm->is_marked_for_deoptimization()) {
1226       for (Dependencies::DepStream deps(nm); deps.next(); ) {
1227         // Construct abstraction of a dependency.
1228         DependencySignature* current_sig = new DependencySignature(deps);
1229 
1230         // Determine if dependency is already checked. table->put(...) returns
1231         // 'true' if the dependency is added (i.e., was not in the hashtable).
1232         if (table->put(*current_sig, 1)) {
1233           Klass* witness = deps.check_dependency();
1234           if (witness != nullptr) {
1235             // Dependency checking failed. Print out information about the failed
1236             // dependency and finally fail with an assert. We can fail here, since
1237             // dependency checking is never done in a product build.
1238             deps.print_dependency(tty, witness, true);
1239             changes.print();
1240             nm->print();
1241             nm->print_dependencies_on(tty);
1242             assert(false, "Should have been marked for deoptimization");
1243           }
1244         }
1245       }
1246     }
1247   }
1248 }
1249 #endif
1250 
1251 void CodeCache::mark_for_deoptimization(DeoptimizationScope* deopt_scope, KlassDepChange& changes) {
1252   MutexLocker mu(CodeCache_lock, Mutex::_no_safepoint_check_flag);
1253 
1254   // search the hierarchy looking for nmethods which are affected by the loading of this class
1255 
1256   // then search the interfaces this class implements looking for nmethods
1257   // which might be dependent on the fact that an interface only had one
1258   // implementor.
1259   // nmethod::check_all_dependencies works correctly only if no safepoint
1260   // can happen.
1261   NoSafepointVerifier nsv;
1262   for (DepChange::ContextStream str(changes, nsv); str.next(); ) {
1263     InstanceKlass* d = str.klass();
1264     {
1265       LogStreamHandle(Trace, dependencies) log;
1266       if (log.is_enabled()) {
1267         log.print("Processing context ");
1268         d->name()->print_value_on(&log);
1269       }
1270     }
1271     d->mark_dependent_nmethods(deopt_scope, changes);
1272   }
1273 
1274 #ifndef PRODUCT
1275   if (VerifyDependencies) {
1276     // Object pointers are used as unique identifiers for dependency arguments. This
1277     // is only possible if no safepoint, i.e., GC occurs during the verification code.
1278     dependentCheckTime.start();
1279     check_live_nmethods_dependencies(changes);
1280     dependentCheckTime.stop();
1281   }
1282 #endif
1283 }
1284 
1285 #if INCLUDE_JVMTI
1286 // RedefineClasses support for saving nmethods that are dependent on "old" methods.
1287 // We don't really expect this table to grow very large.  If it does, it can become a hashtable.
1288 static GrowableArray<nmethod*>* old_nmethod_table = nullptr;
1289 
1290 static void add_to_old_table(nmethod* c) {
1291   if (old_nmethod_table == nullptr) {
1292     old_nmethod_table = new (mtCode) GrowableArray<nmethod*>(100, mtCode);
1293   }
1294   old_nmethod_table->push(c);
1295 }
1296 
1297 static void reset_old_method_table() {
1298   if (old_nmethod_table != nullptr) {
1299     delete old_nmethod_table;
1300     old_nmethod_table = nullptr;
1301   }
1302 }
1303 
1304 // Remove this method when flushed.
1305 void CodeCache::unregister_old_nmethod(nmethod* c) {
1306   assert_lock_strong(CodeCache_lock);
1307   if (old_nmethod_table != nullptr) {
1308     int index = old_nmethod_table->find(c);
1309     if (index != -1) {
1310       old_nmethod_table->delete_at(index);
1311     }
1312   }
1313 }
1314 
1315 void CodeCache::old_nmethods_do(MetadataClosure* f) {
1316   // Walk old method table and mark those on stack.
1317   int length = 0;
1318   if (old_nmethod_table != nullptr) {
1319     length = old_nmethod_table->length();
1320     for (int i = 0; i < length; i++) {
1321       // Walk all methods saved on the last pass.  Concurrent class unloading may
1322       // also be looking at this method's metadata, so don't delete it yet if
1323       // it is marked as unloaded.
1324       old_nmethod_table->at(i)->metadata_do(f);
1325     }
1326   }
1327   log_debug(redefine, class, nmethod)("Walked %d nmethods for mark_on_stack", length);
1328 }
1329 
1330 // Walk compiled methods and mark dependent methods for deoptimization.
1331 void CodeCache::mark_dependents_for_evol_deoptimization(DeoptimizationScope* deopt_scope) {
1332   assert(SafepointSynchronize::is_at_safepoint(), "Can only do this at a safepoint!");
1333   // Each redefinition creates a new set of nmethods that have references to "old" Methods
1334   // So delete old method table and create a new one.
1335   reset_old_method_table();
1336 
1337   NMethodIterator iter(NMethodIterator::all);
1338   while(iter.next()) {
1339     nmethod* nm = iter.method();
1340     // Walk all alive nmethods to check for old Methods.
1341     // This includes methods whose inline caches point to old methods, so
1342     // inline cache clearing is unnecessary.
1343     if (nm->has_evol_metadata()) {
1344       deopt_scope->mark(nm);
1345       add_to_old_table(nm);
1346     }
1347   }
1348 }
1349 
1350 void CodeCache::mark_all_nmethods_for_evol_deoptimization(DeoptimizationScope* deopt_scope) {
1351   assert(SafepointSynchronize::is_at_safepoint(), "Can only do this at a safepoint!");
1352   NMethodIterator iter(NMethodIterator::all);
1353   while(iter.next()) {
1354     nmethod* nm = iter.method();
1355     if (!nm->method()->is_method_handle_intrinsic()) {
1356       if (nm->can_be_deoptimized()) {
1357         deopt_scope->mark(nm);
1358       }
1359       if (nm->has_evol_metadata()) {
1360         add_to_old_table(nm);
1361       }
1362     }
1363   }
1364 }
1365 
1366 #endif // INCLUDE_JVMTI
1367 
1368 // Mark methods for deopt (if safe or possible).
1369 void CodeCache::mark_all_nmethods_for_deoptimization(DeoptimizationScope* deopt_scope) {
1370   MutexLocker mu(CodeCache_lock, Mutex::_no_safepoint_check_flag);
1371   NMethodIterator iter(NMethodIterator::not_unloading);
1372   while(iter.next()) {
1373     nmethod* nm = iter.method();
1374     if (!nm->is_native_method()) {
1375       deopt_scope->mark(nm);
1376     }
1377   }
1378 }
1379 
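// Marks every not-unloading nmethod that is dependent on the given method for deoptimization.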
1380 void CodeCache::mark_for_deoptimization(DeoptimizationScope* deopt_scope, Method* dependee) {
1381   MutexLocker mu(CodeCache_lock, Mutex::_no_safepoint_check_flag);
1382 
1383   NMethodIterator iter(NMethodIterator::not_unloading);
1384   while (iter.next()) {
1385     nmethod* nm = iter.method();
1386     if (nm->is_dependent_on_method(dependee)) {
1387       deopt_scope->mark(nm);
1388     }
1389   }
1390 }
1391 
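// Walks the not-unloading nmethods and makes every one that is marked for deoptimization,
// not yet deoptimized, and still deoptimizable both not entrant and deoptimized.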
1392 void CodeCache::make_marked_nmethods_deoptimized() {
1393   RelaxedNMethodIterator iter(RelaxedNMethodIterator::not_unloading);
1394   while (iter.next()) {
1395     nmethod* nm = iter.method();
1396     if (nm->is_marked_for_deoptimization() && !nm->has_been_deoptimized() && nm->can_be_deoptimized()) {
1397       nm->make_not_entrant();
1398       nm->make_deoptimized();
1399     }
1400   }
1401 }
1402 
1403 // Marks compiled methods dependent on dependee.
1404 void CodeCache::mark_dependents_on(DeoptimizationScope* deopt_scope, InstanceKlass* dependee) {
1405   assert_lock_strong(Compile_lock);
1406 
1407   if (!has_nmethods_with_dependencies()) {
1408     return;
1409   }
1410 
1411   if (dependee->is_linked()) {
1412     // Class initialization state change.
1413     KlassInitDepChange changes(dependee);
1414     mark_for_deoptimization(deopt_scope, changes);
1415   } else {
1416     // New class is loaded.
1417     NewKlassDepChange changes(dependee);
1418     mark_for_deoptimization(deopt_scope, changes);
1419   }
1420 }
1421 
1422 // Marks compiled methods dependent on the given method and deoptimizes them
1422 // (used when setting a breakpoint).
1423 void CodeCache::mark_dependents_on_method_for_breakpoint(const methodHandle& m_h) {
1424   assert(SafepointSynchronize::is_at_safepoint(), "invariant");
1425 
1426   DeoptimizationScope deopt_scope;
1427   // Compute the dependent nmethods
1428   mark_for_deoptimization(&deopt_scope, m_h());
1429   deopt_scope.deoptimize_marked();
1430 }
1431 
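// Verifies each code heap and every code blob it contains.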
1432 void CodeCache::verify() {
1433   assert_locked_or_safepoint(CodeCache_lock);
1434   FOR_ALL_HEAPS(heap) {
1435     (*heap)->verify();
1436     FOR_ALL_BLOBS(cb, *heap) {
1437       cb->verify();
1438     }
1439   }
1440 }
1441 
1442 // A CodeHeap is full. Print out warning and report event.
1443 PRAGMA_DIAG_PUSH
1444 PRAGMA_FORMAT_NONLITERAL_IGNORED
1445 void CodeCache::report_codemem_full(CodeBlobType code_blob_type, bool print) {
1446   // Get nmethod heap for the given CodeBlobType and build CodeCacheFull event
1447   CodeHeap* heap = get_code_heap(code_blob_type);
1448   assert(heap != nullptr, "heap is null");
1449 
1450   int full_count = heap->report_full();
1451 
1452   if ((full_count == 1) || print) {
1453     // Not yet reported for this heap, report
1454     if (SegmentedCodeCache) {
1455       ResourceMark rm;
1456       stringStream msg1_stream, msg2_stream;
1457       msg1_stream.print("%s is full. Compiler has been disabled.",
1458                         get_code_heap_name(code_blob_type));
1459       msg2_stream.print("Try increasing the code heap size using -XX:%s=",
1460                  get_code_heap_flag_name(code_blob_type));
1461       const char *msg1 = msg1_stream.as_string();
1462       const char *msg2 = msg2_stream.as_string();
1463 
1464       log_warning(codecache)("%s", msg1);
1465       log_warning(codecache)("%s", msg2);
1466       warning("%s", msg1);
1467       warning("%s", msg2);
1468     } else {
1469       const char *msg1 = "CodeCache is full. Compiler has been disabled.";
1470       const char *msg2 = "Try increasing the code cache size using -XX:ReservedCodeCacheSize=";
1471 
1472       log_warning(codecache)("%s", msg1);
1473       log_warning(codecache)("%s", msg2);
1474       warning("%s", msg1);
1475       warning("%s", msg2);
1476     }
1477     stringStream s;
1478     // Dump code cache into a buffer before locking the tty.
1479     {
1480       MutexLocker mu(CodeCache_lock, Mutex::_no_safepoint_check_flag);
1481       print_summary(&s);
1482     }
1483     {
1484       ttyLocker ttyl;
1485       tty->print("%s", s.freeze());
1486     }
1487 
1488     if (full_count == 1) {
1489       if (PrintCodeHeapAnalytics) {
1490         CompileBroker::print_heapinfo(tty, "all", 4096); // details, may be a lot!
1491       }
1492     }
1493   }
1494 
1495   EventCodeCacheFull event;
1496   if (event.should_commit()) {
1497     event.set_codeBlobType((u1)code_blob_type);
1498     event.set_startAddress((u8)heap->low_boundary());
1499     event.set_commitedTopAddress((u8)heap->high());
1500     event.set_reservedTopAddress((u8)heap->high_boundary());
1501     event.set_entryCount(heap->blob_count());
1502     event.set_methodCount(heap->nmethod_count());
1503     event.set_adaptorCount(heap->adapter_count());
1504     event.set_unallocatedCapacity(heap->unallocated_capacity());
1505     event.set_fullCount(heap->full_count());
1506     event.set_codeCacheMaxCapacity(CodeCache::max_capacity());
1507     event.commit();
1508   }
1509 }
1510 PRAGMA_DIAG_POP
1511 
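// Prints how much CodeCache memory is lost to overhead: the freelist contents and the
// bytes wasted per blob because allocations are rounded up to CodeCacheSegmentSize.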
1512 void CodeCache::print_memory_overhead() {
1513   size_t wasted_bytes = 0;
1514   FOR_ALL_ALLOCABLE_HEAPS(heap) {
1515       CodeHeap* curr_heap = *heap;
1516       for (CodeBlob* cb = (CodeBlob*)curr_heap->first(); cb != nullptr; cb = (CodeBlob*)curr_heap->next(cb)) {
1517         HeapBlock* heap_block = ((HeapBlock*)cb) - 1;
1518         wasted_bytes += heap_block->length() * CodeCacheSegmentSize - cb->size();
1519       }
1520   }
1521   // Print freelist statistics and other memory overhead.
1522   ttyLocker ttl;
1523   tty->print_cr("Number of elements in freelist: %zd",       freelists_length());
1524   tty->print_cr("Allocated in freelist:          %zdkB",  bytes_allocated_in_freelists()/K);
1525   tty->print_cr("Unused bytes in CodeBlobs:      %zdkB",  (wasted_bytes/K));
1526   tty->print_cr("Segment map size:               %zdkB",  allocated_segments()/K); // 1 byte per segment
1527 }
1528 
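// Prints one group of nmethod statistics: total count, how many are not entrant,
// and how many were actually used (with the used percentage).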
1529 static void print_helper1(outputStream* st, const char* prefix, int total, int not_entrant, int used) {
1530   if (total > 0) {
1531     double ratio = (100.0 * used) / total;
1532     st->print("%s %3d nmethods: %3d not_entrant, %d used (%2.1f%%)", prefix, total, not_entrant, used, ratio);
1533   }
1534 }
1535 
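// Prints nmethod counts per compilation tier. The stats arrays are indexed by
// [SCC or not][comp_level (+1 if preloaded)][in_use / not_entrant / other][normal / OSR].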
1536 void CodeCache::print_nmethod_statistics_on(outputStream* st) {
1537   int stats     [2][6][3][2] = {0};
1538   int stats_used[2][6][3][2] = {0};
1539 
1540   int total_osr = 0;
1541   int total_entrant = 0;
1542   int total_non_entrant = 0;
1543   int total_other = 0;
1544   int total_used = 0;
1545 
1546   NMethodIterator iter(NMethodIterator::all);
1547   while (iter.next()) {
1548     nmethod* nm = iter.method();
1549     if (nm->is_in_use()) {
1550       ++total_entrant;
1551     } else if (nm->is_not_entrant()) {
1552       ++total_non_entrant;
1553     } else {
1554       ++total_other;
1555     }
1556     if (nm->is_osr_method()) {
1557       ++total_osr;
1558     }
1559     if (nm->used()) {
1560       ++total_used;
1561     }
1562     assert(!nm->preloaded() || nm->comp_level() == CompLevel_full_optimization, "");
1563 
1564     int idx1 = nm->is_scc() ? 1 : 0;
1565     int idx2 = nm->comp_level() + (nm->preloaded() ? 1 : 0);
1566     int idx3 = (nm->is_in_use()      ? 0 :
1567                (nm->is_not_entrant() ? 1 :
1568                                        2));
1569     int idx4 = (nm->is_osr_method() ? 1 : 0);
1570     stats[idx1][idx2][idx3][idx4] += 1;
1571     if (nm->used()) {
1572       stats_used[idx1][idx2][idx3][idx4] += 1;
1573     }
1574   }
1575 
1576   st->print("Total: %d methods (%d entrant / %d not_entrant; osr: %d ",
1577                total_entrant + total_non_entrant + total_other,
1578                total_entrant, total_non_entrant, total_osr);
1579   if (total_other > 0) {
1580     st->print("; %d other", total_other);
1581   }
1582   st->print_cr(")");
1583 
1584   for (int i = CompLevel_simple; i <= CompLevel_full_optimization; i++) {
1585     int total_normal = stats[0][i][0][0] + stats[0][i][1][0] + stats[0][i][2][0];
1586     int total_osr    = stats[0][i][0][1] + stats[0][i][1][1] + stats[0][i][2][1];
1587     if (total_normal + total_osr > 0) {
1588       st->print("  Tier%d:", i);
1589       print_helper1(st,      "", total_normal, stats[0][i][1][0], stats_used[0][i][0][0] + stats_used[0][i][1][0]);
1590       print_helper1(st, "; osr:", total_osr,    stats[0][i][1][1], stats_used[0][i][0][1] + stats_used[0][i][1][1]);
1591       st->cr();
1592     }
1593   }
1594   st->cr();
1595   for (int i = CompLevel_simple; i <= CompLevel_full_optimization + 1; i++) {
1596     int total_normal = stats[1][i][0][0] + stats[1][i][1][0] + stats[1][i][2][0];
1597     int total_osr    = stats[1][i][0][1] + stats[1][i][1][1] + stats[1][i][2][1];
1598     assert(total_osr == 0, "sanity");
1599     if (total_normal + total_osr > 0) {
1600       st->print("  SC T%d:", i);
1601       print_helper1(st,      "", total_normal, stats[1][i][1][0], stats_used[1][i][0][0] + stats_used[1][i][1][0]);
1602       print_helper1(st, "; osr:", total_osr,    stats[1][i][1][1], stats_used[1][i][0][1] + stats_used[1][i][1][1]);
1603       st->cr();
1604     }
1605   }
1606 }
1607 
1608 //------------------------------------------------------------------------------------------------
1609 // Non-product version
1610 
1611 #ifndef PRODUCT
1612 
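// Traces a single CodeCache event (e.g. allocation or free) for the given blob
// when PrintCodeCache2 is enabled.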
1613 void CodeCache::print_trace(const char* event, CodeBlob* cb, uint size) {
1614   if (PrintCodeCache2) {  // Need to add a new flag
1615     ResourceMark rm;
1616     if (size == 0) {
1617       int s = cb->size();
1618       assert(s >= 0, "CodeBlob size is negative: %d", s);
1619       size = (uint) s;
1620     }
1621     tty->print_cr("CodeCache %s:  addr: " INTPTR_FORMAT ", size: 0x%x", event, p2i(cb), size);
1622   }
1623 }
1624 
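// Prints every blob in the allocatable heaps, per-type counters, and a histogram of
// nmethod sizes in buckets of bucketSize (512) bytes.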
1625 void CodeCache::print_internals() {
1626   int nmethodCount = 0;
1627   int runtimeStubCount = 0;
1628   int adapterCount = 0;
1629   int deoptimizationStubCount = 0;
1630   int uncommonTrapStubCount = 0;
1631   int bufferBlobCount = 0;
1632   int total = 0;
1633   int nmethodNotEntrant = 0;
1634   int nmethodJava = 0;
1635   int nmethodNative = 0;
1636   int max_nm_size = 0;
1637   ResourceMark rm;
1638 
1640   FOR_ALL_ALLOCABLE_HEAPS(heap) {
1641     int heap_total = 0;
1642     tty->print_cr("-- %s --", (*heap)->name());
1643     FOR_ALL_BLOBS(cb, *heap) {
1644       total++;
1645       heap_total++;
1646       if (cb->is_nmethod()) {
1647         nmethod* nm = (nmethod*)cb;
1648 
1649         tty->print("%4d: ", heap_total);
1650         CompileTask::print(tty, nm, (nm->is_not_entrant() ? "non-entrant" : ""), true, true);
1651 
1652         nmethodCount++;
1653 
1654         if (nm->is_not_entrant()) { nmethodNotEntrant++; }
1655         if (nm->method() != nullptr && nm->is_native_method()) { nmethodNative++; }
1656 
1657         if (nm->method() != nullptr && nm->is_java_method()) {
1658           nmethodJava++;
1659           max_nm_size = MAX2(max_nm_size, nm->size());
1660         }
1661       } else if (cb->is_runtime_stub()) {
1662         runtimeStubCount++;
1663       } else if (cb->is_deoptimization_stub()) {
1664         deoptimizationStubCount++;
1665       } else if (cb->is_uncommon_trap_stub()) {
1666         uncommonTrapStubCount++;
1667       } else if (cb->is_adapter_blob()) {
1668         adapterCount++;
1669       } else if (cb->is_buffer_blob()) {
1670         bufferBlobCount++;
1671       }
1672     }
1673   }
1674 
1675   int bucketSize = 512;
1676   int bucketLimit = max_nm_size / bucketSize + 1;
1677   int *buckets = NEW_C_HEAP_ARRAY(int, bucketLimit, mtCode);
1678   memset(buckets, 0, sizeof(int) * bucketLimit);
1679 
1680   NMethodIterator iter(NMethodIterator::all);
1681   while (iter.next()) {
1682     nmethod* nm = iter.method();
1683     if (nm->method() != nullptr && nm->is_java_method()) {
1684       buckets[nm->size() / bucketSize]++;
1685     }
1686   }
1687 
1688   tty->print_cr("Code Cache Entries (total of %d)",total);
1689   tty->print_cr("-------------------------------------------------");
1690   tty->print_cr("nmethods: %d",nmethodCount);
1691   tty->print_cr("\tnot_entrant: %d",nmethodNotEntrant);
1692   tty->print_cr("\tjava: %d",nmethodJava);
1693   tty->print_cr("\tnative: %d",nmethodNative);
1694   tty->print_cr("runtime_stubs: %d",runtimeStubCount);
1695   tty->print_cr("adapters: %d",adapterCount);
1696   tty->print_cr("buffer blobs: %d",bufferBlobCount);
1697   tty->print_cr("deoptimization_stubs: %d",deoptimizationStubCount);
1698   tty->print_cr("uncommon_traps: %d",uncommonTrapStubCount);
1699   tty->print_cr("\nnmethod size distribution");
1700   tty->print_cr("-------------------------------------------------");
1701 
1702   for (int i = 0; i < bucketLimit; i++) {
1703     if (buckets[i] != 0) {
1704       tty->print("%d - %d bytes", i * bucketSize, (i + 1) * bucketSize);
1705       tty->fill_to(40);
1706       tty->print_cr("%d", buckets[i]);
1707     }
1708   }
1709 
1710   FREE_C_HEAP_ARRAY(int, buckets);
1711   print_memory_overhead();
1712 }
1713 
1714 #endif // !PRODUCT
1715 
1716 void CodeCache::print() {
1717   print_summary(tty);
1718 
1719 #ifndef PRODUCT
1720   if (!Verbose) return;
1721 
1722   CodeBlob_sizes live[CompLevel_full_optimization + 1];
1723   CodeBlob_sizes runtimeStub;
1724   CodeBlob_sizes uncommonTrapStub;
1725   CodeBlob_sizes deoptimizationStub;
1726   CodeBlob_sizes adapter;
1727   CodeBlob_sizes bufferBlob;
1728   CodeBlob_sizes other;
1729 
1730   FOR_ALL_ALLOCABLE_HEAPS(heap) {
1731     FOR_ALL_BLOBS(cb, *heap) {
1732       if (cb->is_nmethod()) {
1733         const int level = cb->as_nmethod()->comp_level();
1734         assert(0 <= level && level <= CompLevel_full_optimization, "Invalid compilation level");
1735         live[level].add(cb);
1736       } else if (cb->is_runtime_stub()) {
1737         runtimeStub.add(cb);
1738       } else if (cb->is_deoptimization_stub()) {
1739         deoptimizationStub.add(cb);
1740       } else if (cb->is_uncommon_trap_stub()) {
1741         uncommonTrapStub.add(cb);
1742       } else if (cb->is_adapter_blob()) {
1743         adapter.add(cb);
1744       } else if (cb->is_buffer_blob()) {
1745         bufferBlob.add(cb);
1746       } else {
1747         other.add(cb);
1748       }
1749     }
1750   }
1751 
1752   tty->print_cr("nmethod dependency checking time %fs", dependentCheckTime.seconds());
1753 
1754   tty->print_cr("nmethod blobs per compilation level:");
1755   for (int i = 0; i <= CompLevel_full_optimization; i++) {
1756     const char *level_name;
1757     switch (i) {
1758     case CompLevel_none:              level_name = "none";              break;
1759     case CompLevel_simple:            level_name = "simple";            break;
1760     case CompLevel_limited_profile:   level_name = "limited profile";   break;
1761     case CompLevel_full_profile:      level_name = "full profile";      break;
1762     case CompLevel_full_optimization: level_name = "full optimization"; break;
1763     default: assert(false, "invalid compilation level");
1764     }
1765     tty->print_cr("%s:", level_name);
1766     live[i].print("live");
1767   }
1768 
1769   struct {
1770     const char* name;
1771     const CodeBlob_sizes* sizes;
1772   } non_nmethod_blobs[] = {
1773     { "runtime",        &runtimeStub },
1774     { "uncommon trap",  &uncommonTrapStub },
1775     { "deoptimization", &deoptimizationStub },
1776     { "adapter",        &adapter },
1777     { "buffer blob",    &bufferBlob },
1778     { "other",          &other },
1779   };
1780   tty->print_cr("Non-nmethod blobs:");
1781   for (auto& blob: non_nmethod_blobs) {
1782     blob.sizes->print(blob.name);
1783   }
1784 
1785   if (WizardMode) {
1786      // print the oop_map usage
1787     int code_size = 0;
1788     int number_of_blobs = 0;
1789     int number_of_oop_maps = 0;
1790     int map_size = 0;
1791     FOR_ALL_ALLOCABLE_HEAPS(heap) {
1792       FOR_ALL_BLOBS(cb, *heap) {
1793         number_of_blobs++;
1794         code_size += cb->code_size();
1795         ImmutableOopMapSet* set = cb->oop_maps();
1796         if (set != nullptr) {
1797           number_of_oop_maps += set->count();
1798           map_size           += set->nr_of_bytes();
1799         }
1800       }
1801     }
1802     tty->print_cr("OopMaps");
1803     tty->print_cr("  #blobs    = %d", number_of_blobs);
1804     tty->print_cr("  code size = %d", code_size);
1805     tty->print_cr("  #oop_maps = %d", number_of_oop_maps);
1806     tty->print_cr("  map size  = %d", map_size);
1807   }
1808 
1809 #endif // !PRODUCT
1810 }
1811 
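// Prints a numbered list of all nmethods in the allocatable heaps, marking
// not-entrant ones with 'N'.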
1812 void CodeCache::print_nmethods_on(outputStream* st) {
1813   ResourceMark rm;
1814   int i = 0;
1815   FOR_ALL_ALLOCABLE_HEAPS(heap) {
1816     st->print_cr("-- %s --", (*heap)->name());
1817     FOR_ALL_BLOBS(cb, *heap) {
1818       i++;
1819       if (cb->is_nmethod()) {
1820         nmethod* nm = (nmethod*)cb;
1821         st->print("%4d: ", i);
1822         CompileTask::print(st, nm, nullptr, true, false);
1823 
1824         const char non_entrant_char = (nm->is_not_entrant() ? 'N' : ' ');
1825         st->print_cr(" %c", non_entrant_char);
1826       }
1827     }
1828   }
1829 }
1830 
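// Prints a one-line usage summary (size/used/max_used/free) per code heap and, if
// 'detailed' is set, also the heap bounds, the overall totals, and the compiler state.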
1831 void CodeCache::print_summary(outputStream* st, bool detailed) {
1832   int full_count = 0;
1833   julong total_used = 0;
1834   julong total_max_used = 0;
1835   julong total_free = 0;
1836   julong total_size = 0;
1837   FOR_ALL_HEAPS(heap_iterator) {
1838     CodeHeap* heap = (*heap_iterator);
1839     size_t total = (heap->high_boundary() - heap->low_boundary());
1840     if (_heaps->length() >= 1) {
1841       st->print("%s:", heap->name());
1842     } else {
1843       st->print("CodeCache:");
1844     }
1845     size_t size = total/K;
1846     size_t used = (total - heap->unallocated_capacity())/K;
1847     size_t max_used = heap->max_allocated_capacity()/K;
1848     size_t free = heap->unallocated_capacity()/K;
1849     total_size += size;
1850     total_used += used;
1851     total_max_used += max_used;
1852     total_free += free;
1853     st->print_cr(" size=" SIZE_FORMAT "Kb used=" SIZE_FORMAT
1854                  "Kb max_used=" SIZE_FORMAT "Kb free=" SIZE_FORMAT "Kb",
1855                  size, used, max_used, free);
1856 
1857     if (detailed) {
1858       st->print_cr(" bounds [" INTPTR_FORMAT ", " INTPTR_FORMAT ", " INTPTR_FORMAT "]",
1859                    p2i(heap->low_boundary()),
1860                    p2i(heap->high()),
1861                    p2i(heap->high_boundary()));
1862 
1863       full_count += get_codemem_full_count(heap->code_blob_type());
1864     }
1865   }
1866 
1867   if (detailed) {
1868     if (SegmentedCodeCache) {
1869       st->print("CodeCache:");
1870       st->print_cr(" size=" JULONG_FORMAT "Kb, used=" JULONG_FORMAT
1871                    "Kb, max_used=" JULONG_FORMAT "Kb, free=" JULONG_FORMAT "Kb",
1872                    total_size, total_used, total_max_used, total_free);
1873     }
1874     st->print_cr(" total_blobs=" UINT32_FORMAT ", nmethods=" UINT32_FORMAT
1875                  ", adapters=" UINT32_FORMAT ", full_count=" UINT32_FORMAT,
1876                  blob_count(), nmethod_count(), adapter_count(), full_count);
1877     st->print_cr("Compilation: %s, stopped_count=%d, restarted_count=%d",
1878                  CompileBroker::should_compile_new_jobs() ?
1879                  "enabled" : Arguments::mode() == Arguments::_int ?
1880                  "disabled (interpreter mode)" :
1881                  "disabled (not enough contiguous free space left)",
1882                  CompileBroker::get_total_compiler_stopped_count(),
1883                  CompileBroker::get_total_compiler_restarted_count());
1884   }
1885 }
1886 
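// Prints one line per not-unloading nmethod:
//   <compile_id> <comp_level> <state> <name> [<header_begin>, <code_begin> - <code_end>]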
1887 void CodeCache::print_codelist(outputStream* st) {
1888   MutexLocker mu(CodeCache_lock, Mutex::_no_safepoint_check_flag);
1889 
1890   NMethodIterator iter(NMethodIterator::not_unloading);
1891   while (iter.next()) {
1892     nmethod* nm = iter.method();
1893     ResourceMark rm;
1894     char* method_name = nm->method()->name_and_sig_as_C_string();
1895     const char* jvmci_name = nullptr;
1896 #if INCLUDE_JVMCI
1897     jvmci_name = nm->jvmci_name();
1898 #endif
1899     st->print_cr("%d %d %d %s%s%s [" INTPTR_FORMAT ", " INTPTR_FORMAT " - " INTPTR_FORMAT "]",
1900                  nm->compile_id(), nm->comp_level(), nm->get_state(),
1901                  method_name, jvmci_name ? " jvmci_name=" : "", jvmci_name ? jvmci_name : "",
1902                  (intptr_t)nm->header_begin(), (intptr_t)nm->code_begin(), (intptr_t)nm->code_end());
1903   }
1904 }
1905 
1906 void CodeCache::print_layout(outputStream* st) {
1907   MutexLocker mu(CodeCache_lock, Mutex::_no_safepoint_check_flag);
1908   ResourceMark rm;
1909   print_summary(st, true);
1910 }
1911 
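// Prints code cache counters in attribute form (e.g. for the compilation log).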
1912 void CodeCache::log_state(outputStream* st) {
1913   st->print(" total_blobs='" UINT32_FORMAT "' nmethods='" UINT32_FORMAT "'"
1914             " adapters='" UINT32_FORMAT "' free_code_cache='" SIZE_FORMAT "'",
1915             blob_count(), nmethod_count(), adapter_count(),
1916             unallocated_capacity());
1917 }
1918 
1919 #ifdef LINUX
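// Writes a Linux perf map file ("<code_begin> <code_size> <name>" per code blob) so
// that perf can symbolize JIT-compiled code. If no filename is given, the default
// perf map filename is used with the current pid substituted.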
1920 void CodeCache::write_perf_map(const char* filename, outputStream* st) {
1921   MutexLocker mu(CodeCache_lock, Mutex::_no_safepoint_check_flag);
1922   char fname[JVM_MAXPATHLEN];
1923   if (filename == nullptr) {
1924     // Invocation outside of jcmd requires pid substitution.
1925     if (!Arguments::copy_expand_pid(DEFAULT_PERFMAP_FILENAME,
1926                                     strlen(DEFAULT_PERFMAP_FILENAME),
1927                                     fname, JVM_MAXPATHLEN)) {
1928       st->print_cr("Warning: Not writing perf map as pid substitution failed.");
1929       return;
1930     }
1931     filename = fname;
1932   }
1933   fileStream fs(filename, "w");
1934   if (!fs.is_open()) {
1935     st->print_cr("Warning: Failed to create %s for perf map", filename);
1936     return;
1937   }
1938 
1939   AllCodeBlobsIterator iter(AllCodeBlobsIterator::not_unloading);
1940   while (iter.next()) {
1941     CodeBlob *cb = iter.method();
1942     ResourceMark rm;
1943     const char* method_name = nullptr;
1944     const char* jvmci_name = nullptr;
1945     if (cb->is_nmethod()) {
1946       nmethod* nm = cb->as_nmethod();
1947       method_name = nm->method()->external_name();
1948 #if INCLUDE_JVMCI
1949       jvmci_name = nm->jvmci_name();
1950 #endif
1951     } else {
1952       method_name = cb->name();
1953     }
1954     fs.print_cr(INTPTR_FORMAT " " INTPTR_FORMAT " %s%s%s",
1955                 (intptr_t)cb->code_begin(), (intptr_t)cb->code_size(),
1956                 method_name, jvmci_name ? " jvmci_name=" : "", jvmci_name ? jvmci_name : "");
1957   }
1958 }
1959 #endif // LINUX
1960 
1961 //---<  BEGIN  >--- CodeHeap State Analytics.
1962 
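// The functions below delegate to CodeHeapState for each allocatable heap;
// 'granularity' is passed through to CodeHeapState::aggregate().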
1963 void CodeCache::aggregate(outputStream *out, size_t granularity) {
1964   FOR_ALL_ALLOCABLE_HEAPS(heap) {
1965     CodeHeapState::aggregate(out, (*heap), granularity);
1966   }
1967 }
1968 
1969 void CodeCache::discard(outputStream *out) {
1970   FOR_ALL_ALLOCABLE_HEAPS(heap) {
1971     CodeHeapState::discard(out, (*heap));
1972   }
1973 }
1974 
1975 void CodeCache::print_usedSpace(outputStream *out) {
1976   FOR_ALL_ALLOCABLE_HEAPS(heap) {
1977     CodeHeapState::print_usedSpace(out, (*heap));
1978   }
1979 }
1980 
1981 void CodeCache::print_freeSpace(outputStream *out) {
1982   FOR_ALL_ALLOCABLE_HEAPS(heap) {
1983     CodeHeapState::print_freeSpace(out, (*heap));
1984   }
1985 }
1986 
1987 void CodeCache::print_count(outputStream *out) {
1988   FOR_ALL_ALLOCABLE_HEAPS(heap) {
1989     CodeHeapState::print_count(out, (*heap));
1990   }
1991 }
1992 
1993 void CodeCache::print_space(outputStream *out) {
1994   FOR_ALL_ALLOCABLE_HEAPS(heap) {
1995     CodeHeapState::print_space(out, (*heap));
1996   }
1997 }
1998 
1999 void CodeCache::print_age(outputStream *out) {
2000   FOR_ALL_ALLOCABLE_HEAPS(heap) {
2001     CodeHeapState::print_age(out, (*heap));
2002   }
2003 }
2004 
2005 void CodeCache::print_names(outputStream *out) {
2006   FOR_ALL_ALLOCABLE_HEAPS(heap) {
2007     CodeHeapState::print_names(out, (*heap));
2008   }
2009 }
2010 //---<  END  >--- CodeHeap State Analytics.