1 /*
   2  * Copyright (c) 1997, 2025, Oracle and/or its affiliates. All rights reserved.
   3  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
   4  *
   5  * This code is free software; you can redistribute it and/or modify it
   6  * under the terms of the GNU General Public License version 2 only, as
   7  * published by the Free Software Foundation.
   8  *
   9  * This code is distributed in the hope that it will be useful, but WITHOUT
  10  * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
  11  * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
  12  * version 2 for more details (a copy is included in the LICENSE file that
  13  * accompanied this code).
  14  *
  15  * You should have received a copy of the GNU General Public License version
  16  * 2 along with this work; if not, write to the Free Software Foundation,
  17  * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
  18  *
  19  * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
  20  * or visit www.oracle.com if you need additional information or have any
  21  * questions.
  22  *
  23  */
  24 
  25 #include "precompiled.hpp"
  26 #include "code/codeBlob.hpp"
  27 #include "code/codeCache.hpp"
  28 #include "code/codeHeapState.hpp"
  29 #include "code/compiledIC.hpp"
  30 #include "code/dependencies.hpp"
  31 #include "code/dependencyContext.hpp"
  32 #include "code/nmethod.hpp"
  33 #include "code/pcDesc.hpp"
  34 #include "compiler/compilationPolicy.hpp"
  35 #include "compiler/compileBroker.hpp"
  36 #include "compiler/compilerDefinitions.inline.hpp"
  37 #include "compiler/oopMap.hpp"
  38 #include "gc/shared/barrierSetNMethod.hpp"
  39 #include "gc/shared/classUnloadingContext.hpp"
  40 #include "gc/shared/collectedHeap.hpp"
  41 #include "jfr/jfrEvents.hpp"
  42 #include "jvm_io.h"
  43 #include "logging/log.hpp"
  44 #include "logging/logStream.hpp"
  45 #include "memory/allocation.inline.hpp"
  46 #include "memory/iterator.hpp"
  47 #include "memory/memoryReserver.hpp"
  48 #include "memory/resourceArea.hpp"
  49 #include "memory/universe.hpp"
  50 #include "oops/method.inline.hpp"
  51 #include "oops/objArrayOop.hpp"
  52 #include "oops/oop.inline.hpp"
  53 #include "oops/verifyOopClosure.hpp"
  54 #include "runtime/arguments.hpp"
  55 #include "runtime/atomic.hpp"
  56 #include "runtime/deoptimization.hpp"
  57 #include "runtime/globals_extension.hpp"
  58 #include "runtime/handles.inline.hpp"
  59 #include "runtime/icache.hpp"
  60 #include "runtime/init.hpp"
  61 #include "runtime/java.hpp"
  62 #include "runtime/mutexLocker.hpp"
  63 #include "runtime/os.inline.hpp"
  64 #include "runtime/safepointVerifiers.hpp"
  65 #include "runtime/vmThread.hpp"
  66 #include "sanitizers/leak.hpp"
  67 #include "services/memoryService.hpp"
  68 #include "utilities/align.hpp"
  69 #include "utilities/vmError.hpp"
  70 #include "utilities/xmlstream.hpp"
  71 #ifdef COMPILER1
  72 #include "c1/c1_Compilation.hpp"
  73 #include "c1/c1_Compiler.hpp"
  74 #endif
  75 #ifdef COMPILER2
  76 #include "opto/c2compiler.hpp"
  77 #include "opto/compile.hpp"
  78 #include "opto/node.hpp"
  79 #endif
  80 
  81 // Helper class for printing in CodeCache
  82 class CodeBlob_sizes {
  83  private:
  84   int count;
  85   int total_size;
  86   int header_size;
  87   int code_size;
  88   int stub_size;
  89   int relocation_size;
  90   int scopes_oop_size;
  91   int scopes_metadata_size;
  92   int scopes_data_size;
  93   int scopes_pcs_size;
  94 
  95  public:
  96   CodeBlob_sizes() {
  97     count            = 0;
  98     total_size       = 0;
  99     header_size      = 0;
 100     code_size        = 0;
 101     stub_size        = 0;
 102     relocation_size  = 0;
 103     scopes_oop_size  = 0;
 104     scopes_metadata_size  = 0;
 105     scopes_data_size = 0;
 106     scopes_pcs_size  = 0;
 107   }
 108 
 109   int total() const                              { return total_size; }
 110   bool is_empty() const                          { return count == 0; }
 111 
 112   void print(const char* title) const {
 113     if (is_empty()) {
 114       tty->print_cr(" #%d %s = %dK",
 115                     count,
 116                     title,
 117                     total()                 / (int)K);
 118     } else {
 119       tty->print_cr(" #%d %s = %dK (hdr %dK %d%%, loc %dK %d%%, code %dK %d%%, stub %dK %d%%, [oops %dK %d%%, metadata %dK %d%%, data %dK %d%%, pcs %dK %d%%])",
 120                     count,
 121                     title,
 122                     total()                 / (int)K,
 123                     header_size             / (int)K,
 124                     header_size             * 100 / total_size,
 125                     relocation_size         / (int)K,
 126                     relocation_size         * 100 / total_size,
 127                     code_size               / (int)K,
 128                     code_size               * 100 / total_size,
 129                     stub_size               / (int)K,
 130                     stub_size               * 100 / total_size,
 131                     scopes_oop_size         / (int)K,
 132                     scopes_oop_size         * 100 / total_size,
 133                     scopes_metadata_size    / (int)K,
 134                     scopes_metadata_size    * 100 / total_size,
 135                     scopes_data_size        / (int)K,
 136                     scopes_data_size        * 100 / total_size,
 137                     scopes_pcs_size         / (int)K,
 138                     scopes_pcs_size         * 100 / total_size);
 139     }
 140   }
 141 
 142   void add(CodeBlob* cb) {
 143     count++;
 144     total_size       += cb->size();
 145     header_size      += cb->header_size();
 146     relocation_size  += cb->relocation_size();
 147     if (cb->is_nmethod()) {
 148       nmethod* nm = cb->as_nmethod_or_null();
 149       code_size        += nm->insts_size();
 150       stub_size        += nm->stub_size();
 151 
 152       scopes_oop_size  += nm->oops_size();
 153       scopes_metadata_size  += nm->metadata_size();
 154       scopes_data_size += nm->scopes_data_size();
 155       scopes_pcs_size  += nm->scopes_pcs_size();
 156     } else {
 157       code_size        += cb->code_size();
 158     }
 159   }
 160 };
 161 
 162 // Iterate over all CodeHeaps
 163 #define FOR_ALL_HEAPS(heap) for (GrowableArrayIterator<CodeHeap*> heap = _heaps->begin(); heap != _heaps->end(); ++heap)
 164 #define FOR_ALL_ALLOCABLE_HEAPS(heap) for (GrowableArrayIterator<CodeHeap*> heap = _allocable_heaps->begin(); heap != _allocable_heaps->end(); ++heap)
 165 
 166 // Iterate over all CodeBlobs (cb) on the given CodeHeap
 167 #define FOR_ALL_BLOBS(cb, heap) for (CodeBlob* cb = first_blob(heap); cb != nullptr; cb = next_blob(heap, cb))
 168 
 169 address CodeCache::_low_bound = nullptr;
 170 address CodeCache::_high_bound = nullptr;
 171 volatile int CodeCache::_number_of_nmethods_with_dependencies = 0;
 172 ExceptionCache* volatile CodeCache::_exception_cache_purge_list = nullptr;
 173 
 174 // Initialize arrays of CodeHeap subsets
 175 GrowableArray<CodeHeap*>* CodeCache::_heaps = new(mtCode) GrowableArray<CodeHeap*> (static_cast<int>(CodeBlobType::All), mtCode);
 176 GrowableArray<CodeHeap*>* CodeCache::_nmethod_heaps = new(mtCode) GrowableArray<CodeHeap*> (static_cast<int>(CodeBlobType::All), mtCode);
 177 GrowableArray<CodeHeap*>* CodeCache::_allocable_heaps = new(mtCode) GrowableArray<CodeHeap*> (static_cast<int>(CodeBlobType::All), mtCode);
 178 
 179 static void check_min_size(const char* codeheap, size_t size, size_t required_size) {
 180   if (size < required_size) {
 181     log_debug(codecache)("Code heap (%s) size " SIZE_FORMAT "K below required minimal size " SIZE_FORMAT "K",
 182                          codeheap, size/K, required_size/K);
 183     err_msg title("Not enough space in %s to run VM", codeheap);
 184     err_msg message(SIZE_FORMAT "K < " SIZE_FORMAT "K", size/K, required_size/K);
 185     vm_exit_during_initialization(title, message);
 186   }
 187 }
 188 
 189 struct CodeHeapInfo {
 190   size_t size;
 191   bool set;
 192   bool enabled;
 193 };
 194 
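// Helper for initialize_heaps(): size a code heap whose size was not set on the
// command line. Give it whatever part of 'available_size' is not already claimed
// by 'used_size', but never less than 'min_size'.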
 195 static void set_size_of_unset_code_heap(CodeHeapInfo* heap, size_t available_size, size_t used_size, size_t min_size) {
 196   assert(!heap->set, "sanity");
 197   heap->size = (available_size > (used_size + min_size)) ? (available_size - used_size) : min_size;
 198 }
 199 
 200 void CodeCache::initialize_heaps() {
 201 
 202   CodeHeapInfo non_nmethod = {NonNMethodCodeHeapSize, FLAG_IS_CMDLINE(NonNMethodCodeHeapSize), true};
 203   CodeHeapInfo profiled = {ProfiledCodeHeapSize, FLAG_IS_CMDLINE(ProfiledCodeHeapSize), true};
 204   CodeHeapInfo non_profiled = {NonProfiledCodeHeapSize, FLAG_IS_CMDLINE(NonProfiledCodeHeapSize), true};
 205 
 206   const bool cache_size_set   = FLAG_IS_CMDLINE(ReservedCodeCacheSize);
 207   const size_t ps             = page_size(false, 8);
 208   const size_t min_size       = MAX2(os::vm_allocation_granularity(), ps);
 209   const size_t min_cache_size = CodeCacheMinimumUseSpace DEBUG_ONLY(* 3); // Make sure we have enough space for VM internal code
 210   size_t cache_size           = align_up(ReservedCodeCacheSize, min_size);
 211 
 212   // Prerequisites
 213   if (!heap_available(CodeBlobType::MethodProfiled)) {
 214     // For compatibility reasons, disabled tiered compilation overrides
 215     // segment size even if it is set explicitly.
 216     non_profiled.size += profiled.size;
 217     // Profiled code heap is not available, forcibly set size to 0
 218     profiled.size = 0;
 219     profiled.set = true;
 220     profiled.enabled = false;
 221   }
 222 
 223   assert(heap_available(CodeBlobType::MethodNonProfiled), "MethodNonProfiled heap is always available for segmented code heap");
 224 
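  // Each compiler thread needs a code buffer; these buffers are allocated from the
  // non-nmethod heap, so account for them when sizing it (see the adjustments below).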
 225   size_t compiler_buffer_size = 0;
 226   COMPILER1_PRESENT(compiler_buffer_size += CompilationPolicy::c1_count() * Compiler::code_buffer_size());
 227   COMPILER2_PRESENT(compiler_buffer_size += CompilationPolicy::c2_count() * C2Compiler::initial_code_buffer_size());
 228 
 229   if (!non_nmethod.set) {
 230     non_nmethod.size += compiler_buffer_size;
 231     // Further down, just before FLAG_SET_ERGO(), all segment sizes are
 232     // aligned down to the next lower multiple of min_size. For large page
 233     // sizes, this may result in (non_nmethod.size == 0) which is not acceptable.
 234     // Therefore, force non_nmethod.size to at least min_size.
 235     non_nmethod.size = MAX2(non_nmethod.size, min_size);
 236   }
 237 
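  // If neither segment size was set, split what remains after the non-nmethod heap
  // evenly. Illustrative numbers: with cache_size = 128M and non_nmethod.size = 8M,
  // profiled and non-profiled each get (128M - 8M) / 2 = 60M.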
 238   if (!profiled.set && !non_profiled.set) {
 239     non_profiled.size = profiled.size = (cache_size > non_nmethod.size + 2 * min_size) ?
 240                                         (cache_size - non_nmethod.size) / 2 : min_size;
 241   }
 242 
 243   if (profiled.set && !non_profiled.set) {
 244     set_size_of_unset_code_heap(&non_profiled, cache_size, non_nmethod.size + profiled.size, min_size);
 245   }
 246 
 247   if (!profiled.set && non_profiled.set) {
 248     set_size_of_unset_code_heap(&profiled, cache_size, non_nmethod.size + non_profiled.size, min_size);
 249   }
 250 
 251   // Compatibility.
 252   size_t non_nmethod_min_size = min_cache_size + compiler_buffer_size;
 253   if (!non_nmethod.set && profiled.set && non_profiled.set) {
 254     set_size_of_unset_code_heap(&non_nmethod, cache_size, profiled.size + non_profiled.size, non_nmethod_min_size);
 255   }
 256 
 257   size_t total = non_nmethod.size + profiled.size + non_profiled.size;
 258   if (total != cache_size && !cache_size_set) {
 259     log_info(codecache)("ReservedCodeCache size " SIZE_FORMAT "K changed to total segments size NonNMethod "
 260                         SIZE_FORMAT "K NonProfiled " SIZE_FORMAT "K Profiled " SIZE_FORMAT "K = " SIZE_FORMAT "K",
 261                         cache_size/K, non_nmethod.size/K, non_profiled.size/K, profiled.size/K, total/K);
 262     // Adjust ReservedCodeCacheSize as necessary because it was not set explicitly
 263     cache_size = total;
 264   }
 265 
 266   log_debug(codecache)("Initializing code heaps ReservedCodeCache " SIZE_FORMAT "K NonNMethod " SIZE_FORMAT "K"
 267                        " NonProfiled " SIZE_FORMAT "K Profiled " SIZE_FORMAT "K",
 268                        cache_size/K, non_nmethod.size/K, non_profiled.size/K, profiled.size/K);
 269 
 270   // Validation
 271   // Check minimal required sizes
 272   check_min_size("non-nmethod code heap", non_nmethod.size, non_nmethod_min_size);
 273   if (profiled.enabled) {
 274     check_min_size("profiled code heap", profiled.size, min_size);
 275   }
 276   if (non_profiled.enabled) { // non_profiled.enabled is always ON for segmented code heap, leave it checked for clarity
 277     check_min_size("non-profiled code heap", non_profiled.size, min_size);
 278   }
 279   if (cache_size_set) {
 280     check_min_size("reserved code cache", cache_size, min_cache_size);
 281   }
 282 
 283   // ReservedCodeCacheSize was set explicitly, so report an error and abort if it doesn't match the segment sizes
 284   if (total != cache_size && cache_size_set) {
 285     err_msg message("NonNMethodCodeHeapSize (" SIZE_FORMAT "K)", non_nmethod.size/K);
 286     if (profiled.enabled) {
 287       message.append(" + ProfiledCodeHeapSize (" SIZE_FORMAT "K)", profiled.size/K);
 288     }
 289     if (non_profiled.enabled) {
 290       message.append(" + NonProfiledCodeHeapSize (" SIZE_FORMAT "K)", non_profiled.size/K);
 291     }
 292     message.append(" = " SIZE_FORMAT "K", total/K);
 293     message.append((total > cache_size) ? " is greater than " : " is less than ");
 294     message.append("ReservedCodeCacheSize (" SIZE_FORMAT "K).", cache_size/K);
 295 
 296     vm_exit_during_initialization("Invalid code heap sizes", message);
 297   }
 298 
  // Compatibility. Print a warning if large pages are requested but the code cache is too small to use the large page size.
 300   if (UseLargePages) {
 301     const size_t lg_ps = page_size(false, 1);
 302     if (ps < lg_ps) {
 303       log_warning(codecache)("Code cache size too small for " PROPERFMT " pages. "
 304                              "Reverting to smaller page size (" PROPERFMT ").",
 305                              PROPERFMTARGS(lg_ps), PROPERFMTARGS(ps));
 306     }
 307   }
 308 
 309   // Note: if large page support is enabled, min_size is at least the large
 310   // page size. This ensures that the code cache is covered by large pages.
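// Alignment leftovers from rounding the other segments down are handed to the
// non-profiled heap, so as little of the reserved space as possible is wasted.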
 311   non_profiled.size += non_nmethod.size & alignment_mask(min_size);
 312   non_profiled.size += profiled.size & alignment_mask(min_size);
 313   non_nmethod.size = align_down(non_nmethod.size, min_size);
 314   profiled.size = align_down(profiled.size, min_size);
 315   non_profiled.size = align_down(non_profiled.size, min_size);
 316 
 317   FLAG_SET_ERGO(NonNMethodCodeHeapSize, non_nmethod.size);
 318   FLAG_SET_ERGO(ProfiledCodeHeapSize, profiled.size);
 319   FLAG_SET_ERGO(NonProfiledCodeHeapSize, non_profiled.size);
 320   FLAG_SET_ERGO(ReservedCodeCacheSize, cache_size);
 321 
 322   ReservedSpace rs = reserve_heap_memory(cache_size, ps);
 323 
 324   // Register CodeHeaps with LSan as we sometimes embed pointers to malloc memory.
 325   LSAN_REGISTER_ROOT_REGION(rs.base(), rs.size());
 326 
 327   size_t offset = 0;
 328   if (profiled.enabled) {
 329     ReservedSpace profiled_space = rs.partition(offset, profiled.size);
 330     offset += profiled.size;
 331     // Tier 2 and tier 3 (profiled) methods
 332     add_heap(profiled_space, "CodeHeap 'profiled nmethods'", CodeBlobType::MethodProfiled);
 333   }
 334 
 335   ReservedSpace non_method_space = rs.partition(offset, non_nmethod.size);
 336   offset += non_nmethod.size;
 337   // Non-nmethods (stubs, adapters, ...)
 338   add_heap(non_method_space, "CodeHeap 'non-nmethods'", CodeBlobType::NonNMethod);
 339 
 340   if (non_profiled.enabled) {
 341     ReservedSpace non_profiled_space  = rs.partition(offset, non_profiled.size);
 342     // Tier 1 and tier 4 (non-profiled) methods and native methods
 343     add_heap(non_profiled_space, "CodeHeap 'non-profiled nmethods'", CodeBlobType::MethodNonProfiled);
 344   }
 345 }
 346 
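// Returns the largest page size that still lets the reserved code cache span at
// least 'min_pages' pages (see os::page_size_for_region_aligned/_unaligned).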
 347 size_t CodeCache::page_size(bool aligned, size_t min_pages) {
 348   return aligned ? os::page_size_for_region_aligned(ReservedCodeCacheSize, min_pages) :
 349                    os::page_size_for_region_unaligned(ReservedCodeCacheSize, min_pages);
 350 }
 351 
 352 ReservedSpace CodeCache::reserve_heap_memory(size_t size, size_t rs_ps) {
 353   // Align and reserve space for code cache
 354   const size_t rs_align = MAX2(rs_ps, os::vm_allocation_granularity());
 355   const size_t rs_size = align_up(size, rs_align);
 356 
 357   ReservedSpace rs = CodeMemoryReserver::reserve(rs_size, rs_align, rs_ps);
 358   if (!rs.is_reserved()) {
 359     vm_exit_during_initialization(err_msg("Could not reserve enough space for code cache (" SIZE_FORMAT "K)",
 360                                           rs_size/K));
 361   }
 362 
 363   // Initialize bounds
 364   _low_bound = (address)rs.base();
 365   _high_bound = _low_bound + rs.size();
 366   return rs;
 367 }
 368 
 369 // Heaps available for allocation
 370 bool CodeCache::heap_available(CodeBlobType code_blob_type) {
 371   if (!SegmentedCodeCache) {
 372     // No segmentation: use a single code heap
 373     return (code_blob_type == CodeBlobType::All);
 374   } else if (CompilerConfig::is_interpreter_only()) {
 375     // Interpreter only: we don't need any method code heaps
 376     return (code_blob_type == CodeBlobType::NonNMethod);
 377   } else if (CompilerConfig::is_c1_profiling()) {
 378     // Tiered compilation: use all code heaps
 379     return (code_blob_type < CodeBlobType::All);
 380   } else {
 381     // No TieredCompilation: we only need the non-nmethod and non-profiled code heap
 382     return (code_blob_type == CodeBlobType::NonNMethod) ||
 383            (code_blob_type == CodeBlobType::MethodNonProfiled);
 384   }
 385 }
 386 
const char* CodeCache::get_code_heap_flag_name(CodeBlobType code_blob_type) {
  switch(code_blob_type) {
  case CodeBlobType::NonNMethod:
    return "NonNMethodCodeHeapSize";
  case CodeBlobType::MethodNonProfiled:
    return "NonProfiledCodeHeapSize";
  case CodeBlobType::MethodProfiled:
    return "ProfiledCodeHeapSize";
  default:
    ShouldNotReachHere();
    return nullptr;
  }
}
 403 
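// Comparator for the sorted heap lists below: order by CodeBlobType first and by
// address second, so insert_sorted() yields a deterministic iteration order.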
 404 int CodeCache::code_heap_compare(CodeHeap* const &lhs, CodeHeap* const &rhs) {
 405   if (lhs->code_blob_type() == rhs->code_blob_type()) {
 406     return (lhs > rhs) ? 1 : ((lhs < rhs) ? -1 : 0);
 407   } else {
 408     return static_cast<int>(lhs->code_blob_type()) - static_cast<int>(rhs->code_blob_type());
 409   }
 410 }
 411 
 412 void CodeCache::add_heap(CodeHeap* heap) {
 413   assert(!Universe::is_fully_initialized(), "late heap addition?");
 414 
 415   _heaps->insert_sorted<code_heap_compare>(heap);
 416 
 417   CodeBlobType type = heap->code_blob_type();
 418   if (code_blob_type_accepts_nmethod(type)) {
 419     _nmethod_heaps->insert_sorted<code_heap_compare>(heap);
 420   }
 421   if (code_blob_type_accepts_allocable(type)) {
 422     _allocable_heaps->insert_sorted<code_heap_compare>(heap);
 423   }
 424 }
 425 
 426 void CodeCache::add_heap(ReservedSpace rs, const char* name, CodeBlobType code_blob_type) {
 427   // Check if heap is needed
 428   if (!heap_available(code_blob_type)) {
 429     return;
 430   }
 431 
 432   // Create CodeHeap
 433   CodeHeap* heap = new CodeHeap(name, code_blob_type);
 434   add_heap(heap);
 435 
 436   // Reserve Space
 437   size_t size_initial = MIN2((size_t)InitialCodeCacheSize, rs.size());
 438   size_initial = align_up(size_initial, rs.page_size());
 439   if (!heap->reserve(rs, size_initial, CodeCacheSegmentSize)) {
 440     vm_exit_during_initialization(err_msg("Could not reserve enough space in %s (" SIZE_FORMAT "K)",
 441                                           heap->name(), size_initial/K));
 442   }
 443 
 444   // Register the CodeHeap
 445   MemoryService::add_code_heap_memory_pool(heap, name);
 446 }
 447 
 448 CodeHeap* CodeCache::get_code_heap_containing(void* start) {
 449   FOR_ALL_HEAPS(heap) {
 450     if ((*heap)->contains(start)) {
 451       return *heap;
 452     }
 453   }
 454   return nullptr;
 455 }
 456 
 457 CodeHeap* CodeCache::get_code_heap(const void* cb) {
 458   assert(cb != nullptr, "CodeBlob is null");
 459   FOR_ALL_HEAPS(heap) {
 460     if ((*heap)->contains(cb)) {
 461       return *heap;
 462     }
 463   }
 464   ShouldNotReachHere();
 465   return nullptr;
 466 }
 467 
 468 CodeHeap* CodeCache::get_code_heap(CodeBlobType code_blob_type) {
 469   FOR_ALL_HEAPS(heap) {
 470     if ((*heap)->accepts(code_blob_type)) {
 471       return *heap;
 472     }
 473   }
 474   return nullptr;
 475 }
 476 
 477 CodeBlob* CodeCache::first_blob(CodeHeap* heap) {
 478   assert_locked_or_safepoint(CodeCache_lock);
 479   assert(heap != nullptr, "heap is null");
 480   return (CodeBlob*)heap->first();
 481 }
 482 
 483 CodeBlob* CodeCache::first_blob(CodeBlobType code_blob_type) {
 484   if (heap_available(code_blob_type)) {
 485     return first_blob(get_code_heap(code_blob_type));
 486   } else {
 487     return nullptr;
 488   }
 489 }
 490 
 491 CodeBlob* CodeCache::next_blob(CodeHeap* heap, CodeBlob* cb) {
 492   assert_locked_or_safepoint(CodeCache_lock);
 493   assert(heap != nullptr, "heap is null");
 494   return (CodeBlob*)heap->next(cb);
 495 }
 496 
/**
 * Do not acquire the CodeCache lock here: if the caller has not
 * already done so, the code cache will contain a garbage CodeBlob
 * until the caller can run the constructor for the CodeBlob
 * subclass it is busy instantiating.
 */
 504 CodeBlob* CodeCache::allocate(uint size, CodeBlobType code_blob_type, bool handle_alloc_failure, CodeBlobType orig_code_blob_type) {
 505   assert_locked_or_safepoint(CodeCache_lock);
 506   assert(size > 0, "Code cache allocation request must be > 0");
 507   if (size == 0) {
 508     return nullptr;
 509   }
 510   CodeBlob* cb = nullptr;
 511 
 512   // Get CodeHeap for the given CodeBlobType
 513   CodeHeap* heap = get_code_heap(code_blob_type);
 514   assert(heap != nullptr, "heap is null");
 515 
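  // Allocate from the selected heap. If the heap is full, try to expand it; if
  // expansion fails, fall back to another heap (segmented cache only) or report
  // a full code cache.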
 516   while (true) {
 517     cb = (CodeBlob*)heap->allocate(size);
 518     if (cb != nullptr) break;
 519     if (!heap->expand_by(CodeCacheExpansionSize)) {
 520       // Save original type for error reporting
 521       if (orig_code_blob_type == CodeBlobType::All) {
 522         orig_code_blob_type = code_blob_type;
 523       }
 524       // Expansion failed
 525       if (SegmentedCodeCache) {
 526         // Fallback solution: Try to store code in another code heap.
 527         // NonNMethod -> MethodNonProfiled -> MethodProfiled (-> MethodNonProfiled)
 528         CodeBlobType type = code_blob_type;
 529         switch (type) {
 530         case CodeBlobType::NonNMethod:
 531           type = CodeBlobType::MethodNonProfiled;
 532           break;
 533         case CodeBlobType::MethodNonProfiled:
 534           type = CodeBlobType::MethodProfiled;
 535           break;
 536         case CodeBlobType::MethodProfiled:
 537           // Avoid loop if we already tried that code heap
 538           if (type == orig_code_blob_type) {
 539             type = CodeBlobType::MethodNonProfiled;
 540           }
 541           break;
 542         default:
 543           break;
 544         }
 545         if (type != code_blob_type && type != orig_code_blob_type && heap_available(type)) {
 546           if (PrintCodeCacheExtension) {
 547             tty->print_cr("Extension of %s failed. Trying to allocate in %s.",
 548                           heap->name(), get_code_heap(type)->name());
 549           }
 550           return allocate(size, type, handle_alloc_failure, orig_code_blob_type);
 551         }
 552       }
 553       if (handle_alloc_failure) {
 554         MutexUnlocker mu(CodeCache_lock, Mutex::_no_safepoint_check_flag);
 555         CompileBroker::handle_full_code_cache(orig_code_blob_type);
 556       }
 557       return nullptr;
 558     } else {
 559       OrderAccess::release(); // ensure heap expansion is visible to an asynchronous observer (e.g. CodeHeapPool::get_memory_usage())
 560     }
 561     if (PrintCodeCacheExtension) {
 562       ResourceMark rm;
 563       if (_nmethod_heaps->length() >= 1) {
 564         tty->print("%s", heap->name());
 565       } else {
 566         tty->print("CodeCache");
 567       }
 568       tty->print_cr(" extended to [" INTPTR_FORMAT ", " INTPTR_FORMAT "] (%zd bytes)",
 569                     (intptr_t)heap->low_boundary(), (intptr_t)heap->high(),
 570                     (address)heap->high() - (address)heap->low_boundary());
 571     }
 572   }
 573   print_trace("allocation", cb, size);
 574   return cb;
 575 }
 576 
 577 void CodeCache::free(CodeBlob* cb) {
 578   assert_locked_or_safepoint(CodeCache_lock);
 579   CodeHeap* heap = get_code_heap(cb);
 580   print_trace("free", cb);
 581   if (cb->is_nmethod()) {
 582     heap->set_nmethod_count(heap->nmethod_count() - 1);
 583     if (((nmethod *)cb)->has_dependencies()) {
 584       Atomic::dec(&_number_of_nmethods_with_dependencies);
 585     }
 586   }
 587   if (cb->is_adapter_blob()) {
 588     heap->set_adapter_count(heap->adapter_count() - 1);
 589   }
 590 
 591   cb->~CodeBlob();
 592   // Get heap for given CodeBlob and deallocate
 593   heap->deallocate(cb);
 594 
 595   assert(heap->blob_count() >= 0, "sanity check");
 596 }
 597 
 598 void CodeCache::free_unused_tail(CodeBlob* cb, size_t used) {
 599   assert_locked_or_safepoint(CodeCache_lock);
 600   guarantee(cb->is_buffer_blob() && strncmp("Interpreter", cb->name(), 11) == 0, "Only possible for interpreter!");
 601   print_trace("free_unused_tail", cb);
 602 
 603   // We also have to account for the extra space (i.e. header) used by the CodeBlob
 604   // which provides the memory (see BufferBlob::create() in codeBlob.cpp).
 605   used += CodeBlob::align_code_offset(cb->header_size());
 606 
 607   // Get heap for given CodeBlob and deallocate its unused tail
 608   get_code_heap(cb)->deallocate_tail(cb, used);
 609   // Adjust the sizes of the CodeBlob
 610   cb->adjust_size(used);
 611 }
 612 
 613 void CodeCache::commit(CodeBlob* cb) {
 614   // this is called by nmethod::nmethod, which must already own CodeCache_lock
 615   assert_locked_or_safepoint(CodeCache_lock);
 616   CodeHeap* heap = get_code_heap(cb);
 617   if (cb->is_nmethod()) {
 618     heap->set_nmethod_count(heap->nmethod_count() + 1);
 619     if (((nmethod *)cb)->has_dependencies()) {
 620       Atomic::inc(&_number_of_nmethods_with_dependencies);
 621     }
 622   }
 623   if (cb->is_adapter_blob()) {
 624     heap->set_adapter_count(heap->adapter_count() + 1);
 625   }
 626 }
 627 
 628 bool CodeCache::contains(void *p) {
 629   // S390 uses contains() in current_frame(), which is used before
 630   // code cache initialization if NativeMemoryTracking=detail is set.
 631   S390_ONLY(if (_heaps == nullptr) return false;)
 632   // It should be ok to call contains without holding a lock.
 633   FOR_ALL_HEAPS(heap) {
 634     if ((*heap)->contains(p)) {
 635       return true;
 636     }
 637   }
 638   return false;
 639 }
 640 
 641 bool CodeCache::contains(nmethod *nm) {
 642   return contains((void *)nm);
 643 }
 644 
// This method is safe to call without holding the CodeCache_lock. It only depends on the _segmap containing
// valid indices, which it always does, as long as the CodeBlob is not in the process of being recycled.
 647 CodeBlob* CodeCache::find_blob(void* start) {
 648   // NMT can walk the stack before code cache is created
 649   if (_heaps != nullptr) {
 650     CodeHeap* heap = get_code_heap_containing(start);
 651     if (heap != nullptr) {
 652       return heap->find_blob(start);
 653     }
 654   }
 655   return nullptr;
 656 }
 657 
 658 nmethod* CodeCache::find_nmethod(void* start) {
 659   CodeBlob* cb = find_blob(start);
 660   assert(cb == nullptr || cb->is_nmethod(), "did not find an nmethod");
 661   return (nmethod*)cb;
 662 }
 663 
 664 void CodeCache::blobs_do(void f(CodeBlob* nm)) {
 665   assert_locked_or_safepoint(CodeCache_lock);
 666   FOR_ALL_HEAPS(heap) {
 667     FOR_ALL_BLOBS(cb, *heap) {
 668       f(cb);
 669     }
 670   }
 671 }
 672 
 673 void CodeCache::nmethods_do(void f(nmethod* nm)) {
 674   assert_locked_or_safepoint(CodeCache_lock);
 675   NMethodIterator iter(NMethodIterator::all);
 676   while(iter.next()) {
 677     f(iter.method());
 678   }
 679 }
 680 
 681 void CodeCache::nmethods_do(NMethodClosure* cl) {
 682   assert_locked_or_safepoint(CodeCache_lock);
 683   NMethodIterator iter(NMethodIterator::all);
 684   while(iter.next()) {
 685     cl->do_nmethod(iter.method());
 686   }
 687 }
 688 
 689 void CodeCache::metadata_do(MetadataClosure* f) {
 690   assert_locked_or_safepoint(CodeCache_lock);
 691   NMethodIterator iter(NMethodIterator::all);
 692   while(iter.next()) {
 693     iter.method()->metadata_do(f);
 694   }
 695 }
 696 
// Calculate the number of GCs an nmethod may go unused
// before it is considered cold.
 699 void CodeCache::update_cold_gc_count() {
 700   if (!MethodFlushing || !UseCodeCacheFlushing || NmethodSweepActivity == 0) {
 701     // No aging
 702     return;
 703   }
 704 
 705   size_t last_used = _last_unloading_used;
 706   double last_time = _last_unloading_time;
 707 
 708   double time = os::elapsedTime();
 709 
 710   size_t free = unallocated_capacity();
 711   size_t max = max_capacity();
 712   size_t used = max - free;
 713   double gc_interval = time - last_time;
 714 
 715   _unloading_threshold_gc_requested = false;
 716   _last_unloading_time = time;
 717   _last_unloading_used = used;
 718 
 719   if (last_time == 0.0) {
 720     // The first GC doesn't have enough information to make good
 721     // decisions, so just keep everything afloat
 722     log_info(codecache)("Unknown code cache pressure; don't age code");
 723     return;
 724   }
 725 
 726   if (gc_interval <= 0.0 || last_used >= used) {
 727     // Dodge corner cases where there is no pressure or negative pressure
 728     // on the code cache. Just don't unload when this happens.
 729     _cold_gc_count = INT_MAX;
 730     log_info(codecache)("No code cache pressure; don't age code");
 731     return;
 732   }
 733 
 734   double allocation_rate = (used - last_used) / gc_interval;
 735 
 736   _unloading_allocation_rates.add(allocation_rate);
 737   _unloading_gc_intervals.add(gc_interval);
 738 
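  // StartAggressiveSweepingAt is a percentage of the maximum capacity; with less
  // free space than that we are in the red zone.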
 739   size_t aggressive_sweeping_free_threshold = StartAggressiveSweepingAt / 100.0 * max;
 740   if (free < aggressive_sweeping_free_threshold) {
 741     // We are already in the red zone; be very aggressive to avoid disaster
 742     // But not more aggressive than 2. This ensures that an nmethod must
 743     // have been unused at least between two GCs to be considered cold still.
 744     _cold_gc_count = 2;
 745     log_info(codecache)("Code cache critically low; use aggressive aging");
 746     return;
 747   }
 748 
 749   // The code cache has an expected time for cold nmethods to "time out"
 750   // when they have not been used. The time for nmethods to time out
 751   // depends on how long we expect we can keep allocating code until
 752   // aggressive sweeping starts, based on sampled allocation rates.
 753   double average_gc_interval = _unloading_gc_intervals.avg();
 754   double average_allocation_rate = _unloading_allocation_rates.avg();
 755   double time_to_aggressive = ((double)(free - aggressive_sweeping_free_threshold)) / average_allocation_rate;
 756   double cold_timeout = time_to_aggressive / NmethodSweepActivity;
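  // A larger NmethodSweepActivity shortens the timeout, i.e. ages nmethods more aggressively.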
 757 
 758   // Convert time to GC cycles, and crop at INT_MAX. The reason for
 759   // that is that the _cold_gc_count will be added to an epoch number
 760   // and that addition must not overflow, or we can crash the VM.
 761   // But not more aggressive than 2. This ensures that an nmethod must
 762   // have been unused at least between two GCs to be considered cold still.
 763   _cold_gc_count = MAX2(MIN2((uint64_t)(cold_timeout / average_gc_interval), (uint64_t)INT_MAX), (uint64_t)2);
 764 
 765   double used_ratio = double(used) / double(max);
 766   double last_used_ratio = double(last_used) / double(max);
 767   log_info(codecache)("Allocation rate: %.3f KB/s, time to aggressive unloading: %.3f s, cold timeout: %.3f s, cold gc count: " UINT64_FORMAT
 768                       ", used: %.3f MB (%.3f%%), last used: %.3f MB (%.3f%%), gc interval: %.3f s",
 769                       average_allocation_rate / K, time_to_aggressive, cold_timeout, _cold_gc_count,
 770                       double(used) / M, used_ratio * 100.0, double(last_used) / M, last_used_ratio * 100.0, average_gc_interval);
 771 
 772 }
 773 
 774 uint64_t CodeCache::cold_gc_count() {
 775   return _cold_gc_count;
 776 }
 777 
 778 void CodeCache::gc_on_allocation() {
 779   if (!is_init_completed()) {
 780     // Let's not heuristically trigger GCs before the JVM is ready for GCs, no matter what
 781     return;
 782   }
 783 
 784   size_t free = unallocated_capacity();
 785   size_t max = max_capacity();
 786   size_t used = max - free;
 787   double free_ratio = double(free) / double(max);
 788   if (free_ratio <= StartAggressiveSweepingAt / 100.0)  {
 789     // In case the GC is concurrent, we make sure only one thread requests the GC.
 790     if (Atomic::cmpxchg(&_unloading_threshold_gc_requested, false, true) == false) {
 791       log_info(codecache)("Triggering aggressive GC due to having only %.3f%% free memory", free_ratio * 100.0);
 792       Universe::heap()->collect(GCCause::_codecache_GC_aggressive);
 793     }
 794     return;
 795   }
 796 
 797   size_t last_used = _last_unloading_used;
 798   if (last_used >= used) {
 799     // No increase since last GC; no need to sweep yet
 800     return;
 801   }
 802   size_t allocated_since_last = used - last_used;
 803   double allocated_since_last_ratio = double(allocated_since_last) / double(max);
 804   double threshold = SweeperThreshold / 100.0;
 805   double used_ratio = double(used) / double(max);
 806   double last_used_ratio = double(last_used) / double(max);
 807   if (used_ratio > threshold) {
 808     // After threshold is reached, scale it by free_ratio so that more aggressive
 809     // GC is triggered as we approach code cache exhaustion
 810     threshold *= free_ratio;
 811   }
  // If code cache space has been allocated without any GC at all, make sure
  // a GC is eventually triggered to avoid trouble.
 814   if (allocated_since_last_ratio > threshold) {
 815     // In case the GC is concurrent, we make sure only one thread requests the GC.
 816     if (Atomic::cmpxchg(&_unloading_threshold_gc_requested, false, true) == false) {
 817       log_info(codecache)("Triggering threshold (%.3f%%) GC due to allocating %.3f%% since last unloading (%.3f%% used -> %.3f%% used)",
 818                           threshold * 100.0, allocated_since_last_ratio * 100.0, last_used_ratio * 100.0, used_ratio * 100.0);
 819       Universe::heap()->collect(GCCause::_codecache_GC_threshold);
 820     }
 821   }
 822 }
 823 
// We initialize the _gc_epoch to 2, because previous_completed_gc_marking_cycle
// subtracts 2 from it, and the type is unsigned. We don't want underflow.
 826 //
 827 // Odd values mean that marking is in progress, and even values mean that no
 828 // marking is currently active.
 829 uint64_t CodeCache::_gc_epoch = 2;
 830 
// After how many GCs without being used do we consider an nmethod cold?
 832 uint64_t CodeCache::_cold_gc_count = INT_MAX;
 833 
 834 double CodeCache::_last_unloading_time = 0.0;
 835 size_t CodeCache::_last_unloading_used = 0;
 836 volatile bool CodeCache::_unloading_threshold_gc_requested = false;
 837 TruncatedSeq CodeCache::_unloading_gc_intervals(10 /* samples */);
 838 TruncatedSeq CodeCache::_unloading_allocation_rates(10 /* samples */);
 839 
 840 uint64_t CodeCache::gc_epoch() {
 841   return _gc_epoch;
 842 }
 843 
 844 bool CodeCache::is_gc_marking_cycle_active() {
 845   // Odd means that marking is active
 846   return (_gc_epoch % 2) == 1;
 847 }
 848 
 849 uint64_t CodeCache::previous_completed_gc_marking_cycle() {
 850   if (is_gc_marking_cycle_active()) {
 851     return _gc_epoch - 2;
 852   } else {
 853     return _gc_epoch - 1;
 854   }
 855 }
 856 
 857 void CodeCache::on_gc_marking_cycle_start() {
 858   assert(!is_gc_marking_cycle_active(), "Previous marking cycle never ended");
 859   ++_gc_epoch;
 860 }
 861 
// Once started, the code cache marking cycle must only be finished after marking of
// the Java heap is complete. Otherwise nmethods could appear to not be on stack even
// if they have frames in continuation StackChunks that were not yet visited.
 865 void CodeCache::on_gc_marking_cycle_finish() {
 866   assert(is_gc_marking_cycle_active(), "Marking cycle started before last one finished");
 867   ++_gc_epoch;
 868   update_cold_gc_count();
 869 }
 870 
 871 void CodeCache::arm_all_nmethods() {
 872   BarrierSetNMethod* bs_nm = BarrierSet::barrier_set()->barrier_set_nmethod();
 873   if (bs_nm != nullptr) {
 874     bs_nm->arm_all_nmethods();
 875   }
 876 }
 877 
 878 // Mark nmethods for unloading if they contain otherwise unreachable oops.
 879 void CodeCache::do_unloading(bool unloading_occurred) {
 880   assert_locked_or_safepoint(CodeCache_lock);
 881   NMethodIterator iter(NMethodIterator::all);
 882   while(iter.next()) {
 883     iter.method()->do_unloading(unloading_occurred);
 884   }
 885 }
 886 
 887 void CodeCache::verify_clean_inline_caches() {
 888 #ifdef ASSERT
 889   NMethodIterator iter(NMethodIterator::not_unloading);
 890   while(iter.next()) {
 891     nmethod* nm = iter.method();
 892     nm->verify_clean_inline_caches();
 893     nm->verify();
 894   }
 895 #endif
 896 }
 897 
 898 // Defer freeing of concurrently cleaned ExceptionCache entries until
 899 // after a global handshake operation.
 900 void CodeCache::release_exception_cache(ExceptionCache* entry) {
 901   if (SafepointSynchronize::is_at_safepoint()) {
 902     delete entry;
 903   } else {
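    // Lock-free push onto the purge list: retry the CAS until this entry has been
    // installed as the new head.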
 904     for (;;) {
 905       ExceptionCache* purge_list_head = Atomic::load(&_exception_cache_purge_list);
 906       entry->set_purge_list_next(purge_list_head);
 907       if (Atomic::cmpxchg(&_exception_cache_purge_list, purge_list_head, entry) == purge_list_head) {
 908         break;
 909       }
 910     }
 911   }
 912 }
 913 
 914 // Delete exception caches that have been concurrently unlinked,
 915 // followed by a global handshake operation.
 916 void CodeCache::purge_exception_caches() {
 917   ExceptionCache* curr = _exception_cache_purge_list;
 918   while (curr != nullptr) {
 919     ExceptionCache* next = curr->purge_list_next();
 920     delete curr;
 921     curr = next;
 922   }
 923   _exception_cache_purge_list = nullptr;
 924 }
 925 
// Restart the compiler if possible and required.
 927 void CodeCache::maybe_restart_compiler(size_t freed_memory) {
 928 
 929   // Try to start the compiler again if we freed any memory
 930   if (!CompileBroker::should_compile_new_jobs() && freed_memory != 0) {
 931     CompileBroker::set_should_compile_new_jobs(CompileBroker::run_compilation);
 932     log_info(codecache)("Restarting compiler");
 933     EventJITRestart event;
 934     event.set_freedMemory(freed_memory);
 935     event.set_codeCacheMaxCapacity(CodeCache::max_capacity());
 936     event.commit();
 937   }
 938 }
 939 
 940 uint8_t CodeCache::_unloading_cycle = 1;
 941 
 942 void CodeCache::increment_unloading_cycle() {
 943   // 2-bit value (see IsUnloadingState in nmethod.cpp for details)
 944   // 0 is reserved for new methods.
 945   _unloading_cycle = (_unloading_cycle + 1) % 4;
 946   if (_unloading_cycle == 0) {
 947     _unloading_cycle = 1;
 948   }
 949 }
 950 
 951 CodeCache::UnlinkingScope::UnlinkingScope(BoolObjectClosure* is_alive)
 952   : _is_unloading_behaviour(is_alive)
 953 {
 954   _saved_behaviour = IsUnloadingBehaviour::current();
 955   IsUnloadingBehaviour::set_current(&_is_unloading_behaviour);
 956   increment_unloading_cycle();
 957   DependencyContext::cleaning_start();
 958 }
 959 
 960 CodeCache::UnlinkingScope::~UnlinkingScope() {
 961   IsUnloadingBehaviour::set_current(_saved_behaviour);
 962   DependencyContext::cleaning_end();
 963 }
 964 
 965 void CodeCache::verify_oops() {
 966   MutexLocker mu(CodeCache_lock, Mutex::_no_safepoint_check_flag);
 967   VerifyOopClosure voc;
 968   NMethodIterator iter(NMethodIterator::not_unloading);
 969   while(iter.next()) {
 970     nmethod* nm = iter.method();
 971     nm->oops_do(&voc);
 972     nm->verify_oop_relocations();
 973   }
 974 }
 975 
 976 int CodeCache::blob_count(CodeBlobType code_blob_type) {
 977   CodeHeap* heap = get_code_heap(code_blob_type);
 978   return (heap != nullptr) ? heap->blob_count() : 0;
 979 }
 980 
 981 int CodeCache::blob_count() {
 982   int count = 0;
 983   FOR_ALL_HEAPS(heap) {
 984     count += (*heap)->blob_count();
 985   }
 986   return count;
 987 }
 988 
 989 int CodeCache::nmethod_count(CodeBlobType code_blob_type) {
 990   CodeHeap* heap = get_code_heap(code_blob_type);
 991   return (heap != nullptr) ? heap->nmethod_count() : 0;
 992 }
 993 
 994 int CodeCache::nmethod_count() {
 995   int count = 0;
 996   for (CodeHeap* heap : *_nmethod_heaps) {
 997     count += heap->nmethod_count();
 998   }
 999   return count;
1000 }
1001 
1002 int CodeCache::adapter_count(CodeBlobType code_blob_type) {
1003   CodeHeap* heap = get_code_heap(code_blob_type);
1004   return (heap != nullptr) ? heap->adapter_count() : 0;
1005 }
1006 
1007 int CodeCache::adapter_count() {
1008   int count = 0;
1009   FOR_ALL_HEAPS(heap) {
1010     count += (*heap)->adapter_count();
1011   }
1012   return count;
1013 }
1014 
1015 address CodeCache::low_bound(CodeBlobType code_blob_type) {
1016   CodeHeap* heap = get_code_heap(code_blob_type);
1017   return (heap != nullptr) ? (address)heap->low_boundary() : nullptr;
1018 }
1019 
1020 address CodeCache::high_bound(CodeBlobType code_blob_type) {
1021   CodeHeap* heap = get_code_heap(code_blob_type);
1022   return (heap != nullptr) ? (address)heap->high_boundary() : nullptr;
1023 }
1024 
1025 size_t CodeCache::capacity() {
1026   size_t cap = 0;
1027   FOR_ALL_ALLOCABLE_HEAPS(heap) {
1028     cap += (*heap)->capacity();
1029   }
1030   return cap;
1031 }
1032 
1033 size_t CodeCache::unallocated_capacity(CodeBlobType code_blob_type) {
1034   CodeHeap* heap = get_code_heap(code_blob_type);
1035   return (heap != nullptr) ? heap->unallocated_capacity() : 0;
1036 }
1037 
1038 size_t CodeCache::unallocated_capacity() {
1039   size_t unallocated_cap = 0;
1040   FOR_ALL_ALLOCABLE_HEAPS(heap) {
1041     unallocated_cap += (*heap)->unallocated_capacity();
1042   }
1043   return unallocated_cap;
1044 }
1045 
1046 size_t CodeCache::max_capacity() {
1047   size_t max_cap = 0;
1048   FOR_ALL_ALLOCABLE_HEAPS(heap) {
1049     max_cap += (*heap)->max_capacity();
1050   }
1051   return max_cap;
1052 }
1053 
bool CodeCache::is_non_nmethod(address addr) {
  CodeHeap* heap = get_code_heap(CodeBlobType::NonNMethod);
  return heap->contains(addr);
}
1058 
1059 size_t CodeCache::max_distance_to_non_nmethod() {
1060   if (!SegmentedCodeCache) {
1061     return ReservedCodeCacheSize;
1062   } else {
    CodeHeap* heap = get_code_heap(CodeBlobType::NonNMethod);
    // the max distance is minimized by placing the NonNMethod segment
    // in between MethodProfiled and MethodNonProfiled segments
    size_t dist1 = (size_t)heap->high() - (size_t)_low_bound;
    size_t dist2 = (size_t)_high_bound - (size_t)heap->low();
1068     return dist1 > dist2 ? dist1 : dist2;
1069   }
1070 }
1071 
1072 // Returns the reverse free ratio. E.g., if 25% (1/4) of the code cache
1073 // is free, reverse_free_ratio() returns 4.
// Since the code heap for each type of code blob falls forward to the next
1075 // type of code heap, return the reverse free ratio for the entire
1076 // code cache.
1077 double CodeCache::reverse_free_ratio() {
1078   double unallocated = MAX2((double)unallocated_capacity(), 1.0); // Avoid division by 0;
1079   double max = (double)max_capacity();
1080   double result = max / unallocated;
1081   assert (max >= unallocated, "Must be");
1082   assert (result >= 1.0, "reverse_free_ratio must be at least 1. It is %f", result);
1083   return result;
1084 }
1085 
1086 size_t CodeCache::bytes_allocated_in_freelists() {
1087   size_t allocated_bytes = 0;
1088   FOR_ALL_ALLOCABLE_HEAPS(heap) {
1089     allocated_bytes += (*heap)->allocated_in_freelist();
1090   }
1091   return allocated_bytes;
1092 }
1093 
1094 int CodeCache::allocated_segments() {
1095   int number_of_segments = 0;
1096   FOR_ALL_ALLOCABLE_HEAPS(heap) {
1097     number_of_segments += (*heap)->allocated_segments();
1098   }
1099   return number_of_segments;
1100 }
1101 
1102 size_t CodeCache::freelists_length() {
1103   size_t length = 0;
1104   FOR_ALL_ALLOCABLE_HEAPS(heap) {
1105     length += (*heap)->freelist_length();
1106   }
1107   return length;
1108 }
1109 
1110 void icache_init();
1111 
1112 void CodeCache::initialize() {
1113   assert(CodeCacheSegmentSize >= (uintx)CodeEntryAlignment, "CodeCacheSegmentSize must be large enough to align entry points");
1114 #ifdef COMPILER2
1115   assert(CodeCacheSegmentSize >= (uintx)OptoLoopAlignment,  "CodeCacheSegmentSize must be large enough to align inner loops");
1116 #endif
1117   assert(CodeCacheSegmentSize >= sizeof(jdouble),    "CodeCacheSegmentSize must be large enough to align constants");
  // This was originally just a check of the alignment, causing failure. Instead, we
  // round CodeCacheExpansionSize up to the page size. In particular, Solaris moved to
  // a larger default page size.
1121   CodeCacheExpansionSize = align_up(CodeCacheExpansionSize, os::vm_page_size());
1122 
1123   if (SegmentedCodeCache) {
1124     // Use multiple code heaps
1125     initialize_heaps();
1126   } else {
1127     // Use a single code heap
1128     FLAG_SET_ERGO(NonNMethodCodeHeapSize, (uintx)os::vm_page_size());
1129     FLAG_SET_ERGO(ProfiledCodeHeapSize, 0);
1130     FLAG_SET_ERGO(NonProfiledCodeHeapSize, 0);
1131 
1132     // If InitialCodeCacheSize is equal to ReservedCodeCacheSize, then it's more likely
1133     // users want to use the largest available page.
1134     const size_t min_pages = (InitialCodeCacheSize == ReservedCodeCacheSize) ? 1 : 8;
1135     ReservedSpace rs = reserve_heap_memory(ReservedCodeCacheSize, page_size(false, min_pages));
1136     // Register CodeHeaps with LSan as we sometimes embed pointers to malloc memory.
1137     LSAN_REGISTER_ROOT_REGION(rs.base(), rs.size());
1138     add_heap(rs, "CodeCache", CodeBlobType::All);
1139   }
1140 
1141   // Initialize ICache flush mechanism
1142   // This service is needed for os::register_code_area
1143   icache_init();
1144 
1145   // Give OS a chance to register generated code area.
1146   // This is used on Windows 64 bit platforms to register
1147   // Structured Exception Handlers for our generated code.
1148   os::register_code_area((char*)low_bound(), (char*)high_bound());
1149 }
1150 
1151 void codeCache_init() {
1152   CodeCache::initialize();
1153 }
1154 
1155 //------------------------------------------------------------------------------------------------
1156 
1157 bool CodeCache::has_nmethods_with_dependencies() {
1158   return Atomic::load_acquire(&_number_of_nmethods_with_dependencies) != 0;
1159 }
1160 
1161 void CodeCache::clear_inline_caches() {
1162   assert_locked_or_safepoint(CodeCache_lock);
1163   NMethodIterator iter(NMethodIterator::not_unloading);
1164   while(iter.next()) {
1165     iter.method()->clear_inline_caches();
1166   }
1167 }
1168 
1169 // Only used by whitebox API
1170 void CodeCache::cleanup_inline_caches_whitebox() {
1171   assert_locked_or_safepoint(CodeCache_lock);
1172   NMethodIterator iter(NMethodIterator::not_unloading);
1173   while(iter.next()) {
1174     iter.method()->cleanup_inline_caches_whitebox();
1175   }
1176 }
1177 
1178 // Keeps track of time spent for checking dependencies
1179 NOT_PRODUCT(static elapsedTimer dependentCheckTime;)
1180 
1181 #ifndef PRODUCT
// Check if any of the live methods' dependencies have been invalidated.
1183 // (this is expensive!)
1184 static void check_live_nmethods_dependencies(DepChange& changes) {
1185   // Checked dependencies are allocated into this ResourceMark
1186   ResourceMark rm;
1187 
1188   // Turn off dependency tracing while actually testing dependencies.
1189   FlagSetting fs(Dependencies::_verify_in_progress, true);
1190 
1191   typedef ResourceHashtable<DependencySignature, int, 11027,
1192                             AnyObj::RESOURCE_AREA, mtInternal,
1193                             &DependencySignature::hash,
1194                             &DependencySignature::equals> DepTable;
1195 
1196   DepTable* table = new DepTable();
1197 
1198   // Iterate over live nmethods and check dependencies of all nmethods that are not
1199   // marked for deoptimization. A particular dependency is only checked once.
1200   NMethodIterator iter(NMethodIterator::not_unloading);
1201   while(iter.next()) {
1202     nmethod* nm = iter.method();
1203     // Only notify for live nmethods
1204     if (!nm->is_marked_for_deoptimization()) {
1205       for (Dependencies::DepStream deps(nm); deps.next(); ) {
1206         // Construct abstraction of a dependency.
1207         DependencySignature* current_sig = new DependencySignature(deps);
1208 
1209         // Determine if dependency is already checked. table->put(...) returns
1210         // 'true' if the dependency is added (i.e., was not in the hashtable).
1211         if (table->put(*current_sig, 1)) {
1212           if (deps.check_dependency() != nullptr) {
1213             // Dependency checking failed. Print out information about the failed
1214             // dependency and finally fail with an assert. We can fail here, since
1215             // dependency checking is never done in a product build.
1216             tty->print_cr("Failed dependency:");
1217             changes.print();
1218             nm->print();
1219             nm->print_dependencies_on(tty);
1220             assert(false, "Should have been marked for deoptimization");
1221           }
1222         }
1223       }
1224     }
1225   }
1226 }
1227 #endif
1228 
1229 void CodeCache::mark_for_deoptimization(DeoptimizationScope* deopt_scope, KlassDepChange& changes) {
1230   MutexLocker mu(CodeCache_lock, Mutex::_no_safepoint_check_flag);
1231 
1232   // search the hierarchy looking for nmethods which are affected by the loading of this class
1233 
  // then search the interfaces this class implements looking for nmethods
  // which might be dependent on the fact that an interface only had one
  // implementor.
  // nmethod::check_all_dependencies only works correctly if no safepoint
  // can happen
1239   NoSafepointVerifier nsv;
1240   for (DepChange::ContextStream str(changes, nsv); str.next(); ) {
1241     InstanceKlass* d = str.klass();
1242     d->mark_dependent_nmethods(deopt_scope, changes);
1243   }
1244 
1245 #ifndef PRODUCT
1246   if (VerifyDependencies) {
1247     // Object pointers are used as unique identifiers for dependency arguments. This
1248     // is only possible if no safepoint, i.e., GC occurs during the verification code.
1249     dependentCheckTime.start();
1250     check_live_nmethods_dependencies(changes);
1251     dependentCheckTime.stop();
1252   }
1253 #endif
1254 }
1255 
1256 #if INCLUDE_JVMTI
1257 // RedefineClasses support for saving nmethods that are dependent on "old" methods.
1258 // We don't really expect this table to grow very large.  If it does, it can become a hashtable.
1259 static GrowableArray<nmethod*>* old_nmethod_table = nullptr;
1260 
1261 static void add_to_old_table(nmethod* c) {
1262   if (old_nmethod_table == nullptr) {
1263     old_nmethod_table = new (mtCode) GrowableArray<nmethod*>(100, mtCode);
1264   }
1265   old_nmethod_table->push(c);
1266 }
1267 
1268 static void reset_old_method_table() {
1269   if (old_nmethod_table != nullptr) {
1270     delete old_nmethod_table;
1271     old_nmethod_table = nullptr;
1272   }
1273 }
1274 
1275 // Remove this method when flushed.
1276 void CodeCache::unregister_old_nmethod(nmethod* c) {
1277   assert_lock_strong(CodeCache_lock);
1278   if (old_nmethod_table != nullptr) {
1279     int index = old_nmethod_table->find(c);
1280     if (index != -1) {
1281       old_nmethod_table->delete_at(index);
1282     }
1283   }
1284 }
1285 
1286 void CodeCache::old_nmethods_do(MetadataClosure* f) {
1287   // Walk old method table and mark those on stack.
1288   int length = 0;
1289   if (old_nmethod_table != nullptr) {
1290     length = old_nmethod_table->length();
1291     for (int i = 0; i < length; i++) {
1292       // Walk all methods saved on the last pass.  Concurrent class unloading may
1293       // also be looking at this method's metadata, so don't delete it yet if
1294       // it is marked as unloaded.
1295       old_nmethod_table->at(i)->metadata_do(f);
1296     }
1297   }
1298   log_debug(redefine, class, nmethod)("Walked %d nmethods for mark_on_stack", length);
1299 }
1300 
1301 // Walk compiled methods and mark dependent methods for deoptimization.
1302 void CodeCache::mark_dependents_for_evol_deoptimization(DeoptimizationScope* deopt_scope) {
1303   assert(SafepointSynchronize::is_at_safepoint(), "Can only do this at a safepoint!");
1304   // Each redefinition creates a new set of nmethods that have references to "old" Methods
1305   // So delete old method table and create a new one.
1306   reset_old_method_table();
1307 
1308   NMethodIterator iter(NMethodIterator::all);
1309   while(iter.next()) {
1310     nmethod* nm = iter.method();
1311     // Walk all alive nmethods to check for old Methods.
1312     // This includes methods whose inline caches point to old methods, so
1313     // inline cache clearing is unnecessary.
1314     if (nm->has_evol_metadata()) {
1315       deopt_scope->mark(nm);
1316       add_to_old_table(nm);
1317     }
1318   }
1319 }
1320 
1321 void CodeCache::mark_all_nmethods_for_evol_deoptimization(DeoptimizationScope* deopt_scope) {
1322   assert(SafepointSynchronize::is_at_safepoint(), "Can only do this at a safepoint!");
1323   NMethodIterator iter(NMethodIterator::all);
1324   while(iter.next()) {
1325     nmethod* nm = iter.method();
1326     if (!nm->method()->is_method_handle_intrinsic()) {
1327       if (nm->can_be_deoptimized()) {
1328         deopt_scope->mark(nm);
1329       }
1330       if (nm->has_evol_metadata()) {
1331         add_to_old_table(nm);
1332       }
1333     }
1334   }
1335 }
1336 
1337 #endif // INCLUDE_JVMTI
1338 
1339 // Mark methods for deopt (if safe or possible).
1340 void CodeCache::mark_all_nmethods_for_deoptimization(DeoptimizationScope* deopt_scope) {
1341   MutexLocker mu(CodeCache_lock, Mutex::_no_safepoint_check_flag);
1342   NMethodIterator iter(NMethodIterator::not_unloading);
1343   while(iter.next()) {
1344     nmethod* nm = iter.method();
1345     if (!nm->is_native_method()) {
1346       deopt_scope->mark(nm);
1347     }
1348   }
1349 }
1350 
1351 void CodeCache::mark_for_deoptimization(DeoptimizationScope* deopt_scope, Method* dependee) {
1352   MutexLocker mu(CodeCache_lock, Mutex::_no_safepoint_check_flag);
1353 
1354   NMethodIterator iter(NMethodIterator::not_unloading);
1355   while(iter.next()) {
1356     nmethod* nm = iter.method();
1357     if (nm->is_dependent_on_method(dependee)) {
1358       deopt_scope->mark(nm);
1359     }
1360   }
1361 }
1362 
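// For each nmethod marked for deoptimization, block new activations (make_not_entrant)
// and make sure existing activations will be deoptimized (make_deoptimized).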
1363 void CodeCache::make_marked_nmethods_deoptimized() {
1364   RelaxedNMethodIterator iter(RelaxedNMethodIterator::not_unloading);
1365   while(iter.next()) {
1366     nmethod* nm = iter.method();
1367     if (nm->is_marked_for_deoptimization() && !nm->has_been_deoptimized() && nm->can_be_deoptimized()) {
1368       nm->make_not_entrant();
1369       nm->make_deoptimized();
1370     }
1371   }
1372 }
1373 
1374 // Marks compiled methods dependent on dependee.
1375 void CodeCache::mark_dependents_on(DeoptimizationScope* deopt_scope, InstanceKlass* dependee) {
1376   assert_lock_strong(Compile_lock);
1377 
1378   if (!has_nmethods_with_dependencies()) {
1379     return;
1380   }
1381 
1382   if (dependee->is_linked()) {
1383     // Class initialization state change.
1384     KlassInitDepChange changes(dependee);
1385     mark_for_deoptimization(deopt_scope, changes);
1386   } else {
1387     // New class is loaded.
1388     NewKlassDepChange changes(dependee);
1389     mark_for_deoptimization(deopt_scope, changes);
1390   }
1391 }
1392 
1393 // Marks and deoptimizes compiled methods dependent on the given method (breakpoint support).
1394 void CodeCache::mark_dependents_on_method_for_breakpoint(const methodHandle& m_h) {
1395   assert(SafepointSynchronize::is_at_safepoint(), "invariant");
1396 
1397   DeoptimizationScope deopt_scope;
1398   // Compute the dependent nmethods
1399   mark_for_deoptimization(&deopt_scope, m_h());
1400   deopt_scope.deoptimize_marked();
1401 }
1402 
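     // Verifies each code heap and every code blob it contains.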
1403 void CodeCache::verify() {
1404   assert_locked_or_safepoint(CodeCache_lock);
1405   FOR_ALL_HEAPS(heap) {
1406     (*heap)->verify();
1407     FOR_ALL_BLOBS(cb, *heap) {
1408       cb->verify();
1409     }
1410   }
1411 }
1412 
1413 // A CodeHeap is full. Print out warning and report event.
1414 PRAGMA_DIAG_PUSH
1415 PRAGMA_FORMAT_NONLITERAL_IGNORED
1416 void CodeCache::report_codemem_full(CodeBlobType code_blob_type, bool print) {
1417   // Get nmethod heap for the given CodeBlobType and build CodeCacheFull event
1418   CodeHeap* heap = get_code_heap(code_blob_type);
1419   assert(heap != nullptr, "heap is null");
1420 
1421   int full_count = heap->report_full();
1422 
1423   if ((full_count == 1) || print) {
1424     // Not yet reported for this heap (or printing was explicitly requested); report now.
1425     if (SegmentedCodeCache) {
1426       ResourceMark rm;
1427       stringStream msg1_stream, msg2_stream;
1428       msg1_stream.print("%s is full. Compiler has been disabled.",
1429                         get_code_heap_name(code_blob_type));
1430       msg2_stream.print("Try increasing the code heap size using -XX:%s=",
1431                  get_code_heap_flag_name(code_blob_type));
1432       const char *msg1 = msg1_stream.as_string();
1433       const char *msg2 = msg2_stream.as_string();
1434 
1435       log_warning(codecache)("%s", msg1);
1436       log_warning(codecache)("%s", msg2);
1437       warning("%s", msg1);
1438       warning("%s", msg2);
1439     } else {
1440       const char *msg1 = "CodeCache is full. Compiler has been disabled.";
1441       const char *msg2 = "Try increasing the code cache size using -XX:ReservedCodeCacheSize=";
1442 
1443       log_warning(codecache)("%s", msg1);
1444       log_warning(codecache)("%s", msg2);
1445       warning("%s", msg1);
1446       warning("%s", msg2);
1447     }
1448     stringStream s;
1449     // Dump code cache into a buffer before locking the tty.
1450     {
1451       MutexLocker mu(CodeCache_lock, Mutex::_no_safepoint_check_flag);
1452       print_summary(&s);
1453     }
1454     {
1455       ttyLocker ttyl;
1456       tty->print("%s", s.freeze());
1457     }
1458 
1459     if (full_count == 1) {
1460       if (PrintCodeHeapAnalytics) {
1461         CompileBroker::print_heapinfo(tty, "all", 4096); // details, may be a lot!
1462       }
1463     }
1464   }
1465 
1466   EventCodeCacheFull event;
1467   if (event.should_commit()) {
1468     event.set_codeBlobType((u1)code_blob_type);
1469     event.set_startAddress((u8)heap->low_boundary());
1470     event.set_commitedTopAddress((u8)heap->high());
1471     event.set_reservedTopAddress((u8)heap->high_boundary());
1472     event.set_entryCount(heap->blob_count());
1473     event.set_methodCount(heap->nmethod_count());
1474     event.set_adaptorCount(heap->adapter_count());
1475     event.set_unallocatedCapacity(heap->unallocated_capacity());
1476     event.set_fullCount(heap->full_count());
1477     event.set_codeCacheMaxCapacity(CodeCache::max_capacity());
1478     event.commit();
1479   }
1480 }
1481 PRAGMA_DIAG_POP
1482 
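     // Reports code cache overhead: free list contents, bytes lost to segment rounding inside
     // allocated blobs, and the size of the segment map.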
1483 void CodeCache::print_memory_overhead() {
1484   size_t wasted_bytes = 0;
1485   FOR_ALL_ALLOCABLE_HEAPS(heap) {
1486       CodeHeap* curr_heap = *heap;
1487       for (CodeBlob* cb = (CodeBlob*)curr_heap->first(); cb != nullptr; cb = (CodeBlob*)curr_heap->next(cb)) {
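             // Each blob is preceded by its HeapBlock header and occupies a whole number of
             // CodeCacheSegmentSize-sized segments; the difference between that rounded-up
             // allocation and the blob's actual size is counted as waste.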
1488         HeapBlock* heap_block = ((HeapBlock*)cb) - 1;
1489         wasted_bytes += heap_block->length() * CodeCacheSegmentSize - cb->size();
1490       }
1491   }
1492   // Print bytes that are allocated in the freelist
1493   ttyLocker ttl;
1494   tty->print_cr("Number of elements in freelist: %zd",       freelists_length());
1495   tty->print_cr("Allocated in freelist:          %zdkB",  bytes_allocated_in_freelists()/K);
1496   tty->print_cr("Unused bytes in CodeBlobs:      %zdkB",  (wasted_bytes/K));
1497   tty->print_cr("Segment map size:               %zdkB",  allocated_segments()/K); // 1 byte per segment
1498 }
1499 
1500 //------------------------------------------------------------------------------------------------
1501 // Non-product version
1502 
1503 #ifndef PRODUCT
1504 
1505 void CodeCache::print_trace(const char* event, CodeBlob* cb, uint size) {
1506   if (PrintCodeCache2) {  // Need to add a new flag
1507     ResourceMark rm;
1508     if (size == 0) {
1509       int s = cb->size();
1510       assert(s >= 0, "CodeBlob size is negative: %d", s);
1511       size = (uint) s;
1512     }
1513     tty->print_cr("CodeCache %s:  addr: " INTPTR_FORMAT ", size: 0x%x", event, p2i(cb), size);
1514   }
1515 }
1516 
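     // Prints a census of the code cache by blob type plus a size histogram of Java nmethods
     // (512-byte buckets).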
1517 void CodeCache::print_internals() {
1518   int nmethodCount = 0;
1519   int runtimeStubCount = 0;
1520   int adapterCount = 0;
1521   int deoptimizationStubCount = 0;
1522   int uncommonTrapStubCount = 0;
1523   int bufferBlobCount = 0;
1524   int total = 0;
1525   int nmethodNotEntrant = 0;
1526   int nmethodJava = 0;
1527   int nmethodNative = 0;
1528   int max_nm_size = 0;
1529   ResourceMark rm;
1530 
1531   int i = 0;
1532   FOR_ALL_ALLOCABLE_HEAPS(heap) {
1533     if ((_nmethod_heaps->length() >= 1) && Verbose) {
1534       tty->print_cr("-- %s --", (*heap)->name());
1535     }
1536     FOR_ALL_BLOBS(cb, *heap) {
1537       total++;
1538       if (cb->is_nmethod()) {
1539         nmethod* nm = (nmethod*)cb;
1540 
1541         if (Verbose && nm->method() != nullptr) {
1542           ResourceMark rm;
1543           char *method_name = nm->method()->name_and_sig_as_C_string();
1544           tty->print("%s", method_name);
1545           if (nm->is_not_entrant()) { tty->print_cr(" not-entrant"); }
1546         }
1547 
1548         nmethodCount++;
1549 
1550         if (nm->is_not_entrant()) { nmethodNotEntrant++; }
1551         if (nm->method() != nullptr && nm->is_native_method()) { nmethodNative++; }
1552 
1553         if (nm->method() != nullptr && nm->is_java_method()) {
1554           nmethodJava++;
1555           max_nm_size = MAX2(max_nm_size, nm->size());
1556         }
1557       } else if (cb->is_runtime_stub()) {
1558         runtimeStubCount++;
1559       } else if (cb->is_deoptimization_stub()) {
1560         deoptimizationStubCount++;
1561       } else if (cb->is_uncommon_trap_stub()) {
1562         uncommonTrapStubCount++;
1563       } else if (cb->is_adapter_blob()) {
1564         adapterCount++;
1565       } else if (cb->is_buffer_blob()) {
1566         bufferBlobCount++;
1567       }
1568     }
1569   }
1570 
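       // Build a histogram of Java nmethod sizes in 512-byte buckets, sized to cover the
       // largest nmethod found above.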
1571   int bucketSize = 512;
1572   int bucketLimit = max_nm_size / bucketSize + 1;
1573   int *buckets = NEW_C_HEAP_ARRAY(int, bucketLimit, mtCode);
1574   memset(buckets, 0, sizeof(int) * bucketLimit);
1575 
1576   NMethodIterator iter(NMethodIterator::all);
1577   while (iter.next()) {
1578     nmethod* nm = iter.method();
1579     if (nm->method() != nullptr && nm->is_java_method()) {
1580       buckets[nm->size() / bucketSize]++;
1581     }
1582   }
1583 
1584   tty->print_cr("Code Cache Entries (total of %d)",total);
1585   tty->print_cr("-------------------------------------------------");
1586   tty->print_cr("nmethods: %d",nmethodCount);
1587   tty->print_cr("\tnot_entrant: %d",nmethodNotEntrant);
1588   tty->print_cr("\tjava: %d",nmethodJava);
1589   tty->print_cr("\tnative: %d",nmethodNative);
1590   tty->print_cr("runtime_stubs: %d",runtimeStubCount);
1591   tty->print_cr("adapters: %d",adapterCount);
1592   tty->print_cr("buffer blobs: %d",bufferBlobCount);
1593   tty->print_cr("deoptimization_stubs: %d",deoptimizationStubCount);
1594   tty->print_cr("uncommon_traps: %d",uncommonTrapStubCount);
1595   tty->print_cr("\nnmethod size distribution");
1596   tty->print_cr("-------------------------------------------------");
1597 
1598   for (int i = 0; i < bucketLimit; i++) {
1599     if (buckets[i] != 0) {
1600       tty->print("%d - %d bytes", i * bucketSize, (i + 1) * bucketSize);
1601       tty->fill_to(40);
1602       tty->print_cr("%d", buckets[i]);
1603     }
1604   }
1605 
1606   FREE_C_HEAP_ARRAY(int, buckets);
1607   print_memory_overhead();
1608 }
1609 
1610 #endif // !PRODUCT
1611 
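     // Prints the code cache summary; in non-product builds with Verbose set, additionally
     // prints nmethod sizes per compilation level, non-nmethod blob sizes and, in WizardMode,
     // OopMap space usage.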
1612 void CodeCache::print() {
1613   print_summary(tty);
1614 
1615 #ifndef PRODUCT
1616   if (!Verbose) return;
1617 
1618   CodeBlob_sizes live[CompLevel_full_optimization + 1];
1619   CodeBlob_sizes runtimeStub;
1620   CodeBlob_sizes uncommonTrapStub;
1621   CodeBlob_sizes deoptimizationStub;
1622   CodeBlob_sizes adapter;
1623   CodeBlob_sizes bufferBlob;
1624   CodeBlob_sizes other;
1625 
1626   FOR_ALL_ALLOCABLE_HEAPS(heap) {
1627     FOR_ALL_BLOBS(cb, *heap) {
1628       if (cb->is_nmethod()) {
1629         const int level = cb->as_nmethod()->comp_level();
1630         assert(0 <= level && level <= CompLevel_full_optimization, "Invalid compilation level");
1631         live[level].add(cb);
1632       } else if (cb->is_runtime_stub()) {
1633         runtimeStub.add(cb);
1634       } else if (cb->is_deoptimization_stub()) {
1635         deoptimizationStub.add(cb);
1636       } else if (cb->is_uncommon_trap_stub()) {
1637         uncommonTrapStub.add(cb);
1638       } else if (cb->is_adapter_blob()) {
1639         adapter.add(cb);
1640       } else if (cb->is_buffer_blob()) {
1641         bufferBlob.add(cb);
1642       } else {
1643         other.add(cb);
1644       }
1645     }
1646   }
1647 
1648   tty->print_cr("nmethod dependency checking time %fs", dependentCheckTime.seconds());
1649 
1650   tty->print_cr("nmethod blobs per compilation level:");
1651   for (int i = 0; i <= CompLevel_full_optimization; i++) {
1652     const char *level_name;
1653     switch (i) {
1654     case CompLevel_none:              level_name = "none";              break;
1655     case CompLevel_simple:            level_name = "simple";            break;
1656     case CompLevel_limited_profile:   level_name = "limited profile";   break;
1657     case CompLevel_full_profile:      level_name = "full profile";      break;
1658     case CompLevel_full_optimization: level_name = "full optimization"; break;
1659     default: level_name = "invalid";  assert(false, "invalid compilation level");
1660     }
1661     tty->print_cr("%s:", level_name);
1662     live[i].print("live");
1663   }
1664 
1665   struct {
1666     const char* name;
1667     const CodeBlob_sizes* sizes;
1668   } non_nmethod_blobs[] = {
1669     { "runtime",        &runtimeStub },
1670     { "uncommon trap",  &uncommonTrapStub },
1671     { "deoptimization", &deoptimizationStub },
1672     { "adapter",        &adapter },
1673     { "buffer blob",    &bufferBlob },
1674     { "other",          &other },
1675   };
1676   tty->print_cr("Non-nmethod blobs:");
1677   for (auto& blob: non_nmethod_blobs) {
1678     blob.sizes->print(blob.name);
1679   }
1680 
1681   if (WizardMode) {
1682     // Print the oop_map usage.
1683     int code_size = 0;
1684     int number_of_blobs = 0;
1685     int number_of_oop_maps = 0;
1686     int map_size = 0;
1687     FOR_ALL_ALLOCABLE_HEAPS(heap) {
1688       FOR_ALL_BLOBS(cb, *heap) {
1689         number_of_blobs++;
1690         code_size += cb->code_size();
1691         ImmutableOopMapSet* set = cb->oop_maps();
1692         if (set != nullptr) {
1693           number_of_oop_maps += set->count();
1694           map_size           += set->nr_of_bytes();
1695         }
1696       }
1697     }
1698     tty->print_cr("OopMaps");
1699     tty->print_cr("  #blobs    = %d", number_of_blobs);
1700     tty->print_cr("  code size = %d", code_size);
1701     tty->print_cr("  #oop_maps = %d", number_of_oop_maps);
1702     tty->print_cr("  map size  = %d", map_size);
1703   }
1704 
1705 #endif // !PRODUCT
1706 }
1707 
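     // Prints a size/used/max_used/free line for each code heap; with 'detailed', also prints
     // the heap bounds, overall totals, blob/nmethod/adapter counts, and the compilation state.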
1708 void CodeCache::print_summary(outputStream* st, bool detailed) {
1709   int full_count = 0;
1710   julong total_used = 0;
1711   julong total_max_used = 0;
1712   julong total_free = 0;
1713   julong total_size = 0;
1714   FOR_ALL_HEAPS(heap_iterator) {
1715     CodeHeap* heap = (*heap_iterator);
1716     size_t total = (heap->high_boundary() - heap->low_boundary());
1717     if (_heaps->length() >= 1) {
1718       st->print("%s:", heap->name());
1719     } else {
1720       st->print("CodeCache:");
1721     }
1722     size_t size = total/K;
1723     size_t used = (total - heap->unallocated_capacity())/K;
1724     size_t max_used = heap->max_allocated_capacity()/K;
1725     size_t free = heap->unallocated_capacity()/K;
1726     total_size += size;
1727     total_used += used;
1728     total_max_used += max_used;
1729     total_free += free;
1730     st->print_cr(" size=" SIZE_FORMAT "Kb used=" SIZE_FORMAT
1731                  "Kb max_used=" SIZE_FORMAT "Kb free=" SIZE_FORMAT "Kb",
1732                  size, used, max_used, free);
1733 
1734     if (detailed) {
1735       st->print_cr(" bounds [" INTPTR_FORMAT ", " INTPTR_FORMAT ", " INTPTR_FORMAT "]",
1736                    p2i(heap->low_boundary()),
1737                    p2i(heap->high()),
1738                    p2i(heap->high_boundary()));
1739 
1740       full_count += get_codemem_full_count(heap->code_blob_type());
1741     }
1742   }
1743 
1744   if (detailed) {
1745     if (SegmentedCodeCache) {
1746       st->print("CodeCache:");
1747       st->print_cr(" size=" JULONG_FORMAT "Kb, used=" JULONG_FORMAT
1748                    "Kb, max_used=" JULONG_FORMAT "Kb, free=" JULONG_FORMAT "Kb",
1749                    total_size, total_used, total_max_used, total_free);
1750     }
1751     st->print_cr(" total_blobs=" UINT32_FORMAT ", nmethods=" UINT32_FORMAT
1752                  ", adapters=" UINT32_FORMAT ", full_count=" UINT32_FORMAT,
1753                  blob_count(), nmethod_count(), adapter_count(), full_count);
1754     st->print_cr("Compilation: %s, stopped_count=%d, restarted_count=%d",
1755                  CompileBroker::should_compile_new_jobs() ?
1756                  "enabled" : Arguments::mode() == Arguments::_int ?
1757                  "disabled (interpreter mode)" :
1758                  "disabled (not enough contiguous free space left)",
1759                  CompileBroker::get_total_compiler_stopped_count(),
1760                  CompileBroker::get_total_compiler_restarted_count());
1761   }
1762 }
1763 
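     // Prints one line per not-unloading nmethod: compile id, compilation level, state, method
     // name, and the [header_begin, code_begin - code_end] addresses. Used by the
     // Compiler.codelist diagnostic command.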
1764 void CodeCache::print_codelist(outputStream* st) {
1765   MutexLocker mu(CodeCache_lock, Mutex::_no_safepoint_check_flag);
1766 
1767   NMethodIterator iter(NMethodIterator::not_unloading);
1768   while (iter.next()) {
1769     nmethod* nm = iter.method();
1770     ResourceMark rm;
1771     char* method_name = nm->method()->name_and_sig_as_C_string();
1772     const char* jvmci_name = nullptr;
1773 #if INCLUDE_JVMCI
1774     jvmci_name = nm->jvmci_name();
1775 #endif
1776     st->print_cr("%d %d %d %s%s%s [" INTPTR_FORMAT ", " INTPTR_FORMAT " - " INTPTR_FORMAT "]",
1777                  nm->compile_id(), nm->comp_level(), nm->get_state(),
1778                  method_name, jvmci_name ? " jvmci_name=" : "", jvmci_name ? jvmci_name : "",
1779                  (intptr_t)nm->header_begin(), (intptr_t)nm->code_begin(), (intptr_t)nm->code_end());
1780   }
1781 }
1782 
1783 void CodeCache::print_layout(outputStream* st) {
1784   MutexLocker mu(CodeCache_lock, Mutex::_no_safepoint_check_flag);
1785   ResourceMark rm;
1786   print_summary(st, true);
1787 }
1788 
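     // Prints the code cache statistics as name='value' attributes, as used in the (XML)
     // compilation log.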
1789 void CodeCache::log_state(outputStream* st) {
1790   st->print(" total_blobs='" UINT32_FORMAT "' nmethods='" UINT32_FORMAT "'"
1791             " adapters='" UINT32_FORMAT "' free_code_cache='" SIZE_FORMAT "'",
1792             blob_count(), nmethod_count(), adapter_count(),
1793             unallocated_capacity());
1794 }
1795 
1796 #ifdef LINUX
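     // Writes a perf(1) map file (one line per code blob: start address, code size, name) so
     // that external profilers can symbolize JIT-compiled and stub code. If no filename is
     // given, the default name is used after pid substitution.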
1797 void CodeCache::write_perf_map(const char* filename, outputStream* st) {
1798   MutexLocker mu(CodeCache_lock, Mutex::_no_safepoint_check_flag);
1799   char fname[JVM_MAXPATHLEN];
1800   if (filename == nullptr) {
1801     // Invocation outside of jcmd requires pid substitution.
1802     if (!Arguments::copy_expand_pid(DEFAULT_PERFMAP_FILENAME,
1803                                     strlen(DEFAULT_PERFMAP_FILENAME),
1804                                     fname, JVM_MAXPATHLEN)) {
1805       st->print_cr("Warning: Not writing perf map as pid substitution failed.");
1806       return;
1807     }
1808     filename = fname;
1809   }
1810   fileStream fs(filename, "w");
1811   if (!fs.is_open()) {
1812     st->print_cr("Warning: Failed to create %s for perf map", filename);
1813     return;
1814   }
1815 
1816   AllCodeBlobsIterator iter(AllCodeBlobsIterator::not_unloading);
1817   while (iter.next()) {
1818     CodeBlob *cb = iter.method();
1819     ResourceMark rm;
1820     const char* method_name = nullptr;
1821     const char* jvmci_name = nullptr;
1822     if (cb->is_nmethod()) {
1823       nmethod* nm = cb->as_nmethod();
1824       method_name = nm->method()->external_name();
1825 #if INCLUDE_JVMCI
1826       jvmci_name = nm->jvmci_name();
1827 #endif
1828     } else {
1829       method_name = cb->name();
1830     }
1831     fs.print_cr(INTPTR_FORMAT " " INTPTR_FORMAT " %s%s%s",
1832                 (intptr_t)cb->code_begin(), (intptr_t)cb->code_size(),
1833                 method_name, jvmci_name ? " jvmci_name=" : "", jvmci_name ? jvmci_name : "");
1834   }
1835 }
1836 #endif // LINUX
1837 
1838 //---<  BEGIN  >--- CodeHeap State Analytics.
1839 
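     // The functions below forward each CodeHeap State Analytics request to every allocatable
     // code heap; the actual aggregation and printing is implemented by CodeHeapState (driven,
     // for example, via CompileBroker::print_heapinfo).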
1840 void CodeCache::aggregate(outputStream *out, size_t granularity) {
1841   FOR_ALL_ALLOCABLE_HEAPS(heap) {
1842     CodeHeapState::aggregate(out, (*heap), granularity);
1843   }
1844 }
1845 
1846 void CodeCache::discard(outputStream *out) {
1847   FOR_ALL_ALLOCABLE_HEAPS(heap) {
1848     CodeHeapState::discard(out, (*heap));
1849   }
1850 }
1851 
1852 void CodeCache::print_usedSpace(outputStream *out) {
1853   FOR_ALL_ALLOCABLE_HEAPS(heap) {
1854     CodeHeapState::print_usedSpace(out, (*heap));
1855   }
1856 }
1857 
1858 void CodeCache::print_freeSpace(outputStream *out) {
1859   FOR_ALL_ALLOCABLE_HEAPS(heap) {
1860     CodeHeapState::print_freeSpace(out, (*heap));
1861   }
1862 }
1863 
1864 void CodeCache::print_count(outputStream *out) {
1865   FOR_ALL_ALLOCABLE_HEAPS(heap) {
1866     CodeHeapState::print_count(out, (*heap));
1867   }
1868 }
1869 
1870 void CodeCache::print_space(outputStream *out) {
1871   FOR_ALL_ALLOCABLE_HEAPS(heap) {
1872     CodeHeapState::print_space(out, (*heap));
1873   }
1874 }
1875 
1876 void CodeCache::print_age(outputStream *out) {
1877   FOR_ALL_ALLOCABLE_HEAPS(heap) {
1878     CodeHeapState::print_age(out, (*heap));
1879   }
1880 }
1881 
1882 void CodeCache::print_names(outputStream *out) {
1883   FOR_ALL_ALLOCABLE_HEAPS(heap) {
1884     CodeHeapState::print_names(out, (*heap));
1885   }
1886 }
1887 //---<  END  >--- CodeHeap State Analytics.