1 /*
   2  * Copyright (c) 1997, 2024, Oracle and/or its affiliates. All rights reserved.
   3  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
   4  *
   5  * This code is free software; you can redistribute it and/or modify it
   6  * under the terms of the GNU General Public License version 2 only, as
   7  * published by the Free Software Foundation.
   8  *
   9  * This code is distributed in the hope that it will be useful, but WITHOUT
  10  * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
  11  * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
  12  * version 2 for more details (a copy is included in the LICENSE file that
  13  * accompanied this code).
  14  *
  15  * You should have received a copy of the GNU General Public License version
  16  * 2 along with this work; if not, write to the Free Software Foundation,
  17  * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
  18  *
  19  * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
  20  * or visit www.oracle.com if you need additional information or have any
  21  * questions.
  22  *
  23  */
  24 
  25 #include "precompiled.hpp"
  26 #include "code/codeBlob.hpp"
  27 #include "code/codeCache.hpp"
  28 #include "code/codeHeapState.hpp"
  29 #include "code/compiledIC.hpp"
  30 #include "code/dependencies.hpp"
  31 #include "code/dependencyContext.hpp"
  32 #include "code/nmethod.hpp"
  33 #include "code/pcDesc.hpp"
  34 #include "compiler/compilationPolicy.hpp"
  35 #include "compiler/compileBroker.hpp"
  36 #include "compiler/compilerDefinitions.inline.hpp"
  37 #include "compiler/oopMap.hpp"
  38 #include "gc/shared/barrierSetNMethod.hpp"
  39 #include "gc/shared/classUnloadingContext.hpp"
  40 #include "gc/shared/collectedHeap.hpp"
  41 #include "jfr/jfrEvents.hpp"
  42 #include "jvm_io.h"
  43 #include "logging/log.hpp"
  44 #include "logging/logStream.hpp"
  45 #include "memory/allocation.inline.hpp"
  46 #include "memory/iterator.hpp"
  47 #include "memory/resourceArea.hpp"
  48 #include "memory/universe.hpp"
  49 #include "oops/method.inline.hpp"
  50 #include "oops/objArrayOop.hpp"
  51 #include "oops/oop.inline.hpp"
  52 #include "oops/verifyOopClosure.hpp"
  53 #include "runtime/arguments.hpp"
  54 #include "runtime/atomic.hpp"
  55 #include "runtime/deoptimization.hpp"
  56 #include "runtime/globals_extension.hpp"
  57 #include "runtime/handles.inline.hpp"
  58 #include "runtime/icache.hpp"
  59 #include "runtime/init.hpp"
  60 #include "runtime/java.hpp"
  61 #include "runtime/mutexLocker.hpp"
  62 #include "runtime/os.inline.hpp"
  63 #include "runtime/safepointVerifiers.hpp"
  64 #include "runtime/vmThread.hpp"
  65 #include "sanitizers/leak.hpp"
  66 #include "services/memoryService.hpp"
  67 #include "utilities/align.hpp"
  68 #include "utilities/vmError.hpp"
  69 #include "utilities/xmlstream.hpp"
  70 #ifdef COMPILER1
  71 #include "c1/c1_Compilation.hpp"
  72 #include "c1/c1_Compiler.hpp"
  73 #endif
  74 #ifdef COMPILER2
  75 #include "opto/c2compiler.hpp"
  76 #include "opto/compile.hpp"
  77 #include "opto/node.hpp"
  78 #endif
  79 
  80 // Helper class for printing in CodeCache
  81 class CodeBlob_sizes {
  82  private:
  83   int count;
  84   int total_size;
  85   int header_size;
  86   int code_size;
  87   int stub_size;
  88   int relocation_size;
  89   int scopes_oop_size;
  90   int scopes_metadata_size;
  91   int scopes_data_size;
  92   int scopes_pcs_size;
  93 
  94  public:
  95   CodeBlob_sizes() {
  96     count            = 0;
  97     total_size       = 0;
  98     header_size      = 0;
  99     code_size        = 0;
 100     stub_size        = 0;
 101     relocation_size  = 0;
 102     scopes_oop_size  = 0;
 103     scopes_metadata_size  = 0;
 104     scopes_data_size = 0;
 105     scopes_pcs_size  = 0;
 106   }
 107 
 108   int total() const                              { return total_size; }
 109   bool is_empty() const                          { return count == 0; }
 110 
 111   void print(const char* title) const {
 112     if (is_empty()) {
 113       tty->print_cr(" #%d %s = %dK",
 114                     count,
 115                     title,
 116                     total()                 / (int)K);
 117     } else {
 118       tty->print_cr(" #%d %s = %dK (hdr %dK %d%%, loc %dK %d%%, code %dK %d%%, stub %dK %d%%, [oops %dK %d%%, metadata %dK %d%%, data %dK %d%%, pcs %dK %d%%])",
 119                     count,
 120                     title,
 121                     total()                 / (int)K,
 122                     header_size             / (int)K,
 123                     header_size             * 100 / total_size,
 124                     relocation_size         / (int)K,
 125                     relocation_size         * 100 / total_size,
 126                     code_size               / (int)K,
 127                     code_size               * 100 / total_size,
 128                     stub_size               / (int)K,
 129                     stub_size               * 100 / total_size,
 130                     scopes_oop_size         / (int)K,
 131                     scopes_oop_size         * 100 / total_size,
 132                     scopes_metadata_size    / (int)K,
 133                     scopes_metadata_size    * 100 / total_size,
 134                     scopes_data_size        / (int)K,
 135                     scopes_data_size        * 100 / total_size,
 136                     scopes_pcs_size         / (int)K,
 137                     scopes_pcs_size         * 100 / total_size);
 138     }
 139   }
 140 
 141   void add(CodeBlob* cb) {
 142     count++;
 143     total_size       += cb->size();
 144     header_size      += cb->header_size();
 145     relocation_size  += cb->relocation_size();
 146     if (cb->is_nmethod()) {
 147       nmethod* nm = cb->as_nmethod_or_null();
 148       code_size        += nm->insts_size();
 149       stub_size        += nm->stub_size();
 150 
 151       scopes_oop_size  += nm->oops_size();
 152       scopes_metadata_size  += nm->metadata_size();
 153       scopes_data_size += nm->scopes_data_size();
 154       scopes_pcs_size  += nm->scopes_pcs_size();
 155     } else {
 156       code_size        += cb->code_size();
 157     }
 158   }
 159 };
 160 
 161 // Iterate over all CodeHeaps
 162 #define FOR_ALL_HEAPS(heap) for (GrowableArrayIterator<CodeHeap*> heap = _heaps->begin(); heap != _heaps->end(); ++heap)
 163 #define FOR_ALL_ALLOCABLE_HEAPS(heap) for (GrowableArrayIterator<CodeHeap*> heap = _allocable_heaps->begin(); heap != _allocable_heaps->end(); ++heap)
 164 
 165 // Iterate over all CodeBlobs (cb) on the given CodeHeap
 166 #define FOR_ALL_BLOBS(cb, heap) for (CodeBlob* cb = first_blob(heap); cb != nullptr; cb = next_blob(heap, cb))
 167 
 168 address CodeCache::_low_bound = 0;
 169 address CodeCache::_high_bound = 0;
 170 volatile int CodeCache::_number_of_nmethods_with_dependencies = 0;
 171 ExceptionCache* volatile CodeCache::_exception_cache_purge_list = nullptr;
 172 
 173 // Initialize arrays of CodeHeap subsets
 174 GrowableArray<CodeHeap*>* CodeCache::_heaps = new(mtCode) GrowableArray<CodeHeap*> (static_cast<int>(CodeBlobType::All), mtCode);
 175 GrowableArray<CodeHeap*>* CodeCache::_nmethod_heaps = new(mtCode) GrowableArray<CodeHeap*> (static_cast<int>(CodeBlobType::All), mtCode);
 176 GrowableArray<CodeHeap*>* CodeCache::_allocable_heaps = new(mtCode) GrowableArray<CodeHeap*> (static_cast<int>(CodeBlobType::All), mtCode);
 177 
 178 static void check_min_size(const char* codeheap, size_t size, size_t required_size) {
 179   if (size < required_size) {
 180     log_debug(codecache)("Code heap (%s) size " SIZE_FORMAT "K below required minimal size " SIZE_FORMAT "K",
 181                          codeheap, size/K, required_size/K);
 182     err_msg title("Not enough space in %s to run VM", codeheap);
 183     err_msg message(SIZE_FORMAT "K < " SIZE_FORMAT "K", size/K, required_size/K);
 184     vm_exit_during_initialization(title, message);
 185   }
 186 }
 187 
 188 struct CodeHeapInfo {
 189   size_t size;
 190   bool set;
 191   bool enabled;
 192 };
 193 
 194 static void set_size_of_unset_code_heap(CodeHeapInfo* heap, size_t available_size, size_t used_size, size_t min_size) {
 195   assert(!heap->set, "sanity");
 196   heap->size = (available_size > (used_size + min_size)) ? (available_size - used_size) : min_size;
 197 }
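     // Illustrative sizing example (assumed numbers, not defaults): with
     // available_size = 128M, used_size = 48M and min_size = 4M, the heap gets
     // available_size - used_size = 80M; with available_size = 50M the guard
     // (50M > 48M + 4M) fails and the heap falls back to min_size = 4M.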
 198 
 199 void CodeCache::initialize_heaps() {
 200 
 201   CodeHeapInfo non_nmethod = {NonNMethodCodeHeapSize, FLAG_IS_CMDLINE(NonNMethodCodeHeapSize), true};
 202   CodeHeapInfo profiled = {ProfiledCodeHeapSize, FLAG_IS_CMDLINE(ProfiledCodeHeapSize), true};
 203   CodeHeapInfo non_profiled = {NonProfiledCodeHeapSize, FLAG_IS_CMDLINE(NonProfiledCodeHeapSize), true};
 204 
 205   const bool cache_size_set   = FLAG_IS_CMDLINE(ReservedCodeCacheSize);
 206   const size_t ps             = page_size(false, 8);
 207   const size_t min_size       = MAX2(os::vm_allocation_granularity(), ps);
 208   const size_t min_cache_size = CodeCacheMinimumUseSpace DEBUG_ONLY(* 3); // Make sure we have enough space for VM internal code
 209   size_t cache_size           = align_up(ReservedCodeCacheSize, min_size);
 210 
 211   // Prerequisites
 212   if (!heap_available(CodeBlobType::MethodProfiled)) {
 213     // For compatibility reasons, disabled tiered compilation overrides
 214     // the segment sizes even if they are set explicitly.
 215     non_profiled.size += profiled.size;
 216     // Profiled code heap is not available, forcibly set size to 0
 217     profiled.size = 0;
 218     profiled.set = true;
 219     profiled.enabled = false;
 220   }
 221 
 222   assert(heap_available(CodeBlobType::MethodNonProfiled), "MethodNonProfiled heap is always available for segmented code heap");
 223 
 224   size_t compiler_buffer_size = 0;
 225   COMPILER1_PRESENT(compiler_buffer_size += CompilationPolicy::c1_count() * Compiler::code_buffer_size());
 226   COMPILER2_PRESENT(compiler_buffer_size += CompilationPolicy::c2_count() * C2Compiler::initial_code_buffer_size());
 227 
 228   if (!non_nmethod.set) {
 229     non_nmethod.size += compiler_buffer_size;
 230     // Further down, just before FLAG_SET_ERGO(), all segment sizes are
 231     // aligned down to the next lower multiple of min_size. For large page
 232     // sizes, this may result in (non_nmethod.size == 0) which is not acceptable.
 233     // Therefore, force non_nmethod.size to at least min_size.
 234     non_nmethod.size = MAX2(non_nmethod.size, min_size);
 235   }
 236 
 237   if (!profiled.set && !non_profiled.set) {
 238     non_profiled.size = profiled.size = (cache_size > non_nmethod.size + 2 * min_size) ?
 239                                         (cache_size - non_nmethod.size) / 2 : min_size;
 240   }
 241 
 242   if (profiled.set && !non_profiled.set) {
 243     set_size_of_unset_code_heap(&non_profiled, cache_size, non_nmethod.size + profiled.size, min_size);
 244   }
 245 
 246   if (!profiled.set && non_profiled.set) {
 247     set_size_of_unset_code_heap(&profiled, cache_size, non_nmethod.size + non_profiled.size, min_size);
 248   }
 249 
 250   // Compatibility.
 251   size_t non_nmethod_min_size = min_cache_size + compiler_buffer_size;
 252   if (!non_nmethod.set && profiled.set && non_profiled.set) {
 253     set_size_of_unset_code_heap(&non_nmethod, cache_size, profiled.size + non_profiled.size, non_nmethod_min_size);
 254   }
 255 
 256   size_t total = non_nmethod.size + profiled.size + non_profiled.size;
 257   if (total != cache_size && !cache_size_set) {
 258     log_info(codecache)("ReservedCodeCache size " SIZE_FORMAT "K changed to total segments size NonNMethod "
 259                         SIZE_FORMAT "K NonProfiled " SIZE_FORMAT "K Profiled " SIZE_FORMAT "K = " SIZE_FORMAT "K",
 260                         cache_size/K, non_nmethod.size/K, non_profiled.size/K, profiled.size/K, total/K);
 261     // Adjust ReservedCodeCacheSize as necessary because it was not set explicitly
 262     cache_size = total;
 263   }
 264 
 265   log_debug(codecache)("Initializing code heaps ReservedCodeCache " SIZE_FORMAT "K NonNMethod " SIZE_FORMAT "K"
 266                        " NonProfiled " SIZE_FORMAT "K Profiled " SIZE_FORMAT "K",
 267                        cache_size/K, non_nmethod.size/K, non_profiled.size/K, profiled.size/K);
 268 
 269   // Validation
 270   // Check minimal required sizes
 271   check_min_size("non-nmethod code heap", non_nmethod.size, non_nmethod_min_size);
 272   if (profiled.enabled) {
 273     check_min_size("profiled code heap", profiled.size, min_size);
 274   }
 275   if (non_profiled.enabled) { // non_profiled.enabled is always ON for segmented code heap, leave it checked for clarity
 276     check_min_size("non-profiled code heap", non_profiled.size, min_size);
 277   }
 278   if (cache_size_set) {
 279     check_min_size("reserved code cache", cache_size, min_cache_size);
 280   }
 281 
 282   // ReservedCodeCacheSize was set explicitly, so report an error and abort if it doesn't match the segment sizes
 283   if (total != cache_size && cache_size_set) {
 284     err_msg message("NonNMethodCodeHeapSize (" SIZE_FORMAT "K)", non_nmethod.size/K);
 285     if (profiled.enabled) {
 286       message.append(" + ProfiledCodeHeapSize (" SIZE_FORMAT "K)", profiled.size/K);
 287     }
 288     if (non_profiled.enabled) {
 289       message.append(" + NonProfiledCodeHeapSize (" SIZE_FORMAT "K)", non_profiled.size/K);
 290     }
 291     message.append(" = " SIZE_FORMAT "K", total/K);
 292     message.append((total > cache_size) ? " is greater than " : " is less than ");
 293     message.append("ReservedCodeCacheSize (" SIZE_FORMAT "K).", cache_size/K);
 294 
 295     vm_exit_during_initialization("Invalid code heap sizes", message);
 296   }
 297 
 298   // Compatibility. Print a warning if large pages are requested but the large page size cannot be used.
 299   if (UseLargePages) {
 300     const size_t lg_ps = page_size(false, 1);
 301     if (ps < lg_ps) {
 302       log_warning(codecache)("Code cache size too small for " PROPERFMT " pages. "
 303                              "Reverting to smaller page size (" PROPERFMT ").",
 304                              PROPERFMTARGS(lg_ps), PROPERFMTARGS(ps));
 305     }
 306   }
 307 
 308   // Note: if large page support is enabled, min_size is at least the large
 309   // page size. This ensures that the code cache is covered by large pages.
 310   non_profiled.size += non_nmethod.size & alignment_mask(min_size);
 311   non_profiled.size += profiled.size & alignment_mask(min_size);
 312   non_nmethod.size = align_down(non_nmethod.size, min_size);
 313   profiled.size = align_down(profiled.size, min_size);
 314   non_profiled.size = align_down(non_profiled.size, min_size);
 315 
 316   FLAG_SET_ERGO(NonNMethodCodeHeapSize, non_nmethod.size);
 317   FLAG_SET_ERGO(ProfiledCodeHeapSize, profiled.size);
 318   FLAG_SET_ERGO(NonProfiledCodeHeapSize, non_profiled.size);
 319   FLAG_SET_ERGO(ReservedCodeCacheSize, cache_size);
 320 
 321   ReservedCodeSpace rs = reserve_heap_memory(cache_size, ps);
 322 
 323   // Register CodeHeaps with LSan as we sometimes embed pointers to malloc memory.
 324   LSAN_REGISTER_ROOT_REGION(rs.base(), rs.size());
 325 
 326   size_t offset = 0;
 327   if (profiled.enabled) {
 328     ReservedSpace profiled_space = rs.partition(offset, profiled.size);
 329     offset += profiled.size;
 330     // Tier 2 and tier 3 (profiled) methods
 331     add_heap(profiled_space, "CodeHeap 'profiled nmethods'", CodeBlobType::MethodProfiled);
 332   }
 333 
 334   ReservedSpace non_method_space = rs.partition(offset, non_nmethod.size);
 335   offset += non_nmethod.size;
 336   // Non-nmethods (stubs, adapters, ...)
 337   add_heap(non_method_space, "CodeHeap 'non-nmethods'", CodeBlobType::NonNMethod);
 338 
 339   if (non_profiled.enabled) {
 340     ReservedSpace non_profiled_space  = rs.partition(offset, non_profiled.size);
 341     // Tier 1 and tier 4 (non-profiled) methods and native methods
 342     add_heap(non_profiled_space, "CodeHeap 'non-profiled nmethods'", CodeBlobType::MethodNonProfiled);
 343   }
 344 }
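     // Illustrative default split (assumed numbers): for a 240M reserved code
     // cache with neither ProfiledCodeHeapSize nor NonProfiledCodeHeapSize set,
     // the profiled and non-profiled heaps each get (240M - non_nmethod.size) / 2;
     // the min_size alignment remainders of the non-nmethod and profiled heaps
     // are then folded into the non-profiled heap before the final align_down.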
 345 
 346 size_t CodeCache::page_size(bool aligned, size_t min_pages) {
 347   return aligned ? os::page_size_for_region_aligned(ReservedCodeCacheSize, min_pages) :
 348                    os::page_size_for_region_unaligned(ReservedCodeCacheSize, min_pages);
 349 }
 350 
 351 ReservedCodeSpace CodeCache::reserve_heap_memory(size_t size, size_t rs_ps) {
 352   // Align and reserve space for code cache
 353   const size_t rs_align = MAX2(rs_ps, os::vm_allocation_granularity());
 354   const size_t rs_size = align_up(size, rs_align);
 355   ReservedCodeSpace rs(rs_size, rs_align, rs_ps);
 356   if (!rs.is_reserved()) {
 357     vm_exit_during_initialization(err_msg("Could not reserve enough space for code cache (" SIZE_FORMAT "K)",
 358                                           rs_size/K));
 359   }
 360 
 361   // Initialize bounds
 362   _low_bound = (address)rs.base();
 363   _high_bound = _low_bound + rs.size();
 364   return rs;
 365 }
 366 
 367 // Heaps available for allocation
 368 bool CodeCache::heap_available(CodeBlobType code_blob_type) {
 369   if (!SegmentedCodeCache) {
 370     // No segmentation: use a single code heap
 371     return (code_blob_type == CodeBlobType::All);
 372   } else if (CompilerConfig::is_interpreter_only()) {
 373     // Interpreter only: we don't need any method code heaps
 374     return (code_blob_type == CodeBlobType::NonNMethod);
 375   } else if (CompilerConfig::is_c1_profiling()) {
 376     // Tiered compilation: use all code heaps
 377     return (code_blob_type < CodeBlobType::All);
 378   } else {
 379     // No TieredCompilation: we only need the non-nmethod and non-profiled code heap
 380     return (code_blob_type == CodeBlobType::NonNMethod) ||
 381            (code_blob_type == CodeBlobType::MethodNonProfiled);
 382   }
 383 }
 384 
 385 const char* CodeCache::get_code_heap_flag_name(CodeBlobType code_blob_type) {
 386   switch(code_blob_type) {
 387   case CodeBlobType::NonNMethod:
 388     return "NonNMethodCodeHeapSize";
 389     break;
 390   case CodeBlobType::MethodNonProfiled:
 391     return "NonProfiledCodeHeapSize";
 392     break;
 393   case CodeBlobType::MethodProfiled:
 394     return "ProfiledCodeHeapSize";
 395     break;
 396   default:
 397     ShouldNotReachHere();
 398     return nullptr;
 399   }
 400 }
 401 
 402 int CodeCache::code_heap_compare(CodeHeap* const &lhs, CodeHeap* const &rhs) {
 403   if (lhs->code_blob_type() == rhs->code_blob_type()) {
 404     return (lhs > rhs) ? 1 : ((lhs < rhs) ? -1 : 0);
 405   } else {
 406     return static_cast<int>(lhs->code_blob_type()) - static_cast<int>(rhs->code_blob_type());
 407   }
 408 }
 409 
 410 void CodeCache::add_heap(CodeHeap* heap) {
 411   assert(!Universe::is_fully_initialized(), "late heap addition?");
 412 
 413   _heaps->insert_sorted<code_heap_compare>(heap);
 414 
 415   CodeBlobType type = heap->code_blob_type();
 416   if (code_blob_type_accepts_nmethod(type)) {
 417     _nmethod_heaps->insert_sorted<code_heap_compare>(heap);
 418   }
 419   if (code_blob_type_accepts_allocable(type)) {
 420     _allocable_heaps->insert_sorted<code_heap_compare>(heap);
 421   }
 422 }
 423 
 424 void CodeCache::add_heap(ReservedSpace rs, const char* name, CodeBlobType code_blob_type) {
 425   // Check if heap is needed
 426   if (!heap_available(code_blob_type)) {
 427     return;
 428   }
 429 
 430   // Create CodeHeap
 431   CodeHeap* heap = new CodeHeap(name, code_blob_type);
 432   add_heap(heap);
 433 
 434   // Reserve Space
 435   size_t size_initial = MIN2((size_t)InitialCodeCacheSize, rs.size());
 436   size_initial = align_up(size_initial, os::vm_page_size());
 437   if (!heap->reserve(rs, size_initial, CodeCacheSegmentSize)) {
 438     vm_exit_during_initialization(err_msg("Could not reserve enough space in %s (" SIZE_FORMAT "K)",
 439                                           heap->name(), size_initial/K));
 440   }
 441 
 442   // Register the CodeHeap
 443   MemoryService::add_code_heap_memory_pool(heap, name);
 444 }
 445 
 446 CodeHeap* CodeCache::get_code_heap_containing(void* start) {
 447   FOR_ALL_HEAPS(heap) {
 448     if ((*heap)->contains(start)) {
 449       return *heap;
 450     }
 451   }
 452   return nullptr;
 453 }
 454 
 455 CodeHeap* CodeCache::get_code_heap(const void* cb) {
 456   assert(cb != nullptr, "CodeBlob is null");
 457   FOR_ALL_HEAPS(heap) {
 458     if ((*heap)->contains(cb)) {
 459       return *heap;
 460     }
 461   }
 462   ShouldNotReachHere();
 463   return nullptr;
 464 }
 465 
 466 CodeHeap* CodeCache::get_code_heap(CodeBlobType code_blob_type) {
 467   FOR_ALL_HEAPS(heap) {
 468     if ((*heap)->accepts(code_blob_type)) {
 469       return *heap;
 470     }
 471   }
 472   return nullptr;
 473 }
 474 
 475 CodeBlob* CodeCache::first_blob(CodeHeap* heap) {
 476   assert_locked_or_safepoint(CodeCache_lock);
 477   assert(heap != nullptr, "heap is null");
 478   return (CodeBlob*)heap->first();
 479 }
 480 
 481 CodeBlob* CodeCache::first_blob(CodeBlobType code_blob_type) {
 482   if (heap_available(code_blob_type)) {
 483     return first_blob(get_code_heap(code_blob_type));
 484   } else {
 485     return nullptr;
 486   }
 487 }
 488 
 489 CodeBlob* CodeCache::next_blob(CodeHeap* heap, CodeBlob* cb) {
 490   assert_locked_or_safepoint(CodeCache_lock);
 491   assert(heap != nullptr, "heap is null");
 492   return (CodeBlob*)heap->next(cb);
 493 }
 494 
 495 /**
 496  * Do not seize the CodeCache lock here--if the caller has not
 497  * already done so, we are going to lose big time, since the code
 498  * cache will contain a garbage CodeBlob until the caller can
 499  * run the constructor for the CodeBlob subclass it is busy
 500  * instantiating.
 501  */
 502 CodeBlob* CodeCache::allocate(uint size, CodeBlobType code_blob_type, bool handle_alloc_failure, CodeBlobType orig_code_blob_type) {
 503   assert_locked_or_safepoint(CodeCache_lock);
 504   assert(size > 0, "Code cache allocation request must be > 0");
 505   if (size == 0) {
 506     return nullptr;
 507   }
 508   CodeBlob* cb = nullptr;
 509 
 510   // Get CodeHeap for the given CodeBlobType
 511   CodeHeap* heap = get_code_heap(code_blob_type);
 512   assert(heap != nullptr, "heap is null");
 513 
 514   while (true) {
 515     cb = (CodeBlob*)heap->allocate(size);
 516     if (cb != nullptr) break;
 517     if (!heap->expand_by(CodeCacheExpansionSize)) {
 518       // Save original type for error reporting
 519       if (orig_code_blob_type == CodeBlobType::All) {
 520         orig_code_blob_type = code_blob_type;
 521       }
 522       // Expansion failed
 523       if (SegmentedCodeCache) {
 524         // Fallback solution: Try to store code in another code heap.
 525         // NonNMethod -> MethodNonProfiled -> MethodProfiled (-> MethodNonProfiled)
 526         CodeBlobType type = code_blob_type;
 527         switch (type) {
 528         case CodeBlobType::NonNMethod:
 529           type = CodeBlobType::MethodNonProfiled;
 530           break;
 531         case CodeBlobType::MethodNonProfiled:
 532           type = CodeBlobType::MethodProfiled;
 533           break;
 534         case CodeBlobType::MethodProfiled:
 535           // Avoid loop if we already tried that code heap
 536           if (type == orig_code_blob_type) {
 537             type = CodeBlobType::MethodNonProfiled;
 538           }
 539           break;
 540         default:
 541           break;
 542         }
 543         if (type != code_blob_type && type != orig_code_blob_type && heap_available(type)) {
 544           if (PrintCodeCacheExtension) {
 545             tty->print_cr("Extension of %s failed. Trying to allocate in %s.",
 546                           heap->name(), get_code_heap(type)->name());
 547           }
 548           return allocate(size, type, handle_alloc_failure, orig_code_blob_type);
 549         }
 550       }
 551       if (handle_alloc_failure) {
 552         MutexUnlocker mu(CodeCache_lock, Mutex::_no_safepoint_check_flag);
 553         CompileBroker::handle_full_code_cache(orig_code_blob_type);
 554       }
 555       return nullptr;
 556     } else {
 557       OrderAccess::release(); // ensure heap expansion is visible to an asynchronous observer (e.g. CodeHeapPool::get_memory_usage())
 558     }
 559     if (PrintCodeCacheExtension) {
 560       ResourceMark rm;
 561       if (_nmethod_heaps->length() >= 1) {
 562         tty->print("%s", heap->name());
 563       } else {
 564         tty->print("CodeCache");
 565       }
 566       tty->print_cr(" extended to [" INTPTR_FORMAT ", " INTPTR_FORMAT "] (" SSIZE_FORMAT " bytes)",
 567                     (intptr_t)heap->low_boundary(), (intptr_t)heap->high(),
 568                     (address)heap->high() - (address)heap->low_boundary());
 569     }
 570   }
 571   print_trace("allocation", cb, size);
 572   return cb;
 573 }
 574 
 575 void CodeCache::free(CodeBlob* cb) {
 576   assert_locked_or_safepoint(CodeCache_lock);
 577   CodeHeap* heap = get_code_heap(cb);
 578   print_trace("free", cb);
 579   if (cb->is_nmethod()) {
 580     heap->set_nmethod_count(heap->nmethod_count() - 1);
 581     if (((nmethod *)cb)->has_dependencies()) {
 582       Atomic::dec(&_number_of_nmethods_with_dependencies);
 583     }
 584   }
 585   if (cb->is_adapter_blob()) {
 586     heap->set_adapter_count(heap->adapter_count() - 1);
 587   }
 588 
 589   cb->~CodeBlob();
 590   // Get heap for given CodeBlob and deallocate
 591   heap->deallocate(cb);
 592 
 593   assert(heap->blob_count() >= 0, "sanity check");
 594 }
 595 
 596 void CodeCache::free_unused_tail(CodeBlob* cb, size_t used) {
 597   assert_locked_or_safepoint(CodeCache_lock);
 598   guarantee(cb->is_buffer_blob() && strncmp("Interpreter", cb->name(), 11) == 0, "Only possible for interpreter!");
 599   print_trace("free_unused_tail", cb);
 600 
 601   // We also have to account for the extra space (i.e. header) used by the CodeBlob
 602   // which provides the memory (see BufferBlob::create() in codeBlob.cpp).
 603   used += CodeBlob::align_code_offset(cb->header_size());
 604 
 605   // Get heap for given CodeBlob and deallocate its unused tail
 606   get_code_heap(cb)->deallocate_tail(cb, used);
 607   // Adjust the sizes of the CodeBlob
 608   cb->adjust_size(used);
 609 }
 610 
 611 void CodeCache::commit(CodeBlob* cb) {
 612   // this is called by nmethod::nmethod, which must already own CodeCache_lock
 613   assert_locked_or_safepoint(CodeCache_lock);
 614   CodeHeap* heap = get_code_heap(cb);
 615   if (cb->is_nmethod()) {
 616     heap->set_nmethod_count(heap->nmethod_count() + 1);
 617     if (((nmethod *)cb)->has_dependencies()) {
 618       Atomic::inc(&_number_of_nmethods_with_dependencies);
 619     }
 620   }
 621   if (cb->is_adapter_blob()) {
 622     heap->set_adapter_count(heap->adapter_count() + 1);
 623   }
 624 }
 625 
 626 bool CodeCache::contains(void *p) {
 627   // S390 uses contains() in current_frame(), which is used before
 628   // code cache initialization if NativeMemoryTracking=detail is set.
 629   S390_ONLY(if (_heaps == nullptr) return false;)
 630   // It should be ok to call contains without holding a lock.
 631   FOR_ALL_HEAPS(heap) {
 632     if ((*heap)->contains(p)) {
 633       return true;
 634     }
 635   }
 636   return false;
 637 }
 638 
 639 bool CodeCache::contains(nmethod *nm) {
 640   return contains((void *)nm);
 641 }
 642 
 643 // This method is safe to call without holding the CodeCache_lock. It only depends on the _segmap containing
 644 // valid indices, which it always does as long as the CodeBlob is not in the process of being recycled.
 645 CodeBlob* CodeCache::find_blob(void* start) {
 646   // NMT can walk the stack before code cache is created
 647   if (_heaps != nullptr) {
 648     CodeHeap* heap = get_code_heap_containing(start);
 649     if (heap != nullptr) {
 650       return heap->find_blob(start);
 651     }
 652   }
 653   return nullptr;
 654 }
 655 
 656 nmethod* CodeCache::find_nmethod(void* start) {
 657   CodeBlob* cb = find_blob(start);
 658   assert(cb == nullptr || cb->is_nmethod(), "did not find an nmethod");
 659   return (nmethod*)cb;
 660 }
 661 
 662 void CodeCache::blobs_do(void f(CodeBlob* nm)) {
 663   assert_locked_or_safepoint(CodeCache_lock);
 664   FOR_ALL_HEAPS(heap) {
 665     FOR_ALL_BLOBS(cb, *heap) {
 666       f(cb);
 667     }
 668   }
 669 }
 670 
 671 void CodeCache::nmethods_do(void f(nmethod* nm)) {
 672   assert_locked_or_safepoint(CodeCache_lock);
 673   NMethodIterator iter(NMethodIterator::all);
 674   while(iter.next()) {
 675     f(iter.method());
 676   }
 677 }
 678 
 679 void CodeCache::nmethods_do(NMethodClosure* cl) {
 680   assert_locked_or_safepoint(CodeCache_lock);
 681   NMethodIterator iter(NMethodIterator::all);
 682   while(iter.next()) {
 683     cl->do_nmethod(iter.method());
 684   }
 685 }
 686 
 687 void CodeCache::metadata_do(MetadataClosure* f) {
 688   assert_locked_or_safepoint(CodeCache_lock);
 689   NMethodIterator iter(NMethodIterator::all);
 690   while(iter.next()) {
 691     iter.method()->metadata_do(f);
 692   }
 693 }
 694 
 695 // Calculate the number of GCs for which an nmethod may go unused before it is
 696 // classed as cold.
 697 void CodeCache::update_cold_gc_count() {
 698   if (!MethodFlushing || !UseCodeCacheFlushing || NmethodSweepActivity == 0) {
 699     // No aging
 700     return;
 701   }
 702 
 703   size_t last_used = _last_unloading_used;
 704   double last_time = _last_unloading_time;
 705 
 706   double time = os::elapsedTime();
 707 
 708   size_t free = unallocated_capacity();
 709   size_t max = max_capacity();
 710   size_t used = max - free;
 711   double gc_interval = time - last_time;
 712 
 713   _unloading_threshold_gc_requested = false;
 714   _last_unloading_time = time;
 715   _last_unloading_used = used;
 716 
 717   if (last_time == 0.0) {
 718     // The first GC doesn't have enough information to make good
 719     // decisions, so just keep everything afloat
 720     log_info(codecache)("Unknown code cache pressure; don't age code");
 721     return;
 722   }
 723 
 724   if (gc_interval <= 0.0 || last_used >= used) {
 725     // Dodge corner cases where there is no pressure or negative pressure
 726     // on the code cache. Just don't unload when this happens.
 727     _cold_gc_count = INT_MAX;
 728     log_info(codecache)("No code cache pressure; don't age code");
 729     return;
 730   }
 731 
 732   double allocation_rate = (used - last_used) / gc_interval;
 733 
 734   _unloading_allocation_rates.add(allocation_rate);
 735   _unloading_gc_intervals.add(gc_interval);
 736 
 737   size_t aggressive_sweeping_free_threshold = StartAggressiveSweepingAt / 100.0 * max;
 738   if (free < aggressive_sweeping_free_threshold) {
 739     // We are already in the red zone; be very aggressive to avoid disaster,
 740     // but not more aggressive than 2. This ensures that an nmethod must
 741     // have gone unused for at least two GCs before it is considered cold.
 742     _cold_gc_count = 2;
 743     log_info(codecache)("Code cache critically low; use aggressive aging");
 744     return;
 745   }
 746 
 747   // The code cache has an expected time for cold nmethods to "time out"
 748   // when they have not been used. The time for nmethods to time out
 749   // depends on how long we expect we can keep allocating code until
 750   // aggressive sweeping starts, based on sampled allocation rates.
 751   double average_gc_interval = _unloading_gc_intervals.avg();
 752   double average_allocation_rate = _unloading_allocation_rates.avg();
 753   double time_to_aggressive = ((double)(free - aggressive_sweeping_free_threshold)) / average_allocation_rate;
 754   double cold_timeout = time_to_aggressive / NmethodSweepActivity;
 755 
 756   // Convert time to GC cycles, and crop at INT_MAX. The reason for
 757   // that is that the _cold_gc_count will be added to an epoch number
 758   // and that addition must not overflow, or we can crash the VM.
 759   // But not more aggressive than 2. This ensures that an nmethod must
 760   // have gone unused for at least two GCs before it is considered cold.
 761   _cold_gc_count = MAX2(MIN2((uint64_t)(cold_timeout / average_gc_interval), (uint64_t)INT_MAX), (uint64_t)2);
 762 
 763   double used_ratio = double(used) / double(max);
 764   double last_used_ratio = double(last_used) / double(max);
 765   log_info(codecache)("Allocation rate: %.3f KB/s, time to aggressive unloading: %.3f s, cold timeout: %.3f s, cold gc count: " UINT64_FORMAT
 766                       ", used: %.3f MB (%.3f%%), last used: %.3f MB (%.3f%%), gc interval: %.3f s",
 767                       average_allocation_rate / K, time_to_aggressive, cold_timeout, _cold_gc_count,
 768                       double(used) / M, used_ratio * 100.0, double(last_used) / M, last_used_ratio * 100.0, average_gc_interval);
 769 
 770 }
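     // Worked example with purely illustrative numbers: if free space exceeds the
     // aggressive-sweeping threshold by 100M and the sampled average allocation
     // rate is 1M/s, time_to_aggressive is 100s; assuming NmethodSweepActivity is
     // 10, cold_timeout is 10s, and with an average GC interval of 2s the
     // resulting _cold_gc_count is 5 (clamped to the range [2, INT_MAX]).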
 771 
 772 uint64_t CodeCache::cold_gc_count() {
 773   return _cold_gc_count;
 774 }
 775 
 776 void CodeCache::gc_on_allocation() {
 777   if (!is_init_completed()) {
 778     // Let's not heuristically trigger GCs before the JVM is ready for GCs, no matter what
 779     return;
 780   }
 781 
 782   size_t free = unallocated_capacity();
 783   size_t max = max_capacity();
 784   size_t used = max - free;
 785   double free_ratio = double(free) / double(max);
 786   if (free_ratio <= StartAggressiveSweepingAt / 100.0)  {
 787     // In case the GC is concurrent, we make sure only one thread requests the GC.
 788     if (Atomic::cmpxchg(&_unloading_threshold_gc_requested, false, true) == false) {
 789       log_info(codecache)("Triggering aggressive GC due to having only %.3f%% free memory", free_ratio * 100.0);
 790       Universe::heap()->collect(GCCause::_codecache_GC_aggressive);
 791     }
 792     return;
 793   }
 794 
 795   size_t last_used = _last_unloading_used;
 796   if (last_used >= used) {
 797     // No increase since last GC; no need to sweep yet
 798     return;
 799   }
 800   size_t allocated_since_last = used - last_used;
 801   double allocated_since_last_ratio = double(allocated_since_last) / double(max);
 802   double threshold = SweeperThreshold / 100.0;
 803   double used_ratio = double(used) / double(max);
 804   double last_used_ratio = double(last_used) / double(max);
 805   if (used_ratio > threshold) {
 806     // After threshold is reached, scale it by free_ratio so that more aggressive
 807     // GC is triggered as we approach code cache exhaustion
 808     threshold *= free_ratio;
 809   }
 810   // If code has been allocated in the code cache without any GC at all, make sure
 811   // a GC is eventually triggered to avoid trouble.
 812   if (allocated_since_last_ratio > threshold) {
 813     // In case the GC is concurrent, we make sure only one thread requests the GC.
 814     if (Atomic::cmpxchg(&_unloading_threshold_gc_requested, false, true) == false) {
 815       log_info(codecache)("Triggering threshold (%.3f%%) GC due to allocating %.3f%% since last unloading (%.3f%% used -> %.3f%% used)",
 816                           threshold * 100.0, allocated_since_last_ratio * 100.0, last_used_ratio * 100.0, used_ratio * 100.0);
 817       Universe::heap()->collect(GCCause::_codecache_GC_threshold);
 818     }
 819   }
 820 }
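     // Illustrative example (assumed numbers): with SweeperThreshold at 0.5% and
     // the code cache 80% used, the base threshold has already been exceeded, so
     // it is scaled by the free ratio (0.2) down to 0.1%; allocating more than
     // 0.1% of the cache since the last unloading then requests a threshold GC.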
 821 
 822 // We initialize _gc_epoch to 2, because previous_completed_gc_marking_cycle
 823 // subtracts 2 from the value, and the type is unsigned. We don't want underflow.
 824 //
 825 // Odd values mean that marking is in progress, and even values mean that no
 826 // marking is currently active.
 827 uint64_t CodeCache::_gc_epoch = 2;
 828 
 829 // After how many GCs of not being used do we consider an nmethod cold?
 830 uint64_t CodeCache::_cold_gc_count = INT_MAX;
 831 
 832 double CodeCache::_last_unloading_time = 0.0;
 833 size_t CodeCache::_last_unloading_used = 0;
 834 volatile bool CodeCache::_unloading_threshold_gc_requested = false;
 835 TruncatedSeq CodeCache::_unloading_gc_intervals(10 /* samples */);
 836 TruncatedSeq CodeCache::_unloading_allocation_rates(10 /* samples */);
 837 
 838 uint64_t CodeCache::gc_epoch() {
 839   return _gc_epoch;
 840 }
 841 
 842 bool CodeCache::is_gc_marking_cycle_active() {
 843   // Odd means that marking is active
 844   return (_gc_epoch % 2) == 1;
 845 }
 846 
 847 uint64_t CodeCache::previous_completed_gc_marking_cycle() {
 848   if (is_gc_marking_cycle_active()) {
 849     return _gc_epoch - 2;
 850   } else {
 851     return _gc_epoch - 1;
 852   }
 853 }
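     // Example of the epoch arithmetic above: while marking is active at
     // _gc_epoch == 7, the previously completed marking cycle is identified by
     // epoch 5 (7 - 2); once that cycle finishes and _gc_epoch becomes 8, the
     // most recently completed cycle is 7 (8 - 1).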
 854 
 855 void CodeCache::on_gc_marking_cycle_start() {
 856   assert(!is_gc_marking_cycle_active(), "Previous marking cycle never ended");
 857   ++_gc_epoch;
 858 }
 859 
 860 // Once started, the code cache marking cycle must only be finished after marking of
 861 // the Java heap is complete. Otherwise, nmethods could appear to be not on stack even
 862 // though they have frames in continuation StackChunks that were not yet visited.
 863 void CodeCache::on_gc_marking_cycle_finish() {
 864   assert(is_gc_marking_cycle_active(), "Marking cycle started before last one finished");
 865   ++_gc_epoch;
 866   update_cold_gc_count();
 867 }
 868 
 869 void CodeCache::arm_all_nmethods() {
 870   BarrierSetNMethod* bs_nm = BarrierSet::barrier_set()->barrier_set_nmethod();
 871   if (bs_nm != nullptr) {
 872     bs_nm->arm_all_nmethods();
 873   }
 874 }
 875 
 876 // Mark nmethods for unloading if they contain otherwise unreachable oops.
 877 void CodeCache::do_unloading(bool unloading_occurred) {
 878   assert_locked_or_safepoint(CodeCache_lock);
 879   NMethodIterator iter(NMethodIterator::all);
 880   while(iter.next()) {
 881     iter.method()->do_unloading(unloading_occurred);
 882   }
 883 }
 884 
 885 void CodeCache::verify_clean_inline_caches() {
 886 #ifdef ASSERT
 887   NMethodIterator iter(NMethodIterator::not_unloading);
 888   while(iter.next()) {
 889     nmethod* nm = iter.method();
 890     nm->verify_clean_inline_caches();
 891     nm->verify();
 892   }
 893 #endif
 894 }
 895 
 896 // Defer freeing of concurrently cleaned ExceptionCache entries until
 897 // after a global handshake operation.
 898 void CodeCache::release_exception_cache(ExceptionCache* entry) {
 899   if (SafepointSynchronize::is_at_safepoint()) {
 900     delete entry;
 901   } else {
 902     for (;;) {
 903       ExceptionCache* purge_list_head = Atomic::load(&_exception_cache_purge_list);
 904       entry->set_purge_list_next(purge_list_head);
 905       if (Atomic::cmpxchg(&_exception_cache_purge_list, purge_list_head, entry) == purge_list_head) {
 906         break;
 907       }
 908     }
 909   }
 910 }
 911 
 912 // Delete exception caches that have been concurrently unlinked,
 913 // followed by a global handshake operation.
 914 void CodeCache::purge_exception_caches() {
 915   ExceptionCache* curr = _exception_cache_purge_list;
 916   while (curr != nullptr) {
 917     ExceptionCache* next = curr->purge_list_next();
 918     delete curr;
 919     curr = next;
 920   }
 921   _exception_cache_purge_list = nullptr;
 922 }
 923 
 924 // Restart the compiler if possible and required.
 925 void CodeCache::maybe_restart_compiler(size_t freed_memory) {
 926 
 927   // Try to start the compiler again if we freed any memory
 928   if (!CompileBroker::should_compile_new_jobs() && freed_memory != 0) {
 929     CompileBroker::set_should_compile_new_jobs(CompileBroker::run_compilation);
 930     log_info(codecache)("Restarting compiler");
 931     EventJITRestart event;
 932     event.set_freedMemory(freed_memory);
 933     event.set_codeCacheMaxCapacity(CodeCache::max_capacity());
 934     event.commit();
 935   }
 936 }
 937 
 938 uint8_t CodeCache::_unloading_cycle = 1;
 939 
 940 void CodeCache::increment_unloading_cycle() {
 941   // 2-bit value (see IsUnloadingState in nmethod.cpp for details)
 942   // 0 is reserved for new methods.
 943   _unloading_cycle = (_unloading_cycle + 1) % 4;
 944   if (_unloading_cycle == 0) {
 945     _unloading_cycle = 1;
 946   }
 947 }
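     // The cycle value therefore advances 1 -> 2 -> 3 -> 1 -> ..., never
     // returning to 0, which stays reserved for newly created nmethods.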
 948 
 949 CodeCache::UnlinkingScope::UnlinkingScope(BoolObjectClosure* is_alive)
 950   : _is_unloading_behaviour(is_alive)
 951 {
 952   _saved_behaviour = IsUnloadingBehaviour::current();
 953   IsUnloadingBehaviour::set_current(&_is_unloading_behaviour);
 954   increment_unloading_cycle();
 955   DependencyContext::cleaning_start();
 956 }
 957 
 958 CodeCache::UnlinkingScope::~UnlinkingScope() {
 959   IsUnloadingBehaviour::set_current(_saved_behaviour);
 960   DependencyContext::cleaning_end();
 961 }
 962 
 963 void CodeCache::verify_oops() {
 964   MutexLocker mu(CodeCache_lock, Mutex::_no_safepoint_check_flag);
 965   VerifyOopClosure voc;
 966   NMethodIterator iter(NMethodIterator::not_unloading);
 967   while(iter.next()) {
 968     nmethod* nm = iter.method();
 969     nm->oops_do(&voc);
 970     nm->verify_oop_relocations();
 971   }
 972 }
 973 
 974 int CodeCache::blob_count(CodeBlobType code_blob_type) {
 975   CodeHeap* heap = get_code_heap(code_blob_type);
 976   return (heap != nullptr) ? heap->blob_count() : 0;
 977 }
 978 
 979 int CodeCache::blob_count() {
 980   int count = 0;
 981   FOR_ALL_HEAPS(heap) {
 982     count += (*heap)->blob_count();
 983   }
 984   return count;
 985 }
 986 
 987 int CodeCache::nmethod_count(CodeBlobType code_blob_type) {
 988   CodeHeap* heap = get_code_heap(code_blob_type);
 989   return (heap != nullptr) ? heap->nmethod_count() : 0;
 990 }
 991 
 992 int CodeCache::nmethod_count() {
 993   int count = 0;
 994   for (CodeHeap* heap : *_nmethod_heaps) {
 995     count += heap->nmethod_count();
 996   }
 997   return count;
 998 }
 999 
1000 int CodeCache::adapter_count(CodeBlobType code_blob_type) {
1001   CodeHeap* heap = get_code_heap(code_blob_type);
1002   return (heap != nullptr) ? heap->adapter_count() : 0;
1003 }
1004 
1005 int CodeCache::adapter_count() {
1006   int count = 0;
1007   FOR_ALL_HEAPS(heap) {
1008     count += (*heap)->adapter_count();
1009   }
1010   return count;
1011 }
1012 
1013 address CodeCache::low_bound(CodeBlobType code_blob_type) {
1014   CodeHeap* heap = get_code_heap(code_blob_type);
1015   return (heap != nullptr) ? (address)heap->low_boundary() : nullptr;
1016 }
1017 
1018 address CodeCache::high_bound(CodeBlobType code_blob_type) {
1019   CodeHeap* heap = get_code_heap(code_blob_type);
1020   return (heap != nullptr) ? (address)heap->high_boundary() : nullptr;
1021 }
1022 
1023 size_t CodeCache::capacity() {
1024   size_t cap = 0;
1025   FOR_ALL_ALLOCABLE_HEAPS(heap) {
1026     cap += (*heap)->capacity();
1027   }
1028   return cap;
1029 }
1030 
1031 size_t CodeCache::unallocated_capacity(CodeBlobType code_blob_type) {
1032   CodeHeap* heap = get_code_heap(code_blob_type);
1033   return (heap != nullptr) ? heap->unallocated_capacity() : 0;
1034 }
1035 
1036 size_t CodeCache::unallocated_capacity() {
1037   size_t unallocated_cap = 0;
1038   FOR_ALL_ALLOCABLE_HEAPS(heap) {
1039     unallocated_cap += (*heap)->unallocated_capacity();
1040   }
1041   return unallocated_cap;
1042 }
1043 
1044 size_t CodeCache::max_capacity() {
1045   size_t max_cap = 0;
1046   FOR_ALL_ALLOCABLE_HEAPS(heap) {
1047     max_cap += (*heap)->max_capacity();
1048   }
1049   return max_cap;
1050 }
1051 
1052 bool CodeCache::is_non_nmethod(address addr) {
1053   CodeHeap* blob = get_code_heap(CodeBlobType::NonNMethod);
1054   return blob->contains(addr);
1055 }
1056 
1057 size_t CodeCache::max_distance_to_non_nmethod() {
1058   if (!SegmentedCodeCache) {
1059     return ReservedCodeCacheSize;
1060   } else {
1061     CodeHeap* blob = get_code_heap(CodeBlobType::NonNMethod);
1062     // the max distance is minimized by placing the NonNMethod segment
1063     // in between MethodProfiled and MethodNonProfiled segments
1064     size_t dist1 = (size_t)blob->high() - (size_t)_low_bound;
1065     size_t dist2 = (size_t)_high_bound - (size_t)blob->low();
1066     return dist1 > dist2 ? dist1 : dist2;
1067   }
1068 }
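     // For example (assumed layout and size): with the NonNMethod segment placed
     // between the profiled and non-profiled segments of a 240M reserved cache,
     // both dist1 and dist2 stay well below 240M, whereas a NonNMethod segment at
     // either end would make one of them span nearly the whole reserved range.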
1069 
1070 // Returns the reverse free ratio. E.g., if 25% (1/4) of the code cache
1071 // is free, reverse_free_ratio() returns 4.
1072 // Since the code heap for each type of code blob can overflow into the next
1073 // type of code heap, return the reverse free ratio for the entire
1074 // code cache.
1075 double CodeCache::reverse_free_ratio() {
1076   double unallocated = MAX2((double)unallocated_capacity(), 1.0); // Avoid division by 0;
1077   double max = (double)max_capacity();
1078   double result = max / unallocated;
1079   assert (max >= unallocated, "Must be");
1080   assert (result >= 1.0, "reverse_free_ratio must be at least 1. It is %f", result);
1081   return result;
1082 }
1083 
1084 size_t CodeCache::bytes_allocated_in_freelists() {
1085   size_t allocated_bytes = 0;
1086   FOR_ALL_ALLOCABLE_HEAPS(heap) {
1087     allocated_bytes += (*heap)->allocated_in_freelist();
1088   }
1089   return allocated_bytes;
1090 }
1091 
1092 int CodeCache::allocated_segments() {
1093   int number_of_segments = 0;
1094   FOR_ALL_ALLOCABLE_HEAPS(heap) {
1095     number_of_segments += (*heap)->allocated_segments();
1096   }
1097   return number_of_segments;
1098 }
1099 
1100 size_t CodeCache::freelists_length() {
1101   size_t length = 0;
1102   FOR_ALL_ALLOCABLE_HEAPS(heap) {
1103     length += (*heap)->freelist_length();
1104   }
1105   return length;
1106 }
1107 
1108 void icache_init();
1109 
1110 void CodeCache::initialize() {
1111   assert(CodeCacheSegmentSize >= (uintx)CodeEntryAlignment, "CodeCacheSegmentSize must be large enough to align entry points");
1112 #ifdef COMPILER2
1113   assert(CodeCacheSegmentSize >= (uintx)OptoLoopAlignment,  "CodeCacheSegmentSize must be large enough to align inner loops");
1114 #endif
1115   assert(CodeCacheSegmentSize >= sizeof(jdouble),    "CodeCacheSegmentSize must be large enough to align constants");
1116   // This was originally just an alignment check that caused a failure; instead, round
1117   // the code cache expansion size up to the page size. In particular, Solaris moved to a
1118   // larger default page size.
1119   CodeCacheExpansionSize = align_up(CodeCacheExpansionSize, os::vm_page_size());
1120 
1121   if (SegmentedCodeCache) {
1122     // Use multiple code heaps
1123     initialize_heaps();
1124   } else {
1125     // Use a single code heap
1126     FLAG_SET_ERGO(NonNMethodCodeHeapSize, (uintx)os::vm_page_size());
1127     FLAG_SET_ERGO(ProfiledCodeHeapSize, 0);
1128     FLAG_SET_ERGO(NonProfiledCodeHeapSize, 0);
1129 
1130     // If InitialCodeCacheSize is equal to ReservedCodeCacheSize, then it's more likely
1131     // users want to use the largest available page.
1132     const size_t min_pages = (InitialCodeCacheSize == ReservedCodeCacheSize) ? 1 : 8;
1133     ReservedCodeSpace rs = reserve_heap_memory(ReservedCodeCacheSize, page_size(false, min_pages));
1134     // Register CodeHeaps with LSan as we sometimes embed pointers to malloc memory.
1135     LSAN_REGISTER_ROOT_REGION(rs.base(), rs.size());
1136     add_heap(rs, "CodeCache", CodeBlobType::All);
1137   }
1138 
1139   // Initialize ICache flush mechanism
1140   // This service is needed for os::register_code_area
1141   icache_init();
1142 
1143   // Give OS a chance to register generated code area.
1144   // This is used on Windows 64 bit platforms to register
1145   // Structured Exception Handlers for our generated code.
1146   os::register_code_area((char*)low_bound(), (char*)high_bound());
1147 }
1148 
1149 void codeCache_init() {
1150   CodeCache::initialize();
1151 }
1152 
1153 //------------------------------------------------------------------------------------------------
1154 
1155 bool CodeCache::has_nmethods_with_dependencies() {
1156   return Atomic::load_acquire(&_number_of_nmethods_with_dependencies) != 0;
1157 }
1158 
1159 void CodeCache::clear_inline_caches() {
1160   assert_locked_or_safepoint(CodeCache_lock);
1161   NMethodIterator iter(NMethodIterator::not_unloading);
1162   while(iter.next()) {
1163     iter.method()->clear_inline_caches();
1164   }
1165 }
1166 
1167 // Only used by whitebox API
1168 void CodeCache::cleanup_inline_caches_whitebox() {
1169   assert_locked_or_safepoint(CodeCache_lock);
1170   NMethodIterator iter(NMethodIterator::not_unloading);
1171   while(iter.next()) {
1172     iter.method()->cleanup_inline_caches_whitebox();
1173   }
1174 }
1175 
1176 // Keeps track of time spent checking dependencies
1177 NOT_PRODUCT(static elapsedTimer dependentCheckTime;)
1178 
1179 #ifndef PRODUCT
1180 // Check if any of the live methods' dependencies have been invalidated.
1181 // (this is expensive!)
1182 static void check_live_nmethods_dependencies(DepChange& changes) {
1183   // Checked dependencies are allocated into this ResourceMark
1184   ResourceMark rm;
1185 
1186   // Turn off dependency tracing while actually testing dependencies.
1187   FlagSetting fs(Dependencies::_verify_in_progress, true);
1188 
1189   typedef ResourceHashtable<DependencySignature, int, 11027,
1190                             AnyObj::RESOURCE_AREA, mtInternal,
1191                             &DependencySignature::hash,
1192                             &DependencySignature::equals> DepTable;
1193 
1194   DepTable* table = new DepTable();
1195 
1196   // Iterate over live nmethods and check dependencies of all nmethods that are not
1197   // marked for deoptimization. A particular dependency is only checked once.
1198   NMethodIterator iter(NMethodIterator::not_unloading);
1199   while(iter.next()) {
1200     nmethod* nm = iter.method();
1201     // Only notify for live nmethods
1202     if (!nm->is_marked_for_deoptimization()) {
1203       for (Dependencies::DepStream deps(nm); deps.next(); ) {
1204         // Construct abstraction of a dependency.
1205         DependencySignature* current_sig = new DependencySignature(deps);
1206 
1207         // Determine if dependency is already checked. table->put(...) returns
1208         // 'true' if the dependency is added (i.e., was not in the hashtable).
1209         if (table->put(*current_sig, 1)) {
1210           if (deps.check_dependency() != nullptr) {
1211             // Dependency checking failed. Print out information about the failed
1212             // dependency and finally fail with an assert. We can fail here, since
1213             // dependency checking is never done in a product build.
1214             tty->print_cr("Failed dependency:");
1215             changes.print();
1216             nm->print();
1217             nm->print_dependencies_on(tty);
1218             assert(false, "Should have been marked for deoptimization");
1219           }
1220         }
1221       }
1222     }
1223   }
1224 }
1225 #endif
1226 
1227 void CodeCache::mark_for_deoptimization(DeoptimizationScope* deopt_scope, KlassDepChange& changes) {
1228   MutexLocker mu(CodeCache_lock, Mutex::_no_safepoint_check_flag);
1229 
1230   // search the hierarchy looking for nmethods which are affected by the loading of this class
1231 
1232   // then search the interfaces this class implements looking for nmethods
1233   // which might be dependent of the fact that an interface only had one
1234   // implementor.
1235   // nmethod::check_all_dependencies only works correctly if no safepoint
1236   // can happen.
1237   NoSafepointVerifier nsv;
1238   for (DepChange::ContextStream str(changes, nsv); str.next(); ) {
1239     InstanceKlass* d = str.klass();
1240     d->mark_dependent_nmethods(deopt_scope, changes);
1241   }
1242 
1243 #ifndef PRODUCT
1244   if (VerifyDependencies) {
1245     // Object pointers are used as unique identifiers for dependency arguments. This
1246     // is only possible if no safepoint, i.e., GC occurs during the verification code.
1247     dependentCheckTime.start();
1248     check_live_nmethods_dependencies(changes);
1249     dependentCheckTime.stop();
1250   }
1251 #endif
1252 }
1253 
1254 #if INCLUDE_JVMTI
1255 // RedefineClasses support for saving nmethods that are dependent on "old" methods.
1256 // We don't really expect this table to grow very large.  If it does, it can become a hashtable.
1257 static GrowableArray<nmethod*>* old_nmethod_table = nullptr;
1258 
1259 static void add_to_old_table(nmethod* c) {
1260   if (old_nmethod_table == nullptr) {
1261     old_nmethod_table = new (mtCode) GrowableArray<nmethod*>(100, mtCode);
1262   }
1263   old_nmethod_table->push(c);
1264 }
1265 
1266 static void reset_old_method_table() {
1267   if (old_nmethod_table != nullptr) {
1268     delete old_nmethod_table;
1269     old_nmethod_table = nullptr;
1270   }
1271 }
1272 
1273 // Remove this method when flushed.
1274 void CodeCache::unregister_old_nmethod(nmethod* c) {
1275   assert_lock_strong(CodeCache_lock);
1276   if (old_nmethod_table != nullptr) {
1277     int index = old_nmethod_table->find(c);
1278     if (index != -1) {
1279       old_nmethod_table->delete_at(index);
1280     }
1281   }
1282 }
1283 
1284 void CodeCache::old_nmethods_do(MetadataClosure* f) {
1285   // Walk old method table and mark those on stack.
1286   int length = 0;
1287   if (old_nmethod_table != nullptr) {
1288     length = old_nmethod_table->length();
1289     for (int i = 0; i < length; i++) {
1290       // Walk all methods saved on the last pass.  Concurrent class unloading may
1291       // also be looking at this method's metadata, so don't delete it yet if
1292       // it is marked as unloaded.
1293       old_nmethod_table->at(i)->metadata_do(f);
1294     }
1295   }
1296   log_debug(redefine, class, nmethod)("Walked %d nmethods for mark_on_stack", length);
1297 }
1298 
1299 // Walk compiled methods and mark dependent methods for deoptimization.
1300 void CodeCache::mark_dependents_for_evol_deoptimization(DeoptimizationScope* deopt_scope) {
1301   assert(SafepointSynchronize::is_at_safepoint(), "Can only do this at a safepoint!");
1302   // Each redefinition creates a new set of nmethods that have references to "old" Methods
1303   // So delete old method table and create a new one.
1304   reset_old_method_table();
1305 
1306   NMethodIterator iter(NMethodIterator::all);
1307   while(iter.next()) {
1308     nmethod* nm = iter.method();
1309     // Walk all alive nmethods to check for old Methods.
1310     // This includes methods whose inline caches point to old methods, so
1311     // inline cache clearing is unnecessary.
1312     if (nm->has_evol_metadata()) {
1313       deopt_scope->mark(nm);
1314       add_to_old_table(nm);
1315     }
1316   }
1317 }
1318 
1319 void CodeCache::mark_all_nmethods_for_evol_deoptimization(DeoptimizationScope* deopt_scope) {
1320   assert(SafepointSynchronize::is_at_safepoint(), "Can only do this at a safepoint!");
1321   NMethodIterator iter(NMethodIterator::all);
1322   while(iter.next()) {
1323     nmethod* nm = iter.method();
1324     if (!nm->method()->is_method_handle_intrinsic()) {
1325       if (nm->can_be_deoptimized()) {
1326         deopt_scope->mark(nm);
1327       }
1328       if (nm->has_evol_metadata()) {
1329         add_to_old_table(nm);
1330       }
1331     }
1332   }
1333 }
1334 
1335 #endif // INCLUDE_JVMTI
1336 
1337 // Mark methods for deopt (if safe or possible).
1338 void CodeCache::mark_all_nmethods_for_deoptimization(DeoptimizationScope* deopt_scope) {
1339   MutexLocker mu(CodeCache_lock, Mutex::_no_safepoint_check_flag);
1340   NMethodIterator iter(NMethodIterator::not_unloading);
1341   while(iter.next()) {
1342     nmethod* nm = iter.method();
1343     if (!nm->is_native_method()) {
1344       deopt_scope->mark(nm);
1345     }
1346   }
1347 }
1348 
1349 void CodeCache::mark_for_deoptimization(DeoptimizationScope* deopt_scope, Method* dependee) {
1350   MutexLocker mu(CodeCache_lock, Mutex::_no_safepoint_check_flag);
1351 
1352   NMethodIterator iter(NMethodIterator::not_unloading);
1353   while(iter.next()) {
1354     nmethod* nm = iter.method();
1355     if (nm->is_dependent_on_method(dependee)) {
1356       deopt_scope->mark(nm);
1357     }
1358   }
1359 }
1360 
1361 void CodeCache::make_marked_nmethods_deoptimized() {
1362   RelaxedNMethodIterator iter(RelaxedNMethodIterator::not_unloading);
1363   while(iter.next()) {
1364     nmethod* nm = iter.method();
1365     if (nm->is_marked_for_deoptimization() && !nm->has_been_deoptimized() && nm->can_be_deoptimized()) {
1366       nm->make_not_entrant();
1367       nm->make_deoptimized();
1368     }
1369   }
1370 }
1371 
1372 // Marks compiled methods dependent on dependee.
1373 void CodeCache::mark_dependents_on(DeoptimizationScope* deopt_scope, InstanceKlass* dependee) {
1374   assert_lock_strong(Compile_lock);
1375 
1376   if (!has_nmethods_with_dependencies()) {
1377     return;
1378   }
1379 
1380   if (dependee->is_linked()) {
1381     // Class initialization state change.
1382     KlassInitDepChange changes(dependee);
1383     mark_for_deoptimization(deopt_scope, changes);
1384   } else {
1385     // New class is loaded.
1386     NewKlassDepChange changes(dependee);
1387     mark_for_deoptimization(deopt_scope, changes);
1388   }
1389 }
1390 
1391 // Marks and deoptimizes compiled methods dependent on the given method (used when setting breakpoints).
1392 void CodeCache::mark_dependents_on_method_for_breakpoint(const methodHandle& m_h) {
1393   assert(SafepointSynchronize::is_at_safepoint(), "invariant");
1394 
1395   DeoptimizationScope deopt_scope;
1396   // Mark the nmethods dependent on the method for deoptimization
1397   mark_for_deoptimization(&deopt_scope, m_h());
1398   deopt_scope.deoptimize_marked();
1399 }
1400 
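     // Verify every code heap and each code blob it contains.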
1401 void CodeCache::verify() {
1402   assert_locked_or_safepoint(CodeCache_lock);
1403   FOR_ALL_HEAPS(heap) {
1404     (*heap)->verify();
1405     FOR_ALL_BLOBS(cb, *heap) {
1406       cb->verify();
1407     }
1408   }
1409 }
1410 
1411 // A CodeHeap is full. Print out warning and report event.
1412 PRAGMA_DIAG_PUSH
1413 PRAGMA_FORMAT_NONLITERAL_IGNORED
1414 void CodeCache::report_codemem_full(CodeBlobType code_blob_type, bool print) {
1415   // Get nmethod heap for the given CodeBlobType and build CodeCacheFull event
1416   CodeHeap* heap = get_code_heap(code_blob_type);
1417   assert(heap != nullptr, "heap is null");
1418 
1419   int full_count = heap->report_full();
1420 
1421   if ((full_count == 1) || print) {
1422     // Not yet reported for this heap (or printing was explicitly requested), report
1423     if (SegmentedCodeCache) {
1424       ResourceMark rm;
1425       stringStream msg1_stream, msg2_stream;
1426       msg1_stream.print("%s is full. Compiler has been disabled.",
1427                         get_code_heap_name(code_blob_type));
1428       msg2_stream.print("Try increasing the code heap size using -XX:%s=",
1429                         get_code_heap_flag_name(code_blob_type));
1430       const char *msg1 = msg1_stream.as_string();
1431       const char *msg2 = msg2_stream.as_string();
1432 
1433       log_warning(codecache)("%s", msg1);
1434       log_warning(codecache)("%s", msg2);
1435       warning("%s", msg1);
1436       warning("%s", msg2);
1437     } else {
1438       const char *msg1 = "CodeCache is full. Compiler has been disabled.";
1439       const char *msg2 = "Try increasing the code cache size using -XX:ReservedCodeCacheSize=";
1440 
1441       log_warning(codecache)("%s", msg1);
1442       log_warning(codecache)("%s", msg2);
1443       warning("%s", msg1);
1444       warning("%s", msg2);
1445     }
1446     stringStream s;
1447     // Dump code cache into a buffer before locking the tty.
1448     {
1449       MutexLocker mu(CodeCache_lock, Mutex::_no_safepoint_check_flag);
1450       print_summary(&s);
1451     }
1452     {
1453       ttyLocker ttyl;
1454       tty->print("%s", s.freeze());
1455     }
1456 
1457     if (full_count == 1) {
1458       if (PrintCodeHeapAnalytics) {
1459         CompileBroker::print_heapinfo(tty, "all", 4096); // details, may be a lot!
1460       }
1461     }
1462   }
1463 
1464   EventCodeCacheFull event;
1465   if (event.should_commit()) {
1466     event.set_codeBlobType((u1)code_blob_type);
1467     event.set_startAddress((u8)heap->low_boundary());
1468     event.set_commitedTopAddress((u8)heap->high());
1469     event.set_reservedTopAddress((u8)heap->high_boundary());
1470     event.set_entryCount(heap->blob_count());
1471     event.set_methodCount(heap->nmethod_count());
1472     event.set_adaptorCount(heap->adapter_count());
1473     event.set_unallocatedCapacity(heap->unallocated_capacity());
1474     event.set_fullCount(heap->full_count());
1475     event.set_codeCacheMaxCapacity(CodeCache::max_capacity());
1476     event.commit();
1477   }
1478 }
1479 PRAGMA_DIAG_POP
1480 
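     // Print the code cache memory overhead: freelist usage, unused bytes inside
     // allocated CodeBlobs, and the size of the segment map.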
1481 void CodeCache::print_memory_overhead() {
1482   size_t wasted_bytes = 0;
1483   FOR_ALL_ALLOCABLE_HEAPS(heap) {
1484     CodeHeap* curr_heap = *heap;
1485     for (CodeBlob* cb = (CodeBlob*)curr_heap->first(); cb != nullptr; cb = (CodeBlob*)curr_heap->next(cb)) {
1486       HeapBlock* heap_block = ((HeapBlock*)cb) - 1;
1487       wasted_bytes += heap_block->length() * CodeCacheSegmentSize - cb->size();
1488     }
1489   }
1490   // Print freelist statistics, unused bytes in CodeBlobs, and the segment map size
1491   ttyLocker ttl;
1492   tty->print_cr("Number of elements in freelist: " SSIZE_FORMAT,       freelists_length());
1493   tty->print_cr("Allocated in freelist:          " SSIZE_FORMAT "kB",  bytes_allocated_in_freelists()/K);
1494   tty->print_cr("Unused bytes in CodeBlobs:      " SSIZE_FORMAT "kB",  (wasted_bytes/K));
1495   tty->print_cr("Segment map size:               " SSIZE_FORMAT "kB",  allocated_segments()/K); // 1 byte per segment
1496 }
1497 
1498 //------------------------------------------------------------------------------------------------
1499 // Non-product version
1500 
1501 #ifndef PRODUCT
1502 
1503 void CodeCache::print_trace(const char* event, CodeBlob* cb, uint size) {
1504   if (PrintCodeCache2) {  // Need to add a new flag
1505     ResourceMark rm;
1506     if (size == 0) {
1507       int s = cb->size();
1508       assert(s >= 0, "CodeBlob size is negative: %d", s);
1509       size = (uint) s;
1510     }
1511     tty->print_cr("CodeCache %s:  addr: " INTPTR_FORMAT ", size: 0x%x", event, p2i(cb), size);
1512   }
1513 }
1514 
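     // Print detailed statistics about the code cache contents: per-type blob
     // counts, nmethod states, and a size histogram of Java nmethods.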
1515 void CodeCache::print_internals() {
1516   int nmethodCount = 0;
1517   int runtimeStubCount = 0;
1518   int adapterCount = 0;
1519   int deoptimizationStubCount = 0;
1520   int uncommonTrapStubCount = 0;
1521   int bufferBlobCount = 0;
1522   int total = 0;
1523   int nmethodNotEntrant = 0;
1524   int nmethodJava = 0;
1525   int nmethodNative = 0;
1526   int max_nm_size = 0;
1527   ResourceMark rm;
1528 
1530   FOR_ALL_ALLOCABLE_HEAPS(heap) {
1531     if ((_nmethod_heaps->length() >= 1) && Verbose) {
1532       tty->print_cr("-- %s --", (*heap)->name());
1533     }
1534     FOR_ALL_BLOBS(cb, *heap) {
1535       total++;
1536       if (cb->is_nmethod()) {
1537         nmethod* nm = (nmethod*)cb;
1538 
1539         if (Verbose && nm->method() != nullptr) {
1540           ResourceMark rm;
1541           char *method_name = nm->method()->name_and_sig_as_C_string();
1542           tty->print("%s", method_name);
1543           if (nm->is_not_entrant()) { tty->print_cr(" not-entrant"); }
1544         }
1545 
1546         nmethodCount++;
1547 
1548         if (nm->is_not_entrant()) { nmethodNotEntrant++; }
1549         if (nm->method() != nullptr && nm->is_native_method()) { nmethodNative++; }
1550 
1551         if (nm->method() != nullptr && nm->is_java_method()) {
1552           nmethodJava++;
1553           max_nm_size = MAX2(max_nm_size, nm->size());
1554         }
1555       } else if (cb->is_runtime_stub()) {
1556         runtimeStubCount++;
1557       } else if (cb->is_deoptimization_stub()) {
1558         deoptimizationStubCount++;
1559       } else if (cb->is_uncommon_trap_stub()) {
1560         uncommonTrapStubCount++;
1561       } else if (cb->is_adapter_blob()) {
1562         adapterCount++;
1563       } else if (cb->is_buffer_blob()) {
1564         bufferBlobCount++;
1565       }
1566     }
1567   }
1568 
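       // Build a histogram of Java nmethod sizes using buckets of bucketSize bytes.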
1569   int bucketSize = 512;
1570   int bucketLimit = max_nm_size / bucketSize + 1;
1571   int *buckets = NEW_C_HEAP_ARRAY(int, bucketLimit, mtCode);
1572   memset(buckets, 0, sizeof(int) * bucketLimit);
1573 
1574   NMethodIterator iter(NMethodIterator::all);
1575   while (iter.next()) {
1576     nmethod* nm = iter.method();
1577     if (nm->method() != nullptr && nm->is_java_method()) {
1578       buckets[nm->size() / bucketSize]++;
1579     }
1580   }
1581 
1582   tty->print_cr("Code Cache Entries (total of %d)", total);
1583   tty->print_cr("-------------------------------------------------");
1584   tty->print_cr("nmethods: %d", nmethodCount);
1585   tty->print_cr("\tnot_entrant: %d", nmethodNotEntrant);
1586   tty->print_cr("\tjava: %d", nmethodJava);
1587   tty->print_cr("\tnative: %d", nmethodNative);
1588   tty->print_cr("runtime_stubs: %d", runtimeStubCount);
1589   tty->print_cr("adapters: %d", adapterCount);
1590   tty->print_cr("buffer blobs: %d", bufferBlobCount);
1591   tty->print_cr("deoptimization_stubs: %d", deoptimizationStubCount);
1592   tty->print_cr("uncommon_traps: %d", uncommonTrapStubCount);
1593   tty->print_cr("\nnmethod size distribution");
1594   tty->print_cr("-------------------------------------------------");
1595 
1596   for (int i = 0; i < bucketLimit; i++) {
1597     if (buckets[i] != 0) {
1598       tty->print("%d - %d bytes", i * bucketSize, (i + 1) * bucketSize);
1599       tty->fill_to(40);
1600       tty->print_cr("%d", buckets[i]);
1601     }
1602   }
1603 
1604   FREE_C_HEAP_ARRAY(int, buckets);
1605   print_memory_overhead();
1606 }
1607 
1608 #endif // !PRODUCT
1609 
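     // Print a code cache summary; in non-product builds with Verbose enabled, also
     // print per-compilation-level nmethod sizes, non-nmethod blob sizes, and
     // (in WizardMode) OopMap usage.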
1610 void CodeCache::print() {
1611   print_summary(tty);
1612 
1613 #ifndef PRODUCT
1614   if (!Verbose) return;
1615 
1616   CodeBlob_sizes live[CompLevel_full_optimization + 1];
1617   CodeBlob_sizes runtimeStub;
1618   CodeBlob_sizes uncommonTrapStub;
1619   CodeBlob_sizes deoptimizationStub;
1620   CodeBlob_sizes adapter;
1621   CodeBlob_sizes bufferBlob;
1622   CodeBlob_sizes other;
1623 
1624   FOR_ALL_ALLOCABLE_HEAPS(heap) {
1625     FOR_ALL_BLOBS(cb, *heap) {
1626       if (cb->is_nmethod()) {
1627         const int level = cb->as_nmethod()->comp_level();
1628         assert(0 <= level && level <= CompLevel_full_optimization, "Invalid compilation level");
1629         live[level].add(cb);
1630       } else if (cb->is_runtime_stub()) {
1631         runtimeStub.add(cb);
1632       } else if (cb->is_deoptimization_stub()) {
1633         deoptimizationStub.add(cb);
1634       } else if (cb->is_uncommon_trap_stub()) {
1635         uncommonTrapStub.add(cb);
1636       } else if (cb->is_adapter_blob()) {
1637         adapter.add(cb);
1638       } else if (cb->is_buffer_blob()) {
1639         bufferBlob.add(cb);
1640       } else {
1641         other.add(cb);
1642       }
1643     }
1644   }
1645 
1646   tty->print_cr("nmethod dependency checking time %fs", dependentCheckTime.seconds());
1647 
1648   tty->print_cr("nmethod blobs per compilation level:");
1649   for (int i = 0; i <= CompLevel_full_optimization; i++) {
1650     const char *level_name;
1651     switch (i) {
1652     case CompLevel_none:              level_name = "none";              break;
1653     case CompLevel_simple:            level_name = "simple";            break;
1654     case CompLevel_limited_profile:   level_name = "limited profile";   break;
1655     case CompLevel_full_profile:      level_name = "full profile";      break;
1656     case CompLevel_full_optimization: level_name = "full optimization"; break;
1657     default: assert(false, "invalid compilation level");
1658     }
1659     tty->print_cr("%s:", level_name);
1660     live[i].print("live");
1661   }
1662 
1663   struct {
1664     const char* name;
1665     const CodeBlob_sizes* sizes;
1666   } non_nmethod_blobs[] = {
1667     { "runtime",        &runtimeStub },
1668     { "uncommon trap",  &uncommonTrapStub },
1669     { "deoptimization", &deoptimizationStub },
1670     { "adapter",        &adapter },
1671     { "buffer blob",    &bufferBlob },
1672     { "other",          &other },
1673   };
1674   tty->print_cr("Non-nmethod blobs:");
1675   for (auto& blob: non_nmethod_blobs) {
1676     blob.sizes->print(blob.name);
1677   }
1678 
1679   if (WizardMode) {
1680     // Print the oop_map usage.
1681     int code_size = 0;
1682     int number_of_blobs = 0;
1683     int number_of_oop_maps = 0;
1684     int map_size = 0;
1685     FOR_ALL_ALLOCABLE_HEAPS(heap) {
1686       FOR_ALL_BLOBS(cb, *heap) {
1687         number_of_blobs++;
1688         code_size += cb->code_size();
1689         ImmutableOopMapSet* set = cb->oop_maps();
1690         if (set != nullptr) {
1691           number_of_oop_maps += set->count();
1692           map_size           += set->nr_of_bytes();
1693         }
1694       }
1695     }
1696     tty->print_cr("OopMaps");
1697     tty->print_cr("  #blobs    = %d", number_of_blobs);
1698     tty->print_cr("  code size = %d", code_size);
1699     tty->print_cr("  #oop_maps = %d", number_of_oop_maps);
1700     tty->print_cr("  map size  = %d", map_size);
1701   }
1702 
1703 #endif // !PRODUCT
1704 }
1705 
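     // Print size/used/max_used/free figures for each code heap; with 'detailed',
     // also print heap bounds, blob counts, and the compilation status.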
1706 void CodeCache::print_summary(outputStream* st, bool detailed) {
1707   int full_count = 0;
1708   julong total_used = 0;
1709   julong total_max_used = 0;
1710   julong total_free = 0;
1711   julong total_size = 0;
1712   FOR_ALL_HEAPS(heap_iterator) {
1713     CodeHeap* heap = (*heap_iterator);
1714     size_t total = (heap->high_boundary() - heap->low_boundary());
1715     if (_heaps->length() >= 1) {
1716       st->print("%s:", heap->name());
1717     } else {
1718       st->print("CodeCache:");
1719     }
1720     size_t size = total/K;
1721     size_t used = (total - heap->unallocated_capacity())/K;
1722     size_t max_used = heap->max_allocated_capacity()/K;
1723     size_t free = heap->unallocated_capacity()/K;
1724     total_size += size;
1725     total_used += used;
1726     total_max_used += max_used;
1727     total_free += free;
1728     st->print_cr(" size=" SIZE_FORMAT "Kb used=" SIZE_FORMAT
1729                  "Kb max_used=" SIZE_FORMAT "Kb free=" SIZE_FORMAT "Kb",
1730                  size, used, max_used, free);
1731 
1732     if (detailed) {
1733       st->print_cr(" bounds [" INTPTR_FORMAT ", " INTPTR_FORMAT ", " INTPTR_FORMAT "]",
1734                    p2i(heap->low_boundary()),
1735                    p2i(heap->high()),
1736                    p2i(heap->high_boundary()));
1737 
1738       full_count += get_codemem_full_count(heap->code_blob_type());
1739     }
1740   }
1741 
1742   if (detailed) {
1743     if (SegmentedCodeCache) {
1744       st->print("CodeCache:");
1745       st->print_cr(" size=" JULONG_FORMAT "Kb, used=" JULONG_FORMAT
1746                    "Kb, max_used=" JULONG_FORMAT "Kb, free=" JULONG_FORMAT "Kb",
1747                    total_size, total_used, total_max_used, total_free);
1748     }
1749     st->print_cr(" total_blobs=" UINT32_FORMAT ", nmethods=" UINT32_FORMAT
1750                  ", adapters=" UINT32_FORMAT ", full_count=" UINT32_FORMAT,
1751                  blob_count(), nmethod_count(), adapter_count(), full_count);
1752     st->print_cr("Compilation: %s, stopped_count=%d, restarted_count=%d",
1753                  CompileBroker::should_compile_new_jobs() ?
1754                  "enabled" : Arguments::mode() == Arguments::_int ?
1755                  "disabled (interpreter mode)" :
1756                  "disabled (not enough contiguous free space left)",
1757                  CompileBroker::get_total_compiler_stopped_count(),
1758                  CompileBroker::get_total_compiler_restarted_count());
1759   }
1760 }
1761 
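     // Print one line per not-unloading nmethod: compile id, compilation level,
     // state, method name, and code boundaries.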
1762 void CodeCache::print_codelist(outputStream* st) {
1763   MutexLocker mu(CodeCache_lock, Mutex::_no_safepoint_check_flag);
1764 
1765   NMethodIterator iter(NMethodIterator::not_unloading);
1766   while (iter.next()) {
1767     nmethod* nm = iter.method();
1768     ResourceMark rm;
1769     char* method_name = nm->method()->name_and_sig_as_C_string();
1770     st->print_cr("%d %d %d %s [" INTPTR_FORMAT ", " INTPTR_FORMAT " - " INTPTR_FORMAT "]",
1771                  nm->compile_id(), nm->comp_level(), nm->get_state(),
1772                  method_name,
1773                  (intptr_t)nm->header_begin(), (intptr_t)nm->code_begin(), (intptr_t)nm->code_end());
1774   }
1775 }
1776 
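     // Print the detailed code cache summary.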
1777 void CodeCache::print_layout(outputStream* st) {
1778   MutexLocker mu(CodeCache_lock, Mutex::_no_safepoint_check_flag);
1779   ResourceMark rm;
1780   print_summary(st, true);
1781 }
1782 
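     // Print code cache statistics as name='value' attributes for inclusion in log output.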
1783 void CodeCache::log_state(outputStream* st) {
1784   st->print(" total_blobs='" UINT32_FORMAT "' nmethods='" UINT32_FORMAT "'"
1785             " adapters='" UINT32_FORMAT "' free_code_cache='" SIZE_FORMAT "'",
1786             blob_count(), nmethod_count(), adapter_count(),
1787             unallocated_capacity());
1788 }
1789 
1790 #ifdef LINUX
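     // Write a perf map file ("<code start> <code size> <name>" per code blob) so the
     // Linux perf tool can resolve symbols of JIT-compiled code.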
1791 void CodeCache::write_perf_map(const char* filename) {
1792   MutexLocker mu(CodeCache_lock, Mutex::_no_safepoint_check_flag);
1793 
1794   // Perf expects to find the map file at /tmp/perf-<pid>.map
1795   // if the file name is not specified.
1796   char fname[32];
1797   if (filename == nullptr) {
1798     jio_snprintf(fname, sizeof(fname), "/tmp/perf-%d.map", os::current_process_id());
1799     filename = fname;
1800   }
1801 
1802   fileStream fs(filename, "w");
1803   if (!fs.is_open()) {
1804     log_warning(codecache)("Failed to create %s for perf map", filename);
1805     return;
1806   }
1807 
1808   AllCodeBlobsIterator iter(AllCodeBlobsIterator::not_unloading);
1809   while (iter.next()) {
1810     CodeBlob *cb = iter.method();
1811     ResourceMark rm;
1812     const char* method_name =
1813       cb->is_nmethod() ? cb->as_nmethod()->method()->external_name()
1814                        : cb->name();
1815     fs.print_cr(INTPTR_FORMAT " " INTPTR_FORMAT " %s",
1816                 (intptr_t)cb->code_begin(), (intptr_t)cb->code_size(),
1817                 method_name);
1818   }
1819 }
1820 #endif // LINUX
1821 
1822 //---<  BEGIN  >--- CodeHeap State Analytics.
1823 
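     // The functions below forward to CodeHeapState for each allocable code heap.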
1824 void CodeCache::aggregate(outputStream *out, size_t granularity) {
1825   FOR_ALL_ALLOCABLE_HEAPS(heap) {
1826     CodeHeapState::aggregate(out, (*heap), granularity);
1827   }
1828 }
1829 
1830 void CodeCache::discard(outputStream *out) {
1831   FOR_ALL_ALLOCABLE_HEAPS(heap) {
1832     CodeHeapState::discard(out, (*heap));
1833   }
1834 }
1835 
1836 void CodeCache::print_usedSpace(outputStream *out) {
1837   FOR_ALL_ALLOCABLE_HEAPS(heap) {
1838     CodeHeapState::print_usedSpace(out, (*heap));
1839   }
1840 }
1841 
1842 void CodeCache::print_freeSpace(outputStream *out) {
1843   FOR_ALL_ALLOCABLE_HEAPS(heap) {
1844     CodeHeapState::print_freeSpace(out, (*heap));
1845   }
1846 }
1847 
1848 void CodeCache::print_count(outputStream *out) {
1849   FOR_ALL_ALLOCABLE_HEAPS(heap) {
1850     CodeHeapState::print_count(out, (*heap));
1851   }
1852 }
1853 
1854 void CodeCache::print_space(outputStream *out) {
1855   FOR_ALL_ALLOCABLE_HEAPS(heap) {
1856     CodeHeapState::print_space(out, (*heap));
1857   }
1858 }
1859 
1860 void CodeCache::print_age(outputStream *out) {
1861   FOR_ALL_ALLOCABLE_HEAPS(heap) {
1862     CodeHeapState::print_age(out, (*heap));
1863   }
1864 }
1865 
1866 void CodeCache::print_names(outputStream *out) {
1867   FOR_ALL_ALLOCABLE_HEAPS(heap) {
1868     CodeHeapState::print_names(out, (*heap));
1869   }
1870 }
1871 //---<  END  >--- CodeHeap State Analytics.