1 /*
   2  * Copyright (c) 1997, 2023, Oracle and/or its affiliates. All rights reserved.
   3  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
   4  *
   5  * This code is free software; you can redistribute it and/or modify it
   6  * under the terms of the GNU General Public License version 2 only, as
   7  * published by the Free Software Foundation.
   8  *
   9  * This code is distributed in the hope that it will be useful, but WITHOUT
  10  * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
  11  * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
  12  * version 2 for more details (a copy is included in the LICENSE file that
  13  * accompanied this code).
  14  *
  15  * You should have received a copy of the GNU General Public License version
  16  * 2 along with this work; if not, write to the Free Software Foundation,
  17  * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
  18  *
  19  * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
  20  * or visit www.oracle.com if you need additional information or have any
  21  * questions.
  22  *
  23  */
  24 
  25 #include "precompiled.hpp"
  26 #include "code/codeBlob.hpp"
  27 #include "code/codeCache.hpp"
  28 #include "code/codeHeapState.hpp"
  29 #include "code/compiledIC.hpp"
  30 #include "code/dependencies.hpp"
  31 #include "code/dependencyContext.hpp"
  32 #include "code/nmethod.hpp"
  33 #include "code/pcDesc.hpp"
  34 #include "compiler/compilationPolicy.hpp"
  35 #include "compiler/compileBroker.hpp"
  36 #include "compiler/compilerDefinitions.inline.hpp"
  37 #include "compiler/oopMap.hpp"
  38 #include "gc/shared/barrierSetNMethod.hpp"
  39 #include "gc/shared/classUnloadingContext.hpp"
  40 #include "gc/shared/collectedHeap.hpp"
  41 #include "jfr/jfrEvents.hpp"
  42 #include "jvm_io.h"
  43 #include "logging/log.hpp"
  44 #include "logging/logStream.hpp"
  45 #include "memory/allocation.inline.hpp"
  46 #include "memory/iterator.hpp"
  47 #include "memory/resourceArea.hpp"
  48 #include "memory/universe.hpp"
  49 #include "oops/method.inline.hpp"
  50 #include "oops/objArrayOop.hpp"
  51 #include "oops/oop.inline.hpp"
  52 #include "oops/verifyOopClosure.hpp"
  53 #include "runtime/arguments.hpp"
  54 #include "runtime/atomic.hpp"
  55 #include "runtime/deoptimization.hpp"
  56 #include "runtime/globals_extension.hpp"
  57 #include "runtime/handles.inline.hpp"
  58 #include "runtime/icache.hpp"
  59 #include "runtime/init.hpp"
  60 #include "runtime/java.hpp"
  61 #include "runtime/mutexLocker.hpp"
  62 #include "runtime/os.inline.hpp"
  63 #include "runtime/safepointVerifiers.hpp"
  64 #include "runtime/vmThread.hpp"
  65 #include "sanitizers/leak.hpp"
  66 #include "services/memoryService.hpp"
  67 #include "utilities/align.hpp"
  68 #include "utilities/vmError.hpp"
  69 #include "utilities/xmlstream.hpp"
  70 #ifdef COMPILER1
  71 #include "c1/c1_Compilation.hpp"
  72 #include "c1/c1_Compiler.hpp"
  73 #endif
  74 #ifdef COMPILER2
  75 #include "opto/c2compiler.hpp"
  76 #include "opto/compile.hpp"
  77 #include "opto/node.hpp"
  78 #endif
  79 
  80 // Helper class for printing in CodeCache
  81 class CodeBlob_sizes {
  82  private:
  83   int count;
  84   int total_size;
  85   int header_size;
  86   int code_size;
  87   int stub_size;
  88   int relocation_size;
  89   int scopes_oop_size;
  90   int scopes_metadata_size;
  91   int scopes_data_size;
  92   int scopes_pcs_size;
  93 
  94  public:
  95   CodeBlob_sizes() {
  96     count            = 0;
  97     total_size       = 0;
  98     header_size      = 0;
  99     code_size        = 0;
 100     stub_size        = 0;
 101     relocation_size  = 0;
 102     scopes_oop_size  = 0;
 103     scopes_metadata_size  = 0;
 104     scopes_data_size = 0;
 105     scopes_pcs_size  = 0;
 106   }
 107 
 108   int total() const                              { return total_size; }
 109   bool is_empty() const                          { return count == 0; }
 110 
 111   void print(const char* title) const {
 112     if (is_empty()) {
 113       tty->print_cr(" #%d %s = %dK",
 114                     count,
 115                     title,
 116                     total()                 / (int)K);
 117     } else {
 118       tty->print_cr(" #%d %s = %dK (hdr %dK %d%%, loc %dK %d%%, code %dK %d%%, stub %dK %d%%, [oops %dK %d%%, metadata %dK %d%%, data %dK %d%%, pcs %dK %d%%])",
 119                     count,
 120                     title,
 121                     total()                 / (int)K,
 122                     header_size             / (int)K,
 123                     header_size             * 100 / total_size,
 124                     relocation_size         / (int)K,
 125                     relocation_size         * 100 / total_size,
 126                     code_size               / (int)K,
 127                     code_size               * 100 / total_size,
 128                     stub_size               / (int)K,
 129                     stub_size               * 100 / total_size,
 130                     scopes_oop_size         / (int)K,
 131                     scopes_oop_size         * 100 / total_size,
 132                     scopes_metadata_size    / (int)K,
 133                     scopes_metadata_size    * 100 / total_size,
 134                     scopes_data_size        / (int)K,
 135                     scopes_data_size        * 100 / total_size,
 136                     scopes_pcs_size         / (int)K,
 137                     scopes_pcs_size         * 100 / total_size);
 138     }
 139   }
 140 
 141   void add(CodeBlob* cb) {
 142     count++;
 143     total_size       += cb->size();
 144     header_size      += cb->header_size();
 145     relocation_size  += cb->relocation_size();
 146     if (cb->is_nmethod()) {
 147       nmethod* nm = cb->as_nmethod_or_null();
 148       code_size        += nm->insts_size();
 149       stub_size        += nm->stub_size();
 150 
 151       scopes_oop_size  += nm->oops_size();
 152       scopes_metadata_size  += nm->metadata_size();
 153       scopes_data_size += nm->scopes_data_size();
 154       scopes_pcs_size  += nm->scopes_pcs_size();
 155     } else {
 156       code_size        += cb->code_size();
 157     }
 158   }
 159 };
 160 
 161 // Iterate over all CodeHeaps
 162 #define FOR_ALL_HEAPS(heap) for (GrowableArrayIterator<CodeHeap*> heap = _heaps->begin(); heap != _heaps->end(); ++heap)
 163 #define FOR_ALL_NMETHOD_HEAPS(heap) for (GrowableArrayIterator<CodeHeap*> heap = _nmethod_heaps->begin(); heap != _nmethod_heaps->end(); ++heap)
 164 #define FOR_ALL_ALLOCABLE_HEAPS(heap) for (GrowableArrayIterator<CodeHeap*> heap = _allocable_heaps->begin(); heap != _allocable_heaps->end(); ++heap)
 165 
 166 // Iterate over all CodeBlobs (cb) on the given CodeHeap
 167 #define FOR_ALL_BLOBS(cb, heap) for (CodeBlob* cb = first_blob(heap); cb != nullptr; cb = next_blob(heap, cb))
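// Illustrative usage (added for clarity, not part of the original sources): a typical
// traversal with these macros, mirroring the pattern used by blobs_do() and verify()
// further down in this file, looks like
//   FOR_ALL_HEAPS(heap) {
//     FOR_ALL_BLOBS(cb, *heap) {
//       /* inspect cb */
//     }
//   }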
 168 
 169 address CodeCache::_low_bound = 0;
 170 address CodeCache::_high_bound = 0;
 171 volatile int CodeCache::_number_of_nmethods_with_dependencies = 0;
 172 ExceptionCache* volatile CodeCache::_exception_cache_purge_list = nullptr;
 173 
 174 // Initialize arrays of CodeHeap subsets
 175 GrowableArray<CodeHeap*>* CodeCache::_heaps = new(mtCode) GrowableArray<CodeHeap*> (static_cast<int>(CodeBlobType::All), mtCode);
 176 GrowableArray<CodeHeap*>* CodeCache::_compiled_heaps = new(mtCode) GrowableArray<CodeHeap*> (static_cast<int>(CodeBlobType::All), mtCode);
 177 GrowableArray<CodeHeap*>* CodeCache::_nmethod_heaps = new(mtCode) GrowableArray<CodeHeap*> (static_cast<int>(CodeBlobType::All), mtCode);
 178 GrowableArray<CodeHeap*>* CodeCache::_allocable_heaps = new(mtCode) GrowableArray<CodeHeap*> (static_cast<int>(CodeBlobType::All), mtCode);
 179 
 180 void CodeCache::check_heap_sizes(size_t non_nmethod_size, size_t profiled_size, size_t non_profiled_size, size_t cache_size, bool all_set) {
 181   size_t total_size = non_nmethod_size + profiled_size + non_profiled_size;
 182   // Prepare error message
 183   const char* error = "Invalid code heap sizes";
 184   err_msg message("NonNMethodCodeHeapSize (" SIZE_FORMAT "K) + ProfiledCodeHeapSize (" SIZE_FORMAT "K)"
 185                   " + NonProfiledCodeHeapSize (" SIZE_FORMAT "K) = " SIZE_FORMAT "K",
 186           non_nmethod_size/K, profiled_size/K, non_profiled_size/K, total_size/K);
 187 
 188   if (total_size > cache_size) {
 189     // Some code heap sizes were explicitly set: total_size must be <= cache_size
 190     message.append(" is greater than ReservedCodeCacheSize (" SIZE_FORMAT "K).", cache_size/K);
 191     vm_exit_during_initialization(error, message);
 192   } else if (all_set && total_size != cache_size) {
 193     // All code heap sizes were explicitly set: total_size must equal cache_size
 194     message.append(" is not equal to ReservedCodeCacheSize (" SIZE_FORMAT "K).", cache_size/K);
 195     vm_exit_during_initialization(error, message);
 196   }
 197 }
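// Example (illustrative numbers only): with -XX:ReservedCodeCacheSize=240m and only
// -XX:ProfiledCodeHeapSize=200m given on the command line, the two unset heaps are
// represented by min_size placeholders, so the sum may still fit; if all three heap
// size flags are set explicitly, their sum must equal ReservedCodeCacheSize exactly.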
 198 
 199 void CodeCache::initialize_heaps() {
 200   bool non_nmethod_set      = FLAG_IS_CMDLINE(NonNMethodCodeHeapSize);
 201   bool profiled_set         = FLAG_IS_CMDLINE(ProfiledCodeHeapSize);
 202   bool non_profiled_set     = FLAG_IS_CMDLINE(NonProfiledCodeHeapSize);
 203   const size_t ps           = page_size(false, 8);
 204   const size_t min_size     = MAX2(os::vm_allocation_granularity(), ps);
 205   const size_t cache_size   = ReservedCodeCacheSize;
 206   size_t non_nmethod_size   = NonNMethodCodeHeapSize;
 207   size_t profiled_size      = ProfiledCodeHeapSize;
 208   size_t non_profiled_size  = NonProfiledCodeHeapSize;
 209   // Check if total size set via command line flags exceeds the reserved size
 210   check_heap_sizes((non_nmethod_set  ? non_nmethod_size  : min_size),
 211                    (profiled_set     ? profiled_size     : min_size),
 212                    (non_profiled_set ? non_profiled_size : min_size),
 213                    cache_size,
 214                    non_nmethod_set && profiled_set && non_profiled_set);
 215 
 216   // Determine size of compiler buffers
 217   size_t code_buffers_size = 0;
 218 #ifdef COMPILER1
 219   // C1 temporary code buffers (see Compiler::init_buffer_blob())
 220   const int c1_count = CompilationPolicy::c1_count();
 221   code_buffers_size += c1_count * Compiler::code_buffer_size();
 222 #endif
 223 #ifdef COMPILER2
 224   // C2 scratch buffers (see Compile::init_scratch_buffer_blob())
 225   const int c2_count = CompilationPolicy::c2_count();
 226   // Initial size of constant table (this may be increased if a compiled method needs more space)
 227   code_buffers_size += c2_count * C2Compiler::initial_code_buffer_size();
 228 #endif
 229 
 230   // Increase default non_nmethod_size to account for compiler buffers
 231   if (!non_nmethod_set) {
 232     non_nmethod_size += code_buffers_size;
 233   }
 234   // Calculate default CodeHeap sizes if not set by user
 235   if (!non_nmethod_set && !profiled_set && !non_profiled_set) {
 236     // Leave room for the other two parts of the code cache
 237     const size_t max_non_nmethod_size = cache_size - 2 * min_size;
 238     // Check if we have enough space for the non-nmethod code heap
 239     if (max_non_nmethod_size >= non_nmethod_size) {
 240       // Use the default value for non_nmethod_size and one half of the
 241       // remaining size for non-profiled and one half for profiled methods
 242       size_t remaining_size = cache_size - non_nmethod_size;
 243       profiled_size = remaining_size / 2;
 244       non_profiled_size = remaining_size - profiled_size;
 245     } else {
 246       // Use all space for the non-nmethod heap and set other heaps to minimal size
 247       non_nmethod_size = max_non_nmethod_size;
 248       profiled_size = min_size;
 249       non_profiled_size = min_size;
 250     }
 251   } else if (!non_nmethod_set || !profiled_set || !non_profiled_set) {
 252     // The user explicitly set some code heap sizes. Increase or decrease the (default)
 253     // sizes of the other code heaps accordingly. First adapt non-profiled and profiled
 254     // code heap sizes and then only change non-nmethod code heap size if still necessary.
 255     intx diff_size = cache_size - (non_nmethod_size + profiled_size + non_profiled_size);
 256     if (non_profiled_set) {
 257       if (!profiled_set) {
 258         // Adapt size of profiled code heap
 259         if (diff_size < 0 && ((intx)profiled_size + diff_size) <= 0) {
 260           // Not enough space available, set to minimum size
 261           diff_size += profiled_size - min_size;
 262           profiled_size = min_size;
 263         } else {
 264           profiled_size += diff_size;
 265           diff_size = 0;
 266         }
 267       }
 268     } else if (profiled_set) {
 269       // Adapt size of non-profiled code heap
 270       if (diff_size < 0 && ((intx)non_profiled_size + diff_size) <= 0) {
 271         // Not enough space available, set to minimum size
 272         diff_size += non_profiled_size - min_size;
 273         non_profiled_size = min_size;
 274       } else {
 275         non_profiled_size += diff_size;
 276         diff_size = 0;
 277       }
 278     } else if (non_nmethod_set) {
 279       // Distribute remaining size between profiled and non-profiled code heaps
 280       diff_size = cache_size - non_nmethod_size;
 281       profiled_size = diff_size / 2;
 282       non_profiled_size = diff_size - profiled_size;
 283       diff_size = 0;
 284     }
 285     if (diff_size != 0) {
 286       // Use non-nmethod code heap for remaining space requirements
 287       assert(!non_nmethod_set && ((intx)non_nmethod_size + diff_size) > 0, "sanity");
 288       non_nmethod_size += diff_size;
 289     }
 290   }
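  // Worked example (illustrative numbers, not actual defaults): with cache_size = 240M and
  // no heap sizes set on the command line, a non_nmethod_size of 8M (after adding the
  // compiler buffer estimate) leaves 232M, which is split into profiled_size = 116M and
  // non_profiled_size = 116M.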
 291 
 292   // We do not need the profiled CodeHeap, use all space for the non-profiled CodeHeap
 293   if (!heap_available(CodeBlobType::MethodProfiled)) {
 294     non_profiled_size += profiled_size;
 295     profiled_size = 0;
 296   }
 297   // We do not need the non-profiled CodeHeap, use all space for the non-nmethod CodeHeap
 298   if (!heap_available(CodeBlobType::MethodNonProfiled)) {
 299     non_nmethod_size += non_profiled_size;
 300     non_profiled_size = 0;
 301   }
 302   // Make sure we have enough space for VM internal code
 303   uint min_code_cache_size = CodeCacheMinimumUseSpace DEBUG_ONLY(* 3);
 304   if (non_nmethod_size < min_code_cache_size) {
 305     vm_exit_during_initialization(err_msg(
 306         "Not enough space in non-nmethod code heap to run VM: " SIZE_FORMAT "K < " SIZE_FORMAT "K",
 307         non_nmethod_size/K, min_code_cache_size/K));
 308   }
 309 
 310   // Verify sizes and update flag values
 311   assert(non_profiled_size + profiled_size + non_nmethod_size == cache_size, "Invalid code heap sizes");
 312   FLAG_SET_ERGO(NonNMethodCodeHeapSize, non_nmethod_size);
 313   FLAG_SET_ERGO(ProfiledCodeHeapSize, profiled_size);
 314   FLAG_SET_ERGO(NonProfiledCodeHeapSize, non_profiled_size);
 315 
 316   // Print warning if using large pages but not able to use the size given
 317   if (UseLargePages) {
 318     const size_t lg_ps = page_size(false, 1);
 319     if (ps < lg_ps) {
 320       log_warning(codecache)("Code cache size too small for " PROPERFMT " pages. "
 321                              "Reverting to smaller page size (" PROPERFMT ").",
 322                              PROPERFMTARGS(lg_ps), PROPERFMTARGS(ps));
 323     }
 324   }
 325 
 326   // Note: if large page support is enabled, min_size is at least the large
 327   // page size. This ensures that the code cache is covered by large pages.
 328   non_nmethod_size = align_up(non_nmethod_size, min_size);
 329   profiled_size    = align_down(profiled_size, min_size);
 330   non_profiled_size = align_down(non_profiled_size, min_size);
 331 
 332   // Reserve one continuous chunk of memory for CodeHeaps and split it into
 333   // parts for the individual heaps. The memory layout looks like this:
 334   // ---------- high -----------
 335   //    Non-profiled nmethods
 336   //         Non-nmethods
 337   //      Profiled nmethods
 338   // ---------- low ------------
 339   ReservedCodeSpace rs = reserve_heap_memory(cache_size, ps);
 340   ReservedSpace profiled_space      = rs.first_part(profiled_size);
 341   ReservedSpace rest                = rs.last_part(profiled_size);
 342   ReservedSpace non_method_space    = rest.first_part(non_nmethod_size);
 343   ReservedSpace non_profiled_space  = rest.last_part(non_nmethod_size);
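  // Note (added for clarity): rs.first_part() is the low-address end of the reservation,
  // so the profiled heap ends up lowest, the non-nmethod heap in the middle and the
  // non-profiled heap at the top, matching the layout sketch above.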
 344 
 345   // Register CodeHeaps with LSan as we sometimes embed pointers to malloc memory.
 346   LSAN_REGISTER_ROOT_REGION(rs.base(), rs.size());
 347 
 348   // Non-nmethods (stubs, adapters, ...)
 349   add_heap(non_method_space, "CodeHeap 'non-nmethods'", CodeBlobType::NonNMethod);
 350   // Tier 2 and tier 3 (profiled) methods
 351   add_heap(profiled_space, "CodeHeap 'profiled nmethods'", CodeBlobType::MethodProfiled);
 352   // Tier 1 and tier 4 (non-profiled) methods and native methods
 353   add_heap(non_profiled_space, "CodeHeap 'non-profiled nmethods'", CodeBlobType::MethodNonProfiled);
 354 }
 355 
 356 size_t CodeCache::page_size(bool aligned, size_t min_pages) {
 357   return aligned ? os::page_size_for_region_aligned(ReservedCodeCacheSize, min_pages) :
 358                    os::page_size_for_region_unaligned(ReservedCodeCacheSize, min_pages);
 359 }
 360 
 361 ReservedCodeSpace CodeCache::reserve_heap_memory(size_t size, size_t rs_ps) {
 362   // Align and reserve space for code cache
 363   const size_t rs_align = MAX2(rs_ps, os::vm_allocation_granularity());
 364   const size_t rs_size = align_up(size, rs_align);
 365   ReservedCodeSpace rs(rs_size, rs_align, rs_ps);
 366   if (!rs.is_reserved()) {
 367     vm_exit_during_initialization(err_msg("Could not reserve enough space for code cache (" SIZE_FORMAT "K)",
 368                                           rs_size/K));
 369   }
 370 
 371   // Initialize bounds
 372   _low_bound = (address)rs.base();
 373   _high_bound = _low_bound + rs.size();
 374   return rs;
 375 }
 376 
 377 // Heaps available for allocation
 378 bool CodeCache::heap_available(CodeBlobType code_blob_type) {
 379   if (!SegmentedCodeCache) {
 380     // No segmentation: use a single code heap
 381     return (code_blob_type == CodeBlobType::All);
 382   } else if (CompilerConfig::is_interpreter_only()) {
 383     // Interpreter only: we don't need any method code heaps
 384     return (code_blob_type == CodeBlobType::NonNMethod);
 385   } else if (CompilerConfig::is_c1_profiling()) {
 386     // Tiered compilation: use all code heaps
 387     return (code_blob_type < CodeBlobType::All);
 388   } else {
 389     // No TieredCompilation: we only need the non-nmethod and non-profiled code heap
 390     return (code_blob_type == CodeBlobType::NonNMethod) ||
 391            (code_blob_type == CodeBlobType::MethodNonProfiled);
 392   }
 393 }
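// For example (added for clarity): with -XX:-SegmentedCodeCache only the single
// CodeBlobType::All heap exists; with -Xint only the non-nmethod heap is kept; and a
// C1-only configuration without profiling uses the non-nmethod and non-profiled heaps.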
 394 
 395 const char* CodeCache::get_code_heap_flag_name(CodeBlobType code_blob_type) {
  396   switch (code_blob_type) {
  397   case CodeBlobType::NonNMethod:
  398     return "NonNMethodCodeHeapSize";
  400   case CodeBlobType::MethodNonProfiled:
  401     return "NonProfiledCodeHeapSize";
  403   case CodeBlobType::MethodProfiled:
  404     return "ProfiledCodeHeapSize";
  406   default:
  407     ShouldNotReachHere();
  408     return nullptr;
  409   }
 410 }
 411 
 412 int CodeCache::code_heap_compare(CodeHeap* const &lhs, CodeHeap* const &rhs) {
 413   if (lhs->code_blob_type() == rhs->code_blob_type()) {
 414     return (lhs > rhs) ? 1 : ((lhs < rhs) ? -1 : 0);
 415   } else {
 416     return static_cast<int>(lhs->code_blob_type()) - static_cast<int>(rhs->code_blob_type());
 417   }
 418 }
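// The comparator above orders heaps primarily by CodeBlobType and, within a type, by
// address, which keeps the insert_sorted() arrays below in a deterministic order.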
 419 
 420 void CodeCache::add_heap(CodeHeap* heap) {
 421   assert(!Universe::is_fully_initialized(), "late heap addition?");
 422 
 423   _heaps->insert_sorted<code_heap_compare>(heap);
 424 
 425   CodeBlobType type = heap->code_blob_type();
 426   if (code_blob_type_accepts_compiled(type)) {
 427     _compiled_heaps->insert_sorted<code_heap_compare>(heap);
 428   }
 429   if (code_blob_type_accepts_nmethod(type)) {
 430     _nmethod_heaps->insert_sorted<code_heap_compare>(heap);
 431   }
 432   if (code_blob_type_accepts_allocable(type)) {
 433     _allocable_heaps->insert_sorted<code_heap_compare>(heap);
 434   }
 435 }
 436 
 437 void CodeCache::add_heap(ReservedSpace rs, const char* name, CodeBlobType code_blob_type) {
 438   // Check if heap is needed
 439   if (!heap_available(code_blob_type)) {
 440     return;
 441   }
 442 
 443   // Create CodeHeap
 444   CodeHeap* heap = new CodeHeap(name, code_blob_type);
 445   add_heap(heap);
 446 
 447   // Reserve Space
 448   size_t size_initial = MIN2((size_t)InitialCodeCacheSize, rs.size());
 449   size_initial = align_up(size_initial, os::vm_page_size());
 450   if (!heap->reserve(rs, size_initial, CodeCacheSegmentSize)) {
 451     vm_exit_during_initialization(err_msg("Could not reserve enough space in %s (" SIZE_FORMAT "K)",
 452                                           heap->name(), size_initial/K));
 453   }
 454 
 455   // Register the CodeHeap
 456   MemoryService::add_code_heap_memory_pool(heap, name);
 457 }
 458 
 459 CodeHeap* CodeCache::get_code_heap_containing(void* start) {
 460   FOR_ALL_HEAPS(heap) {
 461     if ((*heap)->contains(start)) {
 462       return *heap;
 463     }
 464   }
 465   return nullptr;
 466 }
 467 
 468 CodeHeap* CodeCache::get_code_heap(const void* cb) {
 469   assert(cb != nullptr, "CodeBlob is null");
 470   FOR_ALL_HEAPS(heap) {
 471     if ((*heap)->contains(cb)) {
 472       return *heap;
 473     }
 474   }
 475   ShouldNotReachHere();
 476   return nullptr;
 477 }
 478 
 479 CodeHeap* CodeCache::get_code_heap(CodeBlobType code_blob_type) {
 480   FOR_ALL_HEAPS(heap) {
 481     if ((*heap)->accepts(code_blob_type)) {
 482       return *heap;
 483     }
 484   }
 485   return nullptr;
 486 }
 487 
 488 CodeBlob* CodeCache::first_blob(CodeHeap* heap) {
 489   assert_locked_or_safepoint(CodeCache_lock);
 490   assert(heap != nullptr, "heap is null");
 491   return (CodeBlob*)heap->first();
 492 }
 493 
 494 CodeBlob* CodeCache::first_blob(CodeBlobType code_blob_type) {
 495   if (heap_available(code_blob_type)) {
 496     return first_blob(get_code_heap(code_blob_type));
 497   } else {
 498     return nullptr;
 499   }
 500 }
 501 
 502 CodeBlob* CodeCache::next_blob(CodeHeap* heap, CodeBlob* cb) {
 503   assert_locked_or_safepoint(CodeCache_lock);
 504   assert(heap != nullptr, "heap is null");
 505   return (CodeBlob*)heap->next(cb);
 506 }
 507 
  508 /**
  509  * Do not acquire the CodeCache lock here: if the caller has not
  510  * already done so, things will go badly wrong, because the code
  511  * cache will contain a garbage CodeBlob until the caller can
  512  * run the constructor for the CodeBlob subclass it is
  513  * instantiating.
  514  */
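// Note (added for clarity): if expansion of the target heap fails and SegmentedCodeCache
// is enabled, allocate() retries recursively in a neighbouring heap (see the switch below)
// and reports the original code blob type if the allocation ultimately fails.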
 515 CodeBlob* CodeCache::allocate(uint size, CodeBlobType code_blob_type, bool handle_alloc_failure, CodeBlobType orig_code_blob_type) {
 516   assert_locked_or_safepoint(CodeCache_lock);
 517   assert(size > 0, "Code cache allocation request must be > 0");
 518   if (size == 0) {
 519     return nullptr;
 520   }
 521   CodeBlob* cb = nullptr;
 522 
 523   // Get CodeHeap for the given CodeBlobType
 524   CodeHeap* heap = get_code_heap(code_blob_type);
 525   assert(heap != nullptr, "heap is null");
 526 
 527   while (true) {
 528     cb = (CodeBlob*)heap->allocate(size);
 529     if (cb != nullptr) break;
 530     if (!heap->expand_by(CodeCacheExpansionSize)) {
 531       // Save original type for error reporting
 532       if (orig_code_blob_type == CodeBlobType::All) {
 533         orig_code_blob_type = code_blob_type;
 534       }
 535       // Expansion failed
 536       if (SegmentedCodeCache) {
 537         // Fallback solution: Try to store code in another code heap.
 538         // NonNMethod -> MethodNonProfiled -> MethodProfiled (-> MethodNonProfiled)
 539         CodeBlobType type = code_blob_type;
 540         switch (type) {
 541         case CodeBlobType::NonNMethod:
 542           type = CodeBlobType::MethodNonProfiled;
 543           break;
 544         case CodeBlobType::MethodNonProfiled:
 545           type = CodeBlobType::MethodProfiled;
 546           break;
 547         case CodeBlobType::MethodProfiled:
 548           // Avoid loop if we already tried that code heap
 549           if (type == orig_code_blob_type) {
 550             type = CodeBlobType::MethodNonProfiled;
 551           }
 552           break;
 553         default:
 554           break;
 555         }
 556         if (type != code_blob_type && type != orig_code_blob_type && heap_available(type)) {
 557           if (PrintCodeCacheExtension) {
 558             tty->print_cr("Extension of %s failed. Trying to allocate in %s.",
 559                           heap->name(), get_code_heap(type)->name());
 560           }
 561           return allocate(size, type, handle_alloc_failure, orig_code_blob_type);
 562         }
 563       }
 564       if (handle_alloc_failure) {
 565         MutexUnlocker mu(CodeCache_lock, Mutex::_no_safepoint_check_flag);
 566         CompileBroker::handle_full_code_cache(orig_code_blob_type);
 567       }
 568       return nullptr;
 569     } else {
 570       OrderAccess::release(); // ensure heap expansion is visible to an asynchronous observer (e.g. CodeHeapPool::get_memory_usage())
 571     }
 572     if (PrintCodeCacheExtension) {
 573       ResourceMark rm;
 574       if (_nmethod_heaps->length() >= 1) {
 575         tty->print("%s", heap->name());
 576       } else {
 577         tty->print("CodeCache");
 578       }
 579       tty->print_cr(" extended to [" INTPTR_FORMAT ", " INTPTR_FORMAT "] (" SSIZE_FORMAT " bytes)",
 580                     (intptr_t)heap->low_boundary(), (intptr_t)heap->high(),
 581                     (address)heap->high() - (address)heap->low_boundary());
 582     }
 583   }
 584   print_trace("allocation", cb, size);
 585   return cb;
 586 }
 587 
 588 void CodeCache::free(CodeBlob* cb) {
 589   assert_locked_or_safepoint(CodeCache_lock);
 590   CodeHeap* heap = get_code_heap(cb);
 591   print_trace("free", cb);
 592   if (cb->is_nmethod()) {
 593     heap->set_nmethod_count(heap->nmethod_count() - 1);
 594     if (((nmethod *)cb)->has_dependencies()) {
 595       Atomic::dec(&_number_of_nmethods_with_dependencies);
 596     }
 597   }
 598   if (cb->is_adapter_blob()) {
 599     heap->set_adapter_count(heap->adapter_count() - 1);
 600   }
 601 
 602   cb->~CodeBlob();
 603   // Get heap for given CodeBlob and deallocate
 604   heap->deallocate(cb);
 605 
 606   assert(heap->blob_count() >= 0, "sanity check");
 607 }
 608 
 609 void CodeCache::free_unused_tail(CodeBlob* cb, size_t used) {
 610   assert_locked_or_safepoint(CodeCache_lock);
 611   guarantee(cb->is_buffer_blob() && strncmp("Interpreter", cb->name(), 11) == 0, "Only possible for interpreter!");
 612   print_trace("free_unused_tail", cb);
 613 
 614   // We also have to account for the extra space (i.e. header) used by the CodeBlob
 615   // which provides the memory (see BufferBlob::create() in codeBlob.cpp).
 616   used += CodeBlob::align_code_offset(cb->header_size());
 617 
 618   // Get heap for given CodeBlob and deallocate its unused tail
 619   get_code_heap(cb)->deallocate_tail(cb, used);
 620   // Adjust the sizes of the CodeBlob
 621   cb->adjust_size(used);
 622 }
 623 
 624 void CodeCache::commit(CodeBlob* cb) {
 625   // this is called by nmethod::nmethod, which must already own CodeCache_lock
 626   assert_locked_or_safepoint(CodeCache_lock);
 627   CodeHeap* heap = get_code_heap(cb);
 628   if (cb->is_nmethod()) {
 629     heap->set_nmethod_count(heap->nmethod_count() + 1);
 630     if (((nmethod *)cb)->has_dependencies()) {
 631       Atomic::inc(&_number_of_nmethods_with_dependencies);
 632     }
 633   }
 634   if (cb->is_adapter_blob()) {
 635     heap->set_adapter_count(heap->adapter_count() + 1);
 636   }
 637 }
 638 
 639 bool CodeCache::contains(void *p) {
 640   // S390 uses contains() in current_frame(), which is used before
 641   // code cache initialization if NativeMemoryTracking=detail is set.
 642   S390_ONLY(if (_heaps == nullptr) return false;)
 643   // It should be ok to call contains without holding a lock.
 644   FOR_ALL_HEAPS(heap) {
 645     if ((*heap)->contains(p)) {
 646       return true;
 647     }
 648   }
 649   return false;
 650 }
 651 
 652 bool CodeCache::contains(nmethod *nm) {
 653   return contains((void *)nm);
 654 }
 655 
 656 // This method is safe to call without holding the CodeCache_lock. It only depends on the _segmap to contain
 657 // valid indices, which it will always do, as long as the CodeBlob is not in the process of being recycled.
 658 CodeBlob* CodeCache::find_blob(void* start) {
 659   // NMT can walk the stack before code cache is created
 660   if (_heaps != nullptr) {
 661     CodeHeap* heap = get_code_heap_containing(start);
 662     if (heap != nullptr) {
 663       return heap->find_blob(start);
 664     }
 665   }
 666   return nullptr;
 667 }
 668 
 669 nmethod* CodeCache::find_nmethod(void* start) {
 670   CodeBlob* cb = find_blob(start);
 671   assert(cb->is_nmethod(), "did not find an nmethod");
 672   return (nmethod*)cb;
 673 }
 674 
 675 void CodeCache::blobs_do(void f(CodeBlob* nm)) {
 676   assert_locked_or_safepoint(CodeCache_lock);
 677   FOR_ALL_HEAPS(heap) {
 678     FOR_ALL_BLOBS(cb, *heap) {
 679       f(cb);
 680     }
 681   }
 682 }
 683 
 684 void CodeCache::nmethods_do(void f(nmethod* nm)) {
 685   assert_locked_or_safepoint(CodeCache_lock);
 686   NMethodIterator iter(NMethodIterator::all_blobs);
 687   while(iter.next()) {
 688     f(iter.method());
 689   }
 690 }
 691 
 692 void CodeCache::metadata_do(MetadataClosure* f) {
 693   assert_locked_or_safepoint(CodeCache_lock);
 694   NMethodIterator iter(NMethodIterator::all_blobs);
 695   while(iter.next()) {
 696     iter.method()->metadata_do(f);
 697   }
 698 }
 699 
  700 // Calculate the number of GCs within which an nmethod is expected to have been
  701 // used in order not to be considered cold.
 702 void CodeCache::update_cold_gc_count() {
 703   if (!MethodFlushing || !UseCodeCacheFlushing || NmethodSweepActivity == 0) {
 704     // No aging
 705     return;
 706   }
 707 
 708   size_t last_used = _last_unloading_used;
 709   double last_time = _last_unloading_time;
 710 
 711   double time = os::elapsedTime();
 712 
 713   size_t free = unallocated_capacity();
 714   size_t max = max_capacity();
 715   size_t used = max - free;
 716   double gc_interval = time - last_time;
 717 
 718   _unloading_threshold_gc_requested = false;
 719   _last_unloading_time = time;
 720   _last_unloading_used = used;
 721 
 722   if (last_time == 0.0) {
 723     // The first GC doesn't have enough information to make good
 724     // decisions, so just keep everything afloat
 725     log_info(codecache)("Unknown code cache pressure; don't age code");
 726     return;
 727   }
 728 
 729   if (gc_interval <= 0.0 || last_used >= used) {
 730     // Dodge corner cases where there is no pressure or negative pressure
 731     // on the code cache. Just don't unload when this happens.
 732     _cold_gc_count = INT_MAX;
 733     log_info(codecache)("No code cache pressure; don't age code");
 734     return;
 735   }
 736 
 737   double allocation_rate = (used - last_used) / gc_interval;
 738 
 739   _unloading_allocation_rates.add(allocation_rate);
 740   _unloading_gc_intervals.add(gc_interval);
 741 
 742   size_t aggressive_sweeping_free_threshold = StartAggressiveSweepingAt / 100.0 * max;
 743   if (free < aggressive_sweeping_free_threshold) {
  744     // We are already in the red zone; be very aggressive to avoid disaster.
  745     // But not more aggressive than 2: this ensures that an nmethod must
  746     // have been unused for at least two GCs to be considered cold.
 747     _cold_gc_count = 2;
 748     log_info(codecache)("Code cache critically low; use aggressive aging");
 749     return;
 750   }
 751 
 752   // The code cache has an expected time for cold nmethods to "time out"
 753   // when they have not been used. The time for nmethods to time out
 754   // depends on how long we expect we can keep allocating code until
 755   // aggressive sweeping starts, based on sampled allocation rates.
 756   double average_gc_interval = _unloading_gc_intervals.avg();
 757   double average_allocation_rate = _unloading_allocation_rates.avg();
 758   double time_to_aggressive = ((double)(free - aggressive_sweeping_free_threshold)) / average_allocation_rate;
 759   double cold_timeout = time_to_aggressive / NmethodSweepActivity;
 760 
 761   // Convert time to GC cycles, and crop at INT_MAX. The reason for
 762   // that is that the _cold_gc_count will be added to an epoch number
 763   // and that addition must not overflow, or we can crash the VM.
  764   // The count is also at least 2: this ensures that an nmethod must
  765   // have been unused for at least two GCs to be considered cold.
 766   _cold_gc_count = MAX2(MIN2((uint64_t)(cold_timeout / average_gc_interval), (uint64_t)INT_MAX), (uint64_t)2);
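  // Worked example (illustrative numbers only): with 100 MB of free space above the
  // aggressive sweeping threshold, an average allocation rate of 1 MB/s and
  // NmethodSweepActivity=10, time_to_aggressive is 100 s and cold_timeout is 10 s; with
  // an average GC interval of 2 s this yields _cold_gc_count = 5, i.e. an nmethod unused
  // for 5 GCs is considered cold.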
 767 
 768   double used_ratio = double(used) / double(max);
 769   double last_used_ratio = double(last_used) / double(max);
 770   log_info(codecache)("Allocation rate: %.3f KB/s, time to aggressive unloading: %.3f s, cold timeout: %.3f s, cold gc count: " UINT64_FORMAT
 771                       ", used: %.3f MB (%.3f%%), last used: %.3f MB (%.3f%%), gc interval: %.3f s",
 772                       average_allocation_rate / K, time_to_aggressive, cold_timeout, _cold_gc_count,
 773                       double(used) / M, used_ratio * 100.0, double(last_used) / M, last_used_ratio * 100.0, average_gc_interval);
 774 
 775 }
 776 
 777 uint64_t CodeCache::cold_gc_count() {
 778   return _cold_gc_count;
 779 }
 780 
 781 void CodeCache::gc_on_allocation() {
 782   if (!is_init_completed()) {
 783     // Let's not heuristically trigger GCs before the JVM is ready for GCs, no matter what
 784     return;
 785   }
 786 
 787   size_t free = unallocated_capacity();
 788   size_t max = max_capacity();
 789   size_t used = max - free;
 790   double free_ratio = double(free) / double(max);
 791   if (free_ratio <= StartAggressiveSweepingAt / 100.0)  {
 792     // In case the GC is concurrent, we make sure only one thread requests the GC.
 793     if (Atomic::cmpxchg(&_unloading_threshold_gc_requested, false, true) == false) {
 794       log_info(codecache)("Triggering aggressive GC due to having only %.3f%% free memory", free_ratio * 100.0);
 795       Universe::heap()->collect(GCCause::_codecache_GC_aggressive);
 796     }
 797     return;
 798   }
 799 
 800   size_t last_used = _last_unloading_used;
 801   if (last_used >= used) {
 802     // No increase since last GC; no need to sweep yet
 803     return;
 804   }
 805   size_t allocated_since_last = used - last_used;
 806   double allocated_since_last_ratio = double(allocated_since_last) / double(max);
 807   double threshold = SweeperThreshold / 100.0;
 808   double used_ratio = double(used) / double(max);
 809   double last_used_ratio = double(last_used) / double(max);
 810   if (used_ratio > threshold) {
 811     // After threshold is reached, scale it by free_ratio so that more aggressive
 812     // GC is triggered as we approach code cache exhaustion
 813     threshold *= free_ratio;
 814   }
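  // Worked example (illustrative numbers only): with SweeperThreshold=15 and the code
  // cache 80% used, the base threshold of 0.15 is scaled by the free ratio of 0.20 down
  // to 0.03, so a GC is requested once roughly 3% of the cache has been allocated since
  // the last unloading.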
 815   // If code cache has been allocated without any GC at all, let's make sure
 816   // it is eventually invoked to avoid trouble.
 817   if (allocated_since_last_ratio > threshold) {
 818     // In case the GC is concurrent, we make sure only one thread requests the GC.
 819     if (Atomic::cmpxchg(&_unloading_threshold_gc_requested, false, true) == false) {
 820       log_info(codecache)("Triggering threshold (%.3f%%) GC due to allocating %.3f%% since last unloading (%.3f%% used -> %.3f%% used)",
 821                           threshold * 100.0, allocated_since_last_ratio * 100.0, last_used_ratio * 100.0, used_ratio * 100.0);
 822       Universe::heap()->collect(GCCause::_codecache_GC_threshold);
 823     }
 824   }
 825 }
 826 
  827 // We initialize _gc_epoch to 2, because previous_completed_gc_marking_cycle
  828 // subtracts 2 from the value, and the type is unsigned. We don't want underflow.
 829 //
 830 // Odd values mean that marking is in progress, and even values mean that no
 831 // marking is currently active.
 832 uint64_t CodeCache::_gc_epoch = 2;
 833 
  834 // After how many GCs of not being used do we consider an nmethod cold?
 835 uint64_t CodeCache::_cold_gc_count = INT_MAX;
 836 
 837 double CodeCache::_last_unloading_time = 0.0;
 838 size_t CodeCache::_last_unloading_used = 0;
 839 volatile bool CodeCache::_unloading_threshold_gc_requested = false;
 840 TruncatedSeq CodeCache::_unloading_gc_intervals(10 /* samples */);
 841 TruncatedSeq CodeCache::_unloading_allocation_rates(10 /* samples */);
 842 
 843 uint64_t CodeCache::gc_epoch() {
 844   return _gc_epoch;
 845 }
 846 
 847 bool CodeCache::is_gc_marking_cycle_active() {
 848   // Odd means that marking is active
 849   return (_gc_epoch % 2) == 1;
 850 }
 851 
 852 uint64_t CodeCache::previous_completed_gc_marking_cycle() {
 853   if (is_gc_marking_cycle_active()) {
 854     return _gc_epoch - 2;
 855   } else {
 856     return _gc_epoch - 1;
 857   }
 858 }
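// Example (added for clarity): if _gc_epoch is 7, marking is in progress (odd value) and
// the previous completed marking cycle is 5; if it is 8, no marking is active and the
// previous completed cycle is 7.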
 859 
 860 void CodeCache::on_gc_marking_cycle_start() {
 861   assert(!is_gc_marking_cycle_active(), "Previous marking cycle never ended");
 862   ++_gc_epoch;
 863 }
 864 
  865 // Once started, the code cache marking cycle must only be finished after marking of
  866 // the Java heap is complete. Otherwise nmethods could appear to be not on stack even
  867 // though they have frames in continuation StackChunks that were not yet visited.
 868 void CodeCache::on_gc_marking_cycle_finish() {
 869   assert(is_gc_marking_cycle_active(), "Marking cycle started before last one finished");
 870   ++_gc_epoch;
 871   update_cold_gc_count();
 872 }
 873 
 874 void CodeCache::arm_all_nmethods() {
 875   BarrierSetNMethod* bs_nm = BarrierSet::barrier_set()->barrier_set_nmethod();
 876   if (bs_nm != nullptr) {
 877     bs_nm->arm_all_nmethods();
 878   }
 879 }
 880 
 881 // Mark nmethods for unloading if they contain otherwise unreachable oops.
 882 void CodeCache::do_unloading(bool unloading_occurred) {
 883   assert_locked_or_safepoint(CodeCache_lock);
 884   CompiledMethodIterator iter(CompiledMethodIterator::all_blobs);
 885   while(iter.next()) {
 886     iter.method()->do_unloading(unloading_occurred);
 887   }
 888 }
 889 
 890 void CodeCache::blobs_do(CodeBlobClosure* f) {
 891   assert_locked_or_safepoint(CodeCache_lock);
 892   FOR_ALL_ALLOCABLE_HEAPS(heap) {
 893     FOR_ALL_BLOBS(cb, *heap) {
 894       f->do_code_blob(cb);
 895 #ifdef ASSERT
 896       if (cb->is_nmethod()) {
 897         Universe::heap()->verify_nmethod((nmethod*)cb);
 898       }
 899 #endif //ASSERT
 900     }
 901   }
 902 }
 903 
 904 void CodeCache::verify_clean_inline_caches() {
 905 #ifdef ASSERT
 906   NMethodIterator iter(NMethodIterator::only_not_unloading);
 907   while(iter.next()) {
 908     nmethod* nm = iter.method();
 909     nm->verify_clean_inline_caches();
 910     nm->verify();
 911   }
 912 #endif
 913 }
 914 
 915 // Defer freeing of concurrently cleaned ExceptionCache entries until
 916 // after a global handshake operation.
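// The retry loop below is a standard lock-free list push: read the current head, link the
// new entry in front of it, and CAS the head pointer; if another thread won the race, the
// CAS fails and the push is retried.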
 917 void CodeCache::release_exception_cache(ExceptionCache* entry) {
 918   if (SafepointSynchronize::is_at_safepoint()) {
 919     delete entry;
 920   } else {
 921     for (;;) {
 922       ExceptionCache* purge_list_head = Atomic::load(&_exception_cache_purge_list);
 923       entry->set_purge_list_next(purge_list_head);
 924       if (Atomic::cmpxchg(&_exception_cache_purge_list, purge_list_head, entry) == purge_list_head) {
 925         break;
 926       }
 927     }
 928   }
 929 }
 930 
 931 // Delete exception caches that have been concurrently unlinked,
 932 // followed by a global handshake operation.
 933 void CodeCache::purge_exception_caches() {
 934   ExceptionCache* curr = _exception_cache_purge_list;
 935   while (curr != nullptr) {
 936     ExceptionCache* next = curr->purge_list_next();
 937     delete curr;
 938     curr = next;
 939   }
 940   _exception_cache_purge_list = nullptr;
 941 }
 942 
  943 // Restart the compiler if possible and required.
 944 void CodeCache::maybe_restart_compiler(size_t freed_memory) {
 945 
 946   // Try to start the compiler again if we freed any memory
 947   if (!CompileBroker::should_compile_new_jobs() && freed_memory != 0) {
 948     CompileBroker::set_should_compile_new_jobs(CompileBroker::run_compilation);
 949     log_info(codecache)("Restarting compiler");
 950     EventJITRestart event;
 951     event.set_freedMemory(freed_memory);
 952     event.set_codeCacheMaxCapacity(CodeCache::max_capacity());
 953     event.commit();
 954   }
 955 }
 956 
 957 uint8_t CodeCache::_unloading_cycle = 1;
 958 
 959 void CodeCache::increment_unloading_cycle() {
 960   // 2-bit value (see IsUnloadingState in nmethod.cpp for details)
 961   // 0 is reserved for new methods.
 962   _unloading_cycle = (_unloading_cycle + 1) % 4;
 963   if (_unloading_cycle == 0) {
 964     _unloading_cycle = 1;
 965   }
 966 }
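// Thus the cycle value progresses 1 -> 2 -> 3 -> 1 -> ..., never returning to 0, which
// stays reserved for newly installed nmethods.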
 967 
 968 CodeCache::UnlinkingScope::UnlinkingScope(BoolObjectClosure* is_alive)
 969   : _is_unloading_behaviour(is_alive)
 970 {
 971   _saved_behaviour = IsUnloadingBehaviour::current();
 972   IsUnloadingBehaviour::set_current(&_is_unloading_behaviour);
 973   increment_unloading_cycle();
 974   DependencyContext::cleaning_start();
 975 }
 976 
 977 CodeCache::UnlinkingScope::~UnlinkingScope() {
 978   IsUnloadingBehaviour::set_current(_saved_behaviour);
 979   DependencyContext::cleaning_end();
 980 }
 981 
 982 void CodeCache::verify_oops() {
 983   MutexLocker mu(CodeCache_lock, Mutex::_no_safepoint_check_flag);
 984   VerifyOopClosure voc;
 985   NMethodIterator iter(NMethodIterator::only_not_unloading);
 986   while(iter.next()) {
 987     nmethod* nm = iter.method();
 988     nm->oops_do(&voc);
 989     nm->verify_oop_relocations();
 990   }
 991 }
 992 
 993 int CodeCache::blob_count(CodeBlobType code_blob_type) {
 994   CodeHeap* heap = get_code_heap(code_blob_type);
 995   return (heap != nullptr) ? heap->blob_count() : 0;
 996 }
 997 
 998 int CodeCache::blob_count() {
 999   int count = 0;
1000   FOR_ALL_HEAPS(heap) {
1001     count += (*heap)->blob_count();
1002   }
1003   return count;
1004 }
1005 
1006 int CodeCache::nmethod_count(CodeBlobType code_blob_type) {
1007   CodeHeap* heap = get_code_heap(code_blob_type);
1008   return (heap != nullptr) ? heap->nmethod_count() : 0;
1009 }
1010 
1011 int CodeCache::nmethod_count() {
1012   int count = 0;
1013   FOR_ALL_NMETHOD_HEAPS(heap) {
1014     count += (*heap)->nmethod_count();
1015   }
1016   return count;
1017 }
1018 
1019 int CodeCache::adapter_count(CodeBlobType code_blob_type) {
1020   CodeHeap* heap = get_code_heap(code_blob_type);
1021   return (heap != nullptr) ? heap->adapter_count() : 0;
1022 }
1023 
1024 int CodeCache::adapter_count() {
1025   int count = 0;
1026   FOR_ALL_HEAPS(heap) {
1027     count += (*heap)->adapter_count();
1028   }
1029   return count;
1030 }
1031 
1032 address CodeCache::low_bound(CodeBlobType code_blob_type) {
1033   CodeHeap* heap = get_code_heap(code_blob_type);
1034   return (heap != nullptr) ? (address)heap->low_boundary() : nullptr;
1035 }
1036 
1037 address CodeCache::high_bound(CodeBlobType code_blob_type) {
1038   CodeHeap* heap = get_code_heap(code_blob_type);
1039   return (heap != nullptr) ? (address)heap->high_boundary() : nullptr;
1040 }
1041 
1042 size_t CodeCache::capacity() {
1043   size_t cap = 0;
1044   FOR_ALL_ALLOCABLE_HEAPS(heap) {
1045     cap += (*heap)->capacity();
1046   }
1047   return cap;
1048 }
1049 
1050 size_t CodeCache::unallocated_capacity(CodeBlobType code_blob_type) {
1051   CodeHeap* heap = get_code_heap(code_blob_type);
1052   return (heap != nullptr) ? heap->unallocated_capacity() : 0;
1053 }
1054 
1055 size_t CodeCache::unallocated_capacity() {
1056   size_t unallocated_cap = 0;
1057   FOR_ALL_ALLOCABLE_HEAPS(heap) {
1058     unallocated_cap += (*heap)->unallocated_capacity();
1059   }
1060   return unallocated_cap;
1061 }
1062 
1063 size_t CodeCache::max_capacity() {
1064   size_t max_cap = 0;
1065   FOR_ALL_ALLOCABLE_HEAPS(heap) {
1066     max_cap += (*heap)->max_capacity();
1067   }
1068   return max_cap;
1069 }
1070 
1071 bool CodeCache::is_non_nmethod(address addr) {
1072   CodeHeap* blob = get_code_heap(CodeBlobType::NonNMethod);
1073   return blob->contains(addr);
1074 }
1075 
1076 size_t CodeCache::max_distance_to_non_nmethod() {
1077   if (!SegmentedCodeCache) {
1078     return ReservedCodeCacheSize;
1079   } else {
1080     CodeHeap* blob = get_code_heap(CodeBlobType::NonNMethod);
 1081     // The max distance is minimized by placing the NonNMethod segment
 1082     // in between the MethodProfiled and MethodNonProfiled segments.
1083     size_t dist1 = (size_t)blob->high() - (size_t)_low_bound;
1084     size_t dist2 = (size_t)_high_bound - (size_t)blob->low();
1085     return dist1 > dist2 ? dist1 : dist2;
1086   }
1087 }
1088 
1089 // Returns the reverse free ratio. E.g., if 25% (1/4) of the code cache
1090 // is free, reverse_free_ratio() returns 4.
 1091 // Since the code heap for each type of code blob falls forward to the next
 1092 // type of code heap, return the reverse free ratio for the entire
1093 // code cache.
1094 double CodeCache::reverse_free_ratio() {
1095   double unallocated = MAX2((double)unallocated_capacity(), 1.0); // Avoid division by 0;
1096   double max = (double)max_capacity();
1097   double result = max / unallocated;
1098   assert (max >= unallocated, "Must be");
1099   assert (result >= 1.0, "reverse_free_ratio must be at least 1. It is %f", result);
1100   return result;
1101 }
1102 
1103 size_t CodeCache::bytes_allocated_in_freelists() {
1104   size_t allocated_bytes = 0;
1105   FOR_ALL_ALLOCABLE_HEAPS(heap) {
1106     allocated_bytes += (*heap)->allocated_in_freelist();
1107   }
1108   return allocated_bytes;
1109 }
1110 
1111 int CodeCache::allocated_segments() {
1112   int number_of_segments = 0;
1113   FOR_ALL_ALLOCABLE_HEAPS(heap) {
1114     number_of_segments += (*heap)->allocated_segments();
1115   }
1116   return number_of_segments;
1117 }
1118 
1119 size_t CodeCache::freelists_length() {
1120   size_t length = 0;
1121   FOR_ALL_ALLOCABLE_HEAPS(heap) {
1122     length += (*heap)->freelist_length();
1123   }
1124   return length;
1125 }
1126 
1127 void icache_init();
1128 
1129 void CodeCache::initialize() {
1130   assert(CodeCacheSegmentSize >= (uintx)CodeEntryAlignment, "CodeCacheSegmentSize must be large enough to align entry points");
1131 #ifdef COMPILER2
1132   assert(CodeCacheSegmentSize >= (uintx)OptoLoopAlignment,  "CodeCacheSegmentSize must be large enough to align inner loops");
1133 #endif
1134   assert(CodeCacheSegmentSize >= sizeof(jdouble),    "CodeCacheSegmentSize must be large enough to align constants");
 1135   // This was originally just an alignment check that caused a failure; instead, round
 1136   // the code cache expansion size up to the page size. In particular, Solaris moved to
 1137   // a larger default page size.
1138   CodeCacheExpansionSize = align_up(CodeCacheExpansionSize, os::vm_page_size());
1139 
1140   if (SegmentedCodeCache) {
1141     // Use multiple code heaps
1142     initialize_heaps();
1143   } else {
1144     // Use a single code heap
1145     FLAG_SET_ERGO(NonNMethodCodeHeapSize, (uintx)os::vm_page_size());
1146     FLAG_SET_ERGO(ProfiledCodeHeapSize, 0);
1147     FLAG_SET_ERGO(NonProfiledCodeHeapSize, 0);
1148 
1149     // If InitialCodeCacheSize is equal to ReservedCodeCacheSize, then it's more likely
1150     // users want to use the largest available page.
1151     const size_t min_pages = (InitialCodeCacheSize == ReservedCodeCacheSize) ? 1 : 8;
1152     ReservedCodeSpace rs = reserve_heap_memory(ReservedCodeCacheSize, page_size(false, min_pages));
1153     // Register CodeHeaps with LSan as we sometimes embed pointers to malloc memory.
1154     LSAN_REGISTER_ROOT_REGION(rs.base(), rs.size());
1155     add_heap(rs, "CodeCache", CodeBlobType::All);
1156   }
1157 
1158   // Initialize ICache flush mechanism
1159   // This service is needed for os::register_code_area
1160   icache_init();
1161 
1162   // Give OS a chance to register generated code area.
1163   // This is used on Windows 64 bit platforms to register
1164   // Structured Exception Handlers for our generated code.
1165   os::register_code_area((char*)low_bound(), (char*)high_bound());
1166 }
1167 
1168 void codeCache_init() {
1169   CodeCache::initialize();
1170 }
1171 
1172 //------------------------------------------------------------------------------------------------
1173 
1174 bool CodeCache::has_nmethods_with_dependencies() {
1175   return Atomic::load_acquire(&_number_of_nmethods_with_dependencies) != 0;
1176 }
1177 
1178 void CodeCache::clear_inline_caches() {
1179   assert_locked_or_safepoint(CodeCache_lock);
1180   CompiledMethodIterator iter(CompiledMethodIterator::only_not_unloading);
1181   while(iter.next()) {
1182     iter.method()->clear_inline_caches();
1183   }
1184 }
1185 
1186 // Only used by whitebox API
1187 void CodeCache::cleanup_inline_caches_whitebox() {
1188   assert_locked_or_safepoint(CodeCache_lock);
1189   NMethodIterator iter(NMethodIterator::only_not_unloading);
1190   while(iter.next()) {
1191     iter.method()->cleanup_inline_caches_whitebox();
1192   }
1193 }
1194 
1195 // Keeps track of time spent for checking dependencies
1196 NOT_PRODUCT(static elapsedTimer dependentCheckTime;)
1197 
1198 #ifndef PRODUCT
 1199 // Check if any of the live methods' dependencies have been invalidated.
1200 // (this is expensive!)
1201 static void check_live_nmethods_dependencies(DepChange& changes) {
1202   // Checked dependencies are allocated into this ResourceMark
1203   ResourceMark rm;
1204 
1205   // Turn off dependency tracing while actually testing dependencies.
1206   FlagSetting fs(Dependencies::_verify_in_progress, true);
1207 
1208   typedef ResourceHashtable<DependencySignature, int, 11027,
1209                             AnyObj::RESOURCE_AREA, mtInternal,
1210                             &DependencySignature::hash,
1211                             &DependencySignature::equals> DepTable;
1212 
1213   DepTable* table = new DepTable();
1214 
1215   // Iterate over live nmethods and check dependencies of all nmethods that are not
1216   // marked for deoptimization. A particular dependency is only checked once.
1217   NMethodIterator iter(NMethodIterator::only_not_unloading);
1218   while(iter.next()) {
1219     nmethod* nm = iter.method();
1220     // Only notify for live nmethods
1221     if (!nm->is_marked_for_deoptimization()) {
1222       for (Dependencies::DepStream deps(nm); deps.next(); ) {
1223         // Construct abstraction of a dependency.
1224         DependencySignature* current_sig = new DependencySignature(deps);
1225 
1226         // Determine if dependency is already checked. table->put(...) returns
1227         // 'true' if the dependency is added (i.e., was not in the hashtable).
1228         if (table->put(*current_sig, 1)) {
1229           if (deps.check_dependency() != nullptr) {
1230             // Dependency checking failed. Print out information about the failed
1231             // dependency and finally fail with an assert. We can fail here, since
1232             // dependency checking is never done in a product build.
1233             tty->print_cr("Failed dependency:");
1234             changes.print();
1235             nm->print();
1236             nm->print_dependencies_on(tty);
1237             assert(false, "Should have been marked for deoptimization");
1238           }
1239         }
1240       }
1241     }
1242   }
1243 }
1244 #endif
1245 
1246 void CodeCache::mark_for_deoptimization(DeoptimizationScope* deopt_scope, KlassDepChange& changes) {
1247   MutexLocker mu(CodeCache_lock, Mutex::_no_safepoint_check_flag);
1248 
1249   // search the hierarchy looking for nmethods which are affected by the loading of this class
1250 
1251   // then search the interfaces this class implements looking for nmethods
 1252   // which might be dependent on the fact that an interface only had one
 1253   // implementor.
 1254   // nmethod::check_all_dependencies only works correctly if no safepoint
 1255   // can happen.
1256   NoSafepointVerifier nsv;
1257   for (DepChange::ContextStream str(changes, nsv); str.next(); ) {
1258     InstanceKlass* d = str.klass();
1259     d->mark_dependent_nmethods(deopt_scope, changes);
1260   }
1261 
1262 #ifndef PRODUCT
1263   if (VerifyDependencies) {
1264     // Object pointers are used as unique identifiers for dependency arguments. This
1265     // is only possible if no safepoint, i.e., GC occurs during the verification code.
1266     dependentCheckTime.start();
1267     check_live_nmethods_dependencies(changes);
1268     dependentCheckTime.stop();
1269   }
1270 #endif
1271 }
1272 
1273 CompiledMethod* CodeCache::find_compiled(void* start) {
1274   CodeBlob *cb = find_blob(start);
 1275   assert(cb == nullptr || cb->is_compiled(), "did not find a compiled_method");
1276   return (CompiledMethod*)cb;
1277 }
1278 
1279 #if INCLUDE_JVMTI
1280 // RedefineClasses support for saving nmethods that are dependent on "old" methods.
1281 // We don't really expect this table to grow very large.  If it does, it can become a hashtable.
1282 static GrowableArray<CompiledMethod*>* old_compiled_method_table = nullptr;
1283 
1284 static void add_to_old_table(CompiledMethod* c) {
1285   if (old_compiled_method_table == nullptr) {
1286     old_compiled_method_table = new (mtCode) GrowableArray<CompiledMethod*>(100, mtCode);
1287   }
1288   old_compiled_method_table->push(c);
1289 }
1290 
1291 static void reset_old_method_table() {
1292   if (old_compiled_method_table != nullptr) {
1293     delete old_compiled_method_table;
1294     old_compiled_method_table = nullptr;
1295   }
1296 }
1297 
1298 // Remove this method when flushed.
1299 void CodeCache::unregister_old_nmethod(CompiledMethod* c) {
1300   assert_lock_strong(CodeCache_lock);
1301   if (old_compiled_method_table != nullptr) {
1302     int index = old_compiled_method_table->find(c);
1303     if (index != -1) {
1304       old_compiled_method_table->delete_at(index);
1305     }
1306   }
1307 }
1308 
1309 void CodeCache::old_nmethods_do(MetadataClosure* f) {
1310   // Walk old method table and mark those on stack.
1311   int length = 0;
1312   if (old_compiled_method_table != nullptr) {
1313     length = old_compiled_method_table->length();
1314     for (int i = 0; i < length; i++) {
1315       // Walk all methods saved on the last pass.  Concurrent class unloading may
1316       // also be looking at this method's metadata, so don't delete it yet if
1317       // it is marked as unloaded.
1318       old_compiled_method_table->at(i)->metadata_do(f);
1319     }
1320   }
1321   log_debug(redefine, class, nmethod)("Walked %d nmethods for mark_on_stack", length);
1322 }
1323 
1324 // Walk compiled methods and mark dependent methods for deoptimization.
1325 void CodeCache::mark_dependents_for_evol_deoptimization(DeoptimizationScope* deopt_scope) {
1326   assert(SafepointSynchronize::is_at_safepoint(), "Can only do this at a safepoint!");
1327   // Each redefinition creates a new set of nmethods that have references to "old" Methods
1328   // So delete old method table and create a new one.
1329   reset_old_method_table();
1330 
1331   CompiledMethodIterator iter(CompiledMethodIterator::all_blobs);
1332   while(iter.next()) {
1333     CompiledMethod* nm = iter.method();
1334     // Walk all alive nmethods to check for old Methods.
1335     // This includes methods whose inline caches point to old methods, so
1336     // inline cache clearing is unnecessary.
1337     if (nm->has_evol_metadata()) {
1338       deopt_scope->mark(nm);
1339       add_to_old_table(nm);
1340     }
1341   }
1342 }
1343 
1344 void CodeCache::mark_all_nmethods_for_evol_deoptimization(DeoptimizationScope* deopt_scope) {
1345   assert(SafepointSynchronize::is_at_safepoint(), "Can only do this at a safepoint!");
1346   CompiledMethodIterator iter(CompiledMethodIterator::all_blobs);
1347   while(iter.next()) {
1348     CompiledMethod* nm = iter.method();
1349     if (!nm->method()->is_method_handle_intrinsic()) {
1350       if (nm->can_be_deoptimized()) {
1351         deopt_scope->mark(nm);
1352       }
1353       if (nm->has_evol_metadata()) {
1354         add_to_old_table(nm);
1355       }
1356     }
1357   }
1358 }
1359 
1360 #endif // INCLUDE_JVMTI
1361 
1362 // Mark all compiled methods, except native methods, for deoptimization.
1363 void CodeCache::mark_all_nmethods_for_deoptimization(DeoptimizationScope* deopt_scope) {
1364   MutexLocker mu(CodeCache_lock, Mutex::_no_safepoint_check_flag);
1365   CompiledMethodIterator iter(CompiledMethodIterator::only_not_unloading);
1366   while(iter.next()) {
1367     CompiledMethod* nm = iter.method();
1368     if (!nm->is_native_method()) {
1369       deopt_scope->mark(nm);
1370     }
1371   }
1372 }
1373 
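     // Mark for deoptimization all compiled methods that depend on the given method.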
1374 void CodeCache::mark_for_deoptimization(DeoptimizationScope* deopt_scope, Method* dependee) {
1375   MutexLocker mu(CodeCache_lock, Mutex::_no_safepoint_check_flag);
1376 
1377   CompiledMethodIterator iter(CompiledMethodIterator::only_not_unloading);
1378   while(iter.next()) {
1379     CompiledMethod* nm = iter.method();
1380     if (nm->is_dependent_on_method(dependee)) {
1381       deopt_scope->mark(nm);
1382     }
1383   }
1384 }
1385 
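     // Make every nmethod that was marked for deoptimization, and that can still be
     // deoptimized, not entrant and deoptimized.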
1386 void CodeCache::make_marked_nmethods_deoptimized() {
1387   RelaxedCompiledMethodIterator iter(RelaxedCompiledMethodIterator::only_not_unloading);
1388   while(iter.next()) {
1389     CompiledMethod* nm = iter.method();
1390     if (nm->is_marked_for_deoptimization() && !nm->has_been_deoptimized() && nm->can_be_deoptimized()) {
1391       nm->make_not_entrant();
1392       nm->make_deoptimized();
1393     }
1394   }
1395 }
1396 
1397 // Marks compiled methods dependent on dependee.
1398 void CodeCache::mark_dependents_on(DeoptimizationScope* deopt_scope, InstanceKlass* dependee) {
1399   assert_lock_strong(Compile_lock);
1400 
1401   if (!has_nmethods_with_dependencies()) {
1402     return;
1403   }
1404 
1405   if (dependee->is_linked()) {
1406     // Class initialization state change.
1407     KlassInitDepChange changes(dependee);
1408     mark_for_deoptimization(deopt_scope, changes);
1409   } else {
1410     // New class is loaded.
1411     NewKlassDepChange changes(dependee);
1412     mark_for_deoptimization(deopt_scope, changes);
1413   }
1414 }
1415 
1416 // Marks compiled methods dependent on the given method and deoptimizes them (breakpoint support).
1417 void CodeCache::mark_dependents_on_method_for_breakpoint(const methodHandle& m_h) {
1418   assert(SafepointSynchronize::is_at_safepoint(), "invariant");
1419 
1420   DeoptimizationScope deopt_scope;
1421   // Compute the dependent nmethods
1422   mark_for_deoptimization(&deopt_scope, m_h());
1423   deopt_scope.deoptimize_marked();
1424 }
1425 
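     // Verify each code heap and every code blob it contains.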
1426 void CodeCache::verify() {
1427   assert_locked_or_safepoint(CodeCache_lock);
1428   FOR_ALL_HEAPS(heap) {
1429     (*heap)->verify();
1430     FOR_ALL_BLOBS(cb, *heap) {
1431       cb->verify();
1432     }
1433   }
1434 }
1435 
1436 // A CodeHeap is full. Print out warning and report event.
1437 PRAGMA_DIAG_PUSH
1438 PRAGMA_FORMAT_NONLITERAL_IGNORED
1439 void CodeCache::report_codemem_full(CodeBlobType code_blob_type, bool print) {
1440   // Get nmethod heap for the given CodeBlobType and build CodeCacheFull event
1441   CodeHeap* heap = get_code_heap(code_blob_type);
1442   assert(heap != nullptr, "heap is null");
1443 
1444   int full_count = heap->report_full();
1445 
1446   if ((full_count == 1) || print) {
1447     // Not yet reported for this heap, or printing explicitly requested; report now.
1448     if (SegmentedCodeCache) {
1449       ResourceMark rm;
1450       stringStream msg1_stream, msg2_stream;
1451       msg1_stream.print("%s is full. Compiler has been disabled.",
1452                         get_code_heap_name(code_blob_type));
1453       msg2_stream.print("Try increasing the code heap size using -XX:%s=",
1454                         get_code_heap_flag_name(code_blob_type));
1455       const char *msg1 = msg1_stream.as_string();
1456       const char *msg2 = msg2_stream.as_string();
1457 
1458       log_warning(codecache)("%s", msg1);
1459       log_warning(codecache)("%s", msg2);
1460       warning("%s", msg1);
1461       warning("%s", msg2);
1462     } else {
1463       const char *msg1 = "CodeCache is full. Compiler has been disabled.";
1464       const char *msg2 = "Try increasing the code cache size using -XX:ReservedCodeCacheSize=";
1465 
1466       log_warning(codecache)("%s", msg1);
1467       log_warning(codecache)("%s", msg2);
1468       warning("%s", msg1);
1469       warning("%s", msg2);
1470     }
1471     stringStream s;
1472     // Dump code cache into a buffer before locking the tty.
1473     {
1474       MutexLocker mu(CodeCache_lock, Mutex::_no_safepoint_check_flag);
1475       print_summary(&s);
1476     }
1477     {
1478       ttyLocker ttyl;
1479       tty->print("%s", s.freeze());
1480     }
1481 
1482     if (full_count == 1) {
1483       if (PrintCodeHeapAnalytics) {
1484         CompileBroker::print_heapinfo(tty, "all", 4096); // details, may be a lot!
1485       }
1486     }
1487   }
1488 
1489   EventCodeCacheFull event;
1490   if (event.should_commit()) {
1491     event.set_codeBlobType((u1)code_blob_type);
1492     event.set_startAddress((u8)heap->low_boundary());
1493     event.set_commitedTopAddress((u8)heap->high());
1494     event.set_reservedTopAddress((u8)heap->high_boundary());
1495     event.set_entryCount(heap->blob_count());
1496     event.set_methodCount(heap->nmethod_count());
1497     event.set_adaptorCount(heap->adapter_count());
1498     event.set_unallocatedCapacity(heap->unallocated_capacity());
1499     event.set_fullCount(heap->full_count());
1500     event.set_codeCacheMaxCapacity(CodeCache::max_capacity());
1501     event.commit();
1502   }
1503 }
1504 PRAGMA_DIAG_POP
1505 
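     // Report CodeHeap bookkeeping overhead: freelist contents, bytes lost to
     // segment-size rounding inside CodeBlobs, and the size of the segment map.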
1506 void CodeCache::print_memory_overhead() {
1507   size_t wasted_bytes = 0;
1508   FOR_ALL_ALLOCABLE_HEAPS(heap) {
1509       CodeHeap* curr_heap = *heap;
1510       for (CodeBlob* cb = (CodeBlob*)curr_heap->first(); cb != nullptr; cb = (CodeBlob*)curr_heap->next(cb)) {
1511         HeapBlock* heap_block = ((HeapBlock*)cb) - 1;
1512         wasted_bytes += heap_block->length() * CodeCacheSegmentSize - cb->size();
1513       }
1514   }
1515   // Print bytes that are allocated in the freelist
1516   ttyLocker ttl;
1517   tty->print_cr("Number of elements in freelist: " SSIZE_FORMAT,       freelists_length());
1518   tty->print_cr("Allocated in freelist:          " SSIZE_FORMAT "kB",  bytes_allocated_in_freelists()/K);
1519   tty->print_cr("Unused bytes in CodeBlobs:      " SSIZE_FORMAT "kB",  (wasted_bytes/K));
1520   tty->print_cr("Segment map size:               " SSIZE_FORMAT "kB",  allocated_segments()/K); // 1 byte per segment
1521 }
1522 
1523 //------------------------------------------------------------------------------------------------
1524 // Non-product version
1525 
1526 #ifndef PRODUCT
1527 
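     // Trace a single code cache event for the given blob when PrintCodeCache2 is enabled.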
1528 void CodeCache::print_trace(const char* event, CodeBlob* cb, uint size) {
1529   if (PrintCodeCache2) {  // Need to add a new flag
1530     ResourceMark rm;
1531     if (size == 0) {
1532       int s = cb->size();
1533       assert(s >= 0, "CodeBlob size is negative: %d", s);
1534       size = (uint) s;
1535     }
1536     tty->print_cr("CodeCache %s:  addr: " INTPTR_FORMAT ", size: 0x%x", event, p2i(cb), size);
1537   }
1538 }
1539 
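     // Print counts per code blob type and an nmethod size histogram (512-byte buckets).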
1540 void CodeCache::print_internals() {
1541   int nmethodCount = 0;
1542   int runtimeStubCount = 0;
1543   int adapterCount = 0;
1544   int deoptimizationStubCount = 0;
1545   int uncommonTrapStubCount = 0;
1546   int bufferBlobCount = 0;
1547   int total = 0;
1548   int nmethodNotEntrant = 0;
1549   int nmethodJava = 0;
1550   int nmethodNative = 0;
1551   int max_nm_size = 0;
1552   ResourceMark rm;
1553 
1554   int i = 0;
1555   FOR_ALL_ALLOCABLE_HEAPS(heap) {
1556     if ((_nmethod_heaps->length() >= 1) && Verbose) {
1557       tty->print_cr("-- %s --", (*heap)->name());
1558     }
1559     FOR_ALL_BLOBS(cb, *heap) {
1560       total++;
1561       if (cb->is_nmethod()) {
1562         nmethod* nm = (nmethod*)cb;
1563 
1564         if (Verbose && nm->method() != nullptr) {
1565           ResourceMark rm;
1566           char *method_name = nm->method()->name_and_sig_as_C_string();
1567           tty->print("%s", method_name);
1568           if(nm->is_not_entrant()) { tty->print_cr(" not-entrant"); }
1569         }
1570 
1571         nmethodCount++;
1572 
1573         if(nm->is_not_entrant()) { nmethodNotEntrant++; }
1574         if(nm->method() != nullptr && nm->is_native_method()) { nmethodNative++; }
1575 
1576         if(nm->method() != nullptr && nm->is_java_method()) {
1577           nmethodJava++;
1578           max_nm_size = MAX2(max_nm_size, nm->size());
1579         }
1580       } else if (cb->is_runtime_stub()) {
1581         runtimeStubCount++;
1582       } else if (cb->is_deoptimization_stub()) {
1583         deoptimizationStubCount++;
1584       } else if (cb->is_uncommon_trap_stub()) {
1585         uncommonTrapStubCount++;
1586       } else if (cb->is_adapter_blob()) {
1587         adapterCount++;
1588       } else if (cb->is_buffer_blob()) {
1589         bufferBlobCount++;
1590       }
1591     }
1592   }
1593 
1594   int bucketSize = 512;
1595   int bucketLimit = max_nm_size / bucketSize + 1;
1596   int *buckets = NEW_C_HEAP_ARRAY(int, bucketLimit, mtCode);
1597   memset(buckets, 0, sizeof(int) * bucketLimit);
1598 
1599   NMethodIterator iter(NMethodIterator::all_blobs);
1600   while(iter.next()) {
1601     nmethod* nm = iter.method();
1602     if(nm->method() != nullptr && nm->is_java_method()) {
1603       buckets[nm->size() / bucketSize]++;
1604     }
1605   }
1606 
1607   tty->print_cr("Code Cache Entries (total of %d)",total);
1608   tty->print_cr("-------------------------------------------------");
1609   tty->print_cr("nmethods: %d",nmethodCount);
1610   tty->print_cr("\tnot_entrant: %d",nmethodNotEntrant);
1611   tty->print_cr("\tjava: %d",nmethodJava);
1612   tty->print_cr("\tnative: %d",nmethodNative);
1613   tty->print_cr("runtime_stubs: %d",runtimeStubCount);
1614   tty->print_cr("adapters: %d",adapterCount);
1615   tty->print_cr("buffer blobs: %d",bufferBlobCount);
1616   tty->print_cr("deoptimization_stubs: %d",deoptimizationStubCount);
1617   tty->print_cr("uncommon_traps: %d",uncommonTrapStubCount);
1618   tty->print_cr("\nnmethod size distribution");
1619   tty->print_cr("-------------------------------------------------");
1620 
1621   for(int i=0; i<bucketLimit; i++) {
1622     if(buckets[i] != 0) {
1623       tty->print("%d - %d bytes",i*bucketSize,(i+1)*bucketSize);
1624       tty->fill_to(40);
1625       tty->print_cr("%d",buckets[i]);
1626     }
1627   }
1628 
1629   FREE_C_HEAP_ARRAY(int, buckets);
1630   print_memory_overhead();
1631 }
1632 
1633 #endif // !PRODUCT
1634 
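     // Print a code cache summary; in non-product builds with -XX:+Verbose, also break
     // blobs down by type and nmethods by compilation level (plus oop map usage in WizardMode).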
1635 void CodeCache::print() {
1636   print_summary(tty);
1637 
1638 #ifndef PRODUCT
1639   if (!Verbose) return;
1640 
1641   CodeBlob_sizes live[CompLevel_full_optimization + 1];
1642   CodeBlob_sizes runtimeStub;
1643   CodeBlob_sizes uncommonTrapStub;
1644   CodeBlob_sizes deoptimizationStub;
1645   CodeBlob_sizes adapter;
1646   CodeBlob_sizes bufferBlob;
1647   CodeBlob_sizes other;
1648 
1649   FOR_ALL_ALLOCABLE_HEAPS(heap) {
1650     FOR_ALL_BLOBS(cb, *heap) {
1651       if (cb->is_nmethod()) {
1652         const int level = cb->as_nmethod()->comp_level();
1653         assert(0 <= level && level <= CompLevel_full_optimization, "Invalid compilation level");
1654         live[level].add(cb);
1655       } else if (cb->is_runtime_stub()) {
1656         runtimeStub.add(cb);
1657       } else if (cb->is_deoptimization_stub()) {
1658         deoptimizationStub.add(cb);
1659       } else if (cb->is_uncommon_trap_stub()) {
1660         uncommonTrapStub.add(cb);
1661       } else if (cb->is_adapter_blob()) {
1662         adapter.add(cb);
1663       } else if (cb->is_buffer_blob()) {
1664         bufferBlob.add(cb);
1665       } else {
1666         other.add(cb);
1667       }
1668     }
1669   }
1670 
1671   tty->print_cr("nmethod dependency checking time %fs", dependentCheckTime.seconds());
1672 
1673   tty->print_cr("nmethod blobs per compilation level:");
1674   for (int i = 0; i <= CompLevel_full_optimization; i++) {
1675     const char *level_name;
1676     switch (i) {
1677     case CompLevel_none:              level_name = "none";              break;
1678     case CompLevel_simple:            level_name = "simple";            break;
1679     case CompLevel_limited_profile:   level_name = "limited profile";   break;
1680     case CompLevel_full_profile:      level_name = "full profile";      break;
1681     case CompLevel_full_optimization: level_name = "full optimization"; break;
1682     default: level_name = "invalid"; assert(false, "invalid compilation level"); break;
1683     }
1684     tty->print_cr("%s:", level_name);
1685     live[i].print("live");
1686   }
1687 
1688   struct {
1689     const char* name;
1690     const CodeBlob_sizes* sizes;
1691   } non_nmethod_blobs[] = {
1692     { "runtime",        &runtimeStub },
1693     { "uncommon trap",  &uncommonTrapStub },
1694     { "deoptimization", &deoptimizationStub },
1695     { "adapter",        &adapter },
1696     { "buffer blob",    &bufferBlob },
1697     { "other",          &other },
1698   };
1699   tty->print_cr("Non-nmethod blobs:");
1700   for (auto& blob: non_nmethod_blobs) {
1701     blob.sizes->print(blob.name);
1702   }
1703 
1704   if (WizardMode) {
1705     // Print the oop map usage.
1706     int code_size = 0;
1707     int number_of_blobs = 0;
1708     int number_of_oop_maps = 0;
1709     int map_size = 0;
1710     FOR_ALL_ALLOCABLE_HEAPS(heap) {
1711       FOR_ALL_BLOBS(cb, *heap) {
1712         number_of_blobs++;
1713         code_size += cb->code_size();
1714         ImmutableOopMapSet* set = cb->oop_maps();
1715         if (set != nullptr) {
1716           number_of_oop_maps += set->count();
1717           map_size           += set->nr_of_bytes();
1718         }
1719       }
1720     }
1721     tty->print_cr("OopMaps");
1722     tty->print_cr("  #blobs    = %d", number_of_blobs);
1723     tty->print_cr("  code size = %d", code_size);
1724     tty->print_cr("  #oop_maps = %d", number_of_oop_maps);
1725     tty->print_cr("  map size  = %d", map_size);
1726   }
1727 
1728 #endif // !PRODUCT
1729 }
1730 
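     // Print size/used/max_used/free for each code heap; with 'detailed', also print heap
     // bounds, overall totals for a segmented cache, blob counts, and the compilation status.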
1731 void CodeCache::print_summary(outputStream* st, bool detailed) {
1732   int full_count = 0;
1733   julong total_used = 0;
1734   julong total_max_used = 0;
1735   julong total_free = 0;
1736   julong total_size = 0;
1737   FOR_ALL_HEAPS(heap_iterator) {
1738     CodeHeap* heap = (*heap_iterator);
1739     size_t total = (heap->high_boundary() - heap->low_boundary());
1740     if (_heaps->length() >= 1) {
1741       st->print("%s:", heap->name());
1742     } else {
1743       st->print("CodeCache:");
1744     }
1745     size_t size = total/K;
1746     size_t used = (total - heap->unallocated_capacity())/K;
1747     size_t max_used = heap->max_allocated_capacity()/K;
1748     size_t free = heap->unallocated_capacity()/K;
1749     total_size += size;
1750     total_used += used;
1751     total_max_used += max_used;
1752     total_free += free;
1753     st->print_cr(" size=" SIZE_FORMAT "Kb used=" SIZE_FORMAT
1754                  "Kb max_used=" SIZE_FORMAT "Kb free=" SIZE_FORMAT "Kb",
1755                  size, used, max_used, free);
1756 
1757     if (detailed) {
1758       st->print_cr(" bounds [" INTPTR_FORMAT ", " INTPTR_FORMAT ", " INTPTR_FORMAT "]",
1759                    p2i(heap->low_boundary()),
1760                    p2i(heap->high()),
1761                    p2i(heap->high_boundary()));
1762 
1763       full_count += get_codemem_full_count(heap->code_blob_type());
1764     }
1765   }
1766 
1767   if (detailed) {
1768     if (SegmentedCodeCache) {
1769       st->print("CodeCache:");
1770       st->print_cr(" size=" JULONG_FORMAT "Kb, used=" JULONG_FORMAT
1771                    "Kb, max_used=" JULONG_FORMAT "Kb, free=" JULONG_FORMAT "Kb",
1772                    total_size, total_used, total_max_used, total_free);
1773     }
1774     st->print_cr(" total_blobs=" UINT32_FORMAT ", nmethods=" UINT32_FORMAT
1775                  ", adapters=" UINT32_FORMAT ", full_count=" UINT32_FORMAT,
1776                  blob_count(), nmethod_count(), adapter_count(), full_count);
1777     st->print_cr("Compilation: %s, stopped_count=%d, restarted_count=%d",
1778                  CompileBroker::should_compile_new_jobs() ?
1779                  "enabled" : Arguments::mode() == Arguments::_int ?
1780                  "disabled (interpreter mode)" :
1781                  "disabled (not enough contiguous free space left)",
1782                  CompileBroker::get_total_compiler_stopped_count(),
1783                  CompileBroker::get_total_compiler_restarted_count());
1784   }
1785 }
1786 
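     // Print one line per not-unloading compiled method: compile id, compilation level,
     // state, name, and the addresses of its header, code start, and code end.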
1787 void CodeCache::print_codelist(outputStream* st) {
1788   MutexLocker mu(CodeCache_lock, Mutex::_no_safepoint_check_flag);
1789 
1790   CompiledMethodIterator iter(CompiledMethodIterator::only_not_unloading);
1791   while (iter.next()) {
1792     CompiledMethod* cm = iter.method();
1793     ResourceMark rm;
1794     char* method_name = cm->method()->name_and_sig_as_C_string();
1795     st->print_cr("%d %d %d %s [" INTPTR_FORMAT ", " INTPTR_FORMAT " - " INTPTR_FORMAT "]",
1796                  cm->compile_id(), cm->comp_level(), cm->get_state(),
1797                  method_name,
1798                  (intptr_t)cm->header_begin(), (intptr_t)cm->code_begin(), (intptr_t)cm->code_end());
1799   }
1800 }
1801 
1802 void CodeCache::print_layout(outputStream* st) {
1803   MutexLocker mu(CodeCache_lock, Mutex::_no_safepoint_check_flag);
1804   ResourceMark rm;
1805   print_summary(st, true);
1806 }
1807 
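     // Print code cache statistics in name='value' attribute form.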
1808 void CodeCache::log_state(outputStream* st) {
1809   st->print(" total_blobs='" UINT32_FORMAT "' nmethods='" UINT32_FORMAT "'"
1810             " adapters='" UINT32_FORMAT "' free_code_cache='" SIZE_FORMAT "'",
1811             blob_count(), nmethod_count(), adapter_count(),
1812             unallocated_capacity());
1813 }
1814 
1815 #ifdef LINUX
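     // Write a map of code blob address ranges and names so the Linux 'perf' tool can
     // symbolize JIT-compiled frames. Each line has the form: <code start> <code size> <name>.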
1816 void CodeCache::write_perf_map(const char* filename) {
1817   MutexLocker mu(CodeCache_lock, Mutex::_no_safepoint_check_flag);
1818 
1819   // Perf expects to find the map file at /tmp/perf-<pid>.map
1820   // if the file name is not specified.
1821   char fname[32];
1822   if (filename == nullptr) {
1823     jio_snprintf(fname, sizeof(fname), "/tmp/perf-%d.map", os::current_process_id());
1824     filename = fname;
1825   }
1826 
1827   fileStream fs(filename, "w");
1828   if (!fs.is_open()) {
1829     log_warning(codecache)("Failed to create %s for perf map", filename);
1830     return;
1831   }
1832 
1833   AllCodeBlobsIterator iter(AllCodeBlobsIterator::only_not_unloading);
1834   while (iter.next()) {
1835     CodeBlob *cb = iter.method();
1836     ResourceMark rm;
1837     const char* method_name =
1838       cb->is_compiled() ? cb->as_compiled_method()->method()->external_name()
1839                         : cb->name();
1840     fs.print_cr(INTPTR_FORMAT " " INTPTR_FORMAT " %s",
1841                 (intptr_t)cb->code_begin(), (intptr_t)cb->code_size(),
1842                 method_name);
1843   }
1844 }
1845 #endif // LINUX
1846 
1847 //---<  BEGIN  >--- CodeHeap State Analytics.
1848 
1849 void CodeCache::aggregate(outputStream *out, size_t granularity) {
1850   FOR_ALL_ALLOCABLE_HEAPS(heap) {
1851     CodeHeapState::aggregate(out, (*heap), granularity);
1852   }
1853 }
1854 
1855 void CodeCache::discard(outputStream *out) {
1856   FOR_ALL_ALLOCABLE_HEAPS(heap) {
1857     CodeHeapState::discard(out, (*heap));
1858   }
1859 }
1860 
1861 void CodeCache::print_usedSpace(outputStream *out) {
1862   FOR_ALL_ALLOCABLE_HEAPS(heap) {
1863     CodeHeapState::print_usedSpace(out, (*heap));
1864   }
1865 }
1866 
1867 void CodeCache::print_freeSpace(outputStream *out) {
1868   FOR_ALL_ALLOCABLE_HEAPS(heap) {
1869     CodeHeapState::print_freeSpace(out, (*heap));
1870   }
1871 }
1872 
1873 void CodeCache::print_count(outputStream *out) {
1874   FOR_ALL_ALLOCABLE_HEAPS(heap) {
1875     CodeHeapState::print_count(out, (*heap));
1876   }
1877 }
1878 
1879 void CodeCache::print_space(outputStream *out) {
1880   FOR_ALL_ALLOCABLE_HEAPS(heap) {
1881     CodeHeapState::print_space(out, (*heap));
1882   }
1883 }
1884 
1885 void CodeCache::print_age(outputStream *out) {
1886   FOR_ALL_ALLOCABLE_HEAPS(heap) {
1887     CodeHeapState::print_age(out, (*heap));
1888   }
1889 }
1890 
1891 void CodeCache::print_names(outputStream *out) {
1892   FOR_ALL_ALLOCABLE_HEAPS(heap) {
1893     CodeHeapState::print_names(out, (*heap));
1894   }
1895 }
1896 //---<  END  >--- CodeHeap State Analytics.