1 /*
   2  * Copyright (c) 1997, 2025, Oracle and/or its affiliates. All rights reserved.
   3  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
   4  *
   5  * This code is free software; you can redistribute it and/or modify it
   6  * under the terms of the GNU General Public License version 2 only, as
   7  * published by the Free Software Foundation.
   8  *
   9  * This code is distributed in the hope that it will be useful, but WITHOUT
  10  * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
  11  * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
  12  * version 2 for more details (a copy is included in the LICENSE file that
  13  * accompanied this code).
  14  *
  15  * You should have received a copy of the GNU General Public License version
  16  * 2 along with this work; if not, write to the Free Software Foundation,
  17  * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
  18  *
  19  * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
  20  * or visit www.oracle.com if you need additional information or have any
  21  * questions.
  22  *
  23  */
  24 
  25 #include "code/codeBlob.hpp"
  26 #include "code/codeCache.hpp"
  27 #include "code/codeHeapState.hpp"
  28 #include "code/compiledIC.hpp"
  29 #include "code/dependencies.hpp"
  30 #include "code/dependencyContext.hpp"
  31 #include "code/nmethod.hpp"
  32 #include "code/pcDesc.hpp"
  33 #include "compiler/compilationPolicy.hpp"
  34 #include "compiler/compileBroker.hpp"
  35 #include "compiler/compilerDefinitions.inline.hpp"
  36 #include "compiler/oopMap.hpp"
  37 #include "gc/shared/barrierSetNMethod.hpp"
  38 #include "gc/shared/classUnloadingContext.hpp"
  39 #include "gc/shared/collectedHeap.hpp"
  40 #include "jfr/jfrEvents.hpp"
  41 #include "jvm_io.h"
  42 #include "logging/log.hpp"
  43 #include "logging/logStream.hpp"
  44 #include "memory/allocation.inline.hpp"
  45 #include "memory/iterator.hpp"
  46 #include "memory/memoryReserver.hpp"
  47 #include "memory/resourceArea.hpp"
  48 #include "memory/universe.hpp"
  49 #include "oops/method.inline.hpp"
  50 #include "oops/objArrayOop.hpp"
  51 #include "oops/oop.inline.hpp"
  52 #include "oops/verifyOopClosure.hpp"
  53 #include "runtime/arguments.hpp"
  54 #include "runtime/atomicAccess.hpp"
  55 #include "runtime/deoptimization.hpp"
  56 #include "runtime/globals_extension.hpp"
  57 #include "runtime/handles.inline.hpp"
  58 #include "runtime/icache.hpp"
  59 #include "runtime/init.hpp"
  60 #include "runtime/java.hpp"
  61 #include "runtime/mutexLocker.hpp"
  62 #include "runtime/os.inline.hpp"
  63 #include "runtime/safepointVerifiers.hpp"
  64 #include "runtime/vmThread.hpp"
  65 #include "sanitizers/leak.hpp"
  66 #include "services/memoryService.hpp"
  67 #include "utilities/align.hpp"
  68 #include "utilities/vmError.hpp"
  69 #include "utilities/xmlstream.hpp"
  70 #ifdef COMPILER1
  71 #include "c1/c1_Compilation.hpp"
  72 #include "c1/c1_Compiler.hpp"
  73 #endif
  74 #ifdef COMPILER2
  75 #include "opto/c2compiler.hpp"
  76 #include "opto/compile.hpp"
  77 #include "opto/node.hpp"
  78 #endif
  79 
  80 // Helper class for printing in CodeCache
  81 class CodeBlob_sizes {
  82  private:
  83   int count;
  84   int total_size;
  85   int header_size;
  86   int code_size;
  87   int stub_size;
  88   int relocation_size;
  89   int scopes_oop_size;
  90   int scopes_metadata_size;
  91   int scopes_data_size;
  92   int scopes_pcs_size;
  93 
  94  public:
  95   CodeBlob_sizes() {
  96     count            = 0;
  97     total_size       = 0;
  98     header_size      = 0;
  99     code_size        = 0;
 100     stub_size        = 0;
 101     relocation_size  = 0;
 102     scopes_oop_size  = 0;
 103     scopes_metadata_size  = 0;
 104     scopes_data_size = 0;
 105     scopes_pcs_size  = 0;
 106   }
 107 
 108   int total() const                              { return total_size; }
 109   bool is_empty() const                          { return count == 0; }
 110 
 111   void print(const char* title) const {
 112     if (is_empty()) {
 113       tty->print_cr(" #%d %s = %dK",
 114                     count,
 115                     title,
 116                     total()                 / (int)K);
 117     } else {
 118       tty->print_cr(" #%d %s = %dK (hdr %dK %d%%, loc %dK %d%%, code %dK %d%%, stub %dK %d%%, [oops %dK %d%%, metadata %dK %d%%, data %dK %d%%, pcs %dK %d%%])",
 119                     count,
 120                     title,
 121                     total()                 / (int)K,
 122                     header_size             / (int)K,
 123                     header_size             * 100 / total_size,
 124                     relocation_size         / (int)K,
 125                     relocation_size         * 100 / total_size,
 126                     code_size               / (int)K,
 127                     code_size               * 100 / total_size,
 128                     stub_size               / (int)K,
 129                     stub_size               * 100 / total_size,
 130                     scopes_oop_size         / (int)K,
 131                     scopes_oop_size         * 100 / total_size,
 132                     scopes_metadata_size    / (int)K,
 133                     scopes_metadata_size    * 100 / total_size,
 134                     scopes_data_size        / (int)K,
 135                     scopes_data_size        * 100 / total_size,
 136                     scopes_pcs_size         / (int)K,
 137                     scopes_pcs_size         * 100 / total_size);
 138     }
 139   }
 140 
 141   void add(CodeBlob* cb) {
 142     count++;
 143     total_size       += cb->size();
 144     header_size      += cb->header_size();
 145     relocation_size  += cb->relocation_size();
 146     if (cb->is_nmethod()) {
 147       nmethod* nm = cb->as_nmethod_or_null();
 148       code_size        += nm->insts_size();
 149       stub_size        += nm->stub_size();
 150 
 151       scopes_oop_size  += nm->oops_size();
 152       scopes_metadata_size  += nm->metadata_size();
 153       scopes_data_size += nm->scopes_data_size();
 154       scopes_pcs_size  += nm->scopes_pcs_size();
 155     } else {
 156       code_size        += cb->code_size();
 157     }
 158   }
 159 };
 160 
 161 // Iterate over all CodeHeaps
 162 #define FOR_ALL_HEAPS(heap) for (GrowableArrayIterator<CodeHeap*> heap = _heaps->begin(); heap != _heaps->end(); ++heap)
 163 #define FOR_ALL_ALLOCABLE_HEAPS(heap) for (GrowableArrayIterator<CodeHeap*> heap = _allocable_heaps->begin(); heap != _allocable_heaps->end(); ++heap)
 164 
 165 // Iterate over all CodeBlobs (cb) on the given CodeHeap
 166 #define FOR_ALL_BLOBS(cb, heap) for (CodeBlob* cb = first_blob(heap); cb != nullptr; cb = next_blob(heap, cb))
 167 
 168 address CodeCache::_low_bound = nullptr;
 169 address CodeCache::_high_bound = nullptr;
 170 volatile int CodeCache::_number_of_nmethods_with_dependencies = 0;
 171 ExceptionCache* volatile CodeCache::_exception_cache_purge_list = nullptr;
 172 
 173 // Initialize arrays of CodeHeap subsets
 174 GrowableArray<CodeHeap*>* CodeCache::_heaps = new(mtCode) GrowableArray<CodeHeap*> (static_cast<int>(CodeBlobType::All), mtCode);
 175 GrowableArray<CodeHeap*>* CodeCache::_nmethod_heaps = new(mtCode) GrowableArray<CodeHeap*> (static_cast<int>(CodeBlobType::All), mtCode);
 176 GrowableArray<CodeHeap*>* CodeCache::_allocable_heaps = new(mtCode) GrowableArray<CodeHeap*> (static_cast<int>(CodeBlobType::All), mtCode);
 177 
 178 static void check_min_size(const char* codeheap, size_t size, size_t required_size) {
 179   if (size < required_size) {
 180     log_debug(codecache)("Code heap (%s) size %zuK below required minimal size %zuK",
 181                          codeheap, size/K, required_size/K);
 182     err_msg title("Not enough space in %s to run VM", codeheap);
 183     err_msg message("%zuK < %zuK", size/K, required_size/K);
 184     vm_exit_during_initialization(title, message);
 185   }
 186 }
 187 
 188 struct CodeHeapInfo {
 189   size_t size;
 190   bool set;
 191   bool enabled;
 192 };
 193 
 194 static void set_size_of_unset_code_heap(CodeHeapInfo* heap, size_t available_size, size_t used_size, size_t min_size) {
 195   assert(!heap->set, "sanity");
 196   heap->size = (available_size > (used_size + min_size)) ? (available_size - used_size) : min_size;
 197 }
 198 
 199 void CodeCache::initialize_heaps() {
 200 
 201   CodeHeapInfo non_nmethod = {NonNMethodCodeHeapSize, FLAG_IS_CMDLINE(NonNMethodCodeHeapSize), true};
 202   CodeHeapInfo profiled = {ProfiledCodeHeapSize, FLAG_IS_CMDLINE(ProfiledCodeHeapSize), true};
 203   CodeHeapInfo non_profiled = {NonProfiledCodeHeapSize, FLAG_IS_CMDLINE(NonProfiledCodeHeapSize), true};
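       // Three potential segments: non-nmethod code (stubs, adapters, ...), profiled
       // nmethods (tier 2/3) and non-profiled nmethods (tier 1/4). Sizes that were not
       // set on the command line are derived below so that the segments together cover
       // the reserved code cache.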
 204 
 205   const bool cache_size_set   = FLAG_IS_CMDLINE(ReservedCodeCacheSize);
 206   const size_t ps             = page_size(false, 8);
 207   const size_t min_size       = MAX2(os::vm_allocation_granularity(), ps);
 208   const size_t min_cache_size = CodeCacheMinimumUseSpace DEBUG_ONLY(* 3); // Make sure we have enough space for VM internal code
 209   size_t cache_size           = align_up(ReservedCodeCacheSize, min_size);
 210 
 211   // Prerequisites
 212   if (!heap_available(CodeBlobType::MethodProfiled)) {
 213     // For compatibility reasons, disabled tiered compilation overrides
 214     // segment size even if it is set explicitly.
 215     non_profiled.size += profiled.size;
 216     // Profiled code heap is not available, forcibly set size to 0
 217     profiled.size = 0;
 218     profiled.set = true;
 219     profiled.enabled = false;
 220   }
 221 
 222   assert(heap_available(CodeBlobType::MethodNonProfiled), "MethodNonProfiled heap is always available for segmented code heap");
 223 
 224   size_t compiler_buffer_size = 0;
 225   COMPILER1_PRESENT(compiler_buffer_size += CompilationPolicy::c1_count() * Compiler::code_buffer_size());
 226   COMPILER2_PRESENT(compiler_buffer_size += CompilationPolicy::c2_count() * C2Compiler::initial_code_buffer_size());
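       // If the non-nmethod heap size was not set explicitly, reserve extra room in it
       // for per-compiler-thread scratch buffers (see the adjustment below).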
 227 
 228   if (!non_nmethod.set) {
 229     non_nmethod.size += compiler_buffer_size;
 230   }
 231 
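       // Distribute the remaining space among the nmethod heaps: if neither size was
       // given, split the space left after the non-nmethod heap evenly; if only one was
       // given, the other receives what remains.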
 232   if (!profiled.set && !non_profiled.set) {
 233     non_profiled.size = profiled.size = (cache_size > non_nmethod.size + 2 * min_size) ?
 234                                         (cache_size - non_nmethod.size) / 2 : min_size;
 235   }
 236 
 237   if (profiled.set && !non_profiled.set) {
 238     set_size_of_unset_code_heap(&non_profiled, cache_size, non_nmethod.size + profiled.size, min_size);
 239   }
 240 
 241   if (!profiled.set && non_profiled.set) {
 242     set_size_of_unset_code_heap(&profiled, cache_size, non_nmethod.size + non_profiled.size, min_size);
 243   }
 244 
 245   // Compatibility.
 246   size_t non_nmethod_min_size = min_cache_size + compiler_buffer_size;
 247   if (!non_nmethod.set && profiled.set && non_profiled.set) {
 248     set_size_of_unset_code_heap(&non_nmethod, cache_size, profiled.size + non_profiled.size, non_nmethod_min_size);
 249   }
 250 
 251   size_t total = non_nmethod.size + profiled.size + non_profiled.size;
 252   if (total != cache_size && !cache_size_set) {
 253     log_info(codecache)("ReservedCodeCache size %zuK changed to total segments size NonNMethod "
 254                         "%zuK NonProfiled %zuK Profiled %zuK = %zuK",
 255                         cache_size/K, non_nmethod.size/K, non_profiled.size/K, profiled.size/K, total/K);
 256     // Adjust ReservedCodeCacheSize as necessary because it was not set explicitly
 257     cache_size = total;
 258   }
 259 
 260   log_debug(codecache)("Initializing code heaps ReservedCodeCache %zuK NonNMethod %zuK"
 261                        " NonProfiled %zuK Profiled %zuK",
 262                        cache_size/K, non_nmethod.size/K, non_profiled.size/K, profiled.size/K);
 263 
 264   // Validation
 265   // Check minimal required sizes
 266   check_min_size("non-nmethod code heap", non_nmethod.size, non_nmethod_min_size);
 267   if (profiled.enabled) {
 268     check_min_size("profiled code heap", profiled.size, min_size);
 269   }
 270   if (non_profiled.enabled) { // non_profiled.enabled is always ON for segmented code heap, leave it checked for clarity
 271     check_min_size("non-profiled code heap", non_profiled.size, min_size);
 272   }
 273   if (cache_size_set) {
 274     check_min_size("reserved code cache", cache_size, min_cache_size);
 275   }
 276 
 277   // ReservedCodeCacheSize was set explicitly, so report an error and abort if it doesn't match the segment sizes
 278   if (total != cache_size && cache_size_set) {
 279     err_msg message("NonNMethodCodeHeapSize (%zuK)", non_nmethod.size/K);
 280     if (profiled.enabled) {
 281       message.append(" + ProfiledCodeHeapSize (%zuK)", profiled.size/K);
 282     }
 283     if (non_profiled.enabled) {
 284       message.append(" + NonProfiledCodeHeapSize (%zuK)", non_profiled.size/K);
 285     }
 286     message.append(" = %zuK", total/K);
 287     message.append((total > cache_size) ? " is greater than " : " is less than ");
 288     message.append("ReservedCodeCacheSize (%zuK).", cache_size/K);
 289 
 290     vm_exit_during_initialization("Invalid code heap sizes", message);
 291   }
 292 
 293   // Compatibility. Print warning if using large pages but not able to use the size given
 294   if (UseLargePages) {
 295     const size_t lg_ps = page_size(false, 1);
 296     if (ps < lg_ps) {
 297       log_warning(codecache)("Code cache size too small for " PROPERFMT " pages. "
 298                              "Reverting to smaller page size (" PROPERFMT ").",
 299                              PROPERFMTARGS(lg_ps), PROPERFMTARGS(ps));
 300     }
 301   }
 302 
 303   // Note: if large page support is enabled, min_size is at least the large
 304   // page size. This ensures that the code cache is covered by large pages.
 305   non_nmethod.size = align_up(non_nmethod.size, min_size);
 306   profiled.size = align_up(profiled.size, min_size);
 307   non_profiled.size = align_up(non_profiled.size, min_size);
 308   cache_size = non_nmethod.size + profiled.size + non_profiled.size;
 309 
 310   FLAG_SET_ERGO(NonNMethodCodeHeapSize, non_nmethod.size);
 311   FLAG_SET_ERGO(ProfiledCodeHeapSize, profiled.size);
 312   FLAG_SET_ERGO(NonProfiledCodeHeapSize, non_profiled.size);
 313   FLAG_SET_ERGO(ReservedCodeCacheSize, cache_size);
 314 
 315   ReservedSpace rs = reserve_heap_memory(cache_size, ps);
 316 
 317   // Register CodeHeaps with LSan as we sometimes embed pointers to malloc memory.
 318   LSAN_REGISTER_ROOT_REGION(rs.base(), rs.size());
 319 
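       // Partition the reserved region in the order [profiled][non-nmethod][non-profiled],
       // which places the non-nmethod heap between the two method heaps (see
       // max_distance_to_non_nmethod()).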
 320   size_t offset = 0;
 321   if (profiled.enabled) {
 322     ReservedSpace profiled_space = rs.partition(offset, profiled.size);
 323     offset += profiled.size;
 324     // Tier 2 and tier 3 (profiled) methods
 325     add_heap(profiled_space, "CodeHeap 'profiled nmethods'", CodeBlobType::MethodProfiled);
 326   }
 327 
 328   ReservedSpace non_method_space = rs.partition(offset, non_nmethod.size);
 329   offset += non_nmethod.size;
 330   // Non-nmethods (stubs, adapters, ...)
 331   add_heap(non_method_space, "CodeHeap 'non-nmethods'", CodeBlobType::NonNMethod);
 332 
 333   if (non_profiled.enabled) {
 334     ReservedSpace non_profiled_space  = rs.partition(offset, non_profiled.size);
 335     // Tier 1 and tier 4 (non-profiled) methods and native methods
 336     add_heap(non_profiled_space, "CodeHeap 'non-profiled nmethods'", CodeBlobType::MethodNonProfiled);
 337   }
 338 }
 339 
 340 size_t CodeCache::page_size(bool aligned, size_t min_pages) {
 341   return aligned ? os::page_size_for_region_aligned(ReservedCodeCacheSize, min_pages) :
 342                    os::page_size_for_region_unaligned(ReservedCodeCacheSize, min_pages);
 343 }
 344 
 345 ReservedSpace CodeCache::reserve_heap_memory(size_t size, size_t rs_ps) {
 346   // Align and reserve space for code cache
 347   const size_t rs_align = MAX2(rs_ps, os::vm_allocation_granularity());
 348   const size_t rs_size = align_up(size, rs_align);
 349 
 350   ReservedSpace rs = CodeMemoryReserver::reserve(rs_size, rs_align, rs_ps);
 351   if (!rs.is_reserved()) {
 352     vm_exit_during_initialization(err_msg("Could not reserve enough space for code cache (%zuK)",
 353                                           rs_size/K));
 354   }
 355 
 356   // Initialize bounds
 357   _low_bound = (address)rs.base();
 358   _high_bound = _low_bound + rs.size();
 359   return rs;
 360 }
 361 
 362 // Heaps available for allocation
 363 bool CodeCache::heap_available(CodeBlobType code_blob_type) {
 364   if (!SegmentedCodeCache) {
 365     // No segmentation: use a single code heap
 366     return (code_blob_type == CodeBlobType::All);
 367   } else if (CompilerConfig::is_interpreter_only()) {
 368     // Interpreter only: we don't need any method code heaps
 369     return (code_blob_type == CodeBlobType::NonNMethod);
 370   } else if (CompilerConfig::is_c1_profiling()) {
 371     // Tiered compilation: use all code heaps
 372     return (code_blob_type < CodeBlobType::All);
 373   } else {
 374     // No TieredCompilation: we only need the non-nmethod and non-profiled code heap
 375     return (code_blob_type == CodeBlobType::NonNMethod) ||
 376            (code_blob_type == CodeBlobType::MethodNonProfiled);
 377   }
 378 }
 379 
 380 const char* CodeCache::get_code_heap_flag_name(CodeBlobType code_blob_type) {
 381   switch(code_blob_type) {
 382   case CodeBlobType::NonNMethod:
 383     return "NonNMethodCodeHeapSize";
 384     break;
 385   case CodeBlobType::MethodNonProfiled:
 386     return "NonProfiledCodeHeapSize";
 387     break;
 388   case CodeBlobType::MethodProfiled:
 389     return "ProfiledCodeHeapSize";
 390     break;
 391   default:
 392     ShouldNotReachHere();
 393     return nullptr;
 394   }
 395 }
 396 
 397 int CodeCache::code_heap_compare(CodeHeap* const &lhs, CodeHeap* const &rhs) {
 398   if (lhs->code_blob_type() == rhs->code_blob_type()) {
 399     return (lhs > rhs) ? 1 : ((lhs < rhs) ? -1 : 0);
 400   } else {
 401     return static_cast<int>(lhs->code_blob_type()) - static_cast<int>(rhs->code_blob_type());
 402   }
 403 }
 404 
 405 void CodeCache::add_heap(CodeHeap* heap) {
 406   assert(!Universe::is_fully_initialized(), "late heap addition?");
 407 
 408   _heaps->insert_sorted<code_heap_compare>(heap);
 409 
 410   CodeBlobType type = heap->code_blob_type();
 411   if (code_blob_type_accepts_nmethod(type)) {
 412     _nmethod_heaps->insert_sorted<code_heap_compare>(heap);
 413   }
 414   if (code_blob_type_accepts_allocable(type)) {
 415     _allocable_heaps->insert_sorted<code_heap_compare>(heap);
 416   }
 417 }
 418 
 419 void CodeCache::add_heap(ReservedSpace rs, const char* name, CodeBlobType code_blob_type) {
 420   // Check if heap is needed
 421   if (!heap_available(code_blob_type)) {
 422     return;
 423   }
 424 
 425   // Create CodeHeap
 426   CodeHeap* heap = new CodeHeap(name, code_blob_type);
 427   add_heap(heap);
 428 
 429   // Reserve Space
 430   size_t size_initial = MIN2(InitialCodeCacheSize, rs.size());
 431   size_initial = align_up(size_initial, rs.page_size());
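       // The heap starts out at size_initial and is expanded on demand in
       // CodeCache::allocate().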
 432   if (!heap->reserve(rs, size_initial, CodeCacheSegmentSize)) {
 433     vm_exit_during_initialization(err_msg("Could not reserve enough space in %s (%zuK)",
 434                                           heap->name(), size_initial/K));
 435   }
 436 
 437   // Register the CodeHeap
 438   MemoryService::add_code_heap_memory_pool(heap, name);
 439 }
 440 
 441 CodeHeap* CodeCache::get_code_heap_containing(void* start) {
 442   FOR_ALL_HEAPS(heap) {
 443     if ((*heap)->contains(start)) {
 444       return *heap;
 445     }
 446   }
 447   return nullptr;
 448 }
 449 
 450 CodeHeap* CodeCache::get_code_heap(const void* cb) {
 451   assert(cb != nullptr, "CodeBlob is null");
 452   FOR_ALL_HEAPS(heap) {
 453     if ((*heap)->contains(cb)) {
 454       return *heap;
 455     }
 456   }
 457   ShouldNotReachHere();
 458   return nullptr;
 459 }
 460 
 461 CodeHeap* CodeCache::get_code_heap(CodeBlobType code_blob_type) {
 462   FOR_ALL_HEAPS(heap) {
 463     if ((*heap)->accepts(code_blob_type)) {
 464       return *heap;
 465     }
 466   }
 467   return nullptr;
 468 }
 469 
 470 CodeBlob* CodeCache::first_blob(CodeHeap* heap) {
 471   assert_locked_or_safepoint(CodeCache_lock);
 472   assert(heap != nullptr, "heap is null");
 473   return (CodeBlob*)heap->first();
 474 }
 475 
 476 CodeBlob* CodeCache::first_blob(CodeBlobType code_blob_type) {
 477   if (heap_available(code_blob_type)) {
 478     return first_blob(get_code_heap(code_blob_type));
 479   } else {
 480     return nullptr;
 481   }
 482 }
 483 
 484 CodeBlob* CodeCache::next_blob(CodeHeap* heap, CodeBlob* cb) {
 485   assert_locked_or_safepoint(CodeCache_lock);
 486   assert(heap != nullptr, "heap is null");
 487   return (CodeBlob*)heap->next(cb);
 488 }
 489 
 490 /**
 491  * Do not seize the CodeCache lock here -- if the caller has not
 492  * already done so, we are going to lose big time, since the code
 493  * cache will contain a garbage CodeBlob until the caller can run
 494  * the constructor for the CodeBlob subclass it is busy
 495  * instantiating.
 496  */
 497 CodeBlob* CodeCache::allocate(uint size, CodeBlobType code_blob_type, bool handle_alloc_failure, CodeBlobType orig_code_blob_type) {
 498   assert_locked_or_safepoint(CodeCache_lock);
 499   assert(size > 0, "Code cache allocation request must be > 0");
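       // The assert above is compiled out in product builds; treat a zero-sized
       // request as an allocation failure.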
 500   if (size == 0) {
 501     return nullptr;
 502   }
 503   CodeBlob* cb = nullptr;
 504 
 505   // Get CodeHeap for the given CodeBlobType
 506   CodeHeap* heap = get_code_heap(code_blob_type);
 507   assert(heap != nullptr, "heap is null");
 508 
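       // Allocation loop: try to allocate from the selected heap; if that fails,
       // expand the heap by CodeCacheExpansionSize. If expansion also fails, either
       // fall back to another code heap (segmented code cache) or report the code
       // cache as full and return nullptr.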
 509   while (true) {
 510     cb = (CodeBlob*)heap->allocate(size);
 511     if (cb != nullptr) break;
 512     if (!heap->expand_by(CodeCacheExpansionSize)) {
 513       // Save original type for error reporting
 514       if (orig_code_blob_type == CodeBlobType::All) {
 515         orig_code_blob_type = code_blob_type;
 516       }
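           // The original type is also used below to avoid falling back to a heap
           // we already tried.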
 517       // Expansion failed
 518       if (SegmentedCodeCache) {
 519         // Fallback solution: Try to store code in another code heap.
 520         // NonNMethod -> MethodNonProfiled -> MethodProfiled (-> MethodNonProfiled)
 521         CodeBlobType type = code_blob_type;
 522         switch (type) {
 523         case CodeBlobType::NonNMethod:
 524           type = CodeBlobType::MethodNonProfiled;
 525           break;
 526         case CodeBlobType::MethodNonProfiled:
 527           type = CodeBlobType::MethodProfiled;
 528           break;
 529         case CodeBlobType::MethodProfiled:
 530           // Avoid loop if we already tried that code heap
 531           if (type == orig_code_blob_type) {
 532             type = CodeBlobType::MethodNonProfiled;
 533           }
 534           break;
 535         default:
 536           break;
 537         }
 538         if (type != code_blob_type && type != orig_code_blob_type && heap_available(type)) {
 539           if (PrintCodeCacheExtension) {
 540             tty->print_cr("Extension of %s failed. Trying to allocate in %s.",
 541                           heap->name(), get_code_heap(type)->name());
 542           }
 543           return allocate(size, type, handle_alloc_failure, orig_code_blob_type);
 544         }
 545       }
 546       if (handle_alloc_failure) {
 547         MutexUnlocker mu(CodeCache_lock, Mutex::_no_safepoint_check_flag);
 548         CompileBroker::handle_full_code_cache(orig_code_blob_type);
 549       }
 550       return nullptr;
 551     } else {
 552       OrderAccess::release(); // ensure heap expansion is visible to an asynchronous observer (e.g. CodeHeapPool::get_memory_usage())
 553     }
 554     if (PrintCodeCacheExtension) {
 555       ResourceMark rm;
 556       if (_nmethod_heaps->length() >= 1) {
 557         tty->print("%s", heap->name());
 558       } else {
 559         tty->print("CodeCache");
 560       }
 561       tty->print_cr(" extended to [" INTPTR_FORMAT ", " INTPTR_FORMAT "] (%zd bytes)",
 562                     (intptr_t)heap->low_boundary(), (intptr_t)heap->high(),
 563                     (address)heap->high() - (address)heap->low_boundary());
 564     }
 565   }
 566   print_trace("allocation", cb, size);
 567   return cb;
 568 }
 569 
 570 void CodeCache::free(CodeBlob* cb) {
 571   assert_locked_or_safepoint(CodeCache_lock);
 572   CodeHeap* heap = get_code_heap(cb);
 573   print_trace("free", cb);
 574   if (cb->is_nmethod()) {
 575     heap->set_nmethod_count(heap->nmethod_count() - 1);
 576     if (((nmethod *)cb)->has_dependencies()) {
 577       AtomicAccess::dec(&_number_of_nmethods_with_dependencies);
 578     }
 579   }
 580   if (cb->is_adapter_blob()) {
 581     heap->set_adapter_count(heap->adapter_count() - 1);
 582   }
 583 
 584   cb->~CodeBlob();
 585   // Get heap for given CodeBlob and deallocate
 586   heap->deallocate(cb);
 587 
 588   assert(heap->blob_count() >= 0, "sanity check");
 589 }
 590 
 591 void CodeCache::free_unused_tail(CodeBlob* cb, size_t used) {
 592   assert_locked_or_safepoint(CodeCache_lock);
 593   guarantee(cb->is_buffer_blob() && strncmp("Interpreter", cb->name(), 11) == 0, "Only possible for interpreter!");
 594   print_trace("free_unused_tail", cb);
 595 
 596   // We also have to account for the extra space (i.e. header) used by the CodeBlob
 597   // which provides the memory (see BufferBlob::create() in codeBlob.cpp).
 598   used += CodeBlob::align_code_offset(cb->header_size());
 599 
 600   // Get heap for given CodeBlob and deallocate its unused tail
 601   get_code_heap(cb)->deallocate_tail(cb, used);
 602   // Adjust the sizes of the CodeBlob
 603   cb->adjust_size(used);
 604 }
 605 
 606 void CodeCache::commit(CodeBlob* cb) {
 607   // this is called by nmethod::nmethod, which must already own CodeCache_lock
 608   assert_locked_or_safepoint(CodeCache_lock);
 609   CodeHeap* heap = get_code_heap(cb);
 610   if (cb->is_nmethod()) {
 611     heap->set_nmethod_count(heap->nmethod_count() + 1);
 612     if (((nmethod *)cb)->has_dependencies()) {
 613       AtomicAccess::inc(&_number_of_nmethods_with_dependencies);
 614     }
 615   }
 616   if (cb->is_adapter_blob()) {
 617     heap->set_adapter_count(heap->adapter_count() + 1);
 618   }
 619 }
 620 
 621 bool CodeCache::contains(void *p) {
 622   // S390 uses contains() in current_frame(), which is used before
 623   // code cache initialization if NativeMemoryTracking=detail is set.
 624   S390_ONLY(if (_heaps == nullptr) return false;)
 625   // It should be ok to call contains without holding a lock.
 626   FOR_ALL_HEAPS(heap) {
 627     if ((*heap)->contains(p)) {
 628       return true;
 629     }
 630   }
 631   return false;
 632 }
 633 
 634 bool CodeCache::contains(nmethod *nm) {
 635   return contains((void *)nm);
 636 }
 637 
 638 // This method is safe to call without holding the CodeCache_lock. It only depends on the _segmap containing
 639 // valid indices, which it always does as long as the CodeBlob is not in the process of being recycled.
 640 CodeBlob* CodeCache::find_blob(void* start) {
 641   // NMT can walk the stack before code cache is created
 642   if (_heaps != nullptr) {
 643     CodeHeap* heap = get_code_heap_containing(start);
 644     if (heap != nullptr) {
 645       return heap->find_blob(start);
 646     }
 647   }
 648   return nullptr;
 649 }
 650 
 651 nmethod* CodeCache::find_nmethod(void* start) {
 652   CodeBlob* cb = find_blob(start);
 653   assert(cb == nullptr || cb->is_nmethod(), "did not find an nmethod");
 654   return (nmethod*)cb;
 655 }
 656 
 657 void CodeCache::blobs_do(void f(CodeBlob* nm)) {
 658   assert_locked_or_safepoint(CodeCache_lock);
 659   FOR_ALL_HEAPS(heap) {
 660     FOR_ALL_BLOBS(cb, *heap) {
 661       f(cb);
 662     }
 663   }
 664 }
 665 
 666 void CodeCache::nmethods_do(void f(nmethod* nm)) {
 667   assert_locked_or_safepoint(CodeCache_lock);
 668   NMethodIterator iter(NMethodIterator::all);
 669   while(iter.next()) {
 670     f(iter.method());
 671   }
 672 }
 673 
 674 void CodeCache::nmethods_do(NMethodClosure* cl) {
 675   assert_locked_or_safepoint(CodeCache_lock);
 676   NMethodIterator iter(NMethodIterator::all);
 677   while(iter.next()) {
 678     cl->do_nmethod(iter.method());
 679   }
 680 }
 681 
 682 void CodeCache::metadata_do(MetadataClosure* f) {
 683   assert_locked_or_safepoint(CodeCache_lock);
 684   NMethodIterator iter(NMethodIterator::all);
 685   while(iter.next()) {
 686     iter.method()->metadata_do(f);
 687   }
 688 }
 689 
 690 // Calculate the number of GCs an nmethod may go unused before it is
 691 // classed as cold.
 692 void CodeCache::update_cold_gc_count() {
 693   if (!MethodFlushing || !UseCodeCacheFlushing || NmethodSweepActivity == 0) {
 694     // No aging
 695     return;
 696   }
 697 
 698   size_t last_used = _last_unloading_used;
 699   double last_time = _last_unloading_time;
 700 
 701   double time = os::elapsedTime();
 702 
 703   size_t free = unallocated_capacity();
 704   size_t max = max_capacity();
 705   size_t used = max - free;
 706   double gc_interval = time - last_time;
 707 
 708   _unloading_threshold_gc_requested = false;
 709   _last_unloading_time = time;
 710   _last_unloading_used = used;
 711 
 712   if (last_time == 0.0) {
 713     // The first GC doesn't have enough information to make good
 714     // decisions, so just keep everything afloat
 715     log_info(codecache)("Unknown code cache pressure; don't age code");
 716     return;
 717   }
 718 
 719   if (gc_interval <= 0.0 || last_used >= used) {
 720     // Dodge corner cases where there is no pressure or negative pressure
 721     // on the code cache. Just don't unload when this happens.
 722     _cold_gc_count = INT_MAX;
 723     log_info(codecache)("No code cache pressure; don't age code");
 724     return;
 725   }
 726 
 727   double allocation_rate = (used - last_used) / gc_interval;
 728 
 729   _unloading_allocation_rates.add(allocation_rate);
 730   _unloading_gc_intervals.add(gc_interval);
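       // Both sequences keep a moving window of the last 10 samples (see their
       // definitions below); the averages are used to estimate future behavior.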
 731 
 732   size_t aggressive_sweeping_free_threshold = StartAggressiveSweepingAt / 100.0 * max;
 733   if (free < aggressive_sweeping_free_threshold) {
 734     // We are already in the red zone; be very aggressive to avoid disaster,
 735     // but not more aggressive than a count of 2. This ensures that an nmethod
 736     // must have gone unused across at least two GCs to be considered cold.
 737     _cold_gc_count = 2;
 738     log_info(codecache)("Code cache critically low; use aggressive aging");
 739     return;
 740   }
 741 
 742   // The code cache has an expected time for cold nmethods to "time out"
 743   // when they have not been used. The time for nmethods to time out
 744   // depends on how long we expect we can keep allocating code until
 745   // aggressive sweeping starts, based on sampled allocation rates.
 746   double average_gc_interval = _unloading_gc_intervals.avg();
 747   double average_allocation_rate = _unloading_allocation_rates.avg();
 748   double time_to_aggressive = ((double)(free - aggressive_sweeping_free_threshold)) / average_allocation_rate;
 749   double cold_timeout = time_to_aggressive / NmethodSweepActivity;
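       // With NmethodSweepActivity == 1 an nmethod may thus stay unused for roughly the
       // whole estimated time until aggressive sweeping; larger values shorten the
       // timeout proportionally.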
 750 
 751   // Convert time to GC cycles, and crop at INT_MAX. The reason for
 752   // that is that the _cold_gc_count will be added to an epoch number
 753   // and that addition must not overflow, or we can crash the VM.
 754   // As above, clamp to at least 2 so that an nmethod must have gone unused
 755   // across at least two GCs to be considered cold.
 756   _cold_gc_count = MAX2(MIN2((uint64_t)(cold_timeout / average_gc_interval), (uint64_t)INT_MAX), (uint64_t)2);
 757 
 758   double used_ratio = double(used) / double(max);
 759   double last_used_ratio = double(last_used) / double(max);
 760   log_info(codecache)("Allocation rate: %.3f KB/s, time to aggressive unloading: %.3f s, cold timeout: %.3f s, cold gc count: " UINT64_FORMAT
 761                       ", used: %.3f MB (%.3f%%), last used: %.3f MB (%.3f%%), gc interval: %.3f s",
 762                       average_allocation_rate / K, time_to_aggressive, cold_timeout, _cold_gc_count,
 763                       double(used) / M, used_ratio * 100.0, double(last_used) / M, last_used_ratio * 100.0, average_gc_interval);
 764 
 765 }
 766 
 767 uint64_t CodeCache::cold_gc_count() {
 768   return _cold_gc_count;
 769 }
 770 
 771 void CodeCache::gc_on_allocation() {
 772   if (!is_init_completed()) {
 773     // Let's not heuristically trigger GCs before the JVM is ready for GCs, no matter what
 774     return;
 775   }
 776 
 777   size_t free = unallocated_capacity();
 778   size_t max = max_capacity();
 779   size_t used = max - free;
 780   double free_ratio = double(free) / double(max);
 781   if (free_ratio <= StartAggressiveSweepingAt / 100.0)  {
 782     // In case the GC is concurrent, we make sure only one thread requests the GC.
 783     if (AtomicAccess::cmpxchg(&_unloading_threshold_gc_requested, false, true) == false) {
 784       log_info(codecache)("Triggering aggressive GC due to having only %.3f%% free memory", free_ratio * 100.0);
 785       Universe::heap()->collect(GCCause::_codecache_GC_aggressive);
 786     }
 787     return;
 788   }
 789 
 790   size_t last_used = _last_unloading_used;
 791   if (last_used >= used) {
 792     // No increase since last GC; no need to sweep yet
 793     return;
 794   }
 795   size_t allocated_since_last = used - last_used;
 796   double allocated_since_last_ratio = double(allocated_since_last) / double(max);
 797   double threshold = SweeperThreshold / 100.0;
 798   double used_ratio = double(used) / double(max);
 799   double last_used_ratio = double(last_used) / double(max);
 800   if (used_ratio > threshold) {
 801     // After threshold is reached, scale it by free_ratio so that more aggressive
 802     // GC is triggered as we approach code cache exhaustion
 803     threshold *= free_ratio;
 804   }
 805   // If the code cache keeps being allocated from without any GC at all, make
 806   // sure a GC is eventually triggered to avoid trouble.
 807   if (allocated_since_last_ratio > threshold) {
 808     // In case the GC is concurrent, we make sure only one thread requests the GC.
 809     if (AtomicAccess::cmpxchg(&_unloading_threshold_gc_requested, false, true) == false) {
 810       log_info(codecache)("Triggering threshold (%.3f%%) GC due to allocating %.3f%% since last unloading (%.3f%% used -> %.3f%% used)",
 811                           threshold * 100.0, allocated_since_last_ratio * 100.0, last_used_ratio * 100.0, used_ratio * 100.0);
 812       Universe::heap()->collect(GCCause::_codecache_GC_threshold);
 813     }
 814   }
 815 }
 816 
 817 // We initialize _gc_epoch to 2 because previous_completed_gc_marking_cycle()
 818 // subtracts up to 2 from it, and the type is unsigned. We don't want underflow.
 819 //
 820 // Odd values mean that marking is in progress, and even values mean that no
 821 // marking is currently active.
 822 uint64_t CodeCache::_gc_epoch = 2;
 823 
 824 // After how many GCs without use do we consider an nmethod cold?
 825 uint64_t CodeCache::_cold_gc_count = INT_MAX;
 826 
 827 double CodeCache::_last_unloading_time = 0.0;
 828 size_t CodeCache::_last_unloading_used = 0;
 829 volatile bool CodeCache::_unloading_threshold_gc_requested = false;
 830 TruncatedSeq CodeCache::_unloading_gc_intervals(10 /* samples */);
 831 TruncatedSeq CodeCache::_unloading_allocation_rates(10 /* samples */);
 832 
 833 uint64_t CodeCache::gc_epoch() {
 834   return _gc_epoch;
 835 }
 836 
 837 bool CodeCache::is_gc_marking_cycle_active() {
 838   // Odd means that marking is active
 839   return (_gc_epoch % 2) == 1;
 840 }
 841 
 842 uint64_t CodeCache::previous_completed_gc_marking_cycle() {
 843   if (is_gc_marking_cycle_active()) {
 844     return _gc_epoch - 2;
 845   } else {
 846     return _gc_epoch - 1;
 847   }
 848 }
 849 
 850 void CodeCache::on_gc_marking_cycle_start() {
 851   assert(!is_gc_marking_cycle_active(), "Previous marking cycle never ended");
 852   ++_gc_epoch;
 853 }
 854 
 855 // Once started, the code cache marking cycle must only be finished after marking of
 856 // the Java heap is complete. Otherwise nmethods could appear to be not on stack even
 857 // if they have frames in continuation StackChunks that were not yet visited.
 858 void CodeCache::on_gc_marking_cycle_finish() {
 859   assert(is_gc_marking_cycle_active(), "Marking cycle started before last one finished");
 860   ++_gc_epoch;
 861   update_cold_gc_count();
 862 }
 863 
 864 void CodeCache::arm_all_nmethods() {
 865   BarrierSet::barrier_set()->barrier_set_nmethod()->arm_all_nmethods();
 866 }
 867 
 868 // Mark nmethods for unloading if they contain otherwise unreachable oops.
 869 void CodeCache::do_unloading(bool unloading_occurred) {
 870   assert_locked_or_safepoint(CodeCache_lock);
 871   NMethodIterator iter(NMethodIterator::all);
 872   while(iter.next()) {
 873     iter.method()->do_unloading(unloading_occurred);
 874   }
 875 }
 876 
 877 void CodeCache::verify_clean_inline_caches() {
 878 #ifdef ASSERT
 879   if (!VerifyInlineCaches) return;
 880   NMethodIterator iter(NMethodIterator::not_unloading);
 881   while(iter.next()) {
 882     nmethod* nm = iter.method();
 883     nm->verify_clean_inline_caches();
 884     nm->verify();
 885   }
 886 #endif
 887 }
 888 
 889 // Defer freeing of concurrently cleaned ExceptionCache entries until
 890 // after a global handshake operation.
 891 void CodeCache::release_exception_cache(ExceptionCache* entry) {
 892   if (SafepointSynchronize::is_at_safepoint()) {
 893     delete entry;
 894   } else {
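         // Lock-free push onto the purge list: retry until the CAS installs this
         // entry as the new list head.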
 895     for (;;) {
 896       ExceptionCache* purge_list_head = AtomicAccess::load(&_exception_cache_purge_list);
 897       entry->set_purge_list_next(purge_list_head);
 898       if (AtomicAccess::cmpxchg(&_exception_cache_purge_list, purge_list_head, entry) == purge_list_head) {
 899         break;
 900       }
 901     }
 902   }
 903 }
 904 
 905 // Delete exception caches that have been concurrently unlinked and whose
 906 // unlinking has since been followed by a global handshake operation.
 907 void CodeCache::purge_exception_caches() {
 908   ExceptionCache* curr = _exception_cache_purge_list;
 909   while (curr != nullptr) {
 910     ExceptionCache* next = curr->purge_list_next();
 911     delete curr;
 912     curr = next;
 913   }
 914   _exception_cache_purge_list = nullptr;
 915 }
 916 
 917 // Restart the compiler if possible and required.
 918 void CodeCache::maybe_restart_compiler(size_t freed_memory) {
 919 
 920   // Try to start the compiler again if we freed any memory
 921   if (!CompileBroker::should_compile_new_jobs() && freed_memory != 0) {
 922     CompileBroker::set_should_compile_new_jobs(CompileBroker::run_compilation);
 923     log_info(codecache)("Restarting compiler");
 924     EventJITRestart event;
 925     event.set_freedMemory(freed_memory);
 926     event.set_codeCacheMaxCapacity(CodeCache::max_capacity());
 927     event.commit();
 928   }
 929 }
 930 
 931 uint8_t CodeCache::_unloading_cycle = 1;
 932 
 933 void CodeCache::increment_unloading_cycle() {
 934   // 2-bit value (see IsUnloadingState in nmethod.cpp for details)
 935   // 0 is reserved for new methods.
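       // The cycle value therefore advances 1 -> 2 -> 3 -> 1 -> ...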
 936   _unloading_cycle = (_unloading_cycle + 1) % 4;
 937   if (_unloading_cycle == 0) {
 938     _unloading_cycle = 1;
 939   }
 940 }
 941 
 942 CodeCache::UnlinkingScope::UnlinkingScope(BoolObjectClosure* is_alive)
 943   : _is_unloading_behaviour(is_alive)
 944 {
 945   _saved_behaviour = IsUnloadingBehaviour::current();
 946   IsUnloadingBehaviour::set_current(&_is_unloading_behaviour);
 947   increment_unloading_cycle();
 948   DependencyContext::cleaning_start();
 949 }
 950 
 951 CodeCache::UnlinkingScope::~UnlinkingScope() {
 952   IsUnloadingBehaviour::set_current(_saved_behaviour);
 953   DependencyContext::cleaning_end();
 954 }
 955 
 956 void CodeCache::verify_oops() {
 957   MutexLocker mu(CodeCache_lock, Mutex::_no_safepoint_check_flag);
 958   VerifyOopClosure voc;
 959   NMethodIterator iter(NMethodIterator::not_unloading);
 960   while(iter.next()) {
 961     nmethod* nm = iter.method();
 962     nm->oops_do(&voc);
 963     nm->verify_oop_relocations();
 964   }
 965 }
 966 
 967 int CodeCache::blob_count(CodeBlobType code_blob_type) {
 968   CodeHeap* heap = get_code_heap(code_blob_type);
 969   return (heap != nullptr) ? heap->blob_count() : 0;
 970 }
 971 
 972 int CodeCache::blob_count() {
 973   int count = 0;
 974   FOR_ALL_HEAPS(heap) {
 975     count += (*heap)->blob_count();
 976   }
 977   return count;
 978 }
 979 
 980 int CodeCache::nmethod_count(CodeBlobType code_blob_type) {
 981   CodeHeap* heap = get_code_heap(code_blob_type);
 982   return (heap != nullptr) ? heap->nmethod_count() : 0;
 983 }
 984 
 985 int CodeCache::nmethod_count() {
 986   int count = 0;
 987   for (CodeHeap* heap : *_nmethod_heaps) {
 988     count += heap->nmethod_count();
 989   }
 990   return count;
 991 }
 992 
 993 int CodeCache::adapter_count(CodeBlobType code_blob_type) {
 994   CodeHeap* heap = get_code_heap(code_blob_type);
 995   return (heap != nullptr) ? heap->adapter_count() : 0;
 996 }
 997 
 998 int CodeCache::adapter_count() {
 999   int count = 0;
1000   FOR_ALL_HEAPS(heap) {
1001     count += (*heap)->adapter_count();
1002   }
1003   return count;
1004 }
1005 
1006 address CodeCache::low_bound(CodeBlobType code_blob_type) {
1007   CodeHeap* heap = get_code_heap(code_blob_type);
1008   return (heap != nullptr) ? (address)heap->low_boundary() : nullptr;
1009 }
1010 
1011 address CodeCache::high_bound(CodeBlobType code_blob_type) {
1012   CodeHeap* heap = get_code_heap(code_blob_type);
1013   return (heap != nullptr) ? (address)heap->high_boundary() : nullptr;
1014 }
1015 
1016 size_t CodeCache::capacity() {
1017   size_t cap = 0;
1018   FOR_ALL_ALLOCABLE_HEAPS(heap) {
1019     cap += (*heap)->capacity();
1020   }
1021   return cap;
1022 }
1023 
1024 size_t CodeCache::unallocated_capacity(CodeBlobType code_blob_type) {
1025   CodeHeap* heap = get_code_heap(code_blob_type);
1026   return (heap != nullptr) ? heap->unallocated_capacity() : 0;
1027 }
1028 
1029 size_t CodeCache::unallocated_capacity() {
1030   size_t unallocated_cap = 0;
1031   FOR_ALL_ALLOCABLE_HEAPS(heap) {
1032     unallocated_cap += (*heap)->unallocated_capacity();
1033   }
1034   return unallocated_cap;
1035 }
1036 
1037 size_t CodeCache::max_capacity() {
1038   size_t max_cap = 0;
1039   FOR_ALL_ALLOCABLE_HEAPS(heap) {
1040     max_cap += (*heap)->max_capacity();
1041   }
1042   return max_cap;
1043 }
1044 
1045 bool CodeCache::is_non_nmethod(address addr) {
1046   CodeHeap* blob = get_code_heap(CodeBlobType::NonNMethod);
1047   return blob->contains(addr);
1048 }
1049 
1050 size_t CodeCache::max_distance_to_non_nmethod() {
1051   if (!SegmentedCodeCache) {
1052     return ReservedCodeCacheSize;
1053   } else {
1054     CodeHeap* blob = get_code_heap(CodeBlobType::NonNMethod);
1055     // the max distance is minimized by placing the NonNMethod segment
1056     // in between MethodProfiled and MethodNonProfiled segments
1057     size_t dist1 = (size_t)blob->high() - (size_t)_low_bound;
1058     size_t dist2 = (size_t)_high_bound - (size_t)blob->low();
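         // Return the worse of the two distances, i.e. from either end of the reserved
         // code cache to the far end of the non-nmethod heap.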
1059     return dist1 > dist2 ? dist1 : dist2;
1060   }
1061 }
1062 
1063 // Returns the reverse free ratio. E.g., if 25% (1/4) of the code cache
1064 // is free, reverse_free_ratio() returns 4.
1065 // Since code heap for each type of code blobs falls forward to the next
1066 // type of code heap, return the reverse free ratio for the entire
1067 // code cache.
1068 double CodeCache::reverse_free_ratio() {
1069   double unallocated = MAX2((double)unallocated_capacity(), 1.0); // Avoid division by 0;
1070   double max = (double)max_capacity();
1071   double result = max / unallocated;
1072   assert (max >= unallocated, "Must be");
1073   assert (result >= 1.0, "reverse_free_ratio must be at least 1. It is %f", result);
1074   return result;
1075 }
1076 
1077 size_t CodeCache::bytes_allocated_in_freelists() {
1078   size_t allocated_bytes = 0;
1079   FOR_ALL_ALLOCABLE_HEAPS(heap) {
1080     allocated_bytes += (*heap)->allocated_in_freelist();
1081   }
1082   return allocated_bytes;
1083 }
1084 
1085 int CodeCache::allocated_segments() {
1086   int number_of_segments = 0;
1087   FOR_ALL_ALLOCABLE_HEAPS(heap) {
1088     number_of_segments += (*heap)->allocated_segments();
1089   }
1090   return number_of_segments;
1091 }
1092 
1093 size_t CodeCache::freelists_length() {
1094   size_t length = 0;
1095   FOR_ALL_ALLOCABLE_HEAPS(heap) {
1096     length += (*heap)->freelist_length();
1097   }
1098   return length;
1099 }
1100 
1101 void icache_init();
1102 
1103 void CodeCache::initialize() {
1104   assert(CodeCacheSegmentSize >= (size_t)CodeEntryAlignment, "CodeCacheSegmentSize must be large enough to align entry points");
1105 #ifdef COMPILER2
1106   assert(CodeCacheSegmentSize >= (size_t)OptoLoopAlignment,  "CodeCacheSegmentSize must be large enough to align inner loops");
1107 #endif
1108   assert(CodeCacheSegmentSize >= sizeof(jdouble),    "CodeCacheSegmentSize must be large enough to align constants");
1109   // This was originally just an alignment check that caused a failure. Instead,
1110   // round the expansion size up to the page size. In particular, Solaris moved to
1111   // a larger default page size.
1112   CodeCacheExpansionSize = align_up(CodeCacheExpansionSize, os::vm_page_size());
1113 
1114   if (SegmentedCodeCache) {
1115     // Use multiple code heaps
1116     initialize_heaps();
1117   } else {
1118     // Use a single code heap
1119     FLAG_SET_ERGO(NonNMethodCodeHeapSize, (uintx)os::vm_page_size());
1120     FLAG_SET_ERGO(ProfiledCodeHeapSize, 0);
1121     FLAG_SET_ERGO(NonProfiledCodeHeapSize, 0);
1122 
1123     // If InitialCodeCacheSize is equal to ReservedCodeCacheSize, then it's more likely
1124     // users want to use the largest available page.
1125     const size_t min_pages = (InitialCodeCacheSize == ReservedCodeCacheSize) ? 1 : 8;
1126     ReservedSpace rs = reserve_heap_memory(ReservedCodeCacheSize, page_size(false, min_pages));
1127     // Register CodeHeaps with LSan as we sometimes embed pointers to malloc memory.
1128     LSAN_REGISTER_ROOT_REGION(rs.base(), rs.size());
1129     add_heap(rs, "CodeCache", CodeBlobType::All);
1130   }
1131 
1132   // Initialize ICache flush mechanism
1133   // This service is needed for os::register_code_area
1134   icache_init();
1135 
1136   // Give OS a chance to register generated code area.
1137   // This is used on Windows 64 bit platforms to register
1138   // Structured Exception Handlers for our generated code.
1139   os::register_code_area((char*)low_bound(), (char*)high_bound());
1140 }
1141 
1142 void codeCache_init() {
1143   CodeCache::initialize();
1144 }
1145 
1146 //------------------------------------------------------------------------------------------------
1147 
1148 bool CodeCache::has_nmethods_with_dependencies() {
1149   return AtomicAccess::load_acquire(&_number_of_nmethods_with_dependencies) != 0;
1150 }
1151 
1152 void CodeCache::clear_inline_caches() {
1153   assert_locked_or_safepoint(CodeCache_lock);
1154   NMethodIterator iter(NMethodIterator::not_unloading);
1155   while(iter.next()) {
1156     iter.method()->clear_inline_caches();
1157   }
1158 }
1159 
1160 // Only used by whitebox API
1161 void CodeCache::cleanup_inline_caches_whitebox() {
1162   assert_locked_or_safepoint(CodeCache_lock);
1163   NMethodIterator iter(NMethodIterator::not_unloading);
1164   while(iter.next()) {
1165     iter.method()->cleanup_inline_caches_whitebox();
1166   }
1167 }
1168 
1169 // Keeps track of time spent for checking dependencies
1170 NOT_PRODUCT(static elapsedTimer dependentCheckTime;)
1171 
1172 #ifndef PRODUCT
1173 // Check if any of the live methods' dependencies have been invalidated.
1174 // (This is expensive!)
1175 static void check_live_nmethods_dependencies(DepChange& changes) {
1176   // Checked dependencies are allocated into this ResourceMark
1177   ResourceMark rm;
1178 
1179   // Turn off dependency tracing while actually testing dependencies.
1180   FlagSetting fs(Dependencies::_verify_in_progress, true);
1181 
1182   typedef HashTable<DependencySignature, int, 11027,
1183                             AnyObj::RESOURCE_AREA, mtInternal,
1184                             &DependencySignature::hash,
1185                             &DependencySignature::equals> DepTable;
1186 
1187   DepTable* table = new DepTable();
1188 
1189   // Iterate over live nmethods and check dependencies of all nmethods that are not
1190   // marked for deoptimization. A particular dependency is only checked once.
1191   NMethodIterator iter(NMethodIterator::not_unloading);
1192   while(iter.next()) {
1193     nmethod* nm = iter.method();
1194     // Only notify for live nmethods
1195     if (!nm->is_marked_for_deoptimization()) {
1196       for (Dependencies::DepStream deps(nm); deps.next(); ) {
1197         // Construct abstraction of a dependency.
1198         DependencySignature* current_sig = new DependencySignature(deps);
1199 
1200         // Determine if dependency is already checked. table->put(...) returns
1201         // 'true' if the dependency is added (i.e., was not in the hashtable).
1202         if (table->put(*current_sig, 1)) {
1203           if (deps.check_dependency() != nullptr) {
1204             // Dependency checking failed. Print out information about the failed
1205             // dependency and finally fail with an assert. We can fail here, since
1206             // dependency checking is never done in a product build.
1207             tty->print_cr("Failed dependency:");
1208             changes.print();
1209             nm->print();
1210             nm->print_dependencies_on(tty);
1211             assert(false, "Should have been marked for deoptimization");
1212           }
1213         }
1214       }
1215     }
1216   }
1217 }
1218 #endif
1219 
1220 void CodeCache::mark_for_deoptimization(DeoptimizationScope* deopt_scope, KlassDepChange& changes) {
1221   MutexLocker mu(CodeCache_lock, Mutex::_no_safepoint_check_flag);
1222 
1223   // search the hierarchy looking for nmethods which are affected by the loading of this class
1224 
1225   // then search the interfaces this class implements looking for nmethods
1226   // which might be dependent on the fact that an interface only had one
1227   // implementor.
1228   // nmethod::check_all_dependencies works correctly only if no safepoint
1229   // can happen.
1230   NoSafepointVerifier nsv;
1231   for (DepChange::ContextStream str(changes, nsv); str.next(); ) {
1232     InstanceKlass* d = str.klass();
1233     d->mark_dependent_nmethods(deopt_scope, changes);
1234   }
1235 
1236 #ifndef PRODUCT
1237   if (VerifyDependencies) {
1238     // Object pointers are used as unique identifiers for dependency arguments. This
1239     // is only possible if no safepoint, i.e., GC occurs during the verification code.
1240     dependentCheckTime.start();
1241     check_live_nmethods_dependencies(changes);
1242     dependentCheckTime.stop();
1243   }
1244 #endif
1245 }
1246 
1247 #if INCLUDE_JVMTI
1248 // RedefineClasses support for saving nmethods that are dependent on "old" methods.
1249 // We don't really expect this table to grow very large.  If it does, it can become a hashtable.
1250 static GrowableArray<nmethod*>* old_nmethod_table = nullptr;
1251 
1252 static void add_to_old_table(nmethod* c) {
1253   if (old_nmethod_table == nullptr) {
1254     old_nmethod_table = new (mtCode) GrowableArray<nmethod*>(100, mtCode);
1255   }
1256   old_nmethod_table->push(c);
1257 }
1258 
1259 static void reset_old_method_table() {
1260   if (old_nmethod_table != nullptr) {
1261     delete old_nmethod_table;
1262     old_nmethod_table = nullptr;
1263   }
1264 }
1265 
1266 // Remove this method when flushed.
1267 void CodeCache::unregister_old_nmethod(nmethod* c) {
1268   assert_lock_strong(CodeCache_lock);
1269   if (old_nmethod_table != nullptr) {
1270     int index = old_nmethod_table->find(c);
1271     if (index != -1) {
1272       old_nmethod_table->delete_at(index);
1273     }
1274   }
1275 }
1276 
1277 void CodeCache::old_nmethods_do(MetadataClosure* f) {
1278   // Walk old method table and mark those on stack.
1279   int length = 0;
1280   if (old_nmethod_table != nullptr) {
1281     length = old_nmethod_table->length();
1282     for (int i = 0; i < length; i++) {
1283       // Walk all methods saved on the last pass.  Concurrent class unloading may
1284       // also be looking at this method's metadata, so don't delete it yet if
1285       // it is marked as unloaded.
1286       old_nmethod_table->at(i)->metadata_do(f);
1287     }
1288   }
1289   log_debug(redefine, class, nmethod)("Walked %d nmethods for mark_on_stack", length);
1290 }
1291 
1292 // Walk compiled methods and mark dependent methods for deoptimization.
1293 void CodeCache::mark_dependents_for_evol_deoptimization(DeoptimizationScope* deopt_scope) {
1294   assert(SafepointSynchronize::is_at_safepoint(), "Can only do this at a safepoint!");
1295   // Each redefinition creates a new set of nmethods that have references to "old" Methods
1296   // So delete old method table and create a new one.
1297   reset_old_method_table();
1298 
1299   NMethodIterator iter(NMethodIterator::all);
1300   while(iter.next()) {
1301     nmethod* nm = iter.method();
1302     // Walk all alive nmethods to check for old Methods.
1303     // This includes methods whose inline caches point to old methods, so
1304     // inline cache clearing is unnecessary.
1305     if (nm->has_evol_metadata()) {
1306       deopt_scope->mark(nm);
1307       add_to_old_table(nm);
1308     }
1309   }
1310 }
1311 
1312 void CodeCache::mark_all_nmethods_for_evol_deoptimization(DeoptimizationScope* deopt_scope) {
1313   assert(SafepointSynchronize::is_at_safepoint(), "Can only do this at a safepoint!");
1314   NMethodIterator iter(NMethodIterator::all);
1315   while(iter.next()) {
1316     nmethod* nm = iter.method();
1317     if (!nm->method()->is_method_handle_intrinsic()) {
1318       if (nm->can_be_deoptimized()) {
1319         deopt_scope->mark(nm);
1320       }
1321       if (nm->has_evol_metadata()) {
1322         add_to_old_table(nm);
1323       }
1324     }
1325   }
1326 }
1327 
1328 #endif // INCLUDE_JVMTI
1329 
1330 // Mark methods for deopt (if safe or possible).
1331 void CodeCache::mark_all_nmethods_for_deoptimization(DeoptimizationScope* deopt_scope) {
1332   MutexLocker mu(CodeCache_lock, Mutex::_no_safepoint_check_flag);
1333   NMethodIterator iter(NMethodIterator::not_unloading);
1334   while(iter.next()) {
1335     nmethod* nm = iter.method();
1336     if (!nm->is_native_method()) {
1337       deopt_scope->mark(nm);
1338     }
1339   }
1340 }
1341 
1342 void CodeCache::mark_for_deoptimization(DeoptimizationScope* deopt_scope, Method* dependee) {
1343   MutexLocker mu(CodeCache_lock, Mutex::_no_safepoint_check_flag);
1344 
1345   NMethodIterator iter(NMethodIterator::not_unloading);
1346   while(iter.next()) {
1347     nmethod* nm = iter.method();
1348     if (nm->is_dependent_on_method(dependee)) {
1349       deopt_scope->mark(nm);
1350     }
1351   }
1352 }
1353 
1354 void CodeCache::make_marked_nmethods_deoptimized() {
1355   RelaxedNMethodIterator iter(RelaxedNMethodIterator::not_unloading);
1356   while(iter.next()) {
1357     nmethod* nm = iter.method();
1358     if (nm->is_marked_for_deoptimization() && !nm->has_been_deoptimized() && nm->can_be_deoptimized()) {
1359       nm->make_not_entrant(nmethod::InvalidationReason::MARKED_FOR_DEOPTIMIZATION);
1360       nm->make_deoptimized();
1361     }
1362   }
1363 }
1364 
1365 // Marks compiled methods dependent on dependee.
1366 void CodeCache::mark_dependents_on(DeoptimizationScope* deopt_scope, InstanceKlass* dependee) {
1367   assert_lock_strong(Compile_lock);
1368 
1369   if (!has_nmethods_with_dependencies()) {
1370     return;
1371   }
1372 
1373   if (dependee->is_linked()) {
1374     // Class initialization state change.
1375     KlassInitDepChange changes(dependee);
1376     mark_for_deoptimization(deopt_scope, changes);
1377   } else {
1378     // New class is loaded.
1379     NewKlassDepChange changes(dependee);
1380     mark_for_deoptimization(deopt_scope, changes);
1381   }
1382 }
1383 
1384 // Marks compiled methods dependent on dependee
1385 void CodeCache::mark_dependents_on_method_for_breakpoint(const methodHandle& m_h) {
1386   assert(SafepointSynchronize::is_at_safepoint(), "invariant");
1387 
1388   DeoptimizationScope deopt_scope;
1389   // Compute the dependent nmethods
1390   mark_for_deoptimization(&deopt_scope, m_h());
1391   deopt_scope.deoptimize_marked();
1392 }
1393 
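     // Verifies each code heap and every blob it contains.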
1394 void CodeCache::verify() {
1395   assert_locked_or_safepoint(CodeCache_lock);
1396   FOR_ALL_HEAPS(heap) {
1397     (*heap)->verify();
1398     FOR_ALL_BLOBS(cb, *heap) {
1399       cb->verify();
1400     }
1401   }
1402 }
1403 
1404 // A CodeHeap is full. Print out warning and report event.
1405 PRAGMA_DIAG_PUSH
1406 PRAGMA_FORMAT_NONLITERAL_IGNORED
1407 void CodeCache::report_codemem_full(CodeBlobType code_blob_type, bool print) {
1408   // Get nmethod heap for the given CodeBlobType and build CodeCacheFull event
1409   CodeHeap* heap = get_code_heap(code_blob_type);
1410   assert(heap != nullptr, "heap is null");
1411 
1412   int full_count = heap->report_full();
1413 
1414   if ((full_count == 1) || print) {
1415     // Not yet reported for this heap (or printing was explicitly requested); report it now.
1416     if (SegmentedCodeCache) {
1417       ResourceMark rm;
1418       stringStream msg1_stream, msg2_stream;
1419       msg1_stream.print("%s is full. Compiler has been disabled.",
1420                         get_code_heap_name(code_blob_type));
1421       msg2_stream.print("Try increasing the code heap size using -XX:%s=",
1422                         get_code_heap_flag_name(code_blob_type));
1423       const char *msg1 = msg1_stream.as_string();
1424       const char *msg2 = msg2_stream.as_string();
1425 
1426       log_warning(codecache)("%s", msg1);
1427       log_warning(codecache)("%s", msg2);
1428       warning("%s", msg1);
1429       warning("%s", msg2);
1430     } else {
1431       const char *msg1 = "CodeCache is full. Compiler has been disabled.";
1432       const char *msg2 = "Try increasing the code cache size using -XX:ReservedCodeCacheSize=";
1433 
1434       log_warning(codecache)("%s", msg1);
1435       log_warning(codecache)("%s", msg2);
1436       warning("%s", msg1);
1437       warning("%s", msg2);
1438     }
1439     stringStream s;
1440     // Dump code cache into a buffer before locking the tty.
1441     {
1442       MutexLocker mu(CodeCache_lock, Mutex::_no_safepoint_check_flag);
1443       print_summary(&s);
1444     }
1445     {
1446       ttyLocker ttyl;
1447       tty->print("%s", s.freeze());
1448     }
1449 
1450     if (full_count == 1) {
1451       if (PrintCodeHeapAnalytics) {
1452         CompileBroker::print_heapinfo(tty, "all", 4096); // details, may be a lot!
1453       }
1454     }
1455   }
1456 
1457   EventCodeCacheFull event;
1458   if (event.should_commit()) {
1459     event.set_codeBlobType((u1)code_blob_type);
1460     event.set_startAddress((u8)heap->low_boundary());
1461     event.set_commitedTopAddress((u8)heap->high());
1462     event.set_reservedTopAddress((u8)heap->high_boundary());
1463     event.set_entryCount(heap->blob_count());
1464     event.set_methodCount(heap->nmethod_count());
1465     event.set_adaptorCount(heap->adapter_count());
1466     event.set_unallocatedCapacity(heap->unallocated_capacity());
1467     event.set_fullCount(heap->full_count());
1468     event.set_codeCacheMaxCapacity(CodeCache::max_capacity());
1469     event.commit();
1470   }
1471 }
1472 PRAGMA_DIAG_POP
1473 
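     // Prints the bookkeeping overhead of the code cache: free-list statistics, bytes left unused
     // in CodeBlobs because allocations are rounded up to CodeCacheSegmentSize, and the segment map size.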
1474 void CodeCache::print_memory_overhead() {
1475   size_t wasted_bytes = 0;
1476   FOR_ALL_ALLOCABLE_HEAPS(heap) {
1477       CodeHeap* curr_heap = *heap;
1478       for (CodeBlob* cb = (CodeBlob*)curr_heap->first(); cb != nullptr; cb = (CodeBlob*)curr_heap->next(cb)) {
1479         HeapBlock* heap_block = ((HeapBlock*)cb) - 1;
1480         wasted_bytes += heap_block->length() * CodeCacheSegmentSize - cb->size();
1481       }
1482   }
1483   // Print bytes that are allocated in the freelist
1484   ttyLocker ttl;
1485   tty->print_cr("Number of elements in freelist: %zd",       freelists_length());
1486   tty->print_cr("Allocated in freelist:          %zdkB",  bytes_allocated_in_freelists()/K);
1487   tty->print_cr("Unused bytes in CodeBlobs:      %zdkB",  (wasted_bytes/K));
1488   tty->print_cr("Segment map size:               %zdkB",  allocated_segments()/K); // 1 byte per segment
1489 }
1490 
1491 //------------------------------------------------------------------------------------------------
1492 // Non-product version
1493 
1494 #ifndef PRODUCT
1495 
1496 void CodeCache::print_trace(const char* event, CodeBlob* cb, uint size) {
1497   if (PrintCodeCache2) {  // Need to add a new flag
1498     ResourceMark rm;
1499     if (size == 0) {
1500       int s = cb->size();
1501       assert(s >= 0, "CodeBlob size is negative: %d", s);
1502       size = (uint) s;
1503     }
1504     tty->print_cr("CodeCache %s:  addr: " INTPTR_FORMAT ", size: 0x%x", event, p2i(cb), size);
1505   }
1506 }
1507 
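     // Prints per-type blob counts and an nmethod size histogram (512-byte buckets),
     // followed by the memory overhead reported by print_memory_overhead().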
1508 void CodeCache::print_internals() {
1509   int nmethodCount = 0;
1510   int runtimeStubCount = 0;
1511   int upcallStubCount = 0;
1512   int adapterCount = 0;
1513   int mhAdapterCount = 0;
1514   int vtableBlobCount = 0;
1515   int deoptimizationStubCount = 0;
1516   int uncommonTrapStubCount = 0;
1517   int exceptionStubCount = 0;
1518   int safepointStubCount = 0;
1519   int bufferBlobCount = 0;
1520   int total = 0;
1521   int nmethodNotEntrant = 0;
1522   int nmethodJava = 0;
1523   int nmethodNative = 0;
1524   int max_nm_size = 0;
1525   ResourceMark rm;
1526 
1527   int i = 0;
1528   FOR_ALL_ALLOCABLE_HEAPS(heap) {
1529     if ((_nmethod_heaps->length() >= 1) && Verbose) {
1530       tty->print_cr("-- %s --", (*heap)->name());
1531     }
1532     FOR_ALL_BLOBS(cb, *heap) {
1533       total++;
1534       if (cb->is_nmethod()) {
1535         nmethod* nm = (nmethod*)cb;
1536 
1537         if (Verbose && nm->method() != nullptr) {
1538           ResourceMark rm;
1539           char *method_name = nm->method()->name_and_sig_as_C_string();
1540           tty->print("%s", method_name);
1541           if (nm->is_not_entrant()) { tty->print_cr(" not-entrant"); }
1542         }
1543 
1544         nmethodCount++;
1545 
1546         if (nm->is_not_entrant()) { nmethodNotEntrant++; }
1547         if (nm->method() != nullptr && nm->is_native_method()) { nmethodNative++; }
1548 
1549         if (nm->method() != nullptr && nm->is_java_method()) {
1550           nmethodJava++;
1551           max_nm_size = MAX2(max_nm_size, nm->size());
1552         }
1553       } else if (cb->is_runtime_stub()) {
1554         runtimeStubCount++;
1555       } else if (cb->is_upcall_stub()) {
1556         upcallStubCount++;
1557       } else if (cb->is_deoptimization_stub()) {
1558         deoptimizationStubCount++;
1559       } else if (cb->is_uncommon_trap_stub()) {
1560         uncommonTrapStubCount++;
1561       } else if (cb->is_exception_stub()) {
1562         exceptionStubCount++;
1563       } else if (cb->is_safepoint_stub()) {
1564         safepointStubCount++;
1565       } else if (cb->is_adapter_blob()) {
1566         adapterCount++;
1567       } else if (cb->is_method_handles_adapter_blob()) {
1568         mhAdapterCount++;
1569       } else if (cb->is_vtable_blob()) {
1570         vtableBlobCount++;
1571       } else if (cb->is_buffer_blob()) {
1572         bufferBlobCount++;
1573       }
1574     }
1575   }
1576 
1577   int bucketSize = 512;
1578   int bucketLimit = max_nm_size / bucketSize + 1;
1579   int *buckets = NEW_C_HEAP_ARRAY(int, bucketLimit, mtCode);
1580   memset(buckets, 0, sizeof(int) * bucketLimit);
1581 
1582   NMethodIterator iter(NMethodIterator::all);
1583   while (iter.next()) {
1584     nmethod* nm = iter.method();
1585     if (nm->method() != nullptr && nm->is_java_method()) {
1586       buckets[nm->size() / bucketSize]++;
1587     }
1588   }
1589 
1590   tty->print_cr("Code Cache Entries (total of %d)",total);
1591   tty->print_cr("-------------------------------------------------");
1592   tty->print_cr("nmethods: %d",nmethodCount);
1593   tty->print_cr("\tnot_entrant: %d",nmethodNotEntrant);
1594   tty->print_cr("\tjava: %d",nmethodJava);
1595   tty->print_cr("\tnative: %d",nmethodNative);
1596   tty->print_cr("runtime_stubs: %d",runtimeStubCount);
1597   tty->print_cr("upcall_stubs: %d",upcallStubCount);
1598   tty->print_cr("adapters: %d",adapterCount);
1599   tty->print_cr("MH adapters: %d",mhAdapterCount);
1600   tty->print_cr("VTables: %d",vtableBlobCount);
1601   tty->print_cr("buffer blobs: %d",bufferBlobCount);
1602   tty->print_cr("deoptimization_stubs: %d",deoptimizationStubCount);
1603   tty->print_cr("uncommon_traps: %d",uncommonTrapStubCount);
1604   tty->print_cr("exception_stubs: %d",exceptionStubCount);
1605   tty->print_cr("safepoint_stubs: %d",safepointStubCount);
1606   tty->print_cr("\nnmethod size distribution");
1607   tty->print_cr("-------------------------------------------------");
1608 
1609   for (int i = 0; i < bucketLimit; i++) {
1610     if (buckets[i] != 0) {
1611       tty->print("%d - %d bytes",i*bucketSize,(i+1)*bucketSize);
1612       tty->fill_to(40);
1613       tty->print_cr("%d",buckets[i]);
1614     }
1615   }
1616 
1617   FREE_C_HEAP_ARRAY(int, buckets);
1618   print_memory_overhead();
1619 }
1620 
1621 #endif // !PRODUCT
1622 
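     // Prints a summary of the code cache. In non-product builds with Verbose enabled it also
     // prints nmethod sizes per compilation level, non-nmethod blob sizes and, in WizardMode,
     // OopMap usage.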
1623 void CodeCache::print() {
1624   print_summary(tty);
1625 
1626 #ifndef PRODUCT
1627   if (!Verbose) return;
1628 
1629   CodeBlob_sizes live[CompLevel_full_optimization + 1];
1630   CodeBlob_sizes runtimeStub;
1631   CodeBlob_sizes upcallStub;
1632   CodeBlob_sizes uncommonTrapStub;
1633   CodeBlob_sizes deoptimizationStub;
1634   CodeBlob_sizes exceptionStub;
1635   CodeBlob_sizes safepointStub;
1636   CodeBlob_sizes adapter;
1637   CodeBlob_sizes mhAdapter;
1638   CodeBlob_sizes vtableBlob;
1639   CodeBlob_sizes bufferBlob;
1640   CodeBlob_sizes other;
1641 
1642   FOR_ALL_ALLOCABLE_HEAPS(heap) {
1643     FOR_ALL_BLOBS(cb, *heap) {
1644       if (cb->is_nmethod()) {
1645         const int level = cb->as_nmethod()->comp_level();
1646         assert(0 <= level && level <= CompLevel_full_optimization, "Invalid compilation level");
1647         live[level].add(cb);
1648       } else if (cb->is_runtime_stub()) {
1649         runtimeStub.add(cb);
1650       } else if (cb->is_upcall_stub()) {
1651         upcallStub.add(cb);
1652       } else if (cb->is_deoptimization_stub()) {
1653         deoptimizationStub.add(cb);
1654       } else if (cb->is_uncommon_trap_stub()) {
1655         uncommonTrapStub.add(cb);
1656       } else if (cb->is_exception_stub()) {
1657         exceptionStub.add(cb);
1658       } else if (cb->is_safepoint_stub()) {
1659         safepointStub.add(cb);
1660       } else if (cb->is_adapter_blob()) {
1661         adapter.add(cb);
1662       } else if (cb->is_method_handles_adapter_blob()) {
1663         mhAdapter.add(cb);
1664       } else if (cb->is_vtable_blob()) {
1665         vtableBlob.add(cb);
1666       } else if (cb->is_buffer_blob()) {
1667         bufferBlob.add(cb);
1668       } else {
1669         other.add(cb);
1670       }
1671     }
1672   }
1673 
1674   tty->print_cr("nmethod dependency checking time %fs", dependentCheckTime.seconds());
1675 
1676   tty->print_cr("nmethod blobs per compilation level:");
1677   for (int i = 0; i <= CompLevel_full_optimization; i++) {
1678     const char *level_name;
1679     switch (i) {
1680     case CompLevel_none:              level_name = "none";              break;
1681     case CompLevel_simple:            level_name = "simple";            break;
1682     case CompLevel_limited_profile:   level_name = "limited profile";   break;
1683     case CompLevel_full_profile:      level_name = "full profile";      break;
1684     case CompLevel_full_optimization: level_name = "full optimization"; break;
1685     default: assert(false, "invalid compilation level");
1686     }
1687     tty->print_cr("%s:", level_name);
1688     live[i].print("live");
1689   }
1690 
1691   struct {
1692     const char* name;
1693     const CodeBlob_sizes* sizes;
1694   } non_nmethod_blobs[] = {
1695     { "runtime",        &runtimeStub },
1696     { "upcall",         &upcallStub },
1697     { "uncommon trap",  &uncommonTrapStub },
1698     { "deoptimization", &deoptimizationStub },
1699     { "exception",      &exceptionStub },
1700     { "safepoint",      &safepointStub },
1701     { "adapter",        &adapter },
1702     { "mh_adapter",     &mhAdapter },
1703     { "vtable",         &vtableBlob },
1704     { "buffer blob",    &bufferBlob },
1705     { "other",          &other },
1706   };
1707   tty->print_cr("Non-nmethod blobs:");
1708   for (auto& blob: non_nmethod_blobs) {
1709     blob.sizes->print(blob.name);
1710   }
1711 
1712   if (WizardMode) {
1713     // Print the oop_map usage.
1714     int code_size = 0;
1715     int number_of_blobs = 0;
1716     int number_of_oop_maps = 0;
1717     int map_size = 0;
1718     FOR_ALL_ALLOCABLE_HEAPS(heap) {
1719       FOR_ALL_BLOBS(cb, *heap) {
1720         number_of_blobs++;
1721         code_size += cb->code_size();
1722         ImmutableOopMapSet* set = cb->oop_maps();
1723         if (set != nullptr) {
1724           number_of_oop_maps += set->count();
1725           map_size           += set->nr_of_bytes();
1726         }
1727       }
1728     }
1729     tty->print_cr("OopMaps");
1730     tty->print_cr("  #blobs    = %d", number_of_blobs);
1731     tty->print_cr("  code size = %d", code_size);
1732     tty->print_cr("  #oop_maps = %d", number_of_oop_maps);
1733     tty->print_cr("  map size  = %d", map_size);
1734   }
1735 
1736 #endif // !PRODUCT
1737 }
1738 
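     // Prints size, used, max_used and free space for each code heap; with 'detailed' it also
     // prints the heap bounds, overall totals (for a segmented code cache), blob counts and the
     // current compiler state.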
1739 void CodeCache::print_summary(outputStream* st, bool detailed) {
1740   int full_count = 0;
1741   julong total_used = 0;
1742   julong total_max_used = 0;
1743   julong total_free = 0;
1744   julong total_size = 0;
1745   FOR_ALL_HEAPS(heap_iterator) {
1746     CodeHeap* heap = (*heap_iterator);
1747     size_t total = (heap->high_boundary() - heap->low_boundary());
1748     if (_heaps->length() >= 1) {
1749       st->print("%s:", heap->name());
1750     } else {
1751       st->print("CodeCache:");
1752     }
1753     size_t size = total/K;
1754     size_t used = (total - heap->unallocated_capacity())/K;
1755     size_t max_used = heap->max_allocated_capacity()/K;
1756     size_t free = heap->unallocated_capacity()/K;
1757     total_size += size;
1758     total_used += used;
1759     total_max_used += max_used;
1760     total_free += free;
1761     st->print_cr(" size=%zuKb used=%zu"
1762                  "Kb max_used=%zuKb free=%zuKb",
1763                  size, used, max_used, free);
1764 
1765     if (detailed) {
1766       st->print_cr(" bounds [" INTPTR_FORMAT ", " INTPTR_FORMAT ", " INTPTR_FORMAT "]",
1767                    p2i(heap->low_boundary()),
1768                    p2i(heap->high()),
1769                    p2i(heap->high_boundary()));
1770 
1771       full_count += get_codemem_full_count(heap->code_blob_type());
1772     }
1773   }
1774 
1775   if (detailed) {
1776     if (SegmentedCodeCache) {
1777       st->print("CodeCache:");
1778       st->print_cr(" size=" JULONG_FORMAT "Kb, used=" JULONG_FORMAT
1779                    "Kb, max_used=" JULONG_FORMAT "Kb, free=" JULONG_FORMAT "Kb",
1780                    total_size, total_used, total_max_used, total_free);
1781     }
1782     st->print_cr(" total_blobs=" UINT32_FORMAT ", nmethods=" UINT32_FORMAT
1783                  ", adapters=" UINT32_FORMAT ", full_count=" UINT32_FORMAT,
1784                  blob_count(), nmethod_count(), adapter_count(), full_count);
1785     st->print_cr("Compilation: %s, stopped_count=%d, restarted_count=%d",
1786                  CompileBroker::should_compile_new_jobs() ?
1787                  "enabled" : Arguments::mode() == Arguments::_int ?
1788                  "disabled (interpreter mode)" :
1789                  "disabled (not enough contiguous free space left)",
1790                  CompileBroker::get_total_compiler_stopped_count(),
1791                  CompileBroker::get_total_compiler_restarted_count());
1792   }
1793 }
1794 
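     // Prints one line per not-unloading nmethod: compile id, compilation level, state,
     // method name, and the header/code begin/code end addresses.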
1795 void CodeCache::print_codelist(outputStream* st) {
1796   MutexLocker mu(CodeCache_lock, Mutex::_no_safepoint_check_flag);
1797 
1798   NMethodIterator iter(NMethodIterator::not_unloading);
1799   while (iter.next()) {
1800     nmethod* nm = iter.method();
1801     ResourceMark rm;
1802     char* method_name = nm->method()->name_and_sig_as_C_string();
1803     const char* jvmci_name = nullptr;
1804 #if INCLUDE_JVMCI
1805     jvmci_name = nm->jvmci_name();
1806 #endif
1807     st->print_cr("%d %d %d %s%s%s [" INTPTR_FORMAT ", " INTPTR_FORMAT " - " INTPTR_FORMAT "]",
1808                  nm->compile_id(), nm->comp_level(), nm->get_state(),
1809                  method_name, jvmci_name ? " jvmci_name=" : "", jvmci_name ? jvmci_name : "",
1810                  (intptr_t)nm->header_begin(), (intptr_t)nm->code_begin(), (intptr_t)nm->code_end());
1811   }
1812 }
1813 
1814 void CodeCache::print_layout(outputStream* st) {
1815   MutexLocker mu(CodeCache_lock, Mutex::_no_safepoint_check_flag);
1816   ResourceMark rm;
1817   print_summary(st, true);
1818 }
1819 
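     // Prints code cache counters in name='value' attribute form (e.g. for inclusion in the
     // compilation log).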
1820 void CodeCache::log_state(outputStream* st) {
1821   st->print(" total_blobs='" UINT32_FORMAT "' nmethods='" UINT32_FORMAT "'"
1822             " adapters='" UINT32_FORMAT "' free_code_cache='%zu'",
1823             blob_count(), nmethod_count(), adapter_count(),
1824             unallocated_capacity());
1825 }
1826 
1827 #ifdef LINUX
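     // Writes one line per code blob ("<code start> <code size> <name>") to the given file,
     // in the format expected by the Linux 'perf' tool for resolving JIT-compiled symbols.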
1828 void CodeCache::write_perf_map(const char* filename, outputStream* st) {
1829   MutexLocker mu(CodeCache_lock, Mutex::_no_safepoint_check_flag);
1830   char fname[JVM_MAXPATHLEN];
1831   if (filename == nullptr) {
1832     // Invocation outside of jcmd requires pid substitution.
1833     if (!Arguments::copy_expand_pid(DEFAULT_PERFMAP_FILENAME,
1834                                     strlen(DEFAULT_PERFMAP_FILENAME),
1835                                     fname, JVM_MAXPATHLEN)) {
1836       st->print_cr("Warning: Not writing perf map as pid substitution failed.");
1837       return;
1838     }
1839     filename = fname;
1840   }
1841   fileStream fs(filename, "w");
1842   if (!fs.is_open()) {
1843     st->print_cr("Warning: Failed to create %s for perf map", filename);
1844     return;
1845   }
1846 
1847   AllCodeBlobsIterator iter(AllCodeBlobsIterator::not_unloading);
1848   while (iter.next()) {
1849     CodeBlob *cb = iter.method();
1850     ResourceMark rm;
1851     const char* method_name = nullptr;
1852     const char* jvmci_name = nullptr;
1853     if (cb->is_nmethod()) {
1854       nmethod* nm = cb->as_nmethod();
1855       method_name = nm->method()->external_name();
1856 #if INCLUDE_JVMCI
1857       jvmci_name = nm->jvmci_name();
1858 #endif
1859     } else {
1860       method_name = cb->name();
1861     }
1862     fs.print_cr(INTPTR_FORMAT " " INTPTR_FORMAT " %s%s%s",
1863                 (intptr_t)cb->code_begin(), (intptr_t)cb->code_size(),
1864                 method_name, jvmci_name ? " jvmci_name=" : "", jvmci_name ? jvmci_name : "");
1865   }
1866 }
1867 #endif // LINUX
1868 
1869 //---<  BEGIN  >--- CodeHeap State Analytics.
1870 
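     // The functions below forward to CodeHeapState for each allocable code heap:
     // aggregate() collects the per-heap statistics that the subsequent print_*() functions
     // report, and discard() releases that data again.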
1871 void CodeCache::aggregate(outputStream *out, size_t granularity) {
1872   FOR_ALL_ALLOCABLE_HEAPS(heap) {
1873     CodeHeapState::aggregate(out, (*heap), granularity);
1874   }
1875 }
1876 
1877 void CodeCache::discard(outputStream *out) {
1878   FOR_ALL_ALLOCABLE_HEAPS(heap) {
1879     CodeHeapState::discard(out, (*heap));
1880   }
1881 }
1882 
1883 void CodeCache::print_usedSpace(outputStream *out) {
1884   FOR_ALL_ALLOCABLE_HEAPS(heap) {
1885     CodeHeapState::print_usedSpace(out, (*heap));
1886   }
1887 }
1888 
1889 void CodeCache::print_freeSpace(outputStream *out) {
1890   FOR_ALL_ALLOCABLE_HEAPS(heap) {
1891     CodeHeapState::print_freeSpace(out, (*heap));
1892   }
1893 }
1894 
1895 void CodeCache::print_count(outputStream *out) {
1896   FOR_ALL_ALLOCABLE_HEAPS(heap) {
1897     CodeHeapState::print_count(out, (*heap));
1898   }
1899 }
1900 
1901 void CodeCache::print_space(outputStream *out) {
1902   FOR_ALL_ALLOCABLE_HEAPS(heap) {
1903     CodeHeapState::print_space(out, (*heap));
1904   }
1905 }
1906 
1907 void CodeCache::print_age(outputStream *out) {
1908   FOR_ALL_ALLOCABLE_HEAPS(heap) {
1909     CodeHeapState::print_age(out, (*heap));
1910   }
1911 }
1912 
1913 void CodeCache::print_names(outputStream *out) {
1914   FOR_ALL_ALLOCABLE_HEAPS(heap) {
1915     CodeHeapState::print_names(out, (*heap));
1916   }
1917 }
1918 //---<  END  >--- CodeHeap State Analytics.