1 /*
   2  * Copyright (c) 1997, 2025, Oracle and/or its affiliates. All rights reserved.
   3  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
   4  *
   5  * This code is free software; you can redistribute it and/or modify it
   6  * under the terms of the GNU General Public License version 2 only, as
   7  * published by the Free Software Foundation.
   8  *
   9  * This code is distributed in the hope that it will be useful, but WITHOUT
  10  * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
  11  * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
  12  * version 2 for more details (a copy is included in the LICENSE file that
  13  * accompanied this code).
  14  *
  15  * You should have received a copy of the GNU General Public License version
  16  * 2 along with this work; if not, write to the Free Software Foundation,
  17  * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
  18  *
  19  * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
  20  * or visit www.oracle.com if you need additional information or have any
  21  * questions.
  22  *
  23  */
  24 
  25 #include "cds/aotCacheAccess.hpp"
  26 #include "code/codeBlob.hpp"
  27 #include "code/codeCache.hpp"
  28 #include "code/codeHeapState.hpp"
  29 #include "code/compiledIC.hpp"
  30 #include "code/dependencies.hpp"
  31 #include "code/dependencyContext.hpp"
  32 #include "code/nmethod.hpp"
  33 #include "code/pcDesc.hpp"
  34 #include "compiler/compilationPolicy.hpp"
  35 #include "compiler/compileBroker.hpp"
  36 #include "compiler/compilerDefinitions.inline.hpp"
  37 #include "compiler/oopMap.hpp"
  38 #include "gc/shared/barrierSetNMethod.hpp"
  39 #include "gc/shared/classUnloadingContext.hpp"
  40 #include "gc/shared/collectedHeap.hpp"
  41 #include "jfr/jfrEvents.hpp"
  42 #include "jvm_io.h"
  43 #include "logging/log.hpp"
  44 #include "logging/logStream.hpp"
  45 #include "memory/allocation.inline.hpp"
  46 #include "memory/iterator.hpp"
  47 #include "memory/memoryReserver.hpp"
  48 #include "memory/resourceArea.hpp"
  49 #include "memory/universe.hpp"
  50 #include "oops/method.inline.hpp"
  51 #include "oops/objArrayOop.hpp"
  52 #include "oops/oop.inline.hpp"
  53 #include "oops/verifyOopClosure.hpp"
  54 #include "runtime/arguments.hpp"
  55 #include "runtime/atomicAccess.hpp"
  56 #include "runtime/deoptimization.hpp"
  57 #include "runtime/globals_extension.hpp"
  58 #include "runtime/handles.inline.hpp"
  59 #include "runtime/icache.hpp"
  60 #include "runtime/init.hpp"
  61 #include "runtime/java.hpp"
  62 #include "runtime/mutexLocker.hpp"
  63 #include "runtime/os.inline.hpp"
  64 #include "runtime/safepointVerifiers.hpp"
  65 #include "runtime/vmThread.hpp"
  66 #include "sanitizers/leak.hpp"
  67 #include "services/memoryService.hpp"
  68 #include "utilities/align.hpp"
  69 #include "utilities/vmError.hpp"
  70 #include "utilities/xmlstream.hpp"
  71 #ifdef COMPILER1
  72 #include "c1/c1_Compilation.hpp"
  73 #include "c1/c1_Compiler.hpp"
  74 #endif
  75 #ifdef COMPILER2
  76 #include "opto/c2compiler.hpp"
  77 #include "opto/compile.hpp"
  78 #include "opto/node.hpp"
  79 #endif
  80 
  81 // Helper class for printing in CodeCache
  82 class CodeBlob_sizes {
  83  private:
  84   int count;
  85   int total_size;
  86   int header_size;
  87   int code_size;
  88   int stub_size;
  89   int relocation_size;
  90   int scopes_oop_size;
  91   int scopes_metadata_size;
  92   int scopes_data_size;
  93   int scopes_pcs_size;
  94 
  95  public:
  96   CodeBlob_sizes() {
  97     count            = 0;
  98     total_size       = 0;
  99     header_size      = 0;
 100     code_size        = 0;
 101     stub_size        = 0;
 102     relocation_size  = 0;
 103     scopes_oop_size  = 0;
 104     scopes_metadata_size  = 0;
 105     scopes_data_size = 0;
 106     scopes_pcs_size  = 0;
 107   }
 108 
 109   int total() const                              { return total_size; }
 110   bool is_empty() const                          { return count == 0; }
 111 
 112   void print(const char* title) const {
 113     if (is_empty()) {
 114       tty->print_cr(" #%d %s = %dK",
 115                     count,
 116                     title,
 117                     total()                 / (int)K);
 118     } else {
 119       tty->print_cr(" #%d %s = %dK (hdr %dK %d%%, loc %dK %d%%, code %dK %d%%, stub %dK %d%%, [oops %dK %d%%, metadata %dK %d%%, data %dK %d%%, pcs %dK %d%%])",
 120                     count,
 121                     title,
 122                     total()                 / (int)K,
 123                     header_size             / (int)K,
 124                     header_size             * 100 / total_size,
 125                     relocation_size         / (int)K,
 126                     relocation_size         * 100 / total_size,
 127                     code_size               / (int)K,
 128                     code_size               * 100 / total_size,
 129                     stub_size               / (int)K,
 130                     stub_size               * 100 / total_size,
 131                     scopes_oop_size         / (int)K,
 132                     scopes_oop_size         * 100 / total_size,
 133                     scopes_metadata_size    / (int)K,
 134                     scopes_metadata_size    * 100 / total_size,
 135                     scopes_data_size        / (int)K,
 136                     scopes_data_size        * 100 / total_size,
 137                     scopes_pcs_size         / (int)K,
 138                     scopes_pcs_size         * 100 / total_size);
 139     }
 140   }
 141 
 142   void add(CodeBlob* cb) {
 143     count++;
 144     total_size       += cb->size();
 145     header_size      += cb->header_size();
 146     relocation_size  += cb->relocation_size();
 147     if (cb->is_nmethod()) {
 148       nmethod* nm = cb->as_nmethod_or_null();
 149       code_size        += nm->insts_size();
 150       stub_size        += nm->stub_size();
 151 
 152       scopes_oop_size  += nm->oops_size();
 153       scopes_metadata_size  += nm->metadata_size();
 154       scopes_data_size += nm->scopes_data_size();
 155       scopes_pcs_size  += nm->scopes_pcs_size();
 156     } else {
 157       code_size        += cb->code_size();
 158     }
 159   }
 160 };
 161 
 162 // Iterate over all CodeHeaps
 163 #define FOR_ALL_HEAPS(heap) for (GrowableArrayIterator<CodeHeap*> heap = _heaps->begin(); heap != _heaps->end(); ++heap)
 164 #define FOR_ALL_ALLOCABLE_HEAPS(heap) for (GrowableArrayIterator<CodeHeap*> heap = _allocable_heaps->begin(); heap != _allocable_heaps->end(); ++heap)
 165 
 166 // Iterate over all CodeBlobs (cb) on the given CodeHeap
 167 #define FOR_ALL_BLOBS(cb, heap) for (CodeBlob* cb = first_blob(heap); cb != nullptr; cb = next_blob(heap, cb))
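     // Example usage (cf. blobs_do() below): visit every CodeBlob in every CodeHeap with
     //   FOR_ALL_HEAPS(heap) {
     //     FOR_ALL_BLOBS(cb, *heap) { f(cb); }
     //   }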
 168 
 169 address CodeCache::_low_bound = nullptr;
 170 address CodeCache::_high_bound = nullptr;
 171 volatile int CodeCache::_number_of_nmethods_with_dependencies = 0;
 172 ExceptionCache* volatile CodeCache::_exception_cache_purge_list = nullptr;
 173 
 174 static ReservedSpace _cds_code_space;
 175 
 176 // Initialize arrays of CodeHeap subsets
 177 GrowableArray<CodeHeap*>* CodeCache::_heaps = new(mtCode) GrowableArray<CodeHeap*> (static_cast<int>(CodeBlobType::All), mtCode);
 178 GrowableArray<CodeHeap*>* CodeCache::_nmethod_heaps = new(mtCode) GrowableArray<CodeHeap*> (static_cast<int>(CodeBlobType::All), mtCode);
 179 GrowableArray<CodeHeap*>* CodeCache::_allocable_heaps = new(mtCode) GrowableArray<CodeHeap*> (static_cast<int>(CodeBlobType::All), mtCode);
 180 
 181 static void check_min_size(const char* codeheap, size_t size, size_t required_size) {
 182   if (size < required_size) {
 183     log_debug(codecache)("Code heap (%s) size %zuK below required minimal size %zuK",
 184                          codeheap, size/K, required_size/K);
 185     err_msg title("Not enough space in %s to run VM", codeheap);
 186     err_msg message("%zuK < %zuK", size/K, required_size/K);
 187     vm_exit_during_initialization(title, message);
 188   }
 189 }
 190 
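     // Sizing information for one code heap, gathered while initialize_heaps() works out the
     // final sizes: 'size' is the requested/computed heap size, 'set' records whether the size
     // was specified explicitly (or forced), and 'enabled' whether the heap will be used.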
 191 struct CodeHeapInfo {
 192   size_t size;
 193   bool set;
 194   bool enabled;
 195 };
 196 
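     // Give an unset code heap whatever remains of the available space after the other heaps,
     // but never less than min_size. Illustrative numbers: available_size = 240M, used_size = 200M
     // and min_size = 2M yield 40M; with used_size = 239M the heap would get the 2M minimum.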
 197 static void set_size_of_unset_code_heap(CodeHeapInfo* heap, size_t available_size, size_t used_size, size_t min_size) {
 198   assert(!heap->set, "sanity");
 199   heap->size = (available_size > (used_size + min_size)) ? (available_size - used_size) : min_size;
 200 }
 201 
 202 void CodeCache::initialize_heaps() {
 203   CodeHeapInfo non_nmethod = {NonNMethodCodeHeapSize, FLAG_IS_CMDLINE(NonNMethodCodeHeapSize), true};
 204   CodeHeapInfo profiled = {ProfiledCodeHeapSize, FLAG_IS_CMDLINE(ProfiledCodeHeapSize), true};
 205   CodeHeapInfo non_profiled = {NonProfiledCodeHeapSize, FLAG_IS_CMDLINE(NonProfiledCodeHeapSize), true};
 206 
 207   const bool cache_size_set   = FLAG_IS_CMDLINE(ReservedCodeCacheSize);
 208   const size_t ps             = page_size(false, 8);
 209   const size_t min_size       = MAX2(os::vm_allocation_granularity(), ps);
 210   const size_t min_cache_size = CodeCacheMinimumUseSpace DEBUG_ONLY(* 3); // Make sure we have enough space for VM internal code
 211   size_t cache_size           = align_up(ReservedCodeCacheSize, min_size);
 212 
 213   // Prerequisites
 214   if (!heap_available(CodeBlobType::MethodProfiled)) {
 215     // For compatibility reasons, disabling tiered compilation overrides
 216     // the segment sizes even if they were set explicitly.
 217     non_profiled.size += profiled.size;
 218     // Profiled code heap is not available, forcibly set size to 0
 219     profiled.size = 0;
 220     profiled.set = true;
 221     profiled.enabled = false;
 222   }
 223 
 224   assert(heap_available(CodeBlobType::MethodNonProfiled), "MethodNonProfiled heap is always available for segmented code heap");
 225 
 226   size_t compiler_buffer_size = 0;
 227   COMPILER1_PRESENT(compiler_buffer_size += CompilationPolicy::c1_count() * Compiler::code_buffer_size());
 228   COMPILER2_PRESENT(compiler_buffer_size += CompilationPolicy::c2_count() * C2Compiler::initial_code_buffer_size());
 229 
 230   if (!non_nmethod.set) {
 231     non_nmethod.size += compiler_buffer_size;
 232   }
 233 
 234   if (!profiled.set && !non_profiled.set) {
 235     non_profiled.size = profiled.size = (cache_size > non_nmethod.size + 2 * min_size) ?
 236                                         (cache_size - non_nmethod.size) / 2 : min_size;
 237   }
 238 
 239   if (profiled.set && !non_profiled.set) {
 240     set_size_of_unset_code_heap(&non_profiled, cache_size, non_nmethod.size + profiled.size, min_size);
 241   }
 242 
 243   if (!profiled.set && non_profiled.set) {
 244     set_size_of_unset_code_heap(&profiled, cache_size, non_nmethod.size + non_profiled.size, min_size);
 245   }
 246 
 247   // Compatibility.
 248   size_t non_nmethod_min_size = min_cache_size + compiler_buffer_size;
 249   if (!non_nmethod.set && profiled.set && non_profiled.set) {
 250     set_size_of_unset_code_heap(&non_nmethod, cache_size, profiled.size + non_profiled.size, non_nmethod_min_size);
 251   }
 252 
 253   size_t total = non_nmethod.size + profiled.size + non_profiled.size;
 254   if (total != cache_size && !cache_size_set) {
 255     log_info(codecache)("ReservedCodeCache size %zuK changed to total segments size NonNMethod "
 256                         "%zuK NonProfiled %zuK Profiled %zuK = %zuK",
 257                         cache_size/K, non_nmethod.size/K, non_profiled.size/K, profiled.size/K, total/K);
 258     // Adjust ReservedCodeCacheSize as necessary because it was not set explicitly
 259     cache_size = total;
 260   }
 261 
 262   log_debug(codecache)("Initializing code heaps ReservedCodeCache %zuK NonNMethod %zuK"
 263                        " NonProfiled %zuK Profiled %zuK",
 264                        cache_size/K, non_nmethod.size/K, non_profiled.size/K, profiled.size/K);
 265 
 266   // Validation
 267   // Check minimal required sizes
 268   check_min_size("non-nmethod code heap", non_nmethod.size, non_nmethod_min_size);
 269   if (profiled.enabled) {
 270     check_min_size("profiled code heap", profiled.size, min_size);
 271   }
 272   if (non_profiled.enabled) { // non_profiled.enabled is always ON for segmented code heap, leave it checked for clarity
 273     check_min_size("non-profiled code heap", non_profiled.size, min_size);
 274   }
 275   if (cache_size_set) {
 276     check_min_size("reserved code cache", cache_size, min_cache_size);
 277   }
 278 
 279   // ReservedCodeCacheSize was set explicitly, so report an error and abort if it doesn't match the segment sizes
 280   if (total != cache_size && cache_size_set) {
 281     err_msg message("NonNMethodCodeHeapSize (%zuK)", non_nmethod.size/K);
 282     if (profiled.enabled) {
 283       message.append(" + ProfiledCodeHeapSize (%zuK)", profiled.size/K);
 284     }
 285     if (non_profiled.enabled) {
 286       message.append(" + NonProfiledCodeHeapSize (%zuK)", non_profiled.size/K);
 287     }
 288     message.append(" = %zuK", total/K);
 289     message.append((total > cache_size) ? " is greater than " : " is less than ");
 290     message.append("ReservedCodeCacheSize (%zuK).", cache_size/K);
 291 
 292     vm_exit_during_initialization("Invalid code heap sizes", message);
 293   }
 294 
 295   // Compatibility. Print warning if using large pages but not able to use the size given
 296   if (UseLargePages) {
 297     const size_t lg_ps = page_size(false, 1);
 298     if (ps < lg_ps) {
 299       log_warning(codecache)("Code cache size too small for " PROPERFMT " pages. "
 300                              "Reverting to smaller page size (" PROPERFMT ").",
 301                              PROPERFMTARGS(lg_ps), PROPERFMTARGS(ps));
 302     }
 303   }
 304 
 305   // Note: if large page support is enabled, min_size is at least the large
 306   // page size. This ensures that the code cache is covered by large pages.
 307   non_nmethod.size = align_up(non_nmethod.size, min_size);
 308   profiled.size = align_up(profiled.size, min_size);
 309   non_profiled.size = align_up(non_profiled.size, min_size);
 310   cache_size = non_nmethod.size + profiled.size + non_profiled.size;
 311 
 312   FLAG_SET_ERGO(NonNMethodCodeHeapSize, non_nmethod.size);
 313   FLAG_SET_ERGO(ProfiledCodeHeapSize, profiled.size);
 314   FLAG_SET_ERGO(NonProfiledCodeHeapSize, non_profiled.size);
 315   FLAG_SET_ERGO(ReservedCodeCacheSize, cache_size);
 316 
 317   const size_t cds_code_size = 0;
 318   // FIXME: we should not increase CodeCache size - it affects branches.
 319   // Instead we need to create separate code heap in CodeCache for AOT code.
 320   // const size_t cds_code_size = align_up(AOTCacheAccess::get_aot_code_region_size(), min_size);
 321   // cache_size += cds_code_size;
 322 
 323   ReservedSpace rs = reserve_heap_memory(cache_size, ps);
 324 
 325   // Register CodeHeaps with LSan as we sometimes embed pointers to malloc memory.
 326   LSAN_REGISTER_ROOT_REGION(rs.base(), rs.size());
 327 
 328   size_t offset = 0;
 329   if (cds_code_size > 0) {
 330     // FIXME: use CodeHeapInfo for this hack ...
 331     _cds_code_space = rs.partition(offset, cds_code_size);
 332     offset += cds_code_size;
 333   }
 334 
 335   if (profiled.enabled) {
 336     ReservedSpace profiled_space = rs.partition(offset, profiled.size);
 337     offset += profiled.size;
 338     // Tier 2 and tier 3 (profiled) methods
 339     add_heap(profiled_space, "CodeHeap 'profiled nmethods'", CodeBlobType::MethodProfiled);
 340   }
 341 
 342   ReservedSpace non_method_space = rs.partition(offset, non_nmethod.size);
 343   offset += non_nmethod.size;
 344   // Non-nmethods (stubs, adapters, ...)
 345   add_heap(non_method_space, "CodeHeap 'non-nmethods'", CodeBlobType::NonNMethod);
 346 
 347   if (non_profiled.enabled) {
 348     ReservedSpace non_profiled_space  = rs.partition(offset, non_profiled.size);
 349     // Tier 1 and tier 4 (non-profiled) methods and native methods
 350     add_heap(non_profiled_space, "CodeHeap 'non-profiled nmethods'", CodeBlobType::MethodNonProfiled);
 351   }
 352 }
 353 
 354 void* CodeCache::map_aot_code() {
 355   if (_cds_code_space.size() > 0 && AOTCacheAccess::map_aot_code_region(_cds_code_space)) {
 356     return _cds_code_space.base();
 357   } else {
 358     return nullptr;
 359   }
 360 }
 361 
 362 size_t CodeCache::page_size(bool aligned, size_t min_pages) {
 363   return aligned ? os::page_size_for_region_aligned(ReservedCodeCacheSize, min_pages) :
 364                    os::page_size_for_region_unaligned(ReservedCodeCacheSize, min_pages);
 365 }
 366 
 367 ReservedSpace CodeCache::reserve_heap_memory(size_t size, size_t rs_ps) {
 368   // Align and reserve space for code cache
 369   const size_t rs_align = MAX2(rs_ps, os::vm_allocation_granularity());
 370   const size_t rs_size = align_up(size, rs_align);
 371 
 372   ReservedSpace rs = CodeMemoryReserver::reserve(rs_size, rs_align, rs_ps);
 373   if (!rs.is_reserved()) {
 374     vm_exit_during_initialization(err_msg("Could not reserve enough space for code cache (%zuK)",
 375                                           rs_size/K));
 376   }
 377 
 378   // Initialize bounds
 379   _low_bound = (address)rs.base();
 380   _high_bound = _low_bound + rs.size();
 381   return rs;
 382 }
 383 
 384 // Heaps available for allocation
 385 bool CodeCache::heap_available(CodeBlobType code_blob_type) {
 386   if (!SegmentedCodeCache) {
 387     // No segmentation: use a single code heap
 388     return (code_blob_type == CodeBlobType::All);
 389   } else if (CompilerConfig::is_interpreter_only()) {
 390     // Interpreter only: we don't need any method code heaps
 391     return (code_blob_type == CodeBlobType::NonNMethod);
 392   } else if (CompilerConfig::is_c1_profiling()) {
 393     // Tiered compilation: use all code heaps
 394     return (code_blob_type < CodeBlobType::All);
 395   } else {
 396     // No TieredCompilation: we only need the non-nmethod and non-profiled code heap
 397     return (code_blob_type == CodeBlobType::NonNMethod) ||
 398            (code_blob_type == CodeBlobType::MethodNonProfiled);
 399   }
 400 }
 401 
 402 const char* CodeCache::get_code_heap_flag_name(CodeBlobType code_blob_type) {
 403   switch(code_blob_type) {
 404   case CodeBlobType::NonNMethod:
 405     return "NonNMethodCodeHeapSize";
 406     break;
 407   case CodeBlobType::MethodNonProfiled:
 408     return "NonProfiledCodeHeapSize";
 409     break;
 410   case CodeBlobType::MethodProfiled:
 411     return "ProfiledCodeHeapSize";
 412     break;
 413   default:
 414     ShouldNotReachHere();
 415     return nullptr;
 416   }
 417 }
 418 
 419 int CodeCache::code_heap_compare(CodeHeap* const &lhs, CodeHeap* const &rhs) {
 420   if (lhs->code_blob_type() == rhs->code_blob_type()) {
 421     return (lhs > rhs) ? 1 : ((lhs < rhs) ? -1 : 0);
 422   } else {
 423     return static_cast<int>(lhs->code_blob_type()) - static_cast<int>(rhs->code_blob_type());
 424   }
 425 }
 426 
 427 void CodeCache::add_heap(CodeHeap* heap) {
 428   assert(!Universe::is_fully_initialized(), "late heap addition?");
 429 
 430   _heaps->insert_sorted<code_heap_compare>(heap);
 431 
 432   CodeBlobType type = heap->code_blob_type();
 433   if (code_blob_type_accepts_nmethod(type)) {
 434     _nmethod_heaps->insert_sorted<code_heap_compare>(heap);
 435   }
 436   if (code_blob_type_accepts_allocable(type)) {
 437     _allocable_heaps->insert_sorted<code_heap_compare>(heap);
 438   }
 439 }
 440 
 441 void CodeCache::add_heap(ReservedSpace rs, const char* name, CodeBlobType code_blob_type) {
 442   // Check if heap is needed
 443   if (!heap_available(code_blob_type)) {
 444     return;
 445   }
 446 
 447   // Create CodeHeap
 448   CodeHeap* heap = new CodeHeap(name, code_blob_type);
 449   add_heap(heap);
 450 
 451   // Reserve Space
 452   size_t size_initial = MIN2(InitialCodeCacheSize, rs.size());
 453   size_initial = align_up(size_initial, rs.page_size());
 454   if (!heap->reserve(rs, size_initial, CodeCacheSegmentSize)) {
 455     vm_exit_during_initialization(err_msg("Could not reserve enough space in %s (%zuK)",
 456                                           heap->name(), size_initial/K));
 457   }
 458 
 459   // Register the CodeHeap
 460   MemoryService::add_code_heap_memory_pool(heap, name);
 461 }
 462 
 463 CodeHeap* CodeCache::get_code_heap_containing(void* start) {
 464   FOR_ALL_HEAPS(heap) {
 465     if ((*heap)->contains(start)) {
 466       return *heap;
 467     }
 468   }
 469   return nullptr;
 470 }
 471 
 472 CodeHeap* CodeCache::get_code_heap(const void* cb) {
 473   assert(cb != nullptr, "CodeBlob is null");
 474   FOR_ALL_HEAPS(heap) {
 475     if ((*heap)->contains(cb)) {
 476       return *heap;
 477     }
 478   }
 479   ShouldNotReachHere();
 480   return nullptr;
 481 }
 482 
 483 CodeHeap* CodeCache::get_code_heap(CodeBlobType code_blob_type) {
 484   FOR_ALL_HEAPS(heap) {
 485     if ((*heap)->accepts(code_blob_type)) {
 486       return *heap;
 487     }
 488   }
 489   return nullptr;
 490 }
 491 
 492 CodeBlob* CodeCache::first_blob(CodeHeap* heap) {
 493   assert_locked_or_safepoint(CodeCache_lock);
 494   assert(heap != nullptr, "heap is null");
 495   return (CodeBlob*)heap->first();
 496 }
 497 
 498 CodeBlob* CodeCache::first_blob(CodeBlobType code_blob_type) {
 499   if (heap_available(code_blob_type)) {
 500     return first_blob(get_code_heap(code_blob_type));
 501   } else {
 502     return nullptr;
 503   }
 504 }
 505 
 506 CodeBlob* CodeCache::next_blob(CodeHeap* heap, CodeBlob* cb) {
 507   assert_locked_or_safepoint(CodeCache_lock);
 508   assert(heap != nullptr, "heap is null");
 509   return (CodeBlob*)heap->next(cb);
 510 }
 511 
 512 /**
 513  * Do not seize the CodeCache lock here--if the caller has not
 514  * already done so, we are going to lose bigtime, since the code
 515  * cache will contain a garbage CodeBlob until the caller can
 516  * run the constructor for the CodeBlob subclass it is busy
 517  * instantiating.
 518  */
 519 CodeBlob* CodeCache::allocate(uint size, CodeBlobType code_blob_type, bool handle_alloc_failure, CodeBlobType orig_code_blob_type) {
 520   assert_locked_or_safepoint(CodeCache_lock);
 521   assert(size > 0, "Code cache allocation request must be > 0");
 522   if (size == 0) {
 523     return nullptr;
 524   }
 525   CodeBlob* cb = nullptr;
 526 
 527   // Get CodeHeap for the given CodeBlobType
 528   CodeHeap* heap = get_code_heap(code_blob_type);
 529   assert(heap != nullptr, "heap is null");
 530 
 531   while (true) {
 532     cb = (CodeBlob*)heap->allocate(size);
 533     if (cb != nullptr) break;
 534     if (!heap->expand_by(CodeCacheExpansionSize)) {
 535       // Save original type for error reporting
 536       if (orig_code_blob_type == CodeBlobType::All) {
 537         orig_code_blob_type = code_blob_type;
 538       }
 539       // Expansion failed
 540       if (SegmentedCodeCache) {
 541         // Fallback solution: Try to store code in another code heap.
 542         // NonNMethod -> MethodNonProfiled -> MethodProfiled (-> MethodNonProfiled)
 543         CodeBlobType type = code_blob_type;
 544         switch (type) {
 545         case CodeBlobType::NonNMethod:
 546           type = CodeBlobType::MethodNonProfiled;
 547           break;
 548         case CodeBlobType::MethodNonProfiled:
 549           type = CodeBlobType::MethodProfiled;
 550           break;
 551         case CodeBlobType::MethodProfiled:
 552           // Avoid loop if we already tried that code heap
 553           if (type == orig_code_blob_type) {
 554             type = CodeBlobType::MethodNonProfiled;
 555           }
 556           break;
 557         default:
 558           break;
 559         }
 560         if (type != code_blob_type && type != orig_code_blob_type && heap_available(type)) {
 561           if (PrintCodeCacheExtension) {
 562             tty->print_cr("Extension of %s failed. Trying to allocate in %s.",
 563                           heap->name(), get_code_heap(type)->name());
 564           }
 565           return allocate(size, type, handle_alloc_failure, orig_code_blob_type);
 566         }
 567       }
 568       if (handle_alloc_failure) {
 569         MutexUnlocker mu(CodeCache_lock, Mutex::_no_safepoint_check_flag);
 570         CompileBroker::handle_full_code_cache(orig_code_blob_type);
 571       }
 572       return nullptr;
 573     } else {
 574       OrderAccess::release(); // ensure heap expansion is visible to an asynchronous observer (e.g. CodeHeapPool::get_memory_usage())
 575     }
 576     if (PrintCodeCacheExtension) {
 577       ResourceMark rm;
 578       if (_nmethod_heaps->length() >= 1) {
 579         tty->print("%s", heap->name());
 580       } else {
 581         tty->print("CodeCache");
 582       }
 583       tty->print_cr(" extended to [" INTPTR_FORMAT ", " INTPTR_FORMAT "] (%zd bytes)",
 584                     (intptr_t)heap->low_boundary(), (intptr_t)heap->high(),
 585                     (address)heap->high() - (address)heap->low_boundary());
 586     }
 587   }
 588   print_trace("allocation", cb, size);
 589   return cb;
 590 }
 591 
 592 void CodeCache::free(CodeBlob* cb) {
 593   assert_locked_or_safepoint(CodeCache_lock);
 594   CodeHeap* heap = get_code_heap(cb);
 595   print_trace("free", cb);
 596   if (cb->is_nmethod()) {
 597     heap->set_nmethod_count(heap->nmethod_count() - 1);
 598     if (((nmethod *)cb)->has_dependencies()) {
 599       AtomicAccess::dec(&_number_of_nmethods_with_dependencies);
 600     }
 601   }
 602   if (cb->is_adapter_blob()) {
 603     heap->set_adapter_count(heap->adapter_count() - 1);
 604   }
 605 
 606   cb->~CodeBlob();
 607   // Get heap for given CodeBlob and deallocate
 608   heap->deallocate(cb);
 609 
 610   assert(heap->blob_count() >= 0, "sanity check");
 611 }
 612 
 613 void CodeCache::free_unused_tail(CodeBlob* cb, size_t used) {
 614   assert_locked_or_safepoint(CodeCache_lock);
 615   guarantee(cb->is_buffer_blob() && strncmp("Interpreter", cb->name(), 11) == 0, "Only possible for interpreter!");
 616   print_trace("free_unused_tail", cb);
 617 
 618   // We also have to account for the extra space (i.e. header) used by the CodeBlob
 619   // which provides the memory (see BufferBlob::create() in codeBlob.cpp).
 620   used += CodeBlob::align_code_offset(cb->header_size());
 621 
 622   // Get heap for given CodeBlob and deallocate its unused tail
 623   get_code_heap(cb)->deallocate_tail(cb, used);
 624   // Adjust the sizes of the CodeBlob
 625   cb->adjust_size(used);
 626 }
 627 
 628 void CodeCache::commit(CodeBlob* cb) {
 629   // this is called by nmethod::nmethod, which must already own CodeCache_lock
 630   assert_locked_or_safepoint(CodeCache_lock);
 631   CodeHeap* heap = get_code_heap(cb);
 632   if (cb->is_nmethod()) {
 633     heap->set_nmethod_count(heap->nmethod_count() + 1);
 634     if (((nmethod *)cb)->has_dependencies()) {
 635       AtomicAccess::inc(&_number_of_nmethods_with_dependencies);
 636     }
 637   }
 638   if (cb->is_adapter_blob()) {
 639     heap->set_adapter_count(heap->adapter_count() + 1);
 640   }
 641 }
 642 
 643 bool CodeCache::contains(void *p) {
 644   // S390 uses contains() in current_frame(), which is used before
 645   // code cache initialization if NativeMemoryTracking=detail is set.
 646   S390_ONLY(if (_heaps == nullptr) return false;)
 647   // It should be ok to call contains without holding a lock.
 648   FOR_ALL_HEAPS(heap) {
 649     if ((*heap)->contains(p)) {
 650       return true;
 651     }
 652   }
 653   return false;
 654 }
 655 
 656 bool CodeCache::contains(nmethod *nm) {
 657   return contains((void *)nm);
 658 }
 659 
 660 // This method is safe to call without holding the CodeCache_lock. It only depends on the _segmap to contain
 661 // valid indices, which it will always do, as long as the CodeBlob is not in the process of being recycled.
 662 CodeBlob* CodeCache::find_blob(void* start) {
 663   // NMT can walk the stack before code cache is created
 664   if (_heaps != nullptr) {
 665     CodeHeap* heap = get_code_heap_containing(start);
 666     if (heap != nullptr) {
 667       return heap->find_blob(start);
 668     }
 669   }
 670   return nullptr;
 671 }
 672 
 673 nmethod* CodeCache::find_nmethod(void* start) {
 674   CodeBlob* cb = find_blob(start);
 675   assert(cb == nullptr || cb->is_nmethod(), "did not find an nmethod");
 676   return (nmethod*)cb;
 677 }
 678 
 679 void CodeCache::blobs_do(void f(CodeBlob* nm)) {
 680   assert_locked_or_safepoint(CodeCache_lock);
 681   FOR_ALL_HEAPS(heap) {
 682     FOR_ALL_BLOBS(cb, *heap) {
 683       f(cb);
 684     }
 685   }
 686 }
 687 
 688 void CodeCache::nmethods_do(void f(nmethod* nm)) {
 689   assert_locked_or_safepoint(CodeCache_lock);
 690   NMethodIterator iter(NMethodIterator::all);
 691   while(iter.next()) {
 692     f(iter.method());
 693   }
 694 }
 695 
 696 void CodeCache::nmethods_do(NMethodClosure* cl) {
 697   assert_locked_or_safepoint(CodeCache_lock);
 698   NMethodIterator iter(NMethodIterator::all);
 699   while(iter.next()) {
 700     cl->do_nmethod(iter.method());
 701   }
 702 }
 703 
 704 void CodeCache::metadata_do(MetadataClosure* f) {
 705   assert_locked_or_safepoint(CodeCache_lock);
 706   NMethodIterator iter(NMethodIterator::all);
 707   while(iter.next()) {
 708     iter.method()->metadata_do(f);
 709   }
 710 }
 711 
 712 // Calculate the number of GCs after which an nmethod is expected to have been
 713 // used in order to not be classed as cold.
 714 void CodeCache::update_cold_gc_count() {
 715   if (!MethodFlushing || !UseCodeCacheFlushing || NmethodSweepActivity == 0) {
 716     // No aging
 717     return;
 718   }
 719 
 720   size_t last_used = _last_unloading_used;
 721   double last_time = _last_unloading_time;
 722 
 723   double time = os::elapsedTime();
 724 
 725   size_t free = unallocated_capacity();
 726   size_t max = max_capacity();
 727   size_t used = max - free;
 728   double gc_interval = time - last_time;
 729 
 730   _unloading_threshold_gc_requested = false;
 731   _last_unloading_time = time;
 732   _last_unloading_used = used;
 733 
 734   if (last_time == 0.0) {
 735     // The first GC doesn't have enough information to make good
 736     // decisions, so just keep everything afloat
 737     log_info(codecache)("Unknown code cache pressure; don't age code");
 738     return;
 739   }
 740 
 741   if (gc_interval <= 0.0 || last_used >= used) {
 742     // Dodge corner cases where there is no pressure or negative pressure
 743     // on the code cache. Just don't unload when this happens.
 744     _cold_gc_count = INT_MAX;
 745     log_info(codecache)("No code cache pressure; don't age code");
 746     return;
 747   }
 748 
 749   double allocation_rate = (used - last_used) / gc_interval;
 750 
 751   _unloading_allocation_rates.add(allocation_rate);
 752   _unloading_gc_intervals.add(gc_interval);
 753 
 754   size_t aggressive_sweeping_free_threshold = StartAggressiveSweepingAt / 100.0 * max;
 755   if (free < aggressive_sweeping_free_threshold) {
 756     // We are already in the red zone; be very aggressive to avoid disaster
 757     // But not more aggressive than a count of 2. This ensures that an nmethod must
 758     // have gone unused across at least two GCs before it is considered cold.
 759     _cold_gc_count = 2;
 760     log_info(codecache)("Code cache critically low; use aggressive aging");
 761     return;
 762   }
 763 
 764   // The code cache has an expected time for cold nmethods to "time out"
 765   // when they have not been used. The time for nmethods to time out
 766   // depends on how long we expect we can keep allocating code until
 767   // aggressive sweeping starts, based on sampled allocation rates.
 768   double average_gc_interval = _unloading_gc_intervals.avg();
 769   double average_allocation_rate = _unloading_allocation_rates.avg();
 770   double time_to_aggressive = ((double)(free - aggressive_sweeping_free_threshold)) / average_allocation_rate;
 771   double cold_timeout = time_to_aggressive / NmethodSweepActivity;
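       // Illustrative numbers (not defaults): with 120 MB of free space above the aggressive
       // sweeping threshold, an average allocation rate of 1 MB/s and NmethodSweepActivity == 4,
       // time_to_aggressive is 120 s and cold_timeout is 30 s.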
 772 
 773   // Convert time to GC cycles, and crop at INT_MAX. The reason for
 774   // that is that the _cold_gc_count will be added to an epoch number
 775   // and that addition must not overflow, or we can crash the VM.
 776   // But not more aggressive than a count of 2. This ensures that an nmethod must
 777   // have gone unused across at least two GCs before it is considered cold.
 778   _cold_gc_count = MAX2(MIN2((uint64_t)(cold_timeout / average_gc_interval), (uint64_t)INT_MAX), (uint64_t)2);
 779 
 780   double used_ratio = double(used) / double(max);
 781   double last_used_ratio = double(last_used) / double(max);
 782   log_info(codecache)("Allocation rate: %.3f KB/s, time to aggressive unloading: %.3f s, cold timeout: %.3f s, cold gc count: " UINT64_FORMAT
 783                       ", used: %.3f MB (%.3f%%), last used: %.3f MB (%.3f%%), gc interval: %.3f s",
 784                       average_allocation_rate / K, time_to_aggressive, cold_timeout, _cold_gc_count,
 785                       double(used) / M, used_ratio * 100.0, double(last_used) / M, last_used_ratio * 100.0, average_gc_interval);
 786 
 787 }
 788 
 789 uint64_t CodeCache::cold_gc_count() {
 790   return _cold_gc_count;
 791 }
 792 
 793 void CodeCache::gc_on_allocation() {
 794   if (!is_init_completed()) {
 795     // Let's not heuristically trigger GCs before the JVM is ready for GCs, no matter what
 796     return;
 797   }
 798 
 799   size_t free = unallocated_capacity();
 800   size_t max = max_capacity();
 801   size_t used = max - free;
 802   double free_ratio = double(free) / double(max);
 803   if (free_ratio <= StartAggressiveSweepingAt / 100.0) {
 804     // In case the GC is concurrent, we make sure only one thread requests the GC.
 805     if (AtomicAccess::cmpxchg(&_unloading_threshold_gc_requested, false, true) == false) {
 806       log_info(codecache)("Triggering aggressive GC due to having only %.3f%% free memory", free_ratio * 100.0);
 807       Universe::heap()->collect(GCCause::_codecache_GC_aggressive);
 808     }
 809     return;
 810   }
 811 
 812   size_t last_used = _last_unloading_used;
 813   if (last_used >= used) {
 814     // No increase since last GC; no need to sweep yet
 815     return;
 816   }
 817   size_t allocated_since_last = used - last_used;
 818   double allocated_since_last_ratio = double(allocated_since_last) / double(max);
 819   double threshold = SweeperThreshold / 100.0;
 820   double used_ratio = double(used) / double(max);
 821   double last_used_ratio = double(last_used) / double(max);
 822   if (used_ratio > threshold) {
 823     // After threshold is reached, scale it by free_ratio so that more aggressive
 824     // GC is triggered as we approach code cache exhaustion
 825     threshold *= free_ratio;
 826   }
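       // Illustrative numbers (not defaults): with SweeperThreshold == 15 and 80% of the code
       // cache in use (free_ratio == 0.2), the effective threshold becomes 15% * 0.2 == 3%
       // allocated since the last unloading.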
 827   // If code has been allocated in the code cache without any GC at all, make sure
 828   // a GC is eventually invoked to avoid trouble.
 829   if (allocated_since_last_ratio > threshold) {
 830     // In case the GC is concurrent, we make sure only one thread requests the GC.
 831     if (AtomicAccess::cmpxchg(&_unloading_threshold_gc_requested, false, true) == false) {
 832       log_info(codecache)("Triggering threshold (%.3f%%) GC due to allocating %.3f%% since last unloading (%.3f%% used -> %.3f%% used)",
 833                           threshold * 100.0, allocated_since_last_ratio * 100.0, last_used_ratio * 100.0, used_ratio * 100.0);
 834       Universe::heap()->collect(GCCause::_codecache_GC_threshold);
 835     }
 836   }
 837 }
 838 
 839 // We initialize _gc_epoch to 2, because previous_completed_gc_marking_cycle
 840 // subtracts 2 from the value, and the type is unsigned. We don't want underflow.
 841 //
 842 // Odd values mean that marking is in progress, and even values mean that no
 843 // marking is currently active.
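     // For example, an epoch sequence 2 -> 3 -> 4 means: no marking, marking in progress,
     // marking completed.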
 844 uint64_t CodeCache::_gc_epoch = 2;
 845 
 846 // After how many GCs of disuse do we consider an nmethod cold?
 847 uint64_t CodeCache::_cold_gc_count = INT_MAX;
 848 
 849 double CodeCache::_last_unloading_time = 0.0;
 850 size_t CodeCache::_last_unloading_used = 0;
 851 volatile bool CodeCache::_unloading_threshold_gc_requested = false;
 852 TruncatedSeq CodeCache::_unloading_gc_intervals(10 /* samples */);
 853 TruncatedSeq CodeCache::_unloading_allocation_rates(10 /* samples */);
 854 
 855 uint64_t CodeCache::gc_epoch() {
 856   return _gc_epoch;
 857 }
 858 
 859 bool CodeCache::is_gc_marking_cycle_active() {
 860   // Odd means that marking is active
 861   return (_gc_epoch % 2) == 1;
 862 }
 863 
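     // Example: while marking is active at epoch 5, the previously completed marking cycle is
     // the one that started at epoch 3; once the current cycle finishes (epoch 6), it is the
     // cycle that started at epoch 5.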
 864 uint64_t CodeCache::previous_completed_gc_marking_cycle() {
 865   if (is_gc_marking_cycle_active()) {
 866     return _gc_epoch - 2;
 867   } else {
 868     return _gc_epoch - 1;
 869   }
 870 }
 871 
 872 void CodeCache::on_gc_marking_cycle_start() {
 873   assert(!is_gc_marking_cycle_active(), "Previous marking cycle never ended");
 874   ++_gc_epoch;
 875 }
 876 
 877 // Once started, the code cache marking cycle must only be finished after marking of
 878 // the Java heap is complete. Otherwise nmethods could appear to be not on stack even
 879 // if they have frames in continuation StackChunks that were not yet visited.
 880 void CodeCache::on_gc_marking_cycle_finish() {
 881   assert(is_gc_marking_cycle_active(), "Marking cycle started before last one finished");
 882   ++_gc_epoch;
 883   update_cold_gc_count();
 884 }
 885 
 886 void CodeCache::arm_all_nmethods() {
 887   BarrierSet::barrier_set()->barrier_set_nmethod()->arm_all_nmethods();
 888 }
 889 
 890 // Mark nmethods for unloading if they contain otherwise unreachable oops.
 891 void CodeCache::do_unloading(bool unloading_occurred) {
 892   assert_locked_or_safepoint(CodeCache_lock);
 893   NMethodIterator iter(NMethodIterator::all);
 894   while(iter.next()) {
 895     iter.method()->do_unloading(unloading_occurred);
 896   }
 897 }
 898 
 899 void CodeCache::verify_clean_inline_caches() {
 900 #ifdef ASSERT
 901   if (!VerifyInlineCaches) return;
 902   NMethodIterator iter(NMethodIterator::not_unloading);
 903   while(iter.next()) {
 904     nmethod* nm = iter.method();
 905     nm->verify_clean_inline_caches();
 906     nm->verify();
 907   }
 908 #endif
 909 }
 910 
 911 // Defer freeing of concurrently cleaned ExceptionCache entries until
 912 // after a global handshake operation.
 913 void CodeCache::release_exception_cache(ExceptionCache* entry) {
 914   if (SafepointSynchronize::is_at_safepoint()) {
 915     delete entry;
 916   } else {
 917     for (;;) {
 918       ExceptionCache* purge_list_head = AtomicAccess::load(&_exception_cache_purge_list);
 919       entry->set_purge_list_next(purge_list_head);
 920       if (AtomicAccess::cmpxchg(&_exception_cache_purge_list, purge_list_head, entry) == purge_list_head) {
 921         break;
 922       }
 923     }
 924   }
 925 }
 926 
 927 // Delete exception caches that have been concurrently unlinked,
 928 // followed by a global handshake operation.
 929 void CodeCache::purge_exception_caches() {
 930   ExceptionCache* curr = _exception_cache_purge_list;
 931   while (curr != nullptr) {
 932     ExceptionCache* next = curr->purge_list_next();
 933     delete curr;
 934     curr = next;
 935   }
 936   _exception_cache_purge_list = nullptr;
 937 }
 938 
 939 // Restart the compiler if possible and required.
 940 void CodeCache::maybe_restart_compiler(size_t freed_memory) {
 941 
 942   // Try to start the compiler again if we freed any memory
 943   if (!CompileBroker::should_compile_new_jobs() && freed_memory != 0) {
 944     CompileBroker::set_should_compile_new_jobs(CompileBroker::run_compilation);
 945     log_info(codecache)("Restarting compiler");
 946     EventJITRestart event;
 947     event.set_freedMemory(freed_memory);
 948     event.set_codeCacheMaxCapacity(CodeCache::max_capacity());
 949     event.commit();
 950   }
 951 }
 952 
 953 uint8_t CodeCache::_unloading_cycle = 1;
 954 
 955 void CodeCache::increment_unloading_cycle() {
 956   // 2-bit value (see IsUnloadingState in nmethod.cpp for details)
 957   // 0 is reserved for new methods.
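       // The value therefore cycles 1 -> 2 -> 3 -> 1 -> ..., never revisiting 0.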
 958   _unloading_cycle = (_unloading_cycle + 1) % 4;
 959   if (_unloading_cycle == 0) {
 960     _unloading_cycle = 1;
 961   }
 962 }
 963 
 964 CodeCache::UnlinkingScope::UnlinkingScope(BoolObjectClosure* is_alive)
 965   : _is_unloading_behaviour(is_alive)
 966 {
 967   _saved_behaviour = IsUnloadingBehaviour::current();
 968   IsUnloadingBehaviour::set_current(&_is_unloading_behaviour);
 969   increment_unloading_cycle();
 970   DependencyContext::cleaning_start();
 971 }
 972 
 973 CodeCache::UnlinkingScope::~UnlinkingScope() {
 974   IsUnloadingBehaviour::set_current(_saved_behaviour);
 975   DependencyContext::cleaning_end();
 976 }
 977 
 978 void CodeCache::verify_oops() {
 979   MutexLocker mu(CodeCache_lock, Mutex::_no_safepoint_check_flag);
 980   VerifyOopClosure voc;
 981   NMethodIterator iter(NMethodIterator::not_unloading);
 982   while(iter.next()) {
 983     nmethod* nm = iter.method();
 984     nm->oops_do(&voc);
 985     nm->verify_oop_relocations();
 986   }
 987 }
 988 
 989 int CodeCache::blob_count(CodeBlobType code_blob_type) {
 990   CodeHeap* heap = get_code_heap(code_blob_type);
 991   return (heap != nullptr) ? heap->blob_count() : 0;
 992 }
 993 
 994 int CodeCache::blob_count() {
 995   int count = 0;
 996   FOR_ALL_HEAPS(heap) {
 997     count += (*heap)->blob_count();
 998   }
 999   return count;
1000 }
1001 
1002 int CodeCache::nmethod_count(CodeBlobType code_blob_type) {
1003   CodeHeap* heap = get_code_heap(code_blob_type);
1004   return (heap != nullptr) ? heap->nmethod_count() : 0;
1005 }
1006 
1007 int CodeCache::nmethod_count() {
1008   int count = 0;
1009   for (CodeHeap* heap : *_nmethod_heaps) {
1010     count += heap->nmethod_count();
1011   }
1012   return count;
1013 }
1014 
1015 int CodeCache::adapter_count(CodeBlobType code_blob_type) {
1016   CodeHeap* heap = get_code_heap(code_blob_type);
1017   return (heap != nullptr) ? heap->adapter_count() : 0;
1018 }
1019 
1020 int CodeCache::adapter_count() {
1021   int count = 0;
1022   FOR_ALL_HEAPS(heap) {
1023     count += (*heap)->adapter_count();
1024   }
1025   return count;
1026 }
1027 
1028 address CodeCache::low_bound(CodeBlobType code_blob_type) {
1029   CodeHeap* heap = get_code_heap(code_blob_type);
1030   return (heap != nullptr) ? (address)heap->low_boundary() : nullptr;
1031 }
1032 
1033 address CodeCache::high_bound(CodeBlobType code_blob_type) {
1034   CodeHeap* heap = get_code_heap(code_blob_type);
1035   return (heap != nullptr) ? (address)heap->high_boundary() : nullptr;
1036 }
1037 
1038 size_t CodeCache::capacity() {
1039   size_t cap = 0;
1040   FOR_ALL_ALLOCABLE_HEAPS(heap) {
1041     cap += (*heap)->capacity();
1042   }
1043   return cap;
1044 }
1045 
1046 size_t CodeCache::unallocated_capacity(CodeBlobType code_blob_type) {
1047   CodeHeap* heap = get_code_heap(code_blob_type);
1048   return (heap != nullptr) ? heap->unallocated_capacity() : 0;
1049 }
1050 
1051 size_t CodeCache::unallocated_capacity() {
1052   size_t unallocated_cap = 0;
1053   FOR_ALL_ALLOCABLE_HEAPS(heap) {
1054     unallocated_cap += (*heap)->unallocated_capacity();
1055   }
1056   return unallocated_cap;
1057 }
1058 
1059 size_t CodeCache::max_capacity() {
1060   size_t max_cap = 0;
1061   FOR_ALL_ALLOCABLE_HEAPS(heap) {
1062     max_cap += (*heap)->max_capacity();
1063   }
1064   return max_cap;
1065 }
1066 
1067 bool CodeCache::is_non_nmethod(address addr) {
1068   CodeHeap* blob = get_code_heap(CodeBlobType::NonNMethod);
1069   return blob->contains(addr);
1070 }
1071 
1072 size_t CodeCache::max_distance_to_non_nmethod() {
1073   if (!SegmentedCodeCache) {
1074     return ReservedCodeCacheSize;
1075   } else {
1076     CodeHeap* blob = get_code_heap(CodeBlobType::NonNMethod);
1077     // the max distance is minimized by placing the NonNMethod segment
1078     // in between MethodProfiled and MethodNonProfiled segments
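         // i.e., return the larger of the two distances from either end of the reserved
         // code cache to the far boundary of the non-nmethod heap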
1079     size_t dist1 = (size_t)blob->high_boundary() - (size_t)_low_bound;
1080     size_t dist2 = (size_t)_high_bound - (size_t)blob->low_boundary();
1081     return dist1 > dist2 ? dist1 : dist2;
1082   }
1083 }
1084 
1085 // Returns the reverse free ratio. E.g., if 25% (1/4) of the code cache
1086 // is free, reverse_free_ratio() returns 4.
1087 // Since the code heap for each type of code blob falls forward to the next
1088 // type of code heap, return the reverse free ratio for the entire
1089 // code cache.
1090 double CodeCache::reverse_free_ratio() {
1091   double unallocated = MAX2((double)unallocated_capacity(), 1.0); // Avoid division by 0;
1092   double max = (double)max_capacity();
1093   double result = max / unallocated;
1094   assert (max >= unallocated, "Must be");
1095   assert (result >= 1.0, "reverse_free_ratio must be at least 1. It is %f", result);
1096   return result;
1097 }
1098 
1099 size_t CodeCache::bytes_allocated_in_freelists() {
1100   size_t allocated_bytes = 0;
1101   FOR_ALL_ALLOCABLE_HEAPS(heap) {
1102     allocated_bytes += (*heap)->allocated_in_freelist();
1103   }
1104   return allocated_bytes;
1105 }
1106 
1107 int CodeCache::allocated_segments() {
1108   int number_of_segments = 0;
1109   FOR_ALL_ALLOCABLE_HEAPS(heap) {
1110     number_of_segments += (*heap)->allocated_segments();
1111   }
1112   return number_of_segments;
1113 }
1114 
1115 size_t CodeCache::freelists_length() {
1116   size_t length = 0;
1117   FOR_ALL_ALLOCABLE_HEAPS(heap) {
1118     length += (*heap)->freelist_length();
1119   }
1120   return length;
1121 }
1122 
1123 void icache_init();
1124 
1125 void CodeCache::initialize() {
1126   assert(CodeCacheSegmentSize >= (size_t)CodeEntryAlignment, "CodeCacheSegmentSize must be large enough to align entry points");
1127 #ifdef COMPILER2
1128   assert(CodeCacheSegmentSize >= (size_t)OptoLoopAlignment,  "CodeCacheSegmentSize must be large enough to align inner loops");
1129 #endif
1130   assert(CodeCacheSegmentSize >= sizeof(jdouble),    "CodeCacheSegmentSize must be large enough to align constants");
1131   // This was originally just an alignment check that caused a failure; instead, round
1132   // the code cache expansion size up to the page size.  In particular, Solaris is moving
1133   // to a larger default page size.
1134   CodeCacheExpansionSize = align_up(CodeCacheExpansionSize, os::vm_page_size());
1135 
1136   if (SegmentedCodeCache) {
1137     // Use multiple code heaps
1138     initialize_heaps();
1139   } else {
1140     // Use a single code heap
1141     FLAG_SET_ERGO(NonNMethodCodeHeapSize, (uintx)os::vm_page_size());
1142     FLAG_SET_ERGO(ProfiledCodeHeapSize, 0);
1143     FLAG_SET_ERGO(NonProfiledCodeHeapSize, 0);
1144 
1145     // If InitialCodeCacheSize is equal to ReservedCodeCacheSize, then it's more likely
1146     // users want to use the largest available page.
1147     const size_t min_pages = (InitialCodeCacheSize == ReservedCodeCacheSize) ? 1 : 8;
1148     ReservedSpace rs = reserve_heap_memory(ReservedCodeCacheSize, page_size(false, min_pages));
1149     // Register CodeHeaps with LSan as we sometimes embed pointers to malloc memory.
1150     LSAN_REGISTER_ROOT_REGION(rs.base(), rs.size());
1151     add_heap(rs, "CodeCache", CodeBlobType::All);
1152   }
1153 
1154   // Initialize ICache flush mechanism
1155   // This service is needed for os::register_code_area
1156   icache_init();
1157 
1158   // Give OS a chance to register generated code area.
1159   // This is used on Windows 64 bit platforms to register
1160   // Structured Exception Handlers for our generated code.
1161   os::register_code_area((char*)low_bound(), (char*)high_bound());
1162 }
1163 
1164 void codeCache_init() {
1165   CodeCache::initialize();
1166 }
1167 
1168 //------------------------------------------------------------------------------------------------
1169 
1170 bool CodeCache::has_nmethods_with_dependencies() {
1171   return AtomicAccess::load_acquire(&_number_of_nmethods_with_dependencies) != 0;
1172 }
1173 
1174 void CodeCache::clear_inline_caches() {
1175   assert_locked_or_safepoint(CodeCache_lock);
1176   NMethodIterator iter(NMethodIterator::not_unloading);
1177   while(iter.next()) {
1178     iter.method()->clear_inline_caches();
1179   }
1180 }
1181 
1182 // Only used by whitebox API
1183 void CodeCache::cleanup_inline_caches_whitebox() {
1184   assert_locked_or_safepoint(CodeCache_lock);
1185   NMethodIterator iter(NMethodIterator::not_unloading);
1186   while(iter.next()) {
1187     iter.method()->cleanup_inline_caches_whitebox();
1188   }
1189 }
1190 
1191 // Keeps track of time spent for checking dependencies
1192 NOT_PRODUCT(static elapsedTimer dependentCheckTime;)
1193 
1194 #ifndef PRODUCT
1195 // Check whether any dependencies of live nmethods have been invalidated.
1196 // (This is expensive!)
1197 static void check_live_nmethods_dependencies(DepChange& changes) {
1198   // Checked dependencies are allocated into this ResourceMark
1199   ResourceMark rm;
1200 
1201   // Turn off dependency tracing while actually testing dependencies.
1202   FlagSetting fs(Dependencies::_verify_in_progress, true);
1203 
1204   typedef HashTable<DependencySignature, int, 11027,
1205                             AnyObj::RESOURCE_AREA, mtInternal,
1206                             &DependencySignature::hash,
1207                             &DependencySignature::equals> DepTable;
1208 
1209   DepTable* table = new DepTable();
1210 
1211   // Iterate over live nmethods and check dependencies of all nmethods that are not
1212   // marked for deoptimization. A particular dependency is only checked once.
1213   NMethodIterator iter(NMethodIterator::not_unloading);
1214   while(iter.next()) {
1215     nmethod* nm = iter.method();
1216     // Only notify for live nmethods
1217     if (!nm->is_marked_for_deoptimization()) {
1218       for (Dependencies::DepStream deps(nm); deps.next(); ) {
1219         // Construct abstraction of a dependency.
1220         DependencySignature* current_sig = new DependencySignature(deps);
1221 
1222         // Determine if dependency is already checked. table->put(...) returns
1223         // 'true' if the dependency is added (i.e., was not in the hashtable).
1224         if (table->put(*current_sig, 1)) {
1225           Klass* witness = deps.check_dependency();
1226           if (witness != nullptr) {
1227             // Dependency checking failed. Print out information about the failed
1228             // dependency and finally fail with an assert. We can fail here, since
1229             // dependency checking is never done in a product build.
1230             deps.print_dependency(tty, witness, true);
1231             changes.print();
1232             nm->print();
1233             nm->print_dependencies_on(tty);
1234             assert(false, "Should have been marked for deoptimization");
1235           }
1236         }
1237       }
1238     }
1239   }
1240 }
1241 #endif
1242 
1243 void CodeCache::mark_for_deoptimization(DeoptimizationScope* deopt_scope, KlassDepChange& changes) {
1244   MutexLocker mu(CodeCache_lock, Mutex::_no_safepoint_check_flag);
1245 
1246   // search the hierarchy looking for nmethods which are affected by the loading of this class
1247 
1248   // then search the interfaces this class implements looking for nmethods
1249   // which might be dependent on the fact that an interface only had one
1250   // implementor.
1251   // nmethod::check_all_dependencies works correctly only if no safepoint
1252   // can happen.
1253   NoSafepointVerifier nsv;
1254   for (DepChange::ContextStream str(changes, nsv); str.next(); ) {
1255     InstanceKlass* d = str.klass();
1256     {
1257       LogStreamHandle(Trace, dependencies) log;
1258       if (log.is_enabled()) {
1259         log.print("Processing context ");
1260         d->name()->print_value_on(&log);
1261       }
1262     }
1263     d->mark_dependent_nmethods(deopt_scope, changes);
1264   }
1265 
1266 #ifndef PRODUCT
1267   if (VerifyDependencies) {
1268     // Object pointers are used as unique identifiers for dependency arguments. This
1269     // is only possible if no safepoint (i.e., GC) occurs during the verification code.
1270     dependentCheckTime.start();
1271     check_live_nmethods_dependencies(changes);
1272     dependentCheckTime.stop();
1273   }
1274 #endif
1275 }
1276 
1277 #if INCLUDE_JVMTI
1278 // RedefineClasses support for saving nmethods that are dependent on "old" methods.
1279 // We don't really expect this table to grow very large.  If it does, it can become a hashtable.
1280 static GrowableArray<nmethod*>* old_nmethod_table = nullptr;
1281 
1282 static void add_to_old_table(nmethod* c) {
1283   if (old_nmethod_table == nullptr) {
1284     old_nmethod_table = new (mtCode) GrowableArray<nmethod*>(100, mtCode);
1285   }
1286   old_nmethod_table->push(c);
1287 }
1288 
1289 static void reset_old_method_table() {
1290   if (old_nmethod_table != nullptr) {
1291     delete old_nmethod_table;
1292     old_nmethod_table = nullptr;
1293   }
1294 }
1295 
1296 // Remove this method when flushed.
1297 void CodeCache::unregister_old_nmethod(nmethod* c) {
1298   assert_lock_strong(CodeCache_lock);
1299   if (old_nmethod_table != nullptr) {
1300     int index = old_nmethod_table->find(c);
1301     if (index != -1) {
1302       old_nmethod_table->delete_at(index);
1303     }
1304   }
1305 }
1306 
1307 void CodeCache::old_nmethods_do(MetadataClosure* f) {
1308   // Walk old method table and mark those on stack.
1309   int length = 0;
1310   if (old_nmethod_table != nullptr) {
1311     length = old_nmethod_table->length();
1312     for (int i = 0; i < length; i++) {
1313       // Walk all methods saved on the last pass.  Concurrent class unloading may
1314       // also be looking at this method's metadata, so don't delete it yet if
1315       // it is marked as unloaded.
1316       old_nmethod_table->at(i)->metadata_do(f);
1317     }
1318   }
1319   log_debug(redefine, class, nmethod)("Walked %d nmethods for mark_on_stack", length);
1320 }
1321 
1322 // Walk compiled methods and mark dependent methods for deoptimization.
1323 void CodeCache::mark_dependents_for_evol_deoptimization(DeoptimizationScope* deopt_scope) {
1324   assert(SafepointSynchronize::is_at_safepoint(), "Can only do this at a safepoint!");
1325   // Each redefinition creates a new set of nmethods that have references to "old" Methods,
1326   // so delete the old method table and create a new one.
1327   reset_old_method_table();
1328 
1329   NMethodIterator iter(NMethodIterator::all);
1330   while(iter.next()) {
1331     nmethod* nm = iter.method();
1332     // Walk all alive nmethods to check for old Methods.
1333     // This includes methods whose inline caches point to old methods, so
1334     // inline cache clearing is unnecessary.
1335     if (nm->has_evol_metadata()) {
1336       deopt_scope->mark(nm);
1337       add_to_old_table(nm);
1338     }
1339   }
1340 }
1341 
1342 void CodeCache::mark_all_nmethods_for_evol_deoptimization(DeoptimizationScope* deopt_scope) {
1343   assert(SafepointSynchronize::is_at_safepoint(), "Can only do this at a safepoint!");
1344   NMethodIterator iter(NMethodIterator::all);
1345   while(iter.next()) {
1346     nmethod* nm = iter.method();
1347     if (!nm->method()->is_method_handle_intrinsic()) {
1348       if (nm->can_be_deoptimized()) {
1349         deopt_scope->mark(nm);
1350       }
1351       if (nm->has_evol_metadata()) {
1352         add_to_old_table(nm);
1353       }
1354     }
1355   }
1356 }
1357 
1358 #endif // INCLUDE_JVMTI
1359 
1360 // Mark methods for deopt (if safe or possible).
1361 void CodeCache::mark_all_nmethods_for_deoptimization(DeoptimizationScope* deopt_scope) {
1362   MutexLocker mu(CodeCache_lock, Mutex::_no_safepoint_check_flag);
1363   NMethodIterator iter(NMethodIterator::not_unloading);
1364   while(iter.next()) {
1365     nmethod* nm = iter.method();
1366     if (!nm->is_native_method()) {
1367       deopt_scope->mark(nm);
1368     }
1369   }
1370 }
1371 
1372 void CodeCache::mark_for_deoptimization(DeoptimizationScope* deopt_scope, Method* dependee) {
1373   MutexLocker mu(CodeCache_lock, Mutex::_no_safepoint_check_flag);
1374 
1375   NMethodIterator iter(NMethodIterator::not_unloading);
1376   while(iter.next()) {
1377     nmethod* nm = iter.method();
1378     if (nm->is_dependent_on_method(dependee)) {
1379       deopt_scope->mark(nm);
1380     }
1381   }
1382 }
1383 
1384 void CodeCache::make_marked_nmethods_deoptimized() {
1385   RelaxedNMethodIterator iter(RelaxedNMethodIterator::not_unloading);
1386   while (iter.next()) {
1387     nmethod* nm = iter.method();
1388     if (nm->is_marked_for_deoptimization() && !nm->has_been_deoptimized() && nm->can_be_deoptimized()) {
1389       nm->make_not_entrant(nmethod::InvalidationReason::MARKED_FOR_DEOPTIMIZATION);
1390       nm->make_deoptimized();
1391     }
1392   }
1393 }
1394 
1395 // Marks compiled methods dependent on dependee.
1396 void CodeCache::mark_dependents_on(DeoptimizationScope* deopt_scope, InstanceKlass* dependee) {
1397   assert_lock_strong(Compile_lock);
1398 
1399   if (!has_nmethods_with_dependencies()) {
1400     return;
1401   }
1402 
1403   if (dependee->is_linked()) {
1404     // Class initialization state change.
1405     KlassInitDepChange changes(dependee);
1406     mark_for_deoptimization(deopt_scope, changes);
1407   } else {
1408     // New class is loaded.
1409     NewKlassDepChange changes(dependee);
1410     mark_for_deoptimization(deopt_scope, changes);
1411   }
1412 }
1413 
1414 // Marks compiled methods dependent on the given method and deoptimizes them (used for breakpoints).
1415 void CodeCache::mark_dependents_on_method_for_breakpoint(const methodHandle& m_h) {
1416   assert(SafepointSynchronize::is_at_safepoint(), "invariant");
1417 
1418   DeoptimizationScope deopt_scope;
1419   // Compute the dependent nmethods
1420   mark_for_deoptimization(&deopt_scope, m_h());
1421   deopt_scope.deoptimize_marked();
1422 }
1423 
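     // Verify each code heap and every code blob it contains.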
1424 void CodeCache::verify() {
1425   assert_locked_or_safepoint(CodeCache_lock);
1426   FOR_ALL_HEAPS(heap) {
1427     (*heap)->verify();
1428     FOR_ALL_BLOBS(cb, *heap) {
1429       cb->verify();
1430     }
1431   }
1432 }
1433 
1434 // A CodeHeap is full. Print a warning and report a JFR event.
1435 PRAGMA_DIAG_PUSH
1436 PRAGMA_FORMAT_NONLITERAL_IGNORED
1437 void CodeCache::report_codemem_full(CodeBlobType code_blob_type, bool print) {
1438   // Get nmethod heap for the given CodeBlobType and build CodeCacheFull event
1439   CodeHeap* heap = get_code_heap(code_blob_type);
1440   assert(heap != nullptr, "heap is null");
1441 
1442   int full_count = heap->report_full();
1443 
1444   if ((full_count == 1) || print) {
1445     // Not yet reported for this heap, or printing was explicitly requested; report it now.
1446     if (SegmentedCodeCache) {
1447       ResourceMark rm;
1448       stringStream msg1_stream, msg2_stream;
1449       msg1_stream.print("%s is full. Compiler has been disabled.",
1450                         get_code_heap_name(code_blob_type));
1451       msg2_stream.print("Try increasing the code heap size using -XX:%s=",
1452                         get_code_heap_flag_name(code_blob_type));
1453       const char *msg1 = msg1_stream.as_string();
1454       const char *msg2 = msg2_stream.as_string();
1455 
1456       log_warning(codecache)("%s", msg1);
1457       log_warning(codecache)("%s", msg2);
1458       warning("%s", msg1);
1459       warning("%s", msg2);
1460     } else {
1461       const char *msg1 = "CodeCache is full. Compiler has been disabled.";
1462       const char *msg2 = "Try increasing the code cache size using -XX:ReservedCodeCacheSize=";
1463 
1464       log_warning(codecache)("%s", msg1);
1465       log_warning(codecache)("%s", msg2);
1466       warning("%s", msg1);
1467       warning("%s", msg2);
1468     }
1469     stringStream s;
1470     // Dump code cache into a buffer before locking the tty.
1471     {
1472       MutexLocker mu(CodeCache_lock, Mutex::_no_safepoint_check_flag);
1473       print_summary(&s);
1474     }
1475     {
1476       ttyLocker ttyl;
1477       tty->print("%s", s.freeze());
1478     }
1479 
1480     if (full_count == 1) {
1481       if (PrintCodeHeapAnalytics) {
1482         CompileBroker::print_heapinfo(tty, "all", 4096); // details, may be a lot!
1483       }
1484     }
1485   }
1486 
1487   EventCodeCacheFull event;
1488   if (event.should_commit()) {
1489     event.set_codeBlobType((u1)code_blob_type);
1490     event.set_startAddress((u8)heap->low_boundary());
1491     event.set_commitedTopAddress((u8)heap->high());
1492     event.set_reservedTopAddress((u8)heap->high_boundary());
1493     event.set_entryCount(heap->blob_count());
1494     event.set_methodCount(heap->nmethod_count());
1495     event.set_adaptorCount(heap->adapter_count());
1496     event.set_unallocatedCapacity(heap->unallocated_capacity());
1497     event.set_fullCount(heap->full_count());
1498     event.set_codeCacheMaxCapacity(CodeCache::max_capacity());
1499     event.commit();
1500   }
1501 }
1502 PRAGMA_DIAG_POP
1503 
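     // Print code cache overhead: freelist usage, bytes lost to CodeHeap segment rounding, and segment map size.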
1504 void CodeCache::print_memory_overhead() {
1505   size_t wasted_bytes = 0;
1506   FOR_ALL_ALLOCABLE_HEAPS(heap) {
1507       CodeHeap* curr_heap = *heap;
1508       for (CodeBlob* cb = (CodeBlob*)curr_heap->first(); cb != nullptr; cb = (CodeBlob*)curr_heap->next(cb)) {
1509         HeapBlock* heap_block = ((HeapBlock*)cb) - 1;
1510         wasted_bytes += heap_block->length() * CodeCacheSegmentSize - cb->size();
1511       }
1512   }
1513   // Print freelist usage and other code cache overhead statistics.
1514   ttyLocker ttl;
1515   tty->print_cr("Number of elements in freelist: %zd",       freelists_length());
1516   tty->print_cr("Allocated in freelist:          %zdkB",  bytes_allocated_in_freelists()/K);
1517   tty->print_cr("Unused bytes in CodeBlobs:      %zdkB",  (wasted_bytes/K));
1518   tty->print_cr("Segment map size:               %zdkB",  allocated_segments()/K); // 1 byte per segment
1519 }
1520 
1521 static void print_helper1(outputStream* st, const char* prefix, int total, int not_entrant, int used) {
1522   if (total > 0) {
1523     double ratio = (100.0 * used) / total;
1524     st->print("%s %3d nmethods: %3d not_entrant, %d used (%2.1f%%)", prefix, total, not_entrant, used, ratio);
1525   }
1526 }
1527 
1528 void CodeCache::print_nmethod_statistics_on(outputStream* st) {
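       // Dimensions: [JIT=0/AOT=1][comp_level (+1 if preloaded)][in_use=0/not_entrant=1/other=2][normal=0/OSR=1]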
1529   int stats     [2][6][3][2] = {0};
1530   int stats_used[2][6][3][2] = {0};
1531 
1532   int total_osr = 0;
1533   int total_entrant = 0;
1534   int total_non_entrant = 0;
1535   int total_other = 0;
1536   int total_used = 0;
1537 
1538   NMethodIterator iter(NMethodIterator::all);
1539   while (iter.next()) {
1540     nmethod* nm = iter.method();
1541     if (nm->is_in_use()) {
1542       ++total_entrant;
1543     } else if (nm->is_not_entrant()) {
1544       ++total_non_entrant;
1545     } else {
1546       ++total_other;
1547     }
1548     if (nm->is_osr_method()) {
1549       ++total_osr;
1550     }
1551     if (nm->used()) {
1552       ++total_used;
1553     }
1554     assert(!nm->preloaded() || nm->comp_level() == CompLevel_full_optimization, "preloaded nmethods must be compiled at the highest tier");
1555 
1556     int idx1 = nm->is_aot() ? 1 : 0;
1557     int idx2 = nm->comp_level() + (nm->preloaded() ? 1 : 0);
1558     int idx3 = (nm->is_in_use()      ? 0 :
1559                (nm->is_not_entrant() ? 1 :
1560                                        2));
1561     int idx4 = (nm->is_osr_method() ? 1 : 0);
1562     stats[idx1][idx2][idx3][idx4] += 1;
1563     if (nm->used()) {
1564       stats_used[idx1][idx2][idx3][idx4] += 1;
1565     }
1566   }
1567 
1568   st->print("Total: %d methods (%d entrant / %d not_entrant; osr: %d ",
1569                total_entrant + total_non_entrant + total_other,
1570                total_entrant, total_non_entrant, total_osr);
1571   if (total_other > 0) {
1572     st->print("; %d other", total_other);
1573   }
1574   st->print_cr(")");
1575 
1576   for (int i = CompLevel_simple; i <= CompLevel_full_optimization; i++) {
1577     int total_normal = stats[0][i][0][0] + stats[0][i][1][0] + stats[0][i][2][0];
1578     int total_osr    = stats[0][i][0][1] + stats[0][i][1][1] + stats[0][i][2][1];
1579     if (total_normal + total_osr > 0) {
1580       st->print("  Tier%d:", i);
1581       print_helper1(st,      "", total_normal, stats[0][i][1][0], stats_used[0][i][0][0] + stats_used[0][i][1][0]);
1582       print_helper1(st, "; osr:", total_osr,    stats[0][i][1][1], stats_used[0][i][0][1] + stats_used[0][i][1][1]);
1583       st->cr();
1584     }
1585   }
1586   st->cr();
1587   for (int i = CompLevel_simple; i <= CompLevel_full_optimization + 1; i++) {
1588     int total_normal = stats[1][i][0][0] + stats[1][i][1][0] + stats[1][i][2][0];
1589     int total_osr    = stats[1][i][0][1] + stats[1][i][1][1] + stats[1][i][2][1];
1590     assert(total_osr == 0, "sanity");
1591     if (total_normal + total_osr > 0) {
1592       st->print("  AOT Code T%d:", i);
1593       print_helper1(st,      "", total_normal, stats[1][i][1][0], stats_used[1][i][0][0] + stats_used[1][i][1][0]);
1594       print_helper1(st, "; osr:", total_osr,    stats[1][i][1][1], stats_used[1][i][0][1] + stats_used[1][i][1][1]);
1595       st->cr();
1596     }
1597   }
1598 }
1599 
1600 //------------------------------------------------------------------------------------------------
1601 // Non-product version
1602 
1603 #ifndef PRODUCT
1604 
1605 void CodeCache::print_trace(const char* event, CodeBlob* cb, uint size) {
1606   if (PrintCodeCache2) {  // Need to add a new flag
1607     ResourceMark rm;
1608     if (size == 0) {
1609       int s = cb->size();
1610       assert(s >= 0, "CodeBlob size is negative: %d", s);
1611       size = (uint) s;
1612     }
1613     tty->print_cr("CodeCache %s:  addr: " INTPTR_FORMAT ", size: 0x%x", event, p2i(cb), size);
1614   }
1615 }
1616 
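     // Print a per-heap listing of all code blobs, per-type counts, an nmethod size histogram and memory overhead.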
1617 void CodeCache::print_internals() {
1618   int nmethodCount = 0;
1619   int runtimeStubCount = 0;
1620   int upcallStubCount = 0;
1621   int adapterCount = 0;
1622   int mhAdapterCount = 0;
1623   int vtableBlobCount = 0;
1624   int deoptimizationStubCount = 0;
1625   int uncommonTrapStubCount = 0;
1626   int exceptionStubCount = 0;
1627   int safepointStubCount = 0;
1628   int bufferBlobCount = 0;
1629   int total = 0;
1630   int nmethodNotEntrant = 0;
1631   int nmethodJava = 0;
1632   int nmethodNative = 0;
1633   int max_nm_size = 0;
1634   ResourceMark rm;
1635 
1637   FOR_ALL_ALLOCABLE_HEAPS(heap) {
1638     int heap_total = 0;
1639     tty->print_cr("-- %s --", (*heap)->name());
1640     FOR_ALL_BLOBS(cb, *heap) {
1641       total++;
1642       heap_total++;
1643       if (cb->is_nmethod()) {
1644         nmethod* nm = (nmethod*)cb;
1645 
1646         tty->print("%4d: ", heap_total);
1647         CompileTask::print(tty, nm, (nm->is_not_entrant() ? "non-entrant" : ""), true, true);
1648 
1649         nmethodCount++;
1650 
1651         if (nm->is_not_entrant()) { nmethodNotEntrant++; }
1652         if (nm->method() != nullptr && nm->is_native_method()) { nmethodNative++; }
1653 
1654         if (nm->method() != nullptr && nm->is_java_method()) {
1655           nmethodJava++;
1656           max_nm_size = MAX2(max_nm_size, nm->size());
1657         }
1658       } else if (cb->is_runtime_stub()) {
1659         runtimeStubCount++;
1660       } else if (cb->is_upcall_stub()) {
1661         upcallStubCount++;
1662       } else if (cb->is_deoptimization_stub()) {
1663         deoptimizationStubCount++;
1664       } else if (cb->is_uncommon_trap_stub()) {
1665         uncommonTrapStubCount++;
1666       } else if (cb->is_exception_stub()) {
1667         exceptionStubCount++;
1668       } else if (cb->is_safepoint_stub()) {
1669         safepointStubCount++;
1670       } else if (cb->is_adapter_blob()) {
1671         adapterCount++;
1672       } else if (cb->is_method_handles_adapter_blob()) {
1673         mhAdapterCount++;
1674       } else if (cb->is_vtable_blob()) {
1675         vtableBlobCount++;
1676       } else if (cb->is_buffer_blob()) {
1677         bufferBlobCount++;
1678       }
1679     }
1680   }
1681 
1682   int bucketSize = 512;
1683   int bucketLimit = max_nm_size / bucketSize + 1;
1684   int *buckets = NEW_C_HEAP_ARRAY(int, bucketLimit, mtCode);
1685   memset(buckets, 0, sizeof(int) * bucketLimit);
1686 
1687   NMethodIterator iter(NMethodIterator::all);
1688   while (iter.next()) {
1689     nmethod* nm = iter.method();
1690     if (nm->method() != nullptr && nm->is_java_method()) {
1691       buckets[nm->size() / bucketSize]++;
1692     }
1693   }
1694 
1695   tty->print_cr("Code Cache Entries (total of %d)",total);
1696   tty->print_cr("-------------------------------------------------");
1697   tty->print_cr("nmethods: %d",nmethodCount);
1698   tty->print_cr("\tnot_entrant: %d",nmethodNotEntrant);
1699   tty->print_cr("\tjava: %d",nmethodJava);
1700   tty->print_cr("\tnative: %d",nmethodNative);
1701   tty->print_cr("runtime_stubs: %d",runtimeStubCount);
1702   tty->print_cr("upcall_stubs: %d",upcallStubCount);
1703   tty->print_cr("adapters: %d",adapterCount);
1704   tty->print_cr("MH adapters: %d",mhAdapterCount);
1705   tty->print_cr("VTables: %d",vtableBlobCount);
1706   tty->print_cr("buffer blobs: %d",bufferBlobCount);
1707   tty->print_cr("deoptimization_stubs: %d",deoptimizationStubCount);
1708   tty->print_cr("uncommon_traps: %d",uncommonTrapStubCount);
1709   tty->print_cr("exception_stubs: %d",exceptionStubCount);
1710   tty->print_cr("safepoint_stubs: %d",safepointStubCount);
1711   tty->print_cr("\nnmethod size distribution");
1712   tty->print_cr("-------------------------------------------------");
1713 
1714   for (int i = 0; i < bucketLimit; i++) {
1715     if (buckets[i] != 0) {
1716       tty->print("%d - %d bytes", i * bucketSize, (i + 1) * bucketSize);
1717       tty->fill_to(40);
1718       tty->print_cr("%d", buckets[i]);
1719     }
1720   }
1721 
1722   FREE_C_HEAP_ARRAY(int, buckets);
1723   print_memory_overhead();
1724 }
1725 
1726 #endif // !PRODUCT
1727 
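     // Print a code cache summary; in non-product builds with Verbose, also print per-compilation-level
     // blob sizes and (with WizardMode) OopMap usage.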
1728 void CodeCache::print() {
1729   print_summary(tty);
1730 
1731 #ifndef PRODUCT
1732   if (!Verbose) return;
1733 
1734   CodeBlob_sizes live[CompLevel_full_optimization + 1];
1735   CodeBlob_sizes runtimeStub;
1736   CodeBlob_sizes upcallStub;
1737   CodeBlob_sizes uncommonTrapStub;
1738   CodeBlob_sizes deoptimizationStub;
1739   CodeBlob_sizes exceptionStub;
1740   CodeBlob_sizes safepointStub;
1741   CodeBlob_sizes adapter;
1742   CodeBlob_sizes mhAdapter;
1743   CodeBlob_sizes vtableBlob;
1744   CodeBlob_sizes bufferBlob;
1745   CodeBlob_sizes other;
1746 
1747   FOR_ALL_ALLOCABLE_HEAPS(heap) {
1748     FOR_ALL_BLOBS(cb, *heap) {
1749       if (cb->is_nmethod()) {
1750         const int level = cb->as_nmethod()->comp_level();
1751         assert(0 <= level && level <= CompLevel_full_optimization, "Invalid compilation level");
1752         live[level].add(cb);
1753       } else if (cb->is_runtime_stub()) {
1754         runtimeStub.add(cb);
1755       } else if (cb->is_upcall_stub()) {
1756         upcallStub.add(cb);
1757       } else if (cb->is_deoptimization_stub()) {
1758         deoptimizationStub.add(cb);
1759       } else if (cb->is_uncommon_trap_stub()) {
1760         uncommonTrapStub.add(cb);
1761       } else if (cb->is_exception_stub()) {
1762         exceptionStub.add(cb);
1763       } else if (cb->is_safepoint_stub()) {
1764         safepointStub.add(cb);
1765       } else if (cb->is_adapter_blob()) {
1766         adapter.add(cb);
1767       } else if (cb->is_method_handles_adapter_blob()) {
1768         mhAdapter.add(cb);
1769       } else if (cb->is_vtable_blob()) {
1770         vtableBlob.add(cb);
1771       } else if (cb->is_buffer_blob()) {
1772         bufferBlob.add(cb);
1773       } else {
1774         other.add(cb);
1775       }
1776     }
1777   }
1778 
1779   tty->print_cr("nmethod dependency checking time %fs", dependentCheckTime.seconds());
1780 
1781   tty->print_cr("nmethod blobs per compilation level:");
1782   for (int i = 0; i <= CompLevel_full_optimization; i++) {
1783     const char *level_name;
1784     switch (i) {
1785     case CompLevel_none:              level_name = "none";              break;
1786     case CompLevel_simple:            level_name = "simple";            break;
1787     case CompLevel_limited_profile:   level_name = "limited profile";   break;
1788     case CompLevel_full_profile:      level_name = "full profile";      break;
1789     case CompLevel_full_optimization: level_name = "full optimization"; break;
1790     default: assert(false, "invalid compilation level");
1791     }
1792     tty->print_cr("%s:", level_name);
1793     live[i].print("live");
1794   }
1795 
1796   struct {
1797     const char* name;
1798     const CodeBlob_sizes* sizes;
1799   } non_nmethod_blobs[] = {
1800     { "runtime",        &runtimeStub },
1801     { "upcall",         &upcallStub },
1802     { "uncommon trap",  &uncommonTrapStub },
1803     { "deoptimization", &deoptimizationStub },
1804     { "exception",      &exceptionStub },
1805     { "safepoint",      &safepointStub },
1806     { "adapter",        &adapter },
1807     { "mh_adapter",     &mhAdapter },
1808     { "vtable",         &vtableBlob },
1809     { "buffer blob",    &bufferBlob },
1810     { "other",          &other },
1811   };
1812   tty->print_cr("Non-nmethod blobs:");
1813   for (auto& blob: non_nmethod_blobs) {
1814     blob.sizes->print(blob.name);
1815   }
1816 
1817   if (WizardMode) {
1818     // Print the oop_map usage.
1819     int code_size = 0;
1820     int number_of_blobs = 0;
1821     int number_of_oop_maps = 0;
1822     int map_size = 0;
1823     FOR_ALL_ALLOCABLE_HEAPS(heap) {
1824       FOR_ALL_BLOBS(cb, *heap) {
1825         number_of_blobs++;
1826         code_size += cb->code_size();
1827         ImmutableOopMapSet* set = cb->oop_maps();
1828         if (set != nullptr) {
1829           number_of_oop_maps += set->count();
1830           map_size           += set->nr_of_bytes();
1831         }
1832       }
1833     }
1834     tty->print_cr("OopMaps");
1835     tty->print_cr("  #blobs    = %d", number_of_blobs);
1836     tty->print_cr("  code size = %d", code_size);
1837     tty->print_cr("  #oop_maps = %d", number_of_oop_maps);
1838     tty->print_cr("  map size  = %d", map_size);
1839   }
1840 
1841 #endif // !PRODUCT
1842 }
1843 
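     // Print one numbered line per nmethod in each allocatable heap; not-entrant nmethods are flagged with 'N'.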
1844 void CodeCache::print_nmethods_on(outputStream* st) {
1845   ResourceMark rm;
1846   int i = 0;
1847   FOR_ALL_ALLOCABLE_HEAPS(heap) {
1848     st->print_cr("-- %s --", (*heap)->name());
1849     FOR_ALL_BLOBS(cb, *heap) {
1850       i++;
1851       if (cb->is_nmethod()) {
1852         nmethod* nm = (nmethod*)cb;
1853         st->print("%4d: ", i);
1854         CompileTask::print(st, nm, nullptr, true, false);
1855 
1856         const char non_entrant_char = (nm->is_not_entrant() ? 'N' : ' ');
1857         st->print_cr(" %c", non_entrant_char);
1858       }
1859     }
1860   }
1861 }
1862 
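     // Print size/used/max_used/free for every code heap; with 'detailed', also print heap bounds,
     // overall totals, blob counts and compiler state.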
1863 void CodeCache::print_summary(outputStream* st, bool detailed) {
1864   int full_count = 0;
1865   julong total_used = 0;
1866   julong total_max_used = 0;
1867   julong total_free = 0;
1868   julong total_size = 0;
1869   FOR_ALL_HEAPS(heap_iterator) {
1870     CodeHeap* heap = (*heap_iterator);
1871     size_t total = (heap->high_boundary() - heap->low_boundary());
1872     if (_heaps->length() >= 1) {
1873       st->print("%s:", heap->name());
1874     } else {
1875       st->print("CodeCache:");
1876     }
1877     size_t size = total/K;
1878     size_t used = (total - heap->unallocated_capacity())/K;
1879     size_t max_used = heap->max_allocated_capacity()/K;
1880     size_t free = heap->unallocated_capacity()/K;
1881     total_size += size;
1882     total_used += used;
1883     total_max_used += max_used;
1884     total_free += free;
1885     st->print_cr(" size=%zuKb used=%zuKb"
1886                  " max_used=%zuKb free=%zuKb",
1887                  size, used, max_used, free);
1888 
1889     if (detailed) {
1890       st->print_cr(" bounds [" INTPTR_FORMAT ", " INTPTR_FORMAT ", " INTPTR_FORMAT "]",
1891                    p2i(heap->low_boundary()),
1892                    p2i(heap->high()),
1893                    p2i(heap->high_boundary()));
1894 
1895       full_count += get_codemem_full_count(heap->code_blob_type());
1896     }
1897   }
1898 
1899   if (detailed) {
1900     if (SegmentedCodeCache) {
1901       st->print("CodeCache:");
1902       st->print_cr(" size=" JULONG_FORMAT "Kb, used=" JULONG_FORMAT
1903                    "Kb, max_used=" JULONG_FORMAT "Kb, free=" JULONG_FORMAT "Kb",
1904                    total_size, total_used, total_max_used, total_free);
1905     }
1906     st->print_cr(" total_blobs=" UINT32_FORMAT ", nmethods=" UINT32_FORMAT
1907                  ", adapters=" UINT32_FORMAT ", full_count=" UINT32_FORMAT,
1908                  blob_count(), nmethod_count(), adapter_count(), full_count);
1909     st->print_cr("Compilation: %s, stopped_count=%d, restarted_count=%d",
1910                  CompileBroker::should_compile_new_jobs() ?
1911                  "enabled" : Arguments::mode() == Arguments::_int ?
1912                  "disabled (interpreter mode)" :
1913                  "disabled (not enough contiguous free space left)",
1914                  CompileBroker::get_total_compiler_stopped_count(),
1915                  CompileBroker::get_total_compiler_restarted_count());
1916   }
1917 }
1918 
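     // Print one line per not-unloading nmethod: compile id, compilation level, state, name and code bounds.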
1919 void CodeCache::print_codelist(outputStream* st) {
1920   MutexLocker mu(CodeCache_lock, Mutex::_no_safepoint_check_flag);
1921 
1922   NMethodIterator iter(NMethodIterator::not_unloading);
1923   while (iter.next()) {
1924     nmethod* nm = iter.method();
1925     ResourceMark rm;
1926     char* method_name = nm->method()->name_and_sig_as_C_string();
1927     const char* jvmci_name = nullptr;
1928 #if INCLUDE_JVMCI
1929     jvmci_name = nm->jvmci_name();
1930 #endif
1931     st->print_cr("%d %d %d %s%s%s [" INTPTR_FORMAT ", " INTPTR_FORMAT " - " INTPTR_FORMAT "]",
1932                  nm->compile_id(), nm->comp_level(), nm->get_state(),
1933                  method_name, jvmci_name ? " jvmci_name=" : "", jvmci_name ? jvmci_name : "",
1934                  (intptr_t)nm->header_begin(), (intptr_t)nm->code_begin(), (intptr_t)nm->code_end());
1935   }
1936 }
1937 
1938 void CodeCache::print_layout(outputStream* st) {
1939   MutexLocker mu(CodeCache_lock, Mutex::_no_safepoint_check_flag);
1940   ResourceMark rm;
1941   print_summary(st, true);
1942 }
1943 
1944 void CodeCache::log_state(outputStream* st) {
1945   st->print(" total_blobs='" UINT32_FORMAT "' nmethods='" UINT32_FORMAT "'"
1946             " adapters='" UINT32_FORMAT "' free_code_cache='%zu'",
1947             blob_count(), nmethod_count(), adapter_count(),
1948             unallocated_capacity());
1949 }
1950 
1951 #ifdef LINUX
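     // Write code blob address ranges and names to the given file (or to DEFAULT_PERFMAP_FILENAME with
     // pid substitution) for use with the Linux perf tool.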
1952 void CodeCache::write_perf_map(const char* filename, outputStream* st) {
1953   MutexLocker mu(CodeCache_lock, Mutex::_no_safepoint_check_flag);
1954   char fname[JVM_MAXPATHLEN];
1955   if (filename == nullptr) {
1956     // Invocation outside of jcmd requires pid substitution.
1957     if (!Arguments::copy_expand_pid(DEFAULT_PERFMAP_FILENAME,
1958                                     strlen(DEFAULT_PERFMAP_FILENAME),
1959                                     fname, JVM_MAXPATHLEN)) {
1960       st->print_cr("Warning: Not writing perf map as pid substitution failed.");
1961       return;
1962     }
1963     filename = fname;
1964   }
1965   fileStream fs(filename, "w");
1966   if (!fs.is_open()) {
1967     st->print_cr("Warning: Failed to create %s for perf map", filename);
1968     return;
1969   }
1970 
1971   AllCodeBlobsIterator iter(AllCodeBlobsIterator::not_unloading);
1972   while (iter.next()) {
1973     CodeBlob *cb = iter.method();
1974     ResourceMark rm;
1975     const char* method_name = nullptr;
1976     const char* jvmci_name = nullptr;
1977     if (cb->is_nmethod()) {
1978       nmethod* nm = cb->as_nmethod();
1979       method_name = nm->method()->external_name();
1980 #if INCLUDE_JVMCI
1981       jvmci_name = nm->jvmci_name();
1982 #endif
1983     } else {
1984       method_name = cb->name();
1985     }
1986     fs.print_cr(INTPTR_FORMAT " " INTPTR_FORMAT " %s%s%s",
1987                 (intptr_t)cb->code_begin(), (intptr_t)cb->code_size(),
1988                 method_name, jvmci_name ? " jvmci_name=" : "", jvmci_name ? jvmci_name : "");
1989   }
1990 }
1991 #endif // LINUX
1992 
1993 //---<  BEGIN  >--- CodeHeap State Analytics.
1994 
1995 void CodeCache::aggregate(outputStream *out, size_t granularity) {
1996   FOR_ALL_ALLOCABLE_HEAPS(heap) {
1997     CodeHeapState::aggregate(out, (*heap), granularity);
1998   }
1999 }
2000 
2001 void CodeCache::discard(outputStream *out) {
2002   FOR_ALL_ALLOCABLE_HEAPS(heap) {
2003     CodeHeapState::discard(out, (*heap));
2004   }
2005 }
2006 
2007 void CodeCache::print_usedSpace(outputStream *out) {
2008   FOR_ALL_ALLOCABLE_HEAPS(heap) {
2009     CodeHeapState::print_usedSpace(out, (*heap));
2010   }
2011 }
2012 
2013 void CodeCache::print_freeSpace(outputStream *out) {
2014   FOR_ALL_ALLOCABLE_HEAPS(heap) {
2015     CodeHeapState::print_freeSpace(out, (*heap));
2016   }
2017 }
2018 
2019 void CodeCache::print_count(outputStream *out) {
2020   FOR_ALL_ALLOCABLE_HEAPS(heap) {
2021     CodeHeapState::print_count(out, (*heap));
2022   }
2023 }
2024 
2025 void CodeCache::print_space(outputStream *out) {
2026   FOR_ALL_ALLOCABLE_HEAPS(heap) {
2027     CodeHeapState::print_space(out, (*heap));
2028   }
2029 }
2030 
2031 void CodeCache::print_age(outputStream *out) {
2032   FOR_ALL_ALLOCABLE_HEAPS(heap) {
2033     CodeHeapState::print_age(out, (*heap));
2034   }
2035 }
2036 
2037 void CodeCache::print_names(outputStream *out) {
2038   FOR_ALL_ALLOCABLE_HEAPS(heap) {
2039     CodeHeapState::print_names(out, (*heap));
2040   }
2041 }
2042 //---<  END  >--- CodeHeap State Analytics.