1 /*
   2  * Copyright (c) 1997, 2023, Oracle and/or its affiliates. All rights reserved.
   3  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
   4  *
   5  * This code is free software; you can redistribute it and/or modify it
   6  * under the terms of the GNU General Public License version 2 only, as
   7  * published by the Free Software Foundation.
   8  *
   9  * This code is distributed in the hope that it will be useful, but WITHOUT
  10  * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
  11  * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
  12  * version 2 for more details (a copy is included in the LICENSE file that
  13  * accompanied this code).
  14  *
  15  * You should have received a copy of the GNU General Public License version
  16  * 2 along with this work; if not, write to the Free Software Foundation,
  17  * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
  18  *
  19  * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
  20  * or visit www.oracle.com if you need additional information or have any
  21  * questions.
  22  *
  23  */
  24 
  25 #include "precompiled.hpp"
  26 #include "cds/cdsAccess.hpp"
  27 #include "code/codeBlob.hpp"
  28 #include "code/codeCache.hpp"
  29 #include "code/codeHeapState.hpp"
  30 #include "code/compiledIC.hpp"
  31 #include "code/dependencies.hpp"
  32 #include "code/dependencyContext.hpp"
  33 #include "code/nmethod.hpp"
  34 #include "code/pcDesc.hpp"
  35 #include "code/SCCache.hpp"
  36 #include "compiler/compilationPolicy.hpp"
  37 #include "compiler/compileBroker.hpp"
  38 #include "compiler/compilerDefinitions.inline.hpp"
  39 #include "compiler/oopMap.hpp"
  40 #include "gc/shared/barrierSetNMethod.hpp"
  41 #include "gc/shared/classUnloadingContext.hpp"
  42 #include "gc/shared/collectedHeap.hpp"
  43 #include "jfr/jfrEvents.hpp"
  44 #include "jvm_io.h"
  45 #include "logging/log.hpp"
  46 #include "logging/logStream.hpp"
  47 #include "memory/allocation.inline.hpp"
  48 #include "memory/iterator.hpp"
  49 #include "memory/resourceArea.hpp"
  50 #include "memory/universe.hpp"
  51 #include "oops/method.inline.hpp"
  52 #include "oops/objArrayOop.hpp"
  53 #include "oops/oop.inline.hpp"
  54 #include "oops/verifyOopClosure.hpp"
  55 #include "runtime/arguments.hpp"
  56 #include "runtime/atomic.hpp"
  57 #include "runtime/deoptimization.hpp"
  58 #include "runtime/globals_extension.hpp"
  59 #include "runtime/handles.inline.hpp"
  60 #include "runtime/icache.hpp"
  61 #include "runtime/init.hpp"
  62 #include "runtime/java.hpp"
  63 #include "runtime/mutexLocker.hpp"
  64 #include "runtime/os.inline.hpp"
  65 #include "runtime/safepointVerifiers.hpp"
  66 #include "runtime/vmThread.hpp"
  67 #include "sanitizers/leak.hpp"
  68 #include "services/memoryService.hpp"
  69 #include "utilities/align.hpp"
  70 #include "utilities/vmError.hpp"
  71 #include "utilities/xmlstream.hpp"
  72 #ifdef COMPILER1
  73 #include "c1/c1_Compilation.hpp"
  74 #include "c1/c1_Compiler.hpp"
  75 #endif
  76 #ifdef COMPILER2
  77 #include "opto/c2compiler.hpp"
  78 #include "opto/compile.hpp"
  79 #include "opto/node.hpp"
  80 #endif
  81 
  82 // Helper class for printing in CodeCache
  83 class CodeBlob_sizes {
  84  private:
  85   int count;
  86   int total_size;
  87   int header_size;
  88   int code_size;
  89   int stub_size;
  90   int relocation_size;
  91   int scopes_oop_size;
  92   int scopes_metadata_size;
  93   int scopes_data_size;
  94   int scopes_pcs_size;
  95 
  96  public:
  97   CodeBlob_sizes() {
    count                = 0;
    total_size           = 0;
    header_size          = 0;
    code_size            = 0;
    stub_size            = 0;
    relocation_size      = 0;
    scopes_oop_size      = 0;
    scopes_metadata_size = 0;
    scopes_data_size     = 0;
    scopes_pcs_size      = 0;
 108   }
 109 
 110   int total() const                              { return total_size; }
 111   bool is_empty() const                          { return count == 0; }
 112 
 113   void print(const char* title) const {
 114     if (is_empty()) {
 115       tty->print_cr(" #%d %s = %dK",
 116                     count,
 117                     title,
 118                     total()                 / (int)K);
 119     } else {
 120       tty->print_cr(" #%d %s = %dK (hdr %dK %d%%, loc %dK %d%%, code %dK %d%%, stub %dK %d%%, [oops %dK %d%%, metadata %dK %d%%, data %dK %d%%, pcs %dK %d%%])",
 121                     count,
 122                     title,
 123                     total()                 / (int)K,
 124                     header_size             / (int)K,
 125                     header_size             * 100 / total_size,
 126                     relocation_size         / (int)K,
 127                     relocation_size         * 100 / total_size,
 128                     code_size               / (int)K,
 129                     code_size               * 100 / total_size,
 130                     stub_size               / (int)K,
 131                     stub_size               * 100 / total_size,
 132                     scopes_oop_size         / (int)K,
 133                     scopes_oop_size         * 100 / total_size,
 134                     scopes_metadata_size    / (int)K,
 135                     scopes_metadata_size    * 100 / total_size,
 136                     scopes_data_size        / (int)K,
 137                     scopes_data_size        * 100 / total_size,
 138                     scopes_pcs_size         / (int)K,
 139                     scopes_pcs_size         * 100 / total_size);
 140     }
 141   }
 142 
 143   void add(CodeBlob* cb) {
 144     count++;
 145     total_size       += cb->size();
 146     header_size      += cb->header_size();
 147     relocation_size  += cb->relocation_size();
 148     if (cb->is_nmethod()) {
 149       nmethod* nm = cb->as_nmethod_or_null();
 150       code_size        += nm->insts_size();
 151       stub_size        += nm->stub_size();
 152 
      scopes_oop_size      += nm->oops_size();
      scopes_metadata_size += nm->metadata_size();
      scopes_data_size     += nm->scopes_data_size();
      scopes_pcs_size      += nm->scopes_pcs_size();
 157     } else {
 158       code_size        += cb->code_size();
 159     }
 160   }
 161 };
 162 
 163 // Iterate over all CodeHeaps
 164 #define FOR_ALL_HEAPS(heap) for (GrowableArrayIterator<CodeHeap*> heap = _heaps->begin(); heap != _heaps->end(); ++heap)
 165 #define FOR_ALL_NMETHOD_HEAPS(heap) for (GrowableArrayIterator<CodeHeap*> heap = _nmethod_heaps->begin(); heap != _nmethod_heaps->end(); ++heap)
 166 #define FOR_ALL_ALLOCABLE_HEAPS(heap) for (GrowableArrayIterator<CodeHeap*> heap = _allocable_heaps->begin(); heap != _allocable_heaps->end(); ++heap)
 167 
 168 // Iterate over all CodeBlobs (cb) on the given CodeHeap
 169 #define FOR_ALL_BLOBS(cb, heap) for (CodeBlob* cb = first_blob(heap); cb != nullptr; cb = next_blob(heap, cb))
 170 
 171 address CodeCache::_low_bound = 0;
 172 address CodeCache::_high_bound = 0;
 173 volatile int CodeCache::_number_of_nmethods_with_dependencies = 0;
 174 ExceptionCache* volatile CodeCache::_exception_cache_purge_list = nullptr;
 175 
 176 static ReservedSpace _cds_code_space;
 177 
 178 // Initialize arrays of CodeHeap subsets
 179 GrowableArray<CodeHeap*>* CodeCache::_heaps = new(mtCode) GrowableArray<CodeHeap*> (static_cast<int>(CodeBlobType::All), mtCode);
 180 GrowableArray<CodeHeap*>* CodeCache::_compiled_heaps = new(mtCode) GrowableArray<CodeHeap*> (static_cast<int>(CodeBlobType::All), mtCode);
 181 GrowableArray<CodeHeap*>* CodeCache::_nmethod_heaps = new(mtCode) GrowableArray<CodeHeap*> (static_cast<int>(CodeBlobType::All), mtCode);
 182 GrowableArray<CodeHeap*>* CodeCache::_allocable_heaps = new(mtCode) GrowableArray<CodeHeap*> (static_cast<int>(CodeBlobType::All), mtCode);
 183 
 184 void CodeCache::check_heap_sizes(size_t non_nmethod_size, size_t profiled_size, size_t non_profiled_size, size_t cache_size, bool all_set) {
 185   size_t total_size = non_nmethod_size + profiled_size + non_profiled_size;
 186   // Prepare error message
 187   const char* error = "Invalid code heap sizes";
 188   err_msg message("NonNMethodCodeHeapSize (" SIZE_FORMAT "K) + ProfiledCodeHeapSize (" SIZE_FORMAT "K)"
 189                   " + NonProfiledCodeHeapSize (" SIZE_FORMAT "K) = " SIZE_FORMAT "K",
 190           non_nmethod_size/K, profiled_size/K, non_profiled_size/K, total_size/K);
 191 
 192   if (total_size > cache_size) {
 193     // Some code heap sizes were explicitly set: total_size must be <= cache_size
 194     message.append(" is greater than ReservedCodeCacheSize (" SIZE_FORMAT "K).", cache_size/K);
 195     vm_exit_during_initialization(error, message);
 196   } else if (all_set && total_size != cache_size) {
 197     // All code heap sizes were explicitly set: total_size must equal cache_size
 198     message.append(" is not equal to ReservedCodeCacheSize (" SIZE_FORMAT "K).", cache_size/K);
 199     vm_exit_during_initialization(error, message);
 200   }
 201 }
 202 
 203 void CodeCache::initialize_heaps() {
 204   bool non_nmethod_set      = FLAG_IS_CMDLINE(NonNMethodCodeHeapSize);
 205   bool profiled_set         = FLAG_IS_CMDLINE(ProfiledCodeHeapSize);
 206   bool non_profiled_set     = FLAG_IS_CMDLINE(NonProfiledCodeHeapSize);
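  // Use the largest page size that still allows at least 8 pages in the
  // reserved code cache (see page_size() below).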
 207   const size_t ps           = page_size(false, 8);
 208   const size_t min_size     = MAX2(os::vm_allocation_granularity(), ps);
 209   size_t cache_size         = ReservedCodeCacheSize;
 210   size_t non_nmethod_size   = NonNMethodCodeHeapSize;
 211   size_t profiled_size      = ProfiledCodeHeapSize;
 212   size_t non_profiled_size  = NonProfiledCodeHeapSize;
 213   // Check if total size set via command line flags exceeds the reserved size
 214   check_heap_sizes((non_nmethod_set  ? non_nmethod_size  : min_size),
 215                    (profiled_set     ? profiled_size     : min_size),
 216                    (non_profiled_set ? non_profiled_size : min_size),
 217                    cache_size,
 218                    non_nmethod_set && profiled_set && non_profiled_set);
 219 
 220   // Determine size of compiler buffers
 221   size_t code_buffers_size = 0;
 222 #ifdef COMPILER1
 223   // C1 temporary code buffers (see Compiler::init_buffer_blob())
 224   const int c1_count = CompilationPolicy::c1_count();
 225   code_buffers_size += c1_count * Compiler::code_buffer_size();
 226 #endif
 227 #ifdef COMPILER2
 228   // C2 scratch buffers (see Compile::init_scratch_buffer_blob())
 229   const int c2_count = CompilationPolicy::c2_count() + CompilationPolicy::c3_count();
 230   // Initial size of constant table (this may be increased if a compiled method needs more space)
 231   code_buffers_size += c2_count * C2Compiler::initial_code_buffer_size();
 232 #endif
 233 
 234   // Increase default non_nmethod_size to account for compiler buffers
 235   if (!non_nmethod_set) {
 236     non_nmethod_size += code_buffers_size;
 237   }
 238   // Calculate default CodeHeap sizes if not set by user
 239   if (!non_nmethod_set && !profiled_set && !non_profiled_set) {
 240     // Leave room for the other two parts of the code cache
 241     const size_t max_non_nmethod_size = cache_size - 2 * min_size;
 242     // Check if we have enough space for the non-nmethod code heap
 243     if (max_non_nmethod_size >= non_nmethod_size) {
 244       // Use the default value for non_nmethod_size and one half of the
 245       // remaining size for non-profiled and one half for profiled methods
 246       size_t remaining_size = cache_size - non_nmethod_size;
 247       profiled_size = remaining_size / 2;
 248       non_profiled_size = remaining_size - profiled_size;
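      // For example (hypothetical numbers): with a 240 MB reserved code cache
      // and a 24 MB non-nmethod heap, the profiled and non-profiled heaps get
      // 108 MB each.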
 249     } else {
 250       // Use all space for the non-nmethod heap and set other heaps to minimal size
 251       non_nmethod_size = max_non_nmethod_size;
 252       profiled_size = min_size;
 253       non_profiled_size = min_size;
 254     }
 255   } else if (!non_nmethod_set || !profiled_set || !non_profiled_set) {
 256     // The user explicitly set some code heap sizes. Increase or decrease the (default)
 257     // sizes of the other code heaps accordingly. First adapt non-profiled and profiled
 258     // code heap sizes and then only change non-nmethod code heap size if still necessary.
 259     intx diff_size = cache_size - (non_nmethod_size + profiled_size + non_profiled_size);
 260     if (non_profiled_set) {
 261       if (!profiled_set) {
 262         // Adapt size of profiled code heap
 263         if (diff_size < 0 && ((intx)profiled_size + diff_size) <= 0) {
 264           // Not enough space available, set to minimum size
 265           diff_size += profiled_size - min_size;
 266           profiled_size = min_size;
 267         } else {
 268           profiled_size += diff_size;
 269           diff_size = 0;
 270         }
 271       }
 272     } else if (profiled_set) {
 273       // Adapt size of non-profiled code heap
 274       if (diff_size < 0 && ((intx)non_profiled_size + diff_size) <= 0) {
 275         // Not enough space available, set to minimum size
 276         diff_size += non_profiled_size - min_size;
 277         non_profiled_size = min_size;
 278       } else {
 279         non_profiled_size += diff_size;
 280         diff_size = 0;
 281       }
 282     } else if (non_nmethod_set) {
 283       // Distribute remaining size between profiled and non-profiled code heaps
 284       diff_size = cache_size - non_nmethod_size;
 285       profiled_size = diff_size / 2;
 286       non_profiled_size = diff_size - profiled_size;
 287       diff_size = 0;
 288     }
 289     if (diff_size != 0) {
 290       // Use non-nmethod code heap for remaining space requirements
 291       assert(!non_nmethod_set && ((intx)non_nmethod_size + diff_size) > 0, "sanity");
 292       non_nmethod_size += diff_size;
 293     }
 294   }
 295 
 296   // We do not need the profiled CodeHeap, use all space for the non-profiled CodeHeap
 297   if (!heap_available(CodeBlobType::MethodProfiled)) {
 298     non_profiled_size += profiled_size;
 299     profiled_size = 0;
 300   }
 301   // We do not need the non-profiled CodeHeap, use all space for the non-nmethod CodeHeap
 302   if (!heap_available(CodeBlobType::MethodNonProfiled)) {
 303     non_nmethod_size += non_profiled_size;
 304     non_profiled_size = 0;
 305   }
 306   // Make sure we have enough space for VM internal code
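  // (the minimum is tripled in debug builds, which generate considerably more
  // VM internal code)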
 307   uint min_code_cache_size = CodeCacheMinimumUseSpace DEBUG_ONLY(* 3);
 308   if (non_nmethod_size < min_code_cache_size) {
 309     vm_exit_during_initialization(err_msg(
 310         "Not enough space in non-nmethod code heap to run VM: " SIZE_FORMAT "K < " SIZE_FORMAT "K",
 311         non_nmethod_size/K, min_code_cache_size/K));
 312   }
 313 
 314   // Verify sizes and update flag values
 315   assert(non_profiled_size + profiled_size + non_nmethod_size == cache_size, "Invalid code heap sizes");
 316   FLAG_SET_ERGO(NonNMethodCodeHeapSize, non_nmethod_size);
 317   FLAG_SET_ERGO(ProfiledCodeHeapSize, profiled_size);
 318   FLAG_SET_ERGO(NonProfiledCodeHeapSize, non_profiled_size);
 319 
 320   // Print warning if using large pages but not able to use the size given
 321   if (UseLargePages) {
 322     const size_t lg_ps = page_size(false, 1);
 323     if (ps < lg_ps) {
 324       log_warning(codecache)("Code cache size too small for " PROPERFMT " pages. "
 325                              "Reverting to smaller page size (" PROPERFMT ").",
 326                              PROPERFMTARGS(lg_ps), PROPERFMTARGS(ps));
 327     }
 328   }
 329 
 330   // Note: if large page support is enabled, min_size is at least the large
 331   // page size. This ensures that the code cache is covered by large pages.
 332   non_nmethod_size = align_up(non_nmethod_size, min_size);
 333   profiled_size    = align_down(profiled_size, min_size);
 334   non_profiled_size = align_down(non_profiled_size, min_size);
 335 
 336   const size_t cds_code_size = align_up(CDSAccess::get_cached_code_size(), min_size);
 337   cache_size += cds_code_size;
 338 
 339   // Reserve one continuous chunk of memory for CodeHeaps and split it into
 340   // parts for the individual heaps. The memory layout looks like this:
 341   // ---------- high -----------
 342   //    Non-profiled nmethods
 343   //         Non-nmethods
 344   //      Profiled nmethods
 345   // ---------- low ------------
 346   ReservedCodeSpace rs = reserve_heap_memory(cache_size, ps);
 347   _cds_code_space                   = rs.first_part(cds_code_size);
 348   ReservedSpace rest                = rs.last_part(cds_code_size);
 349   ReservedSpace profiled_space      = rest.first_part(profiled_size);
 350   ReservedSpace rest2               = rest.last_part(profiled_size);
 351   ReservedSpace non_method_space    = rest2.first_part(non_nmethod_size);
 352   ReservedSpace non_profiled_space  = rest2.last_part(non_nmethod_size);
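  // Note: the (possibly empty) CDS code space is carved out at the low end of
  // the reservation, below the profiled heap shown in the layout above.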
 353 
 354   // Register CodeHeaps with LSan as we sometimes embed pointers to malloc memory.
 355   LSAN_REGISTER_ROOT_REGION(rs.base(), rs.size());
 356 
 357   // Non-nmethods (stubs, adapters, ...)
 358   add_heap(non_method_space, "CodeHeap 'non-nmethods'", CodeBlobType::NonNMethod);
 359   // Tier 2 and tier 3 (profiled) methods
 360   add_heap(profiled_space, "CodeHeap 'profiled nmethods'", CodeBlobType::MethodProfiled);
 361   // Tier 1 and tier 4 (non-profiled) methods and native methods
 362   add_heap(non_profiled_space, "CodeHeap 'non-profiled nmethods'", CodeBlobType::MethodNonProfiled);
 363 }
 364 
 365 void* CodeCache::map_cached_code() {
 366   if (_cds_code_space.size() > 0 && CDSAccess::map_cached_code(_cds_code_space)) {
 367     return _cds_code_space.base();
 368   } else {
 369     return nullptr;
 370   }
 371 }
 372 
 373 size_t CodeCache::page_size(bool aligned, size_t min_pages) {
 374   return aligned ? os::page_size_for_region_aligned(ReservedCodeCacheSize, min_pages) :
 375                    os::page_size_for_region_unaligned(ReservedCodeCacheSize, min_pages);
 376 }
 377 
 378 ReservedCodeSpace CodeCache::reserve_heap_memory(size_t size, size_t rs_ps) {
 379   // Align and reserve space for code cache
 380   const size_t rs_align = MAX2(rs_ps, os::vm_allocation_granularity());
 381   const size_t rs_size = align_up(size, rs_align);
 382   ReservedCodeSpace rs(rs_size, rs_align, rs_ps);
 383   if (!rs.is_reserved()) {
 384     vm_exit_during_initialization(err_msg("Could not reserve enough space for code cache (" SIZE_FORMAT "K)",
 385                                           rs_size/K));
 386   }
 387 
 388   // Initialize bounds
 389   _low_bound = (address)rs.base();
 390   _high_bound = _low_bound + rs.size();
 391   return rs;
 392 }
 393 
 394 // Heaps available for allocation
 395 bool CodeCache::heap_available(CodeBlobType code_blob_type) {
 396   if (!SegmentedCodeCache) {
 397     // No segmentation: use a single code heap
 398     return (code_blob_type == CodeBlobType::All);
 399   } else if (CompilerConfig::is_interpreter_only()) {
 400     // Interpreter only: we don't need any method code heaps
 401     return (code_blob_type == CodeBlobType::NonNMethod);
 402   } else if (CompilerConfig::is_c1_profiling()) {
 403     // Tiered compilation: use all code heaps
 404     return (code_blob_type < CodeBlobType::All);
 405   } else {
 406     // No TieredCompilation: we only need the non-nmethod and non-profiled code heap
 407     return (code_blob_type == CodeBlobType::NonNMethod) ||
 408            (code_blob_type == CodeBlobType::MethodNonProfiled);
 409   }
 410 }
 411 
 412 const char* CodeCache::get_code_heap_flag_name(CodeBlobType code_blob_type) {
 413   switch(code_blob_type) {
 414   case CodeBlobType::NonNMethod:
 415     return "NonNMethodCodeHeapSize";
 416     break;
 417   case CodeBlobType::MethodNonProfiled:
 418     return "NonProfiledCodeHeapSize";
 419     break;
 420   case CodeBlobType::MethodProfiled:
 421     return "ProfiledCodeHeapSize";
 422     break;
 423   default:
 424     ShouldNotReachHere();
 425     return nullptr;
 426   }
 427 }
 428 
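// Comparator used to keep the CodeHeap lists above sorted: order by
// CodeBlobType first and by heap address second.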
 429 int CodeCache::code_heap_compare(CodeHeap* const &lhs, CodeHeap* const &rhs) {
 430   if (lhs->code_blob_type() == rhs->code_blob_type()) {
 431     return (lhs > rhs) ? 1 : ((lhs < rhs) ? -1 : 0);
 432   } else {
 433     return static_cast<int>(lhs->code_blob_type()) - static_cast<int>(rhs->code_blob_type());
 434   }
 435 }
 436 
 437 void CodeCache::add_heap(CodeHeap* heap) {
 438   assert(!Universe::is_fully_initialized(), "late heap addition?");
 439 
 440   _heaps->insert_sorted<code_heap_compare>(heap);
 441 
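  // The subset lists below are not mutually exclusive: the heap is added to
  // every list whose predicate accepts its CodeBlobType.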
 442   CodeBlobType type = heap->code_blob_type();
 443   if (code_blob_type_accepts_compiled(type)) {
 444     _compiled_heaps->insert_sorted<code_heap_compare>(heap);
 445   }
 446   if (code_blob_type_accepts_nmethod(type)) {
 447     _nmethod_heaps->insert_sorted<code_heap_compare>(heap);
 448   }
 449   if (code_blob_type_accepts_allocable(type)) {
 450     _allocable_heaps->insert_sorted<code_heap_compare>(heap);
 451   }
 452 }
 453 
 454 void CodeCache::add_heap(ReservedSpace rs, const char* name, CodeBlobType code_blob_type) {
 455   // Check if heap is needed
 456   if (!heap_available(code_blob_type)) {
 457     return;
 458   }
 459 
 460   // Create CodeHeap
 461   CodeHeap* heap = new CodeHeap(name, code_blob_type);
 462   add_heap(heap);
 463 
 464   // Reserve Space
 465   size_t size_initial = MIN2((size_t)InitialCodeCacheSize, rs.size());
 466   size_initial = align_up(size_initial, os::vm_page_size());
 467   if (!heap->reserve(rs, size_initial, CodeCacheSegmentSize)) {
 468     vm_exit_during_initialization(err_msg("Could not reserve enough space in %s (" SIZE_FORMAT "K)",
 469                                           heap->name(), size_initial/K));
 470   }
 471 
 472   // Register the CodeHeap
 473   MemoryService::add_code_heap_memory_pool(heap, name);
 474 }
 475 
 476 CodeHeap* CodeCache::get_code_heap_containing(void* start) {
 477   FOR_ALL_HEAPS(heap) {
 478     if ((*heap)->contains(start)) {
 479       return *heap;
 480     }
 481   }
 482   return nullptr;
 483 }
 484 
 485 CodeHeap* CodeCache::get_code_heap(const void* cb) {
 486   assert(cb != nullptr, "CodeBlob is null");
 487   FOR_ALL_HEAPS(heap) {
 488     if ((*heap)->contains(cb)) {
 489       return *heap;
 490     }
 491   }
 492   ShouldNotReachHere();
 493   return nullptr;
 494 }
 495 
 496 CodeHeap* CodeCache::get_code_heap(CodeBlobType code_blob_type) {
 497   FOR_ALL_HEAPS(heap) {
 498     if ((*heap)->accepts(code_blob_type)) {
 499       return *heap;
 500     }
 501   }
 502   return nullptr;
 503 }
 504 
 505 CodeBlob* CodeCache::first_blob(CodeHeap* heap) {
 506   assert_locked_or_safepoint(CodeCache_lock);
 507   assert(heap != nullptr, "heap is null");
 508   return (CodeBlob*)heap->first();
 509 }
 510 
 511 CodeBlob* CodeCache::first_blob(CodeBlobType code_blob_type) {
 512   if (heap_available(code_blob_type)) {
 513     return first_blob(get_code_heap(code_blob_type));
 514   } else {
 515     return nullptr;
 516   }
 517 }
 518 
 519 CodeBlob* CodeCache::next_blob(CodeHeap* heap, CodeBlob* cb) {
 520   assert_locked_or_safepoint(CodeCache_lock);
 521   assert(heap != nullptr, "heap is null");
 522   return (CodeBlob*)heap->next(cb);
 523 }
 524 
/**
 * Do not seize the CodeCache lock here -- if the caller has not
 * already done so, we are going to lose badly, since the code
 * cache will contain a garbage CodeBlob until the caller can
 * run the constructor for the CodeBlob subclass it is busy
 * instantiating.
 */
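// handle_alloc_failure lets a failed allocation notify the CompileBroker (the
// CodeCache_lock is temporarily dropped to do so); orig_code_blob_type records
// the heap originally requested, so the segmented fallback chain terminates and
// error reporting names the right code heap.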
 532 CodeBlob* CodeCache::allocate(uint size, CodeBlobType code_blob_type, bool handle_alloc_failure, CodeBlobType orig_code_blob_type) {
 533   assert_locked_or_safepoint(CodeCache_lock);
 534   assert(size > 0, "Code cache allocation request must be > 0");
 535   if (size == 0) {
 536     return nullptr;
 537   }
 538   CodeBlob* cb = nullptr;
 539 
 540   // Get CodeHeap for the given CodeBlobType
 541   CodeHeap* heap = get_code_heap(code_blob_type);
 542   assert(heap != nullptr, "heap is null");
 543 
 544   while (true) {
 545     cb = (CodeBlob*)heap->allocate(size);
 546     if (cb != nullptr) break;
 547     if (!heap->expand_by(CodeCacheExpansionSize)) {
 548       // Save original type for error reporting
 549       if (orig_code_blob_type == CodeBlobType::All) {
 550         orig_code_blob_type = code_blob_type;
 551       }
 552       // Expansion failed
 553       if (SegmentedCodeCache) {
 554         // Fallback solution: Try to store code in another code heap.
 555         // NonNMethod -> MethodNonProfiled -> MethodProfiled (-> MethodNonProfiled)
 556         CodeBlobType type = code_blob_type;
 557         switch (type) {
 558         case CodeBlobType::NonNMethod:
 559           type = CodeBlobType::MethodNonProfiled;
 560           break;
 561         case CodeBlobType::MethodNonProfiled:
 562           type = CodeBlobType::MethodProfiled;
 563           break;
 564         case CodeBlobType::MethodProfiled:
 565           // Avoid loop if we already tried that code heap
 566           if (type == orig_code_blob_type) {
 567             type = CodeBlobType::MethodNonProfiled;
 568           }
 569           break;
 570         default:
 571           break;
 572         }
 573         if (type != code_blob_type && type != orig_code_blob_type && heap_available(type)) {
 574           if (PrintCodeCacheExtension) {
 575             tty->print_cr("Extension of %s failed. Trying to allocate in %s.",
 576                           heap->name(), get_code_heap(type)->name());
 577           }
 578           return allocate(size, type, handle_alloc_failure, orig_code_blob_type);
 579         }
 580       }
 581       if (handle_alloc_failure) {
 582         MutexUnlocker mu(CodeCache_lock, Mutex::_no_safepoint_check_flag);
 583         CompileBroker::handle_full_code_cache(orig_code_blob_type);
 584       }
 585       return nullptr;
 586     } else {
 587       OrderAccess::release(); // ensure heap expansion is visible to an asynchronous observer (e.g. CodeHeapPool::get_memory_usage())
 588     }
 589     if (PrintCodeCacheExtension) {
 590       ResourceMark rm;
 591       if (_nmethod_heaps->length() >= 1) {
 592         tty->print("%s", heap->name());
 593       } else {
 594         tty->print("CodeCache");
 595       }
 596       tty->print_cr(" extended to [" INTPTR_FORMAT ", " INTPTR_FORMAT "] (" SSIZE_FORMAT " bytes)",
 597                     (intptr_t)heap->low_boundary(), (intptr_t)heap->high(),
 598                     (address)heap->high() - (address)heap->low_boundary());
 599     }
 600   }
 601   print_trace("allocation", cb, size);
 602   return cb;
 603 }
 604 
 605 void CodeCache::free(CodeBlob* cb) {
 606   assert_locked_or_safepoint(CodeCache_lock);
 607   CodeHeap* heap = get_code_heap(cb);
 608   print_trace("free", cb);
 609   if (cb->is_nmethod()) {
 610     heap->set_nmethod_count(heap->nmethod_count() - 1);
 611     if (((nmethod *)cb)->has_dependencies()) {
 612       Atomic::dec(&_number_of_nmethods_with_dependencies);
 613     }
 614   }
 615   if (cb->is_adapter_blob()) {
 616     heap->set_adapter_count(heap->adapter_count() - 1);
 617   }
 618 
 619   cb->~CodeBlob();
 620   // Get heap for given CodeBlob and deallocate
 621   heap->deallocate(cb);
 622 
 623   assert(heap->blob_count() >= 0, "sanity check");
 624 }
 625 
 626 void CodeCache::free_unused_tail(CodeBlob* cb, size_t used) {
 627   assert_locked_or_safepoint(CodeCache_lock);
 628   guarantee(cb->is_buffer_blob() && strncmp("Interpreter", cb->name(), 11) == 0, "Only possible for interpreter!");
 629   print_trace("free_unused_tail", cb);
 630 
 631   // We also have to account for the extra space (i.e. header) used by the CodeBlob
 632   // which provides the memory (see BufferBlob::create() in codeBlob.cpp).
 633   used += CodeBlob::align_code_offset(cb->header_size());
 634 
 635   // Get heap for given CodeBlob and deallocate its unused tail
 636   get_code_heap(cb)->deallocate_tail(cb, used);
 637   // Adjust the sizes of the CodeBlob
 638   cb->adjust_size(used);
 639 }
 640 
 641 void CodeCache::commit(CodeBlob* cb) {
 642   // this is called by nmethod::nmethod, which must already own CodeCache_lock
 643   assert_locked_or_safepoint(CodeCache_lock);
 644   CodeHeap* heap = get_code_heap(cb);
 645   if (cb->is_nmethod()) {
 646     heap->set_nmethod_count(heap->nmethod_count() + 1);
 647     if (((nmethod *)cb)->has_dependencies()) {
 648       Atomic::inc(&_number_of_nmethods_with_dependencies);
 649     }
 650   }
 651   if (cb->is_adapter_blob()) {
 652     heap->set_adapter_count(heap->adapter_count() + 1);
 653   }
 654 }
 655 
 656 bool CodeCache::contains(void *p) {
 657   // S390 uses contains() in current_frame(), which is used before
 658   // code cache initialization if NativeMemoryTracking=detail is set.
 659   S390_ONLY(if (_heaps == nullptr) return false;)
 660   // It should be ok to call contains without holding a lock.
 661   FOR_ALL_HEAPS(heap) {
 662     if ((*heap)->contains(p)) {
 663       return true;
 664     }
 665   }
 666   return false;
 667 }
 668 
 669 bool CodeCache::contains(nmethod *nm) {
 670   return contains((void *)nm);
 671 }
 672 
// This method is safe to call without holding the CodeCache_lock. It only depends on
// _segmap containing valid indices, which it always does as long as the CodeBlob is
// not in the process of being recycled.
 675 CodeBlob* CodeCache::find_blob(void* start) {
 676   // NMT can walk the stack before code cache is created
 677   if (_heaps != nullptr) {
 678     CodeHeap* heap = get_code_heap_containing(start);
 679     if (heap != nullptr) {
 680       return heap->find_blob(start);
 681     }
 682   }
 683   return nullptr;
 684 }
 685 
 686 nmethod* CodeCache::find_nmethod(void* start) {
 687   CodeBlob* cb = find_blob(start);
 688   assert(cb->is_nmethod(), "did not find an nmethod");
 689   return (nmethod*)cb;
 690 }
 691 
 692 void CodeCache::blobs_do(void f(CodeBlob* nm)) {
 693   assert_locked_or_safepoint(CodeCache_lock);
 694   FOR_ALL_HEAPS(heap) {
 695     FOR_ALL_BLOBS(cb, *heap) {
 696       f(cb);
 697     }
 698   }
 699 }
 700 
 701 void CodeCache::nmethods_do(void f(nmethod* nm)) {
 702   assert_locked_or_safepoint(CodeCache_lock);
 703   NMethodIterator iter(NMethodIterator::all_blobs);
 704   while(iter.next()) {
 705     f(iter.method());
 706   }
 707 }
 708 
 709 void CodeCache::metadata_do(MetadataClosure* f) {
 710   assert_locked_or_safepoint(CodeCache_lock);
 711   NMethodIterator iter(NMethodIterator::all_blobs);
 712   while(iter.next()) {
 713     iter.method()->metadata_do(f);
 714   }
 715 }
 716 
// Calculate the number of GCs for which an nmethod must have gone unused
// before it is classed as cold.
 719 void CodeCache::update_cold_gc_count() {
 720   if (!MethodFlushing || !UseCodeCacheFlushing || NmethodSweepActivity == 0) {
 721     // No aging
 722     return;
 723   }
 724 
 725   size_t last_used = _last_unloading_used;
 726   double last_time = _last_unloading_time;
 727 
 728   double time = os::elapsedTime();
 729 
 730   size_t free = unallocated_capacity();
 731   size_t max = max_capacity();
 732   size_t used = max - free;
 733   double gc_interval = time - last_time;
 734 
 735   _unloading_threshold_gc_requested = false;
 736   _last_unloading_time = time;
 737   _last_unloading_used = used;
 738 
 739   if (last_time == 0.0) {
 740     // The first GC doesn't have enough information to make good
 741     // decisions, so just keep everything afloat
 742     log_info(codecache)("Unknown code cache pressure; don't age code");
 743     return;
 744   }
 745 
 746   if (gc_interval <= 0.0 || last_used >= used) {
 747     // Dodge corner cases where there is no pressure or negative pressure
 748     // on the code cache. Just don't unload when this happens.
 749     _cold_gc_count = INT_MAX;
 750     log_info(codecache)("No code cache pressure; don't age code");
 751     return;
 752   }
 753 
 754   double allocation_rate = (used - last_used) / gc_interval;
 755 
 756   _unloading_allocation_rates.add(allocation_rate);
 757   _unloading_gc_intervals.add(gc_interval);
 758 
 759   size_t aggressive_sweeping_free_threshold = StartAggressiveSweepingAt / 100.0 * max;
 760   if (free < aggressive_sweeping_free_threshold) {
    // We are already in the red zone; be very aggressive to avoid disaster,
    // but no more aggressive than 2: an nmethod must still have gone unused
    // for at least two GCs before it is considered cold.
 764     _cold_gc_count = 2;
 765     log_info(codecache)("Code cache critically low; use aggressive aging");
 766     return;
 767   }
 768 
 769   // The code cache has an expected time for cold nmethods to "time out"
 770   // when they have not been used. The time for nmethods to time out
 771   // depends on how long we expect we can keep allocating code until
 772   // aggressive sweeping starts, based on sampled allocation rates.
 773   double average_gc_interval = _unloading_gc_intervals.avg();
 774   double average_allocation_rate = _unloading_allocation_rates.avg();
 775   double time_to_aggressive = ((double)(free - aggressive_sweeping_free_threshold)) / average_allocation_rate;
 776   double cold_timeout = time_to_aggressive / NmethodSweepActivity;
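  // For example (hypothetical numbers): with 10 MB left before aggressive
  // sweeping kicks in, an allocation rate of 1 MB/s and NmethodSweepActivity
  // of 10, the cold timeout is 1 s; with an average GC interval of 0.25 s the
  // cold GC count below becomes 4.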
 777 
  // Convert the timeout to GC cycles and cap it at INT_MAX: _cold_gc_count is
  // added to an epoch number and that addition must not overflow, or we can
  // crash the VM. The lower bound of 2 again ensures that an nmethod must have
  // gone unused for at least two GCs before it is considered cold.
 783   _cold_gc_count = MAX2(MIN2((uint64_t)(cold_timeout / average_gc_interval), (uint64_t)INT_MAX), (uint64_t)2);
 784 
 785   double used_ratio = double(used) / double(max);
 786   double last_used_ratio = double(last_used) / double(max);
 787   log_info(codecache)("Allocation rate: %.3f KB/s, time to aggressive unloading: %.3f s, cold timeout: %.3f s, cold gc count: " UINT64_FORMAT
 788                       ", used: %.3f MB (%.3f%%), last used: %.3f MB (%.3f%%), gc interval: %.3f s",
 789                       average_allocation_rate / K, time_to_aggressive, cold_timeout, _cold_gc_count,
 790                       double(used) / M, used_ratio * 100.0, double(last_used) / M, last_used_ratio * 100.0, average_gc_interval);
 791 
 792 }
 793 
 794 uint64_t CodeCache::cold_gc_count() {
 795   return _cold_gc_count;
 796 }
 797 
 798 void CodeCache::gc_on_allocation() {
 799   if (!is_init_completed()) {
 800     // Let's not heuristically trigger GCs before the JVM is ready for GCs, no matter what
 801     return;
 802   }
 803 
 804   size_t free = unallocated_capacity();
 805   size_t max = max_capacity();
 806   size_t used = max - free;
 807   double free_ratio = double(free) / double(max);
 808   if (free_ratio <= StartAggressiveSweepingAt / 100.0)  {
 809     // In case the GC is concurrent, we make sure only one thread requests the GC.
 810     if (Atomic::cmpxchg(&_unloading_threshold_gc_requested, false, true) == false) {
 811       log_info(codecache)("Triggering aggressive GC due to having only %.3f%% free memory", free_ratio * 100.0);
 812       Universe::heap()->collect(GCCause::_codecache_GC_aggressive);
 813     }
 814     return;
 815   }
 816 
 817   size_t last_used = _last_unloading_used;
 818   if (last_used >= used) {
 819     // No increase since last GC; no need to sweep yet
 820     return;
 821   }
 822   size_t allocated_since_last = used - last_used;
 823   double allocated_since_last_ratio = double(allocated_since_last) / double(max);
 824   double threshold = SweeperThreshold / 100.0;
 825   double used_ratio = double(used) / double(max);
 826   double last_used_ratio = double(last_used) / double(max);
 827   if (used_ratio > threshold) {
 828     // After threshold is reached, scale it by free_ratio so that more aggressive
 829     // GC is triggered as we approach code cache exhaustion
 830     threshold *= free_ratio;
 831   }
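  // For example (hypothetical numbers): with SweeperThreshold at 15 and 80% of
  // the code cache in use, the effective threshold becomes 15% * 0.2 = 3% of
  // the cache allocated since the last unloading.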
  // If the code cache keeps being filled without any GC at all, make sure a GC
  // is eventually triggered to avoid trouble.
 834   if (allocated_since_last_ratio > threshold) {
 835     // In case the GC is concurrent, we make sure only one thread requests the GC.
 836     if (Atomic::cmpxchg(&_unloading_threshold_gc_requested, false, true) == false) {
 837       log_info(codecache)("Triggering threshold (%.3f%%) GC due to allocating %.3f%% since last unloading (%.3f%% used -> %.3f%% used)",
 838                           threshold * 100.0, allocated_since_last_ratio * 100.0, last_used_ratio * 100.0, used_ratio * 100.0);
 839       Universe::heap()->collect(GCCause::_codecache_GC_threshold);
 840     }
 841   }
 842 }
 843 
// We initialize the _gc_epoch to 2, because previous_completed_gc_marking_cycle
// subtracts 2 from the value, and the type is unsigned. We don't want underflow.
 846 //
 847 // Odd values mean that marking is in progress, and even values mean that no
 848 // marking is currently active.
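// For example, _gc_epoch == 2 means no marking is active; on_gc_marking_cycle_start()
// bumps it to 3 (marking in progress) and on_gc_marking_cycle_finish() to 4 (idle).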
 849 uint64_t CodeCache::_gc_epoch = 2;
 850 
 851 // How many GCs after an nmethod has not been used, do we consider it cold?
 852 uint64_t CodeCache::_cold_gc_count = INT_MAX;
 853 
 854 double CodeCache::_last_unloading_time = 0.0;
 855 size_t CodeCache::_last_unloading_used = 0;
 856 volatile bool CodeCache::_unloading_threshold_gc_requested = false;
 857 TruncatedSeq CodeCache::_unloading_gc_intervals(10 /* samples */);
 858 TruncatedSeq CodeCache::_unloading_allocation_rates(10 /* samples */);
 859 
 860 uint64_t CodeCache::gc_epoch() {
 861   return _gc_epoch;
 862 }
 863 
 864 bool CodeCache::is_gc_marking_cycle_active() {
 865   // Odd means that marking is active
 866   return (_gc_epoch % 2) == 1;
 867 }
 868 
 869 uint64_t CodeCache::previous_completed_gc_marking_cycle() {
 870   if (is_gc_marking_cycle_active()) {
 871     return _gc_epoch - 2;
 872   } else {
 873     return _gc_epoch - 1;
 874   }
 875 }
 876 
 877 void CodeCache::on_gc_marking_cycle_start() {
 878   assert(!is_gc_marking_cycle_active(), "Previous marking cycle never ended");
 879   ++_gc_epoch;
 880 }
 881 
// Once started, the code cache marking cycle must only be finished after marking of
// the Java heap is complete. Otherwise nmethods could appear not to be on stack even
// though they have frames in continuation StackChunks that were not yet visited.
 885 void CodeCache::on_gc_marking_cycle_finish() {
 886   assert(is_gc_marking_cycle_active(), "Marking cycle started before last one finished");
 887   ++_gc_epoch;
 888   update_cold_gc_count();
 889 }
 890 
 891 void CodeCache::arm_all_nmethods() {
 892   BarrierSetNMethod* bs_nm = BarrierSet::barrier_set()->barrier_set_nmethod();
 893   if (bs_nm != nullptr) {
 894     bs_nm->arm_all_nmethods();
 895   }
 896 }
 897 
 898 // Mark nmethods for unloading if they contain otherwise unreachable oops.
 899 void CodeCache::do_unloading(bool unloading_occurred) {
 900   assert_locked_or_safepoint(CodeCache_lock);
 901   CompiledMethodIterator iter(CompiledMethodIterator::all_blobs);
 902   while(iter.next()) {
 903     iter.method()->do_unloading(unloading_occurred);
 904   }
 905 }
 906 
 907 void CodeCache::blobs_do(CodeBlobClosure* f) {
 908   assert_locked_or_safepoint(CodeCache_lock);
 909   FOR_ALL_ALLOCABLE_HEAPS(heap) {
 910     FOR_ALL_BLOBS(cb, *heap) {
 911       f->do_code_blob(cb);
 912 #ifdef ASSERT
 913       if (cb->is_nmethod()) {
 914         Universe::heap()->verify_nmethod((nmethod*)cb);
 915       }
 916 #endif //ASSERT
 917     }
 918   }
 919 }
 920 
 921 void CodeCache::verify_clean_inline_caches() {
 922 #ifdef ASSERT
 923   NMethodIterator iter(NMethodIterator::only_not_unloading);
 924   while(iter.next()) {
 925     nmethod* nm = iter.method();
 926     nm->verify_clean_inline_caches();
 927     nm->verify();
 928   }
 929 #endif
 930 }
 931 
 932 // Defer freeing of concurrently cleaned ExceptionCache entries until
 933 // after a global handshake operation.
 934 void CodeCache::release_exception_cache(ExceptionCache* entry) {
 935   if (SafepointSynchronize::is_at_safepoint()) {
 936     delete entry;
 937   } else {
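    // Lock-free push onto the purge list: retry the CAS until this entry is
    // installed as the new list head.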
 938     for (;;) {
 939       ExceptionCache* purge_list_head = Atomic::load(&_exception_cache_purge_list);
 940       entry->set_purge_list_next(purge_list_head);
 941       if (Atomic::cmpxchg(&_exception_cache_purge_list, purge_list_head, entry) == purge_list_head) {
 942         break;
 943       }
 944     }
 945   }
 946 }
 947 
// Delete exception cache entries that have been concurrently unlinked, once the
// subsequent global handshake operation has completed.
 950 void CodeCache::purge_exception_caches() {
 951   ExceptionCache* curr = _exception_cache_purge_list;
 952   while (curr != nullptr) {
 953     ExceptionCache* next = curr->purge_list_next();
 954     delete curr;
 955     curr = next;
 956   }
 957   _exception_cache_purge_list = nullptr;
 958 }
 959 
// Restart the compiler if possible and required.
 961 void CodeCache::maybe_restart_compiler(size_t freed_memory) {
 962 
 963   // Try to start the compiler again if we freed any memory
 964   if (!CompileBroker::should_compile_new_jobs() && freed_memory != 0) {
 965     CompileBroker::set_should_compile_new_jobs(CompileBroker::run_compilation);
 966     log_info(codecache)("Restarting compiler");
 967     EventJITRestart event;
 968     event.set_freedMemory(freed_memory);
 969     event.set_codeCacheMaxCapacity(CodeCache::max_capacity());
 970     event.commit();
 971   }
 972 }
 973 
 974 uint8_t CodeCache::_unloading_cycle = 1;
 975 
 976 void CodeCache::increment_unloading_cycle() {
 977   // 2-bit value (see IsUnloadingState in nmethod.cpp for details)
 978   // 0 is reserved for new methods.
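  // The value therefore cycles 1 -> 2 -> 3 -> 1 -> ...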
 979   _unloading_cycle = (_unloading_cycle + 1) % 4;
 980   if (_unloading_cycle == 0) {
 981     _unloading_cycle = 1;
 982   }
 983 }
 984 
 985 CodeCache::UnlinkingScope::UnlinkingScope(BoolObjectClosure* is_alive)
 986   : _is_unloading_behaviour(is_alive)
 987 {
 988   _saved_behaviour = IsUnloadingBehaviour::current();
 989   IsUnloadingBehaviour::set_current(&_is_unloading_behaviour);
 990   increment_unloading_cycle();
 991   DependencyContext::cleaning_start();
 992 }
 993 
 994 CodeCache::UnlinkingScope::~UnlinkingScope() {
 995   IsUnloadingBehaviour::set_current(_saved_behaviour);
 996   DependencyContext::cleaning_end();
 997 }
 998 
 999 void CodeCache::verify_oops() {
1000   MutexLocker mu(CodeCache_lock, Mutex::_no_safepoint_check_flag);
1001   VerifyOopClosure voc;
1002   NMethodIterator iter(NMethodIterator::only_not_unloading);
1003   while(iter.next()) {
1004     nmethod* nm = iter.method();
1005     nm->oops_do(&voc);
1006     nm->verify_oop_relocations();
1007   }
1008 }
1009 
1010 int CodeCache::blob_count(CodeBlobType code_blob_type) {
1011   CodeHeap* heap = get_code_heap(code_blob_type);
1012   return (heap != nullptr) ? heap->blob_count() : 0;
1013 }
1014 
1015 int CodeCache::blob_count() {
1016   int count = 0;
1017   FOR_ALL_HEAPS(heap) {
1018     count += (*heap)->blob_count();
1019   }
1020   return count;
1021 }
1022 
1023 int CodeCache::nmethod_count(CodeBlobType code_blob_type) {
1024   CodeHeap* heap = get_code_heap(code_blob_type);
1025   return (heap != nullptr) ? heap->nmethod_count() : 0;
1026 }
1027 
1028 int CodeCache::nmethod_count() {
1029   int count = 0;
1030   FOR_ALL_NMETHOD_HEAPS(heap) {
1031     count += (*heap)->nmethod_count();
1032   }
1033   return count;
1034 }
1035 
1036 int CodeCache::adapter_count(CodeBlobType code_blob_type) {
1037   CodeHeap* heap = get_code_heap(code_blob_type);
1038   return (heap != nullptr) ? heap->adapter_count() : 0;
1039 }
1040 
1041 int CodeCache::adapter_count() {
1042   int count = 0;
1043   FOR_ALL_HEAPS(heap) {
1044     count += (*heap)->adapter_count();
1045   }
1046   return count;
1047 }
1048 
1049 address CodeCache::low_bound(CodeBlobType code_blob_type) {
1050   CodeHeap* heap = get_code_heap(code_blob_type);
1051   return (heap != nullptr) ? (address)heap->low_boundary() : nullptr;
1052 }
1053 
1054 address CodeCache::high_bound(CodeBlobType code_blob_type) {
1055   CodeHeap* heap = get_code_heap(code_blob_type);
1056   return (heap != nullptr) ? (address)heap->high_boundary() : nullptr;
1057 }
1058 
1059 size_t CodeCache::capacity() {
1060   size_t cap = 0;
1061   FOR_ALL_ALLOCABLE_HEAPS(heap) {
1062     cap += (*heap)->capacity();
1063   }
1064   return cap;
1065 }
1066 
1067 size_t CodeCache::unallocated_capacity(CodeBlobType code_blob_type) {
1068   CodeHeap* heap = get_code_heap(code_blob_type);
1069   return (heap != nullptr) ? heap->unallocated_capacity() : 0;
1070 }
1071 
1072 size_t CodeCache::unallocated_capacity() {
1073   size_t unallocated_cap = 0;
1074   FOR_ALL_ALLOCABLE_HEAPS(heap) {
1075     unallocated_cap += (*heap)->unallocated_capacity();
1076   }
1077   return unallocated_cap;
1078 }
1079 
1080 size_t CodeCache::max_capacity() {
1081   size_t max_cap = 0;
1082   FOR_ALL_ALLOCABLE_HEAPS(heap) {
1083     max_cap += (*heap)->max_capacity();
1084   }
1085   return max_cap;
1086 }
1087 
1088 bool CodeCache::is_non_nmethod(address addr) {
1089   CodeHeap* blob = get_code_heap(CodeBlobType::NonNMethod);
1090   return blob->contains(addr);
1091 }
1092 
1093 size_t CodeCache::max_distance_to_non_nmethod() {
1094   if (!SegmentedCodeCache) {
1095     return ReservedCodeCacheSize;
1096   } else {
1097     CodeHeap* blob = get_code_heap(CodeBlobType::NonNMethod);
1098     // the max distance is minimized by placing the NonNMethod segment
1099     // in between MethodProfiled and MethodNonProfiled segments
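    // The larger of the two distances below therefore bounds how far any code
    // cache address can be from an address in the NonNMethod segment.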
1100     size_t dist1 = (size_t)blob->high() - (size_t)_low_bound;
1101     size_t dist2 = (size_t)_high_bound - (size_t)blob->low();
1102     return dist1 > dist2 ? dist1 : dist2;
1103   }
1104 }
1105 
1106 // Returns the reverse free ratio. E.g., if 25% (1/4) of the code cache
1107 // is free, reverse_free_ratio() returns 4.
// Since the code heap for each type of code blob falls forward to the next
// type of code heap, return the reverse free ratio for the entire
// code cache.
1111 double CodeCache::reverse_free_ratio() {
1112   double unallocated = MAX2((double)unallocated_capacity(), 1.0); // Avoid division by 0;
1113   double max = (double)max_capacity();
1114   double result = max / unallocated;
1115   assert (max >= unallocated, "Must be");
1116   assert (result >= 1.0, "reverse_free_ratio must be at least 1. It is %f", result);
1117   return result;
1118 }
1119 
1120 size_t CodeCache::bytes_allocated_in_freelists() {
1121   size_t allocated_bytes = 0;
1122   FOR_ALL_ALLOCABLE_HEAPS(heap) {
1123     allocated_bytes += (*heap)->allocated_in_freelist();
1124   }
1125   return allocated_bytes;
1126 }
1127 
1128 int CodeCache::allocated_segments() {
1129   int number_of_segments = 0;
1130   FOR_ALL_ALLOCABLE_HEAPS(heap) {
1131     number_of_segments += (*heap)->allocated_segments();
1132   }
1133   return number_of_segments;
1134 }
1135 
1136 size_t CodeCache::freelists_length() {
1137   size_t length = 0;
1138   FOR_ALL_ALLOCABLE_HEAPS(heap) {
1139     length += (*heap)->freelist_length();
1140   }
1141   return length;
1142 }
1143 
1144 void icache_init();
1145 
1146 void CodeCache::initialize() {
1147   assert(CodeCacheSegmentSize >= (uintx)CodeEntryAlignment, "CodeCacheSegmentSize must be large enough to align entry points");
1148 #ifdef COMPILER2
1149   assert(CodeCacheSegmentSize >= (uintx)OptoLoopAlignment,  "CodeCacheSegmentSize must be large enough to align inner loops");
1150 #endif
1151   assert(CodeCacheSegmentSize >= sizeof(jdouble),    "CodeCacheSegmentSize must be large enough to align constants");
  // This was originally just an alignment check that caused a failure; instead,
  // round CodeCacheExpansionSize up to the page size. In particular, Solaris
  // moved to a larger default page size.
1155   CodeCacheExpansionSize = align_up(CodeCacheExpansionSize, os::vm_page_size());
1156 
1157   if (SegmentedCodeCache) {
1158     // Use multiple code heaps
1159     initialize_heaps();
1160   } else {
1161     // Use a single code heap
1162     FLAG_SET_ERGO(NonNMethodCodeHeapSize, (uintx)os::vm_page_size());
1163     FLAG_SET_ERGO(ProfiledCodeHeapSize, 0);
1164     FLAG_SET_ERGO(NonProfiledCodeHeapSize, 0);
1165 
    // If InitialCodeCacheSize is equal to ReservedCodeCacheSize, then the user
    // most likely wants to use the largest available page size.
1168     const size_t min_pages = (InitialCodeCacheSize == ReservedCodeCacheSize) ? 1 : 8;
1169     ReservedCodeSpace rs = reserve_heap_memory(ReservedCodeCacheSize, page_size(false, min_pages));
1170     // Register CodeHeaps with LSan as we sometimes embed pointers to malloc memory.
1171     LSAN_REGISTER_ROOT_REGION(rs.base(), rs.size());
1172     add_heap(rs, "CodeCache", CodeBlobType::All);
1173   }
1174 
1175   // Initialize ICache flush mechanism
1176   // This service is needed for os::register_code_area
1177   icache_init();
1178 
1179   // Give OS a chance to register generated code area.
1180   // This is used on Windows 64 bit platforms to register
1181   // Structured Exception Handlers for our generated code.
1182   os::register_code_area((char*)low_bound(), (char*)high_bound());
1183 }
1184 
1185 void codeCache_init() {
1186   CodeCache::initialize();
1187 }
1188 
1189 //------------------------------------------------------------------------------------------------
1190 
1191 bool CodeCache::has_nmethods_with_dependencies() {
1192   return Atomic::load_acquire(&_number_of_nmethods_with_dependencies) != 0;
1193 }
1194 
1195 void CodeCache::clear_inline_caches() {
1196   assert_locked_or_safepoint(CodeCache_lock);
1197   CompiledMethodIterator iter(CompiledMethodIterator::only_not_unloading);
1198   while(iter.next()) {
1199     iter.method()->clear_inline_caches();
1200   }
1201 }
1202 
1203 // Only used by whitebox API
1204 void CodeCache::cleanup_inline_caches_whitebox() {
1205   assert_locked_or_safepoint(CodeCache_lock);
1206   NMethodIterator iter(NMethodIterator::only_not_unloading);
1207   while(iter.next()) {
1208     iter.method()->cleanup_inline_caches_whitebox();
1209   }
1210 }
1211 
1212 // Keeps track of time spent for checking dependencies
1213 NOT_PRODUCT(static elapsedTimer dependentCheckTime;)
1214 
1215 #ifndef PRODUCT
// Check if any of the live methods' dependencies have been invalidated.
// (This is expensive!)
1218 static void check_live_nmethods_dependencies(DepChange& changes) {
1219   // Checked dependencies are allocated into this ResourceMark
1220   ResourceMark rm;
1221 
1222   // Turn off dependency tracing while actually testing dependencies.
1223   FlagSetting fs(Dependencies::_verify_in_progress, true);
1224 
1225   typedef ResourceHashtable<DependencySignature, int, 11027,
1226                             AnyObj::RESOURCE_AREA, mtInternal,
1227                             &DependencySignature::hash,
1228                             &DependencySignature::equals> DepTable;
1229 
1230   DepTable* table = new DepTable();
1231 
1232   // Iterate over live nmethods and check dependencies of all nmethods that are not
1233   // marked for deoptimization. A particular dependency is only checked once.
1234   NMethodIterator iter(NMethodIterator::only_not_unloading);
1235   while(iter.next()) {
1236     nmethod* nm = iter.method();
1237     // Only notify for live nmethods
1238     if (!nm->is_marked_for_deoptimization()) {
1239       for (Dependencies::DepStream deps(nm); deps.next(); ) {
1240         // Construct abstraction of a dependency.
1241         DependencySignature* current_sig = new DependencySignature(deps);
1242 
1243         // Determine if dependency is already checked. table->put(...) returns
1244         // 'true' if the dependency is added (i.e., was not in the hashtable).
1245         if (table->put(*current_sig, 1)) {
1246           Klass* witness = deps.check_dependency();
1247           if (witness != nullptr) {
1248             // Dependency checking failed. Print out information about the failed
1249             // dependency and finally fail with an assert. We can fail here, since
1250             // dependency checking is never done in a product build.
1251             deps.print_dependency(tty, witness, true);
1252             changes.print();
1253             nm->print();
1254             nm->print_dependencies_on(tty);
1255             assert(false, "Should have been marked for deoptimization");
1256           }
1257         }
1258       }
1259     }
1260   }
1261 }
1262 #endif
1263 
1264 void CodeCache::mark_for_deoptimization(DeoptimizationScope* deopt_scope, KlassDepChange& changes) {
1265   MutexLocker mu(CodeCache_lock, Mutex::_no_safepoint_check_flag);
1266 
1267   // search the hierarchy looking for nmethods which are affected by the loading of this class
1268 
1269   // then search the interfaces this class implements looking for nmethods
  // which might be dependent on the fact that an interface only had one
  // implementor.
  // nmethod::check_all_dependencies works correctly only if no safepoint
  // can happen.
1274   NoSafepointVerifier nsv;
1275   for (DepChange::ContextStream str(changes, nsv); str.next(); ) {
1276     InstanceKlass* d = str.klass();
1277     {
1278       LogStreamHandle(Trace, dependencies) log;
1279       if (log.is_enabled()) {
1280         log.print("Processing context ");
1281         d->name()->print_value_on(&log);
1282       }
1283     }
1284     d->mark_dependent_nmethods(deopt_scope, changes);
1285   }
1286 
1287 #ifndef PRODUCT
    // Object pointers are used as unique identifiers for dependency arguments.
    // This is only possible if no safepoint (i.e., GC) occurs during the
    // verification code.
1291     dependentCheckTime.start();
1292     check_live_nmethods_dependencies(changes);
1293     dependentCheckTime.stop();
1294   }
1295 #endif
1296 }
1297 
1298 CompiledMethod* CodeCache::find_compiled(void* start) {
1299   CodeBlob *cb = find_blob(start);
  assert(cb == nullptr || cb->is_compiled(), "did not find a compiled_method");
1301   return (CompiledMethod*)cb;
1302 }
1303 
1304 #if INCLUDE_JVMTI
1305 // RedefineClasses support for saving nmethods that are dependent on "old" methods.
1306 // We don't really expect this table to grow very large.  If it does, it can become a hashtable.
1307 static GrowableArray<CompiledMethod*>* old_compiled_method_table = nullptr;
1308 
1309 static void add_to_old_table(CompiledMethod* c) {
1310   if (old_compiled_method_table == nullptr) {
1311     old_compiled_method_table = new (mtCode) GrowableArray<CompiledMethod*>(100, mtCode);
1312   }
1313   old_compiled_method_table->push(c);
1314 }
1315 
1316 static void reset_old_method_table() {
1317   if (old_compiled_method_table != nullptr) {
1318     delete old_compiled_method_table;
1319     old_compiled_method_table = nullptr;
1320   }
1321 }
1322 
1323 // Remove this method when flushed.
1324 void CodeCache::unregister_old_nmethod(CompiledMethod* c) {
1325   assert_lock_strong(CodeCache_lock);
1326   if (old_compiled_method_table != nullptr) {
1327     int index = old_compiled_method_table->find(c);
1328     if (index != -1) {
1329       old_compiled_method_table->delete_at(index);
1330     }
1331   }
1332 }
1333 
1334 void CodeCache::old_nmethods_do(MetadataClosure* f) {
1335   // Walk old method table and mark those on stack.
1336   int length = 0;
1337   if (old_compiled_method_table != nullptr) {
1338     length = old_compiled_method_table->length();
1339     for (int i = 0; i < length; i++) {
1340       // Walk all methods saved on the last pass.  Concurrent class unloading may
1341       // also be looking at this method's metadata, so don't delete it yet if
1342       // it is marked as unloaded.
1343       old_compiled_method_table->at(i)->metadata_do(f);
1344     }
1345   }
1346   log_debug(redefine, class, nmethod)("Walked %d nmethods for mark_on_stack", length);
1347 }
1348 
1349 // Walk compiled methods and mark dependent methods for deoptimization.
1350 void CodeCache::mark_dependents_for_evol_deoptimization(DeoptimizationScope* deopt_scope) {
1351   assert(SafepointSynchronize::is_at_safepoint(), "Can only do this at a safepoint!");
1352   // Each redefinition creates a new set of nmethods that have references to "old" Methods,
1353   // so delete the old method table and create a new one.
1354   reset_old_method_table();
1355 
1356   CompiledMethodIterator iter(CompiledMethodIterator::all_blobs);
1357   while(iter.next()) {
1358     CompiledMethod* nm = iter.method();
1359     // Walk all alive nmethods to check for old Methods.
1360     // This includes methods whose inline caches point to old methods, so
1361     // inline cache clearing is unnecessary.
1362     if (nm->has_evol_metadata()) {
1363       deopt_scope->mark(nm);
1364       add_to_old_table(nm);
1365     }
1366   }
1367 }
1368 
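     // Mark every deoptimizable compiled method (except method handle intrinsics) for
     // deoptimization and record those that reference old Methods in the old method table.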
1369 void CodeCache::mark_all_nmethods_for_evol_deoptimization(DeoptimizationScope* deopt_scope) {
1370   assert(SafepointSynchronize::is_at_safepoint(), "Can only do this at a safepoint!");
1371   CompiledMethodIterator iter(CompiledMethodIterator::all_blobs);
1372   while(iter.next()) {
1373     CompiledMethod* nm = iter.method();
1374     if (!nm->method()->is_method_handle_intrinsic()) {
1375       if (nm->can_be_deoptimized()) {
1376         deopt_scope->mark(nm);
1377       }
1378       if (nm->has_evol_metadata()) {
1379         add_to_old_table(nm);
1380       }
1381     }
1382   }
1383 }
1384 
1385 #endif // INCLUDE_JVMTI
1386 
1387 // Mark methods for deopt (if safe or possible).
1388 void CodeCache::mark_all_nmethods_for_deoptimization(DeoptimizationScope* deopt_scope) {
1389   MutexLocker mu(CodeCache_lock, Mutex::_no_safepoint_check_flag);
1390   CompiledMethodIterator iter(CompiledMethodIterator::only_not_unloading);
1391   while(iter.next()) {
1392     CompiledMethod* nm = iter.method();
1393     if (!nm->is_native_method()) {
1394       deopt_scope->mark(nm);
1395     }
1396   }
1397 }
1398 
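     // Marks compiled methods that are dependent on the given method, e.g. because they inlined it.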
1399 void CodeCache::mark_for_deoptimization(DeoptimizationScope* deopt_scope, Method* dependee) {
1400   MutexLocker mu(CodeCache_lock, Mutex::_no_safepoint_check_flag);
1401 
1402   CompiledMethodIterator iter(CompiledMethodIterator::only_not_unloading);
1403   while(iter.next()) {
1404     CompiledMethod* nm = iter.method();
1405     if (nm->is_dependent_on_method(dependee)) {
1406       deopt_scope->mark(nm);
1407     }
1408   }
1409 }
1410 
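     // Make every nmethod that was marked for deoptimization (and can still be
     // deoptimized) not entrant and deoptimized.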
1411 void CodeCache::make_marked_nmethods_deoptimized() {
1412   RelaxedCompiledMethodIterator iter(RelaxedCompiledMethodIterator::only_not_unloading);
1413   while(iter.next()) {
1414     CompiledMethod* nm = iter.method();
1415     if (nm->is_marked_for_deoptimization() && !nm->has_been_deoptimized() && nm->can_be_deoptimized()) {
1416       nm->make_not_entrant();
1417       nm->make_deoptimized();
1418     }
1419   }
1420 }
1421 
1422 // Marks compiled methods dependent on dependee.
1423 void CodeCache::mark_dependents_on(DeoptimizationScope* deopt_scope, InstanceKlass* dependee) {
1424   assert_lock_strong(Compile_lock);
1425 
1426   if (!has_nmethods_with_dependencies()) {
1427     return;
1428   }
1429 
1430   if (dependee->is_linked()) {
1431     // Class initialization state change.
1432     KlassInitDepChange changes(dependee);
1433     mark_for_deoptimization(deopt_scope, changes);
1434   } else {
1435     // New class is loaded.
1436     NewKlassDepChange changes(dependee);
1437     mark_for_deoptimization(deopt_scope, changes);
1438   }
1439 }
1440 
1441 // Marks compiled methods dependent on the given method and deoptimizes them (used when setting a breakpoint).
1442 void CodeCache::mark_dependents_on_method_for_breakpoint(const methodHandle& m_h) {
1443   assert(SafepointSynchronize::is_at_safepoint(), "invariant");
1444 
1445   DeoptimizationScope deopt_scope;
1446   // Compute the dependent nmethods
1447   mark_for_deoptimization(&deopt_scope, m_h());
1448   deopt_scope.deoptimize_marked();
1449 }
1450 
1451 void CodeCache::verify() {
1452   assert_locked_or_safepoint(CodeCache_lock);
1453   FOR_ALL_HEAPS(heap) {
1454     (*heap)->verify();
1455     FOR_ALL_BLOBS(cb, *heap) {
1456       cb->verify();
1457     }
1458   }
1459 }
1460 
1461 // A CodeHeap is full. Print out warning and report event.
1462 PRAGMA_DIAG_PUSH
1463 PRAGMA_FORMAT_NONLITERAL_IGNORED
1464 void CodeCache::report_codemem_full(CodeBlobType code_blob_type, bool print) {
1465   // Get nmethod heap for the given CodeBlobType and build CodeCacheFull event
1466   CodeHeap* heap = get_code_heap(code_blob_type);
1467   assert(heap != nullptr, "heap is null");
1468 
1469   int full_count = heap->report_full();
1470 
1471   if ((full_count == 1) || print) {
1472     // Not yet reported for this heap, report
1473     if (SegmentedCodeCache) {
1474       ResourceMark rm;
1475       stringStream msg1_stream, msg2_stream;
1476       msg1_stream.print("%s is full. Compiler has been disabled.",
1477                         get_code_heap_name(code_blob_type));
1478       msg2_stream.print("Try increasing the code heap size using -XX:%s=",
1479                  get_code_heap_flag_name(code_blob_type));
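           // The flag name printed above is one of NonNMethodCodeHeapSize,
           // ProfiledCodeHeapSize or NonProfiledCodeHeapSize, depending on which code heap filled up.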
1480       const char *msg1 = msg1_stream.as_string();
1481       const char *msg2 = msg2_stream.as_string();
1482 
1483       log_warning(codecache)("%s", msg1);
1484       log_warning(codecache)("%s", msg2);
1485       warning("%s", msg1);
1486       warning("%s", msg2);
1487     } else {
1488       const char *msg1 = "CodeCache is full. Compiler has been disabled.";
1489       const char *msg2 = "Try increasing the code cache size using -XX:ReservedCodeCacheSize=";
1490 
1491       log_warning(codecache)("%s", msg1);
1492       log_warning(codecache)("%s", msg2);
1493       warning("%s", msg1);
1494       warning("%s", msg2);
1495     }
1496     stringStream s;
1497     // Dump code cache into a buffer before locking the tty.
1498     {
1499       MutexLocker mu(CodeCache_lock, Mutex::_no_safepoint_check_flag);
1500       print_summary(&s);
1501     }
1502     {
1503       ttyLocker ttyl;
1504       tty->print("%s", s.freeze());
1505     }
1506 
1507     if (full_count == 1) {
1508       if (PrintCodeHeapAnalytics) {
1509         CompileBroker::print_heapinfo(tty, "all", 4096); // details, may be a lot!
1510       }
1511     }
1512   }
1513 
1514   EventCodeCacheFull event;
1515   if (event.should_commit()) {
1516     event.set_codeBlobType((u1)code_blob_type);
1517     event.set_startAddress((u8)heap->low_boundary());
1518     event.set_commitedTopAddress((u8)heap->high());
1519     event.set_reservedTopAddress((u8)heap->high_boundary());
1520     event.set_entryCount(heap->blob_count());
1521     event.set_methodCount(heap->nmethod_count());
1522     event.set_adaptorCount(heap->adapter_count());
1523     event.set_unallocatedCapacity(heap->unallocated_capacity());
1524     event.set_fullCount(heap->full_count());
1525     event.set_codeCacheMaxCapacity(CodeCache::max_capacity());
1526     event.commit();
1527   }
1528 }
1529 PRAGMA_DIAG_POP
1530 
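     // Print how much space is lost to CodeCacheSegmentSize rounding within allocated
     // CodeBlobs, together with freelist and segment map statistics.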
1531 void CodeCache::print_memory_overhead() {
1532   size_t wasted_bytes = 0;
1533   FOR_ALL_ALLOCABLE_HEAPS(heap) {
1534       CodeHeap* curr_heap = *heap;
1535       for (CodeBlob* cb = (CodeBlob*)curr_heap->first(); cb != nullptr; cb = (CodeBlob*)curr_heap->next(cb)) {
1536         HeapBlock* heap_block = ((HeapBlock*)cb) - 1;
1537         wasted_bytes += heap_block->length() * CodeCacheSegmentSize - cb->size();
1538       }
1539   }
1540   // Print bytes that are allocated in the freelist
1541   ttyLocker ttl;
1542   tty->print_cr("Number of elements in freelist: " SSIZE_FORMAT,       freelists_length());
1543   tty->print_cr("Allocated in freelist:          " SSIZE_FORMAT "kB",  bytes_allocated_in_freelists()/K);
1544   tty->print_cr("Unused bytes in CodeBlobs:      " SSIZE_FORMAT "kB",  (wasted_bytes/K));
1545   tty->print_cr("Segment map size:               " SSIZE_FORMAT "kB",  allocated_segments()/K); // 1 byte per segment
1546 }
1547 
1548 static void print_helper1(outputStream* st, const char* prefix, int total, int not_entrant, int used) {
1549   if (total > 0) {
1550     double ratio = (100.0 * used) / total;
1551     st->print("%s %3d nmethods: %3d not_entrant, %d used (%2.1f%%)", prefix, total, not_entrant, used, ratio);
1552   }
1553 }
1554 
1555 void CodeCache::print_nmethod_statistics_on(outputStream* st) {
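       // Histogram indices: [SCC nmethod?][comp level (+1 if preloaded)][in_use / not_entrant / other][OSR?]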
1556   int stats     [2][6][3][2] = {0};
1557   int stats_used[2][6][3][2] = {0};
1558 
1559   int total_osr = 0;
1560   int total_entrant = 0;
1561   int total_non_entrant = 0;
1562   int total_other = 0;
1563   int total_used = 0;
1564 
1565   NMethodIterator iter(NMethodIterator::all_blobs);
1566   while (iter.next()) {
1567     nmethod* nm = iter.method();
1568     if (nm->is_in_use()) {
1569       ++total_entrant;
1570     } else if (nm->is_not_entrant()) {
1571       ++total_non_entrant;
1572     } else {
1573       ++total_other;
1574     }
1575     if (nm->is_osr_method()) {
1576       ++total_osr;
1577     }
1578     if (nm->used()) {
1579       ++total_used;
1580     }
1581     assert(!nm->preloaded() || nm->comp_level() == CompLevel_full_optimization, "");
1582 
1583     int idx1 = nm->is_scc() ? 1 : 0;
1584     int idx2 = nm->comp_level() + (nm->preloaded() ? 1 : 0);
1585     int idx3 = (nm->is_in_use()      ? 0 :
1586                (nm->is_not_entrant() ? 1 :
1587                                        2));
1588     int idx4 = (nm->is_osr_method() ? 1 : 0);
1589     stats[idx1][idx2][idx3][idx4] += 1;
1590     if (nm->used()) {
1591       stats_used[idx1][idx2][idx3][idx4] += 1;
1592     }
1593   }
1594 
1595   st->print("Total: %d methods (%d entrant / %d not_entrant; osr: %d ",
1596                total_entrant + total_non_entrant + total_other,
1597                total_entrant, total_non_entrant, total_osr);
1598   if (total_other > 0) {
1599     st->print("; %d other", total_other);
1600   }
1601   st->print_cr(")");
1602 
1603   for (int i = CompLevel_simple; i <= CompLevel_full_optimization; i++) {
1604     int total_normal = stats[0][i][0][0] + stats[0][i][1][0] + stats[0][i][2][0];
1605     int total_osr    = stats[0][i][0][1] + stats[0][i][1][1] + stats[0][i][2][1];
1606     if (total_normal + total_osr > 0) {
1607       st->print("  Tier%d:", i);
1608       print_helper1(st,      "", total_normal, stats[0][i][1][0], stats_used[0][i][0][0] + stats_used[0][i][1][0]);
1609       print_helper1(st, "; osr:", total_osr,    stats[0][i][1][1], stats_used[0][i][0][1] + stats_used[0][i][1][1]);
1610       st->cr();
1611     }
1612   }
1613   st->cr();
1614   for (int i = CompLevel_simple; i <= CompLevel_full_optimization + 1; i++) {
1615     int total_normal = stats[1][i][0][0] + stats[1][i][1][0] + stats[1][i][2][0];
1616     int total_osr    = stats[1][i][0][1] + stats[1][i][1][1] + stats[1][i][2][1];
1617     assert(total_osr == 0, "sanity");
1618     if (total_normal + total_osr > 0) {
1619       st->print("  SC T%d:", i);
1620       print_helper1(st,      "", total_normal, stats[1][i][1][0], stats_used[1][i][0][0] + stats_used[1][i][1][0]);
1621       print_helper1(st, "; osr:", total_osr,    stats[1][i][1][1], stats_used[1][i][0][1] + stats_used[1][i][1][1]);
1622       st->cr();
1623     }
1624   }
1625 }
1626 
1627 //------------------------------------------------------------------------------------------------
1628 // Non-product version
1629 
1630 #ifndef PRODUCT
1631 
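     // Print a trace line for a code cache event on the given blob (enabled by PrintCodeCache2).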
1632 void CodeCache::print_trace(const char* event, CodeBlob* cb, uint size) {
1633   if (PrintCodeCache2) {  // Need to add a new flag
1634     ResourceMark rm;
1635     if (size == 0) {
1636       int s = cb->size();
1637       assert(s >= 0, "CodeBlob size is negative: %d", s);
1638       size = (uint) s;
1639     }
1640     tty->print_cr("CodeCache %s:  addr: " INTPTR_FORMAT ", size: 0x%x", event, p2i(cb), size);
1641   }
1642 }
1643 
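     // Print per-heap blob listings, counts per blob type, and an nmethod size
     // histogram (bucketed in 512 byte steps), followed by the memory overhead report.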
1644 void CodeCache::print_internals() {
1645   int nmethodCount = 0;
1646   int runtimeStubCount = 0;
1647   int adapterCount = 0;
1648   int deoptimizationStubCount = 0;
1649   int uncommonTrapStubCount = 0;
1650   int bufferBlobCount = 0;
1651   int total = 0;
1652   int nmethodNotEntrant = 0;
1653   int nmethodJava = 0;
1654   int nmethodNative = 0;
1655   int max_nm_size = 0;
1656   ResourceMark rm;
1657 
1658   int i = 0;
1659   FOR_ALL_ALLOCABLE_HEAPS(heap) {
1660     int heap_total = 0;
1661     tty->print_cr("-- %s --", (*heap)->name());
1662     FOR_ALL_BLOBS(cb, *heap) {
1663       total++;
1664       heap_total++;
1665       if (cb->is_nmethod()) {
1666         nmethod* nm = (nmethod*)cb;
1667 
1668         tty->print("%4d: ", heap_total);
1669         CompileTask::print(tty, nm, (nm->is_not_entrant() ? "non-entrant" : ""), true, true);
1670 
1671         nmethodCount++;
1672 
1673         if(nm->is_not_entrant()) { nmethodNotEntrant++; }
1674         if(nm->method() != nullptr && nm->is_native_method()) { nmethodNative++; }
1675 
1676         if(nm->method() != nullptr && nm->is_java_method()) {
1677           nmethodJava++;
1678           max_nm_size = MAX2(max_nm_size, nm->size());
1679         }
1680       } else if (cb->is_runtime_stub()) {
1681         runtimeStubCount++;
1682       } else if (cb->is_deoptimization_stub()) {
1683         deoptimizationStubCount++;
1684       } else if (cb->is_uncommon_trap_stub()) {
1685         uncommonTrapStubCount++;
1686       } else if (cb->is_adapter_blob()) {
1687         adapterCount++;
1688       } else if (cb->is_buffer_blob()) {
1689         bufferBlobCount++;
1690       }
1691     }
1692   }
1693 
1694   int bucketSize = 512;
1695   int bucketLimit = max_nm_size / bucketSize + 1;
1696   int *buckets = NEW_C_HEAP_ARRAY(int, bucketLimit, mtCode);
1697   memset(buckets, 0, sizeof(int) * bucketLimit);
1698 
1699   NMethodIterator iter(NMethodIterator::all_blobs);
1700   while(iter.next()) {
1701     nmethod* nm = iter.method();
1702     if(nm->method() != nullptr && nm->is_java_method()) {
1703       buckets[nm->size() / bucketSize]++;
1704     }
1705   }
1706 
1707   tty->print_cr("Code Cache Entries (total of %d)",total);
1708   tty->print_cr("-------------------------------------------------");
1709   tty->print_cr("nmethods: %d",nmethodCount);
1710   tty->print_cr("\tnot_entrant: %d",nmethodNotEntrant);
1711   tty->print_cr("\tjava: %d",nmethodJava);
1712   tty->print_cr("\tnative: %d",nmethodNative);
1713   tty->print_cr("runtime_stubs: %d",runtimeStubCount);
1714   tty->print_cr("adapters: %d",adapterCount);
1715   tty->print_cr("buffer blobs: %d",bufferBlobCount);
1716   tty->print_cr("deoptimization_stubs: %d",deoptimizationStubCount);
1717   tty->print_cr("uncommon_traps: %d",uncommonTrapStubCount);
1718   tty->print_cr("\nnmethod size distribution");
1719   tty->print_cr("-------------------------------------------------");
1720 
1721   for(int i=0; i<bucketLimit; i++) {
1722     if(buckets[i] != 0) {
1723       tty->print("%d - %d bytes",i*bucketSize,(i+1)*bucketSize);
1724       tty->fill_to(40);
1725       tty->print_cr("%d",buckets[i]);
1726     }
1727   }
1728 
1729   FREE_C_HEAP_ARRAY(int, buckets);
1730   print_memory_overhead();
1731 }
1732 
1733 #endif // !PRODUCT
1734 
1735 void CodeCache::print() {
1736   print_summary(tty);
1737 
1738 #ifndef PRODUCT
1739   if (!Verbose) return;
1740 
1741   CodeBlob_sizes live[CompLevel_full_optimization + 1];
1742   CodeBlob_sizes runtimeStub;
1743   CodeBlob_sizes uncommonTrapStub;
1744   CodeBlob_sizes deoptimizationStub;
1745   CodeBlob_sizes adapter;
1746   CodeBlob_sizes bufferBlob;
1747   CodeBlob_sizes other;
1748 
1749   FOR_ALL_ALLOCABLE_HEAPS(heap) {
1750     FOR_ALL_BLOBS(cb, *heap) {
1751       if (cb->is_nmethod()) {
1752         const int level = cb->as_nmethod()->comp_level();
1753         assert(0 <= level && level <= CompLevel_full_optimization, "Invalid compilation level");
1754         live[level].add(cb);
1755       } else if (cb->is_runtime_stub()) {
1756         runtimeStub.add(cb);
1757       } else if (cb->is_deoptimization_stub()) {
1758         deoptimizationStub.add(cb);
1759       } else if (cb->is_uncommon_trap_stub()) {
1760         uncommonTrapStub.add(cb);
1761       } else if (cb->is_adapter_blob()) {
1762         adapter.add(cb);
1763       } else if (cb->is_buffer_blob()) {
1764         bufferBlob.add(cb);
1765       } else {
1766         other.add(cb);
1767       }
1768     }
1769   }
1770 
1771   tty->print_cr("nmethod dependency checking time %fs", dependentCheckTime.seconds());
1772 
1773   tty->print_cr("nmethod blobs per compilation level:");
1774   for (int i = 0; i <= CompLevel_full_optimization; i++) {
1775     const char *level_name;
1776     switch (i) {
1777     case CompLevel_none:              level_name = "none";              break;
1778     case CompLevel_simple:            level_name = "simple";            break;
1779     case CompLevel_limited_profile:   level_name = "limited profile";   break;
1780     case CompLevel_full_profile:      level_name = "full profile";      break;
1781     case CompLevel_full_optimization: level_name = "full optimization"; break;
1782     default: assert(false, "invalid compilation level");
1783     }
1784     tty->print_cr("%s:", level_name);
1785     live[i].print("live");
1786   }
1787 
1788   struct {
1789     const char* name;
1790     const CodeBlob_sizes* sizes;
1791   } non_nmethod_blobs[] = {
1792     { "runtime",        &runtimeStub },
1793     { "uncommon trap",  &uncommonTrapStub },
1794     { "deoptimization", &deoptimizationStub },
1795     { "adapter",        &adapter },
1796     { "buffer blob",    &bufferBlob },
1797     { "other",          &other },
1798   };
1799   tty->print_cr("Non-nmethod blobs:");
1800   for (auto& blob: non_nmethod_blobs) {
1801     blob.sizes->print(blob.name);
1802   }
1803 
1804   if (WizardMode) {
1805     // Print the oop_map usage.
1806     int code_size = 0;
1807     int number_of_blobs = 0;
1808     int number_of_oop_maps = 0;
1809     int map_size = 0;
1810     FOR_ALL_ALLOCABLE_HEAPS(heap) {
1811       FOR_ALL_BLOBS(cb, *heap) {
1812         number_of_blobs++;
1813         code_size += cb->code_size();
1814         ImmutableOopMapSet* set = cb->oop_maps();
1815         if (set != nullptr) {
1816           number_of_oop_maps += set->count();
1817           map_size           += set->nr_of_bytes();
1818         }
1819       }
1820     }
1821     tty->print_cr("OopMaps");
1822     tty->print_cr("  #blobs    = %d", number_of_blobs);
1823     tty->print_cr("  code size = %d", code_size);
1824     tty->print_cr("  #oop_maps = %d", number_of_oop_maps);
1825     tty->print_cr("  map size  = %d", map_size);
1826   }
1827 
1828 #endif // !PRODUCT
1829 }
1830 
1831 void CodeCache::print_nmethods_on(outputStream* st) {
1832   ResourceMark rm;
1833   int i = 0;
1834   FOR_ALL_ALLOCABLE_HEAPS(heap) {
1835     st->print_cr("-- %s --", (*heap)->name());
1836     FOR_ALL_BLOBS(cb, *heap) {
1837       i++;
1838       if (cb->is_nmethod()) {
1839         nmethod* nm = (nmethod*)cb;
1840         st->print("%4d: ", i);
1841         CompileTask::print(st, nm, nullptr, true, false);
1842 
1843         const char non_entrant_char = (nm->is_not_entrant() ? 'N' : ' ');
1844         st->print_cr(" %c", non_entrant_char);
1845       }
1846     }
1847   }
1848 }
1849 
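     // Print a usage summary for each code heap (size, used, max_used, free); with
     // 'detailed', also print heap bounds, totals, blob counts and compiler state.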
1850 void CodeCache::print_summary(outputStream* st, bool detailed) {
1851   int full_count = 0;
1852   julong total_used = 0;
1853   julong total_max_used = 0;
1854   julong total_free = 0;
1855   julong total_size = 0;
1856   FOR_ALL_HEAPS(heap_iterator) {
1857     CodeHeap* heap = (*heap_iterator);
1858     size_t total = (heap->high_boundary() - heap->low_boundary());
1859     if (_heaps->length() >= 1) {
1860       st->print("%s:", heap->name());
1861     } else {
1862       st->print("CodeCache:");
1863     }
1864     size_t size = total/K;
1865     size_t used = (total - heap->unallocated_capacity())/K;
1866     size_t max_used = heap->max_allocated_capacity()/K;
1867     size_t free = heap->unallocated_capacity()/K;
1868     total_size += size;
1869     total_used += used;
1870     total_max_used += max_used;
1871     total_free += free;
1872     st->print_cr(" size=" SIZE_FORMAT "Kb used=" SIZE_FORMAT
1873                  "Kb max_used=" SIZE_FORMAT "Kb free=" SIZE_FORMAT "Kb",
1874                  size, used, max_used, free);
1875 
1876     if (detailed) {
1877       st->print_cr(" bounds [" INTPTR_FORMAT ", " INTPTR_FORMAT ", " INTPTR_FORMAT "]",
1878                    p2i(heap->low_boundary()),
1879                    p2i(heap->high()),
1880                    p2i(heap->high_boundary()));
1881 
1882       full_count += get_codemem_full_count(heap->code_blob_type());
1883     }
1884   }
1885 
1886   if (detailed) {
1887     if (SegmentedCodeCache) {
1888       st->print("CodeCache:");
1889       st->print_cr(" size=" JULONG_FORMAT "Kb, used=" JULONG_FORMAT
1890                    "Kb, max_used=" JULONG_FORMAT "Kb, free=" JULONG_FORMAT "Kb",
1891                    total_size, total_used, total_max_used, total_free);
1892     }
1893     st->print_cr(" total_blobs=" UINT32_FORMAT ", nmethods=" UINT32_FORMAT
1894                  ", adapters=" UINT32_FORMAT ", full_count=" UINT32_FORMAT,
1895                  blob_count(), nmethod_count(), adapter_count(), full_count);
1896     st->print_cr("Compilation: %s, stopped_count=%d, restarted_count=%d",
1897                  CompileBroker::should_compile_new_jobs() ?
1898                  "enabled" : Arguments::mode() == Arguments::_int ?
1899                  "disabled (interpreter mode)" :
1900                  "disabled (not enough contiguous free space left)",
1901                  CompileBroker::get_total_compiler_stopped_count(),
1902                  CompileBroker::get_total_compiler_restarted_count());
1903   }
1904 }
1905 
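     // Print one line per compiled method: compile id, comp level, state, name, and
     // the [header, code begin - code end] address range.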
1906 void CodeCache::print_codelist(outputStream* st) {
1907   MutexLocker mu(CodeCache_lock, Mutex::_no_safepoint_check_flag);
1908 
1909   CompiledMethodIterator iter(CompiledMethodIterator::only_not_unloading);
1910   while (iter.next()) {
1911     CompiledMethod* cm = iter.method();
1912     ResourceMark rm;
1913     char* method_name = cm->method()->name_and_sig_as_C_string();
1914     st->print_cr("%d %d %d %s [" INTPTR_FORMAT ", " INTPTR_FORMAT " - " INTPTR_FORMAT "]",
1915                  cm->compile_id(), cm->comp_level(), cm->get_state(),
1916                  method_name,
1917                  (intptr_t)cm->header_begin(), (intptr_t)cm->code_begin(), (intptr_t)cm->code_end());
1918   }
1919 }
1920 
1921 void CodeCache::print_layout(outputStream* st) {
1922   MutexLocker mu(CodeCache_lock, Mutex::_no_safepoint_check_flag);
1923   ResourceMark rm;
1924   print_summary(st, true);
1925 }
1926 
1927 void CodeCache::log_state(outputStream* st) {
1928   st->print(" total_blobs='" UINT32_FORMAT "' nmethods='" UINT32_FORMAT "'"
1929             " adapters='" UINT32_FORMAT "' free_code_cache='" SIZE_FORMAT "'",
1930             blob_count(), nmethod_count(), adapter_count(),
1931             unallocated_capacity());
1932 }
1933 
1934 #ifdef LINUX
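     // Write a perf(1) JIT map: one "<code start> <code size> <name>" line per code
     // blob, so that perf can symbolize JIT-compiled frames.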
1935 void CodeCache::write_perf_map(const char* filename) {
1936   MutexLocker mu(CodeCache_lock, Mutex::_no_safepoint_check_flag);
1937 
1938   // Perf expects to find the map file at /tmp/perf-<pid>.map
1939   // if the file name is not specified.
1940   char fname[32];
1941   if (filename == nullptr) {
1942     jio_snprintf(fname, sizeof(fname), "/tmp/perf-%d.map", os::current_process_id());
1943     filename = fname;
1944   }
1945 
1946   fileStream fs(filename, "w");
1947   if (!fs.is_open()) {
1948     log_warning(codecache)("Failed to create %s for perf map", filename);
1949     return;
1950   }
1951 
1952   AllCodeBlobsIterator iter(AllCodeBlobsIterator::only_not_unloading);
1953   while (iter.next()) {
1954     CodeBlob *cb = iter.method();
1955     ResourceMark rm;
1956     const char* method_name =
1957       cb->is_compiled() ? cb->as_compiled_method()->method()->external_name()
1958                         : cb->name();
1959     fs.print_cr(INTPTR_FORMAT " " INTPTR_FORMAT " %s",
1960                 (intptr_t)cb->code_begin(), (intptr_t)cb->code_size(),
1961                 method_name);
1962   }
1963 }
1964 #endif // LINUX
1965 
1966 //---<  BEGIN  >--- CodeHeap State Analytics.
1967 
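     // The functions below delegate to CodeHeapState for each allocable code heap;
     // they are driven by CompileBroker::print_heapinfo().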
1968 void CodeCache::aggregate(outputStream *out, size_t granularity) {
1969   FOR_ALL_ALLOCABLE_HEAPS(heap) {
1970     CodeHeapState::aggregate(out, (*heap), granularity);
1971   }
1972 }
1973 
1974 void CodeCache::discard(outputStream *out) {
1975   FOR_ALL_ALLOCABLE_HEAPS(heap) {
1976     CodeHeapState::discard(out, (*heap));
1977   }
1978 }
1979 
1980 void CodeCache::print_usedSpace(outputStream *out) {
1981   FOR_ALL_ALLOCABLE_HEAPS(heap) {
1982     CodeHeapState::print_usedSpace(out, (*heap));
1983   }
1984 }
1985 
1986 void CodeCache::print_freeSpace(outputStream *out) {
1987   FOR_ALL_ALLOCABLE_HEAPS(heap) {
1988     CodeHeapState::print_freeSpace(out, (*heap));
1989   }
1990 }
1991 
1992 void CodeCache::print_count(outputStream *out) {
1993   FOR_ALL_ALLOCABLE_HEAPS(heap) {
1994     CodeHeapState::print_count(out, (*heap));
1995   }
1996 }
1997 
1998 void CodeCache::print_space(outputStream *out) {
1999   FOR_ALL_ALLOCABLE_HEAPS(heap) {
2000     CodeHeapState::print_space(out, (*heap));
2001   }
2002 }
2003 
2004 void CodeCache::print_age(outputStream *out) {
2005   FOR_ALL_ALLOCABLE_HEAPS(heap) {
2006     CodeHeapState::print_age(out, (*heap));
2007   }
2008 }
2009 
2010 void CodeCache::print_names(outputStream *out) {
2011   FOR_ALL_ALLOCABLE_HEAPS(heap) {
2012     CodeHeapState::print_names(out, (*heap));
2013   }
2014 }
2015 //---<  END  >--- CodeHeap State Analytics.