1 /*
   2  * Copyright (c) 1997, 2022, Oracle and/or its affiliates. All rights reserved.
   3  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
   4  *
   5  * This code is free software; you can redistribute it and/or modify it
   6  * under the terms of the GNU General Public License version 2 only, as
   7  * published by the Free Software Foundation.
   8  *
   9  * This code is distributed in the hope that it will be useful, but WITHOUT
  10  * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
  11  * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
  12  * version 2 for more details (a copy is included in the LICENSE file that
  13  * accompanied this code).
  14  *
  15  * You should have received a copy of the GNU General Public License version
  16  * 2 along with this work; if not, write to the Free Software Foundation,
  17  * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
  18  *
  19  * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
  20  * or visit www.oracle.com if you need additional information or have any
  21  * questions.
  22  *
  23  */
  24 
  25 #include "precompiled.hpp"
  26 #include "jvm_io.h"
  27 #include "code/codeBlob.hpp"
  28 #include "code/codeCache.hpp"
  29 #include "code/codeHeapState.hpp"
  30 #include "code/compiledIC.hpp"
  31 #include "code/dependencies.hpp"
  32 #include "code/dependencyContext.hpp"
  33 #include "code/icBuffer.hpp"
  34 #include "code/nmethod.hpp"
  35 #include "code/pcDesc.hpp"
  36 #include "compiler/compilationPolicy.hpp"
  37 #include "compiler/compileBroker.hpp"
  38 #include "compiler/oopMap.hpp"
  39 #include "gc/shared/collectedHeap.hpp"
  40 #include "jfr/jfrEvents.hpp"
  41 #include "logging/log.hpp"
  42 #include "logging/logStream.hpp"
  43 #include "memory/allocation.inline.hpp"
  44 #include "memory/iterator.hpp"
  45 #include "memory/resourceArea.hpp"
  46 #include "memory/universe.hpp"
  47 #include "oops/method.inline.hpp"
  48 #include "oops/objArrayOop.hpp"
  49 #include "oops/oop.inline.hpp"
  50 #include "oops/verifyOopClosure.hpp"
  51 #include "runtime/arguments.hpp"
  52 #include "runtime/atomic.hpp"
  53 #include "runtime/deoptimization.hpp"
  54 #include "runtime/globals_extension.hpp"
  55 #include "runtime/handles.inline.hpp"
  56 #include "runtime/icache.hpp"
  57 #include "runtime/java.hpp"
  58 #include "runtime/mutexLocker.hpp"
  59 #include "runtime/safepointVerifiers.hpp"
  60 #include "runtime/sweeper.hpp"
  61 #include "runtime/vmThread.hpp"
  62 #include "services/memoryService.hpp"
  63 #include "utilities/align.hpp"
  64 #include "utilities/vmError.hpp"
  65 #include "utilities/xmlstream.hpp"
  66 #ifdef COMPILER1
  67 #include "c1/c1_Compilation.hpp"
  68 #include "c1/c1_Compiler.hpp"
  69 #endif
  70 #ifdef COMPILER2
  71 #include "opto/c2compiler.hpp"
  72 #include "opto/compile.hpp"
  73 #include "opto/node.hpp"
  74 #endif
  75 
  76 // Helper class for printing in CodeCache
  77 class CodeBlob_sizes {
  78  private:
  79   int count;
  80   int total_size;
  81   int header_size;
  82   int code_size;
  83   int stub_size;
  84   int relocation_size;
  85   int scopes_oop_size;
  86   int scopes_metadata_size;
  87   int scopes_data_size;
  88   int scopes_pcs_size;
  89 
  90  public:
  91   CodeBlob_sizes() {
  92     count            = 0;
  93     total_size       = 0;
  94     header_size      = 0;
  95     code_size        = 0;
  96     stub_size        = 0;
  97     relocation_size  = 0;
  98     scopes_oop_size  = 0;
  99     scopes_metadata_size  = 0;
 100     scopes_data_size = 0;
 101     scopes_pcs_size  = 0;
 102   }
 103 
 104   int total() const                              { return total_size; }
 105   bool is_empty() const                          { return count == 0; }
 106 
 107   void print(const char* title) const {
 108     if (is_empty()) {
 109       tty->print_cr(" #%d %s = %dK",
 110                     count,
 111                     title,
 112                     total()                 / (int)K);
 113     } else {
 114       tty->print_cr(" #%d %s = %dK (hdr %dK %d%%, loc %dK %d%%, code %dK %d%%, stub %dK %d%%, [oops %dK %d%%, metadata %dK %d%%, data %dK %d%%, pcs %dK %d%%])",
 115                     count,
 116                     title,
 117                     total()                 / (int)K,
 118                     header_size             / (int)K,
 119                     header_size             * 100 / total_size,
 120                     relocation_size         / (int)K,
 121                     relocation_size         * 100 / total_size,
 122                     code_size               / (int)K,
 123                     code_size               * 100 / total_size,
 124                     stub_size               / (int)K,
 125                     stub_size               * 100 / total_size,
 126                     scopes_oop_size         / (int)K,
 127                     scopes_oop_size         * 100 / total_size,
 128                     scopes_metadata_size    / (int)K,
 129                     scopes_metadata_size    * 100 / total_size,
 130                     scopes_data_size        / (int)K,
 131                     scopes_data_size        * 100 / total_size,
 132                     scopes_pcs_size         / (int)K,
 133                     scopes_pcs_size         * 100 / total_size);
 134     }
 135   }
 136 
 137   void add(CodeBlob* cb) {
 138     count++;
 139     total_size       += cb->size();
 140     header_size      += cb->header_size();
 141     relocation_size  += cb->relocation_size();
 142     if (cb->is_nmethod()) {
 143       nmethod* nm = cb->as_nmethod_or_null();
 144       code_size        += nm->insts_size();
 145       stub_size        += nm->stub_size();
 146 
 147       scopes_oop_size  += nm->oops_size();
 148       scopes_metadata_size  += nm->metadata_size();
 149       scopes_data_size += nm->scopes_data_size();
 150       scopes_pcs_size  += nm->scopes_pcs_size();
 151     } else {
 152       code_size        += cb->code_size();
 153     }
 154   }
 155 };
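
     // Illustrative usage sketch of CodeBlob_sizes (the same pattern appears in
     // CodeCache::print() further down in this file): accumulate one counter
     // object per blob category while walking the code cache, then emit one
     // summary line per category, e.g.
     //
     //   CodeBlob_sizes adapter_sizes;
     //   // ... for each CodeBlob* cb in a heap:
     //   if (cb->is_adapter_blob()) {
     //     adapter_sizes.add(cb);
     //   }
     //   // ... after the walk:
     //   adapter_sizes.print("adapters");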
 156 
 157 // Iterate over all CodeHeaps
 158 #define FOR_ALL_HEAPS(heap) for (GrowableArrayIterator<CodeHeap*> heap = _heaps->begin(); heap != _heaps->end(); ++heap)
 159 #define FOR_ALL_NMETHOD_HEAPS(heap) for (GrowableArrayIterator<CodeHeap*> heap = _nmethod_heaps->begin(); heap != _nmethod_heaps->end(); ++heap)
 160 #define FOR_ALL_ALLOCABLE_HEAPS(heap) for (GrowableArrayIterator<CodeHeap*> heap = _allocable_heaps->begin(); heap != _allocable_heaps->end(); ++heap)
 161 
 162 // Iterate over all CodeBlobs (cb) on the given CodeHeap
 163 #define FOR_ALL_BLOBS(cb, heap) for (CodeBlob* cb = first_blob(heap); cb != NULL; cb = next_blob(heap, cb))
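
     // Typical use of the iteration macros above (see, e.g., CodeCache::blobs_do()
     // below); the caller must hold the CodeCache_lock or be at a safepoint:
     //
     //   FOR_ALL_HEAPS(heap) {
     //     FOR_ALL_BLOBS(cb, *heap) {
     //       // ... inspect CodeBlob* cb ...
     //     }
     //   }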
 164 
 165 address CodeCache::_low_bound = 0;
 166 address CodeCache::_high_bound = 0;
 167 int CodeCache::_number_of_nmethods_with_dependencies = 0;
 168 ExceptionCache* volatile CodeCache::_exception_cache_purge_list = NULL;
 169 
 170 // Initialize arrays of CodeHeap subsets
 171 GrowableArray<CodeHeap*>* CodeCache::_heaps = new(ResourceObj::C_HEAP, mtCode) GrowableArray<CodeHeap*> (CodeBlobType::All, mtCode);
 172 GrowableArray<CodeHeap*>* CodeCache::_compiled_heaps = new(ResourceObj::C_HEAP, mtCode) GrowableArray<CodeHeap*> (CodeBlobType::All, mtCode);
 173 GrowableArray<CodeHeap*>* CodeCache::_nmethod_heaps = new(ResourceObj::C_HEAP, mtCode) GrowableArray<CodeHeap*> (CodeBlobType::All, mtCode);
 174 GrowableArray<CodeHeap*>* CodeCache::_allocable_heaps = new(ResourceObj::C_HEAP, mtCode) GrowableArray<CodeHeap*> (CodeBlobType::All, mtCode);
 175 
 176 void CodeCache::check_heap_sizes(size_t non_nmethod_size, size_t profiled_size, size_t non_profiled_size, size_t cache_size, bool all_set) {
 177   size_t total_size = non_nmethod_size + profiled_size + non_profiled_size;
 178   // Prepare error message
 179   const char* error = "Invalid code heap sizes";
 180   err_msg message("NonNMethodCodeHeapSize (" SIZE_FORMAT "K) + ProfiledCodeHeapSize (" SIZE_FORMAT "K)"
 181                   " + NonProfiledCodeHeapSize (" SIZE_FORMAT "K) = " SIZE_FORMAT "K",
 182           non_nmethod_size/K, profiled_size/K, non_profiled_size/K, total_size/K);
 183 
 184   if (total_size > cache_size) {
 185     // Some code heap sizes were explicitly set: total_size must be <= cache_size
 186     message.append(" is greater than ReservedCodeCacheSize (" SIZE_FORMAT "K).", cache_size/K);
 187     vm_exit_during_initialization(error, message);
 188   } else if (all_set && total_size != cache_size) {
 189     // All code heap sizes were explicitly set: total_size must equal cache_size
 190     message.append(" is not equal to ReservedCodeCacheSize (" SIZE_FORMAT "K).", cache_size/K);
 191     vm_exit_during_initialization(error, message);
 192   }
 193 }
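
     // Illustrative example (hypothetical flag values): with
     // -XX:ReservedCodeCacheSize=100m -XX:ProfiledCodeHeapSize=60m
     // -XX:NonProfiledCodeHeapSize=60m and NonNMethodCodeHeapSize left at its
     // default, initialize_heaps() passes one page for the unset heap, so the sum
     // 60m + 60m + one page exceeds 100m and the VM exits with "Invalid code heap
     // sizes". If all three heap sizes are set explicitly, their sum must equal
     // ReservedCodeCacheSize exactly.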
 194 
 195 void CodeCache::initialize_heaps() {
 196   bool non_nmethod_set      = FLAG_IS_CMDLINE(NonNMethodCodeHeapSize);
 197   bool profiled_set         = FLAG_IS_CMDLINE(ProfiledCodeHeapSize);
 198   bool non_profiled_set     = FLAG_IS_CMDLINE(NonProfiledCodeHeapSize);
 199   size_t min_size           = os::vm_page_size();
 200   size_t cache_size         = ReservedCodeCacheSize;
 201   size_t non_nmethod_size   = NonNMethodCodeHeapSize;
 202   size_t profiled_size      = ProfiledCodeHeapSize;
 203   size_t non_profiled_size  = NonProfiledCodeHeapSize;
 204   // Check if total size set via command line flags exceeds the reserved size
 205   check_heap_sizes((non_nmethod_set  ? non_nmethod_size  : min_size),
 206                    (profiled_set     ? profiled_size     : min_size),
 207                    (non_profiled_set ? non_profiled_size : min_size),
 208                    cache_size,
 209                    non_nmethod_set && profiled_set && non_profiled_set);
 210 
 211   // Determine size of compiler buffers
 212   size_t code_buffers_size = 0;
 213 #ifdef COMPILER1
 214   // C1 temporary code buffers (see Compiler::init_buffer_blob())
 215   const int c1_count = CompilationPolicy::c1_count();
 216   code_buffers_size += c1_count * Compiler::code_buffer_size();
 217 #endif
 218 #ifdef COMPILER2
 219   // C2 scratch buffers (see Compile::init_scratch_buffer_blob())
 220   const int c2_count = CompilationPolicy::c2_count();
 221   // Initial size of constant table (this may be increased if a compiled method needs more space)
 222   code_buffers_size += c2_count * C2Compiler::initial_code_buffer_size();
 223 #endif
 224 
 225   // Increase default non_nmethod_size to account for compiler buffers
 226   if (!non_nmethod_set) {
 227     non_nmethod_size += code_buffers_size;
 228   }
 229   // Calculate default CodeHeap sizes if not set by user
 230   if (!non_nmethod_set && !profiled_set && !non_profiled_set) {
 231     // Check if we have enough space for the non-nmethod code heap
 232     if (cache_size > non_nmethod_size) {
 233       // Use the default value for non_nmethod_size and split the remaining
 234       // size evenly between the profiled and non-profiled code heaps
 235       size_t remaining_size = cache_size - non_nmethod_size;
 236       profiled_size = remaining_size / 2;
 237       non_profiled_size = remaining_size - profiled_size;
 238     } else {
 239       // Use all space for the non-nmethod heap and set other heaps to minimal size
 240       non_nmethod_size = cache_size - 2 * min_size;
 241       profiled_size = min_size;
 242       non_profiled_size = min_size;
 243     }
 244   } else if (!non_nmethod_set || !profiled_set || !non_profiled_set) {
 245     // The user explicitly set some code heap sizes. Increase or decrease the (default)
 246     // sizes of the other code heaps accordingly. First adapt non-profiled and profiled
 247     // code heap sizes and then only change non-nmethod code heap size if still necessary.
 248     intx diff_size = cache_size - (non_nmethod_size + profiled_size + non_profiled_size);
 249     if (non_profiled_set) {
 250       if (!profiled_set) {
 251         // Adapt size of profiled code heap
 252         if (diff_size < 0 && ((intx)profiled_size + diff_size) <= 0) {
 253           // Not enough space available, set to minimum size
 254           diff_size += profiled_size - min_size;
 255           profiled_size = min_size;
 256         } else {
 257           profiled_size += diff_size;
 258           diff_size = 0;
 259         }
 260       }
 261     } else if (profiled_set) {
 262       // Adapt size of non-profiled code heap
 263       if (diff_size < 0 && ((intx)non_profiled_size + diff_size) <= 0) {
 264         // Not enough space available, set to minimum size
 265         diff_size += non_profiled_size - min_size;
 266         non_profiled_size = min_size;
 267       } else {
 268         non_profiled_size += diff_size;
 269         diff_size = 0;
 270       }
 271     } else if (non_nmethod_set) {
 272       // Distribute remaining size between profiled and non-profiled code heaps
 273       diff_size = cache_size - non_nmethod_size;
 274       profiled_size = diff_size / 2;
 275       non_profiled_size = diff_size - profiled_size;
 276       diff_size = 0;
 277     }
 278     if (diff_size != 0) {
 279       // Use non-nmethod code heap for remaining space requirements
 280       assert(!non_nmethod_set && ((intx)non_nmethod_size + diff_size) > 0, "sanity");
 281       non_nmethod_size += diff_size;
 282     }
 283   }
 284 
 285   // We do not need the profiled CodeHeap, use all space for the non-profiled CodeHeap
 286   if (!heap_available(CodeBlobType::MethodProfiled)) {
 287     non_profiled_size += profiled_size;
 288     profiled_size = 0;
 289   }
 290   // We do not need the non-profiled CodeHeap, use all space for the non-nmethod CodeHeap
 291   if (!heap_available(CodeBlobType::MethodNonProfiled)) {
 292     non_nmethod_size += non_profiled_size;
 293     non_profiled_size = 0;
 294   }
 295   // Make sure we have enough space for VM internal code
 296   uint min_code_cache_size = CodeCacheMinimumUseSpace DEBUG_ONLY(* 3);
 297   if (non_nmethod_size < min_code_cache_size) {
 298     vm_exit_during_initialization(err_msg(
 299         "Not enough space in non-nmethod code heap to run VM: " SIZE_FORMAT "K < " SIZE_FORMAT "K",
 300         non_nmethod_size/K, min_code_cache_size/K));
 301   }
 302 
 303   // Verify sizes and update flag values
 304   assert(non_profiled_size + profiled_size + non_nmethod_size == cache_size, "Invalid code heap sizes");
 305   FLAG_SET_ERGO(NonNMethodCodeHeapSize, non_nmethod_size);
 306   FLAG_SET_ERGO(ProfiledCodeHeapSize, profiled_size);
 307   FLAG_SET_ERGO(NonProfiledCodeHeapSize, non_profiled_size);
 308 
 309   // If large page support is enabled, align code heaps according to large
 310   // page size to make sure that code cache is covered by large pages.
 311   const size_t alignment = MAX2(page_size(false, 8), (size_t) os::vm_allocation_granularity());
 312   non_nmethod_size = align_up(non_nmethod_size, alignment);
 313   profiled_size    = align_down(profiled_size, alignment);
 314   non_profiled_size = align_down(non_profiled_size, alignment);
 315 
 316   // Reserve one contiguous chunk of memory for CodeHeaps and split it into
 317   // parts for the individual heaps. The memory layout looks like this:
 318   // ---------- high -----------
 319   //    Non-profiled nmethods
 320   //         Non-nmethods
 321   //      Profiled nmethods
 322   // ---------- low ------------
 323   ReservedCodeSpace rs = reserve_heap_memory(cache_size);
 324   ReservedSpace profiled_space      = rs.first_part(profiled_size);
 325   ReservedSpace rest                = rs.last_part(profiled_size);
 326   ReservedSpace non_method_space    = rest.first_part(non_nmethod_size);
 327   ReservedSpace non_profiled_space  = rest.last_part(non_nmethod_size);
 328 
 329   // Non-nmethods (stubs, adapters, ...)
 330   add_heap(non_method_space, "CodeHeap 'non-nmethods'", CodeBlobType::NonNMethod);
 331   // Tier 2 and tier 3 (profiled) methods
 332   add_heap(profiled_space, "CodeHeap 'profiled nmethods'", CodeBlobType::MethodProfiled);
 333   // Tier 1 and tier 4 (non-profiled) methods and native methods
 334   add_heap(non_profiled_space, "CodeHeap 'non-profiled nmethods'", CodeBlobType::MethodNonProfiled);
 335 }
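
     // Worked example of the default split above (illustrative; the actual default
     // sizes vary by platform and compiler thread count): with cache_size = 240M
     // and none of the three code heap sizes set on the command line,
     //   non_nmethod_size  = NonNMethodCodeHeapSize default + code_buffers_size
     //   remaining_size    = cache_size - non_nmethod_size
     //   profiled_size     = remaining_size / 2
     //   non_profiled_size = remaining_size - profiled_size
     // after which the sizes are aligned to the page/allocation granularity and the
     // heaps are laid out as profiled (low) | non-nmethod | non-profiled (high).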
 336 
 337 size_t CodeCache::page_size(bool aligned, size_t min_pages) {
 338   if (os::can_execute_large_page_memory()) {
 339     if (InitialCodeCacheSize < ReservedCodeCacheSize) {
 340       // Make sure that the page size allows for an incremental commit of the reserved space
 341       min_pages = MAX2(min_pages, (size_t)8);
 342     }
 343     return aligned ? os::page_size_for_region_aligned(ReservedCodeCacheSize, min_pages) :
 344                      os::page_size_for_region_unaligned(ReservedCodeCacheSize, min_pages);
 345   } else {
 346     return os::vm_page_size();
 347   }
 348 }
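
     // Rough example of the clamp above (illustrative numbers): with
     // ReservedCodeCacheSize=256M and InitialCodeCacheSize smaller than that,
     // requiring at least 8 pages caps the selected page size at 256M / 8 = 32M,
     // so e.g. 1G large pages would be rejected and the reserved space can still
     // be committed incrementally in smaller steps.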
 349 
 350 ReservedCodeSpace CodeCache::reserve_heap_memory(size_t size) {
 351   // Align and reserve space for code cache
 352   const size_t rs_ps = page_size();
 353   const size_t rs_align = MAX2(rs_ps, (size_t) os::vm_allocation_granularity());
 354   const size_t rs_size = align_up(size, rs_align);
 355   ReservedCodeSpace rs(rs_size, rs_align, rs_ps);
 356   if (!rs.is_reserved()) {
 357     vm_exit_during_initialization(err_msg("Could not reserve enough space for code cache (" SIZE_FORMAT "K)",
 358                                           rs_size/K));
 359   }
 360 
 361   // Initialize bounds
 362   _low_bound = (address)rs.base();
 363   _high_bound = _low_bound + rs.size();
 364   return rs;
 365 }
 366 
 367 // Heaps available for allocation
 368 bool CodeCache::heap_available(int code_blob_type) {
 369   if (!SegmentedCodeCache) {
 370     // No segmentation: use a single code heap
 371     return (code_blob_type == CodeBlobType::All);
 372   } else if (CompilerConfig::is_interpreter_only()) {
 373     // Interpreter only: we don't need any method code heaps
 374     return (code_blob_type == CodeBlobType::NonNMethod);
 375   } else if (CompilerConfig::is_c1_profiling()) {
 376     // Tiered compilation: use all code heaps
 377     return (code_blob_type < CodeBlobType::All);
 378   } else {
 379     // No TieredCompilation: we only need the non-nmethod and non-profiled code heap
 380     return (code_blob_type == CodeBlobType::NonNMethod) ||
 381            (code_blob_type == CodeBlobType::MethodNonProfiled);
 382   }
 383 }
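
     // Summary of the cases above (which code heaps exist in each configuration):
     //   !SegmentedCodeCache     -> single 'All' heap
     //   interpreter-only VM     -> NonNMethod only
     //   C1 profiling (tiered)   -> NonNMethod, MethodProfiled, MethodNonProfiled
     //   otherwise               -> NonNMethod, MethodNonProfiled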
 384 
 385 const char* CodeCache::get_code_heap_flag_name(int code_blob_type) {
 386   switch(code_blob_type) {
 387   case CodeBlobType::NonNMethod:
 388     return "NonNMethodCodeHeapSize";
 389     break;
 390   case CodeBlobType::MethodNonProfiled:
 391     return "NonProfiledCodeHeapSize";
 392     break;
 393   case CodeBlobType::MethodProfiled:
 394     return "ProfiledCodeHeapSize";
 395     break;
 396   }
 397   ShouldNotReachHere();
 398   return NULL;
 399 }
 400 
 401 int CodeCache::code_heap_compare(CodeHeap* const &lhs, CodeHeap* const &rhs) {
 402   if (lhs->code_blob_type() == rhs->code_blob_type()) {
 403     return (lhs > rhs) ? 1 : ((lhs < rhs) ? -1 : 0);
 404   } else {
 405     return lhs->code_blob_type() - rhs->code_blob_type();
 406   }
 407 }
 408 
 409 void CodeCache::add_heap(CodeHeap* heap) {
 410   assert(!Universe::is_fully_initialized(), "late heap addition?");
 411 
 412   _heaps->insert_sorted<code_heap_compare>(heap);
 413 
 414   int type = heap->code_blob_type();
 415   if (code_blob_type_accepts_compiled(type)) {
 416     _compiled_heaps->insert_sorted<code_heap_compare>(heap);
 417   }
 418   if (code_blob_type_accepts_nmethod(type)) {
 419     _nmethod_heaps->insert_sorted<code_heap_compare>(heap);
 420   }
 421   if (code_blob_type_accepts_allocable(type)) {
 422     _allocable_heaps->insert_sorted<code_heap_compare>(heap);
 423   }
 424 }
 425 
 426 void CodeCache::add_heap(ReservedSpace rs, const char* name, int code_blob_type) {
 427   // Check if heap is needed
 428   if (!heap_available(code_blob_type)) {
 429     return;
 430   }
 431 
 432   // Create CodeHeap
 433   CodeHeap* heap = new CodeHeap(name, code_blob_type);
 434   add_heap(heap);
 435 
 436   // Reserve Space
 437   size_t size_initial = MIN2((size_t)InitialCodeCacheSize, rs.size());
 438   size_initial = align_up(size_initial, os::vm_page_size());
 439   if (!heap->reserve(rs, size_initial, CodeCacheSegmentSize)) {
 440     vm_exit_during_initialization(err_msg("Could not reserve enough space in %s (" SIZE_FORMAT "K)",
 441                                           heap->name(), size_initial/K));
 442   }
 443 
 444   // Register the CodeHeap
 445   MemoryService::add_code_heap_memory_pool(heap, name);
 446 }
 447 
 448 CodeHeap* CodeCache::get_code_heap_containing(void* start) {
 449   FOR_ALL_HEAPS(heap) {
 450     if ((*heap)->contains(start)) {
 451       return *heap;
 452     }
 453   }
 454   return NULL;
 455 }
 456 
 457 CodeHeap* CodeCache::get_code_heap(const CodeBlob* cb) {
 458   assert(cb != NULL, "CodeBlob is null");
 459   FOR_ALL_HEAPS(heap) {
 460     if ((*heap)->contains_blob(cb)) {
 461       return *heap;
 462     }
 463   }
 464   ShouldNotReachHere();
 465   return NULL;
 466 }
 467 
 468 CodeHeap* CodeCache::get_code_heap(int code_blob_type) {
 469   FOR_ALL_HEAPS(heap) {
 470     if ((*heap)->accepts(code_blob_type)) {
 471       return *heap;
 472     }
 473   }
 474   return NULL;
 475 }
 476 
 477 CodeBlob* CodeCache::first_blob(CodeHeap* heap) {
 478   assert_locked_or_safepoint(CodeCache_lock);
 479   assert(heap != NULL, "heap is null");
 480   return (CodeBlob*)heap->first();
 481 }
 482 
 483 CodeBlob* CodeCache::first_blob(int code_blob_type) {
 484   if (heap_available(code_blob_type)) {
 485     return first_blob(get_code_heap(code_blob_type));
 486   } else {
 487     return NULL;
 488   }
 489 }
 490 
 491 CodeBlob* CodeCache::next_blob(CodeHeap* heap, CodeBlob* cb) {
 492   assert_locked_or_safepoint(CodeCache_lock);
 493   assert(heap != NULL, "heap is null");
 494   return (CodeBlob*)heap->next(cb);
 495 }
 496 
 497 /**
 498  * Do not acquire the CodeCache lock here: if the caller has not
 499  * already done so, things will go badly wrong, since the code
 500  * cache will contain a garbage CodeBlob until the caller has
 501  * run the constructor for the CodeBlob subclass it is busy
 502  * instantiating.
 503  */
 504 CodeBlob* CodeCache::allocate(int size, int code_blob_type, bool handle_alloc_failure, int orig_code_blob_type) {
 505   // Possibly wakes up the sweeper thread.
 506   NMethodSweeper::report_allocation();
 507   assert_locked_or_safepoint(CodeCache_lock);
 508   assert(size > 0, "Code cache allocation request must be > 0 but is %d", size);
 509   if (size <= 0) {
 510     return NULL;
 511   }
 512   CodeBlob* cb = NULL;
 513 
 514   // Get CodeHeap for the given CodeBlobType
 515   CodeHeap* heap = get_code_heap(code_blob_type);
 516   assert(heap != NULL, "heap is null");
 517 
 518   while (true) {
 519     cb = (CodeBlob*)heap->allocate(size);
 520     if (cb != NULL) break;
 521     if (!heap->expand_by(CodeCacheExpansionSize)) {
 522       // Save original type for error reporting
 523       if (orig_code_blob_type == CodeBlobType::All) {
 524         orig_code_blob_type = code_blob_type;
 525       }
 526       // Expansion failed
 527       if (SegmentedCodeCache) {
 528         // Fallback solution: Try to store code in another code heap.
 529         // NonNMethod -> MethodNonProfiled -> MethodProfiled (-> MethodNonProfiled)
 530         // Note that in the sweeper, we check the reverse_free_ratio of the code heap
 531         // and force stack scanning if less than 10% of the entire code cache is free.
 532         int type = code_blob_type;
 533         switch (type) {
 534         case CodeBlobType::NonNMethod:
 535           type = CodeBlobType::MethodNonProfiled;
 536           break;
 537         case CodeBlobType::MethodNonProfiled:
 538           type = CodeBlobType::MethodProfiled;
 539           break;
 540         case CodeBlobType::MethodProfiled:
 541           // Avoid loop if we already tried that code heap
 542           if (type == orig_code_blob_type) {
 543             type = CodeBlobType::MethodNonProfiled;
 544           }
 545           break;
 546         }
 547         if (type != code_blob_type && type != orig_code_blob_type && heap_available(type)) {
 548           if (PrintCodeCacheExtension) {
 549             tty->print_cr("Extension of %s failed. Trying to allocate in %s.",
 550                           heap->name(), get_code_heap(type)->name());
 551           }
 552           return allocate(size, type, handle_alloc_failure, orig_code_blob_type);
 553         }
 554       }
 555       if (handle_alloc_failure) {
 556         MutexUnlocker mu(CodeCache_lock, Mutex::_no_safepoint_check_flag);
 557         CompileBroker::handle_full_code_cache(orig_code_blob_type);
 558       }
 559       return NULL;
 560     }
 561     if (PrintCodeCacheExtension) {
 562       ResourceMark rm;
 563       if (_nmethod_heaps->length() >= 1) {
 564         tty->print("%s", heap->name());
 565       } else {
 566         tty->print("CodeCache");
 567       }
 568       tty->print_cr(" extended to [" INTPTR_FORMAT ", " INTPTR_FORMAT "] (" SSIZE_FORMAT " bytes)",
 569                     (intptr_t)heap->low_boundary(), (intptr_t)heap->high(),
 570                     (address)heap->high() - (address)heap->low_boundary());
 571     }
 572   }
 573   print_trace("allocation", cb, size);
 574   return cb;
 575 }
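
     // Fallback sketch for the segmented case above: a failed NonNMethod allocation
     // retries in MethodNonProfiled and then MethodProfiled; a failed
     // MethodNonProfiled allocation retries in MethodProfiled; a failed
     // MethodProfiled allocation retries in MethodNonProfiled. The heap the request
     // started in is never retried, and a full code cache is reported against that
     // original heap (orig_code_blob_type).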
 576 
 577 void CodeCache::free(CodeBlob* cb) {
 578   assert_locked_or_safepoint(CodeCache_lock);
 579   CodeHeap* heap = get_code_heap(cb);
 580   print_trace("free", cb);
 581   if (cb->is_nmethod()) {
 582     heap->set_nmethod_count(heap->nmethod_count() - 1);
 583     if (((nmethod *)cb)->has_dependencies()) {
 584       _number_of_nmethods_with_dependencies--;
 585     }
 586   }
 587   if (cb->is_adapter_blob()) {
 588     heap->set_adapter_count(heap->adapter_count() - 1);
 589   }
 590 
 591   // Get heap for given CodeBlob and deallocate
 592   get_code_heap(cb)->deallocate(cb);
 593 
 594   assert(heap->blob_count() >= 0, "sanity check");
 595 }
 596 
 597 void CodeCache::free_unused_tail(CodeBlob* cb, size_t used) {
 598   assert_locked_or_safepoint(CodeCache_lock);
 599   guarantee(cb->is_buffer_blob() && strncmp("Interpreter", cb->name(), 11) == 0, "Only possible for interpreter!");
 600   print_trace("free_unused_tail", cb);
 601 
 602   // We also have to account for the extra space (i.e. header) used by the CodeBlob
 603   // which provides the memory (see BufferBlob::create() in codeBlob.cpp).
 604   used += CodeBlob::align_code_offset(cb->header_size());
 605 
 606   // Get heap for given CodeBlob and deallocate its unused tail
 607   get_code_heap(cb)->deallocate_tail(cb, used);
 608   // Adjust the sizes of the CodeBlob
 609   cb->adjust_size(used);
 610 }
 611 
 612 void CodeCache::commit(CodeBlob* cb) {
 613   // this is called by nmethod::nmethod, which must already own CodeCache_lock
 614   assert_locked_or_safepoint(CodeCache_lock);
 615   CodeHeap* heap = get_code_heap(cb);
 616   if (cb->is_nmethod()) {
 617     heap->set_nmethod_count(heap->nmethod_count() + 1);
 618     if (((nmethod *)cb)->has_dependencies()) {
 619       _number_of_nmethods_with_dependencies++;
 620     }
 621   }
 622   if (cb->is_adapter_blob()) {
 623     heap->set_adapter_count(heap->adapter_count() + 1);
 624   }
 625 
 626   // flush the hardware I-cache
 627   ICache::invalidate_range(cb->content_begin(), cb->content_size());
 628 }
 629 
 630 bool CodeCache::contains(void *p) {
 631   // S390 uses contains() in current_frame(), which is used before
 632   // code cache initialization if NativeMemoryTracking=detail is set.
 633   S390_ONLY(if (_heaps == NULL) return false;)
 634   // It should be ok to call contains without holding a lock.
 635   FOR_ALL_HEAPS(heap) {
 636     if ((*heap)->contains(p)) {
 637       return true;
 638     }
 639   }
 640   return false;
 641 }
 642 
 643 bool CodeCache::contains(nmethod *nm) {
 644   return contains((void *)nm);
 645 }
 646 
 647 // This method is safe to call without holding the CodeCache_lock, as long as a dead CodeBlob is not
 648 // looked up (i.e., one that has been marked for deletion). It only depends on the _segmap to contain
 649 // valid indices, which it will always do, as long as the CodeBlob is not in the process of being recycled.
 650 CodeBlob* CodeCache::find_blob(void* start) {
 651   CodeBlob* result = find_blob_unsafe(start);
 652   // We could potentially look up non_entrant methods
 653   guarantee(result == NULL || !result->is_zombie() || result->is_locked_by_vm() || VMError::is_error_reported(), "unsafe access to zombie method");
 654   return result;
 655 }
 656 
 657 // Lookup that does not fail if you look up a zombie method (if you call this, be sure you know
 658 // what you are doing).
 659 CodeBlob* CodeCache::find_blob_unsafe(void* start) {
 660   // NMT can walk the stack before code cache is created
 661   if (_heaps != NULL) {
 662     CodeHeap* heap = get_code_heap_containing(start);
 663     if (heap != NULL) {
 664       return heap->find_blob_unsafe(start);
 665     }
 666   }
 667   return NULL;
 668 }
 669 
 670 nmethod* CodeCache::find_nmethod(void* start) {
 671   CodeBlob* cb = find_blob(start);
 672   assert(cb->is_nmethod(), "did not find an nmethod");
 673   return (nmethod*)cb;
 674 }
 675 
 676 void CodeCache::blobs_do(void f(CodeBlob* nm)) {
 677   assert_locked_or_safepoint(CodeCache_lock);
 678   FOR_ALL_HEAPS(heap) {
 679     FOR_ALL_BLOBS(cb, *heap) {
 680       f(cb);
 681     }
 682   }
 683 }
 684 
 685 void CodeCache::nmethods_do(void f(nmethod* nm)) {
 686   assert_locked_or_safepoint(CodeCache_lock);
 687   NMethodIterator iter(NMethodIterator::all_blobs);
 688   while(iter.next()) {
 689     f(iter.method());
 690   }
 691 }
 692 
 693 void CodeCache::metadata_do(MetadataClosure* f) {
 694   assert_locked_or_safepoint(CodeCache_lock);
 695   NMethodIterator iter(NMethodIterator::only_alive);
 696   while(iter.next()) {
 697     iter.method()->metadata_do(f);
 698   }
 699 }
 700 
 701 int CodeCache::alignment_unit() {
 702   return (int)_heaps->first()->alignment_unit();
 703 }
 704 
 705 int CodeCache::alignment_offset() {
 706   return (int)_heaps->first()->alignment_offset();
 707 }
 708 
 709 // Mark nmethods for unloading if they contain otherwise unreachable oops.
 710 void CodeCache::do_unloading(BoolObjectClosure* is_alive, bool unloading_occurred) {
 711   assert_locked_or_safepoint(CodeCache_lock);
 712   UnloadingScope scope(is_alive);
 713   CompiledMethodIterator iter(CompiledMethodIterator::only_alive);
 714   while(iter.next()) {
 715     iter.method()->do_unloading(unloading_occurred);
 716   }
 717 }
 718 
 719 void CodeCache::blobs_do(CodeBlobClosure* f) {
 720   assert_locked_or_safepoint(CodeCache_lock);
 721   FOR_ALL_ALLOCABLE_HEAPS(heap) {
 722     FOR_ALL_BLOBS(cb, *heap) {
 723       if (cb->is_alive()) {
 724         f->do_code_blob(cb);
 725 #ifdef ASSERT
 726         if (cb->is_nmethod()) {
 727           Universe::heap()->verify_nmethod((nmethod*)cb);
 728         }
 729 #endif //ASSERT
 730       }
 731     }
 732   }
 733 }
 734 
 735 void CodeCache::verify_clean_inline_caches() {
 736 #ifdef ASSERT
 737   NMethodIterator iter(NMethodIterator::only_alive_and_not_unloading);
 738   while(iter.next()) {
 739     nmethod* nm = iter.method();
 740     assert(!nm->is_unloaded(), "Tautology");
 741     nm->verify_clean_inline_caches();
 742     nm->verify();
 743   }
 744 #endif
 745 }
 746 
 747 void CodeCache::verify_icholder_relocations() {
 748 #ifdef ASSERT
 749   // make sure that we aren't leaking icholders
 750   int count = 0;
 751   FOR_ALL_HEAPS(heap) {
 752     FOR_ALL_BLOBS(cb, *heap) {
 753       CompiledMethod *nm = cb->as_compiled_method_or_null();
 754       if (nm != NULL) {
 755         count += nm->verify_icholder_relocations();
 756       }
 757     }
 758   }
 759   assert(count + InlineCacheBuffer::pending_icholder_count() + CompiledICHolder::live_not_claimed_count() ==
 760          CompiledICHolder::live_count(), "must agree");
 761 #endif
 762 }
 763 
 764 // Defer freeing of concurrently cleaned ExceptionCache entries until
 765 // after a global handshake operation.
 766 void CodeCache::release_exception_cache(ExceptionCache* entry) {
 767   if (SafepointSynchronize::is_at_safepoint()) {
 768     delete entry;
 769   } else {
 770     for (;;) {
 771       ExceptionCache* purge_list_head = Atomic::load(&_exception_cache_purge_list);
 772       entry->set_purge_list_next(purge_list_head);
 773       if (Atomic::cmpxchg(&_exception_cache_purge_list, purge_list_head, entry) == purge_list_head) {
 774         break;
 775       }
 776     }
 777   }
 778 }
 779 
 780 // Delete exception caches that have been concurrently unlinked,
 781 // followed by a global handshake operation.
 782 void CodeCache::purge_exception_caches() {
 783   ExceptionCache* curr = _exception_cache_purge_list;
 784   while (curr != NULL) {
 785     ExceptionCache* next = curr->purge_list_next();
 786     delete curr;
 787     curr = next;
 788   }
 789   _exception_cache_purge_list = NULL;
 790 }
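
     // Protocol sketch for the two functions above: concurrent cleaners push
     // unlinked ExceptionCache entries onto _exception_cache_purge_list using the
     // CAS loop in release_exception_cache(); once a global handshake has ensured
     // that no thread can still be reading those entries, purge_exception_caches()
     // deletes the whole list.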
 791 
 792 uint8_t CodeCache::_unloading_cycle = 1;
 793 
 794 void CodeCache::increment_unloading_cycle() {
 795   // 2-bit value (see IsUnloadingState in nmethod.cpp for details)
 796   // 0 is reserved for new methods.
 797   _unloading_cycle = (_unloading_cycle + 1) % 4;
 798   if (_unloading_cycle == 0) {
 799     _unloading_cycle = 1;
 800   }
 801 }
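
     // Example of the progression above: starting from the initial value 1,
     // successive cycles produce 1 -> 2 -> 3 -> 1 -> 2 -> ... The value 0 is never
     // produced; it stays reserved for newly created nmethods.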
 802 
 803 CodeCache::UnloadingScope::UnloadingScope(BoolObjectClosure* is_alive)
 804   : _is_unloading_behaviour(is_alive)
 805 {
 806   _saved_behaviour = IsUnloadingBehaviour::current();
 807   IsUnloadingBehaviour::set_current(&_is_unloading_behaviour);
 808   increment_unloading_cycle();
 809   DependencyContext::cleaning_start();
 810 }
 811 
 812 CodeCache::UnloadingScope::~UnloadingScope() {
 813   IsUnloadingBehaviour::set_current(_saved_behaviour);
 814   DependencyContext::cleaning_end();
 815 }
 816 
 817 void CodeCache::verify_oops() {
 818   MutexLocker mu(CodeCache_lock, Mutex::_no_safepoint_check_flag);
 819   VerifyOopClosure voc;
 820   NMethodIterator iter(NMethodIterator::only_alive_and_not_unloading);
 821   while(iter.next()) {
 822     nmethod* nm = iter.method();
 823     nm->oops_do(&voc);
 824     nm->verify_oop_relocations();
 825   }
 826 }
 827 
 828 int CodeCache::blob_count(int code_blob_type) {
 829   CodeHeap* heap = get_code_heap(code_blob_type);
 830   return (heap != NULL) ? heap->blob_count() : 0;
 831 }
 832 
 833 int CodeCache::blob_count() {
 834   int count = 0;
 835   FOR_ALL_HEAPS(heap) {
 836     count += (*heap)->blob_count();
 837   }
 838   return count;
 839 }
 840 
 841 int CodeCache::nmethod_count(int code_blob_type) {
 842   CodeHeap* heap = get_code_heap(code_blob_type);
 843   return (heap != NULL) ? heap->nmethod_count() : 0;
 844 }
 845 
 846 int CodeCache::nmethod_count() {
 847   int count = 0;
 848   FOR_ALL_NMETHOD_HEAPS(heap) {
 849     count += (*heap)->nmethod_count();
 850   }
 851   return count;
 852 }
 853 
 854 int CodeCache::adapter_count(int code_blob_type) {
 855   CodeHeap* heap = get_code_heap(code_blob_type);
 856   return (heap != NULL) ? heap->adapter_count() : 0;
 857 }
 858 
 859 int CodeCache::adapter_count() {
 860   int count = 0;
 861   FOR_ALL_HEAPS(heap) {
 862     count += (*heap)->adapter_count();
 863   }
 864   return count;
 865 }
 866 
 867 address CodeCache::low_bound(int code_blob_type) {
 868   CodeHeap* heap = get_code_heap(code_blob_type);
 869   return (heap != NULL) ? (address)heap->low_boundary() : NULL;
 870 }
 871 
 872 address CodeCache::high_bound(int code_blob_type) {
 873   CodeHeap* heap = get_code_heap(code_blob_type);
 874   return (heap != NULL) ? (address)heap->high_boundary() : NULL;
 875 }
 876 
 877 size_t CodeCache::capacity() {
 878   size_t cap = 0;
 879   FOR_ALL_ALLOCABLE_HEAPS(heap) {
 880     cap += (*heap)->capacity();
 881   }
 882   return cap;
 883 }
 884 
 885 size_t CodeCache::unallocated_capacity(int code_blob_type) {
 886   CodeHeap* heap = get_code_heap(code_blob_type);
 887   return (heap != NULL) ? heap->unallocated_capacity() : 0;
 888 }
 889 
 890 size_t CodeCache::unallocated_capacity() {
 891   size_t unallocated_cap = 0;
 892   FOR_ALL_ALLOCABLE_HEAPS(heap) {
 893     unallocated_cap += (*heap)->unallocated_capacity();
 894   }
 895   return unallocated_cap;
 896 }
 897 
 898 size_t CodeCache::max_capacity() {
 899   size_t max_cap = 0;
 900   FOR_ALL_ALLOCABLE_HEAPS(heap) {
 901     max_cap += (*heap)->max_capacity();
 902   }
 903   return max_cap;
 904 }
 905 
 906 bool CodeCache::is_non_nmethod(address addr) {
 907   CodeHeap* blob = get_code_heap(CodeBlobType::NonNMethod);
 908   return blob->contains(addr);
 909 }
 910 
 911 size_t CodeCache::max_distance_to_non_nmethod() {
 912   if (!SegmentedCodeCache) {
 913     return ReservedCodeCacheSize;
 914   } else {
 915     CodeHeap* blob = get_code_heap(CodeBlobType::NonNMethod);
 916     // the max distance is minimized by placing the NonNMethod segment
 917     // in between MethodProfiled and MethodNonProfiled segments
 918     size_t dist1 = (size_t)blob->high() - (size_t)_low_bound;
 919     size_t dist2 = (size_t)_high_bound - (size_t)blob->low();
 920     return dist1 > dist2 ? dist1 : dist2;
 921   }
 922 }
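
     // Layout sketch for the two distances above (segmented code cache, addresses
     // increasing to the right, as reserved in initialize_heaps()):
     //
     //   _low_bound                                              _high_bound
     //   |   profiled   |      non-nmethod      |  non-profiled  |
     //                  ^blob->low()            ^blob->high()
     //
     //   dist1 = blob->high() - _low_bound   (spans profiled + non-nmethod)
     //   dist2 = _high_bound  - blob->low()  (spans non-nmethod + non-profiled)
     //
     // The larger of the two bounds the distance from any code in the cache to the
     // non-nmethod heap.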
 923 
 924 // Returns the reverse free ratio. E.g., if 25% (1/4) of the code cache
 925 // is free, reverse_free_ratio() returns 4.
 926 // Since allocations for each type of code blob fall through to the next
 927 // type of code heap (see the fallback in CodeCache::allocate), return the
 928 // reverse free ratio for the entire code cache.
 929 double CodeCache::reverse_free_ratio() {
 930   double unallocated = MAX2((double)unallocated_capacity(), 1.0); // Avoid division by 0;
 931   double max = (double)max_capacity();
 932   double result = max / unallocated;
 933   assert (max >= unallocated, "Must be");
 934   assert (result >= 1.0, "reverse_free_ratio must be at least 1. It is %f", result);
 935   return result;
 936 }
 937 
 938 size_t CodeCache::bytes_allocated_in_freelists() {
 939   size_t allocated_bytes = 0;
 940   FOR_ALL_ALLOCABLE_HEAPS(heap) {
 941     allocated_bytes += (*heap)->allocated_in_freelist();
 942   }
 943   return allocated_bytes;
 944 }
 945 
 946 int CodeCache::allocated_segments() {
 947   int number_of_segments = 0;
 948   FOR_ALL_ALLOCABLE_HEAPS(heap) {
 949     number_of_segments += (*heap)->allocated_segments();
 950   }
 951   return number_of_segments;
 952 }
 953 
 954 size_t CodeCache::freelists_length() {
 955   size_t length = 0;
 956   FOR_ALL_ALLOCABLE_HEAPS(heap) {
 957     length += (*heap)->freelist_length();
 958   }
 959   return length;
 960 }
 961 
 962 void icache_init();
 963 
 964 void CodeCache::initialize() {
 965   assert(CodeCacheSegmentSize >= (uintx)CodeEntryAlignment, "CodeCacheSegmentSize must be large enough to align entry points");
 966 #ifdef COMPILER2
 967   assert(CodeCacheSegmentSize >= (uintx)OptoLoopAlignment,  "CodeCacheSegmentSize must be large enough to align inner loops");
 968 #endif
 969   assert(CodeCacheSegmentSize >= sizeof(jdouble),    "CodeCacheSegmentSize must be large enough to align constants");
 970   // This was originally just an alignment check that caused a failure; instead,
 971   // round the expansion size up to the page size. In particular, Solaris moved to
 972   // a larger default page size.
 973   CodeCacheExpansionSize = align_up(CodeCacheExpansionSize, os::vm_page_size());
 974 
 975   if (SegmentedCodeCache) {
 976     // Use multiple code heaps
 977     initialize_heaps();
 978   } else {
 979     // Use a single code heap
 980     FLAG_SET_ERGO(NonNMethodCodeHeapSize, 0);
 981     FLAG_SET_ERGO(ProfiledCodeHeapSize, 0);
 982     FLAG_SET_ERGO(NonProfiledCodeHeapSize, 0);
 983     ReservedCodeSpace rs = reserve_heap_memory(ReservedCodeCacheSize);
 984     add_heap(rs, "CodeCache", CodeBlobType::All);
 985   }
 986 
 987   // Initialize ICache flush mechanism
 988   // This service is needed for os::register_code_area
 989   icache_init();
 990 
 991   // Give OS a chance to register generated code area.
 992   // This is used on Windows 64 bit platforms to register
 993   // Structured Exception Handlers for our generated code.
 994   os::register_code_area((char*)low_bound(), (char*)high_bound());
 995 }
 996 
 997 void codeCache_init() {
 998   CodeCache::initialize();
 999 }
1000 
1001 //------------------------------------------------------------------------------------------------
1002 
1003 int CodeCache::number_of_nmethods_with_dependencies() {
1004   return _number_of_nmethods_with_dependencies;
1005 }
1006 
1007 void CodeCache::clear_inline_caches() {
1008   assert_locked_or_safepoint(CodeCache_lock);
1009   CompiledMethodIterator iter(CompiledMethodIterator::only_alive_and_not_unloading);
1010   while(iter.next()) {
1011     iter.method()->clear_inline_caches();
1012   }
1013 }
1014 
1015 void CodeCache::cleanup_inline_caches() {
1016   assert_locked_or_safepoint(CodeCache_lock);
1017   NMethodIterator iter(NMethodIterator::only_alive_and_not_unloading);
1018   while(iter.next()) {
1019     iter.method()->cleanup_inline_caches(/*clean_all=*/true);
1020   }
1021 }
1022 
1023 // Keeps track of time spent for checking dependencies
1024 NOT_PRODUCT(static elapsedTimer dependentCheckTime;)
1025 
1026 int CodeCache::mark_for_deoptimization(KlassDepChange& changes) {
1027   MutexLocker mu(CodeCache_lock, Mutex::_no_safepoint_check_flag);
1028   int number_of_marked_CodeBlobs = 0;
1029 
1030   // search the hierarchy looking for nmethods which are affected by the loading of this class
1031 
1032   // then search the interfaces this class implements looking for nmethods
1033   // which might be dependent on the fact that an interface only had one
1034   // implementor.
1035   // nmethod::check_all_dependencies works correctly only if no safepoint
1036   // can happen.
1037   NoSafepointVerifier nsv;
1038   for (DepChange::ContextStream str(changes, nsv); str.next(); ) {
1039     Klass* d = str.klass();
1040     number_of_marked_CodeBlobs += InstanceKlass::cast(d)->mark_dependent_nmethods(changes);
1041   }
1042 
1043 #ifndef PRODUCT
1044   if (VerifyDependencies) {
1045     // Object pointers are used as unique identifiers for dependency arguments. This
1046     // is only possible if no safepoint, i.e., no GC, occurs while the verification code runs.
1047     dependentCheckTime.start();
1048     nmethod::check_all_dependencies(changes);
1049     dependentCheckTime.stop();
1050   }
1051 #endif
1052 
1053   return number_of_marked_CodeBlobs;
1054 }
1055 
1056 CompiledMethod* CodeCache::find_compiled(void* start) {
1057   CodeBlob *cb = find_blob(start);
1058   assert(cb == NULL || cb->is_compiled(), "did not find a compiled_method");
1059   return (CompiledMethod*)cb;
1060 }
1061 
1062 #if INCLUDE_JVMTI
1063 // RedefineClasses support for saving nmethods that are dependent on "old" methods.
1064 // We don't really expect this table to grow very large.  If it does, it can become a hashtable.
1065 static GrowableArray<CompiledMethod*>* old_compiled_method_table = NULL;
1066 
1067 static void add_to_old_table(CompiledMethod* c) {
1068   if (old_compiled_method_table == NULL) {
1069     old_compiled_method_table = new (ResourceObj::C_HEAP, mtCode) GrowableArray<CompiledMethod*>(100, mtCode);
1070   }
1071   old_compiled_method_table->push(c);
1072 }
1073 
1074 static void reset_old_method_table() {
1075   if (old_compiled_method_table != NULL) {
1076     delete old_compiled_method_table;
1077     old_compiled_method_table = NULL;
1078   }
1079 }
1080 
1081 // Remove this method when zombied or unloaded.
1082 void CodeCache::unregister_old_nmethod(CompiledMethod* c) {
1083   assert_lock_strong(CodeCache_lock);
1084   if (old_compiled_method_table != NULL) {
1085     int index = old_compiled_method_table->find(c);
1086     if (index != -1) {
1087       old_compiled_method_table->delete_at(index);
1088     }
1089   }
1090 }
1091 
1092 void CodeCache::old_nmethods_do(MetadataClosure* f) {
1093   // Walk old method table and mark those on stack.
1094   int length = 0;
1095   if (old_compiled_method_table != NULL) {
1096     length = old_compiled_method_table->length();
1097     for (int i = 0; i < length; i++) {
1098       CompiledMethod* cm = old_compiled_method_table->at(i);
1099       // Only walk alive nmethods; the dead ones will be removed by the sweeper or GC.
1100       if (cm->is_alive() && !cm->is_unloading()) {
1101         old_compiled_method_table->at(i)->metadata_do(f);
1102       }
1103     }
1104   }
1105   log_debug(redefine, class, nmethod)("Walked %d nmethods for mark_on_stack", length);
1106 }
1107 
1108 // Walk compiled methods and mark dependent methods for deoptimization.
1109 int CodeCache::mark_dependents_for_evol_deoptimization() {
1110   assert(SafepointSynchronize::is_at_safepoint(), "Can only do this at a safepoint!");
1111   // Each redefinition creates a new set of nmethods that have references to "old" Methods
1112   // So delete old method table and create a new one.
1113   reset_old_method_table();
1114 
1115   int number_of_marked_CodeBlobs = 0;
1116   CompiledMethodIterator iter(CompiledMethodIterator::only_alive);
1117   while(iter.next()) {
1118     CompiledMethod* nm = iter.method();
1119     // Walk all alive nmethods to check for old Methods.
1120     // This includes methods whose inline caches point to old methods, so
1121     // inline cache clearing is unnecessary.
1122     if (nm->has_evol_metadata()) {
1123       nm->mark_for_deoptimization();
1124       add_to_old_table(nm);
1125       number_of_marked_CodeBlobs++;
1126     }
1127   }
1128 
1129   // Return the total count of nmethods marked for deoptimization; if it is zero,
1130   // the caller can skip deoptimization.
1131   return number_of_marked_CodeBlobs;
1132 }
1133 
1134 void CodeCache::mark_all_nmethods_for_evol_deoptimization() {
1135   assert(SafepointSynchronize::is_at_safepoint(), "Can only do this at a safepoint!");
1136   CompiledMethodIterator iter(CompiledMethodIterator::only_alive);
1137   while(iter.next()) {
1138     CompiledMethod* nm = iter.method();
1139     if (!nm->method()->is_method_handle_intrinsic()) {
1140       nm->mark_for_deoptimization();
1141       if (nm->has_evol_metadata()) {
1142         add_to_old_table(nm);
1143       }
1144     }
1145   }
1146 }
1147 
1148 // Flushes compiled methods dependent on redefined classes that have already been
1149 // marked for deoptimization.
1150 void CodeCache::flush_evol_dependents() {
1151   assert(SafepointSynchronize::is_at_safepoint(), "Can only do this at a safepoint!");
1152 
1153   // The CodeCache can only be updated by a thread_in_VM, and all such threads are
1154   // stopped during the safepoint, so the CodeCache is safe to update without
1155   // holding the CodeCache_lock.
1156 
1157   // At least one nmethod has been marked for deoptimization
1158 
1159   Deoptimization::deoptimize_all_marked();
1160 }
1161 #endif // INCLUDE_JVMTI
1162 
1163 // Mark methods for deopt (if safe or possible).
1164 void CodeCache::mark_all_nmethods_for_deoptimization() {
1165   MutexLocker mu(CodeCache_lock, Mutex::_no_safepoint_check_flag);
1166   CompiledMethodIterator iter(CompiledMethodIterator::only_alive_and_not_unloading);
1167   while(iter.next()) {
1168     CompiledMethod* nm = iter.method();
1169     if (!nm->is_native_method()) {
1170       nm->mark_for_deoptimization();
1171     }
1172   }
1173 }
1174 
1175 int CodeCache::mark_for_deoptimization(Method* dependee) {
1176   MutexLocker mu(CodeCache_lock, Mutex::_no_safepoint_check_flag);
1177   int number_of_marked_CodeBlobs = 0;
1178 
1179   CompiledMethodIterator iter(CompiledMethodIterator::only_alive_and_not_unloading);
1180   while(iter.next()) {
1181     CompiledMethod* nm = iter.method();
1182     if (nm->is_dependent_on_method(dependee)) {
1183       ResourceMark rm;
1184       nm->mark_for_deoptimization();
1185       number_of_marked_CodeBlobs++;
1186     }
1187   }
1188 
1189   return number_of_marked_CodeBlobs;
1190 }
1191 
1192 void CodeCache::make_marked_nmethods_not_entrant() {
1193   assert_locked_or_safepoint(CodeCache_lock);
1194   CompiledMethodIterator iter(CompiledMethodIterator::only_alive_and_not_unloading);
1195   while(iter.next()) {
1196     CompiledMethod* nm = iter.method();
1197     if (nm->is_marked_for_deoptimization()) {
1198       nm->make_not_entrant();
1199     }
1200   }
1201 }
1202 
1203 // Flushes compiled methods dependent on dependee.
1204 void CodeCache::flush_dependents_on(InstanceKlass* dependee) {
1205   assert_lock_strong(Compile_lock);
1206 
1207   if (number_of_nmethods_with_dependencies() == 0) return;
1208 
1209   int marked = 0;
1210   if (dependee->is_linked()) {
1211     // Class initialization state change.
1212     KlassInitDepChange changes(dependee);
1213     marked = mark_for_deoptimization(changes);
1214   } else {
1215     // New class is loaded.
1216     NewKlassDepChange changes(dependee);
1217     marked = mark_for_deoptimization(changes);
1218   }
1219 
1220   if (marked > 0) {
1221     // At least one nmethod has been marked for deoptimization
1222     Deoptimization::deoptimize_all_marked();
1223   }
1224 }
1225 
1226 // Flushes compiled methods dependent on dependee
1227 void CodeCache::flush_dependents_on_method(const methodHandle& m_h) {
1228   // --- Compile_lock is not held. However we are at a safepoint.
1229   assert_locked_or_safepoint(Compile_lock);
1230 
1231   // Compute the dependent nmethods
1232   if (mark_for_deoptimization(m_h()) > 0) {
1233     Deoptimization::deoptimize_all_marked();
1234   }
1235 }
1236 
1237 void CodeCache::verify() {
1238   assert_locked_or_safepoint(CodeCache_lock);
1239   FOR_ALL_HEAPS(heap) {
1240     (*heap)->verify();
1241     FOR_ALL_BLOBS(cb, *heap) {
1242       if (cb->is_alive()) {
1243         cb->verify();
1244       }
1245     }
1246   }
1247 }
1248 
1249 // A CodeHeap is full. Print out warning and report event.
1250 PRAGMA_DIAG_PUSH
1251 PRAGMA_FORMAT_NONLITERAL_IGNORED
1252 void CodeCache::report_codemem_full(int code_blob_type, bool print) {
1253   // Get nmethod heap for the given CodeBlobType and build CodeCacheFull event
1254   CodeHeap* heap = get_code_heap(code_blob_type);
1255   assert(heap != NULL, "heap is null");
1256 
1257   int full_count = heap->report_full();
1258 
1259   if ((full_count == 1) || print) {
1260     // Not yet reported for this heap, report
1261     if (SegmentedCodeCache) {
1262       ResourceMark rm;
1263       stringStream msg1_stream, msg2_stream;
1264       msg1_stream.print("%s is full. Compiler has been disabled.",
1265                         get_code_heap_name(code_blob_type));
1266       msg2_stream.print("Try increasing the code heap size using -XX:%s=",
1267                  get_code_heap_flag_name(code_blob_type));
1268       const char *msg1 = msg1_stream.as_string();
1269       const char *msg2 = msg2_stream.as_string();
1270 
1271       log_warning(codecache)("%s", msg1);
1272       log_warning(codecache)("%s", msg2);
1273       warning("%s", msg1);
1274       warning("%s", msg2);
1275     } else {
1276       const char *msg1 = "CodeCache is full. Compiler has been disabled.";
1277       const char *msg2 = "Try increasing the code cache size using -XX:ReservedCodeCacheSize=";
1278 
1279       log_warning(codecache)("%s", msg1);
1280       log_warning(codecache)("%s", msg2);
1281       warning("%s", msg1);
1282       warning("%s", msg2);
1283     }
1284     ResourceMark rm;
1285     stringStream s;
1286     // Dump code cache into a buffer before locking the tty.
1287     {
1288       MutexLocker mu(CodeCache_lock, Mutex::_no_safepoint_check_flag);
1289       print_summary(&s);
1290     }
1291     {
1292       ttyLocker ttyl;
1293       tty->print("%s", s.as_string());
1294     }
1295 
1296     if (full_count == 1) {
1297       if (PrintCodeHeapAnalytics) {
1298         CompileBroker::print_heapinfo(tty, "all", 4096); // details, may be a lot!
1299       }
1300     }
1301   }
1302 
1303   EventCodeCacheFull event;
1304   if (event.should_commit()) {
1305     event.set_codeBlobType((u1)code_blob_type);
1306     event.set_startAddress((u8)heap->low_boundary());
1307     event.set_commitedTopAddress((u8)heap->high());
1308     event.set_reservedTopAddress((u8)heap->high_boundary());
1309     event.set_entryCount(heap->blob_count());
1310     event.set_methodCount(heap->nmethod_count());
1311     event.set_adaptorCount(heap->adapter_count());
1312     event.set_unallocatedCapacity(heap->unallocated_capacity());
1313     event.set_fullCount(heap->full_count());
1314     event.commit();
1315   }
1316 }
1317 PRAGMA_DIAG_POP
1318 
1319 void CodeCache::print_memory_overhead() {
1320   size_t wasted_bytes = 0;
1321   FOR_ALL_ALLOCABLE_HEAPS(heap) {
1322       CodeHeap* curr_heap = *heap;
1323       for (CodeBlob* cb = (CodeBlob*)curr_heap->first(); cb != NULL; cb = (CodeBlob*)curr_heap->next(cb)) {
1324         HeapBlock* heap_block = ((HeapBlock*)cb) - 1;
1325         wasted_bytes += heap_block->length() * CodeCacheSegmentSize - cb->size();
1326       }
1327   }
1328   // Print bytes that are allocated in the freelist
1329   ttyLocker ttl;
1330   tty->print_cr("Number of elements in freelist: " SSIZE_FORMAT,       freelists_length());
1331   tty->print_cr("Allocated in freelist:          " SSIZE_FORMAT "kB",  bytes_allocated_in_freelists()/K);
1332   tty->print_cr("Unused bytes in CodeBlobs:      " SSIZE_FORMAT "kB",  (wasted_bytes/K));
1333   tty->print_cr("Segment map size:               " SSIZE_FORMAT "kB",  allocated_segments()/K); // 1 byte per segment
1334 }
1335 
1336 //------------------------------------------------------------------------------------------------
1337 // Non-product version
1338 
1339 #ifndef PRODUCT
1340 
1341 void CodeCache::print_trace(const char* event, CodeBlob* cb, int size) {
1342   if (PrintCodeCache2) {  // Need to add a new flag
1343     ResourceMark rm;
1344     if (size == 0)  size = cb->size();
1345     tty->print_cr("CodeCache %s:  addr: " INTPTR_FORMAT ", size: 0x%x", event, p2i(cb), size);
1346   }
1347 }
1348 
1349 void CodeCache::print_internals() {
1350   int nmethodCount = 0;
1351   int runtimeStubCount = 0;
1352   int adapterCount = 0;
1353   int deoptimizationStubCount = 0;
1354   int uncommonTrapStubCount = 0;
1355   int bufferBlobCount = 0;
1356   int total = 0;
1357   int nmethodAlive = 0;
1358   int nmethodNotEntrant = 0;
1359   int nmethodZombie = 0;
1360   int nmethodUnloaded = 0;
1361   int nmethodJava = 0;
1362   int nmethodNative = 0;
1363   int max_nm_size = 0;
1364   ResourceMark rm;
1365 
1366   int i = 0;
1367   FOR_ALL_ALLOCABLE_HEAPS(heap) {
1368     if ((_nmethod_heaps->length() >= 1) && Verbose) {
1369       tty->print_cr("-- %s --", (*heap)->name());
1370     }
1371     FOR_ALL_BLOBS(cb, *heap) {
1372       total++;
1373       if (cb->is_nmethod()) {
1374         nmethod* nm = (nmethod*)cb;
1375 
1376         if (Verbose && nm->method() != NULL) {
1377           ResourceMark rm;
1378           char *method_name = nm->method()->name_and_sig_as_C_string();
1379           tty->print("%s", method_name);
1380           if(nm->is_alive()) { tty->print_cr(" alive"); }
1381           if(nm->is_not_entrant()) { tty->print_cr(" not-entrant"); }
1382           if(nm->is_zombie()) { tty->print_cr(" zombie"); }
1383         }
1384 
1385         nmethodCount++;
1386 
1387         if(nm->is_alive()) { nmethodAlive++; }
1388         if(nm->is_not_entrant()) { nmethodNotEntrant++; }
1389         if(nm->is_zombie()) { nmethodZombie++; }
1390         if(nm->is_unloaded()) { nmethodUnloaded++; }
1391         if(nm->method() != NULL && nm->is_native_method()) { nmethodNative++; }
1392 
1393         if(nm->method() != NULL && nm->is_java_method()) {
1394           nmethodJava++;
1395           max_nm_size = MAX2(max_nm_size, nm->size());
1396         }
1397       } else if (cb->is_runtime_stub()) {
1398         runtimeStubCount++;
1399       } else if (cb->is_deoptimization_stub()) {
1400         deoptimizationStubCount++;
1401       } else if (cb->is_uncommon_trap_stub()) {
1402         uncommonTrapStubCount++;
1403       } else if (cb->is_adapter_blob()) {
1404         adapterCount++;
1405       } else if (cb->is_buffer_blob()) {
1406         bufferBlobCount++;
1407       }
1408     }
1409   }
1410 
1411   int bucketSize = 512;
1412   int bucketLimit = max_nm_size / bucketSize + 1;
1413   int *buckets = NEW_C_HEAP_ARRAY(int, bucketLimit, mtCode);
1414   memset(buckets, 0, sizeof(int) * bucketLimit);
1415 
1416   NMethodIterator iter(NMethodIterator::all_blobs);
1417   while (iter.next()) {
1418     nmethod* nm = iter.method();
1419     if (nm->method() != NULL && nm->is_java_method()) {
1420       buckets[nm->size() / bucketSize]++;
1421     }
1422   }
1423 
1424   tty->print_cr("Code Cache Entries (total of %d)", total);
1425   tty->print_cr("-------------------------------------------------");
1426   tty->print_cr("nmethods: %d", nmethodCount);
1427   tty->print_cr("\talive: %d", nmethodAlive);
1428   tty->print_cr("\tnot_entrant: %d", nmethodNotEntrant);
1429   tty->print_cr("\tzombie: %d", nmethodZombie);
1430   tty->print_cr("\tunloaded: %d", nmethodUnloaded);
1431   tty->print_cr("\tjava: %d", nmethodJava);
1432   tty->print_cr("\tnative: %d", nmethodNative);
1433   tty->print_cr("runtime_stubs: %d", runtimeStubCount);
1434   tty->print_cr("adapters: %d", adapterCount);
1435   tty->print_cr("buffer blobs: %d", bufferBlobCount);
1436   tty->print_cr("deoptimization_stubs: %d", deoptimizationStubCount);
1437   tty->print_cr("uncommon_traps: %d", uncommonTrapStubCount);
1438   tty->print_cr("\nnmethod size distribution (non-zombie java)");
1439   tty->print_cr("-------------------------------------------------");
1440 
1441   for (int i = 0; i < bucketLimit; i++) {
1442     if (buckets[i] != 0) {
1443       tty->print("%d - %d bytes", i * bucketSize, (i + 1) * bucketSize);
1444       tty->fill_to(40);
1445       tty->print_cr("%d", buckets[i]);
1446     }
1447   }
1448 
1449   FREE_C_HEAP_ARRAY(int, buckets);
1450   print_memory_overhead();
1451 }
1452 
1453 #endif // !PRODUCT
1454 
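     // Print a summary of the code cache. In non-product builds, if Verbose is
     // enabled, also print per-compilation-level sizes of live and dead nmethods,
     // the sizes of the non-nmethod blobs and, in WizardMode, OopMap usage.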
1455 void CodeCache::print() {
1456   print_summary(tty);
1457 
1458 #ifndef PRODUCT
1459   if (!Verbose) return;
1460 
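       // Accumulate blob sizes per compilation level, keeping live and dead
       // (non-alive) nmethods separate; non-nmethod blobs are grouped by kind.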
1461   CodeBlob_sizes live[CompLevel_full_optimization + 1];
1462   CodeBlob_sizes dead[CompLevel_full_optimization + 1];
1463   CodeBlob_sizes runtimeStub;
1464   CodeBlob_sizes uncommonTrapStub;
1465   CodeBlob_sizes deoptimizationStub;
1466   CodeBlob_sizes adapter;
1467   CodeBlob_sizes bufferBlob;
1468   CodeBlob_sizes other;
1469 
1470   FOR_ALL_ALLOCABLE_HEAPS(heap) {
1471     FOR_ALL_BLOBS(cb, *heap) {
1472       if (cb->is_nmethod()) {
1473         const int level = cb->as_nmethod()->comp_level();
1474         assert(0 <= level && level <= CompLevel_full_optimization, "Invalid compilation level");
1475         if (!cb->is_alive()) {
1476           dead[level].add(cb);
1477         } else {
1478           live[level].add(cb);
1479         }
1480       } else if (cb->is_runtime_stub()) {
1481         runtimeStub.add(cb);
1482       } else if (cb->is_deoptimization_stub()) {
1483         deoptimizationStub.add(cb);
1484       } else if (cb->is_uncommon_trap_stub()) {
1485         uncommonTrapStub.add(cb);
1486       } else if (cb->is_adapter_blob()) {
1487         adapter.add(cb);
1488       } else if (cb->is_buffer_blob()) {
1489         bufferBlob.add(cb);
1490       } else {
1491         other.add(cb);
1492       }
1493     }
1494   }
1495 
1496   tty->print_cr("nmethod dependency checking time %fs", dependentCheckTime.seconds());
1497 
1498   tty->print_cr("nmethod blobs per compilation level:");
1499   for (int i = 0; i <= CompLevel_full_optimization; i++) {
1500     const char *level_name;
1501     switch (i) {
1502     case CompLevel_none:              level_name = "none";              break;
1503     case CompLevel_simple:            level_name = "simple";            break;
1504     case CompLevel_limited_profile:   level_name = "limited profile";   break;
1505     case CompLevel_full_profile:      level_name = "full profile";      break;
1506     case CompLevel_full_optimization: level_name = "full optimization"; break;
1507     default: assert(false, "invalid compilation level"); level_name = "invalid";  // fallback for product builds
1508     }
1509     tty->print_cr("%s:", level_name);
1510     live[i].print("live");
1511     dead[i].print("dead");
1512   }
1513 
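       // Print the accumulated sizes of the non-nmethod blob kinds.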
1514   struct {
1515     const char* name;
1516     const CodeBlob_sizes* sizes;
1517   } non_nmethod_blobs[] = {
1518     { "runtime",        &runtimeStub },
1519     { "uncommon trap",  &uncommonTrapStub },
1520     { "deoptimization", &deoptimizationStub },
1521     { "adapter",        &adapter },
1522     { "buffer blob",    &bufferBlob },
1523     { "other",          &other },
1524   };
1525   tty->print_cr("Non-nmethod blobs:");
1526   for (auto& blob: non_nmethod_blobs) {
1527     blob.sizes->print(blob.name);
1528   }
1529 
1530   if (WizardMode) {
1531     // Print the oop_map usage.
1532     int code_size = 0;
1533     int number_of_blobs = 0;
1534     int number_of_oop_maps = 0;
1535     int map_size = 0;
1536     FOR_ALL_ALLOCABLE_HEAPS(heap) {
1537       FOR_ALL_BLOBS(cb, *heap) {
1538         if (cb->is_alive()) {
1539           number_of_blobs++;
1540           code_size += cb->code_size();
1541           ImmutableOopMapSet* set = cb->oop_maps();
1542           if (set != NULL) {
1543             number_of_oop_maps += set->count();
1544             map_size           += set->nr_of_bytes();
1545           }
1546         }
1547       }
1548     }
1549     tty->print_cr("OopMaps");
1550     tty->print_cr("  #blobs    = %d", number_of_blobs);
1551     tty->print_cr("  code size = %d", code_size);
1552     tty->print_cr("  #oop_maps = %d", number_of_oop_maps);
1553     tty->print_cr("  map size  = %d", map_size);
1554   }
1555 
1556 #endif // !PRODUCT
1557 }
1558 
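     // Print a one-line usage summary (size, used, max_used, free) for each code
     // heap. With 'detailed', also print each heap's bounds, the global blob,
     // nmethod and adapter counts, the compilation status and the accumulated
     // full_count.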
1559 void CodeCache::print_summary(outputStream* st, bool detailed) {
1560   int full_count = 0;
1561   FOR_ALL_HEAPS(heap_iterator) {
1562     CodeHeap* heap = (*heap_iterator);
1563     size_t total = (heap->high_boundary() - heap->low_boundary());
1564     if (_heaps->length() >= 1) {
1565       st->print("%s:", heap->name());
1566     } else {
1567       st->print("CodeCache:");
1568     }
1569     st->print_cr(" size=" SIZE_FORMAT "Kb used=" SIZE_FORMAT
1570                  "Kb max_used=" SIZE_FORMAT "Kb free=" SIZE_FORMAT "Kb",
1571                  total/K, (total - heap->unallocated_capacity())/K,
1572                  heap->max_allocated_capacity()/K, heap->unallocated_capacity()/K);
1573 
1574     if (detailed) {
1575       st->print_cr(" bounds [" INTPTR_FORMAT ", " INTPTR_FORMAT ", " INTPTR_FORMAT "]",
1576                    p2i(heap->low_boundary()),
1577                    p2i(heap->high()),
1578                    p2i(heap->high_boundary()));
1579 
1580       full_count += get_codemem_full_count(heap->code_blob_type());
1581     }
1582   }
1583 
1584   if (detailed) {
1585     st->print_cr(" total_blobs=" UINT32_FORMAT " nmethods=" UINT32_FORMAT
1586                        " adapters=" UINT32_FORMAT,
1587                        blob_count(), nmethod_count(), adapter_count());
1588     st->print_cr(" compilation: %s", CompileBroker::should_compile_new_jobs() ?
1589                  "enabled" : Arguments::mode() == Arguments::_int ?
1590                  "disabled (interpreter mode)" :
1591                  "disabled (not enough contiguous free space left)");
1592     st->print_cr("              stopped_count=%d, restarted_count=%d",
1593                  CompileBroker::get_total_compiler_stopped_count(),
1594                  CompileBroker::get_total_compiler_restarted_count());
1595     st->print_cr(" full_count=%d", full_count);
1596   }
1597 }
1598 
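     // Print one line per alive, not-unloading compiled method: compile id,
     // compilation level, state, method name and the header/code address range.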
1599 void CodeCache::print_codelist(outputStream* st) {
1600   MutexLocker mu(CodeCache_lock, Mutex::_no_safepoint_check_flag);
1601 
1602   CompiledMethodIterator iter(CompiledMethodIterator::only_alive_and_not_unloading);
1603   while (iter.next()) {
1604     CompiledMethod* cm = iter.method();
1605     ResourceMark rm;
1606     char* method_name = cm->method()->name_and_sig_as_C_string();
1607     st->print_cr("%d %d %d %s [" INTPTR_FORMAT ", " INTPTR_FORMAT " - " INTPTR_FORMAT "]",
1608                  cm->compile_id(), cm->comp_level(), cm->get_state(),
1609                  method_name,
1610                  (intptr_t)cm->header_begin(), (intptr_t)cm->code_begin(), (intptr_t)cm->code_end());
1611   }
1612 }
1613 
1614 void CodeCache::print_layout(outputStream* st) {
1615   MutexLocker mu(CodeCache_lock, Mutex::_no_safepoint_check_flag);
1616   ResourceMark rm;
1617   print_summary(st, true);
1618 }
1619 
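     // Emit the current code cache counters as key='value' attributes for the
     // compilation log.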
1620 void CodeCache::log_state(outputStream* st) {
1621   st->print(" total_blobs='" UINT32_FORMAT "' nmethods='" UINT32_FORMAT "'"
1622             " adapters='" UINT32_FORMAT "' free_code_cache='" SIZE_FORMAT "'",
1623             blob_count(), nmethod_count(), adapter_count(),
1624             unallocated_capacity());
1625 }
1626 
1627 #ifdef LINUX
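     // Write the map file that the Linux 'perf' tool uses to symbolize
     // JIT-compiled code: one line per alive, not-unloading blob, giving its code
     // start address, code size and a human-readable name.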
1628 void CodeCache::write_perf_map() {
1629   MutexLocker mu(CodeCache_lock, Mutex::_no_safepoint_check_flag);
1630 
1631   // Perf expects to find the map file at /tmp/perf-<pid>.map.
1632   char fname[32];
1633   jio_snprintf(fname, sizeof(fname), "/tmp/perf-%d.map", os::current_process_id());
1634 
1635   fileStream fs(fname, "w");
1636   if (!fs.is_open()) {
1637     log_warning(codecache)("Failed to create %s for perf map", fname);
1638     return;
1639   }
1640 
1641   AllCodeBlobsIterator iter(AllCodeBlobsIterator::only_alive_and_not_unloading);
1642   while (iter.next()) {
1643     CodeBlob *cb = iter.method();
1644     ResourceMark rm;
1645     const char* method_name =
1646       cb->is_compiled() ? cb->as_compiled_method()->method()->external_name()
1647                         : cb->name();
1648     fs.print_cr(INTPTR_FORMAT " " INTPTR_FORMAT " %s",
1649                 (intptr_t)cb->code_begin(), (intptr_t)cb->code_size(),
1650                 method_name);
1651   }
1652 }
1653 #endif // LINUX
1654 
1655 //---<  BEGIN  >--- CodeHeap State Analytics.
1656 
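     // Each of these wrappers applies the corresponding CodeHeapState operation to
     // every allocable heap: aggregate() collects the statistics at the given
     // granularity, the print_* functions report different views of that data, and
     // discard() releases it again.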
1657 void CodeCache::aggregate(outputStream *out, size_t granularity) {
1658   FOR_ALL_ALLOCABLE_HEAPS(heap) {
1659     CodeHeapState::aggregate(out, (*heap), granularity);
1660   }
1661 }
1662 
1663 void CodeCache::discard(outputStream *out) {
1664   FOR_ALL_ALLOCABLE_HEAPS(heap) {
1665     CodeHeapState::discard(out, (*heap));
1666   }
1667 }
1668 
1669 void CodeCache::print_usedSpace(outputStream *out) {
1670   FOR_ALL_ALLOCABLE_HEAPS(heap) {
1671     CodeHeapState::print_usedSpace(out, (*heap));
1672   }
1673 }
1674 
1675 void CodeCache::print_freeSpace(outputStream *out) {
1676   FOR_ALL_ALLOCABLE_HEAPS(heap) {
1677     CodeHeapState::print_freeSpace(out, (*heap));
1678   }
1679 }
1680 
1681 void CodeCache::print_count(outputStream *out) {
1682   FOR_ALL_ALLOCABLE_HEAPS(heap) {
1683     CodeHeapState::print_count(out, (*heap));
1684   }
1685 }
1686 
1687 void CodeCache::print_space(outputStream *out) {
1688   FOR_ALL_ALLOCABLE_HEAPS(heap) {
1689     CodeHeapState::print_space(out, (*heap));
1690   }
1691 }
1692 
1693 void CodeCache::print_age(outputStream *out) {
1694   FOR_ALL_ALLOCABLE_HEAPS(heap) {
1695     CodeHeapState::print_age(out, (*heap));
1696   }
1697 }
1698 
1699 void CodeCache::print_names(outputStream *out) {
1700   FOR_ALL_ALLOCABLE_HEAPS(heap) {
1701     CodeHeapState::print_names(out, (*heap));
1702   }
1703 }
1704 //---<  END  >--- CodeHeap State Analytics.