/*
 * Copyright (c) 1997, 2021, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#include "precompiled.hpp"
#include "jvm_io.h"
#include "code/codeBlob.hpp"
#include "code/codeCache.hpp"
#include "code/codeHeapState.hpp"
#include "code/compiledIC.hpp"
#include "code/dependencies.hpp"
#include "code/dependencyContext.hpp"
#include "code/icBuffer.hpp"
#include "code/nmethod.hpp"
#include "code/pcDesc.hpp"
#include "compiler/compilationPolicy.hpp"
#include "compiler/compileBroker.hpp"
#include "compiler/oopMap.hpp"
#include "gc/shared/collectedHeap.hpp"
#include "jfr/jfrEvents.hpp"
#include "logging/log.hpp"
#include "logging/logStream.hpp"
#include "memory/allocation.inline.hpp"
#include "memory/iterator.hpp"
#include "memory/resourceArea.hpp"
#include "memory/universe.hpp"
#include "oops/method.inline.hpp"
#include "oops/objArrayOop.hpp"
#include "oops/oop.inline.hpp"
#include "oops/verifyOopClosure.hpp"
#include "runtime/arguments.hpp"
#include "runtime/atomic.hpp"
#include "runtime/deoptimization.hpp"
#include "runtime/globals_extension.hpp"
#include "runtime/handles.inline.hpp"
#include "runtime/icache.hpp"
#include "runtime/java.hpp"
#include "runtime/mutexLocker.hpp"
#include "runtime/safepointVerifiers.hpp"
#include "runtime/sweeper.hpp"
#include "runtime/vmThread.hpp"
#include "services/memoryService.hpp"
#include "utilities/align.hpp"
#include "utilities/vmError.hpp"
#include "utilities/xmlstream.hpp"
#ifdef COMPILER1
#include "c1/c1_Compilation.hpp"
#include "c1/c1_Compiler.hpp"
#endif
#ifdef COMPILER2
#include "opto/c2compiler.hpp"
#include "opto/compile.hpp"
#include "opto/node.hpp"
#endif

// Helper class for printing in CodeCache
class CodeBlob_sizes {
 private:
  int count;
  int total_size;
  int header_size;
  int code_size;
  int stub_size;
  int relocation_size;
  int scopes_oop_size;
  int scopes_metadata_size;
  int scopes_data_size;
  int scopes_pcs_size;

 public:
  CodeBlob_sizes() {
    count                = 0;
    total_size           = 0;
    header_size          = 0;
    code_size            = 0;
    stub_size            = 0;
    relocation_size      = 0;
    scopes_oop_size      = 0;
    scopes_metadata_size = 0;
    scopes_data_size     = 0;
    scopes_pcs_size      = 0;
  }

  int total()                                    { return total_size; }
  bool is_empty()                                { return count == 0; }

  void print(const char* title) {
    tty->print_cr(" #%d %s = %dK (hdr %d%%,  loc %d%%, code %d%%, stub %d%%, [oops %d%%, metadata %d%%, data %d%%, pcs %d%%])",
                  count,
                  title,
                  (int)(total() / K),
                  header_size             * 100 / total_size,
                  relocation_size         * 100 / total_size,
                  code_size               * 100 / total_size,
                  stub_size               * 100 / total_size,
                  scopes_oop_size         * 100 / total_size,
                  scopes_metadata_size    * 100 / total_size,
                  scopes_data_size        * 100 / total_size,
                  scopes_pcs_size         * 100 / total_size);
  }

  void add(CodeBlob* cb) {
    count++;
    total_size       += cb->size();
    header_size      += cb->header_size();
    relocation_size  += cb->relocation_size();
    if (cb->is_nmethod()) {
      nmethod* nm = cb->as_nmethod_or_null();
      code_size        += nm->insts_size();
      stub_size        += nm->stub_size();

      scopes_oop_size      += nm->oops_size();
      scopes_metadata_size += nm->metadata_size();
      scopes_data_size     += nm->scopes_data_size();
      scopes_pcs_size      += nm->scopes_pcs_size();
    } else {
      code_size        += cb->code_size();
    }
  }
};

// Iterate over all CodeHeaps
#define FOR_ALL_HEAPS(heap) for (GrowableArrayIterator<CodeHeap*> heap = _heaps->begin(); heap != _heaps->end(); ++heap)
#define FOR_ALL_NMETHOD_HEAPS(heap) for (GrowableArrayIterator<CodeHeap*> heap = _nmethod_heaps->begin(); heap != _nmethod_heaps->end(); ++heap)
#define FOR_ALL_ALLOCABLE_HEAPS(heap) for (GrowableArrayIterator<CodeHeap*> heap = _allocable_heaps->begin(); heap != _allocable_heaps->end(); ++heap)

// Iterate over all CodeBlobs (cb) on the given CodeHeap
#define FOR_ALL_BLOBS(cb, heap) for (CodeBlob* cb = first_blob(heap); cb != NULL; cb = next_blob(heap, cb))
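
// Illustrative usage sketch (not part of the VM logic): CodeCache::print()
// near the end of this file drives CodeBlob_sizes roughly like this, for a
// given CodeHeap* heap and with the CodeCache_lock held:
//
//   CodeBlob_sizes live;
//   FOR_ALL_BLOBS(cb, heap) {
//     if (cb->is_alive()) live.add(cb);
//   }
//   if (!live.is_empty()) live.print("live");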

address CodeCache::_low_bound = 0;
address CodeCache::_high_bound = 0;
int CodeCache::_number_of_nmethods_with_dependencies = 0;
ExceptionCache* volatile CodeCache::_exception_cache_purge_list = NULL;

// Initialize arrays of CodeHeap subsets
GrowableArray<CodeHeap*>* CodeCache::_heaps = new(ResourceObj::C_HEAP, mtCode) GrowableArray<CodeHeap*> (CodeBlobType::All, mtCode);
GrowableArray<CodeHeap*>* CodeCache::_compiled_heaps = new(ResourceObj::C_HEAP, mtCode) GrowableArray<CodeHeap*> (CodeBlobType::All, mtCode);
GrowableArray<CodeHeap*>* CodeCache::_nmethod_heaps = new(ResourceObj::C_HEAP, mtCode) GrowableArray<CodeHeap*> (CodeBlobType::All, mtCode);
GrowableArray<CodeHeap*>* CodeCache::_allocable_heaps = new(ResourceObj::C_HEAP, mtCode) GrowableArray<CodeHeap*> (CodeBlobType::All, mtCode);

void CodeCache::check_heap_sizes(size_t non_nmethod_size, size_t profiled_size, size_t non_profiled_size, size_t cache_size, bool all_set) {
  size_t total_size = non_nmethod_size + profiled_size + non_profiled_size;
  // Prepare error message
  const char* error = "Invalid code heap sizes";
  err_msg message("NonNMethodCodeHeapSize (" SIZE_FORMAT "K) + ProfiledCodeHeapSize (" SIZE_FORMAT "K)"
                  " + NonProfiledCodeHeapSize (" SIZE_FORMAT "K) = " SIZE_FORMAT "K",
                  non_nmethod_size/K, profiled_size/K, non_profiled_size/K, total_size/K);

  if (total_size > cache_size) {
    // Some code heap sizes were explicitly set: total_size must be <= cache_size
    message.append(" is greater than ReservedCodeCacheSize (" SIZE_FORMAT "K).", cache_size/K);
    vm_exit_during_initialization(error, message);
  } else if (all_set && total_size != cache_size) {
    // All code heap sizes were explicitly set: total_size must equal cache_size
    message.append(" is not equal to ReservedCodeCacheSize (" SIZE_FORMAT "K).", cache_size/K);
    vm_exit_during_initialization(error, message);
  }
}
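
// Worked example (flag values illustrative): with -XX:ReservedCodeCacheSize=100m
// -XX:ProfiledCodeHeapSize=60m -XX:NonProfiledCodeHeapSize=60m, only two of the
// three heap sizes are set, so all_set is false; but min_size + 60M + 60M
// already exceeds cache_size, so the first branch fires and the VM exits with
// "Invalid code heap sizes ... is greater than ReservedCodeCacheSize (102400K)."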

void CodeCache::initialize_heaps() {
  bool non_nmethod_set      = FLAG_IS_CMDLINE(NonNMethodCodeHeapSize);
  bool profiled_set         = FLAG_IS_CMDLINE(ProfiledCodeHeapSize);
  bool non_profiled_set     = FLAG_IS_CMDLINE(NonProfiledCodeHeapSize);
  size_t min_size           = os::vm_page_size();
  size_t cache_size         = ReservedCodeCacheSize;
  size_t non_nmethod_size   = NonNMethodCodeHeapSize;
  size_t profiled_size      = ProfiledCodeHeapSize;
  size_t non_profiled_size  = NonProfiledCodeHeapSize;
  // Check if total size set via command line flags exceeds the reserved size
  check_heap_sizes((non_nmethod_set  ? non_nmethod_size  : min_size),
                   (profiled_set     ? profiled_size     : min_size),
                   (non_profiled_set ? non_profiled_size : min_size),
                   cache_size,
                   non_nmethod_set && profiled_set && non_profiled_set);

  // Determine size of compiler buffers
  size_t code_buffers_size = 0;
#ifdef COMPILER1
  // C1 temporary code buffers (see Compiler::init_buffer_blob())
  const int c1_count = CompilationPolicy::c1_count();
  code_buffers_size += c1_count * Compiler::code_buffer_size();
#endif
#ifdef COMPILER2
  // C2 scratch buffers (see Compile::init_scratch_buffer_blob())
  const int c2_count = CompilationPolicy::c2_count();
  // Initial size of constant table (this may be increased if a compiled method needs more space)
  code_buffers_size += c2_count * C2Compiler::initial_code_buffer_size();
#endif

  // Increase default non_nmethod_size to account for compiler buffers
  if (!non_nmethod_set) {
    non_nmethod_size += code_buffers_size;
  }
  // Calculate default CodeHeap sizes if not set by user
  if (!non_nmethod_set && !profiled_set && !non_profiled_set) {
    // Check if we have enough space for the non-nmethod code heap
    if (cache_size > non_nmethod_size) {
      // Use the default value for non_nmethod_size and one half of the
      // remaining size for non-profiled and one half for profiled methods
      size_t remaining_size = cache_size - non_nmethod_size;
      profiled_size = remaining_size / 2;
      non_profiled_size = remaining_size - profiled_size;
    } else {
      // Use all space for the non-nmethod heap and set other heaps to minimal size
      non_nmethod_size = cache_size - 2 * min_size;
      profiled_size = min_size;
      non_profiled_size = min_size;
    }
  } else if (!non_nmethod_set || !profiled_set || !non_profiled_set) {
    // The user explicitly set some code heap sizes. Increase or decrease the (default)
    // sizes of the other code heaps accordingly. First adapt non-profiled and profiled
    // code heap sizes and then only change non-nmethod code heap size if still necessary.
    intx diff_size = cache_size - (non_nmethod_size + profiled_size + non_profiled_size);
    if (non_profiled_set) {
      if (!profiled_set) {
        // Adapt size of profiled code heap
        if (diff_size < 0 && ((intx)profiled_size + diff_size) <= 0) {
          // Not enough space available, set to minimum size
          diff_size += profiled_size - min_size;
          profiled_size = min_size;
        } else {
          profiled_size += diff_size;
          diff_size = 0;
        }
      }
    } else if (profiled_set) {
      // Adapt size of non-profiled code heap
      if (diff_size < 0 && ((intx)non_profiled_size + diff_size) <= 0) {
        // Not enough space available, set to minimum size
        diff_size += non_profiled_size - min_size;
        non_profiled_size = min_size;
      } else {
        non_profiled_size += diff_size;
        diff_size = 0;
      }
    } else if (non_nmethod_set) {
      // Distribute remaining size between profiled and non-profiled code heaps
      diff_size = cache_size - non_nmethod_size;
      profiled_size = diff_size / 2;
      non_profiled_size = diff_size - profiled_size;
      diff_size = 0;
    }
    if (diff_size != 0) {
      // Use non-nmethod code heap for remaining space requirements
      assert(!non_nmethod_set && ((intx)non_nmethod_size + diff_size) > 0, "sanity");
      non_nmethod_size += diff_size;
    }
  }

  // We do not need the profiled CodeHeap, use all space for the non-profiled CodeHeap
  if (!heap_available(CodeBlobType::MethodProfiled)) {
    non_profiled_size += profiled_size;
    profiled_size = 0;
  }
  // We do not need the non-profiled CodeHeap, use all space for the non-nmethod CodeHeap
  if (!heap_available(CodeBlobType::MethodNonProfiled)) {
    non_nmethod_size += non_profiled_size;
    non_profiled_size = 0;
  }
  // Make sure we have enough space for VM internal code
  uint min_code_cache_size = CodeCacheMinimumUseSpace DEBUG_ONLY(* 3);
  if (non_nmethod_size < min_code_cache_size) {
    vm_exit_during_initialization(err_msg(
        "Not enough space in non-nmethod code heap to run VM: " SIZE_FORMAT "K < " SIZE_FORMAT "K",
        non_nmethod_size/K, min_code_cache_size/K));
  }

  // Verify sizes and update flag values
  assert(non_profiled_size + profiled_size + non_nmethod_size == cache_size, "Invalid code heap sizes");
  FLAG_SET_ERGO(NonNMethodCodeHeapSize, non_nmethod_size);
  FLAG_SET_ERGO(ProfiledCodeHeapSize, profiled_size);
  FLAG_SET_ERGO(NonProfiledCodeHeapSize, non_profiled_size);

  // If large page support is enabled, align code heaps according to large
  // page size to make sure that code cache is covered by large pages.
  const size_t alignment = MAX2(page_size(false, 8), (size_t) os::vm_allocation_granularity());
  non_nmethod_size = align_up(non_nmethod_size, alignment);
  profiled_size    = align_down(profiled_size, alignment);

  // Reserve one contiguous chunk of memory for the CodeHeaps and split it into
  // parts for the individual heaps. The memory layout looks like this:
  // ---------- high -----------
  //    Non-profiled nmethods
  //      Profiled nmethods
  //         Non-nmethods
  // ---------- low ------------
  ReservedCodeSpace rs = reserve_heap_memory(cache_size);
  ReservedSpace non_method_space    = rs.first_part(non_nmethod_size);
  ReservedSpace rest                = rs.last_part(non_nmethod_size);
  ReservedSpace profiled_space      = rest.first_part(profiled_size);
  ReservedSpace non_profiled_space  = rest.last_part(profiled_size);

  // Non-nmethods (stubs, adapters, ...)
  add_heap(non_method_space, "CodeHeap 'non-nmethods'", CodeBlobType::NonNMethod);
  // Tier 2 and tier 3 (profiled) methods
  add_heap(profiled_space, "CodeHeap 'profiled nmethods'", CodeBlobType::MethodProfiled);
  // Tier 1 and tier 4 (non-profiled) methods and native methods
  add_heap(non_profiled_space, "CodeHeap 'non-profiled nmethods'", CodeBlobType::MethodNonProfiled);
}
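
// Worked example of the default split above (illustrative; exact numbers depend
// on platform defaults and compiler thread counts): with ReservedCodeCacheSize
// at its 240M default and none of the three heap sizes set on the command line,
// non_nmethod_size is the NonNMethodCodeHeapSize default plus the C1/C2 buffer
// sizes, and the remainder is halved:
//   remaining_size    = cache_size - non_nmethod_size
//   profiled_size     = remaining_size / 2
//   non_profiled_size = remaining_size - profiled_size
// so the profiled and non-profiled heaps each receive about half of what is
// left once the non-nmethod heap has been carved out.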

size_t CodeCache::page_size(bool aligned, size_t min_pages) {
  if (os::can_execute_large_page_memory()) {
    if (InitialCodeCacheSize < ReservedCodeCacheSize) {
      // Make sure that the page size allows for an incremental commit of the reserved space
      min_pages = MAX2(min_pages, (size_t)8);
    }
    return aligned ? os::page_size_for_region_aligned(ReservedCodeCacheSize, min_pages) :
                     os::page_size_for_region_unaligned(ReservedCodeCacheSize, min_pages);
  } else {
    return os::vm_page_size();
  }
}

ReservedCodeSpace CodeCache::reserve_heap_memory(size_t size) {
  // Align and reserve space for code cache
  const size_t rs_ps = page_size();
  const size_t rs_align = MAX2(rs_ps, (size_t) os::vm_allocation_granularity());
  const size_t rs_size = align_up(size, rs_align);
  ReservedCodeSpace rs(rs_size, rs_align, rs_ps);
  if (!rs.is_reserved()) {
    vm_exit_during_initialization(err_msg("Could not reserve enough space for code cache (" SIZE_FORMAT "K)",
                                          rs_size/K));
  }

  // Initialize bounds
  _low_bound = (address)rs.base();
  _high_bound = _low_bound + rs.size();
  return rs;
}

// Heaps available for allocation
bool CodeCache::heap_available(int code_blob_type) {
  if (!SegmentedCodeCache) {
    // No segmentation: use a single code heap
    return (code_blob_type == CodeBlobType::All);
  } else if (Arguments::is_interpreter_only()) {
    // Interpreter only: we don't need any method code heaps
    return (code_blob_type == CodeBlobType::NonNMethod);
  } else if (CompilerConfig::is_c1_profiling()) {
    // Tiered compilation: use all code heaps
    return (code_blob_type < CodeBlobType::All);
  } else {
    // No TieredCompilation: we only need the non-nmethod and non-profiled code heap
    return (code_blob_type == CodeBlobType::NonNMethod) ||
           (code_blob_type == CodeBlobType::MethodNonProfiled);
  }
}
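
// For illustration, the heaps that end up available in common configurations
// (derived directly from the checks above):
//   -XX:-SegmentedCodeCache        -> single heap accepting CodeBlobType::All
//   -Xint (interpreter only)       -> NonNMethod only
//   C1 profiling enabled (tiered)  -> NonNMethod, MethodProfiled and
//                                     MethodNonProfiled
//   no C1 profiling                -> NonNMethod and MethodNonProfiled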

const char* CodeCache::get_code_heap_flag_name(int code_blob_type) {
  switch(code_blob_type) {
  case CodeBlobType::NonNMethod:
    return "NonNMethodCodeHeapSize";
  case CodeBlobType::MethodNonProfiled:
    return "NonProfiledCodeHeapSize";
  case CodeBlobType::MethodProfiled:
    return "ProfiledCodeHeapSize";
  }
  ShouldNotReachHere();
  return NULL;
}

int CodeCache::code_heap_compare(CodeHeap* const &lhs, CodeHeap* const &rhs) {
  if (lhs->code_blob_type() == rhs->code_blob_type()) {
    return (lhs > rhs) ? 1 : ((lhs < rhs) ? -1 : 0);
  } else {
    return lhs->code_blob_type() - rhs->code_blob_type();
  }
}

void CodeCache::add_heap(CodeHeap* heap) {
  assert(!Universe::is_fully_initialized(), "late heap addition?");

  _heaps->insert_sorted<code_heap_compare>(heap);

  int type = heap->code_blob_type();
  if (code_blob_type_accepts_compiled(type)) {
    _compiled_heaps->insert_sorted<code_heap_compare>(heap);
  }
  if (code_blob_type_accepts_nmethod(type)) {
    _nmethod_heaps->insert_sorted<code_heap_compare>(heap);
  }
  if (code_blob_type_accepts_allocable(type)) {
    _allocable_heaps->insert_sorted<code_heap_compare>(heap);
  }
}

void CodeCache::add_heap(ReservedSpace rs, const char* name, int code_blob_type) {
  // Check if heap is needed
  if (!heap_available(code_blob_type)) {
    return;
  }

  // Create CodeHeap
  CodeHeap* heap = new CodeHeap(name, code_blob_type);
  add_heap(heap);

  // Reserve Space
  size_t size_initial = MIN2((size_t)InitialCodeCacheSize, rs.size());
  size_initial = align_up(size_initial, os::vm_page_size());
  if (!heap->reserve(rs, size_initial, CodeCacheSegmentSize)) {
    vm_exit_during_initialization(err_msg("Could not reserve enough space in %s (" SIZE_FORMAT "K)",
                                          heap->name(), size_initial/K));
  }

  // Register the CodeHeap
  MemoryService::add_code_heap_memory_pool(heap, name);
}

CodeHeap* CodeCache::get_code_heap_containing(void* start) {
  FOR_ALL_HEAPS(heap) {
    if ((*heap)->contains(start)) {
      return *heap;
    }
  }
  return NULL;
}

CodeHeap* CodeCache::get_code_heap(const CodeBlob* cb) {
  assert(cb != NULL, "CodeBlob is null");
  FOR_ALL_HEAPS(heap) {
    if ((*heap)->contains_blob(cb)) {
      return *heap;
    }
  }
  ShouldNotReachHere();
  return NULL;
}

CodeHeap* CodeCache::get_code_heap(int code_blob_type) {
  FOR_ALL_HEAPS(heap) {
    if ((*heap)->accepts(code_blob_type)) {
      return *heap;
    }
  }
  return NULL;
}

CodeBlob* CodeCache::first_blob(CodeHeap* heap) {
  assert_locked_or_safepoint(CodeCache_lock);
  assert(heap != NULL, "heap is null");
  return (CodeBlob*)heap->first();
}

CodeBlob* CodeCache::first_blob(int code_blob_type) {
  if (heap_available(code_blob_type)) {
    return first_blob(get_code_heap(code_blob_type));
  } else {
    return NULL;
  }
}

CodeBlob* CodeCache::next_blob(CodeHeap* heap, CodeBlob* cb) {
  assert_locked_or_safepoint(CodeCache_lock);
  assert(heap != NULL, "heap is null");
  return (CodeBlob*)heap->next(cb);
}

/**
 * Do not seize the CodeCache lock here--if the caller has not
 * already done so, we are going to lose big-time, since the code
 * cache will contain a garbage CodeBlob until the caller can
 * run the constructor for the CodeBlob subclass it is busy
 * instantiating.
 */
CodeBlob* CodeCache::allocate(int size, int code_blob_type, bool handle_alloc_failure, int orig_code_blob_type) {
  // Possibly wakes up the sweeper thread.
  NMethodSweeper::report_allocation(code_blob_type);
  assert_locked_or_safepoint(CodeCache_lock);
  assert(size > 0, "Code cache allocation request must be > 0 but is %d", size);
  if (size <= 0) {
    return NULL;
  }
  CodeBlob* cb = NULL;

  // Get CodeHeap for the given CodeBlobType
  CodeHeap* heap = get_code_heap(code_blob_type);
  assert(heap != NULL, "heap is null");

  while (true) {
    cb = (CodeBlob*)heap->allocate(size);
    if (cb != NULL) break;
    if (!heap->expand_by(CodeCacheExpansionSize)) {
      // Save original type for error reporting
      if (orig_code_blob_type == CodeBlobType::All) {
        orig_code_blob_type = code_blob_type;
      }
      // Expansion failed
      if (SegmentedCodeCache) {
        // Fallback solution: Try to store code in another code heap.
        // NonNMethod -> MethodNonProfiled -> MethodProfiled (-> MethodNonProfiled)
        // Note that in the sweeper, we check the reverse_free_ratio of the code heap
        // and force stack scanning if less than 10% of the code heap is free.
        int type = code_blob_type;
        switch (type) {
        case CodeBlobType::NonNMethod:
          type = CodeBlobType::MethodNonProfiled;
          break;
        case CodeBlobType::MethodNonProfiled:
          type = CodeBlobType::MethodProfiled;
          break;
        case CodeBlobType::MethodProfiled:
          // Avoid loop if we already tried that code heap
          if (type == orig_code_blob_type) {
            type = CodeBlobType::MethodNonProfiled;
          }
          break;
        }
        if (type != code_blob_type && type != orig_code_blob_type && heap_available(type)) {
          if (PrintCodeCacheExtension) {
            tty->print_cr("Extension of %s failed. Trying to allocate in %s.",
                          heap->name(), get_code_heap(type)->name());
          }
          return allocate(size, type, handle_alloc_failure, orig_code_blob_type);
        }
      }
      if (handle_alloc_failure) {
        MutexUnlocker mu(CodeCache_lock, Mutex::_no_safepoint_check_flag);
        CompileBroker::handle_full_code_cache(orig_code_blob_type);
      }
      return NULL;
    }
    if (PrintCodeCacheExtension) {
      ResourceMark rm;
      if (_nmethod_heaps->length() >= 1) {
        tty->print("%s", heap->name());
      } else {
        tty->print("CodeCache");
      }
      tty->print_cr(" extended to [" INTPTR_FORMAT ", " INTPTR_FORMAT "] (" SSIZE_FORMAT " bytes)",
                    (intptr_t)heap->low_boundary(), (intptr_t)heap->high(),
                    (address)heap->high() - (address)heap->low_boundary());
    }
  }
  print_trace("allocation", cb, size);
  return cb;
}
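
// Illustration of the segmented-cache fallback chain implemented above, taken
// when a heap is full and cannot be expanded:
//   NonNMethod        -> MethodNonProfiled -> MethodProfiled
//   MethodNonProfiled -> MethodProfiled
//   MethodProfiled    -> MethodNonProfiled (only if the request originated in
//                        MethodProfiled, which avoids bouncing between the two)
// If no heap can satisfy the request, CompileBroker::handle_full_code_cache()
// is notified (when handle_alloc_failure is set) and NULL is returned.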

void CodeCache::free(CodeBlob* cb) {
  assert_locked_or_safepoint(CodeCache_lock);
  CodeHeap* heap = get_code_heap(cb);
  print_trace("free", cb);
  if (cb->is_nmethod()) {
    nmethod* ptr = (nmethod *)cb;
    heap->set_nmethod_count(heap->nmethod_count() - 1);
    if (ptr->has_dependencies()) {
      _number_of_nmethods_with_dependencies--;
    }
    ptr->free_native_invokers();
  }
  if (cb->is_adapter_blob()) {
    heap->set_adapter_count(heap->adapter_count() - 1);
  }

  // Get heap for given CodeBlob and deallocate
  get_code_heap(cb)->deallocate(cb);

  assert(heap->blob_count() >= 0, "sanity check");
}

void CodeCache::free_unused_tail(CodeBlob* cb, size_t used) {
  assert_locked_or_safepoint(CodeCache_lock);
  guarantee(cb->is_buffer_blob() && strncmp("Interpreter", cb->name(), 11) == 0, "Only possible for interpreter!");
  print_trace("free_unused_tail", cb);

  // We also have to account for the extra space (i.e. header) used by the CodeBlob
  // which provides the memory (see BufferBlob::create() in codeBlob.cpp).
  used += CodeBlob::align_code_offset(cb->header_size());

  // Get heap for given CodeBlob and deallocate its unused tail
  get_code_heap(cb)->deallocate_tail(cb, used);
  // Adjust the sizes of the CodeBlob
  cb->adjust_size(used);
}
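
// Example (numbers are illustrative only): the interpreter generator requests
// a generously sized BufferBlob up front and later calls free_unused_tail()
// with the bytes it actually emitted. If 256K were allocated and only 200K
// used, the remaining tail (after adding the aligned header size, as above) is
// returned to the heap's free list and the blob is shrunk via adjust_size().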

void CodeCache::commit(CodeBlob* cb) {
  // this is called by nmethod::nmethod, which must already own CodeCache_lock
  assert_locked_or_safepoint(CodeCache_lock);
  CodeHeap* heap = get_code_heap(cb);
  if (cb->is_nmethod()) {
    heap->set_nmethod_count(heap->nmethod_count() + 1);
    if (((nmethod *)cb)->has_dependencies()) {
      _number_of_nmethods_with_dependencies++;
    }
  }
  if (cb->is_adapter_blob()) {
    heap->set_adapter_count(heap->adapter_count() + 1);
  }

  // flush the hardware I-cache
  ICache::invalidate_range(cb->content_begin(), cb->content_size());
}

bool CodeCache::contains(void *p) {
  // S390 uses contains() in current_frame(), which is used before
  // code cache initialization if NativeMemoryTracking=detail is set.
  S390_ONLY(if (_heaps == NULL) return false;)
  // It should be ok to call contains without holding a lock.
  FOR_ALL_HEAPS(heap) {
    if ((*heap)->contains(p)) {
      return true;
    }
  }
  return false;
}

bool CodeCache::contains(nmethod *nm) {
  return contains((void *)nm);
}

// This method is safe to call without holding the CodeCache_lock, as long as a dead CodeBlob is not
// looked up (i.e., one that has been marked for deletion). It only depends on the _segmap to contain
// valid indices, which it will always do, as long as the CodeBlob is not in the process of being recycled.
CodeBlob* CodeCache::find_blob(void* start) {
  CodeBlob* result = find_blob_unsafe(start);
  // We could potentially look up non_entrant methods
  guarantee(result == NULL || !result->is_zombie() || result->is_locked_by_vm() || VMError::is_error_reported(), "unsafe access to zombie method");
  return result;
}

// Lookup that does not fail if you look up a zombie method (if you call this, be sure to know
// what you are doing)
CodeBlob* CodeCache::find_blob_unsafe(void* start) {
  // NMT can walk the stack before code cache is created
  if (_heaps != NULL) {
    CodeHeap* heap = get_code_heap_containing(start);
    if (heap != NULL) {
      return heap->find_blob_unsafe(start);
    }
  }
  return NULL;
}

nmethod* CodeCache::find_nmethod(void* start) {
  CodeBlob* cb = find_blob(start);
  assert(cb->is_nmethod(), "did not find an nmethod");
  return (nmethod*)cb;
}

void CodeCache::blobs_do(void f(CodeBlob* nm)) {
  assert_locked_or_safepoint(CodeCache_lock);
  FOR_ALL_HEAPS(heap) {
    FOR_ALL_BLOBS(cb, *heap) {
      f(cb);
    }
  }
}

void CodeCache::nmethods_do(void f(nmethod* nm)) {
  assert_locked_or_safepoint(CodeCache_lock);
  NMethodIterator iter(NMethodIterator::all_blobs);
  while(iter.next()) {
    f(iter.method());
  }
}

void CodeCache::metadata_do(MetadataClosure* f) {
  assert_locked_or_safepoint(CodeCache_lock);
  NMethodIterator iter(NMethodIterator::only_alive_and_not_unloading);
  while(iter.next()) {
    iter.method()->metadata_do(f);
  }
}

int CodeCache::alignment_unit() {
  return (int)_heaps->first()->alignment_unit();
}

int CodeCache::alignment_offset() {
  return (int)_heaps->first()->alignment_offset();
}

// Mark nmethods for unloading if they contain otherwise unreachable oops.
void CodeCache::do_unloading(BoolObjectClosure* is_alive, bool unloading_occurred) {
  assert_locked_or_safepoint(CodeCache_lock);
  UnloadingScope scope(is_alive);
  CompiledMethodIterator iter(CompiledMethodIterator::only_alive);
  while(iter.next()) {
    iter.method()->do_unloading(unloading_occurred);
  }
}

void CodeCache::blobs_do(CodeBlobClosure* f) {
  assert_locked_or_safepoint(CodeCache_lock);
  FOR_ALL_ALLOCABLE_HEAPS(heap) {
    FOR_ALL_BLOBS(cb, *heap) {
      if (cb->is_alive()) {
        f->do_code_blob(cb);
#ifdef ASSERT
        if (cb->is_nmethod()) {
          Universe::heap()->verify_nmethod((nmethod*)cb);
        }
#endif //ASSERT
      }
    }
  }
}

void CodeCache::verify_clean_inline_caches() {
#ifdef ASSERT
  NMethodIterator iter(NMethodIterator::only_alive_and_not_unloading);
  while(iter.next()) {
    nmethod* nm = iter.method();
    assert(!nm->is_unloaded(), "Tautology");
    nm->verify_clean_inline_caches();
    nm->verify();
  }
#endif
}

void CodeCache::verify_icholder_relocations() {
#ifdef ASSERT
  // make sure that we aren't leaking icholders
  int count = 0;
  FOR_ALL_HEAPS(heap) {
    FOR_ALL_BLOBS(cb, *heap) {
      CompiledMethod *nm = cb->as_compiled_method_or_null();
      if (nm != NULL) {
        count += nm->verify_icholder_relocations();
      }
    }
  }
  assert(count + InlineCacheBuffer::pending_icholder_count() + CompiledICHolder::live_not_claimed_count() ==
         CompiledICHolder::live_count(), "must agree");
#endif
}

// Defer freeing of concurrently cleaned ExceptionCache entries until
// after a global handshake operation.
void CodeCache::release_exception_cache(ExceptionCache* entry) {
  if (SafepointSynchronize::is_at_safepoint()) {
    delete entry;
  } else {
    for (;;) {
      ExceptionCache* purge_list_head = Atomic::load(&_exception_cache_purge_list);
      entry->set_purge_list_next(purge_list_head);
      if (Atomic::cmpxchg(&_exception_cache_purge_list, purge_list_head, entry) == purge_list_head) {
        break;
      }
    }
  }
}
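
// The retry loop above is a standard lock-free stack push: the entry's next
// pointer is set to the current head, and a compare-and-swap installs the
// entry as the new head. A failed cmpxchg means another thread pushed
// concurrently, so the head is re-read and the push retried. Entries are only
// deleted later, in purge_exception_caches(), once a global handshake
// guarantees no reader can still hold a reference to them.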

// Delete exception caches that have been concurrently unlinked,
// followed by a global handshake operation.
void CodeCache::purge_exception_caches() {
  ExceptionCache* curr = _exception_cache_purge_list;
  while (curr != NULL) {
    ExceptionCache* next = curr->purge_list_next();
    delete curr;
    curr = next;
  }
  _exception_cache_purge_list = NULL;
}

uint8_t CodeCache::_unloading_cycle = 1;

void CodeCache::increment_unloading_cycle() {
  // 2-bit value (see IsUnloadingState in nmethod.cpp for details)
  // 0 is reserved for new methods.
  _unloading_cycle = (_unloading_cycle + 1) % 4;
  if (_unloading_cycle == 0) {
    _unloading_cycle = 1;
  }
}
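
// Example: successive calls yield 1 -> 2 -> 3 -> 1 -> 2 -> ... The value 0
// never recurs, so a freshly created nmethod (which starts in state 0) can
// always be distinguished from one tagged in any past or current cycle.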

CodeCache::UnloadingScope::UnloadingScope(BoolObjectClosure* is_alive)
  : _is_unloading_behaviour(is_alive)
{
  _saved_behaviour = IsUnloadingBehaviour::current();
  IsUnloadingBehaviour::set_current(&_is_unloading_behaviour);
  increment_unloading_cycle();
  DependencyContext::cleaning_start();
}

CodeCache::UnloadingScope::~UnloadingScope() {
  IsUnloadingBehaviour::set_current(_saved_behaviour);
  DependencyContext::cleaning_end();
}

void CodeCache::verify_oops() {
  MutexLocker mu(CodeCache_lock, Mutex::_no_safepoint_check_flag);
  VerifyOopClosure voc;
  NMethodIterator iter(NMethodIterator::only_alive_and_not_unloading);
  while(iter.next()) {
    nmethod* nm = iter.method();
    nm->oops_do(&voc);
    nm->verify_oop_relocations();
  }
}

int CodeCache::blob_count(int code_blob_type) {
  CodeHeap* heap = get_code_heap(code_blob_type);
  return (heap != NULL) ? heap->blob_count() : 0;
}

int CodeCache::blob_count() {
  int count = 0;
  FOR_ALL_HEAPS(heap) {
    count += (*heap)->blob_count();
  }
  return count;
}

int CodeCache::nmethod_count(int code_blob_type) {
  CodeHeap* heap = get_code_heap(code_blob_type);
  return (heap != NULL) ? heap->nmethod_count() : 0;
}

int CodeCache::nmethod_count() {
  int count = 0;
  FOR_ALL_NMETHOD_HEAPS(heap) {
    count += (*heap)->nmethod_count();
  }
  return count;
}

int CodeCache::adapter_count(int code_blob_type) {
  CodeHeap* heap = get_code_heap(code_blob_type);
  return (heap != NULL) ? heap->adapter_count() : 0;
}

int CodeCache::adapter_count() {
  int count = 0;
  FOR_ALL_HEAPS(heap) {
    count += (*heap)->adapter_count();
  }
  return count;
}

address CodeCache::low_bound(int code_blob_type) {
  CodeHeap* heap = get_code_heap(code_blob_type);
  return (heap != NULL) ? (address)heap->low_boundary() : NULL;
}

address CodeCache::high_bound(int code_blob_type) {
  CodeHeap* heap = get_code_heap(code_blob_type);
  return (heap != NULL) ? (address)heap->high_boundary() : NULL;
}

size_t CodeCache::capacity() {
  size_t cap = 0;
  FOR_ALL_ALLOCABLE_HEAPS(heap) {
    cap += (*heap)->capacity();
  }
  return cap;
}

size_t CodeCache::unallocated_capacity(int code_blob_type) {
  CodeHeap* heap = get_code_heap(code_blob_type);
  return (heap != NULL) ? heap->unallocated_capacity() : 0;
}

size_t CodeCache::unallocated_capacity() {
  size_t unallocated_cap = 0;
  FOR_ALL_ALLOCABLE_HEAPS(heap) {
    unallocated_cap += (*heap)->unallocated_capacity();
  }
  return unallocated_cap;
}

size_t CodeCache::max_capacity() {
  size_t max_cap = 0;
  FOR_ALL_ALLOCABLE_HEAPS(heap) {
    max_cap += (*heap)->max_capacity();
  }
  return max_cap;
}

/**
 * Returns the reverse free ratio. E.g., if 25% (1/4) of the code heap
 * is free, reverse_free_ratio() returns 4.
 */
double CodeCache::reverse_free_ratio(int code_blob_type) {
  CodeHeap* heap = get_code_heap(code_blob_type);
  if (heap == NULL) {
    return 0;
  }

  double unallocated_capacity = MAX2((double)heap->unallocated_capacity(), 1.0); // Avoid division by 0;
  double max_capacity = (double)heap->max_capacity();
  double result = max_capacity / unallocated_capacity;
  assert (max_capacity >= unallocated_capacity, "Must be");
  assert (result >= 1.0, "reverse_free_ratio must be at least 1. It is %f", result);
  return result;
}
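
// Worked example (illustrative numbers): a heap with max_capacity of 120M and
// 30M unallocated gives reverse_free_ratio = 120 / 30 = 4.0; with only 12M
// free it rises to 10.0. The sweeper treats a larger ratio as a more urgent
// signal to reclaim space.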

size_t CodeCache::bytes_allocated_in_freelists() {
  size_t allocated_bytes = 0;
  FOR_ALL_ALLOCABLE_HEAPS(heap) {
    allocated_bytes += (*heap)->allocated_in_freelist();
  }
  return allocated_bytes;
}

int CodeCache::allocated_segments() {
  int number_of_segments = 0;
  FOR_ALL_ALLOCABLE_HEAPS(heap) {
    number_of_segments += (*heap)->allocated_segments();
  }
  return number_of_segments;
}

size_t CodeCache::freelists_length() {
  size_t length = 0;
  FOR_ALL_ALLOCABLE_HEAPS(heap) {
    length += (*heap)->freelist_length();
  }
  return length;
}

void icache_init();

void CodeCache::initialize() {
  assert(CodeCacheSegmentSize >= (uintx)CodeEntryAlignment, "CodeCacheSegmentSize must be large enough to align entry points");
#ifdef COMPILER2
  assert(CodeCacheSegmentSize >= (uintx)OptoLoopAlignment,  "CodeCacheSegmentSize must be large enough to align inner loops");
#endif
  assert(CodeCacheSegmentSize >= sizeof(jdouble),    "CodeCacheSegmentSize must be large enough to align constants");
  // This was originally just an alignment check that caused a startup failure.
  // Instead, round the expansion size up to the page size. In particular,
  // Solaris is moving to a larger default page size.
  CodeCacheExpansionSize = align_up(CodeCacheExpansionSize, os::vm_page_size());

  if (SegmentedCodeCache) {
    // Use multiple code heaps
    initialize_heaps();
  } else {
    // Use a single code heap
    FLAG_SET_ERGO(NonNMethodCodeHeapSize, 0);
    FLAG_SET_ERGO(ProfiledCodeHeapSize, 0);
    FLAG_SET_ERGO(NonProfiledCodeHeapSize, 0);
    ReservedCodeSpace rs = reserve_heap_memory(ReservedCodeCacheSize);
    add_heap(rs, "CodeCache", CodeBlobType::All);
  }

  // Initialize ICache flush mechanism
  // This service is needed for os::register_code_area
  icache_init();

  // Give OS a chance to register generated code area.
  // This is used on Windows 64 bit platforms to register
  // Structured Exception Handlers for our generated code.
  os::register_code_area((char*)low_bound(), (char*)high_bound());
}

void codeCache_init() {
  CodeCache::initialize();
}

//------------------------------------------------------------------------------------------------

int CodeCache::number_of_nmethods_with_dependencies() {
  return _number_of_nmethods_with_dependencies;
}

void CodeCache::clear_inline_caches() {
  assert_locked_or_safepoint(CodeCache_lock);
  CompiledMethodIterator iter(CompiledMethodIterator::only_alive_and_not_unloading);
  while(iter.next()) {
    iter.method()->clear_inline_caches();
  }
}

void CodeCache::cleanup_inline_caches() {
  assert_locked_or_safepoint(CodeCache_lock);
  NMethodIterator iter(NMethodIterator::only_alive_and_not_unloading);
  while(iter.next()) {
    iter.method()->cleanup_inline_caches(/*clean_all=*/true);
  }
}

// Keeps track of time spent for checking dependencies
NOT_PRODUCT(static elapsedTimer dependentCheckTime;)

int CodeCache::mark_for_deoptimization(KlassDepChange& changes) {
  MutexLocker mu(CodeCache_lock, Mutex::_no_safepoint_check_flag);
  int number_of_marked_CodeBlobs = 0;

  // Search the class hierarchy for nmethods which are affected by the loading
  // of this class, then search the interfaces this class implements for
  // nmethods which might depend on the fact that an interface previously had
  // only one implementor.
  // nmethod::check_all_dependencies works correctly only if no safepoint
  // can happen.
  NoSafepointVerifier nsv;
  for (DepChange::ContextStream str(changes, nsv); str.next(); ) {
    Klass* d = str.klass();
    number_of_marked_CodeBlobs += InstanceKlass::cast(d)->mark_dependent_nmethods(changes);
  }

#ifndef PRODUCT
  if (VerifyDependencies) {
    // Object pointers are used as unique identifiers for dependency arguments. This
    // is only possible if no safepoint, i.e., GC occurs during the verification code.
    dependentCheckTime.start();
    nmethod::check_all_dependencies(changes);
    dependentCheckTime.stop();
  }
#endif

  return number_of_marked_CodeBlobs;
}

CompiledMethod* CodeCache::find_compiled(void* start) {
  CodeBlob *cb = find_blob(start);
  assert(cb == NULL || cb->is_compiled(), "did not find a compiled method");
  return (CompiledMethod*)cb;
}

#if INCLUDE_JVMTI
// RedefineClasses support for unloading nmethods that are dependent on "old" methods.
// We don't really expect this table to grow very large.  If it does, it can become a hashtable.
static GrowableArray<CompiledMethod*>* old_compiled_method_table = NULL;

static void add_to_old_table(CompiledMethod* c) {
  if (old_compiled_method_table == NULL) {
    old_compiled_method_table = new (ResourceObj::C_HEAP, mtCode) GrowableArray<CompiledMethod*>(100, mtCode);
  }
  old_compiled_method_table->push(c);
}

static void reset_old_method_table() {
  if (old_compiled_method_table != NULL) {
    delete old_compiled_method_table;
    old_compiled_method_table = NULL;
  }
}

// Remove this method when zombied or unloaded.
void CodeCache::unregister_old_nmethod(CompiledMethod* c) {
  assert_lock_strong(CodeCache_lock);
  if (old_compiled_method_table != NULL) {
    int index = old_compiled_method_table->find(c);
    if (index != -1) {
      old_compiled_method_table->delete_at(index);
    }
  }
}

void CodeCache::old_nmethods_do(MetadataClosure* f) {
  // Walk old method table and mark those on stack.
  int length = 0;
  if (old_compiled_method_table != NULL) {
    length = old_compiled_method_table->length();
    for (int i = 0; i < length; i++) {
      CompiledMethod* cm = old_compiled_method_table->at(i);
      // Only walk alive nmethods, the dead ones will get removed by the sweeper or GC.
      if (cm->is_alive() && !cm->is_unloading()) {
        old_compiled_method_table->at(i)->metadata_do(f);
      }
    }
  }
  log_debug(redefine, class, nmethod)("Walked %d nmethods for mark_on_stack", length);
}

// Walk compiled methods and mark dependent methods for deoptimization.
int CodeCache::mark_dependents_for_evol_deoptimization() {
  assert(SafepointSynchronize::is_at_safepoint(), "Can only do this at a safepoint!");
  // Each redefinition creates a new set of nmethods that have references to "old" Methods,
  // so delete the old method table and create a new one.
  reset_old_method_table();

  int number_of_marked_CodeBlobs = 0;
  CompiledMethodIterator iter(CompiledMethodIterator::only_alive_and_not_unloading);
  while(iter.next()) {
    CompiledMethod* nm = iter.method();
    // Walk all alive nmethods to check for old Methods.
    // This includes methods whose inline caches point to old methods, so
    // inline cache clearing is unnecessary.
    if (nm->has_evol_metadata()) {
      nm->mark_for_deoptimization();
      add_to_old_table(nm);
      number_of_marked_CodeBlobs++;
    }
  }

  // Return the total count of nmethods marked for deoptimization; if zero, the
  // caller can skip deoptimization.
  return number_of_marked_CodeBlobs;
}

void CodeCache::mark_all_nmethods_for_evol_deoptimization() {
  assert(SafepointSynchronize::is_at_safepoint(), "Can only do this at a safepoint!");
  CompiledMethodIterator iter(CompiledMethodIterator::only_alive_and_not_unloading);
  while(iter.next()) {
    CompiledMethod* nm = iter.method();
    if (!nm->method()->is_method_handle_intrinsic()) {
      nm->mark_for_deoptimization();
      if (nm->has_evol_metadata()) {
        add_to_old_table(nm);
      }
    }
  }
}

// Flushes compiled methods dependent on redefined classes that have already
// been marked for deoptimization.
void CodeCache::flush_evol_dependents() {
  assert(SafepointSynchronize::is_at_safepoint(), "Can only do this at a safepoint!");

  // CodeCache can only be updated by a thread_in_VM and they will all be
  // stopped during the safepoint so CodeCache will be safe to update without
  // holding the CodeCache_lock.

  // At least one nmethod has been marked for deoptimization

  Deoptimization::deoptimize_all_marked();
}
#endif // INCLUDE_JVMTI

// Mark methods for deopt (if safe or possible).
void CodeCache::mark_all_nmethods_for_deoptimization() {
  MutexLocker mu(CodeCache_lock, Mutex::_no_safepoint_check_flag);
  CompiledMethodIterator iter(CompiledMethodIterator::only_alive_and_not_unloading);
  while(iter.next()) {
    CompiledMethod* nm = iter.method();
    if (!nm->is_native_method()) {
      nm->mark_for_deoptimization();
    }
  }
}

int CodeCache::mark_for_deoptimization(Method* dependee) {
  MutexLocker mu(CodeCache_lock, Mutex::_no_safepoint_check_flag);
  int number_of_marked_CodeBlobs = 0;

  CompiledMethodIterator iter(CompiledMethodIterator::only_alive_and_not_unloading);
  while(iter.next()) {
    CompiledMethod* nm = iter.method();
    if (nm->is_dependent_on_method(dependee)) {
      ResourceMark rm;
      nm->mark_for_deoptimization();
      number_of_marked_CodeBlobs++;
    }
  }

  return number_of_marked_CodeBlobs;
}

void CodeCache::make_marked_nmethods_not_entrant() {
  assert_locked_or_safepoint(CodeCache_lock);
  CompiledMethodIterator iter(CompiledMethodIterator::only_alive_and_not_unloading);
  while(iter.next()) {
    CompiledMethod* nm = iter.method();
    if (nm->is_marked_for_deoptimization()) {
      nm->make_not_entrant();
    }
  }
}

// Flushes compiled methods dependent on dependee.
void CodeCache::flush_dependents_on(InstanceKlass* dependee) {
  assert_lock_strong(Compile_lock);

  if (number_of_nmethods_with_dependencies() == 0) return;

  int marked = 0;
  if (dependee->is_linked()) {
    // Class initialization state change.
    KlassInitDepChange changes(dependee);
    marked = mark_for_deoptimization(changes);
  } else {
    // New class is loaded.
    NewKlassDepChange changes(dependee);
    marked = mark_for_deoptimization(changes);
  }

  if (marked > 0) {
    // At least one nmethod has been marked for deoptimization
    Deoptimization::deoptimize_all_marked();
  }
}

// Flushes compiled methods dependent on dependee
void CodeCache::flush_dependents_on_method(const methodHandle& m_h) {
  // --- Compile_lock is not held. However we are at a safepoint.
  assert_locked_or_safepoint(Compile_lock);

  // Compute the dependent nmethods
  if (mark_for_deoptimization(m_h()) > 0) {
    Deoptimization::deoptimize_all_marked();
  }
}

void CodeCache::verify() {
  assert_locked_or_safepoint(CodeCache_lock);
  FOR_ALL_HEAPS(heap) {
    (*heap)->verify();
    FOR_ALL_BLOBS(cb, *heap) {
      if (cb->is_alive()) {
        cb->verify();
      }
    }
  }
}

// A CodeHeap is full. Print out warning and report event.
PRAGMA_DIAG_PUSH
PRAGMA_FORMAT_NONLITERAL_IGNORED
void CodeCache::report_codemem_full(int code_blob_type, bool print) {
  // Get nmethod heap for the given CodeBlobType and build CodeCacheFull event
  CodeHeap* heap = get_code_heap(code_blob_type);
  assert(heap != NULL, "heap is null");

  heap->report_full();

  if ((heap->full_count() == 1) || print) {
    // Not yet reported for this heap, report
    if (SegmentedCodeCache) {
      ResourceMark rm;
      stringStream msg1_stream, msg2_stream;
      msg1_stream.print("%s is full. Compiler has been disabled.",
                        get_code_heap_name(code_blob_type));
      msg2_stream.print("Try increasing the code heap size using -XX:%s=",
                        get_code_heap_flag_name(code_blob_type));
      const char *msg1 = msg1_stream.as_string();
      const char *msg2 = msg2_stream.as_string();

      log_warning(codecache)("%s", msg1);
      log_warning(codecache)("%s", msg2);
      warning("%s", msg1);
      warning("%s", msg2);
    } else {
      const char *msg1 = "CodeCache is full. Compiler has been disabled.";
      const char *msg2 = "Try increasing the code cache size using -XX:ReservedCodeCacheSize=";

      log_warning(codecache)("%s", msg1);
      log_warning(codecache)("%s", msg2);
      warning("%s", msg1);
      warning("%s", msg2);
    }
    ResourceMark rm;
    stringStream s;
    // Dump code cache into a buffer before locking the tty.
    {
      MutexLocker mu(CodeCache_lock, Mutex::_no_safepoint_check_flag);
      print_summary(&s);
    }
    {
      ttyLocker ttyl;
      tty->print("%s", s.as_string());
    }

    if (heap->full_count() == 1) {
      if (PrintCodeHeapAnalytics) {
        CompileBroker::print_heapinfo(tty, "all", 4096); // details, may be a lot!
      }
    }
  }

  EventCodeCacheFull event;
  if (event.should_commit()) {
    event.set_codeBlobType((u1)code_blob_type);
    event.set_startAddress((u8)heap->low_boundary());
    event.set_commitedTopAddress((u8)heap->high());
    event.set_reservedTopAddress((u8)heap->high_boundary());
    event.set_entryCount(heap->blob_count());
    event.set_methodCount(heap->nmethod_count());
    event.set_adaptorCount(heap->adapter_count());
    event.set_unallocatedCapacity(heap->unallocated_capacity());
    event.set_fullCount(heap->full_count());
    event.commit();
  }
}
PRAGMA_DIAG_POP

void CodeCache::print_memory_overhead() {
  size_t wasted_bytes = 0;
  FOR_ALL_ALLOCABLE_HEAPS(heap) {
    CodeHeap* curr_heap = *heap;
    for (CodeBlob* cb = (CodeBlob*)curr_heap->first(); cb != NULL; cb = (CodeBlob*)curr_heap->next(cb)) {
      HeapBlock* heap_block = ((HeapBlock*)cb) - 1;
      wasted_bytes += heap_block->length() * CodeCacheSegmentSize - cb->size();
    }
  }
  // Print bytes that are allocated in the freelist
  ttyLocker ttl;
  tty->print_cr("Number of elements in freelist: " SSIZE_FORMAT,       freelists_length());
  tty->print_cr("Allocated in freelist:          " SSIZE_FORMAT "kB",  bytes_allocated_in_freelists()/K);
  tty->print_cr("Unused bytes in CodeBlobs:      " SSIZE_FORMAT "kB",  (wasted_bytes/K));
  tty->print_cr("Segment map size:               " SSIZE_FORMAT "kB",  allocated_segments()/K); // 1 byte per segment
}
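
// Example of the rounding loss computed above (numbers illustrative): with a
// typical CodeCacheSegmentSize of 64 bytes, a blob that fills 1000 bytes of
// its allocation occupies 16 segments (1024 bytes), wasting 24 bytes. That
// per-blob loss, summed over all blobs, is the "Unused bytes in CodeBlobs"
// line printed here.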

//------------------------------------------------------------------------------------------------
// Non-product version

#ifndef PRODUCT

void CodeCache::print_trace(const char* event, CodeBlob* cb, int size) {
  if (PrintCodeCache2) {  // Need to add a new flag
    ResourceMark rm;
    if (size == 0)  size = cb->size();
    tty->print_cr("CodeCache %s:  addr: " INTPTR_FORMAT ", size: 0x%x", event, p2i(cb), size);
  }
}

void CodeCache::print_internals() {
  int nmethodCount = 0;
  int runtimeStubCount = 0;
  int adapterCount = 0;
  int deoptimizationStubCount = 0;
  int uncommonTrapStubCount = 0;
  int bufferBlobCount = 0;
  int total = 0;
  int nmethodAlive = 0;
  int nmethodNotEntrant = 0;
  int nmethodZombie = 0;
  int nmethodUnloaded = 0;
  int nmethodJava = 0;
  int nmethodNative = 0;
  int max_nm_size = 0;
  ResourceMark rm;

  FOR_ALL_ALLOCABLE_HEAPS(heap) {
    if ((_nmethod_heaps->length() >= 1) && Verbose) {
      tty->print_cr("-- %s --", (*heap)->name());
    }
    FOR_ALL_BLOBS(cb, *heap) {
      total++;
      if (cb->is_nmethod()) {
        nmethod* nm = (nmethod*)cb;

        if (Verbose && nm->method() != NULL) {
          ResourceMark rm;
          char *method_name = nm->method()->name_and_sig_as_C_string();
          tty->print("%s", method_name);
          if(nm->is_alive()) { tty->print_cr(" alive"); }
          if(nm->is_not_entrant()) { tty->print_cr(" not-entrant"); }
          if(nm->is_zombie()) { tty->print_cr(" zombie"); }
        }

        nmethodCount++;

        if(nm->is_alive()) { nmethodAlive++; }
        if(nm->is_not_entrant()) { nmethodNotEntrant++; }
        if(nm->is_zombie()) { nmethodZombie++; }
        if(nm->is_unloaded()) { nmethodUnloaded++; }
        if(nm->method() != NULL && nm->is_native_method()) { nmethodNative++; }

        if(nm->method() != NULL && nm->is_java_method()) {
          nmethodJava++;
          max_nm_size = MAX2(max_nm_size, nm->size());
        }
      } else if (cb->is_runtime_stub()) {
        runtimeStubCount++;
      } else if (cb->is_deoptimization_stub()) {
        deoptimizationStubCount++;
      } else if (cb->is_uncommon_trap_stub()) {
        uncommonTrapStubCount++;
      } else if (cb->is_adapter_blob()) {
        adapterCount++;
      } else if (cb->is_buffer_blob()) {
        bufferBlobCount++;
      }
    }
  }

  int bucketSize = 512;
  int bucketLimit = max_nm_size / bucketSize + 1;
  int *buckets = NEW_C_HEAP_ARRAY(int, bucketLimit, mtCode);
  memset(buckets, 0, sizeof(int) * bucketLimit);

  NMethodIterator iter(NMethodIterator::all_blobs);
  while(iter.next()) {
    nmethod* nm = iter.method();
    if(nm->method() != NULL && nm->is_java_method()) {
      buckets[nm->size() / bucketSize]++;
    }
  }

  tty->print_cr("Code Cache Entries (total of %d)",total);
  tty->print_cr("-------------------------------------------------");
  tty->print_cr("nmethods: %d",nmethodCount);
  tty->print_cr("\talive: %d",nmethodAlive);
  tty->print_cr("\tnot_entrant: %d",nmethodNotEntrant);
  tty->print_cr("\tzombie: %d",nmethodZombie);
  tty->print_cr("\tunloaded: %d",nmethodUnloaded);
  tty->print_cr("\tjava: %d",nmethodJava);
  tty->print_cr("\tnative: %d",nmethodNative);
  tty->print_cr("runtime_stubs: %d",runtimeStubCount);
  tty->print_cr("adapters: %d",adapterCount);
  tty->print_cr("buffer blobs: %d",bufferBlobCount);
  tty->print_cr("deoptimization_stubs: %d",deoptimizationStubCount);
  tty->print_cr("uncommon_traps: %d",uncommonTrapStubCount);
  tty->print_cr("\nnmethod size distribution (non-zombie java)");
  tty->print_cr("-------------------------------------------------");

  for(int i=0; i<bucketLimit; i++) {
    if(buckets[i] != 0) {
      tty->print("%d - %d bytes",i*bucketSize,(i+1)*bucketSize);
      tty->fill_to(40);
      tty->print_cr("%d",buckets[i]);
    }
  }

  FREE_C_HEAP_ARRAY(int, buckets);
  print_memory_overhead();
}

#endif // !PRODUCT
1426 
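// Print a summary of the code cache to tty. In non-product builds, running
// with -XX:+Verbose additionally prints aggregate size statistics for live
// and dead blobs, and (in WizardMode) oop map usage.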
void CodeCache::print() {
  print_summary(tty);

#ifndef PRODUCT
  if (!Verbose) return;

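  // Tally live and dead blobs separately so each group can be printed below.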
  CodeBlob_sizes live;
  CodeBlob_sizes dead;

  FOR_ALL_ALLOCABLE_HEAPS(heap) {
    FOR_ALL_BLOBS(cb, *heap) {
      if (!cb->is_alive()) {
        dead.add(cb);
      } else {
        live.add(cb);
      }
    }
  }

  tty->print_cr("CodeCache:");
  tty->print_cr("nmethod dependency checking time %fs", dependentCheckTime.seconds());

  if (!live.is_empty()) {
    live.print("live");
  }
  if (!dead.is_empty()) {
    dead.print("dead");
  }

  if (WizardMode) {
    // Print the oop_map usage.
    int code_size = 0;
    int number_of_blobs = 0;
    int number_of_oop_maps = 0;
    int map_size = 0;
    FOR_ALL_ALLOCABLE_HEAPS(heap) {
      FOR_ALL_BLOBS(cb, *heap) {
        if (cb->is_alive()) {
          number_of_blobs++;
          code_size += cb->code_size();
          ImmutableOopMapSet* set = cb->oop_maps();
          if (set != NULL) {
            number_of_oop_maps += set->count();
            map_size           += set->nr_of_bytes();
          }
        }
      }
    }
    tty->print_cr("OopMaps");
    tty->print_cr("  #blobs    = %d", number_of_blobs);
    tty->print_cr("  code size = %d", code_size);
    tty->print_cr("  #oop_maps = %d", number_of_oop_maps);
    tty->print_cr("  map size  = %d", map_size);
  }

#endif // !PRODUCT
}

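// Print a one-line size/usage summary per code heap. With 'detailed', also
// print each heap's bounds, the blob counts, and the compiler's current status.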
void CodeCache::print_summary(outputStream* st, bool detailed) {
  int full_count = 0;
  FOR_ALL_HEAPS(heap_iterator) {
    CodeHeap* heap = (*heap_iterator);
    size_t total = (heap->high_boundary() - heap->low_boundary());
    if (_heaps->length() >= 1) {
      st->print("%s:", heap->name());
    } else {
      st->print("CodeCache:");
    }
    st->print_cr(" size=" SIZE_FORMAT "Kb used=" SIZE_FORMAT
                 "Kb max_used=" SIZE_FORMAT "Kb free=" SIZE_FORMAT "Kb",
                 total/K, (total - heap->unallocated_capacity())/K,
                 heap->max_allocated_capacity()/K, heap->unallocated_capacity()/K);

    if (detailed) {
      st->print_cr(" bounds [" INTPTR_FORMAT ", " INTPTR_FORMAT ", " INTPTR_FORMAT "]",
                   p2i(heap->low_boundary()),
                   p2i(heap->high()),
                   p2i(heap->high_boundary()));

      full_count += get_codemem_full_count(heap->code_blob_type());
    }
  }

  if (detailed) {
    st->print_cr(" total_blobs=" UINT32_FORMAT " nmethods=" UINT32_FORMAT
                 " adapters=" UINT32_FORMAT,
                 blob_count(), nmethod_count(), adapter_count());
    st->print_cr(" compilation: %s", CompileBroker::should_compile_new_jobs() ?
                 "enabled" : Arguments::mode() == Arguments::_int ?
                 "disabled (interpreter mode)" :
                 "disabled (not enough contiguous free space left)");
    st->print_cr("              stopped_count=%d, restarted_count=%d",
                 CompileBroker::get_total_compiler_stopped_count(),
                 CompileBroker::get_total_compiler_restarted_count());
    st->print_cr(" full_count=%d", full_count);
  }
}

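// Print one line per alive compiled method: compile id, compilation level,
// state, method name, and the header/code-begin/code-end addresses.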
void CodeCache::print_codelist(outputStream* st) {
  MutexLocker mu(CodeCache_lock, Mutex::_no_safepoint_check_flag);

  CompiledMethodIterator iter(CompiledMethodIterator::only_alive_and_not_unloading);
  while (iter.next()) {
    CompiledMethod* cm = iter.method();
    ResourceMark rm;
    char* method_name = cm->method()->name_and_sig_as_C_string();
    st->print_cr("%d %d %d %s [" INTPTR_FORMAT ", " INTPTR_FORMAT " - " INTPTR_FORMAT "]",
                 cm->compile_id(), cm->comp_level(), cm->get_state(),
                 method_name,
                 (intptr_t)cm->header_begin(), (intptr_t)cm->code_begin(), (intptr_t)cm->code_end());
  }
}

void CodeCache::print_layout(outputStream* st) {
  MutexLocker mu(CodeCache_lock, Mutex::_no_safepoint_check_flag);
  ResourceMark rm;
  print_summary(st, true);
}

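// Emit code cache statistics in attribute='value' form, suitable for
// embedding in the compilation log.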
void CodeCache::log_state(outputStream* st) {
  st->print(" total_blobs='" UINT32_FORMAT "' nmethods='" UINT32_FORMAT "'"
            " adapters='" UINT32_FORMAT "' free_code_cache='" SIZE_FORMAT "'",
            blob_count(), nmethod_count(), adapter_count(),
            unallocated_capacity());
}

#ifdef LINUX
void CodeCache::write_perf_map() {
  MutexLocker mu(CodeCache_lock, Mutex::_no_safepoint_check_flag);

  // Perf expects to find the map file at /tmp/perf-<pid>.map.
  char fname[32];
  jio_snprintf(fname, sizeof(fname), "/tmp/perf-%d.map", os::current_process_id());

  fileStream fs(fname, "w");
  if (!fs.is_open()) {
    log_warning(codecache)("Failed to create %s for perf map", fname);
    return;
  }

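  // Write one line per blob: start address, size, and name, which is the
  // format the perf tool expects for resolving JIT-compiled code symbols.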
  AllCodeBlobsIterator iter(AllCodeBlobsIterator::only_alive_and_not_unloading);
  while (iter.next()) {
    CodeBlob* cb = iter.method();
    ResourceMark rm;
    const char* method_name =
      cb->is_compiled() ? cb->as_compiled_method()->method()->external_name()
                        : cb->name();
    fs.print_cr(INTPTR_FORMAT " " INTPTR_FORMAT " %s",
                (intptr_t)cb->code_begin(), (intptr_t)cb->code_size(),
                method_name);
  }
}
#endif // LINUX

//---<  BEGIN  >--- CodeHeap State Analytics.

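// Each of the functions below forwards to CodeHeapState, applying the
// corresponding analytics step to every allocable code heap in turn.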
void CodeCache::aggregate(outputStream* out, size_t granularity) {
  FOR_ALL_ALLOCABLE_HEAPS(heap) {
    CodeHeapState::aggregate(out, (*heap), granularity);
  }
}

void CodeCache::discard(outputStream* out) {
  FOR_ALL_ALLOCABLE_HEAPS(heap) {
    CodeHeapState::discard(out, (*heap));
  }
}

void CodeCache::print_usedSpace(outputStream* out) {
  FOR_ALL_ALLOCABLE_HEAPS(heap) {
    CodeHeapState::print_usedSpace(out, (*heap));
  }
}

void CodeCache::print_freeSpace(outputStream* out) {
  FOR_ALL_ALLOCABLE_HEAPS(heap) {
    CodeHeapState::print_freeSpace(out, (*heap));
  }
}

void CodeCache::print_count(outputStream* out) {
  FOR_ALL_ALLOCABLE_HEAPS(heap) {
    CodeHeapState::print_count(out, (*heap));
  }
}

void CodeCache::print_space(outputStream* out) {
  FOR_ALL_ALLOCABLE_HEAPS(heap) {
    CodeHeapState::print_space(out, (*heap));
  }
}

void CodeCache::print_age(outputStream* out) {
  FOR_ALL_ALLOCABLE_HEAPS(heap) {
    CodeHeapState::print_age(out, (*heap));
  }
}

void CodeCache::print_names(outputStream* out) {
  FOR_ALL_ALLOCABLE_HEAPS(heap) {
    CodeHeapState::print_names(out, (*heap));
  }
}
//---<  END  >--- CodeHeap State Analytics.