
src/hotspot/share/code/codeCache.cpp


   5  * This code is free software; you can redistribute it and/or modify it
   6  * under the terms of the GNU General Public License version 2 only, as
   7  * published by the Free Software Foundation.
   8  *
   9  * This code is distributed in the hope that it will be useful, but WITHOUT
  10  * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
  11  * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
  12  * version 2 for more details (a copy is included in the LICENSE file that
  13  * accompanied this code).
  14  *
  15  * You should have received a copy of the GNU General Public License version
  16  * 2 along with this work; if not, write to the Free Software Foundation,
  17  * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
  18  *
  19  * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
  20  * or visit www.oracle.com if you need additional information or have any
  21  * questions.
  22  *
  23  */
  24 
  25 #include "code/codeBlob.hpp"
  26 #include "code/codeCache.hpp"
  27 #include "code/codeHeapState.hpp"
  28 #include "code/compiledIC.hpp"
  29 #include "code/dependencies.hpp"
  30 #include "code/dependencyContext.hpp"
  31 #include "code/nmethod.hpp"
  32 #include "code/pcDesc.hpp"
  33 #include "compiler/compilationPolicy.hpp"
  34 #include "compiler/compileBroker.hpp"
  35 #include "compiler/compilerDefinitions.inline.hpp"
  36 #include "compiler/oopMap.hpp"
  37 #include "gc/shared/barrierSetNMethod.hpp"
  38 #include "gc/shared/classUnloadingContext.hpp"
  39 #include "gc/shared/collectedHeap.hpp"
  40 #include "jfr/jfrEvents.hpp"
  41 #include "jvm_io.h"
  42 #include "logging/log.hpp"
  43 #include "logging/logStream.hpp"
  44 #include "memory/allocation.inline.hpp"
  45 #include "memory/iterator.hpp"
  46 #include "memory/memoryReserver.hpp"
  47 #include "memory/resourceArea.hpp"
  48 #include "memory/universe.hpp"
  49 #include "oops/method.inline.hpp"
  50 #include "oops/objArrayOop.hpp"
  51 #include "oops/oop.inline.hpp"
  52 #include "oops/verifyOopClosure.hpp"

 153       scopes_data_size += nm->scopes_data_size();
 154       scopes_pcs_size  += nm->scopes_pcs_size();
 155     } else {
 156       code_size        += cb->code_size();
 157     }
 158   }
 159 };
 160 
 161 // Iterate over all CodeHeaps
 162 #define FOR_ALL_HEAPS(heap) for (GrowableArrayIterator<CodeHeap*> heap = _heaps->begin(); heap != _heaps->end(); ++heap)
 163 #define FOR_ALL_ALLOCABLE_HEAPS(heap) for (GrowableArrayIterator<CodeHeap*> heap = _allocable_heaps->begin(); heap != _allocable_heaps->end(); ++heap)
 164 
 165 // Iterate over all CodeBlobs (cb) on the given CodeHeap
 166 #define FOR_ALL_BLOBS(cb, heap) for (CodeBlob* cb = first_blob(heap); cb != nullptr; cb = next_blob(heap, cb))
 167 
 168 address CodeCache::_low_bound = nullptr;
 169 address CodeCache::_high_bound = nullptr;
 170 volatile int CodeCache::_number_of_nmethods_with_dependencies = 0;
 171 ExceptionCache* volatile CodeCache::_exception_cache_purge_list = nullptr;
 172 
 173 // Initialize arrays of CodeHeap subsets
 174 GrowableArray<CodeHeap*>* CodeCache::_heaps = new(mtCode) GrowableArray<CodeHeap*> (static_cast<int>(CodeBlobType::All), mtCode);
 175 GrowableArray<CodeHeap*>* CodeCache::_nmethod_heaps = new(mtCode) GrowableArray<CodeHeap*> (static_cast<int>(CodeBlobType::All), mtCode);
 176 GrowableArray<CodeHeap*>* CodeCache::_allocable_heaps = new(mtCode) GrowableArray<CodeHeap*> (static_cast<int>(CodeBlobType::All), mtCode);
 177 
 178 static void check_min_size(const char* codeheap, size_t size, size_t required_size) {
 179   if (size < required_size) {
 180     log_debug(codecache)("Code heap (%s) size %zuK below required minimal size %zuK",
 181                          codeheap, size/K, required_size/K);
 182     err_msg title("Not enough space in %s to run VM", codeheap);
 183     err_msg message("%zuK < %zuK", size/K, required_size/K);
 184     vm_exit_during_initialization(title, message);
 185   }
 186 }
 187 
 188 struct CodeHeapInfo {
 189   size_t size;
 190   bool set;
 191   bool enabled;
 192 };
 193 
 194 static void set_size_of_unset_code_heap(CodeHeapInfo* heap, size_t available_size, size_t used_size, size_t min_size) {
 195   assert(!heap->set, "sanity");
 196   heap->size = (available_size > (used_size + min_size)) ? (available_size - used_size) : min_size;
 197 }
 198 
 199 void CodeCache::initialize_heaps() {
 200 
 201   CodeHeapInfo non_nmethod = {NonNMethodCodeHeapSize, FLAG_IS_CMDLINE(NonNMethodCodeHeapSize), true};
 202   CodeHeapInfo profiled = {ProfiledCodeHeapSize, FLAG_IS_CMDLINE(ProfiledCodeHeapSize), true};
 203   CodeHeapInfo non_profiled = {NonProfiledCodeHeapSize, FLAG_IS_CMDLINE(NonProfiledCodeHeapSize), true};
 204 
 205   const bool cache_size_set   = FLAG_IS_CMDLINE(ReservedCodeCacheSize);
 206   const size_t ps             = page_size(false, 8);
 207   const size_t min_size       = MAX2(os::vm_allocation_granularity(), ps);
 208   const size_t min_cache_size = CodeCacheMinimumUseSpace DEBUG_ONLY(* 3); // Make sure we have enough space for VM internal code
 209   size_t cache_size           = align_up(ReservedCodeCacheSize, min_size);
 210 
 211   // Prerequisites
 212   if (!heap_available(CodeBlobType::MethodProfiled)) {
 213     // For compatibility reasons, disabled tiered compilation overrides the
 214     // segment sizes even if they are set explicitly.
 215     non_profiled.size += profiled.size;
 216     // Profiled code heap is not available, forcibly set size to 0
 217     profiled.size = 0;
 218     profiled.set = true;
 219     profiled.enabled = false;
 220   }
 221 
 222   assert(heap_available(CodeBlobType::MethodNonProfiled), "MethodNonProfiled heap is always available for segmented code heap");
 223 
 224   size_t compiler_buffer_size = 0;
 225   COMPILER1_PRESENT(compiler_buffer_size += CompilationPolicy::c1_count() * Compiler::code_buffer_size());
 226   COMPILER2_PRESENT(compiler_buffer_size += CompilationPolicy::c2_count() * C2Compiler::initial_code_buffer_size());
 227 
 228   if (!non_nmethod.set) {
 229     non_nmethod.size += compiler_buffer_size;
 230     // Further down, just before FLAG_SET_ERGO(), all segment sizes are
 231     // aligned down to the next lower multiple of min_size. For large page
 232     // sizes, this may result in (non_nmethod.size == 0) which is not acceptable.
 233     // Therefore, force non_nmethod.size to at least min_size.
 234     non_nmethod.size = MAX2(non_nmethod.size, min_size);
 235   }
 236 
 237   if (!profiled.set && !non_profiled.set) {
 238     non_profiled.size = profiled.size = (cache_size > non_nmethod.size + 2 * min_size) ?
 239                                         (cache_size - non_nmethod.size) / 2 : min_size;
 240   }
 241 
 242   if (profiled.set && !non_profiled.set) {
 243     set_size_of_unset_code_heap(&non_profiled, cache_size, non_nmethod.size + profiled.size, min_size);
 244   }
 245 
 246   if (!profiled.set && non_profiled.set) {

 301     if (ps < lg_ps) {
 302       log_warning(codecache)("Code cache size too small for " PROPERFMT " pages. "
 303                              "Reverting to smaller page size (" PROPERFMT ").",
 304                              PROPERFMTARGS(lg_ps), PROPERFMTARGS(ps));
 305     }
 306   }
 307 
 308   // Note: if large page support is enabled, min_size is at least the large
 309   // page size. This ensures that the code cache is covered by large pages.
 310   non_profiled.size += non_nmethod.size & alignment_mask(min_size);
 311   non_profiled.size += profiled.size & alignment_mask(min_size);
 312   non_nmethod.size = align_down(non_nmethod.size, min_size);
 313   profiled.size = align_down(profiled.size, min_size);
 314   non_profiled.size = align_down(non_profiled.size, min_size);
 315 
 316   FLAG_SET_ERGO(NonNMethodCodeHeapSize, non_nmethod.size);
 317   FLAG_SET_ERGO(ProfiledCodeHeapSize, profiled.size);
 318   FLAG_SET_ERGO(NonProfiledCodeHeapSize, non_profiled.size);
 319   FLAG_SET_ERGO(ReservedCodeCacheSize, cache_size);
 320 
 321   ReservedSpace rs = reserve_heap_memory(cache_size, ps);
 322 
 323   // Register CodeHeaps with LSan as we sometimes embed pointers to malloc memory.
 324   LSAN_REGISTER_ROOT_REGION(rs.base(), rs.size());
 325 
 326   size_t offset = 0;
 327   if (profiled.enabled) {
 328     ReservedSpace profiled_space = rs.partition(offset, profiled.size);
 329     offset += profiled.size;
 330     // Tier 2 and tier 3 (profiled) methods
 331     add_heap(profiled_space, "CodeHeap 'profiled nmethods'", CodeBlobType::MethodProfiled);
 332   }
 333 
 334   ReservedSpace non_method_space = rs.partition(offset, non_nmethod.size);
 335   offset += non_nmethod.size;
 336   // Non-nmethods (stubs, adapters, ...)
 337   add_heap(non_method_space, "CodeHeap 'non-nmethods'", CodeBlobType::NonNMethod);
 338 
 339   if (non_profiled.enabled) {
 340     ReservedSpace non_profiled_space  = rs.partition(offset, non_profiled.size);
 341     // Tier 1 and tier 4 (non-profiled) methods and native methods
 342     add_heap(non_profiled_space, "CodeHeap 'non-profiled nmethods'", CodeBlobType::MethodNonProfiled);
 343   }
 344 }
 345 
 346 size_t CodeCache::page_size(bool aligned, size_t min_pages) {
 347   return aligned ? os::page_size_for_region_aligned(ReservedCodeCacheSize, min_pages) :
 348                    os::page_size_for_region_unaligned(ReservedCodeCacheSize, min_pages);
 349 }
 350 
 351 ReservedSpace CodeCache::reserve_heap_memory(size_t size, size_t rs_ps) {
 352   // Align and reserve space for code cache
 353   const size_t rs_align = MAX2(rs_ps, os::vm_allocation_granularity());
 354   const size_t rs_size = align_up(size, rs_align);
 355 
 356   ReservedSpace rs = CodeMemoryReserver::reserve(rs_size, rs_align, rs_ps);
 357   if (!rs.is_reserved()) {
 358     vm_exit_during_initialization(err_msg("Could not reserve enough space for code cache (%zuK)",
 359                                           rs_size/K));
 360   }
 361 
 362   // Initialize bounds
 363   _low_bound = (address)rs.base();
 364   _high_bound = _low_bound + rs.size();
 365   return rs;

1191                             AnyObj::RESOURCE_AREA, mtInternal,
1192                             &DependencySignature::hash,
1193                             &DependencySignature::equals> DepTable;
1194 
1195   DepTable* table = new DepTable();
1196 
1197   // Iterate over live nmethods and check dependencies of all nmethods that are not
1198   // marked for deoptimization. A particular dependency is only checked once.
1199   NMethodIterator iter(NMethodIterator::not_unloading);
1200   while(iter.next()) {
1201     nmethod* nm = iter.method();
 1202     // Only check live nmethods that are not marked for deoptimization
1203     if (!nm->is_marked_for_deoptimization()) {
1204       for (Dependencies::DepStream deps(nm); deps.next(); ) {
1205         // Construct abstraction of a dependency.
1206         DependencySignature* current_sig = new DependencySignature(deps);
1207 
1208         // Determine if dependency is already checked. table->put(...) returns
1209         // 'true' if the dependency is added (i.e., was not in the hashtable).
1210         if (table->put(*current_sig, 1)) {
1211           if (deps.check_dependency() != nullptr) {
1212             // Dependency checking failed. Print out information about the failed
1213             // dependency and finally fail with an assert. We can fail here, since
1214             // dependency checking is never done in a product build.
1215             tty->print_cr("Failed dependency:");
1216             changes.print();
1217             nm->print();
1218             nm->print_dependencies_on(tty);
1219             assert(false, "Should have been marked for deoptimization");
1220           }
1221         }
1222       }
1223     }
1224   }
1225 }
1226 #endif
1227 
1228 void CodeCache::mark_for_deoptimization(DeoptimizationScope* deopt_scope, KlassDepChange& changes) {
1229   MutexLocker mu(CodeCache_lock, Mutex::_no_safepoint_check_flag);
1230 
1231   // search the hierarchy looking for nmethods which are affected by the loading of this class
1232 
1233   // then search the interfaces this class implements looking for nmethods
 1234   // which might be dependent on the fact that an interface only had one
 1235   // implementor.
 1236   // nmethod::check_all_dependencies only works correctly if no safepoint
 1237   // can happen.
1238   NoSafepointVerifier nsv;
1239   for (DepChange::ContextStream str(changes, nsv); str.next(); ) {
1240     InstanceKlass* d = str.klass();
1241     d->mark_dependent_nmethods(deopt_scope, changes);
1242   }
1243 
1244 #ifndef PRODUCT
1245   if (VerifyDependencies) {
1246     // Object pointers are used as unique identifiers for dependency arguments. This
 1247     // is only possible if no safepoint, i.e., no GC, occurs during the verification code.
1248     dependentCheckTime.start();
1249     check_live_nmethods_dependencies(changes);
1250     dependentCheckTime.stop();
1251   }
1252 #endif
1253 }
1254 
1255 #if INCLUDE_JVMTI
1256 // RedefineClasses support for saving nmethods that are dependent on "old" methods.
1257 // We don't really expect this table to grow very large.  If it does, it can become a hashtable.
1258 static GrowableArray<nmethod*>* old_nmethod_table = nullptr;
1259 
1260 static void add_to_old_table(nmethod* c) {

1479 }
1480 PRAGMA_DIAG_POP
1481 
1482 void CodeCache::print_memory_overhead() {
1483   size_t wasted_bytes = 0;
1484   FOR_ALL_ALLOCABLE_HEAPS(heap) {
1485       CodeHeap* curr_heap = *heap;
1486       for (CodeBlob* cb = (CodeBlob*)curr_heap->first(); cb != nullptr; cb = (CodeBlob*)curr_heap->next(cb)) {
1487         HeapBlock* heap_block = ((HeapBlock*)cb) - 1;
1488         wasted_bytes += heap_block->length() * CodeCacheSegmentSize - cb->size();
1489       }
1490   }
1491   // Print bytes that are allocated in the freelist
1492   ttyLocker ttl;
1493   tty->print_cr("Number of elements in freelist: %zd",       freelists_length());
1494   tty->print_cr("Allocated in freelist:          %zdkB",  bytes_allocated_in_freelists()/K);
1495   tty->print_cr("Unused bytes in CodeBlobs:      %zdkB",  (wasted_bytes/K));
1496   tty->print_cr("Segment map size:               %zdkB",  allocated_segments()/K); // 1 byte per segment
1497 }
1498 
1499 //------------------------------------------------------------------------------------------------
1500 // Non-product version
1501 
1502 #ifndef PRODUCT
1503 
1504 void CodeCache::print_trace(const char* event, CodeBlob* cb, uint size) {
1505   if (PrintCodeCache2) {  // Need to add a new flag
1506     ResourceMark rm;
1507     if (size == 0) {
1508       int s = cb->size();
1509       assert(s >= 0, "CodeBlob size is negative: %d", s);
1510       size = (uint) s;
1511     }
1512     tty->print_cr("CodeCache %s:  addr: " INTPTR_FORMAT ", size: 0x%x", event, p2i(cb), size);
1513   }
1514 }
1515 
1516 void CodeCache::print_internals() {
1517   int nmethodCount = 0;
1518   int runtimeStubCount = 0;
1519   int upcallStubCount = 0;
1520   int adapterCount = 0;
1521   int mhAdapterCount = 0;
1522   int vtableBlobCount = 0;
1523   int deoptimizationStubCount = 0;
1524   int uncommonTrapStubCount = 0;
1525   int exceptionStubCount = 0;
1526   int safepointStubCount = 0;
1527   int bufferBlobCount = 0;
1528   int total = 0;
1529   int nmethodNotEntrant = 0;
1530   int nmethodJava = 0;
1531   int nmethodNative = 0;
1532   int max_nm_size = 0;
1533   ResourceMark rm;
1534 
1535   int i = 0;
1536   FOR_ALL_ALLOCABLE_HEAPS(heap) {
1537     if ((_nmethod_heaps->length() >= 1) && Verbose) {
1538       tty->print_cr("-- %s --", (*heap)->name());
1539     }
1540     FOR_ALL_BLOBS(cb, *heap) {
1541       total++;
1542       if (cb->is_nmethod()) {
1543         nmethod* nm = (nmethod*)cb;
1544 
1545         if (Verbose && nm->method() != nullptr) {
1546           ResourceMark rm;
1547           char *method_name = nm->method()->name_and_sig_as_C_string();
1548           tty->print("%s", method_name);
1549           if(nm->is_not_entrant()) { tty->print_cr(" not-entrant"); }
1550         }
1551 
1552         nmethodCount++;
1553 
1554         if(nm->is_not_entrant()) { nmethodNotEntrant++; }
1555         if(nm->method() != nullptr && nm->is_native_method()) { nmethodNative++; }
1556 
1557         if(nm->method() != nullptr && nm->is_java_method()) {
1558           nmethodJava++;
1559           max_nm_size = MAX2(max_nm_size, nm->size());
1560         }
1561       } else if (cb->is_runtime_stub()) {
1562         runtimeStubCount++;
1563       } else if (cb->is_upcall_stub()) {
1564         upcallStubCount++;
1565       } else if (cb->is_deoptimization_stub()) {
1566         deoptimizationStubCount++;
1567       } else if (cb->is_uncommon_trap_stub()) {
1568         uncommonTrapStubCount++;
1569       } else if (cb->is_exception_stub()) {
1570         exceptionStubCount++;

1727       FOR_ALL_BLOBS(cb, *heap) {
1728         number_of_blobs++;
1729         code_size += cb->code_size();
1730         ImmutableOopMapSet* set = cb->oop_maps();
1731         if (set != nullptr) {
1732           number_of_oop_maps += set->count();
1733           map_size           += set->nr_of_bytes();
1734         }
1735       }
1736     }
1737     tty->print_cr("OopMaps");
1738     tty->print_cr("  #blobs    = %d", number_of_blobs);
1739     tty->print_cr("  code size = %d", code_size);
1740     tty->print_cr("  #oop_maps = %d", number_of_oop_maps);
1741     tty->print_cr("  map size  = %d", map_size);
1742   }
1743 
1744 #endif // !PRODUCT
1745 }
1746 
1747 void CodeCache::print_summary(outputStream* st, bool detailed) {
1748   int full_count = 0;
1749   julong total_used = 0;
1750   julong total_max_used = 0;
1751   julong total_free = 0;
1752   julong total_size = 0;
1753   FOR_ALL_HEAPS(heap_iterator) {
1754     CodeHeap* heap = (*heap_iterator);
1755     size_t total = (heap->high_boundary() - heap->low_boundary());
1756     if (_heaps->length() >= 1) {
1757       st->print("%s:", heap->name());
1758     } else {
1759       st->print("CodeCache:");
1760     }
1761     size_t size = total/K;
1762     size_t used = (total - heap->unallocated_capacity())/K;
1763     size_t max_used = heap->max_allocated_capacity()/K;
1764     size_t free = heap->unallocated_capacity()/K;
1765     total_size += size;
1766     total_used += used;

   5  * This code is free software; you can redistribute it and/or modify it
   6  * under the terms of the GNU General Public License version 2 only, as
   7  * published by the Free Software Foundation.
   8  *
   9  * This code is distributed in the hope that it will be useful, but WITHOUT
  10  * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
  11  * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
  12  * version 2 for more details (a copy is included in the LICENSE file that
  13  * accompanied this code).
  14  *
  15  * You should have received a copy of the GNU General Public License version
  16  * 2 along with this work; if not, write to the Free Software Foundation,
  17  * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
  18  *
  19  * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
  20  * or visit www.oracle.com if you need additional information or have any
  21  * questions.
  22  *
  23  */
  24 
  25 #include "cds/cdsAccess.hpp"
  26 #include "code/codeBlob.hpp"
  27 #include "code/codeCache.hpp"
  28 #include "code/codeHeapState.hpp"
  29 #include "code/compiledIC.hpp"
  30 #include "code/dependencies.hpp"
  31 #include "code/dependencyContext.hpp"
  32 #include "code/nmethod.hpp"
  33 #include "code/pcDesc.hpp"
  34 #include "code/SCCache.hpp"
  35 #include "compiler/compilationPolicy.hpp"
  36 #include "compiler/compileBroker.hpp"
  37 #include "compiler/compilerDefinitions.inline.hpp"
  38 #include "compiler/oopMap.hpp"
  39 #include "gc/shared/barrierSetNMethod.hpp"
  40 #include "gc/shared/classUnloadingContext.hpp"
  41 #include "gc/shared/collectedHeap.hpp"
  42 #include "jfr/jfrEvents.hpp"
  43 #include "jvm_io.h"
  44 #include "logging/log.hpp"
  45 #include "logging/logStream.hpp"
  46 #include "memory/allocation.inline.hpp"
  47 #include "memory/iterator.hpp"
  48 #include "memory/memoryReserver.hpp"
  49 #include "memory/resourceArea.hpp"
  50 #include "memory/universe.hpp"
  51 #include "oops/method.inline.hpp"
  52 #include "oops/objArrayOop.hpp"
  53 #include "oops/oop.inline.hpp"
  54 #include "oops/verifyOopClosure.hpp"

 155       scopes_data_size += nm->scopes_data_size();
 156       scopes_pcs_size  += nm->scopes_pcs_size();
 157     } else {
 158       code_size        += cb->code_size();
 159     }
 160   }
 161 };
 162 
 163 // Iterate over all CodeHeaps
 164 #define FOR_ALL_HEAPS(heap) for (GrowableArrayIterator<CodeHeap*> heap = _heaps->begin(); heap != _heaps->end(); ++heap)
 165 #define FOR_ALL_ALLOCABLE_HEAPS(heap) for (GrowableArrayIterator<CodeHeap*> heap = _allocable_heaps->begin(); heap != _allocable_heaps->end(); ++heap)
 166 
 167 // Iterate over all CodeBlobs (cb) on the given CodeHeap
 168 #define FOR_ALL_BLOBS(cb, heap) for (CodeBlob* cb = first_blob(heap); cb != nullptr; cb = next_blob(heap, cb))
 169 
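      // Usage sketch (illustrative only): the pattern print_memory_overhead() and
      // print_internals() below use to walk every blob in every allocable heap.
      //
      //   FOR_ALL_ALLOCABLE_HEAPS(heap) {   // 'heap' is a GrowableArrayIterator<CodeHeap*>
      //     FOR_ALL_BLOBS(cb, *heap) {      // 'cb' visits each CodeBlob in *heap
      //       // ... inspect cb ...
      //     }
      //   }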
 170 address CodeCache::_low_bound = nullptr;
 171 address CodeCache::_high_bound = nullptr;
 172 volatile int CodeCache::_number_of_nmethods_with_dependencies = 0;
 173 ExceptionCache* volatile CodeCache::_exception_cache_purge_list = nullptr;
 174 
 175 static ReservedSpace _cds_code_space;
 176 
 177 // Initialize arrays of CodeHeap subsets
 178 GrowableArray<CodeHeap*>* CodeCache::_heaps = new(mtCode) GrowableArray<CodeHeap*> (static_cast<int>(CodeBlobType::All), mtCode);
 179 GrowableArray<CodeHeap*>* CodeCache::_nmethod_heaps = new(mtCode) GrowableArray<CodeHeap*> (static_cast<int>(CodeBlobType::All), mtCode);
 180 GrowableArray<CodeHeap*>* CodeCache::_allocable_heaps = new(mtCode) GrowableArray<CodeHeap*> (static_cast<int>(CodeBlobType::All), mtCode);
 181 
 182 static void check_min_size(const char* codeheap, size_t size, size_t required_size) {
 183   if (size < required_size) {
 184     log_debug(codecache)("Code heap (%s) size %zuK below required minimal size %zuK",
 185                          codeheap, size/K, required_size/K);
 186     err_msg title("Not enough space in %s to run VM", codeheap);
 187     err_msg message("%zuK < %zuK", size/K, required_size/K);
 188     vm_exit_during_initialization(title, message);
 189   }
 190 }
 191 
 192 struct CodeHeapInfo {
 193   size_t size;
 194   bool set;
 195   bool enabled;
 196 };
 197 
 198 static void set_size_of_unset_code_heap(CodeHeapInfo* heap, size_t available_size, size_t used_size, size_t min_size) {
 199   assert(!heap->set, "sanity");
 200   heap->size = (available_size > (used_size + min_size)) ? (available_size - used_size) : min_size;
 201 }
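      // Worked example (illustrative numbers): with available_size = 240M,
      // used_size = 20M and min_size = 4M, the unset heap gets 240M - 20M = 220M;
      // if available_size were only 22M, the guard fails and the heap falls back
      // to min_size.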
 202 
 203 void CodeCache::initialize_heaps() {
 204   CodeHeapInfo non_nmethod = {NonNMethodCodeHeapSize, FLAG_IS_CMDLINE(NonNMethodCodeHeapSize), true};
 205   CodeHeapInfo profiled = {ProfiledCodeHeapSize, FLAG_IS_CMDLINE(ProfiledCodeHeapSize), true};
 206   CodeHeapInfo non_profiled = {NonProfiledCodeHeapSize, FLAG_IS_CMDLINE(NonProfiledCodeHeapSize), true};
 207 
 208   const bool cache_size_set   = FLAG_IS_CMDLINE(ReservedCodeCacheSize);
 209   const size_t ps             = page_size(false, 8);
 210   const size_t min_size       = MAX2(os::vm_allocation_granularity(), ps);
 211   const size_t min_cache_size = CodeCacheMinimumUseSpace DEBUG_ONLY(* 3); // Make sure we have enough space for VM internal code
 212   size_t cache_size           = align_up(ReservedCodeCacheSize, min_size);
 213 
 214   // Prerequisites
 215   if (!heap_available(CodeBlobType::MethodProfiled)) {
 216     // For compatibility reasons, disabled tiered compilation overrides the
 217     // segment sizes even if they are set explicitly.
 218     non_profiled.size += profiled.size;
 219     // Profiled code heap is not available, forcibly set size to 0
 220     profiled.size = 0;
 221     profiled.set = true;
 222     profiled.enabled = false;
 223   }
 224 
 225   assert(heap_available(CodeBlobType::MethodNonProfiled), "MethodNonProfiled heap is always available for segmented code heap");
 226 
 227   size_t compiler_buffer_size = 0;
 228   COMPILER1_PRESENT(compiler_buffer_size += CompilationPolicy::c1_count() * Compiler::code_buffer_size());
 229   COMPILER2_PRESENT(compiler_buffer_size += CompilationPolicy::c2_count() * C2Compiler::initial_code_buffer_size());
 230   COMPILER2_PRESENT(compiler_buffer_size += (CompilationPolicy::c2_count() + CompilationPolicy::c3_count()) * C2Compiler::initial_code_buffer_size());
 231 
 232   if (!non_nmethod.set) {
 233     non_nmethod.size += compiler_buffer_size;
 234     // Further down, just before FLAG_SET_ERGO(), all segment sizes are
 235     // aligned down to the next lower multiple of min_size. For large page
 236     // sizes, this may result in (non_nmethod.size == 0) which is not acceptable.
 237     // Therefore, force non_nmethod.size to at least min_size.
 238     non_nmethod.size = MAX2(non_nmethod.size, min_size);
 239   }
 240 
 241   if (!profiled.set && !non_profiled.set) {
 242     non_profiled.size = profiled.size = (cache_size > non_nmethod.size + 2 * min_size) ?
 243                                         (cache_size - non_nmethod.size) / 2 : min_size;
 244   }
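        // Worked example (illustrative defaults): with cache_size = 240M and
        // non_nmethod.size = 8M, profiled and non_profiled each get
        // (240M - 8M) / 2 = 116M here.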
 245 
 246   if (profiled.set && !non_profiled.set) {
 247     set_size_of_unset_code_heap(&non_profiled, cache_size, non_nmethod.size + profiled.size, min_size);
 248   }
 249 
 250   if (!profiled.set && non_profiled.set) {

 305     if (ps < lg_ps) {
 306       log_warning(codecache)("Code cache size too small for " PROPERFMT " pages. "
 307                              "Reverting to smaller page size (" PROPERFMT ").",
 308                              PROPERFMTARGS(lg_ps), PROPERFMTARGS(ps));
 309     }
 310   }
 311 
 312   // Note: if large page support is enabled, min_size is at least the large
 313   // page size. This ensures that the code cache is covered by large pages.
 314   non_profiled.size += non_nmethod.size & alignment_mask(min_size);
 315   non_profiled.size += profiled.size & alignment_mask(min_size);
 316   non_nmethod.size = align_down(non_nmethod.size, min_size);
 317   profiled.size = align_down(profiled.size, min_size);
 318   non_profiled.size = align_down(non_profiled.size, min_size);
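        // Worked example (assuming 2M large pages, i.e. min_size = 2M): with
        // non_nmethod.size = 9M and profiled.size = 117M, the two 1M remainders
        // move into non_profiled.size first; non_nmethod and profiled are then
        // aligned down to 8M and 116M, so no reserved space is lost to rounding.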
 319 
 320   FLAG_SET_ERGO(NonNMethodCodeHeapSize, non_nmethod.size);
 321   FLAG_SET_ERGO(ProfiledCodeHeapSize, profiled.size);
 322   FLAG_SET_ERGO(NonProfiledCodeHeapSize, non_profiled.size);
 323   FLAG_SET_ERGO(ReservedCodeCacheSize, cache_size);
 324 
 325   const size_t cds_code_size = align_up(CDSAccess::get_cached_code_size(), min_size);
 326   cache_size += cds_code_size;
 327 
 328   ReservedSpace rs = reserve_heap_memory(cache_size, ps);
 329 
 330   // Register CodeHeaps with LSan as we sometimes embed pointers to malloc memory.
 331   LSAN_REGISTER_ROOT_REGION(rs.base(), rs.size());
 332 
 333   size_t offset = 0;
 334   if (cds_code_size > 0) {
 335     // FIXME: use CodeHeapInfo for this hack ...
 336     _cds_code_space = rs.partition(offset, cds_code_size);
 337     offset += cds_code_size;
 338   }
 339 
 340   if (profiled.enabled) {
 341     ReservedSpace profiled_space = rs.partition(offset, profiled.size);
 342     offset += profiled.size;
 343     // Tier 2 and tier 3 (profiled) methods
 344     add_heap(profiled_space, "CodeHeap 'profiled nmethods'", CodeBlobType::MethodProfiled);
 345   }
 346 
 347   ReservedSpace non_method_space = rs.partition(offset, non_nmethod.size);
 348   offset += non_nmethod.size;
 349   // Non-nmethods (stubs, adapters, ...)
 350   add_heap(non_method_space, "CodeHeap 'non-nmethods'", CodeBlobType::NonNMethod);
 351 
 352   if (non_profiled.enabled) {
 353     ReservedSpace non_profiled_space  = rs.partition(offset, non_profiled.size);
 354     // Tier 1 and tier 4 (non-profiled) methods and native methods
 355     add_heap(non_profiled_space, "CodeHeap 'non-profiled nmethods'", CodeBlobType::MethodNonProfiled);
 356   }
 357 }
 358 
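      // Resulting layout of the reserved region, in address order (the cds code
      // partition is present only when cds_code_size > 0):
      //
      //   _low_bound                                                  _high_bound
      //   | cds code | profiled nmethods | non-nmethods | non-profiled nmethods |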
 359 void* CodeCache::map_cached_code() {
 360   if (_cds_code_space.size() > 0 && CDSAccess::map_cached_code(_cds_code_space)) {
 361     return _cds_code_space.base();
 362   } else {
 363     return nullptr;
 364   }
 365 }
 366 
 367 size_t CodeCache::page_size(bool aligned, size_t min_pages) {
 368   return aligned ? os::page_size_for_region_aligned(ReservedCodeCacheSize, min_pages) :
 369                    os::page_size_for_region_unaligned(ReservedCodeCacheSize, min_pages);
 370 }
 371 
 372 ReservedSpace CodeCache::reserve_heap_memory(size_t size, size_t rs_ps) {
 373   // Align and reserve space for code cache
 374   const size_t rs_align = MAX2(rs_ps, os::vm_allocation_granularity());
 375   const size_t rs_size = align_up(size, rs_align);
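        // e.g. (illustrative): with 2M large pages and 64K allocation granularity,
        // rs_align = 2M, so a 245M request is rounded up to rs_size = 246M.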
 376 
 377   ReservedSpace rs = CodeMemoryReserver::reserve(rs_size, rs_align, rs_ps);
 378   if (!rs.is_reserved()) {
 379     vm_exit_during_initialization(err_msg("Could not reserve enough space for code cache (%zuK)",
 380                                           rs_size/K));
 381   }
 382 
 383   // Initialize bounds
 384   _low_bound = (address)rs.base();
 385   _high_bound = _low_bound + rs.size();
 386   return rs;

1212                             AnyObj::RESOURCE_AREA, mtInternal,
1213                             &DependencySignature::hash,
1214                             &DependencySignature::equals> DepTable;
1215 
1216   DepTable* table = new DepTable();
1217 
1218   // Iterate over live nmethods and check dependencies of all nmethods that are not
1219   // marked for deoptimization. A particular dependency is only checked once.
1220   NMethodIterator iter(NMethodIterator::not_unloading);
1221   while(iter.next()) {
1222     nmethod* nm = iter.method();
 1223     // Only check live nmethods that are not marked for deoptimization
1224     if (!nm->is_marked_for_deoptimization()) {
1225       for (Dependencies::DepStream deps(nm); deps.next(); ) {
1226         // Construct abstraction of a dependency.
1227         DependencySignature* current_sig = new DependencySignature(deps);
1228 
1229         // Determine if dependency is already checked. table->put(...) returns
1230         // 'true' if the dependency is added (i.e., was not in the hashtable).
1231         if (table->put(*current_sig, 1)) {
1232           Klass* witness = deps.check_dependency();
1233           if (witness != nullptr) {
1234             // Dependency checking failed. Print out information about the failed
1235             // dependency and finally fail with an assert. We can fail here, since
1236             // dependency checking is never done in a product build.
1237             deps.print_dependency(tty, witness, true);
1238             changes.print();
1239             nm->print();
1240             nm->print_dependencies_on(tty);
1241             assert(false, "Should have been marked for deoptimization");
1242           }
1243         }
1244       }
1245     }
1246   }
1247 }
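      // The dedup above hinges on put() returning 'true' only for a first insertion.
      // The same pattern as a standalone sketch (std::unordered_set, not HotSpot
      // code; run_expensive_check() is a hypothetical helper):
      //
      //   std::unordered_set<std::string> seen;
      //   if (seen.insert(signature).second) {  // 'second' is true only on first insert
      //     run_expensive_check();              // runs at most once per distinct signature
      //   }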
1248 #endif
1249 
1250 void CodeCache::mark_for_deoptimization(DeoptimizationScope* deopt_scope, KlassDepChange& changes) {
1251   MutexLocker mu(CodeCache_lock, Mutex::_no_safepoint_check_flag);
1252 
1253   // search the hierarchy looking for nmethods which are affected by the loading of this class
1254 
1255   // then search the interfaces this class implements looking for nmethods
 1256   // which might be dependent on the fact that an interface only had one
 1257   // implementor.
 1258   // nmethod::check_all_dependencies only works correctly if no safepoint
 1259   // can happen.
1260   NoSafepointVerifier nsv;
1261   for (DepChange::ContextStream str(changes, nsv); str.next(); ) {
1262     InstanceKlass* d = str.klass();
1263     {
1264       LogStreamHandle(Trace, dependencies) log;
1265       if (log.is_enabled()) {
1266         log.print("Processing context ");
1267         d->name()->print_value_on(&log);
1268       }
1269     }
1270     d->mark_dependent_nmethods(deopt_scope, changes);
1271   }
1272 
1273 #ifndef PRODUCT
1274   if (VerifyDependencies) {
1275     // Object pointers are used as unique identifiers for dependency arguments. This
 1276     // is only possible if no safepoint, i.e., no GC, occurs during the verification code.
1277     dependentCheckTime.start();
1278     check_live_nmethods_dependencies(changes);
1279     dependentCheckTime.stop();
1280   }
1281 #endif
1282 }
1283 
1284 #if INCLUDE_JVMTI
1285 // RedefineClasses support for saving nmethods that are dependent on "old" methods.
1286 // We don't really expect this table to grow very large.  If it does, it can become a hashtable.
1287 static GrowableArray<nmethod*>* old_nmethod_table = nullptr;
1288 
1289 static void add_to_old_table(nmethod* c) {

1508 }
1509 PRAGMA_DIAG_POP
1510 
1511 void CodeCache::print_memory_overhead() {
1512   size_t wasted_bytes = 0;
1513   FOR_ALL_ALLOCABLE_HEAPS(heap) {
1514       CodeHeap* curr_heap = *heap;
1515       for (CodeBlob* cb = (CodeBlob*)curr_heap->first(); cb != nullptr; cb = (CodeBlob*)curr_heap->next(cb)) {
1516         HeapBlock* heap_block = ((HeapBlock*)cb) - 1;
1517         wasted_bytes += heap_block->length() * CodeCacheSegmentSize - cb->size();
1518       }
1519   }
1520   // Print bytes that are allocated in the freelist
1521   ttyLocker ttl;
1522   tty->print_cr("Number of elements in freelist: %zd",       freelists_length());
1523   tty->print_cr("Allocated in freelist:          %zdkB",  bytes_allocated_in_freelists()/K);
1524   tty->print_cr("Unused bytes in CodeBlobs:      %zdkB",  (wasted_bytes/K));
1525   tty->print_cr("Segment map size:               %zdkB",  allocated_segments()/K); // 1 byte per segment
1526 }
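      // Concretely (illustrative, ignoring the HeapBlock header): with
      // CodeCacheSegmentSize = 64, a 1000-byte CodeBlob occupies 16 segments
      // (1024 bytes), so the loop above counts 24 wasted bytes for it.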
1527 
1528 static void print_helper1(outputStream* st, const char* prefix, int total, int not_entrant, int used) {
1529   if (total > 0) {
1530     double ratio = (100.0 * used) / total;
1531     st->print("%s %3d nmethods: %3d not_entrant, %d used (%2.1f%%)", prefix, total, not_entrant, used, ratio);
1532   }
1533 }
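      // e.g. print_helper1(st, "", 120, 5, 80) prints:
      //   " 120 nmethods:   5 not_entrant, 80 used (66.7%)"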
1534 
1535 void CodeCache::print_nmethod_statistics_on(outputStream* st) {
1536   int stats     [2][6][3][2] = {0};
1537   int stats_used[2][6][3][2] = {0};
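        // Dimensions, matching idx1..idx4 below: [jit=0 / scc=1]
        // [comp_level, +1 if preloaded][in_use=0, not_entrant=1, other=2][normal=0 / osr=1]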
1538 
1539   int total_osr = 0;
1540   int total_entrant = 0;
1541   int total_non_entrant = 0;
1542   int total_other = 0;
1543   int total_used = 0;
1544 
1545   NMethodIterator iter(NMethodIterator::all);
1546   while (iter.next()) {
1547     nmethod* nm = iter.method();
1548     if (nm->is_in_use()) {
1549       ++total_entrant;
1550     } else if (nm->is_not_entrant()) {
1551       ++total_non_entrant;
1552     } else {
1553       ++total_other;
1554     }
1555     if (nm->is_osr_method()) {
1556       ++total_osr;
1557     }
1558     if (nm->used()) {
1559       ++total_used;
1560     }
1561     assert(!nm->preloaded() || nm->comp_level() == CompLevel_full_optimization, "");
1562 
1563     int idx1 = nm->is_scc() ? 1 : 0;
1564     int idx2 = nm->comp_level() + (nm->preloaded() ? 1 : 0);
1565     int idx3 = (nm->is_in_use()      ? 0 :
1566                (nm->is_not_entrant() ? 1 :
1567                                        2));
1568     int idx4 = (nm->is_osr_method() ? 1 : 0);
1569     stats[idx1][idx2][idx3][idx4] += 1;
1570     if (nm->used()) {
1571       stats_used[idx1][idx2][idx3][idx4] += 1;
1572     }
1573   }
1574 
1575   st->print("Total: %d methods (%d entrant / %d not_entrant; osr: %d ",
1576                total_entrant + total_non_entrant + total_other,
1577                total_entrant, total_non_entrant, total_osr);
1578   if (total_other > 0) {
1579     st->print("; %d other", total_other);
1580   }
1581   st->print_cr(")");
1582 
1583   for (int i = CompLevel_simple; i <= CompLevel_full_optimization; i++) {
1584     int total_normal = stats[0][i][0][0] + stats[0][i][1][0] + stats[0][i][2][0];
1585     int total_osr    = stats[0][i][0][1] + stats[0][i][1][1] + stats[0][i][2][1];
1586     if (total_normal + total_osr > 0) {
1587       st->print("  Tier%d:", i);
1588       print_helper1(st,      "", total_normal, stats[0][i][1][0], stats_used[0][i][0][0] + stats_used[0][i][1][0]);
1589       print_helper1(st, "; osr:", total_osr,    stats[0][i][1][1], stats_used[0][i][0][1] + stats_used[0][i][1][1]);
1590       st->cr();
1591     }
1592   }
1593   st->cr();
1594   for (int i = CompLevel_simple; i <= CompLevel_full_optimization + 1; i++) {
1595     int total_normal = stats[1][i][0][0] + stats[1][i][1][0] + stats[1][i][2][0];
1596     int total_osr    = stats[1][i][0][1] + stats[1][i][1][1] + stats[1][i][2][1];
1597     assert(total_osr == 0, "sanity");
1598     if (total_normal + total_osr > 0) {
1599       st->print("  SC T%d:", i);
1600       print_helper1(st,      "", total_normal, stats[1][i][1][0], stats_used[1][i][0][0] + stats_used[1][i][1][0]);
1601       print_helper1(st, "; osr:", total_osr,    stats[1][i][1][1], stats_used[1][i][0][1] + stats_used[1][i][1][1]);
1602       st->cr();
1603     }
1604   }
1605 }
1606 
1607 //------------------------------------------------------------------------------------------------
1608 // Non-product version
1609 
1610 #ifndef PRODUCT
1611 
1612 void CodeCache::print_trace(const char* event, CodeBlob* cb, uint size) {
1613   if (PrintCodeCache2) {  // Need to add a new flag
1614     ResourceMark rm;
1615     if (size == 0) {
1616       int s = cb->size();
1617       assert(s >= 0, "CodeBlob size is negative: %d", s);
1618       size = (uint) s;
1619     }
1620     tty->print_cr("CodeCache %s:  addr: " INTPTR_FORMAT ", size: 0x%x", event, p2i(cb), size);
1621   }
1622 }
1623 
1624 void CodeCache::print_internals() {
1625   int nmethodCount = 0;
1626   int runtimeStubCount = 0;
1627   int upcallStubCount = 0;
1628   int adapterCount = 0;
1629   int mhAdapterCount = 0;
1630   int vtableBlobCount = 0;
1631   int deoptimizationStubCount = 0;
1632   int uncommonTrapStubCount = 0;
1633   int exceptionStubCount = 0;
1634   int safepointStubCount = 0;
1635   int bufferBlobCount = 0;
1636   int total = 0;
1637   int nmethodNotEntrant = 0;
1638   int nmethodJava = 0;
1639   int nmethodNative = 0;
1640   int max_nm_size = 0;
1641   ResourceMark rm;
1642 
1643   int i = 0;
1644   FOR_ALL_ALLOCABLE_HEAPS(heap) {
1645     int heap_total = 0;
1646     tty->print_cr("-- %s --", (*heap)->name());
1647     FOR_ALL_BLOBS(cb, *heap) {
1648       total++;
1649       heap_total++;
1650       if (cb->is_nmethod()) {
1651         nmethod* nm = (nmethod*)cb;
1652 
1653         tty->print("%4d: ", heap_total);
1654         CompileTask::print(tty, nm, (nm->is_not_entrant() ? "non-entrant" : ""), true, true);
1655 
1656         nmethodCount++;
1657 
1658         if(nm->is_not_entrant()) { nmethodNotEntrant++; }
1659         if(nm->method() != nullptr && nm->is_native_method()) { nmethodNative++; }
1660 
1661         if(nm->method() != nullptr && nm->is_java_method()) {
1662           nmethodJava++;
1663           max_nm_size = MAX2(max_nm_size, nm->size());
1664         }
1665       } else if (cb->is_runtime_stub()) {
1666         runtimeStubCount++;
1667       } else if (cb->is_upcall_stub()) {
1668         upcallStubCount++;
1669       } else if (cb->is_deoptimization_stub()) {
1670         deoptimizationStubCount++;
1671       } else if (cb->is_uncommon_trap_stub()) {
1672         uncommonTrapStubCount++;
1673       } else if (cb->is_exception_stub()) {
1674         exceptionStubCount++;

1831       FOR_ALL_BLOBS(cb, *heap) {
1832         number_of_blobs++;
1833         code_size += cb->code_size();
1834         ImmutableOopMapSet* set = cb->oop_maps();
1835         if (set != nullptr) {
1836           number_of_oop_maps += set->count();
1837           map_size           += set->nr_of_bytes();
1838         }
1839       }
1840     }
1841     tty->print_cr("OopMaps");
1842     tty->print_cr("  #blobs    = %d", number_of_blobs);
1843     tty->print_cr("  code size = %d", code_size);
1844     tty->print_cr("  #oop_maps = %d", number_of_oop_maps);
1845     tty->print_cr("  map size  = %d", map_size);
1846   }
1847 
1848 #endif // !PRODUCT
1849 }
1850 
1851 void CodeCache::print_nmethods_on(outputStream* st) {
1852   ResourceMark rm;
1853   int i = 0;
1854   FOR_ALL_ALLOCABLE_HEAPS(heap) {
1855     st->print_cr("-- %s --", (*heap)->name());
1856     FOR_ALL_BLOBS(cb, *heap) {
1857       i++;
1858       if (cb->is_nmethod()) {
1859         nmethod* nm = (nmethod*)cb;
1860         st->print("%4d: ", i);
1861         CompileTask::print(st, nm, nullptr, true, false);
1862 
1863         const char non_entrant_char = (nm->is_not_entrant() ? 'N' : ' ');
1864         st->print_cr(" %c", non_entrant_char);
1865       }
1866     }
1867   }
1868 }
1869 
1870 void CodeCache::print_summary(outputStream* st, bool detailed) {
1871   int full_count = 0;
1872   julong total_used = 0;
1873   julong total_max_used = 0;
1874   julong total_free = 0;
1875   julong total_size = 0;
1876   FOR_ALL_HEAPS(heap_iterator) {
1877     CodeHeap* heap = (*heap_iterator);
1878     size_t total = (heap->high_boundary() - heap->low_boundary());
1879     if (_heaps->length() >= 1) {
1880       st->print("%s:", heap->name());
1881     } else {
1882       st->print("CodeCache:");
1883     }
1884     size_t size = total/K;
1885     size_t used = (total - heap->unallocated_capacity())/K;
1886     size_t max_used = heap->max_allocated_capacity()/K;
1887     size_t free = heap->unallocated_capacity()/K;
1888     total_size += size;
1889     total_used += used;