src/hotspot/share/code/codeCache.cpp

   5  * This code is free software; you can redistribute it and/or modify it
   6  * under the terms of the GNU General Public License version 2 only, as
   7  * published by the Free Software Foundation.
   8  *
   9  * This code is distributed in the hope that it will be useful, but WITHOUT
  10  * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
  11  * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
  12  * version 2 for more details (a copy is included in the LICENSE file that
  13  * accompanied this code).
  14  *
  15  * You should have received a copy of the GNU General Public License version
  16  * 2 along with this work; if not, write to the Free Software Foundation,
  17  * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
  18  *
  19  * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
  20  * or visit www.oracle.com if you need additional information or have any
  21  * questions.
  22  *
  23  */
  24 
  25 #include "code/codeBlob.hpp"
  26 #include "code/codeCache.hpp"
  27 #include "code/codeHeapState.hpp"
  28 #include "code/compiledIC.hpp"
  29 #include "code/dependencies.hpp"
  30 #include "code/dependencyContext.hpp"
  31 #include "code/nmethod.hpp"
  32 #include "code/pcDesc.hpp"
  33 #include "compiler/compilationPolicy.hpp"
  34 #include "compiler/compileBroker.hpp"
  35 #include "compiler/compilerDefinitions.inline.hpp"
  36 #include "compiler/oopMap.hpp"
  37 #include "gc/shared/barrierSetNMethod.hpp"
  38 #include "gc/shared/classUnloadingContext.hpp"
  39 #include "gc/shared/collectedHeap.hpp"
  40 #include "jfr/jfrEvents.hpp"
  41 #include "jvm_io.h"
  42 #include "logging/log.hpp"
  43 #include "logging/logStream.hpp"
  44 #include "memory/allocation.inline.hpp"

 153       scopes_data_size += nm->scopes_data_size();
 154       scopes_pcs_size  += nm->scopes_pcs_size();
 155     } else {
 156       code_size        += cb->code_size();
 157     }
 158   }
 159 };
 160 
 161 // Iterate over all CodeHeaps
 162 #define FOR_ALL_HEAPS(heap) for (GrowableArrayIterator<CodeHeap*> heap = _heaps->begin(); heap != _heaps->end(); ++heap)
 163 #define FOR_ALL_ALLOCABLE_HEAPS(heap) for (GrowableArrayIterator<CodeHeap*> heap = _allocable_heaps->begin(); heap != _allocable_heaps->end(); ++heap)
 164 
 165 // Iterate over all CodeBlobs (cb) on the given CodeHeap
 166 #define FOR_ALL_BLOBS(cb, heap) for (CodeBlob* cb = first_blob(heap); cb != nullptr; cb = next_blob(heap, cb))
 167 
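// A minimal usage sketch for the two macros above (hypothetical helper, not
// part of this file; assumes it is defined inside CodeCache, where
// _allocable_heaps, first_blob() and next_blob() are visible):
//
//   static int count_allocable_blobs() {
//     int count = 0;
//     FOR_ALL_ALLOCABLE_HEAPS(heap) { // 'heap' is a GrowableArrayIterator<CodeHeap*>
//       FOR_ALL_BLOBS(cb, *heap) {    // 'cb' walks every CodeBlob* in this CodeHeap
//         count++;
//       }
//     }
//     return count;
//   }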
 168 address CodeCache::_low_bound = nullptr;
 169 address CodeCache::_high_bound = nullptr;
 170 volatile int CodeCache::_number_of_nmethods_with_dependencies = 0;
 171 ExceptionCache* volatile CodeCache::_exception_cache_purge_list = nullptr;
 172 
 173 // Initialize arrays of CodeHeap subsets
 174 GrowableArray<CodeHeap*>* CodeCache::_heaps = new(mtCode) GrowableArray<CodeHeap*> (static_cast<int>(CodeBlobType::All), mtCode);
 175 GrowableArray<CodeHeap*>* CodeCache::_nmethod_heaps = new(mtCode) GrowableArray<CodeHeap*> (static_cast<int>(CodeBlobType::All), mtCode);
 176 GrowableArray<CodeHeap*>* CodeCache::_allocable_heaps = new(mtCode) GrowableArray<CodeHeap*> (static_cast<int>(CodeBlobType::All), mtCode);
 177 
 178 static void check_min_size(const char* codeheap, size_t size, size_t required_size) {
 179   if (size < required_size) {
 180     log_debug(codecache)("Code heap (%s) size %zuK below required minimal size %zuK",
 181                          codeheap, size/K, required_size/K);
 182     err_msg title("Not enough space in %s to run VM", codeheap);
 183     err_msg message("%zuK < %zuK", size/K, required_size/K);
 184     vm_exit_during_initialization(title, message);
 185   }
 186 }
 187 
 188 struct CodeHeapInfo {
 189   size_t size;
 190   bool set;
 191   bool enabled;
 192 };
 193 
 194 static void set_size_of_unset_code_heap(CodeHeapInfo* heap, size_t available_size, size_t used_size, size_t min_size) {
 195   assert(!heap->set, "sanity");
 196   heap->size = (available_size > (used_size + min_size)) ? (available_size - used_size) : min_size;
 197 }
 198 
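// Worked example for set_size_of_unset_code_heap() (illustrative numbers
// only): with available_size = 240M, used_size = 48M and min_size = 1M,
// 240M > 49M holds and the unset heap receives 240M - 48M = 192M. With
// used_size = 239M instead, 240M > 240M fails, the heap is clamped to the
// 1M minimum, and a later size check may still abort initialization.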
 199 void CodeCache::initialize_heaps() {
 200 
 201   CodeHeapInfo non_nmethod = {NonNMethodCodeHeapSize, FLAG_IS_CMDLINE(NonNMethodCodeHeapSize), true};
 202   CodeHeapInfo profiled = {ProfiledCodeHeapSize, FLAG_IS_CMDLINE(ProfiledCodeHeapSize), true};
 203   CodeHeapInfo non_profiled = {NonProfiledCodeHeapSize, FLAG_IS_CMDLINE(NonProfiledCodeHeapSize), true};
 204 
 205   const bool cache_size_set   = FLAG_IS_CMDLINE(ReservedCodeCacheSize);
 206   const size_t ps             = page_size(false, 8);
 207   const size_t min_size       = MAX2(os::vm_allocation_granularity(), ps);
 208   const size_t min_cache_size = CodeCacheMinimumUseSpace DEBUG_ONLY(* 3); // Make sure we have enough space for VM internal code
 209   size_t cache_size           = align_up(ReservedCodeCacheSize, min_size);
 210 
 211   // Prerequisites
 212   if (!heap_available(CodeBlobType::MethodProfiled)) {
 213   // For compatibility reasons, disabling tiered compilation overrides
 214   // the segment sizes even if they are set explicitly.
 215     non_profiled.size += profiled.size;
 216     // Profiled code heap is not available, forcibly set size to 0
 217     profiled.size = 0;
 218     profiled.set = true;
 219     profiled.enabled = false;
 220   }

 301     if (ps < lg_ps) {
 302       log_warning(codecache)("Code cache size too small for " PROPERFMT " pages. "
 303                              "Reverting to smaller page size (" PROPERFMT ").",
 304                              PROPERFMTARGS(lg_ps), PROPERFMTARGS(ps));
 305     }
 306   }
 307 
 308   // Note: if large page support is enabled, min_size is at least the large
 309   // page size. This ensures that the code cache is covered by large pages.
 310   non_profiled.size += non_nmethod.size & alignment_mask(min_size);
 311   non_profiled.size += profiled.size & alignment_mask(min_size);
 312   non_nmethod.size = align_down(non_nmethod.size, min_size);
 313   profiled.size = align_down(profiled.size, min_size);
 314   non_profiled.size = align_down(non_profiled.size, min_size);
 315 
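  // Worked example of the remainder redistribution above (illustrative numbers
  // only): with min_size = 64K, non_nmethod.size = 5M + 13K and
  // profiled.size = 48M + 3K, the expression 'size & alignment_mask(min_size)'
  // extracts the 13K and 3K remainders and adds them to non_profiled.size;
  // the three align_down() calls then place every heap on a 64K boundary
  // without changing the combined total.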
 316   FLAG_SET_ERGO(NonNMethodCodeHeapSize, non_nmethod.size);
 317   FLAG_SET_ERGO(ProfiledCodeHeapSize, profiled.size);
 318   FLAG_SET_ERGO(NonProfiledCodeHeapSize, non_profiled.size);
 319   FLAG_SET_ERGO(ReservedCodeCacheSize, cache_size);
 320 
 321   ReservedSpace rs = reserve_heap_memory(cache_size, ps);
 322 
 323   // Register CodeHeaps with LSan as we sometimes embed pointers to malloc memory.
 324   LSAN_REGISTER_ROOT_REGION(rs.base(), rs.size());
 325 
 326   size_t offset = 0;
 327   if (profiled.enabled) {
 328     ReservedSpace profiled_space = rs.partition(offset, profiled.size);
 329     offset += profiled.size;
 330     // Tier 2 and tier 3 (profiled) methods
 331     add_heap(profiled_space, "CodeHeap 'profiled nmethods'", CodeBlobType::MethodProfiled);
 332   }
 333 
 334   ReservedSpace non_method_space = rs.partition(offset, non_nmethod.size);
 335   offset += non_nmethod.size;
 336   // Non-nmethods (stubs, adapters, ...)
 337   add_heap(non_method_space, "CodeHeap 'non-nmethods'", CodeBlobType::NonNMethod);
 338 
 339   if (non_profiled.enabled) {
 340     ReservedSpace non_profiled_space  = rs.partition(offset, non_profiled.size);
 341     // Tier 1 and tier 4 (non-profiled) methods and native methods
 342     add_heap(non_profiled_space, "CodeHeap 'non-profiled nmethods'", CodeBlobType::MethodNonProfiled);
 343   }
 344 }
 345 
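// Illustrative sketch of the address-space layout produced above, low to high
// (each segment exists only when enabled):
//
//   _low_bound                                                _high_bound
//   | profiled nmethods | non-nmethods | non-profiled nmethods |
//
// Keeping the non-nmethod heap in the middle bounds the branch distance from
// any nmethod to the shared stubs; see max_distance_to_non_nmethod() below.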
 346 size_t CodeCache::page_size(bool aligned, size_t min_pages) {
 347   return aligned ? os::page_size_for_region_aligned(ReservedCodeCacheSize, min_pages) :
 348                    os::page_size_for_region_unaligned(ReservedCodeCacheSize, min_pages);
 349 }
 350 
 351 ReservedSpace CodeCache::reserve_heap_memory(size_t size, size_t rs_ps) {
 352   // Align and reserve space for code cache
 353   const size_t rs_align = MAX2(rs_ps, os::vm_allocation_granularity());
 354   const size_t rs_size = align_up(size, rs_align);
 355 
 356   ReservedSpace rs = CodeMemoryReserver::reserve(rs_size, rs_align, rs_ps);
 357   if (!rs.is_reserved()) {
 358     vm_exit_during_initialization(err_msg("Could not reserve enough space for code cache (%zuK)",
 359                                           rs_size/K));
 360   }
 361 
 362   // Initialize bounds
 363   _low_bound = (address)rs.base();
 364   _high_bound = _low_bound + rs.size();
 365   return rs;

1043 size_t CodeCache::max_capacity() {
1044   size_t max_cap = 0;
1045   FOR_ALL_ALLOCABLE_HEAPS(heap) {
1046     max_cap += (*heap)->max_capacity();
1047   }
1048   return max_cap;
1049 }
1050 
1051 bool CodeCache::is_non_nmethod(address addr) {
1052   CodeHeap* blob = get_code_heap(CodeBlobType::NonNMethod);
1053   return blob->contains(addr);
1054 }
1055 
1056 size_t CodeCache::max_distance_to_non_nmethod() {
1057   if (!SegmentedCodeCache) {
1058     return ReservedCodeCacheSize;
1059   } else {
1060     CodeHeap* blob = get_code_heap(CodeBlobType::NonNMethod);
1061     // the max distance is minimized by placing the NonNMethod segment
1062     // in between MethodProfiled and MethodNonProfiled segments
1063     size_t dist1 = (size_t)blob->high() - (size_t)_low_bound;
1064     size_t dist2 = (size_t)_high_bound - (size_t)blob->low();
1065     return dist1 > dist2 ? dist1 : dist2;
1066   }
1067 }
1068 
1069 // Returns the reverse free ratio. E.g., if 25% (1/4) of the code cache
1070 // is free, reverse_free_ratio() returns 4.
 1071 // Since the code heap for each type of code blob falls forward to the next
1072 // type of code heap, return the reverse free ratio for the entire
1073 // code cache.
1074 double CodeCache::reverse_free_ratio() {
 1075   double unallocated = MAX2((double)unallocated_capacity(), 1.0); // Avoid division by 0
1076   double max = (double)max_capacity();
1077   double result = max / unallocated;
1078   assert (max >= unallocated, "Must be");
1079   assert (result >= 1.0, "reverse_free_ratio must be at least 1. It is %f", result);
1080   return result;
1081 }
1082 
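// Worked example for reverse_free_ratio() (illustrative numbers only): with
// max_capacity() = 240M and unallocated_capacity() = 60M, the result is
// 240 / 60 = 4.0, matching the 25%-free case above. As the cache fills, the
// ratio grows quickly: 240M total with 2.4M free yields 100.0.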
1083 size_t CodeCache::bytes_allocated_in_freelists() {
1084   size_t allocated_bytes = 0;

1189                             AnyObj::RESOURCE_AREA, mtInternal,
1190                             &DependencySignature::hash,
1191                             &DependencySignature::equals> DepTable;
1192 
1193   DepTable* table = new DepTable();
1194 
1195   // Iterate over live nmethods and check dependencies of all nmethods that are not
1196   // marked for deoptimization. A particular dependency is only checked once.
1197   NMethodIterator iter(NMethodIterator::not_unloading);
1198   while(iter.next()) {
1199     nmethod* nm = iter.method();
1200     // Only notify for live nmethods
1201     if (!nm->is_marked_for_deoptimization()) {
1202       for (Dependencies::DepStream deps(nm); deps.next(); ) {
1203         // Construct abstraction of a dependency.
1204         DependencySignature* current_sig = new DependencySignature(deps);
1205 
1206         // Determine if dependency is already checked. table->put(...) returns
1207         // 'true' if the dependency is added (i.e., was not in the hashtable).
1208         if (table->put(*current_sig, 1)) {
1209           if (deps.check_dependency() != nullptr) {
1210             // Dependency checking failed. Print out information about the failed
1211             // dependency and finally fail with an assert. We can fail here, since
1212             // dependency checking is never done in a product build.
1213             tty->print_cr("Failed dependency:");
1214             changes.print();
1215             nm->print();
1216             nm->print_dependencies_on(tty);
1217             assert(false, "Should have been marked for deoptimization");
1218           }
1219         }
1220       }
1221     }
1222   }
1223 }
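// The deduplication idiom above in isolation (a minimal sketch reusing the
// DepTable typedef and the loop's 'current_sig'): ResourceHashtable::put()
// returns true only when it creates a new entry, so each distinct dependency
// signature is checked at most once per invocation.
//
//   if (table->put(*current_sig, 1)) {
//     // first occurrence: run the expensive check_dependency()
//   } else {
//     // duplicate signature: already checked, skip it
//   }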
1224 #endif
1225 
1226 void CodeCache::mark_for_deoptimization(DeoptimizationScope* deopt_scope, KlassDepChange& changes) {
1227   MutexLocker mu(CodeCache_lock, Mutex::_no_safepoint_check_flag);
1228 
1229   // search the hierarchy looking for nmethods which are affected by the loading of this class
1230 
1231   // then search the interfaces this class implements looking for nmethods
 1232   // which might be dependent on the fact that an interface only had one
 1233   // implementor.
 1234   // nmethod::check_all_dependencies works correctly only if no safepoint
 1235   // can happen.
1236   NoSafepointVerifier nsv;
1237   for (DepChange::ContextStream str(changes, nsv); str.next(); ) {
1238     InstanceKlass* d = str.klass();
1239     d->mark_dependent_nmethods(deopt_scope, changes);
1240   }
1241 
1242 #ifndef PRODUCT
1243   if (VerifyDependencies) {
1244     // Object pointers are used as unique identifiers for dependency arguments. This
1245     // is only possible if no safepoint, i.e., GC occurs during the verification code.
1246     dependentCheckTime.start();
1247     check_live_nmethods_dependencies(changes);
1248     dependentCheckTime.stop();
1249   }
1250 #endif
1251 }
1252 
1253 #if INCLUDE_JVMTI
1254 // RedefineClasses support for saving nmethods that are dependent on "old" methods.
1255 // We don't really expect this table to grow very large.  If it does, it can become a hashtable.
1256 static GrowableArray<nmethod*>* old_nmethod_table = nullptr;
1257 
1258 static void add_to_old_table(nmethod* c) {

1477 }
1478 PRAGMA_DIAG_POP
1479 
1480 void CodeCache::print_memory_overhead() {
1481   size_t wasted_bytes = 0;
1482   FOR_ALL_ALLOCABLE_HEAPS(heap) {
1483       CodeHeap* curr_heap = *heap;
1484       for (CodeBlob* cb = (CodeBlob*)curr_heap->first(); cb != nullptr; cb = (CodeBlob*)curr_heap->next(cb)) {
1485         HeapBlock* heap_block = ((HeapBlock*)cb) - 1;
1486         wasted_bytes += heap_block->length() * CodeCacheSegmentSize - cb->size();
1487       }
1488   }
1489   // Print bytes that are allocated in the freelist
1490   ttyLocker ttl;
1491   tty->print_cr("Number of elements in freelist: %zd",       freelists_length());
1492   tty->print_cr("Allocated in freelist:          %zdkB",  bytes_allocated_in_freelists()/K);
1493   tty->print_cr("Unused bytes in CodeBlobs:      %zdkB",  (wasted_bytes/K));
1494   tty->print_cr("Segment map size:               %zdkB",  allocated_segments()/K); // 1 byte per segment
1495 }
1496 
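// Worked example for the wasted-bytes computation above (illustrative numbers
// only, ignoring the HeapBlock header): with CodeCacheSegmentSize = 128 and
// cb->size() = 1000, the blob occupies heap_block->length() = 8 segments,
// i.e. 1024 bytes, so 1024 - 1000 = 24 bytes count as segment-rounding waste.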
1497 //------------------------------------------------------------------------------------------------
1498 // Non-product version
1499 
1500 #ifndef PRODUCT
1501 
1502 void CodeCache::print_trace(const char* event, CodeBlob* cb, uint size) {
1503   if (PrintCodeCache2) {  // Need to add a new flag
1504     ResourceMark rm;
1505     if (size == 0) {
1506       int s = cb->size();
1507       assert(s >= 0, "CodeBlob size is negative: %d", s);
1508       size = (uint) s;
1509     }
1510     tty->print_cr("CodeCache %s:  addr: " INTPTR_FORMAT ", size: 0x%x", event, p2i(cb), size);
1511   }
1512 }
1513 
1514 void CodeCache::print_internals() {
1515   int nmethodCount = 0;
1516   int runtimeStubCount = 0;
1517   int upcallStubCount = 0;
1518   int adapterCount = 0;
1519   int mhAdapterCount = 0;
1520   int vtableBlobCount = 0;
1521   int deoptimizationStubCount = 0;
1522   int uncommonTrapStubCount = 0;
1523   int exceptionStubCount = 0;
1524   int safepointStubCount = 0;
1525   int bufferBlobCount = 0;
1526   int total = 0;
1527   int nmethodNotEntrant = 0;
1528   int nmethodJava = 0;
1529   int nmethodNative = 0;
1530   int max_nm_size = 0;
1531   ResourceMark rm;
1532 
1533   int i = 0;
1534   FOR_ALL_ALLOCABLE_HEAPS(heap) {
1535     if ((_nmethod_heaps->length() >= 1) && Verbose) {
1536       tty->print_cr("-- %s --", (*heap)->name());
1537     }
1538     FOR_ALL_BLOBS(cb, *heap) {
1539       total++;
1540       if (cb->is_nmethod()) {
1541         nmethod* nm = (nmethod*)cb;
1542 
1543         if (Verbose && nm->method() != nullptr) {
1544           ResourceMark rm;
1545           char *method_name = nm->method()->name_and_sig_as_C_string();
1546           tty->print("%s", method_name);
1547           if(nm->is_not_entrant()) { tty->print_cr(" not-entrant"); }
1548         }
1549 
1550         nmethodCount++;
1551 
1552         if(nm->is_not_entrant()) { nmethodNotEntrant++; }
1553         if(nm->method() != nullptr && nm->is_native_method()) { nmethodNative++; }
1554 
1555         if(nm->method() != nullptr && nm->is_java_method()) {
1556           nmethodJava++;
1557           max_nm_size = MAX2(max_nm_size, nm->size());
1558         }
1559       } else if (cb->is_runtime_stub()) {
1560         runtimeStubCount++;
1561       } else if (cb->is_upcall_stub()) {
1562         upcallStubCount++;
1563       } else if (cb->is_deoptimization_stub()) {
1564         deoptimizationStubCount++;
1565       } else if (cb->is_uncommon_trap_stub()) {
1566         uncommonTrapStubCount++;
1567       } else if (cb->is_exception_stub()) {
1568         exceptionStubCount++;

1725       FOR_ALL_BLOBS(cb, *heap) {
1726         number_of_blobs++;
1727         code_size += cb->code_size();
1728         ImmutableOopMapSet* set = cb->oop_maps();
1729         if (set != nullptr) {
1730           number_of_oop_maps += set->count();
1731           map_size           += set->nr_of_bytes();
1732         }
1733       }
1734     }
1735     tty->print_cr("OopMaps");
1736     tty->print_cr("  #blobs    = %d", number_of_blobs);
1737     tty->print_cr("  code size = %d", code_size);
1738     tty->print_cr("  #oop_maps = %d", number_of_oop_maps);
1739     tty->print_cr("  map size  = %d", map_size);
1740   }
1741 
1742 #endif // !PRODUCT
1743 }
1744 
1745 void CodeCache::print_summary(outputStream* st, bool detailed) {
1746   int full_count = 0;
1747   julong total_used = 0;
1748   julong total_max_used = 0;
1749   julong total_free = 0;
1750   julong total_size = 0;
1751   FOR_ALL_HEAPS(heap_iterator) {
1752     CodeHeap* heap = (*heap_iterator);
1753     size_t total = (heap->high_boundary() - heap->low_boundary());
1754     if (_heaps->length() >= 1) {
1755       st->print("%s:", heap->name());
1756     } else {
1757       st->print("CodeCache:");
1758     }
1759     size_t size = total/K;
1760     size_t used = (total - heap->unallocated_capacity())/K;
1761     size_t max_used = heap->max_allocated_capacity()/K;
1762     size_t free = heap->unallocated_capacity()/K;
1763     total_size += size;
1764     total_used += used;

   5  * This code is free software; you can redistribute it and/or modify it
   6  * under the terms of the GNU General Public License version 2 only, as
   7  * published by the Free Software Foundation.
   8  *
   9  * This code is distributed in the hope that it will be useful, but WITHOUT
  10  * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
  11  * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
  12  * version 2 for more details (a copy is included in the LICENSE file that
  13  * accompanied this code).
  14  *
  15  * You should have received a copy of the GNU General Public License version
  16  * 2 along with this work; if not, write to the Free Software Foundation,
  17  * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
  18  *
  19  * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
  20  * or visit www.oracle.com if you need additional information or have any
  21  * questions.
  22  *
  23  */
  24 
  25 #include "cds/aotCacheAccess.hpp"
  26 #include "code/codeBlob.hpp"
  27 #include "code/codeCache.hpp"
  28 #include "code/codeHeapState.hpp"
  29 #include "code/compiledIC.hpp"
  30 #include "code/dependencies.hpp"
  31 #include "code/dependencyContext.hpp"
  32 #include "code/nmethod.hpp"
  33 #include "code/pcDesc.hpp"
  34 #include "compiler/compilationPolicy.hpp"
  35 #include "compiler/compileBroker.hpp"
  36 #include "compiler/compilerDefinitions.inline.hpp"
  37 #include "compiler/oopMap.hpp"
  38 #include "gc/shared/barrierSetNMethod.hpp"
  39 #include "gc/shared/classUnloadingContext.hpp"
  40 #include "gc/shared/collectedHeap.hpp"
  41 #include "jfr/jfrEvents.hpp"
  42 #include "jvm_io.h"
  43 #include "logging/log.hpp"
  44 #include "logging/logStream.hpp"
  45 #include "memory/allocation.inline.hpp"

 154       scopes_data_size += nm->scopes_data_size();
 155       scopes_pcs_size  += nm->scopes_pcs_size();
 156     } else {
 157       code_size        += cb->code_size();
 158     }
 159   }
 160 };
 161 
 162 // Iterate over all CodeHeaps
 163 #define FOR_ALL_HEAPS(heap) for (GrowableArrayIterator<CodeHeap*> heap = _heaps->begin(); heap != _heaps->end(); ++heap)
 164 #define FOR_ALL_ALLOCABLE_HEAPS(heap) for (GrowableArrayIterator<CodeHeap*> heap = _allocable_heaps->begin(); heap != _allocable_heaps->end(); ++heap)
 165 
 166 // Iterate over all CodeBlobs (cb) on the given CodeHeap
 167 #define FOR_ALL_BLOBS(cb, heap) for (CodeBlob* cb = first_blob(heap); cb != nullptr; cb = next_blob(heap, cb))
 168 
 169 address CodeCache::_low_bound = nullptr;
 170 address CodeCache::_high_bound = nullptr;
 171 volatile int CodeCache::_number_of_nmethods_with_dependencies = 0;
 172 ExceptionCache* volatile CodeCache::_exception_cache_purge_list = nullptr;
 173 
 174 static ReservedSpace _cds_code_space;
 175 
 176 // Initialize arrays of CodeHeap subsets
 177 GrowableArray<CodeHeap*>* CodeCache::_heaps = new(mtCode) GrowableArray<CodeHeap*> (static_cast<int>(CodeBlobType::All), mtCode);
 178 GrowableArray<CodeHeap*>* CodeCache::_nmethod_heaps = new(mtCode) GrowableArray<CodeHeap*> (static_cast<int>(CodeBlobType::All), mtCode);
 179 GrowableArray<CodeHeap*>* CodeCache::_allocable_heaps = new(mtCode) GrowableArray<CodeHeap*> (static_cast<int>(CodeBlobType::All), mtCode);
 180 
 181 static void check_min_size(const char* codeheap, size_t size, size_t required_size) {
 182   if (size < required_size) {
 183     log_debug(codecache)("Code heap (%s) size %zuK below required minimal size %zuK",
 184                          codeheap, size/K, required_size/K);
 185     err_msg title("Not enough space in %s to run VM", codeheap);
 186     err_msg message("%zuK < %zuK", size/K, required_size/K);
 187     vm_exit_during_initialization(title, message);
 188   }
 189 }
 190 
 191 struct CodeHeapInfo {
 192   size_t size;
 193   bool set;
 194   bool enabled;
 195 };
 196 
 197 static void set_size_of_unset_code_heap(CodeHeapInfo* heap, size_t available_size, size_t used_size, size_t min_size) {
 198   assert(!heap->set, "sanity");
 199   heap->size = (available_size > (used_size + min_size)) ? (available_size - used_size) : min_size;
 200 }
 201 
 202 void CodeCache::initialize_heaps() {
 203   CodeHeapInfo non_nmethod = {NonNMethodCodeHeapSize, FLAG_IS_CMDLINE(NonNMethodCodeHeapSize), true};
 204   CodeHeapInfo profiled = {ProfiledCodeHeapSize, FLAG_IS_CMDLINE(ProfiledCodeHeapSize), true};
 205   CodeHeapInfo non_profiled = {NonProfiledCodeHeapSize, FLAG_IS_CMDLINE(NonProfiledCodeHeapSize), true};
 206 
 207   const bool cache_size_set   = FLAG_IS_CMDLINE(ReservedCodeCacheSize);
 208   const size_t ps             = page_size(false, 8);
 209   const size_t min_size       = MAX2(os::vm_allocation_granularity(), ps);
 210   const size_t min_cache_size = CodeCacheMinimumUseSpace DEBUG_ONLY(* 3); // Make sure we have enough space for VM internal code
 211   size_t cache_size           = align_up(ReservedCodeCacheSize, min_size);
 212 
 213   // Prerequisites
 214   if (!heap_available(CodeBlobType::MethodProfiled)) {
 215   // For compatibility reasons, disabling tiered compilation overrides
 216   // the segment sizes even if they are set explicitly.
 217     non_profiled.size += profiled.size;
 218     // Profiled code heap is not available, forcibly set size to 0
 219     profiled.size = 0;
 220     profiled.set = true;
 221     profiled.enabled = false;
 222   }

 303     if (ps < lg_ps) {
 304       log_warning(codecache)("Code cache size too small for " PROPERFMT " pages. "
 305                              "Reverting to smaller page size (" PROPERFMT ").",
 306                              PROPERFMTARGS(lg_ps), PROPERFMTARGS(ps));
 307     }
 308   }
 309 
 310   // Note: if large page support is enabled, min_size is at least the large
 311   // page size. This ensures that the code cache is covered by large pages.
 312   non_profiled.size += non_nmethod.size & alignment_mask(min_size);
 313   non_profiled.size += profiled.size & alignment_mask(min_size);
 314   non_nmethod.size = align_down(non_nmethod.size, min_size);
 315   profiled.size = align_down(profiled.size, min_size);
 316   non_profiled.size = align_down(non_profiled.size, min_size);
 317 
 318   FLAG_SET_ERGO(NonNMethodCodeHeapSize, non_nmethod.size);
 319   FLAG_SET_ERGO(ProfiledCodeHeapSize, profiled.size);
 320   FLAG_SET_ERGO(NonProfiledCodeHeapSize, non_profiled.size);
 321   FLAG_SET_ERGO(ReservedCodeCacheSize, cache_size);
 322 
 323   const size_t cds_code_size = 0;
 324   // FIXME: we should not increase CodeCache size - it affects branches.
 325   // Instead we need to create separate code heap in CodeCache for AOT code.
 326   // const size_t cds_code_size = align_up(AOTCacheAccess::get_aot_code_region_size(), min_size);
 327   // cache_size += cds_code_size;
 328 
 329   ReservedSpace rs = reserve_heap_memory(cache_size, ps);
 330 
 331   // Register CodeHeaps with LSan as we sometimes embed pointers to malloc memory.
 332   LSAN_REGISTER_ROOT_REGION(rs.base(), rs.size());
 333 
 334   size_t offset = 0;
 335   if (cds_code_size > 0) {
 336     // FIXME: use CodeHeapInfo for this hack ...
 337     _cds_code_space = rs.partition(offset, cds_code_size);
 338     offset += cds_code_size;
 339   }
 340 
 341   if (profiled.enabled) {
 342     ReservedSpace profiled_space = rs.partition(offset, profiled.size);
 343     offset += profiled.size;
 344     // Tier 2 and tier 3 (profiled) methods
 345     add_heap(profiled_space, "CodeHeap 'profiled nmethods'", CodeBlobType::MethodProfiled);
 346   }
 347 
 348   ReservedSpace non_method_space = rs.partition(offset, non_nmethod.size);
 349   offset += non_nmethod.size;
 350   // Non-nmethods (stubs, adapters, ...)
 351   add_heap(non_method_space, "CodeHeap 'non-nmethods'", CodeBlobType::NonNMethod);
 352 
 353   if (non_profiled.enabled) {
 354     ReservedSpace non_profiled_space  = rs.partition(offset, non_profiled.size);
 355     // Tier 1 and tier 4 (non-profiled) methods and native methods
 356     add_heap(non_profiled_space, "CodeHeap 'non-profiled nmethods'", CodeBlobType::MethodNonProfiled);
 357   }
 358 }
 359 
 360 void* CodeCache::map_aot_code() {
 361   if (_cds_code_space.size() > 0 && AOTCacheAccess::map_aot_code_region(_cds_code_space)) {
 362     return _cds_code_space.base();
 363   } else {
 364     return nullptr;
 365   }
 366 }
 367 
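// A minimal caller-side sketch for map_aot_code() above (hypothetical call
// site, not part of this change): mapping succeeds only when a CDS code
// region was reserved during initialize_heaps() and the AOT cache maps into it.
//
//   void* aot_base = CodeCache::map_aot_code();
//   if (aot_base != nullptr) {
//     // AOT code is now mapped at the start of the reserved code space.
//   }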
 368 size_t CodeCache::page_size(bool aligned, size_t min_pages) {
 369   return aligned ? os::page_size_for_region_aligned(ReservedCodeCacheSize, min_pages) :
 370                    os::page_size_for_region_unaligned(ReservedCodeCacheSize, min_pages);
 371 }
 372 
 373 ReservedSpace CodeCache::reserve_heap_memory(size_t size, size_t rs_ps) {
 374   // Align and reserve space for code cache
 375   const size_t rs_align = MAX2(rs_ps, os::vm_allocation_granularity());
 376   const size_t rs_size = align_up(size, rs_align);
 377 
 378   ReservedSpace rs = CodeMemoryReserver::reserve(rs_size, rs_align, rs_ps);
 379   if (!rs.is_reserved()) {
 380     vm_exit_during_initialization(err_msg("Could not reserve enough space for code cache (%zuK)",
 381                                           rs_size/K));
 382   }
 383 
 384   // Initialize bounds
 385   _low_bound = (address)rs.base();
 386   _high_bound = _low_bound + rs.size();
 387   return rs;

1065 size_t CodeCache::max_capacity() {
1066   size_t max_cap = 0;
1067   FOR_ALL_ALLOCABLE_HEAPS(heap) {
1068     max_cap += (*heap)->max_capacity();
1069   }
1070   return max_cap;
1071 }
1072 
1073 bool CodeCache::is_non_nmethod(address addr) {
1074   CodeHeap* blob = get_code_heap(CodeBlobType::NonNMethod);
1075   return blob->contains(addr);
1076 }
1077 
1078 size_t CodeCache::max_distance_to_non_nmethod() {
1079   if (!SegmentedCodeCache) {
1080     return ReservedCodeCacheSize;
1081   } else {
1082     CodeHeap* blob = get_code_heap(CodeBlobType::NonNMethod);
1083     // the max distance is minimized by placing the NonNMethod segment
1084     // in between MethodProfiled and MethodNonProfiled segments
1085     size_t dist1 = (size_t)blob->high_boundary() - (size_t)_low_bound;
1086     size_t dist2 = (size_t)_high_bound - (size_t)blob->low_boundary();
1087     return dist1 > dist2 ? dist1 : dist2;
1088   }
1089 }
1090 
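// Worked example for max_distance_to_non_nmethod() (illustrative addresses
// only): with _low_bound = 0x100000000, _high_bound = 0x10f000000 (a 240M
// cache) and the non-nmethod heap spanning [0x103000000, 0x106000000),
// dist1 = 96M and dist2 = 192M, so the method returns 192M: no code address
// is farther than that from the non-nmethod stubs.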
1091 // Returns the reverse free ratio. E.g., if 25% (1/4) of the code cache
1092 // is free, reverse_free_ratio() returns 4.
 1093 // Since the code heap for each type of code blob falls forward to the next
1094 // type of code heap, return the reverse free ratio for the entire
1095 // code cache.
1096 double CodeCache::reverse_free_ratio() {
 1097   double unallocated = MAX2((double)unallocated_capacity(), 1.0); // Avoid division by 0
1098   double max = (double)max_capacity();
1099   double result = max / unallocated;
1100   assert (max >= unallocated, "Must be");
1101   assert (result >= 1.0, "reverse_free_ratio must be at least 1. It is %f", result);
1102   return result;
1103 }
1104 
1105 size_t CodeCache::bytes_allocated_in_freelists() {
1106   size_t allocated_bytes = 0;

1211                             AnyObj::RESOURCE_AREA, mtInternal,
1212                             &DependencySignature::hash,
1213                             &DependencySignature::equals> DepTable;
1214 
1215   DepTable* table = new DepTable();
1216 
1217   // Iterate over live nmethods and check dependencies of all nmethods that are not
1218   // marked for deoptimization. A particular dependency is only checked once.
1219   NMethodIterator iter(NMethodIterator::not_unloading);
1220   while(iter.next()) {
1221     nmethod* nm = iter.method();
1222     // Only notify for live nmethods
1223     if (!nm->is_marked_for_deoptimization()) {
1224       for (Dependencies::DepStream deps(nm); deps.next(); ) {
1225         // Construct abstraction of a dependency.
1226         DependencySignature* current_sig = new DependencySignature(deps);
1227 
1228         // Determine if dependency is already checked. table->put(...) returns
1229         // 'true' if the dependency is added (i.e., was not in the hashtable).
1230         if (table->put(*current_sig, 1)) {
1231           Klass* witness = deps.check_dependency();
1232           if (witness != nullptr) {
1233             // Dependency checking failed. Print out information about the failed
1234             // dependency and finally fail with an assert. We can fail here, since
1235             // dependency checking is never done in a product build.
1236             deps.print_dependency(tty, witness, true);
1237             changes.print();
1238             nm->print();
1239             nm->print_dependencies_on(tty);
1240             assert(false, "Should have been marked for deoptimization");
1241           }
1242         }
1243       }
1244     }
1245   }
1246 }
1247 #endif
1248 
1249 void CodeCache::mark_for_deoptimization(DeoptimizationScope* deopt_scope, KlassDepChange& changes) {
1250   MutexLocker mu(CodeCache_lock, Mutex::_no_safepoint_check_flag);
1251 
1252   // search the hierarchy looking for nmethods which are affected by the loading of this class
1253 
1254   // then search the interfaces this class implements looking for nmethods
 1255   // which might be dependent on the fact that an interface only had one
 1256   // implementor.
 1257   // nmethod::check_all_dependencies works correctly only if no safepoint
 1258   // can happen.
1259   NoSafepointVerifier nsv;
1260   for (DepChange::ContextStream str(changes, nsv); str.next(); ) {
1261     InstanceKlass* d = str.klass();
1262     {
1263       LogStreamHandle(Trace, dependencies) log;
1264       if (log.is_enabled()) {
1265         log.print("Processing context ");
1266         d->name()->print_value_on(&log);
1267       }
1268     }
1269     d->mark_dependent_nmethods(deopt_scope, changes);
1270   }
1271 
1272 #ifndef PRODUCT
1273   if (VerifyDependencies) {
1274     // Object pointers are used as unique identifiers for dependency arguments. This
1275     // is only possible if no safepoint, i.e., GC occurs during the verification code.
1276     dependentCheckTime.start();
1277     check_live_nmethods_dependencies(changes);
1278     dependentCheckTime.stop();
1279   }
1280 #endif
1281 }
1282 
1283 #if INCLUDE_JVMTI
1284 // RedefineClasses support for saving nmethods that are dependent on "old" methods.
1285 // We don't really expect this table to grow very large.  If it does, it can become a hashtable.
1286 static GrowableArray<nmethod*>* old_nmethod_table = nullptr;
1287 
1288 static void add_to_old_table(nmethod* c) {

1507 }
1508 PRAGMA_DIAG_POP
1509 
1510 void CodeCache::print_memory_overhead() {
1511   size_t wasted_bytes = 0;
1512   FOR_ALL_ALLOCABLE_HEAPS(heap) {
1513       CodeHeap* curr_heap = *heap;
1514       for (CodeBlob* cb = (CodeBlob*)curr_heap->first(); cb != nullptr; cb = (CodeBlob*)curr_heap->next(cb)) {
1515         HeapBlock* heap_block = ((HeapBlock*)cb) - 1;
1516         wasted_bytes += heap_block->length() * CodeCacheSegmentSize - cb->size();
1517       }
1518   }
1519   // Print bytes that are allocated in the freelist
1520   ttyLocker ttl;
1521   tty->print_cr("Number of elements in freelist: %zd",       freelists_length());
1522   tty->print_cr("Allocated in freelist:          %zdkB",  bytes_allocated_in_freelists()/K);
1523   tty->print_cr("Unused bytes in CodeBlobs:      %zdkB",  (wasted_bytes/K));
1524   tty->print_cr("Segment map size:               %zdkB",  allocated_segments()/K); // 1 byte per segment
1525 }
1526 
1527 static void print_helper1(outputStream* st, const char* prefix, int total, int not_entrant, int used) {
1528   if (total > 0) {
1529     double ratio = (100.0 * used) / total;
1530     st->print("%s %3d nmethods: %3d not_entrant, %d used (%2.1f%%)", prefix, total, not_entrant, used, ratio);
1531   }
1532 }
1533 
1534 void CodeCache::print_nmethod_statistics_on(outputStream* st) {
1535   int stats     [2][6][3][2] = {0};
1536   int stats_used[2][6][3][2] = {0};
1537 
1538   int total_osr = 0;
1539   int total_entrant = 0;
1540   int total_non_entrant = 0;
1541   int total_other = 0;
1542   int total_used = 0;
1543 
1544   NMethodIterator iter(NMethodIterator::all);
1545   while (iter.next()) {
1546     nmethod* nm = iter.method();
1547     if (nm->is_in_use()) {
1548       ++total_entrant;
1549     } else if (nm->is_not_entrant()) {
1550       ++total_non_entrant;
1551     } else {
1552       ++total_other;
1553     }
1554     if (nm->is_osr_method()) {
1555       ++total_osr;
1556     }
1557     if (nm->used()) {
1558       ++total_used;
1559     }
1560     assert(!nm->preloaded() || nm->comp_level() == CompLevel_full_optimization, "");
1561 
1562     int idx1 = nm->is_aot() ? 1 : 0;
1563     int idx2 = nm->comp_level() + (nm->preloaded() ? 1 : 0);
1564     int idx3 = (nm->is_in_use()      ? 0 :
1565                (nm->is_not_entrant() ? 1 :
1566                                        2));
1567     int idx4 = (nm->is_osr_method() ? 1 : 0);
1568     stats[idx1][idx2][idx3][idx4] += 1;
1569     if (nm->used()) {
1570       stats_used[idx1][idx2][idx3][idx4] += 1;
1571     }
1572   }
1573 
1574   st->print("Total: %d methods (%d entrant / %d not_entrant; osr: %d ",
1575                total_entrant + total_non_entrant + total_other,
1576                total_entrant, total_non_entrant, total_osr);
1577   if (total_other > 0) {
1578     st->print("; %d other", total_other);
1579   }
1580   st->print_cr(")");
1581 
1582   for (int i = CompLevel_simple; i <= CompLevel_full_optimization; i++) {
1583     int total_normal = stats[0][i][0][0] + stats[0][i][1][0] + stats[0][i][2][0];
1584     int total_osr    = stats[0][i][0][1] + stats[0][i][1][1] + stats[0][i][2][1];
1585     if (total_normal + total_osr > 0) {
1586       st->print("  Tier%d:", i);
1587       print_helper1(st,      "", total_normal, stats[0][i][1][0], stats_used[0][i][0][0] + stats_used[0][i][1][0]);
1588       print_helper1(st, "; osr:", total_osr,    stats[0][i][1][1], stats_used[0][i][0][1] + stats_used[0][i][1][1]);
1589       st->cr();
1590     }
1591   }
1592   st->cr();
1593   for (int i = CompLevel_simple; i <= CompLevel_full_optimization + 1; i++) {
1594     int total_normal = stats[1][i][0][0] + stats[1][i][1][0] + stats[1][i][2][0];
1595     int total_osr    = stats[1][i][0][1] + stats[1][i][1][1] + stats[1][i][2][1];
1596     assert(total_osr == 0, "sanity");
1597     if (total_normal + total_osr > 0) {
1598       st->print("  AOT Code T%d:", i);
1599       print_helper1(st,      "", total_normal, stats[1][i][1][0], stats_used[1][i][0][0] + stats_used[1][i][1][0]);
1600       print_helper1(st, "; osr:", total_osr,    stats[1][i][1][1], stats_used[1][i][0][1] + stats_used[1][i][1][1]);
1601       st->cr();
1602     }
1603   }
1604 }
1605 
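// How to read the stats arrays above (illustrative): the four indices are
// [is_aot][comp_level, +1 if preloaded][in_use=0 / not_entrant=1 / other=2][is_osr].
// For example, stats[0][CompLevel_full_optimization][0][0] counts JIT-compiled
// tier-4 nmethods that are in use and not OSR methods.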
1606 //------------------------------------------------------------------------------------------------
1607 // Non-product version
1608 
1609 #ifndef PRODUCT
1610 
1611 void CodeCache::print_trace(const char* event, CodeBlob* cb, uint size) {
1612   if (PrintCodeCache2) {  // Need to add a new flag
1613     ResourceMark rm;
1614     if (size == 0) {
1615       int s = cb->size();
1616       assert(s >= 0, "CodeBlob size is negative: %d", s);
1617       size = (uint) s;
1618     }
1619     tty->print_cr("CodeCache %s:  addr: " INTPTR_FORMAT ", size: 0x%x", event, p2i(cb), size);
1620   }
1621 }
1622 
1623 void CodeCache::print_internals() {
1624   int nmethodCount = 0;
1625   int runtimeStubCount = 0;
1626   int upcallStubCount = 0;
1627   int adapterCount = 0;
1628   int mhAdapterCount = 0;
1629   int vtableBlobCount = 0;
1630   int deoptimizationStubCount = 0;
1631   int uncommonTrapStubCount = 0;
1632   int exceptionStubCount = 0;
1633   int safepointStubCount = 0;
1634   int bufferBlobCount = 0;
1635   int total = 0;
1636   int nmethodNotEntrant = 0;
1637   int nmethodJava = 0;
1638   int nmethodNative = 0;
1639   int max_nm_size = 0;
1640   ResourceMark rm;
1641 
1642   int i = 0;
1643   FOR_ALL_ALLOCABLE_HEAPS(heap) {
1644     int heap_total = 0;
1645     tty->print_cr("-- %s --", (*heap)->name());
1646     FOR_ALL_BLOBS(cb, *heap) {
1647       total++;
1648       heap_total++;
1649       if (cb->is_nmethod()) {
1650         nmethod* nm = (nmethod*)cb;
1651 
1652         tty->print("%4d: ", heap_total);
1653         CompileTask::print(tty, nm, (nm->is_not_entrant() ? "non-entrant" : ""), true, true);
1654 
1655         nmethodCount++;
1656 
1657         if(nm->is_not_entrant()) { nmethodNotEntrant++; }
1658         if(nm->method() != nullptr && nm->is_native_method()) { nmethodNative++; }
1659 
1660         if(nm->method() != nullptr && nm->is_java_method()) {
1661           nmethodJava++;
1662           max_nm_size = MAX2(max_nm_size, nm->size());
1663         }
1664       } else if (cb->is_runtime_stub()) {
1665         runtimeStubCount++;
1666       } else if (cb->is_upcall_stub()) {
1667         upcallStubCount++;
1668       } else if (cb->is_deoptimization_stub()) {
1669         deoptimizationStubCount++;
1670       } else if (cb->is_uncommon_trap_stub()) {
1671         uncommonTrapStubCount++;
1672       } else if (cb->is_exception_stub()) {
1673         exceptionStubCount++;

1830       FOR_ALL_BLOBS(cb, *heap) {
1831         number_of_blobs++;
1832         code_size += cb->code_size();
1833         ImmutableOopMapSet* set = cb->oop_maps();
1834         if (set != nullptr) {
1835           number_of_oop_maps += set->count();
1836           map_size           += set->nr_of_bytes();
1837         }
1838       }
1839     }
1840     tty->print_cr("OopMaps");
1841     tty->print_cr("  #blobs    = %d", number_of_blobs);
1842     tty->print_cr("  code size = %d", code_size);
1843     tty->print_cr("  #oop_maps = %d", number_of_oop_maps);
1844     tty->print_cr("  map size  = %d", map_size);
1845   }
1846 
1847 #endif // !PRODUCT
1848 }
1849 
1850 void CodeCache::print_nmethods_on(outputStream* st) {
1851   ResourceMark rm;
1852   int i = 0;
1853   FOR_ALL_ALLOCABLE_HEAPS(heap) {
1854     st->print_cr("-- %s --", (*heap)->name());
1855     FOR_ALL_BLOBS(cb, *heap) {
1856       i++;
1857       if (cb->is_nmethod()) {
1858         nmethod* nm = (nmethod*)cb;
1859         st->print("%4d: ", i);
1860         CompileTask::print(st, nm, nullptr, true, false);
1861 
1862         const char non_entrant_char = (nm->is_not_entrant() ? 'N' : ' ');
1863         st->print_cr(" %c", non_entrant_char);
1864       }
1865     }
1866   }
1867 }
1868 
1869 void CodeCache::print_summary(outputStream* st, bool detailed) {
1870   int full_count = 0;
1871   julong total_used = 0;
1872   julong total_max_used = 0;
1873   julong total_free = 0;
1874   julong total_size = 0;
1875   FOR_ALL_HEAPS(heap_iterator) {
1876     CodeHeap* heap = (*heap_iterator);
1877     size_t total = (heap->high_boundary() - heap->low_boundary());
1878     if (_heaps->length() >= 1) {
1879       st->print("%s:", heap->name());
1880     } else {
1881       st->print("CodeCache:");
1882     }
1883     size_t size = total/K;
1884     size_t used = (total - heap->unallocated_capacity())/K;
1885     size_t max_used = heap->max_allocated_capacity()/K;
1886     size_t free = heap->unallocated_capacity()/K;
1887     total_size += size;
1888     total_used += used;