src/hotspot/share/code/codeCache.cpp

   6  * under the terms of the GNU General Public License version 2 only, as
   7  * published by the Free Software Foundation.
   8  *
   9  * This code is distributed in the hope that it will be useful, but WITHOUT
  10  * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
  11  * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
  12  * version 2 for more details (a copy is included in the LICENSE file that
  13  * accompanied this code).
  14  *
  15  * You should have received a copy of the GNU General Public License version
  16  * 2 along with this work; if not, write to the Free Software Foundation,
  17  * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
  18  *
  19  * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
  20  * or visit www.oracle.com if you need additional information or have any
  21  * questions.
  22  *
  23  */
  24 
  25 #include "precompiled.hpp"

  26 #include "code/codeBlob.hpp"
  27 #include "code/codeCache.hpp"
  28 #include "code/codeHeapState.hpp"
  29 #include "code/compiledIC.hpp"
  30 #include "code/dependencies.hpp"
  31 #include "code/dependencyContext.hpp"
  32 #include "code/nmethod.hpp"
  33 #include "code/pcDesc.hpp"

  34 #include "compiler/compilationPolicy.hpp"
  35 #include "compiler/compileBroker.hpp"
  36 #include "compiler/compilerDefinitions.inline.hpp"
  37 #include "compiler/oopMap.hpp"
  38 #include "gc/shared/barrierSetNMethod.hpp"
  39 #include "gc/shared/classUnloadingContext.hpp"
  40 #include "gc/shared/collectedHeap.hpp"
  41 #include "jfr/jfrEvents.hpp"
  42 #include "jvm_io.h"
  43 #include "logging/log.hpp"
  44 #include "logging/logStream.hpp"
  45 #include "memory/allocation.inline.hpp"
  46 #include "memory/iterator.hpp"
  47 #include "memory/resourceArea.hpp"
  48 #include "memory/universe.hpp"
  49 #include "oops/method.inline.hpp"
  50 #include "oops/objArrayOop.hpp"
  51 #include "oops/oop.inline.hpp"
  52 #include "oops/verifyOopClosure.hpp"
  53 #include "runtime/arguments.hpp"

 154       scopes_pcs_size  += nm->scopes_pcs_size();
 155     } else {
 156       code_size        += cb->code_size();
 157     }
 158   }
 159 };
 160 
 161 // Iterate over all CodeHeaps
 162 #define FOR_ALL_HEAPS(heap) for (GrowableArrayIterator<CodeHeap*> heap = _heaps->begin(); heap != _heaps->end(); ++heap)
 163 #define FOR_ALL_NMETHOD_HEAPS(heap) for (GrowableArrayIterator<CodeHeap*> heap = _nmethod_heaps->begin(); heap != _nmethod_heaps->end(); ++heap)
 164 #define FOR_ALL_ALLOCABLE_HEAPS(heap) for (GrowableArrayIterator<CodeHeap*> heap = _allocable_heaps->begin(); heap != _allocable_heaps->end(); ++heap)
 165 
 166 // Iterate over all CodeBlobs (cb) on the given CodeHeap
 167 #define FOR_ALL_BLOBS(cb, heap) for (CodeBlob* cb = first_blob(heap); cb != nullptr; cb = next_blob(heap, cb))
 168 
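// [Editor's note, not part of the webrev] The FOR_ALL_* macros above hide a
// GrowableArrayIterator, so the loop variable is an iterator and must be
// dereferenced to reach the CodeHeap*. A minimal usage sketch, mirroring how
// print_summary() later in this file uses it:
//
//   FOR_ALL_HEAPS(heap) {
//     tty->print_cr("heap: %s", (*heap)->name());
//   }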
 169 address CodeCache::_low_bound = 0;
 170 address CodeCache::_high_bound = 0;
 171 volatile int CodeCache::_number_of_nmethods_with_dependencies = 0;
 172 ExceptionCache* volatile CodeCache::_exception_cache_purge_list = nullptr;
 173 
 174 // Initialize arrays of CodeHeap subsets
 175 GrowableArray<CodeHeap*>* CodeCache::_heaps = new(mtCode) GrowableArray<CodeHeap*> (static_cast<int>(CodeBlobType::All), mtCode);
 176 GrowableArray<CodeHeap*>* CodeCache::_compiled_heaps = new(mtCode) GrowableArray<CodeHeap*> (static_cast<int>(CodeBlobType::All), mtCode);
 177 GrowableArray<CodeHeap*>* CodeCache::_nmethod_heaps = new(mtCode) GrowableArray<CodeHeap*> (static_cast<int>(CodeBlobType::All), mtCode);
 178 GrowableArray<CodeHeap*>* CodeCache::_allocable_heaps = new(mtCode) GrowableArray<CodeHeap*> (static_cast<int>(CodeBlobType::All), mtCode);
 179 
 180 void CodeCache::check_heap_sizes(size_t non_nmethod_size, size_t profiled_size, size_t non_profiled_size, size_t cache_size, bool all_set) {
 181   size_t total_size = non_nmethod_size + profiled_size + non_profiled_size;
 182   // Prepare error message
 183   const char* error = "Invalid code heap sizes";
 184   err_msg message("NonNMethodCodeHeapSize (" SIZE_FORMAT "K) + ProfiledCodeHeapSize (" SIZE_FORMAT "K)"
 185                   " + NonProfiledCodeHeapSize (" SIZE_FORMAT "K) = " SIZE_FORMAT "K",
 186           non_nmethod_size/K, profiled_size/K, non_profiled_size/K, total_size/K);
 187 
 188   if (total_size > cache_size) {
 189     // Some code heap sizes were explicitly set: total_size must be <= cache_size
 190     message.append(" is greater than ReservedCodeCacheSize (" SIZE_FORMAT "K).", cache_size/K);
 191     vm_exit_during_initialization(error, message);
 192   } else if (all_set && total_size != cache_size) {
 193     // All code heap sizes were explicitly set: total_size must equal cache_size
 194     message.append(" is not equal to ReservedCodeCacheSize (" SIZE_FORMAT "K).", cache_size/K);
 195     vm_exit_during_initialization(error, message);
 196   }
 197 }
 198 
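// [Editor's note, not part of the webrev] The rule enforced above: the three
// heap sizes may never sum to more than the reserved cache size, and must
// match it exactly once all three are set on the command line. A standalone
// model of the predicate (plain size_t values, hypothetical name, not the
// real flag machinery):

#include <cstddef>

static bool heap_sizes_valid(size_t non_nmethod, size_t profiled,
                             size_t non_profiled, size_t cache, bool all_set) {
  const size_t total = non_nmethod + profiled + non_profiled;
  if (total > cache)             return false;  // must never exceed the reservation
  if (all_set && total != cache) return false;  // all three set: must match exactly
  return true;
}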
 199 void CodeCache::initialize_heaps() {
 200   bool non_nmethod_set      = FLAG_IS_CMDLINE(NonNMethodCodeHeapSize);
 201   bool profiled_set         = FLAG_IS_CMDLINE(ProfiledCodeHeapSize);
 202   bool non_profiled_set     = FLAG_IS_CMDLINE(NonProfiledCodeHeapSize);
 203   const size_t ps           = page_size(false, 8);
 204   const size_t min_size     = MAX2(os::vm_allocation_granularity(), ps);
 205   const size_t cache_size   = ReservedCodeCacheSize;
 206   size_t non_nmethod_size   = NonNMethodCodeHeapSize;
 207   size_t profiled_size      = ProfiledCodeHeapSize;
 208   size_t non_profiled_size  = NonProfiledCodeHeapSize;
 209   // Check if total size set via command line flags exceeds the reserved size
 210   check_heap_sizes((non_nmethod_set  ? non_nmethod_size  : min_size),
 211                    (profiled_set     ? profiled_size     : min_size),
 212                    (non_profiled_set ? non_profiled_size : min_size),
 213                    cache_size,
 214                    non_nmethod_set && profiled_set && non_profiled_set);
 215 
 216   // Determine size of compiler buffers
 217   size_t code_buffers_size = 0;
 218 #ifdef COMPILER1
 219   // C1 temporary code buffers (see Compiler::init_buffer_blob())
 220   const int c1_count = CompilationPolicy::c1_count();
 221   code_buffers_size += c1_count * Compiler::code_buffer_size();
 222 #endif
 223 #ifdef COMPILER2
 224   // C2 scratch buffers (see Compile::init_scratch_buffer_blob())
 225   const int c2_count = CompilationPolicy::c2_count();
 226   // Initial size of constant table (this may be increased if a compiled method needs more space)
 227   code_buffers_size += c2_count * C2Compiler::initial_code_buffer_size();
 228 #endif
 229 
 230   // Increase default non_nmethod_size to account for compiler buffers
 231   if (!non_nmethod_set) {
 232     non_nmethod_size += code_buffers_size;
 233   }
 234   // Calculate default CodeHeap sizes if not set by user
 235   if (!non_nmethod_set && !profiled_set && !non_profiled_set) {
 236     // Leave room for the other two parts of the code cache
 237     const size_t max_non_nmethod_size = cache_size - 2 * min_size;
 238     // Check if we have enough space for the non-nmethod code heap
 239     if (max_non_nmethod_size >= non_nmethod_size) {
 240       // Use the default value for non_nmethod_size and split the remaining
 241       // size evenly between the profiled and non-profiled code heaps
 242       size_t remaining_size = cache_size - non_nmethod_size;
 243       profiled_size = remaining_size / 2;
 244       non_profiled_size = remaining_size - profiled_size;
 245     } else {

 312   FLAG_SET_ERGO(NonNMethodCodeHeapSize, non_nmethod_size);
 313   FLAG_SET_ERGO(ProfiledCodeHeapSize, profiled_size);
 314   FLAG_SET_ERGO(NonProfiledCodeHeapSize, non_profiled_size);
 315 
 316   // Print a warning if large pages are enabled but the code cache is too small to use them
 317   if (UseLargePages) {
 318     const size_t lg_ps = page_size(false, 1);
 319     if (ps < lg_ps) {
 320       log_warning(codecache)("Code cache size too small for " PROPERFMT " pages. "
 321                              "Reverting to smaller page size (" PROPERFMT ").",
 322                              PROPERFMTARGS(lg_ps), PROPERFMTARGS(ps));
 323     }
 324   }
 325 
 326   // Note: if large page support is enabled, min_size is at least the large
 327   // page size. This ensures that the code cache is covered by large pages.
 328   non_nmethod_size = align_up(non_nmethod_size, min_size);
 329   profiled_size    = align_down(profiled_size, min_size);
 330   non_profiled_size = align_down(non_profiled_size, min_size);
 331 
 332   // Reserve one contiguous chunk of memory for CodeHeaps and split it into
 333   // parts for the individual heaps. The memory layout looks like this:
 334   // ---------- high -----------
 335   //    Non-profiled nmethods
 336   //         Non-nmethods
 337   //      Profiled nmethods
 338   // ---------- low ------------
 339   ReservedCodeSpace rs = reserve_heap_memory(cache_size, ps);
 340   ReservedSpace profiled_space      = rs.first_part(profiled_size);
 341   ReservedSpace rest                = rs.last_part(profiled_size);
 342   ReservedSpace non_method_space    = rest.first_part(non_nmethod_size);
 343   ReservedSpace non_profiled_space  = rest.last_part(non_nmethod_size);
 344 
 345   // Register CodeHeaps with LSan as we sometimes embed pointers to malloc memory.
 346   LSAN_REGISTER_ROOT_REGION(rs.base(), rs.size());
 347 
 348   // Non-nmethods (stubs, adapters, ...)
 349   add_heap(non_method_space, "CodeHeap 'non-nmethods'", CodeBlobType::NonNMethod);
 350   // Tier 2 and tier 3 (profiled) methods
 351   add_heap(profiled_space, "CodeHeap 'profiled nmethods'", CodeBlobType::MethodProfiled);
 352   // Tier 1 and tier 4 (non-profiled) methods and native methods
 353   add_heap(non_profiled_space, "CodeHeap 'non-profiled nmethods'", CodeBlobType::MethodNonProfiled);
 354 }
 355 
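// [Editor's note, not part of the webrev] first_part()/last_part() above do
// not re-reserve memory; they slice the existing reservation at a byte
// offset, which is why last_part() takes the size of the part being split
// off. A rough standalone model (hypothetical Range type, not the real
// ReservedSpace API):

#include <cstddef>

struct Range { char* base; size_t size; };

static Range first_part(Range r, size_t split) { return { r.base, split }; }
static Range last_part (Range r, size_t split) { return { r.base + split, r.size - split }; }

// Carving order matching the layout comment: profiled | non-nmethods | non-profiled
static void carve(Range rs, size_t profiled_size, size_t non_nmethod_size) {
  Range profiled     = first_part(rs, profiled_size);
  Range rest         = last_part (rs, profiled_size);
  Range non_method   = first_part(rest, non_nmethod_size);
  Range non_profiled = last_part (rest, non_nmethod_size);
  (void)profiled; (void)non_method; (void)non_profiled;  // illustration only
}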
 356 size_t CodeCache::page_size(bool aligned, size_t min_pages) {
 357   return aligned ? os::page_size_for_region_aligned(ReservedCodeCacheSize, min_pages) :
 358                    os::page_size_for_region_unaligned(ReservedCodeCacheSize, min_pages);
 359 }
 360 
 361 ReservedCodeSpace CodeCache::reserve_heap_memory(size_t size, size_t rs_ps) {
 362   // Align and reserve space for code cache
 363   const size_t rs_align = MAX2(rs_ps, os::vm_allocation_granularity());
 364   const size_t rs_size = align_up(size, rs_align);
 365   ReservedCodeSpace rs(rs_size, rs_align, rs_ps);
 366   if (!rs.is_reserved()) {
 367     vm_exit_during_initialization(err_msg("Could not reserve enough space for code cache (" SIZE_FORMAT "K)",
 368                                           rs_size/K));
 369   }
 370 
 371   // Initialize bounds
 372   _low_bound = (address)rs.base();
 373   _high_bound = _low_bound + rs.size();
 374   return rs;
 375 }
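// [Editor's note, not part of the webrev] The reservation above is rounded
// up to the larger of the requested page size and the allocation
// granularity, both powers of two. A standalone restatement of that
// arithmetic (hypothetical name):

#include <cstddef>

static size_t round_reservation(size_t size, size_t page, size_t granularity) {
  const size_t align = page > granularity ? page : granularity;  // MAX2(rs_ps, granularity)
  return (size + align - 1) & ~(align - 1);                      // align_up (power-of-two align)
}
// e.g. with 2 MiB large pages: a 241 MiB request rounds up to 242 MiB.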

1209                             AnyObj::RESOURCE_AREA, mtInternal,
1210                             &DependencySignature::hash,
1211                             &DependencySignature::equals> DepTable;
1212 
1213   DepTable* table = new DepTable();
1214 
1215   // Iterate over live nmethods and check dependencies of all nmethods that are not
1216   // marked for deoptimization. A particular dependency is only checked once.
1217   NMethodIterator iter(NMethodIterator::only_not_unloading);
1218   while(iter.next()) {
1219     nmethod* nm = iter.method();
1220     // Only notify for live nmethods
1221     if (!nm->is_marked_for_deoptimization()) {
1222       for (Dependencies::DepStream deps(nm); deps.next(); ) {
1223         // Construct abstraction of a dependency.
1224         DependencySignature* current_sig = new DependencySignature(deps);
1225 
1226         // Determine if dependency is already checked. table->put(...) returns
1227         // 'true' if the dependency is added (i.e., was not in the hashtable).
1228         if (table->put(*current_sig, 1)) {
1229           if (deps.check_dependency() != nullptr) {
1230             // Dependency checking failed. Print out information about the failed
1231             // dependency and finally fail with an assert. We can fail here, since
1232             // dependency checking is never done in a product build.
1233             tty->print_cr("Failed dependency:");
1234             changes.print();
1235             nm->print();
1236             nm->print_dependencies_on(tty);
1237             assert(false, "Should have been marked for deoptimization");
1238           }
1239         }
1240       }
1241     }
1242   }
1243 }
1244 #endif
1245 
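// [Editor's note, not part of the webrev] The table->put() idiom above is an
// insert-and-test dedup: put() reports whether the key was new, so the
// expensive check runs once per distinct signature. The same pattern with
// the standard library (std::unordered_set standing in for HotSpot's
// ResourceHashtable):

#include <string>
#include <unordered_set>
#include <vector>

static void check_each_once(const std::vector<std::string>& signatures) {
  std::unordered_set<std::string> seen;
  for (const std::string& sig : signatures) {
    if (seen.insert(sig).second) {  // true only on first insertion
      // ... run the expensive dependency check here, once per distinct signature
    }
  }
}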
1246 void CodeCache::mark_for_deoptimization(DeoptimizationScope* deopt_scope, KlassDepChange& changes) {
1247   MutexLocker mu(CodeCache_lock, Mutex::_no_safepoint_check_flag);
1248 
1249   // Search the class hierarchy for nmethods which are affected by the loading of this class.
1250 
1251   // Then search the interfaces this class implements, looking for nmethods
1252   // which might be dependent on the fact that an interface previously had
1253   // only one implementor.
1254   // nmethod::check_all_dependencies works correctly only if no safepoint
1255   // can happen.
1256   NoSafepointVerifier nsv;
1257   for (DepChange::ContextStream str(changes, nsv); str.next(); ) {
1258     InstanceKlass* d = str.klass();
1259     d->mark_dependent_nmethods(deopt_scope, changes);
1260   }
1261 
1262 #ifndef PRODUCT
1263   if (VerifyDependencies) {
1264     // Object pointers are used as unique identifiers for dependency arguments. This
1265     // is only possible if no safepoint, i.e., no GC, occurs during verification.
1266     dependentCheckTime.start();
1267     check_live_nmethods_dependencies(changes);
1268     dependentCheckTime.stop();
1269   }
1270 #endif
1271 }
1272 
1273 CompiledMethod* CodeCache::find_compiled(void* start) {
1274   CodeBlob *cb = find_blob(start);
1275   assert(cb == nullptr || cb->is_compiled(), "did not find a compiled method");
1276   return (CompiledMethod*)cb;
1277 }
1278 

1503 }
1504 PRAGMA_DIAG_POP
1505 
1506 void CodeCache::print_memory_overhead() {
1507   size_t wasted_bytes = 0;
1508   FOR_ALL_ALLOCABLE_HEAPS(heap) {
1509       CodeHeap* curr_heap = *heap;
1510       for (CodeBlob* cb = (CodeBlob*)curr_heap->first(); cb != nullptr; cb = (CodeBlob*)curr_heap->next(cb)) {
1511         HeapBlock* heap_block = ((HeapBlock*)cb) - 1;
1512         wasted_bytes += heap_block->length() * CodeCacheSegmentSize - cb->size();
1513       }
1514   }
1515   // Print bytes that are allocated in the freelist
1516   ttyLocker ttl;
1517   tty->print_cr("Number of elements in freelist: " SSIZE_FORMAT,       freelists_length());
1518   tty->print_cr("Allocated in freelist:          " SSIZE_FORMAT "kB",  bytes_allocated_in_freelists()/K);
1519   tty->print_cr("Unused bytes in CodeBlobs:      " SSIZE_FORMAT "kB",  (wasted_bytes/K));
1520   tty->print_cr("Segment map size:               " SSIZE_FORMAT "kB",  allocated_segments()/K); // 1 byte per segment
1521 }
1522 
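// [Editor's note, not part of the webrev] The waste measured above is
// round-up slack: each CodeBlob occupies a whole number of
// CodeCacheSegmentSize segments (HeapBlock header included), so the unused
// tail of the last segment is wasted. The arithmetic, header ignored for
// simplicity:

#include <cstddef>

static size_t blob_waste(size_t blob_size, size_t segment_size) {
  const size_t segments = (blob_size + segment_size - 1) / segment_size;  // round up
  return segments * segment_size - blob_size;  // unused tail of the last segment
}
// e.g. blob_waste(1000, 128) == 24: eight 128-byte segments hold 1024 bytes.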
1523 //------------------------------------------------------------------------------------------------
1524 // Non-product version
1525 
1526 #ifndef PRODUCT
1527 
1528 void CodeCache::print_trace(const char* event, CodeBlob* cb, uint size) {
1529   if (PrintCodeCache2) {  // Need to add a new flag
1530     ResourceMark rm;
1531     if (size == 0) {
1532       int s = cb->size();
1533       assert(s >= 0, "CodeBlob size is negative: %d", s);
1534       size = (uint) s;
1535     }
1536     tty->print_cr("CodeCache %s:  addr: " INTPTR_FORMAT ", size: 0x%x", event, p2i(cb), size);
1537   }
1538 }
1539 
1540 void CodeCache::print_internals() {
1541   int nmethodCount = 0;
1542   int runtimeStubCount = 0;
1543   int adapterCount = 0;
1544   int deoptimizationStubCount = 0;
1545   int uncommonTrapStubCount = 0;
1546   int bufferBlobCount = 0;
1547   int total = 0;
1548   int nmethodNotEntrant = 0;
1549   int nmethodJava = 0;
1550   int nmethodNative = 0;
1551   int max_nm_size = 0;
1552   ResourceMark rm;
1553 
1554   int i = 0;
1555   FOR_ALL_ALLOCABLE_HEAPS(heap) {
1556     if ((_nmethod_heaps->length() >= 1) && Verbose) {
1557       tty->print_cr("-- %s --", (*heap)->name());
1558     }
1559     FOR_ALL_BLOBS(cb, *heap) {
1560       total++;
1561       if (cb->is_nmethod()) {
1562         nmethod* nm = (nmethod*)cb;
1563 
1564         if (Verbose && nm->method() != nullptr) {
1565           ResourceMark rm;
1566           char *method_name = nm->method()->name_and_sig_as_C_string();
1567           tty->print("%s", method_name);
1568           if(nm->is_not_entrant()) { tty->print_cr(" not-entrant"); }
1569         }
1570 
1571         nmethodCount++;
1572 
1573         if(nm->is_not_entrant()) { nmethodNotEntrant++; }
1574         if(nm->method() != nullptr && nm->is_native_method()) { nmethodNative++; }
1575 
1576         if(nm->method() != nullptr && nm->is_java_method()) {
1577           nmethodJava++;
1578           max_nm_size = MAX2(max_nm_size, nm->size());
1579         }
1580       } else if (cb->is_runtime_stub()) {
1581         runtimeStubCount++;
1582       } else if (cb->is_deoptimization_stub()) {
1583         deoptimizationStubCount++;
1584       } else if (cb->is_uncommon_trap_stub()) {
1585         uncommonTrapStubCount++;
1586       } else if (cb->is_adapter_blob()) {
1587         adapterCount++;
1588       } else if (cb->is_buffer_blob()) {
1589         bufferBlobCount++;

1711       FOR_ALL_BLOBS(cb, *heap) {
1712         number_of_blobs++;
1713         code_size += cb->code_size();
1714         ImmutableOopMapSet* set = cb->oop_maps();
1715         if (set != nullptr) {
1716           number_of_oop_maps += set->count();
1717           map_size           += set->nr_of_bytes();
1718         }
1719       }
1720     }
1721     tty->print_cr("OopMaps");
1722     tty->print_cr("  #blobs    = %d", number_of_blobs);
1723     tty->print_cr("  code size = %d", code_size);
1724     tty->print_cr("  #oop_maps = %d", number_of_oop_maps);
1725     tty->print_cr("  map size  = %d", map_size);
1726   }
1727 
1728 #endif // !PRODUCT
1729 }
1730 
1731 void CodeCache::print_summary(outputStream* st, bool detailed) {
1732   int full_count = 0;
1733   julong total_used = 0;
1734   julong total_max_used = 0;
1735   julong total_free = 0;
1736   julong total_size = 0;
1737   FOR_ALL_HEAPS(heap_iterator) {
1738     CodeHeap* heap = (*heap_iterator);
1739     size_t total = (heap->high_boundary() - heap->low_boundary());
1740     if (_heaps->length() >= 1) {
1741       st->print("%s:", heap->name());
1742     } else {
1743       st->print("CodeCache:");
1744     }
1745     size_t size = total/K;
1746     size_t used = (total - heap->unallocated_capacity())/K;
1747     size_t max_used = heap->max_allocated_capacity()/K;
1748     size_t free = heap->unallocated_capacity()/K;
1749     total_size += size;
1750     total_used += used;

src/hotspot/share/code/codeCache.cpp (updated version)

   6  * under the terms of the GNU General Public License version 2 only, as
   7  * published by the Free Software Foundation.
   8  *
   9  * This code is distributed in the hope that it will be useful, but WITHOUT
  10  * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
  11  * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
  12  * version 2 for more details (a copy is included in the LICENSE file that
  13  * accompanied this code).
  14  *
  15  * You should have received a copy of the GNU General Public License version
  16  * 2 along with this work; if not, write to the Free Software Foundation,
  17  * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
  18  *
  19  * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
  20  * or visit www.oracle.com if you need additional information or have any
  21  * questions.
  22  *
  23  */
  24 
  25 #include "precompiled.hpp"
  26 #include "cds/cdsAccess.hpp"
  27 #include "code/codeBlob.hpp"
  28 #include "code/codeCache.hpp"
  29 #include "code/codeHeapState.hpp"
  30 #include "code/compiledIC.hpp"
  31 #include "code/dependencies.hpp"
  32 #include "code/dependencyContext.hpp"
  33 #include "code/nmethod.hpp"
  34 #include "code/pcDesc.hpp"
  35 #include "code/SCCache.hpp"
  36 #include "compiler/compilationPolicy.hpp"
  37 #include "compiler/compileBroker.hpp"
  38 #include "compiler/compilerDefinitions.inline.hpp"
  39 #include "compiler/oopMap.hpp"
  40 #include "gc/shared/barrierSetNMethod.hpp"
  41 #include "gc/shared/classUnloadingContext.hpp"
  42 #include "gc/shared/collectedHeap.hpp"
  43 #include "jfr/jfrEvents.hpp"
  44 #include "jvm_io.h"
  45 #include "logging/log.hpp"
  46 #include "logging/logStream.hpp"
  47 #include "memory/allocation.inline.hpp"
  48 #include "memory/iterator.hpp"
  49 #include "memory/resourceArea.hpp"
  50 #include "memory/universe.hpp"
  51 #include "oops/method.inline.hpp"
  52 #include "oops/objArrayOop.hpp"
  53 #include "oops/oop.inline.hpp"
  54 #include "oops/verifyOopClosure.hpp"
  55 #include "runtime/arguments.hpp"

 156       scopes_pcs_size  += nm->scopes_pcs_size();
 157     } else {
 158       code_size        += cb->code_size();
 159     }
 160   }
 161 };
 162 
 163 // Iterate over all CodeHeaps
 164 #define FOR_ALL_HEAPS(heap) for (GrowableArrayIterator<CodeHeap*> heap = _heaps->begin(); heap != _heaps->end(); ++heap)
 165 #define FOR_ALL_NMETHOD_HEAPS(heap) for (GrowableArrayIterator<CodeHeap*> heap = _nmethod_heaps->begin(); heap != _nmethod_heaps->end(); ++heap)
 166 #define FOR_ALL_ALLOCABLE_HEAPS(heap) for (GrowableArrayIterator<CodeHeap*> heap = _allocable_heaps->begin(); heap != _allocable_heaps->end(); ++heap)
 167 
 168 // Iterate over all CodeBlobs (cb) on the given CodeHeap
 169 #define FOR_ALL_BLOBS(cb, heap) for (CodeBlob* cb = first_blob(heap); cb != nullptr; cb = next_blob(heap, cb))
 170 
 171 address CodeCache::_low_bound = 0;
 172 address CodeCache::_high_bound = 0;
 173 volatile int CodeCache::_number_of_nmethods_with_dependencies = 0;
 174 ExceptionCache* volatile CodeCache::_exception_cache_purge_list = nullptr;
 175 
 176 static ReservedSpace _cds_code_space;
 177 
 178 // Initialize arrays of CodeHeap subsets
 179 GrowableArray<CodeHeap*>* CodeCache::_heaps = new(mtCode) GrowableArray<CodeHeap*> (static_cast<int>(CodeBlobType::All), mtCode);
 180 GrowableArray<CodeHeap*>* CodeCache::_compiled_heaps = new(mtCode) GrowableArray<CodeHeap*> (static_cast<int>(CodeBlobType::All), mtCode);
 181 GrowableArray<CodeHeap*>* CodeCache::_nmethod_heaps = new(mtCode) GrowableArray<CodeHeap*> (static_cast<int>(CodeBlobType::All), mtCode);
 182 GrowableArray<CodeHeap*>* CodeCache::_allocable_heaps = new(mtCode) GrowableArray<CodeHeap*> (static_cast<int>(CodeBlobType::All), mtCode);
 183 
 184 void CodeCache::check_heap_sizes(size_t non_nmethod_size, size_t profiled_size, size_t non_profiled_size, size_t cache_size, bool all_set) {
 185   size_t total_size = non_nmethod_size + profiled_size + non_profiled_size;
 186   // Prepare error message
 187   const char* error = "Invalid code heap sizes";
 188   err_msg message("NonNMethodCodeHeapSize (" SIZE_FORMAT "K) + ProfiledCodeHeapSize (" SIZE_FORMAT "K)"
 189                   " + NonProfiledCodeHeapSize (" SIZE_FORMAT "K) = " SIZE_FORMAT "K",
 190           non_nmethod_size/K, profiled_size/K, non_profiled_size/K, total_size/K);
 191 
 192   if (total_size > cache_size) {
 193     // Some code heap sizes were explicitly set: total_size must be <= cache_size
 194     message.append(" is greater than ReservedCodeCacheSize (" SIZE_FORMAT "K).", cache_size/K);
 195     vm_exit_during_initialization(error, message);
 196   } else if (all_set && total_size != cache_size) {
 197     // All code heap sizes were explicitly set: total_size must equal cache_size
 198     message.append(" is not equal to ReservedCodeCacheSize (" SIZE_FORMAT "K).", cache_size/K);
 199     vm_exit_during_initialization(error, message);
 200   }
 201 }
 202 
 203 void CodeCache::initialize_heaps() {
 204   bool non_nmethod_set      = FLAG_IS_CMDLINE(NonNMethodCodeHeapSize);
 205   bool profiled_set         = FLAG_IS_CMDLINE(ProfiledCodeHeapSize);
 206   bool non_profiled_set     = FLAG_IS_CMDLINE(NonProfiledCodeHeapSize);
 207   const size_t ps           = page_size(false, 8);
 208   const size_t min_size     = MAX2(os::vm_allocation_granularity(), ps);
 209   size_t cache_size         = ReservedCodeCacheSize;
 210   size_t non_nmethod_size   = NonNMethodCodeHeapSize;
 211   size_t profiled_size      = ProfiledCodeHeapSize;
 212   size_t non_profiled_size  = NonProfiledCodeHeapSize;
 213   // Check if total size set via command line flags exceeds the reserved size
 214   check_heap_sizes((non_nmethod_set  ? non_nmethod_size  : min_size),
 215                    (profiled_set     ? profiled_size     : min_size),
 216                    (non_profiled_set ? non_profiled_size : min_size),
 217                    cache_size,
 218                    non_nmethod_set && profiled_set && non_profiled_set);
 219 
 220   // Determine size of compiler buffers
 221   size_t code_buffers_size = 0;
 222 #ifdef COMPILER1
 223   // C1 temporary code buffers (see Compiler::init_buffer_blob())
 224   const int c1_count = CompilationPolicy::c1_count();
 225   code_buffers_size += c1_count * Compiler::code_buffer_size();
 226 #endif
 227 #ifdef COMPILER2
 228   // C2 scratch buffers (see Compile::init_scratch_buffer_blob())
 229   const int c2_count = CompilationPolicy::c2_count() + CompilationPolicy::c3_count();
 230   // Initial size of constant table (this may be increased if a compiled method needs more space)
 231   code_buffers_size += c2_count * C2Compiler::initial_code_buffer_size();
 232 #endif
 233 
 234   // Increase default non_nmethod_size to account for compiler buffers
 235   if (!non_nmethod_set) {
 236     non_nmethod_size += code_buffers_size;
 237   }
 238   // Calculate default CodeHeap sizes if not set by user
 239   if (!non_nmethod_set && !profiled_set && !non_profiled_set) {
 240     // Leave room for the other two parts of the code cache
 241     const size_t max_non_nmethod_size = cache_size - 2 * min_size;
 242     // Check if we have enough space for the non-nmethod code heap
 243     if (max_non_nmethod_size >= non_nmethod_size) {
 244       // Use the default value for non_nmethod_size and split the remaining
 245       // size evenly between the profiled and non-profiled code heaps
 246       size_t remaining_size = cache_size - non_nmethod_size;
 247       profiled_size = remaining_size / 2;
 248       non_profiled_size = remaining_size - profiled_size;
 249     } else {

 316   FLAG_SET_ERGO(NonNMethodCodeHeapSize, non_nmethod_size);
 317   FLAG_SET_ERGO(ProfiledCodeHeapSize, profiled_size);
 318   FLAG_SET_ERGO(NonProfiledCodeHeapSize, non_profiled_size);
 319 
 320   // Print a warning if large pages are enabled but the code cache is too small to use them
 321   if (UseLargePages) {
 322     const size_t lg_ps = page_size(false, 1);
 323     if (ps < lg_ps) {
 324       log_warning(codecache)("Code cache size too small for " PROPERFMT " pages. "
 325                              "Reverting to smaller page size (" PROPERFMT ").",
 326                              PROPERFMTARGS(lg_ps), PROPERFMTARGS(ps));
 327     }
 328   }
 329 
 330   // Note: if large page support is enabled, min_size is at least the large
 331   // page size. This ensures that the code cache is covered by large pages.
 332   non_nmethod_size = align_up(non_nmethod_size, min_size);
 333   profiled_size    = align_down(profiled_size, min_size);
 334   non_profiled_size = align_down(non_profiled_size, min_size);
 335 
 336   const size_t cds_code_size = align_up(CDSAccess::get_cached_code_size(), min_size);
 337   cache_size += cds_code_size;
 338 
 339   // Reserve one contiguous chunk of memory for CodeHeaps and split it up:
 340   // ---------- high -----------
 341   //    Non-profiled nmethods
 342   //         Non-nmethods
 343   //      Profiled nmethods
 344   //      Cached code (CDS)
 345   // ---------- low ------------
 346   ReservedCodeSpace rs = reserve_heap_memory(cache_size, ps);
 347   _cds_code_space                   = rs.first_part(cds_code_size);
 348   ReservedSpace rest                = rs.last_part(cds_code_size);
 349   ReservedSpace profiled_space      = rest.first_part(profiled_size);
 350   ReservedSpace rest2               = rest.last_part(profiled_size);
 351   ReservedSpace non_method_space    = rest2.first_part(non_nmethod_size);
 352   ReservedSpace non_profiled_space  = rest2.last_part(non_nmethod_size);
 353 
 354   // Register CodeHeaps with LSan as we sometimes embed pointers to malloc memory.
 355   LSAN_REGISTER_ROOT_REGION(rs.base(), rs.size());
 356 
 357   // Non-nmethods (stubs, adapters, ...)
 358   add_heap(non_method_space, "CodeHeap 'non-nmethods'", CodeBlobType::NonNMethod);
 359   // Tier 2 and tier 3 (profiled) methods
 360   add_heap(profiled_space, "CodeHeap 'profiled nmethods'", CodeBlobType::MethodProfiled);
 361   // Tier 1 and tier 4 (non-profiled) methods and native methods
 362   add_heap(non_profiled_space, "CodeHeap 'non-profiled nmethods'", CodeBlobType::MethodNonProfiled);
 363 }
 364 
 365 void* CodeCache::map_cached_code() {
 366   if (_cds_code_space.size() > 0 && CDSAccess::map_cached_code(_cds_code_space)) {
 367     return _cds_code_space.base();
 368   } else {
 369     return nullptr;
 370   }
 371 }
 372 
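// [Editor's note, not part of the webrev] map_cached_code() above either
// maps the archived code into the space carved out at the low end of the
// reservation and returns its base, or returns nullptr. Expected call
// pattern (hypothetical caller; the real one is outside this file):
//
//   void* base = CodeCache::map_cached_code();
//   if (base != nullptr) {
//     // archived code is now addressable inside the code cache reservation
//   } else {
//     // no cached code, or mapping failed: plain JIT-only startup
//   }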
 373 size_t CodeCache::page_size(bool aligned, size_t min_pages) {
 374   return aligned ? os::page_size_for_region_aligned(ReservedCodeCacheSize, min_pages) :
 375                    os::page_size_for_region_unaligned(ReservedCodeCacheSize, min_pages);
 376 }
 377 
 378 ReservedCodeSpace CodeCache::reserve_heap_memory(size_t size, size_t rs_ps) {
 379   // Align and reserve space for code cache
 380   const size_t rs_align = MAX2(rs_ps, os::vm_allocation_granularity());
 381   const size_t rs_size = align_up(size, rs_align);
 382   ReservedCodeSpace rs(rs_size, rs_align, rs_ps);
 383   if (!rs.is_reserved()) {
 384     vm_exit_during_initialization(err_msg("Could not reserve enough space for code cache (" SIZE_FORMAT "K)",
 385                                           rs_size/K));
 386   }
 387 
 388   // Initialize bounds
 389   _low_bound = (address)rs.base();
 390   _high_bound = _low_bound + rs.size();
 391   return rs;
 392 }

1226                             AnyObj::RESOURCE_AREA, mtInternal,
1227                             &DependencySignature::hash,
1228                             &DependencySignature::equals> DepTable;
1229 
1230   DepTable* table = new DepTable();
1231 
1232   // Iterate over live nmethods and check dependencies of all nmethods that are not
1233   // marked for deoptimization. A particular dependency is only checked once.
1234   NMethodIterator iter(NMethodIterator::only_not_unloading);
1235   while(iter.next()) {
1236     nmethod* nm = iter.method();
1237     // Only notify for live nmethods
1238     if (!nm->is_marked_for_deoptimization()) {
1239       for (Dependencies::DepStream deps(nm); deps.next(); ) {
1240         // Construct abstraction of a dependency.
1241         DependencySignature* current_sig = new DependencySignature(deps);
1242 
1243         // Determine if dependency is already checked. table->put(...) returns
1244         // 'true' if the dependency is added (i.e., was not in the hashtable).
1245         if (table->put(*current_sig, 1)) {
1246           Klass* witness = deps.check_dependency();
1247           if (witness != nullptr) {
1248             // Dependency checking failed. Print out information about the failed
1249             // dependency and finally fail with an assert. We can fail here, since
1250             // dependency checking is never done in a product build.
1251             deps.print_dependency(tty, witness, true);
1252             changes.print();
1253             nm->print();
1254             nm->print_dependencies_on(tty);
1255             assert(false, "Should have been marked for deoptimization");
1256           }
1257         }
1258       }
1259     }
1260   }
1261 }
1262 #endif
1263 
1264 void CodeCache::mark_for_deoptimization(DeoptimizationScope* deopt_scope, KlassDepChange& changes) {
1265   MutexLocker mu(CodeCache_lock, Mutex::_no_safepoint_check_flag);
1266 
1267   // Search the class hierarchy for nmethods which are affected by the loading of this class.
1268 
1269   // Then search the interfaces this class implements, looking for nmethods
1270   // which might be dependent on the fact that an interface previously had
1271   // only one implementor.
1272   // nmethod::check_all_dependencies works correctly only if no safepoint
1273   // can happen.
1274   NoSafepointVerifier nsv;
1275   for (DepChange::ContextStream str(changes, nsv); str.next(); ) {
1276     InstanceKlass* d = str.klass();
1277     {
1278       LogStreamHandle(Trace, dependencies) log;
1279       if (log.is_enabled()) {
1280         log.print("Processing context ");
1281         d->name()->print_value_on(&log);
1282       }
1283     }
1284     d->mark_dependent_nmethods(deopt_scope, changes);
1285   }
1286 
1287 #ifndef PRODUCT
1288   if (VerifyDependencies) {
1289     // Object pointers are used as unique identifiers for dependency arguments. This
1290     // is only possible if no safepoint, i.e., no GC, occurs during verification.
1291     dependentCheckTime.start();
1292     check_live_nmethods_dependencies(changes);
1293     dependentCheckTime.stop();
1294   }
1295 #endif
1296 }
1297 
1298 CompiledMethod* CodeCache::find_compiled(void* start) {
1299   CodeBlob *cb = find_blob(start);
1300   assert(cb == nullptr || cb->is_compiled(), "did not find a compiled method");
1301   return (CompiledMethod*)cb;
1302 }
1303 

1528 }
1529 PRAGMA_DIAG_POP
1530 
1531 void CodeCache::print_memory_overhead() {
1532   size_t wasted_bytes = 0;
1533   FOR_ALL_ALLOCABLE_HEAPS(heap) {
1534       CodeHeap* curr_heap = *heap;
1535       for (CodeBlob* cb = (CodeBlob*)curr_heap->first(); cb != nullptr; cb = (CodeBlob*)curr_heap->next(cb)) {
1536         HeapBlock* heap_block = ((HeapBlock*)cb) - 1;
1537         wasted_bytes += heap_block->length() * CodeCacheSegmentSize - cb->size();
1538       }
1539   }
1540   // Print bytes that are allocated in the freelist
1541   ttyLocker ttl;
1542   tty->print_cr("Number of elements in freelist: " SSIZE_FORMAT,       freelists_length());
1543   tty->print_cr("Allocated in freelist:          " SSIZE_FORMAT "kB",  bytes_allocated_in_freelists()/K);
1544   tty->print_cr("Unused bytes in CodeBlobs:      " SSIZE_FORMAT "kB",  (wasted_bytes/K));
1545   tty->print_cr("Segment map size:               " SSIZE_FORMAT "kB",  allocated_segments()/K); // 1 byte per segment
1546 }
1547 
1548 static void print_helper1(outputStream* st, const char* prefix, int total, int not_entrant, int used) {
1549   if (total > 0) {
1550     double ratio = (100.0 * used) / total;
1551     st->print("%s %3d nmethods: %3d not_entrant, %d used (%2.1f%%)", prefix, total, not_entrant, used, ratio);
1552   }
1553 }
1554 
1555 void CodeCache::print_nmethod_statistics_on(outputStream* st) {
1556   int stats     [2][6][3][2] = {0};
1557   int stats_used[2][6][3][2] = {0};
1558 
1559   int total_osr = 0;
1560   int total_entrant = 0;
1561   int total_non_entrant = 0;
1562   int total_other = 0;
1563   int total_used = 0;
1564 
1565   NMethodIterator iter(NMethodIterator::all_blobs);
1566   while (iter.next()) {
1567     nmethod* nm = iter.method();
1568     if (nm->is_in_use()) {
1569       ++total_entrant;
1570     } else if (nm->is_not_entrant()) {
1571       ++total_non_entrant;
1572     } else {
1573       ++total_other;
1574     }
1575     if (nm->is_osr_method()) {
1576       ++total_osr;
1577     }
1578     if (nm->used()) {
1579       ++total_used;
1580     }
1581     assert(!nm->preloaded() || nm->comp_level() == CompLevel_full_optimization, "");
1582 
1583     int idx1 = nm->is_scc() ? 1 : 0;
1584     int idx2 = nm->comp_level() + (nm->preloaded() ? 1 : 0);
1585     int idx3 = (nm->is_in_use()      ? 0 :
1586                (nm->is_not_entrant() ? 1 :
1587                                        2));
1588     int idx4 = (nm->is_osr_method() ? 1 : 0);
1589     stats[idx1][idx2][idx3][idx4] += 1;
1590     if (nm->used()) {
1591       stats_used[idx1][idx2][idx3][idx4] += 1;
1592     }
1593   }
1594 
1595   st->print("Total: %d methods (%d entrant / %d not_entrant; osr: %d ",
1596                total_entrant + total_non_entrant + total_other,
1597                total_entrant, total_non_entrant, total_osr);
1598   if (total_other > 0) {
1599     st->print("; %d other", total_other);
1600   }
1601   st->print_cr(")");
1602 
1603   for (int i = CompLevel_simple; i <= CompLevel_full_optimization; i++) {
1604     int total_normal = stats[0][i][0][0] + stats[0][i][1][0] + stats[0][i][2][0];
1605     int total_osr    = stats[0][i][0][1] + stats[0][i][1][1] + stats[0][i][2][1];
1606     if (total_normal + total_osr > 0) {
1607       st->print("  Tier%d:", i);
1608       print_helper1(st,      "", total_normal, stats[0][i][1][0], stats_used[0][i][0][0] + stats_used[0][i][1][0]);
1609       print_helper1(st, "; osr:", total_osr,    stats[0][i][1][1], stats_used[0][i][0][1] + stats_used[0][i][1][1]);
1610       st->cr();
1611     }
1612   }
1613   st->cr();
1614   for (int i = CompLevel_simple; i <= CompLevel_full_optimization + 1; i++) {
1615     int total_normal = stats[1][i][0][0] + stats[1][i][1][0] + stats[1][i][2][0];
1616     int total_osr    = stats[1][i][0][1] + stats[1][i][1][1] + stats[1][i][2][1];
1617     assert(total_osr == 0, "sanity");
1618     if (total_normal + total_osr > 0) {
1619       st->print("  SC T%d:", i);
1620       print_helper1(st,      "", total_normal, stats[1][i][1][0], stats_used[1][i][0][0] + stats_used[1][i][1][0]);
1621       print_helper1(st, "; osr:", total_osr,    stats[1][i][1][1], stats_used[1][i][0][1] + stats_used[1][i][1][1]);
1622       st->cr();
1623     }
1624   }
1625 }
1626 
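// [Editor's note, not part of the webrev] The stats arrays above are indexed
// [scc][level][state][osr]. A standalone restatement of the bucket
// computation (plain booleans in place of the nmethod queries; hypothetical
// names):

struct BucketIdx { int scc, level, state, osr; };

static BucketIdx bucket(bool is_scc, int comp_level, bool preloaded,
                        bool in_use, bool not_entrant, bool is_osr) {
  return { is_scc ? 1 : 0,                      // idx1: from the startup code cache?
           comp_level + (preloaded ? 1 : 0),    // idx2: tier, shifted if preloaded
           in_use ? 0 : (not_entrant ? 1 : 2),  // idx3: in use / not entrant / other
           is_osr ? 1 : 0 };                    // idx4: OSR entry?
}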
1627 //------------------------------------------------------------------------------------------------
1628 // Non-product version
1629 
1630 #ifndef PRODUCT
1631 
1632 void CodeCache::print_trace(const char* event, CodeBlob* cb, uint size) {
1633   if (PrintCodeCache2) {  // Need to add a new flag
1634     ResourceMark rm;
1635     if (size == 0) {
1636       int s = cb->size();
1637       assert(s >= 0, "CodeBlob size is negative: %d", s);
1638       size = (uint) s;
1639     }
1640     tty->print_cr("CodeCache %s:  addr: " INTPTR_FORMAT ", size: 0x%x", event, p2i(cb), size);
1641   }
1642 }
1643 
1644 void CodeCache::print_internals() {
1645   int nmethodCount = 0;
1646   int runtimeStubCount = 0;
1647   int adapterCount = 0;
1648   int deoptimizationStubCount = 0;
1649   int uncommonTrapStubCount = 0;
1650   int bufferBlobCount = 0;
1651   int total = 0;
1652   int nmethodNotEntrant = 0;
1653   int nmethodJava = 0;
1654   int nmethodNative = 0;
1655   int max_nm_size = 0;
1656   ResourceMark rm;
1657 
1658   int i = 0;
1659   FOR_ALL_ALLOCABLE_HEAPS(heap) {
1660     int heap_total = 0;
1661     tty->print_cr("-- %s --", (*heap)->name());
1662     FOR_ALL_BLOBS(cb, *heap) {
1663       total++;
1664       heap_total++;
1665       if (cb->is_nmethod()) {
1666         nmethod* nm = (nmethod*)cb;
1667 
1668         tty->print("%4d: ", heap_total);
1669         CompileTask::print(tty, nm, (nm->is_not_entrant() ? "non-entrant" : ""), true, true);
1670 
1671         nmethodCount++;
1672 
1673         if(nm->is_not_entrant()) { nmethodNotEntrant++; }
1674         if(nm->method() != nullptr && nm->is_native_method()) { nmethodNative++; }
1675 
1676         if(nm->method() != nullptr && nm->is_java_method()) {
1677           nmethodJava++;
1678           max_nm_size = MAX2(max_nm_size, nm->size());
1679         }
1680       } else if (cb->is_runtime_stub()) {
1681         runtimeStubCount++;
1682       } else if (cb->is_deoptimization_stub()) {
1683         deoptimizationStubCount++;
1684       } else if (cb->is_uncommon_trap_stub()) {
1685         uncommonTrapStubCount++;
1686       } else if (cb->is_adapter_blob()) {
1687         adapterCount++;
1688       } else if (cb->is_buffer_blob()) {
1689         bufferBlobCount++;

1811       FOR_ALL_BLOBS(cb, *heap) {
1812         number_of_blobs++;
1813         code_size += cb->code_size();
1814         ImmutableOopMapSet* set = cb->oop_maps();
1815         if (set != nullptr) {
1816           number_of_oop_maps += set->count();
1817           map_size           += set->nr_of_bytes();
1818         }
1819       }
1820     }
1821     tty->print_cr("OopMaps");
1822     tty->print_cr("  #blobs    = %d", number_of_blobs);
1823     tty->print_cr("  code size = %d", code_size);
1824     tty->print_cr("  #oop_maps = %d", number_of_oop_maps);
1825     tty->print_cr("  map size  = %d", map_size);
1826   }
1827 
1828 #endif // !PRODUCT
1829 }
1830 
1831 void CodeCache::print_nmethods_on(outputStream* st) {
1832   ResourceMark rm;
1833   int i = 0;
1834   FOR_ALL_ALLOCABLE_HEAPS(heap) {
1835     st->print_cr("-- %s --", (*heap)->name());
1836     FOR_ALL_BLOBS(cb, *heap) {
1837       i++;
1838       if (cb->is_nmethod()) {
1839         nmethod* nm = (nmethod*)cb;
1840         st->print("%4d: ", i);
1841         CompileTask::print(st, nm, nullptr, true, false);
1842 
1843         const char non_entrant_char = (nm->is_not_entrant() ? 'N' : ' ');
1844         st->print_cr(" %c", non_entrant_char);
1845       }
1846     }
1847   }
1848 }
1849 
1850 void CodeCache::print_summary(outputStream* st, bool detailed) {
1851   int full_count = 0;
1852   julong total_used = 0;
1853   julong total_max_used = 0;
1854   julong total_free = 0;
1855   julong total_size = 0;
1856   FOR_ALL_HEAPS(heap_iterator) {
1857     CodeHeap* heap = (*heap_iterator);
1858     size_t total = (heap->high_boundary() - heap->low_boundary());
1859     if (_heaps->length() >= 1) {
1860       st->print("%s:", heap->name());
1861     } else {
1862       st->print("CodeCache:");
1863     }
1864     size_t size = total/K;
1865     size_t used = (total - heap->unallocated_capacity())/K;
1866     size_t max_used = heap->max_allocated_capacity()/K;
1867     size_t free = heap->unallocated_capacity()/K;
1868     total_size += size;
1869     total_used += used;