/*
 * Copyright (c) 1997, 2025, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#include "cds/aotCacheAccess.hpp"
#include "code/codeBlob.hpp"
#include "code/codeCache.hpp"
#include "code/codeHeapState.hpp"
#include "code/compiledIC.hpp"
#include "code/dependencies.hpp"
#include "code/dependencyContext.hpp"
#include "code/nmethod.hpp"
#include "code/pcDesc.hpp"
#include "compiler/compilationPolicy.hpp"
#include "compiler/compileBroker.hpp"
#include "compiler/compilerDefinitions.inline.hpp"
#include "compiler/oopMap.hpp"
#include "gc/shared/barrierSetNMethod.hpp"
#include "gc/shared/classUnloadingContext.hpp"
#include "gc/shared/collectedHeap.hpp"
#include "jfr/jfrEvents.hpp"
#include "jvm_io.h"
#include "logging/log.hpp"
#include "logging/logStream.hpp"
#include "memory/allocation.inline.hpp"
#include "memory/iterator.hpp"
#include "memory/memoryReserver.hpp"
#include "memory/resourceArea.hpp"
#include "memory/universe.hpp"
#include "oops/method.inline.hpp"
#include "oops/objArrayOop.hpp"
#include "oops/oop.inline.hpp"
#include "oops/verifyOopClosure.hpp"
#include "runtime/arguments.hpp"
#include "runtime/atomic.hpp"
#include "runtime/deoptimization.hpp"
#include "runtime/globals_extension.hpp"
#include "runtime/handles.inline.hpp"
#include "runtime/icache.hpp"
#include "runtime/init.hpp"
#include "runtime/java.hpp"
#include "runtime/mutexLocker.hpp"
#include "runtime/os.inline.hpp"
#include "runtime/safepointVerifiers.hpp"
#include "runtime/vmThread.hpp"
#include "sanitizers/leak.hpp"
#include "services/memoryService.hpp"
#include "utilities/align.hpp"
#include "utilities/vmError.hpp"
#include "utilities/xmlstream.hpp"
#ifdef COMPILER1
#include "c1/c1_Compilation.hpp"
#include "c1/c1_Compiler.hpp"
#endif
#ifdef COMPILER2
#include "opto/c2compiler.hpp"
#include "opto/compile.hpp"
#include "opto/node.hpp"
#endif

// Helper class for printing in CodeCache
class CodeBlob_sizes {
 private:
  int count;
  int total_size;
  int header_size;
  int code_size;
  int stub_size;
  int relocation_size;
  int scopes_oop_size;
  int scopes_metadata_size;
  int scopes_data_size;
  int scopes_pcs_size;

 public:
  CodeBlob_sizes() {
    count = 0;
    total_size = 0;
    header_size = 0;
    code_size = 0;
    stub_size = 0;
    relocation_size = 0;
    scopes_oop_size = 0;
    scopes_metadata_size = 0;
    scopes_data_size = 0;
    scopes_pcs_size = 0;
  }

  int total() const { return total_size; }
  bool is_empty() const { return count == 0; }

  void print(const char* title) const {
    if (is_empty()) {
      tty->print_cr(" #%d %s = %dK",
                    count,
                    title,
                    total() / (int)K);
    } else {
      tty->print_cr(" #%d %s = %dK (hdr %dK %d%%, loc %dK %d%%, code %dK %d%%, stub %dK %d%%, [oops %dK %d%%, metadata %dK %d%%, data %dK %d%%, pcs %dK %d%%])",
                    count,
                    title,
                    total() / (int)K,
                    header_size / (int)K,
                    header_size * 100 / total_size,
                    relocation_size / (int)K,
                    relocation_size * 100 / total_size,
                    code_size / (int)K,
                    code_size * 100 / total_size,
                    stub_size / (int)K,
                    stub_size * 100 / total_size,
                    scopes_oop_size / (int)K,
                    scopes_oop_size * 100 / total_size,
                    scopes_metadata_size / (int)K,
                    scopes_metadata_size * 100 / total_size,
                    scopes_data_size / (int)K,
                    scopes_data_size * 100 / total_size,
                    scopes_pcs_size / (int)K,
                    scopes_pcs_size * 100 / total_size);
    }
  }

  void add(CodeBlob* cb) {
    count++;
    total_size += cb->size();
    header_size += cb->header_size();
    relocation_size += cb->relocation_size();
    if (cb->is_nmethod()) {
      nmethod* nm = cb->as_nmethod_or_null();
      code_size += nm->insts_size();
      stub_size += nm->stub_size();

      scopes_oop_size += nm->oops_size();
      scopes_metadata_size += nm->metadata_size();
      scopes_data_size += nm->scopes_data_size();
      scopes_pcs_size += nm->scopes_pcs_size();
    } else {
      code_size += cb->code_size();
    }
  }
};

// Iterate over all CodeHeaps
#define FOR_ALL_HEAPS(heap) for (GrowableArrayIterator<CodeHeap*> heap = _heaps->begin(); heap != _heaps->end(); ++heap)
#define FOR_ALL_ALLOCABLE_HEAPS(heap) for (GrowableArrayIterator<CodeHeap*> heap = _allocable_heaps->begin(); heap != _allocable_heaps->end(); ++heap)

// Iterate over all CodeBlobs (cb) on the given CodeHeap
#define FOR_ALL_BLOBS(cb, heap) for (CodeBlob* cb = first_blob(heap); cb != nullptr; cb = next_blob(heap, cb))

address CodeCache::_low_bound = nullptr;
address CodeCache::_high_bound = nullptr;
volatile int CodeCache::_number_of_nmethods_with_dependencies = 0;
ExceptionCache* volatile CodeCache::_exception_cache_purge_list = nullptr;

static ReservedSpace _cds_code_space;

// Initialize arrays of CodeHeap subsets
GrowableArray<CodeHeap*>* CodeCache::_heaps = new(mtCode) GrowableArray<CodeHeap*> (static_cast<int>(CodeBlobType::All), mtCode);
GrowableArray<CodeHeap*>* CodeCache::_nmethod_heaps = new(mtCode) GrowableArray<CodeHeap*> (static_cast<int>(CodeBlobType::All), mtCode);
GrowableArray<CodeHeap*>* CodeCache::_allocable_heaps = new(mtCode) GrowableArray<CodeHeap*> (static_cast<int>(CodeBlobType::All), mtCode);

static void check_min_size(const char* codeheap, size_t size, size_t required_size) {
  if (size < required_size) {
    log_debug(codecache)("Code heap (%s) size %zuK below required minimal size %zuK",
                         codeheap, size/K, required_size/K);
    err_msg title("Not enough space in %s to run VM", codeheap);
    err_msg message("%zuK < %zuK", size/K, required_size/K);
    vm_exit_during_initialization(title, message);
  }
}

struct CodeHeapInfo {
  size_t size;
  bool set;
  bool enabled;
};

static void set_size_of_unset_code_heap(CodeHeapInfo* heap, size_t available_size, size_t used_size, size_t min_size) {
  assert(!heap->set, "sanity");
  heap->size = (available_size > (used_size + min_size)) ? (available_size - used_size) : min_size;
}

void CodeCache::initialize_heaps() {
  CodeHeapInfo non_nmethod = {NonNMethodCodeHeapSize, FLAG_IS_CMDLINE(NonNMethodCodeHeapSize), true};
  CodeHeapInfo profiled = {ProfiledCodeHeapSize, FLAG_IS_CMDLINE(ProfiledCodeHeapSize), true};
  CodeHeapInfo non_profiled = {NonProfiledCodeHeapSize, FLAG_IS_CMDLINE(NonProfiledCodeHeapSize), true};

  const bool cache_size_set = FLAG_IS_CMDLINE(ReservedCodeCacheSize);
  const size_t ps = page_size(false, 8);
  const size_t min_size = MAX2(os::vm_allocation_granularity(), ps);
  const size_t min_cache_size = CodeCacheMinimumUseSpace DEBUG_ONLY(* 3); // Make sure we have enough space for VM internal code
  size_t cache_size = align_up(ReservedCodeCacheSize, min_size);

  // Prerequisites
  if (!heap_available(CodeBlobType::MethodProfiled)) {
    // For compatibility reasons, disabled tiered compilation overrides
    // segment size even if it is set explicitly.
    non_profiled.size += profiled.size;
    // Profiled code heap is not available, forcibly set size to 0
    profiled.size = 0;
    profiled.set = true;
    profiled.enabled = false;
  }

  assert(heap_available(CodeBlobType::MethodNonProfiled), "MethodNonProfiled heap is always available for segmented code heap");

  size_t compiler_buffer_size = 0;
  COMPILER1_PRESENT(compiler_buffer_size += CompilationPolicy::c1_count() * Compiler::code_buffer_size());
  COMPILER2_PRESENT(compiler_buffer_size += CompilationPolicy::c2_count() * C2Compiler::initial_code_buffer_size());

  if (!non_nmethod.set) {
    non_nmethod.size += compiler_buffer_size;
    // Further down, just before FLAG_SET_ERGO(), all segment sizes are
    // aligned down to the next lower multiple of min_size. For large page
    // sizes, this may result in (non_nmethod.size == 0) which is not acceptable.
    // Therefore, force non_nmethod.size to at least min_size.
    non_nmethod.size = MAX2(non_nmethod.size, min_size);
  }

  if (!profiled.set && !non_profiled.set) {
    non_profiled.size = profiled.size = (cache_size > non_nmethod.size + 2 * min_size) ?
                                        (cache_size - non_nmethod.size) / 2 : min_size;
  }

  if (profiled.set && !non_profiled.set) {
    set_size_of_unset_code_heap(&non_profiled, cache_size, non_nmethod.size + profiled.size, min_size);
  }

  if (!profiled.set && non_profiled.set) {
    set_size_of_unset_code_heap(&profiled, cache_size, non_nmethod.size + non_profiled.size, min_size);
  }

  // Compatibility.
  size_t non_nmethod_min_size = min_cache_size + compiler_buffer_size;
  if (!non_nmethod.set && profiled.set && non_profiled.set) {
    set_size_of_unset_code_heap(&non_nmethod, cache_size, profiled.size + non_profiled.size, non_nmethod_min_size);
  }

  size_t total = non_nmethod.size + profiled.size + non_profiled.size;
  if (total != cache_size && !cache_size_set) {
    log_info(codecache)("ReservedCodeCache size %zuK changed to total segments size NonNMethod "
                        "%zuK NonProfiled %zuK Profiled %zuK = %zuK",
                        cache_size/K, non_nmethod.size/K, non_profiled.size/K, profiled.size/K, total/K);
    // Adjust ReservedCodeCacheSize as necessary because it was not set explicitly
    cache_size = total;
  }

  log_debug(codecache)("Initializing code heaps ReservedCodeCache %zuK NonNMethod %zuK"
                       " NonProfiled %zuK Profiled %zuK",
                       cache_size/K, non_nmethod.size/K, non_profiled.size/K, profiled.size/K);

  // Validation
  // Check minimal required sizes
  check_min_size("non-nmethod code heap", non_nmethod.size, non_nmethod_min_size);
  if (profiled.enabled) {
    check_min_size("profiled code heap", profiled.size, min_size);
  }
  if (non_profiled.enabled) { // non_profiled.enabled is always ON for segmented code heap, leave it checked for clarity
    check_min_size("non-profiled code heap", non_profiled.size, min_size);
  }
  if (cache_size_set) {
    check_min_size("reserved code cache", cache_size, min_cache_size);
  }

  // ReservedCodeCacheSize was set explicitly, so report an error and abort if it doesn't match the segment sizes
  if (total != cache_size && cache_size_set) {
    err_msg message("NonNMethodCodeHeapSize (%zuK)", non_nmethod.size/K);
    if (profiled.enabled) {
      message.append(" + ProfiledCodeHeapSize (%zuK)", profiled.size/K);
    }
    if (non_profiled.enabled) {
      message.append(" + NonProfiledCodeHeapSize (%zuK)", non_profiled.size/K);
    }
    message.append(" = %zuK", total/K);
    message.append((total > cache_size) ? " is greater than " : " is less than ");
    message.append("ReservedCodeCacheSize (%zuK).", cache_size/K);

    vm_exit_during_initialization("Invalid code heap sizes", message);
  }

  // Compatibility. Print warning if using large pages but not able to use the size given
  if (UseLargePages) {
    const size_t lg_ps = page_size(false, 1);
    if (ps < lg_ps) {
      log_warning(codecache)("Code cache size too small for " PROPERFMT " pages. "
                             "Reverting to smaller page size (" PROPERFMT ").",
                             PROPERFMTARGS(lg_ps), PROPERFMTARGS(ps));
    }
  }

  // Note: if large page support is enabled, min_size is at least the large
  // page size. This ensures that the code cache is covered by large pages.
  non_profiled.size += non_nmethod.size & alignment_mask(min_size);
  non_profiled.size += profiled.size & alignment_mask(min_size);
  non_nmethod.size = align_down(non_nmethod.size, min_size);
  profiled.size = align_down(profiled.size, min_size);
  non_profiled.size = align_down(non_profiled.size, min_size);

  FLAG_SET_ERGO(NonNMethodCodeHeapSize, non_nmethod.size);
  FLAG_SET_ERGO(ProfiledCodeHeapSize, profiled.size);
  FLAG_SET_ERGO(NonProfiledCodeHeapSize, non_profiled.size);
  FLAG_SET_ERGO(ReservedCodeCacheSize, cache_size);

  const size_t cds_code_size = 0;
  // FIXME: we should not increase CodeCache size - it affects branches.
  // Instead we need to create separate code heap in CodeCache for AOT code.
  // const size_t cds_code_size = align_up(AOTCacheAccess::get_aot_code_region_size(), min_size);
  // cache_size += cds_code_size;

  ReservedSpace rs = reserve_heap_memory(cache_size, ps);

  // Register CodeHeaps with LSan as we sometimes embed pointers to malloc memory.
  LSAN_REGISTER_ROOT_REGION(rs.base(), rs.size());

  size_t offset = 0;
  if (cds_code_size > 0) {
    // FIXME: use CodeHeapInfo for this hack ...
    _cds_code_space = rs.partition(offset, cds_code_size);
    offset += cds_code_size;
  }

  if (profiled.enabled) {
    ReservedSpace profiled_space = rs.partition(offset, profiled.size);
    offset += profiled.size;
    // Tier 2 and tier 3 (profiled) methods
    add_heap(profiled_space, "CodeHeap 'profiled nmethods'", CodeBlobType::MethodProfiled);
  }

  ReservedSpace non_method_space = rs.partition(offset, non_nmethod.size);
  offset += non_nmethod.size;
  // Non-nmethods (stubs, adapters, ...)
  add_heap(non_method_space, "CodeHeap 'non-nmethods'", CodeBlobType::NonNMethod);

  if (non_profiled.enabled) {
    ReservedSpace non_profiled_space = rs.partition(offset, non_profiled.size);
    // Tier 1 and tier 4 (non-profiled) methods and native methods
    add_heap(non_profiled_space, "CodeHeap 'non-profiled nmethods'", CodeBlobType::MethodNonProfiled);
  }
}

void* CodeCache::map_aot_code() {
  if (_cds_code_space.size() > 0 && AOTCacheAccess::map_aot_code_region(_cds_code_space)) {
    return _cds_code_space.base();
  } else {
    return nullptr;
  }
}

size_t CodeCache::page_size(bool aligned, size_t min_pages) {
  return aligned ? os::page_size_for_region_aligned(ReservedCodeCacheSize, min_pages) :
                   os::page_size_for_region_unaligned(ReservedCodeCacheSize, min_pages);
}

ReservedSpace CodeCache::reserve_heap_memory(size_t size, size_t rs_ps) {
  // Align and reserve space for code cache
  const size_t rs_align = MAX2(rs_ps, os::vm_allocation_granularity());
  const size_t rs_size = align_up(size, rs_align);

  ReservedSpace rs = CodeMemoryReserver::reserve(rs_size, rs_align, rs_ps);
  if (!rs.is_reserved()) {
    vm_exit_during_initialization(err_msg("Could not reserve enough space for code cache (%zuK)",
                                          rs_size/K));
  }

  // Initialize bounds
  _low_bound = (address)rs.base();
  _high_bound = _low_bound + rs.size();
  return rs;
}

// Heaps available for allocation
bool CodeCache::heap_available(CodeBlobType code_blob_type) {
  if (!SegmentedCodeCache) {
    // No segmentation: use a single code heap
    return (code_blob_type == CodeBlobType::All);
  } else if (CompilerConfig::is_interpreter_only()) {
    // Interpreter only: we don't need any method code heaps
    return (code_blob_type == CodeBlobType::NonNMethod);
  } else if (CompilerConfig::is_c1_profiling()) {
    // Tiered compilation: use all code heaps
    return (code_blob_type < CodeBlobType::All);
  } else {
    // No TieredCompilation: we only need the non-nmethod and non-profiled code heap
    return (code_blob_type == CodeBlobType::NonNMethod) ||
           (code_blob_type == CodeBlobType::MethodNonProfiled);
  }
}

const char* CodeCache::get_code_heap_flag_name(CodeBlobType code_blob_type) {
  switch(code_blob_type) {
    case CodeBlobType::NonNMethod:
      return "NonNMethodCodeHeapSize";
      break;
    case CodeBlobType::MethodNonProfiled:
      return "NonProfiledCodeHeapSize";
      break;
    case CodeBlobType::MethodProfiled:
      return "ProfiledCodeHeapSize";
      break;
    default:
      ShouldNotReachHere();
      return nullptr;
  }
}

int CodeCache::code_heap_compare(CodeHeap* const &lhs, CodeHeap* const &rhs) {
  if (lhs->code_blob_type() == rhs->code_blob_type()) {
    return (lhs > rhs) ? 1 : ((lhs < rhs) ? -1 : 0);
  } else {
    return static_cast<int>(lhs->code_blob_type()) - static_cast<int>(rhs->code_blob_type());
  }
}

void CodeCache::add_heap(CodeHeap* heap) {
  assert(!Universe::is_fully_initialized(), "late heap addition?");

  _heaps->insert_sorted<code_heap_compare>(heap);

  CodeBlobType type = heap->code_blob_type();
  if (code_blob_type_accepts_nmethod(type)) {
    _nmethod_heaps->insert_sorted<code_heap_compare>(heap);
  }
  if (code_blob_type_accepts_allocable(type)) {
    _allocable_heaps->insert_sorted<code_heap_compare>(heap);
  }
}

void CodeCache::add_heap(ReservedSpace rs, const char* name, CodeBlobType code_blob_type) {
  // Check if heap is needed
  if (!heap_available(code_blob_type)) {
    return;
  }

  // Create CodeHeap
  CodeHeap* heap = new CodeHeap(name, code_blob_type);
  add_heap(heap);

  // Reserve Space
  size_t size_initial = MIN2((size_t)InitialCodeCacheSize, rs.size());
  size_initial = align_up(size_initial, rs.page_size());
  if (!heap->reserve(rs, size_initial, CodeCacheSegmentSize)) {
    vm_exit_during_initialization(err_msg("Could not reserve enough space in %s (%zuK)",
                                          heap->name(), size_initial/K));
  }

  // Register the CodeHeap
  MemoryService::add_code_heap_memory_pool(heap, name);
}

CodeHeap* CodeCache::get_code_heap_containing(void* start) {
  FOR_ALL_HEAPS(heap) {
    if ((*heap)->contains(start)) {
      return *heap;
    }
  }
  return nullptr;
}

CodeHeap* CodeCache::get_code_heap(const void* cb) {
  assert(cb != nullptr, "CodeBlob is null");
  FOR_ALL_HEAPS(heap) {
    if ((*heap)->contains(cb)) {
      return *heap;
    }
  }
  ShouldNotReachHere();
  return nullptr;
}

CodeHeap* CodeCache::get_code_heap(CodeBlobType code_blob_type) {
  FOR_ALL_HEAPS(heap) {
    if ((*heap)->accepts(code_blob_type)) {
      return *heap;
    }
  }
  return nullptr;
}

CodeBlob* CodeCache::first_blob(CodeHeap* heap) {
  assert_locked_or_safepoint(CodeCache_lock);
  assert(heap != nullptr, "heap is null");
  return (CodeBlob*)heap->first();
}

CodeBlob* CodeCache::first_blob(CodeBlobType code_blob_type) {
  if (heap_available(code_blob_type)) {
    return first_blob(get_code_heap(code_blob_type));
  } else {
    return nullptr;
  }
}

CodeBlob* CodeCache::next_blob(CodeHeap* heap, CodeBlob* cb) {
  assert_locked_or_safepoint(CodeCache_lock);
  assert(heap != nullptr, "heap is null");
  return (CodeBlob*)heap->next(cb);
}

/**
 * Do not seize the CodeCache lock here--if the caller has not
 * already done so, we are going to lose bigtime, since the code
 * cache will contain a garbage CodeBlob until the caller can
 * run the constructor for the CodeBlob subclass he is busy
 * instantiating.
 */
CodeBlob* CodeCache::allocate(uint size, CodeBlobType code_blob_type, bool handle_alloc_failure, CodeBlobType orig_code_blob_type) {
  assert_locked_or_safepoint(CodeCache_lock);
  assert(size > 0, "Code cache allocation request must be > 0");
  if (size == 0) {
    return nullptr;
  }
  CodeBlob* cb = nullptr;

  // Get CodeHeap for the given CodeBlobType
  CodeHeap* heap = get_code_heap(code_blob_type);
  assert(heap != nullptr, "heap is null");

  while (true) {
    cb = (CodeBlob*)heap->allocate(size);
    if (cb != nullptr) break;
    if (!heap->expand_by(CodeCacheExpansionSize)) {
      // Save original type for error reporting
      if (orig_code_blob_type == CodeBlobType::All) {
        orig_code_blob_type = code_blob_type;
      }
      // Expansion failed
      if (SegmentedCodeCache) {
        // Fallback solution: Try to store code in another code heap.
        // NonNMethod -> MethodNonProfiled -> MethodProfiled (-> MethodNonProfiled)
        CodeBlobType type = code_blob_type;
        switch (type) {
          case CodeBlobType::NonNMethod:
            type = CodeBlobType::MethodNonProfiled;
            break;
          case CodeBlobType::MethodNonProfiled:
            type = CodeBlobType::MethodProfiled;
            break;
          case CodeBlobType::MethodProfiled:
            // Avoid loop if we already tried that code heap
            if (type == orig_code_blob_type) {
              type = CodeBlobType::MethodNonProfiled;
            }
            break;
          default:
            break;
        }
        if (type != code_blob_type && type != orig_code_blob_type && heap_available(type)) {
          if (PrintCodeCacheExtension) {
            tty->print_cr("Extension of %s failed. Trying to allocate in %s.",
                          heap->name(), get_code_heap(type)->name());
          }
          return allocate(size, type, handle_alloc_failure, orig_code_blob_type);
        }
      }
      if (handle_alloc_failure) {
        MutexUnlocker mu(CodeCache_lock, Mutex::_no_safepoint_check_flag);
        CompileBroker::handle_full_code_cache(orig_code_blob_type);
      }
      return nullptr;
    } else {
      OrderAccess::release(); // ensure heap expansion is visible to an asynchronous observer (e.g. CodeHeapPool::get_memory_usage())
    }
    if (PrintCodeCacheExtension) {
      ResourceMark rm;
      if (_nmethod_heaps->length() >= 1) {
        tty->print("%s", heap->name());
      } else {
        tty->print("CodeCache");
      }
      tty->print_cr(" extended to [" INTPTR_FORMAT ", " INTPTR_FORMAT "] (%zd bytes)",
                    (intptr_t)heap->low_boundary(), (intptr_t)heap->high(),
                    (address)heap->high() - (address)heap->low_boundary());
    }
  }
  print_trace("allocation", cb, size);
  return cb;
}

void CodeCache::free(CodeBlob* cb) {
  assert_locked_or_safepoint(CodeCache_lock);
  CodeHeap* heap = get_code_heap(cb);
  print_trace("free", cb);
  if (cb->is_nmethod()) {
    heap->set_nmethod_count(heap->nmethod_count() - 1);
    if (((nmethod *)cb)->has_dependencies()) {
      Atomic::dec(&_number_of_nmethods_with_dependencies);
    }
  }
  if (cb->is_adapter_blob()) {
    heap->set_adapter_count(heap->adapter_count() - 1);
  }

  cb->~CodeBlob();
  // Get heap for given CodeBlob and deallocate
  heap->deallocate(cb);

  assert(heap->blob_count() >= 0, "sanity check");
}

void CodeCache::free_unused_tail(CodeBlob* cb, size_t used) {
  assert_locked_or_safepoint(CodeCache_lock);
  guarantee(cb->is_buffer_blob() && strncmp("Interpreter", cb->name(), 11) == 0, "Only possible for interpreter!");
  print_trace("free_unused_tail", cb);

  // We also have to account for the extra space (i.e. header) used by the CodeBlob
  // which provides the memory (see BufferBlob::create() in codeBlob.cpp).
  used += CodeBlob::align_code_offset(cb->header_size());

  // Get heap for given CodeBlob and deallocate its unused tail
  get_code_heap(cb)->deallocate_tail(cb, used);
  // Adjust the sizes of the CodeBlob
  cb->adjust_size(used);
}

void CodeCache::commit(CodeBlob* cb) {
  // this is called by nmethod::nmethod, which must already own CodeCache_lock
  assert_locked_or_safepoint(CodeCache_lock);
  CodeHeap* heap = get_code_heap(cb);
  if (cb->is_nmethod()) {
    heap->set_nmethod_count(heap->nmethod_count() + 1);
    if (((nmethod *)cb)->has_dependencies()) {
      Atomic::inc(&_number_of_nmethods_with_dependencies);
    }
  }
  if (cb->is_adapter_blob()) {
    heap->set_adapter_count(heap->adapter_count() + 1);
  }
}

bool CodeCache::contains(void *p) {
  // S390 uses contains() in current_frame(), which is used before
  // code cache initialization if NativeMemoryTracking=detail is set.
  S390_ONLY(if (_heaps == nullptr) return false;)
  // It should be ok to call contains without holding a lock.
  FOR_ALL_HEAPS(heap) {
    if ((*heap)->contains(p)) {
      return true;
    }
  }
  return false;
}

bool CodeCache::contains(nmethod *nm) {
  return contains((void *)nm);
}

// This method is safe to call without holding the CodeCache_lock. It only depends on the _segmap to contain
// valid indices, which it will always do, as long as the CodeBlob is not in the process of being recycled.
CodeBlob* CodeCache::find_blob(void* start) {
  // NMT can walk the stack before code cache is created
  if (_heaps != nullptr) {
    CodeHeap* heap = get_code_heap_containing(start);
    if (heap != nullptr) {
      return heap->find_blob(start);
    }
  }
  return nullptr;
}

nmethod* CodeCache::find_nmethod(void* start) {
  CodeBlob* cb = find_blob(start);
  assert(cb == nullptr || cb->is_nmethod(), "did not find an nmethod");
  return (nmethod*)cb;
}

void CodeCache::blobs_do(void f(CodeBlob* nm)) {
  assert_locked_or_safepoint(CodeCache_lock);
  FOR_ALL_HEAPS(heap) {
    FOR_ALL_BLOBS(cb, *heap) {
      f(cb);
    }
  }
}

void CodeCache::nmethods_do(void f(nmethod* nm)) {
  assert_locked_or_safepoint(CodeCache_lock);
  NMethodIterator iter(NMethodIterator::all);
  while(iter.next()) {
    f(iter.method());
  }
}

void CodeCache::nmethods_do(NMethodClosure* cl) {
  assert_locked_or_safepoint(CodeCache_lock);
  NMethodIterator iter(NMethodIterator::all);
  while(iter.next()) {
    cl->do_nmethod(iter.method());
  }
}

void CodeCache::metadata_do(MetadataClosure* f) {
  assert_locked_or_safepoint(CodeCache_lock);
  NMethodIterator iter(NMethodIterator::all);
  while(iter.next()) {
    iter.method()->metadata_do(f);
  }
}

// Calculate the number of GCs after which an nmethod is expected to have been
// used in order to not be classed as cold.
void CodeCache::update_cold_gc_count() {
  if (!MethodFlushing || !UseCodeCacheFlushing || NmethodSweepActivity == 0) {
    // No aging
    return;
  }

  size_t last_used = _last_unloading_used;
  double last_time = _last_unloading_time;

  double time = os::elapsedTime();

  size_t free = unallocated_capacity();
  size_t max = max_capacity();
  size_t used = max - free;
  double gc_interval = time - last_time;

  _unloading_threshold_gc_requested = false;
  _last_unloading_time = time;
  _last_unloading_used = used;

  if (last_time == 0.0) {
    // The first GC doesn't have enough information to make good
    // decisions, so just keep everything afloat
    log_info(codecache)("Unknown code cache pressure; don't age code");
    return;
  }

  if (gc_interval <= 0.0 || last_used >= used) {
    // Dodge corner cases where there is no pressure or negative pressure
    // on the code cache. Just don't unload when this happens.
    _cold_gc_count = INT_MAX;
    log_info(codecache)("No code cache pressure; don't age code");
    return;
  }

  double allocation_rate = (used - last_used) / gc_interval;

  _unloading_allocation_rates.add(allocation_rate);
  _unloading_gc_intervals.add(gc_interval);

  size_t aggressive_sweeping_free_threshold = StartAggressiveSweepingAt / 100.0 * max;
  if (free < aggressive_sweeping_free_threshold) {
    // We are already in the red zone; be very aggressive to avoid disaster
    // But not more aggressive than 2. This ensures that an nmethod must
    // have been unused at least between two GCs to be considered cold still.
    _cold_gc_count = 2;
    log_info(codecache)("Code cache critically low; use aggressive aging");
    return;
  }

  // The code cache has an expected time for cold nmethods to "time out"
  // when they have not been used. The time for nmethods to time out
  // depends on how long we expect we can keep allocating code until
  // aggressive sweeping starts, based on sampled allocation rates.
  double average_gc_interval = _unloading_gc_intervals.avg();
  double average_allocation_rate = _unloading_allocation_rates.avg();
  double time_to_aggressive = ((double)(free - aggressive_sweeping_free_threshold)) / average_allocation_rate;
  double cold_timeout = time_to_aggressive / NmethodSweepActivity;

  // Convert time to GC cycles, and crop at INT_MAX. The reason for
  // that is that the _cold_gc_count will be added to an epoch number
  // and that addition must not overflow, or we can crash the VM.
  // But not more aggressive than 2. This ensures that an nmethod must
  // have been unused at least between two GCs to be considered cold still.
  _cold_gc_count = MAX2(MIN2((uint64_t)(cold_timeout / average_gc_interval), (uint64_t)INT_MAX), (uint64_t)2);

  double used_ratio = double(used) / double(max);
  double last_used_ratio = double(last_used) / double(max);
  log_info(codecache)("Allocation rate: %.3f KB/s, time to aggressive unloading: %.3f s, cold timeout: %.3f s, cold gc count: " UINT64_FORMAT
                      ", used: %.3f MB (%.3f%%), last used: %.3f MB (%.3f%%), gc interval: %.3f s",
                      average_allocation_rate / K, time_to_aggressive, cold_timeout, _cold_gc_count,
                      double(used) / M, used_ratio * 100.0, double(last_used) / M, last_used_ratio * 100.0, average_gc_interval);

}

uint64_t CodeCache::cold_gc_count() {
  return _cold_gc_count;
}

void CodeCache::gc_on_allocation() {
  if (!is_init_completed()) {
    // Let's not heuristically trigger GCs before the JVM is ready for GCs, no matter what
    return;
  }

  size_t free = unallocated_capacity();
  size_t max = max_capacity();
  size_t used = max - free;
  double free_ratio = double(free) / double(max);
  if (free_ratio <= StartAggressiveSweepingAt / 100.0) {
    // In case the GC is concurrent, we make sure only one thread requests the GC.
    if (Atomic::cmpxchg(&_unloading_threshold_gc_requested, false, true) == false) {
      log_info(codecache)("Triggering aggressive GC due to having only %.3f%% free memory", free_ratio * 100.0);
      Universe::heap()->collect(GCCause::_codecache_GC_aggressive);
    }
    return;
  }

  size_t last_used = _last_unloading_used;
  if (last_used >= used) {
    // No increase since last GC; no need to sweep yet
    return;
  }
  size_t allocated_since_last = used - last_used;
  double allocated_since_last_ratio = double(allocated_since_last) / double(max);
  double threshold = SweeperThreshold / 100.0;
  double used_ratio = double(used) / double(max);
  double last_used_ratio = double(last_used) / double(max);
  if (used_ratio > threshold) {
    // After threshold is reached, scale it by free_ratio so that more aggressive
    // GC is triggered as we approach code cache exhaustion
    threshold *= free_ratio;
  }
  // If code cache has been allocated without any GC at all, let's make sure
  // it is eventually invoked to avoid trouble.
  if (allocated_since_last_ratio > threshold) {
    // In case the GC is concurrent, we make sure only one thread requests the GC.
    if (Atomic::cmpxchg(&_unloading_threshold_gc_requested, false, true) == false) {
      log_info(codecache)("Triggering threshold (%.3f%%) GC due to allocating %.3f%% since last unloading (%.3f%% used -> %.3f%% used)",
                          threshold * 100.0, allocated_since_last_ratio * 100.0, last_used_ratio * 100.0, used_ratio * 100.0);
      Universe::heap()->collect(GCCause::_codecache_GC_threshold);
    }
  }
}

// We initialize the _gc_epoch to 2, because previous_completed_gc_marking_cycle
// subtracts the value by 2, and the type is unsigned. We don't want underflow.
//
// Odd values mean that marking is in progress, and even values mean that no
// marking is currently active.
uint64_t CodeCache::_gc_epoch = 2;

// How many GCs after an nmethod has not been used, do we consider it cold?
uint64_t CodeCache::_cold_gc_count = INT_MAX;

double CodeCache::_last_unloading_time = 0.0;
size_t CodeCache::_last_unloading_used = 0;
volatile bool CodeCache::_unloading_threshold_gc_requested = false;
TruncatedSeq CodeCache::_unloading_gc_intervals(10 /* samples */);
TruncatedSeq CodeCache::_unloading_allocation_rates(10 /* samples */);

uint64_t CodeCache::gc_epoch() {
  return _gc_epoch;
}

bool CodeCache::is_gc_marking_cycle_active() {
  // Odd means that marking is active
  return (_gc_epoch % 2) == 1;
}

uint64_t CodeCache::previous_completed_gc_marking_cycle() {
  if (is_gc_marking_cycle_active()) {
    return _gc_epoch - 2;
  } else {
    return _gc_epoch - 1;
  }
}

void CodeCache::on_gc_marking_cycle_start() {
  assert(!is_gc_marking_cycle_active(), "Previous marking cycle never ended");
  ++_gc_epoch;
}

// Once started the code cache marking cycle must only be finished after marking of
// the java heap is complete. Otherwise nmethods could appear to be not on stack even
// if they have frames in continuation StackChunks that were not yet visited.
void CodeCache::on_gc_marking_cycle_finish() {
  assert(is_gc_marking_cycle_active(), "Marking cycle started before last one finished");
  ++_gc_epoch;
  update_cold_gc_count();
}

void CodeCache::arm_all_nmethods() {
  BarrierSet::barrier_set()->barrier_set_nmethod()->arm_all_nmethods();
}

// Mark nmethods for unloading if they contain otherwise unreachable oops.
void CodeCache::do_unloading(bool unloading_occurred) {
  assert_locked_or_safepoint(CodeCache_lock);
  NMethodIterator iter(NMethodIterator::all);
  while(iter.next()) {
    iter.method()->do_unloading(unloading_occurred);
  }
}

void CodeCache::verify_clean_inline_caches() {
#ifdef ASSERT
  NMethodIterator iter(NMethodIterator::not_unloading);
  while(iter.next()) {
    nmethod* nm = iter.method();
    nm->verify_clean_inline_caches();
    nm->verify();
  }
#endif
}

// Defer freeing of concurrently cleaned ExceptionCache entries until
// after a global handshake operation.
void CodeCache::release_exception_cache(ExceptionCache* entry) {
  if (SafepointSynchronize::is_at_safepoint()) {
    delete entry;
  } else {
    for (;;) {
      ExceptionCache* purge_list_head = Atomic::load(&_exception_cache_purge_list);
      entry->set_purge_list_next(purge_list_head);
      if (Atomic::cmpxchg(&_exception_cache_purge_list, purge_list_head, entry) == purge_list_head) {
        break;
      }
    }
  }
}

// Delete exception caches that have been concurrently unlinked,
// followed by a global handshake operation.
void CodeCache::purge_exception_caches() {
  ExceptionCache* curr = _exception_cache_purge_list;
  while (curr != nullptr) {
    ExceptionCache* next = curr->purge_list_next();
    delete curr;
    curr = next;
  }
  _exception_cache_purge_list = nullptr;
}

// Restart compiler if possible and required.
void CodeCache::maybe_restart_compiler(size_t freed_memory) {

  // Try to start the compiler again if we freed any memory
  if (!CompileBroker::should_compile_new_jobs() && freed_memory != 0) {
    CompileBroker::set_should_compile_new_jobs(CompileBroker::run_compilation);
    log_info(codecache)("Restarting compiler");
    EventJITRestart event;
    event.set_freedMemory(freed_memory);
    event.set_codeCacheMaxCapacity(CodeCache::max_capacity());
    event.commit();
  }
}

uint8_t CodeCache::_unloading_cycle = 1;

void CodeCache::increment_unloading_cycle() {
  // 2-bit value (see IsUnloadingState in nmethod.cpp for details)
  // 0 is reserved for new methods.
  _unloading_cycle = (_unloading_cycle + 1) % 4;
  if (_unloading_cycle == 0) {
    _unloading_cycle = 1;
  }
}

CodeCache::UnlinkingScope::UnlinkingScope(BoolObjectClosure* is_alive)
  : _is_unloading_behaviour(is_alive)
{
  _saved_behaviour = IsUnloadingBehaviour::current();
  IsUnloadingBehaviour::set_current(&_is_unloading_behaviour);
  increment_unloading_cycle();
  DependencyContext::cleaning_start();
}

CodeCache::UnlinkingScope::~UnlinkingScope() {
  IsUnloadingBehaviour::set_current(_saved_behaviour);
  DependencyContext::cleaning_end();
}

void CodeCache::verify_oops() {
  MutexLocker mu(CodeCache_lock, Mutex::_no_safepoint_check_flag);
  VerifyOopClosure voc;
  NMethodIterator iter(NMethodIterator::not_unloading);
  while(iter.next()) {
    nmethod* nm = iter.method();
    nm->oops_do(&voc);
    nm->verify_oop_relocations();
  }
}

int CodeCache::blob_count(CodeBlobType code_blob_type) {
  CodeHeap* heap = get_code_heap(code_blob_type);
  return (heap != nullptr) ? heap->blob_count() : 0;
}

int CodeCache::blob_count() {
  int count = 0;
  FOR_ALL_HEAPS(heap) {
    count += (*heap)->blob_count();
  }
  return count;
}

int CodeCache::nmethod_count(CodeBlobType code_blob_type) {
  CodeHeap* heap = get_code_heap(code_blob_type);
  return (heap != nullptr) ? heap->nmethod_count() : 0;
}

int CodeCache::nmethod_count() {
  int count = 0;
  for (CodeHeap* heap : *_nmethod_heaps) {
    count += heap->nmethod_count();
  }
  return count;
}

int CodeCache::adapter_count(CodeBlobType code_blob_type) {
  CodeHeap* heap = get_code_heap(code_blob_type);
  return (heap != nullptr) ? heap->adapter_count() : 0;
}

int CodeCache::adapter_count() {
  int count = 0;
  FOR_ALL_HEAPS(heap) {
    count += (*heap)->adapter_count();
  }
  return count;
}

address CodeCache::low_bound(CodeBlobType code_blob_type) {
  CodeHeap* heap = get_code_heap(code_blob_type);
  return (heap != nullptr) ? (address)heap->low_boundary() : nullptr;
}

address CodeCache::high_bound(CodeBlobType code_blob_type) {
  CodeHeap* heap = get_code_heap(code_blob_type);
  return (heap != nullptr) ? (address)heap->high_boundary() : nullptr;
}

size_t CodeCache::capacity() {
  size_t cap = 0;
  FOR_ALL_ALLOCABLE_HEAPS(heap) {
    cap += (*heap)->capacity();
  }
  return cap;
}

size_t CodeCache::unallocated_capacity(CodeBlobType code_blob_type) {
  CodeHeap* heap = get_code_heap(code_blob_type);
  return (heap != nullptr) ? heap->unallocated_capacity() : 0;
}

size_t CodeCache::unallocated_capacity() {
  size_t unallocated_cap = 0;
  FOR_ALL_ALLOCABLE_HEAPS(heap) {
    unallocated_cap += (*heap)->unallocated_capacity();
  }
  return unallocated_cap;
}

size_t CodeCache::max_capacity() {
  size_t max_cap = 0;
  FOR_ALL_ALLOCABLE_HEAPS(heap) {
    max_cap += (*heap)->max_capacity();
  }
  return max_cap;
}

bool CodeCache::is_non_nmethod(address addr) {
  CodeHeap* blob = get_code_heap(CodeBlobType::NonNMethod);
  return blob->contains(addr);
}

size_t CodeCache::max_distance_to_non_nmethod() {
  if (!SegmentedCodeCache) {
    return ReservedCodeCacheSize;
  } else {
    CodeHeap* blob = get_code_heap(CodeBlobType::NonNMethod);
    // the max distance is minimized by placing the NonNMethod segment
    // in between MethodProfiled and MethodNonProfiled segments
    size_t dist1 = (size_t)blob->high_boundary() - (size_t)_low_bound;
    size_t dist2 = (size_t)_high_bound - (size_t)blob->low_boundary();
    return dist1 > dist2 ? dist1 : dist2;
  }
}

// Returns the reverse free ratio. E.g., if 25% (1/4) of the code cache
// is free, reverse_free_ratio() returns 4.
// Since code heap for each type of code blobs falls forward to the next
// type of code heap, return the reverse free ratio for the entire
// code cache.
double CodeCache::reverse_free_ratio() {
  double unallocated = MAX2((double)unallocated_capacity(), 1.0); // Avoid division by 0;
  double max = (double)max_capacity();
  double result = max / unallocated;
  assert (max >= unallocated, "Must be");
  assert (result >= 1.0, "reverse_free_ratio must be at least 1. It is %f", result);
  return result;
}

size_t CodeCache::bytes_allocated_in_freelists() {
  size_t allocated_bytes = 0;
  FOR_ALL_ALLOCABLE_HEAPS(heap) {
    allocated_bytes += (*heap)->allocated_in_freelist();
  }
  return allocated_bytes;
}

int CodeCache::allocated_segments() {
  int number_of_segments = 0;
  FOR_ALL_ALLOCABLE_HEAPS(heap) {
    number_of_segments += (*heap)->allocated_segments();
  }
  return number_of_segments;
}

size_t CodeCache::freelists_length() {
  size_t length = 0;
  FOR_ALL_ALLOCABLE_HEAPS(heap) {
    length += (*heap)->freelist_length();
  }
  return length;
}

void icache_init();

void CodeCache::initialize() {
  assert(CodeCacheSegmentSize >= (uintx)CodeEntryAlignment, "CodeCacheSegmentSize must be large enough to align entry points");
#ifdef COMPILER2
  assert(CodeCacheSegmentSize >= (uintx)OptoLoopAlignment, "CodeCacheSegmentSize must be large enough to align inner loops");
#endif
  assert(CodeCacheSegmentSize >= sizeof(jdouble), "CodeCacheSegmentSize must be large enough to align constants");
  // This was originally just a check of the alignment, causing failure, instead, round
  // the code cache to the page size. In particular, Solaris is moving to a larger
  // default page size.
  CodeCacheExpansionSize = align_up(CodeCacheExpansionSize, os::vm_page_size());

  if (SegmentedCodeCache) {
    // Use multiple code heaps
    initialize_heaps();
  } else {
    // Use a single code heap
    FLAG_SET_ERGO(NonNMethodCodeHeapSize, (uintx)os::vm_page_size());
    FLAG_SET_ERGO(ProfiledCodeHeapSize, 0);
    FLAG_SET_ERGO(NonProfiledCodeHeapSize, 0);

    // If InitialCodeCacheSize is equal to ReservedCodeCacheSize, then it's more likely
    // users want to use the largest available page.
    const size_t min_pages = (InitialCodeCacheSize == ReservedCodeCacheSize) ? 1 : 8;
    ReservedSpace rs = reserve_heap_memory(ReservedCodeCacheSize, page_size(false, min_pages));
    // Register CodeHeaps with LSan as we sometimes embed pointers to malloc memory.
    LSAN_REGISTER_ROOT_REGION(rs.base(), rs.size());
    add_heap(rs, "CodeCache", CodeBlobType::All);
  }

  // Initialize ICache flush mechanism
  // This service is needed for os::register_code_area
  icache_init();

  // Give OS a chance to register generated code area.
  // This is used on Windows 64 bit platforms to register
  // Structured Exception Handlers for our generated code.
  os::register_code_area((char*)low_bound(), (char*)high_bound());
}

void codeCache_init() {
  CodeCache::initialize();
}

//------------------------------------------------------------------------------------------------

bool CodeCache::has_nmethods_with_dependencies() {
  return Atomic::load_acquire(&_number_of_nmethods_with_dependencies) != 0;
}

void CodeCache::clear_inline_caches() {
  assert_locked_or_safepoint(CodeCache_lock);
  NMethodIterator iter(NMethodIterator::not_unloading);
  while(iter.next()) {
    iter.method()->clear_inline_caches();
  }
}

// Only used by whitebox API
void CodeCache::cleanup_inline_caches_whitebox() {
  assert_locked_or_safepoint(CodeCache_lock);
  NMethodIterator iter(NMethodIterator::not_unloading);
  while(iter.next()) {
    iter.method()->cleanup_inline_caches_whitebox();
  }
}

// Keeps track of time spent for checking dependencies
NOT_PRODUCT(static elapsedTimer dependentCheckTime;)

#ifndef PRODUCT
// Check if any of live methods dependencies have been invalidated.
// (this is expensive!)
static void check_live_nmethods_dependencies(DepChange& changes) {
  // Checked dependencies are allocated into this ResourceMark
  ResourceMark rm;

  // Turn off dependency tracing while actually testing dependencies.
  FlagSetting fs(Dependencies::_verify_in_progress, true);

  typedef ResourceHashtable<DependencySignature, int, 11027,
                            AnyObj::RESOURCE_AREA, mtInternal,
                            &DependencySignature::hash,
                            &DependencySignature::equals> DepTable;

  DepTable* table = new DepTable();

  // Iterate over live nmethods and check dependencies of all nmethods that are not
  // marked for deoptimization. A particular dependency is only checked once.
  NMethodIterator iter(NMethodIterator::not_unloading);
  while(iter.next()) {
    nmethod* nm = iter.method();
    // Only notify for live nmethods
    if (!nm->is_marked_for_deoptimization()) {
      for (Dependencies::DepStream deps(nm); deps.next(); ) {
        // Construct abstraction of a dependency.
        DependencySignature* current_sig = new DependencySignature(deps);

        // Determine if dependency is already checked. table->put(...) returns
        // 'true' if the dependency is added (i.e., was not in the hashtable).
        if (table->put(*current_sig, 1)) {
          Klass* witness = deps.check_dependency();
          if (witness != nullptr) {
            // Dependency checking failed. Print out information about the failed
            // dependency and finally fail with an assert. We can fail here, since
            // dependency checking is never done in a product build.
            deps.print_dependency(tty, witness, true);
            changes.print();
            nm->print();
            nm->print_dependencies_on(tty);
            assert(false, "Should have been marked for deoptimization");
          }
        }
      }
    }
  }
}
#endif

void CodeCache::mark_for_deoptimization(DeoptimizationScope* deopt_scope, KlassDepChange& changes) {
  MutexLocker mu(CodeCache_lock, Mutex::_no_safepoint_check_flag);

  // search the hierarchy looking for nmethods which are affected by the loading of this class

  // then search the interfaces this class implements looking for nmethods
  // which might be dependent of the fact that an interface only had one
  // implementor.
  // nmethod::check_all_dependencies works only correctly, if no safepoint
  // can happen
  NoSafepointVerifier nsv;
  for (DepChange::ContextStream str(changes, nsv); str.next(); ) {
    InstanceKlass* d = str.klass();
    {
      LogStreamHandle(Trace, dependencies) log;
      if (log.is_enabled()) {
        log.print("Processing context ");
        d->name()->print_value_on(&log);
      }
    }
    d->mark_dependent_nmethods(deopt_scope, changes);
  }

#ifndef PRODUCT
  if (VerifyDependencies) {
    // Object pointers are used as unique identifiers for dependency arguments. This
    // is only possible if no safepoint, i.e., GC occurs during the verification code.
    dependentCheckTime.start();
    check_live_nmethods_dependencies(changes);
    dependentCheckTime.stop();
  }
#endif
}

#if INCLUDE_JVMTI
// RedefineClasses support for saving nmethods that are dependent on "old" methods.
// We don't really expect this table to grow very large. If it does, it can become a hashtable.
static GrowableArray<nmethod*>* old_nmethod_table = nullptr;

static void add_to_old_table(nmethod* c) {
  if (old_nmethod_table == nullptr) {
    old_nmethod_table = new (mtCode) GrowableArray<nmethod*>(100, mtCode);
  }
  old_nmethod_table->push(c);
}

static void reset_old_method_table() {
  if (old_nmethod_table != nullptr) {
    delete old_nmethod_table;
    old_nmethod_table = nullptr;
  }
}

// Remove this method when flushed.
void CodeCache::unregister_old_nmethod(nmethod* c) {
  assert_lock_strong(CodeCache_lock);
  if (old_nmethod_table != nullptr) {
    int index = old_nmethod_table->find(c);
    if (index != -1) {
      old_nmethod_table->delete_at(index);
    }
  }
}

void CodeCache::old_nmethods_do(MetadataClosure* f) {
  // Walk old method table and mark those on stack.
  int length = 0;
  if (old_nmethod_table != nullptr) {
    length = old_nmethod_table->length();
    for (int i = 0; i < length; i++) {
      // Walk all methods saved on the last pass. Concurrent class unloading may
      // also be looking at this method's metadata, so don't delete it yet if
      // it is marked as unloaded.
      old_nmethod_table->at(i)->metadata_do(f);
    }
  }
  log_debug(redefine, class, nmethod)("Walked %d nmethods for mark_on_stack", length);
}

// Walk compiled methods and mark dependent methods for deoptimization.
void CodeCache::mark_dependents_for_evol_deoptimization(DeoptimizationScope* deopt_scope) {
  assert(SafepointSynchronize::is_at_safepoint(), "Can only do this at a safepoint!");
  // Each redefinition creates a new set of nmethods that have references to "old" Methods
  // So delete old method table and create a new one.
  reset_old_method_table();

  NMethodIterator iter(NMethodIterator::all);
  while(iter.next()) {
    nmethod* nm = iter.method();
    // Walk all alive nmethods to check for old Methods.
    // This includes methods whose inline caches point to old methods, so
    // inline cache clearing is unnecessary.
    if (nm->has_evol_metadata()) {
      deopt_scope->mark(nm);
      add_to_old_table(nm);
    }
  }
}

void CodeCache::mark_all_nmethods_for_evol_deoptimization(DeoptimizationScope* deopt_scope) {
  assert(SafepointSynchronize::is_at_safepoint(), "Can only do this at a safepoint!");
  NMethodIterator iter(NMethodIterator::all);
  while(iter.next()) {
    nmethod* nm = iter.method();
    if (!nm->method()->is_method_handle_intrinsic()) {
      if (nm->can_be_deoptimized()) {
        deopt_scope->mark(nm);
      }
      if (nm->has_evol_metadata()) {
        add_to_old_table(nm);
      }
    }
  }
}

#endif // INCLUDE_JVMTI

// Mark methods for deopt (if safe or possible).
void CodeCache::mark_all_nmethods_for_deoptimization(DeoptimizationScope* deopt_scope) {
  MutexLocker mu(CodeCache_lock, Mutex::_no_safepoint_check_flag);
  NMethodIterator iter(NMethodIterator::not_unloading);
  while(iter.next()) {
    nmethod* nm = iter.method();
    if (!nm->is_native_method()) {
      deopt_scope->mark(nm);
    }
  }
}

void CodeCache::mark_for_deoptimization(DeoptimizationScope* deopt_scope, Method* dependee) {
  MutexLocker mu(CodeCache_lock, Mutex::_no_safepoint_check_flag);

  NMethodIterator iter(NMethodIterator::not_unloading);
  while(iter.next()) {
    nmethod* nm = iter.method();
    if (nm->is_dependent_on_method(dependee)) {
      deopt_scope->mark(nm);
    }
  }
}

void CodeCache::make_marked_nmethods_deoptimized() {
  RelaxedNMethodIterator iter(RelaxedNMethodIterator::not_unloading);
  while(iter.next()) {
    nmethod* nm = iter.method();
    if (nm->is_marked_for_deoptimization() && !nm->has_been_deoptimized() && nm->can_be_deoptimized()) {
      nm->make_not_entrant("marked for deoptimization");
      nm->make_deoptimized();
    }
  }
}

// Marks compiled methods dependent on dependee.
void CodeCache::mark_dependents_on(DeoptimizationScope* deopt_scope, InstanceKlass* dependee) {
  assert_lock_strong(Compile_lock);

  if (!has_nmethods_with_dependencies()) {
    return;
  }

  if (dependee->is_linked()) {
    // Class initialization state change.
    KlassInitDepChange changes(dependee);
    mark_for_deoptimization(deopt_scope, changes);
  } else {
    // New class is loaded.
    NewKlassDepChange changes(dependee);
    mark_for_deoptimization(deopt_scope, changes);
  }
}

// Marks compiled methods dependent on dependee
void CodeCache::mark_dependents_on_method_for_breakpoint(const methodHandle& m_h) {
  assert(SafepointSynchronize::is_at_safepoint(), "invariant");

  DeoptimizationScope deopt_scope;
  // Compute the dependent nmethods
  mark_for_deoptimization(&deopt_scope, m_h());
  deopt_scope.deoptimize_marked();
}

void CodeCache::verify() {
  assert_locked_or_safepoint(CodeCache_lock);
  FOR_ALL_HEAPS(heap) {
    (*heap)->verify();
    FOR_ALL_BLOBS(cb, *heap) {
      cb->verify();
    }
  }
}

// A CodeHeap is full. Print out warning and report event.
PRAGMA_DIAG_PUSH
PRAGMA_FORMAT_NONLITERAL_IGNORED
void CodeCache::report_codemem_full(CodeBlobType code_blob_type, bool print) {
  // Get nmethod heap for the given CodeBlobType and build CodeCacheFull event
  CodeHeap* heap = get_code_heap(code_blob_type);
  assert(heap != nullptr, "heap is null");

  int full_count = heap->report_full();

  if ((full_count == 1) || print) {
    // Not yet reported for this heap, report
    if (SegmentedCodeCache) {
      ResourceMark rm;
      stringStream msg1_stream, msg2_stream;
      msg1_stream.print("%s is full. Compiler has been disabled.",
                        get_code_heap_name(code_blob_type));
      msg2_stream.print("Try increasing the code heap size using -XX:%s=",
                        get_code_heap_flag_name(code_blob_type));
      const char *msg1 = msg1_stream.as_string();
      const char *msg2 = msg2_stream.as_string();

      log_warning(codecache)("%s", msg1);
      log_warning(codecache)("%s", msg2);
      warning("%s", msg1);
      warning("%s", msg2);
    } else {
      const char *msg1 = "CodeCache is full. Compiler has been disabled.";
      const char *msg2 = "Try increasing the code cache size using -XX:ReservedCodeCacheSize=";

      log_warning(codecache)("%s", msg1);
      log_warning(codecache)("%s", msg2);
      warning("%s", msg1);
      warning("%s", msg2);
    }
    stringStream s;
    // Dump code cache into a buffer before locking the tty.
    {
      MutexLocker mu(CodeCache_lock, Mutex::_no_safepoint_check_flag);
      print_summary(&s);
    }
    {
      ttyLocker ttyl;
      tty->print("%s", s.freeze());
    }

    if (full_count == 1) {
      if (PrintCodeHeapAnalytics) {
        CompileBroker::print_heapinfo(tty, "all", 4096); // details, may be a lot!
      }
    }
  }

  EventCodeCacheFull event;
  if (event.should_commit()) {
    event.set_codeBlobType((u1)code_blob_type);
    event.set_startAddress((u8)heap->low_boundary());
    event.set_commitedTopAddress((u8)heap->high());
    event.set_reservedTopAddress((u8)heap->high_boundary());
    event.set_entryCount(heap->blob_count());
    event.set_methodCount(heap->nmethod_count());
    event.set_adaptorCount(heap->adapter_count());
    event.set_unallocatedCapacity(heap->unallocated_capacity());
    event.set_fullCount(heap->full_count());
    event.set_codeCacheMaxCapacity(CodeCache::max_capacity());
    event.commit();
  }
}
PRAGMA_DIAG_POP

void CodeCache::print_memory_overhead() {
  size_t wasted_bytes = 0;
  FOR_ALL_ALLOCABLE_HEAPS(heap) {
    CodeHeap* curr_heap = *heap;
    for (CodeBlob* cb = (CodeBlob*)curr_heap->first(); cb != nullptr; cb = (CodeBlob*)curr_heap->next(cb)) {
      HeapBlock* heap_block = ((HeapBlock*)cb) - 1;
      wasted_bytes += heap_block->length() * CodeCacheSegmentSize - cb->size();
    }
  }
  // Print bytes that are allocated in the freelist
  ttyLocker ttl;
  tty->print_cr("Number of elements in freelist: %zd", freelists_length());
  tty->print_cr("Allocated in freelist: %zdkB", bytes_allocated_in_freelists()/K);
  tty->print_cr("Unused bytes in CodeBlobs: %zdkB", (wasted_bytes/K));
  tty->print_cr("Segment map size: %zdkB", allocated_segments()/K); // 1 byte per segment
}

static void print_helper1(outputStream* st, const char* prefix, int total, int not_entrant, int used) {
  if (total > 0) {
    double ratio = (100.0 * used) / total;
    st->print("%s %3d nmethods: %3d not_entrant, %d used (%2.1f%%)", prefix, total, not_entrant, used, ratio);
  }
}

void CodeCache::print_nmethod_statistics_on(outputStream* st) {
  int stats [2][6][3][2] = {0};
  int stats_used[2][6][3][2] = {0};

  int total_osr = 0;
  int total_entrant = 0;
  int total_non_entrant = 0;
  int total_other = 0;
  int total_used = 0;

  NMethodIterator iter(NMethodIterator::all);
  while (iter.next()) {
    nmethod* nm = iter.method();
    if (nm->is_in_use()) {
      ++total_entrant;
    } else if (nm->is_not_entrant()) {
      ++total_non_entrant;
    } else {
      ++total_other;
    }
    if (nm->is_osr_method()) {
      ++total_osr;
    }
    if (nm->used()) {
      ++total_used;
    }
    assert(!nm->preloaded() || nm->comp_level() == CompLevel_full_optimization, "");

    int idx1 = nm->is_aot() ? 1 : 0;
    int idx2 = nm->comp_level() + (nm->preloaded() ? 1 : 0);
    int idx3 = (nm->is_in_use() ? 0 :
               (nm->is_not_entrant() ? 1 :
                2));
    int idx4 = (nm->is_osr_method() ? 1 : 0);
1 : 0); 1567 stats[idx1][idx2][idx3][idx4] += 1; 1568 if (nm->used()) { 1569 stats_used[idx1][idx2][idx3][idx4] += 1; 1570 } 1571 } 1572 1573 st->print("Total: %d methods (%d entrant / %d not_entrant; osr: %d ", 1574 total_entrant + total_non_entrant + total_other, 1575 total_entrant, total_non_entrant, total_osr); 1576 if (total_other > 0) { 1577 st->print("; %d other", total_other); 1578 } 1579 st->print_cr(")"); 1580 1581 for (int i = CompLevel_simple; i <= CompLevel_full_optimization; i++) { 1582 int total_normal = stats[0][i][0][0] + stats[0][i][1][0] + stats[0][i][2][0]; 1583 int total_osr = stats[0][i][0][1] + stats[0][i][1][1] + stats[0][i][2][1]; 1584 if (total_normal + total_osr > 0) { 1585 st->print(" Tier%d:", i); 1586 print_helper1(st, "", total_normal, stats[0][i][1][0], stats_used[0][i][0][0] + stats_used[0][i][1][0]); 1587 print_helper1(st, "; osr:", total_osr, stats[0][i][1][1], stats_used[0][i][0][1] + stats_used[0][i][1][1]); 1588 st->cr(); 1589 } 1590 } 1591 st->cr(); 1592 for (int i = CompLevel_simple; i <= CompLevel_full_optimization + 1; i++) { 1593 int total_normal = stats[1][i][0][0] + stats[1][i][1][0] + stats[1][i][2][0]; 1594 int total_osr = stats[1][i][0][1] + stats[1][i][1][1] + stats[1][i][2][1]; 1595 assert(total_osr == 0, "sanity"); 1596 if (total_normal + total_osr > 0) { 1597 st->print(" AOT Code T%d:", i); 1598 print_helper1(st, "", total_normal, stats[1][i][1][0], stats_used[1][i][0][0] + stats_used[1][i][1][0]); 1599 print_helper1(st, "; osr:", total_osr, stats[1][i][1][1], stats_used[1][i][0][1] + stats_used[1][i][1][1]); 1600 st->cr(); 1601 } 1602 } 1603 } 1604 1605 //------------------------------------------------------------------------------------------------ 1606 // Non-product version 1607 1608 #ifndef PRODUCT 1609 1610 void CodeCache::print_trace(const char* event, CodeBlob* cb, uint size) { 1611 if (PrintCodeCache2) { // Need to add a new flag 1612 ResourceMark rm; 1613 if (size == 0) { 1614 int s = cb->size(); 1615 assert(s >= 0, "CodeBlob size is negative: %d", s); 1616 size = (uint) s; 1617 } 1618 tty->print_cr("CodeCache %s: addr: " INTPTR_FORMAT ", size: 0x%x", event, p2i(cb), size); 1619 } 1620 } 1621 1622 void CodeCache::print_internals() { 1623 int nmethodCount = 0; 1624 int runtimeStubCount = 0; 1625 int upcallStubCount = 0; 1626 int adapterCount = 0; 1627 int mhAdapterCount = 0; 1628 int vtableBlobCount = 0; 1629 int deoptimizationStubCount = 0; 1630 int uncommonTrapStubCount = 0; 1631 int exceptionStubCount = 0; 1632 int safepointStubCount = 0; 1633 int bufferBlobCount = 0; 1634 int total = 0; 1635 int nmethodNotEntrant = 0; 1636 int nmethodJava = 0; 1637 int nmethodNative = 0; 1638 int max_nm_size = 0; 1639 ResourceMark rm; 1640 1641 int i = 0; 1642 FOR_ALL_ALLOCABLE_HEAPS(heap) { 1643 int heap_total = 0; 1644 tty->print_cr("-- %s --", (*heap)->name()); 1645 FOR_ALL_BLOBS(cb, *heap) { 1646 total++; 1647 heap_total++; 1648 if (cb->is_nmethod()) { 1649 nmethod* nm = (nmethod*)cb; 1650 1651 tty->print("%4d: ", heap_total); 1652 CompileTask::print(tty, nm, (nm->is_not_entrant() ? 
"non-entrant" : ""), true, true); 1653 1654 nmethodCount++; 1655 1656 if(nm->is_not_entrant()) { nmethodNotEntrant++; } 1657 if(nm->method() != nullptr && nm->is_native_method()) { nmethodNative++; } 1658 1659 if(nm->method() != nullptr && nm->is_java_method()) { 1660 nmethodJava++; 1661 max_nm_size = MAX2(max_nm_size, nm->size()); 1662 } 1663 } else if (cb->is_runtime_stub()) { 1664 runtimeStubCount++; 1665 } else if (cb->is_upcall_stub()) { 1666 upcallStubCount++; 1667 } else if (cb->is_deoptimization_stub()) { 1668 deoptimizationStubCount++; 1669 } else if (cb->is_uncommon_trap_stub()) { 1670 uncommonTrapStubCount++; 1671 } else if (cb->is_exception_stub()) { 1672 exceptionStubCount++; 1673 } else if (cb->is_safepoint_stub()) { 1674 safepointStubCount++; 1675 } else if (cb->is_adapter_blob()) { 1676 adapterCount++; 1677 } else if (cb->is_method_handles_adapter_blob()) { 1678 mhAdapterCount++; 1679 } else if (cb->is_vtable_blob()) { 1680 vtableBlobCount++; 1681 } else if (cb->is_buffer_blob()) { 1682 bufferBlobCount++; 1683 } 1684 } 1685 } 1686 1687 int bucketSize = 512; 1688 int bucketLimit = max_nm_size / bucketSize + 1; 1689 int *buckets = NEW_C_HEAP_ARRAY(int, bucketLimit, mtCode); 1690 memset(buckets, 0, sizeof(int) * bucketLimit); 1691 1692 NMethodIterator iter(NMethodIterator::all); 1693 while(iter.next()) { 1694 nmethod* nm = iter.method(); 1695 if(nm->method() != nullptr && nm->is_java_method()) { 1696 buckets[nm->size() / bucketSize]++; 1697 } 1698 } 1699 1700 tty->print_cr("Code Cache Entries (total of %d)",total); 1701 tty->print_cr("-------------------------------------------------"); 1702 tty->print_cr("nmethods: %d",nmethodCount); 1703 tty->print_cr("\tnot_entrant: %d",nmethodNotEntrant); 1704 tty->print_cr("\tjava: %d",nmethodJava); 1705 tty->print_cr("\tnative: %d",nmethodNative); 1706 tty->print_cr("runtime_stubs: %d",runtimeStubCount); 1707 tty->print_cr("upcall_stubs: %d",upcallStubCount); 1708 tty->print_cr("adapters: %d",adapterCount); 1709 tty->print_cr("MH adapters: %d",mhAdapterCount); 1710 tty->print_cr("VTables: %d",vtableBlobCount); 1711 tty->print_cr("buffer blobs: %d",bufferBlobCount); 1712 tty->print_cr("deoptimization_stubs: %d",deoptimizationStubCount); 1713 tty->print_cr("uncommon_traps: %d",uncommonTrapStubCount); 1714 tty->print_cr("exception_stubs: %d",exceptionStubCount); 1715 tty->print_cr("safepoint_stubs: %d",safepointStubCount); 1716 tty->print_cr("\nnmethod size distribution"); 1717 tty->print_cr("-------------------------------------------------"); 1718 1719 for(int i=0; i<bucketLimit; i++) { 1720 if(buckets[i] != 0) { 1721 tty->print("%d - %d bytes",i*bucketSize,(i+1)*bucketSize); 1722 tty->fill_to(40); 1723 tty->print_cr("%d",buckets[i]); 1724 } 1725 } 1726 1727 FREE_C_HEAP_ARRAY(int, buckets); 1728 print_memory_overhead(); 1729 } 1730 1731 #endif // !PRODUCT 1732 1733 void CodeCache::print() { 1734 print_summary(tty); 1735 1736 #ifndef PRODUCT 1737 if (!Verbose) return; 1738 1739 CodeBlob_sizes live[CompLevel_full_optimization + 1]; 1740 CodeBlob_sizes runtimeStub; 1741 CodeBlob_sizes upcallStub; 1742 CodeBlob_sizes uncommonTrapStub; 1743 CodeBlob_sizes deoptimizationStub; 1744 CodeBlob_sizes exceptionStub; 1745 CodeBlob_sizes safepointStub; 1746 CodeBlob_sizes adapter; 1747 CodeBlob_sizes mhAdapter; 1748 CodeBlob_sizes vtableBlob; 1749 CodeBlob_sizes bufferBlob; 1750 CodeBlob_sizes other; 1751 1752 FOR_ALL_ALLOCABLE_HEAPS(heap) { 1753 FOR_ALL_BLOBS(cb, *heap) { 1754 if (cb->is_nmethod()) { 1755 const int level = 
cb->as_nmethod()->comp_level(); 1756 assert(0 <= level && level <= CompLevel_full_optimization, "Invalid compilation level"); 1757 live[level].add(cb); 1758 } else if (cb->is_runtime_stub()) { 1759 runtimeStub.add(cb); 1760 } else if (cb->is_upcall_stub()) { 1761 upcallStub.add(cb); 1762 } else if (cb->is_deoptimization_stub()) { 1763 deoptimizationStub.add(cb); 1764 } else if (cb->is_uncommon_trap_stub()) { 1765 uncommonTrapStub.add(cb); 1766 } else if (cb->is_exception_stub()) { 1767 exceptionStub.add(cb); 1768 } else if (cb->is_safepoint_stub()) { 1769 safepointStub.add(cb); 1770 } else if (cb->is_adapter_blob()) { 1771 adapter.add(cb); 1772 } else if (cb->is_method_handles_adapter_blob()) { 1773 mhAdapter.add(cb); 1774 } else if (cb->is_vtable_blob()) { 1775 vtableBlob.add(cb); 1776 } else if (cb->is_buffer_blob()) { 1777 bufferBlob.add(cb); 1778 } else { 1779 other.add(cb); 1780 } 1781 } 1782 } 1783 1784 tty->print_cr("nmethod dependency checking time %fs", dependentCheckTime.seconds()); 1785 1786 tty->print_cr("nmethod blobs per compilation level:"); 1787 for (int i = 0; i <= CompLevel_full_optimization; i++) { 1788 const char *level_name; 1789 switch (i) { 1790 case CompLevel_none: level_name = "none"; break; 1791 case CompLevel_simple: level_name = "simple"; break; 1792 case CompLevel_limited_profile: level_name = "limited profile"; break; 1793 case CompLevel_full_profile: level_name = "full profile"; break; 1794 case CompLevel_full_optimization: level_name = "full optimization"; break; 1795 default: assert(false, "invalid compilation level"); 1796 } 1797 tty->print_cr("%s:", level_name); 1798 live[i].print("live"); 1799 } 1800 1801 struct { 1802 const char* name; 1803 const CodeBlob_sizes* sizes; 1804 } non_nmethod_blobs[] = { 1805 { "runtime", &runtimeStub }, 1806 { "upcall", &upcallStub }, 1807 { "uncommon trap", &uncommonTrapStub }, 1808 { "deoptimization", &deoptimizationStub }, 1809 { "exception", &exceptionStub }, 1810 { "safepoint", &safepointStub }, 1811 { "adapter", &adapter }, 1812 { "mh_adapter", &mhAdapter }, 1813 { "vtable", &vtableBlob }, 1814 { "buffer blob", &bufferBlob }, 1815 { "other", &other }, 1816 }; 1817 tty->print_cr("Non-nmethod blobs:"); 1818 for (auto& blob: non_nmethod_blobs) { 1819 blob.sizes->print(blob.name); 1820 } 1821 1822 if (WizardMode) { 1823 // print the oop_map usage 1824 int code_size = 0; 1825 int number_of_blobs = 0; 1826 int number_of_oop_maps = 0; 1827 int map_size = 0; 1828 FOR_ALL_ALLOCABLE_HEAPS(heap) { 1829 FOR_ALL_BLOBS(cb, *heap) { 1830 number_of_blobs++; 1831 code_size += cb->code_size(); 1832 ImmutableOopMapSet* set = cb->oop_maps(); 1833 if (set != nullptr) { 1834 number_of_oop_maps += set->count(); 1835 map_size += set->nr_of_bytes(); 1836 } 1837 } 1838 } 1839 tty->print_cr("OopMaps"); 1840 tty->print_cr(" #blobs = %d", number_of_blobs); 1841 tty->print_cr(" code size = %d", code_size); 1842 tty->print_cr(" #oop_maps = %d", number_of_oop_maps); 1843 tty->print_cr(" map size = %d", map_size); 1844 } 1845 1846 #endif // !PRODUCT 1847 } 1848 1849 void CodeCache::print_nmethods_on(outputStream* st) { 1850 ResourceMark rm; 1851 int i = 0; 1852 FOR_ALL_ALLOCABLE_HEAPS(heap) { 1853 st->print_cr("-- %s --", (*heap)->name()); 1854 FOR_ALL_BLOBS(cb, *heap) { 1855 i++; 1856 if (cb->is_nmethod()) { 1857 nmethod* nm = (nmethod*)cb; 1858 st->print("%4d: ", i); 1859 CompileTask::print(st, nm, nullptr, true, false); 1860 1861 const char non_entrant_char = (nm->is_not_entrant() ? 
'N' : ' '); 1862 st->print_cr(" %c", non_entrant_char); 1863 } 1864 } 1865 } 1866 } 1867 1868 void CodeCache::print_summary(outputStream* st, bool detailed) { 1869 int full_count = 0; 1870 julong total_used = 0; 1871 julong total_max_used = 0; 1872 julong total_free = 0; 1873 julong total_size = 0; 1874 FOR_ALL_HEAPS(heap_iterator) { 1875 CodeHeap* heap = (*heap_iterator); 1876 size_t total = (heap->high_boundary() - heap->low_boundary()); 1877 if (_heaps->length() >= 1) { 1878 st->print("%s:", heap->name()); 1879 } else { 1880 st->print("CodeCache:"); 1881 } 1882 size_t size = total/K; 1883 size_t used = (total - heap->unallocated_capacity())/K; 1884 size_t max_used = heap->max_allocated_capacity()/K; 1885 size_t free = heap->unallocated_capacity()/K; 1886 total_size += size; 1887 total_used += used; 1888 total_max_used += max_used; 1889 total_free += free; 1890 st->print_cr(" size=%zuKb used=%zu" 1891 "Kb max_used=%zuKb free=%zuKb", 1892 size, used, max_used, free); 1893 1894 if (detailed) { 1895 st->print_cr(" bounds [" INTPTR_FORMAT ", " INTPTR_FORMAT ", " INTPTR_FORMAT "]", 1896 p2i(heap->low_boundary()), 1897 p2i(heap->high()), 1898 p2i(heap->high_boundary())); 1899 1900 full_count += get_codemem_full_count(heap->code_blob_type()); 1901 } 1902 } 1903 1904 if (detailed) { 1905 if (SegmentedCodeCache) { 1906 st->print("CodeCache:"); 1907 st->print_cr(" size=" JULONG_FORMAT "Kb, used=" JULONG_FORMAT 1908 "Kb, max_used=" JULONG_FORMAT "Kb, free=" JULONG_FORMAT "Kb", 1909 total_size, total_used, total_max_used, total_free); 1910 } 1911 st->print_cr(" total_blobs=" UINT32_FORMAT ", nmethods=" UINT32_FORMAT 1912 ", adapters=" UINT32_FORMAT ", full_count=" UINT32_FORMAT, 1913 blob_count(), nmethod_count(), adapter_count(), full_count); 1914 st->print_cr("Compilation: %s, stopped_count=%d, restarted_count=%d", 1915 CompileBroker::should_compile_new_jobs() ? 1916 "enabled" : Arguments::mode() == Arguments::_int ? 1917 "disabled (interpreter mode)" : 1918 "disabled (not enough contiguous free space left)", 1919 CompileBroker::get_total_compiler_stopped_count(), 1920 CompileBroker::get_total_compiler_restarted_count()); 1921 } 1922 } 1923 1924 void CodeCache::print_codelist(outputStream* st) { 1925 MutexLocker mu(CodeCache_lock, Mutex::_no_safepoint_check_flag); 1926 1927 NMethodIterator iter(NMethodIterator::not_unloading); 1928 while (iter.next()) { 1929 nmethod* nm = iter.method(); 1930 ResourceMark rm; 1931 char* method_name = nm->method()->name_and_sig_as_C_string(); 1932 const char* jvmci_name = nullptr; 1933 #if INCLUDE_JVMCI 1934 jvmci_name = nm->jvmci_name(); 1935 #endif 1936 st->print_cr("%d %d %d %s%s%s [" INTPTR_FORMAT ", " INTPTR_FORMAT " - " INTPTR_FORMAT "]", 1937 nm->compile_id(), nm->comp_level(), nm->get_state(), 1938 method_name, jvmci_name ? " jvmci_name=" : "", jvmci_name ? 
jvmci_name : "", 1939 (intptr_t)nm->header_begin(), (intptr_t)nm->code_begin(), (intptr_t)nm->code_end()); 1940 } 1941 } 1942 1943 void CodeCache::print_layout(outputStream* st) { 1944 MutexLocker mu(CodeCache_lock, Mutex::_no_safepoint_check_flag); 1945 ResourceMark rm; 1946 print_summary(st, true); 1947 } 1948 1949 void CodeCache::log_state(outputStream* st) { 1950 st->print(" total_blobs='" UINT32_FORMAT "' nmethods='" UINT32_FORMAT "'" 1951 " adapters='" UINT32_FORMAT "' free_code_cache='%zu'", 1952 blob_count(), nmethod_count(), adapter_count(), 1953 unallocated_capacity()); 1954 } 1955 1956 #ifdef LINUX 1957 void CodeCache::write_perf_map(const char* filename, outputStream* st) { 1958 MutexLocker mu(CodeCache_lock, Mutex::_no_safepoint_check_flag); 1959 char fname[JVM_MAXPATHLEN]; 1960 if (filename == nullptr) { 1961 // Invocation outside of jcmd requires pid substitution. 1962 if (!Arguments::copy_expand_pid(DEFAULT_PERFMAP_FILENAME, 1963 strlen(DEFAULT_PERFMAP_FILENAME), 1964 fname, JVM_MAXPATHLEN)) { 1965 st->print_cr("Warning: Not writing perf map as pid substitution failed."); 1966 return; 1967 } 1968 filename = fname; 1969 } 1970 fileStream fs(filename, "w"); 1971 if (!fs.is_open()) { 1972 st->print_cr("Warning: Failed to create %s for perf map", filename); 1973 return; 1974 } 1975 1976 AllCodeBlobsIterator iter(AllCodeBlobsIterator::not_unloading); 1977 while (iter.next()) { 1978 CodeBlob *cb = iter.method(); 1979 ResourceMark rm; 1980 const char* method_name = nullptr; 1981 const char* jvmci_name = nullptr; 1982 if (cb->is_nmethod()) { 1983 nmethod* nm = cb->as_nmethod(); 1984 method_name = nm->method()->external_name(); 1985 #if INCLUDE_JVMCI 1986 jvmci_name = nm->jvmci_name(); 1987 #endif 1988 } else { 1989 method_name = cb->name(); 1990 } 1991 fs.print_cr(INTPTR_FORMAT " " INTPTR_FORMAT " %s%s%s", 1992 (intptr_t)cb->code_begin(), (intptr_t)cb->code_size(), 1993 method_name, jvmci_name ? " jvmci_name=" : "", jvmci_name ? jvmci_name : ""); 1994 } 1995 } 1996 #endif // LINUX 1997 1998 //---< BEGIN >--- CodeHeap State Analytics. 1999 2000 void CodeCache::aggregate(outputStream *out, size_t granularity) { 2001 FOR_ALL_ALLOCABLE_HEAPS(heap) { 2002 CodeHeapState::aggregate(out, (*heap), granularity); 2003 } 2004 } 2005 2006 void CodeCache::discard(outputStream *out) { 2007 FOR_ALL_ALLOCABLE_HEAPS(heap) { 2008 CodeHeapState::discard(out, (*heap)); 2009 } 2010 } 2011 2012 void CodeCache::print_usedSpace(outputStream *out) { 2013 FOR_ALL_ALLOCABLE_HEAPS(heap) { 2014 CodeHeapState::print_usedSpace(out, (*heap)); 2015 } 2016 } 2017 2018 void CodeCache::print_freeSpace(outputStream *out) { 2019 FOR_ALL_ALLOCABLE_HEAPS(heap) { 2020 CodeHeapState::print_freeSpace(out, (*heap)); 2021 } 2022 } 2023 2024 void CodeCache::print_count(outputStream *out) { 2025 FOR_ALL_ALLOCABLE_HEAPS(heap) { 2026 CodeHeapState::print_count(out, (*heap)); 2027 } 2028 } 2029 2030 void CodeCache::print_space(outputStream *out) { 2031 FOR_ALL_ALLOCABLE_HEAPS(heap) { 2032 CodeHeapState::print_space(out, (*heap)); 2033 } 2034 } 2035 2036 void CodeCache::print_age(outputStream *out) { 2037 FOR_ALL_ALLOCABLE_HEAPS(heap) { 2038 CodeHeapState::print_age(out, (*heap)); 2039 } 2040 } 2041 2042 void CodeCache::print_names(outputStream *out) { 2043 FOR_ALL_ALLOCABLE_HEAPS(heap) { 2044 CodeHeapState::print_names(out, (*heap)); 2045 } 2046 } 2047 //---< END >--- CodeHeap State Analytics.