1 /* 2 * Copyright (c) 1997, 2024, Oracle and/or its affiliates. All rights reserved. 3 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. 4 * 5 * This code is free software; you can redistribute it and/or modify it 6 * under the terms of the GNU General Public License version 2 only, as 7 * published by the Free Software Foundation. 8 * 9 * This code is distributed in the hope that it will be useful, but WITHOUT 10 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or 11 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License 12 * version 2 for more details (a copy is included in the LICENSE file that 13 * accompanied this code). 14 * 15 * You should have received a copy of the GNU General Public License version 16 * 2 along with this work; if not, write to the Free Software Foundation, 17 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. 18 * 19 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA 20 * or visit www.oracle.com if you need additional information or have any 21 * questions. 22 * 23 */ 24 25 #include "precompiled.hpp" 26 #include "code/codeBlob.hpp" 27 #include "code/codeCache.hpp" 28 #include "code/codeHeapState.hpp" 29 #include "code/compiledIC.hpp" 30 #include "code/dependencies.hpp" 31 #include "code/dependencyContext.hpp" 32 #include "code/nmethod.hpp" 33 #include "code/pcDesc.hpp" 34 #include "compiler/compilationPolicy.hpp" 35 #include "compiler/compileBroker.hpp" 36 #include "compiler/compilerDefinitions.inline.hpp" 37 #include "compiler/oopMap.hpp" 38 #include "gc/shared/barrierSetNMethod.hpp" 39 #include "gc/shared/classUnloadingContext.hpp" 40 #include "gc/shared/collectedHeap.hpp" 41 #include "jfr/jfrEvents.hpp" 42 #include "jvm_io.h" 43 #include "logging/log.hpp" 44 #include "logging/logStream.hpp" 45 #include "memory/allocation.inline.hpp" 46 #include "memory/iterator.hpp" 47 #include "memory/resourceArea.hpp" 48 #include "memory/universe.hpp" 49 #include "oops/method.inline.hpp" 50 #include "oops/objArrayOop.hpp" 51 #include "oops/oop.inline.hpp" 52 #include "oops/verifyOopClosure.hpp" 53 #include "runtime/arguments.hpp" 54 #include "runtime/atomic.hpp" 55 #include "runtime/deoptimization.hpp" 56 #include "runtime/globals_extension.hpp" 57 #include "runtime/handles.inline.hpp" 58 #include "runtime/icache.hpp" 59 #include "runtime/init.hpp" 60 #include "runtime/java.hpp" 61 #include "runtime/mutexLocker.hpp" 62 #include "runtime/os.inline.hpp" 63 #include "runtime/safepointVerifiers.hpp" 64 #include "runtime/vmThread.hpp" 65 #include "sanitizers/leak.hpp" 66 #include "services/memoryService.hpp" 67 #include "utilities/align.hpp" 68 #include "utilities/vmError.hpp" 69 #include "utilities/xmlstream.hpp" 70 #ifdef COMPILER1 71 #include "c1/c1_Compilation.hpp" 72 #include "c1/c1_Compiler.hpp" 73 #endif 74 #ifdef COMPILER2 75 #include "opto/c2compiler.hpp" 76 #include "opto/compile.hpp" 77 #include "opto/node.hpp" 78 #endif 79 80 // Helper class for printing in CodeCache 81 class CodeBlob_sizes { 82 private: 83 int count; 84 int total_size; 85 int header_size; 86 int code_size; 87 int stub_size; 88 int relocation_size; 89 int scopes_oop_size; 90 int scopes_metadata_size; 91 int scopes_data_size; 92 int scopes_pcs_size; 93 94 public: 95 CodeBlob_sizes() { 96 count = 0; 97 total_size = 0; 98 header_size = 0; 99 code_size = 0; 100 stub_size = 0; 101 relocation_size = 0; 102 scopes_oop_size = 0; 103 scopes_metadata_size = 0; 104 scopes_data_size = 0; 105 scopes_pcs_size = 0; 106 } 107 108 
int total() const { return total_size; } 109 bool is_empty() const { return count == 0; } 110 111 void print(const char* title) const { 112 if (is_empty()) { 113 tty->print_cr(" #%d %s = %dK", 114 count, 115 title, 116 total() / (int)K); 117 } else { 118 tty->print_cr(" #%d %s = %dK (hdr %dK %d%%, loc %dK %d%%, code %dK %d%%, stub %dK %d%%, [oops %dK %d%%, metadata %dK %d%%, data %dK %d%%, pcs %dK %d%%])", 119 count, 120 title, 121 total() / (int)K, 122 header_size / (int)K, 123 header_size * 100 / total_size, 124 relocation_size / (int)K, 125 relocation_size * 100 / total_size, 126 code_size / (int)K, 127 code_size * 100 / total_size, 128 stub_size / (int)K, 129 stub_size * 100 / total_size, 130 scopes_oop_size / (int)K, 131 scopes_oop_size * 100 / total_size, 132 scopes_metadata_size / (int)K, 133 scopes_metadata_size * 100 / total_size, 134 scopes_data_size / (int)K, 135 scopes_data_size * 100 / total_size, 136 scopes_pcs_size / (int)K, 137 scopes_pcs_size * 100 / total_size); 138 } 139 } 140 141 void add(CodeBlob* cb) { 142 count++; 143 total_size += cb->size(); 144 header_size += cb->header_size(); 145 relocation_size += cb->relocation_size(); 146 if (cb->is_nmethod()) { 147 nmethod* nm = cb->as_nmethod_or_null(); 148 code_size += nm->insts_size(); 149 stub_size += nm->stub_size(); 150 151 scopes_oop_size += nm->oops_size(); 152 scopes_metadata_size += nm->metadata_size(); 153 scopes_data_size += nm->scopes_data_size(); 154 scopes_pcs_size += nm->scopes_pcs_size(); 155 } else { 156 code_size += cb->code_size(); 157 } 158 } 159 }; 160 161 // Iterate over all CodeHeaps 162 #define FOR_ALL_HEAPS(heap) for (GrowableArrayIterator<CodeHeap*> heap = _heaps->begin(); heap != _heaps->end(); ++heap) 163 #define FOR_ALL_ALLOCABLE_HEAPS(heap) for (GrowableArrayIterator<CodeHeap*> heap = _allocable_heaps->begin(); heap != _allocable_heaps->end(); ++heap) 164 165 // Iterate over all CodeBlobs (cb) on the given CodeHeap 166 #define FOR_ALL_BLOBS(cb, heap) for (CodeBlob* cb = first_blob(heap); cb != nullptr; cb = next_blob(heap, cb)) 167 168 address CodeCache::_low_bound = 0; 169 address CodeCache::_high_bound = 0; 170 volatile int CodeCache::_number_of_nmethods_with_dependencies = 0; 171 ExceptionCache* volatile CodeCache::_exception_cache_purge_list = nullptr; 172 173 // Initialize arrays of CodeHeap subsets 174 GrowableArray<CodeHeap*>* CodeCache::_heaps = new(mtCode) GrowableArray<CodeHeap*> (static_cast<int>(CodeBlobType::All), mtCode); 175 GrowableArray<CodeHeap*>* CodeCache::_nmethod_heaps = new(mtCode) GrowableArray<CodeHeap*> (static_cast<int>(CodeBlobType::All), mtCode); 176 GrowableArray<CodeHeap*>* CodeCache::_allocable_heaps = new(mtCode) GrowableArray<CodeHeap*> (static_cast<int>(CodeBlobType::All), mtCode); 177 178 static void check_min_size(const char* codeheap, size_t size, size_t required_size) { 179 if (size < required_size) { 180 log_debug(codecache)("Code heap (%s) size " SIZE_FORMAT "K below required minimal size " SIZE_FORMAT "K", 181 codeheap, size/K, required_size/K); 182 err_msg title("Not enough space in %s to run VM", codeheap); 183 err_msg message(SIZE_FORMAT "K < " SIZE_FORMAT "K", size/K, required_size/K); 184 vm_exit_during_initialization(title, message); 185 } 186 } 187 188 struct CodeHeapInfo { 189 size_t size; 190 bool set; 191 bool enabled; 192 }; 193 194 static void set_size_of_unset_code_heap(CodeHeapInfo* heap, size_t available_size, size_t used_size, size_t min_size) { 195 assert(!heap->set, "sanity"); 196 heap->size = (available_size > (used_size + min_size)) 
                          ? (available_size - used_size) : min_size;
}

void CodeCache::initialize_heaps() {

  CodeHeapInfo non_nmethod = {NonNMethodCodeHeapSize, FLAG_IS_CMDLINE(NonNMethodCodeHeapSize), true};
  CodeHeapInfo profiled = {ProfiledCodeHeapSize, FLAG_IS_CMDLINE(ProfiledCodeHeapSize), true};
  CodeHeapInfo non_profiled = {NonProfiledCodeHeapSize, FLAG_IS_CMDLINE(NonProfiledCodeHeapSize), true};

  const bool cache_size_set = FLAG_IS_CMDLINE(ReservedCodeCacheSize);
  const size_t ps = page_size(false, 8);
  const size_t min_size = MAX2(os::vm_allocation_granularity(), ps);
  const size_t min_cache_size = CodeCacheMinimumUseSpace DEBUG_ONLY(* 3); // Make sure we have enough space for VM internal code
  size_t cache_size = align_up(ReservedCodeCacheSize, min_size);

  // Prerequisites
  if (!heap_available(CodeBlobType::MethodProfiled)) {
    // For compatibility reasons, disabled tiered compilation overrides
    // segment size even if it is set explicitly.
    non_profiled.size += profiled.size;
    // Profiled code heap is not available, forcibly set size to 0
    profiled.size = 0;
    profiled.set = true;
    profiled.enabled = false;
  }

  assert(heap_available(CodeBlobType::MethodNonProfiled), "MethodNonProfiled heap is always available for segmented code heap");

  size_t compiler_buffer_size = 0;
  COMPILER1_PRESENT(compiler_buffer_size += CompilationPolicy::c1_count() * Compiler::code_buffer_size());
  COMPILER2_PRESENT(compiler_buffer_size += CompilationPolicy::c2_count() * C2Compiler::initial_code_buffer_size());

  if (!non_nmethod.set) {
    non_nmethod.size += compiler_buffer_size;
  }

  if (!profiled.set && !non_profiled.set) {
    non_profiled.size = profiled.size = (cache_size > non_nmethod.size + 2 * min_size) ?
                                        (cache_size - non_nmethod.size) / 2 : min_size;
  }

  if (profiled.set && !non_profiled.set) {
    set_size_of_unset_code_heap(&non_profiled, cache_size, non_nmethod.size + profiled.size, min_size);
  }

  if (!profiled.set && non_profiled.set) {
    set_size_of_unset_code_heap(&profiled, cache_size, non_nmethod.size + non_profiled.size, min_size);
  }

  // Compatibility.
  size_t non_nmethod_min_size = min_cache_size + compiler_buffer_size;
  if (!non_nmethod.set && profiled.set && non_profiled.set) {
    set_size_of_unset_code_heap(&non_nmethod, cache_size, profiled.size + non_profiled.size, non_nmethod_min_size);
  }

  size_t total = non_nmethod.size + profiled.size + non_profiled.size;
  if (total != cache_size && !cache_size_set) {
    log_info(codecache)("ReservedCodeCache size " SIZE_FORMAT "K changed to total segments size NonNMethod "
                        SIZE_FORMAT "K NonProfiled " SIZE_FORMAT "K Profiled " SIZE_FORMAT "K = " SIZE_FORMAT "K",
                        cache_size/K, non_nmethod.size/K, non_profiled.size/K, profiled.size/K, total/K);
    // Adjust ReservedCodeCacheSize as necessary because it was not set explicitly
    cache_size = total;
  }

  log_debug(codecache)("Initializing code heaps ReservedCodeCache " SIZE_FORMAT "K NonNMethod " SIZE_FORMAT "K"
                       " NonProfiled " SIZE_FORMAT "K Profiled " SIZE_FORMAT "K",
                       cache_size/K, non_nmethod.size/K, non_profiled.size/K, profiled.size/K);

  // Validation
  // Check minimal required sizes
  check_min_size("non-nmethod code heap", non_nmethod.size, non_nmethod_min_size);
  if (profiled.enabled) {
    check_min_size("profiled code heap", profiled.size, min_size);
  }
  if (non_profiled.enabled) { // non_profiled.enabled is always ON for segmented code heap, leave it checked for clarity
    check_min_size("non-profiled code heap", non_profiled.size, min_size);
  }
  if (cache_size_set) {
    check_min_size("reserved code cache", cache_size, min_cache_size);
  }

  // ReservedCodeCacheSize was set explicitly, so report an error and abort if it doesn't match the segment sizes
  if (total != cache_size && cache_size_set) {
    err_msg message("NonNMethodCodeHeapSize (" SIZE_FORMAT "K)", non_nmethod.size/K);
    if (profiled.enabled) {
      message.append(" + ProfiledCodeHeapSize (" SIZE_FORMAT "K)", profiled.size/K);
    }
    if (non_profiled.enabled) {
      message.append(" + NonProfiledCodeHeapSize (" SIZE_FORMAT "K)", non_profiled.size/K);
    }
    message.append(" = " SIZE_FORMAT "K", total/K);
    message.append((total > cache_size) ? " is greater than " : " is less than ");
    message.append("ReservedCodeCacheSize (" SIZE_FORMAT "K).", cache_size/K);

    vm_exit_during_initialization("Invalid code heap sizes", message);
  }

  // Compatibility. Print warning if using large pages but not able to use the size given
  if (UseLargePages) {
    const size_t lg_ps = page_size(false, 1);
    if (ps < lg_ps) {
      log_warning(codecache)("Code cache size too small for " PROPERFMT " pages. "
                             "Reverting to smaller page size (" PROPERFMT ").",
                             PROPERFMTARGS(lg_ps), PROPERFMTARGS(ps));
    }
  }

  // Note: if large page support is enabled, min_size is at least the large
  // page size. This ensures that the code cache is covered by large pages.
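  // Worked example (illustrative numbers, not defaults): with cache_size = 240M,
  // min_size = 2M and a computed non_nmethod.size of 7M, neither method heap is set
  // on the command line, so each gets (240M - 7M) / 2 = 116.5M above. The alignment
  // step below then rounds non_nmethod down to 6M and profiled down to 116M and adds
  // the two cut-off remainders (1M + 0.5M) to non_profiled, which stays 2M-aligned at
  // 118M, so the three segments still sum to the reserved 240M.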
  non_profiled.size += non_nmethod.size & alignment_mask(min_size);
  non_profiled.size += profiled.size & alignment_mask(min_size);
  non_nmethod.size = align_down(non_nmethod.size, min_size);
  profiled.size = align_down(profiled.size, min_size);
  non_profiled.size = align_down(non_profiled.size, min_size);

  FLAG_SET_ERGO(NonNMethodCodeHeapSize, non_nmethod.size);
  FLAG_SET_ERGO(ProfiledCodeHeapSize, profiled.size);
  FLAG_SET_ERGO(NonProfiledCodeHeapSize, non_profiled.size);
  FLAG_SET_ERGO(ReservedCodeCacheSize, cache_size);

  ReservedCodeSpace rs = reserve_heap_memory(cache_size, ps);

  // Register CodeHeaps with LSan as we sometimes embed pointers to malloc memory.
  LSAN_REGISTER_ROOT_REGION(rs.base(), rs.size());

  size_t offset = 0;
  if (profiled.enabled) {
    ReservedSpace profiled_space = rs.partition(offset, profiled.size);
    offset += profiled.size;
    // Tier 2 and tier 3 (profiled) methods
    add_heap(profiled_space, "CodeHeap 'profiled nmethods'", CodeBlobType::MethodProfiled);
  }

  ReservedSpace non_method_space = rs.partition(offset, non_nmethod.size);
  offset += non_nmethod.size;
  // Non-nmethods (stubs, adapters, ...)
  add_heap(non_method_space, "CodeHeap 'non-nmethods'", CodeBlobType::NonNMethod);

  if (non_profiled.enabled) {
    ReservedSpace non_profiled_space = rs.partition(offset, non_profiled.size);
    // Tier 1 and tier 4 (non-profiled) methods and native methods
    add_heap(non_profiled_space, "CodeHeap 'non-profiled nmethods'", CodeBlobType::MethodNonProfiled);
  }
}

size_t CodeCache::page_size(bool aligned, size_t min_pages) {
  return aligned ? os::page_size_for_region_aligned(ReservedCodeCacheSize, min_pages) :
                   os::page_size_for_region_unaligned(ReservedCodeCacheSize, min_pages);
}

ReservedCodeSpace CodeCache::reserve_heap_memory(size_t size, size_t rs_ps) {
  // Align and reserve space for code cache
  const size_t rs_align = MAX2(rs_ps, os::vm_allocation_granularity());
  const size_t rs_size = align_up(size, rs_align);
  ReservedCodeSpace rs(rs_size, rs_align, rs_ps);
  if (!rs.is_reserved()) {
    vm_exit_during_initialization(err_msg("Could not reserve enough space for code cache (" SIZE_FORMAT "K)",
                                          rs_size/K));
  }

  // Initialize bounds
  _low_bound = (address)rs.base();
  _high_bound = _low_bound + rs.size();
  return rs;
}

// Heaps available for allocation
bool CodeCache::heap_available(CodeBlobType code_blob_type) {
  if (!SegmentedCodeCache) {
    // No segmentation: use a single code heap
    return (code_blob_type == CodeBlobType::All);
  } else if (CompilerConfig::is_interpreter_only()) {
    // Interpreter only: we don't need any method code heaps
    return (code_blob_type == CodeBlobType::NonNMethod);
  } else if (CompilerConfig::is_c1_profiling()) {
    // Tiered compilation: use all code heaps
    return (code_blob_type < CodeBlobType::All);
  } else {
    // No TieredCompilation: we only need the non-nmethod and non-profiled code heap
    return (code_blob_type == CodeBlobType::NonNMethod) ||
           (code_blob_type == CodeBlobType::MethodNonProfiled);
  }
}

const char* CodeCache::get_code_heap_flag_name(CodeBlobType code_blob_type) {
  switch(code_blob_type) {
  case CodeBlobType::NonNMethod:
    return "NonNMethodCodeHeapSize";
    break;
  case CodeBlobType::MethodNonProfiled:
    return "NonProfiledCodeHeapSize";
    break;
  case CodeBlobType::MethodProfiled:
    return "ProfiledCodeHeapSize";
    break;
  default:
    ShouldNotReachHere();
    return nullptr;
  }
}

int CodeCache::code_heap_compare(CodeHeap* const &lhs, CodeHeap* const &rhs) {
  if (lhs->code_blob_type() == rhs->code_blob_type()) {
    return (lhs > rhs) ? 1 : ((lhs < rhs) ? -1 : 0);
  } else {
    return static_cast<int>(lhs->code_blob_type()) - static_cast<int>(rhs->code_blob_type());
  }
}

void CodeCache::add_heap(CodeHeap* heap) {
  assert(!Universe::is_fully_initialized(), "late heap addition?");

  _heaps->insert_sorted<code_heap_compare>(heap);

  CodeBlobType type = heap->code_blob_type();
  if (code_blob_type_accepts_nmethod(type)) {
    _nmethod_heaps->insert_sorted<code_heap_compare>(heap);
  }
  if (code_blob_type_accepts_allocable(type)) {
    _allocable_heaps->insert_sorted<code_heap_compare>(heap);
  }
}

void CodeCache::add_heap(ReservedSpace rs, const char* name, CodeBlobType code_blob_type) {
  // Check if heap is needed
  if (!heap_available(code_blob_type)) {
    return;
  }

  // Create CodeHeap
  CodeHeap* heap = new CodeHeap(name, code_blob_type);
  add_heap(heap);

  // Reserve Space
  size_t size_initial = MIN2((size_t)InitialCodeCacheSize, rs.size());
  size_initial = align_up(size_initial, os::vm_page_size());
  if (!heap->reserve(rs, size_initial, CodeCacheSegmentSize)) {
    vm_exit_during_initialization(err_msg("Could not reserve enough space in %s (" SIZE_FORMAT "K)",
                                          heap->name(), size_initial/K));
  }

  // Register the CodeHeap
  MemoryService::add_code_heap_memory_pool(heap, name);
}

CodeHeap* CodeCache::get_code_heap_containing(void* start) {
  FOR_ALL_HEAPS(heap) {
    if ((*heap)->contains(start)) {
      return *heap;
    }
  }
  return nullptr;
}

CodeHeap* CodeCache::get_code_heap(const void* cb) {
  assert(cb != nullptr, "CodeBlob is null");
  FOR_ALL_HEAPS(heap) {
    if ((*heap)->contains(cb)) {
      return *heap;
    }
  }
  ShouldNotReachHere();
  return nullptr;
}

CodeHeap* CodeCache::get_code_heap(CodeBlobType code_blob_type) {
  FOR_ALL_HEAPS(heap) {
    if ((*heap)->accepts(code_blob_type)) {
      return *heap;
    }
  }
  return nullptr;
}

CodeBlob* CodeCache::first_blob(CodeHeap* heap) {
  assert_locked_or_safepoint(CodeCache_lock);
  assert(heap != nullptr, "heap is null");
  return (CodeBlob*)heap->first();
}

CodeBlob* CodeCache::first_blob(CodeBlobType code_blob_type) {
  if (heap_available(code_blob_type)) {
    return first_blob(get_code_heap(code_blob_type));
  } else {
    return nullptr;
  }
}

CodeBlob* CodeCache::next_blob(CodeHeap* heap, CodeBlob* cb) {
  assert_locked_or_safepoint(CodeCache_lock);
  assert(heap != nullptr, "heap is null");
  return (CodeBlob*)heap->next(cb);
}

/**
 * Do not seize the CodeCache lock here--if the caller has not
 * already done so, we are going to lose bigtime, since the code
 * cache will contain a garbage CodeBlob until the caller can
 * run the constructor for the CodeBlob subclass he is busy
 * instantiating.
 */
CodeBlob* CodeCache::allocate(uint size, CodeBlobType code_blob_type, bool handle_alloc_failure, CodeBlobType orig_code_blob_type) {
  assert_locked_or_safepoint(CodeCache_lock);
  assert(size > 0, "Code cache allocation request must be > 0");
  if (size == 0) {
    return nullptr;
  }
  CodeBlob* cb = nullptr;

  // Get CodeHeap for the given CodeBlobType
  CodeHeap* heap = get_code_heap(code_blob_type);
  assert(heap != nullptr, "heap is null");

  while (true) {
    cb = (CodeBlob*)heap->allocate(size);
    if (cb != nullptr) break;
    if (!heap->expand_by(CodeCacheExpansionSize)) {
      // Save original type for error reporting
      if (orig_code_blob_type == CodeBlobType::All) {
        orig_code_blob_type = code_blob_type;
      }
      // Expansion failed
      if (SegmentedCodeCache) {
        // Fallback solution: Try to store code in another code heap.
        // NonNMethod -> MethodNonProfiled -> MethodProfiled (-> MethodNonProfiled)
        CodeBlobType type = code_blob_type;
        switch (type) {
        case CodeBlobType::NonNMethod:
          type = CodeBlobType::MethodNonProfiled;
          break;
        case CodeBlobType::MethodNonProfiled:
          type = CodeBlobType::MethodProfiled;
          break;
        case CodeBlobType::MethodProfiled:
          // Avoid loop if we already tried that code heap
          if (type == orig_code_blob_type) {
            type = CodeBlobType::MethodNonProfiled;
          }
          break;
        default:
          break;
        }
        if (type != code_blob_type && type != orig_code_blob_type && heap_available(type)) {
          if (PrintCodeCacheExtension) {
            tty->print_cr("Extension of %s failed. Trying to allocate in %s.",
                          heap->name(), get_code_heap(type)->name());
          }
          return allocate(size, type, handle_alloc_failure, orig_code_blob_type);
        }
      }
      if (handle_alloc_failure) {
        MutexUnlocker mu(CodeCache_lock, Mutex::_no_safepoint_check_flag);
        CompileBroker::handle_full_code_cache(orig_code_blob_type);
      }
      return nullptr;
    } else {
      OrderAccess::release(); // ensure heap expansion is visible to an asynchronous observer (e.g. CodeHeapPool::get_memory_usage())
    }
    if (PrintCodeCacheExtension) {
      ResourceMark rm;
      if (_nmethod_heaps->length() >= 1) {
        tty->print("%s", heap->name());
      } else {
        tty->print("CodeCache");
      }
      tty->print_cr(" extended to [" INTPTR_FORMAT ", " INTPTR_FORMAT "] (" SSIZE_FORMAT " bytes)",
                    (intptr_t)heap->low_boundary(), (intptr_t)heap->high(),
                    (address)heap->high() - (address)heap->low_boundary());
    }
  }
  print_trace("allocation", cb, size);
  return cb;
}

void CodeCache::free(CodeBlob* cb) {
  assert_locked_or_safepoint(CodeCache_lock);
  CodeHeap* heap = get_code_heap(cb);
  print_trace("free", cb);
  if (cb->is_nmethod()) {
    heap->set_nmethod_count(heap->nmethod_count() - 1);
    if (((nmethod *)cb)->has_dependencies()) {
      Atomic::dec(&_number_of_nmethods_with_dependencies);
    }
  }
  if (cb->is_adapter_blob()) {
    heap->set_adapter_count(heap->adapter_count() - 1);
  }

  cb->~CodeBlob();
  // Get heap for given CodeBlob and deallocate
  heap->deallocate(cb);

  assert(heap->blob_count() >= 0, "sanity check");
}

void CodeCache::free_unused_tail(CodeBlob* cb, size_t used) {
  assert_locked_or_safepoint(CodeCache_lock);
  guarantee(cb->is_buffer_blob() && strncmp("Interpreter", cb->name(), 11) == 0, "Only possible for interpreter!");
  print_trace("free_unused_tail", cb);

  // We also have to account for the extra space (i.e.
  // header) used by the CodeBlob
  // which provides the memory (see BufferBlob::create() in codeBlob.cpp).
  used += CodeBlob::align_code_offset(cb->header_size());

  // Get heap for given CodeBlob and deallocate its unused tail
  get_code_heap(cb)->deallocate_tail(cb, used);
  // Adjust the sizes of the CodeBlob
  cb->adjust_size(used);
}

void CodeCache::commit(CodeBlob* cb) {
  // this is called by nmethod::nmethod, which must already own CodeCache_lock
  assert_locked_or_safepoint(CodeCache_lock);
  CodeHeap* heap = get_code_heap(cb);
  if (cb->is_nmethod()) {
    heap->set_nmethod_count(heap->nmethod_count() + 1);
    if (((nmethod *)cb)->has_dependencies()) {
      Atomic::inc(&_number_of_nmethods_with_dependencies);
    }
  }
  if (cb->is_adapter_blob()) {
    heap->set_adapter_count(heap->adapter_count() + 1);
  }
}

bool CodeCache::contains(void *p) {
  // S390 uses contains() in current_frame(), which is used before
  // code cache initialization if NativeMemoryTracking=detail is set.
  S390_ONLY(if (_heaps == nullptr) return false;)
  // It should be ok to call contains without holding a lock.
  FOR_ALL_HEAPS(heap) {
    if ((*heap)->contains(p)) {
      return true;
    }
  }
  return false;
}

bool CodeCache::contains(nmethod *nm) {
  return contains((void *)nm);
}

// This method is safe to call without holding the CodeCache_lock. It only depends on the _segmap to contain
// valid indices, which it will always do, as long as the CodeBlob is not in the process of being recycled.
CodeBlob* CodeCache::find_blob(void* start) {
  // NMT can walk the stack before code cache is created
  if (_heaps != nullptr) {
    CodeHeap* heap = get_code_heap_containing(start);
    if (heap != nullptr) {
      return heap->find_blob(start);
    }
  }
  return nullptr;
}

nmethod* CodeCache::find_nmethod(void* start) {
  CodeBlob* cb = find_blob(start);
  assert(cb == nullptr || cb->is_nmethod(), "did not find an nmethod");
  return (nmethod*)cb;
}

void CodeCache::blobs_do(void f(CodeBlob* nm)) {
  assert_locked_or_safepoint(CodeCache_lock);
  FOR_ALL_HEAPS(heap) {
    FOR_ALL_BLOBS(cb, *heap) {
      f(cb);
    }
  }
}

void CodeCache::nmethods_do(void f(nmethod* nm)) {
  assert_locked_or_safepoint(CodeCache_lock);
  NMethodIterator iter(NMethodIterator::all);
  while(iter.next()) {
    f(iter.method());
  }
}

void CodeCache::nmethods_do(NMethodClosure* cl) {
  assert_locked_or_safepoint(CodeCache_lock);
  NMethodIterator iter(NMethodIterator::all);
  while(iter.next()) {
    cl->do_nmethod(iter.method());
  }
}

void CodeCache::metadata_do(MetadataClosure* f) {
  assert_locked_or_safepoint(CodeCache_lock);
  NMethodIterator iter(NMethodIterator::all);
  while(iter.next()) {
    iter.method()->metadata_do(f);
  }
}

// Calculate the number of GCs after which an nmethod is expected to have been
// used in order to not be classed as cold.
void CodeCache::update_cold_gc_count() {
  if (!MethodFlushing || !UseCodeCacheFlushing || NmethodSweepActivity == 0) {
    // No aging
    return;
  }

  size_t last_used = _last_unloading_used;
  double last_time = _last_unloading_time;

  double time = os::elapsedTime();

  size_t free = unallocated_capacity();
  size_t max = max_capacity();
  size_t used = max - free;
  double gc_interval = time - last_time;

  _unloading_threshold_gc_requested = false;
  _last_unloading_time = time;
  _last_unloading_used = used;

  if (last_time == 0.0) {
    // The first GC doesn't have enough information to make good
    // decisions, so just keep everything afloat
    log_info(codecache)("Unknown code cache pressure; don't age code");
    return;
  }

  if (gc_interval <= 0.0 || last_used >= used) {
    // Dodge corner cases where there is no pressure or negative pressure
    // on the code cache. Just don't unload when this happens.
    _cold_gc_count = INT_MAX;
    log_info(codecache)("No code cache pressure; don't age code");
    return;
  }

  double allocation_rate = (used - last_used) / gc_interval;

  _unloading_allocation_rates.add(allocation_rate);
  _unloading_gc_intervals.add(gc_interval);

  size_t aggressive_sweeping_free_threshold = StartAggressiveSweepingAt / 100.0 * max;
  if (free < aggressive_sweeping_free_threshold) {
    // We are already in the red zone; be very aggressive to avoid disaster
    // But not more aggressive than 2. This ensures that an nmethod must
    // have been unused at least between two GCs to be considered cold still.
    _cold_gc_count = 2;
    log_info(codecache)("Code cache critically low; use aggressive aging");
    return;
  }

  // The code cache has an expected time for cold nmethods to "time out"
  // when they have not been used. The time for nmethods to time out
  // depends on how long we expect we can keep allocating code until
  // aggressive sweeping starts, based on sampled allocation rates.
  double average_gc_interval = _unloading_gc_intervals.avg();
  double average_allocation_rate = _unloading_allocation_rates.avg();
  double time_to_aggressive = ((double)(free - aggressive_sweeping_free_threshold)) / average_allocation_rate;
  double cold_timeout = time_to_aggressive / NmethodSweepActivity;

  // Convert time to GC cycles, and crop at INT_MAX. The reason for
  // that is that the _cold_gc_count will be added to an epoch number
  // and that addition must not overflow, or we can crash the VM.
  // But not more aggressive than 2. This ensures that an nmethod must
  // have been unused at least between two GCs to be considered cold still.
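  // Worked example (illustrative rates only): with 120M of free space above the
  // aggressive-sweeping threshold and an average allocation rate of 1M/s,
  // time_to_aggressive is 120s; with NmethodSweepActivity at, say, 10 the cold
  // timeout becomes 12s, and with an average GC interval of 3s the expression
  // below yields a _cold_gc_count of 4.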
  _cold_gc_count = MAX2(MIN2((uint64_t)(cold_timeout / average_gc_interval), (uint64_t)INT_MAX), (uint64_t)2);

  double used_ratio = double(used) / double(max);
  double last_used_ratio = double(last_used) / double(max);
  log_info(codecache)("Allocation rate: %.3f KB/s, time to aggressive unloading: %.3f s, cold timeout: %.3f s, cold gc count: " UINT64_FORMAT
                      ", used: %.3f MB (%.3f%%), last used: %.3f MB (%.3f%%), gc interval: %.3f s",
                      average_allocation_rate / K, time_to_aggressive, cold_timeout, _cold_gc_count,
                      double(used) / M, used_ratio * 100.0, double(last_used) / M, last_used_ratio * 100.0, average_gc_interval);

}

uint64_t CodeCache::cold_gc_count() {
  return _cold_gc_count;
}

void CodeCache::gc_on_allocation() {
  if (!is_init_completed()) {
    // Let's not heuristically trigger GCs before the JVM is ready for GCs, no matter what
    return;
  }

  size_t free = unallocated_capacity();
  size_t max = max_capacity();
  size_t used = max - free;
  double free_ratio = double(free) / double(max);
  if (free_ratio <= StartAggressiveSweepingAt / 100.0) {
    // In case the GC is concurrent, we make sure only one thread requests the GC.
    if (Atomic::cmpxchg(&_unloading_threshold_gc_requested, false, true) == false) {
      log_info(codecache)("Triggering aggressive GC due to having only %.3f%% free memory", free_ratio * 100.0);
      Universe::heap()->collect(GCCause::_codecache_GC_aggressive);
    }
    return;
  }

  size_t last_used = _last_unloading_used;
  if (last_used >= used) {
    // No increase since last GC; no need to sweep yet
    return;
  }
  size_t allocated_since_last = used - last_used;
  double allocated_since_last_ratio = double(allocated_since_last) / double(max);
  double threshold = SweeperThreshold / 100.0;
  double used_ratio = double(used) / double(max);
  double last_used_ratio = double(last_used) / double(max);
  if (used_ratio > threshold) {
    // After threshold is reached, scale it by free_ratio so that more aggressive
    // GC is triggered as we approach code cache exhaustion
    threshold *= free_ratio;
  }
  // If code cache has been allocated without any GC at all, let's make sure
  // it is eventually invoked to avoid trouble.
  if (allocated_since_last_ratio > threshold) {
    // In case the GC is concurrent, we make sure only one thread requests the GC.
    if (Atomic::cmpxchg(&_unloading_threshold_gc_requested, false, true) == false) {
      log_info(codecache)("Triggering threshold (%.3f%%) GC due to allocating %.3f%% since last unloading (%.3f%% used -> %.3f%% used)",
                          threshold * 100.0, allocated_since_last_ratio * 100.0, last_used_ratio * 100.0, used_ratio * 100.0);
      Universe::heap()->collect(GCCause::_codecache_GC_threshold);
    }
  }
}

// We initialize the _gc_epoch to 2, because previous_completed_gc_marking_cycle
// subtracts the value by 2, and the type is unsigned. We don't want underflow.
//
// Odd values mean that marking is in progress, and even values mean that no
// marking is currently active.
uint64_t CodeCache::_gc_epoch = 2;

// How many GCs after an nmethod has not been used, do we consider it cold?
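// Roughly speaking: an nmethod that has not been observed as used for more than
// _cold_gc_count completed GCs is treated as cold and becomes a candidate for
// unloading. The INT_MAX default below effectively disables aging until
// update_cold_gc_count() has seen real code cache pressure.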
uint64_t CodeCache::_cold_gc_count = INT_MAX;

double CodeCache::_last_unloading_time = 0.0;
size_t CodeCache::_last_unloading_used = 0;
volatile bool CodeCache::_unloading_threshold_gc_requested = false;
TruncatedSeq CodeCache::_unloading_gc_intervals(10 /* samples */);
TruncatedSeq CodeCache::_unloading_allocation_rates(10 /* samples */);

uint64_t CodeCache::gc_epoch() {
  return _gc_epoch;
}

bool CodeCache::is_gc_marking_cycle_active() {
  // Odd means that marking is active
  return (_gc_epoch % 2) == 1;
}

uint64_t CodeCache::previous_completed_gc_marking_cycle() {
  if (is_gc_marking_cycle_active()) {
    return _gc_epoch - 2;
  } else {
    return _gc_epoch - 1;
  }
}

void CodeCache::on_gc_marking_cycle_start() {
  assert(!is_gc_marking_cycle_active(), "Previous marking cycle never ended");
  ++_gc_epoch;
}

// Once started the code cache marking cycle must only be finished after marking of
// the java heap is complete. Otherwise nmethods could appear to be not on stack even
// if they have frames in continuation StackChunks that were not yet visited.
void CodeCache::on_gc_marking_cycle_finish() {
  assert(is_gc_marking_cycle_active(), "Marking cycle started before last one finished");
  ++_gc_epoch;
  update_cold_gc_count();
}

void CodeCache::arm_all_nmethods() {
  BarrierSetNMethod* bs_nm = BarrierSet::barrier_set()->barrier_set_nmethod();
  if (bs_nm != nullptr) {
    bs_nm->arm_all_nmethods();
  }
}

// Mark nmethods for unloading if they contain otherwise unreachable oops.
void CodeCache::do_unloading(bool unloading_occurred) {
  assert_locked_or_safepoint(CodeCache_lock);
  NMethodIterator iter(NMethodIterator::all);
  while(iter.next()) {
    iter.method()->do_unloading(unloading_occurred);
  }
}

void CodeCache::verify_clean_inline_caches() {
#ifdef ASSERT
  NMethodIterator iter(NMethodIterator::not_unloading);
  while(iter.next()) {
    nmethod* nm = iter.method();
    nm->verify_clean_inline_caches();
    nm->verify();
  }
#endif
}

// Defer freeing of concurrently cleaned ExceptionCache entries until
// after a global handshake operation.
void CodeCache::release_exception_cache(ExceptionCache* entry) {
  if (SafepointSynchronize::is_at_safepoint()) {
    delete entry;
  } else {
    for (;;) {
      ExceptionCache* purge_list_head = Atomic::load(&_exception_cache_purge_list);
      entry->set_purge_list_next(purge_list_head);
      if (Atomic::cmpxchg(&_exception_cache_purge_list, purge_list_head, entry) == purge_list_head) {
        break;
      }
    }
  }
}

// Delete exception caches that have been concurrently unlinked,
// followed by a global handshake operation.
void CodeCache::purge_exception_caches() {
  ExceptionCache* curr = _exception_cache_purge_list;
  while (curr != nullptr) {
    ExceptionCache* next = curr->purge_list_next();
    delete curr;
    curr = next;
  }
  _exception_cache_purge_list = nullptr;
}

// Restart compiler if possible and required.
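// Compilation is typically switched off when a code cache segment runs full (see
// CodeCache::allocate() and report_codemem_full()); once unloading or sweeping has
// freed some memory, this path re-enables the compile queue and emits a JFR
// JITRestart event.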
920 void CodeCache::maybe_restart_compiler(size_t freed_memory) { 921 922 // Try to start the compiler again if we freed any memory 923 if (!CompileBroker::should_compile_new_jobs() && freed_memory != 0) { 924 CompileBroker::set_should_compile_new_jobs(CompileBroker::run_compilation); 925 log_info(codecache)("Restarting compiler"); 926 EventJITRestart event; 927 event.set_freedMemory(freed_memory); 928 event.set_codeCacheMaxCapacity(CodeCache::max_capacity()); 929 event.commit(); 930 } 931 } 932 933 uint8_t CodeCache::_unloading_cycle = 1; 934 935 void CodeCache::increment_unloading_cycle() { 936 // 2-bit value (see IsUnloadingState in nmethod.cpp for details) 937 // 0 is reserved for new methods. 938 _unloading_cycle = (_unloading_cycle + 1) % 4; 939 if (_unloading_cycle == 0) { 940 _unloading_cycle = 1; 941 } 942 } 943 944 CodeCache::UnlinkingScope::UnlinkingScope(BoolObjectClosure* is_alive) 945 : _is_unloading_behaviour(is_alive) 946 { 947 _saved_behaviour = IsUnloadingBehaviour::current(); 948 IsUnloadingBehaviour::set_current(&_is_unloading_behaviour); 949 increment_unloading_cycle(); 950 DependencyContext::cleaning_start(); 951 } 952 953 CodeCache::UnlinkingScope::~UnlinkingScope() { 954 IsUnloadingBehaviour::set_current(_saved_behaviour); 955 DependencyContext::cleaning_end(); 956 } 957 958 void CodeCache::verify_oops() { 959 MutexLocker mu(CodeCache_lock, Mutex::_no_safepoint_check_flag); 960 VerifyOopClosure voc; 961 NMethodIterator iter(NMethodIterator::not_unloading); 962 while(iter.next()) { 963 nmethod* nm = iter.method(); 964 nm->oops_do(&voc); 965 nm->verify_oop_relocations(); 966 } 967 } 968 969 int CodeCache::blob_count(CodeBlobType code_blob_type) { 970 CodeHeap* heap = get_code_heap(code_blob_type); 971 return (heap != nullptr) ? heap->blob_count() : 0; 972 } 973 974 int CodeCache::blob_count() { 975 int count = 0; 976 FOR_ALL_HEAPS(heap) { 977 count += (*heap)->blob_count(); 978 } 979 return count; 980 } 981 982 int CodeCache::nmethod_count(CodeBlobType code_blob_type) { 983 CodeHeap* heap = get_code_heap(code_blob_type); 984 return (heap != nullptr) ? heap->nmethod_count() : 0; 985 } 986 987 int CodeCache::nmethod_count() { 988 int count = 0; 989 for (CodeHeap* heap : *_nmethod_heaps) { 990 count += heap->nmethod_count(); 991 } 992 return count; 993 } 994 995 int CodeCache::adapter_count(CodeBlobType code_blob_type) { 996 CodeHeap* heap = get_code_heap(code_blob_type); 997 return (heap != nullptr) ? heap->adapter_count() : 0; 998 } 999 1000 int CodeCache::adapter_count() { 1001 int count = 0; 1002 FOR_ALL_HEAPS(heap) { 1003 count += (*heap)->adapter_count(); 1004 } 1005 return count; 1006 } 1007 1008 address CodeCache::low_bound(CodeBlobType code_blob_type) { 1009 CodeHeap* heap = get_code_heap(code_blob_type); 1010 return (heap != nullptr) ? (address)heap->low_boundary() : nullptr; 1011 } 1012 1013 address CodeCache::high_bound(CodeBlobType code_blob_type) { 1014 CodeHeap* heap = get_code_heap(code_blob_type); 1015 return (heap != nullptr) ? (address)heap->high_boundary() : nullptr; 1016 } 1017 1018 size_t CodeCache::capacity() { 1019 size_t cap = 0; 1020 FOR_ALL_ALLOCABLE_HEAPS(heap) { 1021 cap += (*heap)->capacity(); 1022 } 1023 return cap; 1024 } 1025 1026 size_t CodeCache::unallocated_capacity(CodeBlobType code_blob_type) { 1027 CodeHeap* heap = get_code_heap(code_blob_type); 1028 return (heap != nullptr) ? 
heap->unallocated_capacity() : 0; 1029 } 1030 1031 size_t CodeCache::unallocated_capacity() { 1032 size_t unallocated_cap = 0; 1033 FOR_ALL_ALLOCABLE_HEAPS(heap) { 1034 unallocated_cap += (*heap)->unallocated_capacity(); 1035 } 1036 return unallocated_cap; 1037 } 1038 1039 size_t CodeCache::max_capacity() { 1040 size_t max_cap = 0; 1041 FOR_ALL_ALLOCABLE_HEAPS(heap) { 1042 max_cap += (*heap)->max_capacity(); 1043 } 1044 return max_cap; 1045 } 1046 1047 bool CodeCache::is_non_nmethod(address addr) { 1048 CodeHeap* blob = get_code_heap(CodeBlobType::NonNMethod); 1049 return blob->contains(addr); 1050 } 1051 1052 size_t CodeCache::max_distance_to_non_nmethod() { 1053 if (!SegmentedCodeCache) { 1054 return ReservedCodeCacheSize; 1055 } else { 1056 CodeHeap* blob = get_code_heap(CodeBlobType::NonNMethod); 1057 // the max distance is minimized by placing the NonNMethod segment 1058 // in between MethodProfiled and MethodNonProfiled segments 1059 size_t dist1 = (size_t)blob->high() - (size_t)_low_bound; 1060 size_t dist2 = (size_t)_high_bound - (size_t)blob->low(); 1061 return dist1 > dist2 ? dist1 : dist2; 1062 } 1063 } 1064 1065 // Returns the reverse free ratio. E.g., if 25% (1/4) of the code cache 1066 // is free, reverse_free_ratio() returns 4. 1067 // Since code heap for each type of code blobs falls forward to the next 1068 // type of code heap, return the reverse free ratio for the entire 1069 // code cache. 1070 double CodeCache::reverse_free_ratio() { 1071 double unallocated = MAX2((double)unallocated_capacity(), 1.0); // Avoid division by 0; 1072 double max = (double)max_capacity(); 1073 double result = max / unallocated; 1074 assert (max >= unallocated, "Must be"); 1075 assert (result >= 1.0, "reverse_free_ratio must be at least 1. It is %f", result); 1076 return result; 1077 } 1078 1079 size_t CodeCache::bytes_allocated_in_freelists() { 1080 size_t allocated_bytes = 0; 1081 FOR_ALL_ALLOCABLE_HEAPS(heap) { 1082 allocated_bytes += (*heap)->allocated_in_freelist(); 1083 } 1084 return allocated_bytes; 1085 } 1086 1087 int CodeCache::allocated_segments() { 1088 int number_of_segments = 0; 1089 FOR_ALL_ALLOCABLE_HEAPS(heap) { 1090 number_of_segments += (*heap)->allocated_segments(); 1091 } 1092 return number_of_segments; 1093 } 1094 1095 size_t CodeCache::freelists_length() { 1096 size_t length = 0; 1097 FOR_ALL_ALLOCABLE_HEAPS(heap) { 1098 length += (*heap)->freelist_length(); 1099 } 1100 return length; 1101 } 1102 1103 void icache_init(); 1104 1105 void CodeCache::initialize() { 1106 assert(CodeCacheSegmentSize >= (uintx)CodeEntryAlignment, "CodeCacheSegmentSize must be large enough to align entry points"); 1107 #ifdef COMPILER2 1108 assert(CodeCacheSegmentSize >= (uintx)OptoLoopAlignment, "CodeCacheSegmentSize must be large enough to align inner loops"); 1109 #endif 1110 assert(CodeCacheSegmentSize >= sizeof(jdouble), "CodeCacheSegmentSize must be large enough to align constants"); 1111 // This was originally just a check of the alignment, causing failure, instead, round 1112 // the code cache to the page size. In particular, Solaris is moving to a larger 1113 // default page size. 
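  // For example: with a 64K vm page size, a CodeCacheExpansionSize of 60K is rounded
  // up to 64K by the align_up() below, so the expand_by(CodeCacheExpansionSize) calls
  // in CodeCache::allocate() grow a code heap by whole pages.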
1114 CodeCacheExpansionSize = align_up(CodeCacheExpansionSize, os::vm_page_size()); 1115 1116 if (SegmentedCodeCache) { 1117 // Use multiple code heaps 1118 initialize_heaps(); 1119 } else { 1120 // Use a single code heap 1121 FLAG_SET_ERGO(NonNMethodCodeHeapSize, (uintx)os::vm_page_size()); 1122 FLAG_SET_ERGO(ProfiledCodeHeapSize, 0); 1123 FLAG_SET_ERGO(NonProfiledCodeHeapSize, 0); 1124 1125 // If InitialCodeCacheSize is equal to ReservedCodeCacheSize, then it's more likely 1126 // users want to use the largest available page. 1127 const size_t min_pages = (InitialCodeCacheSize == ReservedCodeCacheSize) ? 1 : 8; 1128 ReservedCodeSpace rs = reserve_heap_memory(ReservedCodeCacheSize, page_size(false, min_pages)); 1129 // Register CodeHeaps with LSan as we sometimes embed pointers to malloc memory. 1130 LSAN_REGISTER_ROOT_REGION(rs.base(), rs.size()); 1131 add_heap(rs, "CodeCache", CodeBlobType::All); 1132 } 1133 1134 // Initialize ICache flush mechanism 1135 // This service is needed for os::register_code_area 1136 icache_init(); 1137 1138 // Give OS a chance to register generated code area. 1139 // This is used on Windows 64 bit platforms to register 1140 // Structured Exception Handlers for our generated code. 1141 os::register_code_area((char*)low_bound(), (char*)high_bound()); 1142 } 1143 1144 void codeCache_init() { 1145 CodeCache::initialize(); 1146 } 1147 1148 //------------------------------------------------------------------------------------------------ 1149 1150 bool CodeCache::has_nmethods_with_dependencies() { 1151 return Atomic::load_acquire(&_number_of_nmethods_with_dependencies) != 0; 1152 } 1153 1154 void CodeCache::clear_inline_caches() { 1155 assert_locked_or_safepoint(CodeCache_lock); 1156 NMethodIterator iter(NMethodIterator::not_unloading); 1157 while(iter.next()) { 1158 iter.method()->clear_inline_caches(); 1159 } 1160 } 1161 1162 // Only used by whitebox API 1163 void CodeCache::cleanup_inline_caches_whitebox() { 1164 assert_locked_or_safepoint(CodeCache_lock); 1165 NMethodIterator iter(NMethodIterator::not_unloading); 1166 while(iter.next()) { 1167 iter.method()->cleanup_inline_caches_whitebox(); 1168 } 1169 } 1170 1171 // Keeps track of time spent for checking dependencies 1172 NOT_PRODUCT(static elapsedTimer dependentCheckTime;) 1173 1174 #ifndef PRODUCT 1175 // Check if any of live methods dependencies have been invalidated. 1176 // (this is expensive!) 1177 static void check_live_nmethods_dependencies(DepChange& changes) { 1178 // Checked dependencies are allocated into this ResourceMark 1179 ResourceMark rm; 1180 1181 // Turn off dependency tracing while actually testing dependencies. 1182 FlagSetting fs(Dependencies::_verify_in_progress, true); 1183 1184 typedef ResourceHashtable<DependencySignature, int, 11027, 1185 AnyObj::RESOURCE_AREA, mtInternal, 1186 &DependencySignature::hash, 1187 &DependencySignature::equals> DepTable; 1188 1189 DepTable* table = new DepTable(); 1190 1191 // Iterate over live nmethods and check dependencies of all nmethods that are not 1192 // marked for deoptimization. A particular dependency is only checked once. 1193 NMethodIterator iter(NMethodIterator::not_unloading); 1194 while(iter.next()) { 1195 nmethod* nm = iter.method(); 1196 // Only notify for live nmethods 1197 if (!nm->is_marked_for_deoptimization()) { 1198 for (Dependencies::DepStream deps(nm); deps.next(); ) { 1199 // Construct abstraction of a dependency. 
1200 DependencySignature* current_sig = new DependencySignature(deps); 1201 1202 // Determine if dependency is already checked. table->put(...) returns 1203 // 'true' if the dependency is added (i.e., was not in the hashtable). 1204 if (table->put(*current_sig, 1)) { 1205 if (deps.check_dependency() != nullptr) { 1206 // Dependency checking failed. Print out information about the failed 1207 // dependency and finally fail with an assert. We can fail here, since 1208 // dependency checking is never done in a product build. 1209 tty->print_cr("Failed dependency:"); 1210 changes.print(); 1211 nm->print(); 1212 nm->print_dependencies_on(tty); 1213 assert(false, "Should have been marked for deoptimization"); 1214 } 1215 } 1216 } 1217 } 1218 } 1219 } 1220 #endif 1221 1222 void CodeCache::mark_for_deoptimization(DeoptimizationScope* deopt_scope, KlassDepChange& changes) { 1223 MutexLocker mu(CodeCache_lock, Mutex::_no_safepoint_check_flag); 1224 1225 // search the hierarchy looking for nmethods which are affected by the loading of this class 1226 1227 // then search the interfaces this class implements looking for nmethods 1228 // which might be dependent of the fact that an interface only had one 1229 // implementor. 1230 // nmethod::check_all_dependencies works only correctly, if no safepoint 1231 // can happen 1232 NoSafepointVerifier nsv; 1233 for (DepChange::ContextStream str(changes, nsv); str.next(); ) { 1234 InstanceKlass* d = str.klass(); 1235 d->mark_dependent_nmethods(deopt_scope, changes); 1236 } 1237 1238 #ifndef PRODUCT 1239 if (VerifyDependencies) { 1240 // Object pointers are used as unique identifiers for dependency arguments. This 1241 // is only possible if no safepoint, i.e., GC occurs during the verification code. 1242 dependentCheckTime.start(); 1243 check_live_nmethods_dependencies(changes); 1244 dependentCheckTime.stop(); 1245 } 1246 #endif 1247 } 1248 1249 #if INCLUDE_JVMTI 1250 // RedefineClasses support for saving nmethods that are dependent on "old" methods. 1251 // We don't really expect this table to grow very large. If it does, it can become a hashtable. 1252 static GrowableArray<nmethod*>* old_nmethod_table = nullptr; 1253 1254 static void add_to_old_table(nmethod* c) { 1255 if (old_nmethod_table == nullptr) { 1256 old_nmethod_table = new (mtCode) GrowableArray<nmethod*>(100, mtCode); 1257 } 1258 old_nmethod_table->push(c); 1259 } 1260 1261 static void reset_old_method_table() { 1262 if (old_nmethod_table != nullptr) { 1263 delete old_nmethod_table; 1264 old_nmethod_table = nullptr; 1265 } 1266 } 1267 1268 // Remove this method when flushed. 1269 void CodeCache::unregister_old_nmethod(nmethod* c) { 1270 assert_lock_strong(CodeCache_lock); 1271 if (old_nmethod_table != nullptr) { 1272 int index = old_nmethod_table->find(c); 1273 if (index != -1) { 1274 old_nmethod_table->delete_at(index); 1275 } 1276 } 1277 } 1278 1279 void CodeCache::old_nmethods_do(MetadataClosure* f) { 1280 // Walk old method table and mark those on stack. 1281 int length = 0; 1282 if (old_nmethod_table != nullptr) { 1283 length = old_nmethod_table->length(); 1284 for (int i = 0; i < length; i++) { 1285 // Walk all methods saved on the last pass. Concurrent class unloading may 1286 // also be looking at this method's metadata, so don't delete it yet if 1287 // it is marked as unloaded. 
1288 old_nmethod_table->at(i)->metadata_do(f); 1289 } 1290 } 1291 log_debug(redefine, class, nmethod)("Walked %d nmethods for mark_on_stack", length); 1292 } 1293 1294 // Walk compiled methods and mark dependent methods for deoptimization. 1295 void CodeCache::mark_dependents_for_evol_deoptimization(DeoptimizationScope* deopt_scope) { 1296 assert(SafepointSynchronize::is_at_safepoint(), "Can only do this at a safepoint!"); 1297 // Each redefinition creates a new set of nmethods that have references to "old" Methods 1298 // So delete old method table and create a new one. 1299 reset_old_method_table(); 1300 1301 NMethodIterator iter(NMethodIterator::all); 1302 while(iter.next()) { 1303 nmethod* nm = iter.method(); 1304 // Walk all alive nmethods to check for old Methods. 1305 // This includes methods whose inline caches point to old methods, so 1306 // inline cache clearing is unnecessary. 1307 if (nm->has_evol_metadata()) { 1308 deopt_scope->mark(nm); 1309 add_to_old_table(nm); 1310 } 1311 } 1312 } 1313 1314 void CodeCache::mark_all_nmethods_for_evol_deoptimization(DeoptimizationScope* deopt_scope) { 1315 assert(SafepointSynchronize::is_at_safepoint(), "Can only do this at a safepoint!"); 1316 NMethodIterator iter(NMethodIterator::all); 1317 while(iter.next()) { 1318 nmethod* nm = iter.method(); 1319 if (!nm->method()->is_method_handle_intrinsic()) { 1320 if (nm->can_be_deoptimized()) { 1321 deopt_scope->mark(nm); 1322 } 1323 if (nm->has_evol_metadata()) { 1324 add_to_old_table(nm); 1325 } 1326 } 1327 } 1328 } 1329 1330 #endif // INCLUDE_JVMTI 1331 1332 // Mark methods for deopt (if safe or possible). 1333 void CodeCache::mark_all_nmethods_for_deoptimization(DeoptimizationScope* deopt_scope) { 1334 MutexLocker mu(CodeCache_lock, Mutex::_no_safepoint_check_flag); 1335 NMethodIterator iter(NMethodIterator::not_unloading); 1336 while(iter.next()) { 1337 nmethod* nm = iter.method(); 1338 if (!nm->is_native_method()) { 1339 deopt_scope->mark(nm); 1340 } 1341 } 1342 } 1343 1344 void CodeCache::mark_for_deoptimization(DeoptimizationScope* deopt_scope, Method* dependee) { 1345 MutexLocker mu(CodeCache_lock, Mutex::_no_safepoint_check_flag); 1346 1347 NMethodIterator iter(NMethodIterator::not_unloading); 1348 while(iter.next()) { 1349 nmethod* nm = iter.method(); 1350 if (nm->is_dependent_on_method(dependee)) { 1351 deopt_scope->mark(nm); 1352 } 1353 } 1354 } 1355 1356 void CodeCache::make_marked_nmethods_deoptimized() { 1357 RelaxedNMethodIterator iter(RelaxedNMethodIterator::not_unloading); 1358 while(iter.next()) { 1359 nmethod* nm = iter.method(); 1360 if (nm->is_marked_for_deoptimization() && !nm->has_been_deoptimized() && nm->can_be_deoptimized()) { 1361 nm->make_not_entrant(); 1362 nm->make_deoptimized(); 1363 } 1364 } 1365 } 1366 1367 // Marks compiled methods dependent on dependee. 1368 void CodeCache::mark_dependents_on(DeoptimizationScope* deopt_scope, InstanceKlass* dependee) { 1369 assert_lock_strong(Compile_lock); 1370 1371 if (!has_nmethods_with_dependencies()) { 1372 return; 1373 } 1374 1375 if (dependee->is_linked()) { 1376 // Class initialization state change. 1377 KlassInitDepChange changes(dependee); 1378 mark_for_deoptimization(deopt_scope, changes); 1379 } else { 1380 // New class is loaded. 
1381 NewKlassDepChange changes(dependee); 1382 mark_for_deoptimization(deopt_scope, changes); 1383 } 1384 } 1385 1386 // Marks compiled methods dependent on dependee 1387 void CodeCache::mark_dependents_on_method_for_breakpoint(const methodHandle& m_h) { 1388 assert(SafepointSynchronize::is_at_safepoint(), "invariant"); 1389 1390 DeoptimizationScope deopt_scope; 1391 // Compute the dependent nmethods 1392 mark_for_deoptimization(&deopt_scope, m_h()); 1393 deopt_scope.deoptimize_marked(); 1394 } 1395 1396 void CodeCache::verify() { 1397 assert_locked_or_safepoint(CodeCache_lock); 1398 FOR_ALL_HEAPS(heap) { 1399 (*heap)->verify(); 1400 FOR_ALL_BLOBS(cb, *heap) { 1401 cb->verify(); 1402 } 1403 } 1404 } 1405 1406 // A CodeHeap is full. Print out warning and report event. 1407 PRAGMA_DIAG_PUSH 1408 PRAGMA_FORMAT_NONLITERAL_IGNORED 1409 void CodeCache::report_codemem_full(CodeBlobType code_blob_type, bool print) { 1410 // Get nmethod heap for the given CodeBlobType and build CodeCacheFull event 1411 CodeHeap* heap = get_code_heap(code_blob_type); 1412 assert(heap != nullptr, "heap is null"); 1413 1414 int full_count = heap->report_full(); 1415 1416 if ((full_count == 1) || print) { 1417 // Not yet reported for this heap, report 1418 if (SegmentedCodeCache) { 1419 ResourceMark rm; 1420 stringStream msg1_stream, msg2_stream; 1421 msg1_stream.print("%s is full. Compiler has been disabled.", 1422 get_code_heap_name(code_blob_type)); 1423 msg2_stream.print("Try increasing the code heap size using -XX:%s=", 1424 get_code_heap_flag_name(code_blob_type)); 1425 const char *msg1 = msg1_stream.as_string(); 1426 const char *msg2 = msg2_stream.as_string(); 1427 1428 log_warning(codecache)("%s", msg1); 1429 log_warning(codecache)("%s", msg2); 1430 warning("%s", msg1); 1431 warning("%s", msg2); 1432 } else { 1433 const char *msg1 = "CodeCache is full. Compiler has been disabled."; 1434 const char *msg2 = "Try increasing the code cache size using -XX:ReservedCodeCacheSize="; 1435 1436 log_warning(codecache)("%s", msg1); 1437 log_warning(codecache)("%s", msg2); 1438 warning("%s", msg1); 1439 warning("%s", msg2); 1440 } 1441 stringStream s; 1442 // Dump code cache into a buffer before locking the tty. 1443 { 1444 MutexLocker mu(CodeCache_lock, Mutex::_no_safepoint_check_flag); 1445 print_summary(&s); 1446 } 1447 { 1448 ttyLocker ttyl; 1449 tty->print("%s", s.freeze()); 1450 } 1451 1452 if (full_count == 1) { 1453 if (PrintCodeHeapAnalytics) { 1454 CompileBroker::print_heapinfo(tty, "all", 4096); // details, may be a lot! 
1455 } 1456 } 1457 } 1458 1459 EventCodeCacheFull event; 1460 if (event.should_commit()) { 1461 event.set_codeBlobType((u1)code_blob_type); 1462 event.set_startAddress((u8)heap->low_boundary()); 1463 event.set_commitedTopAddress((u8)heap->high()); 1464 event.set_reservedTopAddress((u8)heap->high_boundary()); 1465 event.set_entryCount(heap->blob_count()); 1466 event.set_methodCount(heap->nmethod_count()); 1467 event.set_adaptorCount(heap->adapter_count()); 1468 event.set_unallocatedCapacity(heap->unallocated_capacity()); 1469 event.set_fullCount(heap->full_count()); 1470 event.set_codeCacheMaxCapacity(CodeCache::max_capacity()); 1471 event.commit(); 1472 } 1473 } 1474 PRAGMA_DIAG_POP 1475 1476 void CodeCache::print_memory_overhead() { 1477 size_t wasted_bytes = 0; 1478 FOR_ALL_ALLOCABLE_HEAPS(heap) { 1479 CodeHeap* curr_heap = *heap; 1480 for (CodeBlob* cb = (CodeBlob*)curr_heap->first(); cb != nullptr; cb = (CodeBlob*)curr_heap->next(cb)) { 1481 HeapBlock* heap_block = ((HeapBlock*)cb) - 1; 1482 wasted_bytes += heap_block->length() * CodeCacheSegmentSize - cb->size(); 1483 } 1484 } 1485 // Print bytes that are allocated in the freelist 1486 ttyLocker ttl; 1487 tty->print_cr("Number of elements in freelist: " SSIZE_FORMAT, freelists_length()); 1488 tty->print_cr("Allocated in freelist: " SSIZE_FORMAT "kB", bytes_allocated_in_freelists()/K); 1489 tty->print_cr("Unused bytes in CodeBlobs: " SSIZE_FORMAT "kB", (wasted_bytes/K)); 1490 tty->print_cr("Segment map size: " SSIZE_FORMAT "kB", allocated_segments()/K); // 1 byte per segment 1491 } 1492 1493 //------------------------------------------------------------------------------------------------ 1494 // Non-product version 1495 1496 #ifndef PRODUCT 1497 1498 void CodeCache::print_trace(const char* event, CodeBlob* cb, uint size) { 1499 if (PrintCodeCache2) { // Need to add a new flag 1500 ResourceMark rm; 1501 if (size == 0) { 1502 int s = cb->size(); 1503 assert(s >= 0, "CodeBlob size is negative: %d", s); 1504 size = (uint) s; 1505 } 1506 tty->print_cr("CodeCache %s: addr: " INTPTR_FORMAT ", size: 0x%x", event, p2i(cb), size); 1507 } 1508 } 1509 1510 void CodeCache::print_internals() { 1511 int nmethodCount = 0; 1512 int runtimeStubCount = 0; 1513 int adapterCount = 0; 1514 int deoptimizationStubCount = 0; 1515 int uncommonTrapStubCount = 0; 1516 int bufferBlobCount = 0; 1517 int total = 0; 1518 int nmethodNotEntrant = 0; 1519 int nmethodJava = 0; 1520 int nmethodNative = 0; 1521 int max_nm_size = 0; 1522 ResourceMark rm; 1523 1524 int i = 0; 1525 FOR_ALL_ALLOCABLE_HEAPS(heap) { 1526 if ((_nmethod_heaps->length() >= 1) && Verbose) { 1527 tty->print_cr("-- %s --", (*heap)->name()); 1528 } 1529 FOR_ALL_BLOBS(cb, *heap) { 1530 total++; 1531 if (cb->is_nmethod()) { 1532 nmethod* nm = (nmethod*)cb; 1533 1534 if (Verbose && nm->method() != nullptr) { 1535 ResourceMark rm; 1536 char *method_name = nm->method()->name_and_sig_as_C_string(); 1537 tty->print("%s", method_name); 1538 if(nm->is_not_entrant()) { tty->print_cr(" not-entrant"); } 1539 } 1540 1541 nmethodCount++; 1542 1543 if(nm->is_not_entrant()) { nmethodNotEntrant++; } 1544 if(nm->method() != nullptr && nm->is_native_method()) { nmethodNative++; } 1545 1546 if(nm->method() != nullptr && nm->is_java_method()) { 1547 nmethodJava++; 1548 max_nm_size = MAX2(max_nm_size, nm->size()); 1549 } 1550 } else if (cb->is_runtime_stub()) { 1551 runtimeStubCount++; 1552 } else if (cb->is_deoptimization_stub()) { 1553 deoptimizationStubCount++; 1554 } else if (cb->is_uncommon_trap_stub()) { 1555 

#endif // !PRODUCT

void CodeCache::print() {
  print_summary(tty);

#ifndef PRODUCT
  if (!Verbose) return;

  CodeBlob_sizes live[CompLevel_full_optimization + 1];
  CodeBlob_sizes runtimeStub;
  CodeBlob_sizes uncommonTrapStub;
  CodeBlob_sizes deoptimizationStub;
  CodeBlob_sizes adapter;
  CodeBlob_sizes bufferBlob;
  CodeBlob_sizes other;

  FOR_ALL_ALLOCABLE_HEAPS(heap) {
    FOR_ALL_BLOBS(cb, *heap) {
      if (cb->is_nmethod()) {
        const int level = cb->as_nmethod()->comp_level();
        assert(0 <= level && level <= CompLevel_full_optimization, "Invalid compilation level");
        live[level].add(cb);
      } else if (cb->is_runtime_stub()) {
        runtimeStub.add(cb);
      } else if (cb->is_deoptimization_stub()) {
        deoptimizationStub.add(cb);
      } else if (cb->is_uncommon_trap_stub()) {
        uncommonTrapStub.add(cb);
      } else if (cb->is_adapter_blob()) {
        adapter.add(cb);
      } else if (cb->is_buffer_blob()) {
        bufferBlob.add(cb);
      } else {
        other.add(cb);
      }
    }
  }

  tty->print_cr("nmethod dependency checking time %fs", dependentCheckTime.seconds());

  tty->print_cr("nmethod blobs per compilation level:");
  for (int i = 0; i <= CompLevel_full_optimization; i++) {
    const char *level_name;
    switch (i) {
    case CompLevel_none:              level_name = "none";              break;
    case CompLevel_simple:            level_name = "simple";            break;
    case CompLevel_limited_profile:   level_name = "limited profile";   break;
    case CompLevel_full_profile:      level_name = "full profile";      break;
    case CompLevel_full_optimization: level_name = "full optimization"; break;
    default: assert(false, "invalid compilation level");
    }
    tty->print_cr("%s:", level_name);
    live[i].print("live");
  }

  struct {
    const char* name;
    const CodeBlob_sizes* sizes;
  } non_nmethod_blobs[] = {
    { "runtime", &runtimeStub },
    { "uncommon trap", &uncommonTrapStub },
    { "deoptimization", &deoptimizationStub },
    { "adapter", &adapter },
    { "buffer blob", &bufferBlob },
    { "other", &other },
  };
  tty->print_cr("Non-nmethod blobs:");
  for (auto& blob: non_nmethod_blobs) {
    blob.sizes->print(blob.name);
  }

  if (WizardMode) {
    // print the oop_map usage
    int code_size = 0;
    int number_of_blobs = 0;
    int number_of_oop_maps = 0;
    int map_size = 0;
    FOR_ALL_ALLOCABLE_HEAPS(heap) {
      FOR_ALL_BLOBS(cb, *heap) {
        number_of_blobs++;
        code_size += cb->code_size();
        ImmutableOopMapSet* set = cb->oop_maps();
        if (set != nullptr) {
          number_of_oop_maps += set->count();
          map_size += set->nr_of_bytes();
        }
      }
    }
    tty->print_cr("OopMaps");
    tty->print_cr(" #blobs = %d", number_of_blobs);
    tty->print_cr(" code size = %d", code_size);
    tty->print_cr(" #oop_maps = %d", number_of_oop_maps);
    tty->print_cr(" map size = %d", map_size);
  }

#endif // !PRODUCT
}
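
// The level names used by print() above correspond to the tiered-compilation CompLevel
// values: none = 0, simple = 1 (C1 without profiling), limited profile = 2 (C1 with
// invocation/backedge counters only), full profile = 3 (C1 with full MDO profiling) and
// full optimization = 4 (C2 or JVMCI). See compilerDefinitions.hpp for the authoritative
// definition.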

void CodeCache::print_summary(outputStream* st, bool detailed) {
  int full_count = 0;
  julong total_used = 0;
  julong total_max_used = 0;
  julong total_free = 0;
  julong total_size = 0;
  FOR_ALL_HEAPS(heap_iterator) {
    CodeHeap* heap = (*heap_iterator);
    size_t total = (heap->high_boundary() - heap->low_boundary());
    if (_heaps->length() >= 1) {
      st->print("%s:", heap->name());
    } else {
      st->print("CodeCache:");
    }
    size_t size = total/K;
    size_t used = (total - heap->unallocated_capacity())/K;
    size_t max_used = heap->max_allocated_capacity()/K;
    size_t free = heap->unallocated_capacity()/K;
    total_size += size;
    total_used += used;
    total_max_used += max_used;
    total_free += free;
    st->print_cr(" size=" SIZE_FORMAT "Kb used=" SIZE_FORMAT
                 "Kb max_used=" SIZE_FORMAT "Kb free=" SIZE_FORMAT "Kb",
                 size, used, max_used, free);

    if (detailed) {
      st->print_cr(" bounds [" INTPTR_FORMAT ", " INTPTR_FORMAT ", " INTPTR_FORMAT "]",
                   p2i(heap->low_boundary()),
                   p2i(heap->high()),
                   p2i(heap->high_boundary()));

      full_count += get_codemem_full_count(heap->code_blob_type());
    }
  }

  if (detailed) {
    if (SegmentedCodeCache) {
      st->print("CodeCache:");
      st->print_cr(" size=" JULONG_FORMAT "Kb, used=" JULONG_FORMAT
                   "Kb, max_used=" JULONG_FORMAT "Kb, free=" JULONG_FORMAT "Kb",
                   total_size, total_used, total_max_used, total_free);
    }
    st->print_cr(" total_blobs=" UINT32_FORMAT ", nmethods=" UINT32_FORMAT
                 ", adapters=" UINT32_FORMAT ", full_count=" UINT32_FORMAT,
                 blob_count(), nmethod_count(), adapter_count(), full_count);
    st->print_cr("Compilation: %s, stopped_count=%d, restarted_count=%d",
                 CompileBroker::should_compile_new_jobs() ?
                 "enabled" : Arguments::mode() == Arguments::_int ?
                 "disabled (interpreter mode)" :
                 "disabled (not enough contiguous free space left)",
                 CompileBroker::get_total_compiler_stopped_count(),
                 CompileBroker::get_total_compiler_restarted_count());
  }
}
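
// print_codelist() below emits one line per not-unloading nmethod in the form
//   <compile_id> <comp_level> <state> <method_signature> [<header_begin>, <code_begin> - <code_end>]
// It is assumed to back the "Compiler.codelist" diagnostic command (jcmd <pid> Compiler.codelist);
// the command registration itself lives in the diagnostic command framework, not in this file.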

void CodeCache::print_codelist(outputStream* st) {
  MutexLocker mu(CodeCache_lock, Mutex::_no_safepoint_check_flag);

  NMethodIterator iter(NMethodIterator::not_unloading);
  while (iter.next()) {
    nmethod* nm = iter.method();
    ResourceMark rm;
    char* method_name = nm->method()->name_and_sig_as_C_string();
    st->print_cr("%d %d %d %s [" INTPTR_FORMAT ", " INTPTR_FORMAT " - " INTPTR_FORMAT "]",
                 nm->compile_id(), nm->comp_level(), nm->get_state(),
                 method_name,
                 (intptr_t)nm->header_begin(), (intptr_t)nm->code_begin(), (intptr_t)nm->code_end());
  }
}

void CodeCache::print_layout(outputStream* st) {
  MutexLocker mu(CodeCache_lock, Mutex::_no_safepoint_check_flag);
  ResourceMark rm;
  print_summary(st, true);
}

void CodeCache::log_state(outputStream* st) {
  st->print(" total_blobs='" UINT32_FORMAT "' nmethods='" UINT32_FORMAT "'"
            " adapters='" UINT32_FORMAT "' free_code_cache='" SIZE_FORMAT "'",
            blob_count(), nmethod_count(), adapter_count(),
            unallocated_capacity());
}

#ifdef LINUX
void CodeCache::write_perf_map(const char* filename) {
  MutexLocker mu(CodeCache_lock, Mutex::_no_safepoint_check_flag);

  // Perf expects to find the map file at /tmp/perf-<pid>.map
  // if the file name is not specified.
  char fname[32];
  if (filename == nullptr) {
    jio_snprintf(fname, sizeof(fname), "/tmp/perf-%d.map", os::current_process_id());
    filename = fname;
  }

  fileStream fs(filename, "w");
  if (!fs.is_open()) {
    log_warning(codecache)("Failed to create %s for perf map", filename);
    return;
  }

  AllCodeBlobsIterator iter(AllCodeBlobsIterator::not_unloading);
  while (iter.next()) {
    CodeBlob *cb = iter.method();
    ResourceMark rm;
    const char* method_name =
      cb->is_nmethod() ? cb->as_nmethod()->method()->external_name()
                       : cb->name();
    fs.print_cr(INTPTR_FORMAT " " INTPTR_FORMAT " %s",
                (intptr_t)cb->code_begin(), (intptr_t)cb->code_size(),
                method_name);
  }
}
#endif // LINUX
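
// Each perf map line written by write_perf_map() above has the form
//   <code_begin> <code_size> <name>
// with both values printed via INTPTR_FORMAT (hex), matching what Linux perf expects to find
// in /tmp/perf-<pid>.map when symbolizing JIT-compiled frames. An assumed typical workflow:
//
//   jcmd <pid> Compiler.perfmap     # diagnostic command assumed to invoke write_perf_map()
//   perf top -p <pid>               # perf picks the map file up automatically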

//---< BEGIN >--- CodeHeap State Analytics.

void CodeCache::aggregate(outputStream *out, size_t granularity) {
  FOR_ALL_ALLOCABLE_HEAPS(heap) {
    CodeHeapState::aggregate(out, (*heap), granularity);
  }
}

void CodeCache::discard(outputStream *out) {
  FOR_ALL_ALLOCABLE_HEAPS(heap) {
    CodeHeapState::discard(out, (*heap));
  }
}

void CodeCache::print_usedSpace(outputStream *out) {
  FOR_ALL_ALLOCABLE_HEAPS(heap) {
    CodeHeapState::print_usedSpace(out, (*heap));
  }
}

void CodeCache::print_freeSpace(outputStream *out) {
  FOR_ALL_ALLOCABLE_HEAPS(heap) {
    CodeHeapState::print_freeSpace(out, (*heap));
  }
}

void CodeCache::print_count(outputStream *out) {
  FOR_ALL_ALLOCABLE_HEAPS(heap) {
    CodeHeapState::print_count(out, (*heap));
  }
}

void CodeCache::print_space(outputStream *out) {
  FOR_ALL_ALLOCABLE_HEAPS(heap) {
    CodeHeapState::print_space(out, (*heap));
  }
}

void CodeCache::print_age(outputStream *out) {
  FOR_ALL_ALLOCABLE_HEAPS(heap) {
    CodeHeapState::print_age(out, (*heap));
  }
}

void CodeCache::print_names(outputStream *out) {
  FOR_ALL_ALLOCABLE_HEAPS(heap) {
    CodeHeapState::print_names(out, (*heap));
  }
}
//---< END >--- CodeHeap State Analytics.
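
// The CodeHeap State Analytics printers above are driven by CompileBroker::print_heapinfo()
// (see the PrintCodeHeapAnalytics path in report_codemem_full()). They are also assumed to
// back the "Compiler.CodeHeap_Analytics" diagnostic command, e.g.
//   jcmd <pid> Compiler.CodeHeap_Analytics all 4096
// where the trailing value corresponds to the aggregation granularity passed to aggregate().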