1 /*
2 * Copyright (c) 1997, 2025, Oracle and/or its affiliates. All rights reserved.
3 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
4 *
5 * This code is free software; you can redistribute it and/or modify it
6 * under the terms of the GNU General Public License version 2 only, as
7 * published by the Free Software Foundation.
8 *
9 * This code is distributed in the hope that it will be useful, but WITHOUT
10 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
11 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
12 * version 2 for more details (a copy is included in the LICENSE file that
13 * accompanied this code).
14 *
15 * You should have received a copy of the GNU General Public License version
16 * 2 along with this work; if not, write to the Free Software Foundation,
17 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
18 *
19 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
20 * or visit www.oracle.com if you need additional information or have any
21 * questions.
22 *
23 */
24
25 #include "code/codeBlob.hpp"
26 #include "code/codeCache.hpp"
27 #include "code/codeHeapState.hpp"
28 #include "code/compiledIC.hpp"
29 #include "code/dependencies.hpp"
30 #include "code/dependencyContext.hpp"
31 #include "code/nmethod.hpp"
32 #include "code/pcDesc.hpp"
33 #include "compiler/compilationPolicy.hpp"
34 #include "compiler/compileBroker.hpp"
35 #include "compiler/compilerDefinitions.inline.hpp"
36 #include "compiler/oopMap.hpp"
37 #include "gc/shared/barrierSetNMethod.hpp"
38 #include "gc/shared/classUnloadingContext.hpp"
39 #include "gc/shared/collectedHeap.hpp"
40 #include "jfr/jfrEvents.hpp"
41 #include "jvm_io.h"
42 #include "logging/log.hpp"
43 #include "logging/logStream.hpp"
44 #include "memory/allocation.inline.hpp"
45 #include "memory/iterator.hpp"
46 #include "memory/memoryReserver.hpp"
47 #include "memory/resourceArea.hpp"
48 #include "memory/universe.hpp"
49 #include "oops/method.inline.hpp"
50 #include "oops/objArrayOop.hpp"
51 #include "oops/oop.inline.hpp"
52 #include "oops/verifyOopClosure.hpp"
53 #include "runtime/arguments.hpp"
54 #include "runtime/atomic.hpp"
55 #include "runtime/deoptimization.hpp"
56 #include "runtime/globals_extension.hpp"
57 #include "runtime/handles.inline.hpp"
58 #include "runtime/icache.hpp"
59 #include "runtime/init.hpp"
60 #include "runtime/java.hpp"
61 #include "runtime/mutexLocker.hpp"
62 #include "runtime/os.inline.hpp"
63 #include "runtime/safepointVerifiers.hpp"
64 #include "runtime/vmThread.hpp"
65 #include "sanitizers/leak.hpp"
66 #include "services/memoryService.hpp"
67 #include "utilities/align.hpp"
68 #include "utilities/vmError.hpp"
69 #include "utilities/xmlstream.hpp"
70 #ifdef COMPILER1
71 #include "c1/c1_Compilation.hpp"
72 #include "c1/c1_Compiler.hpp"
73 #endif
74 #ifdef COMPILER2
75 #include "opto/c2compiler.hpp"
76 #include "opto/compile.hpp"
77 #include "opto/node.hpp"
78 #endif
79
80 // Helper class for printing in CodeCache
81 class CodeBlob_sizes {
82 private:
83 int count;
84 int total_size;
85 int header_size;
86 int code_size;
87 int stub_size;
88 int relocation_size;
89 int scopes_oop_size;
90 int scopes_metadata_size;
91 int scopes_data_size;
92 int scopes_pcs_size;
93
94 public:
95 CodeBlob_sizes() {
96 count = 0;
97 total_size = 0;
98 header_size = 0;
99 code_size = 0;
100 stub_size = 0;
101 relocation_size = 0;
102 scopes_oop_size = 0;
103 scopes_metadata_size = 0;
104 scopes_data_size = 0;
105 scopes_pcs_size = 0;
106 }
107
108 int total() const { return total_size; }
109 bool is_empty() const { return count == 0; }
110
111 void print(const char* title) const {
112 if (is_empty()) {
113 tty->print_cr(" #%d %s = %dK",
114 count,
115 title,
116 total() / (int)K);
117 } else {
118 tty->print_cr(" #%d %s = %dK (hdr %dK %d%%, loc %dK %d%%, code %dK %d%%, stub %dK %d%%, [oops %dK %d%%, metadata %dK %d%%, data %dK %d%%, pcs %dK %d%%])",
119 count,
120 title,
121 total() / (int)K,
122 header_size / (int)K,
123 header_size * 100 / total_size,
124 relocation_size / (int)K,
125 relocation_size * 100 / total_size,
126 code_size / (int)K,
127 code_size * 100 / total_size,
128 stub_size / (int)K,
129 stub_size * 100 / total_size,
130 scopes_oop_size / (int)K,
131 scopes_oop_size * 100 / total_size,
132 scopes_metadata_size / (int)K,
133 scopes_metadata_size * 100 / total_size,
134 scopes_data_size / (int)K,
135 scopes_data_size * 100 / total_size,
136 scopes_pcs_size / (int)K,
137 scopes_pcs_size * 100 / total_size);
138 }
139 }
140
141 void add(CodeBlob* cb) {
142 count++;
143 total_size += cb->size();
144 header_size += cb->header_size();
145 relocation_size += cb->relocation_size();
146 if (cb->is_nmethod()) {
147 nmethod* nm = cb->as_nmethod_or_null();
148 code_size += nm->insts_size();
149 stub_size += nm->stub_size();
150
151 scopes_oop_size += nm->oops_size();
152 scopes_metadata_size += nm->metadata_size();
153 scopes_data_size += nm->scopes_data_size();
154 scopes_pcs_size += nm->scopes_pcs_size();
155 } else {
156 code_size += cb->code_size();
157 }
158 }
159 };
160
161 // Iterate over all CodeHeaps
162 #define FOR_ALL_HEAPS(heap) for (GrowableArrayIterator<CodeHeap*> heap = _heaps->begin(); heap != _heaps->end(); ++heap)
163 #define FOR_ALL_ALLOCABLE_HEAPS(heap) for (GrowableArrayIterator<CodeHeap*> heap = _allocable_heaps->begin(); heap != _allocable_heaps->end(); ++heap)
164
165 // Iterate over all CodeBlobs (cb) on the given CodeHeap
166 #define FOR_ALL_BLOBS(cb, heap) for (CodeBlob* cb = first_blob(heap); cb != nullptr; cb = next_blob(heap, cb))
167
168 address CodeCache::_low_bound = nullptr;
169 address CodeCache::_high_bound = nullptr;
170 volatile int CodeCache::_number_of_nmethods_with_dependencies = 0;
171 ExceptionCache* volatile CodeCache::_exception_cache_purge_list = nullptr;
172
173 // Initialize arrays of CodeHeap subsets
174 GrowableArray<CodeHeap*>* CodeCache::_heaps = new(mtCode) GrowableArray<CodeHeap*> (static_cast<int>(CodeBlobType::All), mtCode);
175 GrowableArray<CodeHeap*>* CodeCache::_nmethod_heaps = new(mtCode) GrowableArray<CodeHeap*> (static_cast<int>(CodeBlobType::All), mtCode);
176 GrowableArray<CodeHeap*>* CodeCache::_allocable_heaps = new(mtCode) GrowableArray<CodeHeap*> (static_cast<int>(CodeBlobType::All), mtCode);
177
178 static void check_min_size(const char* codeheap, size_t size, size_t required_size) {
179 if (size < required_size) {
180 log_debug(codecache)("Code heap (%s) size %zuK below required minimal size %zuK",
181 codeheap, size/K, required_size/K);
182 err_msg title("Not enough space in %s to run VM", codeheap);
183 err_msg message("%zuK < %zuK", size/K, required_size/K);
184 vm_exit_during_initialization(title, message);
185 }
186 }
187
188 struct CodeHeapInfo {
189 size_t size;
190 bool set;
191 bool enabled;
192 };
193
194 static void set_size_of_unset_code_heap(CodeHeapInfo* heap, size_t available_size, size_t used_size, size_t min_size) {
195 assert(!heap->set, "sanity");
196 heap->size = (available_size > (used_size + min_size)) ? (available_size - used_size) : min_size;
197 }
198
199 void CodeCache::initialize_heaps() {
200
201 CodeHeapInfo non_nmethod = {NonNMethodCodeHeapSize, FLAG_IS_CMDLINE(NonNMethodCodeHeapSize), true};
202 CodeHeapInfo profiled = {ProfiledCodeHeapSize, FLAG_IS_CMDLINE(ProfiledCodeHeapSize), true};
203 CodeHeapInfo non_profiled = {NonProfiledCodeHeapSize, FLAG_IS_CMDLINE(NonProfiledCodeHeapSize), true};
204
205 const bool cache_size_set = FLAG_IS_CMDLINE(ReservedCodeCacheSize);
206 const size_t ps = page_size(false, 8);
207 const size_t min_size = MAX2(os::vm_allocation_granularity(), ps);
208 const size_t min_cache_size = CodeCacheMinimumUseSpace DEBUG_ONLY(* 3); // Make sure we have enough space for VM internal code
209 size_t cache_size = align_up(ReservedCodeCacheSize, min_size);
210
211 // Prerequisites
212 if (!heap_available(CodeBlobType::MethodProfiled)) {
213 // For compatibility reasons, disabled tiered compilation overrides
214 // segment size even if it is set explicitly.
215 non_profiled.size += profiled.size;
216 // Profiled code heap is not available, forcibly set size to 0
217 profiled.size = 0;
218 profiled.set = true;
219 profiled.enabled = false;
220 }
221
222 assert(heap_available(CodeBlobType::MethodNonProfiled), "MethodNonProfiled heap is always available for segmented code heap");
223
224 size_t compiler_buffer_size = 0;
225 COMPILER1_PRESENT(compiler_buffer_size += CompilationPolicy::c1_count() * Compiler::code_buffer_size());
226 COMPILER2_PRESENT(compiler_buffer_size += CompilationPolicy::c2_count() * C2Compiler::initial_code_buffer_size());
227
228 if (!non_nmethod.set) {
229 non_nmethod.size += compiler_buffer_size;
230 // Further down, just before FLAG_SET_ERGO(), all segment sizes are
231 // aligned down to the next lower multiple of min_size. For large page
232 // sizes, this may result in (non_nmethod.size == 0) which is not acceptable.
233 // Therefore, force non_nmethod.size to at least min_size.
234 non_nmethod.size = MAX2(non_nmethod.size, min_size);
235 }
236
237 if (!profiled.set && !non_profiled.set) {
238 non_profiled.size = profiled.size = (cache_size > non_nmethod.size + 2 * min_size) ?
239 (cache_size - non_nmethod.size) / 2 : min_size;
240 }
241
242 if (profiled.set && !non_profiled.set) {
243 set_size_of_unset_code_heap(&non_profiled, cache_size, non_nmethod.size + profiled.size, min_size);
244 }
245
246 if (!profiled.set && non_profiled.set) {
247 set_size_of_unset_code_heap(&profiled, cache_size, non_nmethod.size + non_profiled.size, min_size);
248 }
249
250 // Compatibility.
251 size_t non_nmethod_min_size = min_cache_size + compiler_buffer_size;
252 if (!non_nmethod.set && profiled.set && non_profiled.set) {
253 set_size_of_unset_code_heap(&non_nmethod, cache_size, profiled.size + non_profiled.size, non_nmethod_min_size);
254 }
255
256 size_t total = non_nmethod.size + profiled.size + non_profiled.size;
257 if (total != cache_size && !cache_size_set) {
258 log_info(codecache)("ReservedCodeCache size %zuK changed to total segments size NonNMethod "
259 "%zuK NonProfiled %zuK Profiled %zuK = %zuK",
260 cache_size/K, non_nmethod.size/K, non_profiled.size/K, profiled.size/K, total/K);
261 // Adjust ReservedCodeCacheSize as necessary because it was not set explicitly
262 cache_size = total;
263 }
264
265 log_debug(codecache)("Initializing code heaps ReservedCodeCache %zuK NonNMethod %zuK"
266 " NonProfiled %zuK Profiled %zuK",
267 cache_size/K, non_nmethod.size/K, non_profiled.size/K, profiled.size/K);
268
269 // Validation
270 // Check minimal required sizes
271 check_min_size("non-nmethod code heap", non_nmethod.size, non_nmethod_min_size);
272 if (profiled.enabled) {
273 check_min_size("profiled code heap", profiled.size, min_size);
274 }
275 if (non_profiled.enabled) { // non_profiled.enabled is always ON for segmented code heap, leave it checked for clarity
276 check_min_size("non-profiled code heap", non_profiled.size, min_size);
277 }
278 if (cache_size_set) {
279 check_min_size("reserved code cache", cache_size, min_cache_size);
280 }
281
282 // ReservedCodeCacheSize was set explicitly, so report an error and abort if it doesn't match the segment sizes
283 if (total != cache_size && cache_size_set) {
284 err_msg message("NonNMethodCodeHeapSize (%zuK)", non_nmethod.size/K);
285 if (profiled.enabled) {
286 message.append(" + ProfiledCodeHeapSize (%zuK)", profiled.size/K);
287 }
288 if (non_profiled.enabled) {
289 message.append(" + NonProfiledCodeHeapSize (%zuK)", non_profiled.size/K);
290 }
291 message.append(" = %zuK", total/K);
292 message.append((total > cache_size) ? " is greater than " : " is less than ");
293 message.append("ReservedCodeCacheSize (%zuK).", cache_size/K);
294
295 vm_exit_during_initialization("Invalid code heap sizes", message);
296 }
297
298 // Compatibility. Print warning if using large pages but not able to use the size given
299 if (UseLargePages) {
300 const size_t lg_ps = page_size(false, 1);
301 if (ps < lg_ps) {
302 log_warning(codecache)("Code cache size too small for " PROPERFMT " pages. "
303 "Reverting to smaller page size (" PROPERFMT ").",
304 PROPERFMTARGS(lg_ps), PROPERFMTARGS(ps));
305 }
306 }
307
308 // Note: if large page support is enabled, min_size is at least the large
309 // page size. This ensures that the code cache is covered by large pages.
310 non_profiled.size += non_nmethod.size & alignment_mask(min_size);
311 non_profiled.size += profiled.size & alignment_mask(min_size);
312 non_nmethod.size = align_down(non_nmethod.size, min_size);
313 profiled.size = align_down(profiled.size, min_size);
314 non_profiled.size = align_down(non_profiled.size, min_size);
315
316 FLAG_SET_ERGO(NonNMethodCodeHeapSize, non_nmethod.size);
317 FLAG_SET_ERGO(ProfiledCodeHeapSize, profiled.size);
318 FLAG_SET_ERGO(NonProfiledCodeHeapSize, non_profiled.size);
319 FLAG_SET_ERGO(ReservedCodeCacheSize, cache_size);
320
321 ReservedSpace rs = reserve_heap_memory(cache_size, ps);
322
323 // Register CodeHeaps with LSan as we sometimes embed pointers to malloc memory.
324 LSAN_REGISTER_ROOT_REGION(rs.base(), rs.size());
325
326 size_t offset = 0;
327 if (profiled.enabled) {
328 ReservedSpace profiled_space = rs.partition(offset, profiled.size);
329 offset += profiled.size;
330 // Tier 2 and tier 3 (profiled) methods
331 add_heap(profiled_space, "CodeHeap 'profiled nmethods'", CodeBlobType::MethodProfiled);
332 }
333
334 ReservedSpace non_method_space = rs.partition(offset, non_nmethod.size);
335 offset += non_nmethod.size;
336 // Non-nmethods (stubs, adapters, ...)
337 add_heap(non_method_space, "CodeHeap 'non-nmethods'", CodeBlobType::NonNMethod);
338
339 if (non_profiled.enabled) {
340 ReservedSpace non_profiled_space = rs.partition(offset, non_profiled.size);
341 // Tier 1 and tier 4 (non-profiled) methods and native methods
342 add_heap(non_profiled_space, "CodeHeap 'non-profiled nmethods'", CodeBlobType::MethodNonProfiled);
343 }
344 }
345
346 size_t CodeCache::page_size(bool aligned, size_t min_pages) {
347 return aligned ? os::page_size_for_region_aligned(ReservedCodeCacheSize, min_pages) :
348 os::page_size_for_region_unaligned(ReservedCodeCacheSize, min_pages);
349 }
350
351 ReservedSpace CodeCache::reserve_heap_memory(size_t size, size_t rs_ps) {
352 // Align and reserve space for code cache
353 const size_t rs_align = MAX2(rs_ps, os::vm_allocation_granularity());
354 const size_t rs_size = align_up(size, rs_align);
355
356 ReservedSpace rs = CodeMemoryReserver::reserve(rs_size, rs_align, rs_ps);
357 if (!rs.is_reserved()) {
358 vm_exit_during_initialization(err_msg("Could not reserve enough space for code cache (%zuK)",
359 rs_size/K));
360 }
361
362 // Initialize bounds
363 _low_bound = (address)rs.base();
364 _high_bound = _low_bound + rs.size();
365 return rs;
366 }
367
368 // Heaps available for allocation
369 bool CodeCache::heap_available(CodeBlobType code_blob_type) {
370 if (!SegmentedCodeCache) {
371 // No segmentation: use a single code heap
372 return (code_blob_type == CodeBlobType::All);
373 } else if (CompilerConfig::is_interpreter_only()) {
374 // Interpreter only: we don't need any method code heaps
375 return (code_blob_type == CodeBlobType::NonNMethod);
376 } else if (CompilerConfig::is_c1_profiling()) {
377 // Tiered compilation: use all code heaps
378 return (code_blob_type < CodeBlobType::All);
379 } else {
380 // No TieredCompilation: we only need the non-nmethod and non-profiled code heap
381 return (code_blob_type == CodeBlobType::NonNMethod) ||
382 (code_blob_type == CodeBlobType::MethodNonProfiled);
383 }
384 }
385
386 const char* CodeCache::get_code_heap_flag_name(CodeBlobType code_blob_type) {
387 switch(code_blob_type) {
388 case CodeBlobType::NonNMethod:
389 return "NonNMethodCodeHeapSize";
390 break;
391 case CodeBlobType::MethodNonProfiled:
392 return "NonProfiledCodeHeapSize";
393 break;
394 case CodeBlobType::MethodProfiled:
395 return "ProfiledCodeHeapSize";
396 break;
397 default:
398 ShouldNotReachHere();
399 return nullptr;
400 }
401 }
402
403 int CodeCache::code_heap_compare(CodeHeap* const &lhs, CodeHeap* const &rhs) {
404 if (lhs->code_blob_type() == rhs->code_blob_type()) {
405 return (lhs > rhs) ? 1 : ((lhs < rhs) ? -1 : 0);
406 } else {
407 return static_cast<int>(lhs->code_blob_type()) - static_cast<int>(rhs->code_blob_type());
408 }
409 }
410
411 void CodeCache::add_heap(CodeHeap* heap) {
412 assert(!Universe::is_fully_initialized(), "late heap addition?");
413
414 _heaps->insert_sorted<code_heap_compare>(heap);
415
416 CodeBlobType type = heap->code_blob_type();
417 if (code_blob_type_accepts_nmethod(type)) {
418 _nmethod_heaps->insert_sorted<code_heap_compare>(heap);
419 }
420 if (code_blob_type_accepts_allocable(type)) {
421 _allocable_heaps->insert_sorted<code_heap_compare>(heap);
422 }
423 }
424
425 void CodeCache::add_heap(ReservedSpace rs, const char* name, CodeBlobType code_blob_type) {
426 // Check if heap is needed
427 if (!heap_available(code_blob_type)) {
428 return;
429 }
430
431 // Create CodeHeap
432 CodeHeap* heap = new CodeHeap(name, code_blob_type);
433 add_heap(heap);
434
435 // Reserve Space
436 size_t size_initial = MIN2((size_t)InitialCodeCacheSize, rs.size());
437 size_initial = align_up(size_initial, rs.page_size());
438 if (!heap->reserve(rs, size_initial, CodeCacheSegmentSize)) {
439 vm_exit_during_initialization(err_msg("Could not reserve enough space in %s (%zuK)",
440 heap->name(), size_initial/K));
441 }
442
443 // Register the CodeHeap
444 MemoryService::add_code_heap_memory_pool(heap, name);
445 }
446
447 CodeHeap* CodeCache::get_code_heap_containing(void* start) {
448 FOR_ALL_HEAPS(heap) {
449 if ((*heap)->contains(start)) {
450 return *heap;
451 }
452 }
453 return nullptr;
454 }
455
456 CodeHeap* CodeCache::get_code_heap(const void* cb) {
457 assert(cb != nullptr, "CodeBlob is null");
458 FOR_ALL_HEAPS(heap) {
459 if ((*heap)->contains(cb)) {
460 return *heap;
461 }
462 }
463 ShouldNotReachHere();
464 return nullptr;
465 }
466
467 CodeHeap* CodeCache::get_code_heap(CodeBlobType code_blob_type) {
468 FOR_ALL_HEAPS(heap) {
469 if ((*heap)->accepts(code_blob_type)) {
470 return *heap;
471 }
472 }
473 return nullptr;
474 }
475
476 CodeBlob* CodeCache::first_blob(CodeHeap* heap) {
477 assert_locked_or_safepoint(CodeCache_lock);
478 assert(heap != nullptr, "heap is null");
479 return (CodeBlob*)heap->first();
480 }
481
482 CodeBlob* CodeCache::first_blob(CodeBlobType code_blob_type) {
483 if (heap_available(code_blob_type)) {
484 return first_blob(get_code_heap(code_blob_type));
485 } else {
486 return nullptr;
487 }
488 }
489
490 CodeBlob* CodeCache::next_blob(CodeHeap* heap, CodeBlob* cb) {
491 assert_locked_or_safepoint(CodeCache_lock);
492 assert(heap != nullptr, "heap is null");
493 return (CodeBlob*)heap->next(cb);
494 }
495
496 /**
497 * Do not seize the CodeCache lock here--if the caller has not
498 * already done so, we are going to lose bigtime, since the code
499 * cache will contain a garbage CodeBlob until the caller can
500 * run the constructor for the CodeBlob subclass he is busy
501 * instantiating.
502 */
503 CodeBlob* CodeCache::allocate(uint size, CodeBlobType code_blob_type, bool handle_alloc_failure, CodeBlobType orig_code_blob_type) {
504 assert_locked_or_safepoint(CodeCache_lock);
505 assert(size > 0, "Code cache allocation request must be > 0");
506 if (size == 0) {
507 return nullptr;
508 }
509 CodeBlob* cb = nullptr;
510
511 // Get CodeHeap for the given CodeBlobType
512 CodeHeap* heap = get_code_heap(code_blob_type);
513 assert(heap != nullptr, "heap is null");
514
515 while (true) {
516 cb = (CodeBlob*)heap->allocate(size);
517 if (cb != nullptr) break;
518 if (!heap->expand_by(CodeCacheExpansionSize)) {
519 // Save original type for error reporting
520 if (orig_code_blob_type == CodeBlobType::All) {
521 orig_code_blob_type = code_blob_type;
522 }
523 // Expansion failed
524 if (SegmentedCodeCache) {
525 // Fallback solution: Try to store code in another code heap.
526 // NonNMethod -> MethodNonProfiled -> MethodProfiled (-> MethodNonProfiled)
527 CodeBlobType type = code_blob_type;
528 switch (type) {
529 case CodeBlobType::NonNMethod:
530 type = CodeBlobType::MethodNonProfiled;
531 break;
532 case CodeBlobType::MethodNonProfiled:
533 type = CodeBlobType::MethodProfiled;
534 break;
535 case CodeBlobType::MethodProfiled:
536 // Avoid loop if we already tried that code heap
537 if (type == orig_code_blob_type) {
538 type = CodeBlobType::MethodNonProfiled;
539 }
540 break;
541 default:
542 break;
543 }
544 if (type != code_blob_type && type != orig_code_blob_type && heap_available(type)) {
545 if (PrintCodeCacheExtension) {
546 tty->print_cr("Extension of %s failed. Trying to allocate in %s.",
547 heap->name(), get_code_heap(type)->name());
548 }
549 return allocate(size, type, handle_alloc_failure, orig_code_blob_type);
550 }
551 }
552 if (handle_alloc_failure) {
553 MutexUnlocker mu(CodeCache_lock, Mutex::_no_safepoint_check_flag);
554 CompileBroker::handle_full_code_cache(orig_code_blob_type);
555 }
556 return nullptr;
557 } else {
558 OrderAccess::release(); // ensure heap expansion is visible to an asynchronous observer (e.g. CodeHeapPool::get_memory_usage())
559 }
560 if (PrintCodeCacheExtension) {
561 ResourceMark rm;
562 if (_nmethod_heaps->length() >= 1) {
563 tty->print("%s", heap->name());
564 } else {
565 tty->print("CodeCache");
566 }
567 tty->print_cr(" extended to [" INTPTR_FORMAT ", " INTPTR_FORMAT "] (%zd bytes)",
568 (intptr_t)heap->low_boundary(), (intptr_t)heap->high(),
569 (address)heap->high() - (address)heap->low_boundary());
570 }
571 }
572 print_trace("allocation", cb, size);
573 return cb;
574 }
575
576 void CodeCache::free(CodeBlob* cb) {
577 assert_locked_or_safepoint(CodeCache_lock);
578 CodeHeap* heap = get_code_heap(cb);
579 print_trace("free", cb);
580 if (cb->is_nmethod()) {
581 heap->set_nmethod_count(heap->nmethod_count() - 1);
582 if (((nmethod *)cb)->has_dependencies()) {
583 Atomic::dec(&_number_of_nmethods_with_dependencies);
584 }
585 }
586 if (cb->is_adapter_blob()) {
587 heap->set_adapter_count(heap->adapter_count() - 1);
588 }
589
590 cb->~CodeBlob();
591 // Get heap for given CodeBlob and deallocate
592 heap->deallocate(cb);
593
594 assert(heap->blob_count() >= 0, "sanity check");
595 }
596
597 void CodeCache::free_unused_tail(CodeBlob* cb, size_t used) {
598 assert_locked_or_safepoint(CodeCache_lock);
599 guarantee(cb->is_buffer_blob() && strncmp("Interpreter", cb->name(), 11) == 0, "Only possible for interpreter!");
600 print_trace("free_unused_tail", cb);
601
602 // We also have to account for the extra space (i.e. header) used by the CodeBlob
603 // which provides the memory (see BufferBlob::create() in codeBlob.cpp).
604 used += CodeBlob::align_code_offset(cb->header_size());
605
606 // Get heap for given CodeBlob and deallocate its unused tail
607 get_code_heap(cb)->deallocate_tail(cb, used);
608 // Adjust the sizes of the CodeBlob
609 cb->adjust_size(used);
610 }
611
612 void CodeCache::commit(CodeBlob* cb) {
613 // this is called by nmethod::nmethod, which must already own CodeCache_lock
614 assert_locked_or_safepoint(CodeCache_lock);
615 CodeHeap* heap = get_code_heap(cb);
616 if (cb->is_nmethod()) {
617 heap->set_nmethod_count(heap->nmethod_count() + 1);
618 if (((nmethod *)cb)->has_dependencies()) {
619 Atomic::inc(&_number_of_nmethods_with_dependencies);
620 }
621 }
622 if (cb->is_adapter_blob()) {
623 heap->set_adapter_count(heap->adapter_count() + 1);
624 }
625 }
626
627 bool CodeCache::contains(void *p) {
628 // S390 uses contains() in current_frame(), which is used before
629 // code cache initialization if NativeMemoryTracking=detail is set.
630 S390_ONLY(if (_heaps == nullptr) return false;)
631 // It should be ok to call contains without holding a lock.
632 FOR_ALL_HEAPS(heap) {
633 if ((*heap)->contains(p)) {
634 return true;
635 }
636 }
637 return false;
638 }
639
640 bool CodeCache::contains(nmethod *nm) {
641 return contains((void *)nm);
642 }
643
644 // This method is safe to call without holding the CodeCache_lock. It only depends on the _segmap to contain
645 // valid indices, which it will always do, as long as the CodeBlob is not in the process of being recycled.
646 CodeBlob* CodeCache::find_blob(void* start) {
647 // NMT can walk the stack before code cache is created
648 if (_heaps != nullptr) {
649 CodeHeap* heap = get_code_heap_containing(start);
650 if (heap != nullptr) {
651 return heap->find_blob(start);
652 }
653 }
654 return nullptr;
655 }
656
657 nmethod* CodeCache::find_nmethod(void* start) {
658 CodeBlob* cb = find_blob(start);
659 assert(cb == nullptr || cb->is_nmethod(), "did not find an nmethod");
660 return (nmethod*)cb;
661 }
662
663 void CodeCache::blobs_do(void f(CodeBlob* nm)) {
664 assert_locked_or_safepoint(CodeCache_lock);
665 FOR_ALL_HEAPS(heap) {
666 FOR_ALL_BLOBS(cb, *heap) {
667 f(cb);
668 }
669 }
670 }
671
672 void CodeCache::nmethods_do(void f(nmethod* nm)) {
673 assert_locked_or_safepoint(CodeCache_lock);
674 NMethodIterator iter(NMethodIterator::all);
675 while(iter.next()) {
676 f(iter.method());
677 }
678 }
679
680 void CodeCache::nmethods_do(NMethodClosure* cl) {
681 assert_locked_or_safepoint(CodeCache_lock);
682 NMethodIterator iter(NMethodIterator::all);
683 while(iter.next()) {
684 cl->do_nmethod(iter.method());
685 }
686 }
687
688 void CodeCache::metadata_do(MetadataClosure* f) {
689 assert_locked_or_safepoint(CodeCache_lock);
690 NMethodIterator iter(NMethodIterator::all);
691 while(iter.next()) {
692 iter.method()->metadata_do(f);
693 }
694 }
695
696 // Calculate the number of GCs after which an nmethod is expected to have been
697 // used in order to not be classed as cold.
698 void CodeCache::update_cold_gc_count() {
699 if (!MethodFlushing || !UseCodeCacheFlushing || NmethodSweepActivity == 0) {
700 // No aging
701 return;
702 }
703
704 size_t last_used = _last_unloading_used;
705 double last_time = _last_unloading_time;
706
707 double time = os::elapsedTime();
708
709 size_t free = unallocated_capacity();
710 size_t max = max_capacity();
711 size_t used = max - free;
712 double gc_interval = time - last_time;
713
714 _unloading_threshold_gc_requested = false;
715 _last_unloading_time = time;
716 _last_unloading_used = used;
717
718 if (last_time == 0.0) {
719 // The first GC doesn't have enough information to make good
720 // decisions, so just keep everything afloat
721 log_info(codecache)("Unknown code cache pressure; don't age code");
722 return;
723 }
724
725 if (gc_interval <= 0.0 || last_used >= used) {
726 // Dodge corner cases where there is no pressure or negative pressure
727 // on the code cache. Just don't unload when this happens.
728 _cold_gc_count = INT_MAX;
729 log_info(codecache)("No code cache pressure; don't age code");
730 return;
731 }
732
733 double allocation_rate = (used - last_used) / gc_interval;
734
735 _unloading_allocation_rates.add(allocation_rate);
736 _unloading_gc_intervals.add(gc_interval);
737
738 size_t aggressive_sweeping_free_threshold = StartAggressiveSweepingAt / 100.0 * max;
739 if (free < aggressive_sweeping_free_threshold) {
740 // We are already in the red zone; be very aggressive to avoid disaster
741 // But not more aggressive than 2. This ensures that an nmethod must
742 // have been unused at least between two GCs to be considered cold still.
743 _cold_gc_count = 2;
744 log_info(codecache)("Code cache critically low; use aggressive aging");
745 return;
746 }
747
748 // The code cache has an expected time for cold nmethods to "time out"
749 // when they have not been used. The time for nmethods to time out
750 // depends on how long we expect we can keep allocating code until
751 // aggressive sweeping starts, based on sampled allocation rates.
752 double average_gc_interval = _unloading_gc_intervals.avg();
753 double average_allocation_rate = _unloading_allocation_rates.avg();
754 double time_to_aggressive = ((double)(free - aggressive_sweeping_free_threshold)) / average_allocation_rate;
755 double cold_timeout = time_to_aggressive / NmethodSweepActivity;
756
757 // Convert time to GC cycles, and crop at INT_MAX. The reason for
758 // that is that the _cold_gc_count will be added to an epoch number
759 // and that addition must not overflow, or we can crash the VM.
760 // But not more aggressive than 2. This ensures that an nmethod must
761 // have been unused at least between two GCs to be considered cold still.
762 _cold_gc_count = MAX2(MIN2((uint64_t)(cold_timeout / average_gc_interval), (uint64_t)INT_MAX), (uint64_t)2);
763
764 double used_ratio = double(used) / double(max);
765 double last_used_ratio = double(last_used) / double(max);
766 log_info(codecache)("Allocation rate: %.3f KB/s, time to aggressive unloading: %.3f s, cold timeout: %.3f s, cold gc count: " UINT64_FORMAT
767 ", used: %.3f MB (%.3f%%), last used: %.3f MB (%.3f%%), gc interval: %.3f s",
768 average_allocation_rate / K, time_to_aggressive, cold_timeout, _cold_gc_count,
769 double(used) / M, used_ratio * 100.0, double(last_used) / M, last_used_ratio * 100.0, average_gc_interval);
770
771 }
772
773 uint64_t CodeCache::cold_gc_count() {
774 return _cold_gc_count;
775 }
776
777 void CodeCache::gc_on_allocation() {
778 if (!is_init_completed()) {
779 // Let's not heuristically trigger GCs before the JVM is ready for GCs, no matter what
780 return;
781 }
782
783 size_t free = unallocated_capacity();
784 size_t max = max_capacity();
785 size_t used = max - free;
786 double free_ratio = double(free) / double(max);
787 if (free_ratio <= StartAggressiveSweepingAt / 100.0) {
788 // In case the GC is concurrent, we make sure only one thread requests the GC.
789 if (Atomic::cmpxchg(&_unloading_threshold_gc_requested, false, true) == false) {
790 log_info(codecache)("Triggering aggressive GC due to having only %.3f%% free memory", free_ratio * 100.0);
791 Universe::heap()->collect(GCCause::_codecache_GC_aggressive);
792 }
793 return;
794 }
795
796 size_t last_used = _last_unloading_used;
797 if (last_used >= used) {
798 // No increase since last GC; no need to sweep yet
799 return;
800 }
801 size_t allocated_since_last = used - last_used;
802 double allocated_since_last_ratio = double(allocated_since_last) / double(max);
803 double threshold = SweeperThreshold / 100.0;
804 double used_ratio = double(used) / double(max);
805 double last_used_ratio = double(last_used) / double(max);
806 if (used_ratio > threshold) {
807 // After threshold is reached, scale it by free_ratio so that more aggressive
808 // GC is triggered as we approach code cache exhaustion
809 threshold *= free_ratio;
810 }
811 // If code cache has been allocated without any GC at all, let's make sure
812 // it is eventually invoked to avoid trouble.
813 if (allocated_since_last_ratio > threshold) {
814 // In case the GC is concurrent, we make sure only one thread requests the GC.
815 if (Atomic::cmpxchg(&_unloading_threshold_gc_requested, false, true) == false) {
816 log_info(codecache)("Triggering threshold (%.3f%%) GC due to allocating %.3f%% since last unloading (%.3f%% used -> %.3f%% used)",
817 threshold * 100.0, allocated_since_last_ratio * 100.0, last_used_ratio * 100.0, used_ratio * 100.0);
818 Universe::heap()->collect(GCCause::_codecache_GC_threshold);
819 }
820 }
821 }
822
823 // We initialize the _gc_epoch to 2, because previous_completed_gc_marking_cycle
824 // subtracts the value by 2, and the type is unsigned. We don't want underflow.
825 //
826 // Odd values mean that marking is in progress, and even values mean that no
827 // marking is currently active.
828 uint64_t CodeCache::_gc_epoch = 2;
829
830 // How many GCs after an nmethod has not been used, do we consider it cold?
831 uint64_t CodeCache::_cold_gc_count = INT_MAX;
832
833 double CodeCache::_last_unloading_time = 0.0;
834 size_t CodeCache::_last_unloading_used = 0;
835 volatile bool CodeCache::_unloading_threshold_gc_requested = false;
836 TruncatedSeq CodeCache::_unloading_gc_intervals(10 /* samples */);
837 TruncatedSeq CodeCache::_unloading_allocation_rates(10 /* samples */);
838
839 uint64_t CodeCache::gc_epoch() {
840 return _gc_epoch;
841 }
842
843 bool CodeCache::is_gc_marking_cycle_active() {
844 // Odd means that marking is active
845 return (_gc_epoch % 2) == 1;
846 }
847
848 uint64_t CodeCache::previous_completed_gc_marking_cycle() {
849 if (is_gc_marking_cycle_active()) {
850 return _gc_epoch - 2;
851 } else {
852 return _gc_epoch - 1;
853 }
854 }
855
856 void CodeCache::on_gc_marking_cycle_start() {
857 assert(!is_gc_marking_cycle_active(), "Previous marking cycle never ended");
858 ++_gc_epoch;
859 }
860
861 // Once started the code cache marking cycle must only be finished after marking of
862 // the java heap is complete. Otherwise nmethods could appear to be not on stack even
863 // if they have frames in continuation StackChunks that were not yet visited.
864 void CodeCache::on_gc_marking_cycle_finish() {
865 assert(is_gc_marking_cycle_active(), "Marking cycle started before last one finished");
866 ++_gc_epoch;
867 update_cold_gc_count();
868 }
869
870 void CodeCache::arm_all_nmethods() {
871 BarrierSet::barrier_set()->barrier_set_nmethod()->arm_all_nmethods();
872 }
873
874 // Mark nmethods for unloading if they contain otherwise unreachable oops.
875 void CodeCache::do_unloading(bool unloading_occurred) {
876 assert_locked_or_safepoint(CodeCache_lock);
877 NMethodIterator iter(NMethodIterator::all);
878 while(iter.next()) {
879 iter.method()->do_unloading(unloading_occurred);
880 }
881 }
882
883 void CodeCache::verify_clean_inline_caches() {
884 #ifdef ASSERT
885 if (!VerifyInlineCaches) return;
886 NMethodIterator iter(NMethodIterator::not_unloading);
887 while(iter.next()) {
888 nmethod* nm = iter.method();
889 nm->verify_clean_inline_caches();
890 nm->verify();
891 }
892 #endif
893 }
894
895 // Defer freeing of concurrently cleaned ExceptionCache entries until
896 // after a global handshake operation.
897 void CodeCache::release_exception_cache(ExceptionCache* entry) {
898 if (SafepointSynchronize::is_at_safepoint()) {
899 delete entry;
900 } else {
901 for (;;) {
902 ExceptionCache* purge_list_head = Atomic::load(&_exception_cache_purge_list);
903 entry->set_purge_list_next(purge_list_head);
904 if (Atomic::cmpxchg(&_exception_cache_purge_list, purge_list_head, entry) == purge_list_head) {
905 break;
906 }
907 }
908 }
909 }
910
911 // Delete exception caches that have been concurrently unlinked,
912 // followed by a global handshake operation.
913 void CodeCache::purge_exception_caches() {
914 ExceptionCache* curr = _exception_cache_purge_list;
915 while (curr != nullptr) {
916 ExceptionCache* next = curr->purge_list_next();
917 delete curr;
918 curr = next;
919 }
920 _exception_cache_purge_list = nullptr;
921 }
922
923 // Restart compiler if possible and required..
924 void CodeCache::maybe_restart_compiler(size_t freed_memory) {
925
926 // Try to start the compiler again if we freed any memory
927 if (!CompileBroker::should_compile_new_jobs() && freed_memory != 0) {
928 CompileBroker::set_should_compile_new_jobs(CompileBroker::run_compilation);
929 log_info(codecache)("Restarting compiler");
930 EventJITRestart event;
931 event.set_freedMemory(freed_memory);
932 event.set_codeCacheMaxCapacity(CodeCache::max_capacity());
933 event.commit();
934 }
935 }
936
937 uint8_t CodeCache::_unloading_cycle = 1;
938
939 void CodeCache::increment_unloading_cycle() {
940 // 2-bit value (see IsUnloadingState in nmethod.cpp for details)
941 // 0 is reserved for new methods.
942 _unloading_cycle = (_unloading_cycle + 1) % 4;
943 if (_unloading_cycle == 0) {
944 _unloading_cycle = 1;
945 }
946 }
947
948 CodeCache::UnlinkingScope::UnlinkingScope(BoolObjectClosure* is_alive)
949 : _is_unloading_behaviour(is_alive)
950 {
951 _saved_behaviour = IsUnloadingBehaviour::current();
952 IsUnloadingBehaviour::set_current(&_is_unloading_behaviour);
953 increment_unloading_cycle();
954 DependencyContext::cleaning_start();
955 }
956
957 CodeCache::UnlinkingScope::~UnlinkingScope() {
958 IsUnloadingBehaviour::set_current(_saved_behaviour);
959 DependencyContext::cleaning_end();
960 }
961
962 void CodeCache::verify_oops() {
963 MutexLocker mu(CodeCache_lock, Mutex::_no_safepoint_check_flag);
964 VerifyOopClosure voc;
965 NMethodIterator iter(NMethodIterator::not_unloading);
966 while(iter.next()) {
967 nmethod* nm = iter.method();
968 nm->oops_do(&voc);
969 nm->verify_oop_relocations();
970 }
971 }
972
973 int CodeCache::blob_count(CodeBlobType code_blob_type) {
974 CodeHeap* heap = get_code_heap(code_blob_type);
975 return (heap != nullptr) ? heap->blob_count() : 0;
976 }
977
978 int CodeCache::blob_count() {
979 int count = 0;
980 FOR_ALL_HEAPS(heap) {
981 count += (*heap)->blob_count();
982 }
983 return count;
984 }
985
986 int CodeCache::nmethod_count(CodeBlobType code_blob_type) {
987 CodeHeap* heap = get_code_heap(code_blob_type);
988 return (heap != nullptr) ? heap->nmethod_count() : 0;
989 }
990
991 int CodeCache::nmethod_count() {
992 int count = 0;
993 for (CodeHeap* heap : *_nmethod_heaps) {
994 count += heap->nmethod_count();
995 }
996 return count;
997 }
998
999 int CodeCache::adapter_count(CodeBlobType code_blob_type) {
1000 CodeHeap* heap = get_code_heap(code_blob_type);
1001 return (heap != nullptr) ? heap->adapter_count() : 0;
1002 }
1003
1004 int CodeCache::adapter_count() {
1005 int count = 0;
1006 FOR_ALL_HEAPS(heap) {
1007 count += (*heap)->adapter_count();
1008 }
1009 return count;
1010 }
1011
1012 address CodeCache::low_bound(CodeBlobType code_blob_type) {
1013 CodeHeap* heap = get_code_heap(code_blob_type);
1014 return (heap != nullptr) ? (address)heap->low_boundary() : nullptr;
1015 }
1016
1017 address CodeCache::high_bound(CodeBlobType code_blob_type) {
1018 CodeHeap* heap = get_code_heap(code_blob_type);
1019 return (heap != nullptr) ? (address)heap->high_boundary() : nullptr;
1020 }
1021
1022 size_t CodeCache::capacity() {
1023 size_t cap = 0;
1024 FOR_ALL_ALLOCABLE_HEAPS(heap) {
1025 cap += (*heap)->capacity();
1026 }
1027 return cap;
1028 }
1029
1030 size_t CodeCache::unallocated_capacity(CodeBlobType code_blob_type) {
1031 CodeHeap* heap = get_code_heap(code_blob_type);
1032 return (heap != nullptr) ? heap->unallocated_capacity() : 0;
1033 }
1034
1035 size_t CodeCache::unallocated_capacity() {
1036 size_t unallocated_cap = 0;
1037 FOR_ALL_ALLOCABLE_HEAPS(heap) {
1038 unallocated_cap += (*heap)->unallocated_capacity();
1039 }
1040 return unallocated_cap;
1041 }
1042
1043 size_t CodeCache::max_capacity() {
1044 size_t max_cap = 0;
1045 FOR_ALL_ALLOCABLE_HEAPS(heap) {
1046 max_cap += (*heap)->max_capacity();
1047 }
1048 return max_cap;
1049 }
1050
1051 bool CodeCache::is_non_nmethod(address addr) {
1052 CodeHeap* blob = get_code_heap(CodeBlobType::NonNMethod);
1053 return blob->contains(addr);
1054 }
1055
1056 size_t CodeCache::max_distance_to_non_nmethod() {
1057 if (!SegmentedCodeCache) {
1058 return ReservedCodeCacheSize;
1059 } else {
1060 CodeHeap* blob = get_code_heap(CodeBlobType::NonNMethod);
1061 // the max distance is minimized by placing the NonNMethod segment
1062 // in between MethodProfiled and MethodNonProfiled segments
1063 size_t dist1 = (size_t)blob->high() - (size_t)_low_bound;
1064 size_t dist2 = (size_t)_high_bound - (size_t)blob->low();
1065 return dist1 > dist2 ? dist1 : dist2;
1066 }
1067 }
1068
1069 // Returns the reverse free ratio. E.g., if 25% (1/4) of the code cache
1070 // is free, reverse_free_ratio() returns 4.
1071 // Since code heap for each type of code blobs falls forward to the next
1072 // type of code heap, return the reverse free ratio for the entire
1073 // code cache.
1074 double CodeCache::reverse_free_ratio() {
1075 double unallocated = MAX2((double)unallocated_capacity(), 1.0); // Avoid division by 0;
1076 double max = (double)max_capacity();
1077 double result = max / unallocated;
1078 assert (max >= unallocated, "Must be");
1079 assert (result >= 1.0, "reverse_free_ratio must be at least 1. It is %f", result);
1080 return result;
1081 }
1082
1083 size_t CodeCache::bytes_allocated_in_freelists() {
1084 size_t allocated_bytes = 0;
1085 FOR_ALL_ALLOCABLE_HEAPS(heap) {
1086 allocated_bytes += (*heap)->allocated_in_freelist();
1087 }
1088 return allocated_bytes;
1089 }
1090
1091 int CodeCache::allocated_segments() {
1092 int number_of_segments = 0;
1093 FOR_ALL_ALLOCABLE_HEAPS(heap) {
1094 number_of_segments += (*heap)->allocated_segments();
1095 }
1096 return number_of_segments;
1097 }
1098
1099 size_t CodeCache::freelists_length() {
1100 size_t length = 0;
1101 FOR_ALL_ALLOCABLE_HEAPS(heap) {
1102 length += (*heap)->freelist_length();
1103 }
1104 return length;
1105 }
1106
1107 void icache_init();
1108
1109 void CodeCache::initialize() {
1110 assert(CodeCacheSegmentSize >= (uintx)CodeEntryAlignment, "CodeCacheSegmentSize must be large enough to align entry points");
1111 #ifdef COMPILER2
1112 assert(CodeCacheSegmentSize >= (uintx)OptoLoopAlignment, "CodeCacheSegmentSize must be large enough to align inner loops");
1113 #endif
1114 assert(CodeCacheSegmentSize >= sizeof(jdouble), "CodeCacheSegmentSize must be large enough to align constants");
1115 // This was originally just a check of the alignment, causing failure, instead, round
1116 // the code cache to the page size. In particular, Solaris is moving to a larger
1117 // default page size.
1118 CodeCacheExpansionSize = align_up(CodeCacheExpansionSize, os::vm_page_size());
1119
1120 if (SegmentedCodeCache) {
1121 // Use multiple code heaps
1122 initialize_heaps();
1123 } else {
1124 // Use a single code heap
1125 FLAG_SET_ERGO(NonNMethodCodeHeapSize, (uintx)os::vm_page_size());
1126 FLAG_SET_ERGO(ProfiledCodeHeapSize, 0);
1127 FLAG_SET_ERGO(NonProfiledCodeHeapSize, 0);
1128
1129 // If InitialCodeCacheSize is equal to ReservedCodeCacheSize, then it's more likely
1130 // users want to use the largest available page.
1131 const size_t min_pages = (InitialCodeCacheSize == ReservedCodeCacheSize) ? 1 : 8;
1132 ReservedSpace rs = reserve_heap_memory(ReservedCodeCacheSize, page_size(false, min_pages));
1133 // Register CodeHeaps with LSan as we sometimes embed pointers to malloc memory.
1134 LSAN_REGISTER_ROOT_REGION(rs.base(), rs.size());
1135 add_heap(rs, "CodeCache", CodeBlobType::All);
1136 }
1137
1138 // Initialize ICache flush mechanism
1139 // This service is needed for os::register_code_area
1140 icache_init();
1141
1142 // Give OS a chance to register generated code area.
1143 // This is used on Windows 64 bit platforms to register
1144 // Structured Exception Handlers for our generated code.
1145 os::register_code_area((char*)low_bound(), (char*)high_bound());
1146 }
1147
1148 void codeCache_init() {
1149 CodeCache::initialize();
1150 }
1151
1152 //------------------------------------------------------------------------------------------------
1153
1154 bool CodeCache::has_nmethods_with_dependencies() {
1155 return Atomic::load_acquire(&_number_of_nmethods_with_dependencies) != 0;
1156 }
1157
1158 void CodeCache::clear_inline_caches() {
1159 assert_locked_or_safepoint(CodeCache_lock);
1160 NMethodIterator iter(NMethodIterator::not_unloading);
1161 while(iter.next()) {
1162 iter.method()->clear_inline_caches();
1163 }
1164 }
1165
1166 // Only used by whitebox API
1167 void CodeCache::cleanup_inline_caches_whitebox() {
1168 assert_locked_or_safepoint(CodeCache_lock);
1169 NMethodIterator iter(NMethodIterator::not_unloading);
1170 while(iter.next()) {
1171 iter.method()->cleanup_inline_caches_whitebox();
1172 }
1173 }
1174
1175 // Keeps track of time spent for checking dependencies
1176 NOT_PRODUCT(static elapsedTimer dependentCheckTime;)
1177
1178 #ifndef PRODUCT
1179 // Check if any of live methods dependencies have been invalidated.
1180 // (this is expensive!)
1181 static void check_live_nmethods_dependencies(DepChange& changes) {
1182 // Checked dependencies are allocated into this ResourceMark
1183 ResourceMark rm;
1184
1185 // Turn off dependency tracing while actually testing dependencies.
1186 FlagSetting fs(Dependencies::_verify_in_progress, true);
1187
1188 typedef ResourceHashtable<DependencySignature, int, 11027,
1189 AnyObj::RESOURCE_AREA, mtInternal,
1190 &DependencySignature::hash,
1191 &DependencySignature::equals> DepTable;
1192
1193 DepTable* table = new DepTable();
1194
1195 // Iterate over live nmethods and check dependencies of all nmethods that are not
1196 // marked for deoptimization. A particular dependency is only checked once.
1197 NMethodIterator iter(NMethodIterator::not_unloading);
1198 while(iter.next()) {
1199 nmethod* nm = iter.method();
1200 // Only notify for live nmethods
1201 if (!nm->is_marked_for_deoptimization()) {
1202 for (Dependencies::DepStream deps(nm); deps.next(); ) {
1203 // Construct abstraction of a dependency.
1204 DependencySignature* current_sig = new DependencySignature(deps);
1205
1206 // Determine if dependency is already checked. table->put(...) returns
1207 // 'true' if the dependency is added (i.e., was not in the hashtable).
1208 if (table->put(*current_sig, 1)) {
1209 if (deps.check_dependency() != nullptr) {
1210 // Dependency checking failed. Print out information about the failed
1211 // dependency and finally fail with an assert. We can fail here, since
1212 // dependency checking is never done in a product build.
1213 tty->print_cr("Failed dependency:");
1214 changes.print();
1215 nm->print();
1216 nm->print_dependencies_on(tty);
1217 assert(false, "Should have been marked for deoptimization");
1218 }
1219 }
1220 }
1221 }
1222 }
1223 }
1224 #endif
1225
1226 void CodeCache::mark_for_deoptimization(DeoptimizationScope* deopt_scope, KlassDepChange& changes) {
1227 MutexLocker mu(CodeCache_lock, Mutex::_no_safepoint_check_flag);
1228
1229 // search the hierarchy looking for nmethods which are affected by the loading of this class
1230
1231 // then search the interfaces this class implements looking for nmethods
1232 // which might be dependent of the fact that an interface only had one
1233 // implementor.
1234 // nmethod::check_all_dependencies works only correctly, if no safepoint
1235 // can happen
1236 NoSafepointVerifier nsv;
1237 for (DepChange::ContextStream str(changes, nsv); str.next(); ) {
1238 InstanceKlass* d = str.klass();
1239 d->mark_dependent_nmethods(deopt_scope, changes);
1240 }
1241
1242 #ifndef PRODUCT
1243 if (VerifyDependencies) {
1244 // Object pointers are used as unique identifiers for dependency arguments. This
1245 // is only possible if no safepoint, i.e., GC occurs during the verification code.
1246 dependentCheckTime.start();
1247 check_live_nmethods_dependencies(changes);
1248 dependentCheckTime.stop();
1249 }
1250 #endif
1251 }
1252
1253 #if INCLUDE_JVMTI
1254 // RedefineClasses support for saving nmethods that are dependent on "old" methods.
1255 // We don't really expect this table to grow very large. If it does, it can become a hashtable.
1256 static GrowableArray<nmethod*>* old_nmethod_table = nullptr;
1257
1258 static void add_to_old_table(nmethod* c) {
1259 if (old_nmethod_table == nullptr) {
1260 old_nmethod_table = new (mtCode) GrowableArray<nmethod*>(100, mtCode);
1261 }
1262 old_nmethod_table->push(c);
1263 }
1264
1265 static void reset_old_method_table() {
1266 if (old_nmethod_table != nullptr) {
1267 delete old_nmethod_table;
1268 old_nmethod_table = nullptr;
1269 }
1270 }
1271
1272 // Remove this method when flushed.
1273 void CodeCache::unregister_old_nmethod(nmethod* c) {
1274 assert_lock_strong(CodeCache_lock);
1275 if (old_nmethod_table != nullptr) {
1276 int index = old_nmethod_table->find(c);
1277 if (index != -1) {
1278 old_nmethod_table->delete_at(index);
1279 }
1280 }
1281 }
1282
1283 void CodeCache::old_nmethods_do(MetadataClosure* f) {
1284 // Walk old method table and mark those on stack.
1285 int length = 0;
1286 if (old_nmethod_table != nullptr) {
1287 length = old_nmethod_table->length();
1288 for (int i = 0; i < length; i++) {
1289 // Walk all methods saved on the last pass. Concurrent class unloading may
1290 // also be looking at this method's metadata, so don't delete it yet if
1291 // it is marked as unloaded.
1292 old_nmethod_table->at(i)->metadata_do(f);
1293 }
1294 }
1295 log_debug(redefine, class, nmethod)("Walked %d nmethods for mark_on_stack", length);
1296 }
1297
1298 // Walk compiled methods and mark dependent methods for deoptimization.
1299 void CodeCache::mark_dependents_for_evol_deoptimization(DeoptimizationScope* deopt_scope) {
1300 assert(SafepointSynchronize::is_at_safepoint(), "Can only do this at a safepoint!");
1301 // Each redefinition creates a new set of nmethods that have references to "old" Methods
1302 // So delete old method table and create a new one.
1303 reset_old_method_table();
1304
1305 NMethodIterator iter(NMethodIterator::all);
1306 while(iter.next()) {
1307 nmethod* nm = iter.method();
1308 // Walk all alive nmethods to check for old Methods.
1309 // This includes methods whose inline caches point to old methods, so
1310 // inline cache clearing is unnecessary.
1311 if (nm->has_evol_metadata()) {
1312 deopt_scope->mark(nm);
1313 add_to_old_table(nm);
1314 }
1315 }
1316 }
1317
1318 void CodeCache::mark_all_nmethods_for_evol_deoptimization(DeoptimizationScope* deopt_scope) {
1319 assert(SafepointSynchronize::is_at_safepoint(), "Can only do this at a safepoint!");
1320 NMethodIterator iter(NMethodIterator::all);
1321 while(iter.next()) {
1322 nmethod* nm = iter.method();
1323 if (!nm->method()->is_method_handle_intrinsic()) {
1324 if (nm->can_be_deoptimized()) {
1325 deopt_scope->mark(nm);
1326 }
1327 if (nm->has_evol_metadata()) {
1328 add_to_old_table(nm);
1329 }
1330 }
1331 }
1332 }
1333
1334 #endif // INCLUDE_JVMTI
1335
1336 // Mark methods for deopt (if safe or possible).
1337 void CodeCache::mark_all_nmethods_for_deoptimization(DeoptimizationScope* deopt_scope) {
1338 MutexLocker mu(CodeCache_lock, Mutex::_no_safepoint_check_flag);
1339 NMethodIterator iter(NMethodIterator::not_unloading);
1340 while(iter.next()) {
1341 nmethod* nm = iter.method();
1342 if (!nm->is_native_method()) {
1343 deopt_scope->mark(nm);
1344 }
1345 }
1346 }
1347
1348 void CodeCache::mark_for_deoptimization(DeoptimizationScope* deopt_scope, Method* dependee) {
1349 MutexLocker mu(CodeCache_lock, Mutex::_no_safepoint_check_flag);
1350
1351 NMethodIterator iter(NMethodIterator::not_unloading);
1352 while(iter.next()) {
1353 nmethod* nm = iter.method();
1354 if (nm->is_dependent_on_method(dependee)) {
1355 deopt_scope->mark(nm);
1356 }
1357 }
1358 }
1359
1360 void CodeCache::make_marked_nmethods_deoptimized() {
1361 RelaxedNMethodIterator iter(RelaxedNMethodIterator::not_unloading);
1362 while(iter.next()) {
1363 nmethod* nm = iter.method();
1364 if (nm->is_marked_for_deoptimization() && !nm->has_been_deoptimized() && nm->can_be_deoptimized()) {
1365 nm->make_not_entrant(nmethod::InvalidationReason::MARKED_FOR_DEOPTIMIZATION);
1366 nm->make_deoptimized();
1367 }
1368 }
1369 }
1370
1371 // Marks compiled methods dependent on dependee.
1372 void CodeCache::mark_dependents_on(DeoptimizationScope* deopt_scope, InstanceKlass* dependee) {
1373 assert_lock_strong(Compile_lock);
1374
1375 if (!has_nmethods_with_dependencies()) {
1376 return;
1377 }
1378
1379 if (dependee->is_linked()) {
1380 // Class initialization state change.
1381 KlassInitDepChange changes(dependee);
1382 mark_for_deoptimization(deopt_scope, changes);
1383 } else {
1384 // New class is loaded.
1385 NewKlassDepChange changes(dependee);
1386 mark_for_deoptimization(deopt_scope, changes);
1387 }
1388 }
1389
1390 // Marks compiled methods dependent on dependee
1391 void CodeCache::mark_dependents_on_method_for_breakpoint(const methodHandle& m_h) {
1392 assert(SafepointSynchronize::is_at_safepoint(), "invariant");
1393
1394 DeoptimizationScope deopt_scope;
1395 // Compute the dependent nmethods
1396 mark_for_deoptimization(&deopt_scope, m_h());
1397 deopt_scope.deoptimize_marked();
1398 }
1399
1400 void CodeCache::verify() {
1401 assert_locked_or_safepoint(CodeCache_lock);
1402 FOR_ALL_HEAPS(heap) {
1403 (*heap)->verify();
1404 FOR_ALL_BLOBS(cb, *heap) {
1405 cb->verify();
1406 }
1407 }
1408 }
1409
1410 // A CodeHeap is full. Print out warning and report event.
1411 PRAGMA_DIAG_PUSH
1412 PRAGMA_FORMAT_NONLITERAL_IGNORED
1413 void CodeCache::report_codemem_full(CodeBlobType code_blob_type, bool print) {
1414 // Get nmethod heap for the given CodeBlobType and build CodeCacheFull event
1415 CodeHeap* heap = get_code_heap(code_blob_type);
1416 assert(heap != nullptr, "heap is null");
1417
1418 int full_count = heap->report_full();
1419
1420 if ((full_count == 1) || print) {
1421 // Not yet reported for this heap, report
1422 if (SegmentedCodeCache) {
1423 ResourceMark rm;
1424 stringStream msg1_stream, msg2_stream;
1425 msg1_stream.print("%s is full. Compiler has been disabled.",
1426 get_code_heap_name(code_blob_type));
1427 msg2_stream.print("Try increasing the code heap size using -XX:%s=",
1428 get_code_heap_flag_name(code_blob_type));
1429 const char *msg1 = msg1_stream.as_string();
1430 const char *msg2 = msg2_stream.as_string();
1431
1432 log_warning(codecache)("%s", msg1);
1433 log_warning(codecache)("%s", msg2);
1434 warning("%s", msg1);
1435 warning("%s", msg2);
1436 } else {
1437 const char *msg1 = "CodeCache is full. Compiler has been disabled.";
1438 const char *msg2 = "Try increasing the code cache size using -XX:ReservedCodeCacheSize=";
1439
1440 log_warning(codecache)("%s", msg1);
1441 log_warning(codecache)("%s", msg2);
1442 warning("%s", msg1);
1443 warning("%s", msg2);
1444 }
1445 stringStream s;
1446 // Dump code cache into a buffer before locking the tty.
1447 {
1448 MutexLocker mu(CodeCache_lock, Mutex::_no_safepoint_check_flag);
1449 print_summary(&s);
1450 }
1451 {
1452 ttyLocker ttyl;
1453 tty->print("%s", s.freeze());
1454 }
1455
1456 if (full_count == 1) {
1457 if (PrintCodeHeapAnalytics) {
1458 CompileBroker::print_heapinfo(tty, "all", 4096); // details, may be a lot!
1459 }
1460 }
1461 }
1462
1463 EventCodeCacheFull event;
1464 if (event.should_commit()) {
1465 event.set_codeBlobType((u1)code_blob_type);
1466 event.set_startAddress((u8)heap->low_boundary());
1467 event.set_commitedTopAddress((u8)heap->high());
1468 event.set_reservedTopAddress((u8)heap->high_boundary());
1469 event.set_entryCount(heap->blob_count());
1470 event.set_methodCount(heap->nmethod_count());
1471 event.set_adaptorCount(heap->adapter_count());
1472 event.set_unallocatedCapacity(heap->unallocated_capacity());
1473 event.set_fullCount(heap->full_count());
1474 event.set_codeCacheMaxCapacity(CodeCache::max_capacity());
1475 event.commit();
1476 }
1477 }
1478 PRAGMA_DIAG_POP
1479
1480 void CodeCache::print_memory_overhead() {
1481 size_t wasted_bytes = 0;
1482 FOR_ALL_ALLOCABLE_HEAPS(heap) {
1483 CodeHeap* curr_heap = *heap;
1484 for (CodeBlob* cb = (CodeBlob*)curr_heap->first(); cb != nullptr; cb = (CodeBlob*)curr_heap->next(cb)) {
1485 HeapBlock* heap_block = ((HeapBlock*)cb) - 1;
1486 wasted_bytes += heap_block->length() * CodeCacheSegmentSize - cb->size();
1487 }
1488 }
1489 // Print bytes that are allocated in the freelist
1490 ttyLocker ttl;
1491 tty->print_cr("Number of elements in freelist: %zd", freelists_length());
1492 tty->print_cr("Allocated in freelist: %zdkB", bytes_allocated_in_freelists()/K);
1493 tty->print_cr("Unused bytes in CodeBlobs: %zdkB", (wasted_bytes/K));
1494 tty->print_cr("Segment map size: %zdkB", allocated_segments()/K); // 1 byte per segment
1495 }
1496
1497 //------------------------------------------------------------------------------------------------
1498 // Non-product version
1499
1500 #ifndef PRODUCT
1501
1502 void CodeCache::print_trace(const char* event, CodeBlob* cb, uint size) {
1503 if (PrintCodeCache2) { // Need to add a new flag
1504 ResourceMark rm;
1505 if (size == 0) {
1506 int s = cb->size();
1507 assert(s >= 0, "CodeBlob size is negative: %d", s);
1508 size = (uint) s;
1509 }
1510 tty->print_cr("CodeCache %s: addr: " INTPTR_FORMAT ", size: 0x%x", event, p2i(cb), size);
1511 }
1512 }
1513
1514 void CodeCache::print_internals() {
1515 int nmethodCount = 0;
1516 int runtimeStubCount = 0;
1517 int upcallStubCount = 0;
1518 int adapterCount = 0;
1519 int mhAdapterCount = 0;
1520 int vtableBlobCount = 0;
1521 int deoptimizationStubCount = 0;
1522 int uncommonTrapStubCount = 0;
1523 int exceptionStubCount = 0;
1524 int safepointStubCount = 0;
1525 int bufferBlobCount = 0;
1526 int total = 0;
1527 int nmethodNotEntrant = 0;
1528 int nmethodJava = 0;
1529 int nmethodNative = 0;
1530 int max_nm_size = 0;
1531 ResourceMark rm;
1532
1533 int i = 0;
1534 FOR_ALL_ALLOCABLE_HEAPS(heap) {
1535 if ((_nmethod_heaps->length() >= 1) && Verbose) {
1536 tty->print_cr("-- %s --", (*heap)->name());
1537 }
1538 FOR_ALL_BLOBS(cb, *heap) {
1539 total++;
1540 if (cb->is_nmethod()) {
1541 nmethod* nm = (nmethod*)cb;
1542
1543 if (Verbose && nm->method() != nullptr) {
1544 ResourceMark rm;
          char* method_name = nm->method()->name_and_sig_as_C_string();
          tty->print("%s", method_name);
          if (nm->is_not_entrant()) { tty->print_cr(" not-entrant"); }
1548 }
1549
1550 nmethodCount++;
1551
        if (nm->is_not_entrant()) { nmethodNotEntrant++; }
        if (nm->method() != nullptr && nm->is_native_method()) { nmethodNative++; }

        if (nm->method() != nullptr && nm->is_java_method()) {
1556 nmethodJava++;
1557 max_nm_size = MAX2(max_nm_size, nm->size());
1558 }
1559 } else if (cb->is_runtime_stub()) {
1560 runtimeStubCount++;
1561 } else if (cb->is_upcall_stub()) {
1562 upcallStubCount++;
1563 } else if (cb->is_deoptimization_stub()) {
1564 deoptimizationStubCount++;
1565 } else if (cb->is_uncommon_trap_stub()) {
1566 uncommonTrapStubCount++;
1567 } else if (cb->is_exception_stub()) {
1568 exceptionStubCount++;
1569 } else if (cb->is_safepoint_stub()) {
1570 safepointStubCount++;
1571 } else if (cb->is_adapter_blob()) {
1572 adapterCount++;
1573 } else if (cb->is_method_handles_adapter_blob()) {
1574 mhAdapterCount++;
1575 } else if (cb->is_vtable_blob()) {
1576 vtableBlobCount++;
1577 } else if (cb->is_buffer_blob()) {
1578 bufferBlobCount++;
1579 }
1580 }
1581 }
1582
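  // Build a histogram of Java nmethod sizes using 512-byte buckets.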
1583 int bucketSize = 512;
1584 int bucketLimit = max_nm_size / bucketSize + 1;
1585 int *buckets = NEW_C_HEAP_ARRAY(int, bucketLimit, mtCode);
1586 memset(buckets, 0, sizeof(int) * bucketLimit);
1587
1588 NMethodIterator iter(NMethodIterator::all);
  while (iter.next()) {
    nmethod* nm = iter.method();
    if (nm->method() != nullptr && nm->is_java_method()) {
1592 buckets[nm->size() / bucketSize]++;
1593 }
1594 }
1595
1596 tty->print_cr("Code Cache Entries (total of %d)",total);
1597 tty->print_cr("-------------------------------------------------");
1598 tty->print_cr("nmethods: %d",nmethodCount);
1599 tty->print_cr("\tnot_entrant: %d",nmethodNotEntrant);
1600 tty->print_cr("\tjava: %d",nmethodJava);
1601 tty->print_cr("\tnative: %d",nmethodNative);
1602 tty->print_cr("runtime_stubs: %d",runtimeStubCount);
1603 tty->print_cr("upcall_stubs: %d",upcallStubCount);
1604 tty->print_cr("adapters: %d",adapterCount);
1605 tty->print_cr("MH adapters: %d",mhAdapterCount);
1606 tty->print_cr("VTables: %d",vtableBlobCount);
1607 tty->print_cr("buffer blobs: %d",bufferBlobCount);
1608 tty->print_cr("deoptimization_stubs: %d",deoptimizationStubCount);
1609 tty->print_cr("uncommon_traps: %d",uncommonTrapStubCount);
1610 tty->print_cr("exception_stubs: %d",exceptionStubCount);
1611 tty->print_cr("safepoint_stubs: %d",safepointStubCount);
1612 tty->print_cr("\nnmethod size distribution");
1613 tty->print_cr("-------------------------------------------------");
1614
  for (int i = 0; i < bucketLimit; i++) {
    if (buckets[i] != 0) {
      tty->print("%d - %d bytes", i * bucketSize, (i + 1) * bucketSize);
      tty->fill_to(40);
      tty->print_cr("%d", buckets[i]);
1620 }
1621 }
1622
1623 FREE_C_HEAP_ARRAY(int, buckets);
1624 print_memory_overhead();
1625 }
1626
1627 #endif // !PRODUCT
1628
1629 void CodeCache::print() {
1630 print_summary(tty);
1631
1632 #ifndef PRODUCT
1633 if (!Verbose) return;
1634
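  // In Verbose mode, collect per-compilation-level statistics for live nmethods and
  // per-type statistics for all other blobs, then print them below.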
1635 CodeBlob_sizes live[CompLevel_full_optimization + 1];
1636 CodeBlob_sizes runtimeStub;
1637 CodeBlob_sizes upcallStub;
1638 CodeBlob_sizes uncommonTrapStub;
1639 CodeBlob_sizes deoptimizationStub;
1640 CodeBlob_sizes exceptionStub;
1641 CodeBlob_sizes safepointStub;
1642 CodeBlob_sizes adapter;
1643 CodeBlob_sizes mhAdapter;
1644 CodeBlob_sizes vtableBlob;
1645 CodeBlob_sizes bufferBlob;
1646 CodeBlob_sizes other;
1647
1648 FOR_ALL_ALLOCABLE_HEAPS(heap) {
1649 FOR_ALL_BLOBS(cb, *heap) {
1650 if (cb->is_nmethod()) {
1651 const int level = cb->as_nmethod()->comp_level();
1652 assert(0 <= level && level <= CompLevel_full_optimization, "Invalid compilation level");
1653 live[level].add(cb);
1654 } else if (cb->is_runtime_stub()) {
1655 runtimeStub.add(cb);
1656 } else if (cb->is_upcall_stub()) {
1657 upcallStub.add(cb);
1658 } else if (cb->is_deoptimization_stub()) {
1659 deoptimizationStub.add(cb);
1660 } else if (cb->is_uncommon_trap_stub()) {
1661 uncommonTrapStub.add(cb);
1662 } else if (cb->is_exception_stub()) {
1663 exceptionStub.add(cb);
1664 } else if (cb->is_safepoint_stub()) {
1665 safepointStub.add(cb);
1666 } else if (cb->is_adapter_blob()) {
1667 adapter.add(cb);
1668 } else if (cb->is_method_handles_adapter_blob()) {
1669 mhAdapter.add(cb);
1670 } else if (cb->is_vtable_blob()) {
1671 vtableBlob.add(cb);
1672 } else if (cb->is_buffer_blob()) {
1673 bufferBlob.add(cb);
1674 } else {
1675 other.add(cb);
1676 }
1677 }
1678 }
1679
1680 tty->print_cr("nmethod dependency checking time %fs", dependentCheckTime.seconds());
1681
1682 tty->print_cr("nmethod blobs per compilation level:");
1683 for (int i = 0; i <= CompLevel_full_optimization; i++) {
    const char* level_name;
    switch (i) {
    case CompLevel_none:              level_name = "none";              break;
    case CompLevel_simple:            level_name = "simple";            break;
    case CompLevel_limited_profile:   level_name = "limited profile";   break;
    case CompLevel_full_profile:      level_name = "full profile";      break;
    case CompLevel_full_optimization: level_name = "full optimization"; break;
    // Give level_name a printable fallback so it is never read uninitialized in
    // builds where asserts are compiled out.
    default: level_name = "invalid";  assert(false, "invalid compilation level");
1692 }
1693 tty->print_cr("%s:", level_name);
1694 live[i].print("live");
1695 }
1696
1697 struct {
1698 const char* name;
1699 const CodeBlob_sizes* sizes;
1700 } non_nmethod_blobs[] = {
1701 { "runtime", &runtimeStub },
1702 { "upcall", &upcallStub },
1703 { "uncommon trap", &uncommonTrapStub },
1704 { "deoptimization", &deoptimizationStub },
1705 { "exception", &exceptionStub },
1706 { "safepoint", &safepointStub },
1707 { "adapter", &adapter },
1708 { "mh_adapter", &mhAdapter },
1709 { "vtable", &vtableBlob },
1710 { "buffer blob", &bufferBlob },
1711 { "other", &other },
1712 };
1713 tty->print_cr("Non-nmethod blobs:");
1714 for (auto& blob: non_nmethod_blobs) {
1715 blob.sizes->print(blob.name);
1716 }
1717
1718 if (WizardMode) {
1719 // print the oop_map usage
1720 int code_size = 0;
1721 int number_of_blobs = 0;
1722 int number_of_oop_maps = 0;
1723 int map_size = 0;
1724 FOR_ALL_ALLOCABLE_HEAPS(heap) {
1725 FOR_ALL_BLOBS(cb, *heap) {
1726 number_of_blobs++;
1727 code_size += cb->code_size();
1728 ImmutableOopMapSet* set = cb->oop_maps();
1729 if (set != nullptr) {
1730 number_of_oop_maps += set->count();
1731 map_size += set->nr_of_bytes();
1732 }
1733 }
1734 }
1735 tty->print_cr("OopMaps");
1736 tty->print_cr(" #blobs = %d", number_of_blobs);
1737 tty->print_cr(" code size = %d", code_size);
1738 tty->print_cr(" #oop_maps = %d", number_of_oop_maps);
1739 tty->print_cr(" map size = %d", map_size);
1740 }
1741
1742 #endif // !PRODUCT
1743 }
1744
1745 void CodeCache::print_summary(outputStream* st, bool detailed) {
1746 int full_count = 0;
1747 julong total_used = 0;
1748 julong total_max_used = 0;
1749 julong total_free = 0;
1750 julong total_size = 0;
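  // Print usage for each code heap and accumulate the totals across all heaps.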
1751 FOR_ALL_HEAPS(heap_iterator) {
1752 CodeHeap* heap = (*heap_iterator);
1753 size_t total = (heap->high_boundary() - heap->low_boundary());
1754 if (_heaps->length() >= 1) {
1755 st->print("%s:", heap->name());
1756 } else {
1757 st->print("CodeCache:");
1758 }
1759 size_t size = total/K;
1760 size_t used = (total - heap->unallocated_capacity())/K;
1761 size_t max_used = heap->max_allocated_capacity()/K;
1762 size_t free = heap->unallocated_capacity()/K;
1763 total_size += size;
1764 total_used += used;
1765 total_max_used += max_used;
1766 total_free += free;
1767 st->print_cr(" size=%zuKb used=%zu"
1768 "Kb max_used=%zuKb free=%zuKb",
1769 size, used, max_used, free);
1770
1771 if (detailed) {
1772 st->print_cr(" bounds [" INTPTR_FORMAT ", " INTPTR_FORMAT ", " INTPTR_FORMAT "]",
1773 p2i(heap->low_boundary()),
1774 p2i(heap->high()),
1775 p2i(heap->high_boundary()));
1776
1777 full_count += get_codemem_full_count(heap->code_blob_type());
1778 }
1779 }
1780
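  // In the detailed view, print the aggregate totals (for a segmented code cache),
  // the blob/nmethod/adapter counts, and the current compiler state.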
1781 if (detailed) {
1782 if (SegmentedCodeCache) {
1783 st->print("CodeCache:");
1784 st->print_cr(" size=" JULONG_FORMAT "Kb, used=" JULONG_FORMAT
1785 "Kb, max_used=" JULONG_FORMAT "Kb, free=" JULONG_FORMAT "Kb",
1786 total_size, total_used, total_max_used, total_free);
1787 }
1788 st->print_cr(" total_blobs=" UINT32_FORMAT ", nmethods=" UINT32_FORMAT
1789 ", adapters=" UINT32_FORMAT ", full_count=" UINT32_FORMAT,
1790 blob_count(), nmethod_count(), adapter_count(), full_count);
1791 st->print_cr("Compilation: %s, stopped_count=%d, restarted_count=%d",
1792 CompileBroker::should_compile_new_jobs() ?
1793 "enabled" : Arguments::mode() == Arguments::_int ?
1794 "disabled (interpreter mode)" :
1795 "disabled (not enough contiguous free space left)",
1796 CompileBroker::get_total_compiler_stopped_count(),
1797 CompileBroker::get_total_compiler_restarted_count());
1798 }
1799 }
1800
1801 void CodeCache::print_codelist(outputStream* st) {
1802 MutexLocker mu(CodeCache_lock, Mutex::_no_safepoint_check_flag);
1803
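  // One line per not-unloading nmethod: compile id, compilation level, state,
  // method name, and the [header_begin, code_begin - code_end] address range.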
1804 NMethodIterator iter(NMethodIterator::not_unloading);
1805 while (iter.next()) {
1806 nmethod* nm = iter.method();
1807 ResourceMark rm;
1808 char* method_name = nm->method()->name_and_sig_as_C_string();
1809 const char* jvmci_name = nullptr;
1810 #if INCLUDE_JVMCI
1811 jvmci_name = nm->jvmci_name();
1812 #endif
1813 st->print_cr("%d %d %d %s%s%s [" INTPTR_FORMAT ", " INTPTR_FORMAT " - " INTPTR_FORMAT "]",
1814 nm->compile_id(), nm->comp_level(), nm->get_state(),
1815 method_name, jvmci_name ? " jvmci_name=" : "", jvmci_name ? jvmci_name : "",
1816 (intptr_t)nm->header_begin(), (intptr_t)nm->code_begin(), (intptr_t)nm->code_end());
1817 }
1818 }
1819
1820 void CodeCache::print_layout(outputStream* st) {
1821 MutexLocker mu(CodeCache_lock, Mutex::_no_safepoint_check_flag);
1822 ResourceMark rm;
1823 print_summary(st, true);
1824 }
1825
1826 void CodeCache::log_state(outputStream* st) {
1827 st->print(" total_blobs='" UINT32_FORMAT "' nmethods='" UINT32_FORMAT "'"
1828 " adapters='" UINT32_FORMAT "' free_code_cache='%zu'",
1829 blob_count(), nmethod_count(), adapter_count(),
1830 unallocated_capacity());
1831 }
1832
1833 #ifdef LINUX
1834 void CodeCache::write_perf_map(const char* filename, outputStream* st) {
1835 MutexLocker mu(CodeCache_lock, Mutex::_no_safepoint_check_flag);
1836 char fname[JVM_MAXPATHLEN];
1837 if (filename == nullptr) {
1838 // Invocation outside of jcmd requires pid substitution.
1839 if (!Arguments::copy_expand_pid(DEFAULT_PERFMAP_FILENAME,
1840 strlen(DEFAULT_PERFMAP_FILENAME),
1841 fname, JVM_MAXPATHLEN)) {
1842 st->print_cr("Warning: Not writing perf map as pid substitution failed.");
1843 return;
1844 }
1845 filename = fname;
1846 }
1847 fileStream fs(filename, "w");
1848 if (!fs.is_open()) {
1849 st->print_cr("Warning: Failed to create %s for perf map", filename);
1850 return;
1851 }
1852
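  // Write one "start size name" line per code blob, the format perf expects in
  // its map files.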
1853 AllCodeBlobsIterator iter(AllCodeBlobsIterator::not_unloading);
1854 while (iter.next()) {
1855 CodeBlob *cb = iter.method();
1856 ResourceMark rm;
1857 const char* method_name = nullptr;
1858 const char* jvmci_name = nullptr;
1859 if (cb->is_nmethod()) {
1860 nmethod* nm = cb->as_nmethod();
1861 method_name = nm->method()->external_name();
1862 #if INCLUDE_JVMCI
1863 jvmci_name = nm->jvmci_name();
1864 #endif
1865 } else {
1866 method_name = cb->name();
1867 }
1868 fs.print_cr(INTPTR_FORMAT " " INTPTR_FORMAT " %s%s%s",
1869 (intptr_t)cb->code_begin(), (intptr_t)cb->code_size(),
1870 method_name, jvmci_name ? " jvmci_name=" : "", jvmci_name ? jvmci_name : "");
1871 }
1872 }
1873 #endif // LINUX
1874
1875 //---< BEGIN >--- CodeHeap State Analytics.
1876
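// The CodeHeap State Analytics entry points below simply forward the request to
// CodeHeapState for each allocable code heap.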
1877 void CodeCache::aggregate(outputStream *out, size_t granularity) {
1878 FOR_ALL_ALLOCABLE_HEAPS(heap) {
1879 CodeHeapState::aggregate(out, (*heap), granularity);
1880 }
1881 }
1882
1883 void CodeCache::discard(outputStream *out) {
1884 FOR_ALL_ALLOCABLE_HEAPS(heap) {
1885 CodeHeapState::discard(out, (*heap));
1886 }
1887 }
1888
1889 void CodeCache::print_usedSpace(outputStream *out) {
1890 FOR_ALL_ALLOCABLE_HEAPS(heap) {
1891 CodeHeapState::print_usedSpace(out, (*heap));
1892 }
1893 }
1894
1895 void CodeCache::print_freeSpace(outputStream *out) {
1896 FOR_ALL_ALLOCABLE_HEAPS(heap) {
1897 CodeHeapState::print_freeSpace(out, (*heap));
1898 }
1899 }
1900
1901 void CodeCache::print_count(outputStream *out) {
1902 FOR_ALL_ALLOCABLE_HEAPS(heap) {
1903 CodeHeapState::print_count(out, (*heap));
1904 }
1905 }
1906
1907 void CodeCache::print_space(outputStream *out) {
1908 FOR_ALL_ALLOCABLE_HEAPS(heap) {
1909 CodeHeapState::print_space(out, (*heap));
1910 }
1911 }
1912
1913 void CodeCache::print_age(outputStream *out) {
1914 FOR_ALL_ALLOCABLE_HEAPS(heap) {
1915 CodeHeapState::print_age(out, (*heap));
1916 }
1917 }
1918
1919 void CodeCache::print_names(outputStream *out) {
1920 FOR_ALL_ALLOCABLE_HEAPS(heap) {
1921 CodeHeapState::print_names(out, (*heap));
1922 }
1923 }
1924 //---< END >--- CodeHeap State Analytics.