1 /*
2 * Copyright (c) 1997, 2025, Oracle and/or its affiliates. All rights reserved.
3 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
4 *
5 * This code is free software; you can redistribute it and/or modify it
6 * under the terms of the GNU General Public License version 2 only, as
7 * published by the Free Software Foundation.
8 *
9 * This code is distributed in the hope that it will be useful, but WITHOUT
10 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
11 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
12 * version 2 for more details (a copy is included in the LICENSE file that
13 * accompanied this code).
14 *
15 * You should have received a copy of the GNU General Public License version
16 * 2 along with this work; if not, write to the Free Software Foundation,
17 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
18 *
19 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
20 * or visit www.oracle.com if you need additional information or have any
21 * questions.
22 *
23 */
24
25 #include "cds/aotCacheAccess.hpp"
26 #include "code/codeBlob.hpp"
27 #include "code/codeCache.hpp"
28 #include "code/codeHeapState.hpp"
29 #include "code/compiledIC.hpp"
30 #include "code/dependencies.hpp"
31 #include "code/dependencyContext.hpp"
32 #include "code/nmethod.hpp"
33 #include "code/pcDesc.hpp"
34 #include "compiler/compilationPolicy.hpp"
35 #include "compiler/compileBroker.hpp"
36 #include "compiler/compilerDefinitions.inline.hpp"
37 #include "compiler/oopMap.hpp"
38 #include "gc/shared/barrierSetNMethod.hpp"
39 #include "gc/shared/classUnloadingContext.hpp"
40 #include "gc/shared/collectedHeap.hpp"
41 #include "jfr/jfrEvents.hpp"
42 #include "jvm_io.h"
43 #include "logging/log.hpp"
44 #include "logging/logStream.hpp"
45 #include "memory/allocation.inline.hpp"
46 #include "memory/iterator.hpp"
47 #include "memory/memoryReserver.hpp"
48 #include "memory/resourceArea.hpp"
49 #include "memory/universe.hpp"
50 #include "oops/method.inline.hpp"
51 #include "oops/objArrayOop.hpp"
52 #include "oops/oop.inline.hpp"
53 #include "oops/verifyOopClosure.hpp"
54 #include "runtime/arguments.hpp"
55 #include "runtime/atomicAccess.hpp"
56 #include "runtime/deoptimization.hpp"
57 #include "runtime/globals_extension.hpp"
58 #include "runtime/handles.inline.hpp"
59 #include "runtime/icache.hpp"
60 #include "runtime/init.hpp"
61 #include "runtime/java.hpp"
62 #include "runtime/mutexLocker.hpp"
63 #include "runtime/os.inline.hpp"
64 #include "runtime/safepointVerifiers.hpp"
65 #include "runtime/vmThread.hpp"
66 #include "sanitizers/leak.hpp"
67 #include "services/memoryService.hpp"
68 #include "utilities/align.hpp"
69 #include "utilities/vmError.hpp"
70 #include "utilities/xmlstream.hpp"
71 #ifdef COMPILER1
72 #include "c1/c1_Compilation.hpp"
73 #include "c1/c1_Compiler.hpp"
74 #endif
75 #ifdef COMPILER2
76 #include "opto/c2compiler.hpp"
77 #include "opto/compile.hpp"
78 #include "opto/node.hpp"
79 #endif
80
81 // Helper class for printing in CodeCache
82 class CodeBlob_sizes {
83 private:
84 int count;
85 int total_size;
86 int header_size;
87 int code_size;
88 int stub_size;
89 int relocation_size;
90 int scopes_oop_size;
91 int scopes_metadata_size;
92 int scopes_data_size;
93 int scopes_pcs_size;
94
95 public:
96 CodeBlob_sizes() {
97 count = 0;
98 total_size = 0;
99 header_size = 0;
100 code_size = 0;
101 stub_size = 0;
102 relocation_size = 0;
103 scopes_oop_size = 0;
104 scopes_metadata_size = 0;
105 scopes_data_size = 0;
106 scopes_pcs_size = 0;
107 }
108
109 int total() const { return total_size; }
110 bool is_empty() const { return count == 0; }
111
112 void print(const char* title) const {
113 if (is_empty()) {
114 tty->print_cr(" #%d %s = %dK",
115 count,
116 title,
117 total() / (int)K);
118 } else {
119 tty->print_cr(" #%d %s = %dK (hdr %dK %d%%, loc %dK %d%%, code %dK %d%%, stub %dK %d%%, [oops %dK %d%%, metadata %dK %d%%, data %dK %d%%, pcs %dK %d%%])",
120 count,
121 title,
122 total() / (int)K,
123 header_size / (int)K,
124 header_size * 100 / total_size,
125 relocation_size / (int)K,
126 relocation_size * 100 / total_size,
127 code_size / (int)K,
128 code_size * 100 / total_size,
129 stub_size / (int)K,
130 stub_size * 100 / total_size,
131 scopes_oop_size / (int)K,
132 scopes_oop_size * 100 / total_size,
133 scopes_metadata_size / (int)K,
134 scopes_metadata_size * 100 / total_size,
135 scopes_data_size / (int)K,
136 scopes_data_size * 100 / total_size,
137 scopes_pcs_size / (int)K,
138 scopes_pcs_size * 100 / total_size);
139 }
140 }
141
142 void add(CodeBlob* cb) {
143 count++;
144 total_size += cb->size();
145 header_size += cb->header_size();
146 relocation_size += cb->relocation_size();
147 if (cb->is_nmethod()) {
148 nmethod* nm = cb->as_nmethod_or_null();
149 code_size += nm->insts_size();
150 stub_size += nm->stub_size();
151
152 scopes_oop_size += nm->oops_size();
153 scopes_metadata_size += nm->metadata_size();
154 scopes_data_size += nm->scopes_data_size();
155 scopes_pcs_size += nm->scopes_pcs_size();
156 } else {
157 code_size += cb->code_size();
158 }
159 }
160 };
161
162 // Iterate over all CodeHeaps
163 #define FOR_ALL_HEAPS(heap) for (GrowableArrayIterator<CodeHeap*> heap = _heaps->begin(); heap != _heaps->end(); ++heap)
164 #define FOR_ALL_ALLOCABLE_HEAPS(heap) for (GrowableArrayIterator<CodeHeap*> heap = _allocable_heaps->begin(); heap != _allocable_heaps->end(); ++heap)
165
166 // Iterate over all CodeBlobs (cb) on the given CodeHeap
167 #define FOR_ALL_BLOBS(cb, heap) for (CodeBlob* cb = first_blob(heap); cb != nullptr; cb = next_blob(heap, cb))
168
169 address CodeCache::_low_bound = nullptr;
170 address CodeCache::_high_bound = nullptr;
171 volatile int CodeCache::_number_of_nmethods_with_dependencies = 0;
172 ExceptionCache* volatile CodeCache::_exception_cache_purge_list = nullptr;
173
174 static ReservedSpace _cds_code_space;
175
176 // Initialize arrays of CodeHeap subsets
177 GrowableArray<CodeHeap*>* CodeCache::_heaps = new(mtCode) GrowableArray<CodeHeap*> (static_cast<int>(CodeBlobType::All), mtCode);
178 GrowableArray<CodeHeap*>* CodeCache::_nmethod_heaps = new(mtCode) GrowableArray<CodeHeap*> (static_cast<int>(CodeBlobType::All), mtCode);
179 GrowableArray<CodeHeap*>* CodeCache::_allocable_heaps = new(mtCode) GrowableArray<CodeHeap*> (static_cast<int>(CodeBlobType::All), mtCode);
180
181 static void check_min_size(const char* codeheap, size_t size, size_t required_size) {
182 if (size < required_size) {
183 log_debug(codecache)("Code heap (%s) size %zuK below required minimal size %zuK",
184 codeheap, size/K, required_size/K);
185 err_msg title("Not enough space in %s to run VM", codeheap);
186 err_msg message("%zuK < %zuK", size/K, required_size/K);
187 vm_exit_during_initialization(title, message);
188 }
189 }
190
191 struct CodeHeapInfo {
192 size_t size;
193 bool set;
194 bool enabled;
195 };
196
197 static void set_size_of_unset_code_heap(CodeHeapInfo* heap, size_t available_size, size_t used_size, size_t min_size) {
198 assert(!heap->set, "sanity");
199 heap->size = (available_size > (used_size + min_size)) ? (available_size - used_size) : min_size;
200 }
201
202 void CodeCache::initialize_heaps() {
203 CodeHeapInfo non_nmethod = {NonNMethodCodeHeapSize, FLAG_IS_CMDLINE(NonNMethodCodeHeapSize), true};
204 CodeHeapInfo profiled = {ProfiledCodeHeapSize, FLAG_IS_CMDLINE(ProfiledCodeHeapSize), true};
205 CodeHeapInfo non_profiled = {NonProfiledCodeHeapSize, FLAG_IS_CMDLINE(NonProfiledCodeHeapSize), true};
206
207 const bool cache_size_set = FLAG_IS_CMDLINE(ReservedCodeCacheSize);
208 const size_t ps = page_size(false, 8);
209 const size_t min_size = MAX2(os::vm_allocation_granularity(), ps);
210 const size_t min_cache_size = CodeCacheMinimumUseSpace DEBUG_ONLY(* 3); // Make sure we have enough space for VM internal code
211 size_t cache_size = align_up(ReservedCodeCacheSize, min_size);
212
213 // Prerequisites
214 if (!heap_available(CodeBlobType::MethodProfiled)) {
215 // For compatibility reasons, disabled tiered compilation overrides
216 // segment size even if it is set explicitly.
217 non_profiled.size += profiled.size;
218 // Profiled code heap is not available, forcibly set size to 0
219 profiled.size = 0;
220 profiled.set = true;
221 profiled.enabled = false;
222 }
223
224 assert(heap_available(CodeBlobType::MethodNonProfiled), "MethodNonProfiled heap is always available for segmented code heap");
225
226 size_t compiler_buffer_size = 0;
227 COMPILER1_PRESENT(compiler_buffer_size += CompilationPolicy::c1_count() * Compiler::code_buffer_size());
228 COMPILER2_PRESENT(compiler_buffer_size += CompilationPolicy::c2_count() * C2Compiler::initial_code_buffer_size());
229
230 if (!non_nmethod.set) {
231 non_nmethod.size += compiler_buffer_size;
232 // Further down, just before FLAG_SET_ERGO(), all segment sizes are
233 // aligned down to the next lower multiple of min_size. For large page
234 // sizes, this may result in (non_nmethod.size == 0) which is not acceptable.
235 // Therefore, force non_nmethod.size to at least min_size.
236 non_nmethod.size = MAX2(non_nmethod.size, min_size);
237 }
238
239 if (!profiled.set && !non_profiled.set) {
240 non_profiled.size = profiled.size = (cache_size > non_nmethod.size + 2 * min_size) ?
241 (cache_size - non_nmethod.size) / 2 : min_size;
242 }
243
244 if (profiled.set && !non_profiled.set) {
245 set_size_of_unset_code_heap(&non_profiled, cache_size, non_nmethod.size + profiled.size, min_size);
246 }
247
248 if (!profiled.set && non_profiled.set) {
249 set_size_of_unset_code_heap(&profiled, cache_size, non_nmethod.size + non_profiled.size, min_size);
250 }
251
252 // Compatibility.
253 size_t non_nmethod_min_size = min_cache_size + compiler_buffer_size;
254 if (!non_nmethod.set && profiled.set && non_profiled.set) {
255 set_size_of_unset_code_heap(&non_nmethod, cache_size, profiled.size + non_profiled.size, non_nmethod_min_size);
256 }
257
258 size_t total = non_nmethod.size + profiled.size + non_profiled.size;
259 if (total != cache_size && !cache_size_set) {
260 log_info(codecache)("ReservedCodeCache size %zuK changed to total segments size NonNMethod "
261 "%zuK NonProfiled %zuK Profiled %zuK = %zuK",
262 cache_size/K, non_nmethod.size/K, non_profiled.size/K, profiled.size/K, total/K);
263 // Adjust ReservedCodeCacheSize as necessary because it was not set explicitly
264 cache_size = total;
265 }
266
267 log_debug(codecache)("Initializing code heaps ReservedCodeCache %zuK NonNMethod %zuK"
268 " NonProfiled %zuK Profiled %zuK",
269 cache_size/K, non_nmethod.size/K, non_profiled.size/K, profiled.size/K);
270
271 // Validation
272 // Check minimal required sizes
273 check_min_size("non-nmethod code heap", non_nmethod.size, non_nmethod_min_size);
274 if (profiled.enabled) {
275 check_min_size("profiled code heap", profiled.size, min_size);
276 }
277 if (non_profiled.enabled) { // non_profiled.enabled is always ON for segmented code heap, leave it checked for clarity
278 check_min_size("non-profiled code heap", non_profiled.size, min_size);
279 }
280 if (cache_size_set) {
281 check_min_size("reserved code cache", cache_size, min_cache_size);
282 }
283
284 // ReservedCodeCacheSize was set explicitly, so report an error and abort if it doesn't match the segment sizes
285 if (total != cache_size && cache_size_set) {
286 err_msg message("NonNMethodCodeHeapSize (%zuK)", non_nmethod.size/K);
287 if (profiled.enabled) {
288 message.append(" + ProfiledCodeHeapSize (%zuK)", profiled.size/K);
289 }
290 if (non_profiled.enabled) {
291 message.append(" + NonProfiledCodeHeapSize (%zuK)", non_profiled.size/K);
292 }
293 message.append(" = %zuK", total/K);
294 message.append((total > cache_size) ? " is greater than " : " is less than ");
295 message.append("ReservedCodeCacheSize (%zuK).", cache_size/K);
296
297 vm_exit_during_initialization("Invalid code heap sizes", message);
298 }
299
300 // Compatibility. Print warning if using large pages but not able to use the size given
301 if (UseLargePages) {
302 const size_t lg_ps = page_size(false, 1);
303 if (ps < lg_ps) {
304 log_warning(codecache)("Code cache size too small for " PROPERFMT " pages. "
305 "Reverting to smaller page size (" PROPERFMT ").",
306 PROPERFMTARGS(lg_ps), PROPERFMTARGS(ps));
307 }
308 }
309
310 // Note: if large page support is enabled, min_size is at least the large
311 // page size. This ensures that the code cache is covered by large pages.
312 non_profiled.size += non_nmethod.size & alignment_mask(min_size);
313 non_profiled.size += profiled.size & alignment_mask(min_size);
314 non_nmethod.size = align_down(non_nmethod.size, min_size);
315 profiled.size = align_down(profiled.size, min_size);
316 non_profiled.size = align_down(non_profiled.size, min_size);
317
318 FLAG_SET_ERGO(NonNMethodCodeHeapSize, non_nmethod.size);
319 FLAG_SET_ERGO(ProfiledCodeHeapSize, profiled.size);
320 FLAG_SET_ERGO(NonProfiledCodeHeapSize, non_profiled.size);
321 FLAG_SET_ERGO(ReservedCodeCacheSize, cache_size);
322
323 const size_t cds_code_size = 0;
324 // FIXME: we should not increase CodeCache size - it affects branches.
325 // Instead we need to create separate code heap in CodeCache for AOT code.
326 // const size_t cds_code_size = align_up(AOTCacheAccess::get_aot_code_region_size(), min_size);
327 // cache_size += cds_code_size;
328
329 ReservedSpace rs = reserve_heap_memory(cache_size, ps);
330
331 // Register CodeHeaps with LSan as we sometimes embed pointers to malloc memory.
332 LSAN_REGISTER_ROOT_REGION(rs.base(), rs.size());
333
334 size_t offset = 0;
335 if (cds_code_size > 0) {
336 // FIXME: use CodeHeapInfo for this hack ...
337 _cds_code_space = rs.partition(offset, cds_code_size);
338 offset += cds_code_size;
339 }
340
341 if (profiled.enabled) {
342 ReservedSpace profiled_space = rs.partition(offset, profiled.size);
343 offset += profiled.size;
344 // Tier 2 and tier 3 (profiled) methods
345 add_heap(profiled_space, "CodeHeap 'profiled nmethods'", CodeBlobType::MethodProfiled);
346 }
347
348 ReservedSpace non_method_space = rs.partition(offset, non_nmethod.size);
349 offset += non_nmethod.size;
350 // Non-nmethods (stubs, adapters, ...)
351 add_heap(non_method_space, "CodeHeap 'non-nmethods'", CodeBlobType::NonNMethod);
352
353 if (non_profiled.enabled) {
354 ReservedSpace non_profiled_space = rs.partition(offset, non_profiled.size);
355 // Tier 1 and tier 4 (non-profiled) methods and native methods
356 add_heap(non_profiled_space, "CodeHeap 'non-profiled nmethods'", CodeBlobType::MethodNonProfiled);
357 }
358 }
359
360 void* CodeCache::map_aot_code() {
361 if (_cds_code_space.size() > 0 && AOTCacheAccess::map_aot_code_region(_cds_code_space)) {
362 return _cds_code_space.base();
363 } else {
364 return nullptr;
365 }
366 }
367
368 size_t CodeCache::page_size(bool aligned, size_t min_pages) {
369 return aligned ? os::page_size_for_region_aligned(ReservedCodeCacheSize, min_pages) :
370 os::page_size_for_region_unaligned(ReservedCodeCacheSize, min_pages);
371 }
372
373 ReservedSpace CodeCache::reserve_heap_memory(size_t size, size_t rs_ps) {
374 // Align and reserve space for code cache
375 const size_t rs_align = MAX2(rs_ps, os::vm_allocation_granularity());
376 const size_t rs_size = align_up(size, rs_align);
377
378 ReservedSpace rs = CodeMemoryReserver::reserve(rs_size, rs_align, rs_ps);
379 if (!rs.is_reserved()) {
380 vm_exit_during_initialization(err_msg("Could not reserve enough space for code cache (%zuK)",
381 rs_size/K));
382 }
383
384 // Initialize bounds
385 _low_bound = (address)rs.base();
386 _high_bound = _low_bound + rs.size();
387 return rs;
388 }
389
390 // Heaps available for allocation
391 bool CodeCache::heap_available(CodeBlobType code_blob_type) {
392 if (!SegmentedCodeCache) {
393 // No segmentation: use a single code heap
394 return (code_blob_type == CodeBlobType::All);
395 } else if (CompilerConfig::is_interpreter_only()) {
396 // Interpreter only: we don't need any method code heaps
397 return (code_blob_type == CodeBlobType::NonNMethod);
398 } else if (CompilerConfig::is_c1_profiling()) {
399 // Tiered compilation: use all code heaps
400 return (code_blob_type < CodeBlobType::All);
401 } else {
402 // No TieredCompilation: we only need the non-nmethod and non-profiled code heap
403 return (code_blob_type == CodeBlobType::NonNMethod) ||
404 (code_blob_type == CodeBlobType::MethodNonProfiled);
405 }
406 }
407
408 const char* CodeCache::get_code_heap_flag_name(CodeBlobType code_blob_type) {
409 switch(code_blob_type) {
410 case CodeBlobType::NonNMethod:
411 return "NonNMethodCodeHeapSize";
412 break;
413 case CodeBlobType::MethodNonProfiled:
414 return "NonProfiledCodeHeapSize";
415 break;
416 case CodeBlobType::MethodProfiled:
417 return "ProfiledCodeHeapSize";
418 break;
419 default:
420 ShouldNotReachHere();
421 return nullptr;
422 }
423 }
424
425 int CodeCache::code_heap_compare(CodeHeap* const &lhs, CodeHeap* const &rhs) {
426 if (lhs->code_blob_type() == rhs->code_blob_type()) {
427 return (lhs > rhs) ? 1 : ((lhs < rhs) ? -1 : 0);
428 } else {
429 return static_cast<int>(lhs->code_blob_type()) - static_cast<int>(rhs->code_blob_type());
430 }
431 }
432
433 void CodeCache::add_heap(CodeHeap* heap) {
434 assert(!Universe::is_fully_initialized(), "late heap addition?");
435
436 _heaps->insert_sorted<code_heap_compare>(heap);
437
438 CodeBlobType type = heap->code_blob_type();
439 if (code_blob_type_accepts_nmethod(type)) {
440 _nmethod_heaps->insert_sorted<code_heap_compare>(heap);
441 }
442 if (code_blob_type_accepts_allocable(type)) {
443 _allocable_heaps->insert_sorted<code_heap_compare>(heap);
444 }
445 }
446
447 void CodeCache::add_heap(ReservedSpace rs, const char* name, CodeBlobType code_blob_type) {
448 // Check if heap is needed
449 if (!heap_available(code_blob_type)) {
450 return;
451 }
452
453 // Create CodeHeap
454 CodeHeap* heap = new CodeHeap(name, code_blob_type);
455 add_heap(heap);
456
457 // Reserve Space
458 size_t size_initial = MIN2(InitialCodeCacheSize, rs.size());
459 size_initial = align_up(size_initial, rs.page_size());
460 if (!heap->reserve(rs, size_initial, CodeCacheSegmentSize)) {
461 vm_exit_during_initialization(err_msg("Could not reserve enough space in %s (%zuK)",
462 heap->name(), size_initial/K));
463 }
464
465 // Register the CodeHeap
466 MemoryService::add_code_heap_memory_pool(heap, name);
467 }
468
469 CodeHeap* CodeCache::get_code_heap_containing(void* start) {
470 FOR_ALL_HEAPS(heap) {
471 if ((*heap)->contains(start)) {
472 return *heap;
473 }
474 }
475 return nullptr;
476 }
477
478 CodeHeap* CodeCache::get_code_heap(const void* cb) {
479 assert(cb != nullptr, "CodeBlob is null");
480 FOR_ALL_HEAPS(heap) {
481 if ((*heap)->contains(cb)) {
482 return *heap;
483 }
484 }
485 ShouldNotReachHere();
486 return nullptr;
487 }
488
489 CodeHeap* CodeCache::get_code_heap(CodeBlobType code_blob_type) {
490 FOR_ALL_HEAPS(heap) {
491 if ((*heap)->accepts(code_blob_type)) {
492 return *heap;
493 }
494 }
495 return nullptr;
496 }
497
498 CodeBlob* CodeCache::first_blob(CodeHeap* heap) {
499 assert_locked_or_safepoint(CodeCache_lock);
500 assert(heap != nullptr, "heap is null");
501 return (CodeBlob*)heap->first();
502 }
503
504 CodeBlob* CodeCache::first_blob(CodeBlobType code_blob_type) {
505 if (heap_available(code_blob_type)) {
506 return first_blob(get_code_heap(code_blob_type));
507 } else {
508 return nullptr;
509 }
510 }
511
512 CodeBlob* CodeCache::next_blob(CodeHeap* heap, CodeBlob* cb) {
513 assert_locked_or_safepoint(CodeCache_lock);
514 assert(heap != nullptr, "heap is null");
515 return (CodeBlob*)heap->next(cb);
516 }
517
518 /**
519 * Do not seize the CodeCache lock here--if the caller has not
520 * already done so, we are going to lose bigtime, since the code
521 * cache will contain a garbage CodeBlob until the caller can
522 * run the constructor for the CodeBlob subclass he is busy
523 * instantiating.
524 */
525 CodeBlob* CodeCache::allocate(uint size, CodeBlobType code_blob_type, bool handle_alloc_failure, CodeBlobType orig_code_blob_type) {
526 assert_locked_or_safepoint(CodeCache_lock);
527 assert(size > 0, "Code cache allocation request must be > 0");
528 if (size == 0) {
529 return nullptr;
530 }
531 CodeBlob* cb = nullptr;
532
533 // Get CodeHeap for the given CodeBlobType
534 CodeHeap* heap = get_code_heap(code_blob_type);
535 assert(heap != nullptr, "heap is null");
536
537 while (true) {
538 cb = (CodeBlob*)heap->allocate(size);
539 if (cb != nullptr) break;
540 if (!heap->expand_by(CodeCacheExpansionSize)) {
541 // Save original type for error reporting
542 if (orig_code_blob_type == CodeBlobType::All) {
543 orig_code_blob_type = code_blob_type;
544 }
545 // Expansion failed
546 if (SegmentedCodeCache) {
547 // Fallback solution: Try to store code in another code heap.
548 // NonNMethod -> MethodNonProfiled -> MethodProfiled (-> MethodNonProfiled)
549 CodeBlobType type = code_blob_type;
550 switch (type) {
551 case CodeBlobType::NonNMethod:
552 type = CodeBlobType::MethodNonProfiled;
553 break;
554 case CodeBlobType::MethodNonProfiled:
555 type = CodeBlobType::MethodProfiled;
556 break;
557 case CodeBlobType::MethodProfiled:
558 // Avoid loop if we already tried that code heap
559 if (type == orig_code_blob_type) {
560 type = CodeBlobType::MethodNonProfiled;
561 }
562 break;
563 default:
564 break;
565 }
566 if (type != code_blob_type && type != orig_code_blob_type && heap_available(type)) {
567 if (PrintCodeCacheExtension) {
568 tty->print_cr("Extension of %s failed. Trying to allocate in %s.",
569 heap->name(), get_code_heap(type)->name());
570 }
571 return allocate(size, type, handle_alloc_failure, orig_code_blob_type);
572 }
573 }
574 if (handle_alloc_failure) {
575 MutexUnlocker mu(CodeCache_lock, Mutex::_no_safepoint_check_flag);
576 CompileBroker::handle_full_code_cache(orig_code_blob_type);
577 }
578 return nullptr;
579 } else {
580 OrderAccess::release(); // ensure heap expansion is visible to an asynchronous observer (e.g. CodeHeapPool::get_memory_usage())
581 }
582 if (PrintCodeCacheExtension) {
583 ResourceMark rm;
584 if (_nmethod_heaps->length() >= 1) {
585 tty->print("%s", heap->name());
586 } else {
587 tty->print("CodeCache");
588 }
589 tty->print_cr(" extended to [" INTPTR_FORMAT ", " INTPTR_FORMAT "] (%zd bytes)",
590 (intptr_t)heap->low_boundary(), (intptr_t)heap->high(),
591 (address)heap->high() - (address)heap->low_boundary());
592 }
593 }
594 print_trace("allocation", cb, size);
595 return cb;
596 }
597
598 void CodeCache::free(CodeBlob* cb) {
599 assert_locked_or_safepoint(CodeCache_lock);
600 CodeHeap* heap = get_code_heap(cb);
601 print_trace("free", cb);
602 if (cb->is_nmethod()) {
603 heap->set_nmethod_count(heap->nmethod_count() - 1);
604 if (((nmethod *)cb)->has_dependencies()) {
605 AtomicAccess::dec(&_number_of_nmethods_with_dependencies);
606 }
607 }
608 if (cb->is_adapter_blob()) {
609 heap->set_adapter_count(heap->adapter_count() - 1);
610 }
611
612 cb->~CodeBlob();
613 // Get heap for given CodeBlob and deallocate
614 heap->deallocate(cb);
615
616 assert(heap->blob_count() >= 0, "sanity check");
617 }
618
619 void CodeCache::free_unused_tail(CodeBlob* cb, size_t used) {
620 assert_locked_or_safepoint(CodeCache_lock);
621 guarantee(cb->is_buffer_blob() && strncmp("Interpreter", cb->name(), 11) == 0, "Only possible for interpreter!");
622 print_trace("free_unused_tail", cb);
623
624 // We also have to account for the extra space (i.e. header) used by the CodeBlob
625 // which provides the memory (see BufferBlob::create() in codeBlob.cpp).
626 used += CodeBlob::align_code_offset(cb->header_size());
627
628 // Get heap for given CodeBlob and deallocate its unused tail
629 get_code_heap(cb)->deallocate_tail(cb, used);
630 // Adjust the sizes of the CodeBlob
631 cb->adjust_size(used);
632 }
633
634 void CodeCache::commit(CodeBlob* cb) {
635 // this is called by nmethod::nmethod, which must already own CodeCache_lock
636 assert_locked_or_safepoint(CodeCache_lock);
637 CodeHeap* heap = get_code_heap(cb);
638 if (cb->is_nmethod()) {
639 heap->set_nmethod_count(heap->nmethod_count() + 1);
640 if (((nmethod *)cb)->has_dependencies()) {
641 AtomicAccess::inc(&_number_of_nmethods_with_dependencies);
642 }
643 }
644 if (cb->is_adapter_blob()) {
645 heap->set_adapter_count(heap->adapter_count() + 1);
646 }
647 }
648
649 bool CodeCache::contains(void *p) {
650 // S390 uses contains() in current_frame(), which is used before
651 // code cache initialization if NativeMemoryTracking=detail is set.
652 S390_ONLY(if (_heaps == nullptr) return false;)
653 // It should be ok to call contains without holding a lock.
654 FOR_ALL_HEAPS(heap) {
655 if ((*heap)->contains(p)) {
656 return true;
657 }
658 }
659 return false;
660 }
661
662 bool CodeCache::contains(nmethod *nm) {
663 return contains((void *)nm);
664 }
665
666 // This method is safe to call without holding the CodeCache_lock. It only depends on the _segmap to contain
667 // valid indices, which it will always do, as long as the CodeBlob is not in the process of being recycled.
668 CodeBlob* CodeCache::find_blob(void* start) {
669 // NMT can walk the stack before code cache is created
670 if (_heaps != nullptr) {
671 CodeHeap* heap = get_code_heap_containing(start);
672 if (heap != nullptr) {
673 return heap->find_blob(start);
674 }
675 }
676 return nullptr;
677 }
678
679 nmethod* CodeCache::find_nmethod(void* start) {
680 CodeBlob* cb = find_blob(start);
681 assert(cb == nullptr || cb->is_nmethod(), "did not find an nmethod");
682 return (nmethod*)cb;
683 }
684
685 void CodeCache::blobs_do(void f(CodeBlob* nm)) {
686 assert_locked_or_safepoint(CodeCache_lock);
687 FOR_ALL_HEAPS(heap) {
688 FOR_ALL_BLOBS(cb, *heap) {
689 f(cb);
690 }
691 }
692 }
693
694 void CodeCache::nmethods_do(void f(nmethod* nm)) {
695 assert_locked_or_safepoint(CodeCache_lock);
696 NMethodIterator iter(NMethodIterator::all);
697 while(iter.next()) {
698 f(iter.method());
699 }
700 }
701
702 void CodeCache::nmethods_do(NMethodClosure* cl) {
703 assert_locked_or_safepoint(CodeCache_lock);
704 NMethodIterator iter(NMethodIterator::all);
705 while(iter.next()) {
706 cl->do_nmethod(iter.method());
707 }
708 }
709
710 void CodeCache::metadata_do(MetadataClosure* f) {
711 assert_locked_or_safepoint(CodeCache_lock);
712 NMethodIterator iter(NMethodIterator::all);
713 while(iter.next()) {
714 iter.method()->metadata_do(f);
715 }
716 }
717
718 // Calculate the number of GCs after which an nmethod is expected to have been
719 // used in order to not be classed as cold.
720 void CodeCache::update_cold_gc_count() {
721 if (!MethodFlushing || !UseCodeCacheFlushing || NmethodSweepActivity == 0) {
722 // No aging
723 return;
724 }
725
726 size_t last_used = _last_unloading_used;
727 double last_time = _last_unloading_time;
728
729 double time = os::elapsedTime();
730
731 size_t free = unallocated_capacity();
732 size_t max = max_capacity();
733 size_t used = max - free;
734 double gc_interval = time - last_time;
735
736 _unloading_threshold_gc_requested = false;
737 _last_unloading_time = time;
738 _last_unloading_used = used;
739
740 if (last_time == 0.0) {
741 // The first GC doesn't have enough information to make good
742 // decisions, so just keep everything afloat
743 log_info(codecache)("Unknown code cache pressure; don't age code");
744 return;
745 }
746
747 if (gc_interval <= 0.0 || last_used >= used) {
748 // Dodge corner cases where there is no pressure or negative pressure
749 // on the code cache. Just don't unload when this happens.
750 _cold_gc_count = INT_MAX;
751 log_info(codecache)("No code cache pressure; don't age code");
752 return;
753 }
754
755 double allocation_rate = (used - last_used) / gc_interval;
756
757 _unloading_allocation_rates.add(allocation_rate);
758 _unloading_gc_intervals.add(gc_interval);
759
760 size_t aggressive_sweeping_free_threshold = StartAggressiveSweepingAt / 100.0 * max;
761 if (free < aggressive_sweeping_free_threshold) {
762 // We are already in the red zone; be very aggressive to avoid disaster
763 // But not more aggressive than 2. This ensures that an nmethod must
764 // have been unused at least between two GCs to be considered cold still.
765 _cold_gc_count = 2;
766 log_info(codecache)("Code cache critically low; use aggressive aging");
767 return;
768 }
769
770 // The code cache has an expected time for cold nmethods to "time out"
771 // when they have not been used. The time for nmethods to time out
772 // depends on how long we expect we can keep allocating code until
773 // aggressive sweeping starts, based on sampled allocation rates.
774 double average_gc_interval = _unloading_gc_intervals.avg();
775 double average_allocation_rate = _unloading_allocation_rates.avg();
776 double time_to_aggressive = ((double)(free - aggressive_sweeping_free_threshold)) / average_allocation_rate;
777 double cold_timeout = time_to_aggressive / NmethodSweepActivity;
778
779 // Convert time to GC cycles, and crop at INT_MAX. The reason for
780 // that is that the _cold_gc_count will be added to an epoch number
781 // and that addition must not overflow, or we can crash the VM.
782 // But not more aggressive than 2. This ensures that an nmethod must
783 // have been unused at least between two GCs to be considered cold still.
784 _cold_gc_count = MAX2(MIN2((uint64_t)(cold_timeout / average_gc_interval), (uint64_t)INT_MAX), (uint64_t)2);
785
786 double used_ratio = double(used) / double(max);
787 double last_used_ratio = double(last_used) / double(max);
788 log_info(codecache)("Allocation rate: %.3f KB/s, time to aggressive unloading: %.3f s, cold timeout: %.3f s, cold gc count: " UINT64_FORMAT
789 ", used: %.3f MB (%.3f%%), last used: %.3f MB (%.3f%%), gc interval: %.3f s",
790 average_allocation_rate / K, time_to_aggressive, cold_timeout, _cold_gc_count,
791 double(used) / M, used_ratio * 100.0, double(last_used) / M, last_used_ratio * 100.0, average_gc_interval);
792
793 }
794
795 uint64_t CodeCache::cold_gc_count() {
796 return _cold_gc_count;
797 }
798
799 void CodeCache::gc_on_allocation() {
800 if (!is_init_completed()) {
801 // Let's not heuristically trigger GCs before the JVM is ready for GCs, no matter what
802 return;
803 }
804
805 size_t free = unallocated_capacity();
806 size_t max = max_capacity();
807 size_t used = max - free;
808 double free_ratio = double(free) / double(max);
809 if (free_ratio <= StartAggressiveSweepingAt / 100.0) {
810 // In case the GC is concurrent, we make sure only one thread requests the GC.
811 if (AtomicAccess::cmpxchg(&_unloading_threshold_gc_requested, false, true) == false) {
812 log_info(codecache)("Triggering aggressive GC due to having only %.3f%% free memory", free_ratio * 100.0);
813 Universe::heap()->collect(GCCause::_codecache_GC_aggressive);
814 }
815 return;
816 }
817
818 size_t last_used = _last_unloading_used;
819 if (last_used >= used) {
820 // No increase since last GC; no need to sweep yet
821 return;
822 }
823 size_t allocated_since_last = used - last_used;
824 double allocated_since_last_ratio = double(allocated_since_last) / double(max);
825 double threshold = SweeperThreshold / 100.0;
826 double used_ratio = double(used) / double(max);
827 double last_used_ratio = double(last_used) / double(max);
828 if (used_ratio > threshold) {
829 // After threshold is reached, scale it by free_ratio so that more aggressive
830 // GC is triggered as we approach code cache exhaustion
831 threshold *= free_ratio;
832 }
833 // If code cache has been allocated without any GC at all, let's make sure
834 // it is eventually invoked to avoid trouble.
835 if (allocated_since_last_ratio > threshold) {
836 // In case the GC is concurrent, we make sure only one thread requests the GC.
837 if (AtomicAccess::cmpxchg(&_unloading_threshold_gc_requested, false, true) == false) {
838 log_info(codecache)("Triggering threshold (%.3f%%) GC due to allocating %.3f%% since last unloading (%.3f%% used -> %.3f%% used)",
839 threshold * 100.0, allocated_since_last_ratio * 100.0, last_used_ratio * 100.0, used_ratio * 100.0);
840 Universe::heap()->collect(GCCause::_codecache_GC_threshold);
841 }
842 }
843 }
844
845 // We initialize the _gc_epoch to 2, because previous_completed_gc_marking_cycle
846 // subtracts the value by 2, and the type is unsigned. We don't want underflow.
847 //
848 // Odd values mean that marking is in progress, and even values mean that no
849 // marking is currently active.
850 uint64_t CodeCache::_gc_epoch = 2;
851
852 // How many GCs after an nmethod has not been used, do we consider it cold?
853 uint64_t CodeCache::_cold_gc_count = INT_MAX;
854
855 double CodeCache::_last_unloading_time = 0.0;
856 size_t CodeCache::_last_unloading_used = 0;
857 volatile bool CodeCache::_unloading_threshold_gc_requested = false;
858 TruncatedSeq CodeCache::_unloading_gc_intervals(10 /* samples */);
859 TruncatedSeq CodeCache::_unloading_allocation_rates(10 /* samples */);
860
861 uint64_t CodeCache::gc_epoch() {
862 return _gc_epoch;
863 }
864
865 bool CodeCache::is_gc_marking_cycle_active() {
866 // Odd means that marking is active
867 return (_gc_epoch % 2) == 1;
868 }
869
870 uint64_t CodeCache::previous_completed_gc_marking_cycle() {
871 if (is_gc_marking_cycle_active()) {
872 return _gc_epoch - 2;
873 } else {
874 return _gc_epoch - 1;
875 }
876 }
877
878 void CodeCache::on_gc_marking_cycle_start() {
879 assert(!is_gc_marking_cycle_active(), "Previous marking cycle never ended");
880 ++_gc_epoch;
881 }
882
883 // Once started the code cache marking cycle must only be finished after marking of
884 // the java heap is complete. Otherwise nmethods could appear to be not on stack even
885 // if they have frames in continuation StackChunks that were not yet visited.
886 void CodeCache::on_gc_marking_cycle_finish() {
887 assert(is_gc_marking_cycle_active(), "Marking cycle started before last one finished");
888 ++_gc_epoch;
889 update_cold_gc_count();
890 }
891
892 void CodeCache::arm_all_nmethods() {
893 BarrierSet::barrier_set()->barrier_set_nmethod()->arm_all_nmethods();
894 }
895
896 // Mark nmethods for unloading if they contain otherwise unreachable oops.
897 void CodeCache::do_unloading(bool unloading_occurred) {
898 assert_locked_or_safepoint(CodeCache_lock);
899 NMethodIterator iter(NMethodIterator::all);
900 while(iter.next()) {
901 iter.method()->do_unloading(unloading_occurred);
902 }
903 }
904
905 void CodeCache::verify_clean_inline_caches() {
906 #ifdef ASSERT
907 if (!VerifyInlineCaches) return;
908 NMethodIterator iter(NMethodIterator::not_unloading);
909 while(iter.next()) {
910 nmethod* nm = iter.method();
911 nm->verify_clean_inline_caches();
912 nm->verify();
913 }
914 #endif
915 }
916
917 // Defer freeing of concurrently cleaned ExceptionCache entries until
918 // after a global handshake operation.
919 void CodeCache::release_exception_cache(ExceptionCache* entry) {
920 if (SafepointSynchronize::is_at_safepoint()) {
921 delete entry;
922 } else {
923 for (;;) {
924 ExceptionCache* purge_list_head = AtomicAccess::load(&_exception_cache_purge_list);
925 entry->set_purge_list_next(purge_list_head);
926 if (AtomicAccess::cmpxchg(&_exception_cache_purge_list, purge_list_head, entry) == purge_list_head) {
927 break;
928 }
929 }
930 }
931 }
932
933 // Delete exception caches that have been concurrently unlinked,
934 // followed by a global handshake operation.
935 void CodeCache::purge_exception_caches() {
936 ExceptionCache* curr = _exception_cache_purge_list;
937 while (curr != nullptr) {
938 ExceptionCache* next = curr->purge_list_next();
939 delete curr;
940 curr = next;
941 }
942 _exception_cache_purge_list = nullptr;
943 }
944
945 // Restart compiler if possible and required..
946 void CodeCache::maybe_restart_compiler(size_t freed_memory) {
947
948 // Try to start the compiler again if we freed any memory
949 if (!CompileBroker::should_compile_new_jobs() && freed_memory != 0) {
950 CompileBroker::set_should_compile_new_jobs(CompileBroker::run_compilation);
951 log_info(codecache)("Restarting compiler");
952 EventJITRestart event;
953 event.set_freedMemory(freed_memory);
954 event.set_codeCacheMaxCapacity(CodeCache::max_capacity());
955 event.commit();
956 }
957 }
958
959 uint8_t CodeCache::_unloading_cycle = 1;
960
961 void CodeCache::increment_unloading_cycle() {
962 // 2-bit value (see IsUnloadingState in nmethod.cpp for details)
963 // 0 is reserved for new methods.
964 _unloading_cycle = (_unloading_cycle + 1) % 4;
965 if (_unloading_cycle == 0) {
966 _unloading_cycle = 1;
967 }
968 }
969
970 CodeCache::UnlinkingScope::UnlinkingScope(BoolObjectClosure* is_alive)
971 : _is_unloading_behaviour(is_alive)
972 {
973 _saved_behaviour = IsUnloadingBehaviour::current();
974 IsUnloadingBehaviour::set_current(&_is_unloading_behaviour);
975 increment_unloading_cycle();
976 DependencyContext::cleaning_start();
977 }
978
979 CodeCache::UnlinkingScope::~UnlinkingScope() {
980 IsUnloadingBehaviour::set_current(_saved_behaviour);
981 DependencyContext::cleaning_end();
982 }
983
984 void CodeCache::verify_oops() {
985 MutexLocker mu(CodeCache_lock, Mutex::_no_safepoint_check_flag);
986 VerifyOopClosure voc;
987 NMethodIterator iter(NMethodIterator::not_unloading);
988 while(iter.next()) {
989 nmethod* nm = iter.method();
990 nm->oops_do(&voc);
991 nm->verify_oop_relocations();
992 }
993 }
994
995 int CodeCache::blob_count(CodeBlobType code_blob_type) {
996 CodeHeap* heap = get_code_heap(code_blob_type);
997 return (heap != nullptr) ? heap->blob_count() : 0;
998 }
999
1000 int CodeCache::blob_count() {
1001 int count = 0;
1002 FOR_ALL_HEAPS(heap) {
1003 count += (*heap)->blob_count();
1004 }
1005 return count;
1006 }
1007
1008 int CodeCache::nmethod_count(CodeBlobType code_blob_type) {
1009 CodeHeap* heap = get_code_heap(code_blob_type);
1010 return (heap != nullptr) ? heap->nmethod_count() : 0;
1011 }
1012
1013 int CodeCache::nmethod_count() {
1014 int count = 0;
1015 for (CodeHeap* heap : *_nmethod_heaps) {
1016 count += heap->nmethod_count();
1017 }
1018 return count;
1019 }
1020
1021 int CodeCache::adapter_count(CodeBlobType code_blob_type) {
1022 CodeHeap* heap = get_code_heap(code_blob_type);
1023 return (heap != nullptr) ? heap->adapter_count() : 0;
1024 }
1025
1026 int CodeCache::adapter_count() {
1027 int count = 0;
1028 FOR_ALL_HEAPS(heap) {
1029 count += (*heap)->adapter_count();
1030 }
1031 return count;
1032 }
1033
1034 address CodeCache::low_bound(CodeBlobType code_blob_type) {
1035 CodeHeap* heap = get_code_heap(code_blob_type);
1036 return (heap != nullptr) ? (address)heap->low_boundary() : nullptr;
1037 }
1038
1039 address CodeCache::high_bound(CodeBlobType code_blob_type) {
1040 CodeHeap* heap = get_code_heap(code_blob_type);
1041 return (heap != nullptr) ? (address)heap->high_boundary() : nullptr;
1042 }
1043
1044 size_t CodeCache::capacity() {
1045 size_t cap = 0;
1046 FOR_ALL_ALLOCABLE_HEAPS(heap) {
1047 cap += (*heap)->capacity();
1048 }
1049 return cap;
1050 }
1051
1052 size_t CodeCache::unallocated_capacity(CodeBlobType code_blob_type) {
1053 CodeHeap* heap = get_code_heap(code_blob_type);
1054 return (heap != nullptr) ? heap->unallocated_capacity() : 0;
1055 }
1056
1057 size_t CodeCache::unallocated_capacity() {
1058 size_t unallocated_cap = 0;
1059 FOR_ALL_ALLOCABLE_HEAPS(heap) {
1060 unallocated_cap += (*heap)->unallocated_capacity();
1061 }
1062 return unallocated_cap;
1063 }
1064
1065 size_t CodeCache::max_capacity() {
1066 size_t max_cap = 0;
1067 FOR_ALL_ALLOCABLE_HEAPS(heap) {
1068 max_cap += (*heap)->max_capacity();
1069 }
1070 return max_cap;
1071 }
1072
1073 bool CodeCache::is_non_nmethod(address addr) {
1074 CodeHeap* blob = get_code_heap(CodeBlobType::NonNMethod);
1075 return blob->contains(addr);
1076 }
1077
1078 size_t CodeCache::max_distance_to_non_nmethod() {
1079 if (!SegmentedCodeCache) {
1080 return ReservedCodeCacheSize;
1081 } else {
1082 CodeHeap* blob = get_code_heap(CodeBlobType::NonNMethod);
1083 // the max distance is minimized by placing the NonNMethod segment
1084 // in between MethodProfiled and MethodNonProfiled segments
1085 size_t dist1 = (size_t)blob->high_boundary() - (size_t)_low_bound;
1086 size_t dist2 = (size_t)_high_bound - (size_t)blob->low_boundary();
1087 return dist1 > dist2 ? dist1 : dist2;
1088 }
1089 }
1090
1091 // Returns the reverse free ratio. E.g., if 25% (1/4) of the code cache
1092 // is free, reverse_free_ratio() returns 4.
1093 // Since code heap for each type of code blobs falls forward to the next
1094 // type of code heap, return the reverse free ratio for the entire
1095 // code cache.
1096 double CodeCache::reverse_free_ratio() {
1097 double unallocated = MAX2((double)unallocated_capacity(), 1.0); // Avoid division by 0;
1098 double max = (double)max_capacity();
1099 double result = max / unallocated;
1100 assert (max >= unallocated, "Must be");
1101 assert (result >= 1.0, "reverse_free_ratio must be at least 1. It is %f", result);
1102 return result;
1103 }
1104
1105 size_t CodeCache::bytes_allocated_in_freelists() {
1106 size_t allocated_bytes = 0;
1107 FOR_ALL_ALLOCABLE_HEAPS(heap) {
1108 allocated_bytes += (*heap)->allocated_in_freelist();
1109 }
1110 return allocated_bytes;
1111 }
1112
1113 int CodeCache::allocated_segments() {
1114 int number_of_segments = 0;
1115 FOR_ALL_ALLOCABLE_HEAPS(heap) {
1116 number_of_segments += (*heap)->allocated_segments();
1117 }
1118 return number_of_segments;
1119 }
1120
1121 size_t CodeCache::freelists_length() {
1122 size_t length = 0;
1123 FOR_ALL_ALLOCABLE_HEAPS(heap) {
1124 length += (*heap)->freelist_length();
1125 }
1126 return length;
1127 }
1128
1129 void icache_init();
1130
1131 void CodeCache::initialize() {
1132 assert(CodeCacheSegmentSize >= (size_t)CodeEntryAlignment, "CodeCacheSegmentSize must be large enough to align entry points");
1133 #ifdef COMPILER2
1134 assert(CodeCacheSegmentSize >= (size_t)OptoLoopAlignment, "CodeCacheSegmentSize must be large enough to align inner loops");
1135 #endif
1136 assert(CodeCacheSegmentSize >= sizeof(jdouble), "CodeCacheSegmentSize must be large enough to align constants");
1137 // This was originally just a check of the alignment, causing failure, instead, round
1138 // the code cache to the page size. In particular, Solaris is moving to a larger
1139 // default page size.
1140 CodeCacheExpansionSize = align_up(CodeCacheExpansionSize, os::vm_page_size());
1141
1142 if (SegmentedCodeCache) {
1143 // Use multiple code heaps
1144 initialize_heaps();
1145 } else {
1146 // Use a single code heap
1147 FLAG_SET_ERGO(NonNMethodCodeHeapSize, (uintx)os::vm_page_size());
1148 FLAG_SET_ERGO(ProfiledCodeHeapSize, 0);
1149 FLAG_SET_ERGO(NonProfiledCodeHeapSize, 0);
1150
1151 // If InitialCodeCacheSize is equal to ReservedCodeCacheSize, then it's more likely
1152 // users want to use the largest available page.
1153 const size_t min_pages = (InitialCodeCacheSize == ReservedCodeCacheSize) ? 1 : 8;
1154 ReservedSpace rs = reserve_heap_memory(ReservedCodeCacheSize, page_size(false, min_pages));
1155 // Register CodeHeaps with LSan as we sometimes embed pointers to malloc memory.
1156 LSAN_REGISTER_ROOT_REGION(rs.base(), rs.size());
1157 add_heap(rs, "CodeCache", CodeBlobType::All);
1158 }
1159
1160 // Initialize ICache flush mechanism
1161 // This service is needed for os::register_code_area
1162 icache_init();
1163
1164 // Give OS a chance to register generated code area.
1165 // This is used on Windows 64 bit platforms to register
1166 // Structured Exception Handlers for our generated code.
1167 os::register_code_area((char*)low_bound(), (char*)high_bound());
1168 }
1169
1170 void codeCache_init() {
1171 CodeCache::initialize();
1172 }
1173
1174 //------------------------------------------------------------------------------------------------
1175
1176 bool CodeCache::has_nmethods_with_dependencies() {
1177 return AtomicAccess::load_acquire(&_number_of_nmethods_with_dependencies) != 0;
1178 }
1179
1180 void CodeCache::clear_inline_caches() {
1181 assert_locked_or_safepoint(CodeCache_lock);
1182 NMethodIterator iter(NMethodIterator::not_unloading);
1183 while(iter.next()) {
1184 iter.method()->clear_inline_caches();
1185 }
1186 }
1187
1188 // Only used by whitebox API
1189 void CodeCache::cleanup_inline_caches_whitebox() {
1190 assert_locked_or_safepoint(CodeCache_lock);
1191 NMethodIterator iter(NMethodIterator::not_unloading);
1192 while(iter.next()) {
1193 iter.method()->cleanup_inline_caches_whitebox();
1194 }
1195 }
1196
1197 // Keeps track of time spent for checking dependencies
1198 NOT_PRODUCT(static elapsedTimer dependentCheckTime;)
1199
1200 #ifndef PRODUCT
1201 // Check if any of live methods dependencies have been invalidated.
1202 // (this is expensive!)
1203 static void check_live_nmethods_dependencies(DepChange& changes) {
1204 // Checked dependencies are allocated into this ResourceMark
1205 ResourceMark rm;
1206
1207 // Turn off dependency tracing while actually testing dependencies.
1208 FlagSetting fs(Dependencies::_verify_in_progress, true);
1209
1210 typedef HashTable<DependencySignature, int, 11027,
1211 AnyObj::RESOURCE_AREA, mtInternal,
1212 &DependencySignature::hash,
1213 &DependencySignature::equals> DepTable;
1214
1215 DepTable* table = new DepTable();
1216
1217 // Iterate over live nmethods and check dependencies of all nmethods that are not
1218 // marked for deoptimization. A particular dependency is only checked once.
1219 NMethodIterator iter(NMethodIterator::not_unloading);
1220 while(iter.next()) {
1221 nmethod* nm = iter.method();
1222 // Only notify for live nmethods
1223 if (!nm->is_marked_for_deoptimization()) {
1224 for (Dependencies::DepStream deps(nm); deps.next(); ) {
1225 // Construct abstraction of a dependency.
1226 DependencySignature* current_sig = new DependencySignature(deps);
1227
1228 // Determine if dependency is already checked. table->put(...) returns
1229 // 'true' if the dependency is added (i.e., was not in the hashtable).
1230 if (table->put(*current_sig, 1)) {
1231 Klass* witness = deps.check_dependency();
1232 if (witness != nullptr) {
1233 // Dependency checking failed. Print out information about the failed
1234 // dependency and finally fail with an assert. We can fail here, since
1235 // dependency checking is never done in a product build.
1236 deps.print_dependency(tty, witness, true);
1237 changes.print();
1238 nm->print();
1239 nm->print_dependencies_on(tty);
1240 assert(false, "Should have been marked for deoptimization");
1241 }
1242 }
1243 }
1244 }
1245 }
1246 }
1247 #endif
1248
1249 void CodeCache::mark_for_deoptimization(DeoptimizationScope* deopt_scope, KlassDepChange& changes) {
1250 MutexLocker mu(CodeCache_lock, Mutex::_no_safepoint_check_flag);
1251
1252 // search the hierarchy looking for nmethods which are affected by the loading of this class
1253
1254 // then search the interfaces this class implements looking for nmethods
1255 // which might be dependent of the fact that an interface only had one
1256 // implementor.
1257 // nmethod::check_all_dependencies works only correctly, if no safepoint
1258 // can happen
1259 NoSafepointVerifier nsv;
1260 for (DepChange::ContextStream str(changes, nsv); str.next(); ) {
1261 InstanceKlass* d = str.klass();
1262 {
1263 LogStreamHandle(Trace, dependencies) log;
1264 if (log.is_enabled()) {
1265 log.print("Processing context ");
1266 d->name()->print_value_on(&log);
1267 }
1268 }
1269 d->mark_dependent_nmethods(deopt_scope, changes);
1270 }
1271
1272 #ifndef PRODUCT
1273 if (VerifyDependencies) {
1274 // Object pointers are used as unique identifiers for dependency arguments. This
1275 // is only possible if no safepoint, i.e., GC occurs during the verification code.
1276 dependentCheckTime.start();
1277 check_live_nmethods_dependencies(changes);
1278 dependentCheckTime.stop();
1279 }
1280 #endif
1281 }
1282
1283 #if INCLUDE_JVMTI
1284 // RedefineClasses support for saving nmethods that are dependent on "old" methods.
1285 // We don't really expect this table to grow very large. If it does, it can become a hashtable.
1286 static GrowableArray<nmethod*>* old_nmethod_table = nullptr;
1287
1288 static void add_to_old_table(nmethod* c) {
1289 if (old_nmethod_table == nullptr) {
1290 old_nmethod_table = new (mtCode) GrowableArray<nmethod*>(100, mtCode);
1291 }
1292 old_nmethod_table->push(c);
1293 }
1294
1295 static void reset_old_method_table() {
1296 if (old_nmethod_table != nullptr) {
1297 delete old_nmethod_table;
1298 old_nmethod_table = nullptr;
1299 }
1300 }
1301
1302 // Remove this method when flushed.
1303 void CodeCache::unregister_old_nmethod(nmethod* c) {
1304 assert_lock_strong(CodeCache_lock);
1305 if (old_nmethod_table != nullptr) {
1306 int index = old_nmethod_table->find(c);
1307 if (index != -1) {
1308 old_nmethod_table->delete_at(index);
1309 }
1310 }
1311 }
1312
1313 void CodeCache::old_nmethods_do(MetadataClosure* f) {
1314 // Walk old method table and mark those on stack.
1315 int length = 0;
1316 if (old_nmethod_table != nullptr) {
1317 length = old_nmethod_table->length();
1318 for (int i = 0; i < length; i++) {
1319 // Walk all methods saved on the last pass. Concurrent class unloading may
1320 // also be looking at this method's metadata, so don't delete it yet if
1321 // it is marked as unloaded.
1322 old_nmethod_table->at(i)->metadata_do(f);
1323 }
1324 }
1325 log_debug(redefine, class, nmethod)("Walked %d nmethods for mark_on_stack", length);
1326 }
1327
1328 // Walk compiled methods and mark dependent methods for deoptimization.
1329 void CodeCache::mark_dependents_for_evol_deoptimization(DeoptimizationScope* deopt_scope) {
1330 assert(SafepointSynchronize::is_at_safepoint(), "Can only do this at a safepoint!");
1331 // Each redefinition creates a new set of nmethods that have references to "old" Methods
1332 // So delete old method table and create a new one.
1333 reset_old_method_table();
1334
1335 NMethodIterator iter(NMethodIterator::all);
1336 while(iter.next()) {
1337 nmethod* nm = iter.method();
1338 // Walk all alive nmethods to check for old Methods.
1339 // This includes methods whose inline caches point to old methods, so
1340 // inline cache clearing is unnecessary.
1341 if (nm->has_evol_metadata()) {
1342 deopt_scope->mark(nm);
1343 add_to_old_table(nm);
1344 }
1345 }
1346 }
1347
1348 void CodeCache::mark_all_nmethods_for_evol_deoptimization(DeoptimizationScope* deopt_scope) {
1349 assert(SafepointSynchronize::is_at_safepoint(), "Can only do this at a safepoint!");
1350 NMethodIterator iter(NMethodIterator::all);
1351 while(iter.next()) {
1352 nmethod* nm = iter.method();
1353 if (!nm->method()->is_method_handle_intrinsic()) {
1354 if (nm->can_be_deoptimized()) {
1355 deopt_scope->mark(nm);
1356 }
1357 if (nm->has_evol_metadata()) {
1358 add_to_old_table(nm);
1359 }
1360 }
1361 }
1362 }
1363
1364 #endif // INCLUDE_JVMTI
1365
1366 // Mark methods for deopt (if safe or possible).
1367 void CodeCache::mark_all_nmethods_for_deoptimization(DeoptimizationScope* deopt_scope) {
1368 MutexLocker mu(CodeCache_lock, Mutex::_no_safepoint_check_flag);
1369 NMethodIterator iter(NMethodIterator::not_unloading);
1370 while(iter.next()) {
1371 nmethod* nm = iter.method();
1372 if (!nm->is_native_method()) {
1373 deopt_scope->mark(nm);
1374 }
1375 }
1376 }
1377
1378 void CodeCache::mark_for_deoptimization(DeoptimizationScope* deopt_scope, Method* dependee) {
1379 MutexLocker mu(CodeCache_lock, Mutex::_no_safepoint_check_flag);
1380
1381 NMethodIterator iter(NMethodIterator::not_unloading);
1382 while(iter.next()) {
1383 nmethod* nm = iter.method();
1384 if (nm->is_dependent_on_method(dependee)) {
1385 deopt_scope->mark(nm);
1386 }
1387 }
1388 }
1389
1390 void CodeCache::make_marked_nmethods_deoptimized() {
1391 RelaxedNMethodIterator iter(RelaxedNMethodIterator::not_unloading);
1392 while(iter.next()) {
1393 nmethod* nm = iter.method();
1394 if (nm->is_marked_for_deoptimization() && !nm->has_been_deoptimized() && nm->can_be_deoptimized()) {
1395 nm->make_not_entrant(nmethod::InvalidationReason::MARKED_FOR_DEOPTIMIZATION);
1396 nm->make_deoptimized();
1397 }
1398 }
1399 }
1400
1401 // Marks compiled methods dependent on dependee.
1402 void CodeCache::mark_dependents_on(DeoptimizationScope* deopt_scope, InstanceKlass* dependee) {
1403 assert_lock_strong(Compile_lock);
1404
1405 if (!has_nmethods_with_dependencies()) {
1406 return;
1407 }
1408
1409 if (dependee->is_linked()) {
1410 // Class initialization state change.
1411 KlassInitDepChange changes(dependee);
1412 mark_for_deoptimization(deopt_scope, changes);
1413 } else {
1414 // New class is loaded.
1415 NewKlassDepChange changes(dependee);
1416 mark_for_deoptimization(deopt_scope, changes);
1417 }
1418 }
1419
1420 // Marks compiled methods dependent on dependee
1421 void CodeCache::mark_dependents_on_method_for_breakpoint(const methodHandle& m_h) {
1422 assert(SafepointSynchronize::is_at_safepoint(), "invariant");
1423
1424 DeoptimizationScope deopt_scope;
1425 // Compute the dependent nmethods
1426 mark_for_deoptimization(&deopt_scope, m_h());
1427 deopt_scope.deoptimize_marked();
1428 }
1429
1430 void CodeCache::verify() {
1431 assert_locked_or_safepoint(CodeCache_lock);
1432 FOR_ALL_HEAPS(heap) {
1433 (*heap)->verify();
1434 FOR_ALL_BLOBS(cb, *heap) {
1435 cb->verify();
1436 }
1437 }
1438 }
1439
1440 // A CodeHeap is full. Print out warning and report event.
1441 PRAGMA_DIAG_PUSH
1442 PRAGMA_FORMAT_NONLITERAL_IGNORED
1443 void CodeCache::report_codemem_full(CodeBlobType code_blob_type, bool print) {
1444 // Get nmethod heap for the given CodeBlobType and build CodeCacheFull event
1445 CodeHeap* heap = get_code_heap(code_blob_type);
1446 assert(heap != nullptr, "heap is null");
1447
1448 int full_count = heap->report_full();
1449
1450 if ((full_count == 1) || print) {
1451 // Not yet reported for this heap, report
1452 if (SegmentedCodeCache) {
1453 ResourceMark rm;
1454 stringStream msg1_stream, msg2_stream;
1455 msg1_stream.print("%s is full. Compiler has been disabled.",
1456 get_code_heap_name(code_blob_type));
1457 msg2_stream.print("Try increasing the code heap size using -XX:%s=",
1458 get_code_heap_flag_name(code_blob_type));
1459 const char *msg1 = msg1_stream.as_string();
1460 const char *msg2 = msg2_stream.as_string();
1461
1462 log_warning(codecache)("%s", msg1);
1463 log_warning(codecache)("%s", msg2);
1464 warning("%s", msg1);
1465 warning("%s", msg2);
1466 } else {
1467 const char *msg1 = "CodeCache is full. Compiler has been disabled.";
1468 const char *msg2 = "Try increasing the code cache size using -XX:ReservedCodeCacheSize=";
1469
1470 log_warning(codecache)("%s", msg1);
1471 log_warning(codecache)("%s", msg2);
1472 warning("%s", msg1);
1473 warning("%s", msg2);
1474 }
1475 stringStream s;
1476 // Dump code cache into a buffer before locking the tty.
1477 {
1478 MutexLocker mu(CodeCache_lock, Mutex::_no_safepoint_check_flag);
1479 print_summary(&s);
1480 }
1481 {
1482 ttyLocker ttyl;
1483 tty->print("%s", s.freeze());
1484 }
1485
1486 if (full_count == 1) {
1487 if (PrintCodeHeapAnalytics) {
1488 CompileBroker::print_heapinfo(tty, "all", 4096); // details, may be a lot!
1489 }
1490 }
1491 }
1492
1493 EventCodeCacheFull event;
1494 if (event.should_commit()) {
1495 event.set_codeBlobType((u1)code_blob_type);
1496 event.set_startAddress((u8)heap->low_boundary());
1497 event.set_commitedTopAddress((u8)heap->high());
1498 event.set_reservedTopAddress((u8)heap->high_boundary());
1499 event.set_entryCount(heap->blob_count());
1500 event.set_methodCount(heap->nmethod_count());
1501 event.set_adaptorCount(heap->adapter_count());
1502 event.set_unallocatedCapacity(heap->unallocated_capacity());
1503 event.set_fullCount(heap->full_count());
1504 event.set_codeCacheMaxCapacity(CodeCache::max_capacity());
1505 event.commit();
1506 }
1507 }
1508 PRAGMA_DIAG_POP
1509
1510 void CodeCache::print_memory_overhead() {
1511 size_t wasted_bytes = 0;
1512 FOR_ALL_ALLOCABLE_HEAPS(heap) {
1513 CodeHeap* curr_heap = *heap;
1514 for (CodeBlob* cb = (CodeBlob*)curr_heap->first(); cb != nullptr; cb = (CodeBlob*)curr_heap->next(cb)) {
1515 HeapBlock* heap_block = ((HeapBlock*)cb) - 1;
1516 wasted_bytes += heap_block->length() * CodeCacheSegmentSize - cb->size();
1517 }
1518 }
1519 // Print bytes that are allocated in the freelist
1520 ttyLocker ttl;
1521 tty->print_cr("Number of elements in freelist: %zd", freelists_length());
1522 tty->print_cr("Allocated in freelist: %zdkB", bytes_allocated_in_freelists()/K);
1523 tty->print_cr("Unused bytes in CodeBlobs: %zdkB", (wasted_bytes/K));
1524 tty->print_cr("Segment map size: %zdkB", allocated_segments()/K); // 1 byte per segment
1525 }
1526
1527 static void print_helper1(outputStream* st, const char* prefix, int total, int not_entrant, int used) {
1528 if (total > 0) {
1529 double ratio = (100.0 * used) / total;
1530 st->print("%s %3d nmethods: %3d not_entrant, %d used (%2.1f%%)", prefix, total, not_entrant, used, ratio);
1531 }
1532 }
1533
1534 void CodeCache::print_nmethod_statistics_on(outputStream* st) {
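  // Counters indexed as [is_aot][comp_level (+1 if preloaded)][0 = in_use, 1 = not_entrant, 2 = other][is_osr].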
1535 int stats [2][6][3][2] = {0};
1536 int stats_used[2][6][3][2] = {0};
1537
1538 int total_osr = 0;
1539 int total_entrant = 0;
1540 int total_non_entrant = 0;
1541 int total_other = 0;
1542 int total_used = 0;
1543
1544 NMethodIterator iter(NMethodIterator::all);
1545 while (iter.next()) {
1546 nmethod* nm = iter.method();
1547 if (nm->is_in_use()) {
1548 ++total_entrant;
1549 } else if (nm->is_not_entrant()) {
1550 ++total_non_entrant;
1551 } else {
1552 ++total_other;
1553 }
1554 if (nm->is_osr_method()) {
1555 ++total_osr;
1556 }
1557 if (nm->used()) {
1558 ++total_used;
1559 }
1560 assert(!nm->preloaded() || nm->comp_level() == CompLevel_full_optimization, "");
1561
1562 int idx1 = nm->is_aot() ? 1 : 0;
1563 int idx2 = nm->comp_level() + (nm->preloaded() ? 1 : 0);
1564 int idx3 = (nm->is_in_use() ? 0 :
1565 (nm->is_not_entrant() ? 1 :
1566 2));
1567 int idx4 = (nm->is_osr_method() ? 1 : 0);
1568 stats[idx1][idx2][idx3][idx4] += 1;
1569 if (nm->used()) {
1570 stats_used[idx1][idx2][idx3][idx4] += 1;
1571 }
1572 }
1573
1574 st->print("Total: %d methods (%d entrant / %d not_entrant; osr: %d ",
1575 total_entrant + total_non_entrant + total_other,
1576 total_entrant, total_non_entrant, total_osr);
1577 if (total_other > 0) {
1578 st->print("; %d other", total_other);
1579 }
1580 st->print_cr(")");
1581
1582 for (int i = CompLevel_simple; i <= CompLevel_full_optimization; i++) {
1583 int total_normal = stats[0][i][0][0] + stats[0][i][1][0] + stats[0][i][2][0];
1584 int total_osr = stats[0][i][0][1] + stats[0][i][1][1] + stats[0][i][2][1];
1585 if (total_normal + total_osr > 0) {
1586 st->print(" Tier%d:", i);
1587 print_helper1(st, "", total_normal, stats[0][i][1][0], stats_used[0][i][0][0] + stats_used[0][i][1][0]);
1588 print_helper1(st, "; osr:", total_osr, stats[0][i][1][1], stats_used[0][i][0][1] + stats_used[0][i][1][1]);
1589 st->cr();
1590 }
1591 }
1592 st->cr();
1593 for (int i = CompLevel_simple; i <= CompLevel_full_optimization + 1; i++) {
1594 int total_normal = stats[1][i][0][0] + stats[1][i][1][0] + stats[1][i][2][0];
1595 int total_osr = stats[1][i][0][1] + stats[1][i][1][1] + stats[1][i][2][1];
1596 assert(total_osr == 0, "sanity");
1597 if (total_normal + total_osr > 0) {
1598 st->print(" AOT Code T%d:", i);
1599 print_helper1(st, "", total_normal, stats[1][i][1][0], stats_used[1][i][0][0] + stats_used[1][i][1][0]);
1600 print_helper1(st, "; osr:", total_osr, stats[1][i][1][1], stats_used[1][i][0][1] + stats_used[1][i][1][1]);
1601 st->cr();
1602 }
1603 }
1604 }
1605
1606 //------------------------------------------------------------------------------------------------
1607 // Non-product version
1608
1609 #ifndef PRODUCT
1610
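// Traces a single CodeCache event for the given blob when PrintCodeCache2 is enabled;
// if no size is passed, the blob's own size is reported.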
1611 void CodeCache::print_trace(const char* event, CodeBlob* cb, uint size) {
1612 if (PrintCodeCache2) { // Need to add a new flag
1613 ResourceMark rm;
1614 if (size == 0) {
1615 int s = cb->size();
1616 assert(s >= 0, "CodeBlob size is negative: %d", s);
1617 size = (uint) s;
1618 }
1619 tty->print_cr("CodeCache %s: addr: " INTPTR_FORMAT ", size: 0x%x", event, p2i(cb), size);
1620 }
1621 }
1622
1623 void CodeCache::print_internals() {
1624 int nmethodCount = 0;
1625 int runtimeStubCount = 0;
1626 int upcallStubCount = 0;
1627 int adapterCount = 0;
1628 int mhAdapterCount = 0;
1629 int vtableBlobCount = 0;
1630 int deoptimizationStubCount = 0;
1631 int uncommonTrapStubCount = 0;
1632 int exceptionStubCount = 0;
1633 int safepointStubCount = 0;
1634 int bufferBlobCount = 0;
1635 int total = 0;
1636 int nmethodNotEntrant = 0;
1637 int nmethodJava = 0;
1638 int nmethodNative = 0;
1639 int max_nm_size = 0;
1640 ResourceMark rm;
1641
1642 int i = 0;
1643 FOR_ALL_ALLOCABLE_HEAPS(heap) {
1644 int heap_total = 0;
1645 tty->print_cr("-- %s --", (*heap)->name());
1646 FOR_ALL_BLOBS(cb, *heap) {
1647 total++;
1648 heap_total++;
1649 if (cb->is_nmethod()) {
1650 nmethod* nm = (nmethod*)cb;
1651
1652 tty->print("%4d: ", heap_total);
1653 CompileTask::print(tty, nm, (nm->is_not_entrant() ? "non-entrant" : ""), true, true);
1654
1655 nmethodCount++;
1656
1657 if (nm->is_not_entrant()) { nmethodNotEntrant++; }
1658 if (nm->method() != nullptr && nm->is_native_method()) { nmethodNative++; }
1659
1660 if (nm->method() != nullptr && nm->is_java_method()) {
1661 nmethodJava++;
1662 max_nm_size = MAX2(max_nm_size, nm->size());
1663 }
1664 } else if (cb->is_runtime_stub()) {
1665 runtimeStubCount++;
1666 } else if (cb->is_upcall_stub()) {
1667 upcallStubCount++;
1668 } else if (cb->is_deoptimization_stub()) {
1669 deoptimizationStubCount++;
1670 } else if (cb->is_uncommon_trap_stub()) {
1671 uncommonTrapStubCount++;
1672 } else if (cb->is_exception_stub()) {
1673 exceptionStubCount++;
1674 } else if (cb->is_safepoint_stub()) {
1675 safepointStubCount++;
1676 } else if (cb->is_adapter_blob()) {
1677 adapterCount++;
1678 } else if (cb->is_method_handles_adapter_blob()) {
1679 mhAdapterCount++;
1680 } else if (cb->is_vtable_blob()) {
1681 vtableBlobCount++;
1682 } else if (cb->is_buffer_blob()) {
1683 bufferBlobCount++;
1684 }
1685 }
1686 }
1687
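  // Build a histogram of Java nmethod sizes in 512-byte buckets, sized to cover the largest nmethod seen above.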
1688 int bucketSize = 512;
1689 int bucketLimit = max_nm_size / bucketSize + 1;
1690 int *buckets = NEW_C_HEAP_ARRAY(int, bucketLimit, mtCode);
1691 memset(buckets, 0, sizeof(int) * bucketLimit);
1692
1693 NMethodIterator iter(NMethodIterator::all);
1694 while (iter.next()) {
1695 nmethod* nm = iter.method();
1696 if (nm->method() != nullptr && nm->is_java_method()) {
1697 buckets[nm->size() / bucketSize]++;
1698 }
1699 }
1700
1701 tty->print_cr("Code Cache Entries (total of %d)",total);
1702 tty->print_cr("-------------------------------------------------");
1703 tty->print_cr("nmethods: %d",nmethodCount);
1704 tty->print_cr("\tnot_entrant: %d",nmethodNotEntrant);
1705 tty->print_cr("\tjava: %d",nmethodJava);
1706 tty->print_cr("\tnative: %d",nmethodNative);
1707 tty->print_cr("runtime_stubs: %d",runtimeStubCount);
1708 tty->print_cr("upcall_stubs: %d",upcallStubCount);
1709 tty->print_cr("adapters: %d",adapterCount);
1710 tty->print_cr("MH adapters: %d",mhAdapterCount);
1711 tty->print_cr("VTables: %d",vtableBlobCount);
1712 tty->print_cr("buffer blobs: %d",bufferBlobCount);
1713 tty->print_cr("deoptimization_stubs: %d",deoptimizationStubCount);
1714 tty->print_cr("uncommon_traps: %d",uncommonTrapStubCount);
1715 tty->print_cr("exception_stubs: %d",exceptionStubCount);
1716 tty->print_cr("safepoint_stubs: %d",safepointStubCount);
1717 tty->print_cr("\nnmethod size distribution");
1718 tty->print_cr("-------------------------------------------------");
1719
1720 for (int i = 0; i < bucketLimit; i++) {
1721 if (buckets[i] != 0) {
1722 tty->print("%d - %d bytes", i * bucketSize, (i + 1) * bucketSize);
1723 tty->fill_to(40);
1724 tty->print_cr("%d", buckets[i]);
1725 }
1726 }
1727
1728 FREE_C_HEAP_ARRAY(int, buckets);
1729 print_memory_overhead();
1730 }
1731
1732 #endif // !PRODUCT
1733
1734 void CodeCache::print() {
1735 print_summary(tty);
1736
1737 #ifndef PRODUCT
1738 if (!Verbose) return;
1739
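  // When Verbose is set, additionally break down all live blobs by nmethod compilation
  // level and by non-nmethod blob kind, and report the dependency checking time.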
1740 CodeBlob_sizes live[CompLevel_full_optimization + 1];
1741 CodeBlob_sizes runtimeStub;
1742 CodeBlob_sizes upcallStub;
1743 CodeBlob_sizes uncommonTrapStub;
1744 CodeBlob_sizes deoptimizationStub;
1745 CodeBlob_sizes exceptionStub;
1746 CodeBlob_sizes safepointStub;
1747 CodeBlob_sizes adapter;
1748 CodeBlob_sizes mhAdapter;
1749 CodeBlob_sizes vtableBlob;
1750 CodeBlob_sizes bufferBlob;
1751 CodeBlob_sizes other;
1752
1753 FOR_ALL_ALLOCABLE_HEAPS(heap) {
1754 FOR_ALL_BLOBS(cb, *heap) {
1755 if (cb->is_nmethod()) {
1756 const int level = cb->as_nmethod()->comp_level();
1757 assert(0 <= level && level <= CompLevel_full_optimization, "Invalid compilation level");
1758 live[level].add(cb);
1759 } else if (cb->is_runtime_stub()) {
1760 runtimeStub.add(cb);
1761 } else if (cb->is_upcall_stub()) {
1762 upcallStub.add(cb);
1763 } else if (cb->is_deoptimization_stub()) {
1764 deoptimizationStub.add(cb);
1765 } else if (cb->is_uncommon_trap_stub()) {
1766 uncommonTrapStub.add(cb);
1767 } else if (cb->is_exception_stub()) {
1768 exceptionStub.add(cb);
1769 } else if (cb->is_safepoint_stub()) {
1770 safepointStub.add(cb);
1771 } else if (cb->is_adapter_blob()) {
1772 adapter.add(cb);
1773 } else if (cb->is_method_handles_adapter_blob()) {
1774 mhAdapter.add(cb);
1775 } else if (cb->is_vtable_blob()) {
1776 vtableBlob.add(cb);
1777 } else if (cb->is_buffer_blob()) {
1778 bufferBlob.add(cb);
1779 } else {
1780 other.add(cb);
1781 }
1782 }
1783 }
1784
1785 tty->print_cr("nmethod dependency checking time %fs", dependentCheckTime.seconds());
1786
1787 tty->print_cr("nmethod blobs per compilation level:");
1788 for (int i = 0; i <= CompLevel_full_optimization; i++) {
1789 const char *level_name;
1790 switch (i) {
1791 case CompLevel_none: level_name = "none"; break;
1792 case CompLevel_simple: level_name = "simple"; break;
1793 case CompLevel_limited_profile: level_name = "limited profile"; break;
1794 case CompLevel_full_profile: level_name = "full profile"; break;
1795 case CompLevel_full_optimization: level_name = "full optimization"; break;
1796 default: assert(false, "invalid compilation level");
1797 }
1798 tty->print_cr("%s:", level_name);
1799 live[i].print("live");
1800 }
1801
1802 struct {
1803 const char* name;
1804 const CodeBlob_sizes* sizes;
1805 } non_nmethod_blobs[] = {
1806 { "runtime", &runtimeStub },
1807 { "upcall", &upcallStub },
1808 { "uncommon trap", &uncommonTrapStub },
1809 { "deoptimization", &deoptimizationStub },
1810 { "exception", &exceptionStub },
1811 { "safepoint", &safepointStub },
1812 { "adapter", &adapter },
1813 { "mh_adapter", &mhAdapter },
1814 { "vtable", &vtableBlob },
1815 { "buffer blob", &bufferBlob },
1816 { "other", &other },
1817 };
1818 tty->print_cr("Non-nmethod blobs:");
1819 for (auto& blob: non_nmethod_blobs) {
1820 blob.sizes->print(blob.name);
1821 }
1822
1823 if (WizardMode) {
1824 // print the oop_map usage
1825 int code_size = 0;
1826 int number_of_blobs = 0;
1827 int number_of_oop_maps = 0;
1828 int map_size = 0;
1829 FOR_ALL_ALLOCABLE_HEAPS(heap) {
1830 FOR_ALL_BLOBS(cb, *heap) {
1831 number_of_blobs++;
1832 code_size += cb->code_size();
1833 ImmutableOopMapSet* set = cb->oop_maps();
1834 if (set != nullptr) {
1835 number_of_oop_maps += set->count();
1836 map_size += set->nr_of_bytes();
1837 }
1838 }
1839 }
1840 tty->print_cr("OopMaps");
1841 tty->print_cr(" #blobs = %d", number_of_blobs);
1842 tty->print_cr(" code size = %d", code_size);
1843 tty->print_cr(" #oop_maps = %d", number_of_oop_maps);
1844 tty->print_cr(" map size = %d", map_size);
1845 }
1846
1847 #endif // !PRODUCT
1848 }
1849
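// Lists every nmethod in the allocatable heaps; a trailing 'N' marks not-entrant methods.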
1850 void CodeCache::print_nmethods_on(outputStream* st) {
1851 ResourceMark rm;
1852 int i = 0;
1853 FOR_ALL_ALLOCABLE_HEAPS(heap) {
1854 st->print_cr("-- %s --", (*heap)->name());
1855 FOR_ALL_BLOBS(cb, *heap) {
1856 i++;
1857 if (cb->is_nmethod()) {
1858 nmethod* nm = (nmethod*)cb;
1859 st->print("%4d: ", i);
1860 CompileTask::print(st, nm, nullptr, true, false);
1861
1862 const char non_entrant_char = (nm->is_not_entrant() ? 'N' : ' ');
1863 st->print_cr(" %c", non_entrant_char);
1864 }
1865 }
1866 }
1867 }
1868
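// Prints per-heap size, usage and free space (in KB); with 'detailed' it also prints the
// heap bounds, the segmented-cache totals, blob/nmethod/adapter counts and the compiler state.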
1869 void CodeCache::print_summary(outputStream* st, bool detailed) {
1870 int full_count = 0;
1871 julong total_used = 0;
1872 julong total_max_used = 0;
1873 julong total_free = 0;
1874 julong total_size = 0;
1875 FOR_ALL_HEAPS(heap_iterator) {
1876 CodeHeap* heap = (*heap_iterator);
1877 size_t total = (heap->high_boundary() - heap->low_boundary());
1878 if (_heaps->length() >= 1) {
1879 st->print("%s:", heap->name());
1880 } else {
1881 st->print("CodeCache:");
1882 }
1883 size_t size = total/K;
1884 size_t used = (total - heap->unallocated_capacity())/K;
1885 size_t max_used = heap->max_allocated_capacity()/K;
1886 size_t free = heap->unallocated_capacity()/K;
1887 total_size += size;
1888 total_used += used;
1889 total_max_used += max_used;
1890 total_free += free;
1891 st->print_cr(" size=%zuKb used=%zu"
1892 "Kb max_used=%zuKb free=%zuKb",
1893 size, used, max_used, free);
1894
1895 if (detailed) {
1896 st->print_cr(" bounds [" INTPTR_FORMAT ", " INTPTR_FORMAT ", " INTPTR_FORMAT "]",
1897 p2i(heap->low_boundary()),
1898 p2i(heap->high()),
1899 p2i(heap->high_boundary()));
1900
1901 full_count += get_codemem_full_count(heap->code_blob_type());
1902 }
1903 }
1904
1905 if (detailed) {
1906 if (SegmentedCodeCache) {
1907 st->print("CodeCache:");
1908 st->print_cr(" size=" JULONG_FORMAT "Kb, used=" JULONG_FORMAT
1909 "Kb, max_used=" JULONG_FORMAT "Kb, free=" JULONG_FORMAT "Kb",
1910 total_size, total_used, total_max_used, total_free);
1911 }
1912 st->print_cr(" total_blobs=" UINT32_FORMAT ", nmethods=" UINT32_FORMAT
1913 ", adapters=" UINT32_FORMAT ", full_count=" UINT32_FORMAT,
1914 blob_count(), nmethod_count(), adapter_count(), full_count);
1915 st->print_cr("Compilation: %s, stopped_count=%d, restarted_count=%d",
1916 CompileBroker::should_compile_new_jobs() ?
1917 "enabled" : Arguments::mode() == Arguments::_int ?
1918 "disabled (interpreter mode)" :
1919 "disabled (not enough contiguous free space left)",
1920 CompileBroker::get_total_compiler_stopped_count(),
1921 CompileBroker::get_total_compiler_restarted_count());
1922 }
1923 }
1924
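// Prints one line per not-unloading nmethod: compile id, compilation level, state,
// method name and the header/code begin/end addresses.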
1925 void CodeCache::print_codelist(outputStream* st) {
1926 MutexLocker mu(CodeCache_lock, Mutex::_no_safepoint_check_flag);
1927
1928 NMethodIterator iter(NMethodIterator::not_unloading);
1929 while (iter.next()) {
1930 nmethod* nm = iter.method();
1931 ResourceMark rm;
1932 char* method_name = nm->method()->name_and_sig_as_C_string();
1933 const char* jvmci_name = nullptr;
1934 #if INCLUDE_JVMCI
1935 jvmci_name = nm->jvmci_name();
1936 #endif
1937 st->print_cr("%d %d %d %s%s%s [" INTPTR_FORMAT ", " INTPTR_FORMAT " - " INTPTR_FORMAT "]",
1938 nm->compile_id(), nm->comp_level(), nm->get_state(),
1939 method_name, jvmci_name ? " jvmci_name=" : "", jvmci_name ? jvmci_name : "",
1940 (intptr_t)nm->header_begin(), (intptr_t)nm->code_begin(), (intptr_t)nm->code_end());
1941 }
1942 }
1943
1944 void CodeCache::print_layout(outputStream* st) {
1945 MutexLocker mu(CodeCache_lock, Mutex::_no_safepoint_check_flag);
1946 ResourceMark rm;
1947 print_summary(st, true);
1948 }
1949
1950 void CodeCache::log_state(outputStream* st) {
1951 st->print(" total_blobs='" UINT32_FORMAT "' nmethods='" UINT32_FORMAT "'"
1952 " adapters='" UINT32_FORMAT "' free_code_cache='%zu'",
1953 blob_count(), nmethod_count(), adapter_count(),
1954 unallocated_capacity());
1955 }
1956
1957 #ifdef LINUX
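// Writes a perf map file (one "<code start> <code size> <name>" line per not-unloading
// code blob) so that the Linux perf tool can symbolize JIT-compiled code.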
1958 void CodeCache::write_perf_map(const char* filename, outputStream* st) {
1959 MutexLocker mu(CodeCache_lock, Mutex::_no_safepoint_check_flag);
1960 char fname[JVM_MAXPATHLEN];
1961 if (filename == nullptr) {
1962 // Invocation outside of jcmd requires pid substitution.
1963 if (!Arguments::copy_expand_pid(DEFAULT_PERFMAP_FILENAME,
1964 strlen(DEFAULT_PERFMAP_FILENAME),
1965 fname, JVM_MAXPATHLEN)) {
1966 st->print_cr("Warning: Not writing perf map as pid substitution failed.");
1967 return;
1968 }
1969 filename = fname;
1970 }
1971 fileStream fs(filename, "w");
1972 if (!fs.is_open()) {
1973 st->print_cr("Warning: Failed to create %s for perf map", filename);
1974 return;
1975 }
1976
1977 AllCodeBlobsIterator iter(AllCodeBlobsIterator::not_unloading);
1978 while (iter.next()) {
1979 CodeBlob *cb = iter.method();
1980 ResourceMark rm;
1981 const char* method_name = nullptr;
1982 const char* jvmci_name = nullptr;
1983 if (cb->is_nmethod()) {
1984 nmethod* nm = cb->as_nmethod();
1985 method_name = nm->method()->external_name();
1986 #if INCLUDE_JVMCI
1987 jvmci_name = nm->jvmci_name();
1988 #endif
1989 } else {
1990 method_name = cb->name();
1991 }
1992 fs.print_cr(INTPTR_FORMAT " " INTPTR_FORMAT " %s%s%s",
1993 (intptr_t)cb->code_begin(), (intptr_t)cb->code_size(),
1994 method_name, jvmci_name ? " jvmci_name=" : "", jvmci_name ? jvmci_name : "");
1995 }
1996 }
1997 #endif // LINUX
1998
1999 //---< BEGIN >--- CodeHeap State Analytics.
2000
2001 void CodeCache::aggregate(outputStream *out, size_t granularity) {
2002 FOR_ALL_ALLOCABLE_HEAPS(heap) {
2003 CodeHeapState::aggregate(out, (*heap), granularity);
2004 }
2005 }
2006
2007 void CodeCache::discard(outputStream *out) {
2008 FOR_ALL_ALLOCABLE_HEAPS(heap) {
2009 CodeHeapState::discard(out, (*heap));
2010 }
2011 }
2012
2013 void CodeCache::print_usedSpace(outputStream *out) {
2014 FOR_ALL_ALLOCABLE_HEAPS(heap) {
2015 CodeHeapState::print_usedSpace(out, (*heap));
2016 }
2017 }
2018
2019 void CodeCache::print_freeSpace(outputStream *out) {
2020 FOR_ALL_ALLOCABLE_HEAPS(heap) {
2021 CodeHeapState::print_freeSpace(out, (*heap));
2022 }
2023 }
2024
2025 void CodeCache::print_count(outputStream *out) {
2026 FOR_ALL_ALLOCABLE_HEAPS(heap) {
2027 CodeHeapState::print_count(out, (*heap));
2028 }
2029 }
2030
2031 void CodeCache::print_space(outputStream *out) {
2032 FOR_ALL_ALLOCABLE_HEAPS(heap) {
2033 CodeHeapState::print_space(out, (*heap));
2034 }
2035 }
2036
2037 void CodeCache::print_age(outputStream *out) {
2038 FOR_ALL_ALLOCABLE_HEAPS(heap) {
2039 CodeHeapState::print_age(out, (*heap));
2040 }
2041 }
2042
2043 void CodeCache::print_names(outputStream *out) {
2044 FOR_ALL_ALLOCABLE_HEAPS(heap) {
2045 CodeHeapState::print_names(out, (*heap));
2046 }
2047 }
2048 //---< END >--- CodeHeap State Analytics.