/*
 * Copyright (c) 1997, 2024, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#ifndef SHARE_CODE_CODECACHE_HPP
#define SHARE_CODE_CODECACHE_HPP

#include "code/codeBlob.hpp"
#include "code/nmethod.hpp"
#include "gc/shared/gcBehaviours.hpp"
#include "memory/allocation.hpp"
#include "memory/heap.hpp"
#include "oops/instanceKlass.hpp"
#include "oops/oopsHierarchy.hpp"
#include "runtime/mutexLocker.hpp"
#include "utilities/numberSeq.hpp"
// The CodeCache implements the code cache for various pieces of generated
// code, e.g., compiled java methods, runtime stubs, transition frames, etc.
// The entries in the CodeCache are all CodeBlobs.

// -- Implementation --
// The CodeCache consists of one or more CodeHeaps, each of which contains
// CodeBlobs of a specific CodeBlobType. Currently heaps for the following
// types are available:
//  - Non-nmethods: Non-nmethods like Buffers, Adapters and Runtime Stubs
//  - Profiled nmethods: nmethods that are profiled, i.e., those
//    executed at level 2 or 3
//  - Non-Profiled nmethods: nmethods that are not profiled, i.e., those
//    executed at level 1 or 4 and native methods
//  - All: Used for code of all types if code cache segmentation is disabled.
//
// In the rare case of the non-nmethod code heap getting full, non-nmethod code
// is stored in the non-profiled code heap as a fallback.
//
// Depending on the availability of compilers and the compilation mode, there
// may be fewer heaps. The size of the code heaps depends on the values of
// ReservedCodeCacheSize, NonProfiledCodeHeapSize and ProfiledCodeHeapSize
// (see CodeCache::heap_available(..) and CodeCache::initialize_heaps(..)
// for details).
//
// Code cache segmentation is controlled by the flag SegmentedCodeCache.
// If turned off, all code types are stored in a single code heap. By default,
// code cache segmentation is turned on if tiered mode is enabled and
// ReservedCodeCacheSize >= 240 MB.
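//
// Example (illustrative; the flag values are made up, and the sizing rules
// actually applied are in CodeCache::initialize_heaps()): segmentation can
// be requested explicitly with
//   -XX:+SegmentedCodeCache -XX:ReservedCodeCacheSize=256m
//   -XX:NonNMethodCodeHeapSize=8m -XX:ProfiledCodeHeapSize=120m
//   -XX:NonProfiledCodeHeapSize=128m
// where the three code heap sizes add up to ReservedCodeCacheSize
// (8m + 120m + 128m = 256m).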
//
// All methods of the CodeCache accepting a CodeBlobType only apply to
// CodeBlobs of the given type. For example, iteration over the CodeBlobs
// of a specific type can be done with CodeCache::first_blob(..) and
// CodeCache::next_blob(..) and the corresponding CodeBlobType, as sketched
// below.
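//
// A minimal sketch (illustrative; these helpers are private, so external
// code should use the CodeBlobIterator typedefs at the end of this file,
// and the CodeCache_lock must be held):
//
//   CodeHeap* heap = CodeCache::get_code_heap(CodeBlobType::MethodNonProfiled);
//   for (CodeBlob* cb = CodeCache::first_blob(heap); cb != nullptr;
//        cb = CodeCache::next_blob(heap, cb)) {
//     // ... process cb ...
//   }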
//
// IMPORTANT: If you add new CodeHeaps to the code cache or change the
// existing ones, make sure to adapt the dtrace scripts (jhelper.d) for
// Solaris and BSD.

class ExceptionCache;
class KlassDepChange;
class OopClosure;
class ShenandoahParallelCodeHeapIterator;
class NativePostCallNop;
class DeoptimizationScope;
class ReservedSpace;

#ifdef LINUX
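// Default location of the perf map file; %p is presumably expanded to the
// JVM's process id, matching the /tmp/perf-<pid>.map naming that the Linux
// perf tool expects (an assumption noted here, not a guarantee).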
#define DEFAULT_PERFMAP_FILENAME "/tmp/perf-%p.map"
#endif

class CodeCache : AllStatic {
  friend class VMStructs;
  friend class JVMCIVMStructs;
  template <class T, class Filter, bool is_relaxed> friend class CodeBlobIterator;
  friend class WhiteBox;
  friend class ShenandoahParallelCodeHeapIterator;
 private:
  // CodeHeaps of the cache
  static GrowableArray<CodeHeap*>* _heaps;
  static GrowableArray<CodeHeap*>* _nmethod_heaps;
  static GrowableArray<CodeHeap*>* _allocable_heaps;

  static address _low_bound;  // Lower bound of CodeHeap addresses
  static address _high_bound; // Upper bound of CodeHeap addresses
  static volatile int _number_of_nmethods_with_dependencies; // Total number of nmethods with dependencies

  static uint8_t _unloading_cycle; // Global state for recognizing old nmethods that need to be unloaded
  static uint64_t _gc_epoch;       // Global state for tracking when nmethods were found to be on-stack
  static uint64_t _cold_gc_count;  // Global state for determining how many GCs are needed before an nmethod is cold
  static size_t _last_unloading_used;
  static double _last_unloading_time;
  static TruncatedSeq _unloading_gc_intervals;
  static TruncatedSeq _unloading_allocation_rates;
  static volatile bool _unloading_threshold_gc_requested;

  static ExceptionCache* volatile _exception_cache_purge_list;

  // CodeHeap management
  static void initialize_heaps(); // Initializes the CodeHeaps

  // Creates a new heap with the given name and size, containing CodeBlobs of the given type
  static void add_heap(ReservedSpace rs, const char* name, CodeBlobType code_blob_type);
  static CodeHeap* get_code_heap_containing(void* p);          // Returns the CodeHeap containing the given pointer, or nullptr
  static CodeHeap* get_code_heap(const void* cb);              // Returns the CodeHeap for the given CodeBlob
  static CodeHeap* get_code_heap(CodeBlobType code_blob_type); // Returns the CodeHeap for the given CodeBlobType
  // Returns the name of the VM option to set the size of the corresponding CodeHeap
  static const char* get_code_heap_flag_name(CodeBlobType code_blob_type);
  static ReservedSpace reserve_heap_memory(size_t size, size_t rs_ps); // Reserves one contiguous chunk of memory for the CodeHeaps

  // Iteration
  static CodeBlob* first_blob(CodeHeap* heap);              // Returns the first CodeBlob on the given CodeHeap
  static CodeBlob* first_blob(CodeBlobType code_blob_type); // Returns the first CodeBlob of the given type
  static CodeBlob* next_blob(CodeHeap* heap, CodeBlob* cb); // Returns the next CodeBlob on the given CodeHeap

 private:
  static size_t bytes_allocated_in_freelists();
  static int allocated_segments();
  static size_t freelists_length();

  // Make private to prevent unsafe calls. Not all CodeBlob*'s are embedded in a CodeHeap.
  static bool contains(CodeBlob *p) { fatal("don't call me!"); return false; }

 public:
  // Initialization
  static void initialize();
  static size_t page_size(bool aligned = true, size_t min_pages = 1); // Returns the page size used by the CodeCache

  static int code_heap_compare(CodeHeap* const &lhs, CodeHeap* const &rhs);

  static void add_heap(CodeHeap* heap);
  static const GrowableArray<CodeHeap*>* heaps() { return _heaps; }
  static const GrowableArray<CodeHeap*>* nmethod_heaps() { return _nmethod_heaps; }

  static void* map_aot_code();
  // Allocation/administration
  static CodeBlob* allocate(uint size, CodeBlobType code_blob_type, bool handle_alloc_failure = true, CodeBlobType orig_code_blob_type = CodeBlobType::All); // allocates a new CodeBlob
  static void commit(CodeBlob* cb);                        // called when the allocated CodeBlob has been filled
  static void free(CodeBlob* cb);                          // frees a CodeBlob
  static void free_unused_tail(CodeBlob* cb, size_t used); // frees the unused tail of a CodeBlob (only used by TemplateInterpreter::initialize())
  static bool contains(void *p);                           // returns whether p is in the code cache
  static bool contains(nmethod* nm);                       // returns whether nm is in the code cache
  static void blobs_do(void f(CodeBlob* cb));              // iterates over all CodeBlobs
  static void nmethods_do(void f(nmethod* nm));            // iterates over all nmethods
  static void nmethods_do(NMethodClosure* cl);             // iterates over all nmethods
  static void metadata_do(MetadataClosure* f);             // iterates over metadata in alive nmethods

  // Lookup
  static CodeBlob* find_blob(void* start);                 // Returns the CodeBlob containing the given address
  static CodeBlob* find_blob_fast(void* start);            // Returns the CodeBlob containing the given address
  static CodeBlob* find_blob_and_oopmap(void* start, int& slot); // Returns the CodeBlob containing the given address
  static int find_oopmap_slot_fast(void* start);           // Returns a fast oopmap slot if there is any; -1 otherwise
  static nmethod* find_nmethod(void* start);               // Returns the nmethod containing the given address

  static int blob_count();                                 // Returns the total number of CodeBlobs in the cache
  static int blob_count(CodeBlobType code_blob_type);
  static int adapter_count();                              // Returns the total number of Adapters in the cache
  static int adapter_count(CodeBlobType code_blob_type);
  static int nmethod_count();                              // Returns the total number of nmethods in the cache
  static int nmethod_count(CodeBlobType code_blob_type);

  // GC support
  static void verify_oops();

  // Helper scope object managing code cache unlinking behavior, i.e., it sets
  // and restores the closure that determines which nmethods are going to be
  // removed during the unlinking part of code cache unloading.
  class UnlinkingScope : StackObj {
    ClosureIsUnloadingBehaviour _is_unloading_behaviour;
    IsUnloadingBehaviour* _saved_behaviour;

  public:
    UnlinkingScope(BoolObjectClosure* is_alive);
    ~UnlinkingScope();
  };
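
  // Usage sketch (illustrative; real call sites live in GC-specific
  // unloading code and may differ):
  //
  //   {
  //     CodeCache::UnlinkingScope scope(is_alive); // is_alive: a BoolObjectClosure*
  //     // ... run the unlinking part of code cache unloading ...
  //   } // destructor restores the previously installed IsUnloadingBehaviour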

  // Code cache unloading heuristics
  static uint64_t cold_gc_count();
  static void update_cold_gc_count();
  static void gc_on_allocation();

  // The GC epoch and marking_cycle code below is there to support sweeping
  // nmethods in loom stack chunks.
  static uint64_t gc_epoch();
  static bool is_gc_marking_cycle_active();
  static uint64_t previous_completed_gc_marking_cycle();
  static void on_gc_marking_cycle_start();
  static void on_gc_marking_cycle_finish();
  // Arm nmethods so that special actions are taken (nmethod_entry_barrier) for
  // on-stack nmethods. It's used in two places:
  // 1. Used before the start of concurrent marking so that oops inside
  //    on-stack nmethods are visited.
  // 2. Used at the end of (stw/concurrent) marking so that nmethod::_gc_epoch
  //    is up-to-date, which provides a more accurate estimate of
  //    nmethod::is_cold.
  static void arm_all_nmethods();

  static void maybe_restart_compiler(size_t freed_memory);
  static void do_unloading(bool unloading_occurred);
  static uint8_t unloading_cycle() { return _unloading_cycle; }

  static void increment_unloading_cycle();

  static void release_exception_cache(ExceptionCache* entry);
  static void purge_exception_caches();

  // Printing/debugging
  static void print();  // prints summary
  static void print_internals();
  static void print_nmethods_on(outputStream* st);
  static void print_memory_overhead();
  static void verify(); // verifies the code cache
  static void print_trace(const char* event, CodeBlob* cb, uint size = 0) PRODUCT_RETURN;
  static void print_summary(outputStream* st, bool detailed = true); // Prints a summary of the code cache usage
  static void log_state(outputStream* st);
  LINUX_ONLY(static void write_perf_map(const char* filename, outputStream* st);) // Writes a perf map to filename; warning and error messages go to st
  static const char* get_code_heap_name(CodeBlobType code_blob_type) { return (heap_available(code_blob_type) ? get_code_heap(code_blob_type)->name() : "Unused"); }
  static void report_codemem_full(CodeBlobType code_blob_type, bool print);

  static void print_nmethod_statistics_on(outputStream* st);

  // Dcmd (Diagnostic commands)
  static void print_codelist(outputStream* st);
  static void print_layout(outputStream* st);

  // The full limits of the CodeCache
  static address low_bound()  { return _low_bound; }
  static address low_bound(CodeBlobType code_blob_type);
  static address high_bound() { return _high_bound; }
  static address high_bound(CodeBlobType code_blob_type);

  // Profiling
  static size_t capacity();
  static size_t unallocated_capacity(CodeBlobType code_blob_type);
  static size_t unallocated_capacity();
  static size_t max_capacity();

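  // Ratio used by compilation heuristics; larger values indicate a fuller
  // code cache. (Illustrative reading: roughly max_capacity() divided by the
  // unallocated capacity; see the implementation for the exact definition.)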
  static double reverse_free_ratio();

  static size_t max_distance_to_non_nmethod();
  static bool is_non_nmethod(address addr);

  static void clear_inline_caches();            // clear all inline caches
  static void cleanup_inline_caches_whitebox(); // clean bad nmethods from inline caches

  // Returns true if a dedicated CodeHeap for the given CodeBlobType is available
  static bool heap_available(CodeBlobType code_blob_type);

  // Returns the CodeBlobType for the given nmethod
  static CodeBlobType get_code_blob_type(const nmethod* nm) {
    return get_code_heap(nm)->code_blob_type();
  }

  static bool code_blob_type_accepts_nmethod(CodeBlobType type) {
    return type == CodeBlobType::All || type <= CodeBlobType::MethodProfiled;
  }

  static bool code_blob_type_accepts_allocable(CodeBlobType type) {
    return type <= CodeBlobType::All;
  }

  // Returns the CodeBlobType for the given compilation level
  static CodeBlobType get_code_blob_type(int comp_level) {
    if (comp_level == CompLevel_none ||
        comp_level == CompLevel_simple ||
        comp_level == CompLevel_full_optimization) {
      // Non profiled methods
      return CodeBlobType::MethodNonProfiled;
    } else if (comp_level == CompLevel_limited_profile ||
               comp_level == CompLevel_full_profile) {
      // Profiled methods
      return CodeBlobType::MethodProfiled;
    }
    ShouldNotReachHere();
    return static_cast<CodeBlobType>(0);
  }
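  // For example, CompLevel_full_optimization (C2, level 4) maps to
  // CodeBlobType::MethodNonProfiled, while CompLevel_full_profile (C1 with
  // full profiling, level 3) maps to CodeBlobType::MethodProfiled.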

  static void verify_clean_inline_caches();

  // Deoptimization
 private:
  static void mark_for_deoptimization(DeoptimizationScope* deopt_scope, KlassDepChange& changes);

 public:
  static void mark_all_nmethods_for_deoptimization(DeoptimizationScope* deopt_scope);
  static void mark_for_deoptimization(DeoptimizationScope* deopt_scope, Method* dependee);
  static void make_marked_nmethods_deoptimized();

  // Marks dependents during classloading
  static void mark_dependents_on(DeoptimizationScope* deopt_scope, InstanceKlass* dependee);

  // RedefineClasses support
  // Marks in case of evolution
  static void mark_dependents_for_evol_deoptimization(DeoptimizationScope* deopt_scope);
  static void mark_all_nmethods_for_evol_deoptimization(DeoptimizationScope* deopt_scope);
  static void old_nmethods_do(MetadataClosure* f) NOT_JVMTI_RETURN;
  static void unregister_old_nmethod(nmethod* c) NOT_JVMTI_RETURN;

  // Support for fullspeed debugging
  static void mark_dependents_on_method_for_breakpoint(const methodHandle& dependee);

  // Tells whether there are nmethods with dependencies
  static bool has_nmethods_with_dependencies();

  static int get_codemem_full_count(CodeBlobType code_blob_type) {
    CodeHeap* heap = get_code_heap(code_blob_type);
    return (heap != nullptr) ? heap->full_count() : 0;
  }

  // CodeHeap State Analytics.
  // Interface methods for CodeHeap printing, called by CompileBroker.
  static void aggregate(outputStream *out, size_t granularity);
  static void discard(outputStream *out);
  static void print_usedSpace(outputStream *out);
  static void print_freeSpace(outputStream *out);
  static void print_count(outputStream *out);
  static void print_space(outputStream *out);
  static void print_age(outputStream *out);
  static void print_names(outputStream *out);
};

// Iterator for the code blobs in the CodeCache.
// The relaxed iterators only hold the CodeCache_lock across calls to next().
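//
// Usage sketch (illustrative):
//
//   MutexLocker mu(CodeCache_lock, Mutex::_no_safepoint_check_flag);
//   NMethodIterator iter(NMethodIterator::not_unloading);
//   while (iter.next()) {
//     nmethod* nm = iter.method();
//     // ... inspect nm ...
//   }
//
// (NMethodIterator and the related typedefs are defined at the end of this file.)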
template <class T, class Filter, bool is_relaxed> class CodeBlobIterator : public StackObj {
 public:
  enum LivenessFilter { all, not_unloading };

 private:
  CodeBlob* _code_blob; // Current CodeBlob
  GrowableArrayIterator<CodeHeap*> _heap;
  GrowableArrayIterator<CodeHeap*> _end;
  bool _not_unloading;  // If true, skip nmethods that are unloading

  bool next_impl() {
    for (;;) {
      // Walk through heaps as required
      if (!next_blob()) {
        if (_heap == _end) {
          return false;
        }
        ++_heap;
        continue;
      }

      // Filter is_unloading as required
      if (_not_unloading) {
        nmethod* nm = _code_blob->as_nmethod_or_null();
        if (nm != nullptr && nm->is_unloading()) {
          continue;
        }
      }

      return true;
    }
  }

 public:
  CodeBlobIterator(LivenessFilter filter, T* nm = nullptr)
    : _not_unloading(filter == not_unloading)
  {
    if (Filter::heaps() == nullptr) {
      // The iterator is supposed to shortcut since we have
      // _heap == _end, but make sure we do not have garbage
      // in other fields as well.
      _code_blob = nullptr;
      return;
    }
    _heap = Filter::heaps()->begin();
    _end = Filter::heaps()->end();
    // If set to nullptr, initialized by first call to next()
    _code_blob = nm;
    if (nm != nullptr) {
      while (!(*_heap)->contains(_code_blob)) {
        ++_heap;
      }
      assert((*_heap)->contains(_code_blob), "match not found");
    }
  }

  // Advance iterator to next blob
  bool next() {
    if (is_relaxed) {
      MutexLocker ml(CodeCache_lock, Mutex::_no_safepoint_check_flag);
      return next_impl();
    } else {
      assert_locked_or_safepoint(CodeCache_lock);
      return next_impl();
    }
  }

  bool end() const { return _code_blob == nullptr; }
  T* method() const { return (T*)_code_blob; }

 private:
  // Advance iterator to the next blob in the current code heap
  bool next_blob() {
    if (_heap == _end) {
      return false;
    }
    CodeHeap* heap = *_heap;
    // Get first method CodeBlob
    if (_code_blob == nullptr) {
      _code_blob = CodeCache::first_blob(heap);
      if (_code_blob == nullptr) {
        return false;
      } else if (Filter::apply(_code_blob)) {
        return true;
      }
    }
    // Search for next method CodeBlob
    _code_blob = CodeCache::next_blob(heap, _code_blob);
    while (_code_blob != nullptr && !Filter::apply(_code_blob)) {
      _code_blob = CodeCache::next_blob(heap, _code_blob);
    }
    return _code_blob != nullptr;
  }
};

struct NMethodFilter {
  static bool apply(CodeBlob* cb) { return cb->is_nmethod(); }
  static const GrowableArray<CodeHeap*>* heaps() { return CodeCache::nmethod_heaps(); }
};

struct AllCodeBlobsFilter {
  static bool apply(CodeBlob* cb) { return true; }
  static const GrowableArray<CodeHeap*>* heaps() { return CodeCache::heaps(); }
};
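
// A custom filter (illustrative sketch; MyAdapterFilter and AdapterIterator
// are hypothetical names) only needs the same static interface:
//
//   struct MyAdapterFilter {
//     static bool apply(CodeBlob* cb) { return cb->is_adapter_blob(); }
//     static const GrowableArray<CodeHeap*>* heaps() { return CodeCache::heaps(); }
//   };
//   typedef CodeBlobIterator<CodeBlob, MyAdapterFilter, false> AdapterIterator;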

typedef CodeBlobIterator<nmethod, NMethodFilter, false /* is_relaxed */> NMethodIterator;
typedef CodeBlobIterator<nmethod, NMethodFilter, true  /* is_relaxed */> RelaxedNMethodIterator;
typedef CodeBlobIterator<CodeBlob, AllCodeBlobsFilter, false /* is_relaxed */> AllCodeBlobsIterator;

#endif // SHARE_CODE_CODECACHE_HPP