/*
 * Copyright (c) 1997, 2024, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#ifndef SHARE_CODE_CODECACHE_HPP
#define SHARE_CODE_CODECACHE_HPP

#include "code/codeBlob.hpp"
#include "code/nmethod.hpp"
#include "gc/shared/gcBehaviours.hpp"
#include "memory/allocation.hpp"
#include "memory/heap.hpp"
#include "oops/instanceKlass.hpp"
#include "oops/oopsHierarchy.hpp"
#include "runtime/mutexLocker.hpp"
#include "utilities/numberSeq.hpp"

// The CodeCache implements the code cache for various pieces of generated
// code, e.g., compiled java methods, runtime stubs, transition frames, etc.
// The entries in the CodeCache are all CodeBlob's.

// -- Implementation --
// The CodeCache consists of one or more CodeHeaps, each of which contains
// CodeBlobs of a specific CodeBlobType. Currently heaps for the following
// types are available:
//  - Non-nmethods: Non-nmethods like Buffers, Adapters and Runtime Stubs
//  - Profiled nmethods: nmethods that are profiled, i.e., those
//    executed at level 2 or 3
//  - Non-Profiled nmethods: nmethods that are not profiled, i.e., those
//    executed at level 1 or 4 and native methods
//  - All: Used for code of all types if code cache segmentation is disabled.
//
// In the rare case of the non-nmethod code heap getting full, non-nmethod code
// will be stored in the non-profiled code heap as a fallback solution.
//
// Depending on the availability of compilers and compilation mode there
// may be fewer heaps. The size of the code heaps depends on the values of
// ReservedCodeCacheSize, NonProfiledCodeHeapSize and ProfiledCodeHeapSize
// (see CodeCache::heap_available(..) and CodeCache::initialize_heaps(..)
// for details).
//
// Code cache segmentation is controlled by the flag SegmentedCodeCache.
// If turned off, all code types are stored in a single code heap. By default
// code cache segmentation is turned on if tiered mode is enabled and
// ReservedCodeCacheSize >= 240 MB.
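//
// For example (the sizes below are illustrative only; the actual defaults are
// computed by initialize_heaps() and depend on platform and flags), a
// segmented code cache with explicit heap sizes can be requested on the
// command line:
//
//   -XX:+SegmentedCodeCache -XX:ReservedCodeCacheSize=256m
//   -XX:NonNMethodCodeHeapSize=8m -XX:ProfiledCodeHeapSize=124m
//   -XX:NonProfiledCodeHeapSize=124m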
//
// All methods of the CodeCache accepting a CodeBlobType only apply to
// CodeBlobs of the given type. For example, iteration over the
// CodeBlobs of a specific type can be done by using CodeCache::first_blob(..)
// and CodeCache::next_blob(..) and providing the corresponding CodeBlobType.
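//
// A minimal iteration sketch using the public iterator types defined at the
// end of this file (the non-relaxed iterators assume the caller holds the
// CodeCache_lock or is at a safepoint, see CodeBlobIterator::next()):
//
//   MutexLocker mu(CodeCache_lock, Mutex::_no_safepoint_check_flag);
//   NMethodIterator iter(NMethodIterator::not_unloading);
//   while (iter.next()) {
//     nmethod* nm = iter.method();
//     // ... inspect nm while the lock is held ...
//   }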
//
// IMPORTANT: If you add new CodeHeaps to the code cache or change the
// existing ones, make sure to adapt the dtrace scripts (jhelper.d) for
// Solaris and BSD.

class ExceptionCache;
class KlassDepChange;
class OopClosure;
class ShenandoahParallelCodeHeapIterator;
class NativePostCallNop;
class DeoptimizationScope;
class ReservedSpace;

#ifdef LINUX
#define DEFAULT_PERFMAP_FILENAME "/tmp/perf-%p.map"
#endif

class CodeCache : AllStatic {
  friend class VMStructs;
  friend class JVMCIVMStructs;
  template <class T, class Filter, bool is_relaxed> friend class CodeBlobIterator;
  friend class WhiteBox;
  friend class ShenandoahParallelCodeHeapIterator;
 private:
  // CodeHeaps of the cache
  static GrowableArray<CodeHeap*>* _heaps;
  static GrowableArray<CodeHeap*>* _nmethod_heaps;
  static GrowableArray<CodeHeap*>* _allocable_heaps;

  static address _low_bound;  // Lower bound of CodeHeap addresses
  static address _high_bound; // Upper bound of CodeHeap addresses
  static volatile int _number_of_nmethods_with_dependencies; // Total number of nmethods with dependencies

  static uint8_t  _unloading_cycle; // Global state for recognizing old nmethods that need to be unloaded
  static uint64_t _gc_epoch;        // Global state for tracking when nmethods were found to be on-stack
  static uint64_t _cold_gc_count;   // Global state for determining how many GCs are needed before an nmethod is cold
  static size_t _last_unloading_used;              // Code cache usage at the previous unloading cycle
  static double _last_unloading_time;              // Time of the previous unloading cycle
  static TruncatedSeq _unloading_gc_intervals;     // Recent intervals between unloading GCs
  static TruncatedSeq _unloading_allocation_rates; // Recent code cache allocation rates
  static volatile bool _unloading_threshold_gc_requested; // Whether a GC was requested due to the unloading threshold

  static ExceptionCache* volatile _exception_cache_purge_list;

  // CodeHeap management
  static void initialize_heaps(); // Initializes the CodeHeaps

  // Creates a new heap with the given name and size, containing CodeBlobs of the given type
  static void add_heap(ReservedSpace rs, const char* name, CodeBlobType code_blob_type);
  static CodeHeap* get_code_heap_containing(void* p);          // Returns the CodeHeap containing the given pointer, or nullptr
  static CodeHeap* get_code_heap(const void* cb);              // Returns the CodeHeap for the given CodeBlob
  static CodeHeap* get_code_heap(CodeBlobType code_blob_type); // Returns the CodeHeap for the given CodeBlobType
  // Returns the name of the VM option to set the size of the corresponding CodeHeap
  static const char* get_code_heap_flag_name(CodeBlobType code_blob_type);
  static ReservedSpace reserve_heap_memory(size_t size, size_t rs_ps); // Reserves one contiguous chunk of memory for the CodeHeaps

  // Iteration
  static CodeBlob* first_blob(CodeHeap* heap);              // Returns the first CodeBlob on the given CodeHeap
  static CodeBlob* first_blob(CodeBlobType code_blob_type); // Returns the first CodeBlob of the given type
  static CodeBlob* next_blob(CodeHeap* heap, CodeBlob* cb); // Returns the next CodeBlob on the given CodeHeap

 private:
  static size_t bytes_allocated_in_freelists();
  static int    allocated_segments();
  static size_t freelists_length();

  // Make private to prevent unsafe calls. Not all CodeBlob*'s are embedded in a CodeHeap.
  static bool contains(CodeBlob *p) { fatal("don't call me!"); return false; }

 public:
  // Initialization
  static void initialize();
  static size_t page_size(bool aligned = true, size_t min_pages = 1); // Returns the page size used by the CodeCache

  static int code_heap_compare(CodeHeap* const &lhs, CodeHeap* const &rhs);

  static void add_heap(CodeHeap* heap);
  static const GrowableArray<CodeHeap*>* heaps() { return _heaps; }
  static const GrowableArray<CodeHeap*>* nmethod_heaps() { return _nmethod_heaps; }

  // Allocation/administration
  static CodeBlob* allocate(uint size, CodeBlobType code_blob_type, bool handle_alloc_failure = true, CodeBlobType orig_code_blob_type = CodeBlobType::All); // allocates a new CodeBlob
  static void commit(CodeBlob* cb);                        // called when the allocated CodeBlob has been filled
  static void free(CodeBlob* cb);                          // frees a CodeBlob
  static void free_unused_tail(CodeBlob* cb, size_t used); // frees the unused tail of a CodeBlob (only used by TemplateInterpreter::initialize())
  static bool contains(void *p);                           // returns whether p is included in the CodeCache
  static bool contains(nmethod* nm);                       // returns whether nm is included in the CodeCache
  static void blobs_do(void f(CodeBlob* cb));              // iterates over all CodeBlobs
  static void nmethods_do(void f(nmethod* nm));            // iterates over all nmethods
  static void nmethods_do(NMethodClosure* cl);             // iterates over all nmethods
  static void metadata_do(MetadataClosure* f);             // iterates over metadata in alive nmethods

  // Lookup
  static CodeBlob* find_blob(void* start);                       // Returns the CodeBlob containing the given address
  static CodeBlob* find_blob_fast(void* start);                  // Returns the CodeBlob containing the given address
  static CodeBlob* find_blob_and_oopmap(void* start, int& slot); // Returns the CodeBlob containing the given address
  static int find_oopmap_slot_fast(void* start);                 // Returns a fast oopmap slot if there is any; -1 otherwise
  static nmethod* find_nmethod(void* start);                     // Returns the nmethod containing the given address

  static int blob_count();    // Returns the total number of CodeBlobs in the cache
  static int blob_count(CodeBlobType code_blob_type);
  static int adapter_count(); // Returns the total number of Adapters in the cache
  static int adapter_count(CodeBlobType code_blob_type);
  static int nmethod_count(); // Returns the total number of nmethods in the cache
  static int nmethod_count(CodeBlobType code_blob_type);

  // GC support
  static void verify_oops();

  // Helper scope object managing code cache unlinking behavior: it sets and
  // restores the closure that determines which nmethods are going to be
  // removed during the unlinking part of code cache unloading.
  class UnlinkingScope : StackObj {
    ClosureIsUnloadingBehaviour _is_unloading_behaviour;
    IsUnloadingBehaviour*       _saved_behaviour;

  public:
    UnlinkingScope(BoolObjectClosure* is_alive);
    ~UnlinkingScope();
  };
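
  // A sketch of the intended use (simplified, not copied from a caller):
  // install the unloading behaviour for the duration of the unlinking pass
  // and let the destructor restore the previously installed one.
  //
  //   {
  //     CodeCache::UnlinkingScope scope(is_alive);
  //     // ... walk nmethods and unlink those reported as unloading ...
  //   } // previous IsUnloadingBehaviour restored here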

  // Code cache unloading heuristics
  static uint64_t cold_gc_count();
  static void update_cold_gc_count();
  static void gc_on_allocation();

  // The GC epoch and marking_cycle code below is there to support sweeping
  // nmethods in loom stack chunks.
  static uint64_t gc_epoch();
  static bool is_gc_marking_cycle_active();
  static uint64_t previous_completed_gc_marking_cycle();
  static void on_gc_marking_cycle_start();
  static void on_gc_marking_cycle_finish();
  // Arm nmethods so that special actions are taken (nmethod_entry_barrier) for
  // on-stack nmethods. It's used in two places:
  // 1. Before the start of concurrent marking so that oops inside
  //    on-stack nmethods are visited.
  // 2. At the end of (STW or concurrent) marking so that nmethod::_gc_epoch
  //    is up-to-date, which provides a more accurate estimate of
  //    nmethod::is_cold.
  static void arm_all_nmethods();

  static void maybe_restart_compiler(size_t freed_memory);
  static void do_unloading(bool unloading_occurred);
  static uint8_t unloading_cycle() { return _unloading_cycle; }

  static void increment_unloading_cycle();

  static void release_exception_cache(ExceptionCache* entry);
  static void purge_exception_caches();

  // Printing/debugging
  static void print();   // prints summary
  static void print_internals();
  static void print_memory_overhead();
  static void verify();  // verifies the code cache
  static void print_trace(const char* event, CodeBlob* cb, uint size = 0) PRODUCT_RETURN;
  static void print_summary(outputStream* st, bool detailed = true); // Prints a summary of the code cache usage
  static void log_state(outputStream* st);
  LINUX_ONLY(static void write_perf_map(const char* filename, outputStream* st);) // Prints warnings and error messages to outputStream
  static const char* get_code_heap_name(CodeBlobType code_blob_type) { return (heap_available(code_blob_type) ? get_code_heap(code_blob_type)->name() : "Unused"); }
  static void report_codemem_full(CodeBlobType code_blob_type, bool print);

  // Dcmd (Diagnostic commands)
  static void print_codelist(outputStream* st);
  static void print_layout(outputStream* st);

  // The full limits of the codeCache
  static address low_bound()  { return _low_bound; }
  static address low_bound(CodeBlobType code_blob_type);
  static address high_bound() { return _high_bound; }
  static address high_bound(CodeBlobType code_blob_type);

  // Profiling
  static size_t capacity();
  static size_t unallocated_capacity(CodeBlobType code_blob_type);
  static size_t unallocated_capacity();
  static size_t max_capacity();

  static double reverse_free_ratio();

  static size_t max_distance_to_non_nmethod();
  static bool is_non_nmethod(address addr);

  static void clear_inline_caches();            // clear all inline caches
  static void cleanup_inline_caches_whitebox(); // clean bad nmethods from inline caches

  // Returns true if a dedicated CodeHeap for the given CodeBlobType is available
  static bool heap_available(CodeBlobType code_blob_type);

  // Returns the CodeBlobType for the given nmethod
  static CodeBlobType get_code_blob_type(const nmethod* nm) {
    return get_code_heap(nm)->code_blob_type();
  }

  // The following predicates rely on the declaration order of CodeBlobType:
  // the method (nmethod) types precede NonNMethod and All.
  static bool code_blob_type_accepts_nmethod(CodeBlobType type) {
    return type == CodeBlobType::All || type <= CodeBlobType::MethodProfiled;
  }

  static bool code_blob_type_accepts_allocable(CodeBlobType type) {
    return type <= CodeBlobType::All;
  }

  // Returns the CodeBlobType for the given compilation level
  static CodeBlobType get_code_blob_type(int comp_level) {
    if (comp_level == CompLevel_none ||
        comp_level == CompLevel_simple ||
        comp_level == CompLevel_full_optimization) {
      // Non profiled methods
      return CodeBlobType::MethodNonProfiled;
    } else if (comp_level == CompLevel_limited_profile ||
               comp_level == CompLevel_full_profile) {
      // Profiled methods
      return CodeBlobType::MethodProfiled;
    }
    ShouldNotReachHere();
    return static_cast<CodeBlobType>(0);
  }

  static void verify_clean_inline_caches();

  // Deoptimization
 private:
  static void mark_for_deoptimization(DeoptimizationScope* deopt_scope, KlassDepChange& changes);

 public:
  static void mark_all_nmethods_for_deoptimization(DeoptimizationScope* deopt_scope);
  static void mark_for_deoptimization(DeoptimizationScope* deopt_scope, Method* dependee);
  static void make_marked_nmethods_deoptimized();

  // Marks dependents during classloading
  static void mark_dependents_on(DeoptimizationScope* deopt_scope, InstanceKlass* dependee);

  // RedefineClasses support
  // Marks in case of evolution
  static void mark_dependents_for_evol_deoptimization(DeoptimizationScope* deopt_scope);
  static void mark_all_nmethods_for_evol_deoptimization(DeoptimizationScope* deopt_scope);
  static void old_nmethods_do(MetadataClosure* f) NOT_JVMTI_RETURN;
  static void unregister_old_nmethod(nmethod* c) NOT_JVMTI_RETURN;

  // Support for fullspeed debugging
  static void mark_dependents_on_method_for_breakpoint(const methodHandle& dependee);

  // tells if there are nmethods with dependencies
  static bool has_nmethods_with_dependencies();

  static int get_codemem_full_count(CodeBlobType code_blob_type) {
    CodeHeap* heap = get_code_heap(code_blob_type);
    return (heap != nullptr) ? heap->full_count() : 0;
  }

  // CodeHeap State Analytics.
  // Interface methods for CodeHeap printing, called by CompileBroker.
  static void aggregate(outputStream *out, size_t granularity);
  static void discard(outputStream *out);
  static void print_usedSpace(outputStream *out);
  static void print_freeSpace(outputStream *out);
  static void print_count(outputStream *out);
  static void print_space(outputStream *out);
  static void print_age(outputStream *out);
  static void print_names(outputStream *out);
};


// Iterator over CodeBlobs in the CodeCache.
// The relaxed iterators only hold the CodeCache_lock across calls to next().
template <class T, class Filter, bool is_relaxed> class CodeBlobIterator : public StackObj {
 public:
  enum LivenessFilter { all, not_unloading };

 private:
  CodeBlob* _code_blob; // Current CodeBlob
  GrowableArrayIterator<CodeHeap*> _heap;
  GrowableArrayIterator<CodeHeap*> _end;
  bool _not_unloading;  // If true, skip nmethods that are unloading

  bool next_impl() {
    for (;;) {
      // Walk through heaps as required
      if (!next_blob()) {
        if (_heap == _end) {
          return false;
        }
        ++_heap;
        continue;
      }

      // Filter is_unloading as required
      if (_not_unloading) {
        nmethod* nm = _code_blob->as_nmethod_or_null();
        if (nm != nullptr && nm->is_unloading()) {
          continue;
        }
      }

      return true;
    }
  }

 public:
  CodeBlobIterator(LivenessFilter filter, T* nm = nullptr)
    : _not_unloading(filter == not_unloading)
  {
    if (Filter::heaps() == nullptr) {
      // The iterator is supposed to shortcut since we have
      // _heap == _end, but make sure we do not have garbage
      // in other fields as well.
      _code_blob = nullptr;
      return;
    }
    _heap = Filter::heaps()->begin();
    _end  = Filter::heaps()->end();
    // If set to nullptr, initialized by first call to next()
    _code_blob = nm;
    if (nm != nullptr) {
      // Position the heap iterator at the heap containing the given method,
      // guarding against running past the last heap.
      while (_heap != _end && !(*_heap)->contains(_code_blob)) {
        ++_heap;
      }
      assert(_heap != _end, "match not found");
    }
  }

  // Advance iterator to next blob
  bool next() {
    if (is_relaxed) {
      MutexLocker ml(CodeCache_lock, Mutex::_no_safepoint_check_flag);
      return next_impl();
    } else {
      assert_locked_or_safepoint(CodeCache_lock);
      return next_impl();
    }
  }

  bool end()  const { return _code_blob == nullptr; }
  T* method() const { return (T*)_code_blob; }

 private:

  // Advance iterator to the next blob in the current code heap
  bool next_blob() {
    if (_heap == _end) {
      return false;
    }
    CodeHeap* heap = *_heap;
    // Get first method CodeBlob
    if (_code_blob == nullptr) {
      _code_blob = CodeCache::first_blob(heap);
      if (_code_blob == nullptr) {
        return false;
      } else if (Filter::apply(_code_blob)) {
        return true;
      }
    }
    // Search for next method CodeBlob
    _code_blob = CodeCache::next_blob(heap, _code_blob);
    while (_code_blob != nullptr && !Filter::apply(_code_blob)) {
      _code_blob = CodeCache::next_blob(heap, _code_blob);
    }
    return _code_blob != nullptr;
  }
};

struct NMethodFilter {
  static bool apply(CodeBlob* cb) { return cb->is_nmethod(); }
  static const GrowableArray<CodeHeap*>* heaps() { return CodeCache::nmethod_heaps(); }
};

struct AllCodeBlobsFilter {
  static bool apply(CodeBlob* cb) { return true; }
  static const GrowableArray<CodeHeap*>* heaps() { return CodeCache::heaps(); }
};

typedef CodeBlobIterator<nmethod, NMethodFilter, false /* is_relaxed */> NMethodIterator;
typedef CodeBlobIterator<nmethod, NMethodFilter, true  /* is_relaxed */> RelaxedNMethodIterator;
typedef CodeBlobIterator<CodeBlob, AllCodeBlobsFilter, false /* is_relaxed */> AllCodeBlobsIterator;
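
// A usage sketch (illustrative, not copied from a caller in the VM): the
// relaxed variant takes the CodeCache_lock only around each next() call,
// so blobs may be allocated or freed between steps.
//
//   RelaxedNMethodIterator iter(RelaxedNMethodIterator::all);
//   while (iter.next()) {
//     nmethod* nm = iter.method();
//     // ... nm is not protected once the lock is dropped; use with care ...
//   }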

#endif // SHARE_CODE_CODECACHE_HPP