5 * This code is free software; you can redistribute it and/or modify it
6 * under the terms of the GNU General Public License version 2 only, as
7 * published by the Free Software Foundation.
8 *
9 * This code is distributed in the hope that it will be useful, but WITHOUT
10 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
11 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
12 * version 2 for more details (a copy is included in the LICENSE file that
13 * accompanied this code).
14 *
15 * You should have received a copy of the GNU General Public License version
16 * 2 along with this work; if not, write to the Free Software Foundation,
17 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
18 *
19 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
20 * or visit www.oracle.com if you need additional information or have any
21 * questions.
22 *
23 */
24
25 #include "code/codeBlob.hpp"
26 #include "code/codeCache.hpp"
27 #include "code/codeHeapState.hpp"
28 #include "code/compiledIC.hpp"
29 #include "code/dependencies.hpp"
30 #include "code/dependencyContext.hpp"
31 #include "code/nmethod.hpp"
32 #include "code/pcDesc.hpp"
33 #include "compiler/compilationPolicy.hpp"
34 #include "compiler/compileBroker.hpp"
35 #include "compiler/compilerDefinitions.inline.hpp"
36 #include "compiler/oopMap.hpp"
37 #include "gc/shared/barrierSetNMethod.hpp"
38 #include "gc/shared/classUnloadingContext.hpp"
39 #include "gc/shared/collectedHeap.hpp"
40 #include "jfr/jfrEvents.hpp"
41 #include "jvm_io.h"
42 #include "logging/log.hpp"
43 #include "logging/logStream.hpp"
44 #include "memory/allocation.inline.hpp"
153 scopes_data_size += nm->scopes_data_size();
154 scopes_pcs_size += nm->scopes_pcs_size();
155 } else {
156 code_size += cb->code_size();
157 }
158 }
159 };
160
161 // Iterate over all CodeHeaps
162 #define FOR_ALL_HEAPS(heap) for (GrowableArrayIterator<CodeHeap*> heap = _heaps->begin(); heap != _heaps->end(); ++heap)
163 #define FOR_ALL_ALLOCABLE_HEAPS(heap) for (GrowableArrayIterator<CodeHeap*> heap = _allocable_heaps->begin(); heap != _allocable_heaps->end(); ++heap)
164
165 // Iterate over all CodeBlobs (cb) on the given CodeHeap
166 #define FOR_ALL_BLOBS(cb, heap) for (CodeBlob* cb = first_blob(heap); cb != nullptr; cb = next_blob(heap, cb))
167
168 address CodeCache::_low_bound = nullptr;
169 address CodeCache::_high_bound = nullptr;
170 volatile int CodeCache::_number_of_nmethods_with_dependencies = 0;
171 ExceptionCache* volatile CodeCache::_exception_cache_purge_list = nullptr;
172
173 // Initialize arrays of CodeHeap subsets
174 GrowableArray<CodeHeap*>* CodeCache::_heaps = new(mtCode) GrowableArray<CodeHeap*> (static_cast<int>(CodeBlobType::All), mtCode);
175 GrowableArray<CodeHeap*>* CodeCache::_nmethod_heaps = new(mtCode) GrowableArray<CodeHeap*> (static_cast<int>(CodeBlobType::All), mtCode);
176 GrowableArray<CodeHeap*>* CodeCache::_allocable_heaps = new(mtCode) GrowableArray<CodeHeap*> (static_cast<int>(CodeBlobType::All), mtCode);
177
178 static void check_min_size(const char* codeheap, size_t size, size_t required_size) {
179 if (size < required_size) {
180 log_debug(codecache)("Code heap (%s) size %zuK below required minimal size %zuK",
181 codeheap, size/K, required_size/K);
182 err_msg title("Not enough space in %s to run VM", codeheap);
183 err_msg message("%zuK < %zuK", size/K, required_size/K);
184 vm_exit_during_initialization(title, message);
185 }
186 }
187
188 struct CodeHeapInfo {
189 size_t size;
190 bool set;
191 bool enabled;
192 };
193
194 static void set_size_of_unset_code_heap(CodeHeapInfo* heap, size_t available_size, size_t used_size, size_t min_size) {
195 assert(!heap->set, "sanity");
196 heap->size = (available_size > (used_size + min_size)) ? (available_size - used_size) : min_size;
197 }
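// Editorial example (values are illustrative, not from the original source):
// with available_size = 240M, used_size = 130M and min_size = 4M the unset
// heap receives 240M - 130M = 110M; if used_size + min_size were to exceed
// available_size, the heap would instead be clamped to min_size (4M).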
198
199 void CodeCache::initialize_heaps() {
200
201 CodeHeapInfo non_nmethod = {NonNMethodCodeHeapSize, FLAG_IS_CMDLINE(NonNMethodCodeHeapSize), true};
202 CodeHeapInfo profiled = {ProfiledCodeHeapSize, FLAG_IS_CMDLINE(ProfiledCodeHeapSize), true};
203 CodeHeapInfo non_profiled = {NonProfiledCodeHeapSize, FLAG_IS_CMDLINE(NonProfiledCodeHeapSize), true};
204
205 const bool cache_size_set = FLAG_IS_CMDLINE(ReservedCodeCacheSize);
206 const size_t ps = page_size(false, 8);
207 const size_t min_size = MAX2(os::vm_allocation_granularity(), ps);
208 const size_t min_cache_size = CodeCacheMinimumUseSpace DEBUG_ONLY(* 3); // Make sure we have enough space for VM internal code
209 size_t cache_size = align_up(ReservedCodeCacheSize, min_size);
210
211 // Prerequisites
212 if (!heap_available(CodeBlobType::MethodProfiled)) {
213 // For compatibility reasons, disabling tiered compilation overrides the
214 // segment sizes even if they are set explicitly.
215 non_profiled.size += profiled.size;
216 // The profiled code heap is not available, so forcibly set its size to 0
217 profiled.size = 0;
218 profiled.set = true;
219 profiled.enabled = false;
220 }
221
222 assert(heap_available(CodeBlobType::MethodNonProfiled), "MethodNonProfiled heap is always available for segmented code heap");
223
224 size_t compiler_buffer_size = 0;
225 COMPILER1_PRESENT(compiler_buffer_size += CompilationPolicy::c1_count() * Compiler::code_buffer_size());
226 COMPILER2_PRESENT(compiler_buffer_size += CompilationPolicy::c2_count() * C2Compiler::initial_code_buffer_size());
227
228 if (!non_nmethod.set) {
229 non_nmethod.size += compiler_buffer_size;
230 }
231
232 if (!profiled.set && !non_profiled.set) {
233 non_profiled.size = profiled.size = (cache_size > non_nmethod.size + 2 * min_size) ?
234 (cache_size - non_nmethod.size) / 2 : min_size;
235 }
236
237 if (profiled.set && !non_profiled.set) {
238 set_size_of_unset_code_heap(&non_profiled, cache_size, non_nmethod.size + profiled.size, min_size);
239 }
240
241 if (!profiled.set && non_profiled.set) {
242 set_size_of_unset_code_heap(&profiled, cache_size, non_nmethod.size + non_profiled.size, min_size);
243 }
244
245 // Compatibility.
246 size_t non_nmethod_min_size = min_cache_size + compiler_buffer_size;
247 if (!non_nmethod.set && profiled.set && non_profiled.set) {
295 const size_t lg_ps = page_size(false, 1);
296 if (ps < lg_ps) {
297 log_warning(codecache)("Code cache size too small for " PROPERFMT " pages. "
298 "Reverting to smaller page size (" PROPERFMT ").",
299 PROPERFMTARGS(lg_ps), PROPERFMTARGS(ps));
300 }
301 }
302
303 // Note: if large page support is enabled, min_size is at least the large
304 // page size. This ensures that the code cache is covered by large pages.
305 non_nmethod.size = align_up(non_nmethod.size, min_size);
306 profiled.size = align_up(profiled.size, min_size);
307 non_profiled.size = align_up(non_profiled.size, min_size);
308 cache_size = non_nmethod.size + profiled.size + non_profiled.size;
309
310 FLAG_SET_ERGO(NonNMethodCodeHeapSize, non_nmethod.size);
311 FLAG_SET_ERGO(ProfiledCodeHeapSize, profiled.size);
312 FLAG_SET_ERGO(NonProfiledCodeHeapSize, non_profiled.size);
313 FLAG_SET_ERGO(ReservedCodeCacheSize, cache_size);
314
315 ReservedSpace rs = reserve_heap_memory(cache_size, ps);
316
317 // Register CodeHeaps with LSan as we sometimes embed pointers to malloc memory.
318 LSAN_REGISTER_ROOT_REGION(rs.base(), rs.size());
319
320 size_t offset = 0;
321 if (profiled.enabled) {
322 ReservedSpace profiled_space = rs.partition(offset, profiled.size);
323 offset += profiled.size;
324 // Tier 2 and tier 3 (profiled) methods
325 add_heap(profiled_space, "CodeHeap 'profiled nmethods'", CodeBlobType::MethodProfiled);
326 }
327
328 ReservedSpace non_method_space = rs.partition(offset, non_nmethod.size);
329 offset += non_nmethod.size;
330 // Non-nmethods (stubs, adapters, ...)
331 add_heap(non_method_space, "CodeHeap 'non-nmethods'", CodeBlobType::NonNMethod);
332
333 if (non_profiled.enabled) {
334 ReservedSpace non_profiled_space = rs.partition(offset, non_profiled.size);
335 // Tier 1 and tier 4 (non-profiled) methods and native methods
336 add_heap(non_profiled_space, "CodeHeap 'non-profiled nmethods'", CodeBlobType::MethodNonProfiled);
337 }
338 }
339
340 size_t CodeCache::page_size(bool aligned, size_t min_pages) {
341 return aligned ? os::page_size_for_region_aligned(ReservedCodeCacheSize, min_pages) :
342 os::page_size_for_region_unaligned(ReservedCodeCacheSize, min_pages);
343 }
344
345 ReservedSpace CodeCache::reserve_heap_memory(size_t size, size_t rs_ps) {
346 // Align and reserve space for code cache
347 const size_t rs_align = MAX2(rs_ps, os::vm_allocation_granularity());
348 const size_t rs_size = align_up(size, rs_align);
349
350 ReservedSpace rs = CodeMemoryReserver::reserve(rs_size, rs_align, rs_ps);
351 if (!rs.is_reserved()) {
352 vm_exit_during_initialization(err_msg("Could not reserve enough space for code cache (%zuK)",
353 rs_size/K));
354 }
355
356 // Initialize bounds
357 _low_bound = (address)rs.base();
358 _high_bound = _low_bound + rs.size();
359 return rs;
1037 size_t CodeCache::max_capacity() {
1038 size_t max_cap = 0;
1039 FOR_ALL_ALLOCABLE_HEAPS(heap) {
1040 max_cap += (*heap)->max_capacity();
1041 }
1042 return max_cap;
1043 }
1044
1045 bool CodeCache::is_non_nmethod(address addr) {
1046 CodeHeap* blob = get_code_heap(CodeBlobType::NonNMethod);
1047 return blob->contains(addr);
1048 }
1049
1050 size_t CodeCache::max_distance_to_non_nmethod() {
1051 if (!SegmentedCodeCache) {
1052 return ReservedCodeCacheSize;
1053 } else {
1054 CodeHeap* blob = get_code_heap(CodeBlobType::NonNMethod);
1055 // The max distance is minimized by placing the NonNMethod segment
1056 // between the MethodProfiled and MethodNonProfiled segments.
1057 size_t dist1 = (size_t)blob->high() - (size_t)_low_bound;
1058 size_t dist2 = (size_t)_high_bound - (size_t)blob->low();
1059 return dist1 > dist2 ? dist1 : dist2;
1060 }
1061 }
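// Editorial note (assuming the segment order created in initialize_heaps():
// profiled, then non-nmethod, then non-profiled): dist1 spans from _low_bound
// up to the top of the non-nmethod heap and dist2 from its bottom up to
// _high_bound, so the larger of the two bounds the worst-case distance from
// any nmethod to non-nmethod code such as runtime stubs.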
1062
1063 // Returns the reverse free ratio. E.g., if 25% (1/4) of the code cache
1064 // is free, reverse_free_ratio() returns 4.
1065 // Since the code heap for each type of code blob falls forward to the next
1066 // type of code heap, return the reverse free ratio for the entire
1067 // code cache.
1068 double CodeCache::reverse_free_ratio() {
1069 double unallocated = MAX2((double)unallocated_capacity(), 1.0); // Avoid division by 0;
1070 double max = (double)max_capacity();
1071 double result = max / unallocated;
1072 assert (max >= unallocated, "Must be");
1073 assert (result >= 1.0, "reverse_free_ratio must be at least 1. It is %f", result);
1074 return result;
1075 }
1076
1077 size_t CodeCache::bytes_allocated_in_freelists() {
1078 size_t allocated_bytes = 0;
1183 AnyObj::RESOURCE_AREA, mtInternal,
1184 &DependencySignature::hash,
1185 &DependencySignature::equals> DepTable;
1186
1187 DepTable* table = new DepTable();
1188
1189 // Iterate over live nmethods and check dependencies of all nmethods that are not
1190 // marked for deoptimization. A particular dependency is only checked once.
1191 NMethodIterator iter(NMethodIterator::not_unloading);
1192 while(iter.next()) {
1193 nmethod* nm = iter.method();
1194 // Only check nmethods that are not already marked for deoptimization
1195 if (!nm->is_marked_for_deoptimization()) {
1196 for (Dependencies::DepStream deps(nm); deps.next(); ) {
1197 // Construct abstraction of a dependency.
1198 DependencySignature* current_sig = new DependencySignature(deps);
1199
1200 // Determine if dependency is already checked. table->put(...) returns
1201 // 'true' if the dependency is added (i.e., was not in the hashtable).
1202 if (table->put(*current_sig, 1)) {
1203 if (deps.check_dependency() != nullptr) {
1204 // Dependency checking failed. Print out information about the failed
1205 // dependency and finally fail with an assert. We can fail here, since
1206 // dependency checking is never done in a product build.
1207 tty->print_cr("Failed dependency:");
1208 changes.print();
1209 nm->print();
1210 nm->print_dependencies_on(tty);
1211 assert(false, "Should have been marked for deoptimization");
1212 }
1213 }
1214 }
1215 }
1216 }
1217 }
1218 #endif
1219
1220 void CodeCache::mark_for_deoptimization(DeoptimizationScope* deopt_scope, KlassDepChange& changes) {
1221 MutexLocker mu(CodeCache_lock, Mutex::_no_safepoint_check_flag);
1222
1223 // search the hierarchy looking for nmethods which are affected by the loading of this class
1224
1225 // then search the interfaces this class implements looking for nmethods
1226 // which might be dependent on the fact that an interface only had one
1227 // implementor.
1228 // Dependency checking (check_live_nmethods_dependencies) works correctly
1229 // only if no safepoint can happen.
1230 NoSafepointVerifier nsv;
1231 for (DepChange::ContextStream str(changes, nsv); str.next(); ) {
1232 InstanceKlass* d = str.klass();
1233 d->mark_dependent_nmethods(deopt_scope, changes);
1234 }
1235
1236 #ifndef PRODUCT
1237 if (VerifyDependencies) {
1238 // Object pointers are used as unique identifiers for dependency arguments. This
1239 // is only possible if no safepoint, i.e., GC occurs during the verification code.
1240 dependentCheckTime.start();
1241 check_live_nmethods_dependencies(changes);
1242 dependentCheckTime.stop();
1243 }
1244 #endif
1245 }
1246
1247 #if INCLUDE_JVMTI
1248 // RedefineClasses support for saving nmethods that are dependent on "old" methods.
1249 // We don't really expect this table to grow very large. If it does, it can become a hashtable.
1250 static GrowableArray<nmethod*>* old_nmethod_table = nullptr;
1251
1252 static void add_to_old_table(nmethod* c) {
1471 }
1472 PRAGMA_DIAG_POP
1473
1474 void CodeCache::print_memory_overhead() {
1475 size_t wasted_bytes = 0;
1476 FOR_ALL_ALLOCABLE_HEAPS(heap) {
1477 CodeHeap* curr_heap = *heap;
1478 for (CodeBlob* cb = (CodeBlob*)curr_heap->first(); cb != nullptr; cb = (CodeBlob*)curr_heap->next(cb)) {
1479 HeapBlock* heap_block = ((HeapBlock*)cb) - 1;
1480 wasted_bytes += heap_block->length() * CodeCacheSegmentSize - cb->size();
1481 }
1482 }
1483 // Print bytes that are allocated in the freelist
1484 ttyLocker ttl;
1485 tty->print_cr("Number of elements in freelist: %zd", freelists_length());
1486 tty->print_cr("Allocated in freelist: %zdkB", bytes_allocated_in_freelists()/K);
1487 tty->print_cr("Unused bytes in CodeBlobs: %zdkB", (wasted_bytes/K));
1488 tty->print_cr("Segment map size: %zdkB", allocated_segments()/K); // 1 byte per segment
1489 }
1490
1491 //------------------------------------------------------------------------------------------------
1492 // Non-product version
1493
1494 #ifndef PRODUCT
1495
1496 void CodeCache::print_trace(const char* event, CodeBlob* cb, uint size) {
1497 if (PrintCodeCache2) { // Need to add a new flag
1498 ResourceMark rm;
1499 if (size == 0) {
1500 int s = cb->size();
1501 assert(s >= 0, "CodeBlob size is negative: %d", s);
1502 size = (uint) s;
1503 }
1504 tty->print_cr("CodeCache %s: addr: " INTPTR_FORMAT ", size: 0x%x", event, p2i(cb), size);
1505 }
1506 }
1507
1508 void CodeCache::print_internals() {
1509 int nmethodCount = 0;
1510 int runtimeStubCount = 0;
1511 int upcallStubCount = 0;
1512 int adapterCount = 0;
1513 int mhAdapterCount = 0;
1514 int vtableBlobCount = 0;
1515 int deoptimizationStubCount = 0;
1516 int uncommonTrapStubCount = 0;
1517 int exceptionStubCount = 0;
1518 int safepointStubCount = 0;
1519 int bufferBlobCount = 0;
1520 int total = 0;
1521 int nmethodNotEntrant = 0;
1522 int nmethodJava = 0;
1523 int nmethodNative = 0;
1524 int max_nm_size = 0;
1525 ResourceMark rm;
1526
1527 int i = 0;
1528 FOR_ALL_ALLOCABLE_HEAPS(heap) {
1529 if ((_nmethod_heaps->length() >= 1) && Verbose) {
1530 tty->print_cr("-- %s --", (*heap)->name());
1531 }
1532 FOR_ALL_BLOBS(cb, *heap) {
1533 total++;
1534 if (cb->is_nmethod()) {
1535 nmethod* nm = (nmethod*)cb;
1536
1537 if (Verbose && nm->method() != nullptr) {
1538 ResourceMark rm;
1539 char *method_name = nm->method()->name_and_sig_as_C_string();
1540 tty->print("%s", method_name);
1541 if(nm->is_not_entrant()) { tty->print_cr(" not-entrant"); }
1542 }
1543
1544 nmethodCount++;
1545
1546 if(nm->is_not_entrant()) { nmethodNotEntrant++; }
1547 if(nm->method() != nullptr && nm->is_native_method()) { nmethodNative++; }
1548
1549 if(nm->method() != nullptr && nm->is_java_method()) {
1550 nmethodJava++;
1551 max_nm_size = MAX2(max_nm_size, nm->size());
1552 }
1553 } else if (cb->is_runtime_stub()) {
1554 runtimeStubCount++;
1555 } else if (cb->is_upcall_stub()) {
1556 upcallStubCount++;
1557 } else if (cb->is_deoptimization_stub()) {
1558 deoptimizationStubCount++;
1559 } else if (cb->is_uncommon_trap_stub()) {
1560 uncommonTrapStubCount++;
1561 } else if (cb->is_exception_stub()) {
1562 exceptionStubCount++;
1719 FOR_ALL_BLOBS(cb, *heap) {
1720 number_of_blobs++;
1721 code_size += cb->code_size();
1722 ImmutableOopMapSet* set = cb->oop_maps();
1723 if (set != nullptr) {
1724 number_of_oop_maps += set->count();
1725 map_size += set->nr_of_bytes();
1726 }
1727 }
1728 }
1729 tty->print_cr("OopMaps");
1730 tty->print_cr(" #blobs = %d", number_of_blobs);
1731 tty->print_cr(" code size = %d", code_size);
1732 tty->print_cr(" #oop_maps = %d", number_of_oop_maps);
1733 tty->print_cr(" map size = %d", map_size);
1734 }
1735
1736 #endif // !PRODUCT
1737 }
1738
1739 void CodeCache::print_summary(outputStream* st, bool detailed) {
1740 int full_count = 0;
1741 julong total_used = 0;
1742 julong total_max_used = 0;
1743 julong total_free = 0;
1744 julong total_size = 0;
1745 FOR_ALL_HEAPS(heap_iterator) {
1746 CodeHeap* heap = (*heap_iterator);
1747 size_t total = (heap->high_boundary() - heap->low_boundary());
1748 if (_heaps->length() >= 1) {
1749 st->print("%s:", heap->name());
1750 } else {
1751 st->print("CodeCache:");
1752 }
1753 size_t size = total/K;
1754 size_t used = (total - heap->unallocated_capacity())/K;
1755 size_t max_used = heap->max_allocated_capacity()/K;
1756 size_t free = heap->unallocated_capacity()/K;
1757 total_size += size;
1758 total_used += used;
5 * This code is free software; you can redistribute it and/or modify it
6 * under the terms of the GNU General Public License version 2 only, as
7 * published by the Free Software Foundation.
8 *
9 * This code is distributed in the hope that it will be useful, but WITHOUT
10 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
11 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
12 * version 2 for more details (a copy is included in the LICENSE file that
13 * accompanied this code).
14 *
15 * You should have received a copy of the GNU General Public License version
16 * 2 along with this work; if not, write to the Free Software Foundation,
17 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
18 *
19 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
20 * or visit www.oracle.com if you need additional information or have any
21 * questions.
22 *
23 */
24
25 #include "cds/aotCacheAccess.hpp"
26 #include "code/aotCodeCache.hpp"
27 #include "code/codeBlob.hpp"
28 #include "code/codeCache.hpp"
29 #include "code/codeHeapState.hpp"
30 #include "code/compiledIC.hpp"
31 #include "code/dependencies.hpp"
32 #include "code/dependencyContext.hpp"
33 #include "code/nmethod.hpp"
34 #include "code/pcDesc.hpp"
35 #include "compiler/compilationPolicy.hpp"
36 #include "compiler/compileBroker.hpp"
37 #include "compiler/compilerDefinitions.inline.hpp"
38 #include "compiler/oopMap.hpp"
39 #include "gc/shared/barrierSetNMethod.hpp"
40 #include "gc/shared/classUnloadingContext.hpp"
41 #include "gc/shared/collectedHeap.hpp"
42 #include "jfr/jfrEvents.hpp"
43 #include "jvm_io.h"
44 #include "logging/log.hpp"
45 #include "logging/logStream.hpp"
46 #include "memory/allocation.inline.hpp"
155 scopes_data_size += nm->scopes_data_size();
156 scopes_pcs_size += nm->scopes_pcs_size();
157 } else {
158 code_size += cb->code_size();
159 }
160 }
161 };
162
163 // Iterate over all CodeHeaps
164 #define FOR_ALL_HEAPS(heap) for (GrowableArrayIterator<CodeHeap*> heap = _heaps->begin(); heap != _heaps->end(); ++heap)
165 #define FOR_ALL_ALLOCABLE_HEAPS(heap) for (GrowableArrayIterator<CodeHeap*> heap = _allocable_heaps->begin(); heap != _allocable_heaps->end(); ++heap)
166
167 // Iterate over all CodeBlobs (cb) on the given CodeHeap
168 #define FOR_ALL_BLOBS(cb, heap) for (CodeBlob* cb = first_blob(heap); cb != nullptr; cb = next_blob(heap, cb))
169
170 address CodeCache::_low_bound = nullptr;
171 address CodeCache::_high_bound = nullptr;
172 volatile int CodeCache::_number_of_nmethods_with_dependencies = 0;
173 ExceptionCache* volatile CodeCache::_exception_cache_purge_list = nullptr;
174
175 static ReservedSpace _cds_code_space;
176
177 // Initialize arrays of CodeHeap subsets
178 GrowableArray<CodeHeap*>* CodeCache::_heaps = new(mtCode) GrowableArray<CodeHeap*> (static_cast<int>(CodeBlobType::All), mtCode);
179 GrowableArray<CodeHeap*>* CodeCache::_nmethod_heaps = new(mtCode) GrowableArray<CodeHeap*> (static_cast<int>(CodeBlobType::All), mtCode);
180 GrowableArray<CodeHeap*>* CodeCache::_allocable_heaps = new(mtCode) GrowableArray<CodeHeap*> (static_cast<int>(CodeBlobType::All), mtCode);
181
182 static void check_min_size(const char* codeheap, size_t size, size_t required_size) {
183 if (size < required_size) {
184 log_debug(codecache)("Code heap (%s) size %zuK below required minimal size %zuK",
185 codeheap, size/K, required_size/K);
186 err_msg title("Not enough space in %s to run VM", codeheap);
187 err_msg message("%zuK < %zuK", size/K, required_size/K);
188 vm_exit_during_initialization(title, message);
189 }
190 }
191
192 struct CodeHeapInfo {
193 size_t size;
194 bool set;
195 bool enabled;
196 };
197
198 static void set_size_of_unset_code_heap(CodeHeapInfo* heap, size_t available_size, size_t used_size, size_t min_size) {
199 assert(!heap->set, "sanity");
200 heap->size = (available_size > (used_size + min_size)) ? (available_size - used_size) : min_size;
201 }
202
203 void CodeCache::initialize_heaps() {
204 CodeHeapInfo non_nmethod = {NonNMethodCodeHeapSize, FLAG_IS_CMDLINE(NonNMethodCodeHeapSize), true};
205 CodeHeapInfo profiled = {ProfiledCodeHeapSize, FLAG_IS_CMDLINE(ProfiledCodeHeapSize), true};
206 CodeHeapInfo non_profiled = {NonProfiledCodeHeapSize, FLAG_IS_CMDLINE(NonProfiledCodeHeapSize), true};
207
208 const bool cache_size_set = FLAG_IS_CMDLINE(ReservedCodeCacheSize);
209 const size_t ps = page_size(false, 8);
210 const size_t min_size = MAX2(os::vm_allocation_granularity(), ps);
211 const size_t min_cache_size = CodeCacheMinimumUseSpace DEBUG_ONLY(* 3); // Make sure we have enough space for VM internal code
212 size_t cache_size = align_up(ReservedCodeCacheSize, min_size);
213
214 // Prerequisites
215 if (!heap_available(CodeBlobType::MethodProfiled)) {
216 // For compatibility reasons, disabling tiered compilation overrides the
217 // segment sizes even if they are set explicitly.
218 non_profiled.size += profiled.size;
219 // The profiled code heap is not available, so forcibly set its size to 0
220 profiled.size = 0;
221 profiled.set = true;
222 profiled.enabled = false;
223 }
224
225 assert(heap_available(CodeBlobType::MethodNonProfiled), "MethodNonProfiled heap is always available for segmented code heap");
226
227 size_t compiler_buffer_size = 0;
228 COMPILER1_PRESENT(compiler_buffer_size += CompilationPolicy::c1_count() * Compiler::code_buffer_size());
229 COMPILER2_PRESENT(compiler_buffer_size += CompilationPolicy::c2_count() * C2Compiler::initial_code_buffer_size());
230
231 // During the AOT assembly phase more compiler threads are used
232 // and the C2 temporary buffer is bigger.
233 // But due to rounding issues the total code cache size could be smaller
234 // than during a production run. We cannot use AOT code in that case
235 // because branch and call instructions would be incorrect.
236 //
237 // Increase the code cache size to guarantee that the total size
238 // will be bigger during the assembly phase.
239 if (AOTCodeCache::maybe_dumping_code()) {
240 cache_size += align_up(compiler_buffer_size, min_size);
241 cache_size = MIN2(cache_size, CODE_CACHE_SIZE_LIMIT);
242 }
243
244 if (!non_nmethod.set) {
245 non_nmethod.size += compiler_buffer_size;
246 }
247
248 if (!profiled.set && !non_profiled.set) {
249 non_profiled.size = profiled.size = (cache_size > non_nmethod.size + 2 * min_size) ?
250 (cache_size - non_nmethod.size) / 2 : min_size;
251 }
252
253 if (profiled.set && !non_profiled.set) {
254 set_size_of_unset_code_heap(&non_profiled, cache_size, non_nmethod.size + profiled.size, min_size);
255 }
256
257 if (!profiled.set && non_profiled.set) {
258 set_size_of_unset_code_heap(&profiled, cache_size, non_nmethod.size + non_profiled.size, min_size);
259 }
260
261 // Compatibility.
262 size_t non_nmethod_min_size = min_cache_size + compiler_buffer_size;
263 if (!non_nmethod.set && profiled.set && non_profiled.set) {
311 const size_t lg_ps = page_size(false, 1);
312 if (ps < lg_ps) {
313 log_warning(codecache)("Code cache size too small for " PROPERFMT " pages. "
314 "Reverting to smaller page size (" PROPERFMT ").",
315 PROPERFMTARGS(lg_ps), PROPERFMTARGS(ps));
316 }
317 }
318
319 // Note: if large page support is enabled, min_size is at least the large
320 // page size. This ensures that the code cache is covered by large pages.
321 non_nmethod.size = align_up(non_nmethod.size, min_size);
322 profiled.size = align_up(profiled.size, min_size);
323 non_profiled.size = align_up(non_profiled.size, min_size);
324 cache_size = non_nmethod.size + profiled.size + non_profiled.size;
325
326 FLAG_SET_ERGO(NonNMethodCodeHeapSize, non_nmethod.size);
327 FLAG_SET_ERGO(ProfiledCodeHeapSize, profiled.size);
328 FLAG_SET_ERGO(NonProfiledCodeHeapSize, non_profiled.size);
329 FLAG_SET_ERGO(ReservedCodeCacheSize, cache_size);
330
331 const size_t cds_code_size = 0;
332 // FIXME: we should not increase the CodeCache size - it affects branches.
333 // Instead we need to create a separate code heap in the CodeCache for AOT code.
334 // const size_t cds_code_size = align_up(AOTCacheAccess::get_aot_code_region_size(), min_size);
335 // cache_size += cds_code_size;
336
337 ReservedSpace rs = reserve_heap_memory(cache_size, ps);
338
339 // Register CodeHeaps with LSan as we sometimes embed pointers to malloc memory.
340 LSAN_REGISTER_ROOT_REGION(rs.base(), rs.size());
341
342 size_t offset = 0;
343 if (cds_code_size > 0) {
344 // FIXME: use CodeHeapInfo for this hack ...
345 _cds_code_space = rs.partition(offset, cds_code_size);
346 offset += cds_code_size;
347 }
348
349 if (profiled.enabled) {
350 ReservedSpace profiled_space = rs.partition(offset, profiled.size);
351 offset += profiled.size;
352 // Tier 2 and tier 3 (profiled) methods
353 add_heap(profiled_space, "CodeHeap 'profiled nmethods'", CodeBlobType::MethodProfiled);
354 }
355
356 ReservedSpace non_method_space = rs.partition(offset, non_nmethod.size);
357 offset += non_nmethod.size;
358 // Non-nmethods (stubs, adapters, ...)
359 add_heap(non_method_space, "CodeHeap 'non-nmethods'", CodeBlobType::NonNMethod);
360
361 if (non_profiled.enabled) {
362 ReservedSpace non_profiled_space = rs.partition(offset, non_profiled.size);
363 // Tier 1 and tier 4 (non-profiled) methods and native methods
364 add_heap(non_profiled_space, "CodeHeap 'non-profiled nmethods'", CodeBlobType::MethodNonProfiled);
365 }
366 }
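// Editorial summary of the layout established above, from low to high address:
// [optional AOT/CDS code region][profiled nmethods][non-nmethods]
// [non-profiled nmethods], all within the single reserved space whose
// _low_bound/_high_bound were recorded in reserve_heap_memory().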
367
368 void* CodeCache::map_aot_code() {
369 if (_cds_code_space.size() > 0 && AOTCacheAccess::map_aot_code_region(_cds_code_space)) {
370 return _cds_code_space.base();
371 } else {
372 return nullptr;
373 }
374 }
375
376 size_t CodeCache::page_size(bool aligned, size_t min_pages) {
377 return aligned ? os::page_size_for_region_aligned(ReservedCodeCacheSize, min_pages) :
378 os::page_size_for_region_unaligned(ReservedCodeCacheSize, min_pages);
379 }
380
381 ReservedSpace CodeCache::reserve_heap_memory(size_t size, size_t rs_ps) {
382 // Align and reserve space for code cache
383 const size_t rs_align = MAX2(rs_ps, os::vm_allocation_granularity());
384 const size_t rs_size = align_up(size, rs_align);
385
386 ReservedSpace rs = CodeMemoryReserver::reserve(rs_size, rs_align, rs_ps);
387 if (!rs.is_reserved()) {
388 vm_exit_during_initialization(err_msg("Could not reserve enough space for code cache (%zuK)",
389 rs_size/K));
390 }
391
392 // Initialize bounds
393 _low_bound = (address)rs.base();
394 _high_bound = _low_bound + rs.size();
395 return rs;
1073 size_t CodeCache::max_capacity() {
1074 size_t max_cap = 0;
1075 FOR_ALL_ALLOCABLE_HEAPS(heap) {
1076 max_cap += (*heap)->max_capacity();
1077 }
1078 return max_cap;
1079 }
1080
1081 bool CodeCache::is_non_nmethod(address addr) {
1082 CodeHeap* blob = get_code_heap(CodeBlobType::NonNMethod);
1083 return blob->contains(addr);
1084 }
1085
1086 size_t CodeCache::max_distance_to_non_nmethod() {
1087 if (!SegmentedCodeCache) {
1088 return ReservedCodeCacheSize;
1089 } else {
1090 CodeHeap* blob = get_code_heap(CodeBlobType::NonNMethod);
1091 // The max distance is minimized by placing the NonNMethod segment
1092 // between the MethodProfiled and MethodNonProfiled segments.
1093 size_t dist1 = (size_t)blob->high_boundary() - (size_t)_low_bound;
1094 size_t dist2 = (size_t)_high_bound - (size_t)blob->low_boundary();
1095 return dist1 > dist2 ? dist1 : dist2;
1096 }
1097 }
1098
1099 // Returns the reverse free ratio. E.g., if 25% (1/4) of the code cache
1100 // is free, reverse_free_ratio() returns 4.
1101 // Since the code heap for each type of code blob falls forward to the next
1102 // type of code heap, return the reverse free ratio for the entire
1103 // code cache.
1104 double CodeCache::reverse_free_ratio() {
1105 double unallocated = MAX2((double)unallocated_capacity(), 1.0); // Avoid division by 0;
1106 double max = (double)max_capacity();
1107 double result = max / unallocated;
1108 assert (max >= unallocated, "Must be");
1109 assert (result >= 1.0, "reverse_free_ratio must be at least 1. It is %f", result);
1110 return result;
1111 }
1112
1113 size_t CodeCache::bytes_allocated_in_freelists() {
1114 size_t allocated_bytes = 0;
1219 AnyObj::RESOURCE_AREA, mtInternal,
1220 &DependencySignature::hash,
1221 &DependencySignature::equals> DepTable;
1222
1223 DepTable* table = new DepTable();
1224
1225 // Iterate over live nmethods and check dependencies of all nmethods that are not
1226 // marked for deoptimization. A particular dependency is only checked once.
1227 NMethodIterator iter(NMethodIterator::not_unloading);
1228 while(iter.next()) {
1229 nmethod* nm = iter.method();
1230 // Only check nmethods that are not already marked for deoptimization
1231 if (!nm->is_marked_for_deoptimization()) {
1232 for (Dependencies::DepStream deps(nm); deps.next(); ) {
1233 // Construct abstraction of a dependency.
1234 DependencySignature* current_sig = new DependencySignature(deps);
1235
1236 // Determine if dependency is already checked. table->put(...) returns
1237 // 'true' if the dependency is added (i.e., was not in the hashtable).
1238 if (table->put(*current_sig, 1)) {
1239 Klass* witness = deps.check_dependency();
1240 if (witness != nullptr) {
1241 // Dependency checking failed. Print out information about the failed
1242 // dependency and finally fail with an assert. We can fail here, since
1243 // dependency checking is never done in a product build.
1244 deps.print_dependency(tty, witness, true);
1245 changes.print();
1246 nm->print();
1247 nm->print_dependencies_on(tty);
1248 assert(false, "Should have been marked for deoptimization");
1249 }
1250 }
1251 }
1252 }
1253 }
1254 }
1255 #endif
1256
1257 void CodeCache::mark_for_deoptimization(DeoptimizationScope* deopt_scope, KlassDepChange& changes) {
1258 MutexLocker mu(CodeCache_lock, Mutex::_no_safepoint_check_flag);
1259
1260 // search the hierarchy looking for nmethods which are affected by the loading of this class
1261
1262 // then search the interfaces this class implements looking for nmethods
1263 // which might be dependent on the fact that an interface only had one
1264 // implementor.
1265 // Dependency checking (check_live_nmethods_dependencies) works correctly
1266 // only if no safepoint can happen.
1267 NoSafepointVerifier nsv;
1268 for (DepChange::ContextStream str(changes, nsv); str.next(); ) {
1269 InstanceKlass* d = str.klass();
1270 {
1271 LogStreamHandle(Trace, dependencies) log;
1272 if (log.is_enabled()) {
1273 log.print("Processing context ");
1274 d->name()->print_value_on(&log);
1275 }
1276 }
1277 d->mark_dependent_nmethods(deopt_scope, changes);
1278 }
1279
1280 #ifndef PRODUCT
1281 if (VerifyDependencies) {
1282 // Object pointers are used as unique identifiers for dependency arguments. This
1283 // is only possible if no safepoint, i.e., GC occurs during the verification code.
1284 dependentCheckTime.start();
1285 check_live_nmethods_dependencies(changes);
1286 dependentCheckTime.stop();
1287 }
1288 #endif
1289 }
1290
1291 #if INCLUDE_JVMTI
1292 // RedefineClasses support for saving nmethods that are dependent on "old" methods.
1293 // We don't really expect this table to grow very large. If it does, it can become a hashtable.
1294 static GrowableArray<nmethod*>* old_nmethod_table = nullptr;
1295
1296 static void add_to_old_table(nmethod* c) {
1515 }
1516 PRAGMA_DIAG_POP
1517
1518 void CodeCache::print_memory_overhead() {
1519 size_t wasted_bytes = 0;
1520 FOR_ALL_ALLOCABLE_HEAPS(heap) {
1521 CodeHeap* curr_heap = *heap;
1522 for (CodeBlob* cb = (CodeBlob*)curr_heap->first(); cb != nullptr; cb = (CodeBlob*)curr_heap->next(cb)) {
1523 HeapBlock* heap_block = ((HeapBlock*)cb) - 1;
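// Editorial note: the CodeBlob is preceded by its CodeHeap HeapBlock header;
// length() is multiplied by CodeCacheSegmentSize below, i.e. it is the block
// length in segments, and the difference between that rounded-up allocation
// and cb->size() is counted as wasted space.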
1524 wasted_bytes += heap_block->length() * CodeCacheSegmentSize - cb->size();
1525 }
1526 }
1527 // Print bytes that are allocated in the freelist
1528 ttyLocker ttl;
1529 tty->print_cr("Number of elements in freelist: %zd", freelists_length());
1530 tty->print_cr("Allocated in freelist: %zdkB", bytes_allocated_in_freelists()/K);
1531 tty->print_cr("Unused bytes in CodeBlobs: %zdkB", (wasted_bytes/K));
1532 tty->print_cr("Segment map size: %zdkB", allocated_segments()/K); // 1 byte per segment
1533 }
1534
1535 static void print_helper1(outputStream* st, const char* prefix, int total, int not_entrant, int used) {
1536 if (total > 0) {
1537 double ratio = (100.0 * used) / total;
1538 st->print("%s %3d nmethods: %3d not_entrant, %d used (%2.1f%%)", prefix, total, not_entrant, used, ratio);
1539 }
1540 }
1541
1542 void CodeCache::print_nmethod_statistics_on(outputStream* st) {
1543 int stats [2][6][3][2] = {0};
1544 int stats_used[2][6][3][2] = {0};
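// Editorial note on the dimensions, matching the idx1..idx4 computation below:
// [JIT=0 / AOT=1][comp_level, +1 when preloaded][in_use=0 / not_entrant=1 /
// other=2][normal=0 / OSR=1].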
1545
1546 int total_osr = 0;
1547 int total_entrant = 0;
1548 int total_non_entrant = 0;
1549 int total_other = 0;
1550 int total_used = 0;
1551
1552 NMethodIterator iter(NMethodIterator::all);
1553 while (iter.next()) {
1554 nmethod* nm = iter.method();
1555 if (nm->is_in_use()) {
1556 ++total_entrant;
1557 } else if (nm->is_not_entrant()) {
1558 ++total_non_entrant;
1559 } else {
1560 ++total_other;
1561 }
1562 if (nm->is_osr_method()) {
1563 ++total_osr;
1564 }
1565 if (nm->used()) {
1566 ++total_used;
1567 }
1568 assert(!nm->preloaded() || nm->comp_level() == CompLevel_full_optimization, "");
1569
1570 int idx1 = nm->is_aot() ? 1 : 0;
1571 int idx2 = nm->comp_level() + (nm->preloaded() ? 1 : 0);
1572 int idx3 = (nm->is_in_use() ? 0 :
1573 (nm->is_not_entrant() ? 1 :
1574 2));
1575 int idx4 = (nm->is_osr_method() ? 1 : 0);
1576 stats[idx1][idx2][idx3][idx4] += 1;
1577 if (nm->used()) {
1578 stats_used[idx1][idx2][idx3][idx4] += 1;
1579 }
1580 }
1581
1582 st->print("Total: %d methods (%d entrant / %d not_entrant; osr: %d ",
1583 total_entrant + total_non_entrant + total_other,
1584 total_entrant, total_non_entrant, total_osr);
1585 if (total_other > 0) {
1586 st->print("; %d other", total_other);
1587 }
1588 st->print_cr(")");
1589
1590 for (int i = CompLevel_simple; i <= CompLevel_full_optimization; i++) {
1591 int total_normal = stats[0][i][0][0] + stats[0][i][1][0] + stats[0][i][2][0];
1592 int total_osr = stats[0][i][0][1] + stats[0][i][1][1] + stats[0][i][2][1];
1593 if (total_normal + total_osr > 0) {
1594 st->print(" Tier%d:", i);
1595 print_helper1(st, "", total_normal, stats[0][i][1][0], stats_used[0][i][0][0] + stats_used[0][i][1][0]);
1596 print_helper1(st, "; osr:", total_osr, stats[0][i][1][1], stats_used[0][i][0][1] + stats_used[0][i][1][1]);
1597 st->cr();
1598 }
1599 }
1600 st->cr();
1601 for (int i = CompLevel_simple; i <= CompLevel_full_optimization + 1; i++) {
1602 int total_normal = stats[1][i][0][0] + stats[1][i][1][0] + stats[1][i][2][0];
1603 int total_osr = stats[1][i][0][1] + stats[1][i][1][1] + stats[1][i][2][1];
1604 assert(total_osr == 0, "sanity");
1605 if (total_normal + total_osr > 0) {
1606 st->print(" AOT Code T%d:", i);
1607 print_helper1(st, "", total_normal, stats[1][i][1][0], stats_used[1][i][0][0] + stats_used[1][i][1][0]);
1608 print_helper1(st, "; osr:", total_osr, stats[1][i][1][1], stats_used[1][i][0][1] + stats_used[1][i][1][1]);
1609 st->cr();
1610 }
1611 }
1612 }
1613
1614 //------------------------------------------------------------------------------------------------
1615 // Non-product version
1616
1617 #ifndef PRODUCT
1618
1619 void CodeCache::print_trace(const char* event, CodeBlob* cb, uint size) {
1620 if (PrintCodeCache2) { // Need to add a new flag
1621 ResourceMark rm;
1622 if (size == 0) {
1623 int s = cb->size();
1624 assert(s >= 0, "CodeBlob size is negative: %d", s);
1625 size = (uint) s;
1626 }
1627 tty->print_cr("CodeCache %s: addr: " INTPTR_FORMAT ", size: 0x%x", event, p2i(cb), size);
1628 }
1629 }
1630
1631 void CodeCache::print_internals() {
1632 int nmethodCount = 0;
1633 int runtimeStubCount = 0;
1634 int upcallStubCount = 0;
1635 int adapterCount = 0;
1636 int mhAdapterCount = 0;
1637 int vtableBlobCount = 0;
1638 int deoptimizationStubCount = 0;
1639 int uncommonTrapStubCount = 0;
1640 int exceptionStubCount = 0;
1641 int safepointStubCount = 0;
1642 int bufferBlobCount = 0;
1643 int total = 0;
1644 int nmethodNotEntrant = 0;
1645 int nmethodJava = 0;
1646 int nmethodNative = 0;
1647 int max_nm_size = 0;
1648 ResourceMark rm;
1649
1650 int i = 0;
1651 FOR_ALL_ALLOCABLE_HEAPS(heap) {
1652 int heap_total = 0;
1653 tty->print_cr("-- %s --", (*heap)->name());
1654 FOR_ALL_BLOBS(cb, *heap) {
1655 total++;
1656 heap_total++;
1657 if (cb->is_nmethod()) {
1658 nmethod* nm = (nmethod*)cb;
1659
1660 tty->print("%4d: ", heap_total);
1661 CompileTask::print(tty, nm, (nm->is_not_entrant() ? "non-entrant" : ""), true, true);
1662
1663 nmethodCount++;
1664
1665 if(nm->is_not_entrant()) { nmethodNotEntrant++; }
1666 if(nm->method() != nullptr && nm->is_native_method()) { nmethodNative++; }
1667
1668 if(nm->method() != nullptr && nm->is_java_method()) {
1669 nmethodJava++;
1670 max_nm_size = MAX2(max_nm_size, nm->size());
1671 }
1672 } else if (cb->is_runtime_stub()) {
1673 runtimeStubCount++;
1674 } else if (cb->is_upcall_stub()) {
1675 upcallStubCount++;
1676 } else if (cb->is_deoptimization_stub()) {
1677 deoptimizationStubCount++;
1678 } else if (cb->is_uncommon_trap_stub()) {
1679 uncommonTrapStubCount++;
1680 } else if (cb->is_exception_stub()) {
1681 exceptionStubCount++;
1838 FOR_ALL_BLOBS(cb, *heap) {
1839 number_of_blobs++;
1840 code_size += cb->code_size();
1841 ImmutableOopMapSet* set = cb->oop_maps();
1842 if (set != nullptr) {
1843 number_of_oop_maps += set->count();
1844 map_size += set->nr_of_bytes();
1845 }
1846 }
1847 }
1848 tty->print_cr("OopMaps");
1849 tty->print_cr(" #blobs = %d", number_of_blobs);
1850 tty->print_cr(" code size = %d", code_size);
1851 tty->print_cr(" #oop_maps = %d", number_of_oop_maps);
1852 tty->print_cr(" map size = %d", map_size);
1853 }
1854
1855 #endif // !PRODUCT
1856 }
1857
1858 void CodeCache::print_nmethods_on(outputStream* st) {
1859 ResourceMark rm;
1860 int i = 0;
1861 FOR_ALL_ALLOCABLE_HEAPS(heap) {
1862 st->print_cr("-- %s --", (*heap)->name());
1863 FOR_ALL_BLOBS(cb, *heap) {
1864 i++;
1865 if (cb->is_nmethod()) {
1866 nmethod* nm = (nmethod*)cb;
1867 st->print("%4d: ", i);
1868 CompileTask::print(st, nm, nullptr, true, false);
1869
1870 const char non_entrant_char = (nm->is_not_entrant() ? 'N' : ' ');
1871 st->print_cr(" %c", non_entrant_char);
1872 }
1873 }
1874 }
1875 }
1876
1877 void CodeCache::print_summary(outputStream* st, bool detailed) {
1878 int full_count = 0;
1879 julong total_used = 0;
1880 julong total_max_used = 0;
1881 julong total_free = 0;
1882 julong total_size = 0;
1883 FOR_ALL_HEAPS(heap_iterator) {
1884 CodeHeap* heap = (*heap_iterator);
1885 size_t total = (heap->high_boundary() - heap->low_boundary());
1886 if (_heaps->length() >= 1) {
1887 st->print("%s:", heap->name());
1888 } else {
1889 st->print("CodeCache:");
1890 }
1891 size_t size = total/K;
1892 size_t used = (total - heap->unallocated_capacity())/K;
1893 size_t max_used = heap->max_allocated_capacity()/K;
1894 size_t free = heap->unallocated_capacity()/K;
1895 total_size += size;
1896 total_used += used;
|