5 * This code is free software; you can redistribute it and/or modify it
6 * under the terms of the GNU General Public License version 2 only, as
7 * published by the Free Software Foundation.
8 *
9 * This code is distributed in the hope that it will be useful, but WITHOUT
10 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
11 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
12 * version 2 for more details (a copy is included in the LICENSE file that
13 * accompanied this code).
14 *
15 * You should have received a copy of the GNU General Public License version
16 * 2 along with this work; if not, write to the Free Software Foundation,
17 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
18 *
19 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
20 * or visit www.oracle.com if you need additional information or have any
21 * questions.
22 *
23 */
24
25 #include "code/codeBlob.hpp"
26 #include "code/codeCache.hpp"
27 #include "code/codeHeapState.hpp"
28 #include "code/compiledIC.hpp"
29 #include "code/dependencies.hpp"
30 #include "code/dependencyContext.hpp"
31 #include "code/nmethod.hpp"
32 #include "code/pcDesc.hpp"
33 #include "compiler/compilationPolicy.hpp"
34 #include "compiler/compileBroker.hpp"
35 #include "compiler/compilerDefinitions.inline.hpp"
36 #include "compiler/oopMap.hpp"
37 #include "gc/shared/barrierSetNMethod.hpp"
38 #include "gc/shared/classUnloadingContext.hpp"
39 #include "gc/shared/collectedHeap.hpp"
40 #include "jfr/jfrEvents.hpp"
41 #include "jvm_io.h"
42 #include "logging/log.hpp"
43 #include "logging/logStream.hpp"
44 #include "memory/allocation.inline.hpp"
153 scopes_data_size += nm->scopes_data_size();
154 scopes_pcs_size += nm->scopes_pcs_size();
155 } else {
156 code_size += cb->code_size();
157 }
158 }
159 };
160
161 // Iterate over all CodeHeaps
162 #define FOR_ALL_HEAPS(heap) for (GrowableArrayIterator<CodeHeap*> heap = _heaps->begin(); heap != _heaps->end(); ++heap)
163 #define FOR_ALL_ALLOCABLE_HEAPS(heap) for (GrowableArrayIterator<CodeHeap*> heap = _allocable_heaps->begin(); heap != _allocable_heaps->end(); ++heap)
164
165 // Iterate over all CodeBlobs (cb) on the given CodeHeap
166 #define FOR_ALL_BLOBS(cb, heap) for (CodeBlob* cb = first_blob(heap); cb != nullptr; cb = next_blob(heap, cb))
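// Typical nesting of these macros, as used by print_internals() below:
//   FOR_ALL_ALLOCABLE_HEAPS(heap) {
//     FOR_ALL_BLOBS(cb, *heap) { /* inspect cb */ }
//   }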
167
168 address CodeCache::_low_bound = nullptr;
169 address CodeCache::_high_bound = nullptr;
170 volatile int CodeCache::_number_of_nmethods_with_dependencies = 0;
171 ExceptionCache* volatile CodeCache::_exception_cache_purge_list = nullptr;
172
173 // Initialize arrays of CodeHeap subsets
174 GrowableArray<CodeHeap*>* CodeCache::_heaps = new(mtCode) GrowableArray<CodeHeap*> (static_cast<int>(CodeBlobType::All), mtCode);
175 GrowableArray<CodeHeap*>* CodeCache::_nmethod_heaps = new(mtCode) GrowableArray<CodeHeap*> (static_cast<int>(CodeBlobType::All), mtCode);
176 GrowableArray<CodeHeap*>* CodeCache::_allocable_heaps = new(mtCode) GrowableArray<CodeHeap*> (static_cast<int>(CodeBlobType::All), mtCode);
177
178 static void check_min_size(const char* codeheap, size_t size, size_t required_size) {
179 if (size < required_size) {
180 log_debug(codecache)("Code heap (%s) size %zuK below required minimal size %zuK",
181 codeheap, size/K, required_size/K);
182 err_msg title("Not enough space in %s to run VM", codeheap);
183 err_msg message("%zuK < %zuK", size/K, required_size/K);
184 vm_exit_during_initialization(title, message);
185 }
186 }
187
188 struct CodeHeapInfo {
189 size_t size;
190 bool set;
191 bool enabled;
192 };
193
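// Size a code heap whose size was not set on the command line: give it whatever
// remains of available_size after subtracting used_size, but never less than min_size.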
194 static void set_size_of_unset_code_heap(CodeHeapInfo* heap, size_t available_size, size_t used_size, size_t min_size) {
195 assert(!heap->set, "sanity");
196 heap->size = (available_size > (used_size + min_size)) ? (available_size - used_size) : min_size;
197 }
198
199 void CodeCache::initialize_heaps() {
200
201 CodeHeapInfo non_nmethod = {NonNMethodCodeHeapSize, FLAG_IS_CMDLINE(NonNMethodCodeHeapSize), true};
202 CodeHeapInfo profiled = {ProfiledCodeHeapSize, FLAG_IS_CMDLINE(ProfiledCodeHeapSize), true};
203 CodeHeapInfo non_profiled = {NonProfiledCodeHeapSize, FLAG_IS_CMDLINE(NonProfiledCodeHeapSize), true};
204
205 const bool cache_size_set = FLAG_IS_CMDLINE(ReservedCodeCacheSize);
206 const size_t ps = page_size(false, 8);
207 const size_t min_size = MAX2(os::vm_allocation_granularity(), ps);
208 const size_t min_cache_size = CodeCacheMinimumUseSpace DEBUG_ONLY(* 3); // Make sure we have enough space for VM internal code
209 size_t cache_size = align_up(ReservedCodeCacheSize, min_size);
210
211 // Prerequisites
212 if (!heap_available(CodeBlobType::MethodProfiled)) {
213 // For compatibility reasons, disabled tiered compilation overrides the
214 // segment sizes even if they were set explicitly.
215 non_profiled.size += profiled.size;
216 // The profiled code heap is not available, so forcibly set its size to 0
217 profiled.size = 0;
218 profiled.set = true;
219 profiled.enabled = false;
220 }
301 if (ps < lg_ps) {
302 log_warning(codecache)("Code cache size too small for " PROPERFMT " pages. "
303 "Reverting to smaller page size (" PROPERFMT ").",
304 PROPERFMTARGS(lg_ps), PROPERFMTARGS(ps));
305 }
306 }
307
308 // Note: if large page support is enabled, min_size is at least the large
309 // page size. This ensures that the code cache is covered by large pages.
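// The sub-min_size remainders of the non-nmethod and profiled heaps are folded into
// the non-profiled heap before all three sizes are aligned down, so alignment does
// not lose reserved space. Illustrative example (assumed numbers): with min_size = 2M,
// a 5M non-nmethod heap keeps 4M and its remaining 1M goes to the non-profiled heap.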
310 non_profiled.size += non_nmethod.size & alignment_mask(min_size);
311 non_profiled.size += profiled.size & alignment_mask(min_size);
312 non_nmethod.size = align_down(non_nmethod.size, min_size);
313 profiled.size = align_down(profiled.size, min_size);
314 non_profiled.size = align_down(non_profiled.size, min_size);
315
316 FLAG_SET_ERGO(NonNMethodCodeHeapSize, non_nmethod.size);
317 FLAG_SET_ERGO(ProfiledCodeHeapSize, profiled.size);
318 FLAG_SET_ERGO(NonProfiledCodeHeapSize, non_profiled.size);
319 FLAG_SET_ERGO(ReservedCodeCacheSize, cache_size);
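// Write the ergonomically adjusted sizes back into the flags so that later queries
// (e.g. -XX:+PrintFlagsFinal) observe the values actually used.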
320
321 ReservedSpace rs = reserve_heap_memory(cache_size, ps);
322
323 // Register CodeHeaps with LSan as we sometimes embed pointers to malloc memory.
324 LSAN_REGISTER_ROOT_REGION(rs.base(), rs.size());
325
326 size_t offset = 0;
327 if (profiled.enabled) {
328 ReservedSpace profiled_space = rs.partition(offset, profiled.size);
329 offset += profiled.size;
330 // Tier 2 and tier 3 (profiled) methods
331 add_heap(profiled_space, "CodeHeap 'profiled nmethods'", CodeBlobType::MethodProfiled);
332 }
333
334 ReservedSpace non_method_space = rs.partition(offset, non_nmethod.size);
335 offset += non_nmethod.size;
336 // Non-nmethods (stubs, adapters, ...)
337 add_heap(non_method_space, "CodeHeap 'non-nmethods'", CodeBlobType::NonNMethod);
338
339 if (non_profiled.enabled) {
340 ReservedSpace non_profiled_space = rs.partition(offset, non_profiled.size);
341 // Tier 1 and tier 4 (non-profiled) methods and native methods
342 add_heap(non_profiled_space, "CodeHeap 'non-profiled nmethods'", CodeBlobType::MethodNonProfiled);
343 }
344 }
345
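// Choose the page size used when reserving the code cache: roughly, the largest
// supported page size for which ReservedCodeCacheSize still spans at least min_pages pages.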
346 size_t CodeCache::page_size(bool aligned, size_t min_pages) {
347 return aligned ? os::page_size_for_region_aligned(ReservedCodeCacheSize, min_pages) :
348 os::page_size_for_region_unaligned(ReservedCodeCacheSize, min_pages);
349 }
350
351 ReservedSpace CodeCache::reserve_heap_memory(size_t size, size_t rs_ps) {
352 // Align and reserve space for code cache
353 const size_t rs_align = MAX2(rs_ps, os::vm_allocation_granularity());
354 const size_t rs_size = align_up(size, rs_align);
355
356 ReservedSpace rs = CodeMemoryReserver::reserve(rs_size, rs_align, rs_ps);
357 if (!rs.is_reserved()) {
358 vm_exit_during_initialization(err_msg("Could not reserve enough space for code cache (%zuK)",
359 rs_size/K));
360 }
361
362 // Initialize bounds
363 _low_bound = (address)rs.base();
364 _high_bound = _low_bound + rs.size();
365 return rs;
1042 size_t CodeCache::max_capacity() {
1043 size_t max_cap = 0;
1044 FOR_ALL_ALLOCABLE_HEAPS(heap) {
1045 max_cap += (*heap)->max_capacity();
1046 }
1047 return max_cap;
1048 }
1049
1050 bool CodeCache::is_non_nmethod(address addr) {
1051 CodeHeap* blob = get_code_heap(CodeBlobType::NonNMethod);
1052 return blob->contains(addr);
1053 }
1054
1055 size_t CodeCache::max_distance_to_non_nmethod() {
1056 if (!SegmentedCodeCache) {
1057 return ReservedCodeCacheSize;
1058 } else {
1059 CodeHeap* blob = get_code_heap(CodeBlobType::NonNMethod);
1060 // The max distance is minimized by placing the NonNMethod segment
1061 // between the MethodProfiled and MethodNonProfiled segments.
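// dist1/dist2 below bound the distance from either end of the code cache to the far
// end of the NonNMethod heap, so the returned value is an upper bound on the distance
// from any nmethod to the non-nmethod stubs.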
1062 size_t dist1 = (size_t)blob->high() - (size_t)_low_bound;
1063 size_t dist2 = (size_t)_high_bound - (size_t)blob->low();
1064 return dist1 > dist2 ? dist1 : dist2;
1065 }
1066 }
1067
1068 // Returns the reverse free ratio. E.g., if 25% (1/4) of the code cache
1069 // is free, reverse_free_ratio() returns 4.
1070 // Since the code heap for each type of code blob can spill over into the next
1071 // type of code heap, return the reverse free ratio for the entire
1072 // code cache.
1073 double CodeCache::reverse_free_ratio() {
1074 double unallocated = MAX2((double)unallocated_capacity(), 1.0); // Avoid division by 0;
1075 double max = (double)max_capacity();
1076 double result = max / unallocated;
1077 assert (max >= unallocated, "Must be");
1078 assert (result >= 1.0, "reverse_free_ratio must be at least 1. It is %f", result);
1079 return result;
1080 }
1081
1082 size_t CodeCache::bytes_allocated_in_freelists() {
1083 size_t allocated_bytes = 0;
1188 AnyObj::RESOURCE_AREA, mtInternal,
1189 &DependencySignature::hash,
1190 &DependencySignature::equals> DepTable;
1191
1192 DepTable* table = new DepTable();
1193
1194 // Iterate over live nmethods and check dependencies of all nmethods that are not
1195 // marked for deoptimization. A particular dependency is only checked once.
1196 NMethodIterator iter(NMethodIterator::not_unloading);
1197 while(iter.next()) {
1198 nmethod* nm = iter.method();
1199 // Only check nmethods that are not marked for deoptimization
1200 if (!nm->is_marked_for_deoptimization()) {
1201 for (Dependencies::DepStream deps(nm); deps.next(); ) {
1202 // Construct abstraction of a dependency.
1203 DependencySignature* current_sig = new DependencySignature(deps);
1204
1205 // Determine if dependency is already checked. table->put(...) returns
1206 // 'true' if the dependency is added (i.e., was not in the hashtable).
1207 if (table->put(*current_sig, 1)) {
1208 if (deps.check_dependency() != nullptr) {
1209 // Dependency checking failed. Print out information about the failed
1210 // dependency and finally fail with an assert. We can fail here, since
1211 // dependency checking is never done in a product build.
1212 tty->print_cr("Failed dependency:");
1213 changes.print();
1214 nm->print();
1215 nm->print_dependencies_on(tty);
1216 assert(false, "Should have been marked for deoptimization");
1217 }
1218 }
1219 }
1220 }
1221 }
1222 }
1223 #endif
1224
1225 void CodeCache::mark_for_deoptimization(DeoptimizationScope* deopt_scope, KlassDepChange& changes) {
1226 MutexLocker mu(CodeCache_lock, Mutex::_no_safepoint_check_flag);
1227
1228 // Search the class hierarchy looking for nmethods which are affected by the loading of this class,
1229
1230 // then search the interfaces this class implements looking for nmethods
1231 // which might be dependent on the fact that an interface only had one
1232 // implementor.
1233 // nmethod::check_all_dependencies works correctly only if no safepoint
1234 // can happen.
1235 NoSafepointVerifier nsv;
1236 for (DepChange::ContextStream str(changes, nsv); str.next(); ) {
1237 InstanceKlass* d = str.klass();
1238 d->mark_dependent_nmethods(deopt_scope, changes);
1239 }
1240
1241 #ifndef PRODUCT
1242 if (VerifyDependencies) {
1243 // Object pointers are used as unique identifiers for dependency arguments. This
1244 // is only possible if no safepoint, i.e., GC occurs during the verification code.
1245 dependentCheckTime.start();
1246 check_live_nmethods_dependencies(changes);
1247 dependentCheckTime.stop();
1248 }
1249 #endif
1250 }
1251
1252 #if INCLUDE_JVMTI
1253 // RedefineClasses support for saving nmethods that are dependent on "old" methods.
1254 // We don't really expect this table to grow very large. If it does, it can become a hashtable.
1255 static GrowableArray<nmethod*>* old_nmethod_table = nullptr;
1256
1257 static void add_to_old_table(nmethod* c) {
1476 }
1477 PRAGMA_DIAG_POP
1478
1479 void CodeCache::print_memory_overhead() {
1480 size_t wasted_bytes = 0;
1481 FOR_ALL_ALLOCABLE_HEAPS(heap) {
1482 CodeHeap* curr_heap = *heap;
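// Each CodeBlob is preceded by its HeapBlock header; the difference between the
// segment-granular block length and the blob's actual size is wasted space (fragmentation).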
1483 for (CodeBlob* cb = (CodeBlob*)curr_heap->first(); cb != nullptr; cb = (CodeBlob*)curr_heap->next(cb)) {
1484 HeapBlock* heap_block = ((HeapBlock*)cb) - 1;
1485 wasted_bytes += heap_block->length() * CodeCacheSegmentSize - cb->size();
1486 }
1487 }
1488 // Print bytes that are allocated in the freelist
1489 ttyLocker ttl;
1490 tty->print_cr("Number of elements in freelist: %zd", freelists_length());
1491 tty->print_cr("Allocated in freelist: %zdkB", bytes_allocated_in_freelists()/K);
1492 tty->print_cr("Unused bytes in CodeBlobs: %zdkB", (wasted_bytes/K));
1493 tty->print_cr("Segment map size: %zdkB", allocated_segments()/K); // 1 byte per segment
1494 }
1495
1496 //------------------------------------------------------------------------------------------------
1497 // Non-product version
1498
1499 #ifndef PRODUCT
1500
1501 void CodeCache::print_trace(const char* event, CodeBlob* cb, uint size) {
1502 if (PrintCodeCache2) { // Need to add a new flag
1503 ResourceMark rm;
1504 if (size == 0) {
1505 int s = cb->size();
1506 assert(s >= 0, "CodeBlob size is negative: %d", s);
1507 size = (uint) s;
1508 }
1509 tty->print_cr("CodeCache %s: addr: " INTPTR_FORMAT ", size: 0x%x", event, p2i(cb), size);
1510 }
1511 }
1512
1513 void CodeCache::print_internals() {
1514 int nmethodCount = 0;
1515 int runtimeStubCount = 0;
1516 int upcallStubCount = 0;
1517 int adapterCount = 0;
1518 int mhAdapterCount = 0;
1519 int vtableBlobCount = 0;
1520 int deoptimizationStubCount = 0;
1521 int uncommonTrapStubCount = 0;
1522 int exceptionStubCount = 0;
1523 int safepointStubCount = 0;
1524 int bufferBlobCount = 0;
1525 int total = 0;
1526 int nmethodNotEntrant = 0;
1527 int nmethodJava = 0;
1528 int nmethodNative = 0;
1529 int max_nm_size = 0;
1530 ResourceMark rm;
1531
1532 int i = 0;
1533 FOR_ALL_ALLOCABLE_HEAPS(heap) {
1534 if ((_nmethod_heaps->length() >= 1) && Verbose) {
1535 tty->print_cr("-- %s --", (*heap)->name());
1536 }
1537 FOR_ALL_BLOBS(cb, *heap) {
1538 total++;
1539 if (cb->is_nmethod()) {
1540 nmethod* nm = (nmethod*)cb;
1541
1542 if (Verbose && nm->method() != nullptr) {
1543 ResourceMark rm;
1544 char *method_name = nm->method()->name_and_sig_as_C_string();
1545 tty->print("%s", method_name);
1546 if(nm->is_not_entrant()) { tty->print_cr(" not-entrant"); }
1547 }
1548
1549 nmethodCount++;
1550
1551 if(nm->is_not_entrant()) { nmethodNotEntrant++; }
1552 if(nm->method() != nullptr && nm->is_native_method()) { nmethodNative++; }
1553
1554 if(nm->method() != nullptr && nm->is_java_method()) {
1555 nmethodJava++;
1556 max_nm_size = MAX2(max_nm_size, nm->size());
1557 }
1558 } else if (cb->is_runtime_stub()) {
1559 runtimeStubCount++;
1560 } else if (cb->is_upcall_stub()) {
1561 upcallStubCount++;
1562 } else if (cb->is_deoptimization_stub()) {
1563 deoptimizationStubCount++;
1564 } else if (cb->is_uncommon_trap_stub()) {
1565 uncommonTrapStubCount++;
1566 } else if (cb->is_exception_stub()) {
1567 exceptionStubCount++;
1724 FOR_ALL_BLOBS(cb, *heap) {
1725 number_of_blobs++;
1726 code_size += cb->code_size();
1727 ImmutableOopMapSet* set = cb->oop_maps();
1728 if (set != nullptr) {
1729 number_of_oop_maps += set->count();
1730 map_size += set->nr_of_bytes();
1731 }
1732 }
1733 }
1734 tty->print_cr("OopMaps");
1735 tty->print_cr(" #blobs = %d", number_of_blobs);
1736 tty->print_cr(" code size = %d", code_size);
1737 tty->print_cr(" #oop_maps = %d", number_of_oop_maps);
1738 tty->print_cr(" map size = %d", map_size);
1739 }
1740
1741 #endif // !PRODUCT
1742 }
1743
1744 void CodeCache::print_summary(outputStream* st, bool detailed) {
1745 int full_count = 0;
1746 julong total_used = 0;
1747 julong total_max_used = 0;
1748 julong total_free = 0;
1749 julong total_size = 0;
1750 FOR_ALL_HEAPS(heap_iterator) {
1751 CodeHeap* heap = (*heap_iterator);
1752 size_t total = (heap->high_boundary() - heap->low_boundary());
1753 if (_heaps->length() >= 1) {
1754 st->print("%s:", heap->name());
1755 } else {
1756 st->print("CodeCache:");
1757 }
1758 size_t size = total/K;
1759 size_t used = (total - heap->unallocated_capacity())/K;
1760 size_t max_used = heap->max_allocated_capacity()/K;
1761 size_t free = heap->unallocated_capacity()/K;
1762 total_size += size;
1763 total_used += used;
5 * This code is free software; you can redistribute it and/or modify it
6 * under the terms of the GNU General Public License version 2 only, as
7 * published by the Free Software Foundation.
8 *
9 * This code is distributed in the hope that it will be useful, but WITHOUT
10 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
11 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
12 * version 2 for more details (a copy is included in the LICENSE file that
13 * accompanied this code).
14 *
15 * You should have received a copy of the GNU General Public License version
16 * 2 along with this work; if not, write to the Free Software Foundation,
17 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
18 *
19 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
20 * or visit www.oracle.com if you need additional information or have any
21 * questions.
22 *
23 */
24
25 #include "cds/aotCacheAccess.hpp"
26 #include "code/codeBlob.hpp"
27 #include "code/codeCache.hpp"
28 #include "code/codeHeapState.hpp"
29 #include "code/compiledIC.hpp"
30 #include "code/dependencies.hpp"
31 #include "code/dependencyContext.hpp"
32 #include "code/nmethod.hpp"
33 #include "code/pcDesc.hpp"
34 #include "compiler/compilationPolicy.hpp"
35 #include "compiler/compileBroker.hpp"
36 #include "compiler/compilerDefinitions.inline.hpp"
37 #include "compiler/oopMap.hpp"
38 #include "gc/shared/barrierSetNMethod.hpp"
39 #include "gc/shared/classUnloadingContext.hpp"
40 #include "gc/shared/collectedHeap.hpp"
41 #include "jfr/jfrEvents.hpp"
42 #include "jvm_io.h"
43 #include "logging/log.hpp"
44 #include "logging/logStream.hpp"
45 #include "memory/allocation.inline.hpp"
154 scopes_data_size += nm->scopes_data_size();
155 scopes_pcs_size += nm->scopes_pcs_size();
156 } else {
157 code_size += cb->code_size();
158 }
159 }
160 };
161
162 // Iterate over all CodeHeaps
163 #define FOR_ALL_HEAPS(heap) for (GrowableArrayIterator<CodeHeap*> heap = _heaps->begin(); heap != _heaps->end(); ++heap)
164 #define FOR_ALL_ALLOCABLE_HEAPS(heap) for (GrowableArrayIterator<CodeHeap*> heap = _allocable_heaps->begin(); heap != _allocable_heaps->end(); ++heap)
165
166 // Iterate over all CodeBlobs (cb) on the given CodeHeap
167 #define FOR_ALL_BLOBS(cb, heap) for (CodeBlob* cb = first_blob(heap); cb != nullptr; cb = next_blob(heap, cb))
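// Typical nesting of these macros, as used by print_internals() below:
//   FOR_ALL_ALLOCABLE_HEAPS(heap) {
//     FOR_ALL_BLOBS(cb, *heap) { /* inspect cb */ }
//   }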
168
169 address CodeCache::_low_bound = nullptr;
170 address CodeCache::_high_bound = nullptr;
171 volatile int CodeCache::_number_of_nmethods_with_dependencies = 0;
172 ExceptionCache* volatile CodeCache::_exception_cache_purge_list = nullptr;
173
174 static ReservedSpace _cds_code_space;
175
176 // Initialize arrays of CodeHeap subsets
177 GrowableArray<CodeHeap*>* CodeCache::_heaps = new(mtCode) GrowableArray<CodeHeap*> (static_cast<int>(CodeBlobType::All), mtCode);
178 GrowableArray<CodeHeap*>* CodeCache::_nmethod_heaps = new(mtCode) GrowableArray<CodeHeap*> (static_cast<int>(CodeBlobType::All), mtCode);
179 GrowableArray<CodeHeap*>* CodeCache::_allocable_heaps = new(mtCode) GrowableArray<CodeHeap*> (static_cast<int>(CodeBlobType::All), mtCode);
180
181 static void check_min_size(const char* codeheap, size_t size, size_t required_size) {
182 if (size < required_size) {
183 log_debug(codecache)("Code heap (%s) size %zuK below required minimal size %zuK",
184 codeheap, size/K, required_size/K);
185 err_msg title("Not enough space in %s to run VM", codeheap);
186 err_msg message("%zuK < %zuK", size/K, required_size/K);
187 vm_exit_during_initialization(title, message);
188 }
189 }
190
191 struct CodeHeapInfo {
192 size_t size;
193 bool set;
194 bool enabled;
195 };
196
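// Size a code heap whose size was not set on the command line: give it whatever
// remains of available_size after subtracting used_size, but never less than min_size.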
197 static void set_size_of_unset_code_heap(CodeHeapInfo* heap, size_t available_size, size_t used_size, size_t min_size) {
198 assert(!heap->set, "sanity");
199 heap->size = (available_size > (used_size + min_size)) ? (available_size - used_size) : min_size;
200 }
201
202 void CodeCache::initialize_heaps() {
203 CodeHeapInfo non_nmethod = {NonNMethodCodeHeapSize, FLAG_IS_CMDLINE(NonNMethodCodeHeapSize), true};
204 CodeHeapInfo profiled = {ProfiledCodeHeapSize, FLAG_IS_CMDLINE(ProfiledCodeHeapSize), true};
205 CodeHeapInfo non_profiled = {NonProfiledCodeHeapSize, FLAG_IS_CMDLINE(NonProfiledCodeHeapSize), true};
206
207 const bool cache_size_set = FLAG_IS_CMDLINE(ReservedCodeCacheSize);
208 const size_t ps = page_size(false, 8);
209 const size_t min_size = MAX2(os::vm_allocation_granularity(), ps);
210 const size_t min_cache_size = CodeCacheMinimumUseSpace DEBUG_ONLY(* 3); // Make sure we have enough space for VM internal code
211 size_t cache_size = align_up(ReservedCodeCacheSize, min_size);
212
213 // Prerequisites
214 if (!heap_available(CodeBlobType::MethodProfiled)) {
215 // For compatibility reasons, disabled tiered compilation overrides the
216 // segment sizes even if they were set explicitly.
217 non_profiled.size += profiled.size;
218 // The profiled code heap is not available, so forcibly set its size to 0
219 profiled.size = 0;
220 profiled.set = true;
221 profiled.enabled = false;
222 }
303 if (ps < lg_ps) {
304 log_warning(codecache)("Code cache size too small for " PROPERFMT " pages. "
305 "Reverting to smaller page size (" PROPERFMT ").",
306 PROPERFMTARGS(lg_ps), PROPERFMTARGS(ps));
307 }
308 }
309
310 // Note: if large page support is enabled, min_size is at least the large
311 // page size. This ensures that the code cache is covered by large pages.
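// The sub-min_size remainders of the non-nmethod and profiled heaps are folded into
// the non-profiled heap before all three sizes are aligned down, so alignment does
// not lose reserved space. Illustrative example (assumed numbers): with min_size = 2M,
// a 5M non-nmethod heap keeps 4M and its remaining 1M goes to the non-profiled heap.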
312 non_profiled.size += non_nmethod.size & alignment_mask(min_size);
313 non_profiled.size += profiled.size & alignment_mask(min_size);
314 non_nmethod.size = align_down(non_nmethod.size, min_size);
315 profiled.size = align_down(profiled.size, min_size);
316 non_profiled.size = align_down(non_profiled.size, min_size);
317
318 FLAG_SET_ERGO(NonNMethodCodeHeapSize, non_nmethod.size);
319 FLAG_SET_ERGO(ProfiledCodeHeapSize, profiled.size);
320 FLAG_SET_ERGO(NonProfiledCodeHeapSize, non_profiled.size);
321 FLAG_SET_ERGO(ReservedCodeCacheSize, cache_size);
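// Write the ergonomically adjusted sizes back into the flags so that later queries
// (e.g. -XX:+PrintFlagsFinal) observe the values actually used.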
322
323 const size_t cds_code_size = 0;
324 // FIXME: we should not increase the CodeCache size - it affects branches.
325 // Instead we need to create a separate code heap in the CodeCache for AOT code.
326 // const size_t cds_code_size = align_up(AOTCacheAccess::get_aot_code_region_size(), min_size);
327 // cache_size += cds_code_size;
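// With cds_code_size fixed at 0 above, no AOT code partition is carved out of the
// reserved region, _cds_code_space stays empty, and map_aot_code() returns nullptr.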
328
329 ReservedSpace rs = reserve_heap_memory(cache_size, ps);
330
331 // Register CodeHeaps with LSan as we sometimes embed pointers to malloc memory.
332 LSAN_REGISTER_ROOT_REGION(rs.base(), rs.size());
333
334 size_t offset = 0;
335 if (cds_code_size > 0) {
336 // FIXME: use CodeHeapInfo for this hack ...
337 _cds_code_space = rs.partition(offset, cds_code_size);
338 offset += cds_code_size;
339 }
340
341 if (profiled.enabled) {
342 ReservedSpace profiled_space = rs.partition(offset, profiled.size);
343 offset += profiled.size;
344 // Tier 2 and tier 3 (profiled) methods
345 add_heap(profiled_space, "CodeHeap 'profiled nmethods'", CodeBlobType::MethodProfiled);
346 }
347
348 ReservedSpace non_method_space = rs.partition(offset, non_nmethod.size);
349 offset += non_nmethod.size;
350 // Non-nmethods (stubs, adapters, ...)
351 add_heap(non_method_space, "CodeHeap 'non-nmethods'", CodeBlobType::NonNMethod);
352
353 if (non_profiled.enabled) {
354 ReservedSpace non_profiled_space = rs.partition(offset, non_profiled.size);
355 // Tier 1 and tier 4 (non-profiled) methods and native methods
356 add_heap(non_profiled_space, "CodeHeap 'non-profiled nmethods'", CodeBlobType::MethodNonProfiled);
357 }
358 }
359
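// Map the AOT code region into the space carved out above, if any. Returns the base
// address of the mapped region, or nullptr if there is no AOT code space or the
// mapping failed.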
360 void* CodeCache::map_aot_code() {
361 if (_cds_code_space.size() > 0 && AOTCacheAccess::map_aot_code_region(_cds_code_space)) {
362 return _cds_code_space.base();
363 } else {
364 return nullptr;
365 }
366 }
367
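// Choose the page size used when reserving the code cache: roughly, the largest
// supported page size for which ReservedCodeCacheSize still spans at least min_pages pages.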
368 size_t CodeCache::page_size(bool aligned, size_t min_pages) {
369 return aligned ? os::page_size_for_region_aligned(ReservedCodeCacheSize, min_pages) :
370 os::page_size_for_region_unaligned(ReservedCodeCacheSize, min_pages);
371 }
372
373 ReservedSpace CodeCache::reserve_heap_memory(size_t size, size_t rs_ps) {
374 // Align and reserve space for code cache
375 const size_t rs_align = MAX2(rs_ps, os::vm_allocation_granularity());
376 const size_t rs_size = align_up(size, rs_align);
377
378 ReservedSpace rs = CodeMemoryReserver::reserve(rs_size, rs_align, rs_ps);
379 if (!rs.is_reserved()) {
380 vm_exit_during_initialization(err_msg("Could not reserve enough space for code cache (%zuK)",
381 rs_size/K));
382 }
383
384 // Initialize bounds
385 _low_bound = (address)rs.base();
386 _high_bound = _low_bound + rs.size();
387 return rs;
1064 size_t CodeCache::max_capacity() {
1065 size_t max_cap = 0;
1066 FOR_ALL_ALLOCABLE_HEAPS(heap) {
1067 max_cap += (*heap)->max_capacity();
1068 }
1069 return max_cap;
1070 }
1071
1072 bool CodeCache::is_non_nmethod(address addr) {
1073 CodeHeap* blob = get_code_heap(CodeBlobType::NonNMethod);
1074 return blob->contains(addr);
1075 }
1076
1077 size_t CodeCache::max_distance_to_non_nmethod() {
1078 if (!SegmentedCodeCache) {
1079 return ReservedCodeCacheSize;
1080 } else {
1081 CodeHeap* blob = get_code_heap(CodeBlobType::NonNMethod);
1082 // The max distance is minimized by placing the NonNMethod segment
1083 // between the MethodProfiled and MethodNonProfiled segments.
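// dist1/dist2 below bound the distance from either end of the code cache to the far
// end of the NonNMethod heap, so the returned value is an upper bound on the distance
// from any nmethod to the non-nmethod stubs.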
1084 size_t dist1 = (size_t)blob->high_boundary() - (size_t)_low_bound;
1085 size_t dist2 = (size_t)_high_bound - (size_t)blob->low_boundary();
1086 return dist1 > dist2 ? dist1 : dist2;
1087 }
1088 }
1089
1090 // Returns the reverse free ratio. E.g., if 25% (1/4) of the code cache
1091 // is free, reverse_free_ratio() returns 4.
1092 // Since the code heap for each type of code blob can spill over into the next
1093 // type of code heap, return the reverse free ratio for the entire
1094 // code cache.
1095 double CodeCache::reverse_free_ratio() {
1096 double unallocated = MAX2((double)unallocated_capacity(), 1.0); // Avoid division by 0;
1097 double max = (double)max_capacity();
1098 double result = max / unallocated;
1099 assert (max >= unallocated, "Must be");
1100 assert (result >= 1.0, "reverse_free_ratio must be at least 1. It is %f", result);
1101 return result;
1102 }
1103
1104 size_t CodeCache::bytes_allocated_in_freelists() {
1105 size_t allocated_bytes = 0;
1210 AnyObj::RESOURCE_AREA, mtInternal,
1211 &DependencySignature::hash,
1212 &DependencySignature::equals> DepTable;
1213
1214 DepTable* table = new DepTable();
1215
1216 // Iterate over live nmethods and check dependencies of all nmethods that are not
1217 // marked for deoptimization. A particular dependency is only checked once.
1218 NMethodIterator iter(NMethodIterator::not_unloading);
1219 while(iter.next()) {
1220 nmethod* nm = iter.method();
1221 // Only check nmethods that are not marked for deoptimization
1222 if (!nm->is_marked_for_deoptimization()) {
1223 for (Dependencies::DepStream deps(nm); deps.next(); ) {
1224 // Construct abstraction of a dependency.
1225 DependencySignature* current_sig = new DependencySignature(deps);
1226
1227 // Determine if dependency is already checked. table->put(...) returns
1228 // 'true' if the dependency is added (i.e., was not in the hashtable).
1229 if (table->put(*current_sig, 1)) {
1230 Klass* witness = deps.check_dependency();
1231 if (witness != nullptr) {
1232 // Dependency checking failed. Print out information about the failed
1233 // dependency and finally fail with an assert. We can fail here, since
1234 // dependency checking is never done in a product build.
1235 deps.print_dependency(tty, witness, true);
1236 changes.print();
1237 nm->print();
1238 nm->print_dependencies_on(tty);
1239 assert(false, "Should have been marked for deoptimization");
1240 }
1241 }
1242 }
1243 }
1244 }
1245 }
1246 #endif
1247
1248 void CodeCache::mark_for_deoptimization(DeoptimizationScope* deopt_scope, KlassDepChange& changes) {
1249 MutexLocker mu(CodeCache_lock, Mutex::_no_safepoint_check_flag);
1250
1251 // Search the class hierarchy looking for nmethods which are affected by the loading of this class,
1252
1253 // then search the interfaces this class implements looking for nmethods
1254 // which might be dependent on the fact that an interface only had one
1255 // implementor.
1256 // nmethod::check_all_dependencies works correctly only if no safepoint
1257 // can happen.
1258 NoSafepointVerifier nsv;
1259 for (DepChange::ContextStream str(changes, nsv); str.next(); ) {
1260 InstanceKlass* d = str.klass();
1261 {
1262 LogStreamHandle(Trace, dependencies) log;
1263 if (log.is_enabled()) {
1264 log.print("Processing context ");
1265 d->name()->print_value_on(&log);
1266 }
1267 }
1268 d->mark_dependent_nmethods(deopt_scope, changes);
1269 }
1270
1271 #ifndef PRODUCT
1272 if (VerifyDependencies) {
1273 // Object pointers are used as unique identifiers for dependency arguments. This
1274 // is only possible if no safepoint, i.e., GC occurs during the verification code.
1275 dependentCheckTime.start();
1276 check_live_nmethods_dependencies(changes);
1277 dependentCheckTime.stop();
1278 }
1279 #endif
1280 }
1281
1282 #if INCLUDE_JVMTI
1283 // RedefineClasses support for saving nmethods that are dependent on "old" methods.
1284 // We don't really expect this table to grow very large. If it does, it can become a hashtable.
1285 static GrowableArray<nmethod*>* old_nmethod_table = nullptr;
1286
1287 static void add_to_old_table(nmethod* c) {
1506 }
1507 PRAGMA_DIAG_POP
1508
1509 void CodeCache::print_memory_overhead() {
1510 size_t wasted_bytes = 0;
1511 FOR_ALL_ALLOCABLE_HEAPS(heap) {
1512 CodeHeap* curr_heap = *heap;
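// Each CodeBlob is preceded by its HeapBlock header; the difference between the
// segment-granular block length and the blob's actual size is wasted space (fragmentation).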
1513 for (CodeBlob* cb = (CodeBlob*)curr_heap->first(); cb != nullptr; cb = (CodeBlob*)curr_heap->next(cb)) {
1514 HeapBlock* heap_block = ((HeapBlock*)cb) - 1;
1515 wasted_bytes += heap_block->length() * CodeCacheSegmentSize - cb->size();
1516 }
1517 }
1518 // Print bytes that are allocated in the freelist
1519 ttyLocker ttl;
1520 tty->print_cr("Number of elements in freelist: %zd", freelists_length());
1521 tty->print_cr("Allocated in freelist: %zdkB", bytes_allocated_in_freelists()/K);
1522 tty->print_cr("Unused bytes in CodeBlobs: %zdkB", (wasted_bytes/K));
1523 tty->print_cr("Segment map size: %zdkB", allocated_segments()/K); // 1 byte per segment
1524 }
1525
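// Print one row of nmethod statistics: the total count, how many of those are not
// entrant, and how many are flagged as used, together with the used percentage.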
1526 static void print_helper1(outputStream* st, const char* prefix, int total, int not_entrant, int used) {
1527 if (total > 0) {
1528 double ratio = (100.0 * used) / total;
1529 st->print("%s %3d nmethods: %3d not_entrant, %d used (%2.1f%%)", prefix, total, not_entrant, used, ratio);
1530 }
1531 }
1532
1533 void CodeCache::print_nmethod_statistics_on(outputStream* st) {
1534 int stats [2][6][3][2] = {0};
1535 int stats_used[2][6][3][2] = {0};
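// Index layout for both arrays (matching the idx1..idx4 computation below):
// [aot? 1 : 0][comp_level (+1 if preloaded)][0=in_use, 1=not_entrant, 2=other][osr? 1 : 0]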
1536
1537 int total_osr = 0;
1538 int total_entrant = 0;
1539 int total_non_entrant = 0;
1540 int total_other = 0;
1541 int total_used = 0;
1542
1543 NMethodIterator iter(NMethodIterator::all);
1544 while (iter.next()) {
1545 nmethod* nm = iter.method();
1546 if (nm->is_in_use()) {
1547 ++total_entrant;
1548 } else if (nm->is_not_entrant()) {
1549 ++total_non_entrant;
1550 } else {
1551 ++total_other;
1552 }
1553 if (nm->is_osr_method()) {
1554 ++total_osr;
1555 }
1556 if (nm->used()) {
1557 ++total_used;
1558 }
1559 assert(!nm->preloaded() || nm->comp_level() == CompLevel_full_optimization, "");
1560
1561 int idx1 = nm->is_aot() ? 1 : 0;
1562 int idx2 = nm->comp_level() + (nm->preloaded() ? 1 : 0);
1563 int idx3 = (nm->is_in_use() ? 0 :
1564 (nm->is_not_entrant() ? 1 :
1565 2));
1566 int idx4 = (nm->is_osr_method() ? 1 : 0);
1567 stats[idx1][idx2][idx3][idx4] += 1;
1568 if (nm->used()) {
1569 stats_used[idx1][idx2][idx3][idx4] += 1;
1570 }
1571 }
1572
1573 st->print("Total: %d methods (%d entrant / %d not_entrant; osr: %d ",
1574 total_entrant + total_non_entrant + total_other,
1575 total_entrant, total_non_entrant, total_osr);
1576 if (total_other > 0) {
1577 st->print("; %d other", total_other);
1578 }
1579 st->print_cr(")");
1580
1581 for (int i = CompLevel_simple; i <= CompLevel_full_optimization; i++) {
1582 int total_normal = stats[0][i][0][0] + stats[0][i][1][0] + stats[0][i][2][0];
1583 int total_osr = stats[0][i][0][1] + stats[0][i][1][1] + stats[0][i][2][1];
1584 if (total_normal + total_osr > 0) {
1585 st->print(" Tier%d:", i);
1586 print_helper1(st, "", total_normal, stats[0][i][1][0], stats_used[0][i][0][0] + stats_used[0][i][1][0]);
1587 print_helper1(st, "; osr:", total_osr, stats[0][i][1][1], stats_used[0][i][0][1] + stats_used[0][i][1][1]);
1588 st->cr();
1589 }
1590 }
1591 st->cr();
1592 for (int i = CompLevel_simple; i <= CompLevel_full_optimization + 1; i++) {
1593 int total_normal = stats[1][i][0][0] + stats[1][i][1][0] + stats[1][i][2][0];
1594 int total_osr = stats[1][i][0][1] + stats[1][i][1][1] + stats[1][i][2][1];
1595 assert(total_osr == 0, "sanity");
1596 if (total_normal + total_osr > 0) {
1597 st->print(" AOT Code T%d:", i);
1598 print_helper1(st, "", total_normal, stats[1][i][1][0], stats_used[1][i][0][0] + stats_used[1][i][1][0]);
1599 print_helper1(st, "; osr:", total_osr, stats[1][i][1][1], stats_used[1][i][0][1] + stats_used[1][i][1][1]);
1600 st->cr();
1601 }
1602 }
1603 }
1604
1605 //------------------------------------------------------------------------------------------------
1606 // Non-product version
1607
1608 #ifndef PRODUCT
1609
1610 void CodeCache::print_trace(const char* event, CodeBlob* cb, uint size) {
1611 if (PrintCodeCache2) { // Need to add a new flag
1612 ResourceMark rm;
1613 if (size == 0) {
1614 int s = cb->size();
1615 assert(s >= 0, "CodeBlob size is negative: %d", s);
1616 size = (uint) s;
1617 }
1618 tty->print_cr("CodeCache %s: addr: " INTPTR_FORMAT ", size: 0x%x", event, p2i(cb), size);
1619 }
1620 }
1621
1622 void CodeCache::print_internals() {
1623 int nmethodCount = 0;
1624 int runtimeStubCount = 0;
1625 int upcallStubCount = 0;
1626 int adapterCount = 0;
1627 int mhAdapterCount = 0;
1628 int vtableBlobCount = 0;
1629 int deoptimizationStubCount = 0;
1630 int uncommonTrapStubCount = 0;
1631 int exceptionStubCount = 0;
1632 int safepointStubCount = 0;
1633 int bufferBlobCount = 0;
1634 int total = 0;
1635 int nmethodNotEntrant = 0;
1636 int nmethodJava = 0;
1637 int nmethodNative = 0;
1638 int max_nm_size = 0;
1639 ResourceMark rm;
1640
1641 int i = 0;
1642 FOR_ALL_ALLOCABLE_HEAPS(heap) {
1643 int heap_total = 0;
1644 tty->print_cr("-- %s --", (*heap)->name());
1645 FOR_ALL_BLOBS(cb, *heap) {
1646 total++;
1647 heap_total++;
1648 if (cb->is_nmethod()) {
1649 nmethod* nm = (nmethod*)cb;
1650
1651 tty->print("%4d: ", heap_total);
1652 CompileTask::print(tty, nm, (nm->is_not_entrant() ? "non-entrant" : ""), true, true);
1653
1654 nmethodCount++;
1655
1656 if(nm->is_not_entrant()) { nmethodNotEntrant++; }
1657 if(nm->method() != nullptr && nm->is_native_method()) { nmethodNative++; }
1658
1659 if(nm->method() != nullptr && nm->is_java_method()) {
1660 nmethodJava++;
1661 max_nm_size = MAX2(max_nm_size, nm->size());
1662 }
1663 } else if (cb->is_runtime_stub()) {
1664 runtimeStubCount++;
1665 } else if (cb->is_upcall_stub()) {
1666 upcallStubCount++;
1667 } else if (cb->is_deoptimization_stub()) {
1668 deoptimizationStubCount++;
1669 } else if (cb->is_uncommon_trap_stub()) {
1670 uncommonTrapStubCount++;
1671 } else if (cb->is_exception_stub()) {
1672 exceptionStubCount++;
1829 FOR_ALL_BLOBS(cb, *heap) {
1830 number_of_blobs++;
1831 code_size += cb->code_size();
1832 ImmutableOopMapSet* set = cb->oop_maps();
1833 if (set != nullptr) {
1834 number_of_oop_maps += set->count();
1835 map_size += set->nr_of_bytes();
1836 }
1837 }
1838 }
1839 tty->print_cr("OopMaps");
1840 tty->print_cr(" #blobs = %d", number_of_blobs);
1841 tty->print_cr(" code size = %d", code_size);
1842 tty->print_cr(" #oop_maps = %d", number_of_oop_maps);
1843 tty->print_cr(" map size = %d", map_size);
1844 }
1845
1846 #endif // !PRODUCT
1847 }
1848
1849 void CodeCache::print_nmethods_on(outputStream* st) {
1850 ResourceMark rm;
1851 int i = 0;
1852 FOR_ALL_ALLOCABLE_HEAPS(heap) {
1853 st->print_cr("-- %s --", (*heap)->name());
1854 FOR_ALL_BLOBS(cb, *heap) {
1855 i++;
1856 if (cb->is_nmethod()) {
1857 nmethod* nm = (nmethod*)cb;
1858 st->print("%4d: ", i);
1859 CompileTask::print(st, nm, nullptr, true, false);
1860
1861 const char non_entrant_char = (nm->is_not_entrant() ? 'N' : ' ');
1862 st->print_cr(" %c", non_entrant_char);
1863 }
1864 }
1865 }
1866 }
1867
1868 void CodeCache::print_summary(outputStream* st, bool detailed) {
1869 int full_count = 0;
1870 julong total_used = 0;
1871 julong total_max_used = 0;
1872 julong total_free = 0;
1873 julong total_size = 0;
1874 FOR_ALL_HEAPS(heap_iterator) {
1875 CodeHeap* heap = (*heap_iterator);
1876 size_t total = (heap->high_boundary() - heap->low_boundary());
1877 if (_heaps->length() >= 1) {
1878 st->print("%s:", heap->name());
1879 } else {
1880 st->print("CodeCache:");
1881 }
1882 size_t size = total/K;
1883 size_t used = (total - heap->unallocated_capacity())/K;
1884 size_t max_used = heap->max_allocated_capacity()/K;
1885 size_t free = heap->unallocated_capacity()/K;
1886 total_size += size;
1887 total_used += used;
|