6 * under the terms of the GNU General Public License version 2 only, as
7 * published by the Free Software Foundation.
8 *
9 * This code is distributed in the hope that it will be useful, but WITHOUT
10 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
11 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
12 * version 2 for more details (a copy is included in the LICENSE file that
13 * accompanied this code).
14 *
15 * You should have received a copy of the GNU General Public License version
16 * 2 along with this work; if not, write to the Free Software Foundation,
17 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
18 *
19 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
20 * or visit www.oracle.com if you need additional information or have any
21 * questions.
22 *
23 */
24
25 #include "precompiled.hpp"
26 #include "code/codeBlob.hpp"
27 #include "code/codeCache.hpp"
28 #include "code/codeHeapState.hpp"
29 #include "code/compiledIC.hpp"
30 #include "code/dependencies.hpp"
31 #include "code/dependencyContext.hpp"
32 #include "code/nmethod.hpp"
33 #include "code/pcDesc.hpp"
34 #include "compiler/compilationPolicy.hpp"
35 #include "compiler/compileBroker.hpp"
36 #include "compiler/compilerDefinitions.inline.hpp"
37 #include "compiler/oopMap.hpp"
38 #include "gc/shared/barrierSetNMethod.hpp"
39 #include "gc/shared/classUnloadingContext.hpp"
40 #include "gc/shared/collectedHeap.hpp"
41 #include "jfr/jfrEvents.hpp"
42 #include "jvm_io.h"
43 #include "logging/log.hpp"
44 #include "logging/logStream.hpp"
45 #include "memory/allocation.inline.hpp"
46 #include "memory/iterator.hpp"
47 #include "memory/memoryReserver.hpp"
48 #include "memory/resourceArea.hpp"
49 #include "memory/universe.hpp"
50 #include "oops/method.inline.hpp"
51 #include "oops/objArrayOop.hpp"
52 #include "oops/oop.inline.hpp"
53 #include "oops/verifyOopClosure.hpp"
154 scopes_data_size += nm->scopes_data_size();
155 scopes_pcs_size += nm->scopes_pcs_size();
156 } else {
157 code_size += cb->code_size();
158 }
159 }
160 };
161
162 // Iterate over all CodeHeaps
163 #define FOR_ALL_HEAPS(heap) for (GrowableArrayIterator<CodeHeap*> heap = _heaps->begin(); heap != _heaps->end(); ++heap)
164 #define FOR_ALL_ALLOCABLE_HEAPS(heap) for (GrowableArrayIterator<CodeHeap*> heap = _allocable_heaps->begin(); heap != _allocable_heaps->end(); ++heap)
165
166 // Iterate over all CodeBlobs (cb) on the given CodeHeap
167 #define FOR_ALL_BLOBS(cb, heap) for (CodeBlob* cb = first_blob(heap); cb != nullptr; cb = next_blob(heap, cb))
168
169 address CodeCache::_low_bound = nullptr;
170 address CodeCache::_high_bound = nullptr;
171 volatile int CodeCache::_number_of_nmethods_with_dependencies = 0;
172 ExceptionCache* volatile CodeCache::_exception_cache_purge_list = nullptr;
173
174 // Initialize arrays of CodeHeap subsets
175 GrowableArray<CodeHeap*>* CodeCache::_heaps = new(mtCode) GrowableArray<CodeHeap*> (static_cast<int>(CodeBlobType::All), mtCode);
176 GrowableArray<CodeHeap*>* CodeCache::_nmethod_heaps = new(mtCode) GrowableArray<CodeHeap*> (static_cast<int>(CodeBlobType::All), mtCode);
177 GrowableArray<CodeHeap*>* CodeCache::_allocable_heaps = new(mtCode) GrowableArray<CodeHeap*> (static_cast<int>(CodeBlobType::All), mtCode);
178
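// Exit during VM initialization if the given code heap is smaller than the minimum size it needs.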
179 static void check_min_size(const char* codeheap, size_t size, size_t required_size) {
180 if (size < required_size) {
181 log_debug(codecache)("Code heap (%s) size " SIZE_FORMAT "K below required minimal size " SIZE_FORMAT "K",
182 codeheap, size/K, required_size/K);
183 err_msg title("Not enough space in %s to run VM", codeheap);
184 err_msg message(SIZE_FORMAT "K < " SIZE_FORMAT "K", size/K, required_size/K);
185 vm_exit_during_initialization(title, message);
186 }
187 }
188
189 struct CodeHeapInfo {
190 size_t size;
191 bool set;
192 bool enabled;
193 };
194
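// Size a code heap whose size was not set on the command line: give it whatever remains of
// available_size after used_size, but never less than min_size.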
195 static void set_size_of_unset_code_heap(CodeHeapInfo* heap, size_t available_size, size_t used_size, size_t min_size) {
196 assert(!heap->set, "sanity");
197 heap->size = (available_size > (used_size + min_size)) ? (available_size - used_size) : min_size;
198 }
199
200 void CodeCache::initialize_heaps() {
201
202 CodeHeapInfo non_nmethod = {NonNMethodCodeHeapSize, FLAG_IS_CMDLINE(NonNMethodCodeHeapSize), true};
203 CodeHeapInfo profiled = {ProfiledCodeHeapSize, FLAG_IS_CMDLINE(ProfiledCodeHeapSize), true};
204 CodeHeapInfo non_profiled = {NonProfiledCodeHeapSize, FLAG_IS_CMDLINE(NonProfiledCodeHeapSize), true};
205
206 const bool cache_size_set = FLAG_IS_CMDLINE(ReservedCodeCacheSize);
207 const size_t ps = page_size(false, 8);
208 const size_t min_size = MAX2(os::vm_allocation_granularity(), ps);
209 const size_t min_cache_size = CodeCacheMinimumUseSpace DEBUG_ONLY(* 3); // Make sure we have enough space for VM internal code
210 size_t cache_size = align_up(ReservedCodeCacheSize, min_size);
211
212 // Prerequisites
213 if (!heap_available(CodeBlobType::MethodProfiled)) {
214 // For compatibility reasons, disabling tiered compilation overrides the
215 // segment sizes even if they were set explicitly.
216 non_profiled.size += profiled.size;
217 // Profiled code heap is not available, forcibly set size to 0
218 profiled.size = 0;
219 profiled.set = true;
220 profiled.enabled = false;
221 }
222
223 assert(heap_available(CodeBlobType::MethodNonProfiled), "MethodNonProfiled heap is always available for segmented code heap");
224
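// Estimate the space the compiler threads need for their code buffers; if the non-nmethod
// heap size was not set explicitly, this estimate is added to it below.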
225 size_t compiler_buffer_size = 0;
226 COMPILER1_PRESENT(compiler_buffer_size += CompilationPolicy::c1_count() * Compiler::code_buffer_size());
227 COMPILER2_PRESENT(compiler_buffer_size += CompilationPolicy::c2_count() * C2Compiler::initial_code_buffer_size());
228
229 if (!non_nmethod.set) {
230 non_nmethod.size += compiler_buffer_size;
231 // Further down, just before FLAG_SET_ERGO(), all segment sizes are
232 // aligned down to the next lower multiple of min_size. For large page
233 // sizes, this may result in (non_nmethod.size == 0) which is not acceptable.
234 // Therefore, force non_nmethod.size to at least min_size.
235 non_nmethod.size = MAX2(non_nmethod.size, min_size);
236 }
237
238 if (!profiled.set && !non_profiled.set) {
239 non_profiled.size = profiled.size = (cache_size > non_nmethod.size + 2 * min_size) ?
240 (cache_size - non_nmethod.size) / 2 : min_size;
241 }
242
243 if (profiled.set && !non_profiled.set) {
244 set_size_of_unset_code_heap(&non_profiled, cache_size, non_nmethod.size + profiled.size, min_size);
245 }
246
247 if (!profiled.set && non_profiled.set) {
302 if (ps < lg_ps) {
303 log_warning(codecache)("Code cache size too small for " PROPERFMT " pages. "
304 "Reverting to smaller page size (" PROPERFMT ").",
305 PROPERFMTARGS(lg_ps), PROPERFMTARGS(ps));
306 }
307 }
308
309 // Note: if large page support is enabled, min_size is at least the large
310 // page size. This ensures that the code cache is covered by large pages.
311 non_profiled.size += non_nmethod.size & alignment_mask(min_size);
312 non_profiled.size += profiled.size & alignment_mask(min_size);
313 non_nmethod.size = align_down(non_nmethod.size, min_size);
314 profiled.size = align_down(profiled.size, min_size);
315 non_profiled.size = align_down(non_profiled.size, min_size);
316
317 FLAG_SET_ERGO(NonNMethodCodeHeapSize, non_nmethod.size);
318 FLAG_SET_ERGO(ProfiledCodeHeapSize, profiled.size);
319 FLAG_SET_ERGO(NonProfiledCodeHeapSize, non_profiled.size);
320 FLAG_SET_ERGO(ReservedCodeCacheSize, cache_size);
321
322 ReservedSpace rs = reserve_heap_memory(cache_size, ps);
323
324 // Register CodeHeaps with LSan as we sometimes embed pointers to malloc memory.
325 LSAN_REGISTER_ROOT_REGION(rs.base(), rs.size());
326
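// Carve the reservation into the individual heaps: profiled (if enabled), then non-nmethods,
// then non-profiled.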
327 size_t offset = 0;
328 if (profiled.enabled) {
329 ReservedSpace profiled_space = rs.partition(offset, profiled.size);
330 offset += profiled.size;
331 // Tier 2 and tier 3 (profiled) methods
332 add_heap(profiled_space, "CodeHeap 'profiled nmethods'", CodeBlobType::MethodProfiled);
333 }
334
335 ReservedSpace non_method_space = rs.partition(offset, non_nmethod.size);
336 offset += non_nmethod.size;
337 // Non-nmethods (stubs, adapters, ...)
338 add_heap(non_method_space, "CodeHeap 'non-nmethods'", CodeBlobType::NonNMethod);
339
340 if (non_profiled.enabled) {
341 ReservedSpace non_profiled_space = rs.partition(offset, non_profiled.size);
342 // Tier 1 and tier 4 (non-profiled) methods and native methods
343 add_heap(non_profiled_space, "CodeHeap 'non-profiled nmethods'", CodeBlobType::MethodNonProfiled);
344 }
345 }
346
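// Pick a page size for the code cache so that ReservedCodeCacheSize spans at least min_pages
// pages; the 'aligned' variant additionally requires the region size to be page aligned.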
347 size_t CodeCache::page_size(bool aligned, size_t min_pages) {
348 return aligned ? os::page_size_for_region_aligned(ReservedCodeCacheSize, min_pages) :
349 os::page_size_for_region_unaligned(ReservedCodeCacheSize, min_pages);
350 }
351
352 ReservedSpace CodeCache::reserve_heap_memory(size_t size, size_t rs_ps) {
353 // Align and reserve space for code cache
354 const size_t rs_align = MAX2(rs_ps, os::vm_allocation_granularity());
355 const size_t rs_size = align_up(size, rs_align);
356
357 ReservedSpace rs = CodeMemoryReserver::reserve(rs_size, rs_align, rs_ps);
358 if (!rs.is_reserved()) {
359 vm_exit_during_initialization(err_msg("Could not reserve enough space for code cache (" SIZE_FORMAT "K)",
360 rs_size/K));
361 }
362
363 // Initialize bounds
364 _low_bound = (address)rs.base();
365 _high_bound = _low_bound + rs.size();
366 return rs;
1192 AnyObj::RESOURCE_AREA, mtInternal,
1193 &DependencySignature::hash,
1194 &DependencySignature::equals> DepTable;
1195
1196 DepTable* table = new DepTable();
1197
1198 // Iterate over live nmethods and check dependencies of all nmethods that are not
1199 // marked for deoptimization. A particular dependency is only checked once.
1200 NMethodIterator iter(NMethodIterator::not_unloading);
1201 while(iter.next()) {
1202 nmethod* nm = iter.method();
1203 // Only check nmethods that are not marked for deoptimization
1204 if (!nm->is_marked_for_deoptimization()) {
1205 for (Dependencies::DepStream deps(nm); deps.next(); ) {
1206 // Construct abstraction of a dependency.
1207 DependencySignature* current_sig = new DependencySignature(deps);
1208
1209 // Determine if dependency is already checked. table->put(...) returns
1210 // 'true' if the dependency is added (i.e., was not in the hashtable).
1211 if (table->put(*current_sig, 1)) {
1212 if (deps.check_dependency() != nullptr) {
1213 // Dependency checking failed. Print out information about the failed
1214 // dependency and finally fail with an assert. We can fail here, since
1215 // dependency checking is never done in a product build.
1216 tty->print_cr("Failed dependency:");
1217 changes.print();
1218 nm->print();
1219 nm->print_dependencies_on(tty);
1220 assert(false, "Should have been marked for deoptimization");
1221 }
1222 }
1223 }
1224 }
1225 }
1226 }
1227 #endif
1228
1229 void CodeCache::mark_for_deoptimization(DeoptimizationScope* deopt_scope, KlassDepChange& changes) {
1230 MutexLocker mu(CodeCache_lock, Mutex::_no_safepoint_check_flag);
1231
1232 // search the hierarchy looking for nmethods which are affected by the loading of this class
1233
1234 // then search the interfaces this class implements looking for nmethods
1235 // which might be dependent on the fact that an interface only had one
1236 // implementor.
1237 // nmethod::check_all_dependencies works correctly only if no safepoint
1238 // can happen.
1239 NoSafepointVerifier nsv;
1240 for (DepChange::ContextStream str(changes, nsv); str.next(); ) {
1241 InstanceKlass* d = str.klass();
1242 d->mark_dependent_nmethods(deopt_scope, changes);
1243 }
1244
1245 #ifndef PRODUCT
1246 if (VerifyDependencies) {
1247 // Object pointers are used as unique identifiers for dependency arguments. This
1248 // is only possible if no safepoint (i.e., no GC) occurs while the verification code runs.
1249 dependentCheckTime.start();
1250 check_live_nmethods_dependencies(changes);
1251 dependentCheckTime.stop();
1252 }
1253 #endif
1254 }
1255
1256 #if INCLUDE_JVMTI
1257 // RedefineClasses support for saving nmethods that are dependent on "old" methods.
1258 // We don't really expect this table to grow very large. If it does, it can become a hashtable.
1259 static GrowableArray<nmethod*>* old_nmethod_table = nullptr;
1260
1261 static void add_to_old_table(nmethod* c) {
1480 }
1481 PRAGMA_DIAG_POP
1482
1483 void CodeCache::print_memory_overhead() {
1484 size_t wasted_bytes = 0;
1485 FOR_ALL_ALLOCABLE_HEAPS(heap) {
1486 CodeHeap* curr_heap = *heap;
1487 for (CodeBlob* cb = (CodeBlob*)curr_heap->first(); cb != nullptr; cb = (CodeBlob*)curr_heap->next(cb)) {
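// Each CodeBlob is preceded by a HeapBlock header whose length is counted in segments;
// the difference between the segment-rounded block size and the blob size is wasted space.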
1488 HeapBlock* heap_block = ((HeapBlock*)cb) - 1;
1489 wasted_bytes += heap_block->length() * CodeCacheSegmentSize - cb->size();
1490 }
1491 }
1492 // Print bytes that are allocated in the freelist
1493 ttyLocker ttl;
1494 tty->print_cr("Number of elements in freelist: %zd", freelists_length());
1495 tty->print_cr("Allocated in freelist: %zdkB", bytes_allocated_in_freelists()/K);
1496 tty->print_cr("Unused bytes in CodeBlobs: %zdkB", (wasted_bytes/K));
1497 tty->print_cr("Segment map size: %zdkB", allocated_segments()/K); // 1 byte per segment
1498 }
1499
1500 //------------------------------------------------------------------------------------------------
1501 // Non-product version
1502
1503 #ifndef PRODUCT
1504
1505 void CodeCache::print_trace(const char* event, CodeBlob* cb, uint size) {
1506 if (PrintCodeCache2) { // Need to add a new flag
1507 ResourceMark rm;
1508 if (size == 0) {
1509 int s = cb->size();
1510 assert(s >= 0, "CodeBlob size is negative: %d", s);
1511 size = (uint) s;
1512 }
1513 tty->print_cr("CodeCache %s: addr: " INTPTR_FORMAT ", size: 0x%x", event, p2i(cb), size);
1514 }
1515 }
1516
1517 void CodeCache::print_internals() {
1518 int nmethodCount = 0;
1519 int runtimeStubCount = 0;
1520 int adapterCount = 0;
1521 int deoptimizationStubCount = 0;
1522 int uncommonTrapStubCount = 0;
1523 int bufferBlobCount = 0;
1524 int total = 0;
1525 int nmethodNotEntrant = 0;
1526 int nmethodJava = 0;
1527 int nmethodNative = 0;
1528 int max_nm_size = 0;
1529 ResourceMark rm;
1530
1531 int i = 0;
1532 FOR_ALL_ALLOCABLE_HEAPS(heap) {
1533 if ((_nmethod_heaps->length() >= 1) && Verbose) {
1534 tty->print_cr("-- %s --", (*heap)->name());
1535 }
1536 FOR_ALL_BLOBS(cb, *heap) {
1537 total++;
1538 if (cb->is_nmethod()) {
1539 nmethod* nm = (nmethod*)cb;
1540
1541 if (Verbose && nm->method() != nullptr) {
1542 ResourceMark rm;
1543 char *method_name = nm->method()->name_and_sig_as_C_string();
1544 tty->print("%s", method_name);
1545 if(nm->is_not_entrant()) { tty->print_cr(" not-entrant"); }
1546 }
1547
1548 nmethodCount++;
1549
1550 if(nm->is_not_entrant()) { nmethodNotEntrant++; }
1551 if(nm->method() != nullptr && nm->is_native_method()) { nmethodNative++; }
1552
1553 if(nm->method() != nullptr && nm->is_java_method()) {
1554 nmethodJava++;
1555 max_nm_size = MAX2(max_nm_size, nm->size());
1556 }
1557 } else if (cb->is_runtime_stub()) {
1558 runtimeStubCount++;
1559 } else if (cb->is_deoptimization_stub()) {
1560 deoptimizationStubCount++;
1561 } else if (cb->is_uncommon_trap_stub()) {
1562 uncommonTrapStubCount++;
1563 } else if (cb->is_adapter_blob()) {
1564 adapterCount++;
1565 } else if (cb->is_buffer_blob()) {
1566 bufferBlobCount++;
1688 FOR_ALL_BLOBS(cb, *heap) {
1689 number_of_blobs++;
1690 code_size += cb->code_size();
1691 ImmutableOopMapSet* set = cb->oop_maps();
1692 if (set != nullptr) {
1693 number_of_oop_maps += set->count();
1694 map_size += set->nr_of_bytes();
1695 }
1696 }
1697 }
1698 tty->print_cr("OopMaps");
1699 tty->print_cr(" #blobs = %d", number_of_blobs);
1700 tty->print_cr(" code size = %d", code_size);
1701 tty->print_cr(" #oop_maps = %d", number_of_oop_maps);
1702 tty->print_cr(" map size = %d", map_size);
1703 }
1704
1705 #endif // !PRODUCT
1706 }
1707
1708 void CodeCache::print_summary(outputStream* st, bool detailed) {
1709 int full_count = 0;
1710 julong total_used = 0;
1711 julong total_max_used = 0;
1712 julong total_free = 0;
1713 julong total_size = 0;
1714 FOR_ALL_HEAPS(heap_iterator) {
1715 CodeHeap* heap = (*heap_iterator);
1716 size_t total = (heap->high_boundary() - heap->low_boundary());
1717 if (_heaps->length() >= 1) {
1718 st->print("%s:", heap->name());
1719 } else {
1720 st->print("CodeCache:");
1721 }
1722 size_t size = total/K;
1723 size_t used = (total - heap->unallocated_capacity())/K;
1724 size_t max_used = heap->max_allocated_capacity()/K;
1725 size_t free = heap->unallocated_capacity()/K;
1726 total_size += size;
1727 total_used += used;
6 * under the terms of the GNU General Public License version 2 only, as
7 * published by the Free Software Foundation.
8 *
9 * This code is distributed in the hope that it will be useful, but WITHOUT
10 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
11 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
12 * version 2 for more details (a copy is included in the LICENSE file that
13 * accompanied this code).
14 *
15 * You should have received a copy of the GNU General Public License version
16 * 2 along with this work; if not, write to the Free Software Foundation,
17 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
18 *
19 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
20 * or visit www.oracle.com if you need additional information or have any
21 * questions.
22 *
23 */
24
25 #include "precompiled.hpp"
26 #include "cds/cdsAccess.hpp"
27 #include "code/codeBlob.hpp"
28 #include "code/codeCache.hpp"
29 #include "code/codeHeapState.hpp"
30 #include "code/compiledIC.hpp"
31 #include "code/dependencies.hpp"
32 #include "code/dependencyContext.hpp"
33 #include "code/nmethod.hpp"
34 #include "code/pcDesc.hpp"
35 #include "code/SCCache.hpp"
36 #include "compiler/compilationPolicy.hpp"
37 #include "compiler/compileBroker.hpp"
38 #include "compiler/compilerDefinitions.inline.hpp"
39 #include "compiler/oopMap.hpp"
40 #include "gc/shared/barrierSetNMethod.hpp"
41 #include "gc/shared/classUnloadingContext.hpp"
42 #include "gc/shared/collectedHeap.hpp"
43 #include "jfr/jfrEvents.hpp"
44 #include "jvm_io.h"
45 #include "logging/log.hpp"
46 #include "logging/logStream.hpp"
47 #include "memory/allocation.inline.hpp"
48 #include "memory/iterator.hpp"
49 #include "memory/memoryReserver.hpp"
50 #include "memory/resourceArea.hpp"
51 #include "memory/universe.hpp"
52 #include "oops/method.inline.hpp"
53 #include "oops/objArrayOop.hpp"
54 #include "oops/oop.inline.hpp"
55 #include "oops/verifyOopClosure.hpp"
156 scopes_data_size += nm->scopes_data_size();
157 scopes_pcs_size += nm->scopes_pcs_size();
158 } else {
159 code_size += cb->code_size();
160 }
161 }
162 };
163
164 // Iterate over all CodeHeaps
165 #define FOR_ALL_HEAPS(heap) for (GrowableArrayIterator<CodeHeap*> heap = _heaps->begin(); heap != _heaps->end(); ++heap)
166 #define FOR_ALL_ALLOCABLE_HEAPS(heap) for (GrowableArrayIterator<CodeHeap*> heap = _allocable_heaps->begin(); heap != _allocable_heaps->end(); ++heap)
167
168 // Iterate over all CodeBlobs (cb) on the given CodeHeap
169 #define FOR_ALL_BLOBS(cb, heap) for (CodeBlob* cb = first_blob(heap); cb != nullptr; cb = next_blob(heap, cb))
170
171 address CodeCache::_low_bound = nullptr;
172 address CodeCache::_high_bound = nullptr;
173 volatile int CodeCache::_number_of_nmethods_with_dependencies = 0;
174 ExceptionCache* volatile CodeCache::_exception_cache_purge_list = nullptr;
175
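// Part of the code cache reservation set aside for code cached in the CDS archive; it is
// carved out in initialize_heaps() and mapped by map_cached_code().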
176 static ReservedSpace _cds_code_space;
177
178 // Initialize arrays of CodeHeap subsets
179 GrowableArray<CodeHeap*>* CodeCache::_heaps = new(mtCode) GrowableArray<CodeHeap*> (static_cast<int>(CodeBlobType::All), mtCode);
180 GrowableArray<CodeHeap*>* CodeCache::_nmethod_heaps = new(mtCode) GrowableArray<CodeHeap*> (static_cast<int>(CodeBlobType::All), mtCode);
181 GrowableArray<CodeHeap*>* CodeCache::_allocable_heaps = new(mtCode) GrowableArray<CodeHeap*> (static_cast<int>(CodeBlobType::All), mtCode);
182
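// Exit during VM initialization if the given code heap is smaller than the minimum size it needs.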
183 static void check_min_size(const char* codeheap, size_t size, size_t required_size) {
184 if (size < required_size) {
185 log_debug(codecache)("Code heap (%s) size " SIZE_FORMAT "K below required minimal size " SIZE_FORMAT "K",
186 codeheap, size/K, required_size/K);
187 err_msg title("Not enough space in %s to run VM", codeheap);
188 err_msg message(SIZE_FORMAT "K < " SIZE_FORMAT "K", size/K, required_size/K);
189 vm_exit_during_initialization(title, message);
190 }
191 }
192
193 struct CodeHeapInfo {
194 size_t size;
195 bool set;
196 bool enabled;
197 };
198
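// Size a code heap whose size was not set on the command line: give it whatever remains of
// available_size after used_size, but never less than min_size.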
199 static void set_size_of_unset_code_heap(CodeHeapInfo* heap, size_t available_size, size_t used_size, size_t min_size) {
200 assert(!heap->set, "sanity");
201 heap->size = (available_size > (used_size + min_size)) ? (available_size - used_size) : min_size;
202 }
203
204 void CodeCache::initialize_heaps() {
205 CodeHeapInfo non_nmethod = {NonNMethodCodeHeapSize, FLAG_IS_CMDLINE(NonNMethodCodeHeapSize), true};
206 CodeHeapInfo profiled = {ProfiledCodeHeapSize, FLAG_IS_CMDLINE(ProfiledCodeHeapSize), true};
207 CodeHeapInfo non_profiled = {NonProfiledCodeHeapSize, FLAG_IS_CMDLINE(NonProfiledCodeHeapSize), true};
208
209 const bool cache_size_set = FLAG_IS_CMDLINE(ReservedCodeCacheSize);
210 const size_t ps = page_size(false, 8);
211 const size_t min_size = MAX2(os::vm_allocation_granularity(), ps);
212 const size_t min_cache_size = CodeCacheMinimumUseSpace DEBUG_ONLY(* 3); // Make sure we have enough space for VM internal code
213 size_t cache_size = align_up(ReservedCodeCacheSize, min_size);
214
215 // Prerequisites
216 if (!heap_available(CodeBlobType::MethodProfiled)) {
217 // For compatibility reasons, disabling tiered compilation overrides the
218 // segment sizes even if they were set explicitly.
219 non_profiled.size += profiled.size;
220 // Profiled code heap is not available, forcibly set size to 0
221 profiled.size = 0;
222 profiled.set = true;
223 profiled.enabled = false;
224 }
225
226 assert(heap_available(CodeBlobType::MethodNonProfiled), "MethodNonProfiled heap is always available for segmented code heap");
227
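// Estimate the space the compiler threads need for their code buffers; if the non-nmethod
// heap size was not set explicitly, this estimate is added to it below.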
228 size_t compiler_buffer_size = 0;
229 COMPILER1_PRESENT(compiler_buffer_size += CompilationPolicy::c1_count() * Compiler::code_buffer_size());
230 COMPILER2_PRESENT(compiler_buffer_size += (CompilationPolicy::c2_count() + CompilationPolicy::c3_count()) * C2Compiler::initial_code_buffer_size());
232
233 if (!non_nmethod.set) {
234 non_nmethod.size += compiler_buffer_size;
235 // Further down, just before FLAG_SET_ERGO(), all segment sizes are
236 // aligned down to the next lower multiple of min_size. For large page
237 // sizes, this may result in (non_nmethod.size == 0) which is not acceptable.
238 // Therefore, force non_nmethod.size to at least min_size.
239 non_nmethod.size = MAX2(non_nmethod.size, min_size);
240 }
241
242 if (!profiled.set && !non_profiled.set) {
243 non_profiled.size = profiled.size = (cache_size > non_nmethod.size + 2 * min_size) ?
244 (cache_size - non_nmethod.size) / 2 : min_size;
245 }
246
247 if (profiled.set && !non_profiled.set) {
248 set_size_of_unset_code_heap(&non_profiled, cache_size, non_nmethod.size + profiled.size, min_size);
249 }
250
251 if (!profiled.set && non_profiled.set) {
306 if (ps < lg_ps) {
307 log_warning(codecache)("Code cache size too small for " PROPERFMT " pages. "
308 "Reverting to smaller page size (" PROPERFMT ").",
309 PROPERFMTARGS(lg_ps), PROPERFMTARGS(ps));
310 }
311 }
312
313 // Note: if large page support is enabled, min_size is at least the large
314 // page size. This ensures that the code cache is covered by large pages.
315 non_profiled.size += non_nmethod.size & alignment_mask(min_size);
316 non_profiled.size += profiled.size & alignment_mask(min_size);
317 non_nmethod.size = align_down(non_nmethod.size, min_size);
318 profiled.size = align_down(profiled.size, min_size);
319 non_profiled.size = align_down(non_profiled.size, min_size);
320
321 FLAG_SET_ERGO(NonNMethodCodeHeapSize, non_nmethod.size);
322 FLAG_SET_ERGO(ProfiledCodeHeapSize, profiled.size);
323 FLAG_SET_ERGO(NonProfiledCodeHeapSize, non_profiled.size);
324 FLAG_SET_ERGO(ReservedCodeCacheSize, cache_size);
325
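// Grow the reservation by the size of the archived code so it can be placed in front of
// the code heaps (see the partitioning below).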
326 const size_t cds_code_size = align_up(CDSAccess::get_cached_code_size(), min_size);
327 cache_size += cds_code_size;
328
329 ReservedSpace rs = reserve_heap_memory(cache_size, ps);
330
331 // Register CodeHeaps with LSan as we sometimes embed pointers to malloc memory.
332 LSAN_REGISTER_ROOT_REGION(rs.base(), rs.size());
333
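// Carve the reservation: cached (CDS) code first, then the profiled, non-nmethod and
// non-profiled heaps.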
334 size_t offset = 0;
335 if (cds_code_size > 0) {
336 // FIXME: use CodeHeapInfo for this hack ...
337 _cds_code_space = rs.partition(offset, cds_code_size);
338 offset += cds_code_size;
339 }
340
341 if (profiled.enabled) {
342 ReservedSpace profiled_space = rs.partition(offset, profiled.size);
343 offset += profiled.size;
344 // Tier 2 and tier 3 (profiled) methods
345 add_heap(profiled_space, "CodeHeap 'profiled nmethods'", CodeBlobType::MethodProfiled);
346 }
347
348 ReservedSpace non_method_space = rs.partition(offset, non_nmethod.size);
349 offset += non_nmethod.size;
350 // Non-nmethods (stubs, adapters, ...)
351 add_heap(non_method_space, "CodeHeap 'non-nmethods'", CodeBlobType::NonNMethod);
352
353 if (non_profiled.enabled) {
354 ReservedSpace non_profiled_space = rs.partition(offset, non_profiled.size);
355 // Tier 1 and tier 4 (non-profiled) methods and native methods
356 add_heap(non_profiled_space, "CodeHeap 'non-profiled nmethods'", CodeBlobType::MethodNonProfiled);
357 }
358 }
359
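// Map the archived code into the space reserved for it; returns the base address, or
// nullptr if there is no cached code or mapping failed.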
360 void* CodeCache::map_cached_code() {
361 if (_cds_code_space.size() > 0 && CDSAccess::map_cached_code(_cds_code_space)) {
362 return _cds_code_space.base();
363 } else {
364 return nullptr;
365 }
366 }
367
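// Pick a page size for the code cache so that ReservedCodeCacheSize spans at least min_pages
// pages; the 'aligned' variant additionally requires the region size to be page aligned.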
368 size_t CodeCache::page_size(bool aligned, size_t min_pages) {
369 return aligned ? os::page_size_for_region_aligned(ReservedCodeCacheSize, min_pages) :
370 os::page_size_for_region_unaligned(ReservedCodeCacheSize, min_pages);
371 }
372
373 ReservedSpace CodeCache::reserve_heap_memory(size_t size, size_t rs_ps) {
374 // Align and reserve space for code cache
375 const size_t rs_align = MAX2(rs_ps, os::vm_allocation_granularity());
376 const size_t rs_size = align_up(size, rs_align);
377
378 ReservedSpace rs = CodeMemoryReserver::reserve(rs_size, rs_align, rs_ps);
379 if (!rs.is_reserved()) {
380 vm_exit_during_initialization(err_msg("Could not reserve enough space for code cache (" SIZE_FORMAT "K)",
381 rs_size/K));
382 }
383
384 // Initialize bounds
385 _low_bound = (address)rs.base();
386 _high_bound = _low_bound + rs.size();
387 return rs;
1213 AnyObj::RESOURCE_AREA, mtInternal,
1214 &DependencySignature::hash,
1215 &DependencySignature::equals> DepTable;
1216
1217 DepTable* table = new DepTable();
1218
1219 // Iterate over live nmethods and check dependencies of all nmethods that are not
1220 // marked for deoptimization. A particular dependency is only checked once.
1221 NMethodIterator iter(NMethodIterator::not_unloading);
1222 while(iter.next()) {
1223 nmethod* nm = iter.method();
1224 // Only check nmethods that are not marked for deoptimization
1225 if (!nm->is_marked_for_deoptimization()) {
1226 for (Dependencies::DepStream deps(nm); deps.next(); ) {
1227 // Construct abstraction of a dependency.
1228 DependencySignature* current_sig = new DependencySignature(deps);
1229
1230 // Determine if dependency is already checked. table->put(...) returns
1231 // 'true' if the dependency is added (i.e., was not in the hashtable).
1232 if (table->put(*current_sig, 1)) {
1233 Klass* witness = deps.check_dependency();
1234 if (witness != nullptr) {
1235 // Dependency checking failed. Print out information about the failed
1236 // dependency and finally fail with an assert. We can fail here, since
1237 // dependency checking is never done in a product build.
1238 deps.print_dependency(tty, witness, true);
1239 changes.print();
1240 nm->print();
1241 nm->print_dependencies_on(tty);
1242 assert(false, "Should have been marked for deoptimization");
1243 }
1244 }
1245 }
1246 }
1247 }
1248 }
1249 #endif
1250
1251 void CodeCache::mark_for_deoptimization(DeoptimizationScope* deopt_scope, KlassDepChange& changes) {
1252 MutexLocker mu(CodeCache_lock, Mutex::_no_safepoint_check_flag);
1253
1254 // search the hierarchy looking for nmethods which are affected by the loading of this class
1255
1256 // then search the interfaces this class implements looking for nmethods
1257 // which might be dependent on the fact that an interface only had one
1258 // implementor.
1259 // nmethod::check_all_dependencies works correctly only if no safepoint
1260 // can happen.
1261 NoSafepointVerifier nsv;
1262 for (DepChange::ContextStream str(changes, nsv); str.next(); ) {
1263 InstanceKlass* d = str.klass();
1264 {
1265 LogStreamHandle(Trace, dependencies) log;
1266 if (log.is_enabled()) {
1267 log.print("Processing context ");
1268 d->name()->print_value_on(&log);
1269 }
1270 }
1271 d->mark_dependent_nmethods(deopt_scope, changes);
1272 }
1273
1274 #ifndef PRODUCT
1275 if (VerifyDependencies) {
1276 // Object pointers are used as unique identifiers for dependency arguments. This
1277 // is only possible if no safepoint (i.e., no GC) occurs while the verification code runs.
1278 dependentCheckTime.start();
1279 check_live_nmethods_dependencies(changes);
1280 dependentCheckTime.stop();
1281 }
1282 #endif
1283 }
1284
1285 #if INCLUDE_JVMTI
1286 // RedefineClasses support for saving nmethods that are dependent on "old" methods.
1287 // We don't really expect this table to grow very large. If it does, it can become a hashtable.
1288 static GrowableArray<nmethod*>* old_nmethod_table = nullptr;
1289
1290 static void add_to_old_table(nmethod* c) {
1509 }
1510 PRAGMA_DIAG_POP
1511
1512 void CodeCache::print_memory_overhead() {
1513 size_t wasted_bytes = 0;
1514 FOR_ALL_ALLOCABLE_HEAPS(heap) {
1515 CodeHeap* curr_heap = *heap;
1516 for (CodeBlob* cb = (CodeBlob*)curr_heap->first(); cb != nullptr; cb = (CodeBlob*)curr_heap->next(cb)) {
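// Each CodeBlob is preceded by a HeapBlock header whose length is counted in segments;
// the difference between the segment-rounded block size and the blob size is wasted space.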
1517 HeapBlock* heap_block = ((HeapBlock*)cb) - 1;
1518 wasted_bytes += heap_block->length() * CodeCacheSegmentSize - cb->size();
1519 }
1520 }
1521 // Print bytes that are allocated in the freelist
1522 ttyLocker ttl;
1523 tty->print_cr("Number of elements in freelist: %zd", freelists_length());
1524 tty->print_cr("Allocated in freelist: %zdkB", bytes_allocated_in_freelists()/K);
1525 tty->print_cr("Unused bytes in CodeBlobs: %zdkB", (wasted_bytes/K));
1526 tty->print_cr("Segment map size: %zdkB", allocated_segments()/K); // 1 byte per segment
1527 }
1528
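// Print one summary line for a group of nmethods; used by print_nmethod_statistics_on() below.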
1529 static void print_helper1(outputStream* st, const char* prefix, int total, int not_entrant, int used) {
1530 if (total > 0) {
1531 double ratio = (100.0 * used) / total;
1532 st->print("%s %3d nmethods: %3d not_entrant, %d used (%2.1f%%)", prefix, total, not_entrant, used, ratio);
1533 }
1534 }
1535
1536 void CodeCache::print_nmethod_statistics_on(outputStream* st) {
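// stats[a][b][c][d]: a = 1 if nm->is_scc(), 0 otherwise; b = comp level (+1 if preloaded);
// c = 0 in_use, 1 not_entrant, 2 other; d = 1 for OSR methods, 0 otherwise.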
1537 int stats [2][6][3][2] = {0};
1538 int stats_used[2][6][3][2] = {0};
1539
1540 int total_osr = 0;
1541 int total_entrant = 0;
1542 int total_non_entrant = 0;
1543 int total_other = 0;
1544 int total_used = 0;
1545
1546 NMethodIterator iter(NMethodIterator::all);
1547 while (iter.next()) {
1548 nmethod* nm = iter.method();
1549 if (nm->is_in_use()) {
1550 ++total_entrant;
1551 } else if (nm->is_not_entrant()) {
1552 ++total_non_entrant;
1553 } else {
1554 ++total_other;
1555 }
1556 if (nm->is_osr_method()) {
1557 ++total_osr;
1558 }
1559 if (nm->used()) {
1560 ++total_used;
1561 }
1562 assert(!nm->preloaded() || nm->comp_level() == CompLevel_full_optimization, "");
1563
1564 int idx1 = nm->is_scc() ? 1 : 0;
1565 int idx2 = nm->comp_level() + (nm->preloaded() ? 1 : 0);
1566 int idx3 = (nm->is_in_use() ? 0 :
1567 (nm->is_not_entrant() ? 1 :
1568 2));
1569 int idx4 = (nm->is_osr_method() ? 1 : 0);
1570 stats[idx1][idx2][idx3][idx4] += 1;
1571 if (nm->used()) {
1572 stats_used[idx1][idx2][idx3][idx4] += 1;
1573 }
1574 }
1575
1576 st->print("Total: %d methods (%d entrant / %d not_entrant; osr: %d ",
1577 total_entrant + total_non_entrant + total_other,
1578 total_entrant, total_non_entrant, total_osr);
1579 if (total_other > 0) {
1580 st->print("; %d other", total_other);
1581 }
1582 st->print_cr(")");
1583
1584 for (int i = CompLevel_simple; i <= CompLevel_full_optimization; i++) {
1585 int total_normal = stats[0][i][0][0] + stats[0][i][1][0] + stats[0][i][2][0];
1586 int total_osr = stats[0][i][0][1] + stats[0][i][1][1] + stats[0][i][2][1];
1587 if (total_normal + total_osr > 0) {
1588 st->print(" Tier%d:", i);
1589 print_helper1(st, "", total_normal, stats[0][i][1][0], stats_used[0][i][0][0] + stats_used[0][i][1][0]);
1590 print_helper1(st, "; osr:", total_osr, stats[0][i][1][1], stats_used[0][i][0][1] + stats_used[0][i][1][1]);
1591 st->cr();
1592 }
1593 }
1594 st->cr();
1595 for (int i = CompLevel_simple; i <= CompLevel_full_optimization + 1; i++) {
1596 int total_normal = stats[1][i][0][0] + stats[1][i][1][0] + stats[1][i][2][0];
1597 int total_osr = stats[1][i][0][1] + stats[1][i][1][1] + stats[1][i][2][1];
1598 assert(total_osr == 0, "sanity");
1599 if (total_normal + total_osr > 0) {
1600 st->print(" SC T%d:", i);
1601 print_helper1(st, "", total_normal, stats[1][i][1][0], stats_used[1][i][0][0] + stats_used[1][i][1][0]);
1602 print_helper1(st, "; osr:", total_osr, stats[1][i][1][1], stats_used[1][i][0][1] + stats_used[1][i][1][1]);
1603 st->cr();
1604 }
1605 }
1606 }
1607
1608 //------------------------------------------------------------------------------------------------
1609 // Non-product version
1610
1611 #ifndef PRODUCT
1612
1613 void CodeCache::print_trace(const char* event, CodeBlob* cb, uint size) {
1614 if (PrintCodeCache2) { // Need to add a new flag
1615 ResourceMark rm;
1616 if (size == 0) {
1617 int s = cb->size();
1618 assert(s >= 0, "CodeBlob size is negative: %d", s);
1619 size = (uint) s;
1620 }
1621 tty->print_cr("CodeCache %s: addr: " INTPTR_FORMAT ", size: 0x%x", event, p2i(cb), size);
1622 }
1623 }
1624
1625 void CodeCache::print_internals() {
1626 int nmethodCount = 0;
1627 int runtimeStubCount = 0;
1628 int adapterCount = 0;
1629 int deoptimizationStubCount = 0;
1630 int uncommonTrapStubCount = 0;
1631 int bufferBlobCount = 0;
1632 int total = 0;
1633 int nmethodNotEntrant = 0;
1634 int nmethodJava = 0;
1635 int nmethodNative = 0;
1636 int max_nm_size = 0;
1637 ResourceMark rm;
1638
1639 int i = 0;
1640 FOR_ALL_ALLOCABLE_HEAPS(heap) {
1641 int heap_total = 0;
1642 tty->print_cr("-- %s --", (*heap)->name());
1643 FOR_ALL_BLOBS(cb, *heap) {
1644 total++;
1645 heap_total++;
1646 if (cb->is_nmethod()) {
1647 nmethod* nm = (nmethod*)cb;
1648
1649 tty->print("%4d: ", heap_total);
1650 CompileTask::print(tty, nm, (nm->is_not_entrant() ? "non-entrant" : ""), true, true);
1651
1652 nmethodCount++;
1653
1654 if(nm->is_not_entrant()) { nmethodNotEntrant++; }
1655 if(nm->method() != nullptr && nm->is_native_method()) { nmethodNative++; }
1656
1657 if(nm->method() != nullptr && nm->is_java_method()) {
1658 nmethodJava++;
1659 max_nm_size = MAX2(max_nm_size, nm->size());
1660 }
1661 } else if (cb->is_runtime_stub()) {
1662 runtimeStubCount++;
1663 } else if (cb->is_deoptimization_stub()) {
1664 deoptimizationStubCount++;
1665 } else if (cb->is_uncommon_trap_stub()) {
1666 uncommonTrapStubCount++;
1667 } else if (cb->is_adapter_blob()) {
1668 adapterCount++;
1669 } else if (cb->is_buffer_blob()) {
1670 bufferBlobCount++;
1792 FOR_ALL_BLOBS(cb, *heap) {
1793 number_of_blobs++;
1794 code_size += cb->code_size();
1795 ImmutableOopMapSet* set = cb->oop_maps();
1796 if (set != nullptr) {
1797 number_of_oop_maps += set->count();
1798 map_size += set->nr_of_bytes();
1799 }
1800 }
1801 }
1802 tty->print_cr("OopMaps");
1803 tty->print_cr(" #blobs = %d", number_of_blobs);
1804 tty->print_cr(" code size = %d", code_size);
1805 tty->print_cr(" #oop_maps = %d", number_of_oop_maps);
1806 tty->print_cr(" map size = %d", map_size);
1807 }
1808
1809 #endif // !PRODUCT
1810 }
1811
1812 void CodeCache::print_nmethods_on(outputStream* st) {
1813 ResourceMark rm;
1814 int i = 0;
1815 FOR_ALL_ALLOCABLE_HEAPS(heap) {
1816 st->print_cr("-- %s --", (*heap)->name());
1817 FOR_ALL_BLOBS(cb, *heap) {
1818 i++;
1819 if (cb->is_nmethod()) {
1820 nmethod* nm = (nmethod*)cb;
1821 st->print("%4d: ", i);
1822 CompileTask::print(st, nm, nullptr, true, false);
1823
1824 const char non_entrant_char = (nm->is_not_entrant() ? 'N' : ' ');
1825 st->print_cr(" %c", non_entrant_char);
1826 }
1827 }
1828 }
1829 }
1830
1831 void CodeCache::print_summary(outputStream* st, bool detailed) {
1832 int full_count = 0;
1833 julong total_used = 0;
1834 julong total_max_used = 0;
1835 julong total_free = 0;
1836 julong total_size = 0;
1837 FOR_ALL_HEAPS(heap_iterator) {
1838 CodeHeap* heap = (*heap_iterator);
1839 size_t total = (heap->high_boundary() - heap->low_boundary());
1840 if (_heaps->length() >= 1) {
1841 st->print("%s:", heap->name());
1842 } else {
1843 st->print("CodeCache:");
1844 }
1845 size_t size = total/K;
1846 size_t used = (total - heap->unallocated_capacity())/K;
1847 size_t max_used = heap->max_allocated_capacity()/K;
1848 size_t free = heap->unallocated_capacity()/K;
1849 total_size += size;
1850 total_used += used;