6 * under the terms of the GNU General Public License version 2 only, as
7 * published by the Free Software Foundation.
8 *
9 * This code is distributed in the hope that it will be useful, but WITHOUT
10 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
11 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
12 * version 2 for more details (a copy is included in the LICENSE file that
13 * accompanied this code).
14 *
15 * You should have received a copy of the GNU General Public License version
16 * 2 along with this work; if not, write to the Free Software Foundation,
17 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
18 *
19 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
20 * or visit www.oracle.com if you need additional information or have any
21 * questions.
22 *
23 */
24
25 #include "precompiled.hpp"
26 #include "code/codeBlob.hpp"
27 #include "code/codeCache.hpp"
28 #include "code/codeHeapState.hpp"
29 #include "code/compiledIC.hpp"
30 #include "code/dependencies.hpp"
31 #include "code/dependencyContext.hpp"
32 #include "code/nmethod.hpp"
33 #include "code/pcDesc.hpp"
34 #include "compiler/compilationPolicy.hpp"
35 #include "compiler/compileBroker.hpp"
36 #include "compiler/compilerDefinitions.inline.hpp"
37 #include "compiler/oopMap.hpp"
38 #include "gc/shared/barrierSetNMethod.hpp"
39 #include "gc/shared/classUnloadingContext.hpp"
40 #include "gc/shared/collectedHeap.hpp"
41 #include "jfr/jfrEvents.hpp"
42 #include "jvm_io.h"
43 #include "logging/log.hpp"
44 #include "logging/logStream.hpp"
45 #include "memory/allocation.inline.hpp"
46 #include "memory/iterator.hpp"
47 #include "memory/resourceArea.hpp"
48 #include "memory/universe.hpp"
49 #include "oops/method.inline.hpp"
50 #include "oops/objArrayOop.hpp"
51 #include "oops/oop.inline.hpp"
52 #include "oops/verifyOopClosure.hpp"
53 #include "runtime/arguments.hpp"
153 scopes_data_size += nm->scopes_data_size();
154 scopes_pcs_size += nm->scopes_pcs_size();
155 } else {
156 code_size += cb->code_size();
157 }
158 }
159 };
160
161 // Iterate over all CodeHeaps
162 #define FOR_ALL_HEAPS(heap) for (GrowableArrayIterator<CodeHeap*> heap = _heaps->begin(); heap != _heaps->end(); ++heap)
163 #define FOR_ALL_ALLOCABLE_HEAPS(heap) for (GrowableArrayIterator<CodeHeap*> heap = _allocable_heaps->begin(); heap != _allocable_heaps->end(); ++heap)
164
165 // Iterate over all CodeBlobs (cb) on the given CodeHeap
166 #define FOR_ALL_BLOBS(cb, heap) for (CodeBlob* cb = first_blob(heap); cb != nullptr; cb = next_blob(heap, cb))
167
168 address CodeCache::_low_bound = nullptr;
169 address CodeCache::_high_bound = nullptr;
170 volatile int CodeCache::_number_of_nmethods_with_dependencies = 0;
171 ExceptionCache* volatile CodeCache::_exception_cache_purge_list = nullptr;
172
173 // Initialize arrays of CodeHeap subsets
174 GrowableArray<CodeHeap*>* CodeCache::_heaps = new(mtCode) GrowableArray<CodeHeap*> (static_cast<int>(CodeBlobType::All), mtCode);
175 GrowableArray<CodeHeap*>* CodeCache::_nmethod_heaps = new(mtCode) GrowableArray<CodeHeap*> (static_cast<int>(CodeBlobType::All), mtCode);
176 GrowableArray<CodeHeap*>* CodeCache::_allocable_heaps = new(mtCode) GrowableArray<CodeHeap*> (static_cast<int>(CodeBlobType::All), mtCode);
177
178 static void check_min_size(const char* codeheap, size_t size, size_t required_size) {
179 if (size < required_size) {
180 log_debug(codecache)("Code heap (%s) size " SIZE_FORMAT "K below required minimal size " SIZE_FORMAT "K",
181 codeheap, size/K, required_size/K);
182 err_msg title("Not enough space in %s to run VM", codeheap);
183 err_msg message(SIZE_FORMAT "K < " SIZE_FORMAT "K", size/K, required_size/K);
184 vm_exit_during_initialization(title, message);
185 }
186 }
187
188 struct CodeHeapInfo {
189 size_t size;
190 bool set;
191 bool enabled;
192 };
193
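// Give an unset code heap whatever remains after used_size is subtracted from available_size, but never less than min_size.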
194 static void set_size_of_unset_code_heap(CodeHeapInfo* heap, size_t available_size, size_t used_size, size_t min_size) {
195 assert(!heap->set, "sanity");
196 heap->size = (available_size > (used_size + min_size)) ? (available_size - used_size) : min_size;
197 }
198
199 void CodeCache::initialize_heaps() {
200
201 CodeHeapInfo non_nmethod = {NonNMethodCodeHeapSize, FLAG_IS_CMDLINE(NonNMethodCodeHeapSize), true};
202 CodeHeapInfo profiled = {ProfiledCodeHeapSize, FLAG_IS_CMDLINE(ProfiledCodeHeapSize), true};
203 CodeHeapInfo non_profiled = {NonProfiledCodeHeapSize, FLAG_IS_CMDLINE(NonProfiledCodeHeapSize), true};
204
205 const bool cache_size_set = FLAG_IS_CMDLINE(ReservedCodeCacheSize);
206 const size_t ps = page_size(false, 8);
207 const size_t min_size = MAX2(os::vm_allocation_granularity(), ps);
208 const size_t min_cache_size = CompilerConfig::min_code_cache_size(); // Make sure we have enough space for VM internal code
209 size_t cache_size = align_up(ReservedCodeCacheSize, min_size);
210
211 // Prerequisites
212 if (!heap_available(CodeBlobType::MethodProfiled)) {
213   // For compatibility reasons, disabling tiered compilation overrides the
214   // segment sizes even if they were set explicitly.
215 non_profiled.size += profiled.size;
217   // The profiled code heap is not available, so forcibly set its size to 0.
217 profiled.size = 0;
218 profiled.set = true;
219 profiled.enabled = false;
220 }
221
222 assert(heap_available(CodeBlobType::MethodNonProfiled), "MethodNonProfiled heap is always available for segmented code heap");
223
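// Scratch code buffers for the compiler threads live in the non-nmethod heap; account for them below if its size was not set explicitly.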
224 size_t compiler_buffer_size = 0;
225 COMPILER1_PRESENT(compiler_buffer_size += CompilationPolicy::c1_count() * Compiler::code_buffer_size());
226 COMPILER2_PRESENT(compiler_buffer_size += CompilationPolicy::c2_count() * C2Compiler::initial_code_buffer_size());
227
228 if (!non_nmethod.set) {
229 non_nmethod.size += compiler_buffer_size;
230 // Further down, just before FLAG_SET_ERGO(), all segment sizes are
231 // aligned down to the next lower multiple of min_size. For large page
232 // sizes, this may result in (non_nmethod.size == 0) which is not acceptable.
233 // Therefore, force non_nmethod.size to at least min_size.
234 non_nmethod.size = MAX2(non_nmethod.size, min_size);
235 }
236
237 if (!profiled.set && !non_profiled.set) {
238 non_profiled.size = profiled.size = (cache_size > non_nmethod.size + 2 * min_size) ?
239 (cache_size - non_nmethod.size) / 2 : min_size;
240 }
241
242 if (profiled.set && !non_profiled.set) {
243 set_size_of_unset_code_heap(&non_profiled, cache_size, non_nmethod.size + profiled.size, min_size);
244 }
245
246 if (!profiled.set && non_profiled.set) {
301 if (ps < lg_ps) {
302 log_warning(codecache)("Code cache size too small for " PROPERFMT " pages. "
303 "Reverting to smaller page size (" PROPERFMT ").",
304 PROPERFMTARGS(lg_ps), PROPERFMTARGS(ps));
305 }
306 }
307
308 // Note: if large page support is enabled, min_size is at least the large
309 // page size. This ensures that the code cache is covered by large pages.
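// Hand the alignment remainders of non_nmethod and profiled to non_profiled before aligning all three down, so the cut-off fractions are not simply lost.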
310 non_profiled.size += non_nmethod.size & alignment_mask(min_size);
311 non_profiled.size += profiled.size & alignment_mask(min_size);
312 non_nmethod.size = align_down(non_nmethod.size, min_size);
313 profiled.size = align_down(profiled.size, min_size);
314 non_profiled.size = align_down(non_profiled.size, min_size);
315
316 FLAG_SET_ERGO(NonNMethodCodeHeapSize, non_nmethod.size);
317 FLAG_SET_ERGO(ProfiledCodeHeapSize, profiled.size);
318 FLAG_SET_ERGO(NonProfiledCodeHeapSize, non_profiled.size);
319 FLAG_SET_ERGO(ReservedCodeCacheSize, cache_size);
320
321 ReservedCodeSpace rs = reserve_heap_memory(cache_size, ps);
322
323 // Register CodeHeaps with LSan as we sometimes embed pointers to malloc memory.
324 LSAN_REGISTER_ROOT_REGION(rs.base(), rs.size());
325
326 size_t offset = 0;
327 if (profiled.enabled) {
328 ReservedSpace profiled_space = rs.partition(offset, profiled.size);
329 offset += profiled.size;
330 // Tier 2 and tier 3 (profiled) methods
331 add_heap(profiled_space, "CodeHeap 'profiled nmethods'", CodeBlobType::MethodProfiled);
332 }
333
334 ReservedSpace non_method_space = rs.partition(offset, non_nmethod.size);
335 offset += non_nmethod.size;
336 // Non-nmethods (stubs, adapters, ...)
337 add_heap(non_method_space, "CodeHeap 'non-nmethods'", CodeBlobType::NonNMethod);
338
339 if (non_profiled.enabled) {
340 ReservedSpace non_profiled_space = rs.partition(offset, non_profiled.size);
341 // Tier 1 and tier 4 (non-profiled) methods and native methods
342 add_heap(non_profiled_space, "CodeHeap 'non-profiled nmethods'", CodeBlobType::MethodNonProfiled);
343 }
344 }
345
346 size_t CodeCache::page_size(bool aligned, size_t min_pages) {
347 return aligned ? os::page_size_for_region_aligned(ReservedCodeCacheSize, min_pages) :
348 os::page_size_for_region_unaligned(ReservedCodeCacheSize, min_pages);
349 }
350
351 ReservedCodeSpace CodeCache::reserve_heap_memory(size_t size, size_t rs_ps) {
352 // Align and reserve space for code cache
353 const size_t rs_align = MAX2(rs_ps, os::vm_allocation_granularity());
354 const size_t rs_size = align_up(size, rs_align);
355 ReservedCodeSpace rs(rs_size, rs_align, rs_ps);
356 if (!rs.is_reserved()) {
357 vm_exit_during_initialization(err_msg("Could not reserve enough space for code cache (" SIZE_FORMAT "K)",
358 rs_size/K));
359 }
360
361 // Initialize bounds
362 _low_bound = (address)rs.base();
363 _high_bound = _low_bound + rs.size();
364 return rs;
365 }
1190 AnyObj::RESOURCE_AREA, mtInternal,
1191 &DependencySignature::hash,
1192 &DependencySignature::equals> DepTable;
1193
1194 DepTable* table = new DepTable();
1195
1196 // Iterate over live nmethods and check dependencies of all nmethods that are not
1197 // marked for deoptimization. A particular dependency is only checked once.
1198 NMethodIterator iter(NMethodIterator::not_unloading);
1199   while (iter.next()) {
1200 nmethod* nm = iter.method();
1201 // Only notify for live nmethods
1202 if (!nm->is_marked_for_deoptimization()) {
1203 for (Dependencies::DepStream deps(nm); deps.next(); ) {
1204 // Construct abstraction of a dependency.
1205 DependencySignature* current_sig = new DependencySignature(deps);
1206
1207       // Determine if the dependency has already been checked. table->put(...) returns
1208       // 'true' if the dependency was added (i.e., was not yet in the hashtable).
1209 if (table->put(*current_sig, 1)) {
1210 if (deps.check_dependency() != nullptr) {
1211 // Dependency checking failed. Print out information about the failed
1212 // dependency and finally fail with an assert. We can fail here, since
1213 // dependency checking is never done in a product build.
1214 tty->print_cr("Failed dependency:");
1215 changes.print();
1216 nm->print();
1217 nm->print_dependencies_on(tty);
1218 assert(false, "Should have been marked for deoptimization");
1219 }
1220 }
1221 }
1222 }
1223 }
1224 }
1225 #endif
1226
1227 void CodeCache::mark_for_deoptimization(DeoptimizationScope* deopt_scope, KlassDepChange& changes) {
1228 MutexLocker mu(CodeCache_lock, Mutex::_no_safepoint_check_flag);
1229
1230   // Search the class hierarchy looking for nmethods which are affected by the loading of this class,
1231
1232   // then search the interfaces this class implements looking for nmethods
1233   // which might depend on the fact that an interface previously had only one
1234   // implementor.
1235   // nmethod::check_all_dependencies works correctly only if no safepoint
1236   // can happen.
1237 NoSafepointVerifier nsv;
1238 for (DepChange::ContextStream str(changes, nsv); str.next(); ) {
1239 InstanceKlass* d = str.klass();
1240 d->mark_dependent_nmethods(deopt_scope, changes);
1241 }
1242
1243 #ifndef PRODUCT
1244 if (VerifyDependencies) {
1245 // Object pointers are used as unique identifiers for dependency arguments. This
1246 // is only possible if no safepoint, i.e., GC occurs during the verification code.
1247 dependentCheckTime.start();
1248 check_live_nmethods_dependencies(changes);
1249 dependentCheckTime.stop();
1250 }
1251 #endif
1252 }
1253
1254 #if INCLUDE_JVMTI
1255 // RedefineClasses support for saving nmethods that are dependent on "old" methods.
1256 // We don't really expect this table to grow very large. If it does, it can become a hashtable.
1257 static GrowableArray<nmethod*>* old_nmethod_table = nullptr;
1258
1259 static void add_to_old_table(nmethod* c) {
1478 }
1479 PRAGMA_DIAG_POP
1480
1481 void CodeCache::print_memory_overhead() {
1482 size_t wasted_bytes = 0;
1483 FOR_ALL_ALLOCABLE_HEAPS(heap) {
1484 CodeHeap* curr_heap = *heap;
1485 for (CodeBlob* cb = (CodeBlob*)curr_heap->first(); cb != nullptr; cb = (CodeBlob*)curr_heap->next(cb)) {
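// The CodeHeap's HeapBlock header sits immediately before each CodeBlob; its length() is in segments, so the difference to cb->size() is the allocation slack.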
1486 HeapBlock* heap_block = ((HeapBlock*)cb) - 1;
1487 wasted_bytes += heap_block->length() * CodeCacheSegmentSize - cb->size();
1488 }
1489 }
1490 // Print bytes that are allocated in the freelist
1491 ttyLocker ttl;
1492 tty->print_cr("Number of elements in freelist: " SSIZE_FORMAT, freelists_length());
1493 tty->print_cr("Allocated in freelist: " SSIZE_FORMAT "kB", bytes_allocated_in_freelists()/K);
1494 tty->print_cr("Unused bytes in CodeBlobs: " SSIZE_FORMAT "kB", (wasted_bytes/K));
1495 tty->print_cr("Segment map size: " SSIZE_FORMAT "kB", allocated_segments()/K); // 1 byte per segment
1496 }
1497
1498 //------------------------------------------------------------------------------------------------
1499 // Non-product version
1500
1501 #ifndef PRODUCT
1502
1503 void CodeCache::print_trace(const char* event, CodeBlob* cb, uint size) {
1504 if (PrintCodeCache2) { // Need to add a new flag
1505 ResourceMark rm;
1506 if (size == 0) {
1507 int s = cb->size();
1508 assert(s >= 0, "CodeBlob size is negative: %d", s);
1509 size = (uint) s;
1510 }
1511 tty->print_cr("CodeCache %s: addr: " INTPTR_FORMAT ", size: 0x%x", event, p2i(cb), size);
1512 }
1513 }
1514
1515 void CodeCache::print_internals() {
1516 int nmethodCount = 0;
1517 int runtimeStubCount = 0;
1518 int adapterCount = 0;
1519 int deoptimizationStubCount = 0;
1520 int uncommonTrapStubCount = 0;
1521 int bufferBlobCount = 0;
1522 int total = 0;
1523 int nmethodNotEntrant = 0;
1524 int nmethodJava = 0;
1525 int nmethodNative = 0;
1526 int max_nm_size = 0;
1527 ResourceMark rm;
1528
1529 int i = 0;
1530 FOR_ALL_ALLOCABLE_HEAPS(heap) {
1531 if ((_nmethod_heaps->length() >= 1) && Verbose) {
1532 tty->print_cr("-- %s --", (*heap)->name());
1533 }
1534 FOR_ALL_BLOBS(cb, *heap) {
1535 total++;
1536 if (cb->is_nmethod()) {
1537 nmethod* nm = (nmethod*)cb;
1538
1539 if (Verbose && nm->method() != nullptr) {
1540 ResourceMark rm;
1541 char *method_name = nm->method()->name_and_sig_as_C_string();
1542 tty->print("%s", method_name);
1543           if (nm->is_not_entrant()) { tty->print_cr(" not-entrant"); }
1544 }
1545
1546 nmethodCount++;
1547
1548         if (nm->is_not_entrant()) { nmethodNotEntrant++; }
1549         if (nm->method() != nullptr && nm->is_native_method()) { nmethodNative++; }
1550
1551         if (nm->method() != nullptr && nm->is_java_method()) {
1552 nmethodJava++;
1553 max_nm_size = MAX2(max_nm_size, nm->size());
1554 }
1555 } else if (cb->is_runtime_stub()) {
1556 runtimeStubCount++;
1557 } else if (cb->is_deoptimization_stub()) {
1558 deoptimizationStubCount++;
1559 } else if (cb->is_uncommon_trap_stub()) {
1560 uncommonTrapStubCount++;
1561 } else if (cb->is_adapter_blob()) {
1562 adapterCount++;
1563 } else if (cb->is_buffer_blob()) {
1564 bufferBlobCount++;
1686 FOR_ALL_BLOBS(cb, *heap) {
1687 number_of_blobs++;
1688 code_size += cb->code_size();
1689 ImmutableOopMapSet* set = cb->oop_maps();
1690 if (set != nullptr) {
1691 number_of_oop_maps += set->count();
1692 map_size += set->nr_of_bytes();
1693 }
1694 }
1695 }
1696 tty->print_cr("OopMaps");
1697 tty->print_cr(" #blobs = %d", number_of_blobs);
1698 tty->print_cr(" code size = %d", code_size);
1699 tty->print_cr(" #oop_maps = %d", number_of_oop_maps);
1700 tty->print_cr(" map size = %d", map_size);
1701 }
1702
1703 #endif // !PRODUCT
1704 }
1705
1706 void CodeCache::print_summary(outputStream* st, bool detailed) {
1707 int full_count = 0;
1708 julong total_used = 0;
1709 julong total_max_used = 0;
1710 julong total_free = 0;
1711 julong total_size = 0;
1712 FOR_ALL_HEAPS(heap_iterator) {
1713 CodeHeap* heap = (*heap_iterator);
1714 size_t total = (heap->high_boundary() - heap->low_boundary());
1715 if (_heaps->length() >= 1) {
1716 st->print("%s:", heap->name());
1717 } else {
1718 st->print("CodeCache:");
1719 }
1720 size_t size = total/K;
1721 size_t used = (total - heap->unallocated_capacity())/K;
1722 size_t max_used = heap->max_allocated_capacity()/K;
1723 size_t free = heap->unallocated_capacity()/K;
1724 total_size += size;
1725 total_used += used;
6 * under the terms of the GNU General Public License version 2 only, as
7 * published by the Free Software Foundation.
8 *
9 * This code is distributed in the hope that it will be useful, but WITHOUT
10 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
11 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
12 * version 2 for more details (a copy is included in the LICENSE file that
13 * accompanied this code).
14 *
15 * You should have received a copy of the GNU General Public License version
16 * 2 along with this work; if not, write to the Free Software Foundation,
17 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
18 *
19 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
20 * or visit www.oracle.com if you need additional information or have any
21 * questions.
22 *
23 */
24
25 #include "precompiled.hpp"
26 #include "cds/cdsAccess.hpp"
27 #include "code/codeBlob.hpp"
28 #include "code/codeCache.hpp"
29 #include "code/codeHeapState.hpp"
30 #include "code/compiledIC.hpp"
31 #include "code/dependencies.hpp"
32 #include "code/dependencyContext.hpp"
33 #include "code/nmethod.hpp"
34 #include "code/pcDesc.hpp"
35 #include "code/SCCache.hpp"
36 #include "compiler/compilationPolicy.hpp"
37 #include "compiler/compileBroker.hpp"
38 #include "compiler/compilerDefinitions.inline.hpp"
39 #include "compiler/oopMap.hpp"
40 #include "gc/shared/barrierSetNMethod.hpp"
41 #include "gc/shared/classUnloadingContext.hpp"
42 #include "gc/shared/collectedHeap.hpp"
43 #include "jfr/jfrEvents.hpp"
44 #include "jvm_io.h"
45 #include "logging/log.hpp"
46 #include "logging/logStream.hpp"
47 #include "memory/allocation.inline.hpp"
48 #include "memory/iterator.hpp"
49 #include "memory/resourceArea.hpp"
50 #include "memory/universe.hpp"
51 #include "oops/method.inline.hpp"
52 #include "oops/objArrayOop.hpp"
53 #include "oops/oop.inline.hpp"
54 #include "oops/verifyOopClosure.hpp"
55 #include "runtime/arguments.hpp"
155 scopes_data_size += nm->scopes_data_size();
156 scopes_pcs_size += nm->scopes_pcs_size();
157 } else {
158 code_size += cb->code_size();
159 }
160 }
161 };
162
163 // Iterate over all CodeHeaps
164 #define FOR_ALL_HEAPS(heap) for (GrowableArrayIterator<CodeHeap*> heap = _heaps->begin(); heap != _heaps->end(); ++heap)
165 #define FOR_ALL_ALLOCABLE_HEAPS(heap) for (GrowableArrayIterator<CodeHeap*> heap = _allocable_heaps->begin(); heap != _allocable_heaps->end(); ++heap)
166
167 // Iterate over all CodeBlobs (cb) on the given CodeHeap
168 #define FOR_ALL_BLOBS(cb, heap) for (CodeBlob* cb = first_blob(heap); cb != nullptr; cb = next_blob(heap, cb))
169
170 address CodeCache::_low_bound = nullptr;
171 address CodeCache::_high_bound = nullptr;
172 volatile int CodeCache::_number_of_nmethods_with_dependencies = 0;
173 ExceptionCache* volatile CodeCache::_exception_cache_purge_list = nullptr;
174
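// Portion of the code cache reservation set aside for code loaded from the CDS archive (see map_cached_code() below).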
175 static ReservedSpace _cds_code_space;
176
177 // Initialize arrays of CodeHeap subsets
178 GrowableArray<CodeHeap*>* CodeCache::_heaps = new(mtCode) GrowableArray<CodeHeap*> (static_cast<int>(CodeBlobType::All), mtCode);
179 GrowableArray<CodeHeap*>* CodeCache::_nmethod_heaps = new(mtCode) GrowableArray<CodeHeap*> (static_cast<int>(CodeBlobType::All), mtCode);
180 GrowableArray<CodeHeap*>* CodeCache::_allocable_heaps = new(mtCode) GrowableArray<CodeHeap*> (static_cast<int>(CodeBlobType::All), mtCode);
181
182 static void check_min_size(const char* codeheap, size_t size, size_t required_size) {
183 if (size < required_size) {
184 log_debug(codecache)("Code heap (%s) size " SIZE_FORMAT "K below required minimal size " SIZE_FORMAT "K",
185 codeheap, size/K, required_size/K);
186 err_msg title("Not enough space in %s to run VM", codeheap);
187 err_msg message(SIZE_FORMAT "K < " SIZE_FORMAT "K", size/K, required_size/K);
188 vm_exit_during_initialization(title, message);
189 }
190 }
191
192 struct CodeHeapInfo {
193 size_t size;
194 bool set;
195 bool enabled;
196 };
197
198 static void set_size_of_unset_code_heap(CodeHeapInfo* heap, size_t available_size, size_t used_size, size_t min_size) {
199 assert(!heap->set, "sanity");
200 heap->size = (available_size > (used_size + min_size)) ? (available_size - used_size) : min_size;
201 }
202
203 void CodeCache::initialize_heaps() {
204 CodeHeapInfo non_nmethod = {NonNMethodCodeHeapSize, FLAG_IS_CMDLINE(NonNMethodCodeHeapSize), true};
205 CodeHeapInfo profiled = {ProfiledCodeHeapSize, FLAG_IS_CMDLINE(ProfiledCodeHeapSize), true};
206 CodeHeapInfo non_profiled = {NonProfiledCodeHeapSize, FLAG_IS_CMDLINE(NonProfiledCodeHeapSize), true};
207
208 const bool cache_size_set = FLAG_IS_CMDLINE(ReservedCodeCacheSize);
209 const size_t ps = page_size(false, 8);
210 const size_t min_size = MAX2(os::vm_allocation_granularity(), ps);
211 const size_t min_cache_size = CompilerConfig::min_code_cache_size(); // Make sure we have enough space for VM internal code
212 size_t cache_size = align_up(ReservedCodeCacheSize, min_size);
213
214 // Prerequisites
215 if (!heap_available(CodeBlobType::MethodProfiled)) {
216   // For compatibility reasons, disabling tiered compilation overrides the
217   // segment sizes even if they were set explicitly.
218 non_profiled.size += profiled.size;
219   // The profiled code heap is not available, so forcibly set its size to 0.
220 profiled.size = 0;
221 profiled.set = true;
222 profiled.enabled = false;
223 }
224
225 assert(heap_available(CodeBlobType::MethodNonProfiled), "MethodNonProfiled heap is always available for segmented code heap");
226
227 size_t compiler_buffer_size = 0;
228 COMPILER1_PRESENT(compiler_buffer_size += CompilationPolicy::c1_count() * Compiler::code_buffer_size());
229 COMPILER2_PRESENT(compiler_buffer_size += CompilationPolicy::c2_count() * C2Compiler::initial_code_buffer_size());
230 COMPILER2_PRESENT(compiler_buffer_size += (CompilationPolicy::c2_count() + CompilationPolicy::c3_count()) * C2Compiler::initial_code_buffer_size());
231
232 if (!non_nmethod.set) {
233 non_nmethod.size += compiler_buffer_size;
234 // Further down, just before FLAG_SET_ERGO(), all segment sizes are
235 // aligned down to the next lower multiple of min_size. For large page
236 // sizes, this may result in (non_nmethod.size == 0) which is not acceptable.
237 // Therefore, force non_nmethod.size to at least min_size.
238 non_nmethod.size = MAX2(non_nmethod.size, min_size);
239 }
240
241 if (!profiled.set && !non_profiled.set) {
242 non_profiled.size = profiled.size = (cache_size > non_nmethod.size + 2 * min_size) ?
243 (cache_size - non_nmethod.size) / 2 : min_size;
244 }
245
246 if (profiled.set && !non_profiled.set) {
247 set_size_of_unset_code_heap(&non_profiled, cache_size, non_nmethod.size + profiled.size, min_size);
248 }
249
250 if (!profiled.set && non_profiled.set) {
305 if (ps < lg_ps) {
306 log_warning(codecache)("Code cache size too small for " PROPERFMT " pages. "
307 "Reverting to smaller page size (" PROPERFMT ").",
308 PROPERFMTARGS(lg_ps), PROPERFMTARGS(ps));
309 }
310 }
311
312 // Note: if large page support is enabled, min_size is at least the large
313 // page size. This ensures that the code cache is covered by large pages.
314 non_profiled.size += non_nmethod.size & alignment_mask(min_size);
315 non_profiled.size += profiled.size & alignment_mask(min_size);
316 non_nmethod.size = align_down(non_nmethod.size, min_size);
317 profiled.size = align_down(profiled.size, min_size);
318 non_profiled.size = align_down(non_profiled.size, min_size);
319
320 FLAG_SET_ERGO(NonNMethodCodeHeapSize, non_nmethod.size);
321 FLAG_SET_ERGO(ProfiledCodeHeapSize, profiled.size);
322 FLAG_SET_ERGO(NonProfiledCodeHeapSize, non_profiled.size);
323 FLAG_SET_ERGO(ReservedCodeCacheSize, cache_size);
324
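// Reserve the cached (CDS) code space on top of ReservedCodeCacheSize so the regular code heaps keep their full sizes.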
325 const size_t cds_code_size = align_up(CDSAccess::get_cached_code_size(), min_size);
326 cache_size += cds_code_size;
327
328 ReservedCodeSpace rs = reserve_heap_memory(cache_size, ps);
329
330 // Register CodeHeaps with LSan as we sometimes embed pointers to malloc memory.
331 LSAN_REGISTER_ROOT_REGION(rs.base(), rs.size());
332
333 size_t offset = 0;
334 if (cds_code_size > 0) {
335 // FIXME: use CodeHeapInfo for this hack ...
336 _cds_code_space = rs.partition(offset, cds_code_size);
337 offset += cds_code_size;
338 }
339
340 if (profiled.enabled) {
341 ReservedSpace profiled_space = rs.partition(offset, profiled.size);
342 offset += profiled.size;
343 // Tier 2 and tier 3 (profiled) methods
344 add_heap(profiled_space, "CodeHeap 'profiled nmethods'", CodeBlobType::MethodProfiled);
345 }
346
347 ReservedSpace non_method_space = rs.partition(offset, non_nmethod.size);
348 offset += non_nmethod.size;
349 // Non-nmethods (stubs, adapters, ...)
350 add_heap(non_method_space, "CodeHeap 'non-nmethods'", CodeBlobType::NonNMethod);
351
352 if (non_profiled.enabled) {
353 ReservedSpace non_profiled_space = rs.partition(offset, non_profiled.size);
354 // Tier 1 and tier 4 (non-profiled) methods and native methods
355 add_heap(non_profiled_space, "CodeHeap 'non-profiled nmethods'", CodeBlobType::MethodNonProfiled);
356 }
357 }
358
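// Map the archived code into the pre-reserved _cds_code_space; returns its base address, or nullptr if there is nothing to map or mapping fails.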
359 void* CodeCache::map_cached_code() {
360 if (_cds_code_space.size() > 0 && CDSAccess::map_cached_code(_cds_code_space)) {
361 return _cds_code_space.base();
362 } else {
363 return nullptr;
364 }
365 }
366
367 size_t CodeCache::page_size(bool aligned, size_t min_pages) {
368 return aligned ? os::page_size_for_region_aligned(ReservedCodeCacheSize, min_pages) :
369 os::page_size_for_region_unaligned(ReservedCodeCacheSize, min_pages);
370 }
371
372 ReservedCodeSpace CodeCache::reserve_heap_memory(size_t size, size_t rs_ps) {
373 // Align and reserve space for code cache
374 const size_t rs_align = MAX2(rs_ps, os::vm_allocation_granularity());
375 const size_t rs_size = align_up(size, rs_align);
376 ReservedCodeSpace rs(rs_size, rs_align, rs_ps);
377 if (!rs.is_reserved()) {
378 vm_exit_during_initialization(err_msg("Could not reserve enough space for code cache (" SIZE_FORMAT "K)",
379 rs_size/K));
380 }
381
382 // Initialize bounds
383 _low_bound = (address)rs.base();
384 _high_bound = _low_bound + rs.size();
385 return rs;
386 }
1211 AnyObj::RESOURCE_AREA, mtInternal,
1212 &DependencySignature::hash,
1213 &DependencySignature::equals> DepTable;
1214
1215 DepTable* table = new DepTable();
1216
1217 // Iterate over live nmethods and check dependencies of all nmethods that are not
1218 // marked for deoptimization. A particular dependency is only checked once.
1219 NMethodIterator iter(NMethodIterator::not_unloading);
1220   while (iter.next()) {
1221 nmethod* nm = iter.method();
1222 // Only notify for live nmethods
1223 if (!nm->is_marked_for_deoptimization()) {
1224 for (Dependencies::DepStream deps(nm); deps.next(); ) {
1225 // Construct abstraction of a dependency.
1226 DependencySignature* current_sig = new DependencySignature(deps);
1227
1228       // Determine if the dependency has already been checked. table->put(...) returns
1229       // 'true' if the dependency was added (i.e., was not yet in the hashtable).
1230 if (table->put(*current_sig, 1)) {
1231 Klass* witness = deps.check_dependency();
1232 if (witness != nullptr) {
1233 // Dependency checking failed. Print out information about the failed
1234 // dependency and finally fail with an assert. We can fail here, since
1235 // dependency checking is never done in a product build.
1236 deps.print_dependency(tty, witness, true);
1237 changes.print();
1238 nm->print();
1239 nm->print_dependencies_on(tty);
1240 assert(false, "Should have been marked for deoptimization");
1241 }
1242 }
1243 }
1244 }
1245 }
1246 }
1247 #endif
1248
1249 void CodeCache::mark_for_deoptimization(DeoptimizationScope* deopt_scope, KlassDepChange& changes) {
1250 MutexLocker mu(CodeCache_lock, Mutex::_no_safepoint_check_flag);
1251
1252   // Search the class hierarchy looking for nmethods which are affected by the loading of this class,
1253
1254   // then search the interfaces this class implements looking for nmethods
1255   // which might depend on the fact that an interface previously had only one
1256   // implementor.
1257   // nmethod::check_all_dependencies works correctly only if no safepoint
1258   // can happen.
1259 NoSafepointVerifier nsv;
1260 for (DepChange::ContextStream str(changes, nsv); str.next(); ) {
1261 InstanceKlass* d = str.klass();
1262 {
1263 LogStreamHandle(Trace, dependencies) log;
1264 if (log.is_enabled()) {
1265 log.print("Processing context ");
1266 d->name()->print_value_on(&log);
1267 }
1268 }
1269 d->mark_dependent_nmethods(deopt_scope, changes);
1270 }
1271
1272 #ifndef PRODUCT
1273 if (VerifyDependencies) {
1274 // Object pointers are used as unique identifiers for dependency arguments. This
1275 // is only possible if no safepoint, i.e., GC occurs during the verification code.
1276 dependentCheckTime.start();
1277 check_live_nmethods_dependencies(changes);
1278 dependentCheckTime.stop();
1279 }
1280 #endif
1281 }
1282
1283 #if INCLUDE_JVMTI
1284 // RedefineClasses support for saving nmethods that are dependent on "old" methods.
1285 // We don't really expect this table to grow very large. If it does, it can become a hashtable.
1286 static GrowableArray<nmethod*>* old_nmethod_table = nullptr;
1287
1288 static void add_to_old_table(nmethod* c) {
1507 }
1508 PRAGMA_DIAG_POP
1509
1510 void CodeCache::print_memory_overhead() {
1511 size_t wasted_bytes = 0;
1512 FOR_ALL_ALLOCABLE_HEAPS(heap) {
1513 CodeHeap* curr_heap = *heap;
1514 for (CodeBlob* cb = (CodeBlob*)curr_heap->first(); cb != nullptr; cb = (CodeBlob*)curr_heap->next(cb)) {
1515 HeapBlock* heap_block = ((HeapBlock*)cb) - 1;
1516 wasted_bytes += heap_block->length() * CodeCacheSegmentSize - cb->size();
1517 }
1518 }
1519 // Print bytes that are allocated in the freelist
1520 ttyLocker ttl;
1521 tty->print_cr("Number of elements in freelist: " SSIZE_FORMAT, freelists_length());
1522 tty->print_cr("Allocated in freelist: " SSIZE_FORMAT "kB", bytes_allocated_in_freelists()/K);
1523 tty->print_cr("Unused bytes in CodeBlobs: " SSIZE_FORMAT "kB", (wasted_bytes/K));
1524 tty->print_cr("Segment map size: " SSIZE_FORMAT "kB", allocated_segments()/K); // 1 byte per segment
1525 }
1526
1527 static void print_helper1(outputStream* st, const char* prefix, int total, int not_entrant, int used) {
1528 if (total > 0) {
1529 double ratio = (100.0 * used) / total;
1530 st->print("%s %3d nmethods: %3d not_entrant, %d used (%2.1f%%)", prefix, total, not_entrant, used, ratio);
1531 }
1532 }
1533
1534 void CodeCache::print_nmethod_statistics_on(outputStream* st) {
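// stats/stats_used dimensions: [JIT vs SCC][comp level, +1 if preloaded][in_use / not_entrant / other][normal / OSR]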
1535 int stats [2][6][3][2] = {0};
1536 int stats_used[2][6][3][2] = {0};
1537
1538 int total_osr = 0;
1539 int total_entrant = 0;
1540 int total_non_entrant = 0;
1541 int total_other = 0;
1542 int total_used = 0;
1543
1544 NMethodIterator iter(NMethodIterator::all);
1545 while (iter.next()) {
1546 nmethod* nm = iter.method();
1547 if (nm->is_in_use()) {
1548 ++total_entrant;
1549 } else if (nm->is_not_entrant()) {
1550 ++total_non_entrant;
1551 } else {
1552 ++total_other;
1553 }
1554 if (nm->is_osr_method()) {
1555 ++total_osr;
1556 }
1557 if (nm->used()) {
1558 ++total_used;
1559 }
1560     assert(!nm->preloaded() || nm->comp_level() == CompLevel_full_optimization, "preloaded nmethods must be compiled at CompLevel_full_optimization");
1561
1562 int idx1 = nm->is_scc() ? 1 : 0;
1563 int idx2 = nm->comp_level() + (nm->preloaded() ? 1 : 0);
1564 int idx3 = (nm->is_in_use() ? 0 :
1565 (nm->is_not_entrant() ? 1 :
1566 2));
1567 int idx4 = (nm->is_osr_method() ? 1 : 0);
1568 stats[idx1][idx2][idx3][idx4] += 1;
1569 if (nm->used()) {
1570 stats_used[idx1][idx2][idx3][idx4] += 1;
1571 }
1572 }
1573
1574 st->print("Total: %d methods (%d entrant / %d not_entrant; osr: %d ",
1575 total_entrant + total_non_entrant + total_other,
1576 total_entrant, total_non_entrant, total_osr);
1577 if (total_other > 0) {
1578 st->print("; %d other", total_other);
1579 }
1580 st->print_cr(")");
1581
1582 for (int i = CompLevel_simple; i <= CompLevel_full_optimization; i++) {
1583 int total_normal = stats[0][i][0][0] + stats[0][i][1][0] + stats[0][i][2][0];
1584 int total_osr = stats[0][i][0][1] + stats[0][i][1][1] + stats[0][i][2][1];
1585 if (total_normal + total_osr > 0) {
1586 st->print(" Tier%d:", i);
1587 print_helper1(st, "", total_normal, stats[0][i][1][0], stats_used[0][i][0][0] + stats_used[0][i][1][0]);
1588 print_helper1(st, "; osr:", total_osr, stats[0][i][1][1], stats_used[0][i][0][1] + stats_used[0][i][1][1]);
1589 st->cr();
1590 }
1591 }
1592 st->cr();
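// SCC (cached) code gets one extra level: preloaded nmethods are counted at CompLevel_full_optimization + 1.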
1593 for (int i = CompLevel_simple; i <= CompLevel_full_optimization + 1; i++) {
1594 int total_normal = stats[1][i][0][0] + stats[1][i][1][0] + stats[1][i][2][0];
1595 int total_osr = stats[1][i][0][1] + stats[1][i][1][1] + stats[1][i][2][1];
1596 assert(total_osr == 0, "sanity");
1597 if (total_normal + total_osr > 0) {
1598 st->print(" SC T%d:", i);
1599 print_helper1(st, "", total_normal, stats[1][i][1][0], stats_used[1][i][0][0] + stats_used[1][i][1][0]);
1600 print_helper1(st, "; osr:", total_osr, stats[1][i][1][1], stats_used[1][i][0][1] + stats_used[1][i][1][1]);
1601 st->cr();
1602 }
1603 }
1604 }
1605
1606 //------------------------------------------------------------------------------------------------
1607 // Non-product version
1608
1609 #ifndef PRODUCT
1610
1611 void CodeCache::print_trace(const char* event, CodeBlob* cb, uint size) {
1612 if (PrintCodeCache2) { // Need to add a new flag
1613 ResourceMark rm;
1614 if (size == 0) {
1615 int s = cb->size();
1616 assert(s >= 0, "CodeBlob size is negative: %d", s);
1617 size = (uint) s;
1618 }
1619 tty->print_cr("CodeCache %s: addr: " INTPTR_FORMAT ", size: 0x%x", event, p2i(cb), size);
1620 }
1621 }
1622
1623 void CodeCache::print_internals() {
1624 int nmethodCount = 0;
1625 int runtimeStubCount = 0;
1626 int adapterCount = 0;
1627 int deoptimizationStubCount = 0;
1628 int uncommonTrapStubCount = 0;
1629 int bufferBlobCount = 0;
1630 int total = 0;
1631 int nmethodNotEntrant = 0;
1632 int nmethodJava = 0;
1633 int nmethodNative = 0;
1634 int max_nm_size = 0;
1635 ResourceMark rm;
1636
1637 int i = 0;
1638 FOR_ALL_ALLOCABLE_HEAPS(heap) {
1639 int heap_total = 0;
1640 tty->print_cr("-- %s --", (*heap)->name());
1641 FOR_ALL_BLOBS(cb, *heap) {
1642 total++;
1643 heap_total++;
1644 if (cb->is_nmethod()) {
1645 nmethod* nm = (nmethod*)cb;
1646
1647 tty->print("%4d: ", heap_total);
1648 CompileTask::print(tty, nm, (nm->is_not_entrant() ? "non-entrant" : ""), true, true);
1649
1650 nmethodCount++;
1651
1652         if (nm->is_not_entrant()) { nmethodNotEntrant++; }
1653         if (nm->method() != nullptr && nm->is_native_method()) { nmethodNative++; }
1654
1655         if (nm->method() != nullptr && nm->is_java_method()) {
1656 nmethodJava++;
1657 max_nm_size = MAX2(max_nm_size, nm->size());
1658 }
1659 } else if (cb->is_runtime_stub()) {
1660 runtimeStubCount++;
1661 } else if (cb->is_deoptimization_stub()) {
1662 deoptimizationStubCount++;
1663 } else if (cb->is_uncommon_trap_stub()) {
1664 uncommonTrapStubCount++;
1665 } else if (cb->is_adapter_blob()) {
1666 adapterCount++;
1667 } else if (cb->is_buffer_blob()) {
1668 bufferBlobCount++;
1790 FOR_ALL_BLOBS(cb, *heap) {
1791 number_of_blobs++;
1792 code_size += cb->code_size();
1793 ImmutableOopMapSet* set = cb->oop_maps();
1794 if (set != nullptr) {
1795 number_of_oop_maps += set->count();
1796 map_size += set->nr_of_bytes();
1797 }
1798 }
1799 }
1800 tty->print_cr("OopMaps");
1801 tty->print_cr(" #blobs = %d", number_of_blobs);
1802 tty->print_cr(" code size = %d", code_size);
1803 tty->print_cr(" #oop_maps = %d", number_of_oop_maps);
1804 tty->print_cr(" map size = %d", map_size);
1805 }
1806
1807 #endif // !PRODUCT
1808 }
1809
1810 void CodeCache::print_nmethods_on(outputStream* st) {
1811 ResourceMark rm;
1812 int i = 0;
1813 FOR_ALL_ALLOCABLE_HEAPS(heap) {
1814 st->print_cr("-- %s --", (*heap)->name());
1815 FOR_ALL_BLOBS(cb, *heap) {
1816 i++;
1817 if (cb->is_nmethod()) {
1818 nmethod* nm = (nmethod*)cb;
1819 st->print("%4d: ", i);
1820 CompileTask::print(st, nm, nullptr, true, false);
1821
1822 const char non_entrant_char = (nm->is_not_entrant() ? 'N' : ' ');
1823 st->print_cr(" %c", non_entrant_char);
1824 }
1825 }
1826 }
1827 }
1828
1829 void CodeCache::print_summary(outputStream* st, bool detailed) {
1830 int full_count = 0;
1831 julong total_used = 0;
1832 julong total_max_used = 0;
1833 julong total_free = 0;
1834 julong total_size = 0;
1835 FOR_ALL_HEAPS(heap_iterator) {
1836 CodeHeap* heap = (*heap_iterator);
1837 size_t total = (heap->high_boundary() - heap->low_boundary());
1838 if (_heaps->length() >= 1) {
1839 st->print("%s:", heap->name());
1840 } else {
1841 st->print("CodeCache:");
1842 }
1843 size_t size = total/K;
1844 size_t used = (total - heap->unallocated_capacity())/K;
1845 size_t max_used = heap->max_allocated_capacity()/K;
1846 size_t free = heap->unallocated_capacity()/K;
1847 total_size += size;
1848 total_used += used;
|