6 * under the terms of the GNU General Public License version 2 only, as
7 * published by the Free Software Foundation.
8 *
9 * This code is distributed in the hope that it will be useful, but WITHOUT
10 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
11 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
12 * version 2 for more details (a copy is included in the LICENSE file that
13 * accompanied this code).
14 *
15 * You should have received a copy of the GNU General Public License version
16 * 2 along with this work; if not, write to the Free Software Foundation,
17 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
18 *
19 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
20 * or visit www.oracle.com if you need additional information or have any
21 * questions.
22 *
23 */
24
25 #include "precompiled.hpp"
26 #include "cds/cdsAccess.hpp"
27 #include "code/codeBlob.hpp"
28 #include "code/codeCache.hpp"
29 #include "code/codeHeapState.hpp"
30 #include "code/compiledIC.hpp"
31 #include "code/dependencies.hpp"
32 #include "code/dependencyContext.hpp"
33 #include "code/nmethod.hpp"
34 #include "code/pcDesc.hpp"
35 #include "code/SCCache.hpp"
36 #include "compiler/compilationPolicy.hpp"
37 #include "compiler/compileBroker.hpp"
38 #include "compiler/compilerDefinitions.inline.hpp"
39 #include "compiler/oopMap.hpp"
40 #include "gc/shared/barrierSetNMethod.hpp"
41 #include "gc/shared/classUnloadingContext.hpp"
42 #include "gc/shared/collectedHeap.hpp"
43 #include "jfr/jfrEvents.hpp"
44 #include "jvm_io.h"
45 #include "logging/log.hpp"
46 #include "logging/logStream.hpp"
47 #include "memory/allocation.inline.hpp"
48 #include "memory/iterator.hpp"
49 #include "memory/resourceArea.hpp"
50 #include "memory/universe.hpp"
51 #include "oops/method.inline.hpp"
52 #include "oops/objArrayOop.hpp"
53 #include "oops/oop.inline.hpp"
54 #include "oops/verifyOopClosure.hpp"
55 #include "runtime/arguments.hpp"
155 scopes_data_size += nm->scopes_data_size();
156 scopes_pcs_size += nm->scopes_pcs_size();
157 } else {
158 code_size += cb->code_size();
159 }
160 }
161 };
162
163 // Iterate over all CodeHeaps
164 #define FOR_ALL_HEAPS(heap) for (GrowableArrayIterator<CodeHeap*> heap = _heaps->begin(); heap != _heaps->end(); ++heap)
165 #define FOR_ALL_ALLOCABLE_HEAPS(heap) for (GrowableArrayIterator<CodeHeap*> heap = _allocable_heaps->begin(); heap != _allocable_heaps->end(); ++heap)
166
167 // Iterate over all CodeBlobs (cb) on the given CodeHeap
168 #define FOR_ALL_BLOBS(cb, heap) for (CodeBlob* cb = first_blob(heap); cb != nullptr; cb = next_blob(heap, cb))
169
170 address CodeCache::_low_bound = 0;
171 address CodeCache::_high_bound = 0;
172 volatile int CodeCache::_number_of_nmethods_with_dependencies = 0;
173 ExceptionCache* volatile CodeCache::_exception_cache_purge_list = nullptr;
174
175 static ReservedSpace _cds_code_space;
176
177 // Initialize arrays of CodeHeap subsets
178 GrowableArray<CodeHeap*>* CodeCache::_heaps = new(mtCode) GrowableArray<CodeHeap*> (static_cast<int>(CodeBlobType::All), mtCode);
179 GrowableArray<CodeHeap*>* CodeCache::_nmethod_heaps = new(mtCode) GrowableArray<CodeHeap*> (static_cast<int>(CodeBlobType::All), mtCode);
180 GrowableArray<CodeHeap*>* CodeCache::_allocable_heaps = new(mtCode) GrowableArray<CodeHeap*> (static_cast<int>(CodeBlobType::All), mtCode);
181
182 static void check_min_size(const char* codeheap, size_t size, size_t required_size) {
183 if (size < required_size) {
184 log_debug(codecache)("Code heap (%s) size " SIZE_FORMAT "K below required minimal size " SIZE_FORMAT "K",
185 codeheap, size/K, required_size/K);
186 err_msg title("Not enough space in %s to run VM", codeheap);
187 err_msg message(SIZE_FORMAT "K < " SIZE_FORMAT "K", size/K, required_size/K);
188 vm_exit_during_initialization(title, message);
189 }
190 }
191
192 struct CodeHeapInfo {
193 size_t size;
194 bool set;
195 bool enabled;
196 };
197
198 static void set_size_of_unset_code_heap(CodeHeapInfo* heap, size_t available_size, size_t used_size, size_t min_size) {
199 assert(!heap->set, "sanity");
200 heap->size = (available_size > (used_size + min_size)) ? (available_size - used_size) : min_size;
201 }
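// Worked example (illustrative numbers, not from the source): with available_size = 240M,
// used_size = 100M and min_size = 2M, the unset heap receives 240M - 100M = 140M. If the
// available space cannot cover used_size + min_size, the heap is clamped to min_size instead.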
202
203 void CodeCache::initialize_heaps() {
204 CodeHeapInfo non_nmethod = {NonNMethodCodeHeapSize, FLAG_IS_CMDLINE(NonNMethodCodeHeapSize), true};
205 CodeHeapInfo profiled = {ProfiledCodeHeapSize, FLAG_IS_CMDLINE(ProfiledCodeHeapSize), true};
206 CodeHeapInfo non_profiled = {NonProfiledCodeHeapSize, FLAG_IS_CMDLINE(NonProfiledCodeHeapSize), true};
207
208 const bool cache_size_set = FLAG_IS_CMDLINE(ReservedCodeCacheSize);
209 const size_t ps = page_size(false, 8);
210 const size_t min_size = MAX2(os::vm_allocation_granularity(), ps);
211 const size_t min_cache_size = CodeCacheMinimumUseSpace DEBUG_ONLY(* 3); // Make sure we have enough space for VM internal code
212 size_t cache_size = align_up(ReservedCodeCacheSize, min_size);
213
214 // Prerequisites
215 if (!heap_available(CodeBlobType::MethodProfiled)) {
216 // For compatibility reasons, disabling tiered compilation overrides the
217 // segment sizes even if they are set explicitly.
218 non_profiled.size += profiled.size;
219 // Profiled code heap is not available, forcibly set size to 0
220 profiled.size = 0;
221 profiled.set = true;
222 profiled.enabled = false;
223 }
224
225 assert(heap_available(CodeBlobType::MethodNonProfiled), "MethodNonProfiled heap is always available for segmented code heap");
226
227 size_t compiler_buffer_size = 0;
228 COMPILER1_PRESENT(compiler_buffer_size += CompilationPolicy::c1_count() * Compiler::code_buffer_size());
230 COMPILER2_PRESENT(compiler_buffer_size += (CompilationPolicy::c2_count() + CompilationPolicy::c3_count()) * C2Compiler::initial_code_buffer_size());
231
232 if (!non_nmethod.set) {
233 non_nmethod.size += compiler_buffer_size;
234 }
235
236 if (!profiled.set && !non_profiled.set) {
237 non_profiled.size = profiled.size = (cache_size > non_nmethod.size + 2 * min_size) ?
238 (cache_size - non_nmethod.size) / 2 : min_size;
239 }
240
241 if (profiled.set && !non_profiled.set) {
242 set_size_of_unset_code_heap(&non_profiled, cache_size, non_nmethod.size + profiled.size, min_size);
243 }
244
245 if (!profiled.set && non_profiled.set) {
246 set_size_of_unset_code_heap(&profiled, cache_size, non_nmethod.size + non_profiled.size, min_size);
247 }
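// Illustrative sizing (assumed numbers): with cache_size = 240M, non_nmethod.size = 8M and
// neither method heap set on the command line, profiled and non-profiled each get
// (240M - 8M) / 2 = 116M. If only one of them was set explicitly, it is left untouched and
// the other is derived from the remaining space, but never below min_size.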
248
249 // Compatibility.
250 size_t non_nmethod_min_size = min_cache_size + compiler_buffer_size;
300 if (ps < lg_ps) {
301 log_warning(codecache)("Code cache size too small for " PROPERFMT " pages. "
302 "Reverting to smaller page size (" PROPERFMT ").",
303 PROPERFMTARGS(lg_ps), PROPERFMTARGS(ps));
304 }
305 }
306
307 // Note: if large page support is enabled, min_size is at least the large
308 // page size. This ensures that the code cache is covered by large pages.
309 non_profiled.size += non_nmethod.size & alignment_mask(min_size);
310 non_profiled.size += profiled.size & alignment_mask(min_size);
311 non_nmethod.size = align_down(non_nmethod.size, min_size);
312 profiled.size = align_down(profiled.size, min_size);
313 non_profiled.size = align_down(non_profiled.size, min_size);
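// Worked example (illustrative, assuming min_size = 2M): non_nmethod = 9M and profiled = 21M
// each leave a 1M remainder; both remainders are moved into non_profiled before all three
// sizes are rounded down to the 2M boundary, so the combined size is unchanged and each
// heap stays min_size-aligned.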
314
315 FLAG_SET_ERGO(NonNMethodCodeHeapSize, non_nmethod.size);
316 FLAG_SET_ERGO(ProfiledCodeHeapSize, profiled.size);
317 FLAG_SET_ERGO(NonProfiledCodeHeapSize, non_profiled.size);
318 FLAG_SET_ERGO(ReservedCodeCacheSize, cache_size);
319
320 const size_t cds_code_size = align_up(CDSAccess::get_cached_code_size(), min_size);
321 cache_size += cds_code_size;
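// Note: from here on cache_size exceeds the ergonomically set ReservedCodeCacheSize by
// cds_code_size; the extra space is carved off at offset 0 below for the CDS cached-code region.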
322
323 ReservedCodeSpace rs = reserve_heap_memory(cache_size, ps);
324
325 // Register CodeHeaps with LSan as we sometimes embed pointers to malloc memory.
326 LSAN_REGISTER_ROOT_REGION(rs.base(), rs.size());
327
328 size_t offset = 0;
329 if (cds_code_size > 0) {
330 // FIXME: use CodeHeapInfo for this hack ...
331 _cds_code_space = rs.partition(offset, cds_code_size);
332 offset += cds_code_size;
333 }
334
335 if (profiled.enabled) {
336 ReservedSpace profiled_space = rs.partition(offset, profiled.size);
337 offset += profiled.size;
338 // Tier 2 and tier 3 (profiled) methods
339 add_heap(profiled_space, "CodeHeap 'profiled nmethods'", CodeBlobType::MethodProfiled);
340 }
341
342 ReservedSpace non_method_space = rs.partition(offset, non_nmethod.size);
343 offset += non_nmethod.size;
344 // Non-nmethods (stubs, adapters, ...)
345 add_heap(non_method_space, "CodeHeap 'non-nmethods'", CodeBlobType::NonNMethod);
346
347 if (non_profiled.enabled) {
348 ReservedSpace non_profiled_space = rs.partition(offset, non_profiled.size);
349 // Tier 1 and tier 4 (non-profiled) methods and native methods
350 add_heap(non_profiled_space, "CodeHeap 'non-profiled nmethods'", CodeBlobType::MethodNonProfiled);
351 }
352 }
353
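// Attempts to map the cached (CDS) code into the region reserved in initialize_heaps();
// returns the region's base address on success, or nullptr when no region was reserved
// or the mapping failed (contract as read from the implementation below).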
354 void* CodeCache::map_cached_code() {
355 if (_cds_code_space.size() > 0 && CDSAccess::map_cached_code(_cds_code_space)) {
356 return _cds_code_space.base();
357 } else {
358 return nullptr;
359 }
360 }
361
362 size_t CodeCache::page_size(bool aligned, size_t min_pages) {
363 return aligned ? os::page_size_for_region_aligned(ReservedCodeCacheSize, min_pages) :
364 os::page_size_for_region_unaligned(ReservedCodeCacheSize, min_pages);
365 }
366
367 ReservedCodeSpace CodeCache::reserve_heap_memory(size_t size, size_t rs_ps) {
368 // Align and reserve space for code cache
369 const size_t rs_align = MAX2(rs_ps, os::vm_allocation_granularity());
370 const size_t rs_size = align_up(size, rs_align);
371 ReservedCodeSpace rs(rs_size, rs_align, rs_ps);
372 if (!rs.is_reserved()) {
373 vm_exit_during_initialization(err_msg("Could not reserve enough space for code cache (" SIZE_FORMAT "K)",
374 rs_size/K));
375 }
376
377 // Initialize bounds
378 _low_bound = (address)rs.base();
379 _high_bound = _low_bound + rs.size();
380 return rs;
381 }
1206 AnyObj::RESOURCE_AREA, mtInternal,
1207 &DependencySignature::hash,
1208 &DependencySignature::equals> DepTable;
1209
1210 DepTable* table = new DepTable();
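// The table is resource-allocated (AnyObj::RESOURCE_AREA in the typedef above), so it is
// released with the surrounding resource area rather than deleted explicitly.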
1211
1212 // Iterate over live nmethods and check dependencies of all nmethods that are not
1213 // marked for deoptimization. A particular dependency is only checked once.
1214 NMethodIterator iter(NMethodIterator::not_unloading);
1215 while(iter.next()) {
1216 nmethod* nm = iter.method();
1217 // Only notify for live nmethods
1218 if (!nm->is_marked_for_deoptimization()) {
1219 for (Dependencies::DepStream deps(nm); deps.next(); ) {
1220 // Construct abstraction of a dependency.
1221 DependencySignature* current_sig = new DependencySignature(deps);
1222
1223 // Determine if dependency is already checked. table->put(...) returns
1224 // 'true' if the dependency is added (i.e., was not in the hashtable).
1225 if (table->put(*current_sig, 1)) {
1226 Klass* witness = deps.check_dependency();
1227 if (witness != nullptr) {
1228 // Dependency checking failed. Print out information about the failed
1229 // dependency and finally fail with an assert. We can fail here, since
1230 // dependency checking is never done in a product build.
1231 deps.print_dependency(tty, witness, true);
1232 changes.print();
1233 nm->print();
1234 nm->print_dependencies_on(tty);
1235 assert(false, "Should have been marked for deoptimization");
1236 }
1237 }
1238 }
1239 }
1240 }
1241 }
1242 #endif
1243
1244 void CodeCache::mark_for_deoptimization(DeoptimizationScope* deopt_scope, KlassDepChange& changes) {
1245 MutexLocker mu(CodeCache_lock, Mutex::_no_safepoint_check_flag);
1246
1247 // search the hierarchy looking for nmethods which are affected by the loading of this class
1248
1249 // then search the interfaces this class implements looking for nmethods
1250 // which might be dependent on the fact that an interface only had one
1251 // implementor.
1252 // nmethod::check_all_dependencies works correctly only if no safepoint
1253 // can happen.
1254 NoSafepointVerifier nsv;
1255 for (DepChange::ContextStream str(changes, nsv); str.next(); ) {
1256 InstanceKlass* d = str.klass();
1257 {
1258 LogStreamHandle(Trace, dependencies) log;
1259 if (log.is_enabled()) {
1260 log.print("Processing context ");
1261 d->name()->print_value_on(&log);
1262 }
1263 }
1264 d->mark_dependent_nmethods(deopt_scope, changes);
1265 }
1266
1267 #ifndef PRODUCT
1268 if (VerifyDependencies) {
1269 // Object pointers are used as unique identifiers for dependency arguments. This
1270 // is only possible if no safepoint, i.e., GC occurs during the verification code.
1271 dependentCheckTime.start();
1272 check_live_nmethods_dependencies(changes);
1273 dependentCheckTime.stop();
1274 }
1275 #endif
1276 }
1277
1278 #if INCLUDE_JVMTI
1279 // RedefineClasses support for saving nmethods that are dependent on "old" methods.
1280 // We don't really expect this table to grow very large. If it does, it can become a hashtable.
1281 static GrowableArray<nmethod*>* old_nmethod_table = nullptr;
1282
1283 static void add_to_old_table(nmethod* c) {
1502 }
1503 PRAGMA_DIAG_POP
1504
1505 void CodeCache::print_memory_overhead() {
1506 size_t wasted_bytes = 0;
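// Each CodeBlob is preceded by its HeapBlock header; the header records the allocation
// length in segments, so the wasted space per blob is the segment-rounded allocation
// minus the blob's actual size.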
1507 FOR_ALL_ALLOCABLE_HEAPS(heap) {
1508 CodeHeap* curr_heap = *heap;
1509 for (CodeBlob* cb = (CodeBlob*)curr_heap->first(); cb != nullptr; cb = (CodeBlob*)curr_heap->next(cb)) {
1510 HeapBlock* heap_block = ((HeapBlock*)cb) - 1;
1511 wasted_bytes += heap_block->length() * CodeCacheSegmentSize - cb->size();
1512 }
1513 }
1514 // Print bytes that are allocated in the freelist
1515 ttyLocker ttl;
1516 tty->print_cr("Number of elements in freelist: " SSIZE_FORMAT, freelists_length());
1517 tty->print_cr("Allocated in freelist: " SSIZE_FORMAT "kB", bytes_allocated_in_freelists()/K);
1518 tty->print_cr("Unused bytes in CodeBlobs: " SSIZE_FORMAT "kB", (wasted_bytes/K));
1519 tty->print_cr("Segment map size: " SSIZE_FORMAT "kB", allocated_segments()/K); // 1 byte per segment
1520 }
1521
1522 static void print_helper1(outputStream* st, const char* prefix, int total, int not_entrant, int used) {
1523 if (total > 0) {
1524 double ratio = (100.0 * used) / total;
1525 st->print("%s %3d nmethods: %3d not_entrant, %d used (%2.1f%%)", prefix, total, not_entrant, used, ratio);
1526 }
1527 }
1528
1529 void CodeCache::print_nmethod_statistics_on(outputStream* st) {
1530 int stats [2][6][3][2] = {0};
1531 int stats_used[2][6][3][2] = {0};
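// Index layout used below: [is_scc][comp_level (+1 if preloaded)][in_use / not_entrant / other][normal / osr].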
1532
1533 int total_osr = 0;
1534 int total_entrant = 0;
1535 int total_non_entrant = 0;
1536 int total_other = 0;
1537 int total_used = 0;
1538
1539 NMethodIterator iter(NMethodIterator::all);
1540 while (iter.next()) {
1541 nmethod* nm = iter.method();
1542 if (nm->is_in_use()) {
1543 ++total_entrant;
1544 } else if (nm->is_not_entrant()) {
1545 ++total_non_entrant;
1546 } else {
1547 ++total_other;
1548 }
1549 if (nm->is_osr_method()) {
1550 ++total_osr;
1551 }
1552 if (nm->used()) {
1553 ++total_used;
1554 }
1555 assert(!nm->preloaded() || nm->comp_level() == CompLevel_full_optimization, "");
1556
1557 int idx1 = nm->is_scc() ? 1 : 0;
1558 int idx2 = nm->comp_level() + (nm->preloaded() ? 1 : 0);
1559 int idx3 = (nm->is_in_use() ? 0 :
1560 (nm->is_not_entrant() ? 1 :
1561 2));
1562 int idx4 = (nm->is_osr_method() ? 1 : 0);
1563 stats[idx1][idx2][idx3][idx4] += 1;
1564 if (nm->used()) {
1565 stats_used[idx1][idx2][idx3][idx4] += 1;
1566 }
1567 }
1568
1569 st->print("Total: %d methods (%d entrant / %d not_entrant; osr: %d ",
1570 total_entrant + total_non_entrant + total_other,
1571 total_entrant, total_non_entrant, total_osr);
1572 if (total_other > 0) {
1573 st->print("; %d other", total_other);
1574 }
1575 st->print_cr(")");
1576
1577 for (int i = CompLevel_simple; i <= CompLevel_full_optimization; i++) {
1578 int total_normal = stats[0][i][0][0] + stats[0][i][1][0] + stats[0][i][2][0];
1579 int total_osr = stats[0][i][0][1] + stats[0][i][1][1] + stats[0][i][2][1];
1580 if (total_normal + total_osr > 0) {
1581 st->print(" Tier%d:", i);
1582 print_helper1(st, "", total_normal, stats[0][i][1][0], stats_used[0][i][0][0] + stats_used[0][i][1][0]);
1583 print_helper1(st, "; osr:", total_osr, stats[0][i][1][1], stats_used[0][i][0][1] + stats_used[0][i][1][1]);
1584 st->cr();
1585 }
1586 }
1587 st->cr();
1588 for (int i = CompLevel_simple; i <= CompLevel_full_optimization + 1; i++) {
1589 int total_normal = stats[1][i][0][0] + stats[1][i][1][0] + stats[1][i][2][0];
1590 int total_osr = stats[1][i][0][1] + stats[1][i][1][1] + stats[1][i][2][1];
1591 assert(total_osr == 0, "sanity");
1592 if (total_normal + total_osr > 0) {
1593 st->print(" SC T%d:", i);
1594 print_helper1(st, "", total_normal, stats[1][i][1][0], stats_used[1][i][0][0] + stats_used[1][i][1][0]);
1595 print_helper1(st, "; osr:", total_osr, stats[1][i][1][1], stats_used[1][i][0][1] + stats_used[1][i][1][1]);
1596 st->cr();
1597 }
1598 }
1599 }
1600
1601 //------------------------------------------------------------------------------------------------
1602 // Non-product version
1603
1604 #ifndef PRODUCT
1605
1606 void CodeCache::print_trace(const char* event, CodeBlob* cb, uint size) {
1607 if (PrintCodeCache2) { // Need to add a new flag
1608 ResourceMark rm;
1609 if (size == 0) {
1610 int s = cb->size();
1611 assert(s >= 0, "CodeBlob size is negative: %d", s);
1612 size = (uint) s;
1613 }
1614 tty->print_cr("CodeCache %s: addr: " INTPTR_FORMAT ", size: 0x%x", event, p2i(cb), size);
1615 }
1616 }
1617
1618 void CodeCache::print_internals() {
1619 int nmethodCount = 0;
1620 int runtimeStubCount = 0;
1621 int adapterCount = 0;
1622 int deoptimizationStubCount = 0;
1623 int uncommonTrapStubCount = 0;
1624 int bufferBlobCount = 0;
1625 int total = 0;
1626 int nmethodNotEntrant = 0;
1627 int nmethodJava = 0;
1628 int nmethodNative = 0;
1629 int max_nm_size = 0;
1630 ResourceMark rm;
1631
1632 int i = 0;
1633 FOR_ALL_ALLOCABLE_HEAPS(heap) {
1634 int heap_total = 0;
1635 tty->print_cr("-- %s --", (*heap)->name());
1636 FOR_ALL_BLOBS(cb, *heap) {
1637 total++;
1638 heap_total++;
1639 if (cb->is_nmethod()) {
1640 nmethod* nm = (nmethod*)cb;
1641
1642 tty->print("%4d: ", heap_total);
1643 CompileTask::print(tty, nm, (nm->is_not_entrant() ? "non-entrant" : ""), true, true);
1644
1645 nmethodCount++;
1646
1647 if(nm->is_not_entrant()) { nmethodNotEntrant++; }
1648 if(nm->method() != nullptr && nm->is_native_method()) { nmethodNative++; }
1649
1650 if(nm->method() != nullptr && nm->is_java_method()) {
1651 nmethodJava++;
1652 max_nm_size = MAX2(max_nm_size, nm->size());
1653 }
1654 } else if (cb->is_runtime_stub()) {
1655 runtimeStubCount++;
1656 } else if (cb->is_deoptimization_stub()) {
1657 deoptimizationStubCount++;
1658 } else if (cb->is_uncommon_trap_stub()) {
1659 uncommonTrapStubCount++;
1660 } else if (cb->is_adapter_blob()) {
1661 adapterCount++;
1662 } else if (cb->is_buffer_blob()) {
1663 bufferBlobCount++;
1785 FOR_ALL_BLOBS(cb, *heap) {
1786 number_of_blobs++;
1787 code_size += cb->code_size();
1788 ImmutableOopMapSet* set = cb->oop_maps();
1789 if (set != nullptr) {
1790 number_of_oop_maps += set->count();
1791 map_size += set->nr_of_bytes();
1792 }
1793 }
1794 }
1795 tty->print_cr("OopMaps");
1796 tty->print_cr(" #blobs = %d", number_of_blobs);
1797 tty->print_cr(" code size = %d", code_size);
1798 tty->print_cr(" #oop_maps = %d", number_of_oop_maps);
1799 tty->print_cr(" map size = %d", map_size);
1800 }
1801
1802 #endif // !PRODUCT
1803 }
1804
1805 void CodeCache::print_nmethods_on(outputStream* st) {
1806 ResourceMark rm;
1807 int i = 0;
1808 FOR_ALL_ALLOCABLE_HEAPS(heap) {
1809 st->print_cr("-- %s --", (*heap)->name());
1810 FOR_ALL_BLOBS(cb, *heap) {
1811 i++;
1812 if (cb->is_nmethod()) {
1813 nmethod* nm = (nmethod*)cb;
1814 st->print("%4d: ", i);
1815 CompileTask::print(st, nm, nullptr, true, false);
1816
1817 const char non_entrant_char = (nm->is_not_entrant() ? 'N' : ' ');
1818 st->print_cr(" %c", non_entrant_char);
1819 }
1820 }
1821 }
1822 }
1823
1824 void CodeCache::print_summary(outputStream* st, bool detailed) {
1825 int full_count = 0;
1826 julong total_used = 0;
1827 julong total_max_used = 0;
1828 julong total_free = 0;
1829 julong total_size = 0;
1830 FOR_ALL_HEAPS(heap_iterator) {
1831 CodeHeap* heap = (*heap_iterator);
1832 size_t total = (heap->high_boundary() - heap->low_boundary());
1833 if (_heaps->length() >= 1) {
1834 st->print("%s:", heap->name());
1835 } else {
1836 st->print("CodeCache:");
1837 }
1838 size_t size = total/K;
1839 size_t used = (total - heap->unallocated_capacity())/K;
1840 size_t max_used = heap->max_allocated_capacity()/K;
1841 size_t free = heap->unallocated_capacity()/K;
1842 total_size += size;
1843 total_used += used;
|