/*
 * Copyright (c) 1998, 2025, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#include "code/codeBlob.hpp"
#include "code/codeCache.hpp"
#include "code/relocInfo.hpp"
#include "code/vtableStubs.hpp"
#include "compiler/disassembler.hpp"
#include "compiler/oopMap.hpp"
#include "interpreter/bytecode.hpp"
#include "interpreter/interpreter.hpp"
#include "jvm.h"
#include "memory/allocation.inline.hpp"
#include "memory/heap.hpp"
#include "memory/resourceArea.hpp"
#include "oops/oop.inline.hpp"
#include "prims/forte.hpp"
#include "prims/jvmtiExport.hpp"
#include "runtime/handles.inline.hpp"
#include "runtime/interfaceSupport.inline.hpp"
#include "runtime/javaFrameAnchor.hpp"
#include "runtime/jniHandles.inline.hpp"
#include "runtime/mutexLocker.hpp"
#include "runtime/safepoint.hpp"
#include "runtime/sharedRuntime.hpp"
#include "runtime/stubCodeGenerator.hpp"
#include "runtime/stubRoutines.hpp"
#include "runtime/vframe.hpp"
#include "services/memoryService.hpp"
#include "utilities/align.hpp"
#ifdef COMPILER1
#include "c1/c1_Runtime1.hpp"
#endif

#include <type_traits>

// Virtual methods are not allowed in code blobs to simplify caching compiled code.
// Check all "leaf" subclasses of the CodeBlob class.

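// As a hypothetical illustration (this declaration is not part of the code):
// adding `virtual void f();` to nmethod would make
// std::is_polymorphic<nmethod>::value true and trip the first assert below.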
static_assert(!std::is_polymorphic<nmethod>::value, "no virtual methods are allowed in nmethod");
static_assert(!std::is_polymorphic<AdapterBlob>::value, "no virtual methods are allowed in code blobs");
static_assert(!std::is_polymorphic<VtableBlob>::value, "no virtual methods are allowed in code blobs");
static_assert(!std::is_polymorphic<MethodHandlesAdapterBlob>::value, "no virtual methods are allowed in code blobs");
static_assert(!std::is_polymorphic<RuntimeStub>::value, "no virtual methods are allowed in code blobs");
static_assert(!std::is_polymorphic<DeoptimizationBlob>::value, "no virtual methods are allowed in code blobs");
static_assert(!std::is_polymorphic<SafepointBlob>::value, "no virtual methods are allowed in code blobs");
static_assert(!std::is_polymorphic<UpcallStub>::value, "no virtual methods are allowed in code blobs");
#ifdef COMPILER2
static_assert(!std::is_polymorphic<ExceptionBlob>::value, "no virtual methods are allowed in code blobs");
static_assert(!std::is_polymorphic<UncommonTrapBlob>::value, "no virtual methods are allowed in code blobs");
#endif

// Add proxy vtables.
// We need only a few for now - they are used only for printing.
const nmethod::Vptr nmethod::_vpntr;
const BufferBlob::Vptr BufferBlob::_vpntr;
const RuntimeStub::Vptr RuntimeStub::_vpntr;
const SingletonBlob::Vptr SingletonBlob::_vpntr;
const DeoptimizationBlob::Vptr DeoptimizationBlob::_vpntr;
#ifdef COMPILER2
const ExceptionBlob::Vptr ExceptionBlob::_vpntr;
#endif // COMPILER2
const UpcallStub::Vptr UpcallStub::_vpntr;

const CodeBlob::Vptr* CodeBlob::vptr(CodeBlobKind kind) {
  constexpr const CodeBlob::Vptr* array[(size_t)CodeBlobKind::Number_Of_Kinds] = {
      nullptr/* None */,
      &nmethod::_vpntr,
      &BufferBlob::_vpntr,
      &AdapterBlob::_vpntr,
      &VtableBlob::_vpntr,
      &MethodHandlesAdapterBlob::_vpntr,
      &RuntimeStub::_vpntr,
      &DeoptimizationBlob::_vpntr,
      &SafepointBlob::_vpntr,
#ifdef COMPILER2
      &ExceptionBlob::_vpntr,
      &UncommonTrapBlob::_vpntr,
#endif
      &UpcallStub::_vpntr
  };

  return array[(size_t)kind];
}

const CodeBlob::Vptr* CodeBlob::vptr() const {
  return vptr(_kind);
}
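
// A sketch of how the proxy vtable is used (see the printing code at the end
// of this file): a non-virtual CodeBlob method forwards through vptr() to the
// kind-specific Vptr instance, e.g.
//
//   blob->print_on(st);                           // non-virtual entry point
//   // -> blob->vptr()->print_on(blob, st)        // dispatched by _kind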

unsigned int CodeBlob::align_code_offset(int offset) {
  // align the offset so that (offset + CodeHeap header) lands on a CodeEntryAlignment boundary
  int header_size = (int)CodeHeap::header_size();
  return align_up(offset + header_size, CodeEntryAlignment) - header_size;
}
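
// Worked example for align_code_offset() above (illustrative values only,
// assuming CodeEntryAlignment == 32 and CodeHeap::header_size() == 40):
//   align_code_offset(64) = align_up(64 + 40, 32) - 40 = 128 - 40 = 88
// so once the 40-byte heap header is prepended, the code starts at
// 88 + 40 = 128, which is a CodeEntryAlignment boundary.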

// This must be consistent with the CodeBlob constructor's layout actions.
unsigned int CodeBlob::allocation_size(CodeBuffer* cb, int header_size) {
  // align the size to CodeEntryAlignment
  unsigned int size = align_code_offset(header_size);
  size += align_up(cb->total_content_size(), oopSize);
  size += align_up(cb->total_oop_size(), oopSize);
  return size;
}
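
// A sketch of the layout that allocation_size() accounts for (header first,
// content aligned via align_code_offset(), then the oop section; both sizes
// rounded up to oopSize):
//
//   [ header | pad | content (insts, stubs, consts) | oops ]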
125
126 CodeBlob::CodeBlob(const char* name, CodeBlobKind kind, CodeBuffer* cb, int size, uint16_t header_size,
127 int16_t frame_complete_offset, int frame_size, OopMapSet* oop_maps, bool caller_must_gc_arguments,
128 int mutable_data_size) :
129 _oop_maps(nullptr), // will be set by set_oop_maps() call
130 _name(name),
131 _mutable_data(header_begin() + size), // default value is blob_end()
132 _size(size),
133 _relocation_size(align_up(cb->total_relocation_size(), oopSize)),
134 _content_offset(CodeBlob::align_code_offset(header_size)),
135 _code_offset(_content_offset + cb->total_offset_of(cb->insts())),
136 _data_offset(_content_offset + align_up(cb->total_content_size(), oopSize)),
137 _frame_size(frame_size),
138 _mutable_data_size(mutable_data_size),
139 S390_ONLY(_ctable_offset(0) COMMA)
140 _header_size(header_size),
141 _frame_complete_offset(frame_complete_offset),
142 _kind(kind),
143 _caller_must_gc_arguments(caller_must_gc_arguments)
144 {
145 assert(is_aligned(_size, oopSize), "unaligned size");
146 assert(is_aligned(header_size, oopSize), "unaligned size");
147 assert(is_aligned(_relocation_size, oopSize), "unaligned size");
148 assert(_data_offset <= _size, "codeBlob is too small: %d > %d", _data_offset, _size);
149 assert(is_nmethod() || (cb->total_oop_size() + cb->total_metadata_size() == 0), "must be nmethod");
150 assert(code_end() == content_end(), "must be the same - see code_end()");
151 #ifdef COMPILER1
152 // probably wrong for tiered
153 assert(_frame_size >= -1, "must use frame size or -1 for runtime stubs");
154 #endif // COMPILER1
155
  if (_mutable_data_size > 0) {
    _mutable_data = (address)os::malloc(_mutable_data_size, mtCode);
    if (_mutable_data == nullptr) {
      vm_exit_out_of_memory(_mutable_data_size, OOM_MALLOC_ERROR, "codebuffer: no space for mutable data");
    }
  } else {
    // We need a unique, valid, non-null address
    assert(_mutable_data == blob_end(), "sanity");
  }

  set_oop_maps(oop_maps);
}

// Simple CodeBlob constructor used for simple BufferBlobs.
CodeBlob::CodeBlob(const char* name, CodeBlobKind kind, int size, uint16_t header_size) :
  _oop_maps(nullptr),
  _name(name),
  _mutable_data(header_begin() + size), // default value is blob_end()
  _size(size),
  _relocation_size(0),
  _content_offset(CodeBlob::align_code_offset(header_size)),
  _code_offset(_content_offset),
  _data_offset(size),
  _frame_size(0),
  _mutable_data_size(0),
  S390_ONLY(_ctable_offset(0) COMMA)
  _header_size(header_size),
  _frame_complete_offset(CodeOffsets::frame_never_safe),
  _kind(kind),
  _caller_must_gc_arguments(false)
{
  assert(is_aligned(size, oopSize), "unaligned size");
  assert(is_aligned(header_size, oopSize), "unaligned header size");
  assert(_mutable_data == blob_end(), "sanity");
}

void CodeBlob::restore_mutable_data(address reloc_data) {
  // Relocation data is now stored as part of the mutable data area; allocate it before copying relocations
  if (_mutable_data_size > 0) {
    _mutable_data = (address)os::malloc(_mutable_data_size, mtCode);
    if (_mutable_data == nullptr) {
      vm_exit_out_of_memory(_mutable_data_size, OOM_MALLOC_ERROR, "codebuffer: no space for mutable data");
    }
  } else {
    _mutable_data = blob_end(); // default value
  }
  if (_relocation_size > 0) {
    assert(_mutable_data_size > 0, "relocation is part of mutable data section");
    memcpy((address)relocation_begin(), reloc_data, relocation_size());
  }
}
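
// Layout of the mutable data area after restore (a sketch; relocations, when
// present, sit at the start of the C-heap block, see relocation_begin()):
//
//   _mutable_data -> [ relocation data (_relocation_size) | other mutable data ]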

void CodeBlob::purge() {
  assert(_mutable_data != nullptr, "should never be null");
  if (_mutable_data != blob_end()) {
    os::free(_mutable_data);
    _mutable_data = blob_end(); // valid, non-null address
    _mutable_data_size = 0;
    _relocation_size = 0;
  }
  if (_oop_maps != nullptr) {
    delete _oop_maps;
    _oop_maps = nullptr;
  }
  NOT_PRODUCT(_asm_remarks.clear());
  NOT_PRODUCT(_dbg_strings.clear());
}

void CodeBlob::set_oop_maps(OopMapSet* p) {
  // Danger Will Robinson! This method allocates a big
  // chunk of memory, it's your job to free it.
  if (p != nullptr) {
    _oop_maps = ImmutableOopMapSet::build_from(p);
  } else {
    _oop_maps = nullptr;
  }
}
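
// A minimal usage sketch (hedged; `pc_offset` and `map` are placeholders):
// the OopMapSet passed in is a resource-allocated builder, while the
// ImmutableOopMapSet built from it lives in C-heap and is freed by purge().
//
//   OopMapSet* oop_maps = new OopMapSet();
//   oop_maps->add_gc_map(pc_offset, map);  // record an OopMap for one pc
//   blob->set_oop_maps(oop_maps);          // builds the immutable copy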

const ImmutableOopMap* CodeBlob::oop_map_for_return_address(address return_address) const {
  assert(_oop_maps != nullptr, "nope");
  return _oop_maps->find_map_at_offset((intptr_t) return_address - (intptr_t) code_begin());
}

void CodeBlob::print_code_on(outputStream* st) {
  ResourceMark m;
  Disassembler::decode(this, st);
}

void CodeBlob::prepare_for_archiving_impl() {
  set_name(nullptr);
  _oop_maps = nullptr;
  _mutable_data = nullptr;
#ifndef PRODUCT
  asm_remarks().clear();
  dbg_strings().clear();
#endif /* PRODUCT */
}

void CodeBlob::prepare_for_archiving() {
  vptr(_kind)->prepare_for_archiving(this);
}

void CodeBlob::archive_blob(CodeBlob* blob, address archive_buffer) {
  blob->copy_to(archive_buffer);
  CodeBlob* archived_blob = (CodeBlob*)archive_buffer;
  archived_blob->prepare_for_archiving();
}

void CodeBlob::post_restore_impl() {
  // Track memory usage statistic after releasing CodeCache_lock
  MemoryService::track_code_cache_memory_usage();
}

void CodeBlob::post_restore() {
  vptr(_kind)->post_restore(this);
}

CodeBlob* CodeBlob::restore(address code_cache_buffer,
                            const char* name,
                            address archived_reloc_data,
                            ImmutableOopMapSet* archived_oop_maps)
{
  copy_to(code_cache_buffer);
  CodeBlob* code_blob = (CodeBlob*)code_cache_buffer;
  code_blob->set_name(name);
  code_blob->restore_mutable_data(archived_reloc_data);
  code_blob->set_oop_maps(archived_oop_maps);
  return code_blob;
}

CodeBlob* CodeBlob::create(CodeBlob* archived_blob,
                           const char* name,
                           address archived_reloc_data,
                           ImmutableOopMapSet* archived_oop_maps)
{
  ThreadInVMfromUnknown __tiv;  // get to VM state in case we block on CodeCache_lock

  CodeCache::gc_on_allocation();

  CodeBlob* blob = nullptr;
  unsigned int size = archived_blob->size();
  {
    MutexLocker mu(CodeCache_lock, Mutex::_no_safepoint_check_flag);
    address code_cache_buffer = (address)CodeCache::allocate(size, CodeBlobType::NonNMethod);
    if (code_cache_buffer != nullptr) {
      blob = archived_blob->restore(code_cache_buffer,
                                    name,
                                    archived_reloc_data,
                                    archived_oop_maps);
      assert(blob != nullptr, "sanity check");

      // Flush the code block
      ICache::invalidate_range(blob->code_begin(), blob->code_size());
      CodeCache::commit(blob); // Count adapters
    }
  }
  if (blob != nullptr) {
    blob->post_restore();
  }
  return blob;
}
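
// A sketch of the archive/restore round trip implemented above (names as used
// in this file): archive_blob() copies the blob into the archive buffer and
// clears heap-dependent state via prepare_for_archiving(); create() later
// allocates code cache space and calls restore(), which re-attaches the name,
// relocation data and oop maps, then post_restore() updates memory statistics.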

//-----------------------------------------------------------------------------------------
// Creates a RuntimeBlob from a CodeBuffer and copies code and relocation info.

RuntimeBlob::RuntimeBlob(
  const char* name,
  CodeBlobKind kind,
  CodeBuffer* cb,
  int size,
  uint16_t header_size,
  int16_t frame_complete,
  int frame_size,
  OopMapSet* oop_maps,
  bool caller_must_gc_arguments)
  : CodeBlob(name, kind, cb, size, header_size, frame_complete, frame_size, oop_maps, caller_must_gc_arguments,
             align_up(cb->total_relocation_size(), oopSize))
{
  cb->copy_code_and_locs_to(this);
}

void RuntimeBlob::free(RuntimeBlob* blob) {
  assert(blob != nullptr, "caller must check for nullptr");
  ThreadInVMfromUnknown __tiv;  // get to VM state in case we block on CodeCache_lock
  blob->purge();
  {
    MutexLocker mu(CodeCache_lock, Mutex::_no_safepoint_check_flag);
    CodeCache::free(blob);
  }
  // Track memory usage statistic after releasing CodeCache_lock
  MemoryService::track_code_cache_memory_usage();
}

void RuntimeBlob::trace_new_stub(RuntimeBlob* stub, const char* name1, const char* name2) {
  // Do not hold the CodeCache lock during name formatting.
  assert(!CodeCache_lock->owned_by_self(), "release CodeCache before registering the stub");

  if (stub != nullptr && (PrintStubCode ||
                          Forte::is_enabled() ||
                          JvmtiExport::should_post_dynamic_code_generated())) {
    char stub_id[256];
    assert(strlen(name1) + strlen(name2) < sizeof(stub_id), "stub_id buffer too small");
    jio_snprintf(stub_id, sizeof(stub_id), "%s%s", name1, name2);
    if (PrintStubCode) {
      ttyLocker ttyl;
      tty->print_cr("- - - [BEGIN] - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -");
      tty->print_cr("Decoding %s " PTR_FORMAT " [" PTR_FORMAT ", " PTR_FORMAT "] (%d bytes)",
                    stub_id, p2i(stub), p2i(stub->code_begin()), p2i(stub->code_end()), stub->code_size());
      Disassembler::decode(stub->code_begin(), stub->code_end(), tty
                           NOT_PRODUCT(COMMA &stub->asm_remarks()));
      if ((stub->oop_maps() != nullptr) && AbstractDisassembler::show_structs()) {
        tty->print_cr("- - - [OOP MAPS]- - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -");
        stub->oop_maps()->print();
      }
      tty->print_cr("- - - [END] - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -");
      tty->cr();
    }
    if (Forte::is_enabled()) {
      Forte::register_stub(stub_id, stub->code_begin(), stub->code_end());
    }

    if (JvmtiExport::should_post_dynamic_code_generated()) {
      const char* stub_name = name2;
      if (name2[0] == '\0') stub_name = name1;
      JvmtiExport::post_dynamic_code_generated(stub_name, stub->code_begin(), stub->code_end());
    }
  }

  // Track memory usage statistic after releasing CodeCache_lock
  MemoryService::track_code_cache_memory_usage();
}

//----------------------------------------------------------------------------------------------------
// Implementation of BufferBlob

BufferBlob::BufferBlob(const char* name, CodeBlobKind kind, int size, uint16_t header_size)
: RuntimeBlob(name, kind, size, header_size)
{}

BufferBlob* BufferBlob::create(const char* name, uint buffer_size) {
  ThreadInVMfromUnknown __tiv;  // get to VM state in case we block on CodeCache_lock

  BufferBlob* blob = nullptr;
  unsigned int size = sizeof(BufferBlob);
  // align the size to CodeEntryAlignment
  size = CodeBlob::align_code_offset(size);
  size += align_up(buffer_size, oopSize);
  assert(name != nullptr, "must provide a name");
  {
    MutexLocker mu(CodeCache_lock, Mutex::_no_safepoint_check_flag);
    blob = new (size) BufferBlob(name, CodeBlobKind::Buffer, size);
  }
  // Track memory usage statistic after releasing CodeCache_lock
  MemoryService::track_code_cache_memory_usage();

  return blob;
}


BufferBlob::BufferBlob(const char* name, CodeBlobKind kind, CodeBuffer* cb, int size, uint16_t header_size)
  : RuntimeBlob(name, kind, cb, size, header_size, CodeOffsets::frame_never_safe, 0, nullptr)
{}

// Used by gtest
BufferBlob* BufferBlob::create(const char* name, CodeBuffer* cb) {
  ThreadInVMfromUnknown __tiv;  // get to VM state in case we block on CodeCache_lock

  BufferBlob* blob = nullptr;
  unsigned int size = CodeBlob::allocation_size(cb, sizeof(BufferBlob));
  assert(name != nullptr, "must provide a name");
  {
    MutexLocker mu(CodeCache_lock, Mutex::_no_safepoint_check_flag);
    blob = new (size) BufferBlob(name, CodeBlobKind::Buffer, cb, size);
  }
  // Track memory usage statistic after releasing CodeCache_lock
  MemoryService::track_code_cache_memory_usage();

  return blob;
}

void* BufferBlob::operator new(size_t s, unsigned size) throw() {
  return CodeCache::allocate(size, CodeBlobType::NonNMethod);
}

void BufferBlob::free(BufferBlob *blob) {
  RuntimeBlob::free(blob);
}


//----------------------------------------------------------------------------------------------------
// Implementation of AdapterBlob

AdapterBlob::AdapterBlob(int size, CodeBuffer* cb, int entry_offset[AdapterBlob::ENTRY_COUNT]) :
  BufferBlob("I2C/C2I adapters", CodeBlobKind::Adapter, cb, size, sizeof(AdapterBlob)) {
  assert(entry_offset[I2C] == 0, "sanity check");
#ifdef ASSERT
  for (int i = 1; i < AdapterBlob::ENTRY_COUNT; i++) {
    // The entry is within the adapter blob or unset.
    int offset = entry_offset[i];
    assert((offset > 0 && offset < cb->insts()->size()) ||
           (i >= C2I_No_Clinit_Check && offset == -1),
           "invalid entry offset[%d] = 0x%x", i, offset);
  }
#endif // ASSERT
  _c2i_offset = entry_offset[C2I];
  _c2i_unverified_offset = entry_offset[C2I_Unverified];
  _c2i_no_clinit_check_offset = entry_offset[C2I_No_Clinit_Check];
  CodeCache::commit(this);
}
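
// Entry offsets recorded by the constructor above (relative to code_begin()):
//
//   I2C                 -> 0 (asserted above)
//   C2I                 -> _c2i_offset
//   C2I_Unverified      -> _c2i_unverified_offset
//   C2I_No_Clinit_Check -> _c2i_no_clinit_check_offset (-1 when unset)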

AdapterBlob* AdapterBlob::create(CodeBuffer* cb, int entry_offset[AdapterBlob::ENTRY_COUNT]) {
  ThreadInVMfromUnknown __tiv;  // get to VM state in case we block on CodeCache_lock

  CodeCache::gc_on_allocation();

  AdapterBlob* blob = nullptr;
  unsigned int size = CodeBlob::allocation_size(cb, sizeof(AdapterBlob));
  {
    MutexLocker mu(CodeCache_lock, Mutex::_no_safepoint_check_flag);
    blob = new (size) AdapterBlob(size, cb, entry_offset);
  }
  // Track memory usage statistic after releasing CodeCache_lock
  MemoryService::track_code_cache_memory_usage();

  return blob;
}

//----------------------------------------------------------------------------------------------------
// Implementation of VtableBlob

void* VtableBlob::operator new(size_t s, unsigned size) throw() {
  // Handling of allocation failure stops compilation and prints a bunch of
  // stuff, which requires unlocking the CodeCache_lock, so that the Compile_lock
  // can be locked, and then re-locking the CodeCache_lock. That is not safe in
  // this context as we hold the CompiledICLocker. So we just don't handle code
  // cache exhaustion here; we leave that for a later allocation that does not
  // hold the CompiledICLocker.
  return CodeCache::allocate(size, CodeBlobType::NonNMethod, false /* handle_alloc_failure */);
}

VtableBlob::VtableBlob(const char* name, int size) :
  BufferBlob(name, CodeBlobKind::Vtable, size) {
}

VtableBlob* VtableBlob::create(const char* name, int buffer_size) {
  assert(JavaThread::current()->thread_state() == _thread_in_vm, "called with the wrong state");

  VtableBlob* blob = nullptr;
  unsigned int size = sizeof(VtableBlob);
  // align the size to CodeEntryAlignment
  size = align_code_offset(size);
  size += align_up(buffer_size, oopSize);
  assert(name != nullptr, "must provide a name");
  {
    if (!CodeCache_lock->try_lock()) {
      // If we can't take the CodeCache_lock, then this is a bad time to perform the ongoing
      // IC transition to megamorphic, for which this stub will be needed. It is better to
      // bail out of the transition and wait for a more opportune moment. Not only is it not
      // worth blocking on the lock for the megamorphic transition, blocking might also
      // result in a deadlock when concurrent class unloading is performed. At this point in
      // time, the CompiledICLocker is taken, so we are not allowed to block on the
      // CodeCache_lock, as these two locks are otherwise consistently taken in the opposite
      // order. Bailing out results in an IC transition to the clean state instead, which
      // will cause subsequent calls to retry the transition eventually.
      return nullptr;
    }
    blob = new (size) VtableBlob(name, size);
    CodeCache_lock->unlock();
  }
  // Track memory usage statistic after releasing CodeCache_lock
  MemoryService::track_code_cache_memory_usage();

  return blob;
}

//----------------------------------------------------------------------------------------------------
// Implementation of MethodHandlesAdapterBlob

MethodHandlesAdapterBlob* MethodHandlesAdapterBlob::create(int buffer_size) {
  ThreadInVMfromUnknown __tiv;  // get to VM state in case we block on CodeCache_lock

  MethodHandlesAdapterBlob* blob = nullptr;
  unsigned int size = sizeof(MethodHandlesAdapterBlob);
  // align the size to CodeEntryAlignment
  size = CodeBlob::align_code_offset(size);
  size += align_up(buffer_size, oopSize);
  {
    MutexLocker mu(CodeCache_lock, Mutex::_no_safepoint_check_flag);
    blob = new (size) MethodHandlesAdapterBlob(size);
    if (blob == nullptr) {
      vm_exit_out_of_memory(size, OOM_MALLOC_ERROR, "CodeCache: no room for method handle adapter blob");
    }
  }
  // Track memory usage statistic after releasing CodeCache_lock
  MemoryService::track_code_cache_memory_usage();

  return blob;
}

//----------------------------------------------------------------------------------------------------
// Implementation of RuntimeStub

RuntimeStub::RuntimeStub(
  const char* name,
  CodeBuffer* cb,
  int size,
  int16_t frame_complete,
  int frame_size,
  OopMapSet* oop_maps,
  bool caller_must_gc_arguments
)
: RuntimeBlob(name, CodeBlobKind::RuntimeStub, cb, size, sizeof(RuntimeStub),
              frame_complete, frame_size, oop_maps, caller_must_gc_arguments)
{
}

RuntimeStub* RuntimeStub::new_runtime_stub(const char* stub_name,
                                           CodeBuffer* cb,
                                           int16_t frame_complete,
                                           int frame_size,
                                           OopMapSet* oop_maps,
                                           bool caller_must_gc_arguments,
                                           bool alloc_fail_is_fatal)
{
  RuntimeStub* stub = nullptr;
  unsigned int size = CodeBlob::allocation_size(cb, sizeof(RuntimeStub));
  ThreadInVMfromUnknown __tiv;  // get to VM state in case we block on CodeCache_lock
  {
    MutexLocker mu(CodeCache_lock, Mutex::_no_safepoint_check_flag);
    stub = new (size) RuntimeStub(stub_name, cb, size, frame_complete, frame_size, oop_maps, caller_must_gc_arguments);
    if (stub == nullptr) {
      if (!alloc_fail_is_fatal) {
        return nullptr;
      }
      fatal("Initial size of CodeCache is too small");
    }
  }

  trace_new_stub(stub, "RuntimeStub - ", stub_name);

  return stub;
}


void* RuntimeStub::operator new(size_t s, unsigned size) throw() {
  return CodeCache::allocate(size, CodeBlobType::NonNMethod);
}

// operator new shared by all singletons:
void* SingletonBlob::operator new(size_t s, unsigned size, bool alloc_fail_is_fatal) throw() {
  void* p = CodeCache::allocate(size, CodeBlobType::NonNMethod);
  if (alloc_fail_is_fatal && !p) fatal("Initial size of CodeCache is too small");
  return p;
}


//----------------------------------------------------------------------------------------------------
// Implementation of DeoptimizationBlob

DeoptimizationBlob::DeoptimizationBlob(
  CodeBuffer* cb,
  int size,
  OopMapSet* oop_maps,
  int unpack_offset,
  int unpack_with_exception_offset,
  int unpack_with_reexecution_offset,
  int frame_size
)
: SingletonBlob("DeoptimizationBlob", CodeBlobKind::Deoptimization, cb,
                size, sizeof(DeoptimizationBlob), frame_size, oop_maps)
{
  _unpack_offset           = unpack_offset;
  _unpack_with_exception   = unpack_with_exception_offset;
  _unpack_with_reexecution = unpack_with_reexecution_offset;
#ifdef COMPILER1
  _unpack_with_exception_in_tls = -1;
#endif
}


DeoptimizationBlob* DeoptimizationBlob::create(
  CodeBuffer* cb,
  OopMapSet* oop_maps,
  int unpack_offset,
  int unpack_with_exception_offset,
  int unpack_with_reexecution_offset,
  int frame_size)
{
  DeoptimizationBlob* blob = nullptr;
  unsigned int size = CodeBlob::allocation_size(cb, sizeof(DeoptimizationBlob));
  ThreadInVMfromUnknown __tiv;  // get to VM state in case we block on CodeCache_lock
  {
    MutexLocker mu(CodeCache_lock, Mutex::_no_safepoint_check_flag);
    blob = new (size) DeoptimizationBlob(cb,
                                         size,
                                         oop_maps,
                                         unpack_offset,
                                         unpack_with_exception_offset,
                                         unpack_with_reexecution_offset,
                                         frame_size);
  }

  trace_new_stub(blob, "DeoptimizationBlob");

  return blob;
}

#ifdef COMPILER2

//----------------------------------------------------------------------------------------------------
// Implementation of UncommonTrapBlob

UncommonTrapBlob::UncommonTrapBlob(
  CodeBuffer* cb,
  int size,
  OopMapSet* oop_maps,
  int frame_size
)
: SingletonBlob("UncommonTrapBlob", CodeBlobKind::UncommonTrap, cb,
                size, sizeof(UncommonTrapBlob), frame_size, oop_maps)
{}


UncommonTrapBlob* UncommonTrapBlob::create(
  CodeBuffer* cb,
  OopMapSet* oop_maps,
  int frame_size)
{
  UncommonTrapBlob* blob = nullptr;
  unsigned int size = CodeBlob::allocation_size(cb, sizeof(UncommonTrapBlob));
  ThreadInVMfromUnknown __tiv;  // get to VM state in case we block on CodeCache_lock
  {
    MutexLocker mu(CodeCache_lock, Mutex::_no_safepoint_check_flag);
    blob = new (size, false) UncommonTrapBlob(cb, size, oop_maps, frame_size);
  }

  trace_new_stub(blob, "UncommonTrapBlob");

  return blob;
}

//----------------------------------------------------------------------------------------------------
// Implementation of ExceptionBlob

ExceptionBlob::ExceptionBlob(
  CodeBuffer* cb,
  int size,
  OopMapSet* oop_maps,
  int frame_size
)
: SingletonBlob("ExceptionBlob", CodeBlobKind::Exception, cb,
                size, sizeof(ExceptionBlob), frame_size, oop_maps)
{}


ExceptionBlob* ExceptionBlob::create(
  CodeBuffer* cb,
  OopMapSet* oop_maps,
  int frame_size)
{
  ExceptionBlob* blob = nullptr;
  unsigned int size = CodeBlob::allocation_size(cb, sizeof(ExceptionBlob));
  ThreadInVMfromUnknown __tiv;  // get to VM state in case we block on CodeCache_lock
  {
    MutexLocker mu(CodeCache_lock, Mutex::_no_safepoint_check_flag);
    blob = new (size, false) ExceptionBlob(cb, size, oop_maps, frame_size);
  }

  trace_new_stub(blob, "ExceptionBlob");

  return blob;
}

#endif // COMPILER2

//----------------------------------------------------------------------------------------------------
// Implementation of SafepointBlob

SafepointBlob::SafepointBlob(
  CodeBuffer* cb,
  int size,
  OopMapSet* oop_maps,
  int frame_size
)
: SingletonBlob("SafepointBlob", CodeBlobKind::Safepoint, cb,
                size, sizeof(SafepointBlob), frame_size, oop_maps)
{}


SafepointBlob* SafepointBlob::create(
  CodeBuffer* cb,
  OopMapSet* oop_maps,
  int frame_size)
{
  SafepointBlob* blob = nullptr;
  unsigned int size = CodeBlob::allocation_size(cb, sizeof(SafepointBlob));
  ThreadInVMfromUnknown __tiv;  // get to VM state in case we block on CodeCache_lock
  {
    MutexLocker mu(CodeCache_lock, Mutex::_no_safepoint_check_flag);
    blob = new (size) SafepointBlob(cb, size, oop_maps, frame_size);
  }

  trace_new_stub(blob, "SafepointBlob");

  return blob;
}

//----------------------------------------------------------------------------------------------------
// Implementation of UpcallStub

UpcallStub::UpcallStub(const char* name, CodeBuffer* cb, int size, jobject receiver, ByteSize frame_data_offset) :
  RuntimeBlob(name, CodeBlobKind::Upcall, cb, size, sizeof(UpcallStub),
              CodeOffsets::frame_never_safe, 0 /* no frame size */,
              /* oop maps = */ nullptr, /* caller must gc arguments = */ false),
  _receiver(receiver),
  _frame_data_offset(frame_data_offset)
{
  CodeCache::commit(this);
}

void* UpcallStub::operator new(size_t s, unsigned size) throw() {
  return CodeCache::allocate(size, CodeBlobType::NonNMethod);
}

UpcallStub* UpcallStub::create(const char* name, CodeBuffer* cb, jobject receiver, ByteSize frame_data_offset) {
  ThreadInVMfromUnknown __tiv;  // get to VM state in case we block on CodeCache_lock

  UpcallStub* blob = nullptr;
  unsigned int size = CodeBlob::allocation_size(cb, sizeof(UpcallStub));
  {
    MutexLocker mu(CodeCache_lock, Mutex::_no_safepoint_check_flag);
    blob = new (size) UpcallStub(name, cb, size, receiver, frame_data_offset);
  }
  if (blob == nullptr) {
    return nullptr; // caller must handle this
  }

  // Track memory usage statistic after releasing CodeCache_lock
  MemoryService::track_code_cache_memory_usage();

  trace_new_stub(blob, "UpcallStub - ", name);

  return blob;
}

void UpcallStub::oops_do(OopClosure* f, const frame& frame) {
  frame_data_for_frame(frame)->old_handles->oops_do(f);
}

JavaFrameAnchor* UpcallStub::jfa_for_frame(const frame& frame) const {
  return &frame_data_for_frame(frame)->jfa;
}

void UpcallStub::free(UpcallStub* blob) {
  assert(blob != nullptr, "caller must check for nullptr");
  JNIHandles::destroy_global(blob->receiver());
  RuntimeBlob::free(blob);
}

//----------------------------------------------------------------------------------------------------
// Verification and printing

void CodeBlob::verify() {
  if (is_nmethod()) {
    as_nmethod()->verify();
  }
}

void CodeBlob::print_on(outputStream* st) const {
  vptr()->print_on(this, st);
}

void CodeBlob::print() const { print_on(tty); }

void CodeBlob::print_value_on(outputStream* st) const {
  vptr()->print_value_on(this, st);
}

void CodeBlob::print_on_impl(outputStream* st) const {
  st->print_cr("[CodeBlob kind:%d (" INTPTR_FORMAT ")]", (int)_kind, p2i(this));
  st->print_cr("Framesize: %d", _frame_size);
}

void CodeBlob::print_value_on_impl(outputStream* st) const {
  st->print_cr("[CodeBlob]");
}

void CodeBlob::print_block_comment(outputStream* stream, address block_begin) const {
#if defined(SUPPORT_ASSEMBLY) || defined(SUPPORT_ABSTRACT_ASSEMBLY)
  if (is_nmethod()) {
    as_nmethod()->print_nmethod_labels(stream, block_begin);
  }
#endif

#ifndef PRODUCT
  ptrdiff_t offset = block_begin - code_begin();
  assert(offset >= 0, "Expecting non-negative offset!");
  _asm_remarks.print(uint(offset), stream);
#endif
}

void CodeBlob::dump_for_addr(address addr, outputStream* st, bool verbose) const {
  if (is_buffer_blob() || is_adapter_blob() || is_vtable_blob() || is_method_handles_adapter_blob()) {
    // the interpreter is generated into a buffer blob
    InterpreterCodelet* i = Interpreter::codelet_containing(addr);
    if (i != nullptr) {
      st->print_cr(INTPTR_FORMAT " is at code_begin+%d in an Interpreter codelet", p2i(addr), (int)(addr - i->code_begin()));
      i->print_on(st);
      return;
    }
    if (Interpreter::contains(addr)) {
      st->print_cr(INTPTR_FORMAT " is pointing into interpreter code"
                   " (not bytecode specific)", p2i(addr));
      return;
    }
    if (AdapterHandlerLibrary::contains(this)) {
      st->print_cr(INTPTR_FORMAT " is at code_begin+%d in an AdapterHandler", p2i(addr), (int)(addr - code_begin()));
      AdapterHandlerLibrary::print_handler_on(st, this);
    }
    // the stubroutines are generated into a buffer blob
    StubCodeDesc* d = StubCodeDesc::desc_for(addr);
    if (d != nullptr) {
      st->print_cr(INTPTR_FORMAT " is at begin+%d in a stub", p2i(addr), (int)(addr - d->begin()));
      d->print_on(st);
      st->cr();
      return;
    }
    if (StubRoutines::contains(addr)) {
      st->print_cr(INTPTR_FORMAT " is pointing to an (unnamed) stub routine", p2i(addr));
      return;
    }
    VtableStub* v = VtableStubs::stub_containing(addr);
    if (v != nullptr) {
      st->print_cr(INTPTR_FORMAT " is at entry_point+%d in a vtable stub", p2i(addr), (int)(addr - v->entry_point()));
      v->print_on(st);
      st->cr();
      return;
    }
  }
  if (is_nmethod()) {
    nmethod* nm = (nmethod*)this;
    ResourceMark rm;
    st->print(INTPTR_FORMAT " is at entry_point+%d in (nmethod*)" INTPTR_FORMAT,
              p2i(addr), (int)(addr - nm->entry_point()), p2i(nm));
    if (verbose) {
      st->print(" for ");
      nm->method()->print_value_on(st);
    }
    st->cr();
    if (verbose && st == tty) {
      // verbose is only ever true when called from findpc in debug.cpp
      nm->print_nmethod(true);
    } else {
      nm->print_on(st);
    }
    return;
  }
  st->print_cr(INTPTR_FORMAT " is at code_begin+%d in ", p2i(addr), (int)(addr - code_begin()));
  print_on(st);
}

void BufferBlob::print_on_impl(outputStream* st) const {
  RuntimeBlob::print_on_impl(st);
  print_value_on_impl(st);
}

void BufferBlob::print_value_on_impl(outputStream* st) const {
  st->print_cr("BufferBlob (" INTPTR_FORMAT ") used for %s", p2i(this), name());
}

void RuntimeStub::print_on_impl(outputStream* st) const {
  ttyLocker ttyl;
  RuntimeBlob::print_on_impl(st);
  st->print("Runtime Stub (" INTPTR_FORMAT "): ", p2i(this));
  st->print_cr("%s", name());
  Disassembler::decode((RuntimeBlob*)this, st);
}

void RuntimeStub::print_value_on_impl(outputStream* st) const {
  st->print("RuntimeStub (" INTPTR_FORMAT "): ", p2i(this)); st->print("%s", name());
}

void SingletonBlob::print_on_impl(outputStream* st) const {
  ttyLocker ttyl;
  RuntimeBlob::print_on_impl(st);
  st->print_cr("%s", name());
  Disassembler::decode((RuntimeBlob*)this, st);
}

void SingletonBlob::print_value_on_impl(outputStream* st) const {
  st->print_cr("%s", name());
}

void DeoptimizationBlob::print_value_on_impl(outputStream* st) const {
  st->print_cr("Deoptimization (frame not available)");
}

void UpcallStub::print_on_impl(outputStream* st) const {
  RuntimeBlob::print_on_impl(st);
  print_value_on_impl(st);
  st->print_cr("Frame data offset: %d", (int) _frame_data_offset);
  oop recv = JNIHandles::resolve(_receiver);
  st->print("Receiver MH=");
  recv->print_on(st);
  Disassembler::decode((RuntimeBlob*)this, st);
}

void UpcallStub::print_value_on_impl(outputStream* st) const {
  st->print_cr("UpcallStub (" INTPTR_FORMAT ") used for %s", p2i(this), name());
}