/*
 * Copyright (c) 1998, 2025, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#include "code/aotCodeCache.hpp"
#include "code/codeBlob.hpp"
#include "code/codeCache.hpp"
#include "code/relocInfo.hpp"
#include "code/vtableStubs.hpp"
#include "compiler/disassembler.hpp"
#include "compiler/oopMap.hpp"
#include "cppstdlib/type_traits.hpp"
#include "interpreter/bytecode.hpp"
#include "interpreter/interpreter.hpp"
#include "jvm.h"
#include "memory/allocation.inline.hpp"
#include "memory/heap.hpp"
#include "memory/resourceArea.hpp"
#include "oops/oop.inline.hpp"
#include "prims/forte.hpp"
#include "prims/jvmtiExport.hpp"
#include "runtime/handles.inline.hpp"
#include "runtime/interfaceSupport.inline.hpp"
#include "runtime/javaFrameAnchor.hpp"
#include "runtime/jniHandles.inline.hpp"
#include "runtime/mutexLocker.hpp"
#include "runtime/safepoint.hpp"
#include "runtime/sharedRuntime.hpp"
#include "runtime/stubCodeGenerator.hpp"
#include "runtime/stubRoutines.hpp"
#include "runtime/vframe.hpp"
#include "services/memoryService.hpp"
#include "utilities/align.hpp"
#ifdef COMPILER1
#include "c1/c1_Runtime1.hpp"
#endif

// Virtual methods are not allowed in code blobs to simplify caching compiled code.
// Check all "leaf" subclasses of the CodeBlob class.

static_assert(!std::is_polymorphic<nmethod>::value, "no virtual methods are allowed in nmethod");
static_assert(!std::is_polymorphic<AdapterBlob>::value, "no virtual methods are allowed in code blobs");
static_assert(!std::is_polymorphic<VtableBlob>::value, "no virtual methods are allowed in code blobs");
static_assert(!std::is_polymorphic<MethodHandlesAdapterBlob>::value, "no virtual methods are allowed in code blobs");
static_assert(!std::is_polymorphic<RuntimeStub>::value, "no virtual methods are allowed in code blobs");
static_assert(!std::is_polymorphic<DeoptimizationBlob>::value, "no virtual methods are allowed in code blobs");
static_assert(!std::is_polymorphic<SafepointBlob>::value, "no virtual methods are allowed in code blobs");
static_assert(!std::is_polymorphic<UpcallStub>::value, "no virtual methods are allowed in code blobs");
#ifdef COMPILER2
static_assert(!std::is_polymorphic<ExceptionBlob>::value, "no virtual methods are allowed in code blobs");
static_assert(!std::is_polymorphic<UncommonTrapBlob>::value, "no virtual methods are allowed in code blobs");
#endif
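// Why this matters: a polymorphic class starts with a compiler-generated vtable
// pointer that is only meaningful inside the process which constructed the
// object. A blob image that is copied into an AOT archive and mapped back into
// another process would therefore carry a dangling vtable pointer. A minimal,
// purely illustrative sketch (both types are hypothetical):
//
//   struct WithVptr    { virtual void f(); };  // hidden vtable pointer
//   struct WithoutVptr { void f(); };          // plain data, safe to copy
//   static_assert(std::is_polymorphic<WithVptr>::value, "has a vptr");
//   static_assert(!std::is_polymorphic<WithoutVptr>::value, "no vptr");
//
// The proxy Vptr objects below provide dynamic dispatch without embedding a
// vtable pointer in the blob itself.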

// Add proxy vtables.
// We only need a few for now - they are used only by the print methods.
const nmethod::Vptr nmethod::_vpntr;
const BufferBlob::Vptr BufferBlob::_vpntr;
const RuntimeStub::Vptr RuntimeStub::_vpntr;
const SingletonBlob::Vptr SingletonBlob::_vpntr;
const DeoptimizationBlob::Vptr DeoptimizationBlob::_vpntr;
#ifdef COMPILER2
const ExceptionBlob::Vptr ExceptionBlob::_vpntr;
#endif // COMPILER2
const UpcallStub::Vptr UpcallStub::_vpntr;

const CodeBlob::Vptr* CodeBlob::vptr(CodeBlobKind kind) {
  constexpr const CodeBlob::Vptr* array[(size_t)CodeBlobKind::Number_Of_Kinds] = {
      nullptr/* None */,
      &nmethod::_vpntr,
      &BufferBlob::_vpntr,
      &AdapterBlob::_vpntr,
      &VtableBlob::_vpntr,
      &MethodHandlesAdapterBlob::_vpntr,
      &RuntimeStub::_vpntr,
      &DeoptimizationBlob::_vpntr,
      &SafepointBlob::_vpntr,
#ifdef COMPILER2
      &ExceptionBlob::_vpntr,
      &UncommonTrapBlob::_vpntr,
#endif
      &UpcallStub::_vpntr
  };

  return array[(size_t)kind];
}

const CodeBlob::Vptr* CodeBlob::vptr() const {
  return vptr(_kind);
}
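// Usage sketch: blob code never makes ordinary virtual calls; it looks up the
// Vptr table entry for its persistent _kind and dispatches through that, e.g.
// (mirroring print_on() defined later in this file):
//
//   void CodeBlob::print_on(outputStream* st) const {
//     vptr()->print_on(this, st);  // table lookup on _kind, then indirect call
//   }
//
// This gives virtual-call behavior keyed on plain data (_kind) rather than a
// per-object vtable pointer, so the blob image remains safe to cache.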

unsigned int CodeBlob::align_code_offset(int offset) {
  // align the code offset to CodeEntryAlignment, taking the CodeHeap block header into account
  int header_size = (int)CodeHeap::header_size();
  return align_up(offset + header_size, CodeEntryAlignment) - header_size;
}
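// Worked example (both values are platform-dependent; these are illustrative):
// with CodeHeap::header_size() == 8 and CodeEntryAlignment == 32, an offset of
// 100 becomes align_up(100 + 8, 32) - 8 = 128 - 8 = 120. The code then starts
// 8 (heap block header) + 120 = 128 bytes into the heap block, i.e. at a
// CodeEntryAlignment-aligned address, which is the point of the adjustment.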

// This must be consistent with the CodeBlob constructor's layout actions.
unsigned int CodeBlob::allocation_size(CodeBuffer* cb, int header_size) {
  // start the content at a CodeEntryAlignment-aligned offset past the header
  unsigned int size = align_code_offset(header_size);
  size += align_up(cb->total_content_size(), oopSize);
  size += align_up(cb->total_oop_size(), oopSize);
  return size;
}
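// The resulting allocation is laid out roughly as follows (a sketch; each
// piece is rounded up to oopSize):
//
//   [ header                 ]  header_size, padded so content is entry-aligned
//   [ content (instructions) ]  align_up(cb->total_content_size(), oopSize)
//   [ oops                   ]  align_up(cb->total_oop_size(), oopSize)
//
// The CodeBlob constructor below computes _content_offset, _code_offset and
// _data_offset from the same quantities, which is why the two must agree.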

CodeBlob::CodeBlob(const char* name, CodeBlobKind kind, CodeBuffer* cb, int size, uint16_t header_size,
                   int16_t frame_complete_offset, int frame_size, OopMapSet* oop_maps, bool caller_must_gc_arguments,
                   int mutable_data_size) :
  _oop_maps(nullptr), // will be set by set_oop_maps() call
  _name(name),
  _mutable_data(header_begin() + size), // default value is blob_end()
  _size(size),
  _relocation_size(align_up(cb->total_relocation_size(), oopSize)),
  _content_offset(CodeBlob::align_code_offset(header_size)),
  _code_offset(_content_offset + cb->total_offset_of(cb->insts())),
  _data_offset(_content_offset + align_up(cb->total_content_size(), oopSize)),
  _frame_size(frame_size),
  _mutable_data_size(mutable_data_size),
  S390_ONLY(_ctable_offset(0) COMMA)
  _header_size(header_size),
  _frame_complete_offset(frame_complete_offset),
  _kind(kind),
  _caller_must_gc_arguments(caller_must_gc_arguments)
{
  assert(is_aligned(_size, oopSize), "unaligned size");
  assert(is_aligned(header_size, oopSize), "unaligned header size");
  assert(is_aligned(_relocation_size, oopSize), "unaligned relocation size");
  assert(_data_offset <= _size, "codeBlob is too small: %d > %d", _data_offset, _size);
  assert(is_nmethod() || (cb->total_oop_size() + cb->total_metadata_size() == 0), "only nmethods may have oops or metadata");
  assert(code_end() == content_end(), "must be the same - see code_end()");
#ifdef COMPILER1
  // probably wrong for tiered
  assert(_frame_size >= -1, "must use frame size or -1 for runtime stubs");
#endif // COMPILER1

  if (_mutable_data_size > 0) {
    _mutable_data = (address)os::malloc(_mutable_data_size, mtCode);
    if (_mutable_data == nullptr) {
      vm_exit_out_of_memory(_mutable_data_size, OOM_MALLOC_ERROR, "codebuffer: no space for mutable data");
    }
  } else {
    // We need a unique, valid, non-null address here
    assert(_mutable_data == blob_end(), "sanity");
  }

  set_oop_maps(oop_maps);
}
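// Memory ownership sketch: the blob image itself (header, code, data) lives in
// the CodeCache, while mutable data such as relocations lives in a separate
// C-heap block owned by the blob:
//
//   CodeCache: [ header | content/code | data ]   <- this object, _size bytes
//   C heap:    [ relocation info | ... ]          <- _mutable_data
//
// When there is no mutable data, _mutable_data is set to blob_end() rather
// than nullptr so it is still a unique, valid address (see the assert above).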

// Simple CodeBlob used for simple BufferBlob.
CodeBlob::CodeBlob(const char* name, CodeBlobKind kind, int size, uint16_t header_size) :
  _oop_maps(nullptr),
  _name(name),
  _mutable_data(header_begin() + size), // default value is blob_end()
  _size(size),
  _relocation_size(0),
  _content_offset(CodeBlob::align_code_offset(header_size)),
  _code_offset(_content_offset),
  _data_offset(size),
  _frame_size(0),
  _mutable_data_size(0),
  S390_ONLY(_ctable_offset(0) COMMA)
  _header_size(header_size),
  _frame_complete_offset(CodeOffsets::frame_never_safe),
  _kind(kind),
  _caller_must_gc_arguments(false)
{
  assert(is_aligned(size, oopSize), "unaligned size");
  assert(is_aligned(header_size, oopSize), "unaligned header size");
  assert(_mutable_data == blob_end(), "sanity");
}

#ifdef ASSERT
CodeBlob::~CodeBlob() {
  assert(_oop_maps == nullptr || AOTCodeCache::is_address_in_aot_cache((address)_oop_maps), "Not flushed");
}
#endif

void CodeBlob::restore_mutable_data(address reloc_data) {
  // Relocation data is now stored as part of the mutable data area; allocate it before copying relocations
  if (_mutable_data_size > 0) {
    _mutable_data = (address)os::malloc(_mutable_data_size, mtCode);
    if (_mutable_data == nullptr) {
      vm_exit_out_of_memory(_mutable_data_size, OOM_MALLOC_ERROR, "codebuffer: no space for mutable data");
    }
  } else {
    _mutable_data = blob_end(); // default value
  }
  if (_relocation_size > 0) {
    assert(_mutable_data_size > 0, "relocation is part of mutable data section");
    memcpy((address)relocation_begin(), reloc_data, relocation_size());
  }
}

void CodeBlob::purge() {
  assert(_mutable_data != nullptr, "should never be null");
  if (_mutable_data != blob_end()) {
    os::free(_mutable_data);
    _mutable_data = blob_end(); // Valid non-null address
    _mutable_data_size = 0;
    _relocation_size = 0;
  }
  if (_oop_maps != nullptr && !AOTCodeCache::is_address_in_aot_cache((address)_oop_maps)) {
    delete _oop_maps;
    _oop_maps = nullptr;
  }
  NOT_PRODUCT(_asm_remarks.clear());
  NOT_PRODUCT(_dbg_strings.clear());
}

void CodeBlob::set_oop_maps(OopMapSet* p) {
  // Danger Will Robinson! This method allocates a big
  // chunk of memory, it is your job to free it.
  if (p != nullptr) {
    _oop_maps = ImmutableOopMapSet::build_from(p);
  } else {
    _oop_maps = nullptr;
  }
}

const ImmutableOopMap* CodeBlob::oop_map_for_return_address(address return_address) const {
  assert(_oop_maps != nullptr, "oop maps must be set before lookup");
  return _oop_maps->find_map_at_offset((intptr_t) return_address - (intptr_t) code_begin());
}
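// Usage sketch with hypothetical numbers: during a stack walk, a frame whose
// return address equals code_begin() + 0x40 produces pc offset 0x40, and the
// lookup returns the ImmutableOopMap recorded for the safepoint at that offset:
//
//   const ImmutableOopMap* map = blob->oop_map_for_return_address(ret_addr);
//   // map now describes which stack slots/registers hold oops at that pc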

void CodeBlob::print_code_on(outputStream* st) {
  ResourceMark m;
  Disassembler::decode(this, st);
}

void CodeBlob::prepare_for_archiving_impl() {
  set_name(nullptr);
  _oop_maps = nullptr;
  _mutable_data = nullptr;
#ifndef PRODUCT
  asm_remarks().clear_ref();
  dbg_strings().clear_ref();
#endif /* PRODUCT */
}

void CodeBlob::prepare_for_archiving() {
  vptr(_kind)->prepare_for_archiving(this);
}

void CodeBlob::archive_blob(CodeBlob* blob, address archive_buffer) {
  blob->copy_to(archive_buffer);
  CodeBlob* archived_blob = (CodeBlob*)archive_buffer;
  archived_blob->prepare_for_archiving();
}

void CodeBlob::post_restore_impl() {
  // Track memory usage statistics after releasing CodeCache_lock
  MemoryService::track_code_cache_memory_usage();
}

void CodeBlob::post_restore() {
  vptr(_kind)->post_restore(this);
}

CodeBlob* CodeBlob::restore(address code_cache_buffer,
                            const char* name,
                            address archived_reloc_data,
                            ImmutableOopMapSet* archived_oop_maps)
{
  copy_to(code_cache_buffer);
  CodeBlob* code_blob = (CodeBlob*)code_cache_buffer;
  code_blob->set_name(name);
  code_blob->restore_mutable_data(archived_reloc_data);
  code_blob->set_oop_maps(archived_oop_maps);
  return code_blob;
}

CodeBlob* CodeBlob::create(CodeBlob* archived_blob,
                           const char* name,
                           address archived_reloc_data,
                           ImmutableOopMapSet* archived_oop_maps)
{
  ThreadInVMfromUnknown __tiv; // get to VM state in case we block on CodeCache_lock

  CodeCache::gc_on_allocation();

  CodeBlob* blob = nullptr;
  unsigned int size = archived_blob->size();
  {
    MutexLocker mu(CodeCache_lock, Mutex::_no_safepoint_check_flag);
    address code_cache_buffer = (address)CodeCache::allocate(size, CodeBlobType::NonNMethod);
    if (code_cache_buffer != nullptr) {
      blob = archived_blob->restore(code_cache_buffer,
                                    name,
                                    archived_reloc_data,
                                    archived_oop_maps);
      assert(blob != nullptr, "sanity check");
      // Flush the code block
      ICache::invalidate_range(blob->code_begin(), blob->code_size());
      CodeCache::commit(blob); // Count adapters
    }
  }
  if (blob != nullptr) {
    blob->post_restore();
  }
  return blob;
}
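// End-to-end sketch of the AOT round trip implemented above (archive assembly
// first, a later production run after it):
//
//   // dump time: snapshot a live blob into the archive buffer
//   CodeBlob::archive_blob(blob, archive_buffer);  // copy + prepare_for_archiving()
//
//   // run time: materialize the archived image back into the CodeCache
//   CodeBlob* live = CodeBlob::create(archived_blob, name,
//                                     archived_reloc_data, archived_oop_maps);
//
// prepare_for_archiving() nulls out process-local pointers (name, oop maps,
// mutable data); restore() re-establishes them for the new process.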

//-----------------------------------------------------------------------------------------
// Creates a RuntimeBlob from a CodeBuffer and copies code and relocation info.

RuntimeBlob::RuntimeBlob(
  const char* name,
  CodeBlobKind kind,
  CodeBuffer* cb,
  int size,
  uint16_t header_size,
  int16_t frame_complete,
  int frame_size,
  OopMapSet* oop_maps,
  bool caller_must_gc_arguments)
  : CodeBlob(name, kind, cb, size, header_size, frame_complete, frame_size, oop_maps, caller_must_gc_arguments,
             align_up(cb->total_relocation_size(), oopSize))
{
  cb->copy_code_and_locs_to(this);
}

void RuntimeBlob::free(RuntimeBlob* blob) {
  assert(blob != nullptr, "caller must check for nullptr");
  ThreadInVMfromUnknown __tiv; // get to VM state in case we block on CodeCache_lock
  blob->purge();
  {
    MutexLocker mu(CodeCache_lock, Mutex::_no_safepoint_check_flag);
    CodeCache::free(blob);
  }
  // Track memory usage statistics after releasing CodeCache_lock
  MemoryService::track_code_cache_memory_usage();
}

void RuntimeBlob::trace_new_stub(RuntimeBlob* stub, const char* name1, const char* name2) {
  // Do not hold the CodeCache lock during name formatting.
  assert(!CodeCache_lock->owned_by_self(), "release CodeCache_lock before registering the stub");

  if (stub != nullptr && (PrintStubCode ||
                          Forte::is_enabled() ||
                          JvmtiExport::should_post_dynamic_code_generated())) {
    char stub_id[256];
    assert(strlen(name1) + strlen(name2) < sizeof(stub_id), "stub name too long for stub_id buffer");
    jio_snprintf(stub_id, sizeof(stub_id), "%s%s", name1, name2);
    if (PrintStubCode) {
      ttyLocker ttyl;
      tty->print_cr("- - - [BEGIN] - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -");
      tty->print_cr("Decoding %s " PTR_FORMAT " [" PTR_FORMAT ", " PTR_FORMAT "] (%d bytes)",
                    stub_id, p2i(stub), p2i(stub->code_begin()), p2i(stub->code_end()), stub->code_size());
      Disassembler::decode(stub->code_begin(), stub->code_end(), tty
                           NOT_PRODUCT(COMMA &stub->asm_remarks()));
      if ((stub->oop_maps() != nullptr) && AbstractDisassembler::show_structs()) {
        tty->print_cr("- - - [OOP MAPS]- - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -");
        stub->oop_maps()->print();
      }
      tty->print_cr("- - - [END] - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -");
      tty->cr();
    }
    if (Forte::is_enabled()) {
      Forte::register_stub(stub_id, stub->code_begin(), stub->code_end());
    }

    if (JvmtiExport::should_post_dynamic_code_generated()) {
      const char* stub_name = name2;
      if (name2[0] == '\0') stub_name = name1;
      JvmtiExport::post_dynamic_code_generated(stub_name, stub->code_begin(), stub->code_end());
    }
  }

  // Track memory usage statistics after releasing CodeCache_lock
  MemoryService::track_code_cache_memory_usage();
}
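// With -XX:+PrintStubCode the block above emits output of the form (addresses
// and size below are made up for illustration):
//
//   - - - [BEGIN] - - - ...
//   Decoding RuntimeStub - some_stub 0x00007f..10 [0x00007f..40, 0x00007f..c0] (128 bytes)
//     ...disassembly...
//   - - - [END] - - - ...
//
// where the stub id is the concatenation of name1 and name2 supplied by the
// callers below (e.g. "RuntimeStub - " plus the stub's name).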

//----------------------------------------------------------------------------------------------------
// Implementation of BufferBlob

BufferBlob::BufferBlob(const char* name, CodeBlobKind kind, int size, uint16_t header_size)
  : RuntimeBlob(name, kind, size, header_size)
{}

BufferBlob* BufferBlob::create(const char* name, uint buffer_size) {
  ThreadInVMfromUnknown __tiv; // get to VM state in case we block on CodeCache_lock

  BufferBlob* blob = nullptr;
  unsigned int size = sizeof(BufferBlob);
  // align the size to CodeEntryAlignment
  size = CodeBlob::align_code_offset(size);
  size += align_up(buffer_size, oopSize);
  assert(name != nullptr, "must provide a name");
  {
    MutexLocker mu(CodeCache_lock, Mutex::_no_safepoint_check_flag);
    blob = new (size) BufferBlob(name, CodeBlobKind::Buffer, size);
  }
  // Track memory usage statistics after releasing CodeCache_lock
  MemoryService::track_code_cache_memory_usage();

  return blob;
}


BufferBlob::BufferBlob(const char* name, CodeBlobKind kind, CodeBuffer* cb, int size, uint16_t header_size)
  : RuntimeBlob(name, kind, cb, size, header_size, CodeOffsets::frame_never_safe, 0, nullptr)
{}

// Used by gtest
BufferBlob* BufferBlob::create(const char* name, CodeBuffer* cb) {
  ThreadInVMfromUnknown __tiv; // get to VM state in case we block on CodeCache_lock

  BufferBlob* blob = nullptr;
  unsigned int size = CodeBlob::allocation_size(cb, sizeof(BufferBlob));
  assert(name != nullptr, "must provide a name");
  {
    MutexLocker mu(CodeCache_lock, Mutex::_no_safepoint_check_flag);
    blob = new (size) BufferBlob(name, CodeBlobKind::Buffer, cb, size);
  }
  // Track memory usage statistics after releasing CodeCache_lock
  MemoryService::track_code_cache_memory_usage();

  return blob;
}

void* BufferBlob::operator new(size_t s, unsigned size) throw() {
  return CodeCache::allocate(size, CodeBlobType::NonNMethod);
}

void BufferBlob::free(BufferBlob* blob) {
  RuntimeBlob::free(blob);
}


//----------------------------------------------------------------------------------------------------
// Implementation of AdapterBlob

AdapterBlob::AdapterBlob(int size, CodeBuffer* cb, int entry_offset[AdapterBlob::ENTRY_COUNT]) :
  BufferBlob("I2C/C2I adapters", CodeBlobKind::Adapter, cb, size, sizeof(AdapterBlob)) {
  assert(entry_offset[I2C] == 0, "sanity check");
#ifdef ASSERT
  for (int i = 1; i < AdapterBlob::ENTRY_COUNT; i++) {
    // The entry is within the adapter blob or unset.
    int offset = entry_offset[i];
    assert((offset > 0 && offset < cb->insts()->size()) ||
           (i >= C2I_No_Clinit_Check && offset == -1),
           "invalid entry offset[%d] = 0x%x", i, offset);
  }
#endif // ASSERT
  _c2i_offset = entry_offset[C2I];
  _c2i_unverified_offset = entry_offset[C2I_Unverified];
  _c2i_no_clinit_check_offset = entry_offset[C2I_No_Clinit_Check];
  CodeCache::commit(this);
}

AdapterBlob* AdapterBlob::create(CodeBuffer* cb, int entry_offset[AdapterBlob::ENTRY_COUNT]) {
  ThreadInVMfromUnknown __tiv; // get to VM state in case we block on CodeCache_lock

  CodeCache::gc_on_allocation();

  AdapterBlob* blob = nullptr;
  unsigned int size = CodeBlob::allocation_size(cb, sizeof(AdapterBlob));
  {
    MutexLocker mu(CodeCache_lock, Mutex::_no_safepoint_check_flag);
    blob = new (size) AdapterBlob(size, cb, entry_offset);
  }
  // Track memory usage statistics after releasing CodeCache_lock
  MemoryService::track_code_cache_memory_usage();

  return blob;
}
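// A hypothetical entry_offset array for illustration: the generator might hand
// in { 0, 0x40, 0x80, -1 }, meaning the i2c entry is at code_begin(), the c2i
// and c2i-unverified entries are 0x40 and 0x80 bytes in, and no
// c2i-no-clinit-check entry was generated (-1 == unset, which the assert above
// only permits for C2I_No_Clinit_Check and later entries).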

//----------------------------------------------------------------------------------------------------
// Implementation of VtableBlob

void* VtableBlob::operator new(size_t s, unsigned size) throw() {
  // Handling of allocation failure stops compilation and prints a bunch of
  // stuff, which requires unlocking the CodeCache_lock, so that the Compile_lock
  // can be locked, and then re-locking the CodeCache_lock. That is not safe in
  // this context as we hold the CompiledICLocker. So we just don't handle code
  // cache exhaustion here; we leave that for a later allocation that does not
  // hold the CompiledICLocker.
  return CodeCache::allocate(size, CodeBlobType::NonNMethod, false /* handle_alloc_failure */);
}

VtableBlob::VtableBlob(const char* name, int size) :
  BufferBlob(name, CodeBlobKind::Vtable, size) {
}

VtableBlob* VtableBlob::create(const char* name, int buffer_size) {
  assert(JavaThread::current()->thread_state() == _thread_in_vm, "called with the wrong state");

  VtableBlob* blob = nullptr;
  unsigned int size = sizeof(VtableBlob);
  // align the size to CodeEntryAlignment
  size = align_code_offset(size);
  size += align_up(buffer_size, oopSize);
  assert(name != nullptr, "must provide a name");
  {
    if (!CodeCache_lock->try_lock()) {
      // If we can't take the CodeCache_lock, then this is a bad time to perform the ongoing
      // IC transition to megamorphic, for which this stub will be needed. It is better to
      // bail out of the transition and wait for a more opportune moment. Not only is it not
      // worth blocking on the lock for the megamorphic transition, blocking might also
      // result in a deadlock when concurrent class unloading is performed. At this point
      // in time, the CompiledICLocker is taken, so we are not allowed to blockingly wait
      // for the CodeCache_lock, as these two locks are otherwise consistently taken in
      // the opposite order. Bailing out results in an IC transition to the clean state
      // instead, which will cause subsequent calls to retry the transition eventually.
      return nullptr;
    }
    blob = new (size) VtableBlob(name, size);
    CodeCache_lock->unlock();
  }
  // Track memory usage statistics after releasing CodeCache_lock
  MemoryService::track_code_cache_memory_usage();

  return blob;
}
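// The lock-ordering hazard avoided above, sketched (roughly, per the comment
// in create()):
//
//   this path:            holds CompiledICLocker, wants CodeCache_lock
//   unloading/sweeping:   holds CodeCache_lock,   wants CompiledICLocker
//
// Since the established order is CodeCache_lock before CompiledICLocker, this
// path may only try_lock(); on failure it returns nullptr and the IC falls
// back to the clean state, retrying the megamorphic transition later.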

//----------------------------------------------------------------------------------------------------
// Implementation of MethodHandlesAdapterBlob

MethodHandlesAdapterBlob* MethodHandlesAdapterBlob::create(int buffer_size) {
  ThreadInVMfromUnknown __tiv; // get to VM state in case we block on CodeCache_lock

  MethodHandlesAdapterBlob* blob = nullptr;
  unsigned int size = sizeof(MethodHandlesAdapterBlob);
  // align the size to CodeEntryAlignment
  size = CodeBlob::align_code_offset(size);
  size += align_up(buffer_size, oopSize);
  {
    MutexLocker mu(CodeCache_lock, Mutex::_no_safepoint_check_flag);
    blob = new (size) MethodHandlesAdapterBlob(size);
    if (blob == nullptr) {
      vm_exit_out_of_memory(size, OOM_MALLOC_ERROR, "CodeCache: no room for method handle adapter blob");
    }
  }
  // Track memory usage statistics after releasing CodeCache_lock
  MemoryService::track_code_cache_memory_usage();

  return blob;
}

//----------------------------------------------------------------------------------------------------
// Implementation of RuntimeStub

RuntimeStub::RuntimeStub(
  const char* name,
  CodeBuffer* cb,
  int size,
  int16_t frame_complete,
  int frame_size,
  OopMapSet* oop_maps,
  bool caller_must_gc_arguments
)
: RuntimeBlob(name, CodeBlobKind::RuntimeStub, cb, size, sizeof(RuntimeStub),
              frame_complete, frame_size, oop_maps, caller_must_gc_arguments)
{
}

RuntimeStub* RuntimeStub::new_runtime_stub(const char* stub_name,
                                           CodeBuffer* cb,
                                           int16_t frame_complete,
                                           int frame_size,
                                           OopMapSet* oop_maps,
                                           bool caller_must_gc_arguments,
                                           bool alloc_fail_is_fatal)
{
  RuntimeStub* stub = nullptr;
  unsigned int size = CodeBlob::allocation_size(cb, sizeof(RuntimeStub));
  ThreadInVMfromUnknown __tiv; // get to VM state in case we block on CodeCache_lock
  {
    MutexLocker mu(CodeCache_lock, Mutex::_no_safepoint_check_flag);
    stub = new (size) RuntimeStub(stub_name, cb, size, frame_complete, frame_size, oop_maps, caller_must_gc_arguments);
    if (stub == nullptr) {
      if (!alloc_fail_is_fatal) {
        return nullptr;
      }
      fatal("Initial size of CodeCache is too small");
    }
  }

  trace_new_stub(stub, "RuntimeStub - ", stub_name);

  return stub;
}


void* RuntimeStub::operator new(size_t s, unsigned size) throw() {
  return CodeCache::allocate(size, CodeBlobType::NonNMethod);
}

// operator new shared by all singletons:
void* SingletonBlob::operator new(size_t s, unsigned size, bool alloc_fail_is_fatal) throw() {
  void* p = CodeCache::allocate(size, CodeBlobType::NonNMethod);
  if (alloc_fail_is_fatal && p == nullptr) fatal("Initial size of CodeCache is too small");
  return p;
}


//----------------------------------------------------------------------------------------------------
// Implementation of DeoptimizationBlob

DeoptimizationBlob::DeoptimizationBlob(
  CodeBuffer* cb,
  int size,
  OopMapSet* oop_maps,
  int unpack_offset,
  int unpack_with_exception_offset,
  int unpack_with_reexecution_offset,
  int frame_size
)
: SingletonBlob("DeoptimizationBlob", CodeBlobKind::Deoptimization, cb,
                size, sizeof(DeoptimizationBlob), frame_size, oop_maps)
{
  _unpack_offset = unpack_offset;
  _unpack_with_exception = unpack_with_exception_offset;
  _unpack_with_reexecution = unpack_with_reexecution_offset;
#ifdef COMPILER1
  _unpack_with_exception_in_tls = -1;
#endif
}


DeoptimizationBlob* DeoptimizationBlob::create(
  CodeBuffer* cb,
  OopMapSet* oop_maps,
  int unpack_offset,
  int unpack_with_exception_offset,
  int unpack_with_reexecution_offset,
  int frame_size)
{
  DeoptimizationBlob* blob = nullptr;
  unsigned int size = CodeBlob::allocation_size(cb, sizeof(DeoptimizationBlob));
  ThreadInVMfromUnknown __tiv; // get to VM state in case we block on CodeCache_lock
  {
    MutexLocker mu(CodeCache_lock, Mutex::_no_safepoint_check_flag);
    blob = new (size) DeoptimizationBlob(cb,
                                         size,
                                         oop_maps,
                                         unpack_offset,
                                         unpack_with_exception_offset,
                                         unpack_with_reexecution_offset,
                                         frame_size);
  }

  trace_new_stub(blob, "DeoptimizationBlob");

  return blob;
}

#ifdef COMPILER2

//----------------------------------------------------------------------------------------------------
// Implementation of UncommonTrapBlob

UncommonTrapBlob::UncommonTrapBlob(
  CodeBuffer* cb,
  int size,
  OopMapSet* oop_maps,
  int frame_size
)
: SingletonBlob("UncommonTrapBlob", CodeBlobKind::UncommonTrap, cb,
                size, sizeof(UncommonTrapBlob), frame_size, oop_maps)
{}


UncommonTrapBlob* UncommonTrapBlob::create(
  CodeBuffer* cb,
  OopMapSet* oop_maps,
  int frame_size)
{
  UncommonTrapBlob* blob = nullptr;
  unsigned int size = CodeBlob::allocation_size(cb, sizeof(UncommonTrapBlob));
  ThreadInVMfromUnknown __tiv; // get to VM state in case we block on CodeCache_lock
  {
    MutexLocker mu(CodeCache_lock, Mutex::_no_safepoint_check_flag);
    blob = new (size, false) UncommonTrapBlob(cb, size, oop_maps, frame_size);
  }

  trace_new_stub(blob, "UncommonTrapBlob");

  return blob;
}

//----------------------------------------------------------------------------------------------------
// Implementation of ExceptionBlob

ExceptionBlob::ExceptionBlob(
  CodeBuffer* cb,
  int size,
  OopMapSet* oop_maps,
  int frame_size
)
: SingletonBlob("ExceptionBlob", CodeBlobKind::Exception, cb,
                size, sizeof(ExceptionBlob), frame_size, oop_maps)
{}


ExceptionBlob* ExceptionBlob::create(
  CodeBuffer* cb,
  OopMapSet* oop_maps,
  int frame_size)
{
  ExceptionBlob* blob = nullptr;
  unsigned int size = CodeBlob::allocation_size(cb, sizeof(ExceptionBlob));
  ThreadInVMfromUnknown __tiv; // get to VM state in case we block on CodeCache_lock
  {
    MutexLocker mu(CodeCache_lock, Mutex::_no_safepoint_check_flag);
    blob = new (size, false) ExceptionBlob(cb, size, oop_maps, frame_size);
  }

  trace_new_stub(blob, "ExceptionBlob");

  return blob;
}

#endif // COMPILER2

//----------------------------------------------------------------------------------------------------
// Implementation of SafepointBlob

SafepointBlob::SafepointBlob(
  CodeBuffer* cb,
  int size,
  OopMapSet* oop_maps,
  int frame_size
)
: SingletonBlob("SafepointBlob", CodeBlobKind::Safepoint, cb,
                size, sizeof(SafepointBlob), frame_size, oop_maps)
{}


SafepointBlob* SafepointBlob::create(
  CodeBuffer* cb,
  OopMapSet* oop_maps,
  int frame_size)
{
  SafepointBlob* blob = nullptr;
  unsigned int size = CodeBlob::allocation_size(cb, sizeof(SafepointBlob));
  ThreadInVMfromUnknown __tiv; // get to VM state in case we block on CodeCache_lock
  {
    MutexLocker mu(CodeCache_lock, Mutex::_no_safepoint_check_flag);
    blob = new (size) SafepointBlob(cb, size, oop_maps, frame_size);
  }

  trace_new_stub(blob, "SafepointBlob");

  return blob;
}

//----------------------------------------------------------------------------------------------------
// Implementation of UpcallStub

UpcallStub::UpcallStub(const char* name, CodeBuffer* cb, int size, jobject receiver, ByteSize frame_data_offset) :
  RuntimeBlob(name, CodeBlobKind::Upcall, cb, size, sizeof(UpcallStub),
              CodeOffsets::frame_never_safe, 0 /* no frame size */,
              /* oop maps = */ nullptr, /* caller must gc arguments = */ false),
  _receiver(receiver),
  _frame_data_offset(frame_data_offset)
{
  CodeCache::commit(this);
}

void* UpcallStub::operator new(size_t s, unsigned size) throw() {
  return CodeCache::allocate(size, CodeBlobType::NonNMethod);
}

UpcallStub* UpcallStub::create(const char* name, CodeBuffer* cb, jobject receiver, ByteSize frame_data_offset) {
  ThreadInVMfromUnknown __tiv; // get to VM state in case we block on CodeCache_lock

  UpcallStub* blob = nullptr;
  unsigned int size = CodeBlob::allocation_size(cb, sizeof(UpcallStub));
  {
    MutexLocker mu(CodeCache_lock, Mutex::_no_safepoint_check_flag);
    blob = new (size) UpcallStub(name, cb, size, receiver, frame_data_offset);
  }
  if (blob == nullptr) {
    return nullptr; // caller must handle this
  }

  // Track memory usage statistics after releasing CodeCache_lock
  MemoryService::track_code_cache_memory_usage();

  trace_new_stub(blob, "UpcallStub - ", name);

  return blob;
}

void UpcallStub::oops_do(OopClosure* f, const frame& frame) {
  frame_data_for_frame(frame)->old_handles->oops_do(f);
}

JavaFrameAnchor* UpcallStub::jfa_for_frame(const frame& frame) const {
  return &frame_data_for_frame(frame)->jfa;
}

void UpcallStub::free(UpcallStub* blob) {
  assert(blob != nullptr, "caller must check for nullptr");
  JNIHandles::destroy_global(blob->receiver());
  RuntimeBlob::free(blob);
}

//----------------------------------------------------------------------------------------------------
// Verification and printing

void CodeBlob::verify() {
  if (is_nmethod()) {
    as_nmethod()->verify();
  }
}

void CodeBlob::print_on(outputStream* st) const {
  vptr()->print_on(this, st);
}

void CodeBlob::print() const { print_on(tty); }

void CodeBlob::print_value_on(outputStream* st) const {
  vptr()->print_value_on(this, st);
}

void CodeBlob::print_on_impl(outputStream* st) const {
  st->print_cr("[CodeBlob kind:%d (" INTPTR_FORMAT ")]", (int)_kind, p2i(this));
  st->print_cr("Framesize: %d", _frame_size);
}

void CodeBlob::print_value_on_impl(outputStream* st) const {
  st->print_cr("[CodeBlob]");
}

void CodeBlob::print_block_comment(outputStream* stream, address block_begin) const {
#if defined(SUPPORT_ASSEMBLY) || defined(SUPPORT_ABSTRACT_ASSEMBLY)
  if (is_nmethod()) {
    as_nmethod()->print_nmethod_labels(stream, block_begin);
  }
#endif

#ifndef PRODUCT
  ptrdiff_t offset = block_begin - code_begin();
  assert(offset >= 0, "Expecting non-negative offset!");
  _asm_remarks.print(uint(offset), stream);
#endif
}

void CodeBlob::dump_for_addr(address addr, outputStream* st, bool verbose) const {
  if (is_buffer_blob() || is_adapter_blob() || is_vtable_blob() || is_method_handles_adapter_blob()) {
    // the interpreter is generated into a buffer blob
    InterpreterCodelet* i = Interpreter::codelet_containing(addr);
    if (i != nullptr) {
      st->print_cr(INTPTR_FORMAT " is at code_begin+%d in an Interpreter codelet", p2i(addr), (int)(addr - i->code_begin()));
      i->print_on(st);
      return;
    }
    if (Interpreter::contains(addr)) {
      st->print_cr(INTPTR_FORMAT " is pointing into interpreter code"
                   " (not bytecode specific)", p2i(addr));
      return;
    }
    if (AdapterHandlerLibrary::contains(this)) {
      st->print_cr(INTPTR_FORMAT " is at code_begin+%d in an AdapterHandler", p2i(addr), (int)(addr - code_begin()));
      AdapterHandlerLibrary::print_handler_on(st, this);
    }
    // the stubroutines are generated into a buffer blob
    StubCodeDesc* d = StubCodeDesc::desc_for(addr);
    if (d != nullptr) {
      st->print_cr(INTPTR_FORMAT " is at begin+%d in a stub", p2i(addr), (int)(addr - d->begin()));
      d->print_on(st);
      st->cr();
      return;
    }
    if (StubRoutines::contains(addr)) {
      st->print_cr(INTPTR_FORMAT " is pointing to an (unnamed) stub routine", p2i(addr));
      return;
    }
    VtableStub* v = VtableStubs::stub_containing(addr);
    if (v != nullptr) {
      st->print_cr(INTPTR_FORMAT " is at entry_point+%d in a vtable stub", p2i(addr), (int)(addr - v->entry_point()));
      v->print_on(st);
      st->cr();
      return;
    }
  }
  if (is_nmethod()) {
    nmethod* nm = (nmethod*)this;
    ResourceMark rm;
    st->print(INTPTR_FORMAT " is at entry_point+%d in (nmethod*)" INTPTR_FORMAT,
              p2i(addr), (int)(addr - nm->entry_point()), p2i(nm));
    if (verbose) {
      st->print(" for ");
      nm->method()->print_value_on(st);
    }
    st->cr();
    if (verbose && st == tty) {
      // verbose is only ever true when called from findpc in debug.cpp
      nm->print_nmethod(true);
    } else {
      nm->print_on(st);
      nm->print_code_snippet(st, addr);
    }
    return;
  }
  st->print_cr(INTPTR_FORMAT " is at code_begin+%d in ", p2i(addr), (int)(addr - code_begin()));
  print_on(st);
}
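// Debugging aid: this routine backs findpc() in debug.cpp (hence the verbose
// path above). A hypothetical debugger session might look like:
//
//   (gdb) call findpc((intptr_t)0x00007f0001234567)
//
// which locates the enclosing CodeBlob and lands here to describe what the
// address points into (interpreter codelet, stub, vtable stub, nmethod, ...).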

void BufferBlob::print_on_impl(outputStream* st) const {
  RuntimeBlob::print_on_impl(st);
  print_value_on_impl(st);
}

void BufferBlob::print_value_on_impl(outputStream* st) const {
  st->print_cr("BufferBlob (" INTPTR_FORMAT ") used for %s", p2i(this), name());
}

void RuntimeStub::print_on_impl(outputStream* st) const {
  ttyLocker ttyl;
  RuntimeBlob::print_on_impl(st);
  st->print("Runtime Stub (" INTPTR_FORMAT "): ", p2i(this));
  st->print_cr("%s", name());
  Disassembler::decode((RuntimeBlob*)this, st);
}

void RuntimeStub::print_value_on_impl(outputStream* st) const {
  st->print("RuntimeStub (" INTPTR_FORMAT "): ", p2i(this));
  st->print("%s", name());
}

void SingletonBlob::print_on_impl(outputStream* st) const {
  ttyLocker ttyl;
  RuntimeBlob::print_on_impl(st);
  st->print_cr("%s", name());
  Disassembler::decode((RuntimeBlob*)this, st);
}

void SingletonBlob::print_value_on_impl(outputStream* st) const {
  st->print_cr("%s", name());
}

void DeoptimizationBlob::print_value_on_impl(outputStream* st) const {
  st->print_cr("Deoptimization (frame not available)");
}

void UpcallStub::print_on_impl(outputStream* st) const {
  RuntimeBlob::print_on_impl(st);
  print_value_on_impl(st);
  st->print_cr("Frame data offset: %d", (int) _frame_data_offset);
  oop recv = JNIHandles::resolve(_receiver);
  st->print("Receiver MH=");
  recv->print_on(st);
  Disassembler::decode((RuntimeBlob*)this, st);
}

void UpcallStub::print_value_on_impl(outputStream* st) const {
  st->print_cr("UpcallStub (" INTPTR_FORMAT ") used for %s", p2i(this), name());
}