/*
 * Copyright (c) 1998, 2025, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#include "code/codeBlob.hpp"
#include "code/codeCache.hpp"
#include "code/relocInfo.hpp"
#include "code/vtableStubs.hpp"
#include "compiler/disassembler.hpp"
#include "compiler/oopMap.hpp"
#include "interpreter/bytecode.hpp"
#include "interpreter/interpreter.hpp"
#include "jvm.h"
#include "memory/allocation.inline.hpp"
#include "memory/heap.hpp"
#include "memory/resourceArea.hpp"
#include "oops/oop.inline.hpp"
#include "prims/forte.hpp"
#include "prims/jvmtiExport.hpp"
#include "runtime/handles.inline.hpp"
#include "runtime/interfaceSupport.inline.hpp"
#include "runtime/javaFrameAnchor.hpp"
#include "runtime/jniHandles.inline.hpp"
#include "runtime/mutexLocker.hpp"
#include "runtime/safepoint.hpp"
#include "runtime/sharedRuntime.hpp"
#include "runtime/stubCodeGenerator.hpp"
#include "runtime/stubRoutines.hpp"
#include "runtime/vframe.hpp"
#include "services/memoryService.hpp"
#include "utilities/align.hpp"
#ifdef COMPILER1
#include "c1/c1_Runtime1.hpp"
#endif

#include <type_traits>

// Virtual methods are not allowed in code blobs to simplify caching compiled code.
// Check all "leaf" subclasses of the CodeBlob class.

static_assert(!std::is_polymorphic<nmethod>::value, "no virtual methods are allowed in nmethod");
static_assert(!std::is_polymorphic<AdapterBlob>::value, "no virtual methods are allowed in code blobs");
static_assert(!std::is_polymorphic<VtableBlob>::value, "no virtual methods are allowed in code blobs");
static_assert(!std::is_polymorphic<MethodHandlesAdapterBlob>::value, "no virtual methods are allowed in code blobs");
static_assert(!std::is_polymorphic<RuntimeStub>::value, "no virtual methods are allowed in code blobs");
static_assert(!std::is_polymorphic<BufferedInlineTypeBlob>::value, "no virtual methods are allowed in code blobs");
static_assert(!std::is_polymorphic<DeoptimizationBlob>::value, "no virtual methods are allowed in code blobs");
static_assert(!std::is_polymorphic<SafepointBlob>::value, "no virtual methods are allowed in code blobs");
static_assert(!std::is_polymorphic<UpcallStub>::value, "no virtual methods are allowed in code blobs");
#ifdef COMPILER2
static_assert(!std::is_polymorphic<ExceptionBlob>::value, "no virtual methods are allowed in code blobs");
static_assert(!std::is_polymorphic<UncommonTrapBlob>::value, "no virtual methods are allowed in code blobs");
#endif
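
// As an illustration only (hypothetical code, not part of this file): declaring
//   class BadBlob : public RuntimeStub { virtual void f(); };
// would make the type polymorphic and embed a compiler-generated vtable
// pointer in the blob. That pointer is a process-specific address, so a blob
// copied byte-for-byte into a cache could not be reused in another run;
// the asserts above reject such a change at compile time.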

// Add proxy vtables.
// We only need a few for now - they are used only for printing.
const nmethod::Vptr nmethod::_vpntr;
const BufferBlob::Vptr BufferBlob::_vpntr;
const RuntimeStub::Vptr RuntimeStub::_vpntr;
const SingletonBlob::Vptr SingletonBlob::_vpntr;
const DeoptimizationBlob::Vptr DeoptimizationBlob::_vpntr;
#ifdef COMPILER2
const ExceptionBlob::Vptr ExceptionBlob::_vpntr;
#endif // COMPILER2
const UpcallStub::Vptr UpcallStub::_vpntr;

const CodeBlob::Vptr* CodeBlob::vptr(CodeBlobKind kind) {
  constexpr const CodeBlob::Vptr* array[(size_t)CodeBlobKind::Number_Of_Kinds] = {
      nullptr/* None */,
      &nmethod::_vpntr,
      &BufferBlob::_vpntr,
      &AdapterBlob::_vpntr,
      &VtableBlob::_vpntr,
      &MethodHandlesAdapterBlob::_vpntr,
      &BufferedInlineTypeBlob::_vpntr,
      &RuntimeStub::_vpntr,
      &DeoptimizationBlob::_vpntr,
      &SafepointBlob::_vpntr,
#ifdef COMPILER2
      &ExceptionBlob::_vpntr,
      &UncommonTrapBlob::_vpntr,
#endif
      &UpcallStub::_vpntr
  };

  return array[(size_t)kind];
}

const CodeBlob::Vptr* CodeBlob::vptr() const {
  return vptr(_kind);
}
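
// Dispatch through these proxy vtables looks like a regular virtual call at
// the call sites further down in this file, e.g. in print_on():
//   vptr()->print_on(this, st);
// i.e. the per-kind Vptr table is selected by _kind instead of by a
// compiler-generated per-object vtable pointer.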

unsigned int CodeBlob::align_code_offset(int offset) {
  // align the size to CodeEntryAlignment
  int header_size = (int)CodeHeap::header_size();
  return align_up(offset + header_size, CodeEntryAlignment) - header_size;
}

// This must be consistent with the CodeBlob constructor's layout actions.
unsigned int CodeBlob::allocation_size(CodeBuffer* cb, int header_size) {
  // align the size to CodeEntryAlignment
  unsigned int size = align_code_offset(header_size);
  size += align_up(cb->total_content_size(), oopSize);
  size += align_up(cb->total_oop_size(), oopSize);
  return size;
}
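
// Worked example (the concrete values are illustrative only): with
// CodeHeap::header_size() == 32, CodeEntryAlignment == 64 and header_size == 96,
// align_code_offset(96) returns align_up(96 + 32, 64) - 32 == 96, so the
// content begins on a CodeEntryAlignment boundary once the preceding CodeHeap
// block header is accounted for. allocation_size() then appends the
// oopSize-aligned content and oop sections on top of that offset.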

CodeBlob::CodeBlob(const char* name, CodeBlobKind kind, CodeBuffer* cb, int size, uint16_t header_size,
                   int16_t frame_complete_offset, int frame_size, OopMapSet* oop_maps, bool caller_must_gc_arguments,
                   int mutable_data_size) :
  _oop_maps(nullptr), // will be set by set_oop_maps() call
  _name(name),
  _mutable_data(header_begin() + size), // default value is blob_end()
  _size(size),
  _relocation_size(align_up(cb->total_relocation_size(), oopSize)),
  _content_offset(CodeBlob::align_code_offset(header_size)),
  _code_offset(_content_offset + cb->total_offset_of(cb->insts())),
  _data_offset(_content_offset + align_up(cb->total_content_size(), oopSize)),
  _frame_size(frame_size),
  _mutable_data_size(mutable_data_size),
  S390_ONLY(_ctable_offset(0) COMMA)
  _header_size(header_size),
  _frame_complete_offset(frame_complete_offset),
  _kind(kind),
  _caller_must_gc_arguments(caller_must_gc_arguments)
{
  assert(is_aligned(_size, oopSize), "unaligned size");
  assert(is_aligned(header_size, oopSize), "unaligned size");
  assert(is_aligned(_relocation_size, oopSize), "unaligned size");
  assert(_data_offset <= _size, "codeBlob is too small: %d > %d", _data_offset, _size);
  assert(is_nmethod() || (cb->total_oop_size() + cb->total_metadata_size() == 0), "must be nmethod");
  assert(code_end() == content_end(), "must be the same - see code_end()");
#ifdef COMPILER1
  // probably wrong for tiered
  assert(_frame_size >= -1, "must use frame size or -1 for runtime stubs");
#endif // COMPILER1

  if (_mutable_data_size > 0) {
    _mutable_data = (address)os::malloc(_mutable_data_size, mtCode);
    if (_mutable_data == nullptr) {
      vm_exit_out_of_memory(_mutable_data_size, OOM_MALLOC_ERROR, "codebuffer: no space for mutable data");
    }
  } else {
    // We need a unique and valid not-null address
    assert(_mutable_data == blob_end(), "sanity");
  }

  set_oop_maps(oop_maps);
}

// Simple CodeBlob used for simple BufferBlob.
CodeBlob::CodeBlob(const char* name, CodeBlobKind kind, int size, uint16_t header_size) :
  _oop_maps(nullptr),
  _name(name),
  _mutable_data(header_begin() + size), // default value is blob_end()
  _size(size),
  _relocation_size(0),
  _content_offset(CodeBlob::align_code_offset(header_size)),
  _code_offset(_content_offset),
  _data_offset(size),
  _frame_size(0),
  _mutable_data_size(0),
  S390_ONLY(_ctable_offset(0) COMMA)
  _header_size(header_size),
  _frame_complete_offset(CodeOffsets::frame_never_safe),
  _kind(kind),
  _caller_must_gc_arguments(false)
{
  assert(is_aligned(size, oopSize), "unaligned size");
  assert(is_aligned(header_size, oopSize), "unaligned size");
  assert(_mutable_data == blob_end(), "sanity");
}

void CodeBlob::restore_mutable_data(address reloc_data) {
  // Relocation data is now stored as part of the mutable data area; allocate it before copying relocations
  if (_mutable_data_size > 0) {
    _mutable_data = (address)os::malloc(_mutable_data_size, mtCode);
    if (_mutable_data == nullptr) {
      vm_exit_out_of_memory(_mutable_data_size, OOM_MALLOC_ERROR, "codebuffer: no space for mutable data");
    }
  } else {
    _mutable_data = blob_end(); // default value
  }
  if (_relocation_size > 0) {
    assert(_mutable_data_size > 0, "relocation is part of mutable data section");
    memcpy((address)relocation_begin(), reloc_data, relocation_size());
  }
}

void CodeBlob::purge() {
  assert(_mutable_data != nullptr, "should never be null");
  if (_mutable_data != blob_end()) {
    os::free(_mutable_data);
    _mutable_data = blob_end(); // Valid not-null address
    _mutable_data_size = 0;
    _relocation_size = 0;
  }
  if (_oop_maps != nullptr) {
    delete _oop_maps;
    _oop_maps = nullptr;
  }
  NOT_PRODUCT(_asm_remarks.clear());
  NOT_PRODUCT(_dbg_strings.clear());
}

void CodeBlob::set_oop_maps(OopMapSet* p) {
  // Danger Will Robinson! This method allocates a big
  // chunk of memory, it's your job to free it.
  if (p != nullptr) {
    _oop_maps = ImmutableOopMapSet::build_from(p);
  } else {
    _oop_maps = nullptr;
  }
}

const ImmutableOopMap* CodeBlob::oop_map_for_return_address(address return_address) const {
  assert(_oop_maps != nullptr, "nope");
  return _oop_maps->find_map_at_offset((intptr_t) return_address - (intptr_t) code_begin());
}

void CodeBlob::print_code_on(outputStream* st) {
  ResourceMark m;
  Disassembler::decode(this, st);
}

void CodeBlob::prepare_for_archiving_impl() {
  set_name(nullptr);
  _oop_maps = nullptr;
  _mutable_data = nullptr;
#ifndef PRODUCT
  asm_remarks().clear();
  dbg_strings().clear();
#endif /* PRODUCT */
}

void CodeBlob::prepare_for_archiving() {
  vptr(_kind)->prepare_for_archiving(this);
}

void CodeBlob::archive_blob(CodeBlob* blob, address archive_buffer) {
  blob->copy_to(archive_buffer);
  CodeBlob* archived_blob = (CodeBlob*)archive_buffer;
  archived_blob->prepare_for_archiving();
}
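
// A minimal sketch of the archive/restore round trip, using only functions
// defined in this file (variable names are illustrative):
//   CodeBlob::archive_blob(blob, archive_buffer);  // copy blob, scrub transient state
//   ...                                            // later run, archive mapped back in
//   CodeBlob* live = CodeBlob::create(archived_blob, name,
//                                     archived_reloc_data, archived_oop_maps);
// create() allocates code cache space, restores the mutable data and oop maps,
// flushes the instruction cache, and finally runs post_restore().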

void CodeBlob::post_restore_impl() {
  // Track memory usage statistic after releasing CodeCache_lock
  MemoryService::track_code_cache_memory_usage();
}

void CodeBlob::post_restore() {
  vptr(_kind)->post_restore(this);
}

CodeBlob* CodeBlob::restore(address code_cache_buffer,
                            const char* name,
                            address archived_reloc_data,
                            ImmutableOopMapSet* archived_oop_maps)
{
  copy_to(code_cache_buffer);
  CodeBlob* code_blob = (CodeBlob*)code_cache_buffer;
  code_blob->set_name(name);
  code_blob->restore_mutable_data(archived_reloc_data);
  code_blob->set_oop_maps(archived_oop_maps);
  return code_blob;
}

CodeBlob* CodeBlob::create(CodeBlob* archived_blob,
                           const char* name,
                           address archived_reloc_data,
                           ImmutableOopMapSet* archived_oop_maps
                           )
{
  ThreadInVMfromUnknown __tiv; // get to VM state in case we block on CodeCache_lock

  CodeCache::gc_on_allocation();

  CodeBlob* blob = nullptr;
  unsigned int size = archived_blob->size();
  {
    MutexLocker mu(CodeCache_lock, Mutex::_no_safepoint_check_flag);
    address code_cache_buffer = (address)CodeCache::allocate(size, CodeBlobType::NonNMethod);
    if (code_cache_buffer != nullptr) {
      blob = archived_blob->restore(code_cache_buffer,
                                    name,
                                    archived_reloc_data,
                                    archived_oop_maps);
      assert(blob != nullptr, "sanity check");

      // Flush the code block
      ICache::invalidate_range(blob->code_begin(), blob->code_size());
      CodeCache::commit(blob); // Count adapters
    }
  }
  if (blob != nullptr) {
    blob->post_restore();
  }
  return blob;
}

//-----------------------------------------------------------------------------------------
// Creates a RuntimeBlob from a CodeBuffer and copies the code and relocation info.

RuntimeBlob::RuntimeBlob(
  const char* name,
  CodeBlobKind kind,
  CodeBuffer* cb,
  int size,
  uint16_t header_size,
  int16_t frame_complete,
  int frame_size,
  OopMapSet* oop_maps,
  bool caller_must_gc_arguments)
  : CodeBlob(name, kind, cb, size, header_size, frame_complete, frame_size, oop_maps, caller_must_gc_arguments,
             align_up(cb->total_relocation_size(), oopSize))
{
  cb->copy_code_and_locs_to(this);
}

void RuntimeBlob::free(RuntimeBlob* blob) {
  assert(blob != nullptr, "caller must check for nullptr");
  ThreadInVMfromUnknown __tiv; // get to VM state in case we block on CodeCache_lock
  blob->purge();
  {
    MutexLocker mu(CodeCache_lock, Mutex::_no_safepoint_check_flag);
    CodeCache::free(blob);
  }
  // Track memory usage statistic after releasing CodeCache_lock
  MemoryService::track_code_cache_memory_usage();
}

void RuntimeBlob::trace_new_stub(RuntimeBlob* stub, const char* name1, const char* name2) {
  // Do not hold the CodeCache lock during name formatting.
  assert(!CodeCache_lock->owned_by_self(), "release CodeCache before registering the stub");

  if (stub != nullptr && (PrintStubCode ||
                          Forte::is_enabled() ||
                          JvmtiExport::should_post_dynamic_code_generated())) {
    char stub_id[256];
    assert(strlen(name1) + strlen(name2) < sizeof(stub_id), "");
    jio_snprintf(stub_id, sizeof(stub_id), "%s%s", name1, name2);
    if (PrintStubCode) {
      ttyLocker ttyl;
      tty->print_cr("- - - [BEGIN] - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -");
      tty->print_cr("Decoding %s " PTR_FORMAT " [" PTR_FORMAT ", " PTR_FORMAT "] (%d bytes)",
                    stub_id, p2i(stub), p2i(stub->code_begin()), p2i(stub->code_end()), stub->code_size());
      Disassembler::decode(stub->code_begin(), stub->code_end(), tty
                           NOT_PRODUCT(COMMA &stub->asm_remarks()));
      if ((stub->oop_maps() != nullptr) && AbstractDisassembler::show_structs()) {
        tty->print_cr("- - - [OOP MAPS]- - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -");
        stub->oop_maps()->print();
      }
      tty->print_cr("- - - [END] - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -");
      tty->cr();
    }
    if (Forte::is_enabled()) {
      Forte::register_stub(stub_id, stub->code_begin(), stub->code_end());
    }

    if (JvmtiExport::should_post_dynamic_code_generated()) {
      const char* stub_name = name2;
      if (name2[0] == '\0') stub_name = name1;
      JvmtiExport::post_dynamic_code_generated(stub_name, stub->code_begin(), stub->code_end());
    }
  }

  // Track memory usage statistic after releasing CodeCache_lock
  MemoryService::track_code_cache_memory_usage();
}

//----------------------------------------------------------------------------------------------------
// Implementation of BufferBlob

BufferBlob::BufferBlob(const char* name, CodeBlobKind kind, int size, uint16_t header_size)
: RuntimeBlob(name, kind, size, header_size)
{}

BufferBlob* BufferBlob::create(const char* name, uint buffer_size) {
  ThreadInVMfromUnknown __tiv; // get to VM state in case we block on CodeCache_lock

  BufferBlob* blob = nullptr;
  unsigned int size = sizeof(BufferBlob);
  // align the size to CodeEntryAlignment
  size = CodeBlob::align_code_offset(size);
  size += align_up(buffer_size, oopSize);
  assert(name != nullptr, "must provide a name");
  {
    MutexLocker mu(CodeCache_lock, Mutex::_no_safepoint_check_flag);
    blob = new (size) BufferBlob(name, CodeBlobKind::Buffer, size);
  }
  // Track memory usage statistic after releasing CodeCache_lock
  MemoryService::track_code_cache_memory_usage();

  return blob;
}


BufferBlob::BufferBlob(const char* name, CodeBlobKind kind, CodeBuffer* cb, int size, uint16_t header_size)
  : RuntimeBlob(name, kind, cb, size, header_size, CodeOffsets::frame_never_safe, 0, nullptr)
{}

// Used by gtest
BufferBlob* BufferBlob::create(const char* name, CodeBuffer* cb) {
  ThreadInVMfromUnknown __tiv; // get to VM state in case we block on CodeCache_lock

  BufferBlob* blob = nullptr;
  unsigned int size = CodeBlob::allocation_size(cb, sizeof(BufferBlob));
  assert(name != nullptr, "must provide a name");
  {
    MutexLocker mu(CodeCache_lock, Mutex::_no_safepoint_check_flag);
    blob = new (size) BufferBlob(name, CodeBlobKind::Buffer, cb, size, sizeof(BufferBlob));
  }
  // Track memory usage statistic after releasing CodeCache_lock
  MemoryService::track_code_cache_memory_usage();

  return blob;
}
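
// Example usage (a minimal sketch; the name and size are illustrative and
// error handling is elided):
//   BufferBlob* blob = BufferBlob::create("my buffer", 1024);
//   if (blob != nullptr) {
//     // ... emit code into [blob->code_begin(), blob->code_end()) ...
//     BufferBlob::free(blob);  // return the space to the code cache
//   }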

void* BufferBlob::operator new(size_t s, unsigned size) throw() {
  return CodeCache::allocate(size, CodeBlobType::NonNMethod);
}

void BufferBlob::free(BufferBlob *blob) {
  RuntimeBlob::free(blob);
}

BufferBlob::BufferBlob(const char* name, CodeBlobKind kind, CodeBuffer* cb, int size, uint16_t header_size, int frame_complete, int frame_size, OopMapSet* oop_maps, bool caller_must_gc_arguments)
  : RuntimeBlob(name, kind, cb, size, header_size, frame_complete, frame_size, oop_maps, caller_must_gc_arguments)
{}

//----------------------------------------------------------------------------------------------------
// Implementation of AdapterBlob

AdapterBlob::AdapterBlob(int size, CodeBuffer* cb, int entry_offset[AdapterBlob::ENTRY_COUNT], int frame_complete, int frame_size, OopMapSet* oop_maps, bool caller_must_gc_arguments) :
  BufferBlob("I2C/C2I adapters", CodeBlobKind::Adapter, cb, size, sizeof(AdapterBlob), frame_complete, frame_size, oop_maps, caller_must_gc_arguments) {
#ifdef ASSERT
  assert(entry_offset[I2C] == 0, "sanity check");
  for (int i = 1; i < AdapterBlob::ENTRY_COUNT; i++) {
    // The entry is within the adapter blob or unset.
    int offset = entry_offset[i];
    assert((offset > 0 && offset < cb->insts()->size()) ||
           (i >= C2I_No_Clinit_Check && offset == -1),
           "invalid entry offset[%d] = 0x%x", i, offset);
  }
#endif // ASSERT
  _c2i_offset = entry_offset[C2I];
  _c2i_inline_offset = entry_offset[C2I_Inline];
  _c2i_inline_ro_offset = entry_offset[C2I_Inline_RO];
  _c2i_unverified_offset = entry_offset[C2I_Unverified];
  _c2i_unverified_inline_offset = entry_offset[C2I_Unverified_Inline];
  _c2i_no_clinit_check_offset = entry_offset[C2I_No_Clinit_Check];
  CodeCache::commit(this);
}

AdapterBlob* AdapterBlob::create(CodeBuffer* cb, int entry_offset[AdapterBlob::ENTRY_COUNT], int frame_complete, int frame_size, OopMapSet* oop_maps, bool caller_must_gc_arguments) {
  ThreadInVMfromUnknown __tiv; // get to VM state in case we block on CodeCache_lock

  CodeCache::gc_on_allocation();

  AdapterBlob* blob = nullptr;
  unsigned int size = CodeBlob::allocation_size(cb, sizeof(AdapterBlob));
  {
    MutexLocker mu(CodeCache_lock, Mutex::_no_safepoint_check_flag);
    blob = new (size) AdapterBlob(size, cb, entry_offset, frame_complete, frame_size, oop_maps, caller_must_gc_arguments);
  }
  // Track memory usage statistic after releasing CodeCache_lock
  MemoryService::track_code_cache_memory_usage();

  return blob;
}

//----------------------------------------------------------------------------------------------------
// Implementation of VtableBlob

void* VtableBlob::operator new(size_t s, unsigned size) throw() {
  // Handling of allocation failure stops compilation and prints a bunch of
  // stuff, which requires unlocking the CodeCache_lock, so that the Compile_lock
  // can be locked, and then re-locking the CodeCache_lock. That is not safe in
  // this context as we hold the CompiledICLocker. So we just don't handle code
  // cache exhaustion here; we leave that for a later allocation that does not
  // hold the CompiledICLocker.
  return CodeCache::allocate(size, CodeBlobType::NonNMethod, false /* handle_alloc_failure */);
}

VtableBlob::VtableBlob(const char* name, int size) :
  BufferBlob(name, CodeBlobKind::Vtable, size) {
}

VtableBlob* VtableBlob::create(const char* name, int buffer_size) {
  assert(JavaThread::current()->thread_state() == _thread_in_vm, "called with the wrong state");

  VtableBlob* blob = nullptr;
  unsigned int size = sizeof(VtableBlob);
  // align the size to CodeEntryAlignment
  size = align_code_offset(size);
  size += align_up(buffer_size, oopSize);
  assert(name != nullptr, "must provide a name");
  {
    if (!CodeCache_lock->try_lock()) {
      // If we can't take the CodeCache_lock, then this is a bad time to perform the ongoing
      // IC transition to megamorphic, for which this stub will be needed. It is better to
      // bail out the transition, and wait for a more opportune moment. Not only is it not
      // worth waiting for the lock blockingly for the megamorphic transition, it might
      // also result in a deadlock to blockingly wait, when concurrent class unloading is
      // performed. At this point in time, the CompiledICLocker is taken, so we are not
      // allowed to blockingly wait for the CodeCache_lock, as these two locks are otherwise
      // consistently taken in the opposite order. Bailing out results in an IC transition to
      // the clean state instead, which will cause subsequent calls to retry the transitioning
      // eventually.
      return nullptr;
    }
    blob = new (size) VtableBlob(name, size);
    CodeCache_lock->unlock();
  }
  // Track memory usage statistic after releasing CodeCache_lock
  MemoryService::track_code_cache_memory_usage();

  return blob;
}

//----------------------------------------------------------------------------------------------------
// Implementation of MethodHandlesAdapterBlob

MethodHandlesAdapterBlob* MethodHandlesAdapterBlob::create(int buffer_size) {
  ThreadInVMfromUnknown __tiv; // get to VM state in case we block on CodeCache_lock

  MethodHandlesAdapterBlob* blob = nullptr;
  unsigned int size = sizeof(MethodHandlesAdapterBlob);
  // align the size to CodeEntryAlignment
  size = CodeBlob::align_code_offset(size);
  size += align_up(buffer_size, oopSize);
  {
    MutexLocker mu(CodeCache_lock, Mutex::_no_safepoint_check_flag);
    blob = new (size) MethodHandlesAdapterBlob(size);
    if (blob == nullptr) {
      vm_exit_out_of_memory(size, OOM_MALLOC_ERROR, "CodeCache: no room for method handle adapter blob");
    }
  }
  // Track memory usage statistic after releasing CodeCache_lock
  MemoryService::track_code_cache_memory_usage();

  return blob;
}

//----------------------------------------------------------------------------------------------------
// Implementation of BufferedInlineTypeBlob
BufferedInlineTypeBlob::BufferedInlineTypeBlob(int size, CodeBuffer* cb, int pack_fields_off, int pack_fields_jobject_off, int unpack_fields_off) :
  BufferBlob("buffered inline type", CodeBlobKind::BufferedInlineType, cb, size, sizeof(BufferedInlineTypeBlob)),
  _pack_fields_off(pack_fields_off),
  _pack_fields_jobject_off(pack_fields_jobject_off),
  _unpack_fields_off(unpack_fields_off) {
  CodeCache::commit(this);
}

BufferedInlineTypeBlob* BufferedInlineTypeBlob::create(CodeBuffer* cb, int pack_fields_off, int pack_fields_jobject_off, int unpack_fields_off) {
  ThreadInVMfromUnknown __tiv; // get to VM state in case we block on CodeCache_lock

  BufferedInlineTypeBlob* blob = nullptr;
  unsigned int size = CodeBlob::allocation_size(cb, sizeof(BufferedInlineTypeBlob));
  {
    MutexLocker mu(CodeCache_lock, Mutex::_no_safepoint_check_flag);
    blob = new (size) BufferedInlineTypeBlob(size, cb, pack_fields_off, pack_fields_jobject_off, unpack_fields_off);
  }
  // Track memory usage statistic after releasing CodeCache_lock
  MemoryService::track_code_cache_memory_usage();

  return blob;
}

//----------------------------------------------------------------------------------------------------
// Implementation of RuntimeStub

RuntimeStub::RuntimeStub(
  const char* name,
  CodeBuffer* cb,
  int size,
  int16_t frame_complete,
  int frame_size,
  OopMapSet* oop_maps,
  bool caller_must_gc_arguments
)
: RuntimeBlob(name, CodeBlobKind::RuntimeStub, cb, size, sizeof(RuntimeStub),
              frame_complete, frame_size, oop_maps, caller_must_gc_arguments)
{
}

RuntimeStub* RuntimeStub::new_runtime_stub(const char* stub_name,
                                           CodeBuffer* cb,
                                           int16_t frame_complete,
                                           int frame_size,
                                           OopMapSet* oop_maps,
                                           bool caller_must_gc_arguments,
                                           bool alloc_fail_is_fatal)
{
  RuntimeStub* stub = nullptr;
  unsigned int size = CodeBlob::allocation_size(cb, sizeof(RuntimeStub));
  ThreadInVMfromUnknown __tiv; // get to VM state in case we block on CodeCache_lock
  {
    MutexLocker mu(CodeCache_lock, Mutex::_no_safepoint_check_flag);
    stub = new (size) RuntimeStub(stub_name, cb, size, frame_complete, frame_size, oop_maps, caller_must_gc_arguments);
    if (stub == nullptr) {
      if (!alloc_fail_is_fatal) {
        return nullptr;
      }
      fatal("Initial size of CodeCache is too small");
    }
  }

  trace_new_stub(stub, "RuntimeStub - ", stub_name);

  return stub;
}


void* RuntimeStub::operator new(size_t s, unsigned size) throw() {
  return CodeCache::allocate(size, CodeBlobType::NonNMethod);
}

// operator new shared by all singletons:
void* SingletonBlob::operator new(size_t s, unsigned size, bool alloc_fail_is_fatal) throw() {
  void* p = CodeCache::allocate(size, CodeBlobType::NonNMethod);
  if (alloc_fail_is_fatal && !p) fatal("Initial size of CodeCache is too small");
  return p;
}


//----------------------------------------------------------------------------------------------------
// Implementation of DeoptimizationBlob

DeoptimizationBlob::DeoptimizationBlob(
  CodeBuffer* cb,
  int size,
  OopMapSet* oop_maps,
  int unpack_offset,
  int unpack_with_exception_offset,
  int unpack_with_reexecution_offset,
  int frame_size
)
: SingletonBlob("DeoptimizationBlob", CodeBlobKind::Deoptimization, cb,
                size, sizeof(DeoptimizationBlob), frame_size, oop_maps)
{
  _unpack_offset = unpack_offset;
  _unpack_with_exception = unpack_with_exception_offset;
  _unpack_with_reexecution = unpack_with_reexecution_offset;
#ifdef COMPILER1
  _unpack_with_exception_in_tls = -1;
#endif
}


DeoptimizationBlob* DeoptimizationBlob::create(
  CodeBuffer* cb,
  OopMapSet* oop_maps,
  int unpack_offset,
  int unpack_with_exception_offset,
  int unpack_with_reexecution_offset,
  int frame_size)
{
  DeoptimizationBlob* blob = nullptr;
  unsigned int size = CodeBlob::allocation_size(cb, sizeof(DeoptimizationBlob));
  ThreadInVMfromUnknown __tiv; // get to VM state in case we block on CodeCache_lock
  {
    MutexLocker mu(CodeCache_lock, Mutex::_no_safepoint_check_flag);
    blob = new (size) DeoptimizationBlob(cb,
                                         size,
                                         oop_maps,
                                         unpack_offset,
                                         unpack_with_exception_offset,
                                         unpack_with_reexecution_offset,
                                         frame_size);
  }

  trace_new_stub(blob, "DeoptimizationBlob");

  return blob;
}

#ifdef COMPILER2

//----------------------------------------------------------------------------------------------------
// Implementation of UncommonTrapBlob

UncommonTrapBlob::UncommonTrapBlob(
  CodeBuffer* cb,
  int size,
  OopMapSet* oop_maps,
  int frame_size
)
: SingletonBlob("UncommonTrapBlob", CodeBlobKind::UncommonTrap, cb,
                size, sizeof(UncommonTrapBlob), frame_size, oop_maps)
{}


UncommonTrapBlob* UncommonTrapBlob::create(
  CodeBuffer* cb,
  OopMapSet* oop_maps,
  int frame_size)
{
  UncommonTrapBlob* blob = nullptr;
  unsigned int size = CodeBlob::allocation_size(cb, sizeof(UncommonTrapBlob));
  ThreadInVMfromUnknown __tiv; // get to VM state in case we block on CodeCache_lock
  {
    MutexLocker mu(CodeCache_lock, Mutex::_no_safepoint_check_flag);
    blob = new (size, false) UncommonTrapBlob(cb, size, oop_maps, frame_size);
  }

  trace_new_stub(blob, "UncommonTrapBlob");

  return blob;
}

//----------------------------------------------------------------------------------------------------
// Implementation of ExceptionBlob

ExceptionBlob::ExceptionBlob(
  CodeBuffer* cb,
  int size,
  OopMapSet* oop_maps,
  int frame_size
)
: SingletonBlob("ExceptionBlob", CodeBlobKind::Exception, cb,
                size, sizeof(ExceptionBlob), frame_size, oop_maps)
{}


ExceptionBlob* ExceptionBlob::create(
  CodeBuffer* cb,
  OopMapSet* oop_maps,
  int frame_size)
{
  ExceptionBlob* blob = nullptr;
  unsigned int size = CodeBlob::allocation_size(cb, sizeof(ExceptionBlob));
  ThreadInVMfromUnknown __tiv; // get to VM state in case we block on CodeCache_lock
  {
    MutexLocker mu(CodeCache_lock, Mutex::_no_safepoint_check_flag);
    blob = new (size, false) ExceptionBlob(cb, size, oop_maps, frame_size);
  }

  trace_new_stub(blob, "ExceptionBlob");

  return blob;
}

#endif // COMPILER2

//----------------------------------------------------------------------------------------------------
// Implementation of SafepointBlob

SafepointBlob::SafepointBlob(
  CodeBuffer* cb,
  int size,
  OopMapSet* oop_maps,
  int frame_size
)
: SingletonBlob("SafepointBlob", CodeBlobKind::Safepoint, cb,
                size, sizeof(SafepointBlob), frame_size, oop_maps)
{}


SafepointBlob* SafepointBlob::create(
  CodeBuffer* cb,
  OopMapSet* oop_maps,
  int frame_size)
{
  SafepointBlob* blob = nullptr;
  unsigned int size = CodeBlob::allocation_size(cb, sizeof(SafepointBlob));
  ThreadInVMfromUnknown __tiv; // get to VM state in case we block on CodeCache_lock
  {
    MutexLocker mu(CodeCache_lock, Mutex::_no_safepoint_check_flag);
    blob = new (size) SafepointBlob(cb, size, oop_maps, frame_size);
  }

  trace_new_stub(blob, "SafepointBlob");

  return blob;
}

//----------------------------------------------------------------------------------------------------
// Implementation of UpcallStub

UpcallStub::UpcallStub(const char* name, CodeBuffer* cb, int size, jobject receiver, ByteSize frame_data_offset) :
  RuntimeBlob(name, CodeBlobKind::Upcall, cb, size, sizeof(UpcallStub),
              CodeOffsets::frame_never_safe, 0 /* no frame size */,
              /* oop maps = */ nullptr, /* caller must gc arguments = */ false),
  _receiver(receiver),
  _frame_data_offset(frame_data_offset)
{
  CodeCache::commit(this);
}

void* UpcallStub::operator new(size_t s, unsigned size) throw() {
  return CodeCache::allocate(size, CodeBlobType::NonNMethod);
}

UpcallStub* UpcallStub::create(const char* name, CodeBuffer* cb, jobject receiver, ByteSize frame_data_offset) {
  ThreadInVMfromUnknown __tiv; // get to VM state in case we block on CodeCache_lock

  UpcallStub* blob = nullptr;
  unsigned int size = CodeBlob::allocation_size(cb, sizeof(UpcallStub));
  {
    MutexLocker mu(CodeCache_lock, Mutex::_no_safepoint_check_flag);
    blob = new (size) UpcallStub(name, cb, size, receiver, frame_data_offset);
  }
  if (blob == nullptr) {
    return nullptr; // caller must handle this
  }

  // Track memory usage statistic after releasing CodeCache_lock
  MemoryService::track_code_cache_memory_usage();

  trace_new_stub(blob, "UpcallStub - ", name);

  return blob;
}

void UpcallStub::oops_do(OopClosure* f, const frame& frame) {
  frame_data_for_frame(frame)->old_handles->oops_do(f);
}

JavaFrameAnchor* UpcallStub::jfa_for_frame(const frame& frame) const {
  return &frame_data_for_frame(frame)->jfa;
}

void UpcallStub::free(UpcallStub* blob) {
  assert(blob != nullptr, "caller must check for nullptr");
  JNIHandles::destroy_global(blob->receiver());
  RuntimeBlob::free(blob);
}

//----------------------------------------------------------------------------------------------------
// Verification and printing

void CodeBlob::verify() {
  if (is_nmethod()) {
    as_nmethod()->verify();
  }
}

void CodeBlob::print_on(outputStream* st) const {
  vptr()->print_on(this, st);
}

void CodeBlob::print() const { print_on(tty); }

void CodeBlob::print_value_on(outputStream* st) const {
  vptr()->print_value_on(this, st);
}

void CodeBlob::print_on_impl(outputStream* st) const {
  st->print_cr("[CodeBlob kind:%d (" INTPTR_FORMAT ")]", (int)_kind, p2i(this));
  st->print_cr("Framesize: %d", _frame_size);
}

void CodeBlob::print_value_on_impl(outputStream* st) const {
  st->print_cr("[CodeBlob]");
}

void CodeBlob::print_block_comment(outputStream* stream, address block_begin) const {
#if defined(SUPPORT_ASSEMBLY) || defined(SUPPORT_ABSTRACT_ASSEMBLY)
  if (is_nmethod()) {
    as_nmethod()->print_nmethod_labels(stream, block_begin);
  }
#endif

#ifndef PRODUCT
  ptrdiff_t offset = block_begin - code_begin();
  assert(offset >= 0, "Expecting non-negative offset!");
  _asm_remarks.print(uint(offset), stream);
#endif
}

void CodeBlob::dump_for_addr(address addr, outputStream* st, bool verbose) const {
  if (is_buffer_blob() || is_adapter_blob() || is_vtable_blob() || is_method_handles_adapter_blob()) {
    // the interpreter is generated into a buffer blob
    InterpreterCodelet* i = Interpreter::codelet_containing(addr);
    if (i != nullptr) {
      st->print_cr(INTPTR_FORMAT " is at code_begin+%d in an Interpreter codelet", p2i(addr), (int)(addr - i->code_begin()));
      i->print_on(st);
      return;
    }
    if (Interpreter::contains(addr)) {
      st->print_cr(INTPTR_FORMAT " is pointing into interpreter code"
                   " (not bytecode specific)", p2i(addr));
      return;
    }
    //
    if (AdapterHandlerLibrary::contains(this)) {
      st->print_cr(INTPTR_FORMAT " is at code_begin+%d in an AdapterHandler", p2i(addr), (int)(addr - code_begin()));
      AdapterHandlerLibrary::print_handler_on(st, this);
    }
    // the stubroutines are generated into a buffer blob
    StubCodeDesc* d = StubCodeDesc::desc_for(addr);
    if (d != nullptr) {
      st->print_cr(INTPTR_FORMAT " is at begin+%d in a stub", p2i(addr), (int)(addr - d->begin()));
      d->print_on(st);
      st->cr();
      return;
    }
    if (StubRoutines::contains(addr)) {
      st->print_cr(INTPTR_FORMAT " is pointing to an (unnamed) stub routine", p2i(addr));
      return;
    }
    VtableStub* v = VtableStubs::stub_containing(addr);
    if (v != nullptr) {
      st->print_cr(INTPTR_FORMAT " is at entry_point+%d in a vtable stub", p2i(addr), (int)(addr - v->entry_point()));
      v->print_on(st);
      st->cr();
      return;
    }
  }
  if (is_nmethod()) {
    nmethod* nm = (nmethod*)this;
    ResourceMark rm;
    st->print(INTPTR_FORMAT " is at entry_point+%d in (nmethod*)" INTPTR_FORMAT,
              p2i(addr), (int)(addr - nm->entry_point()), p2i(nm));
    if (verbose) {
      st->print(" for ");
      nm->method()->print_value_on(st);
    }
    st->cr();
    if (verbose && st == tty) {
      // verbose is only ever true when called from findpc in debug.cpp
      nm->print_nmethod(true);
    } else {
      nm->print_on(st);
      nm->print_code_snippet(st, addr);
    }
    return;
  }
  st->print_cr(INTPTR_FORMAT " is at code_begin+%d in ", p2i(addr), (int)(addr - code_begin()));
  print_on(st);
}

void BufferBlob::print_on_impl(outputStream* st) const {
  RuntimeBlob::print_on_impl(st);
  print_value_on_impl(st);
}

void BufferBlob::print_value_on_impl(outputStream* st) const {
  st->print_cr("BufferBlob (" INTPTR_FORMAT ") used for %s", p2i(this), name());
}

void RuntimeStub::print_on_impl(outputStream* st) const {
  ttyLocker ttyl;
  RuntimeBlob::print_on_impl(st);
  st->print("Runtime Stub (" INTPTR_FORMAT "): ", p2i(this));
  st->print_cr("%s", name());
  Disassembler::decode((RuntimeBlob*)this, st);
}

void RuntimeStub::print_value_on_impl(outputStream* st) const {
  st->print("RuntimeStub (" INTPTR_FORMAT "): ", p2i(this));
  st->print("%s", name());
}

void SingletonBlob::print_on_impl(outputStream* st) const {
  ttyLocker ttyl;
  RuntimeBlob::print_on_impl(st);
  st->print_cr("%s", name());
  Disassembler::decode((RuntimeBlob*)this, st);
}

void SingletonBlob::print_value_on_impl(outputStream* st) const {
  st->print_cr("%s", name());
}

void DeoptimizationBlob::print_value_on_impl(outputStream* st) const {
  st->print_cr("Deoptimization (frame not available)");
}

void UpcallStub::print_on_impl(outputStream* st) const {
  RuntimeBlob::print_on_impl(st);
  print_value_on_impl(st);
  st->print_cr("Frame data offset: %d", (int) _frame_data_offset);
  oop recv = JNIHandles::resolve(_receiver);
  st->print("Receiver MH=");
  recv->print_on(st);
  Disassembler::decode((RuntimeBlob*)this, st);
}

void UpcallStub::print_value_on_impl(outputStream* st) const {
  st->print_cr("UpcallStub (" INTPTR_FORMAT ") used for %s", p2i(this), name());
}