/*
 * Copyright (c) 1998, 2025, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#include "code/codeBlob.hpp"
#include "code/codeCache.hpp"
#include "code/relocInfo.hpp"
#include "code/vtableStubs.hpp"
#include "compiler/disassembler.hpp"
#include "compiler/oopMap.hpp"
#include "interpreter/bytecode.hpp"
#include "interpreter/interpreter.hpp"
#include "jvm.h"
#include "memory/allocation.inline.hpp"
#include "memory/heap.hpp"
#include "memory/resourceArea.hpp"
#include "oops/oop.inline.hpp"
#include "prims/forte.hpp"
#include "prims/jvmtiExport.hpp"
#include "runtime/handles.inline.hpp"
#include "runtime/interfaceSupport.inline.hpp"
#include "runtime/javaFrameAnchor.hpp"
#include "runtime/jniHandles.inline.hpp"
#include "runtime/mutexLocker.hpp"
#include "runtime/safepoint.hpp"
#include "runtime/sharedRuntime.hpp"
#include "runtime/stubCodeGenerator.hpp"
#include "runtime/stubRoutines.hpp"
#include "runtime/vframe.hpp"
#include "services/memoryService.hpp"
#include "utilities/align.hpp"
#ifdef COMPILER1
#include "c1/c1_Runtime1.hpp"
#endif

#include <type_traits>

// Virtual methods are not allowed in code blobs to simplify caching compiled code.
// Check all "leaf" subclasses of CodeBlob class.

static_assert(!std::is_polymorphic<nmethod>::value, "no virtual methods are allowed in nmethod");
static_assert(!std::is_polymorphic<AdapterBlob>::value, "no virtual methods are allowed in code blobs");
static_assert(!std::is_polymorphic<VtableBlob>::value, "no virtual methods are allowed in code blobs");
static_assert(!std::is_polymorphic<MethodHandlesAdapterBlob>::value, "no virtual methods are allowed in code blobs");
static_assert(!std::is_polymorphic<RuntimeStub>::value, "no virtual methods are allowed in code blobs");
static_assert(!std::is_polymorphic<BufferedInlineTypeBlob>::value, "no virtual methods are allowed in code blobs");
static_assert(!std::is_polymorphic<DeoptimizationBlob>::value, "no virtual methods are allowed in code blobs");
static_assert(!std::is_polymorphic<SafepointBlob>::value, "no virtual methods are allowed in code blobs");
static_assert(!std::is_polymorphic<UpcallStub>::value, "no virtual methods are allowed in code blobs");
#ifdef COMPILER2
static_assert(!std::is_polymorphic<ExceptionBlob>::value, "no virtual methods are allowed in code blobs");
static_assert(!std::is_polymorphic<UncommonTrapBlob>::value, "no virtual methods are allowed in code blobs");
#endif

// Add proxy vtables.
// We need only a few for now - they are used only for printing.
const nmethod::Vptr nmethod::_vpntr;
const BufferBlob::Vptr BufferBlob::_vpntr;
const RuntimeStub::Vptr RuntimeStub::_vpntr;
const SingletonBlob::Vptr SingletonBlob::_vpntr;
const DeoptimizationBlob::Vptr DeoptimizationBlob::_vpntr;
#ifdef COMPILER2
const ExceptionBlob::Vptr ExceptionBlob::_vpntr;
#endif // COMPILER2
const UpcallStub::Vptr UpcallStub::_vpntr;

const CodeBlob::Vptr* CodeBlob::vptr(CodeBlobKind kind) {
  constexpr const CodeBlob::Vptr* array[(size_t)CodeBlobKind::Number_Of_Kinds] = {
      nullptr/* None */,
      &nmethod::_vpntr,
      &BufferBlob::_vpntr,
      &AdapterBlob::_vpntr,
      &VtableBlob::_vpntr,
      &MethodHandlesAdapterBlob::_vpntr,
      &BufferedInlineTypeBlob::_vpntr,
      &RuntimeStub::_vpntr,
      &DeoptimizationBlob::_vpntr,
      &SafepointBlob::_vpntr,
#ifdef COMPILER2
      &ExceptionBlob::_vpntr,
      &UncommonTrapBlob::_vpntr,
#endif
      &UpcallStub::_vpntr
  };

  return array[(size_t)kind];
}

const CodeBlob::Vptr* CodeBlob::vptr() const {
  return vptr(_kind);
}
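
// Dispatch happens through these proxy vtables instead of real C++ virtual
// calls: a caller looks up the Vptr instance registered for _kind in the
// array above and forwards to it. For example, CodeBlob::print_on() in the
// printing section at the end of this file is simply
//   vptr()->print_on(this, st);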

unsigned int CodeBlob::align_code_offset(int offset) {
  // align the size to CodeEntryAlignment
  int header_size = (int)CodeHeap::header_size();
  return align_up(offset + header_size, CodeEntryAlignment) - header_size;
}
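
// Worked example (illustrative values only; CodeHeap::header_size() and
// CodeEntryAlignment are platform dependent): with an 8-byte heap block
// header and CodeEntryAlignment == 32,
//   align_code_offset(20) == align_up(20 + 8, 32) - 8 == 32 - 8 == 24
// so code placed at offset 24 within the blob lands at a
// CodeEntryAlignment-aligned address once the CodeHeap block header is
// accounted for (24 + 8 == 32).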

// This must be consistent with the CodeBlob constructor's layout actions.
unsigned int CodeBlob::allocation_size(CodeBuffer* cb, int header_size) {
  // align the size to CodeEntryAlignment
  unsigned int size = align_code_offset(header_size);
  size += align_up(cb->total_content_size(), oopSize);
  size += align_up(cb->total_oop_size(), oopSize);
  return size;
}
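
// Continuing the illustrative numbers from above (8-byte heap header,
// CodeEntryAlignment == 32, 64-bit VM with oopSize == 8): a blob with a
// 48-byte C++ header, 100 bytes of content and no oops needs
//   align_code_offset(48) + align_up(100, 8) + 0 == 56 + 104 == 160 bytes.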

CodeBlob::CodeBlob(const char* name, CodeBlobKind kind, CodeBuffer* cb, int size, uint16_t header_size,
                   int16_t frame_complete_offset, int frame_size, OopMapSet* oop_maps, bool caller_must_gc_arguments,
                   int mutable_data_size) :
  _oop_maps(nullptr), // will be set by set_oop_maps() call
  _name(name),
  _mutable_data(header_begin() + size), // default value is blob_end()
  _size(size),
  _relocation_size(align_up(cb->total_relocation_size(), oopSize)),
  _content_offset(CodeBlob::align_code_offset(header_size)),
  _code_offset(_content_offset + cb->total_offset_of(cb->insts())),
  _data_offset(_content_offset + align_up(cb->total_content_size(), oopSize)),
  _frame_size(frame_size),
  _mutable_data_size(mutable_data_size),
  S390_ONLY(_ctable_offset(0) COMMA)
  _header_size(header_size),
  _frame_complete_offset(frame_complete_offset),
  _kind(kind),
  _caller_must_gc_arguments(caller_must_gc_arguments)
{
  assert(is_aligned(_size, oopSize), "unaligned size");
  assert(is_aligned(header_size, oopSize), "unaligned size");
  assert(is_aligned(_relocation_size, oopSize), "unaligned size");
  assert(_data_offset <= _size, "codeBlob is too small: %d > %d", _data_offset, _size);
  assert(is_nmethod() || (cb->total_oop_size() + cb->total_metadata_size() == 0), "must be nmethod");
  assert(code_end() == content_end(), "must be the same - see code_end()");
#ifdef COMPILER1
  // probably wrong for tiered
  assert(_frame_size >= -1, "must use frame size or -1 for runtime stubs");
#endif // COMPILER1

  if (_mutable_data_size > 0) {
    _mutable_data = (address)os::malloc(_mutable_data_size, mtCode);
    if (_mutable_data == nullptr) {
      vm_exit_out_of_memory(_mutable_data_size, OOM_MALLOC_ERROR, "codebuffer: no space for mutable data");
    }
  } else {
    // We need a unique and valid non-null address
    assert(_mutable_data == blob_end(), "sanity");
  }

  set_oop_maps(oop_maps);
}
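
// The resulting layout of a CodeBlob (offsets relative to header_begin();
// the mutable data lives on the C heap, not in the CodeHeap):
//
//   header_begin()                      the C++ object itself, _header_size bytes
//   content_begin()  at _content_offset
//     code_begin()   at _code_offset    the instructions
//   data_begin()     at _data_offset    oops/metadata section (nmethods only)
//   blob_end()       at _size
//
//   _mutable_data -> os::malloc'ed area of _mutable_data_size bytes holding
//                    the relocation records (and, for nmethods, further
//                    mutable data); equal to blob_end() when the blob has none.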

// Simple CodeBlob used for simple BufferBlobs.
CodeBlob::CodeBlob(const char* name, CodeBlobKind kind, int size, uint16_t header_size) :
  _oop_maps(nullptr),
  _name(name),
  _mutable_data(header_begin() + size), // default value is blob_end()
  _size(size),
  _relocation_size(0),
  _content_offset(CodeBlob::align_code_offset(header_size)),
  _code_offset(_content_offset),
  _data_offset(size),
  _frame_size(0),
  _mutable_data_size(0),
  S390_ONLY(_ctable_offset(0) COMMA)
  _header_size(header_size),
  _frame_complete_offset(CodeOffsets::frame_never_safe),
  _kind(kind),
  _caller_must_gc_arguments(false)
{
  assert(is_aligned(size, oopSize), "unaligned size");
  assert(is_aligned(header_size, oopSize), "unaligned size");
  assert(_mutable_data == blob_end(), "sanity");
}

void CodeBlob::restore_mutable_data(address reloc_data) {
  // Relocation data is now stored as part of the mutable data area; allocate it before copying relocations
  if (_mutable_data_size > 0) {
    _mutable_data = (address)os::malloc(_mutable_data_size, mtCode);
    if (_mutable_data == nullptr) {
      vm_exit_out_of_memory(_mutable_data_size, OOM_MALLOC_ERROR, "codebuffer: no space for mutable data");
    }
  } else {
    _mutable_data = blob_end(); // default value
  }
  if (_relocation_size > 0) {
    assert(_mutable_data_size > 0, "relocation is part of mutable data section");
    memcpy((address)relocation_begin(), reloc_data, relocation_size());
  }
}

void CodeBlob::purge() {
  assert(_mutable_data != nullptr, "should never be null");
  if (_mutable_data != blob_end()) {
    os::free(_mutable_data);
    _mutable_data = blob_end(); // Valid non-null address
    _mutable_data_size = 0;
    _relocation_size = 0;
  }
  if (_oop_maps != nullptr) {
    delete _oop_maps;
    _oop_maps = nullptr;
  }
  NOT_PRODUCT(_asm_remarks.clear());
  NOT_PRODUCT(_dbg_strings.clear());
}

void CodeBlob::set_oop_maps(OopMapSet* p) {
  // Danger Will Robinson! This method allocates a big
  // chunk of memory; it's your job to free it.
  if (p != nullptr) {
    _oop_maps = ImmutableOopMapSet::build_from(p);
  } else {
    _oop_maps = nullptr;
  }
}

const ImmutableOopMap* CodeBlob::oop_map_for_return_address(address return_address) const {
  assert(_oop_maps != nullptr, "nope");
  return _oop_maps->find_map_at_offset((intptr_t) return_address - (intptr_t) code_begin());
}
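
// Typical use (a sketch; 'frame_pc' is a hypothetical name): during a stack
// walk, the return address of a compiled frame is mapped back to the oop map
// recorded at that call site,
//   const ImmutableOopMap* map = cb->oop_map_for_return_address(frame_pc);
// where frame_pc must lie within this blob's code section.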

void CodeBlob::print_code_on(outputStream* st) {
  ResourceMark m;
  Disassembler::decode(this, st);
}

void CodeBlob::prepare_for_archiving_impl() {
  set_name(nullptr);
  _oop_maps = nullptr;
  _mutable_data = nullptr;
#ifndef PRODUCT
  asm_remarks().clear();
  dbg_strings().clear();
#endif /* PRODUCT */
}

void CodeBlob::prepare_for_archiving() {
  vptr(_kind)->prepare_for_archiving(this);
}

void CodeBlob::archive_blob(CodeBlob* blob, address archive_buffer) {
  blob->copy_to(archive_buffer);
  CodeBlob* archived_blob = (CodeBlob*)archive_buffer;
  archived_blob->prepare_for_archiving();
}

void CodeBlob::post_restore_impl() {
  // Track memory usage statistic after releasing CodeCache_lock
  MemoryService::track_code_cache_memory_usage();
}

void CodeBlob::post_restore() {
  vptr(_kind)->post_restore(this);
}

CodeBlob* CodeBlob::restore(address code_cache_buffer,
                            const char* name,
                            address archived_reloc_data,
                            ImmutableOopMapSet* archived_oop_maps)
{
  copy_to(code_cache_buffer);
  CodeBlob* code_blob = (CodeBlob*)code_cache_buffer;
  code_blob->set_name(name);
  code_blob->restore_mutable_data(archived_reloc_data);
  code_blob->set_oop_maps(archived_oop_maps);
  return code_blob;
}

CodeBlob* CodeBlob::create(CodeBlob* archived_blob,
                           const char* name,
                           address archived_reloc_data,
                           ImmutableOopMapSet* archived_oop_maps)
{
  ThreadInVMfromUnknown __tiv; // get to VM state in case we block on CodeCache_lock

  CodeCache::gc_on_allocation();

  CodeBlob* blob = nullptr;
  unsigned int size = archived_blob->size();
  {
    MutexLocker mu(CodeCache_lock, Mutex::_no_safepoint_check_flag);
    address code_cache_buffer = (address)CodeCache::allocate(size, CodeBlobType::NonNMethod);
    if (code_cache_buffer != nullptr) {
      blob = archived_blob->restore(code_cache_buffer,
                                    name,
                                    archived_reloc_data,
                                    archived_oop_maps);
      assert(blob != nullptr, "sanity check");

      // Flush the code block
      ICache::invalidate_range(blob->code_begin(), blob->code_size());
      CodeCache::commit(blob); // Count adapters
    }
  }
  if (blob != nullptr) {
    blob->post_restore();
  }
  return blob;
}
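
// Archive/restore round trip, as implemented above (sketch):
//   archive_blob(blob, buf)  copies the blob into the archive buffer and
//                            clears what cannot be archived (name, oop maps,
//                            mutable data pointer);
//   create(archived, ...)    allocates CodeCache space, copies the archived
//                            image back, re-attaches the relocation data and
//                            oop maps, flushes the icache and commits.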

//-----------------------------------------------------------------------------------------
// Creates a RuntimeBlob from a CodeBuffer and copies code and relocation info.

RuntimeBlob::RuntimeBlob(
  const char* name,
  CodeBlobKind kind,
  CodeBuffer* cb,
  int size,
  uint16_t header_size,
  int16_t frame_complete,
  int frame_size,
  OopMapSet* oop_maps,
  bool caller_must_gc_arguments)
  : CodeBlob(name, kind, cb, size, header_size, frame_complete, frame_size, oop_maps, caller_must_gc_arguments,
             align_up(cb->total_relocation_size(), oopSize))
{
  cb->copy_code_and_locs_to(this);
}

void RuntimeBlob::free(RuntimeBlob* blob) {
  assert(blob != nullptr, "caller must check for nullptr");
  ThreadInVMfromUnknown __tiv; // get to VM state in case we block on CodeCache_lock
  blob->purge();
  {
    MutexLocker mu(CodeCache_lock, Mutex::_no_safepoint_check_flag);
    CodeCache::free(blob);
  }
  // Track memory usage statistic after releasing CodeCache_lock
  MemoryService::track_code_cache_memory_usage();
}

void RuntimeBlob::trace_new_stub(RuntimeBlob* stub, const char* name1, const char* name2) {
  // Do not hold the CodeCache lock during name formatting.
  assert(!CodeCache_lock->owned_by_self(), "release CodeCache before registering the stub");

  if (stub != nullptr && (PrintStubCode ||
                          Forte::is_enabled() ||
                          JvmtiExport::should_post_dynamic_code_generated())) {
    char stub_id[256];
    assert(strlen(name1) + strlen(name2) < sizeof(stub_id), "");
    jio_snprintf(stub_id, sizeof(stub_id), "%s%s", name1, name2);
    if (PrintStubCode) {
      ttyLocker ttyl;
      tty->print_cr("- - - [BEGIN] - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -");
      tty->print_cr("Decoding %s " PTR_FORMAT " [" PTR_FORMAT ", " PTR_FORMAT "] (%d bytes)",
                    stub_id, p2i(stub), p2i(stub->code_begin()), p2i(stub->code_end()), stub->code_size());
      Disassembler::decode(stub->code_begin(), stub->code_end(), tty
                           NOT_PRODUCT(COMMA &stub->asm_remarks()));
      if ((stub->oop_maps() != nullptr) && AbstractDisassembler::show_structs()) {
        tty->print_cr("- - - [OOP MAPS]- - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -");
        stub->oop_maps()->print();
      }
      tty->print_cr("- - - [END] - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -");
      tty->cr();
    }
    if (Forte::is_enabled()) {
      Forte::register_stub(stub_id, stub->code_begin(), stub->code_end());
    }

    if (JvmtiExport::should_post_dynamic_code_generated()) {
      const char* stub_name = name2;
      if (name2[0] == '\0') stub_name = name1;
      JvmtiExport::post_dynamic_code_generated(stub_name, stub->code_begin(), stub->code_end());
    }
  }

  // Track memory usage statistic after releasing CodeCache_lock
  MemoryService::track_code_cache_memory_usage();
}

//----------------------------------------------------------------------------------------------------
// Implementation of BufferBlob

BufferBlob::BufferBlob(const char* name, CodeBlobKind kind, int size, uint16_t header_size)
  : RuntimeBlob(name, kind, size, header_size)
{}

BufferBlob* BufferBlob::create(const char* name, uint buffer_size) {
  ThreadInVMfromUnknown __tiv; // get to VM state in case we block on CodeCache_lock

  BufferBlob* blob = nullptr;
  unsigned int size = sizeof(BufferBlob);
  // align the size to CodeEntryAlignment
  size = CodeBlob::align_code_offset(size);
  size += align_up(buffer_size, oopSize);
  assert(name != nullptr, "must provide a name");
  {
    MutexLocker mu(CodeCache_lock, Mutex::_no_safepoint_check_flag);
    blob = new (size) BufferBlob(name, CodeBlobKind::Buffer, size);
  }
  // Track memory usage statistic after releasing CodeCache_lock
  MemoryService::track_code_cache_memory_usage();

  return blob;
}


BufferBlob::BufferBlob(const char* name, CodeBlobKind kind, CodeBuffer* cb, int size, uint16_t header_size)
  : RuntimeBlob(name, kind, cb, size, header_size, CodeOffsets::frame_never_safe, 0, nullptr)
{}

// Used by gtest
BufferBlob* BufferBlob::create(const char* name, CodeBuffer* cb) {
  ThreadInVMfromUnknown __tiv; // get to VM state in case we block on CodeCache_lock

  BufferBlob* blob = nullptr;
  unsigned int size = CodeBlob::allocation_size(cb, sizeof(BufferBlob));
  assert(name != nullptr, "must provide a name");
  {
    MutexLocker mu(CodeCache_lock, Mutex::_no_safepoint_check_flag);
    blob = new (size) BufferBlob(name, CodeBlobKind::Buffer, cb, size, sizeof(BufferBlob));
  }
  // Track memory usage statistic after releasing CodeCache_lock
  MemoryService::track_code_cache_memory_usage();

  return blob;
}

void* BufferBlob::operator new(size_t s, unsigned size) throw() {
  return CodeCache::allocate(size, CodeBlobType::NonNMethod);
}
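
// Note the placement-style allocation used throughout this file: blobs are
// constructed directly into CodeCache memory, e.g.
//   blob = new (size) BufferBlob(name, CodeBlobKind::Buffer, size);
// where the 'new (size)' argument is the total CodeCache allocation size
// computed by the caller (header plus code), not the C++ object size 's'.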

void BufferBlob::free(BufferBlob *blob) {
  RuntimeBlob::free(blob);
}

BufferBlob::BufferBlob(const char* name, CodeBlobKind kind, CodeBuffer* cb, int size, uint16_t header_size, int frame_complete, int frame_size, OopMapSet* oop_maps, bool caller_must_gc_arguments)
  : RuntimeBlob(name, kind, cb, size, header_size, frame_complete, frame_size, oop_maps, caller_must_gc_arguments)
{}


//----------------------------------------------------------------------------------------------------
// Implementation of AdapterBlob

AdapterBlob::AdapterBlob(int size, CodeBuffer* cb, int entry_offset[AdapterBlob::ENTRY_COUNT], int frame_complete, int frame_size, OopMapSet* oop_maps, bool caller_must_gc_arguments) :
  BufferBlob("I2C/C2I adapters", CodeBlobKind::Adapter, cb, size, sizeof(AdapterBlob), frame_complete, frame_size, oop_maps, caller_must_gc_arguments) {
#ifdef ASSERT
  assert(entry_offset[I2C] == 0, "sanity check");
  for (int i = 1; i < AdapterBlob::ENTRY_COUNT; i++) {
    // The entry is within the adapter blob or unset.
    int offset = entry_offset[i];
    assert((offset > 0 && offset < cb->insts()->size()) ||
           (i >= C2I_No_Clinit_Check && offset == -1),
           "invalid entry offset[%d] = 0x%x", i, offset);
  }
#endif // ASSERT
  _c2i_offset                   = entry_offset[C2I];
  _c2i_inline_offset            = entry_offset[C2I_Inline];
  _c2i_inline_ro_offset         = entry_offset[C2I_Inline_RO];
  _c2i_unverified_offset        = entry_offset[C2I_Unverified];
  _c2i_unverified_inline_offset = entry_offset[C2I_Unverified_Inline];
  _c2i_no_clinit_check_offset   = entry_offset[C2I_No_Clinit_Check];
  CodeCache::commit(this);
}

AdapterBlob* AdapterBlob::create(CodeBuffer* cb, int entry_offset[AdapterBlob::ENTRY_COUNT], int frame_complete, int frame_size, OopMapSet* oop_maps, bool caller_must_gc_arguments) {
  ThreadInVMfromUnknown __tiv; // get to VM state in case we block on CodeCache_lock

  CodeCache::gc_on_allocation();

  AdapterBlob* blob = nullptr;
  unsigned int size = CodeBlob::allocation_size(cb, sizeof(AdapterBlob));
  {
    MutexLocker mu(CodeCache_lock, Mutex::_no_safepoint_check_flag);
    blob = new (size) AdapterBlob(size, cb, entry_offset, frame_complete, frame_size, oop_maps, caller_must_gc_arguments);
  }
  // Track memory usage statistic after releasing CodeCache_lock
  MemoryService::track_code_cache_memory_usage();

  return blob;
}

//----------------------------------------------------------------------------------------------------
// Implementation of VtableBlob

void* VtableBlob::operator new(size_t s, unsigned size) throw() {
  // Handling of allocation failure stops compilation and prints a bunch of
  // stuff, which requires unlocking the CodeCache_lock, so that the Compile_lock
  // can be locked, and then re-locking the CodeCache_lock. That is not safe in
  // this context as we hold the CompiledICLocker. So we just don't handle code
  // cache exhaustion here; we leave that for a later allocation that does not
  // hold the CompiledICLocker.
  return CodeCache::allocate(size, CodeBlobType::NonNMethod, false /* handle_alloc_failure */);
}

VtableBlob::VtableBlob(const char* name, int size) :
  BufferBlob(name, CodeBlobKind::Vtable, size) {
}

VtableBlob* VtableBlob::create(const char* name, int buffer_size) {
  assert(JavaThread::current()->thread_state() == _thread_in_vm, "called with the wrong state");

  VtableBlob* blob = nullptr;
  unsigned int size = sizeof(VtableBlob);
  // align the size to CodeEntryAlignment
  size = align_code_offset(size);
  size += align_up(buffer_size, oopSize);
  assert(name != nullptr, "must provide a name");
  {
    if (!CodeCache_lock->try_lock()) {
      // If we can't take the CodeCache_lock, then this is a bad time to perform the ongoing
      // IC transition to megamorphic, for which this stub will be needed. It is better to
      // bail out of the transition and wait for a more opportune moment. Not only is it
      // not worth blockingly waiting for the lock during the megamorphic transition, it
      // might also result in a deadlock to blockingly wait when concurrent class unloading
      // is performed. At this point in time, the CompiledICLocker is taken, so we are not
      // allowed to blockingly wait for the CodeCache_lock, as these two locks are otherwise
      // consistently taken in the opposite order. Bailing out results in an IC transition to
      // the clean state instead, which will cause subsequent calls to retry the transition
      // eventually.
      return nullptr;
    }
    blob = new (size) VtableBlob(name, size);
    CodeCache_lock->unlock();
  }
  // Track memory usage statistic after releasing CodeCache_lock
  MemoryService::track_code_cache_memory_usage();

  return blob;
}

//----------------------------------------------------------------------------------------------------
// Implementation of MethodHandlesAdapterBlob

MethodHandlesAdapterBlob* MethodHandlesAdapterBlob::create(int buffer_size) {
  ThreadInVMfromUnknown __tiv; // get to VM state in case we block on CodeCache_lock

  MethodHandlesAdapterBlob* blob = nullptr;
  unsigned int size = sizeof(MethodHandlesAdapterBlob);
  // align the size to CodeEntryAlignment
  size = CodeBlob::align_code_offset(size);
  size += align_up(buffer_size, oopSize);
  {
    MutexLocker mu(CodeCache_lock, Mutex::_no_safepoint_check_flag);
    blob = new (size) MethodHandlesAdapterBlob(size);
    if (blob == nullptr) {
      vm_exit_out_of_memory(size, OOM_MALLOC_ERROR, "CodeCache: no room for method handle adapter blob");
    }
  }
  // Track memory usage statistic after releasing CodeCache_lock
  MemoryService::track_code_cache_memory_usage();

  return blob;
}

//----------------------------------------------------------------------------------------------------
// Implementation of BufferedInlineTypeBlob
BufferedInlineTypeBlob::BufferedInlineTypeBlob(int size, CodeBuffer* cb, int pack_fields_off, int pack_fields_jobject_off, int unpack_fields_off) :
  BufferBlob("buffered inline type", CodeBlobKind::BufferedInlineType, cb, size, sizeof(BufferedInlineTypeBlob)),
  _pack_fields_off(pack_fields_off),
  _pack_fields_jobject_off(pack_fields_jobject_off),
  _unpack_fields_off(unpack_fields_off) {
  CodeCache::commit(this);
}

BufferedInlineTypeBlob* BufferedInlineTypeBlob::create(CodeBuffer* cb, int pack_fields_off, int pack_fields_jobject_off, int unpack_fields_off) {
  ThreadInVMfromUnknown __tiv; // get to VM state in case we block on CodeCache_lock

  BufferedInlineTypeBlob* blob = nullptr;
  unsigned int size = CodeBlob::allocation_size(cb, sizeof(BufferedInlineTypeBlob));
  {
    MutexLocker mu(CodeCache_lock, Mutex::_no_safepoint_check_flag);
    blob = new (size) BufferedInlineTypeBlob(size, cb, pack_fields_off, pack_fields_jobject_off, unpack_fields_off);
  }
  // Track memory usage statistic after releasing CodeCache_lock
  MemoryService::track_code_cache_memory_usage();

  return blob;
}

//----------------------------------------------------------------------------------------------------
// Implementation of RuntimeStub

RuntimeStub::RuntimeStub(
  const char* name,
  CodeBuffer* cb,
  int size,
  int16_t frame_complete,
  int frame_size,
  OopMapSet* oop_maps,
  bool caller_must_gc_arguments
)
: RuntimeBlob(name, CodeBlobKind::RuntimeStub, cb, size, sizeof(RuntimeStub),
              frame_complete, frame_size, oop_maps, caller_must_gc_arguments)
{
}

RuntimeStub* RuntimeStub::new_runtime_stub(const char* stub_name,
                                           CodeBuffer* cb,
                                           int16_t frame_complete,
                                           int frame_size,
                                           OopMapSet* oop_maps,
                                           bool caller_must_gc_arguments,
                                           bool alloc_fail_is_fatal)
{
  RuntimeStub* stub = nullptr;
  unsigned int size = CodeBlob::allocation_size(cb, sizeof(RuntimeStub));
  ThreadInVMfromUnknown __tiv; // get to VM state in case we block on CodeCache_lock
  {
    MutexLocker mu(CodeCache_lock, Mutex::_no_safepoint_check_flag);
    stub = new (size) RuntimeStub(stub_name, cb, size, frame_complete, frame_size, oop_maps, caller_must_gc_arguments);
    if (stub == nullptr) {
      if (!alloc_fail_is_fatal) {
        return nullptr;
      }
      fatal("Initial size of CodeCache is too small");
    }
  }

  trace_new_stub(stub, "RuntimeStub - ", stub_name);

  return stub;
}


void* RuntimeStub::operator new(size_t s, unsigned size) throw() {
  return CodeCache::allocate(size, CodeBlobType::NonNMethod);
}

// operator new shared by all singletons:
void* SingletonBlob::operator new(size_t s, unsigned size, bool alloc_fail_is_fatal) throw() {
  void* p = CodeCache::allocate(size, CodeBlobType::NonNMethod);
  if (alloc_fail_is_fatal && !p) fatal("Initial size of CodeCache is too small");
  return p;
}


//----------------------------------------------------------------------------------------------------
// Implementation of DeoptimizationBlob

DeoptimizationBlob::DeoptimizationBlob(
  CodeBuffer* cb,
  int size,
  OopMapSet* oop_maps,
  int unpack_offset,
  int unpack_with_exception_offset,
  int unpack_with_reexecution_offset,
  int frame_size
)
: SingletonBlob("DeoptimizationBlob", CodeBlobKind::Deoptimization, cb,
                size, sizeof(DeoptimizationBlob), frame_size, oop_maps)
{
  _unpack_offset           = unpack_offset;
  _unpack_with_exception   = unpack_with_exception_offset;
  _unpack_with_reexecution = unpack_with_reexecution_offset;
#ifdef COMPILER1
  _unpack_with_exception_in_tls = -1;
#endif
}


DeoptimizationBlob* DeoptimizationBlob::create(
  CodeBuffer* cb,
  OopMapSet* oop_maps,
  int unpack_offset,
  int unpack_with_exception_offset,
  int unpack_with_reexecution_offset,
  int frame_size)
{
  DeoptimizationBlob* blob = nullptr;
  unsigned int size = CodeBlob::allocation_size(cb, sizeof(DeoptimizationBlob));
  ThreadInVMfromUnknown __tiv; // get to VM state in case we block on CodeCache_lock
  {
    MutexLocker mu(CodeCache_lock, Mutex::_no_safepoint_check_flag);
    blob = new (size) DeoptimizationBlob(cb,
                                         size,
                                         oop_maps,
                                         unpack_offset,
                                         unpack_with_exception_offset,
                                         unpack_with_reexecution_offset,
                                         frame_size);
  }

  trace_new_stub(blob, "DeoptimizationBlob");

  return blob;
}

#ifdef COMPILER2

//----------------------------------------------------------------------------------------------------
// Implementation of UncommonTrapBlob

UncommonTrapBlob::UncommonTrapBlob(
  CodeBuffer* cb,
  int size,
  OopMapSet* oop_maps,
  int frame_size
)
: SingletonBlob("UncommonTrapBlob", CodeBlobKind::UncommonTrap, cb,
                size, sizeof(UncommonTrapBlob), frame_size, oop_maps)
{}


UncommonTrapBlob* UncommonTrapBlob::create(
  CodeBuffer* cb,
  OopMapSet* oop_maps,
  int frame_size)
{
  UncommonTrapBlob* blob = nullptr;
  unsigned int size = CodeBlob::allocation_size(cb, sizeof(UncommonTrapBlob));
  ThreadInVMfromUnknown __tiv; // get to VM state in case we block on CodeCache_lock
  {
    MutexLocker mu(CodeCache_lock, Mutex::_no_safepoint_check_flag);
    blob = new (size, false) UncommonTrapBlob(cb, size, oop_maps, frame_size);
  }

  trace_new_stub(blob, "UncommonTrapBlob");

  return blob;
}

//----------------------------------------------------------------------------------------------------
// Implementation of ExceptionBlob

ExceptionBlob::ExceptionBlob(
  CodeBuffer* cb,
  int size,
  OopMapSet* oop_maps,
  int frame_size
)
: SingletonBlob("ExceptionBlob", CodeBlobKind::Exception, cb,
                size, sizeof(ExceptionBlob), frame_size, oop_maps)
{}


ExceptionBlob* ExceptionBlob::create(
  CodeBuffer* cb,
  OopMapSet* oop_maps,
  int frame_size)
{
  ExceptionBlob* blob = nullptr;
  unsigned int size = CodeBlob::allocation_size(cb, sizeof(ExceptionBlob));
  ThreadInVMfromUnknown __tiv; // get to VM state in case we block on CodeCache_lock
  {
    MutexLocker mu(CodeCache_lock, Mutex::_no_safepoint_check_flag);
    blob = new (size, false) ExceptionBlob(cb, size, oop_maps, frame_size);
  }

  trace_new_stub(blob, "ExceptionBlob");

  return blob;
}

#endif // COMPILER2

//----------------------------------------------------------------------------------------------------
// Implementation of SafepointBlob

SafepointBlob::SafepointBlob(
  CodeBuffer* cb,
  int size,
  OopMapSet* oop_maps,
  int frame_size
)
: SingletonBlob("SafepointBlob", CodeBlobKind::Safepoint, cb,
                size, sizeof(SafepointBlob), frame_size, oop_maps)
{}


SafepointBlob* SafepointBlob::create(
  CodeBuffer* cb,
  OopMapSet* oop_maps,
  int frame_size)
{
  SafepointBlob* blob = nullptr;
  unsigned int size = CodeBlob::allocation_size(cb, sizeof(SafepointBlob));
  ThreadInVMfromUnknown __tiv; // get to VM state in case we block on CodeCache_lock
  {
    MutexLocker mu(CodeCache_lock, Mutex::_no_safepoint_check_flag);
    blob = new (size) SafepointBlob(cb, size, oop_maps, frame_size);
  }

  trace_new_stub(blob, "SafepointBlob");

  return blob;
}

//----------------------------------------------------------------------------------------------------
// Implementation of UpcallStub

UpcallStub::UpcallStub(const char* name, CodeBuffer* cb, int size, jobject receiver, ByteSize frame_data_offset) :
  RuntimeBlob(name, CodeBlobKind::Upcall, cb, size, sizeof(UpcallStub),
              CodeOffsets::frame_never_safe, 0 /* no frame size */,
              /* oop maps = */ nullptr, /* caller must gc arguments = */ false),
  _receiver(receiver),
  _frame_data_offset(frame_data_offset)
{
  CodeCache::commit(this);
}

void* UpcallStub::operator new(size_t s, unsigned size) throw() {
  return CodeCache::allocate(size, CodeBlobType::NonNMethod);
}

UpcallStub* UpcallStub::create(const char* name, CodeBuffer* cb, jobject receiver, ByteSize frame_data_offset) {
  ThreadInVMfromUnknown __tiv; // get to VM state in case we block on CodeCache_lock

  UpcallStub* blob = nullptr;
  unsigned int size = CodeBlob::allocation_size(cb, sizeof(UpcallStub));
  {
    MutexLocker mu(CodeCache_lock, Mutex::_no_safepoint_check_flag);
    blob = new (size) UpcallStub(name, cb, size, receiver, frame_data_offset);
  }
  if (blob == nullptr) {
    return nullptr; // caller must handle this
  }

  // Track memory usage statistic after releasing CodeCache_lock
  MemoryService::track_code_cache_memory_usage();

  trace_new_stub(blob, "UpcallStub - ", name);

  return blob;
}

void UpcallStub::oops_do(OopClosure* f, const frame& frame) {
  frame_data_for_frame(frame)->old_handles->oops_do(f);
}

JavaFrameAnchor* UpcallStub::jfa_for_frame(const frame& frame) const {
  return &frame_data_for_frame(frame)->jfa;
}

void UpcallStub::free(UpcallStub* blob) {
  assert(blob != nullptr, "caller must check for nullptr");
  JNIHandles::destroy_global(blob->receiver());
  RuntimeBlob::free(blob);
}

//----------------------------------------------------------------------------------------------------
// Verification and printing

void CodeBlob::verify() {
  if (is_nmethod()) {
    as_nmethod()->verify();
  }
}

void CodeBlob::print_on(outputStream* st) const {
  vptr()->print_on(this, st);
}

void CodeBlob::print() const { print_on(tty); }

void CodeBlob::print_value_on(outputStream* st) const {
  vptr()->print_value_on(this, st);
}

void CodeBlob::print_on_impl(outputStream* st) const {
  st->print_cr("[CodeBlob kind:%d (" INTPTR_FORMAT ")]", (int)_kind, p2i(this));
  st->print_cr("Framesize: %d", _frame_size);
}

void CodeBlob::print_value_on_impl(outputStream* st) const {
  st->print_cr("[CodeBlob]");
}

void CodeBlob::print_block_comment(outputStream* stream, address block_begin) const {
#if defined(SUPPORT_ASSEMBLY) || defined(SUPPORT_ABSTRACT_ASSEMBLY)
  if (is_nmethod()) {
    as_nmethod()->print_nmethod_labels(stream, block_begin);
  }
#endif

#ifndef PRODUCT
  ptrdiff_t offset = block_begin - code_begin();
  assert(offset >= 0, "Expecting non-negative offset!");
  _asm_remarks.print(uint(offset), stream);
#endif
}

void CodeBlob::dump_for_addr(address addr, outputStream* st, bool verbose) const {
  if (is_buffer_blob() || is_adapter_blob() || is_vtable_blob() || is_method_handles_adapter_blob()) {
    // the interpreter is generated into a buffer blob
    InterpreterCodelet* i = Interpreter::codelet_containing(addr);
    if (i != nullptr) {
      st->print_cr(INTPTR_FORMAT " is at code_begin+%d in an Interpreter codelet", p2i(addr), (int)(addr - i->code_begin()));
      i->print_on(st);
      return;
    }
    if (Interpreter::contains(addr)) {
      st->print_cr(INTPTR_FORMAT " is pointing into interpreter code"
                   " (not bytecode specific)", p2i(addr));
      return;
    }
    if (AdapterHandlerLibrary::contains(this)) {
      st->print_cr(INTPTR_FORMAT " is at code_begin+%d in an AdapterHandler", p2i(addr), (int)(addr - code_begin()));
      AdapterHandlerLibrary::print_handler_on(st, this);
    }
    // the stubroutines are generated into a buffer blob
    StubCodeDesc* d = StubCodeDesc::desc_for(addr);
    if (d != nullptr) {
      st->print_cr(INTPTR_FORMAT " is at begin+%d in a stub", p2i(addr), (int)(addr - d->begin()));
      d->print_on(st);
      st->cr();
      return;
    }
    if (StubRoutines::contains(addr)) {
      st->print_cr(INTPTR_FORMAT " is pointing to an (unnamed) stub routine", p2i(addr));
      return;
    }
    VtableStub* v = VtableStubs::stub_containing(addr);
    if (v != nullptr) {
      st->print_cr(INTPTR_FORMAT " is at entry_point+%d in a vtable stub", p2i(addr), (int)(addr - v->entry_point()));
      v->print_on(st);
      st->cr();
      return;
    }
  }
  if (is_nmethod()) {
    nmethod* nm = (nmethod*)this;
    ResourceMark rm;
    st->print(INTPTR_FORMAT " is at entry_point+%d in (nmethod*)" INTPTR_FORMAT,
              p2i(addr), (int)(addr - nm->entry_point()), p2i(nm));
    if (verbose) {
      st->print(" for ");
      nm->method()->print_value_on(st);
    }
    st->cr();
    if (verbose && st == tty) {
      // verbose is only ever true when called from findpc in debug.cpp
      nm->print_nmethod(true);
    } else {
      nm->print_on(st);
    }
    return;
  }
  st->print_cr(INTPTR_FORMAT " is at code_begin+%d in ", p2i(addr), (int)(addr - code_begin()));
  print_on(st);
}

void BufferBlob::print_on_impl(outputStream* st) const {
  RuntimeBlob::print_on_impl(st);
  print_value_on_impl(st);
}

void BufferBlob::print_value_on_impl(outputStream* st) const {
  st->print_cr("BufferBlob (" INTPTR_FORMAT ") used for %s", p2i(this), name());
}

void RuntimeStub::print_on_impl(outputStream* st) const {
  ttyLocker ttyl;
  RuntimeBlob::print_on_impl(st);
  st->print("Runtime Stub (" INTPTR_FORMAT "): ", p2i(this));
  st->print_cr("%s", name());
  Disassembler::decode((RuntimeBlob*)this, st);
}

void RuntimeStub::print_value_on_impl(outputStream* st) const {
  st->print("RuntimeStub (" INTPTR_FORMAT "): ", p2i(this)); st->print("%s", name());
}

void SingletonBlob::print_on_impl(outputStream* st) const {
  ttyLocker ttyl;
  RuntimeBlob::print_on_impl(st);
  st->print_cr("%s", name());
  Disassembler::decode((RuntimeBlob*)this, st);
}

void SingletonBlob::print_value_on_impl(outputStream* st) const {
  st->print_cr("%s", name());
}

void DeoptimizationBlob::print_value_on_impl(outputStream* st) const {
  st->print_cr("Deoptimization (frame not available)");
}

void UpcallStub::print_on_impl(outputStream* st) const {
  RuntimeBlob::print_on_impl(st);
  print_value_on_impl(st);
  st->print_cr("Frame data offset: %d", (int) _frame_data_offset);
  oop recv = JNIHandles::resolve(_receiver);
  st->print("Receiver MH=");
  recv->print_on(st);
  Disassembler::decode((RuntimeBlob*)this, st);
}

void UpcallStub::print_value_on_impl(outputStream* st) const {
  st->print_cr("UpcallStub (" INTPTR_FORMAT ") used for %s", p2i(this), name());
}