/*
 * Copyright (c) 1998, 2025, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#include "code/aotCodeCache.hpp"
#include "code/codeBlob.hpp"
#include "code/codeCache.hpp"
#include "code/relocInfo.hpp"
#include "code/vtableStubs.hpp"
#include "compiler/disassembler.hpp"
#include "compiler/oopMap.hpp"
#include "interpreter/bytecode.hpp"
#include "interpreter/interpreter.hpp"
#include "jvm.h"
#include "memory/allocation.inline.hpp"
#include "memory/heap.hpp"
#include "memory/resourceArea.hpp"
#include "oops/oop.inline.hpp"
#include "prims/forte.hpp"
#include "prims/jvmtiExport.hpp"
#include "runtime/handles.inline.hpp"
#include "runtime/interfaceSupport.inline.hpp"
#include "runtime/javaFrameAnchor.hpp"
#include "runtime/jniHandles.inline.hpp"
#include "runtime/mutexLocker.hpp"
#include "runtime/safepoint.hpp"
#include "runtime/sharedRuntime.hpp"
#include "runtime/stubCodeGenerator.hpp"
#include "runtime/stubRoutines.hpp"
#include "runtime/vframe.hpp"
#include "services/memoryService.hpp"
#include "utilities/align.hpp"
#ifdef COMPILER1
#include "c1/c1_Runtime1.hpp"
#endif

#include <type_traits>
// Virtual methods are not allowed in code blobs to simplify caching compiled code.
// Check all "leaf" subclasses of CodeBlob class.

static_assert(!std::is_polymorphic<nmethod>::value, "no virtual methods are allowed in nmethod");
static_assert(!std::is_polymorphic<AdapterBlob>::value, "no virtual methods are allowed in code blobs");
static_assert(!std::is_polymorphic<VtableBlob>::value, "no virtual methods are allowed in code blobs");
static_assert(!std::is_polymorphic<MethodHandlesAdapterBlob>::value, "no virtual methods are allowed in code blobs");
static_assert(!std::is_polymorphic<RuntimeStub>::value, "no virtual methods are allowed in code blobs");
static_assert(!std::is_polymorphic<DeoptimizationBlob>::value, "no virtual methods are allowed in code blobs");
static_assert(!std::is_polymorphic<SafepointBlob>::value, "no virtual methods are allowed in code blobs");
static_assert(!std::is_polymorphic<UpcallStub>::value, "no virtual methods are allowed in code blobs");
#ifdef COMPILER2
static_assert(!std::is_polymorphic<ExceptionBlob>::value, "no virtual methods are allowed in code blobs");
static_assert(!std::is_polymorphic<UncommonTrapBlob>::value, "no virtual methods are allowed in code blobs");
#endif
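
// Blob archiving relies on this: an archived blob is created by a raw byte
// copy (see CodeBlob::archive_blob below). For illustration only, a sketch of
// why a C++ vtable pointer would break that:
//
//   memcpy(archive_buffer, blob, blob->size());  // bit-wise copy of the blob
//   CodeBlob* archived = (CodeBlob*)archive_buffer;
//   // If CodeBlob were polymorphic, 'archived' would still carry a vptr
//   // pointing into the old process image, so any virtual call made after
//   // the blob is restored in another process would be undefined behavior.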

// Add proxy vtables.
// We need only a few for now - they are used only for printing.
const nmethod::Vptr nmethod::_vpntr;
const BufferBlob::Vptr BufferBlob::_vpntr;
const RuntimeStub::Vptr RuntimeStub::_vpntr;
const SingletonBlob::Vptr SingletonBlob::_vpntr;
const DeoptimizationBlob::Vptr DeoptimizationBlob::_vpntr;
#ifdef COMPILER2
const ExceptionBlob::Vptr ExceptionBlob::_vpntr;
#endif // COMPILER2
const UpcallStub::Vptr UpcallStub::_vpntr;

const CodeBlob::Vptr* CodeBlob::vptr(CodeBlobKind kind) {
  constexpr const CodeBlob::Vptr* array[(size_t)CodeBlobKind::Number_Of_Kinds] = {
      nullptr/* None */,
      &nmethod::_vpntr,
      &BufferBlob::_vpntr,
      &AdapterBlob::_vpntr,
      &VtableBlob::_vpntr,
      &MethodHandlesAdapterBlob::_vpntr,
      &RuntimeStub::_vpntr,
      &DeoptimizationBlob::_vpntr,
      &SafepointBlob::_vpntr,
#ifdef COMPILER2
      &ExceptionBlob::_vpntr,
      &UncommonTrapBlob::_vpntr,
#endif
      &UpcallStub::_vpntr
  };

  return array[(size_t)kind];
}

const CodeBlob::Vptr* CodeBlob::vptr() const {
  return vptr(_kind);
}

unsigned int CodeBlob::align_code_offset(int offset) {
  // align the offset so that the code starts at a CodeEntryAlignment boundary
  // within the CodeHeap (the blob is preceded by the CodeHeap block header)
  int header_size = (int)CodeHeap::header_size();
  return align_up(offset + header_size, CodeEntryAlignment) - header_size;
}
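
// Worked example (assuming CodeEntryAlignment == 32 and
// CodeHeap::header_size() == 64; both values vary with platform and build):
//
//   align_code_offset(40) == align_up(40 + 64, 32) - 64 == 128 - 64 == 64
//
// i.e. the returned offset is chosen so that offset + header_size, the code's
// position relative to the start of the CodeHeap block, is properly aligned.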

// This must be consistent with the CodeBlob constructor's layout actions.
unsigned int CodeBlob::allocation_size(CodeBuffer* cb, int header_size) {
  // align the size to CodeEntryAlignment
  unsigned int size = align_code_offset(header_size);
  size += align_up(cb->total_content_size(), oopSize);
  size += align_up(cb->total_oop_size(), oopSize);
  return size;
}

CodeBlob::CodeBlob(const char* name, CodeBlobKind kind, CodeBuffer* cb, int size, uint16_t header_size,
                   int16_t frame_complete_offset, int frame_size, OopMapSet* oop_maps, bool caller_must_gc_arguments,
                   int mutable_data_size) :
  _oop_maps(nullptr), // will be set by set_oop_maps() call
  _name(name),
  _mutable_data(header_begin() + size), // default value is blob_end()
  _size(size),
  _relocation_size(align_up(cb->total_relocation_size(), oopSize)),
  _content_offset(CodeBlob::align_code_offset(header_size)),
  _code_offset(_content_offset + cb->total_offset_of(cb->insts())),
  _data_offset(_content_offset + align_up(cb->total_content_size(), oopSize)),
  _frame_size(frame_size),
  _mutable_data_size(mutable_data_size),
  S390_ONLY(_ctable_offset(0) COMMA)
  _header_size(header_size),
  _frame_complete_offset(frame_complete_offset),
  _kind(kind),
  _caller_must_gc_arguments(caller_must_gc_arguments)
{
  assert(is_aligned(_size, oopSize), "unaligned size");
  assert(is_aligned(header_size, oopSize), "unaligned size");
  assert(is_aligned(_relocation_size, oopSize), "unaligned size");
  assert(_data_offset <= _size, "codeBlob is too small: %d > %d", _data_offset, _size);
  assert(is_nmethod() || (cb->total_oop_size() + cb->total_metadata_size() == 0), "only nmethods may contain oops or metadata");
  assert(code_end() == content_end(), "must be the same - see code_end()");
#ifdef COMPILER1
  // probably wrong for tiered
  assert(_frame_size >= -1, "must use frame size or -1 for runtime stubs");
#endif // COMPILER1

  if (_mutable_data_size > 0) {
    _mutable_data = (address)os::malloc(_mutable_data_size, mtCode);
    if (_mutable_data == nullptr) {
      vm_exit_out_of_memory(_mutable_data_size, OOM_MALLOC_ERROR, "codebuffer: no space for mutable data");
    }
  } else {
    // We need a unique, valid, non-null address
    assert(_mutable_data == blob_end(), "sanity");
  }

  set_oop_maps(oop_maps);
}
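
// The resulting blob layout, as established by the constructor above (a
// simplified picture; offsets are relative to header_begin()):
//
//   0             _content_offset     _code_offset        _data_offset    _size
//   | blob header | constants etc.    | instructions ...  | oop data      |
//
// code_end() == content_end() (asserted above); the mutable data (relocations
// etc.) lives outside the blob, in C heap.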

// Simple CodeBlob constructor used by simple blobs (e.g. BufferBlob) that are not created from a CodeBuffer.
CodeBlob::CodeBlob(const char* name, CodeBlobKind kind, int size, uint16_t header_size) :
  _oop_maps(nullptr),
  _name(name),
  _mutable_data(header_begin() + size), // default value is blob_end()
  _size(size),
  _relocation_size(0),
  _content_offset(CodeBlob::align_code_offset(header_size)),
  _code_offset(_content_offset),
  _data_offset(size),
  _frame_size(0),
  _mutable_data_size(0),
  S390_ONLY(_ctable_offset(0) COMMA)
  _header_size(header_size),
  _frame_complete_offset(CodeOffsets::frame_never_safe),
  _kind(kind),
  _caller_must_gc_arguments(false)
{
  assert(is_aligned(size, oopSize), "unaligned size");
  assert(is_aligned(header_size, oopSize), "unaligned size");
  assert(_mutable_data == blob_end(), "sanity");
}

#ifdef ASSERT
CodeBlob::~CodeBlob() {
  assert(_oop_maps == nullptr || AOTCodeCache::is_address_in_aot_cache((address)_oop_maps), "Not flushed");
}
#endif

void CodeBlob::restore_mutable_data(address reloc_data) {
  // Relocation data is now stored as part of the mutable data area; allocate it before copying the relocations
  if (_mutable_data_size > 0) {
    _mutable_data = (address)os::malloc(_mutable_data_size, mtCode);
    if (_mutable_data == nullptr) {
      vm_exit_out_of_memory(_mutable_data_size, OOM_MALLOC_ERROR, "codebuffer: no space for mutable data");
    }
  } else {
    _mutable_data = blob_end(); // default value
  }
  if (_relocation_size > 0) {
    assert(_mutable_data_size > 0, "relocation is part of mutable data section");
    memcpy((address)relocation_begin(), reloc_data, relocation_size());
  }
}

void CodeBlob::purge() {
  assert(_mutable_data != nullptr, "should never be null");
  if (_mutable_data != blob_end()) {
    os::free(_mutable_data);
    _mutable_data = blob_end(); // valid, non-null address
    _mutable_data_size = 0;
    _relocation_size = 0;
  }
  if (_oop_maps != nullptr && !AOTCodeCache::is_address_in_aot_cache((address)_oop_maps)) {
    delete _oop_maps;
    _oop_maps = nullptr;
  }
  NOT_PRODUCT(_asm_remarks.clear());
  NOT_PRODUCT(_dbg_strings.clear());
}

void CodeBlob::set_oop_maps(OopMapSet* p) {
  // Danger Will Robinson! This method allocates a big chunk of memory
  // (an ImmutableOopMapSet built from p); it's your job to free it (see purge()).
  if (p != nullptr) {
    _oop_maps = ImmutableOopMapSet::build_from(p);
  } else {
    _oop_maps = nullptr;
  }
}

const ImmutableOopMap* CodeBlob::oop_map_for_return_address(address return_address) const {
  assert(_oop_maps != nullptr, "no oop maps set");
  return _oop_maps->find_map_at_offset((intptr_t) return_address - (intptr_t) code_begin());
}

void CodeBlob::print_code_on(outputStream* st) {
  ResourceMark m;
  Disassembler::decode(this, st);
}

void CodeBlob::prepare_for_archiving_impl() {
  set_name(nullptr);
  _oop_maps = nullptr;
  _mutable_data = nullptr;
#ifndef PRODUCT
  asm_remarks().clear_ref();
  dbg_strings().clear_ref();
#endif /* PRODUCT */
}

void CodeBlob::prepare_for_archiving() {
  vptr(_kind)->prepare_for_archiving(this);
}

void CodeBlob::archive_blob(CodeBlob* blob, address archive_buffer) {
  blob->copy_to(archive_buffer);
  CodeBlob* archived_blob = (CodeBlob*)archive_buffer;
  archived_blob->prepare_for_archiving();
}

void CodeBlob::post_restore_impl() {
  // Track memory usage statistic after releasing CodeCache_lock
  MemoryService::track_code_cache_memory_usage();
}

void CodeBlob::post_restore() {
  vptr(_kind)->post_restore(this);
}

CodeBlob* CodeBlob::restore(address code_cache_buffer,
                            const char* name,
                            address archived_reloc_data,
                            ImmutableOopMapSet* archived_oop_maps)
{
  copy_to(code_cache_buffer);
  CodeBlob* code_blob = (CodeBlob*)code_cache_buffer;
  code_blob->set_name(name);
  code_blob->restore_mutable_data(archived_reloc_data);
  code_blob->set_oop_maps(archived_oop_maps);
  return code_blob;
}

CodeBlob* CodeBlob::create(CodeBlob* archived_blob,
                           const char* name,
                           address archived_reloc_data,
                           ImmutableOopMapSet* archived_oop_maps
                           )
{
  ThreadInVMfromUnknown __tiv; // get to VM state in case we block on CodeCache_lock

  CodeCache::gc_on_allocation();

  CodeBlob* blob = nullptr;
  unsigned int size = archived_blob->size();
  {
    MutexLocker mu(CodeCache_lock, Mutex::_no_safepoint_check_flag);
    address code_cache_buffer = (address)CodeCache::allocate(size, CodeBlobType::NonNMethod);
    if (code_cache_buffer != nullptr) {
      blob = archived_blob->restore(code_cache_buffer,
                                    name,
                                    archived_reloc_data,
                                    archived_oop_maps);
      assert(blob != nullptr, "sanity check");
      // Flush the code block
      ICache::invalidate_range(blob->code_begin(), blob->code_size());
      CodeCache::commit(blob); // Count adapters
    }
  }
  if (blob != nullptr) {
    blob->post_restore();
  }
  return blob;
}
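
// A sketch of the archive/restore round trip implemented above (the buffer
// and the saved values are placeholders for state kept by the AOT code cache):
//
//   // At dump time:
//   CodeBlob::archive_blob(blob, archive_buffer);  // raw copy, then scrub pointers
//
//   // In a later run:
//   CodeBlob* restored = CodeBlob::create((CodeBlob*)archive_buffer, saved_name,
//                                         saved_reloc_data, saved_oop_maps);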

//-----------------------------------------------------------------------------------------
// Creates a RuntimeBlob from a CodeBuffer and copies the code and relocation info.

RuntimeBlob::RuntimeBlob(
  const char* name,
  CodeBlobKind kind,
  CodeBuffer* cb,
  int size,
  uint16_t header_size,
  int16_t frame_complete,
  int frame_size,
  OopMapSet* oop_maps,
  bool caller_must_gc_arguments)
  : CodeBlob(name, kind, cb, size, header_size, frame_complete, frame_size, oop_maps, caller_must_gc_arguments,
             align_up(cb->total_relocation_size(), oopSize))
{
  cb->copy_code_and_locs_to(this);
}

void RuntimeBlob::free(RuntimeBlob* blob) {
  assert(blob != nullptr, "caller must check for nullptr");
  ThreadInVMfromUnknown __tiv; // get to VM state in case we block on CodeCache_lock
  blob->purge();
  {
    MutexLocker mu(CodeCache_lock, Mutex::_no_safepoint_check_flag);
    CodeCache::free(blob);
  }
  // Track memory usage statistic after releasing CodeCache_lock
  MemoryService::track_code_cache_memory_usage();
}

void RuntimeBlob::trace_new_stub(RuntimeBlob* stub, const char* name1, const char* name2) {
  // Do not hold the CodeCache lock during name formatting.
  assert(!CodeCache_lock->owned_by_self(), "release CodeCache before registering the stub");

  if (stub != nullptr && (PrintStubCode ||
                          Forte::is_enabled() ||
                          JvmtiExport::should_post_dynamic_code_generated())) {
    char stub_id[256];
    assert(strlen(name1) + strlen(name2) < sizeof(stub_id), "stub_id buffer too small");
    jio_snprintf(stub_id, sizeof(stub_id), "%s%s", name1, name2);
    if (PrintStubCode) {
      ttyLocker ttyl;
      tty->print_cr("- - - [BEGIN] - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -");
      tty->print_cr("Decoding %s " PTR_FORMAT " [" PTR_FORMAT ", " PTR_FORMAT "] (%d bytes)",
                    stub_id, p2i(stub), p2i(stub->code_begin()), p2i(stub->code_end()), stub->code_size());
      Disassembler::decode(stub->code_begin(), stub->code_end(), tty
                           NOT_PRODUCT(COMMA &stub->asm_remarks()));
      if ((stub->oop_maps() != nullptr) && AbstractDisassembler::show_structs()) {
        tty->print_cr("- - - [OOP MAPS]- - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -");
        stub->oop_maps()->print();
      }
      tty->print_cr("- - - [END] - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -");
      tty->cr();
    }
    if (Forte::is_enabled()) {
      Forte::register_stub(stub_id, stub->code_begin(), stub->code_end());
    }

    if (JvmtiExport::should_post_dynamic_code_generated()) {
      const char* stub_name = name2;
      if (name2[0] == '\0') stub_name = name1;
      JvmtiExport::post_dynamic_code_generated(stub_name, stub->code_begin(), stub->code_end());
    }
  }

  // Track memory usage statistic after releasing CodeCache_lock
  MemoryService::track_code_cache_memory_usage();
}

//----------------------------------------------------------------------------------------------------
// Implementation of BufferBlob

BufferBlob::BufferBlob(const char* name, CodeBlobKind kind, int size, uint16_t header_size)
  : RuntimeBlob(name, kind, size, header_size)
{}

BufferBlob* BufferBlob::create(const char* name, uint buffer_size) {
  ThreadInVMfromUnknown __tiv; // get to VM state in case we block on CodeCache_lock

  BufferBlob* blob = nullptr;
  unsigned int size = sizeof(BufferBlob);
  // align the size to CodeEntryAlignment
  size = CodeBlob::align_code_offset(size);
  size += align_up(buffer_size, oopSize);
  assert(name != nullptr, "must provide a name");
  {
    MutexLocker mu(CodeCache_lock, Mutex::_no_safepoint_check_flag);
    blob = new (size) BufferBlob(name, CodeBlobKind::Buffer, size);
  }
  // Track memory usage statistic after releasing CodeCache_lock
  MemoryService::track_code_cache_memory_usage();

  return blob;
}
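
// Typical use (a sketch; the name and size are arbitrary): allocate a blob,
// then direct a CodeBuffer at it to generate code in place:
//
//   BufferBlob* bb = BufferBlob::create("example blob", 1024);
//   if (bb != nullptr) {
//     CodeBuffer buf(bb);  // code emitted through 'buf' lands in the blob
//   }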


BufferBlob::BufferBlob(const char* name, CodeBlobKind kind, CodeBuffer* cb, int size, uint16_t header_size)
  : RuntimeBlob(name, kind, cb, size, header_size, CodeOffsets::frame_never_safe, 0, nullptr)
{}

// Used by gtest
BufferBlob* BufferBlob::create(const char* name, CodeBuffer* cb) {
  ThreadInVMfromUnknown __tiv; // get to VM state in case we block on CodeCache_lock

  BufferBlob* blob = nullptr;
  unsigned int size = CodeBlob::allocation_size(cb, sizeof(BufferBlob));
  assert(name != nullptr, "must provide a name");
  {
    MutexLocker mu(CodeCache_lock, Mutex::_no_safepoint_check_flag);
    blob = new (size) BufferBlob(name, CodeBlobKind::Buffer, cb, size);
  }
  // Track memory usage statistic after releasing CodeCache_lock
  MemoryService::track_code_cache_memory_usage();

  return blob;
}

void* BufferBlob::operator new(size_t s, unsigned size) throw() {
  return CodeCache::allocate(size, CodeBlobType::NonNMethod);
}

void BufferBlob::free(BufferBlob *blob) {
  RuntimeBlob::free(blob);
}


//----------------------------------------------------------------------------------------------------
// Implementation of AdapterBlob

AdapterBlob::AdapterBlob(int size, CodeBuffer* cb, int entry_offset[AdapterBlob::ENTRY_COUNT]) :
  BufferBlob("I2C/C2I adapters", CodeBlobKind::Adapter, cb, size, sizeof(AdapterBlob)) {
  assert(entry_offset[I2C] == 0, "sanity check");
#ifdef ASSERT
  for (int i = 1; i < AdapterBlob::ENTRY_COUNT; i++) {
    // The entry is within the adapter blob or unset.
    int offset = entry_offset[i];
    assert((offset > 0 && offset < cb->insts()->size()) ||
           (i >= C2I_No_Clinit_Check && offset == -1),
           "invalid entry offset[%d] = 0x%x", i, offset);
  }
#endif // ASSERT
  _c2i_offset = entry_offset[C2I];
  _c2i_unverified_offset = entry_offset[C2I_Unverified];
  _c2i_no_clinit_check_offset = entry_offset[C2I_No_Clinit_Check];
  CodeCache::commit(this);
}

AdapterBlob* AdapterBlob::create(CodeBuffer* cb, int entry_offset[AdapterBlob::ENTRY_COUNT]) {
  ThreadInVMfromUnknown __tiv; // get to VM state in case we block on CodeCache_lock

  CodeCache::gc_on_allocation();

  AdapterBlob* blob = nullptr;
  unsigned int size = CodeBlob::allocation_size(cb, sizeof(AdapterBlob));
  {
    MutexLocker mu(CodeCache_lock, Mutex::_no_safepoint_check_flag);
    blob = new (size) AdapterBlob(size, cb, entry_offset);
  }
  // Track memory usage statistic after releasing CodeCache_lock
  MemoryService::track_code_cache_memory_usage();

  return blob;
}

//----------------------------------------------------------------------------------------------------
// Implementation of VtableBlob

void* VtableBlob::operator new(size_t s, unsigned size) throw() {
  // Handling of allocation failure stops compilation and prints a bunch of
  // stuff, which requires unlocking the CodeCache_lock, so that the Compile_lock
  // can be locked, and then re-locking the CodeCache_lock. That is not safe in
  // this context as we hold the CompiledICLocker. So we just don't handle code
  // cache exhaustion here; we leave that for a later allocation that does not
  // hold the CompiledICLocker.
  return CodeCache::allocate(size, CodeBlobType::NonNMethod, false /* handle_alloc_failure */);
}

VtableBlob::VtableBlob(const char* name, int size) :
  BufferBlob(name, CodeBlobKind::Vtable, size) {
}

VtableBlob* VtableBlob::create(const char* name, int buffer_size) {
  assert(JavaThread::current()->thread_state() == _thread_in_vm, "called with the wrong state");

  VtableBlob* blob = nullptr;
  unsigned int size = sizeof(VtableBlob);
  // align the size to CodeEntryAlignment
  size = align_code_offset(size);
  size += align_up(buffer_size, oopSize);
  assert(name != nullptr, "must provide a name");
  {
    if (!CodeCache_lock->try_lock()) {
      // If we cannot take the CodeCache_lock, then this is a bad time to perform
      // the ongoing IC transition to megamorphic, for which this stub will be
      // needed. It is better to bail out of the transition and wait for a more
      // opportune moment. Blocking on the lock is not worthwhile for the
      // megamorphic transition, and it could even deadlock when concurrent class
      // unloading is running: at this point the CompiledICLocker is held, so we
      // must not block on the CodeCache_lock, as these two locks are otherwise
      // consistently taken in the opposite order. Bailing out results in an IC
      // transition to the clean state instead, which will cause subsequent calls
      // to retry the transition eventually.
      return nullptr;
    }
    blob = new (size) VtableBlob(name, size);
    CodeCache_lock->unlock();
  }
  // Track memory usage statistic after releasing CodeCache_lock
  MemoryService::track_code_cache_memory_usage();

  return blob;
}

//----------------------------------------------------------------------------------------------------
// Implementation of MethodHandlesAdapterBlob

MethodHandlesAdapterBlob* MethodHandlesAdapterBlob::create(int buffer_size) {
  ThreadInVMfromUnknown __tiv; // get to VM state in case we block on CodeCache_lock

  MethodHandlesAdapterBlob* blob = nullptr;
  unsigned int size = sizeof(MethodHandlesAdapterBlob);
  // align the size to CodeEntryAlignment
  size = CodeBlob::align_code_offset(size);
  size += align_up(buffer_size, oopSize);
  {
    MutexLocker mu(CodeCache_lock, Mutex::_no_safepoint_check_flag);
    blob = new (size) MethodHandlesAdapterBlob(size);
    if (blob == nullptr) {
      vm_exit_out_of_memory(size, OOM_MALLOC_ERROR, "CodeCache: no room for method handle adapter blob");
    }
  }
  // Track memory usage statistic after releasing CodeCache_lock
  MemoryService::track_code_cache_memory_usage();

  return blob;
}

//----------------------------------------------------------------------------------------------------
// Implementation of RuntimeStub

RuntimeStub::RuntimeStub(
  const char* name,
  CodeBuffer* cb,
  int size,
  int16_t frame_complete,
  int frame_size,
  OopMapSet* oop_maps,
  bool caller_must_gc_arguments
)
: RuntimeBlob(name, CodeBlobKind::RuntimeStub, cb, size, sizeof(RuntimeStub),
              frame_complete, frame_size, oop_maps, caller_must_gc_arguments)
{
}

RuntimeStub* RuntimeStub::new_runtime_stub(const char* stub_name,
                                           CodeBuffer* cb,
                                           int16_t frame_complete,
                                           int frame_size,
                                           OopMapSet* oop_maps,
                                           bool caller_must_gc_arguments,
                                           bool alloc_fail_is_fatal)
{
  RuntimeStub* stub = nullptr;
  unsigned int size = CodeBlob::allocation_size(cb, sizeof(RuntimeStub));
  ThreadInVMfromUnknown __tiv; // get to VM state in case we block on CodeCache_lock
  {
    MutexLocker mu(CodeCache_lock, Mutex::_no_safepoint_check_flag);
    stub = new (size) RuntimeStub(stub_name, cb, size, frame_complete, frame_size, oop_maps, caller_must_gc_arguments);
    if (stub == nullptr) {
      if (!alloc_fail_is_fatal) {
        return nullptr;
      }
      fatal("Initial size of CodeCache is too small");
    }
  }

  trace_new_stub(stub, "RuntimeStub - ", stub_name);

  return stub;
}
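
// Typical use from a stub generator (a sketch; the CodeBuffer 'code' and the
// frame values are assumed to come from the generator):
//
//   RuntimeStub* stub =
//     RuntimeStub::new_runtime_stub("example_stub", &code, frame_complete,
//                                   frame_size_in_words, oop_maps,
//                                   /* caller_must_gc_arguments */ false);
//   return stub->entry_point();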


void* RuntimeStub::operator new(size_t s, unsigned size) throw() {
  return CodeCache::allocate(size, CodeBlobType::NonNMethod);
}

// operator new shared by all singletons:
void* SingletonBlob::operator new(size_t s, unsigned size, bool alloc_fail_is_fatal) throw() {
  void* p = CodeCache::allocate(size, CodeBlobType::NonNMethod);
  if (alloc_fail_is_fatal && !p) fatal("Initial size of CodeCache is too small");
  return p;
}


//----------------------------------------------------------------------------------------------------
// Implementation of DeoptimizationBlob

DeoptimizationBlob::DeoptimizationBlob(
  CodeBuffer* cb,
  int size,
  OopMapSet* oop_maps,
  int unpack_offset,
  int unpack_with_exception_offset,
  int unpack_with_reexecution_offset,
  int frame_size
)
: SingletonBlob("DeoptimizationBlob", CodeBlobKind::Deoptimization, cb,
                size, sizeof(DeoptimizationBlob), frame_size, oop_maps)
{
  _unpack_offset = unpack_offset;
  _unpack_with_exception = unpack_with_exception_offset;
  _unpack_with_reexecution = unpack_with_reexecution_offset;
#ifdef COMPILER1
  _unpack_with_exception_in_tls = -1;
#endif
}


DeoptimizationBlob* DeoptimizationBlob::create(
  CodeBuffer* cb,
  OopMapSet* oop_maps,
  int unpack_offset,
  int unpack_with_exception_offset,
  int unpack_with_reexecution_offset,
  int frame_size)
{
  DeoptimizationBlob* blob = nullptr;
  unsigned int size = CodeBlob::allocation_size(cb, sizeof(DeoptimizationBlob));
  ThreadInVMfromUnknown __tiv; // get to VM state in case we block on CodeCache_lock
  {
    MutexLocker mu(CodeCache_lock, Mutex::_no_safepoint_check_flag);
    blob = new (size) DeoptimizationBlob(cb,
                                         size,
                                         oop_maps,
                                         unpack_offset,
                                         unpack_with_exception_offset,
                                         unpack_with_reexecution_offset,
                                         frame_size);
  }

  trace_new_stub(blob, "DeoptimizationBlob");

  return blob;
}

#ifdef COMPILER2

//----------------------------------------------------------------------------------------------------
// Implementation of UncommonTrapBlob

UncommonTrapBlob::UncommonTrapBlob(
  CodeBuffer* cb,
  int size,
  OopMapSet* oop_maps,
  int frame_size
)
: SingletonBlob("UncommonTrapBlob", CodeBlobKind::UncommonTrap, cb,
                size, sizeof(UncommonTrapBlob), frame_size, oop_maps)
{}


UncommonTrapBlob* UncommonTrapBlob::create(
  CodeBuffer* cb,
  OopMapSet* oop_maps,
  int frame_size)
{
  UncommonTrapBlob* blob = nullptr;
  unsigned int size = CodeBlob::allocation_size(cb, sizeof(UncommonTrapBlob));
  ThreadInVMfromUnknown __tiv; // get to VM state in case we block on CodeCache_lock
  {
    MutexLocker mu(CodeCache_lock, Mutex::_no_safepoint_check_flag);
    blob = new (size, false) UncommonTrapBlob(cb, size, oop_maps, frame_size);
  }

  trace_new_stub(blob, "UncommonTrapBlob");

  return blob;
}

//----------------------------------------------------------------------------------------------------
// Implementation of ExceptionBlob

ExceptionBlob::ExceptionBlob(
  CodeBuffer* cb,
  int size,
  OopMapSet* oop_maps,
  int frame_size
)
: SingletonBlob("ExceptionBlob", CodeBlobKind::Exception, cb,
                size, sizeof(ExceptionBlob), frame_size, oop_maps)
{}


ExceptionBlob* ExceptionBlob::create(
  CodeBuffer* cb,
  OopMapSet* oop_maps,
  int frame_size)
{
  ExceptionBlob* blob = nullptr;
  unsigned int size = CodeBlob::allocation_size(cb, sizeof(ExceptionBlob));
  ThreadInVMfromUnknown __tiv; // get to VM state in case we block on CodeCache_lock
  {
    MutexLocker mu(CodeCache_lock, Mutex::_no_safepoint_check_flag);
    blob = new (size, false) ExceptionBlob(cb, size, oop_maps, frame_size);
  }

  trace_new_stub(blob, "ExceptionBlob");

  return blob;
}

#endif // COMPILER2

//----------------------------------------------------------------------------------------------------
// Implementation of SafepointBlob

SafepointBlob::SafepointBlob(
  CodeBuffer* cb,
  int size,
  OopMapSet* oop_maps,
  int frame_size
)
: SingletonBlob("SafepointBlob", CodeBlobKind::Safepoint, cb,
                size, sizeof(SafepointBlob), frame_size, oop_maps)
{}


SafepointBlob* SafepointBlob::create(
  CodeBuffer* cb,
  OopMapSet* oop_maps,
  int frame_size)
{
  SafepointBlob* blob = nullptr;
  unsigned int size = CodeBlob::allocation_size(cb, sizeof(SafepointBlob));
  ThreadInVMfromUnknown __tiv; // get to VM state in case we block on CodeCache_lock
  {
    MutexLocker mu(CodeCache_lock, Mutex::_no_safepoint_check_flag);
    blob = new (size) SafepointBlob(cb, size, oop_maps, frame_size);
  }

  trace_new_stub(blob, "SafepointBlob");

  return blob;
}

//----------------------------------------------------------------------------------------------------
// Implementation of UpcallStub

UpcallStub::UpcallStub(const char* name, CodeBuffer* cb, int size, jobject receiver, ByteSize frame_data_offset) :
  RuntimeBlob(name, CodeBlobKind::Upcall, cb, size, sizeof(UpcallStub),
              CodeOffsets::frame_never_safe, 0 /* no frame size */,
              /* oop maps = */ nullptr, /* caller must gc arguments = */ false),
  _receiver(receiver),
  _frame_data_offset(frame_data_offset)
{
  CodeCache::commit(this);
}

void* UpcallStub::operator new(size_t s, unsigned size) throw() {
  return CodeCache::allocate(size, CodeBlobType::NonNMethod);
}

UpcallStub* UpcallStub::create(const char* name, CodeBuffer* cb, jobject receiver, ByteSize frame_data_offset) {
  ThreadInVMfromUnknown __tiv; // get to VM state in case we block on CodeCache_lock

  UpcallStub* blob = nullptr;
  unsigned int size = CodeBlob::allocation_size(cb, sizeof(UpcallStub));
  {
    MutexLocker mu(CodeCache_lock, Mutex::_no_safepoint_check_flag);
    blob = new (size) UpcallStub(name, cb, size, receiver, frame_data_offset);
  }
  if (blob == nullptr) {
    return nullptr; // caller must handle this
  }

  // Track memory usage statistic after releasing CodeCache_lock
  MemoryService::track_code_cache_memory_usage();

  trace_new_stub(blob, "UpcallStub - ", name);

  return blob;
}

void UpcallStub::oops_do(OopClosure* f, const frame& frame) {
  frame_data_for_frame(frame)->old_handles->oops_do(f);
}

JavaFrameAnchor* UpcallStub::jfa_for_frame(const frame& frame) const {
  return &frame_data_for_frame(frame)->jfa;
}

void UpcallStub::free(UpcallStub* blob) {
  assert(blob != nullptr, "caller must check for nullptr");
  JNIHandles::destroy_global(blob->receiver());
  RuntimeBlob::free(blob);
}

//----------------------------------------------------------------------------------------------------
// Verification and printing

void CodeBlob::verify() {
  if (is_nmethod()) {
    as_nmethod()->verify();
  }
}

void CodeBlob::print_on(outputStream* st) const {
  vptr()->print_on(this, st);
}

void CodeBlob::print() const { print_on(tty); }

void CodeBlob::print_value_on(outputStream* st) const {
  vptr()->print_value_on(this, st);
}

void CodeBlob::print_on_impl(outputStream* st) const {
  st->print_cr("[CodeBlob kind:%d (" INTPTR_FORMAT ")]", (int)_kind, p2i(this));
  st->print_cr("Framesize: %d", _frame_size);
}

void CodeBlob::print_value_on_impl(outputStream* st) const {
  st->print_cr("[CodeBlob]");
}

void CodeBlob::print_block_comment(outputStream* stream, address block_begin) const {
#if defined(SUPPORT_ASSEMBLY) || defined(SUPPORT_ABSTRACT_ASSEMBLY)
  if (is_nmethod()) {
    as_nmethod()->print_nmethod_labels(stream, block_begin);
  }
#endif

#ifndef PRODUCT
  ptrdiff_t offset = block_begin - code_begin();
  assert(offset >= 0, "Expecting non-negative offset!");
  _asm_remarks.print(uint(offset), stream);
#endif
}

void CodeBlob::dump_for_addr(address addr, outputStream* st, bool verbose) const {
  if (is_buffer_blob() || is_adapter_blob() || is_vtable_blob() || is_method_handles_adapter_blob()) {
    // the interpreter is generated into a buffer blob
    InterpreterCodelet* i = Interpreter::codelet_containing(addr);
    if (i != nullptr) {
      st->print_cr(INTPTR_FORMAT " is at code_begin+%d in an Interpreter codelet", p2i(addr), (int)(addr - i->code_begin()));
      i->print_on(st);
      return;
    }
    if (Interpreter::contains(addr)) {
      st->print_cr(INTPTR_FORMAT " is pointing into interpreter code"
                   " (not bytecode specific)", p2i(addr));
      return;
    }
    // the adapter handlers are also generated into buffer blobs
    if (AdapterHandlerLibrary::contains(this)) {
      st->print_cr(INTPTR_FORMAT " is at code_begin+%d in an AdapterHandler", p2i(addr), (int)(addr - code_begin()));
      AdapterHandlerLibrary::print_handler_on(st, this);
    }
    // the stubroutines are generated into a buffer blob
    StubCodeDesc* d = StubCodeDesc::desc_for(addr);
    if (d != nullptr) {
      st->print_cr(INTPTR_FORMAT " is at begin+%d in a stub", p2i(addr), (int)(addr - d->begin()));
      d->print_on(st);
      st->cr();
      return;
    }
    if (StubRoutines::contains(addr)) {
      st->print_cr(INTPTR_FORMAT " is pointing to an (unnamed) stub routine", p2i(addr));
      return;
    }
    VtableStub* v = VtableStubs::stub_containing(addr);
    if (v != nullptr) {
      st->print_cr(INTPTR_FORMAT " is at entry_point+%d in a vtable stub", p2i(addr), (int)(addr - v->entry_point()));
      v->print_on(st);
      st->cr();
      return;
    }
  }
  if (is_nmethod()) {
    nmethod* nm = (nmethod*)this;
    ResourceMark rm;
    st->print(INTPTR_FORMAT " is at entry_point+%d in (nmethod*)" INTPTR_FORMAT,
              p2i(addr), (int)(addr - nm->entry_point()), p2i(nm));
    if (verbose) {
      st->print(" for ");
      nm->method()->print_value_on(st);
    }
    st->cr();
    if (verbose && st == tty) {
      // verbose is only ever true when called from findpc in debug.cpp
      nm->print_nmethod(true);
    } else {
      nm->print_on(st);
    }
    return;
  }
  st->print_cr(INTPTR_FORMAT " is at code_begin+%d in ", p2i(addr), (int)(addr - code_begin()));
  print_on(st);
}

void BufferBlob::print_on_impl(outputStream* st) const {
  RuntimeBlob::print_on_impl(st);
  print_value_on_impl(st);
}

void BufferBlob::print_value_on_impl(outputStream* st) const {
  st->print_cr("BufferBlob (" INTPTR_FORMAT ") used for %s", p2i(this), name());
}

void RuntimeStub::print_on_impl(outputStream* st) const {
  ttyLocker ttyl;
  RuntimeBlob::print_on_impl(st);
  st->print("Runtime Stub (" INTPTR_FORMAT "): ", p2i(this));
  st->print_cr("%s", name());
  Disassembler::decode((RuntimeBlob*)this, st);
}

void RuntimeStub::print_value_on_impl(outputStream* st) const {
  st->print("RuntimeStub (" INTPTR_FORMAT "): ", p2i(this)); st->print("%s", name());
}

void SingletonBlob::print_on_impl(outputStream* st) const {
  ttyLocker ttyl;
  RuntimeBlob::print_on_impl(st);
  st->print_cr("%s", name());
  Disassembler::decode((RuntimeBlob*)this, st);
}

void SingletonBlob::print_value_on_impl(outputStream* st) const {
  st->print_cr("%s", name());
}

void DeoptimizationBlob::print_value_on_impl(outputStream* st) const {
  st->print_cr("Deoptimization (frame not available)");
}

void UpcallStub::print_on_impl(outputStream* st) const {
  RuntimeBlob::print_on_impl(st);
  print_value_on_impl(st);
  st->print_cr("Frame data offset: %d", (int) _frame_data_offset);
  oop recv = JNIHandles::resolve(_receiver);
  st->print("Receiver MH=");
  recv->print_on(st);
  Disassembler::decode((RuntimeBlob*)this, st);
}

void UpcallStub::print_value_on_impl(outputStream* st) const {
  st->print_cr("UpcallStub (" INTPTR_FORMAT ") used for %s", p2i(this), name());
}