/*
 * Copyright (c) 1998, 2025, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#include "code/codeBlob.hpp"
#include "code/codeCache.hpp"
#include "code/relocInfo.hpp"
#include "code/vtableStubs.hpp"
#include "compiler/disassembler.hpp"
#include "compiler/oopMap.hpp"
#include "interpreter/bytecode.hpp"
#include "interpreter/interpreter.hpp"
#include "jvm.h"
#include "memory/allocation.inline.hpp"
#include "memory/heap.hpp"
#include "memory/resourceArea.hpp"
#include "oops/oop.inline.hpp"
#include "prims/forte.hpp"
#include "prims/jvmtiExport.hpp"
#include "runtime/handles.inline.hpp"
#include "runtime/interfaceSupport.inline.hpp"
#include "runtime/javaFrameAnchor.hpp"
#include "runtime/jniHandles.inline.hpp"
#include "runtime/mutexLocker.hpp"
#include "runtime/safepoint.hpp"
#include "runtime/sharedRuntime.hpp"
#include "runtime/stubCodeGenerator.hpp"
#include "runtime/stubRoutines.hpp"
#include "runtime/vframe.hpp"
#include "services/memoryService.hpp"
#include "utilities/align.hpp"
#ifdef COMPILER1
#include "c1/c1_Runtime1.hpp"
#endif

#include <type_traits>

// Virtual methods are not allowed in code blobs to simplify caching compiled code.
// Check all "leaf" subclasses of CodeBlob class.
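// (A C++ vptr is an address into the current process' image. A blob that
// carried one could not simply be memcpy'd into an AOT code cache and reused
// in a different JVM instance, hence the checks below.)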
static_assert(!std::is_polymorphic<nmethod>::value, "no virtual methods are allowed in nmethod");
static_assert(!std::is_polymorphic<AdapterBlob>::value, "no virtual methods are allowed in code blobs");
static_assert(!std::is_polymorphic<VtableBlob>::value, "no virtual methods are allowed in code blobs");
static_assert(!std::is_polymorphic<MethodHandlesAdapterBlob>::value, "no virtual methods are allowed in code blobs");
static_assert(!std::is_polymorphic<RuntimeStub>::value, "no virtual methods are allowed in code blobs");
static_assert(!std::is_polymorphic<BufferedInlineTypeBlob>::value, "no virtual methods are allowed in code blobs");
static_assert(!std::is_polymorphic<DeoptimizationBlob>::value, "no virtual methods are allowed in code blobs");
static_assert(!std::is_polymorphic<SafepointBlob>::value, "no virtual methods are allowed in code blobs");
static_assert(!std::is_polymorphic<UpcallStub>::value, "no virtual methods are allowed in code blobs");
#ifdef COMPILER2
static_assert(!std::is_polymorphic<ExceptionBlob>::value, "no virtual methods are allowed in code blobs");
static_assert(!std::is_polymorphic<UncommonTrapBlob>::value, "no virtual methods are allowed in code blobs");
#endif

// Add proxy vtables.
// We need only a few for now - they are used only from prints.
const nmethod::Vptr nmethod::_vpntr;
const BufferBlob::Vptr BufferBlob::_vpntr;
const RuntimeStub::Vptr RuntimeStub::_vpntr;
const SingletonBlob::Vptr SingletonBlob::_vpntr;
const DeoptimizationBlob::Vptr DeoptimizationBlob::_vpntr;
#ifdef COMPILER2
const ExceptionBlob::Vptr ExceptionBlob::_vpntr;
#endif // COMPILER2
const UpcallStub::Vptr UpcallStub::_vpntr;

const CodeBlob::Vptr* CodeBlob::vptr(CodeBlobKind kind) {
  constexpr const CodeBlob::Vptr* array[(size_t)CodeBlobKind::Number_Of_Kinds] = {
      nullptr/* None */,
      &nmethod::_vpntr,
      &BufferBlob::_vpntr,
      &AdapterBlob::_vpntr,
      &VtableBlob::_vpntr,
      &MethodHandlesAdapterBlob::_vpntr,
      &BufferedInlineTypeBlob::_vpntr,
      &RuntimeStub::_vpntr,
      &DeoptimizationBlob::_vpntr,
      &SafepointBlob::_vpntr,
#ifdef COMPILER2
      &ExceptionBlob::_vpntr,
      &UncommonTrapBlob::_vpntr,
#endif
      &UpcallStub::_vpntr
  };

  return array[(size_t)kind];
}

const CodeBlob::Vptr* CodeBlob::vptr() const {
  return vptr(_kind);
}

unsigned int CodeBlob::align_code_offset(int offset) {
  // align the size to CodeEntryAlignment
  int header_size = (int)CodeHeap::header_size();
  return align_up(offset + header_size, CodeEntryAlignment) - header_size;
}
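// Example for align_code_offset() above (illustrative numbers, not real
// defaults): with CodeEntryAlignment == 32 and CodeHeap::header_size() == 8,
// align_code_offset(20) == align_up(20 + 8, 32) - 8 == 24, so that once the
// CodeHeap block header is prepended, the code itself starts 32-byte aligned.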
// This must be consistent with the CodeBlob constructor's layout actions.
unsigned int CodeBlob::allocation_size(CodeBuffer* cb, int header_size) {
  // align the size to CodeEntryAlignment
  unsigned int size = align_code_offset(header_size);
  size += align_up(cb->total_content_size(), oopSize);
  size += align_up(cb->total_oop_size(), oopSize);
  return size;
}

CodeBlob::CodeBlob(const char* name, CodeBlobKind kind, CodeBuffer* cb, int size, uint16_t header_size,
                   int16_t frame_complete_offset, int frame_size, OopMapSet* oop_maps, bool caller_must_gc_arguments,
                   int mutable_data_size) :
  _oop_maps(nullptr), // will be set by set_oop_maps() call
  _name(name),
  _mutable_data(header_begin() + size), // default value is blob_end()
  _size(size),
  _relocation_size(align_up(cb->total_relocation_size(), oopSize)),
  _content_offset(CodeBlob::align_code_offset(header_size)),
  _code_offset(_content_offset + cb->total_offset_of(cb->insts())),
  _data_offset(_content_offset + align_up(cb->total_content_size(), oopSize)),
  _frame_size(frame_size),
  _mutable_data_size(mutable_data_size),
  S390_ONLY(_ctable_offset(0) COMMA)
  _header_size(header_size),
  _frame_complete_offset(frame_complete_offset),
  _kind(kind),
  _caller_must_gc_arguments(caller_must_gc_arguments)
{
  assert(is_aligned(_size, oopSize), "unaligned size");
  assert(is_aligned(header_size, oopSize), "unaligned size");
  assert(is_aligned(_relocation_size, oopSize), "unaligned size");
  assert(_data_offset <= _size, "codeBlob is too small: %d > %d", _data_offset, _size);
  assert(is_nmethod() || (cb->total_oop_size() + cb->total_metadata_size() == 0), "must be nmethod");
  assert(code_end() == content_end(), "must be the same - see code_end()");
#ifdef COMPILER1
  // probably wrong for tiered
  assert(_frame_size >= -1, "must use frame size or -1 for runtime stubs");
#endif // COMPILER1

  if (_mutable_data_size > 0) {
    _mutable_data = (address)os::malloc(_mutable_data_size, mtCode);
    if (_mutable_data == nullptr) {
      vm_exit_out_of_memory(_mutable_data_size, OOM_MALLOC_ERROR, "codebuffer: no space for mutable data");
    }
  } else {
    // We need a unique and valid non-null address.
    assert(_mutable_data == blob_end(), "sanity");
  }

  set_oop_maps(oop_maps);
}
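// A rough sketch of the layout the constructor above sets up inside the code
// cache (relocation data lives in the separately malloc'ed mutable data area,
// not in the blob itself):
//
//   header_begin()                                this CodeBlob, _header_size bytes
//   content_begin() == header + _content_offset   instructions and stubs
//   code_begin()    == header + _code_offset      first instruction
//   data_begin()    == header + _data_offset      oops/metadata (nmethod only)
//   blob_end()      == header + _size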
// Simple CodeBlob used for simple BufferBlob.
CodeBlob::CodeBlob(const char* name, CodeBlobKind kind, int size, uint16_t header_size) :
  _oop_maps(nullptr),
  _name(name),
  _mutable_data(header_begin() + size), // default value is blob_end()
  _size(size),
  _relocation_size(0),
  _content_offset(CodeBlob::align_code_offset(header_size)),
  _code_offset(_content_offset),
  _data_offset(size),
  _frame_size(0),
  S390_ONLY(_ctable_offset(0) COMMA)
  _header_size(header_size),
  _frame_complete_offset(CodeOffsets::frame_never_safe),
  _kind(kind),
  _caller_must_gc_arguments(false)
{
  assert(is_aligned(size, oopSize), "unaligned size");
  assert(is_aligned(header_size, oopSize), "unaligned size");
  assert(_mutable_data == blob_end(), "sanity");
}

void CodeBlob::restore_mutable_data(address reloc_data) {
  // Relocation data is now stored as part of the mutable data area; allocate it before copying relocations.
  if (_mutable_data_size > 0) {
    _mutable_data = (address)os::malloc(_mutable_data_size, mtCode);
    if (_mutable_data == nullptr) {
      vm_exit_out_of_memory(_mutable_data_size, OOM_MALLOC_ERROR, "codebuffer: no space for mutable data");
    }
  }
  if (_relocation_size > 0) {
    memcpy((address)relocation_begin(), reloc_data, relocation_size());
  }
}

void CodeBlob::purge() {
  assert(_mutable_data != nullptr, "should never be null");
  if (_mutable_data != blob_end()) {
    os::free(_mutable_data);
    _mutable_data = blob_end(); // Valid not null address
    _mutable_data_size = 0;
    _relocation_size = 0;
  }
  if (_oop_maps != nullptr) {
    delete _oop_maps;
    _oop_maps = nullptr;
  }
  NOT_PRODUCT(_asm_remarks.clear());
  NOT_PRODUCT(_dbg_strings.clear());
}

void CodeBlob::set_oop_maps(OopMapSet* p) {
  // Danger, Will Robinson! This method allocates a big
  // chunk of memory, it's your job to free it.
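  // build_from() below makes an immutable, C-heap copy of 'p'; that copy is
  // what purge() above later releases with 'delete _oop_maps'. 'p' itself
  // (typically resource-allocated) is left untouched.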
  if (p != nullptr) {
    _oop_maps = ImmutableOopMapSet::build_from(p);
  } else {
    _oop_maps = nullptr;
  }
}

const ImmutableOopMap* CodeBlob::oop_map_for_return_address(address return_address) const {
  assert(_oop_maps != nullptr, "nope");
  return _oop_maps->find_map_at_offset((intptr_t) return_address - (intptr_t) code_begin());
}

void CodeBlob::print_code_on(outputStream* st) {
  ResourceMark m;
  Disassembler::decode(this, st);
}

void CodeBlob::prepare_for_archiving_impl() {
  set_name(nullptr);
  _oop_maps = nullptr;
  _mutable_data = nullptr;
#ifndef PRODUCT
  asm_remarks().clear();
  dbg_strings().clear();
#endif /* PRODUCT */
}

void CodeBlob::prepare_for_archiving() {
  vptr(_kind)->prepare_for_archiving(this);
}

void CodeBlob::archive_blob(CodeBlob* blob, address archive_buffer) {
  blob->copy_to(archive_buffer);
  CodeBlob* archived_blob = (CodeBlob*)archive_buffer;
  archived_blob->prepare_for_archiving();
}

void CodeBlob::post_restore_impl() {
  // Track memory usage statistic after releasing CodeCache_lock
  MemoryService::track_code_cache_memory_usage();
}

void CodeBlob::post_restore() {
  vptr(_kind)->post_restore(this);
}

CodeBlob* CodeBlob::restore(address code_cache_buffer,
                            const char* name,
                            address archived_reloc_data,
                            ImmutableOopMapSet* archived_oop_maps)
{
  copy_to(code_cache_buffer);
  CodeBlob* code_blob = (CodeBlob*)code_cache_buffer;
  code_blob->set_name(name);
  code_blob->restore_mutable_data(archived_reloc_data);
  code_blob->set_oop_maps(archived_oop_maps);
  return code_blob;
}
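// Taken together, the archive round trip sketched by the functions above and
// create() below is: archive_blob() memcpys the blob into the archive buffer
// and then scrubs process-local state (name, oop maps, mutable data) via
// prepare_for_archiving(); in the new process, create() allocates code cache
// space and restore() re-attaches that state.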
CodeBlob* CodeBlob::create(CodeBlob* archived_blob,
                           const char* name,
                           address archived_reloc_data,
                           ImmutableOopMapSet* archived_oop_maps)
{
  ThreadInVMfromUnknown __tiv;  // get to VM state in case we block on CodeCache_lock

  CodeCache::gc_on_allocation();

  CodeBlob* blob = nullptr;
  unsigned int size = archived_blob->size();
  {
    MutexLocker mu(CodeCache_lock, Mutex::_no_safepoint_check_flag);
    address code_cache_buffer = (address)CodeCache::allocate(size, CodeBlobType::NonNMethod);
    if (code_cache_buffer != nullptr) {
      blob = archived_blob->restore(code_cache_buffer,
                                    name,
                                    archived_reloc_data,
                                    archived_oop_maps);
      assert(blob != nullptr, "sanity check");

      // Flush the code block
      ICache::invalidate_range(blob->code_begin(), blob->code_size());
      CodeCache::commit(blob); // Count adapters
    }
  }
  if (blob != nullptr) {
    blob->post_restore();
  }
  return blob;
}

//-----------------------------------------------------------------------------------------
// Creates a RuntimeBlob from a CodeBuffer and copies code and relocation info.

RuntimeBlob::RuntimeBlob(
  const char* name,
  CodeBlobKind kind,
  CodeBuffer* cb,
  int size,
  uint16_t header_size,
  int16_t frame_complete,
  int frame_size,
  OopMapSet* oop_maps,
  bool caller_must_gc_arguments)
  : CodeBlob(name, kind, cb, size, header_size, frame_complete, frame_size, oop_maps, caller_must_gc_arguments,
             align_up(cb->total_relocation_size(), oopSize))
{
  cb->copy_code_and_locs_to(this);
}

void RuntimeBlob::free(RuntimeBlob* blob) {
  assert(blob != nullptr, "caller must check for nullptr");
  ThreadInVMfromUnknown __tiv;  // get to VM state in case we block on CodeCache_lock
  blob->purge();
  {
    MutexLocker mu(CodeCache_lock, Mutex::_no_safepoint_check_flag);
    CodeCache::free(blob);
  }
  // Track memory usage statistic after releasing CodeCache_lock
  MemoryService::track_code_cache_memory_usage();
}

void RuntimeBlob::trace_new_stub(RuntimeBlob* stub, const char* name1, const char* name2) {
  // Do not hold the CodeCache lock during name formatting.
  assert(!CodeCache_lock->owned_by_self(), "release CodeCache before registering the stub");

  if (stub != nullptr && (PrintStubCode ||
                          Forte::is_enabled() ||
                          JvmtiExport::should_post_dynamic_code_generated())) {
    char stub_id[256];
    assert(strlen(name1) + strlen(name2) < sizeof(stub_id), "");
    jio_snprintf(stub_id, sizeof(stub_id), "%s%s", name1, name2);
    if (PrintStubCode) {
      ttyLocker ttyl;
      tty->print_cr("- - - [BEGIN] - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -");
      tty->print_cr("Decoding %s " PTR_FORMAT " [" PTR_FORMAT ", " PTR_FORMAT "] (%d bytes)",
                    stub_id, p2i(stub), p2i(stub->code_begin()), p2i(stub->code_end()), stub->code_size());
      Disassembler::decode(stub->code_begin(), stub->code_end(), tty
                           NOT_PRODUCT(COMMA &stub->asm_remarks()));
      if ((stub->oop_maps() != nullptr) && AbstractDisassembler::show_structs()) {
        tty->print_cr("- - - [OOP MAPS]- - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -");
        stub->oop_maps()->print();
      }
      tty->print_cr("- - - [END] - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -");
      tty->cr();
    }
    if (Forte::is_enabled()) {
      Forte::register_stub(stub_id, stub->code_begin(), stub->code_end());
    }

    if (JvmtiExport::should_post_dynamic_code_generated()) {
      const char* stub_name = name2;
      if (name2[0] == '\0') {
        stub_name = name1;
      }
      JvmtiExport::post_dynamic_code_generated(stub_name, stub->code_begin(), stub->code_end());
    }
  }

  // Track memory usage statistic after releasing CodeCache_lock
  MemoryService::track_code_cache_memory_usage();
}

//----------------------------------------------------------------------------------------------------
// Implementation of BufferBlob

BufferBlob::BufferBlob(const char* name, CodeBlobKind kind, int size)
  : RuntimeBlob(name, kind, size, sizeof(BufferBlob))
{}
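// Size arithmetic in create() below, with illustrative numbers only (the real
// values are platform dependent): if sizeof(BufferBlob) == 96,
// CodeEntryAlignment == 32, CodeHeap::header_size() == 8 and buffer_size == 100:
//   align_code_offset(96) == align_up(96 + 8, 32) - 8 == 120
//   size == 120 + align_up(100, oopSize /* 8 here */) == 224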
BufferBlob* BufferBlob::create(const char* name, uint buffer_size) {
  ThreadInVMfromUnknown __tiv;  // get to VM state in case we block on CodeCache_lock

  BufferBlob* blob = nullptr;
  unsigned int size = sizeof(BufferBlob);
  // align the size to CodeEntryAlignment
  size = CodeBlob::align_code_offset(size);
  size += align_up(buffer_size, oopSize);
  assert(name != nullptr, "must provide a name");
  {
    MutexLocker mu(CodeCache_lock, Mutex::_no_safepoint_check_flag);
    blob = new (size) BufferBlob(name, CodeBlobKind::Buffer, size);
  }
  // Track memory usage statistic after releasing CodeCache_lock
  MemoryService::track_code_cache_memory_usage();

  return blob;
}


BufferBlob::BufferBlob(const char* name, CodeBlobKind kind, CodeBuffer* cb, int size, int header_size)
  : RuntimeBlob(name, kind, cb, size, header_size, CodeOffsets::frame_never_safe, 0, nullptr)
{}

// Used by gtest
BufferBlob* BufferBlob::create(const char* name, CodeBuffer* cb) {
  ThreadInVMfromUnknown __tiv;  // get to VM state in case we block on CodeCache_lock

  BufferBlob* blob = nullptr;
  unsigned int size = CodeBlob::allocation_size(cb, sizeof(BufferBlob));
  assert(name != nullptr, "must provide a name");
  {
    MutexLocker mu(CodeCache_lock, Mutex::_no_safepoint_check_flag);
    blob = new (size) BufferBlob(name, CodeBlobKind::Buffer, cb, size, sizeof(BufferBlob));
  }
  // Track memory usage statistic after releasing CodeCache_lock
  MemoryService::track_code_cache_memory_usage();

  return blob;
}

void* BufferBlob::operator new(size_t s, unsigned size) throw() {
  return CodeCache::allocate(size, CodeBlobType::NonNMethod);
}

void BufferBlob::free(BufferBlob *blob) {
  RuntimeBlob::free(blob);
}

BufferBlob::BufferBlob(const char* name, CodeBlobKind kind, CodeBuffer* cb, int size, int frame_complete, int frame_size, OopMapSet* oop_maps, bool caller_must_gc_arguments)
  : RuntimeBlob(name, kind, cb, size, sizeof(BufferBlob), frame_complete, frame_size, oop_maps, caller_must_gc_arguments)
{}


//----------------------------------------------------------------------------------------------------
// Implementation of AdapterBlob

AdapterBlob::AdapterBlob(int size, CodeBuffer* cb, int frame_complete, int frame_size, OopMapSet* oop_maps, bool caller_must_gc_arguments) :
  BufferBlob("I2C/C2I adapters", CodeBlobKind::Adapter, cb, size, frame_complete, frame_size, oop_maps, caller_must_gc_arguments) {
  CodeCache::commit(this);
}

AdapterBlob* AdapterBlob::create(CodeBuffer* cb, int frame_complete, int frame_size, OopMapSet* oop_maps, bool caller_must_gc_arguments) {
  ThreadInVMfromUnknown __tiv;  // get to VM state in case we block on CodeCache_lock

  CodeCache::gc_on_allocation();

  AdapterBlob* blob = nullptr;
  unsigned int size = CodeBlob::allocation_size(cb, sizeof(AdapterBlob));
  {
    MutexLocker mu(CodeCache_lock, Mutex::_no_safepoint_check_flag);
    blob = new (size) AdapterBlob(size, cb, frame_complete, frame_size, oop_maps, caller_must_gc_arguments);
  }
  // Track memory usage statistic after releasing CodeCache_lock
  MemoryService::track_code_cache_memory_usage();

  return blob;
}
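// Note on the 'new (size) SomeBlob(...)' pattern used throughout this file:
// these placement operator news forward to CodeCache::allocate(), which can
// return nullptr. Because the operators are declared throw(), a nullptr
// return skips the constructor and makes the whole new-expression yield
// nullptr, so the 'blob == nullptr' checks after allocation are sound.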
//----------------------------------------------------------------------------------------------------
// Implementation of VtableBlob

void* VtableBlob::operator new(size_t s, unsigned size) throw() {
  // Handling of allocation failure stops compilation and prints a bunch of
  // stuff, which requires unlocking the CodeCache_lock, so that the Compile_lock
  // can be locked, and then re-locking the CodeCache_lock. That is not safe in
  // this context as we hold the CompiledICLocker. So we just don't handle code
  // cache exhaustion here; we leave that for a later allocation that does not
  // hold the CompiledICLocker.
  return CodeCache::allocate(size, CodeBlobType::NonNMethod, false /* handle_alloc_failure */);
}

VtableBlob::VtableBlob(const char* name, int size) :
  BufferBlob(name, CodeBlobKind::Vtable, size) {
}

VtableBlob* VtableBlob::create(const char* name, int buffer_size) {
  assert(JavaThread::current()->thread_state() == _thread_in_vm, "called with the wrong state");

  VtableBlob* blob = nullptr;
  unsigned int size = sizeof(VtableBlob);
  // align the size to CodeEntryAlignment
  size = align_code_offset(size);
  size += align_up(buffer_size, oopSize);
  assert(name != nullptr, "must provide a name");
  {
    if (!CodeCache_lock->try_lock()) {
      // If we can't take the CodeCache_lock, then this is a bad time to perform the ongoing
      // IC transition to megamorphic, for which this stub will be needed. It is better to
      // bail out the transition, and wait for a more opportune moment. Not only is it not
      // worth waiting for the lock blockingly for the megamorphic transition, it might
      // also result in a deadlock to blockingly wait, when concurrent class unloading is
      // performed. At this point in time, the CompiledICLocker is taken, so we are not
      // allowed to blockingly wait for the CodeCache_lock, as these two locks are otherwise
      // consistently taken in the opposite order. Bailing out results in an IC transition to
      // the clean state instead, which will cause subsequent calls to retry the transitioning
      // eventually.
      return nullptr;
    }
    blob = new (size) VtableBlob(name, size);
    CodeCache_lock->unlock();
  }
  // Track memory usage statistic after releasing CodeCache_lock
  MemoryService::track_code_cache_memory_usage();

  return blob;
}

//----------------------------------------------------------------------------------------------------
// Implementation of MethodHandlesAdapterBlob

MethodHandlesAdapterBlob* MethodHandlesAdapterBlob::create(int buffer_size) {
  ThreadInVMfromUnknown __tiv;  // get to VM state in case we block on CodeCache_lock

  MethodHandlesAdapterBlob* blob = nullptr;
  unsigned int size = sizeof(MethodHandlesAdapterBlob);
  // align the size to CodeEntryAlignment
  size = CodeBlob::align_code_offset(size);
  size += align_up(buffer_size, oopSize);
  {
    MutexLocker mu(CodeCache_lock, Mutex::_no_safepoint_check_flag);
    blob = new (size) MethodHandlesAdapterBlob(size);
    if (blob == nullptr) {
      vm_exit_out_of_memory(size, OOM_MALLOC_ERROR, "CodeCache: no room for method handle adapter blob");
    }
  }
  // Track memory usage statistic after releasing CodeCache_lock
  MemoryService::track_code_cache_memory_usage();

  return blob;
}

//----------------------------------------------------------------------------------------------------
// Implementation of BufferedInlineTypeBlob

BufferedInlineTypeBlob::BufferedInlineTypeBlob(int size, CodeBuffer* cb, int pack_fields_off, int pack_fields_jobject_off, int unpack_fields_off) :
  BufferBlob("buffered inline type", CodeBlobKind::BufferedInlineType, cb, size, sizeof(BufferedInlineTypeBlob)),
  _pack_fields_off(pack_fields_off),
  _pack_fields_jobject_off(pack_fields_jobject_off),
  _unpack_fields_off(unpack_fields_off) {
  CodeCache::commit(this);
}
BufferedInlineTypeBlob* BufferedInlineTypeBlob::create(CodeBuffer* cb, int pack_fields_off, int pack_fields_jobject_off, int unpack_fields_off) {
  ThreadInVMfromUnknown __tiv;  // get to VM state in case we block on CodeCache_lock

  BufferedInlineTypeBlob* blob = nullptr;
  unsigned int size = CodeBlob::allocation_size(cb, sizeof(BufferedInlineTypeBlob));
  {
    MutexLocker mu(CodeCache_lock, Mutex::_no_safepoint_check_flag);
    blob = new (size) BufferedInlineTypeBlob(size, cb, pack_fields_off, pack_fields_jobject_off, unpack_fields_off);
  }
  // Track memory usage statistic after releasing CodeCache_lock
  MemoryService::track_code_cache_memory_usage();

  return blob;
}

//----------------------------------------------------------------------------------------------------
// Implementation of RuntimeStub

RuntimeStub::RuntimeStub(
  const char* name,
  CodeBuffer* cb,
  int size,
  int16_t frame_complete,
  int frame_size,
  OopMapSet* oop_maps,
  bool caller_must_gc_arguments
)
: RuntimeBlob(name, CodeBlobKind::RuntimeStub, cb, size, sizeof(RuntimeStub),
              frame_complete, frame_size, oop_maps, caller_must_gc_arguments)
{
}

RuntimeStub* RuntimeStub::new_runtime_stub(const char* stub_name,
                                           CodeBuffer* cb,
                                           int16_t frame_complete,
                                           int frame_size,
                                           OopMapSet* oop_maps,
                                           bool caller_must_gc_arguments,
                                           bool alloc_fail_is_fatal)
{
  RuntimeStub* stub = nullptr;
  unsigned int size = CodeBlob::allocation_size(cb, sizeof(RuntimeStub));
  ThreadInVMfromUnknown __tiv;  // get to VM state in case we block on CodeCache_lock
  {
    MutexLocker mu(CodeCache_lock, Mutex::_no_safepoint_check_flag);
    stub = new (size) RuntimeStub(stub_name, cb, size, frame_complete, frame_size, oop_maps, caller_must_gc_arguments);
    if (stub == nullptr) {
      if (!alloc_fail_is_fatal) {
        return nullptr;
      }
      fatal("Initial size of CodeCache is too small");
    }
  }

  trace_new_stub(stub, "RuntimeStub - ", stub_name);

  return stub;
}


void* RuntimeStub::operator new(size_t s, unsigned size) throw() {
  return CodeCache::allocate(size, CodeBlobType::NonNMethod);
}

// operator new shared by all singletons:
void* SingletonBlob::operator new(size_t s, unsigned size, bool alloc_fail_is_fatal) throw() {
  void* p = CodeCache::allocate(size, CodeBlobType::NonNMethod);
  if (alloc_fail_is_fatal && !p) {
    fatal("Initial size of CodeCache is too small");
  }
  return p;
}


//----------------------------------------------------------------------------------------------------
// Implementation of DeoptimizationBlob

DeoptimizationBlob::DeoptimizationBlob(
  CodeBuffer* cb,
  int size,
  OopMapSet* oop_maps,
  int unpack_offset,
  int unpack_with_exception_offset,
  int unpack_with_reexecution_offset,
  int frame_size
)
: SingletonBlob("DeoptimizationBlob", CodeBlobKind::Deoptimization, cb,
                size, sizeof(DeoptimizationBlob), frame_size, oop_maps)
{
  _unpack_offset           = unpack_offset;
  _unpack_with_exception   = unpack_with_exception_offset;
  _unpack_with_reexecution = unpack_with_reexecution_offset;
#ifdef COMPILER1
  _unpack_with_exception_in_tls = -1;
#endif
}
DeoptimizationBlob* DeoptimizationBlob::create(
  CodeBuffer* cb,
  OopMapSet* oop_maps,
  int unpack_offset,
  int unpack_with_exception_offset,
  int unpack_with_reexecution_offset,
  int frame_size)
{
  DeoptimizationBlob* blob = nullptr;
  unsigned int size = CodeBlob::allocation_size(cb, sizeof(DeoptimizationBlob));
  ThreadInVMfromUnknown __tiv;  // get to VM state in case we block on CodeCache_lock
  {
    MutexLocker mu(CodeCache_lock, Mutex::_no_safepoint_check_flag);
    blob = new (size) DeoptimizationBlob(cb,
                                         size,
                                         oop_maps,
                                         unpack_offset,
                                         unpack_with_exception_offset,
                                         unpack_with_reexecution_offset,
                                         frame_size);
  }

  trace_new_stub(blob, "DeoptimizationBlob");

  return blob;
}

#ifdef COMPILER2

//----------------------------------------------------------------------------------------------------
// Implementation of UncommonTrapBlob

UncommonTrapBlob::UncommonTrapBlob(
  CodeBuffer* cb,
  int size,
  OopMapSet* oop_maps,
  int frame_size
)
: SingletonBlob("UncommonTrapBlob", CodeBlobKind::UncommonTrap, cb,
                size, sizeof(UncommonTrapBlob), frame_size, oop_maps)
{}


UncommonTrapBlob* UncommonTrapBlob::create(
  CodeBuffer* cb,
  OopMapSet* oop_maps,
  int frame_size)
{
  UncommonTrapBlob* blob = nullptr;
  unsigned int size = CodeBlob::allocation_size(cb, sizeof(UncommonTrapBlob));
  ThreadInVMfromUnknown __tiv;  // get to VM state in case we block on CodeCache_lock
  {
    MutexLocker mu(CodeCache_lock, Mutex::_no_safepoint_check_flag);
    blob = new (size, false) UncommonTrapBlob(cb, size, oop_maps, frame_size);
  }

  trace_new_stub(blob, "UncommonTrapBlob");

  return blob;
}

//----------------------------------------------------------------------------------------------------
// Implementation of ExceptionBlob

ExceptionBlob::ExceptionBlob(
  CodeBuffer* cb,
  int size,
  OopMapSet* oop_maps,
  int frame_size
)
: SingletonBlob("ExceptionBlob", CodeBlobKind::Exception, cb,
                size, sizeof(ExceptionBlob), frame_size, oop_maps)
{}


ExceptionBlob* ExceptionBlob::create(
  CodeBuffer* cb,
  OopMapSet* oop_maps,
  int frame_size)
{
  ExceptionBlob* blob = nullptr;
  unsigned int size = CodeBlob::allocation_size(cb, sizeof(ExceptionBlob));
  ThreadInVMfromUnknown __tiv;  // get to VM state in case we block on CodeCache_lock
  {
    MutexLocker mu(CodeCache_lock, Mutex::_no_safepoint_check_flag);
    blob = new (size, false) ExceptionBlob(cb, size, oop_maps, frame_size);
  }

  trace_new_stub(blob, "ExceptionBlob");

  return blob;
}

#endif // COMPILER2

//----------------------------------------------------------------------------------------------------
// Implementation of SafepointBlob

SafepointBlob::SafepointBlob(
  CodeBuffer* cb,
  int size,
  OopMapSet* oop_maps,
  int frame_size
)
: SingletonBlob("SafepointBlob", CodeBlobKind::Safepoint, cb,
                size, sizeof(SafepointBlob), frame_size, oop_maps)
{}


SafepointBlob* SafepointBlob::create(
  CodeBuffer* cb,
  OopMapSet* oop_maps,
  int frame_size)
{
  SafepointBlob* blob = nullptr;
  unsigned int size = CodeBlob::allocation_size(cb, sizeof(SafepointBlob));
  ThreadInVMfromUnknown __tiv;  // get to VM state in case we block on CodeCache_lock
  {
    MutexLocker mu(CodeCache_lock, Mutex::_no_safepoint_check_flag);
    blob = new (size) SafepointBlob(cb, size, oop_maps, frame_size);
  }

  trace_new_stub(blob, "SafepointBlob");

  return blob;
}

//----------------------------------------------------------------------------------------------------
// Implementation of UpcallStub
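// An UpcallStub is a native-to-Java entry used for foreign upcalls. It keeps
// the receiver MethodHandle alive via the global JNI handle passed in here;
// UpcallStub::free() below releases that handle with
// JNIHandles::destroy_global() before freeing the blob itself.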
UpcallStub::UpcallStub(const char* name, CodeBuffer* cb, int size, jobject receiver, ByteSize frame_data_offset) :
  RuntimeBlob(name, CodeBlobKind::Upcall, cb, size, sizeof(UpcallStub),
              CodeOffsets::frame_never_safe, 0 /* no frame size */,
              /* oop maps = */ nullptr, /* caller must gc arguments = */ false),
  _receiver(receiver),
  _frame_data_offset(frame_data_offset)
{
  CodeCache::commit(this);
}

void* UpcallStub::operator new(size_t s, unsigned size) throw() {
  return CodeCache::allocate(size, CodeBlobType::NonNMethod);
}

UpcallStub* UpcallStub::create(const char* name, CodeBuffer* cb, jobject receiver, ByteSize frame_data_offset) {
  ThreadInVMfromUnknown __tiv;  // get to VM state in case we block on CodeCache_lock

  UpcallStub* blob = nullptr;
  unsigned int size = CodeBlob::allocation_size(cb, sizeof(UpcallStub));
  {
    MutexLocker mu(CodeCache_lock, Mutex::_no_safepoint_check_flag);
    blob = new (size) UpcallStub(name, cb, size, receiver, frame_data_offset);
  }
  if (blob == nullptr) {
    return nullptr; // caller must handle this
  }

  // Track memory usage statistic after releasing CodeCache_lock
  MemoryService::track_code_cache_memory_usage();

  trace_new_stub(blob, "UpcallStub - ", name);

  return blob;
}

void UpcallStub::oops_do(OopClosure* f, const frame& frame) {
  frame_data_for_frame(frame)->old_handles->oops_do(f);
}

JavaFrameAnchor* UpcallStub::jfa_for_frame(const frame& frame) const {
  return &frame_data_for_frame(frame)->jfa;
}

void UpcallStub::free(UpcallStub* blob) {
  assert(blob != nullptr, "caller must check for nullptr");
  JNIHandles::destroy_global(blob->receiver());
  RuntimeBlob::free(blob);
}

//----------------------------------------------------------------------------------------------------
// Verification and printing

void CodeBlob::verify() {
  if (is_nmethod()) {
    as_nmethod()->verify();
  }
}

void CodeBlob::print_on(outputStream* st) const {
  vptr()->print_on(this, st);
}

void CodeBlob::print() const { print_on(tty); }

void CodeBlob::print_value_on(outputStream* st) const {
  vptr()->print_value_on(this, st);
}

void CodeBlob::print_on_impl(outputStream* st) const {
  st->print_cr("[CodeBlob kind:%d (" INTPTR_FORMAT ")]", (int)_kind, p2i(this));
  st->print_cr("Framesize: %d", _frame_size);
}

void CodeBlob::print_value_on_impl(outputStream* st) const {
  st->print_cr("[CodeBlob]");
}

void CodeBlob::print_block_comment(outputStream* stream, address block_begin) const {
#if defined(SUPPORT_ASSEMBLY) || defined(SUPPORT_ABSTRACT_ASSEMBLY)
  if (is_nmethod()) {
    as_nmethod()->print_nmethod_labels(stream, block_begin);
  }
#endif

#ifndef PRODUCT
  ptrdiff_t offset = block_begin - code_begin();
  assert(offset >= 0, "Expecting non-negative offset!");
  _asm_remarks.print(uint(offset), stream);
#endif
}
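// Note: print_on()/print_value_on() above dispatch through the hand-rolled
// Vptr tables defined at the top of this file rather than through C++ virtual
// calls; the *_impl() functions are the bodies those tables select.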
void CodeBlob::dump_for_addr(address addr, outputStream* st, bool verbose) const {
  if (is_buffer_blob() || is_adapter_blob() || is_vtable_blob() || is_method_handles_adapter_blob()) {
    // the interpreter is generated into a buffer blob
    InterpreterCodelet* i = Interpreter::codelet_containing(addr);
    if (i != nullptr) {
      st->print_cr(INTPTR_FORMAT " is at code_begin+%d in an Interpreter codelet", p2i(addr), (int)(addr - i->code_begin()));
      i->print_on(st);
      return;
    }
    if (Interpreter::contains(addr)) {
      st->print_cr(INTPTR_FORMAT " is pointing into interpreter code"
                   " (not bytecode specific)", p2i(addr));
      return;
    }
    if (AdapterHandlerLibrary::contains(this)) {
      st->print_cr(INTPTR_FORMAT " is at code_begin+%d in an AdapterHandler", p2i(addr), (int)(addr - code_begin()));
      AdapterHandlerLibrary::print_handler_on(st, this);
    }
    // the stubroutines are generated into a buffer blob
    StubCodeDesc* d = StubCodeDesc::desc_for(addr);
    if (d != nullptr) {
      st->print_cr(INTPTR_FORMAT " is at begin+%d in a stub", p2i(addr), (int)(addr - d->begin()));
      d->print_on(st);
      st->cr();
      return;
    }
    if (StubRoutines::contains(addr)) {
      st->print_cr(INTPTR_FORMAT " is pointing to an (unnamed) stub routine", p2i(addr));
      return;
    }
    VtableStub* v = VtableStubs::stub_containing(addr);
    if (v != nullptr) {
      st->print_cr(INTPTR_FORMAT " is at entry_point+%d in a vtable stub", p2i(addr), (int)(addr - v->entry_point()));
      v->print_on(st);
      st->cr();
      return;
    }
  }
  if (is_nmethod()) {
    nmethod* nm = (nmethod*)this;
    ResourceMark rm;
    st->print(INTPTR_FORMAT " is at entry_point+%d in (nmethod*)" INTPTR_FORMAT,
              p2i(addr), (int)(addr - nm->entry_point()), p2i(nm));
    if (verbose) {
      st->print(" for ");
      nm->method()->print_value_on(st);
    }
    st->cr();
    if (verbose && st == tty) {
      // verbose is only ever true when called from findpc in debug.cpp
      nm->print_nmethod(true);
    } else {
      nm->print_on(st);
    }
    return;
  }
  st->print_cr(INTPTR_FORMAT " is at code_begin+%d in ", p2i(addr), (int)(addr - code_begin()));
  print_on(st);
}

void BufferBlob::print_on_impl(outputStream* st) const {
  RuntimeBlob::print_on_impl(st);
  print_value_on_impl(st);
}

void BufferBlob::print_value_on_impl(outputStream* st) const {
  st->print_cr("BufferBlob (" INTPTR_FORMAT ") used for %s", p2i(this), name());
}

void RuntimeStub::print_on_impl(outputStream* st) const {
  ttyLocker ttyl;
  RuntimeBlob::print_on_impl(st);
  st->print("Runtime Stub (" INTPTR_FORMAT "): ", p2i(this));
  st->print_cr("%s", name());
  Disassembler::decode((RuntimeBlob*)this, st);
}

void RuntimeStub::print_value_on_impl(outputStream* st) const {
  st->print("RuntimeStub (" INTPTR_FORMAT "): ", p2i(this));
  st->print("%s", name());
}

void SingletonBlob::print_on_impl(outputStream* st) const {
  ttyLocker ttyl;
  RuntimeBlob::print_on_impl(st);
  st->print_cr("%s", name());
  Disassembler::decode((RuntimeBlob*)this, st);
}

void SingletonBlob::print_value_on_impl(outputStream* st) const {
  st->print_cr("%s", name());
}

void DeoptimizationBlob::print_value_on_impl(outputStream* st) const {
  st->print_cr("Deoptimization (frame not available)");
}

void UpcallStub::print_on_impl(outputStream* st) const {
  RuntimeBlob::print_on_impl(st);
  print_value_on_impl(st);
  st->print_cr("Frame data offset: %d", (int) _frame_data_offset);
  oop recv = JNIHandles::resolve(_receiver);
  st->print("Receiver MH=");
  recv->print_on(st);
  Disassembler::decode((RuntimeBlob*)this, st);
}

void UpcallStub::print_value_on_impl(outputStream* st) const {
  st->print_cr("UpcallStub (" INTPTR_FORMAT ") used for %s", p2i(this), name());
}