1 /* 2 * Copyright (c) 1998, 2025, Oracle and/or its affiliates. All rights reserved. 3 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. 4 * 5 * This code is free software; you can redistribute it and/or modify it 6 * under the terms of the GNU General Public License version 2 only, as 7 * published by the Free Software Foundation. 8 * 9 * This code is distributed in the hope that it will be useful, but WITHOUT 10 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or 11 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License 12 * version 2 for more details (a copy is included in the LICENSE file that 13 * accompanied this code). 14 * 15 * You should have received a copy of the GNU General Public License version 16 * 2 along with this work; if not, write to the Free Software Foundation, 17 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. 18 * 19 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA 20 * or visit www.oracle.com if you need additional information or have any 21 * questions. 22 * 23 */ 24 25 #include "code/codeBlob.hpp" 26 #include "code/codeCache.hpp" 27 #include "code/relocInfo.hpp" 28 #include "code/vtableStubs.hpp" 29 #include "compiler/disassembler.hpp" 30 #include "compiler/oopMap.hpp" 31 #include "interpreter/bytecode.hpp" 32 #include "interpreter/interpreter.hpp" 33 #include "jvm.h" 34 #include "memory/allocation.inline.hpp" 35 #include "memory/heap.hpp" 36 #include "memory/resourceArea.hpp" 37 #include "oops/oop.inline.hpp" 38 #include "prims/forte.hpp" 39 #include "prims/jvmtiExport.hpp" 40 #include "runtime/handles.inline.hpp" 41 #include "runtime/interfaceSupport.inline.hpp" 42 #include "runtime/javaFrameAnchor.hpp" 43 #include "runtime/jniHandles.inline.hpp" 44 #include "runtime/mutexLocker.hpp" 45 #include "runtime/safepoint.hpp" 46 #include "runtime/sharedRuntime.hpp" 47 #include "runtime/stubCodeGenerator.hpp" 48 #include "runtime/stubRoutines.hpp" 49 #include "runtime/vframe.hpp" 50 #include "services/memoryService.hpp" 51 #include "utilities/align.hpp" 52 #ifdef COMPILER1 53 #include "c1/c1_Runtime1.hpp" 54 #endif 55 56 #include <type_traits> 57 58 // Virtual methods are not allowed in code blobs to simplify caching compiled code. 59 // Check all "leaf" subclasses of CodeBlob class. 60 61 static_assert(!std::is_polymorphic<nmethod>::value, "no virtual methods are allowed in nmethod"); 62 static_assert(!std::is_polymorphic<AdapterBlob>::value, "no virtual methods are allowed in code blobs"); 63 static_assert(!std::is_polymorphic<VtableBlob>::value, "no virtual methods are allowed in code blobs"); 64 static_assert(!std::is_polymorphic<MethodHandlesAdapterBlob>::value, "no virtual methods are allowed in code blobs"); 65 static_assert(!std::is_polymorphic<RuntimeStub>::value, "no virtual methods are allowed in code blobs"); 66 static_assert(!std::is_polymorphic<DeoptimizationBlob>::value, "no virtual methods are allowed in code blobs"); 67 static_assert(!std::is_polymorphic<SafepointBlob>::value, "no virtual methods are allowed in code blobs"); 68 static_assert(!std::is_polymorphic<UpcallStub>::value, "no virtual methods are allowed in code blobs"); 69 #ifdef COMPILER2 70 static_assert(!std::is_polymorphic<ExceptionBlob>::value, "no virtual methods are allowed in code blobs"); 71 static_assert(!std::is_polymorphic<UncommonTrapBlob>::value, "no virtual methods are allowed in code blobs"); 72 #endif 73 74 // Add proxy vtables. 75 // We need only few for now - they are used only from prints. 76 const nmethod::Vptr nmethod::_vpntr; 77 const BufferBlob::Vptr BufferBlob::_vpntr; 78 const RuntimeStub::Vptr RuntimeStub::_vpntr; 79 const SingletonBlob::Vptr SingletonBlob::_vpntr; 80 const DeoptimizationBlob::Vptr DeoptimizationBlob::_vpntr; 81 #ifdef COMPILER2 82 const ExceptionBlob::Vptr ExceptionBlob::_vpntr; 83 #endif // COMPILER2 84 const UpcallStub::Vptr UpcallStub::_vpntr; 85 86 const CodeBlob::Vptr* CodeBlob::vptr(CodeBlobKind kind) { 87 constexpr const CodeBlob::Vptr* array[(size_t)CodeBlobKind::Number_Of_Kinds] = { 88 nullptr/* None */, 89 &nmethod::_vpntr, 90 &BufferBlob::_vpntr, 91 &AdapterBlob::_vpntr, 92 &VtableBlob::_vpntr, 93 &MethodHandlesAdapterBlob::_vpntr, 94 &RuntimeStub::_vpntr, 95 &DeoptimizationBlob::_vpntr, 96 &SafepointBlob::_vpntr, 97 #ifdef COMPILER2 98 &ExceptionBlob::_vpntr, 99 &UncommonTrapBlob::_vpntr, 100 #endif 101 &UpcallStub::_vpntr 102 }; 103 104 return array[(size_t)kind]; 105 } 106 107 const CodeBlob::Vptr* CodeBlob::vptr() const { 108 return vptr(_kind); 109 } 110 111 unsigned int CodeBlob::align_code_offset(int offset) { 112 // align the size to CodeEntryAlignment 113 int header_size = (int)CodeHeap::header_size(); 114 return align_up(offset + header_size, CodeEntryAlignment) - header_size; 115 } 116 117 // This must be consistent with the CodeBlob constructor's layout actions. 118 unsigned int CodeBlob::allocation_size(CodeBuffer* cb, int header_size) { 119 // align the size to CodeEntryAlignment 120 unsigned int size = align_code_offset(header_size); 121 size += align_up(cb->total_content_size(), oopSize); 122 size += align_up(cb->total_oop_size(), oopSize); 123 return size; 124 } 125 126 CodeBlob::CodeBlob(const char* name, CodeBlobKind kind, CodeBuffer* cb, int size, uint16_t header_size, 127 int16_t frame_complete_offset, int frame_size, OopMapSet* oop_maps, bool caller_must_gc_arguments, 128 int mutable_data_size) : 129 _oop_maps(nullptr), // will be set by set_oop_maps() call 130 _name(name), 131 _mutable_data(header_begin() + size), // default value is blob_end() 132 _size(size), 133 _relocation_size(align_up(cb->total_relocation_size(), oopSize)), 134 _content_offset(CodeBlob::align_code_offset(header_size)), 135 _code_offset(_content_offset + cb->total_offset_of(cb->insts())), 136 _data_offset(_content_offset + align_up(cb->total_content_size(), oopSize)), 137 _frame_size(frame_size), 138 _mutable_data_size(mutable_data_size), 139 S390_ONLY(_ctable_offset(0) COMMA) 140 _header_size(header_size), 141 _frame_complete_offset(frame_complete_offset), 142 _kind(kind), 143 _caller_must_gc_arguments(caller_must_gc_arguments) 144 { 145 assert(is_aligned(_size, oopSize), "unaligned size"); 146 assert(is_aligned(header_size, oopSize), "unaligned size"); 147 assert(is_aligned(_relocation_size, oopSize), "unaligned size"); 148 assert(_data_offset <= _size, "codeBlob is too small: %d > %d", _data_offset, _size); 149 assert(is_nmethod() || (cb->total_oop_size() + cb->total_metadata_size() == 0), "must be nmethod"); 150 assert(code_end() == content_end(), "must be the same - see code_end()"); 151 #ifdef COMPILER1 152 // probably wrong for tiered 153 assert(_frame_size >= -1, "must use frame size or -1 for runtime stubs"); 154 #endif // COMPILER1 155 156 if (_mutable_data_size > 0) { 157 _mutable_data = (address)os::malloc(_mutable_data_size, mtCode); 158 if (_mutable_data == nullptr) { 159 vm_exit_out_of_memory(_mutable_data_size, OOM_MALLOC_ERROR, "codebuffer: no space for mutable data"); 160 } 161 } else { 162 // We need unique and valid not null address 163 assert(_mutable_data == blob_end(), "sanity"); 164 } 165 166 set_oop_maps(oop_maps); 167 } 168 169 // Simple CodeBlob used for simple BufferBlob. 170 CodeBlob::CodeBlob(const char* name, CodeBlobKind kind, int size, uint16_t header_size) : 171 _oop_maps(nullptr), 172 _name(name), 173 _mutable_data(header_begin() + size), // default value is blob_end() 174 _size(size), 175 _relocation_size(0), 176 _content_offset(CodeBlob::align_code_offset(header_size)), 177 _code_offset(_content_offset), 178 _data_offset(size), 179 _frame_size(0), 180 _mutable_data_size(0), 181 S390_ONLY(_ctable_offset(0) COMMA) 182 _header_size(header_size), 183 _frame_complete_offset(CodeOffsets::frame_never_safe), 184 _kind(kind), 185 _caller_must_gc_arguments(false) 186 { 187 assert(is_aligned(size, oopSize), "unaligned size"); 188 assert(is_aligned(header_size, oopSize), "unaligned size"); 189 assert(_mutable_data == blob_end(), "sanity"); 190 } 191 192 void CodeBlob::restore_mutable_data(address reloc_data) { 193 // Relocation data is now stored as part of the mutable data area; allocate it before copy relocations 194 if (_mutable_data_size > 0) { 195 _mutable_data = (address)os::malloc(_mutable_data_size, mtCode); 196 if (_mutable_data == nullptr) { 197 vm_exit_out_of_memory(_mutable_data_size, OOM_MALLOC_ERROR, "codebuffer: no space for mutable data"); 198 } 199 } else { 200 _mutable_data = blob_end(); // default value 201 } 202 if (_relocation_size > 0) { 203 assert(_mutable_data_size > 0, "relocation is part of mutable data section"); 204 memcpy((address)relocation_begin(), reloc_data, relocation_size()); 205 } 206 } 207 208 void CodeBlob::purge() { 209 assert(_mutable_data != nullptr, "should never be null"); 210 if (_mutable_data != blob_end()) { 211 os::free(_mutable_data); 212 _mutable_data = blob_end(); // Valid not null address 213 _mutable_data_size = 0; 214 _relocation_size = 0; 215 } 216 if (_oop_maps != nullptr) { 217 delete _oop_maps; 218 _oop_maps = nullptr; 219 } 220 NOT_PRODUCT(_asm_remarks.clear()); 221 NOT_PRODUCT(_dbg_strings.clear()); 222 } 223 224 void CodeBlob::set_oop_maps(OopMapSet* p) { 225 // Danger Will Robinson! This method allocates a big 226 // chunk of memory, its your job to free it. 227 if (p != nullptr) { 228 _oop_maps = ImmutableOopMapSet::build_from(p); 229 } else { 230 _oop_maps = nullptr; 231 } 232 } 233 234 const ImmutableOopMap* CodeBlob::oop_map_for_return_address(address return_address) const { 235 assert(_oop_maps != nullptr, "nope"); 236 return _oop_maps->find_map_at_offset((intptr_t) return_address - (intptr_t) code_begin()); 237 } 238 239 void CodeBlob::print_code_on(outputStream* st) { 240 ResourceMark m; 241 Disassembler::decode(this, st); 242 } 243 244 void CodeBlob::prepare_for_archiving_impl() { 245 set_name(nullptr); 246 _oop_maps = nullptr; 247 _mutable_data = nullptr; 248 #ifndef PRODUCT 249 asm_remarks().clear(); 250 dbg_strings().clear(); 251 #endif /* PRODUCT */ 252 } 253 254 void CodeBlob::prepare_for_archiving() { 255 vptr(_kind)->prepare_for_archiving(this); 256 } 257 258 void CodeBlob::archive_blob(CodeBlob* blob, address archive_buffer) { 259 blob->copy_to(archive_buffer); 260 CodeBlob* archived_blob = (CodeBlob*)archive_buffer; 261 archived_blob->prepare_for_archiving(); 262 } 263 264 void CodeBlob::post_restore_impl() { 265 // Track memory usage statistic after releasing CodeCache_lock 266 MemoryService::track_code_cache_memory_usage(); 267 } 268 269 void CodeBlob::post_restore() { 270 vptr(_kind)->post_restore(this); 271 } 272 273 CodeBlob* CodeBlob::restore(address code_cache_buffer, 274 const char* name, 275 address archived_reloc_data, 276 ImmutableOopMapSet* archived_oop_maps) 277 { 278 copy_to(code_cache_buffer); 279 CodeBlob* code_blob = (CodeBlob*)code_cache_buffer; 280 code_blob->set_name(name); 281 code_blob->restore_mutable_data(archived_reloc_data); 282 code_blob->set_oop_maps(archived_oop_maps); 283 return code_blob; 284 } 285 286 CodeBlob* CodeBlob::create(CodeBlob* archived_blob, 287 const char* name, 288 address archived_reloc_data, 289 ImmutableOopMapSet* archived_oop_maps 290 ) 291 { 292 ThreadInVMfromUnknown __tiv; // get to VM state in case we block on CodeCache_lock 293 294 CodeCache::gc_on_allocation(); 295 296 CodeBlob* blob = nullptr; 297 unsigned int size = archived_blob->size(); 298 { 299 MutexLocker mu(CodeCache_lock, Mutex::_no_safepoint_check_flag); 300 address code_cache_buffer = (address)CodeCache::allocate(size, CodeBlobType::NonNMethod); 301 if (code_cache_buffer != nullptr) { 302 blob = archived_blob->restore(code_cache_buffer, 303 name, 304 archived_reloc_data, 305 archived_oop_maps); 306 assert(blob != nullptr, "sanity check"); 307 308 // Flush the code block 309 ICache::invalidate_range(blob->code_begin(), blob->code_size()); 310 CodeCache::commit(blob); // Count adapters 311 } 312 } 313 if (blob != nullptr) { 314 blob->post_restore(); 315 } 316 return blob; 317 } 318 319 //----------------------------------------------------------------------------------------- 320 // Creates a RuntimeBlob from a CodeBuffer and copy code and relocation info. 321 322 RuntimeBlob::RuntimeBlob( 323 const char* name, 324 CodeBlobKind kind, 325 CodeBuffer* cb, 326 int size, 327 uint16_t header_size, 328 int16_t frame_complete, 329 int frame_size, 330 OopMapSet* oop_maps, 331 bool caller_must_gc_arguments) 332 : CodeBlob(name, kind, cb, size, header_size, frame_complete, frame_size, oop_maps, caller_must_gc_arguments, 333 align_up(cb->total_relocation_size(), oopSize)) 334 { 335 cb->copy_code_and_locs_to(this); 336 } 337 338 void RuntimeBlob::free(RuntimeBlob* blob) { 339 assert(blob != nullptr, "caller must check for nullptr"); 340 ThreadInVMfromUnknown __tiv; // get to VM state in case we block on CodeCache_lock 341 blob->purge(); 342 { 343 MutexLocker mu(CodeCache_lock, Mutex::_no_safepoint_check_flag); 344 CodeCache::free(blob); 345 } 346 // Track memory usage statistic after releasing CodeCache_lock 347 MemoryService::track_code_cache_memory_usage(); 348 } 349 350 void RuntimeBlob::trace_new_stub(RuntimeBlob* stub, const char* name1, const char* name2) { 351 // Do not hold the CodeCache lock during name formatting. 352 assert(!CodeCache_lock->owned_by_self(), "release CodeCache before registering the stub"); 353 354 if (stub != nullptr && (PrintStubCode || 355 Forte::is_enabled() || 356 JvmtiExport::should_post_dynamic_code_generated())) { 357 char stub_id[256]; 358 assert(strlen(name1) + strlen(name2) < sizeof(stub_id), ""); 359 jio_snprintf(stub_id, sizeof(stub_id), "%s%s", name1, name2); 360 if (PrintStubCode) { 361 ttyLocker ttyl; 362 tty->print_cr("- - - [BEGIN] - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -"); 363 tty->print_cr("Decoding %s " PTR_FORMAT " [" PTR_FORMAT ", " PTR_FORMAT "] (%d bytes)", 364 stub_id, p2i(stub), p2i(stub->code_begin()), p2i(stub->code_end()), stub->code_size()); 365 Disassembler::decode(stub->code_begin(), stub->code_end(), tty 366 NOT_PRODUCT(COMMA &stub->asm_remarks())); 367 if ((stub->oop_maps() != nullptr) && AbstractDisassembler::show_structs()) { 368 tty->print_cr("- - - [OOP MAPS]- - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -"); 369 stub->oop_maps()->print(); 370 } 371 tty->print_cr("- - - [END] - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -"); 372 tty->cr(); 373 } 374 if (Forte::is_enabled()) { 375 Forte::register_stub(stub_id, stub->code_begin(), stub->code_end()); 376 } 377 378 if (JvmtiExport::should_post_dynamic_code_generated()) { 379 const char* stub_name = name2; 380 if (name2[0] == '\0') stub_name = name1; 381 JvmtiExport::post_dynamic_code_generated(stub_name, stub->code_begin(), stub->code_end()); 382 } 383 } 384 385 // Track memory usage statistic after releasing CodeCache_lock 386 MemoryService::track_code_cache_memory_usage(); 387 } 388 389 //---------------------------------------------------------------------------------------------------- 390 // Implementation of BufferBlob 391 392 BufferBlob::BufferBlob(const char* name, CodeBlobKind kind, int size, uint16_t header_size) 393 : RuntimeBlob(name, kind, size, header_size) 394 {} 395 396 BufferBlob* BufferBlob::create(const char* name, uint buffer_size) { 397 ThreadInVMfromUnknown __tiv; // get to VM state in case we block on CodeCache_lock 398 399 BufferBlob* blob = nullptr; 400 unsigned int size = sizeof(BufferBlob); 401 // align the size to CodeEntryAlignment 402 size = CodeBlob::align_code_offset(size); 403 size += align_up(buffer_size, oopSize); 404 assert(name != nullptr, "must provide a name"); 405 { 406 MutexLocker mu(CodeCache_lock, Mutex::_no_safepoint_check_flag); 407 blob = new (size) BufferBlob(name, CodeBlobKind::Buffer, size); 408 } 409 // Track memory usage statistic after releasing CodeCache_lock 410 MemoryService::track_code_cache_memory_usage(); 411 412 return blob; 413 } 414 415 416 BufferBlob::BufferBlob(const char* name, CodeBlobKind kind, CodeBuffer* cb, int size, uint16_t header_size) 417 : RuntimeBlob(name, kind, cb, size, header_size, CodeOffsets::frame_never_safe, 0, nullptr) 418 {} 419 420 // Used by gtest 421 BufferBlob* BufferBlob::create(const char* name, CodeBuffer* cb) { 422 ThreadInVMfromUnknown __tiv; // get to VM state in case we block on CodeCache_lock 423 424 BufferBlob* blob = nullptr; 425 unsigned int size = CodeBlob::allocation_size(cb, sizeof(BufferBlob)); 426 assert(name != nullptr, "must provide a name"); 427 { 428 MutexLocker mu(CodeCache_lock, Mutex::_no_safepoint_check_flag); 429 blob = new (size) BufferBlob(name, CodeBlobKind::Buffer, cb, size); 430 } 431 // Track memory usage statistic after releasing CodeCache_lock 432 MemoryService::track_code_cache_memory_usage(); 433 434 return blob; 435 } 436 437 void* BufferBlob::operator new(size_t s, unsigned size) throw() { 438 return CodeCache::allocate(size, CodeBlobType::NonNMethod); 439 } 440 441 void BufferBlob::free(BufferBlob *blob) { 442 RuntimeBlob::free(blob); 443 } 444 445 446 //---------------------------------------------------------------------------------------------------- 447 // Implementation of AdapterBlob 448 449 AdapterBlob::AdapterBlob(int size, CodeBuffer* cb, int entry_offset[AdapterBlob::ENTRY_COUNT]) : 450 BufferBlob("I2C/C2I adapters", CodeBlobKind::Adapter, cb, size, sizeof(AdapterBlob)) { 451 assert(entry_offset[0] == 0, "sanity check"); 452 for (int i = 1; i < AdapterBlob::ENTRY_COUNT; i++) { 453 // The entry is within the adapter blob or unset. 454 assert((entry_offset[i] > 0 && entry_offset[i] < cb->insts()->size()) || 455 (entry_offset[i] == -1), 456 "invalid entry offset[%d] = 0x%x", i, entry_offset[i]); 457 } 458 _c2i_offset = entry_offset[1]; 459 _c2i_unverified_offset = entry_offset[2]; 460 _c2i_no_clinit_check_offset = entry_offset[3]; 461 CodeCache::commit(this); 462 } 463 464 AdapterBlob* AdapterBlob::create(CodeBuffer* cb, int entry_offset[AdapterBlob::ENTRY_COUNT]) { 465 ThreadInVMfromUnknown __tiv; // get to VM state in case we block on CodeCache_lock 466 467 CodeCache::gc_on_allocation(); 468 469 AdapterBlob* blob = nullptr; 470 unsigned int size = CodeBlob::allocation_size(cb, sizeof(AdapterBlob)); 471 { 472 MutexLocker mu(CodeCache_lock, Mutex::_no_safepoint_check_flag); 473 blob = new (size) AdapterBlob(size, cb, entry_offset); 474 } 475 // Track memory usage statistic after releasing CodeCache_lock 476 MemoryService::track_code_cache_memory_usage(); 477 478 return blob; 479 } 480 481 void AdapterBlob::get_offsets(int entry_offset[ENTRY_COUNT]) { 482 entry_offset[0] = 0; 483 entry_offset[1] = _c2i_offset; 484 entry_offset[2] = _c2i_unverified_offset; 485 entry_offset[3] = _c2i_no_clinit_check_offset; 486 } 487 488 //---------------------------------------------------------------------------------------------------- 489 // Implementation of VtableBlob 490 491 void* VtableBlob::operator new(size_t s, unsigned size) throw() { 492 // Handling of allocation failure stops compilation and prints a bunch of 493 // stuff, which requires unlocking the CodeCache_lock, so that the Compile_lock 494 // can be locked, and then re-locking the CodeCache_lock. That is not safe in 495 // this context as we hold the CompiledICLocker. So we just don't handle code 496 // cache exhaustion here; we leave that for a later allocation that does not 497 // hold the CompiledICLocker. 498 return CodeCache::allocate(size, CodeBlobType::NonNMethod, false /* handle_alloc_failure */); 499 } 500 501 VtableBlob::VtableBlob(const char* name, int size) : 502 BufferBlob(name, CodeBlobKind::Vtable, size) { 503 } 504 505 VtableBlob* VtableBlob::create(const char* name, int buffer_size) { 506 assert(JavaThread::current()->thread_state() == _thread_in_vm, "called with the wrong state"); 507 508 VtableBlob* blob = nullptr; 509 unsigned int size = sizeof(VtableBlob); 510 // align the size to CodeEntryAlignment 511 size = align_code_offset(size); 512 size += align_up(buffer_size, oopSize); 513 assert(name != nullptr, "must provide a name"); 514 { 515 if (!CodeCache_lock->try_lock()) { 516 // If we can't take the CodeCache_lock, then this is a bad time to perform the ongoing 517 // IC transition to megamorphic, for which this stub will be needed. It is better to 518 // bail out the transition, and wait for a more opportune moment. Not only is it not 519 // worth waiting for the lock blockingly for the megamorphic transition, it might 520 // also result in a deadlock to blockingly wait, when concurrent class unloading is 521 // performed. At this point in time, the CompiledICLocker is taken, so we are not 522 // allowed to blockingly wait for the CodeCache_lock, as these two locks are otherwise 523 // consistently taken in the opposite order. Bailing out results in an IC transition to 524 // the clean state instead, which will cause subsequent calls to retry the transitioning 525 // eventually. 526 return nullptr; 527 } 528 blob = new (size) VtableBlob(name, size); 529 CodeCache_lock->unlock(); 530 } 531 // Track memory usage statistic after releasing CodeCache_lock 532 MemoryService::track_code_cache_memory_usage(); 533 534 return blob; 535 } 536 537 //---------------------------------------------------------------------------------------------------- 538 // Implementation of MethodHandlesAdapterBlob 539 540 MethodHandlesAdapterBlob* MethodHandlesAdapterBlob::create(int buffer_size) { 541 ThreadInVMfromUnknown __tiv; // get to VM state in case we block on CodeCache_lock 542 543 MethodHandlesAdapterBlob* blob = nullptr; 544 unsigned int size = sizeof(MethodHandlesAdapterBlob); 545 // align the size to CodeEntryAlignment 546 size = CodeBlob::align_code_offset(size); 547 size += align_up(buffer_size, oopSize); 548 { 549 MutexLocker mu(CodeCache_lock, Mutex::_no_safepoint_check_flag); 550 blob = new (size) MethodHandlesAdapterBlob(size); 551 if (blob == nullptr) { 552 vm_exit_out_of_memory(size, OOM_MALLOC_ERROR, "CodeCache: no room for method handle adapter blob"); 553 } 554 } 555 // Track memory usage statistic after releasing CodeCache_lock 556 MemoryService::track_code_cache_memory_usage(); 557 558 return blob; 559 } 560 561 //---------------------------------------------------------------------------------------------------- 562 // Implementation of RuntimeStub 563 564 RuntimeStub::RuntimeStub( 565 const char* name, 566 CodeBuffer* cb, 567 int size, 568 int16_t frame_complete, 569 int frame_size, 570 OopMapSet* oop_maps, 571 bool caller_must_gc_arguments 572 ) 573 : RuntimeBlob(name, CodeBlobKind::RuntimeStub, cb, size, sizeof(RuntimeStub), 574 frame_complete, frame_size, oop_maps, caller_must_gc_arguments) 575 { 576 } 577 578 RuntimeStub* RuntimeStub::new_runtime_stub(const char* stub_name, 579 CodeBuffer* cb, 580 int16_t frame_complete, 581 int frame_size, 582 OopMapSet* oop_maps, 583 bool caller_must_gc_arguments, 584 bool alloc_fail_is_fatal) 585 { 586 RuntimeStub* stub = nullptr; 587 unsigned int size = CodeBlob::allocation_size(cb, sizeof(RuntimeStub)); 588 ThreadInVMfromUnknown __tiv; // get to VM state in case we block on CodeCache_lock 589 { 590 MutexLocker mu(CodeCache_lock, Mutex::_no_safepoint_check_flag); 591 stub = new (size) RuntimeStub(stub_name, cb, size, frame_complete, frame_size, oop_maps, caller_must_gc_arguments); 592 if (stub == nullptr) { 593 if (!alloc_fail_is_fatal) { 594 return nullptr; 595 } 596 fatal("Initial size of CodeCache is too small"); 597 } 598 } 599 600 trace_new_stub(stub, "RuntimeStub - ", stub_name); 601 602 return stub; 603 } 604 605 606 void* RuntimeStub::operator new(size_t s, unsigned size) throw() { 607 return CodeCache::allocate(size, CodeBlobType::NonNMethod); 608 } 609 610 // operator new shared by all singletons: 611 void* SingletonBlob::operator new(size_t s, unsigned size, bool alloc_fail_is_fatal) throw() { 612 void* p = CodeCache::allocate(size, CodeBlobType::NonNMethod); 613 if (alloc_fail_is_fatal && !p) fatal("Initial size of CodeCache is too small"); 614 return p; 615 } 616 617 618 //---------------------------------------------------------------------------------------------------- 619 // Implementation of DeoptimizationBlob 620 621 DeoptimizationBlob::DeoptimizationBlob( 622 CodeBuffer* cb, 623 int size, 624 OopMapSet* oop_maps, 625 int unpack_offset, 626 int unpack_with_exception_offset, 627 int unpack_with_reexecution_offset, 628 int frame_size 629 ) 630 : SingletonBlob("DeoptimizationBlob", CodeBlobKind::Deoptimization, cb, 631 size, sizeof(DeoptimizationBlob), frame_size, oop_maps) 632 { 633 _unpack_offset = unpack_offset; 634 _unpack_with_exception = unpack_with_exception_offset; 635 _unpack_with_reexecution = unpack_with_reexecution_offset; 636 #ifdef COMPILER1 637 _unpack_with_exception_in_tls = -1; 638 #endif 639 } 640 641 642 DeoptimizationBlob* DeoptimizationBlob::create( 643 CodeBuffer* cb, 644 OopMapSet* oop_maps, 645 int unpack_offset, 646 int unpack_with_exception_offset, 647 int unpack_with_reexecution_offset, 648 int frame_size) 649 { 650 DeoptimizationBlob* blob = nullptr; 651 unsigned int size = CodeBlob::allocation_size(cb, sizeof(DeoptimizationBlob)); 652 ThreadInVMfromUnknown __tiv; // get to VM state in case we block on CodeCache_lock 653 { 654 MutexLocker mu(CodeCache_lock, Mutex::_no_safepoint_check_flag); 655 blob = new (size) DeoptimizationBlob(cb, 656 size, 657 oop_maps, 658 unpack_offset, 659 unpack_with_exception_offset, 660 unpack_with_reexecution_offset, 661 frame_size); 662 } 663 664 trace_new_stub(blob, "DeoptimizationBlob"); 665 666 return blob; 667 } 668 669 #ifdef COMPILER2 670 671 //---------------------------------------------------------------------------------------------------- 672 // Implementation of UncommonTrapBlob 673 674 UncommonTrapBlob::UncommonTrapBlob( 675 CodeBuffer* cb, 676 int size, 677 OopMapSet* oop_maps, 678 int frame_size 679 ) 680 : SingletonBlob("UncommonTrapBlob", CodeBlobKind::UncommonTrap, cb, 681 size, sizeof(UncommonTrapBlob), frame_size, oop_maps) 682 {} 683 684 685 UncommonTrapBlob* UncommonTrapBlob::create( 686 CodeBuffer* cb, 687 OopMapSet* oop_maps, 688 int frame_size) 689 { 690 UncommonTrapBlob* blob = nullptr; 691 unsigned int size = CodeBlob::allocation_size(cb, sizeof(UncommonTrapBlob)); 692 ThreadInVMfromUnknown __tiv; // get to VM state in case we block on CodeCache_lock 693 { 694 MutexLocker mu(CodeCache_lock, Mutex::_no_safepoint_check_flag); 695 blob = new (size, false) UncommonTrapBlob(cb, size, oop_maps, frame_size); 696 } 697 698 trace_new_stub(blob, "UncommonTrapBlob"); 699 700 return blob; 701 } 702 703 //---------------------------------------------------------------------------------------------------- 704 // Implementation of ExceptionBlob 705 706 ExceptionBlob::ExceptionBlob( 707 CodeBuffer* cb, 708 int size, 709 OopMapSet* oop_maps, 710 int frame_size 711 ) 712 : SingletonBlob("ExceptionBlob", CodeBlobKind::Exception, cb, 713 size, sizeof(ExceptionBlob), frame_size, oop_maps) 714 {} 715 716 717 ExceptionBlob* ExceptionBlob::create( 718 CodeBuffer* cb, 719 OopMapSet* oop_maps, 720 int frame_size) 721 { 722 ExceptionBlob* blob = nullptr; 723 unsigned int size = CodeBlob::allocation_size(cb, sizeof(ExceptionBlob)); 724 ThreadInVMfromUnknown __tiv; // get to VM state in case we block on CodeCache_lock 725 { 726 MutexLocker mu(CodeCache_lock, Mutex::_no_safepoint_check_flag); 727 blob = new (size, false) ExceptionBlob(cb, size, oop_maps, frame_size); 728 } 729 730 trace_new_stub(blob, "ExceptionBlob"); 731 732 return blob; 733 } 734 735 #endif // COMPILER2 736 737 //---------------------------------------------------------------------------------------------------- 738 // Implementation of SafepointBlob 739 740 SafepointBlob::SafepointBlob( 741 CodeBuffer* cb, 742 int size, 743 OopMapSet* oop_maps, 744 int frame_size 745 ) 746 : SingletonBlob("SafepointBlob", CodeBlobKind::Safepoint, cb, 747 size, sizeof(SafepointBlob), frame_size, oop_maps) 748 {} 749 750 751 SafepointBlob* SafepointBlob::create( 752 CodeBuffer* cb, 753 OopMapSet* oop_maps, 754 int frame_size) 755 { 756 SafepointBlob* blob = nullptr; 757 unsigned int size = CodeBlob::allocation_size(cb, sizeof(SafepointBlob)); 758 ThreadInVMfromUnknown __tiv; // get to VM state in case we block on CodeCache_lock 759 { 760 MutexLocker mu(CodeCache_lock, Mutex::_no_safepoint_check_flag); 761 blob = new (size) SafepointBlob(cb, size, oop_maps, frame_size); 762 } 763 764 trace_new_stub(blob, "SafepointBlob"); 765 766 return blob; 767 } 768 769 //---------------------------------------------------------------------------------------------------- 770 // Implementation of UpcallStub 771 772 UpcallStub::UpcallStub(const char* name, CodeBuffer* cb, int size, jobject receiver, ByteSize frame_data_offset) : 773 RuntimeBlob(name, CodeBlobKind::Upcall, cb, size, sizeof(UpcallStub), 774 CodeOffsets::frame_never_safe, 0 /* no frame size */, 775 /* oop maps = */ nullptr, /* caller must gc arguments = */ false), 776 _receiver(receiver), 777 _frame_data_offset(frame_data_offset) 778 { 779 CodeCache::commit(this); 780 } 781 782 void* UpcallStub::operator new(size_t s, unsigned size) throw() { 783 return CodeCache::allocate(size, CodeBlobType::NonNMethod); 784 } 785 786 UpcallStub* UpcallStub::create(const char* name, CodeBuffer* cb, jobject receiver, ByteSize frame_data_offset) { 787 ThreadInVMfromUnknown __tiv; // get to VM state in case we block on CodeCache_lock 788 789 UpcallStub* blob = nullptr; 790 unsigned int size = CodeBlob::allocation_size(cb, sizeof(UpcallStub)); 791 { 792 MutexLocker mu(CodeCache_lock, Mutex::_no_safepoint_check_flag); 793 blob = new (size) UpcallStub(name, cb, size, receiver, frame_data_offset); 794 } 795 if (blob == nullptr) { 796 return nullptr; // caller must handle this 797 } 798 799 // Track memory usage statistic after releasing CodeCache_lock 800 MemoryService::track_code_cache_memory_usage(); 801 802 trace_new_stub(blob, "UpcallStub - ", name); 803 804 return blob; 805 } 806 807 void UpcallStub::oops_do(OopClosure* f, const frame& frame) { 808 frame_data_for_frame(frame)->old_handles->oops_do(f); 809 } 810 811 JavaFrameAnchor* UpcallStub::jfa_for_frame(const frame& frame) const { 812 return &frame_data_for_frame(frame)->jfa; 813 } 814 815 void UpcallStub::free(UpcallStub* blob) { 816 assert(blob != nullptr, "caller must check for nullptr"); 817 JNIHandles::destroy_global(blob->receiver()); 818 RuntimeBlob::free(blob); 819 } 820 821 //---------------------------------------------------------------------------------------------------- 822 // Verification and printing 823 824 void CodeBlob::verify() { 825 if (is_nmethod()) { 826 as_nmethod()->verify(); 827 } 828 } 829 830 void CodeBlob::print_on(outputStream* st) const { 831 vptr()->print_on(this, st); 832 } 833 834 void CodeBlob::print() const { print_on(tty); } 835 836 void CodeBlob::print_value_on(outputStream* st) const { 837 vptr()->print_value_on(this, st); 838 } 839 840 void CodeBlob::print_on_impl(outputStream* st) const { 841 st->print_cr("[CodeBlob kind:%d (" INTPTR_FORMAT ")]", (int)_kind, p2i(this)); 842 st->print_cr("Framesize: %d", _frame_size); 843 } 844 845 void CodeBlob::print_value_on_impl(outputStream* st) const { 846 st->print_cr("[CodeBlob]"); 847 } 848 849 void CodeBlob::print_block_comment(outputStream* stream, address block_begin) const { 850 #if defined(SUPPORT_ASSEMBLY) || defined(SUPPORT_ABSTRACT_ASSEMBLY) 851 if (is_nmethod()) { 852 as_nmethod()->print_nmethod_labels(stream, block_begin); 853 } 854 #endif 855 856 #ifndef PRODUCT 857 ptrdiff_t offset = block_begin - code_begin(); 858 assert(offset >= 0, "Expecting non-negative offset!"); 859 _asm_remarks.print(uint(offset), stream); 860 #endif 861 } 862 863 void CodeBlob::dump_for_addr(address addr, outputStream* st, bool verbose) const { 864 if (is_buffer_blob() || is_adapter_blob() || is_vtable_blob() || is_method_handles_adapter_blob()) { 865 // the interpreter is generated into a buffer blob 866 InterpreterCodelet* i = Interpreter::codelet_containing(addr); 867 if (i != nullptr) { 868 st->print_cr(INTPTR_FORMAT " is at code_begin+%d in an Interpreter codelet", p2i(addr), (int)(addr - i->code_begin())); 869 i->print_on(st); 870 return; 871 } 872 if (Interpreter::contains(addr)) { 873 st->print_cr(INTPTR_FORMAT " is pointing into interpreter code" 874 " (not bytecode specific)", p2i(addr)); 875 return; 876 } 877 // 878 if (AdapterHandlerLibrary::contains(this)) { 879 st->print_cr(INTPTR_FORMAT " is at code_begin+%d in an AdapterHandler", p2i(addr), (int)(addr - code_begin())); 880 AdapterHandlerLibrary::print_handler_on(st, this); 881 } 882 // the stubroutines are generated into a buffer blob 883 StubCodeDesc* d = StubCodeDesc::desc_for(addr); 884 if (d != nullptr) { 885 st->print_cr(INTPTR_FORMAT " is at begin+%d in a stub", p2i(addr), (int)(addr - d->begin())); 886 d->print_on(st); 887 st->cr(); 888 return; 889 } 890 if (StubRoutines::contains(addr)) { 891 st->print_cr(INTPTR_FORMAT " is pointing to an (unnamed) stub routine", p2i(addr)); 892 return; 893 } 894 VtableStub* v = VtableStubs::stub_containing(addr); 895 if (v != nullptr) { 896 st->print_cr(INTPTR_FORMAT " is at entry_point+%d in a vtable stub", p2i(addr), (int)(addr - v->entry_point())); 897 v->print_on(st); 898 st->cr(); 899 return; 900 } 901 } 902 if (is_nmethod()) { 903 nmethod* nm = (nmethod*)this; 904 ResourceMark rm; 905 st->print(INTPTR_FORMAT " is at entry_point+%d in (nmethod*)" INTPTR_FORMAT, 906 p2i(addr), (int)(addr - nm->entry_point()), p2i(nm)); 907 if (verbose) { 908 st->print(" for "); 909 nm->method()->print_value_on(st); 910 } 911 st->cr(); 912 if (verbose && st == tty) { 913 // verbose is only ever true when called from findpc in debug.cpp 914 nm->print_nmethod(true); 915 } else { 916 nm->print_on(st); 917 } 918 return; 919 } 920 st->print_cr(INTPTR_FORMAT " is at code_begin+%d in ", p2i(addr), (int)(addr - code_begin())); 921 print_on(st); 922 } 923 924 void BufferBlob::print_on_impl(outputStream* st) const { 925 RuntimeBlob::print_on_impl(st); 926 print_value_on_impl(st); 927 } 928 929 void BufferBlob::print_value_on_impl(outputStream* st) const { 930 st->print_cr("BufferBlob (" INTPTR_FORMAT ") used for %s", p2i(this), name()); 931 } 932 933 void RuntimeStub::print_on_impl(outputStream* st) const { 934 ttyLocker ttyl; 935 RuntimeBlob::print_on_impl(st); 936 st->print("Runtime Stub (" INTPTR_FORMAT "): ", p2i(this)); 937 st->print_cr("%s", name()); 938 Disassembler::decode((RuntimeBlob*)this, st); 939 } 940 941 void RuntimeStub::print_value_on_impl(outputStream* st) const { 942 st->print("RuntimeStub (" INTPTR_FORMAT "): ", p2i(this)); st->print("%s", name()); 943 } 944 945 void SingletonBlob::print_on_impl(outputStream* st) const { 946 ttyLocker ttyl; 947 RuntimeBlob::print_on_impl(st); 948 st->print_cr("%s", name()); 949 Disassembler::decode((RuntimeBlob*)this, st); 950 } 951 952 void SingletonBlob::print_value_on_impl(outputStream* st) const { 953 st->print_cr("%s", name()); 954 } 955 956 void DeoptimizationBlob::print_value_on_impl(outputStream* st) const { 957 st->print_cr("Deoptimization (frame not available)"); 958 } 959 960 void UpcallStub::print_on_impl(outputStream* st) const { 961 RuntimeBlob::print_on_impl(st); 962 print_value_on_impl(st); 963 st->print_cr("Frame data offset: %d", (int) _frame_data_offset); 964 oop recv = JNIHandles::resolve(_receiver); 965 st->print("Receiver MH="); 966 recv->print_on(st); 967 Disassembler::decode((RuntimeBlob*)this, st); 968 } 969 970 void UpcallStub::print_value_on_impl(outputStream* st) const { 971 st->print_cr("UpcallStub (" INTPTR_FORMAT ") used for %s", p2i(this), name()); 972 }