1 /* 2 * Copyright (c) 1998, 2024, Oracle and/or its affiliates. All rights reserved. 3 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. 4 * 5 * This code is free software; you can redistribute it and/or modify it 6 * under the terms of the GNU General Public License version 2 only, as 7 * published by the Free Software Foundation. 8 * 9 * This code is distributed in the hope that it will be useful, but WITHOUT 10 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or 11 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License 12 * version 2 for more details (a copy is included in the LICENSE file that 13 * accompanied this code). 14 * 15 * You should have received a copy of the GNU General Public License version 16 * 2 along with this work; if not, write to the Free Software Foundation, 17 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. 18 * 19 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA 20 * or visit www.oracle.com if you need additional information or have any 21 * questions. 
 *
 */

#include "precompiled.hpp"
#include "code/codeBlob.hpp"
#include "code/codeCache.hpp"
#include "code/relocInfo.hpp"
#include "code/vtableStubs.hpp"
#include "compiler/disassembler.hpp"
#include "compiler/oopMap.hpp"
#include "interpreter/bytecode.hpp"
#include "interpreter/interpreter.hpp"
#include "jvm.h"
#include "memory/allocation.inline.hpp"
#include "memory/heap.hpp"
#include "memory/resourceArea.hpp"
#include "oops/oop.inline.hpp"
#include "prims/forte.hpp"
#include "prims/jvmtiExport.hpp"
#include "runtime/handles.inline.hpp"
#include "runtime/interfaceSupport.inline.hpp"
#include "runtime/javaFrameAnchor.hpp"
#include "runtime/jniHandles.hpp"
#include "runtime/mutexLocker.hpp"
#include "runtime/safepoint.hpp"
#include "runtime/sharedRuntime.hpp"
#include "runtime/stubCodeGenerator.hpp"
#include "runtime/stubRoutines.hpp"
#include "runtime/vframe.hpp"
#include "services/memoryService.hpp"
#include "utilities/align.hpp"
#ifdef COMPILER1
#include "c1/c1_Runtime1.hpp"
#endif

// Human-readable name of the compiler that produced this blob,
// derived from the stored CompilerType.
const char* CodeBlob::compiler_name() const {
  return compilertype2name(_type);
}

// Adjusts 'offset' so that the code following the CodeHeap block header is
// aligned to CodeEntryAlignment. The header size is added before rounding up
// and subtracted afterwards, so the alignment holds for the code as it will
// actually be placed in the CodeHeap.
unsigned int CodeBlob::align_code_offset(int offset) {
  // align the size to CodeEntryAlignment
  int header_size = (int)CodeHeap::header_size();
  return align_up(offset + header_size, CodeEntryAlignment) - header_size;
}


// This must be consistent with the CodeBlob constructor's layout actions.
unsigned int CodeBlob::allocation_size(CodeBuffer* cb, int header_size) {
  // Total = header + relocation info + (CodeEntryAlignment-aligned) content
  //         + oops + metadata, each section padded to oopSize.
  unsigned int size = header_size;
  size += align_up(cb->total_relocation_size(), oopSize);
  // align the size to CodeEntryAlignment
  size = align_code_offset(size);
  size += align_up(cb->total_content_size(), oopSize);
  size += align_up(cb->total_oop_size(), oopSize);
  size += align_up(cb->total_metadata_size(), oopSize);
  return size;
}

// Constructor variant that receives an already-built ImmutableOopMapSet and
// adopts the pointer directly (no conversion from a mutable OopMapSet here).
// All begin/end/offset fields are taken from the precomputed layout; the
// asserts below check the layout's alignment invariants.
CodeBlob::CodeBlob(const char* name, CompilerType type, const CodeBlobLayout& layout, int frame_complete_offset, int frame_size, ImmutableOopMapSet* oop_maps, bool caller_must_gc_arguments, bool compiled) :
  _code_begin(layout.code_begin()),
  _code_end(layout.code_end()),
  _content_begin(layout.content_begin()),
  _data_end(layout.data_end()),
  _relocation_begin(layout.relocation_begin()),
  _relocation_end(layout.relocation_end()),
  _oop_maps(oop_maps),
  _name(name),
  _size(layout.size()),
  _header_size(layout.header_size()),
  _frame_complete_offset(frame_complete_offset),
  _data_offset(layout.data_offset()),
  _frame_size(frame_size),
  _caller_must_gc_arguments(caller_must_gc_arguments),
  _is_compiled(compiled),
  _type(type)
{
  assert(is_aligned(layout.size(), oopSize), "unaligned size");
  assert(is_aligned(layout.header_size(), oopSize), "unaligned size");
  assert(is_aligned(layout.relocation_size(), oopSize), "unaligned size");
  assert(layout.code_end() == layout.content_end(), "must be the same - see code_end()");
#ifdef COMPILER1
  // probably wrong for tiered
  assert(_frame_size >= -1, "must use frame size or -1 for runtime stubs");
#endif // COMPILER1
  S390_ONLY(_ctable_offset = 0;) // avoid uninitialized fields
}

// Constructor variant that receives a mutable OopMapSet and converts it to an
// immutable set via set_oop_maps(). The CodeBuffer parameter is unused here
// (see the UNUSED marker); the layout has already been derived from it.
CodeBlob::CodeBlob(const char* name, CompilerType type, const CodeBlobLayout& layout, CodeBuffer* cb /*UNUSED*/, int frame_complete_offset, int frame_size, OopMapSet* oop_maps, bool caller_must_gc_arguments, bool compiled) :
  _code_begin(layout.code_begin()),
  _code_end(layout.code_end()),
  _content_begin(layout.content_begin()),
  _data_end(layout.data_end()),
  _relocation_begin(layout.relocation_begin()),
  _relocation_end(layout.relocation_end()),
  _name(name),
  _size(layout.size()),
  _header_size(layout.header_size()),
  _frame_complete_offset(frame_complete_offset),
  _data_offset(layout.data_offset()),
  _frame_size(frame_size),
  _caller_must_gc_arguments(caller_must_gc_arguments),
  _is_compiled(compiled),
  _type(type)
{
  assert(is_aligned(_size, oopSize), "unaligned size");
  assert(is_aligned(_header_size, oopSize), "unaligned size");
  assert(_data_offset <= _size, "codeBlob is too small");
  assert(layout.code_end() == layout.content_end(), "must be the same - see code_end()");

  set_oop_maps(oop_maps);
#ifdef COMPILER1
  // probably wrong for tiered
  assert(_frame_size >= -1, "must use frame size or -1 for runtime stubs");
#endif // COMPILER1
  S390_ONLY(_ctable_offset = 0;) // avoid uninitialized fields
}


// Creates a simple CodeBlob. Sets up the size of the different regions.
RuntimeBlob::RuntimeBlob(const char* name, int header_size, int size, int frame_complete, int locs_size)
  : CodeBlob(name, compiler_none, CodeBlobLayout((address) this, size, header_size, locs_size, size), frame_complete, 0, nullptr, false /* caller_must_gc_arguments */)
{
  assert(is_aligned(locs_size, oopSize), "unaligned size");
}


// Creates a RuntimeBlob from a CodeBuffer
// and copy code and relocation info.
RuntimeBlob::RuntimeBlob(
  const char* name,
  CodeBuffer* cb,
  int header_size,
  int size,
  int frame_complete,
  int frame_size,
  OopMapSet* oop_maps,
  bool caller_must_gc_arguments
) : CodeBlob(name, compiler_none, CodeBlobLayout((address) this, size, header_size, cb), cb, frame_complete, frame_size, oop_maps, caller_must_gc_arguments) {
  // Copy the generated code and relocation records from the CodeBuffer into
  // this blob's freshly laid-out sections.
  cb->copy_code_and_locs_to(this);
}

// Purges 'blob' and returns its storage to the CodeCache.
// The transition to _thread_in_vm must happen before taking CodeCache_lock.
void RuntimeBlob::free(RuntimeBlob* blob) {
  assert(blob != nullptr, "caller must check for nullptr");
  ThreadInVMfromUnknown __tiv;  // get to VM state in case we block on CodeCache_lock
  blob->purge(true /* free_code_cache_data */, true /* unregister_nmethod */);
  {
    MutexLocker mu(CodeCache_lock, Mutex::_no_safepoint_check_flag);
    CodeCache::free(blob);
  }
  // Track memory usage statistic after releasing CodeCache_lock
  MemoryService::track_code_cache_memory_usage();
}

// Releases resources owned by this blob: the oop map set and (non-product
// builds only) assembly remarks and debug strings. The two flag parameters
// are not consulted at this level; presumably they matter to overriding
// subclasses — TODO(review): confirm against the overrides.
void CodeBlob::purge(bool free_code_cache_data, bool unregister_nmethod) {
  if (_oop_maps != nullptr) {
    delete _oop_maps;
    _oop_maps = nullptr;
  }
  NOT_PRODUCT(_asm_remarks.clear());
  NOT_PRODUCT(_dbg_strings.clear());
}

// Converts a mutable OopMapSet into this blob's immutable oop map set
// (or clears it when 'p' is null).
void CodeBlob::set_oop_maps(OopMapSet* p) {
  // Danger Will Robinson! This method allocates a big
  // chunk of memory, its your job to free it.
  if (p != nullptr) {
    _oop_maps = ImmutableOopMapSet::build_from(p);
  } else {
    _oop_maps = nullptr;
  }
}

// Announces a freshly generated stub to the consumers interested in new code:
// -XX:+PrintStubCode disassembly, Forte stub registration, and JVMTI
// DynamicCodeGenerated events. The stub id is name1 concatenated with name2.
void RuntimeBlob::trace_new_stub(RuntimeBlob* stub, const char* name1, const char* name2) {
  // Do not hold the CodeCache lock during name formatting.
  assert(!CodeCache_lock->owned_by_self(), "release CodeCache before registering the stub");

  if (stub != nullptr && (PrintStubCode ||
                          Forte::is_enabled() ||
                          JvmtiExport::should_post_dynamic_code_generated())) {
    char stub_id[256];
    assert(strlen(name1) + strlen(name2) < sizeof(stub_id), "");
    jio_snprintf(stub_id, sizeof(stub_id), "%s%s", name1, name2);
    if (PrintStubCode) {
      ttyLocker ttyl;
      tty->print_cr("- - - [BEGIN] - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -");
      tty->print_cr("Decoding %s " PTR_FORMAT " [" PTR_FORMAT ", " PTR_FORMAT "] (%d bytes)",
                    stub_id, p2i(stub), p2i(stub->code_begin()), p2i(stub->code_end()), stub->code_size());
      Disassembler::decode(stub->code_begin(), stub->code_end(), tty
                           NOT_PRODUCT(COMMA &stub->asm_remarks()));
      if ((stub->oop_maps() != nullptr) && AbstractDisassembler::show_structs()) {
        tty->print_cr("- - - [OOP MAPS]- - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -");
        stub->oop_maps()->print();
      }
      tty->print_cr("- - - [END] - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -");
      tty->cr();
    }
    if (Forte::is_enabled()) {
      Forte::register_stub(stub_id, stub->code_begin(), stub->code_end());
    }

    if (JvmtiExport::should_post_dynamic_code_generated()) {
      // Prefer name2 as the event's stub name; fall back to name1 when
      // name2 is empty.
      const char* stub_name = name2;
      if (name2[0] == '\0') stub_name = name1;
      JvmtiExport::post_dynamic_code_generated(stub_name, stub->code_begin(), stub->code_end());
    }
  }

  // Track memory usage statistic after releasing CodeCache_lock
  MemoryService::track_code_cache_memory_usage();
}

// Looks up the oop map describing the frame whose return address is
// 'return_address'; the address must lie within this blob's code section.
const ImmutableOopMap* CodeBlob::oop_map_for_return_address(address return_address) const {
  assert(_oop_maps != nullptr, "nope");
  return _oop_maps->find_map_at_offset((intptr_t) return_address - (intptr_t) code_begin());
}

// Disassembles this blob's code onto 'st'.
void CodeBlob::print_code_on(outputStream* st) {
  ResourceMark m;
  Disassembler::decode(this, st);
}

//----------------------------------------------------------------------------------------------------
// Implementation of BufferBlob


BufferBlob::BufferBlob(const char* name, int size)
: RuntimeBlob(name, sizeof(BufferBlob), size, CodeOffsets::frame_never_safe, /*locs_size:*/ 0)
{}

// Allocates an empty BufferBlob with 'buffer_size' usable bytes in the
// non-nmethod code heap.
BufferBlob* BufferBlob::create(const char* name, uint buffer_size) {
  ThreadInVMfromUnknown __tiv;  // get to VM state in case we block on CodeCache_lock

  BufferBlob* blob = nullptr;
  unsigned int size = sizeof(BufferBlob);
  // align the size to CodeEntryAlignment
  size = CodeBlob::align_code_offset(size);
  size += align_up(buffer_size, oopSize);
  assert(name != nullptr, "must provide a name");
  {
    MutexLocker mu(CodeCache_lock, Mutex::_no_safepoint_check_flag);
    blob = new (size) BufferBlob(name, size);
  }
  // Track memory usage statistic after releasing CodeCache_lock
  MemoryService::track_code_cache_memory_usage();

  return blob;
}


BufferBlob::BufferBlob(const char* name, int header_size, int size, CodeBuffer* cb)
  : RuntimeBlob(name, cb, header_size, size, CodeOffsets::frame_never_safe, 0, nullptr)
{}

// Allocates a BufferBlob sized for 'cb' and copies the buffer's code and
// relocation info into it (done by the RuntimeBlob constructor).
BufferBlob* BufferBlob::create(const char* name, CodeBuffer* cb) {
  ThreadInVMfromUnknown __tiv;  // get to VM state in case we block on CodeCache_lock

  BufferBlob* blob = nullptr;
  unsigned int size = CodeBlob::allocation_size(cb, sizeof(BufferBlob));
  assert(name != nullptr, "must provide a name");
  {
    MutexLocker mu(CodeCache_lock, Mutex::_no_safepoint_check_flag);
    blob = new (size) BufferBlob(name, sizeof(BufferBlob), size, cb);
  }
  // Track memory usage statistic after releasing CodeCache_lock
  MemoryService::track_code_cache_memory_usage();

  return blob;
}

// BufferBlobs are always placed in the non-nmethod code heap.
void* BufferBlob::operator new(size_t s, unsigned size) throw() {
  return CodeCache::allocate(size, CodeBlobType::NonNMethod);
}

void BufferBlob::free(BufferBlob *blob) {
  RuntimeBlob::free(blob);
}

BufferBlob::BufferBlob(const char* name, int size, CodeBuffer* cb, int frame_complete, int frame_size, OopMapSet* oop_maps, bool caller_must_gc_arguments)
  : RuntimeBlob(name, cb, sizeof(BufferBlob), size, frame_complete, frame_size, oop_maps, caller_must_gc_arguments)
{}


//----------------------------------------------------------------------------------------------------
// Implementation of AdapterBlob

AdapterBlob::AdapterBlob(int size, CodeBuffer* cb, int frame_complete, int frame_size, OopMapSet* oop_maps, bool caller_must_gc_arguments) :
  BufferBlob("I2C/C2I adapters", size, cb, frame_complete, frame_size, oop_maps, caller_must_gc_arguments) {
  CodeCache::commit(this);
}

AdapterBlob* AdapterBlob::create(CodeBuffer* cb, int frame_complete, int frame_size, OopMapSet* oop_maps, bool caller_must_gc_arguments) {
  ThreadInVMfromUnknown __tiv;  // get to VM state in case we block on CodeCache_lock

  // Give the GC a chance to reclaim code cache space before allocating.
  CodeCache::gc_on_allocation();

  AdapterBlob* blob = nullptr;
  unsigned int size = CodeBlob::allocation_size(cb, sizeof(AdapterBlob));
  {
    MutexLocker mu(CodeCache_lock, Mutex::_no_safepoint_check_flag);
    blob = new (size) AdapterBlob(size, cb, frame_complete, frame_size, oop_maps, caller_must_gc_arguments);
  }
  // Track memory usage statistic after releasing CodeCache_lock
  MemoryService::track_code_cache_memory_usage();

  return blob;
}

void* VtableBlob::operator new(size_t s, unsigned size) throw() {
  // Handling of allocation failure stops compilation and prints a bunch of
  // stuff, which requires unlocking the CodeCache_lock, so that the Compile_lock
  // can be locked, and then re-locking the CodeCache_lock. That is not safe in
  // this context as we hold the CompiledICLocker. So we just don't handle code
  // cache exhaustion here; we leave that for a later allocation that does not
  // hold the CompiledICLocker.
  return CodeCache::allocate(size, CodeBlobType::NonNMethod, false /* handle_alloc_failure */);
}

VtableBlob::VtableBlob(const char* name, int size) :
  BufferBlob(name, size) {
}

// Allocates a VtableBlob without ever blocking on CodeCache_lock; returns
// nullptr if the lock cannot be acquired immediately (rationale below).
VtableBlob* VtableBlob::create(const char* name, int buffer_size) {
  assert(JavaThread::current()->thread_state() == _thread_in_vm, "called with the wrong state");

  VtableBlob* blob = nullptr;
  unsigned int size = sizeof(VtableBlob);
  // align the size to CodeEntryAlignment
  size = align_code_offset(size);
  size += align_up(buffer_size, oopSize);
  assert(name != nullptr, "must provide a name");
  {
    if (!CodeCache_lock->try_lock()) {
      // If we can't take the CodeCache_lock, then this is a bad time to perform the ongoing
      // IC transition to megamorphic, for which this stub will be needed. It is better to
      // bail out the transition, and wait for a more opportune moment. Not only is it not
      // worth waiting for the lock blockingly for the megamorphic transition, it might
      // also result in a deadlock to blockingly wait, when concurrent class unloading is
      // performed. At this point in time, the CompiledICLocker is taken, so we are not
      // allowed to blockingly wait for the CodeCache_lock, as these two locks are otherwise
      // consistently taken in the opposite order. Bailing out results in an IC transition to
      // the clean state instead, which will cause subsequent calls to retry the transitioning
      // eventually.
      return nullptr;
    }
    blob = new (size) VtableBlob(name, size);
    CodeCache_lock->unlock();
  }
  // Track memory usage statistic after releasing CodeCache_lock
  MemoryService::track_code_cache_memory_usage();

  return blob;
}

//----------------------------------------------------------------------------------------------------
// Implementation of MethodHandlesAdapterBlob

MethodHandlesAdapterBlob* MethodHandlesAdapterBlob::create(int buffer_size) {
  ThreadInVMfromUnknown __tiv;  // get to VM state in case we block on CodeCache_lock

  MethodHandlesAdapterBlob* blob = nullptr;
  unsigned int size = sizeof(MethodHandlesAdapterBlob);
  // align the size to CodeEntryAlignment
  size = CodeBlob::align_code_offset(size);
  size += align_up(buffer_size, oopSize);
  {
    MutexLocker mu(CodeCache_lock, Mutex::_no_safepoint_check_flag);
    blob = new (size) MethodHandlesAdapterBlob(size);
    if (blob == nullptr) {
      // Allocation failure is fatal for this blob type.
      vm_exit_out_of_memory(size, OOM_MALLOC_ERROR, "CodeCache: no room for method handle adapter blob");
    }
  }
  // Track memory usage statistic after releasing CodeCache_lock
  MemoryService::track_code_cache_memory_usage();

  return blob;
}

//----------------------------------------------------------------------------------------------------
// Implementation of BufferedInlineTypeBlob
BufferedInlineTypeBlob::BufferedInlineTypeBlob(int size, CodeBuffer* cb, int pack_fields_off, int pack_fields_jobject_off, int unpack_fields_off) :
  BufferBlob("buffered inline type", sizeof(BufferedInlineTypeBlob), size, cb),
  _pack_fields_off(pack_fields_off),
  _pack_fields_jobject_off(pack_fields_jobject_off),
  _unpack_fields_off(unpack_fields_off) {
  CodeCache::commit(this);
}

BufferedInlineTypeBlob* BufferedInlineTypeBlob::create(CodeBuffer* cb, int pack_fields_off, int pack_fields_jobject_off, int unpack_fields_off) {
  ThreadInVMfromUnknown __tiv;  // get to VM state in case we block on CodeCache_lock

  BufferedInlineTypeBlob* blob = nullptr;
  unsigned int size = CodeBlob::allocation_size(cb, sizeof(BufferedInlineTypeBlob));
  {
    MutexLocker mu(CodeCache_lock, Mutex::_no_safepoint_check_flag);
    blob = new (size) BufferedInlineTypeBlob(size, cb, pack_fields_off, pack_fields_jobject_off, unpack_fields_off);
  }
  // Track memory usage statistic after releasing CodeCache_lock
  MemoryService::track_code_cache_memory_usage();

  return blob;
}

//----------------------------------------------------------------------------------------------------
// Implementation of RuntimeStub

RuntimeStub::RuntimeStub(
  const char* name,
  CodeBuffer* cb,
  int size,
  int frame_complete,
  int frame_size,
  OopMapSet* oop_maps,
  bool caller_must_gc_arguments
)
: RuntimeBlob(name, cb, sizeof(RuntimeStub), size, frame_complete, frame_size, oop_maps, caller_must_gc_arguments)
{
}

// Allocates a RuntimeStub for 'cb'. On allocation failure either returns
// nullptr or aborts the VM, depending on 'alloc_fail_is_fatal'.
RuntimeStub* RuntimeStub::new_runtime_stub(const char* stub_name,
                                           CodeBuffer* cb,
                                           int frame_complete,
                                           int frame_size,
                                           OopMapSet* oop_maps,
                                           bool caller_must_gc_arguments,
                                           bool alloc_fail_is_fatal)
{
  RuntimeStub* stub = nullptr;
  unsigned int size = CodeBlob::allocation_size(cb, sizeof(RuntimeStub));
  ThreadInVMfromUnknown __tiv;  // get to VM state in case we block on CodeCache_lock
  {
    MutexLocker mu(CodeCache_lock, Mutex::_no_safepoint_check_flag);
    stub = new (size) RuntimeStub(stub_name, cb, size, frame_complete, frame_size, oop_maps, caller_must_gc_arguments);
    if (stub == nullptr) {
      if (!alloc_fail_is_fatal) {
        return nullptr;
      }
      fatal("Initial size of CodeCache is too small");
    }
  }

  trace_new_stub(stub, "RuntimeStub - ", stub_name);

  return stub;
}


void* RuntimeStub::operator new(size_t s, unsigned size) throw() {
  return CodeCache::allocate(size, CodeBlobType::NonNMethod);
}

// operator new shared by all singletons:
void* SingletonBlob::operator new(size_t s, unsigned size) throw() {
  void* p = CodeCache::allocate(size, CodeBlobType::NonNMethod);
  if (!p) fatal("Initial size of CodeCache is too small");
  return p;
}


//----------------------------------------------------------------------------------------------------
// Implementation of DeoptimizationBlob

DeoptimizationBlob::DeoptimizationBlob(
  CodeBuffer* cb,
  int size,
  OopMapSet* oop_maps,
  int unpack_offset,
  int unpack_with_exception_offset,
  int unpack_with_reexecution_offset,
  int frame_size
)
: SingletonBlob("DeoptimizationBlob", cb, sizeof(DeoptimizationBlob), size, frame_size, oop_maps)
{
  // Record the entry-point offsets of the blob's different unpack paths.
  _unpack_offset = unpack_offset;
  _unpack_with_exception = unpack_with_exception_offset;
  _unpack_with_reexecution = unpack_with_reexecution_offset;
#ifdef COMPILER1
  _unpack_with_exception_in_tls = -1;
#endif
}


DeoptimizationBlob* DeoptimizationBlob::create(
  CodeBuffer* cb,
  OopMapSet* oop_maps,
  int unpack_offset,
  int unpack_with_exception_offset,
  int unpack_with_reexecution_offset,
  int frame_size)
{
  DeoptimizationBlob* blob = nullptr;
  unsigned int size = CodeBlob::allocation_size(cb, sizeof(DeoptimizationBlob));
  ThreadInVMfromUnknown __tiv;  // get to VM state in case we block on CodeCache_lock
  {
    MutexLocker mu(CodeCache_lock, Mutex::_no_safepoint_check_flag);
    blob = new (size) DeoptimizationBlob(cb,
                                         size,
                                         oop_maps,
                                         unpack_offset,
                                         unpack_with_exception_offset,
                                         unpack_with_reexecution_offset,
                                         frame_size);
  }

  trace_new_stub(blob, "DeoptimizationBlob");

  return blob;
}


//----------------------------------------------------------------------------------------------------
// Implementation of UncommonTrapBlob

#ifdef COMPILER2
UncommonTrapBlob::UncommonTrapBlob(
  CodeBuffer* cb,
  int size,
  OopMapSet* oop_maps,
  int frame_size
)
: SingletonBlob("UncommonTrapBlob", cb, sizeof(UncommonTrapBlob), size, frame_size, oop_maps)
{}


UncommonTrapBlob* UncommonTrapBlob::create(
  CodeBuffer* cb,
  OopMapSet* oop_maps,
  int frame_size)
{
  UncommonTrapBlob* blob = nullptr;
  unsigned int size = CodeBlob::allocation_size(cb, sizeof(UncommonTrapBlob));
  ThreadInVMfromUnknown __tiv;  // get to VM state in case we block on CodeCache_lock
  {
    MutexLocker mu(CodeCache_lock, Mutex::_no_safepoint_check_flag);
    blob = new (size) UncommonTrapBlob(cb, size, oop_maps, frame_size);
  }

  trace_new_stub(blob, "UncommonTrapBlob");

  return blob;
}


#endif // COMPILER2


//----------------------------------------------------------------------------------------------------
// Implementation of ExceptionBlob

#ifdef COMPILER2
ExceptionBlob::ExceptionBlob(
  CodeBuffer* cb,
  int size,
  OopMapSet* oop_maps,
  int frame_size
)
: SingletonBlob("ExceptionBlob", cb, sizeof(ExceptionBlob), size, frame_size, oop_maps)
{}


ExceptionBlob* ExceptionBlob::create(
  CodeBuffer* cb,
  OopMapSet* oop_maps,
  int frame_size)
{
  ExceptionBlob* blob = nullptr;
  unsigned int size = CodeBlob::allocation_size(cb, sizeof(ExceptionBlob));
  ThreadInVMfromUnknown __tiv;  // get to VM state in case we block on CodeCache_lock
  {
    MutexLocker mu(CodeCache_lock, Mutex::_no_safepoint_check_flag);
    blob = new (size) ExceptionBlob(cb, size, oop_maps, frame_size);
  }

  trace_new_stub(blob, "ExceptionBlob");

  return blob;
}


#endif // COMPILER2


//----------------------------------------------------------------------------------------------------
// Implementation of SafepointBlob

SafepointBlob::SafepointBlob(
  CodeBuffer* cb,
  int size,
  OopMapSet* oop_maps,
  int frame_size
)
: SingletonBlob("SafepointBlob", cb, sizeof(SafepointBlob), size, frame_size, oop_maps)
{}


SafepointBlob* SafepointBlob::create(
  CodeBuffer* cb,
  OopMapSet* oop_maps,
  int frame_size)
{
  SafepointBlob* blob = nullptr;
  unsigned int size = CodeBlob::allocation_size(cb, sizeof(SafepointBlob));
  ThreadInVMfromUnknown __tiv;  // get to VM state in case we block on CodeCache_lock
  {
    MutexLocker mu(CodeCache_lock, Mutex::_no_safepoint_check_flag);
    blob = new (size) SafepointBlob(cb, size, oop_maps, frame_size);
  }

  trace_new_stub(blob, "SafepointBlob");

  return blob;
}


//----------------------------------------------------------------------------------------------------
// Verification and printing

void CodeBlob::print_on(outputStream* st) const {
  st->print_cr("[CodeBlob (" INTPTR_FORMAT ")]", p2i(this));
  st->print_cr("Framesize: %d", _frame_size);
}

void CodeBlob::print() const { print_on(tty); }

void CodeBlob::print_value_on(outputStream* st) const {
  st->print_cr("[CodeBlob]");
}

// Best-effort diagnostic dump for an arbitrary address inside this blob.
// Tries progressively more specific descriptions (interpreter codelet,
// adapter, stub, vtable stub, nmethod) before the generic fallback.
void CodeBlob::dump_for_addr(address addr, outputStream* st, bool verbose) const {
  if (is_buffer_blob()) {
    // the interpreter is generated into a buffer blob
    InterpreterCodelet* i = Interpreter::codelet_containing(addr);
    if (i != nullptr) {
      st->print_cr(INTPTR_FORMAT " is at code_begin+%d in an Interpreter codelet", p2i(addr), (int)(addr - i->code_begin()));
      i->print_on(st);
      return;
    }
    if (Interpreter::contains(addr)) {
      st->print_cr(INTPTR_FORMAT " is pointing into interpreter code"
                   " (not bytecode specific)", p2i(addr));
      return;
    }
    //
    if (AdapterHandlerLibrary::contains(this)) {
      st->print_cr(INTPTR_FORMAT " is at code_begin+%d in an AdapterHandler", p2i(addr), (int)(addr - code_begin()));
      AdapterHandlerLibrary::print_handler_on(st, this);
    }
    // NOTE(review): there is no 'return' after the AdapterHandler branch
    // above, so execution falls through to the stub checks below — confirm
    // this fallthrough is intended.
    // the stubroutines are generated into a buffer blob
    StubCodeDesc* d = StubCodeDesc::desc_for(addr);
    if (d != nullptr) {
      st->print_cr(INTPTR_FORMAT " is at begin+%d in a stub", p2i(addr), (int)(addr - d->begin()));
      d->print_on(st);
      st->cr();
      return;
    }
    if (StubRoutines::contains(addr)) {
      st->print_cr(INTPTR_FORMAT " is pointing to an (unnamed) stub routine", p2i(addr));
      return;
    }
    VtableStub* v = VtableStubs::stub_containing(addr);
    if (v != nullptr) {
      st->print_cr(INTPTR_FORMAT " is at entry_point+%d in a vtable stub", p2i(addr), (int)(addr - v->entry_point()));
      v->print_on(st);
      st->cr();
      return;
    }
  }
  if (is_nmethod()) {
    nmethod* nm = (nmethod*)this;
    ResourceMark rm;
    st->print(INTPTR_FORMAT " is at entry_point+%d in (nmethod*)" INTPTR_FORMAT,
              p2i(addr), (int)(addr - nm->entry_point()), p2i(nm));
    if (verbose) {
      st->print(" for ");
      nm->method()->print_value_on(st);
    }
    st->cr();
    if (verbose && st == tty) {
      // verbose is only ever true when called from findpc in debug.cpp
      nm->print_nmethod(true);
    } else {
      nm->print(st);
    }
    return;
  }
  st->print_cr(INTPTR_FORMAT " is at code_begin+%d in ", p2i(addr), (int)(addr - code_begin()));
  print_on(st);
}

void RuntimeBlob::verify() {
  ShouldNotReachHere();
}

void BufferBlob::verify() {
  // unimplemented
}

void BufferBlob::print_on(outputStream* st) const {
  RuntimeBlob::print_on(st);
  print_value_on(st);
}

void BufferBlob::print_value_on(outputStream* st) const {
  st->print_cr("BufferBlob (" INTPTR_FORMAT ") used for %s", p2i(this), name());
}

void RuntimeStub::verify() {
  // unimplemented
}

void RuntimeStub::print_on(outputStream* st) const {
  ttyLocker ttyl;
  RuntimeBlob::print_on(st);
  st->print("Runtime Stub (" INTPTR_FORMAT "): ", p2i(this));
  st->print_cr("%s", name());
  Disassembler::decode((RuntimeBlob*)this, st);
}

void RuntimeStub::print_value_on(outputStream* st) const {
  st->print("RuntimeStub (" INTPTR_FORMAT "): ", p2i(this)); st->print("%s", name());
}

void SingletonBlob::verify() {
  // unimplemented
}

void SingletonBlob::print_on(outputStream* st) const {
  ttyLocker ttyl;
  RuntimeBlob::print_on(st);
  st->print_cr("%s", name());
  Disassembler::decode((RuntimeBlob*)this, st);
}

void SingletonBlob::print_value_on(outputStream* st) const {
  st->print_cr("%s", name());
}

void DeoptimizationBlob::print_value_on(outputStream* st) const {
  st->print_cr("Deoptimization (frame not available)");
}

// Implementation of UpcallStub

UpcallStub::UpcallStub(const char* name, CodeBuffer* cb, int size, jobject receiver, ByteSize frame_data_offset) :
  RuntimeBlob(name, cb, sizeof(UpcallStub), size, CodeOffsets::frame_never_safe, 0 /* no frame size */,
              /* oop maps = */ nullptr, /* caller must gc arguments = */ false),
  _receiver(receiver),
  _frame_data_offset(frame_data_offset) {
  CodeCache::commit(this);
}

void* UpcallStub::operator new(size_t s, unsigned size) throw() {
  return CodeCache::allocate(size, CodeBlobType::NonNMethod);
}

// Allocates an UpcallStub for 'cb'; returns nullptr on allocation failure
// (the caller must handle that case).
UpcallStub* UpcallStub::create(const char* name, CodeBuffer* cb, jobject receiver, ByteSize frame_data_offset) {
  ThreadInVMfromUnknown __tiv;  // get to VM state in case we block on CodeCache_lock

  UpcallStub* blob = nullptr;
  unsigned int size = CodeBlob::allocation_size(cb, sizeof(UpcallStub));
  {
    MutexLocker mu(CodeCache_lock, Mutex::_no_safepoint_check_flag);
    blob = new (size) UpcallStub(name, cb, size, receiver, frame_data_offset);
  }
  if (blob == nullptr) {
    return nullptr; // caller must handle this
  }

  // Track memory usage statistic after releasing CodeCache_lock
  MemoryService::track_code_cache_memory_usage();

  trace_new_stub(blob, "UpcallStub");

  return blob;
}

// Applies 'f' to the oops held in the stub's per-frame handle block.
void UpcallStub::oops_do(OopClosure* f, const frame& frame) {
  frame_data_for_frame(frame)->old_handles->oops_do(f);
}

JavaFrameAnchor* UpcallStub::jfa_for_frame(const frame& frame) const {
  return &frame_data_for_frame(frame)->jfa;
}

// Frees 'blob', first destroying the global JNI handle that pins its receiver.
void UpcallStub::free(UpcallStub* blob) {
  assert(blob != nullptr, "caller must check for nullptr");
  JNIHandles::destroy_global(blob->receiver());
  RuntimeBlob::free(blob);
}

void UpcallStub::preserve_callee_argument_oops(frame fr, const RegisterMap* reg_map, OopClosure* f) {
  ShouldNotReachHere(); // caller should never have to gc arguments
}

// Misc.
void UpcallStub::verify() {
  // unimplemented
}

void UpcallStub::print_on(outputStream* st) const {
  RuntimeBlob::print_on(st);
  print_value_on(st);
  Disassembler::decode((RuntimeBlob*)this, st);
}

void UpcallStub::print_value_on(outputStream* st) const {
  st->print_cr("UpcallStub (" INTPTR_FORMAT ") used for %s", p2i(this), name());
}