1 /* 2 * Copyright (c) 1998, 2023, Oracle and/or its affiliates. All rights reserved. 3 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. 4 * 5 * This code is free software; you can redistribute it and/or modify it 6 * under the terms of the GNU General Public License version 2 only, as 7 * published by the Free Software Foundation. 8 * 9 * This code is distributed in the hope that it will be useful, but WITHOUT 10 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or 11 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License 12 * version 2 for more details (a copy is included in the LICENSE file that 13 * accompanied this code). 14 * 15 * You should have received a copy of the GNU General Public License version 16 * 2 along with this work; if not, write to the Free Software Foundation, 17 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. 18 * 19 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA 20 * or visit www.oracle.com if you need additional information or have any 21 * questions. 
 *
 */

#include "precompiled.hpp"
#include "code/codeBlob.hpp"
#include "code/codeCache.hpp"
#include "code/icBuffer.hpp"
#include "code/relocInfo.hpp"
#include "code/vtableStubs.hpp"
#include "compiler/disassembler.hpp"
#include "compiler/oopMap.hpp"
#include "interpreter/bytecode.hpp"
#include "interpreter/interpreter.hpp"
#include "jvm.h"
#include "memory/allocation.inline.hpp"
#include "memory/heap.hpp"
#include "memory/resourceArea.hpp"
#include "oops/oop.inline.hpp"
#include "prims/forte.hpp"
#include "prims/jvmtiExport.hpp"
#include "runtime/handles.inline.hpp"
#include "runtime/interfaceSupport.inline.hpp"
#include "runtime/javaFrameAnchor.hpp"
#include "runtime/jniHandles.hpp"
#include "runtime/mutexLocker.hpp"
#include "runtime/safepoint.hpp"
#include "runtime/sharedRuntime.hpp"
#include "runtime/stubCodeGenerator.hpp"
#include "runtime/stubRoutines.hpp"
#include "runtime/vframe.hpp"
#include "services/memoryService.hpp"
#include "utilities/align.hpp"
#ifdef COMPILER1
#include "c1/c1_Runtime1.hpp"
#endif

// Human-readable name of the compiler that produced this blob,
// derived from the stored CompilerType.
const char* CodeBlob::compiler_name() const {
  return compilertype2name(_type);
}

// Rounds 'offset' up so that code placed after the CodeHeap block header
// begins on a CodeEntryAlignment boundary. The header size is added before
// aligning and subtracted again afterwards, so the returned value is an
// offset relative to the blob start, not to the heap block start.
unsigned int CodeBlob::align_code_offset(int offset) {
  // align the size to CodeEntryAlignment
  int header_size = (int)CodeHeap::header_size();
  return align_up(offset + header_size, CodeEntryAlignment) - header_size;
}


// This must be consistent with the CodeBlob constructor's layout actions.
// Computes the total CodeCache allocation size needed for a blob with a
// header of 'header_size' bytes holding the contents of 'cb': relocation
// info, code/content, oops, and metadata, each padded to oopSize.
unsigned int CodeBlob::allocation_size(CodeBuffer* cb, int header_size) {
  unsigned int size = header_size;
  size += align_up(cb->total_relocation_size(), oopSize);
  // align the size to CodeEntryAlignment
  size = align_code_offset(size);
  size += align_up(cb->total_content_size(), oopSize);
  size += align_up(cb->total_oop_size(), oopSize);
  size += align_up(cb->total_metadata_size(), oopSize);
  return size;
}

// Constructor variant taking an already-built ImmutableOopMapSet: copies the
// section boundaries out of 'layout' and adopts 'oop_maps' without conversion.
CodeBlob::CodeBlob(const char* name, CompilerType type, const CodeBlobLayout& layout, int frame_complete_offset, int frame_size, ImmutableOopMapSet* oop_maps, bool caller_must_gc_arguments, bool compiled) :
  _code_begin(layout.code_begin()),
  _code_end(layout.code_end()),
  _content_begin(layout.content_begin()),
  _data_end(layout.data_end()),
  _relocation_begin(layout.relocation_begin()),
  _relocation_end(layout.relocation_end()),
  _oop_maps(oop_maps),
  _name(name),
  _size(layout.size()),
  _header_size(layout.header_size()),
  _frame_complete_offset(frame_complete_offset),
  _data_offset(layout.data_offset()),
  _frame_size(frame_size),
  _caller_must_gc_arguments(caller_must_gc_arguments),
  _is_compiled(compiled),
  _type(type)
{
  assert(is_aligned(layout.size(), oopSize), "unaligned size");
  assert(is_aligned(layout.header_size(), oopSize), "unaligned size");
  assert(is_aligned(layout.relocation_size(), oopSize), "unaligned size");
  assert(layout.code_end() == layout.content_end(), "must be the same - see code_end()");
#ifdef COMPILER1
  // probably wrong for tiered
  assert(_frame_size >= -1, "must use frame size or -1 for runtime stubs");
#endif // COMPILER1
  S390_ONLY(_ctable_offset = 0;) // avoid uninitialized fields
}

// Constructor variant taking a mutable OopMapSet, which is converted to an
// immutable set via set_oop_maps(). The CodeBuffer parameter is unused here;
// callers (RuntimeBlob) copy its contents separately.
CodeBlob::CodeBlob(const char* name, CompilerType type, const CodeBlobLayout& layout, CodeBuffer* cb /*UNUSED*/, int frame_complete_offset, int frame_size, OopMapSet* oop_maps, bool caller_must_gc_arguments, bool compiled) :
  _code_begin(layout.code_begin()),
  _code_end(layout.code_end()),
  _content_begin(layout.content_begin()),
  _data_end(layout.data_end()),
  _relocation_begin(layout.relocation_begin()),
  _relocation_end(layout.relocation_end()),
  _name(name),
  _size(layout.size()),
  _header_size(layout.header_size()),
  _frame_complete_offset(frame_complete_offset),
  _data_offset(layout.data_offset()),
  _frame_size(frame_size),
  _caller_must_gc_arguments(caller_must_gc_arguments),
  _is_compiled(compiled),
  _type(type)
{
  assert(is_aligned(_size, oopSize), "unaligned size");
  assert(is_aligned(_header_size, oopSize), "unaligned size");
  assert(_data_offset <= _size, "codeBlob is too small");
  assert(layout.code_end() == layout.content_end(), "must be the same - see code_end()");

  set_oop_maps(oop_maps);
#ifdef COMPILER1
  // probably wrong for tiered
  assert(_frame_size >= -1, "must use frame size or -1 for runtime stubs");
#endif // COMPILER1
  S390_ONLY(_ctable_offset = 0;) // avoid uninitialized fields
}


// Creates a simple CodeBlob. Sets up the size of the different regions.
RuntimeBlob::RuntimeBlob(const char* name, int header_size, int size, int frame_complete, int locs_size)
  : CodeBlob(name, compiler_none, CodeBlobLayout((address) this, size, header_size, locs_size, size), frame_complete, 0, nullptr, false /* caller_must_gc_arguments */)
{
  assert(is_aligned(locs_size, oopSize), "unaligned size");
}


// Creates a RuntimeBlob from a CodeBuffer
// and copy code and relocation info.
RuntimeBlob::RuntimeBlob(
  const char* name,
  CodeBuffer* cb,
  int header_size,
  int size,
  int frame_complete,
  int frame_size,
  OopMapSet* oop_maps,
  bool caller_must_gc_arguments
) : CodeBlob(name, compiler_none, CodeBlobLayout((address) this, size, header_size, cb), cb, frame_complete, frame_size, oop_maps, caller_must_gc_arguments) {
  // Fill in the code and relocation sections laid out by the base constructor.
  cb->copy_code_and_locs_to(this);
}

// Releases 'blob' back to the CodeCache. Transitions to the VM thread state
// first because CodeCache_lock may block; flushes per-blob resources before
// taking the lock.
void RuntimeBlob::free(RuntimeBlob* blob) {
  assert(blob != nullptr, "caller must check for nullptr");
  ThreadInVMfromUnknown __tiv;  // get to VM state in case we block on CodeCache_lock
  blob->flush();
  {
    MutexLocker mu(CodeCache_lock, Mutex::_no_safepoint_check_flag);
    CodeCache::free(blob);
  }
  // Track memory usage statistic after releasing CodeCache_lock
  MemoryService::track_code_cache_memory_usage();
}

// Frees the blob-owned oop map set and (in non-product builds) clears
// debugging annotations.
void CodeBlob::flush() {
  if (_oop_maps != nullptr) {
    delete _oop_maps;
    _oop_maps = nullptr;
  }
  NOT_PRODUCT(_asm_remarks.clear());
  NOT_PRODUCT(_dbg_strings.clear());
}

// Converts the mutable OopMapSet 'p' into an immutable set owned by this blob.
void CodeBlob::set_oop_maps(OopMapSet* p) {
  // Danger Will Robinson! This method allocates a big
  // chunk of memory, its your job to free it.
  if (p != nullptr) {
    _oop_maps = ImmutableOopMapSet::build_from(p);
  } else {
    _oop_maps = nullptr;
  }
}

// Announces a freshly created stub: optionally disassembles it (PrintStubCode),
// registers it with Forte, and posts a JVMTI dynamic-code-generated event.
// The stub name is the concatenation of 'name1' and 'name2'.
void RuntimeBlob::trace_new_stub(RuntimeBlob* stub, const char* name1, const char* name2) {
  // Do not hold the CodeCache lock during name formatting.
  assert(!CodeCache_lock->owned_by_self(), "release CodeCache before registering the stub");

  if (stub != nullptr && (PrintStubCode ||
                          Forte::is_enabled() ||
                          JvmtiExport::should_post_dynamic_code_generated())) {
    char stub_id[256];
    assert(strlen(name1) + strlen(name2) < sizeof(stub_id), "");
    jio_snprintf(stub_id, sizeof(stub_id), "%s%s", name1, name2);
    if (PrintStubCode) {
      ttyLocker ttyl;
      tty->print_cr("- - - [BEGIN] - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -");
      tty->print_cr("Decoding %s " PTR_FORMAT " [" PTR_FORMAT ", " PTR_FORMAT "] (%d bytes)",
                    stub_id, p2i(stub), p2i(stub->code_begin()), p2i(stub->code_end()), stub->code_size());
      Disassembler::decode(stub->code_begin(), stub->code_end(), tty
                           NOT_PRODUCT(COMMA &stub->asm_remarks()));
      if ((stub->oop_maps() != nullptr) && AbstractDisassembler::show_structs()) {
        tty->print_cr("- - - [OOP MAPS]- - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -");
        stub->oop_maps()->print();
      }
      tty->print_cr("- - - [END] - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -");
      tty->cr();
    }
    if (Forte::is_enabled()) {
      Forte::register_stub(stub_id, stub->code_begin(), stub->code_end());
    }

    if (JvmtiExport::should_post_dynamic_code_generated()) {
      // Prefer the specific name; fall back to the prefix when name2 is empty.
      const char* stub_name = name2;
      if (name2[0] == '\0') stub_name = name1;
      JvmtiExport::post_dynamic_code_generated(stub_name, stub->code_begin(), stub->code_end());
    }
  }

  // Track memory usage statistic after releasing CodeCache_lock
  MemoryService::track_code_cache_memory_usage();
}

// Looks up the oop map covering 'return_address', expressed as an offset
// from this blob's code_begin().
const ImmutableOopMap* CodeBlob::oop_map_for_return_address(address return_address) const {
  assert(_oop_maps != nullptr, "nope");
  return _oop_maps->find_map_at_offset((intptr_t) return_address - (intptr_t) code_begin());
}

// Disassembles this blob's code to tty.
void CodeBlob::print_code() {
  ResourceMark m;
  Disassembler::decode(this, tty);
}

//----------------------------------------------------------------------------------------------------
// Implementation of BufferBlob


BufferBlob::BufferBlob(const char* name, int size)
: RuntimeBlob(name, sizeof(BufferBlob), size, CodeOffsets::frame_never_safe, /*locs_size:*/ 0)
{}

// Allocates an empty BufferBlob with room for 'buffer_size' bytes of code.
BufferBlob* BufferBlob::create(const char* name, uint buffer_size) {
  ThreadInVMfromUnknown __tiv;  // get to VM state in case we block on CodeCache_lock

  BufferBlob* blob = nullptr;
  unsigned int size = sizeof(BufferBlob);
  // align the size to CodeEntryAlignment
  size = CodeBlob::align_code_offset(size);
  size += align_up(buffer_size, oopSize);
  assert(name != nullptr, "must provide a name");
  {
    MutexLocker mu(CodeCache_lock, Mutex::_no_safepoint_check_flag);
    blob = new (size) BufferBlob(name, size);
  }
  // Track memory usage statistic after releasing CodeCache_lock
  MemoryService::track_code_cache_memory_usage();

  return blob;
}


BufferBlob::BufferBlob(const char* name, int size, CodeBuffer* cb)
  : RuntimeBlob(name, cb, sizeof(BufferBlob), size, CodeOffsets::frame_never_safe, 0, nullptr)
{}

// Allocates a BufferBlob initialized from the contents of 'cb'.
BufferBlob* BufferBlob::create(const char* name, CodeBuffer* cb) {
  ThreadInVMfromUnknown __tiv;  // get to VM state in case we block on CodeCache_lock

  BufferBlob* blob = nullptr;
  unsigned int size = CodeBlob::allocation_size(cb, sizeof(BufferBlob));
  assert(name != nullptr, "must provide a name");
  {
    MutexLocker mu(CodeCache_lock, Mutex::_no_safepoint_check_flag);
    blob = new (size) BufferBlob(name, size, cb);
  }
  // Track memory usage statistic after releasing CodeCache_lock
  MemoryService::track_code_cache_memory_usage();

  return blob;
}

// Placement new: carves the blob's storage out of the non-nmethod code heap.
void* BufferBlob::operator new(size_t s, unsigned size) throw() {
  return CodeCache::allocate(size, CodeBlobType::NonNMethod);
}

void BufferBlob::free(BufferBlob *blob) {
  RuntimeBlob::free(blob);
}


//----------------------------------------------------------------------------------------------------
// Implementation of AdapterBlob

AdapterBlob::AdapterBlob(int size, CodeBuffer* cb) :
  BufferBlob("I2C/C2I adapters", size, cb) {
  CodeCache::commit(this);
}

// Allocates an AdapterBlob from 'cb', possibly triggering a code cache GC
// first to make room.
AdapterBlob* AdapterBlob::create(CodeBuffer* cb) {
  ThreadInVMfromUnknown __tiv;  // get to VM state in case we block on CodeCache_lock

  CodeCache::gc_on_allocation();

  AdapterBlob* blob = nullptr;
  unsigned int size = CodeBlob::allocation_size(cb, sizeof(AdapterBlob));
  {
    MutexLocker mu(CodeCache_lock, Mutex::_no_safepoint_check_flag);
    blob = new (size) AdapterBlob(size, cb);
  }
  // Track memory usage statistic after releasing CodeCache_lock
  MemoryService::track_code_cache_memory_usage();

  return blob;
}

void* VtableBlob::operator new(size_t s, unsigned size) throw() {
  // Handling of allocation failure stops compilation and prints a bunch of
  // stuff, which requires unlocking the CodeCache_lock, so that the Compile_lock
  // can be locked, and then re-locking the CodeCache_lock. That is not safe in
  // this context as we hold the CompiledICLocker. So we just don't handle code
  // cache exhaustion here; we leave that for a later allocation that does not
  // hold the CompiledICLocker.
  return CodeCache::allocate(size, CodeBlobType::NonNMethod, false /* handle_alloc_failure */);
}

VtableBlob::VtableBlob(const char* name, int size) :
  BufferBlob(name, size) {
}

// Allocates a VtableBlob for an IC transition stub. Returns nullptr instead
// of blocking when the CodeCache_lock cannot be acquired immediately (see
// the lock-ordering comment below).
VtableBlob* VtableBlob::create(const char* name, int buffer_size) {
  assert(JavaThread::current()->thread_state() == _thread_in_vm, "called with the wrong state");

  VtableBlob* blob = nullptr;
  unsigned int size = sizeof(VtableBlob);
  // align the size to CodeEntryAlignment
  size = align_code_offset(size);
  size += align_up(buffer_size, oopSize);
  assert(name != nullptr, "must provide a name");
  {
    if (!CodeCache_lock->try_lock()) {
      // If we can't take the CodeCache_lock, then this is a bad time to perform the ongoing
      // IC transition to megamorphic, for which this stub will be needed. It is better to
      // bail out the transition, and wait for a more opportune moment. Not only is it not
      // worth waiting for the lock blockingly for the megamorphic transition, it might
      // also result in a deadlock to blockingly wait, when concurrent class unloading is
      // performed. At this point in time, the CompiledICLocker is taken, so we are not
      // allowed to blockingly wait for the CodeCache_lock, as these two locks are otherwise
      // consistently taken in the opposite order. Bailing out results in an IC transition to
      // the clean state instead, which will cause subsequent calls to retry the transitioning
      // eventually.
      return nullptr;
    }
    blob = new (size) VtableBlob(name, size);
    CodeCache_lock->unlock();
  }
  // Track memory usage statistic after releasing CodeCache_lock
  MemoryService::track_code_cache_memory_usage();

  return blob;
}

//----------------------------------------------------------------------------------------------------
// Implementation of MethodHandlesAdapterBlob

// Allocates a MethodHandlesAdapterBlob; exits the VM on allocation failure
// since the adapters are required for correct operation.
MethodHandlesAdapterBlob* MethodHandlesAdapterBlob::create(int buffer_size) {
  ThreadInVMfromUnknown __tiv;  // get to VM state in case we block on CodeCache_lock

  MethodHandlesAdapterBlob* blob = nullptr;
  unsigned int size = sizeof(MethodHandlesAdapterBlob);
  // align the size to CodeEntryAlignment
  size = CodeBlob::align_code_offset(size);
  size += align_up(buffer_size, oopSize);
  {
    MutexLocker mu(CodeCache_lock, Mutex::_no_safepoint_check_flag);
    blob = new (size) MethodHandlesAdapterBlob(size);
    if (blob == nullptr) {
      vm_exit_out_of_memory(size, OOM_MALLOC_ERROR, "CodeCache: no room for method handle adapter blob");
    }
  }
  // Track memory usage statistic after releasing CodeCache_lock
  MemoryService::track_code_cache_memory_usage();

  return blob;
}

//----------------------------------------------------------------------------------------------------
// Implementation of RuntimeStub

RuntimeStub::RuntimeStub(
  const char* name,
  CodeBuffer* cb,
  int size,
  int frame_complete,
  int frame_size,
  OopMapSet* oop_maps,
  bool caller_must_gc_arguments
)
: RuntimeBlob(name, cb, sizeof(RuntimeStub), size, frame_complete, frame_size, oop_maps, caller_must_gc_arguments)
{
}

// Allocates and registers a RuntimeStub from 'cb'. On allocation failure,
// either returns nullptr or aborts the VM depending on 'alloc_fail_is_fatal'.
RuntimeStub* RuntimeStub::new_runtime_stub(const char* stub_name,
                                           CodeBuffer* cb,
                                           int frame_complete,
                                           int frame_size,
                                           OopMapSet* oop_maps,
                                           bool caller_must_gc_arguments,
                                           bool alloc_fail_is_fatal)
{
  RuntimeStub* stub = nullptr;
  unsigned int size = CodeBlob::allocation_size(cb, sizeof(RuntimeStub));
  ThreadInVMfromUnknown __tiv;  // get to VM state in case we block on CodeCache_lock
  {
    MutexLocker mu(CodeCache_lock, Mutex::_no_safepoint_check_flag);
    stub = new (size) RuntimeStub(stub_name, cb, size, frame_complete, frame_size, oop_maps, caller_must_gc_arguments);
    if (stub == nullptr) {
      if (!alloc_fail_is_fatal) {
        return nullptr;
      }
      fatal("Initial size of CodeCache is too small");
    }
  }

  trace_new_stub(stub, "RuntimeStub - ", stub_name);

  return stub;
}


void* RuntimeStub::operator new(size_t s, unsigned size) throw() {
  return CodeCache::allocate(size, CodeBlobType::NonNMethod);
}

// operator new shared by all singletons:
void* SingletonBlob::operator new(size_t s, unsigned size) throw() {
  void* p = CodeCache::allocate(size, CodeBlobType::NonNMethod);
  // Singleton blobs are mandatory; failing to allocate one is fatal.
  if (!p) fatal("Initial size of CodeCache is too small");
  return p;
}


//----------------------------------------------------------------------------------------------------
// Implementation of DeoptimizationBlob

DeoptimizationBlob::DeoptimizationBlob(
  CodeBuffer* cb,
  int size,
  OopMapSet* oop_maps,
  int unpack_offset,
  int unpack_with_exception_offset,
  int unpack_with_reexecution_offset,
  int frame_size
)
: SingletonBlob("DeoptimizationBlob", cb, sizeof(DeoptimizationBlob), size, frame_size, oop_maps)
{
  _unpack_offset           = unpack_offset;
  _unpack_with_exception   = unpack_with_exception_offset;
  _unpack_with_reexecution = unpack_with_reexecution_offset;
#ifdef COMPILER1
  _unpack_with_exception_in_tls = -1;
#endif
}

// Allocates the singleton DeoptimizationBlob and registers it via trace_new_stub.
DeoptimizationBlob* DeoptimizationBlob::create(
  CodeBuffer* cb,
  OopMapSet* oop_maps,
  int unpack_offset,
  int unpack_with_exception_offset,
  int unpack_with_reexecution_offset,
  int frame_size)
{
  DeoptimizationBlob* blob = nullptr;
  unsigned int size = CodeBlob::allocation_size(cb, sizeof(DeoptimizationBlob));
  ThreadInVMfromUnknown __tiv;  // get to VM state in case we block on CodeCache_lock
  {
    MutexLocker mu(CodeCache_lock, Mutex::_no_safepoint_check_flag);
    blob = new (size) DeoptimizationBlob(cb,
                                         size,
                                         oop_maps,
                                         unpack_offset,
                                         unpack_with_exception_offset,
                                         unpack_with_reexecution_offset,
                                         frame_size);
  }

  trace_new_stub(blob, "DeoptimizationBlob");

  return blob;
}


//----------------------------------------------------------------------------------------------------
// Implementation of UncommonTrapBlob

#ifdef COMPILER2
UncommonTrapBlob::UncommonTrapBlob(
  CodeBuffer* cb,
  int size,
  OopMapSet* oop_maps,
  int frame_size
)
: SingletonBlob("UncommonTrapBlob", cb, sizeof(UncommonTrapBlob), size, frame_size, oop_maps)
{}


// Allocates the singleton UncommonTrapBlob (C2 only).
UncommonTrapBlob* UncommonTrapBlob::create(
  CodeBuffer* cb,
  OopMapSet* oop_maps,
  int frame_size)
{
  UncommonTrapBlob* blob = nullptr;
  unsigned int size = CodeBlob::allocation_size(cb, sizeof(UncommonTrapBlob));
  ThreadInVMfromUnknown __tiv;  // get to VM state in case we block on CodeCache_lock
  {
    MutexLocker mu(CodeCache_lock, Mutex::_no_safepoint_check_flag);
    blob = new (size) UncommonTrapBlob(cb, size, oop_maps, frame_size);
  }

  trace_new_stub(blob, "UncommonTrapBlob");

  return blob;
}


#endif // COMPILER2


//----------------------------------------------------------------------------------------------------
// Implementation of ExceptionBlob

#ifdef COMPILER2
ExceptionBlob::ExceptionBlob(
  CodeBuffer* cb,
  int size,
  OopMapSet* oop_maps,
  int frame_size
)
: SingletonBlob("ExceptionBlob", cb, sizeof(ExceptionBlob), size, frame_size, oop_maps)
{}


// Allocates the singleton ExceptionBlob (C2 only).
ExceptionBlob* ExceptionBlob::create(
  CodeBuffer* cb,
  OopMapSet* oop_maps,
  int frame_size)
{
  ExceptionBlob* blob = nullptr;
  unsigned int size = CodeBlob::allocation_size(cb, sizeof(ExceptionBlob));
  ThreadInVMfromUnknown __tiv;  // get to VM state in case we block on CodeCache_lock
  {
    MutexLocker mu(CodeCache_lock, Mutex::_no_safepoint_check_flag);
    blob = new (size) ExceptionBlob(cb, size, oop_maps, frame_size);
  }

  trace_new_stub(blob, "ExceptionBlob");

  return blob;
}


#endif // COMPILER2


//----------------------------------------------------------------------------------------------------
// Implementation of SafepointBlob

SafepointBlob::SafepointBlob(
  CodeBuffer* cb,
  int size,
  OopMapSet* oop_maps,
  int frame_size
)
: SingletonBlob("SafepointBlob", cb, sizeof(SafepointBlob), size, frame_size, oop_maps)
{}


// Allocates the singleton SafepointBlob.
SafepointBlob* SafepointBlob::create(
  CodeBuffer* cb,
  OopMapSet* oop_maps,
  int frame_size)
{
  SafepointBlob* blob = nullptr;
  unsigned int size = CodeBlob::allocation_size(cb, sizeof(SafepointBlob));
  ThreadInVMfromUnknown __tiv;  // get to VM state in case we block on CodeCache_lock
  {
    MutexLocker mu(CodeCache_lock, Mutex::_no_safepoint_check_flag);
    blob = new (size) SafepointBlob(cb, size, oop_maps, frame_size);
  }

  trace_new_stub(blob, "SafepointBlob");

  return blob;
}


//----------------------------------------------------------------------------------------------------
// Verification and printing

void CodeBlob::print_on(outputStream* st) const {
  st->print_cr("[CodeBlob (" INTPTR_FORMAT ")]", p2i(this));
  st->print_cr("Framesize: %d", _frame_size);
}

void CodeBlob::print() const { print_on(tty); }

void CodeBlob::print_value_on(outputStream* st) const {
  st->print_cr("[CodeBlob]");
}

// Describes what 'addr' points at inside this blob for diagnostic output:
// interpreter codelets, adapters, stub routines, vtable stubs, IC buffer
// entries, or an nmethod entry point, falling back to a raw offset.
void CodeBlob::dump_for_addr(address addr, outputStream* st, bool verbose) const {
  if (is_buffer_blob()) {
    // the interpreter is generated into a buffer blob
    InterpreterCodelet* i = Interpreter::codelet_containing(addr);
    if (i != nullptr) {
      st->print_cr(INTPTR_FORMAT " is at code_begin+%d in an Interpreter codelet", p2i(addr), (int)(addr - i->code_begin()));
      i->print_on(st);
      return;
    }
    if (Interpreter::contains(addr)) {
      st->print_cr(INTPTR_FORMAT " is pointing into interpreter code"
                   " (not bytecode specific)", p2i(addr));
      return;
    }
    // i2c/c2i adapters are also generated into buffer blobs
    if (AdapterHandlerLibrary::contains(this)) {
      st->print_cr(INTPTR_FORMAT " is at code_begin+%d in an AdapterHandler", p2i(addr), (int)(addr - code_begin()));
      AdapterHandlerLibrary::print_handler_on(st, this);
    }
    // the stubroutines are generated into a buffer blob
    StubCodeDesc* d = StubCodeDesc::desc_for(addr);
    if (d != nullptr) {
      st->print_cr(INTPTR_FORMAT " is at begin+%d in a stub", p2i(addr), (int)(addr - d->begin()));
      d->print_on(st);
      st->cr();
      return;
    }
    if (StubRoutines::contains(addr)) {
      st->print_cr(INTPTR_FORMAT " is pointing to an (unnamed) stub routine", p2i(addr));
      return;
    }
    // the InlineCacheBuffer is using stubs generated into a buffer blob
    if (InlineCacheBuffer::contains(addr)) {
      st->print_cr(INTPTR_FORMAT " is pointing into InlineCacheBuffer", p2i(addr));
      return;
    }
    VtableStub* v = VtableStubs::stub_containing(addr);
    if (v != nullptr) {
      st->print_cr(INTPTR_FORMAT " is at entry_point+%d in a vtable stub", p2i(addr), (int)(addr - v->entry_point()));
      v->print_on(st);
      st->cr();
      return;
    }
  }
  if (is_nmethod()) {
    nmethod* nm = (nmethod*)this;
    ResourceMark rm;
    st->print(INTPTR_FORMAT " is at entry_point+%d in (nmethod*)" INTPTR_FORMAT,
              p2i(addr), (int)(addr - nm->entry_point()), p2i(nm));
    if (verbose) {
      st->print(" for ");
      nm->method()->print_value_on(st);
    }
    st->cr();
    if (verbose && st == tty) {
      // verbose is only ever true when called from findpc in debug.cpp
      nm->print_nmethod(true);
    } else {
      nm->print(st);
    }
    return;
  }
  st->print_cr(INTPTR_FORMAT " is at code_begin+%d in ", p2i(addr), (int)(addr - code_begin()));
  print_on(st);
}

// RuntimeBlob itself is never verified directly; subclasses override.
void RuntimeBlob::verify() {
  ShouldNotReachHere();
}

void BufferBlob::verify() {
  // unimplemented
}

void BufferBlob::print_on(outputStream* st) const {
  RuntimeBlob::print_on(st);
  print_value_on(st);
}

void BufferBlob::print_value_on(outputStream* st) const {
  st->print_cr("BufferBlob (" INTPTR_FORMAT ") used for %s", p2i(this), name());
}

void RuntimeStub::verify() {
  // unimplemented
}

void RuntimeStub::print_on(outputStream* st) const {
  ttyLocker ttyl;
  RuntimeBlob::print_on(st);
  st->print("Runtime Stub (" INTPTR_FORMAT "): ", p2i(this));
  st->print_cr("%s", name());
  Disassembler::decode((RuntimeBlob*)this, st);
}

void RuntimeStub::print_value_on(outputStream* st) const {
  st->print("RuntimeStub (" INTPTR_FORMAT "): ", p2i(this)); st->print("%s", name());
}

void SingletonBlob::verify() {
  // unimplemented
}

void SingletonBlob::print_on(outputStream* st) const {
  ttyLocker ttyl;
  RuntimeBlob::print_on(st);
  st->print_cr("%s", name());
  Disassembler::decode((RuntimeBlob*)this, st);
}

void SingletonBlob::print_value_on(outputStream* st) const {
  st->print_cr("%s", name());
}

void DeoptimizationBlob::print_value_on(outputStream* st) const {
  st->print_cr("Deoptimization (frame not available)");
}

// Implementation of UpcallStub

UpcallStub::UpcallStub(const char* name, CodeBuffer* cb, int size, jobject receiver, ByteSize frame_data_offset) :
  RuntimeBlob(name, cb, sizeof(UpcallStub), size, CodeOffsets::frame_never_safe, 0 /* no frame size */,
              /* oop maps = */ nullptr, /* caller must gc arguments = */ false),
  _receiver(receiver),
  _frame_data_offset(frame_data_offset) {
  CodeCache::commit(this);
}

void* UpcallStub::operator new(size_t s, unsigned size) throw() {
  return CodeCache::allocate(size, CodeBlobType::NonNMethod);
}

// Allocates an UpcallStub from 'cb'; 'receiver' is the JNI global handle the
// stub targets and 'frame_data_offset' locates the per-frame FrameData.
UpcallStub* UpcallStub::create(const char* name, CodeBuffer* cb, jobject receiver, ByteSize frame_data_offset) {
  ThreadInVMfromUnknown __tiv;  // get to VM state in case we block on CodeCache_lock

  UpcallStub* blob = nullptr;
  unsigned int size = CodeBlob::allocation_size(cb, sizeof(UpcallStub));
  {
    MutexLocker mu(CodeCache_lock, Mutex::_no_safepoint_check_flag);
    blob = new (size) UpcallStub(name, cb, size, receiver, frame_data_offset);
  }
  // Track memory usage statistic after releasing CodeCache_lock
  MemoryService::track_code_cache_memory_usage();

  trace_new_stub(blob, "UpcallStub");

  return blob;
}

// Applies 'f' to the oops in the handle block of the given upcall frame.
void UpcallStub::oops_do(OopClosure* f, const frame& frame) {
  frame_data_for_frame(frame)->old_handles->oops_do(f);
}

JavaFrameAnchor* UpcallStub::jfa_for_frame(const frame& frame) const {
  return &frame_data_for_frame(frame)->jfa;
}

// Frees the stub, first destroying the JNI global handle it owns.
void UpcallStub::free(UpcallStub* blob) {
  assert(blob != nullptr, "caller must check for nullptr");
  JNIHandles::destroy_global(blob->receiver());
  RuntimeBlob::free(blob);
}

void UpcallStub::preserve_callee_argument_oops(frame fr, const RegisterMap* reg_map, OopClosure* f) {
  ShouldNotReachHere(); // caller should never have to gc arguments
}

// Misc.
// No structural invariants are checked for UpcallStubs yet.
void UpcallStub::verify() {
  // unimplemented
}

// Prints the blob header, the one-line value description, and a disassembly
// of the stub's code to 'st'.
void UpcallStub::print_on(outputStream* st) const {
  RuntimeBlob::print_on(st);
  print_value_on(st);
  Disassembler::decode((RuntimeBlob*)this, st);
}

// One-line summary: address and name of this upcall stub.
void UpcallStub::print_value_on(outputStream* st) const {
  st->print_cr("UpcallStub (" INTPTR_FORMAT ") used for %s", p2i(this), name());
}