/*
 * Copyright (c) 1998, 2021, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#include "precompiled.hpp"
#include "jvm.h"
#include "code/codeBlob.hpp"
#include "code/codeCache.hpp"
#include "code/icBuffer.hpp"
#include "code/relocInfo.hpp"
#include "code/vtableStubs.hpp"
#include "compiler/disassembler.hpp"
#include "compiler/oopMap.hpp"
#include "interpreter/bytecode.hpp"
#include "interpreter/interpreter.hpp"
#include "memory/allocation.inline.hpp"
#include "memory/heap.hpp"
#include "memory/resourceArea.hpp"
#include "oops/oop.inline.hpp"
#include "prims/forte.hpp"
#include "prims/jvmtiExport.hpp"
#include "runtime/handles.inline.hpp"
#include "runtime/interfaceSupport.inline.hpp"
#include "runtime/javaFrameAnchor.hpp"
#include "runtime/jniHandles.hpp"
#include "runtime/mutexLocker.hpp"
#include "runtime/safepoint.hpp"
#include "runtime/sharedRuntime.hpp"
#include "runtime/stubCodeGenerator.hpp"
#include "runtime/stubRoutines.hpp"
#include "runtime/vframe.hpp"
#include "services/memoryService.hpp"
#include "utilities/align.hpp"
#ifdef COMPILER1
#include "c1/c1_Runtime1.hpp"
#endif

const char* CodeBlob::compiler_name() const {
  return compilertype2name(_type);
}

unsigned int CodeBlob::align_code_offset(int offset) {
  // align the size to CodeEntryAlignment
  int header_size = (int)CodeHeap::header_size();
  return align_up(offset + header_size, CodeEntryAlignment) - header_size;
}


// This must be consistent with the CodeBlob constructor's layout actions.
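// Roughly, the computed size lays the blob out as
//   [header | relocation info | code/content | oops | metadata]
// with the content start rounded to CodeEntryAlignment and each section
// padded to oopSize (illustrative summary of the additions below).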
unsigned int CodeBlob::allocation_size(CodeBuffer* cb, int header_size) {
  unsigned int size = header_size;
  size += align_up(cb->total_relocation_size(), oopSize);
  // align the size to CodeEntryAlignment
  size = align_code_offset(size);
  size += align_up(cb->total_content_size(), oopSize);
  size += align_up(cb->total_oop_size(), oopSize);
  size += align_up(cb->total_metadata_size(), oopSize);
  return size;
}

CodeBlob::CodeBlob(const char* name, CompilerType type, const CodeBlobLayout& layout, int frame_complete_offset, int frame_size, ImmutableOopMapSet* oop_maps, bool caller_must_gc_arguments) :
  _type(type),
  _size(layout.size()),
  _header_size(layout.header_size()),
  _frame_complete_offset(frame_complete_offset),
  _data_offset(layout.data_offset()),
  _frame_size(frame_size),
  _code_begin(layout.code_begin()),
  _code_end(layout.code_end()),
  _content_begin(layout.content_begin()),
  _data_end(layout.data_end()),
  _relocation_begin(layout.relocation_begin()),
  _relocation_end(layout.relocation_end()),
  _oop_maps(oop_maps),
  _caller_must_gc_arguments(caller_must_gc_arguments),
  _name(name)
{
  assert(is_aligned(layout.size(), oopSize), "unaligned size");
  assert(is_aligned(layout.header_size(), oopSize), "unaligned size");
  assert(is_aligned(layout.relocation_size(), oopSize), "unaligned size");
  assert(layout.code_end() == layout.content_end(), "must be the same - see code_end()");
#ifdef COMPILER1
  // probably wrong for tiered
  assert(_frame_size >= -1, "must use frame size or -1 for runtime stubs");
#endif // COMPILER1
  S390_ONLY(_ctable_offset = 0;) // avoid uninitialized fields
}

CodeBlob::CodeBlob(const char* name, CompilerType type, const CodeBlobLayout& layout, CodeBuffer* cb /*UNUSED*/, int frame_complete_offset, int frame_size, OopMapSet* oop_maps, bool caller_must_gc_arguments) :
  _type(type),
  _size(layout.size()),
  _header_size(layout.header_size()),
  _frame_complete_offset(frame_complete_offset),
  _data_offset(layout.data_offset()),
  _frame_size(frame_size),
  _code_begin(layout.code_begin()),
  _code_end(layout.code_end()),
  _content_begin(layout.content_begin()),
  _data_end(layout.data_end()),
  _relocation_begin(layout.relocation_begin()),
  _relocation_end(layout.relocation_end()),
  _caller_must_gc_arguments(caller_must_gc_arguments),
  _name(name)
{
  assert(is_aligned(_size, oopSize), "unaligned size");
  assert(is_aligned(_header_size, oopSize), "unaligned size");
  assert(_data_offset <= _size, "codeBlob is too small");
  assert(layout.code_end() == layout.content_end(), "must be the same - see code_end()");

  set_oop_maps(oop_maps);
#ifdef COMPILER1
  // probably wrong for tiered
  assert(_frame_size >= -1, "must use frame size or -1 for runtime stubs");
#endif // COMPILER1
  S390_ONLY(_ctable_offset = 0;) // avoid uninitialized fields
}


// Creates a simple CodeBlob. Sets up the size of the different regions.
RuntimeBlob::RuntimeBlob(const char* name, int header_size, int size, int frame_complete, int locs_size)
  : CodeBlob(name, compiler_none, CodeBlobLayout((address) this, size, header_size, locs_size, size), frame_complete, 0, NULL, false /* caller_must_gc_arguments */)
{
  assert(is_aligned(locs_size, oopSize), "unaligned size");
}


// Creates a RuntimeBlob from a CodeBuffer
// and copies code and relocation info.
RuntimeBlob::RuntimeBlob(
  const char* name,
  CodeBuffer* cb,
  int         header_size,
  int         size,
  int         frame_complete,
  int         frame_size,
  OopMapSet*  oop_maps,
  bool        caller_must_gc_arguments
) : CodeBlob(name, compiler_none, CodeBlobLayout((address) this, size, header_size, cb), cb, frame_complete, frame_size, oop_maps, caller_must_gc_arguments) {
  cb->copy_code_and_locs_to(this);
}

void CodeBlob::flush() {
  FREE_C_HEAP_ARRAY(unsigned char, _oop_maps);
  _oop_maps = NULL;
  NOT_PRODUCT(_asm_remarks.clear());
  NOT_PRODUCT(_dbg_strings.clear());
}

void CodeBlob::set_oop_maps(OopMapSet* p) {
  // Danger Will Robinson! This method allocates a big
  // chunk of memory, it's your job to free it.
  if (p != NULL) {
    _oop_maps = ImmutableOopMapSet::build_from(p);
  } else {
    _oop_maps = NULL;
  }
}


void RuntimeBlob::trace_new_stub(RuntimeBlob* stub, const char* name1, const char* name2) {
  // Do not hold the CodeCache lock during name formatting.
  assert(!CodeCache_lock->owned_by_self(), "release CodeCache before registering the stub");

  if (stub != NULL) {
    char stub_id[256];
    assert(strlen(name1) + strlen(name2) < sizeof(stub_id), "");
    jio_snprintf(stub_id, sizeof(stub_id), "%s%s", name1, name2);
    if (PrintStubCode) {
      ttyLocker ttyl;
      tty->print_cr("- - - [BEGIN] - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -");
      tty->print_cr("Decoding %s " INTPTR_FORMAT, stub_id, (intptr_t) stub);
      Disassembler::decode(stub->code_begin(), stub->code_end(), tty
                           NOT_PRODUCT(COMMA &stub->asm_remarks()));
      if ((stub->oop_maps() != NULL) && AbstractDisassembler::show_structs()) {
        tty->print_cr("- - - [OOP MAPS]- - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -");
        stub->oop_maps()->print();
      }
      tty->print_cr("- - - [END] - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -");
      tty->cr();
    }
    Forte::register_stub(stub_id, stub->code_begin(), stub->code_end());

    if (JvmtiExport::should_post_dynamic_code_generated()) {
      const char* stub_name = name2;
      if (name2[0] == '\0') stub_name = name1;
      JvmtiExport::post_dynamic_code_generated(stub_name, stub->code_begin(), stub->code_end());
    }
  }

  // Track memory usage statistic after releasing CodeCache_lock
  MemoryService::track_code_cache_memory_usage();
}

const ImmutableOopMap* CodeBlob::oop_map_for_return_address(address return_address) {
  assert(_oop_maps != NULL, "nope");
  return _oop_maps->find_map_at_offset((intptr_t) return_address - (intptr_t) code_begin());
}

void CodeBlob::print_code() {
  ResourceMark m;
  Disassembler::decode(this, tty);
}

//----------------------------------------------------------------------------------------------------
// Implementation of BufferBlob


BufferBlob::BufferBlob(const char* name, int size)
  : RuntimeBlob(name, sizeof(BufferBlob), size, CodeOffsets::frame_never_safe, /*locs_size:*/ 0)
{}

BufferBlob* BufferBlob::create(const char* name, int buffer_size) {
  ThreadInVMfromUnknown __tiv;  // get to VM state in case we block on CodeCache_lock

  BufferBlob* blob = NULL;
  unsigned int size = sizeof(BufferBlob);
  // align the size to CodeEntryAlignment
  size = CodeBlob::align_code_offset(size);
  size += align_up(buffer_size, oopSize);
  assert(name != NULL, "must provide a name");
  {
    MutexLocker mu(CodeCache_lock, Mutex::_no_safepoint_check_flag);
    blob = new (size) BufferBlob(name, size);
  }
  // Track memory usage statistic after releasing CodeCache_lock
  MemoryService::track_code_cache_memory_usage();

  return blob;
}


BufferBlob::BufferBlob(const char* name, int size, CodeBuffer* cb)
  : RuntimeBlob(name, cb, sizeof(BufferBlob), size, CodeOffsets::frame_never_safe, 0, NULL)
{}

BufferBlob* BufferBlob::create(const char* name, CodeBuffer* cb) {
  ThreadInVMfromUnknown __tiv;  // get to VM state in case we block on CodeCache_lock

  BufferBlob* blob = NULL;
  unsigned int size = CodeBlob::allocation_size(cb, sizeof(BufferBlob));
  assert(name != NULL, "must provide a name");
  {
    MutexLocker mu(CodeCache_lock, Mutex::_no_safepoint_check_flag);
    blob = new (size) BufferBlob(name, size, cb);
  }
  // Track memory usage statistic after releasing CodeCache_lock
  MemoryService::track_code_cache_memory_usage();

  return blob;
}

void* BufferBlob::operator new(size_t s, unsigned size) throw() {
  return CodeCache::allocate(size, CodeBlobType::NonNMethod);
}

void BufferBlob::free(BufferBlob *blob) {
  assert(blob != NULL, "caller must check for NULL");
  ThreadInVMfromUnknown __tiv;  // get to VM state in case we block on CodeCache_lock
  blob->flush();
  {
    MutexLocker mu(CodeCache_lock, Mutex::_no_safepoint_check_flag);
    CodeCache::free((RuntimeBlob*)blob);
  }
  // Track memory usage statistic after releasing CodeCache_lock
  MemoryService::track_code_cache_memory_usage();
}


//----------------------------------------------------------------------------------------------------
// Implementation of AdapterBlob

AdapterBlob::AdapterBlob(int size, CodeBuffer* cb) :
  BufferBlob("I2C/C2I adapters", size, cb) {
  CodeCache::commit(this);
}

AdapterBlob* AdapterBlob::create(CodeBuffer* cb) {
  ThreadInVMfromUnknown __tiv;  // get to VM state in case we block on CodeCache_lock

  AdapterBlob* blob = NULL;
  unsigned int size = CodeBlob::allocation_size(cb, sizeof(AdapterBlob));
  {
    MutexLocker mu(CodeCache_lock, Mutex::_no_safepoint_check_flag);
    blob = new (size) AdapterBlob(size, cb);
  }
  // Track memory usage statistic after releasing CodeCache_lock
  MemoryService::track_code_cache_memory_usage();

  return blob;
}

void* VtableBlob::operator new(size_t s, unsigned size) throw() {
  // Handling of allocation failure stops compilation and prints a bunch of
  // stuff, which requires unlocking the CodeCache_lock, so that the Compile_lock
  // can be locked, and then re-locking the CodeCache_lock. That is not safe in
  // this context as we hold the CompiledICLocker. So we just don't handle code
  // cache exhaustion here; we leave that for a later allocation that does not
  // hold the CompiledICLocker.
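  // As a consequence, allocation may simply return NULL here on exhaustion,
  // so callers of VtableBlob::create() must be prepared for a NULL result.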
  return CodeCache::allocate(size, CodeBlobType::NonNMethod, false /* handle_alloc_failure */);
}

VtableBlob::VtableBlob(const char* name, int size) :
  BufferBlob(name, size) {
}

VtableBlob* VtableBlob::create(const char* name, int buffer_size) {
  assert(JavaThread::current()->thread_state() == _thread_in_vm, "called with the wrong state");

  VtableBlob* blob = NULL;
  unsigned int size = sizeof(VtableBlob);
  // align the size to CodeEntryAlignment
  size = align_code_offset(size);
  size += align_up(buffer_size, oopSize);
  assert(name != NULL, "must provide a name");
  {
    if (!CodeCache_lock->try_lock()) {
      // If we can't take the CodeCache_lock, then this is a bad time to perform the ongoing
      // IC transition to megamorphic, for which this stub will be needed. It is better to
      // bail out of the transition and wait for a more opportune moment. Not only is it not
      // worth blocking on the lock for the megamorphic transition; blocking could also
      // deadlock when concurrent class unloading is performed. At this point in time, the
      // CompiledICLocker is taken, so we are not allowed to block on the CodeCache_lock,
      // as these two locks are otherwise consistently taken in the opposite order. Bailing
      // out results in an IC transition to the clean state instead, which will cause
      // subsequent calls to retry the transition eventually.
      return NULL;
    }
    blob = new (size) VtableBlob(name, size);
    CodeCache_lock->unlock();
  }
  // Track memory usage statistic after releasing CodeCache_lock
  MemoryService::track_code_cache_memory_usage();

  return blob;
}

//----------------------------------------------------------------------------------------------------
// Implementation of MethodHandlesAdapterBlob

MethodHandlesAdapterBlob* MethodHandlesAdapterBlob::create(int buffer_size) {
  ThreadInVMfromUnknown __tiv;  // get to VM state in case we block on CodeCache_lock

  MethodHandlesAdapterBlob* blob = NULL;
  unsigned int size = sizeof(MethodHandlesAdapterBlob);
  // align the size to CodeEntryAlignment
  size = CodeBlob::align_code_offset(size);
  size += align_up(buffer_size, oopSize);
  {
    MutexLocker mu(CodeCache_lock, Mutex::_no_safepoint_check_flag);
    blob = new (size) MethodHandlesAdapterBlob(size);
    if (blob == NULL) {
      vm_exit_out_of_memory(size, OOM_MALLOC_ERROR, "CodeCache: no room for method handle adapter blob");
    }
  }
  // Track memory usage statistic after releasing CodeCache_lock
  MemoryService::track_code_cache_memory_usage();

  return blob;
}

//----------------------------------------------------------------------------------------------------
// Implementation of RuntimeStub

RuntimeStub::RuntimeStub(
  const char* name,
  CodeBuffer* cb,
  int         size,
  int         frame_complete,
  int         frame_size,
  OopMapSet*  oop_maps,
  bool        caller_must_gc_arguments
)
: RuntimeBlob(name, cb, sizeof(RuntimeStub), size, frame_complete, frame_size, oop_maps, caller_must_gc_arguments)
{
}

RuntimeStub* RuntimeStub::new_runtime_stub(const char* stub_name,
                                           CodeBuffer* cb,
                                           int frame_complete,
                                           int frame_size,
                                           OopMapSet* oop_maps,
                                           bool caller_must_gc_arguments)
{
  RuntimeStub* stub = NULL;
  ThreadInVMfromUnknown __tiv;  // get to VM state in case we block on CodeCache_lock
  {
    MutexLocker mu(CodeCache_lock, Mutex::_no_safepoint_check_flag);
    unsigned int size = CodeBlob::allocation_size(cb, sizeof(RuntimeStub));
    stub = new (size) RuntimeStub(stub_name, cb, size, frame_complete, frame_size, oop_maps, caller_must_gc_arguments);
  }

  trace_new_stub(stub, "RuntimeStub - ", stub_name);

  return stub;
}


void* RuntimeStub::operator new(size_t s, unsigned size) throw() {
  void* p = CodeCache::allocate(size, CodeBlobType::NonNMethod);
  if (!p) fatal("Initial size of CodeCache is too small");
  return p;
}

// operator new shared by all singletons:
void* SingletonBlob::operator new(size_t s, unsigned size) throw() {
  void* p = CodeCache::allocate(size, CodeBlobType::NonNMethod);
  if (!p) fatal("Initial size of CodeCache is too small");
  return p;
}


//----------------------------------------------------------------------------------------------------
// Implementation of DeoptimizationBlob

DeoptimizationBlob::DeoptimizationBlob(
  CodeBuffer* cb,
  int         size,
  OopMapSet*  oop_maps,
  int         unpack_offset,
  int         unpack_with_exception_offset,
  int         unpack_with_reexecution_offset,
  int         frame_size
)
: SingletonBlob("DeoptimizationBlob", cb, sizeof(DeoptimizationBlob), size, frame_size, oop_maps)
{
  _unpack_offset           = unpack_offset;
  _unpack_with_exception   = unpack_with_exception_offset;
  _unpack_with_reexecution = unpack_with_reexecution_offset;
#ifdef COMPILER1
  _unpack_with_exception_in_tls = -1;
#endif
}


DeoptimizationBlob* DeoptimizationBlob::create(
  CodeBuffer* cb,
  OopMapSet*  oop_maps,
  int         unpack_offset,
  int         unpack_with_exception_offset,
  int         unpack_with_reexecution_offset,
  int         frame_size)
{
  DeoptimizationBlob* blob = NULL;
  ThreadInVMfromUnknown __tiv;  // get to VM state in case we block on CodeCache_lock
  {
    MutexLocker mu(CodeCache_lock, Mutex::_no_safepoint_check_flag);
    unsigned int size = CodeBlob::allocation_size(cb, sizeof(DeoptimizationBlob));
    blob = new (size) DeoptimizationBlob(cb,
                                         size,
                                         oop_maps,
                                         unpack_offset,
                                         unpack_with_exception_offset,
                                         unpack_with_reexecution_offset,
                                         frame_size);
  }

  trace_new_stub(blob, "DeoptimizationBlob");

  return blob;
}


//----------------------------------------------------------------------------------------------------
// Implementation of UncommonTrapBlob

#ifdef COMPILER2
UncommonTrapBlob::UncommonTrapBlob(
  CodeBuffer* cb,
  int         size,
  OopMapSet*  oop_maps,
  int         frame_size
)
: SingletonBlob("UncommonTrapBlob", cb, sizeof(UncommonTrapBlob), size, frame_size, oop_maps)
{}


UncommonTrapBlob* UncommonTrapBlob::create(
  CodeBuffer* cb,
  OopMapSet*  oop_maps,
  int         frame_size)
{
  UncommonTrapBlob* blob = NULL;
  ThreadInVMfromUnknown __tiv;  // get to VM state in case we block on CodeCache_lock
  {
    MutexLocker mu(CodeCache_lock, Mutex::_no_safepoint_check_flag);
    unsigned int size = CodeBlob::allocation_size(cb, sizeof(UncommonTrapBlob));
    blob = new (size) UncommonTrapBlob(cb, size, oop_maps, frame_size);
  }

  trace_new_stub(blob, "UncommonTrapBlob");

  return blob;
}


#endif // COMPILER2


//----------------------------------------------------------------------------------------------------
// Implementation of ExceptionBlob

#ifdef COMPILER2
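// The ExceptionBlob is the singleton entry through which C2-compiled code
// dispatches a pending exception to the runtime.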
ExceptionBlob::ExceptionBlob(
  CodeBuffer* cb,
  int         size,
  OopMapSet*  oop_maps,
  int         frame_size
)
: SingletonBlob("ExceptionBlob", cb, sizeof(ExceptionBlob), size, frame_size, oop_maps)
{}


ExceptionBlob* ExceptionBlob::create(
  CodeBuffer* cb,
  OopMapSet*  oop_maps,
  int         frame_size)
{
  ExceptionBlob* blob = NULL;
  ThreadInVMfromUnknown __tiv;  // get to VM state in case we block on CodeCache_lock
  {
    MutexLocker mu(CodeCache_lock, Mutex::_no_safepoint_check_flag);
    unsigned int size = CodeBlob::allocation_size(cb, sizeof(ExceptionBlob));
    blob = new (size) ExceptionBlob(cb, size, oop_maps, frame_size);
  }

  trace_new_stub(blob, "ExceptionBlob");

  return blob;
}


#endif // COMPILER2


//----------------------------------------------------------------------------------------------------
// Implementation of SafepointBlob

SafepointBlob::SafepointBlob(
  CodeBuffer* cb,
  int         size,
  OopMapSet*  oop_maps,
  int         frame_size
)
: SingletonBlob("SafepointBlob", cb, sizeof(SafepointBlob), size, frame_size, oop_maps)
{}


SafepointBlob* SafepointBlob::create(
  CodeBuffer* cb,
  OopMapSet*  oop_maps,
  int         frame_size)
{
  SafepointBlob* blob = NULL;
  ThreadInVMfromUnknown __tiv;  // get to VM state in case we block on CodeCache_lock
  {
    MutexLocker mu(CodeCache_lock, Mutex::_no_safepoint_check_flag);
    unsigned int size = CodeBlob::allocation_size(cb, sizeof(SafepointBlob));
    blob = new (size) SafepointBlob(cb, size, oop_maps, frame_size);
  }

  trace_new_stub(blob, "SafepointBlob");

  return blob;
}


//----------------------------------------------------------------------------------------------------
// Verification and printing

void CodeBlob::print_on(outputStream* st) const {
  st->print_cr("[CodeBlob (" INTPTR_FORMAT ")]", p2i(this));
  st->print_cr("Framesize: %d", _frame_size);
}

void CodeBlob::print() const { print_on(tty); }

void CodeBlob::print_value_on(outputStream* st) const {
  st->print_cr("[CodeBlob]");
}

void CodeBlob::dump_for_addr(address addr, outputStream* st, bool verbose) const {
  if (is_buffer_blob()) {
    // the interpreter is generated into a buffer blob
    InterpreterCodelet* i = Interpreter::codelet_containing(addr);
    if (i != NULL) {
      st->print_cr(INTPTR_FORMAT " is at code_begin+%d in an Interpreter codelet", p2i(addr), (int)(addr - i->code_begin()));
      i->print_on(st);
      return;
    }
    if (Interpreter::contains(addr)) {
      st->print_cr(INTPTR_FORMAT " is pointing into interpreter code"
                   " (not bytecode specific)", p2i(addr));
      return;
    }
    //
    if (AdapterHandlerLibrary::contains(this)) {
      st->print_cr(INTPTR_FORMAT " is at code_begin+%d in an AdapterHandler", p2i(addr), (int)(addr - code_begin()));
      AdapterHandlerLibrary::print_handler_on(st, this);
    }
    // the stubroutines are generated into a buffer blob
    StubCodeDesc* d = StubCodeDesc::desc_for(addr);
    if (d != NULL) {
      st->print_cr(INTPTR_FORMAT " is at begin+%d in a stub", p2i(addr), (int)(addr - d->begin()));
      d->print_on(st);
      st->cr();
      return;
    }
    if (StubRoutines::contains(addr)) {
      st->print_cr(INTPTR_FORMAT " is pointing to an (unnamed) stub routine", p2i(addr));
      return;
    }
    // the InlineCacheBuffer is using stubs generated into a buffer blob
    if (InlineCacheBuffer::contains(addr)) {
      st->print_cr(INTPTR_FORMAT " is pointing into InlineCacheBuffer", p2i(addr));
      return;
    }
    VtableStub* v = VtableStubs::stub_containing(addr);
    if (v != NULL) {
      st->print_cr(INTPTR_FORMAT " is at entry_point+%d in a vtable stub", p2i(addr), (int)(addr - v->entry_point()));
      v->print_on(st);
      st->cr();
      return;
    }
  }
  if (is_nmethod()) {
    nmethod* nm = (nmethod*)this;
    ResourceMark rm;
    st->print(INTPTR_FORMAT " is at entry_point+%d in (nmethod*)" INTPTR_FORMAT,
              p2i(addr), (int)(addr - nm->entry_point()), p2i(nm));
    if (verbose) {
      st->print(" for ");
      nm->method()->print_value_on(st);
    }
    st->cr();
    if (verbose && st == tty) {
      // verbose is only ever true when called from findpc in debug.cpp
      nm->print_nmethod(true);
    } else {
      nm->print(st);
    }
    return;
  }
  st->print_cr(INTPTR_FORMAT " is at code_begin+%d in ", p2i(addr), (int)(addr - code_begin()));
  print_on(st);
}

void RuntimeBlob::verify() {
  ShouldNotReachHere();
}

void BufferBlob::verify() {
  // unimplemented
}

void BufferBlob::print_on(outputStream* st) const {
  RuntimeBlob::print_on(st);
  print_value_on(st);
}

void BufferBlob::print_value_on(outputStream* st) const {
  st->print_cr("BufferBlob (" INTPTR_FORMAT ") used for %s", p2i(this), name());
}

void RuntimeStub::verify() {
  // unimplemented
}

void RuntimeStub::print_on(outputStream* st) const {
  ttyLocker ttyl;
  RuntimeBlob::print_on(st);
  st->print("Runtime Stub (" INTPTR_FORMAT "): ", p2i(this));
  st->print_cr("%s", name());
  Disassembler::decode((RuntimeBlob*)this, st);
}

void RuntimeStub::print_value_on(outputStream* st) const {
  st->print("RuntimeStub (" INTPTR_FORMAT "): ", p2i(this)); st->print("%s", name());
}

void SingletonBlob::verify() {
  // unimplemented
}

void SingletonBlob::print_on(outputStream* st) const {
  ttyLocker ttyl;
  RuntimeBlob::print_on(st);
  st->print_cr("%s", name());
  Disassembler::decode((RuntimeBlob*)this, st);
}

void SingletonBlob::print_value_on(outputStream* st) const {
  st->print_cr("%s", name());
}

void DeoptimizationBlob::print_value_on(outputStream* st) const {
  st->print_cr("Deoptimization (frame not available)");
}

// Implementation of OptimizedEntryBlob

OptimizedEntryBlob::OptimizedEntryBlob(const char* name, int size, CodeBuffer* cb, intptr_t exception_handler_offset,
                                       jobject receiver, ByteSize frame_data_offset) :
  BufferBlob(name, size, cb),
  _exception_handler_offset(exception_handler_offset),
  _receiver(receiver),
  _frame_data_offset(frame_data_offset) {
  CodeCache::commit(this);
}

OptimizedEntryBlob* OptimizedEntryBlob::create(const char* name, CodeBuffer* cb, intptr_t exception_handler_offset,
                                               jobject receiver, ByteSize frame_data_offset) {
  ThreadInVMfromUnknown __tiv;  // get to VM state in case we block on CodeCache_lock

  OptimizedEntryBlob* blob = nullptr;
  unsigned int size = CodeBlob::allocation_size(cb, sizeof(OptimizedEntryBlob));
  {
    MutexLocker mu(CodeCache_lock, Mutex::_no_safepoint_check_flag);
    blob = new (size) OptimizedEntryBlob(name, size, cb, exception_handler_offset, receiver, frame_data_offset);
  }
  // Track memory usage statistic after releasing CodeCache_lock
  MemoryService::track_code_cache_memory_usage();

  return blob;
}

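// GC support: visit the oops held in this frame's FrameData handle block.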
void OptimizedEntryBlob::oops_do(OopClosure* f, const frame& frame) {
  frame_data_for_frame(frame)->old_handles->oops_do(f);
}

JavaFrameAnchor* OptimizedEntryBlob::jfa_for_frame(const frame& frame) const {
  return &frame_data_for_frame(frame)->jfa;
}