/*
 * Copyright (c) 1998, 2023, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#include "precompiled.hpp"
#include "code/codeBlob.hpp"
#include "code/codeCache.hpp"
#include "code/icBuffer.hpp"
#include "code/relocInfo.hpp"
#include "code/vtableStubs.hpp"
#include "compiler/disassembler.hpp"
#include "compiler/oopMap.hpp"
#include "interpreter/bytecode.hpp"
#include "interpreter/interpreter.hpp"
#include "jvm.h"
#include "memory/allocation.inline.hpp"
#include "memory/heap.hpp"
#include "memory/resourceArea.hpp"
#include "oops/oop.inline.hpp"
#include "prims/forte.hpp"
#include "prims/jvmtiExport.hpp"
#include "runtime/handles.inline.hpp"
#include "runtime/interfaceSupport.inline.hpp"
#include "runtime/javaFrameAnchor.hpp"
#include "runtime/jniHandles.hpp"
#include "runtime/mutexLocker.hpp"
#include "runtime/safepoint.hpp"
#include "runtime/sharedRuntime.hpp"
#include "runtime/stubCodeGenerator.hpp"
#include "runtime/stubRoutines.hpp"
#include "runtime/vframe.hpp"
#include "services/memoryService.hpp"
#include "utilities/align.hpp"
#ifdef COMPILER1
#include "c1/c1_Runtime1.hpp"
#endif

const char* CodeBlob::compiler_name() const {
  return compilertype2name(_type);
}

unsigned int CodeBlob::align_code_offset(int offset) {
  // align the size to CodeEntryAlignment
  int header_size = (int)CodeHeap::header_size();
  return align_up(offset + header_size, CodeEntryAlignment) - header_size;
}
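// A sketch of the layout computed by allocation_size() below (inferred from
// its arithmetic; not a normative diagram):
//
//   [ header | relocation info | content (code) | oops | metadata ]
//
// Each region is padded to oopSize, and the content offset is rounded up so
// that code begins at a CodeEntryAlignment boundary within the enclosing
// CodeHeap block (see align_code_offset above).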
// This must be consistent with the CodeBlob constructor's layout actions.
unsigned int CodeBlob::allocation_size(CodeBuffer* cb, int header_size) {
  unsigned int size = header_size;
  size += align_up(cb->total_relocation_size(), oopSize);
  // align the size to CodeEntryAlignment
  size = align_code_offset(size);
  size += align_up(cb->total_content_size(), oopSize);
  size += align_up(cb->total_oop_size(), oopSize);
  size += align_up(cb->total_metadata_size(), oopSize);
  return size;
}

CodeBlob::CodeBlob(const char* name, CompilerType type, const CodeBlobLayout& layout, int frame_complete_offset, int frame_size, ImmutableOopMapSet* oop_maps, bool caller_must_gc_arguments, bool compiled) :
  _code_begin(layout.code_begin()),
  _code_end(layout.code_end()),
  _content_begin(layout.content_begin()),
  _data_end(layout.data_end()),
  _relocation_begin(layout.relocation_begin()),
  _relocation_end(layout.relocation_end()),
  _oop_maps(oop_maps),
  _name(name),
  _size(layout.size()),
  _header_size(layout.header_size()),
  _frame_complete_offset(frame_complete_offset),
  _data_offset(layout.data_offset()),
  _frame_size(frame_size),
  _caller_must_gc_arguments(caller_must_gc_arguments),
  _is_compiled(compiled),
  _type(type)
{
  assert(is_aligned(layout.size(), oopSize), "unaligned size");
  assert(is_aligned(layout.header_size(), oopSize), "unaligned size");
  assert(is_aligned(layout.relocation_size(), oopSize), "unaligned size");
  assert(layout.code_end() == layout.content_end(), "must be the same - see code_end()");
#ifdef COMPILER1
  // probably wrong for tiered
  assert(_frame_size >= -1, "must use frame size or -1 for runtime stubs");
#endif // COMPILER1
  S390_ONLY(_ctable_offset = 0;) // avoid uninitialized fields
}

CodeBlob::CodeBlob(const char* name, CompilerType type, const CodeBlobLayout& layout, CodeBuffer* cb /*UNUSED*/, int frame_complete_offset, int frame_size, OopMapSet* oop_maps, bool caller_must_gc_arguments, bool compiled) :
  _code_begin(layout.code_begin()),
  _code_end(layout.code_end()),
  _content_begin(layout.content_begin()),
  _data_end(layout.data_end()),
  _relocation_begin(layout.relocation_begin()),
  _relocation_end(layout.relocation_end()),
  _name(name),
  _size(layout.size()),
  _header_size(layout.header_size()),
  _frame_complete_offset(frame_complete_offset),
  _data_offset(layout.data_offset()),
  _frame_size(frame_size),
  _caller_must_gc_arguments(caller_must_gc_arguments),
  _is_compiled(compiled),
  _type(type)
{
  assert(is_aligned(_size, oopSize), "unaligned size");
  assert(is_aligned(_header_size, oopSize), "unaligned size");
  assert(_data_offset <= _size, "codeBlob is too small");
  assert(layout.code_end() == layout.content_end(), "must be the same - see code_end()");

  set_oop_maps(oop_maps);
#ifdef COMPILER1
  // probably wrong for tiered
  assert(_frame_size >= -1, "must use frame size or -1 for runtime stubs");
#endif // COMPILER1
  S390_ONLY(_ctable_offset = 0;) // avoid uninitialized fields
}
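// Note: the first constructor above receives a pre-built ImmutableOopMapSet
// and stores it directly; the second takes a mutable OopMapSet and converts
// it through set_oop_maps() (below), which allocates the immutable copy.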
// Creates a simple CodeBlob. Sets up the size of the different regions.
RuntimeBlob::RuntimeBlob(const char* name, int header_size, int size, int frame_complete, int locs_size)
  : CodeBlob(name, compiler_none, CodeBlobLayout((address) this, size, header_size, locs_size, size), frame_complete, 0, nullptr, false /* caller_must_gc_arguments */)
{
  assert(is_aligned(locs_size, oopSize), "unaligned size");
}


// Creates a RuntimeBlob from a CodeBuffer
// and copies the code and relocation info.
RuntimeBlob::RuntimeBlob(
  const char* name,
  CodeBuffer* cb,
  int         header_size,
  int         size,
  int         frame_complete,
  int         frame_size,
  OopMapSet*  oop_maps,
  bool        caller_must_gc_arguments
) : CodeBlob(name, compiler_none, CodeBlobLayout((address) this, size, header_size, cb), cb, frame_complete, frame_size, oop_maps, caller_must_gc_arguments) {
  cb->copy_code_and_locs_to(this);
}

void RuntimeBlob::free(RuntimeBlob* blob) {
  assert(blob != nullptr, "caller must check for nullptr");
  ThreadInVMfromUnknown __tiv;  // get to VM state in case we block on CodeCache_lock
  blob->flush();
  {
    MutexLocker mu(CodeCache_lock, Mutex::_no_safepoint_check_flag);
    CodeCache::free(blob);
  }
  // Track memory usage statistic after releasing CodeCache_lock
  MemoryService::track_code_cache_memory_usage();
}

void CodeBlob::flush() {
  FREE_C_HEAP_ARRAY(unsigned char, _oop_maps);
  _oop_maps = nullptr;
  NOT_PRODUCT(_asm_remarks.clear());
  NOT_PRODUCT(_dbg_strings.clear());
}

void CodeBlob::set_oop_maps(OopMapSet* p) {
  // Danger Will Robinson! This method allocates a big
  // chunk of memory, it's your job to free it.
  if (p != nullptr) {
    _oop_maps = ImmutableOopMapSet::build_from(p);
  } else {
    _oop_maps = nullptr;
  }
}
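// Allocation/release usage sketch (hypothetical caller; names are
// illustrative, not taken from this file). Blobs live in the code cache and
// must be released through the matching free(), never via 'delete':
//
//   BufferBlob* scratch = BufferBlob::create("scratch blob", 4 * K);
//   if (scratch != nullptr) {
//     // ... emit code between scratch->code_begin() and scratch->code_end() ...
//     BufferBlob::free(scratch);
//   }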
void RuntimeBlob::trace_new_stub(RuntimeBlob* stub, const char* name1, const char* name2) {
  // Do not hold the CodeCache lock during name formatting.
  assert(!CodeCache_lock->owned_by_self(), "release CodeCache before registering the stub");

  if (stub != nullptr && (PrintStubCode ||
                          Forte::is_enabled() ||
                          JvmtiExport::should_post_dynamic_code_generated())) {
    char stub_id[256];
    assert(strlen(name1) + strlen(name2) < sizeof(stub_id), "");
    jio_snprintf(stub_id, sizeof(stub_id), "%s%s", name1, name2);
    if (PrintStubCode) {
      ttyLocker ttyl;
      tty->print_cr("- - - [BEGIN] - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -");
      tty->print_cr("Decoding %s " INTPTR_FORMAT, stub_id, (intptr_t) stub);
      Disassembler::decode(stub->code_begin(), stub->code_end(), tty
                           NOT_PRODUCT(COMMA &stub->asm_remarks()));
      if ((stub->oop_maps() != nullptr) && AbstractDisassembler::show_structs()) {
        tty->print_cr("- - - [OOP MAPS]- - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -");
        stub->oop_maps()->print();
      }
      tty->print_cr("- - - [END] - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -");
      tty->cr();
    }
    if (Forte::is_enabled()) {
      Forte::register_stub(stub_id, stub->code_begin(), stub->code_end());
    }

    if (JvmtiExport::should_post_dynamic_code_generated()) {
      const char* stub_name = name2;
      if (name2[0] == '\0') stub_name = name1;
      JvmtiExport::post_dynamic_code_generated(stub_name, stub->code_begin(), stub->code_end());
    }
  }

  // Track memory usage statistic after releasing CodeCache_lock
  MemoryService::track_code_cache_memory_usage();
}

const ImmutableOopMap* CodeBlob::oop_map_for_return_address(address return_address) const {
  assert(_oop_maps != nullptr, "nope");
  return _oop_maps->find_map_at_offset((intptr_t) return_address - (intptr_t) code_begin());
}

void CodeBlob::print_code() {
  ResourceMark m;
  Disassembler::decode(this, tty);
}

//----------------------------------------------------------------------------------------------------
// Implementation of BufferBlob


BufferBlob::BufferBlob(const char* name, int size)
  : RuntimeBlob(name, sizeof(BufferBlob), size, CodeOffsets::frame_never_safe, /*locs_size:*/ 0)
{}

BufferBlob* BufferBlob::create(const char* name, int buffer_size) {
  ThreadInVMfromUnknown __tiv;  // get to VM state in case we block on CodeCache_lock

  BufferBlob* blob = nullptr;
  unsigned int size = sizeof(BufferBlob);
  // align the size to CodeEntryAlignment
  size = CodeBlob::align_code_offset(size);
  size += align_up(buffer_size, oopSize);
  assert(name != nullptr, "must provide a name");
  {
    MutexLocker mu(CodeCache_lock, Mutex::_no_safepoint_check_flag);
    blob = new (size) BufferBlob(name, size);
  }
  // Track memory usage statistic after releasing CodeCache_lock
  MemoryService::track_code_cache_memory_usage();

  return blob;
}
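// A BufferBlob can also be built from a filled CodeBuffer; the RuntimeBlob
// constructor then copies the generated code and relocation info into the
// blob (see RuntimeBlob above).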
BufferBlob::BufferBlob(const char* name, int header_size, int size, CodeBuffer* cb)
  : RuntimeBlob(name, cb, header_size, size, CodeOffsets::frame_never_safe, 0, nullptr)
{}

BufferBlob* BufferBlob::create(const char* name, CodeBuffer* cb) {
  ThreadInVMfromUnknown __tiv;  // get to VM state in case we block on CodeCache_lock

  BufferBlob* blob = nullptr;
  unsigned int size = CodeBlob::allocation_size(cb, sizeof(BufferBlob));
  assert(name != nullptr, "must provide a name");
  {
    MutexLocker mu(CodeCache_lock, Mutex::_no_safepoint_check_flag);
    blob = new (size) BufferBlob(name, sizeof(BufferBlob), size, cb);
  }
  // Track memory usage statistic after releasing CodeCache_lock
  MemoryService::track_code_cache_memory_usage();

  return blob;
}

void* BufferBlob::operator new(size_t s, unsigned size) throw() {
  return CodeCache::allocate(size, CodeBlobType::NonNMethod);
}

void BufferBlob::free(BufferBlob *blob) {
  RuntimeBlob::free(blob);
}

BufferBlob::BufferBlob(const char* name, int size, CodeBuffer* cb, int frame_complete, int frame_size, OopMapSet* oop_maps, bool caller_must_gc_arguments)
  : RuntimeBlob(name, cb, sizeof(BufferBlob), size, frame_complete, frame_size, oop_maps, caller_must_gc_arguments)
{}


//----------------------------------------------------------------------------------------------------
// Implementation of AdapterBlob

AdapterBlob::AdapterBlob(int size, CodeBuffer* cb, int frame_complete, int frame_size, OopMapSet* oop_maps, bool caller_must_gc_arguments) :
  BufferBlob("I2C/C2I adapters", size, cb, frame_complete, frame_size, oop_maps, caller_must_gc_arguments) {
  CodeCache::commit(this);
}

AdapterBlob* AdapterBlob::create(CodeBuffer* cb, int frame_complete, int frame_size, OopMapSet* oop_maps, bool caller_must_gc_arguments) {
  ThreadInVMfromUnknown __tiv;  // get to VM state in case we block on CodeCache_lock

  CodeCache::gc_on_allocation();

  AdapterBlob* blob = nullptr;
  unsigned int size = CodeBlob::allocation_size(cb, sizeof(AdapterBlob));
  {
    MutexLocker mu(CodeCache_lock, Mutex::_no_safepoint_check_flag);
    blob = new (size) AdapterBlob(size, cb, frame_complete, frame_size, oop_maps, caller_must_gc_arguments);
  }
  // Track memory usage statistic after releasing CodeCache_lock
  MemoryService::track_code_cache_memory_usage();

  return blob;
}
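//----------------------------------------------------------------------------------------------------
// Implementation of VtableBlob
//
// VtableBlob is a BufferBlob variant whose allocation must neither block nor
// handle code cache exhaustion, because it is created while the
// CompiledICLocker is held (see the comments in operator new and create below).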
void* VtableBlob::operator new(size_t s, unsigned size) throw() {
  // Handling of allocation failure stops compilation and prints a bunch of
  // stuff, which requires unlocking the CodeCache_lock, so that the Compile_lock
  // can be locked, and then re-locking the CodeCache_lock. That is not safe in
  // this context as we hold the CompiledICLocker. So we just don't handle code
  // cache exhaustion here; we leave that for a later allocation that does not
  // hold the CompiledICLocker.
  return CodeCache::allocate(size, CodeBlobType::NonNMethod, false /* handle_alloc_failure */);
}

VtableBlob::VtableBlob(const char* name, int size) :
  BufferBlob(name, size) {
}

VtableBlob* VtableBlob::create(const char* name, int buffer_size) {
  assert(JavaThread::current()->thread_state() == _thread_in_vm, "called with the wrong state");

  VtableBlob* blob = nullptr;
  unsigned int size = sizeof(VtableBlob);
  // align the size to CodeEntryAlignment
  size = align_code_offset(size);
  size += align_up(buffer_size, oopSize);
  assert(name != nullptr, "must provide a name");
  {
    if (!CodeCache_lock->try_lock()) {
      // If we can't take the CodeCache_lock, then this is a bad time to perform the ongoing
      // IC transition to megamorphic, for which this stub will be needed. It is better to
      // bail out the transition, and wait for a more opportune moment. Not only is it not
      // worth blocking on the lock for the megamorphic transition; blocking might also
      // result in a deadlock when concurrent class unloading is performed. At this point
      // in time, the CompiledICLocker is taken, so we are not allowed to blockingly wait
      // for the CodeCache_lock, as these two locks are otherwise consistently taken in
      // the opposite order. Bailing out results in an IC transition to the clean state
      // instead, which will cause subsequent calls to retry the transition eventually.
      return nullptr;
    }
    blob = new (size) VtableBlob(name, size);
    CodeCache_lock->unlock();
  }
  // Track memory usage statistic after releasing CodeCache_lock
  MemoryService::track_code_cache_memory_usage();

  return blob;
}

//----------------------------------------------------------------------------------------------------
// Implementation of MethodHandlesAdapterBlob

MethodHandlesAdapterBlob* MethodHandlesAdapterBlob::create(int buffer_size) {
  ThreadInVMfromUnknown __tiv;  // get to VM state in case we block on CodeCache_lock

  MethodHandlesAdapterBlob* blob = nullptr;
  unsigned int size = sizeof(MethodHandlesAdapterBlob);
  // align the size to CodeEntryAlignment
  size = CodeBlob::align_code_offset(size);
  size += align_up(buffer_size, oopSize);
  {
    MutexLocker mu(CodeCache_lock, Mutex::_no_safepoint_check_flag);
    blob = new (size) MethodHandlesAdapterBlob(size);
    if (blob == nullptr) {
      vm_exit_out_of_memory(size, OOM_MALLOC_ERROR, "CodeCache: no room for method handle adapter blob");
    }
  }
  // Track memory usage statistic after releasing CodeCache_lock
  MemoryService::track_code_cache_memory_usage();

  return blob;
}
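// Note the contrast with BufferBlob::create above: running out of code cache
// space here is fatal (vm_exit_out_of_memory) rather than being reported to
// the caller as nullptr.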
//----------------------------------------------------------------------------------------------------
// Implementation of BufferedInlineTypeBlob
BufferedInlineTypeBlob::BufferedInlineTypeBlob(int size, CodeBuffer* cb, int pack_fields_off, int pack_fields_jobject_off, int unpack_fields_off) :
  BufferBlob("buffered inline type", sizeof(BufferedInlineTypeBlob), size, cb),
  _pack_fields_off(pack_fields_off),
  _pack_fields_jobject_off(pack_fields_jobject_off),
  _unpack_fields_off(unpack_fields_off) {
  CodeCache::commit(this);
}

BufferedInlineTypeBlob* BufferedInlineTypeBlob::create(CodeBuffer* cb, int pack_fields_off, int pack_fields_jobject_off, int unpack_fields_off) {
  ThreadInVMfromUnknown __tiv;  // get to VM state in case we block on CodeCache_lock

  BufferedInlineTypeBlob* blob = nullptr;
  unsigned int size = CodeBlob::allocation_size(cb, sizeof(BufferedInlineTypeBlob));
  {
    MutexLocker mu(CodeCache_lock, Mutex::_no_safepoint_check_flag);
    blob = new (size) BufferedInlineTypeBlob(size, cb, pack_fields_off, pack_fields_jobject_off, unpack_fields_off);
  }
  // Track memory usage statistic after releasing CodeCache_lock
  MemoryService::track_code_cache_memory_usage();

  return blob;
}

//----------------------------------------------------------------------------------------------------
// Implementation of RuntimeStub

RuntimeStub::RuntimeStub(
  const char* name,
  CodeBuffer* cb,
  int         size,
  int         frame_complete,
  int         frame_size,
  OopMapSet*  oop_maps,
  bool        caller_must_gc_arguments
)
: RuntimeBlob(name, cb, sizeof(RuntimeStub), size, frame_complete, frame_size, oop_maps, caller_must_gc_arguments)
{
}

RuntimeStub* RuntimeStub::new_runtime_stub(const char* stub_name,
                                           CodeBuffer* cb,
                                           int frame_complete,
                                           int frame_size,
                                           OopMapSet* oop_maps,
                                           bool caller_must_gc_arguments)
{
  RuntimeStub* stub = nullptr;
  unsigned int size = CodeBlob::allocation_size(cb, sizeof(RuntimeStub));
  ThreadInVMfromUnknown __tiv;  // get to VM state in case we block on CodeCache_lock
  {
    MutexLocker mu(CodeCache_lock, Mutex::_no_safepoint_check_flag);
    stub = new (size) RuntimeStub(stub_name, cb, size, frame_complete, frame_size, oop_maps, caller_must_gc_arguments);
  }

  trace_new_stub(stub, "RuntimeStub - ", stub_name);

  return stub;
}


void* RuntimeStub::operator new(size_t s, unsigned size) throw() {
  void* p = CodeCache::allocate(size, CodeBlobType::NonNMethod);
  if (!p) fatal("Initial size of CodeCache is too small");
  return p;
}

// operator new shared by all singletons:
void* SingletonBlob::operator new(size_t s, unsigned size) throw() {
  void* p = CodeCache::allocate(size, CodeBlobType::NonNMethod);
  if (!p) fatal("Initial size of CodeCache is too small");
  return p;
}


//----------------------------------------------------------------------------------------------------
// Implementation of DeoptimizationBlob

DeoptimizationBlob::DeoptimizationBlob(
  CodeBuffer* cb,
  int         size,
  OopMapSet*  oop_maps,
  int         unpack_offset,
  int         unpack_with_exception_offset,
  int         unpack_with_reexecution_offset,
  int         frame_size
)
: SingletonBlob("DeoptimizationBlob", cb, sizeof(DeoptimizationBlob), size, frame_size, oop_maps)
{
  _unpack_offset           = unpack_offset;
  _unpack_with_exception   = unpack_with_exception_offset;
  _unpack_with_reexecution = unpack_with_reexecution_offset;
#ifdef COMPILER1
  _unpack_with_exception_in_tls = -1;
#endif
}


DeoptimizationBlob* DeoptimizationBlob::create(
  CodeBuffer* cb,
  OopMapSet*  oop_maps,
  int         unpack_offset,
  int         unpack_with_exception_offset,
  int         unpack_with_reexecution_offset,
  int         frame_size)
{
  DeoptimizationBlob* blob = nullptr;
  unsigned int size = CodeBlob::allocation_size(cb, sizeof(DeoptimizationBlob));
  ThreadInVMfromUnknown __tiv;  // get to VM state in case we block on CodeCache_lock
  {
    MutexLocker mu(CodeCache_lock, Mutex::_no_safepoint_check_flag);
    blob = new (size) DeoptimizationBlob(cb,
                                         size,
                                         oop_maps,
                                         unpack_offset,
                                         unpack_with_exception_offset,
                                         unpack_with_reexecution_offset,
                                         frame_size);
  }

  trace_new_stub(blob, "DeoptimizationBlob");

  return blob;
}


//----------------------------------------------------------------------------------------------------
// Implementation of UncommonTrapBlob

#ifdef COMPILER2
UncommonTrapBlob::UncommonTrapBlob(
  CodeBuffer* cb,
  int         size,
  OopMapSet*  oop_maps,
  int         frame_size
)
: SingletonBlob("UncommonTrapBlob", cb, sizeof(UncommonTrapBlob), size, frame_size, oop_maps)
{}


UncommonTrapBlob* UncommonTrapBlob::create(
  CodeBuffer* cb,
  OopMapSet*  oop_maps,
  int         frame_size)
{
  UncommonTrapBlob* blob = nullptr;
  unsigned int size = CodeBlob::allocation_size(cb, sizeof(UncommonTrapBlob));
  ThreadInVMfromUnknown __tiv;  // get to VM state in case we block on CodeCache_lock
  {
    MutexLocker mu(CodeCache_lock, Mutex::_no_safepoint_check_flag);
    blob = new (size) UncommonTrapBlob(cb, size, oop_maps, frame_size);
  }

  trace_new_stub(blob, "UncommonTrapBlob");

  return blob;
}


#endif // COMPILER2
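// Like UncommonTrapBlob above, the ExceptionBlob below is only built under
// COMPILER2: both back C2-specific code paths (uncommon-trap deoptimization
// and C2's exception handling), while C1 provides its own runtime entries
// via c1_Runtime1.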
//----------------------------------------------------------------------------------------------------
// Implementation of ExceptionBlob

#ifdef COMPILER2
ExceptionBlob::ExceptionBlob(
  CodeBuffer* cb,
  int         size,
  OopMapSet*  oop_maps,
  int         frame_size
)
: SingletonBlob("ExceptionBlob", cb, sizeof(ExceptionBlob), size, frame_size, oop_maps)
{}


ExceptionBlob* ExceptionBlob::create(
  CodeBuffer* cb,
  OopMapSet*  oop_maps,
  int         frame_size)
{
  ExceptionBlob* blob = nullptr;
  unsigned int size = CodeBlob::allocation_size(cb, sizeof(ExceptionBlob));
  ThreadInVMfromUnknown __tiv;  // get to VM state in case we block on CodeCache_lock
  {
    MutexLocker mu(CodeCache_lock, Mutex::_no_safepoint_check_flag);
    blob = new (size) ExceptionBlob(cb, size, oop_maps, frame_size);
  }

  trace_new_stub(blob, "ExceptionBlob");

  return blob;
}


#endif // COMPILER2


//----------------------------------------------------------------------------------------------------
// Implementation of SafepointBlob

SafepointBlob::SafepointBlob(
  CodeBuffer* cb,
  int         size,
  OopMapSet*  oop_maps,
  int         frame_size
)
: SingletonBlob("SafepointBlob", cb, sizeof(SafepointBlob), size, frame_size, oop_maps)
{}


SafepointBlob* SafepointBlob::create(
  CodeBuffer* cb,
  OopMapSet*  oop_maps,
  int         frame_size)
{
  SafepointBlob* blob = nullptr;
  unsigned int size = CodeBlob::allocation_size(cb, sizeof(SafepointBlob));
  ThreadInVMfromUnknown __tiv;  // get to VM state in case we block on CodeCache_lock
  {
    MutexLocker mu(CodeCache_lock, Mutex::_no_safepoint_check_flag);
    blob = new (size) SafepointBlob(cb, size, oop_maps, frame_size);
  }

  trace_new_stub(blob, "SafepointBlob");

  return blob;
}


//----------------------------------------------------------------------------------------------------
// Verification and printing

void CodeBlob::print_on(outputStream* st) const {
  st->print_cr("[CodeBlob (" INTPTR_FORMAT ")]", p2i(this));
  st->print_cr("Framesize: %d", _frame_size);
}

void CodeBlob::print() const { print_on(tty); }

void CodeBlob::print_value_on(outputStream* st) const {
  st->print_cr("[CodeBlob]");
}
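// dump_for_addr describes an arbitrary address inside this blob; it backs
// the debugging support in debug.cpp (e.g. findpc), as the verbose branch
// below notes.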
void CodeBlob::dump_for_addr(address addr, outputStream* st, bool verbose) const {
  if (is_buffer_blob()) {
    // the interpreter is generated into a buffer blob
    InterpreterCodelet* i = Interpreter::codelet_containing(addr);
    if (i != nullptr) {
      st->print_cr(INTPTR_FORMAT " is at code_begin+%d in an Interpreter codelet", p2i(addr), (int)(addr - i->code_begin()));
      i->print_on(st);
      return;
    }
    if (Interpreter::contains(addr)) {
      st->print_cr(INTPTR_FORMAT " is pointing into interpreter code"
                   " (not bytecode specific)", p2i(addr));
      return;
    }
    if (AdapterHandlerLibrary::contains(this)) {
      st->print_cr(INTPTR_FORMAT " is at code_begin+%d in an AdapterHandler", p2i(addr), (int)(addr - code_begin()));
      AdapterHandlerLibrary::print_handler_on(st, this);
    }
    // the stubroutines are generated into a buffer blob
    StubCodeDesc* d = StubCodeDesc::desc_for(addr);
    if (d != nullptr) {
      st->print_cr(INTPTR_FORMAT " is at begin+%d in a stub", p2i(addr), (int)(addr - d->begin()));
      d->print_on(st);
      st->cr();
      return;
    }
    if (StubRoutines::contains(addr)) {
      st->print_cr(INTPTR_FORMAT " is pointing to an (unnamed) stub routine", p2i(addr));
      return;
    }
    // the InlineCacheBuffer is using stubs generated into a buffer blob
    if (InlineCacheBuffer::contains(addr)) {
      st->print_cr(INTPTR_FORMAT " is pointing into InlineCacheBuffer", p2i(addr));
      return;
    }
    VtableStub* v = VtableStubs::stub_containing(addr);
    if (v != nullptr) {
      st->print_cr(INTPTR_FORMAT " is at entry_point+%d in a vtable stub", p2i(addr), (int)(addr - v->entry_point()));
      v->print_on(st);
      st->cr();
      return;
    }
  }
  if (is_nmethod()) {
    nmethod* nm = (nmethod*)this;
    ResourceMark rm;
    st->print(INTPTR_FORMAT " is at entry_point+%d in (nmethod*)" INTPTR_FORMAT,
              p2i(addr), (int)(addr - nm->entry_point()), p2i(nm));
    if (verbose) {
      st->print(" for ");
      nm->method()->print_value_on(st);
    }
    st->cr();
    if (verbose && st == tty) {
      // verbose is only ever true when called from findpc in debug.cpp
      nm->print_nmethod(true);
    } else {
      nm->print(st);
    }
    return;
  }
  st->print_cr(INTPTR_FORMAT " is at code_begin+%d in ", p2i(addr), (int)(addr - code_begin()));
  print_on(st);
}

void RuntimeBlob::verify() {
  ShouldNotReachHere();
}

void BufferBlob::verify() {
  // unimplemented
}

void BufferBlob::print_on(outputStream* st) const {
  RuntimeBlob::print_on(st);
  print_value_on(st);
}

void BufferBlob::print_value_on(outputStream* st) const {
  st->print_cr("BufferBlob (" INTPTR_FORMAT ") used for %s", p2i(this), name());
}

void RuntimeStub::verify() {
  // unimplemented
}

void RuntimeStub::print_on(outputStream* st) const {
  ttyLocker ttyl;
  RuntimeBlob::print_on(st);
  st->print("Runtime Stub (" INTPTR_FORMAT "): ", p2i(this));
  st->print_cr("%s", name());
  Disassembler::decode((RuntimeBlob*)this, st);
}

void RuntimeStub::print_value_on(outputStream* st) const {
  st->print("RuntimeStub (" INTPTR_FORMAT "): ", p2i(this)); st->print("%s", name());
}

void SingletonBlob::verify() {
  // unimplemented
}

void SingletonBlob::print_on(outputStream* st) const {
  ttyLocker ttyl;
  RuntimeBlob::print_on(st);
  st->print_cr("%s", name());
  Disassembler::decode((RuntimeBlob*)this, st);
}

void SingletonBlob::print_value_on(outputStream* st) const {
  st->print_cr("%s", name());
}

void DeoptimizationBlob::print_value_on(outputStream* st) const {
  st->print_cr("Deoptimization (frame not available)");
}

//----------------------------------------------------------------------------------------------------
// Implementation of UpcallStub
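// An UpcallStub keeps its Java receiver alive via a global JNI handle; the
// handle is destroyed in UpcallStub::free() below.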
UpcallStub::UpcallStub(const char* name, CodeBuffer* cb, int size,
                       intptr_t exception_handler_offset,
                       jobject receiver, ByteSize frame_data_offset) :
  RuntimeBlob(name, cb, sizeof(UpcallStub), size, CodeOffsets::frame_never_safe, 0 /* no frame size */,
              /* oop maps = */ nullptr, /* caller must gc arguments = */ false),
  _exception_handler_offset(exception_handler_offset),
  _receiver(receiver),
  _frame_data_offset(frame_data_offset) {
  CodeCache::commit(this);
}

void* UpcallStub::operator new(size_t s, unsigned size) throw() {
  return CodeCache::allocate(size, CodeBlobType::NonNMethod);
}

UpcallStub* UpcallStub::create(const char* name, CodeBuffer* cb,
                               intptr_t exception_handler_offset,
                               jobject receiver, ByteSize frame_data_offset) {
  ThreadInVMfromUnknown __tiv;  // get to VM state in case we block on CodeCache_lock

  UpcallStub* blob = nullptr;
  unsigned int size = CodeBlob::allocation_size(cb, sizeof(UpcallStub));
  {
    MutexLocker mu(CodeCache_lock, Mutex::_no_safepoint_check_flag);
    blob = new (size) UpcallStub(name, cb, size,
                                 exception_handler_offset, receiver, frame_data_offset);
  }
  // Track memory usage statistic after releasing CodeCache_lock
  MemoryService::track_code_cache_memory_usage();

  trace_new_stub(blob, "UpcallStub");

  return blob;
}

void UpcallStub::oops_do(OopClosure* f, const frame& frame) {
  frame_data_for_frame(frame)->old_handles->oops_do(f);
}

JavaFrameAnchor* UpcallStub::jfa_for_frame(const frame& frame) const {
  return &frame_data_for_frame(frame)->jfa;
}

void UpcallStub::free(UpcallStub* blob) {
  assert(blob != nullptr, "caller must check for nullptr");
  JNIHandles::destroy_global(blob->receiver());
  RuntimeBlob::free(blob);
}

void UpcallStub::preserve_callee_argument_oops(frame fr, const RegisterMap* reg_map, OopClosure* f) {
  ShouldNotReachHere(); // caller should never have to gc arguments
}

// Misc.
void UpcallStub::verify() {
  // unimplemented
}

void UpcallStub::print_on(outputStream* st) const {
  RuntimeBlob::print_on(st);
  print_value_on(st);
  Disassembler::decode((RuntimeBlob*)this, st);
}

void UpcallStub::print_value_on(outputStream* st) const {
  st->print_cr("UpcallStub (" INTPTR_FORMAT ") used for %s", p2i(this), name());
}