/*
 * Copyright (c) 1998, 2021, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#include "precompiled.hpp"
#include "jvm.h"
#include "code/codeBlob.hpp"
#include "code/codeCache.hpp"
#include "code/icBuffer.hpp"
#include "code/relocInfo.hpp"
#include "code/vtableStubs.hpp"
#include "compiler/disassembler.hpp"
#include "compiler/oopMap.hpp"
#include "interpreter/bytecode.hpp"
#include "interpreter/interpreter.hpp"
#include "memory/allocation.inline.hpp"
#include "memory/heap.hpp"
#include "memory/resourceArea.hpp"
#include "oops/oop.inline.hpp"
#include "prims/forte.hpp"
#include "prims/jvmtiExport.hpp"
#include "runtime/handles.inline.hpp"
#include "runtime/interfaceSupport.inline.hpp"
#include "runtime/javaFrameAnchor.hpp"
#include "runtime/jniHandles.hpp"
#include "runtime/mutexLocker.hpp"
#include "runtime/safepoint.hpp"
#include "runtime/sharedRuntime.hpp"
#include "runtime/stubCodeGenerator.hpp"
#include "runtime/stubRoutines.hpp"
#include "runtime/vframe.hpp"
#include "services/memoryService.hpp"
#include "utilities/align.hpp"
#ifdef COMPILER1
#include "c1/c1_Runtime1.hpp"
#endif

const char* CodeBlob::compiler_name() const {
  return compilertype2name(_type);
}

unsigned int CodeBlob::align_code_offset(int offset) {
  // align the size to CodeEntryAlignment
  int header_size = (int)CodeHeap::header_size();
  return align_up(offset + header_size, CodeEntryAlignment) - header_size;
}
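
// Worked example (illustrative only; the real values are platform dependent):
// with CodeHeap::header_size() == 32 and CodeEntryAlignment == 64,
//
//   align_code_offset(40) == align_up(40 + 32, 64) - 32 == 128 - 32 == 96
//
// i.e. the smallest offset >= 40 for which header_size + offset is a multiple
// of CodeEntryAlignment, so code placed at that offset behind the CodeHeap
// block header starts on an aligned address.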

// This must be consistent with the CodeBlob constructor's layout actions.
unsigned int CodeBlob::allocation_size(CodeBuffer* cb, int header_size) {
  unsigned int size = header_size;
  size += align_up(cb->total_relocation_size(), oopSize);
  // align the size to CodeEntryAlignment
  size = align_code_offset(size);
  size += align_up(cb->total_content_size(), oopSize);
  size += align_up(cb->total_oop_size(), oopSize);
  size += align_up(cb->total_metadata_size(), oopSize);
  return size;
}

CodeBlob::CodeBlob(const char* name, CompilerType type, const CodeBlobLayout& layout, int frame_complete_offset, int frame_size, ImmutableOopMapSet* oop_maps, bool caller_must_gc_arguments) :
  _type(type),
  _size(layout.size()),
  _header_size(layout.header_size()),
  _frame_complete_offset(frame_complete_offset),
  _data_offset(layout.data_offset()),
  _frame_size(frame_size),
  _code_begin(layout.code_begin()),
  _code_end(layout.code_end()),
  _content_begin(layout.content_begin()),
  _data_end(layout.data_end()),
  _relocation_begin(layout.relocation_begin()),
  _relocation_end(layout.relocation_end()),
  _oop_maps(oop_maps),
  _caller_must_gc_arguments(caller_must_gc_arguments),
  _name(name)
{
  assert(is_aligned(layout.size(), oopSize), "unaligned size");
  assert(is_aligned(layout.header_size(), oopSize), "unaligned size");
  assert(is_aligned(layout.relocation_size(), oopSize), "unaligned size");
  assert(layout.code_end() == layout.content_end(), "must be the same - see code_end()");
#ifdef COMPILER1
  // probably wrong for tiered
  assert(_frame_size >= -1, "must use frame size or -1 for runtime stubs");
#endif // COMPILER1
  S390_ONLY(_ctable_offset = 0;) // avoid uninitialized fields
}

CodeBlob::CodeBlob(const char* name, CompilerType type, const CodeBlobLayout& layout, CodeBuffer* cb /*UNUSED*/, int frame_complete_offset, int frame_size, OopMapSet* oop_maps, bool caller_must_gc_arguments) :
  _type(type),
  _size(layout.size()),
  _header_size(layout.header_size()),
  _frame_complete_offset(frame_complete_offset),
  _data_offset(layout.data_offset()),
  _frame_size(frame_size),
  _code_begin(layout.code_begin()),
  _code_end(layout.code_end()),
  _content_begin(layout.content_begin()),
  _data_end(layout.data_end()),
  _relocation_begin(layout.relocation_begin()),
  _relocation_end(layout.relocation_end()),
  _caller_must_gc_arguments(caller_must_gc_arguments),
  _name(name)
{
  assert(is_aligned(_size, oopSize), "unaligned size");
  assert(is_aligned(_header_size, oopSize), "unaligned size");
  assert(_data_offset <= _size, "codeBlob is too small");
  assert(layout.code_end() == layout.content_end(), "must be the same - see code_end()");

  set_oop_maps(oop_maps);
#ifdef COMPILER1
  // probably wrong for tiered
  assert(_frame_size >= -1, "must use frame size or -1 for runtime stubs");
#endif // COMPILER1
  S390_ONLY(_ctable_offset = 0;) // avoid uninitialized fields
}


// Creates a simple CodeBlob. Sets up the size of the different regions.
RuntimeBlob::RuntimeBlob(const char* name, int header_size, int size, int frame_complete, int locs_size)
  : CodeBlob(name, compiler_none, CodeBlobLayout((address) this, size, header_size, locs_size, size), frame_complete, 0, NULL, false /* caller_must_gc_arguments */)
{
  assert(is_aligned(locs_size, oopSize), "unaligned size");
}
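
// The regions summed in CodeBlob::allocation_size() above mirror the layout
// that CodeBlobLayout computes for a blob in the code cache (each region is
// padded to at least oopSize):
//
//   [ header | relocation info | content (code) | oops | metadata ]
//
// with the content region additionally aligned via align_code_offset().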

// Creates a RuntimeBlob from a CodeBuffer
// and copies code and relocation info.
RuntimeBlob::RuntimeBlob(
  const char* name,
  CodeBuffer* cb,
  int         header_size,
  int         size,
  int         frame_complete,
  int         frame_size,
  OopMapSet*  oop_maps,
  bool        caller_must_gc_arguments
) : CodeBlob(name, compiler_none, CodeBlobLayout((address) this, size, header_size, cb), cb, frame_complete, frame_size, oop_maps, caller_must_gc_arguments) {
  cb->copy_code_and_locs_to(this);
}

void CodeBlob::flush() {
  FREE_C_HEAP_ARRAY(unsigned char, _oop_maps);
  _oop_maps = NULL;
  NOT_PRODUCT(_asm_remarks.clear());
  NOT_PRODUCT(_dbg_strings.clear());
}

void CodeBlob::set_oop_maps(OopMapSet* p) {
  // Danger Will Robinson! This method allocates a big
  // chunk of memory, it's your job to free it.
  if (p != NULL) {
    _oop_maps = ImmutableOopMapSet::build_from(p);
  } else {
    _oop_maps = NULL;
  }
}


void RuntimeBlob::trace_new_stub(RuntimeBlob* stub, const char* name1, const char* name2) {
  // Do not hold the CodeCache lock during name formatting.
  assert(!CodeCache_lock->owned_by_self(), "release CodeCache before registering the stub");

  if (stub != NULL) {
    char stub_id[256];
    assert(strlen(name1) + strlen(name2) < sizeof(stub_id), "");
    jio_snprintf(stub_id, sizeof(stub_id), "%s%s", name1, name2);
    if (PrintStubCode) {
      ttyLocker ttyl;
      tty->print_cr("- - - [BEGIN] - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -");
      tty->print_cr("Decoding %s " INTPTR_FORMAT, stub_id, (intptr_t) stub);
      Disassembler::decode(stub->code_begin(), stub->code_end(), tty
                           NOT_PRODUCT(COMMA &stub->asm_remarks()));
      if ((stub->oop_maps() != NULL) && AbstractDisassembler::show_structs()) {
        tty->print_cr("- - - [OOP MAPS]- - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -");
        stub->oop_maps()->print();
      }
      tty->print_cr("- - - [END] - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -");
      tty->cr();
    }
    Forte::register_stub(stub_id, stub->code_begin(), stub->code_end());

    if (JvmtiExport::should_post_dynamic_code_generated()) {
      const char* stub_name = name2;
      if (name2[0] == '\0') stub_name = name1;
      JvmtiExport::post_dynamic_code_generated(stub_name, stub->code_begin(), stub->code_end());
    }
  }

  // Track memory usage statistic after releasing CodeCache_lock
  MemoryService::track_code_cache_memory_usage();
}

const ImmutableOopMap* CodeBlob::oop_map_for_return_address(address return_address) {
  assert(_oop_maps != NULL, "nope");
  return _oop_maps->find_map_at_offset((intptr_t) return_address - (intptr_t) code_begin());
}

void CodeBlob::print_code() {
  ResourceMark m;
  Disassembler::decode(this, tty);
}
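
// Usage sketch (illustrative, not called from this file): during a stack
// walk, frame code locates the GC oop map for a PC inside this blob roughly
// like this, where 'return_pc' is a hypothetical return address known to
// point into 'blob':
//
//   const ImmutableOopMap* map = blob->oop_map_for_return_address(return_pc);
//   // 'map' describes which frame slots/registers hold oops at that site.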

//----------------------------------------------------------------------------------------------------
// Implementation of BufferBlob


BufferBlob::BufferBlob(const char* name, int size)
: RuntimeBlob(name, sizeof(BufferBlob), size, CodeOffsets::frame_never_safe, /*locs_size:*/ 0)
{}

BufferBlob* BufferBlob::create(const char* name, int buffer_size) {
  ThreadInVMfromUnknown __tiv;  // get to VM state in case we block on CodeCache_lock

  BufferBlob* blob = NULL;
  unsigned int size = sizeof(BufferBlob);
  // align the size to CodeEntryAlignment
  size = CodeBlob::align_code_offset(size);
  size += align_up(buffer_size, oopSize);
  assert(name != NULL, "must provide a name");
  {
    MutexLocker mu(CodeCache_lock, Mutex::_no_safepoint_check_flag);
    blob = new (size) BufferBlob(name, size);
  }
  // Track memory usage statistic after releasing CodeCache_lock
  MemoryService::track_code_cache_memory_usage();

  return blob;
}


BufferBlob::BufferBlob(const char* name, int header_size, int size, CodeBuffer* cb)
  : RuntimeBlob(name, cb, header_size, size, CodeOffsets::frame_never_safe, 0, NULL)
{}

BufferBlob* BufferBlob::create(const char* name, CodeBuffer* cb) {
  ThreadInVMfromUnknown __tiv;  // get to VM state in case we block on CodeCache_lock

  BufferBlob* blob = NULL;
  unsigned int size = CodeBlob::allocation_size(cb, sizeof(BufferBlob));
  assert(name != NULL, "must provide a name");
  {
    MutexLocker mu(CodeCache_lock, Mutex::_no_safepoint_check_flag);
    blob = new (size) BufferBlob(name, sizeof(BufferBlob), size, cb);
  }
  // Track memory usage statistic after releasing CodeCache_lock
  MemoryService::track_code_cache_memory_usage();

  return blob;
}

void* BufferBlob::operator new(size_t s, unsigned size) throw() {
  return CodeCache::allocate(size, CodeBlobType::NonNMethod);
}

void BufferBlob::free(BufferBlob *blob) {
  assert(blob != NULL, "caller must check for NULL");
  ThreadInVMfromUnknown __tiv;  // get to VM state in case we block on CodeCache_lock
  blob->flush();
  {
    MutexLocker mu(CodeCache_lock, Mutex::_no_safepoint_check_flag);
    CodeCache::free((RuntimeBlob*)blob);
  }
  // Track memory usage statistic after releasing CodeCache_lock
  MemoryService::track_code_cache_memory_usage();
}

BufferBlob::BufferBlob(const char* name, int size, CodeBuffer* cb, int frame_complete, int frame_size, OopMapSet* oop_maps, bool caller_must_gc_arguments)
  : RuntimeBlob(name, cb, sizeof(BufferBlob), size, frame_complete, frame_size, oop_maps, caller_must_gc_arguments)
{}
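
// Usage sketch (illustrative): a BufferBlob is the simplest way to reserve
// scratch space in the NonNMethod code heap. The name below is made up:
//
//   BufferBlob* b = BufferBlob::create("scratch buffer", 1024);
//   if (b != NULL) {           // create() returns NULL on code cache exhaustion
//     // ... emit code into [b->code_begin(), b->code_end()) ...
//     BufferBlob::free(b);     // returns the space to the code cache
//   }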

//----------------------------------------------------------------------------------------------------
// Implementation of AdapterBlob

AdapterBlob::AdapterBlob(int size, CodeBuffer* cb, int frame_complete, int frame_size, OopMapSet* oop_maps, bool caller_must_gc_arguments) :
  BufferBlob("I2C/C2I adapters", size, cb, frame_complete, frame_size, oop_maps, caller_must_gc_arguments) {
  CodeCache::commit(this);
}

AdapterBlob* AdapterBlob::create(CodeBuffer* cb, int frame_complete, int frame_size, OopMapSet* oop_maps, bool caller_must_gc_arguments) {
  ThreadInVMfromUnknown __tiv;  // get to VM state in case we block on CodeCache_lock

  AdapterBlob* blob = NULL;
  unsigned int size = CodeBlob::allocation_size(cb, sizeof(AdapterBlob));
  {
    MutexLocker mu(CodeCache_lock, Mutex::_no_safepoint_check_flag);
    blob = new (size) AdapterBlob(size, cb, frame_complete, frame_size, oop_maps, caller_must_gc_arguments);
  }
  // Track memory usage statistic after releasing CodeCache_lock
  MemoryService::track_code_cache_memory_usage();

  return blob;
}

void* VtableBlob::operator new(size_t s, unsigned size) throw() {
  // Handling of allocation failure stops compilation and prints a bunch of
  // stuff, which requires unlocking the CodeCache_lock, so that the Compile_lock
  // can be locked, and then re-locking the CodeCache_lock. That is not safe in
  // this context as we hold the CompiledICLocker. So we just don't handle code
  // cache exhaustion here; we leave that for a later allocation that does not
  // hold the CompiledICLocker.
  return CodeCache::allocate(size, CodeBlobType::NonNMethod, false /* handle_alloc_failure */);
}

VtableBlob::VtableBlob(const char* name, int size) :
  BufferBlob(name, size) {
}

VtableBlob* VtableBlob::create(const char* name, int buffer_size) {
  assert(JavaThread::current()->thread_state() == _thread_in_vm, "called with the wrong state");

  VtableBlob* blob = NULL;
  unsigned int size = sizeof(VtableBlob);
  // align the size to CodeEntryAlignment
  size = align_code_offset(size);
  size += align_up(buffer_size, oopSize);
  assert(name != NULL, "must provide a name");
  {
    if (!CodeCache_lock->try_lock()) {
      // If we can't take the CodeCache_lock, then this is a bad time to perform the ongoing
      // IC transition to megamorphic, for which this stub will be needed. It is better to
      // bail out of the transition and wait for a more opportune moment. Not only is a
      // blocking wait for the lock not worthwhile for the megamorphic transition, it might
      // also deadlock when concurrent class unloading is performed. At this point in time,
      // the CompiledICLocker is taken, so we are not allowed to block on the CodeCache_lock,
      // as these two locks are otherwise consistently taken in the opposite order. Bailing
      // out results in an IC transition to the clean state instead, which will cause
      // subsequent calls to retry the transition eventually.
      return NULL;
    }
    blob = new (size) VtableBlob(name, size);
    CodeCache_lock->unlock();
  }
  // Track memory usage statistic after releasing CodeCache_lock
  MemoryService::track_code_cache_memory_usage();

  return blob;
}
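
// Lock-ordering summary for VtableBlob::create() above: the CompiledICLocker
// is already held there, and elsewhere CodeCache_lock is consistently taken
// before CompiledICLocker, so blocking on CodeCache_lock here could deadlock.
// Hence the non-blocking pattern
//
//   if (!CodeCache_lock->try_lock()) return NULL;  // caller retries later
//   // ... allocate ...
//   CodeCache_lock->unlock();
//
// instead of the MutexLocker used by the other factory methods in this file.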

//----------------------------------------------------------------------------------------------------
// Implementation of MethodHandlesAdapterBlob

MethodHandlesAdapterBlob* MethodHandlesAdapterBlob::create(int buffer_size) {
  ThreadInVMfromUnknown __tiv;  // get to VM state in case we block on CodeCache_lock

  MethodHandlesAdapterBlob* blob = NULL;
  unsigned int size = sizeof(MethodHandlesAdapterBlob);
  // align the size to CodeEntryAlignment
  size = CodeBlob::align_code_offset(size);
  size += align_up(buffer_size, oopSize);
  {
    MutexLocker mu(CodeCache_lock, Mutex::_no_safepoint_check_flag);
    blob = new (size) MethodHandlesAdapterBlob(size);
    if (blob == NULL) {
      vm_exit_out_of_memory(size, OOM_MALLOC_ERROR, "CodeCache: no room for method handle adapter blob");
    }
  }
  // Track memory usage statistic after releasing CodeCache_lock
  MemoryService::track_code_cache_memory_usage();

  return blob;
}

//----------------------------------------------------------------------------------------------------
// Implementation of BufferedInlineTypeBlob
BufferedInlineTypeBlob::BufferedInlineTypeBlob(int size, CodeBuffer* cb, int pack_fields_off, int pack_fields_jobject_off, int unpack_fields_off) :
  BufferBlob("buffered inline type", sizeof(BufferedInlineTypeBlob), size, cb),
  _pack_fields_off(pack_fields_off),
  _pack_fields_jobject_off(pack_fields_jobject_off),
  _unpack_fields_off(unpack_fields_off) {
  CodeCache::commit(this);
}

BufferedInlineTypeBlob* BufferedInlineTypeBlob::create(CodeBuffer* cb, int pack_fields_off, int pack_fields_jobject_off, int unpack_fields_off) {
  ThreadInVMfromUnknown __tiv;  // get to VM state in case we block on CodeCache_lock

  BufferedInlineTypeBlob* blob = NULL;
  unsigned int size = CodeBlob::allocation_size(cb, sizeof(BufferedInlineTypeBlob));
  {
    MutexLocker mu(CodeCache_lock, Mutex::_no_safepoint_check_flag);
    blob = new (size) BufferedInlineTypeBlob(size, cb, pack_fields_off, pack_fields_jobject_off, unpack_fields_off);
  }
  // Track memory usage statistic after releasing CodeCache_lock
  MemoryService::track_code_cache_memory_usage();

  return blob;
}

//----------------------------------------------------------------------------------------------------
// Implementation of RuntimeStub

RuntimeStub::RuntimeStub(
  const char* name,
  CodeBuffer* cb,
  int         size,
  int         frame_complete,
  int         frame_size,
  OopMapSet*  oop_maps,
  bool        caller_must_gc_arguments
)
: RuntimeBlob(name, cb, sizeof(RuntimeStub), size, frame_complete, frame_size, oop_maps, caller_must_gc_arguments)
{
}

RuntimeStub* RuntimeStub::new_runtime_stub(const char* stub_name,
                                           CodeBuffer* cb,
                                           int frame_complete,
                                           int frame_size,
                                           OopMapSet* oop_maps,
                                           bool caller_must_gc_arguments)
{
  RuntimeStub* stub = NULL;
  ThreadInVMfromUnknown __tiv;  // get to VM state in case we block on CodeCache_lock
  {
    MutexLocker mu(CodeCache_lock, Mutex::_no_safepoint_check_flag);
    unsigned int size = CodeBlob::allocation_size(cb, sizeof(RuntimeStub));
    stub = new (size) RuntimeStub(stub_name, cb, size, frame_complete, frame_size, oop_maps, caller_must_gc_arguments);
  }

  trace_new_stub(stub, "RuntimeStub - ", stub_name);

  return stub;
}


void* RuntimeStub::operator new(size_t s, unsigned size) throw() {
  void* p = CodeCache::allocate(size, CodeBlobType::NonNMethod);
  if (!p) fatal("Initial size of CodeCache is too small");
  return p;
}

// operator new shared by all singletons:
void* SingletonBlob::operator new(size_t s, unsigned size) throw() {
  void* p = CodeCache::allocate(size, CodeBlobType::NonNMethod);
  if (!p) fatal("Initial size of CodeCache is too small");
  return p;
}
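
// Note on the allocation pattern used throughout this file: an expression
// such as
//
//   stub = new (size) RuntimeStub(name, cb, size, ...);
//
// calls the two-argument operator new above, so the storage comes from the
// code cache's NonNMethod heap rather than the C heap. The 'size' argument
// must be the value computed by CodeBlob::allocation_size() for the same
// CodeBuffer, or the trailing sections will not fit.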

//----------------------------------------------------------------------------------------------------
// Implementation of DeoptimizationBlob

DeoptimizationBlob::DeoptimizationBlob(
  CodeBuffer* cb,
  int         size,
  OopMapSet*  oop_maps,
  int         unpack_offset,
  int         unpack_with_exception_offset,
  int         unpack_with_reexecution_offset,
  int         frame_size
)
: SingletonBlob("DeoptimizationBlob", cb, sizeof(DeoptimizationBlob), size, frame_size, oop_maps)
{
  _unpack_offset           = unpack_offset;
  _unpack_with_exception   = unpack_with_exception_offset;
  _unpack_with_reexecution = unpack_with_reexecution_offset;
#ifdef COMPILER1
  _unpack_with_exception_in_tls = -1;
#endif
}


DeoptimizationBlob* DeoptimizationBlob::create(
  CodeBuffer* cb,
  OopMapSet*  oop_maps,
  int         unpack_offset,
  int         unpack_with_exception_offset,
  int         unpack_with_reexecution_offset,
  int         frame_size)
{
  DeoptimizationBlob* blob = NULL;
  ThreadInVMfromUnknown __tiv;  // get to VM state in case we block on CodeCache_lock
  {
    MutexLocker mu(CodeCache_lock, Mutex::_no_safepoint_check_flag);
    unsigned int size = CodeBlob::allocation_size(cb, sizeof(DeoptimizationBlob));
    blob = new (size) DeoptimizationBlob(cb,
                                         size,
                                         oop_maps,
                                         unpack_offset,
                                         unpack_with_exception_offset,
                                         unpack_with_reexecution_offset,
                                         frame_size);
  }

  trace_new_stub(blob, "DeoptimizationBlob");

  return blob;
}


//----------------------------------------------------------------------------------------------------
// Implementation of UncommonTrapBlob

#ifdef COMPILER2
UncommonTrapBlob::UncommonTrapBlob(
  CodeBuffer* cb,
  int         size,
  OopMapSet*  oop_maps,
  int         frame_size
)
: SingletonBlob("UncommonTrapBlob", cb, sizeof(UncommonTrapBlob), size, frame_size, oop_maps)
{}


UncommonTrapBlob* UncommonTrapBlob::create(
  CodeBuffer* cb,
  OopMapSet*  oop_maps,
  int         frame_size)
{
  UncommonTrapBlob* blob = NULL;
  ThreadInVMfromUnknown __tiv;  // get to VM state in case we block on CodeCache_lock
  {
    MutexLocker mu(CodeCache_lock, Mutex::_no_safepoint_check_flag);
    unsigned int size = CodeBlob::allocation_size(cb, sizeof(UncommonTrapBlob));
    blob = new (size) UncommonTrapBlob(cb, size, oop_maps, frame_size);
  }

  trace_new_stub(blob, "UncommonTrapBlob");

  return blob;
}


#endif // COMPILER2


//----------------------------------------------------------------------------------------------------
// Implementation of ExceptionBlob

#ifdef COMPILER2
ExceptionBlob::ExceptionBlob(
  CodeBuffer* cb,
  int         size,
  OopMapSet*  oop_maps,
  int         frame_size
)
: SingletonBlob("ExceptionBlob", cb, sizeof(ExceptionBlob), size, frame_size, oop_maps)
{}


ExceptionBlob* ExceptionBlob::create(
  CodeBuffer* cb,
  OopMapSet*  oop_maps,
  int         frame_size)
{
  ExceptionBlob* blob = NULL;
  ThreadInVMfromUnknown __tiv;  // get to VM state in case we block on CodeCache_lock
  {
    MutexLocker mu(CodeCache_lock, Mutex::_no_safepoint_check_flag);
    unsigned int size = CodeBlob::allocation_size(cb, sizeof(ExceptionBlob));
    blob = new (size) ExceptionBlob(cb, size, oop_maps, frame_size);
  }

  trace_new_stub(blob, "ExceptionBlob");

  return blob;
}


#endif // COMPILER2


//----------------------------------------------------------------------------------------------------
// Implementation of SafepointBlob

SafepointBlob::SafepointBlob(
  CodeBuffer* cb,
  int         size,
  OopMapSet*  oop_maps,
  int         frame_size
)
: SingletonBlob("SafepointBlob", cb, sizeof(SafepointBlob), size, frame_size, oop_maps)
{}


SafepointBlob* SafepointBlob::create(
  CodeBuffer* cb,
  OopMapSet*  oop_maps,
  int         frame_size)
{
  SafepointBlob* blob = NULL;
  ThreadInVMfromUnknown __tiv;  // get to VM state in case we block on CodeCache_lock
  {
    MutexLocker mu(CodeCache_lock, Mutex::_no_safepoint_check_flag);
    unsigned int size = CodeBlob::allocation_size(cb, sizeof(SafepointBlob));
    blob = new (size) SafepointBlob(cb, size, oop_maps, frame_size);
  }

  trace_new_stub(blob, "SafepointBlob");

  return blob;
}
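
// All the singleton blobs above follow the same recipe: take CodeCache_lock
// without a safepoint check, compute the size via CodeBlob::allocation_size(),
// placement-new the blob into the code cache, and only after releasing the
// lock report it via trace_new_stub() (which asserts the lock is not held).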

//----------------------------------------------------------------------------------------------------
// Verification and printing

void CodeBlob::print_on(outputStream* st) const {
  st->print_cr("[CodeBlob (" INTPTR_FORMAT ")]", p2i(this));
  st->print_cr("Framesize: %d", _frame_size);
}

void CodeBlob::print() const { print_on(tty); }

void CodeBlob::print_value_on(outputStream* st) const {
  st->print_cr("[CodeBlob]");
}

void CodeBlob::dump_for_addr(address addr, outputStream* st, bool verbose) const {
  if (is_buffer_blob()) {
    // the interpreter is generated into a buffer blob
    InterpreterCodelet* i = Interpreter::codelet_containing(addr);
    if (i != NULL) {
      st->print_cr(INTPTR_FORMAT " is at code_begin+%d in an Interpreter codelet", p2i(addr), (int)(addr - i->code_begin()));
      i->print_on(st);
      return;
    }
    if (Interpreter::contains(addr)) {
      st->print_cr(INTPTR_FORMAT " is pointing into interpreter code"
                   " (not bytecode specific)", p2i(addr));
      return;
    }
    // the i2c/c2i adapters are generated into a buffer blob
    if (AdapterHandlerLibrary::contains(this)) {
      st->print_cr(INTPTR_FORMAT " is at code_begin+%d in an AdapterHandler", p2i(addr), (int)(addr - code_begin()));
      AdapterHandlerLibrary::print_handler_on(st, this);
    }
    // the stubroutines are generated into a buffer blob
    StubCodeDesc* d = StubCodeDesc::desc_for(addr);
    if (d != NULL) {
      st->print_cr(INTPTR_FORMAT " is at begin+%d in a stub", p2i(addr), (int)(addr - d->begin()));
      d->print_on(st);
      st->cr();
      return;
    }
    if (StubRoutines::contains(addr)) {
      st->print_cr(INTPTR_FORMAT " is pointing to an (unnamed) stub routine", p2i(addr));
      return;
    }
    // the InlineCacheBuffer is using stubs generated into a buffer blob
    if (InlineCacheBuffer::contains(addr)) {
      st->print_cr(INTPTR_FORMAT " is pointing into InlineCacheBuffer", p2i(addr));
      return;
    }
    VtableStub* v = VtableStubs::stub_containing(addr);
    if (v != NULL) {
      st->print_cr(INTPTR_FORMAT " is at entry_point+%d in a vtable stub", p2i(addr), (int)(addr - v->entry_point()));
      v->print_on(st);
      st->cr();
      return;
    }
  }
  if (is_nmethod()) {
    nmethod* nm = (nmethod*)this;
    ResourceMark rm;
    st->print(INTPTR_FORMAT " is at entry_point+%d in (nmethod*)" INTPTR_FORMAT,
              p2i(addr), (int)(addr - nm->entry_point()), p2i(nm));
    if (verbose) {
      st->print(" for ");
      nm->method()->print_value_on(st);
    }
    st->cr();
    if (verbose && st == tty) {
      // verbose is only ever true when called from findpc in debug.cpp
      nm->print_nmethod(true);
    } else {
      nm->print(st);
    }
    return;
  }
  st->print_cr(INTPTR_FORMAT " is at code_begin+%d in ", p2i(addr), (int)(addr - code_begin()));
  print_on(st);
}

void RuntimeBlob::verify() {
  ShouldNotReachHere();
}

void BufferBlob::verify() {
  // unimplemented
}

void BufferBlob::print_on(outputStream* st) const {
  RuntimeBlob::print_on(st);
  print_value_on(st);
}

void BufferBlob::print_value_on(outputStream* st) const {
  st->print_cr("BufferBlob (" INTPTR_FORMAT ") used for %s", p2i(this), name());
}

void RuntimeStub::verify() {
  // unimplemented
}

void RuntimeStub::print_on(outputStream* st) const {
  ttyLocker ttyl;
  RuntimeBlob::print_on(st);
  st->print("Runtime Stub (" INTPTR_FORMAT "): ", p2i(this));
  st->print_cr("%s", name());
  Disassembler::decode((RuntimeBlob*)this, st);
}

void RuntimeStub::print_value_on(outputStream* st) const {
  st->print("RuntimeStub (" INTPTR_FORMAT "): ", p2i(this)); st->print("%s", name());
}
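
// Debugging aid (illustrative): dump_for_addr() above is reached from the
// debug-build helper findpc() in debug.cpp, e.g. from a native debugger:
//
//   (gdb) call findpc(0x7f0000001234)   // address is made up
//
// which classifies an arbitrary PC (interpreter codelet, stub, vtable stub,
// nmethod, ...) and prints the enclosing blob.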

void SingletonBlob::verify() {
  // unimplemented
}

void SingletonBlob::print_on(outputStream* st) const {
  ttyLocker ttyl;
  RuntimeBlob::print_on(st);
  st->print_cr("%s", name());
  Disassembler::decode((RuntimeBlob*)this, st);
}

void SingletonBlob::print_value_on(outputStream* st) const {
  st->print_cr("%s", name());
}

void DeoptimizationBlob::print_value_on(outputStream* st) const {
  st->print_cr("Deoptimization (frame not available)");
}

//----------------------------------------------------------------------------------------------------
// Implementation of OptimizedEntryBlob

OptimizedEntryBlob::OptimizedEntryBlob(const char* name, int size, CodeBuffer* cb, intptr_t exception_handler_offset,
                                       jobject receiver, ByteSize frame_data_offset) :
  BufferBlob(name, sizeof(OptimizedEntryBlob), size, cb),
  _exception_handler_offset(exception_handler_offset),
  _receiver(receiver),
  _frame_data_offset(frame_data_offset) {
  CodeCache::commit(this);
}

OptimizedEntryBlob* OptimizedEntryBlob::create(const char* name, CodeBuffer* cb, intptr_t exception_handler_offset,
                                               jobject receiver, ByteSize frame_data_offset) {
  ThreadInVMfromUnknown __tiv;  // get to VM state in case we block on CodeCache_lock

  OptimizedEntryBlob* blob = nullptr;
  unsigned int size = CodeBlob::allocation_size(cb, sizeof(OptimizedEntryBlob));
  {
    MutexLocker mu(CodeCache_lock, Mutex::_no_safepoint_check_flag);
    blob = new (size) OptimizedEntryBlob(name, size, cb, exception_handler_offset, receiver, frame_data_offset);
  }
  // Track memory usage statistic after releasing CodeCache_lock
  MemoryService::track_code_cache_memory_usage();

  return blob;
}

void OptimizedEntryBlob::oops_do(OopClosure* f, const frame& frame) {
  frame_data_for_frame(frame)->old_handles->oops_do(f);
}

JavaFrameAnchor* OptimizedEntryBlob::jfa_for_frame(const frame& frame) const {
  return &frame_data_for_frame(frame)->jfa;
}