/*
 * Copyright (c) 1998, 2025, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#include "code/codeBlob.hpp"
#include "code/codeCache.hpp"
#include "code/relocInfo.hpp"
#include "code/vtableStubs.hpp"
#include "compiler/disassembler.hpp"
#include "compiler/oopMap.hpp"
#include "interpreter/bytecode.hpp"
#include "interpreter/interpreter.hpp"
#include "jvm.h"
#include "memory/allocation.inline.hpp"
#include "memory/heap.hpp"
#include "memory/resourceArea.hpp"
#include "oops/oop.inline.hpp"
#include "prims/forte.hpp"
#include "prims/jvmtiExport.hpp"
#include "runtime/handles.inline.hpp"
#include "runtime/interfaceSupport.inline.hpp"
#include "runtime/javaFrameAnchor.hpp"
#include "runtime/jniHandles.inline.hpp"
#include "runtime/mutexLocker.hpp"
#include "runtime/safepoint.hpp"
#include "runtime/sharedRuntime.hpp"
#include "runtime/stubCodeGenerator.hpp"
#include "runtime/stubRoutines.hpp"
#include "runtime/vframe.hpp"
#include "services/memoryService.hpp"
#include "utilities/align.hpp"
#ifdef COMPILER1
#include "c1/c1_Runtime1.hpp"
#endif

#include <type_traits>

// Virtual methods are not allowed in code blobs to simplify caching compiled code.
// Check all "leaf" subclasses of CodeBlob class.
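// (An archived blob image is copied byte-for-byte - see CodeBlob::archive_blob()
// and CodeBlob::restore() below - so it must not contain C++ vtable pointers,
// which are not valid across processes.)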

static_assert(!std::is_polymorphic<nmethod>::value,            "no virtual methods are allowed in nmethod");
static_assert(!std::is_polymorphic<AdapterBlob>::value,        "no virtual methods are allowed in code blobs");
static_assert(!std::is_polymorphic<VtableBlob>::value,         "no virtual methods are allowed in code blobs");
static_assert(!std::is_polymorphic<MethodHandlesAdapterBlob>::value, "no virtual methods are allowed in code blobs");
static_assert(!std::is_polymorphic<RuntimeStub>::value,        "no virtual methods are allowed in code blobs");
static_assert(!std::is_polymorphic<BufferedInlineTypeBlob>::value,   "no virtual methods are allowed in code blobs");
static_assert(!std::is_polymorphic<DeoptimizationBlob>::value, "no virtual methods are allowed in code blobs");
static_assert(!std::is_polymorphic<SafepointBlob>::value,      "no virtual methods are allowed in code blobs");
static_assert(!std::is_polymorphic<UpcallStub>::value,         "no virtual methods are allowed in code blobs");
#ifdef COMPILER2
static_assert(!std::is_polymorphic<ExceptionBlob>::value,      "no virtual methods are allowed in code blobs");
static_assert(!std::is_polymorphic<UncommonTrapBlob>::value,   "no virtual methods are allowed in code blobs");
#endif

// Add proxy vtables.
// We only need a few for now - they are used only by the print functions.
const nmethod::Vptr                  nmethod::_vpntr;
const BufferBlob::Vptr               BufferBlob::_vpntr;
const RuntimeStub::Vptr              RuntimeStub::_vpntr;
const SingletonBlob::Vptr            SingletonBlob::_vpntr;
const DeoptimizationBlob::Vptr       DeoptimizationBlob::_vpntr;
#ifdef COMPILER2
const ExceptionBlob::Vptr            ExceptionBlob::_vpntr;
#endif // COMPILER2
const UpcallStub::Vptr               UpcallStub::_vpntr;

const CodeBlob::Vptr* CodeBlob::vptr(CodeBlobKind kind) {
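  // Note: the entries must be in the same order as the CodeBlobKind enum values,
  // since the array is indexed by kind.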
  constexpr const CodeBlob::Vptr* array[(size_t)CodeBlobKind::Number_Of_Kinds] = {
      nullptr/* None */,
      &nmethod::_vpntr,
      &BufferBlob::_vpntr,
      &AdapterBlob::_vpntr,
      &VtableBlob::_vpntr,
      &MethodHandlesAdapterBlob::_vpntr,
      &BufferedInlineTypeBlob::_vpntr,
      &RuntimeStub::_vpntr,
      &DeoptimizationBlob::_vpntr,
      &SafepointBlob::_vpntr,
#ifdef COMPILER2
      &ExceptionBlob::_vpntr,
      &UncommonTrapBlob::_vpntr,
#endif
      &UpcallStub::_vpntr
  };

  return array[(size_t)kind];
}

const CodeBlob::Vptr* CodeBlob::vptr() const {
  return vptr(_kind);
}

unsigned int CodeBlob::align_code_offset(int offset) {
  // align the offset so the code starts at a CodeEntryAlignment boundary,
  // accounting for the CodeHeap block header that precedes the blob
  int header_size = (int)CodeHeap::header_size();
  return align_up(offset + header_size, CodeEntryAlignment) - header_size;
}

// This must be consistent with the CodeBlob constructor's layout actions.
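// Resulting layout (each part is oopSize aligned):
//   [ header | content (code) | data (oops) ]
// with content starting at align_code_offset(header_size).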
unsigned int CodeBlob::allocation_size(CodeBuffer* cb, int header_size) {
  // align the size to CodeEntryAlignment
  unsigned int size = align_code_offset(header_size);
  size += align_up(cb->total_content_size(), oopSize);
  size += align_up(cb->total_oop_size(), oopSize);
  return size;
}

CodeBlob::CodeBlob(const char* name, CodeBlobKind kind, CodeBuffer* cb, int size, uint16_t header_size,
                   int16_t frame_complete_offset, int frame_size, OopMapSet* oop_maps, bool caller_must_gc_arguments,
                   int mutable_data_size) :
  _oop_maps(nullptr), // will be set by set_oop_maps() call
  _name(name),
  _mutable_data(header_begin() + size), // default value is blob_end()
  _size(size),
  _relocation_size(align_up(cb->total_relocation_size(), oopSize)),
  _content_offset(CodeBlob::align_code_offset(header_size)),
  _code_offset(_content_offset + cb->total_offset_of(cb->insts())),
  _data_offset(_content_offset + align_up(cb->total_content_size(), oopSize)),
  _frame_size(frame_size),
  _mutable_data_size(mutable_data_size),
  S390_ONLY(_ctable_offset(0) COMMA)
  _header_size(header_size),
  _frame_complete_offset(frame_complete_offset),
  _kind(kind),
  _caller_must_gc_arguments(caller_must_gc_arguments)
{
  assert(is_aligned(_size,            oopSize), "unaligned size");
  assert(is_aligned(header_size,      oopSize), "unaligned size");
  assert(is_aligned(_relocation_size, oopSize), "unaligned size");
  assert(_data_offset <= _size, "codeBlob is too small: %d > %d", _data_offset, _size);
  assert(is_nmethod() || (cb->total_oop_size() + cb->total_metadata_size() == 0), "only nmethods may have oops or metadata");
  assert(code_end() == content_end(), "must be the same - see code_end()");
#ifdef COMPILER1
  // probably wrong for tiered
  assert(_frame_size >= -1, "must use frame size or -1 for runtime stubs");
#endif // COMPILER1

  if (_mutable_data_size > 0) {
    _mutable_data = (address)os::malloc(_mutable_data_size, mtCode);
    if (_mutable_data == nullptr) {
      vm_exit_out_of_memory(_mutable_data_size, OOM_MALLOC_ERROR, "codebuffer: no space for mutable data");
    }
  } else {
    // We need a unique, valid, non-null address
    assert(_mutable_data == blob_end(), "sanity");
  }

  set_oop_maps(oop_maps);
}

// Simple CodeBlob used for simple BufferBlob.
CodeBlob::CodeBlob(const char* name, CodeBlobKind kind, int size, uint16_t header_size) :
  _oop_maps(nullptr),
  _name(name),
  _mutable_data(header_begin() + size), // default value is blob_end()
  _size(size),
  _relocation_size(0),
  _content_offset(CodeBlob::align_code_offset(header_size)),
  _code_offset(_content_offset),
  _data_offset(size),
  _frame_size(0),
  _mutable_data_size(0),
  S390_ONLY(_ctable_offset(0) COMMA)
  _header_size(header_size),
  _frame_complete_offset(CodeOffsets::frame_never_safe),
  _kind(kind),
  _caller_must_gc_arguments(false)
{
  assert(is_aligned(size,            oopSize), "unaligned size");
  assert(is_aligned(header_size,     oopSize), "unaligned size");
  assert(_mutable_data == blob_end(), "sanity");
}

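// Re-allocate the C-heap mutable data (which holds the relocations) for a blob
// that is being restored from an archived copy; mirrors the allocation done in
// the CodeBlob constructor above.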
void CodeBlob::restore_mutable_data(address reloc_data) {
  // Relocation data is now stored as part of the mutable data area; allocate it before copying relocations
  if (_mutable_data_size > 0) {
    _mutable_data = (address)os::malloc(_mutable_data_size, mtCode);
    if (_mutable_data == nullptr) {
      vm_exit_out_of_memory(_mutable_data_size, OOM_MALLOC_ERROR, "codebuffer: no space for mutable data");
    }
  } else {
    _mutable_data = blob_end(); // default value
  }
  if (_relocation_size > 0) {
    assert(_mutable_data_size > 0, "relocation is part of mutable data section");
    memcpy((address)relocation_begin(), reloc_data, relocation_size());
  }
}

void CodeBlob::purge() {
  assert(_mutable_data != nullptr, "should never be null");
  if (_mutable_data != blob_end()) {
    os::free(_mutable_data);
    _mutable_data = blob_end(); // Valid not null address
    _mutable_data_size = 0;
    _relocation_size = 0;
  }
  if (_oop_maps != nullptr) {
    delete _oop_maps;
    _oop_maps = nullptr;
  }
  NOT_PRODUCT(_asm_remarks.clear());
  NOT_PRODUCT(_dbg_strings.clear());
}

void CodeBlob::set_oop_maps(OopMapSet* p) {
  // Danger Will Robinson! This method allocates a big
  // chunk of memory, it's your job to free it.
  if (p != nullptr) {
    _oop_maps = ImmutableOopMapSet::build_from(p);
  } else {
    _oop_maps = nullptr;
  }
}

const ImmutableOopMap* CodeBlob::oop_map_for_return_address(address return_address) const {
  assert(_oop_maps != nullptr, "nope");
  return _oop_maps->find_map_at_offset((intptr_t) return_address - (intptr_t) code_begin());
}

void CodeBlob::print_code_on(outputStream* st) {
  ResourceMark m;
  Disassembler::decode(this, st);
}

void CodeBlob::prepare_for_archiving_impl() {
  set_name(nullptr);
  _oop_maps = nullptr;
  _mutable_data = nullptr;
#ifndef PRODUCT
  asm_remarks().clear();
  dbg_strings().clear();
#endif /* PRODUCT */
}

void CodeBlob::prepare_for_archiving() {
  vptr(_kind)->prepare_for_archiving(this);
}

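// Copy the blob into the archive buffer, then scrub the fields that are not
// valid across processes (name, oop maps, mutable data); CodeBlob::restore()
// re-establishes them.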
void CodeBlob::archive_blob(CodeBlob* blob, address archive_buffer) {
  blob->copy_to(archive_buffer);
  CodeBlob* archived_blob = (CodeBlob*)archive_buffer;
  archived_blob->prepare_for_archiving();
}

void CodeBlob::post_restore_impl() {
  // Track memory usage statistic after releasing CodeCache_lock
  MemoryService::track_code_cache_memory_usage();
}

void CodeBlob::post_restore() {
  vptr(_kind)->post_restore(this);
}

CodeBlob* CodeBlob::restore(address code_cache_buffer,
                            const char* name,
                            address archived_reloc_data,
                            ImmutableOopMapSet* archived_oop_maps)
{
  copy_to(code_cache_buffer);
  CodeBlob* code_blob = (CodeBlob*)code_cache_buffer;
  code_blob->set_name(name);
  code_blob->restore_mutable_data(archived_reloc_data);
  code_blob->set_oop_maps(archived_oop_maps);
  return code_blob;
}

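// Allocate space in the code cache and materialize an archived blob into it.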
CodeBlob* CodeBlob::create(CodeBlob* archived_blob,
                           const char* name,
                           address archived_reloc_data,
                           ImmutableOopMapSet* archived_oop_maps
                          )
{
  ThreadInVMfromUnknown __tiv;  // get to VM state in case we block on CodeCache_lock

  CodeCache::gc_on_allocation();

  CodeBlob* blob = nullptr;
  unsigned int size = archived_blob->size();
  {
    MutexLocker mu(CodeCache_lock, Mutex::_no_safepoint_check_flag);
    address code_cache_buffer = (address)CodeCache::allocate(size, CodeBlobType::NonNMethod);
    if (code_cache_buffer != nullptr) {
      blob = archived_blob->restore(code_cache_buffer,
                                    name,
                                    archived_reloc_data,
                                    archived_oop_maps);
      assert(blob != nullptr, "sanity check");

      // Flush the code block
      ICache::invalidate_range(blob->code_begin(), blob->code_size());
      CodeCache::commit(blob); // Count adapters
    }
  }
  if (blob != nullptr) {
    blob->post_restore();
  }
  return blob;
}

//-----------------------------------------------------------------------------------------
// Creates a RuntimeBlob from a CodeBuffer and copies the code and relocation info.

RuntimeBlob::RuntimeBlob(
  const char* name,
  CodeBlobKind kind,
  CodeBuffer* cb,
  int         size,
  uint16_t    header_size,
  int16_t     frame_complete,
  int         frame_size,
  OopMapSet*  oop_maps,
  bool        caller_must_gc_arguments)
  : CodeBlob(name, kind, cb, size, header_size, frame_complete, frame_size, oop_maps, caller_must_gc_arguments,
             align_up(cb->total_relocation_size(), oopSize))
{
  cb->copy_code_and_locs_to(this);
}

void RuntimeBlob::free(RuntimeBlob* blob) {
  assert(blob != nullptr, "caller must check for nullptr");
  ThreadInVMfromUnknown __tiv;  // get to VM state in case we block on CodeCache_lock
  blob->purge();
  {
    MutexLocker mu(CodeCache_lock, Mutex::_no_safepoint_check_flag);
    CodeCache::free(blob);
  }
  // Track memory usage statistic after releasing CodeCache_lock
  MemoryService::track_code_cache_memory_usage();
}

void RuntimeBlob::trace_new_stub(RuntimeBlob* stub, const char* name1, const char* name2) {
  // Do not hold the CodeCache lock during name formatting.
  assert(!CodeCache_lock->owned_by_self(), "release CodeCache before registering the stub");

  if (stub != nullptr && (PrintStubCode ||
                          Forte::is_enabled() ||
                          JvmtiExport::should_post_dynamic_code_generated())) {
    char stub_id[256];
    assert(strlen(name1) + strlen(name2) < sizeof(stub_id), "stub_id buffer too small");
    jio_snprintf(stub_id, sizeof(stub_id), "%s%s", name1, name2);
    if (PrintStubCode) {
      ttyLocker ttyl;
      tty->print_cr("- - - [BEGIN] - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -");
      tty->print_cr("Decoding %s " PTR_FORMAT " [" PTR_FORMAT ", " PTR_FORMAT "] (%d bytes)",
                    stub_id, p2i(stub), p2i(stub->code_begin()), p2i(stub->code_end()), stub->code_size());
      Disassembler::decode(stub->code_begin(), stub->code_end(), tty
                           NOT_PRODUCT(COMMA &stub->asm_remarks()));
      if ((stub->oop_maps() != nullptr) && AbstractDisassembler::show_structs()) {
        tty->print_cr("- - - [OOP MAPS]- - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -");
        stub->oop_maps()->print();
      }
      tty->print_cr("- - - [END] - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -");
      tty->cr();
    }
    if (Forte::is_enabled()) {
      Forte::register_stub(stub_id, stub->code_begin(), stub->code_end());
    }

    if (JvmtiExport::should_post_dynamic_code_generated()) {
      const char* stub_name = name2;
      if (name2[0] == '\0')  stub_name = name1;
      JvmtiExport::post_dynamic_code_generated(stub_name, stub->code_begin(), stub->code_end());
    }
  }

  // Track memory usage statistic after releasing CodeCache_lock
  MemoryService::track_code_cache_memory_usage();
}

//----------------------------------------------------------------------------------------------------
// Implementation of BufferBlob

BufferBlob::BufferBlob(const char* name, CodeBlobKind kind, int size, uint16_t header_size)
: RuntimeBlob(name, kind, size, header_size)
{}

BufferBlob* BufferBlob::create(const char* name, uint buffer_size) {
  ThreadInVMfromUnknown __tiv;  // get to VM state in case we block on CodeCache_lock

  BufferBlob* blob = nullptr;
  unsigned int size = sizeof(BufferBlob);
  // align the size to CodeEntryAlignment
  size = CodeBlob::align_code_offset(size);
  size += align_up(buffer_size, oopSize);
  assert(name != nullptr, "must provide a name");
  {
    MutexLocker mu(CodeCache_lock, Mutex::_no_safepoint_check_flag);
    blob = new (size) BufferBlob(name, CodeBlobKind::Buffer, size);
  }
  // Track memory usage statistic after releasing CodeCache_lock
  MemoryService::track_code_cache_memory_usage();

  return blob;
}


BufferBlob::BufferBlob(const char* name, CodeBlobKind kind, CodeBuffer* cb, int size, uint16_t header_size)
  : RuntimeBlob(name, kind, cb, size, header_size, CodeOffsets::frame_never_safe, 0, nullptr)
{}

// Used by gtest
BufferBlob* BufferBlob::create(const char* name, CodeBuffer* cb) {
  ThreadInVMfromUnknown __tiv;  // get to VM state in case we block on CodeCache_lock

  BufferBlob* blob = nullptr;
  unsigned int size = CodeBlob::allocation_size(cb, sizeof(BufferBlob));
  assert(name != nullptr, "must provide a name");
  {
    MutexLocker mu(CodeCache_lock, Mutex::_no_safepoint_check_flag);
    blob = new (size) BufferBlob(name, CodeBlobKind::Buffer, cb, size, sizeof(BufferBlob));
  }
  // Track memory usage statistic after releasing CodeCache_lock
  MemoryService::track_code_cache_memory_usage();

  return blob;
}

void* BufferBlob::operator new(size_t s, unsigned size) throw() {
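  // CodeCache::allocate() may return nullptr when the code cache is exhausted;
  // callers of the create() factories must tolerate a null blob.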
  return CodeCache::allocate(size, CodeBlobType::NonNMethod);
}

void BufferBlob::free(BufferBlob *blob) {
  RuntimeBlob::free(blob);
}

BufferBlob::BufferBlob(const char* name, CodeBlobKind kind, CodeBuffer* cb, int size, uint16_t header_size, int frame_complete, int frame_size, OopMapSet* oop_maps, bool caller_must_gc_arguments)
  : RuntimeBlob(name, kind, cb, size, header_size, frame_complete, frame_size, oop_maps, caller_must_gc_arguments)
{}


//----------------------------------------------------------------------------------------------------
// Implementation of AdapterBlob

AdapterBlob::AdapterBlob(int size, CodeBuffer* cb, int entry_offset[AdapterBlob::ENTRY_COUNT], int frame_complete, int frame_size, OopMapSet* oop_maps, bool caller_must_gc_arguments) :
  BufferBlob("I2C/C2I adapters", CodeBlobKind::Adapter, cb, size, sizeof(AdapterBlob), frame_complete, frame_size, oop_maps, caller_must_gc_arguments) {
  assert(entry_offset[0] == 0, "sanity check");
  for (int i = 1; i < AdapterBlob::ENTRY_COUNT; i++) {
    // The entry is within the adapter blob or unset.
    assert((entry_offset[i] > 0 && entry_offset[i] < cb->insts()->size()) ||
           (entry_offset[i] == -1),
           "invalid entry offset[%d] = 0x%x", i, entry_offset[i]);
  }
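  // Cache the C2I entry offsets; index 0 is the I2C entry, which always starts
  // at offset 0 (asserted above).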
  _c2i_offset = entry_offset[1];
  _c2i_inline_offset = entry_offset[2];
  _c2i_inline_ro_offset = entry_offset[3];
  _c2i_unverified_offset = entry_offset[4];
  _c2i_unverified_inline_offset = entry_offset[5];
  _c2i_no_clinit_check_offset = entry_offset[6];
  CodeCache::commit(this);
}

AdapterBlob* AdapterBlob::create(CodeBuffer* cb, int entry_offset[AdapterBlob::ENTRY_COUNT], int frame_complete, int frame_size, OopMapSet* oop_maps, bool caller_must_gc_arguments) {
  ThreadInVMfromUnknown __tiv;  // get to VM state in case we block on CodeCache_lock

  CodeCache::gc_on_allocation();

  AdapterBlob* blob = nullptr;
  unsigned int size = CodeBlob::allocation_size(cb, sizeof(AdapterBlob));
  {
    MutexLocker mu(CodeCache_lock, Mutex::_no_safepoint_check_flag);
    blob = new (size) AdapterBlob(size, cb, entry_offset, frame_complete, frame_size, oop_maps, caller_must_gc_arguments);
  }
  // Track memory usage statistic after releasing CodeCache_lock
  MemoryService::track_code_cache_memory_usage();

  return blob;
}

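// Export the cached entry offsets; the inverse of the caching done in the
// AdapterBlob constructor above.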
void AdapterBlob::get_offsets(int entry_offset[ENTRY_COUNT]) {
  entry_offset[0] = 0;
  entry_offset[1] = _c2i_offset;
  entry_offset[2] = _c2i_inline_offset;
  entry_offset[3] = _c2i_inline_ro_offset;
  entry_offset[4] = _c2i_unverified_offset;
  entry_offset[5] = _c2i_unverified_inline_offset;
  entry_offset[6] = _c2i_no_clinit_check_offset;
}

//----------------------------------------------------------------------------------------------------
// Implementation of VtableBlob

void* VtableBlob::operator new(size_t s, unsigned size) throw() {
  // Handling of allocation failure stops compilation and prints a bunch of
  // stuff, which requires unlocking the CodeCache_lock, so that the Compile_lock
  // can be locked, and then re-locking the CodeCache_lock. That is not safe in
  // this context as we hold the CompiledICLocker. So we just don't handle code
  // cache exhaustion here; we leave that for a later allocation that does not
  // hold the CompiledICLocker.
  return CodeCache::allocate(size, CodeBlobType::NonNMethod, false /* handle_alloc_failure */);
}

VtableBlob::VtableBlob(const char* name, int size) :
  BufferBlob(name, CodeBlobKind::Vtable, size) {
}

VtableBlob* VtableBlob::create(const char* name, int buffer_size) {
  assert(JavaThread::current()->thread_state() == _thread_in_vm, "called with the wrong state");

  VtableBlob* blob = nullptr;
  unsigned int size = sizeof(VtableBlob);
  // align the size to CodeEntryAlignment
  size = align_code_offset(size);
  size += align_up(buffer_size, oopSize);
  assert(name != nullptr, "must provide a name");
  {
    if (!CodeCache_lock->try_lock()) {
      // If we can't take the CodeCache_lock, then this is a bad time to perform the ongoing
      // IC transition to megamorphic, for which this stub will be needed. It is better to
      // bail out the transition, and wait for a more opportune moment. Not only is it not
      // worth waiting for the lock blockingly for the megamorphic transition, it might
      // also result in a deadlock to blockingly wait, when concurrent class unloading is
      // performed. At this point in time, the CompiledICLocker is taken, so we are not
      // allowed to blockingly wait for the CodeCache_lock, as these two locks are otherwise
      // consistently taken in the opposite order. Bailing out results in an IC transition to
      // the clean state instead, which will cause subsequent calls to retry the transitioning
      // eventually.
      return nullptr;
    }
    blob = new (size) VtableBlob(name, size);
    CodeCache_lock->unlock();
  }
  // Track memory usage statistic after releasing CodeCache_lock
  MemoryService::track_code_cache_memory_usage();

  return blob;
}

//----------------------------------------------------------------------------------------------------
// Implementation of MethodHandlesAdapterBlob

MethodHandlesAdapterBlob* MethodHandlesAdapterBlob::create(int buffer_size) {
  ThreadInVMfromUnknown __tiv;  // get to VM state in case we block on CodeCache_lock

  MethodHandlesAdapterBlob* blob = nullptr;
  unsigned int size = sizeof(MethodHandlesAdapterBlob);
  // align the size to CodeEntryAlignment
  size = CodeBlob::align_code_offset(size);
  size += align_up(buffer_size, oopSize);
  {
    MutexLocker mu(CodeCache_lock, Mutex::_no_safepoint_check_flag);
    blob = new (size) MethodHandlesAdapterBlob(size);
    if (blob == nullptr) {
      vm_exit_out_of_memory(size, OOM_MALLOC_ERROR, "CodeCache: no room for method handle adapter blob");
    }
  }
  // Track memory usage statistic after releasing CodeCache_lock
  MemoryService::track_code_cache_memory_usage();

  return blob;
}

//----------------------------------------------------------------------------------------------------
// Implementation of BufferedInlineTypeBlob
BufferedInlineTypeBlob::BufferedInlineTypeBlob(int size, CodeBuffer* cb, int pack_fields_off, int pack_fields_jobject_off, int unpack_fields_off) :
  BufferBlob("buffered inline type", CodeBlobKind::BufferedInlineType, cb, size, sizeof(BufferedInlineTypeBlob)),
  _pack_fields_off(pack_fields_off),
  _pack_fields_jobject_off(pack_fields_jobject_off),
  _unpack_fields_off(unpack_fields_off) {
  CodeCache::commit(this);
}

BufferedInlineTypeBlob* BufferedInlineTypeBlob::create(CodeBuffer* cb, int pack_fields_off, int pack_fields_jobject_off, int unpack_fields_off) {
  ThreadInVMfromUnknown __tiv;  // get to VM state in case we block on CodeCache_lock

  BufferedInlineTypeBlob* blob = nullptr;
  unsigned int size = CodeBlob::allocation_size(cb, sizeof(BufferedInlineTypeBlob));
  {
    MutexLocker mu(CodeCache_lock, Mutex::_no_safepoint_check_flag);
    blob = new (size) BufferedInlineTypeBlob(size, cb, pack_fields_off, pack_fields_jobject_off, unpack_fields_off);
  }
  // Track memory usage statistic after releasing CodeCache_lock
  MemoryService::track_code_cache_memory_usage();

  return blob;
}

//----------------------------------------------------------------------------------------------------
// Implementation of RuntimeStub

RuntimeStub::RuntimeStub(
  const char* name,
  CodeBuffer* cb,
  int         size,
  int16_t     frame_complete,
  int         frame_size,
  OopMapSet*  oop_maps,
  bool        caller_must_gc_arguments
)
: RuntimeBlob(name, CodeBlobKind::RuntimeStub, cb, size, sizeof(RuntimeStub),
              frame_complete, frame_size, oop_maps, caller_must_gc_arguments)
{
}

RuntimeStub* RuntimeStub::new_runtime_stub(const char* stub_name,
                                           CodeBuffer* cb,
                                           int16_t frame_complete,
                                           int frame_size,
                                           OopMapSet* oop_maps,
                                           bool caller_must_gc_arguments,
                                           bool alloc_fail_is_fatal)
{
  RuntimeStub* stub = nullptr;
  unsigned int size = CodeBlob::allocation_size(cb, sizeof(RuntimeStub));
  ThreadInVMfromUnknown __tiv;  // get to VM state in case we block on CodeCache_lock
  {
    MutexLocker mu(CodeCache_lock, Mutex::_no_safepoint_check_flag);
    stub = new (size) RuntimeStub(stub_name, cb, size, frame_complete, frame_size, oop_maps, caller_must_gc_arguments);
    if (stub == nullptr) {
      if (!alloc_fail_is_fatal) {
        return nullptr;
      }
      fatal("Initial size of CodeCache is too small");
    }
  }

  trace_new_stub(stub, "RuntimeStub - ", stub_name);

  return stub;
}


void* RuntimeStub::operator new(size_t s, unsigned size) throw() {
  return CodeCache::allocate(size, CodeBlobType::NonNMethod);
}

// operator new shared by all singletons:
void* SingletonBlob::operator new(size_t s, unsigned size, bool alloc_fail_is_fatal) throw() {
  void* p = CodeCache::allocate(size, CodeBlobType::NonNMethod);
  if (alloc_fail_is_fatal && !p) fatal("Initial size of CodeCache is too small");
  return p;
}


//----------------------------------------------------------------------------------------------------
// Implementation of DeoptimizationBlob

DeoptimizationBlob::DeoptimizationBlob(
  CodeBuffer* cb,
  int         size,
  OopMapSet*  oop_maps,
  int         unpack_offset,
  int         unpack_with_exception_offset,
  int         unpack_with_reexecution_offset,
  int         frame_size
)
: SingletonBlob("DeoptimizationBlob", CodeBlobKind::Deoptimization, cb,
                size, sizeof(DeoptimizationBlob), frame_size, oop_maps)
{
  _unpack_offset           = unpack_offset;
  _unpack_with_exception   = unpack_with_exception_offset;
  _unpack_with_reexecution = unpack_with_reexecution_offset;
#ifdef COMPILER1
  _unpack_with_exception_in_tls   = -1;
#endif
}


DeoptimizationBlob* DeoptimizationBlob::create(
  CodeBuffer* cb,
  OopMapSet*  oop_maps,
  int        unpack_offset,
  int        unpack_with_exception_offset,
  int        unpack_with_reexecution_offset,
  int        frame_size)
{
  DeoptimizationBlob* blob = nullptr;
  unsigned int size = CodeBlob::allocation_size(cb, sizeof(DeoptimizationBlob));
  ThreadInVMfromUnknown __tiv;  // get to VM state in case we block on CodeCache_lock
  {
    MutexLocker mu(CodeCache_lock, Mutex::_no_safepoint_check_flag);
    blob = new (size) DeoptimizationBlob(cb,
                                         size,
                                         oop_maps,
                                         unpack_offset,
                                         unpack_with_exception_offset,
                                         unpack_with_reexecution_offset,
                                         frame_size);
  }

  trace_new_stub(blob, "DeoptimizationBlob");

  return blob;
}

#ifdef COMPILER2

//----------------------------------------------------------------------------------------------------
// Implementation of UncommonTrapBlob

UncommonTrapBlob::UncommonTrapBlob(
  CodeBuffer* cb,
  int         size,
  OopMapSet*  oop_maps,
  int         frame_size
)
: SingletonBlob("UncommonTrapBlob", CodeBlobKind::UncommonTrap, cb,
                size, sizeof(UncommonTrapBlob), frame_size, oop_maps)
{}


UncommonTrapBlob* UncommonTrapBlob::create(
  CodeBuffer* cb,
  OopMapSet*  oop_maps,
  int        frame_size)
{
  UncommonTrapBlob* blob = nullptr;
  unsigned int size = CodeBlob::allocation_size(cb, sizeof(UncommonTrapBlob));
  ThreadInVMfromUnknown __tiv;  // get to VM state in case we block on CodeCache_lock
  {
    MutexLocker mu(CodeCache_lock, Mutex::_no_safepoint_check_flag);
    blob = new (size, false) UncommonTrapBlob(cb, size, oop_maps, frame_size);
  }

  trace_new_stub(blob, "UncommonTrapBlob");

  return blob;
}

//----------------------------------------------------------------------------------------------------
// Implementation of ExceptionBlob

ExceptionBlob::ExceptionBlob(
  CodeBuffer* cb,
  int         size,
  OopMapSet*  oop_maps,
  int         frame_size
)
: SingletonBlob("ExceptionBlob", CodeBlobKind::Exception, cb,
                size, sizeof(ExceptionBlob), frame_size, oop_maps)
{}


ExceptionBlob* ExceptionBlob::create(
  CodeBuffer* cb,
  OopMapSet*  oop_maps,
  int         frame_size)
{
  ExceptionBlob* blob = nullptr;
  unsigned int size = CodeBlob::allocation_size(cb, sizeof(ExceptionBlob));
  ThreadInVMfromUnknown __tiv;  // get to VM state in case we block on CodeCache_lock
  {
    MutexLocker mu(CodeCache_lock, Mutex::_no_safepoint_check_flag);
    blob = new (size, false) ExceptionBlob(cb, size, oop_maps, frame_size);
  }

  trace_new_stub(blob, "ExceptionBlob");

  return blob;
}

#endif // COMPILER2

//----------------------------------------------------------------------------------------------------
// Implementation of SafepointBlob

SafepointBlob::SafepointBlob(
  CodeBuffer* cb,
  int         size,
  OopMapSet*  oop_maps,
  int         frame_size
)
: SingletonBlob("SafepointBlob", CodeBlobKind::Safepoint, cb,
                size, sizeof(SafepointBlob), frame_size, oop_maps)
{}


SafepointBlob* SafepointBlob::create(
  CodeBuffer* cb,
  OopMapSet*  oop_maps,
  int         frame_size)
{
  SafepointBlob* blob = nullptr;
  unsigned int size = CodeBlob::allocation_size(cb, sizeof(SafepointBlob));
  ThreadInVMfromUnknown __tiv;  // get to VM state in case we block on CodeCache_lock
  {
    MutexLocker mu(CodeCache_lock, Mutex::_no_safepoint_check_flag);
    blob = new (size) SafepointBlob(cb, size, oop_maps, frame_size);
  }

  trace_new_stub(blob, "SafepointBlob");

  return blob;
}

//----------------------------------------------------------------------------------------------------
// Implementation of UpcallStub

UpcallStub::UpcallStub(const char* name, CodeBuffer* cb, int size, jobject receiver, ByteSize frame_data_offset) :
  RuntimeBlob(name, CodeBlobKind::Upcall, cb, size, sizeof(UpcallStub),
              CodeOffsets::frame_never_safe, 0 /* no frame size */,
              /* oop maps = */ nullptr, /* caller must gc arguments = */ false),
  _receiver(receiver),
  _frame_data_offset(frame_data_offset)
{
  CodeCache::commit(this);
}

void* UpcallStub::operator new(size_t s, unsigned size) throw() {
  return CodeCache::allocate(size, CodeBlobType::NonNMethod);
}

UpcallStub* UpcallStub::create(const char* name, CodeBuffer* cb, jobject receiver, ByteSize frame_data_offset) {
  ThreadInVMfromUnknown __tiv;  // get to VM state in case we block on CodeCache_lock

  UpcallStub* blob = nullptr;
  unsigned int size = CodeBlob::allocation_size(cb, sizeof(UpcallStub));
  {
    MutexLocker mu(CodeCache_lock, Mutex::_no_safepoint_check_flag);
    blob = new (size) UpcallStub(name, cb, size, receiver, frame_data_offset);
  }
  if (blob == nullptr) {
    return nullptr; // caller must handle this
  }

  // Track memory usage statistic after releasing CodeCache_lock
  MemoryService::track_code_cache_memory_usage();

  trace_new_stub(blob, "UpcallStub - ", name);

  return blob;
}

void UpcallStub::oops_do(OopClosure* f, const frame& frame) {
  frame_data_for_frame(frame)->old_handles->oops_do(f);
}

JavaFrameAnchor* UpcallStub::jfa_for_frame(const frame& frame) const {
  return &frame_data_for_frame(frame)->jfa;
}

void UpcallStub::free(UpcallStub* blob) {
  assert(blob != nullptr, "caller must check for nullptr");
  JNIHandles::destroy_global(blob->receiver());
  RuntimeBlob::free(blob);
}

//----------------------------------------------------------------------------------------------------
// Verification and printing

void CodeBlob::verify() {
  if (is_nmethod()) {
    as_nmethod()->verify();
  }
}

void CodeBlob::print_on(outputStream* st) const {
  vptr()->print_on(this, st);
}

void CodeBlob::print() const { print_on(tty); }

void CodeBlob::print_value_on(outputStream* st) const {
  vptr()->print_value_on(this, st);
}

void CodeBlob::print_on_impl(outputStream* st) const {
  st->print_cr("[CodeBlob kind:%d (" INTPTR_FORMAT ")]", (int)_kind, p2i(this));
  st->print_cr("Framesize: %d", _frame_size);
}

void CodeBlob::print_value_on_impl(outputStream* st) const {
  st->print_cr("[CodeBlob]");
}

void CodeBlob::print_block_comment(outputStream* stream, address block_begin) const {
#if defined(SUPPORT_ASSEMBLY) || defined(SUPPORT_ABSTRACT_ASSEMBLY)
  if (is_nmethod()) {
    as_nmethod()->print_nmethod_labels(stream, block_begin);
  }
#endif

#ifndef PRODUCT
  ptrdiff_t offset = block_begin - code_begin();
  assert(offset >= 0, "Expecting non-negative offset!");
  _asm_remarks.print(uint(offset), stream);
#endif
}

void CodeBlob::dump_for_addr(address addr, outputStream* st, bool verbose) const {
  if (is_buffer_blob() || is_adapter_blob() || is_vtable_blob() || is_method_handles_adapter_blob()) {
    // the interpreter is generated into a buffer blob
    InterpreterCodelet* i = Interpreter::codelet_containing(addr);
    if (i != nullptr) {
      st->print_cr(INTPTR_FORMAT " is at code_begin+%d in an Interpreter codelet", p2i(addr), (int)(addr - i->code_begin()));
      i->print_on(st);
      return;
    }
    if (Interpreter::contains(addr)) {
      st->print_cr(INTPTR_FORMAT " is pointing into interpreter code"
                   " (not bytecode specific)", p2i(addr));
      return;
    }
    if (AdapterHandlerLibrary::contains(this)) {
      st->print_cr(INTPTR_FORMAT " is at code_begin+%d in an AdapterHandler", p2i(addr), (int)(addr - code_begin()));
      AdapterHandlerLibrary::print_handler_on(st, this);
    }
    // the stubroutines are generated into a buffer blob
    StubCodeDesc* d = StubCodeDesc::desc_for(addr);
    if (d != nullptr) {
      st->print_cr(INTPTR_FORMAT " is at begin+%d in a stub", p2i(addr), (int)(addr - d->begin()));
      d->print_on(st);
      st->cr();
      return;
    }
    if (StubRoutines::contains(addr)) {
      st->print_cr(INTPTR_FORMAT " is pointing to an (unnamed) stub routine", p2i(addr));
      return;
    }
    VtableStub* v = VtableStubs::stub_containing(addr);
    if (v != nullptr) {
      st->print_cr(INTPTR_FORMAT " is at entry_point+%d in a vtable stub", p2i(addr), (int)(addr - v->entry_point()));
      v->print_on(st);
      st->cr();
      return;
    }
  }
  if (is_nmethod()) {
    nmethod* nm = (nmethod*)this;
    ResourceMark rm;
    st->print(INTPTR_FORMAT " is at entry_point+%d in (nmethod*)" INTPTR_FORMAT,
              p2i(addr), (int)(addr - nm->entry_point()), p2i(nm));
    if (verbose) {
      st->print(" for ");
      nm->method()->print_value_on(st);
    }
    st->cr();
    if (verbose && st == tty) {
      // verbose is only ever true when called from findpc in debug.cpp
      nm->print_nmethod(true);
    } else {
      nm->print_on(st);
    }
    return;
  }
  st->print_cr(INTPTR_FORMAT " is at code_begin+%d in ", p2i(addr), (int)(addr - code_begin()));
  print_on(st);
}

void BufferBlob::print_on_impl(outputStream* st) const {
  RuntimeBlob::print_on_impl(st);
  print_value_on_impl(st);
}

void BufferBlob::print_value_on_impl(outputStream* st) const {
  st->print_cr("BufferBlob (" INTPTR_FORMAT  ") used for %s", p2i(this), name());
}

void RuntimeStub::print_on_impl(outputStream* st) const {
  ttyLocker ttyl;
  RuntimeBlob::print_on_impl(st);
  st->print("Runtime Stub (" INTPTR_FORMAT "): ", p2i(this));
  st->print_cr("%s", name());
  Disassembler::decode((RuntimeBlob*)this, st);
}

void RuntimeStub::print_value_on_impl(outputStream* st) const {
  st->print("RuntimeStub (" INTPTR_FORMAT "): ", p2i(this));
  st->print("%s", name());
}

void SingletonBlob::print_on_impl(outputStream* st) const {
  ttyLocker ttyl;
  RuntimeBlob::print_on_impl(st);
  st->print_cr("%s", name());
  Disassembler::decode((RuntimeBlob*)this, st);
}

void SingletonBlob::print_value_on_impl(outputStream* st) const {
  st->print_cr("%s", name());
}

void DeoptimizationBlob::print_value_on_impl(outputStream* st) const {
  st->print_cr("Deoptimization (frame not available)");
}

void UpcallStub::print_on_impl(outputStream* st) const {
  RuntimeBlob::print_on_impl(st);
  print_value_on_impl(st);
  st->print_cr("Frame data offset: %d", (int) _frame_data_offset);
  oop recv = JNIHandles::resolve(_receiver);
  st->print("Receiver MH=");
  recv->print_on(st);
  Disassembler::decode((RuntimeBlob*)this, st);
}

void UpcallStub::print_value_on_impl(outputStream* st) const {
  st->print_cr("UpcallStub (" INTPTR_FORMAT  ") used for %s", p2i(this), name());
}