/*
 * Copyright (c) 1998, 2025, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#include "code/codeBlob.hpp"
#include "code/codeCache.hpp"
#include "code/relocInfo.hpp"
#include "code/vtableStubs.hpp"
#include "compiler/disassembler.hpp"
#include "compiler/oopMap.hpp"
#include "interpreter/bytecode.hpp"
#include "interpreter/interpreter.hpp"
#include "jvm.h"
#include "memory/allocation.inline.hpp"
#include "memory/heap.hpp"
#include "memory/resourceArea.hpp"
#include "oops/oop.inline.hpp"
#include "prims/forte.hpp"
#include "prims/jvmtiExport.hpp"
#include "runtime/handles.inline.hpp"
#include "runtime/interfaceSupport.inline.hpp"
#include "runtime/javaFrameAnchor.hpp"
#include "runtime/jniHandles.inline.hpp"
#include "runtime/mutexLocker.hpp"
#include "runtime/safepoint.hpp"
#include "runtime/sharedRuntime.hpp"
#include "runtime/stubCodeGenerator.hpp"
#include "runtime/stubRoutines.hpp"
#include "runtime/vframe.hpp"
#include "services/memoryService.hpp"
#include "utilities/align.hpp"
#ifdef COMPILER1
#include "c1/c1_Runtime1.hpp"
#endif

#include <type_traits>

// Virtual methods are not allowed in code blobs to simplify caching compiled code.
// Check all "leaf" subclasses of the CodeBlob class.

static_assert(!std::is_polymorphic<nmethod>::value,            "no virtual methods are allowed in nmethod");
static_assert(!std::is_polymorphic<AdapterBlob>::value,        "no virtual methods are allowed in code blobs");
static_assert(!std::is_polymorphic<VtableBlob>::value,         "no virtual methods are allowed in code blobs");
static_assert(!std::is_polymorphic<MethodHandlesAdapterBlob>::value, "no virtual methods are allowed in code blobs");
static_assert(!std::is_polymorphic<RuntimeStub>::value,        "no virtual methods are allowed in code blobs");
static_assert(!std::is_polymorphic<DeoptimizationBlob>::value, "no virtual methods are allowed in code blobs");
static_assert(!std::is_polymorphic<SafepointBlob>::value,      "no virtual methods are allowed in code blobs");
static_assert(!std::is_polymorphic<UpcallStub>::value,         "no virtual methods are allowed in code blobs");
#ifdef COMPILER2
static_assert(!std::is_polymorphic<ExceptionBlob>::value,      "no virtual methods are allowed in code blobs");
static_assert(!std::is_polymorphic<UncommonTrapBlob>::value,   "no virtual methods are allowed in code blobs");
#endif
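
// For example (hypothetical), declaring `virtual void f();` in RuntimeStub
// would make std::is_polymorphic<RuntimeStub>::value true and stop the build
// here: the compiler-injected vtable pointer is process-specific, so it would
// not survive caching the blob and loading it into another JVM instance.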

// Add proxy vtables.
// We need only a few for now - they are used only for printing.
const nmethod::Vptr                  nmethod::_vpntr;
const BufferBlob::Vptr               BufferBlob::_vpntr;
const RuntimeStub::Vptr              RuntimeStub::_vpntr;
const SingletonBlob::Vptr            SingletonBlob::_vpntr;
const DeoptimizationBlob::Vptr       DeoptimizationBlob::_vpntr;
#ifdef COMPILER2
const ExceptionBlob::Vptr            ExceptionBlob::_vpntr;
#endif // COMPILER2
const UpcallStub::Vptr               UpcallStub::_vpntr;

const CodeBlob::Vptr* CodeBlob::vptr(CodeBlobKind kind) {
  constexpr const CodeBlob::Vptr* array[(size_t)CodeBlobKind::Number_Of_Kinds] = {
      nullptr/* None */,
      &nmethod::_vpntr,
      &BufferBlob::_vpntr,
      &AdapterBlob::_vpntr,
      &VtableBlob::_vpntr,
      &MethodHandlesAdapterBlob::_vpntr,
      &RuntimeStub::_vpntr,
      &DeoptimizationBlob::_vpntr,
      &SafepointBlob::_vpntr,
#ifdef COMPILER2
      &ExceptionBlob::_vpntr,
      &UncommonTrapBlob::_vpntr,
#endif
      &UpcallStub::_vpntr
  };

  return array[(size_t)kind];
}

const CodeBlob::Vptr* CodeBlob::vptr() const {
  return vptr(_kind);
}
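
// A minimal sketch of how this manual dispatch is used (see the printing
// methods near the end of this file): callers consult the kind-indexed table
// above instead of relying on a C++ virtual call, so no in-process vtable
// pointer needs to be stored in the (cacheable) blob itself.
//
//   void CodeBlob::print_on(outputStream* st) const {
//     vptr()->print_on(this, st);  // table lookup on _kind, not a vtable load
//   }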

unsigned int CodeBlob::align_code_offset(int offset) {
  // align the size to CodeEntryAlignment
  int header_size = (int)CodeHeap::header_size();
  return align_up(offset + header_size, CodeEntryAlignment) - header_size;
}
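
// Worked example (illustrative values only): with CodeHeap::header_size() == 32
// and CodeEntryAlignment == 64, align_code_offset(40) returns
// align_up(40 + 32, 64) - 32 == 128 - 32 == 96. Once the CodeHeap block header
// is prepended, the code starts at 96 + 32 == 128, a 64-byte boundary.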

// This must be consistent with the CodeBlob constructor's layout actions.
unsigned int CodeBlob::allocation_size(CodeBuffer* cb, int header_size) {
  // align the size to CodeEntryAlignment
  unsigned int size = align_code_offset(header_size);
  size += align_up(cb->total_content_size(), oopSize);
  size += align_up(cb->total_oop_size(), oopSize);
  return size;
}
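
// The size computed above reflects the layout established by the constructor:
//
//   [ blob header | content (code) | oop data ]
//                 ^ _content_offset == align_code_offset(header_size)
//
// with each section padded to oopSize. Relocation data is not part of this
// allocation: it lives in the separately malloc'ed mutable data area
// (see _mutable_data below).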

CodeBlob::CodeBlob(const char* name, CodeBlobKind kind, CodeBuffer* cb, int size, uint16_t header_size,
                   int16_t frame_complete_offset, int frame_size, OopMapSet* oop_maps, bool caller_must_gc_arguments,
                   int mutable_data_size) :
  _oop_maps(nullptr), // will be set by set_oop_maps() call
  _name(name),
  _mutable_data(header_begin() + size), // default value is blob_end()
  _size(size),
  _relocation_size(align_up(cb->total_relocation_size(), oopSize)),
  _content_offset(CodeBlob::align_code_offset(header_size)),
  _code_offset(_content_offset + cb->total_offset_of(cb->insts())),
  _data_offset(_content_offset + align_up(cb->total_content_size(), oopSize)),
  _frame_size(frame_size),
  _mutable_data_size(mutable_data_size),
  S390_ONLY(_ctable_offset(0) COMMA)
  _header_size(header_size),
  _frame_complete_offset(frame_complete_offset),
  _kind(kind),
  _caller_must_gc_arguments(caller_must_gc_arguments)
{
  assert(is_aligned(_size,            oopSize), "unaligned size");
  assert(is_aligned(header_size,      oopSize), "unaligned size");
  assert(is_aligned(_relocation_size, oopSize), "unaligned size");
  assert(_data_offset <= _size, "codeBlob is too small: %d > %d", _data_offset, _size);
  assert(is_nmethod() || (cb->total_oop_size() + cb->total_metadata_size() == 0), "only nmethods may have oops or metadata");
  assert(code_end() == content_end(), "must be the same - see code_end()");
#ifdef COMPILER1
  // probably wrong for tiered
  assert(_frame_size >= -1, "must use frame size or -1 for runtime stubs");
#endif // COMPILER1

  if (_mutable_data_size > 0) {
    _mutable_data = (address)os::malloc(_mutable_data_size, mtCode);
    if (_mutable_data == nullptr) {
      vm_exit_out_of_memory(_mutable_data_size, OOM_MALLOC_ERROR, "codebuffer: no space for mutable data");
    }
  } else {
    // We need a unique, valid, non-null address
    assert(_mutable_data == blob_end(), "sanity");
  }

  set_oop_maps(oop_maps);
}

// Simple CodeBlob used for simple BufferBlob.
CodeBlob::CodeBlob(const char* name, CodeBlobKind kind, int size, uint16_t header_size) :
  _oop_maps(nullptr),
  _name(name),
  _mutable_data(header_begin() + size), // default value is blob_end()
  _size(size),
  _relocation_size(0),
  _content_offset(CodeBlob::align_code_offset(header_size)),
  _code_offset(_content_offset),
  _data_offset(size),
  _frame_size(0),
  S390_ONLY(_ctable_offset(0) COMMA)
  _header_size(header_size),
  _frame_complete_offset(CodeOffsets::frame_never_safe),
  _kind(kind),
  _caller_must_gc_arguments(false)
{
  assert(is_aligned(size,            oopSize), "unaligned size");
  assert(is_aligned(header_size,     oopSize), "unaligned size");
  assert(_mutable_data == blob_end(), "sanity");
}

void CodeBlob::restore_mutable_data(address reloc_data) {
  // Relocation data is now stored as part of the mutable data area; allocate it before copying relocations
  if (_mutable_data_size > 0) {
    _mutable_data = (address)os::malloc(_mutable_data_size, mtCode);
    if (_mutable_data == nullptr) {
      vm_exit_out_of_memory(_mutable_data_size, OOM_MALLOC_ERROR, "codebuffer: no space for mutable data");
    }
  }
  if (_relocation_size > 0) {
    memcpy((address)relocation_begin(), reloc_data, relocation_size());
  }
}

void CodeBlob::purge() {
  assert(_mutable_data != nullptr, "should never be null");
  if (_mutable_data != blob_end()) {
    os::free(_mutable_data);
    _mutable_data = blob_end(); // Valid non-null address
  }
  if (_oop_maps != nullptr) {
    delete _oop_maps;
    _oop_maps = nullptr;
  }
  NOT_PRODUCT(_asm_remarks.clear());
  NOT_PRODUCT(_dbg_strings.clear());
}

void CodeBlob::set_oop_maps(OopMapSet* p) {
  // Danger Will Robinson! This method allocates a big
  // chunk of memory, it's your job to free it.
  if (p != nullptr) {
    _oop_maps = ImmutableOopMapSet::build_from(p);
  } else {
    _oop_maps = nullptr;
  }
}

const ImmutableOopMap* CodeBlob::oop_map_for_return_address(address return_address) const {
  assert(_oop_maps != nullptr, "no oop maps to search");
  return _oop_maps->find_map_at_offset((intptr_t) return_address - (intptr_t) code_begin());
}

void CodeBlob::print_code_on(outputStream* st) {
  ResourceMark m;
  Disassembler::decode(this, st);
}

void CodeBlob::prepare_for_archiving_impl() {
  set_name(nullptr);
  _oop_maps = nullptr;
  _mutable_data = nullptr;
#ifndef PRODUCT
  asm_remarks().clear();
  dbg_strings().clear();
#endif /* PRODUCT */
}

void CodeBlob::prepare_for_archiving() {
  vptr(_kind)->prepare_for_archiving(this);
}

void CodeBlob::archive_blob(CodeBlob* blob, address archive_buffer) {
  blob->copy_to(archive_buffer);
  CodeBlob* archived_blob = (CodeBlob*)archive_buffer;
  archived_blob->prepare_for_archiving();
}

void CodeBlob::post_restore_impl() {
  // Track memory usage statistic after releasing CodeCache_lock
  MemoryService::track_code_cache_memory_usage();
}

void CodeBlob::post_restore() {
  vptr(_kind)->post_restore(this);
}

CodeBlob* CodeBlob::restore(address code_cache_buffer,
                            const char* name,
                            address archived_reloc_data,
                            ImmutableOopMapSet* archived_oop_maps)
{
  copy_to(code_cache_buffer);
  CodeBlob* code_blob = (CodeBlob*)code_cache_buffer;
  code_blob->set_name(name);
  code_blob->restore_mutable_data(archived_reloc_data);
  code_blob->set_oop_maps(archived_oop_maps);
  return code_blob;
}

CodeBlob* CodeBlob::create(CodeBlob* archived_blob,
                           const char* name,
                           address archived_reloc_data,
                           ImmutableOopMapSet* archived_oop_maps
#ifndef PRODUCT
                           , AsmRemarks& archived_asm_remarks
                           , DbgStrings& archived_dbg_strings
#endif // PRODUCT
                          )
{
  ThreadInVMfromUnknown __tiv;  // get to VM state in case we block on CodeCache_lock

  CodeCache::gc_on_allocation();

  CodeBlob* blob = nullptr;
  unsigned int size = archived_blob->size();
  {
    MutexLocker mu(CodeCache_lock, Mutex::_no_safepoint_check_flag);
    address code_cache_buffer = (address)CodeCache::allocate(size, CodeBlobType::NonNMethod);
    if (code_cache_buffer != nullptr) {
      blob = archived_blob->restore(code_cache_buffer,
                                    name,
                                    archived_reloc_data,
                                    archived_oop_maps);
      assert(blob != nullptr, "sanity check");
#ifndef PRODUCT
      blob->use_remarks(archived_asm_remarks);
      archived_asm_remarks.clear();
      blob->use_strings(archived_dbg_strings);
      archived_dbg_strings.clear();
#endif // PRODUCT
      // Flush the code block
      ICache::invalidate_range(blob->code_begin(), blob->code_size());
      CodeCache::commit(blob); // Count adapters
    }
  }
  if (blob != nullptr) {
    blob->post_restore();
  }
  return blob;
}
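
// A minimal usage sketch (hypothetical caller; the real callers sit in the
// AOT code cache restore path). `saved`, `reloc_data` and `oop_maps` stand
// for data recovered from the archive:
//
//   CodeBlob* live = CodeBlob::create(saved, "restored blob", reloc_data,
//                                     oop_maps /*, remarks, strings in non-PRODUCT builds */);
//   if (live == nullptr) {
//     // code cache allocation failed; the caller must cope gracefully
//   }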

//-----------------------------------------------------------------------------------------
// Creates a RuntimeBlob from a CodeBuffer and copies code and relocation info.

RuntimeBlob::RuntimeBlob(
  const char* name,
  CodeBlobKind kind,
  CodeBuffer* cb,
  int         size,
  uint16_t    header_size,
  int16_t     frame_complete,
  int         frame_size,
  OopMapSet*  oop_maps,
  bool        caller_must_gc_arguments)
  : CodeBlob(name, kind, cb, size, header_size, frame_complete, frame_size, oop_maps, caller_must_gc_arguments,
             align_up(cb->total_relocation_size(), oopSize))
{
  cb->copy_code_and_locs_to(this);
}

void RuntimeBlob::free(RuntimeBlob* blob) {
  assert(blob != nullptr, "caller must check for nullptr");
  ThreadInVMfromUnknown __tiv;  // get to VM state in case we block on CodeCache_lock
  blob->purge();
  {
    MutexLocker mu(CodeCache_lock, Mutex::_no_safepoint_check_flag);
    CodeCache::free(blob);
  }
  // Track memory usage statistic after releasing CodeCache_lock
  MemoryService::track_code_cache_memory_usage();
}

void RuntimeBlob::trace_new_stub(RuntimeBlob* stub, const char* name1, const char* name2) {
  // Do not hold the CodeCache lock during name formatting.
  assert(!CodeCache_lock->owned_by_self(), "release CodeCache_lock before registering the stub");

  if (stub != nullptr && (PrintStubCode ||
                       Forte::is_enabled() ||
                       JvmtiExport::should_post_dynamic_code_generated())) {
    char stub_id[256];
    assert(strlen(name1) + strlen(name2) < sizeof(stub_id), "stub name too long for buffer");
    jio_snprintf(stub_id, sizeof(stub_id), "%s%s", name1, name2);
    if (PrintStubCode) {
      ttyLocker ttyl;
      tty->print_cr("- - - [BEGIN] - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -");
      tty->print_cr("Decoding %s " PTR_FORMAT " [" PTR_FORMAT ", " PTR_FORMAT "] (%d bytes)",
                    stub_id, p2i(stub), p2i(stub->code_begin()), p2i(stub->code_end()), stub->code_size());
      Disassembler::decode(stub->code_begin(), stub->code_end(), tty
                           NOT_PRODUCT(COMMA &stub->asm_remarks()));
      if ((stub->oop_maps() != nullptr) && AbstractDisassembler::show_structs()) {
        tty->print_cr("- - - [OOP MAPS]- - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -");
        stub->oop_maps()->print();
      }
      tty->print_cr("- - - [END] - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -");
      tty->cr();
    }
    if (Forte::is_enabled()) {
      Forte::register_stub(stub_id, stub->code_begin(), stub->code_end());
    }

    if (JvmtiExport::should_post_dynamic_code_generated()) {
      const char* stub_name = name2;
      if (name2[0] == '\0')  stub_name = name1;
      JvmtiExport::post_dynamic_code_generated(stub_name, stub->code_begin(), stub->code_end());
    }
  }

  // Track memory usage statistic after releasing CodeCache_lock
  MemoryService::track_code_cache_memory_usage();
}

//----------------------------------------------------------------------------------------------------
// Implementation of BufferBlob

BufferBlob::BufferBlob(const char* name, CodeBlobKind kind, int size)
: RuntimeBlob(name, kind, size, sizeof(BufferBlob))
{}

BufferBlob* BufferBlob::create(const char* name, uint buffer_size) {
  ThreadInVMfromUnknown __tiv;  // get to VM state in case we block on CodeCache_lock

  BufferBlob* blob = nullptr;
  unsigned int size = sizeof(BufferBlob);
  // align the size to CodeEntryAlignment
  size = CodeBlob::align_code_offset(size);
  size += align_up(buffer_size, oopSize);
  assert(name != nullptr, "must provide a name");
  {
    MutexLocker mu(CodeCache_lock, Mutex::_no_safepoint_check_flag);
    blob = new (size) BufferBlob(name, CodeBlobKind::Buffer, size);
  }
  // Track memory usage statistic after releasing CodeCache_lock
  MemoryService::track_code_cache_memory_usage();

  return blob;
}


BufferBlob::BufferBlob(const char* name, CodeBlobKind kind, CodeBuffer* cb, int size)
  : RuntimeBlob(name, kind, cb, size, sizeof(BufferBlob), CodeOffsets::frame_never_safe, 0, nullptr)
{}

// Used by gtest
BufferBlob* BufferBlob::create(const char* name, CodeBuffer* cb) {
  ThreadInVMfromUnknown __tiv;  // get to VM state in case we block on CodeCache_lock

  BufferBlob* blob = nullptr;
  unsigned int size = CodeBlob::allocation_size(cb, sizeof(BufferBlob));
  assert(name != nullptr, "must provide a name");
  {
    MutexLocker mu(CodeCache_lock, Mutex::_no_safepoint_check_flag);
    blob = new (size) BufferBlob(name, CodeBlobKind::Buffer, cb, size);
  }
  // Track memory usage statistic after releasing CodeCache_lock
  MemoryService::track_code_cache_memory_usage();

  return blob;
}

void* BufferBlob::operator new(size_t s, unsigned size) throw() {
  return CodeCache::allocate(size, CodeBlobType::NonNMethod);
}

void BufferBlob::free(BufferBlob *blob) {
  RuntimeBlob::free(blob);
}


//----------------------------------------------------------------------------------------------------
// Implementation of AdapterBlob

AdapterBlob::AdapterBlob(int size, CodeBuffer* cb) :
  BufferBlob("I2C/C2I adapters", CodeBlobKind::Adapter, cb, size) {
  CodeCache::commit(this);
}

AdapterBlob* AdapterBlob::create(CodeBuffer* cb) {
  ThreadInVMfromUnknown __tiv;  // get to VM state in case we block on CodeCache_lock

  CodeCache::gc_on_allocation();

  AdapterBlob* blob = nullptr;
  unsigned int size = CodeBlob::allocation_size(cb, sizeof(AdapterBlob));
  {
    MutexLocker mu(CodeCache_lock, Mutex::_no_safepoint_check_flag);
    blob = new (size) AdapterBlob(size, cb);
  }
  // Track memory usage statistic after releasing CodeCache_lock
  MemoryService::track_code_cache_memory_usage();

  return blob;
}

//----------------------------------------------------------------------------------------------------
// Implementation of VtableBlob

void* VtableBlob::operator new(size_t s, unsigned size) throw() {
  // Handling of allocation failure stops compilation and prints a bunch of
  // stuff, which requires unlocking the CodeCache_lock, so that the Compile_lock
  // can be locked, and then re-locking the CodeCache_lock. That is not safe in
  // this context as we hold the CompiledICLocker. So we just don't handle code
  // cache exhaustion here; we leave that for a later allocation that does not
  // hold the CompiledICLocker.
  return CodeCache::allocate(size, CodeBlobType::NonNMethod, false /* handle_alloc_failure */);
}

VtableBlob::VtableBlob(const char* name, int size) :
  BufferBlob(name, CodeBlobKind::Vtable, size) {
}

VtableBlob* VtableBlob::create(const char* name, int buffer_size) {
  assert(JavaThread::current()->thread_state() == _thread_in_vm, "called with the wrong state");

  VtableBlob* blob = nullptr;
  unsigned int size = sizeof(VtableBlob);
  // align the size to CodeEntryAlignment
  size = align_code_offset(size);
  size += align_up(buffer_size, oopSize);
  assert(name != nullptr, "must provide a name");
  {
    if (!CodeCache_lock->try_lock()) {
      // If we can't take the CodeCache_lock, then this is a bad time to perform the ongoing
      // IC transition to megamorphic, for which this stub will be needed. It is better to
      // bail out the transition, and wait for a more opportune moment. Not only is it not
      // worth waiting for the lock blockingly for the megamorphic transition, it might
      // also result in a deadlock to blockingly wait, when concurrent class unloading is
      // performed. At this point in time, the CompiledICLocker is taken, so we are not
      // allowed to blockingly wait for the CodeCache_lock, as these two locks are otherwise
      // consistently taken in the opposite order. Bailing out results in an IC transition to
      // the clean state instead, which will cause subsequent calls to retry the transitioning
      // eventually.
      return nullptr;
    }
    blob = new (size) VtableBlob(name, size);
    CodeCache_lock->unlock();
  }
  // Track memory usage statistic after releasing CodeCache_lock
  MemoryService::track_code_cache_memory_usage();

  return blob;
}

//----------------------------------------------------------------------------------------------------
// Implementation of MethodHandlesAdapterBlob

MethodHandlesAdapterBlob* MethodHandlesAdapterBlob::create(int buffer_size) {
  ThreadInVMfromUnknown __tiv;  // get to VM state in case we block on CodeCache_lock

  MethodHandlesAdapterBlob* blob = nullptr;
  unsigned int size = sizeof(MethodHandlesAdapterBlob);
  // align the size to CodeEntryAlignment
  size = CodeBlob::align_code_offset(size);
  size += align_up(buffer_size, oopSize);
  {
    MutexLocker mu(CodeCache_lock, Mutex::_no_safepoint_check_flag);
    blob = new (size) MethodHandlesAdapterBlob(size);
    if (blob == nullptr) {
      vm_exit_out_of_memory(size, OOM_MALLOC_ERROR, "CodeCache: no room for method handle adapter blob");
    }
  }
  // Track memory usage statistic after releasing CodeCache_lock
  MemoryService::track_code_cache_memory_usage();

  return blob;
}

//----------------------------------------------------------------------------------------------------
// Implementation of RuntimeStub

RuntimeStub::RuntimeStub(
  const char* name,
  CodeBuffer* cb,
  int         size,
  int16_t     frame_complete,
  int         frame_size,
  OopMapSet*  oop_maps,
  bool        caller_must_gc_arguments
)
: RuntimeBlob(name, CodeBlobKind::RuntimeStub, cb, size, sizeof(RuntimeStub),
              frame_complete, frame_size, oop_maps, caller_must_gc_arguments)
{
}

RuntimeStub* RuntimeStub::new_runtime_stub(const char* stub_name,
                                           CodeBuffer* cb,
                                           int16_t frame_complete,
                                           int frame_size,
                                           OopMapSet* oop_maps,
                                           bool caller_must_gc_arguments,
                                           bool alloc_fail_is_fatal)
{
  RuntimeStub* stub = nullptr;
  unsigned int size = CodeBlob::allocation_size(cb, sizeof(RuntimeStub));
  ThreadInVMfromUnknown __tiv;  // get to VM state in case we block on CodeCache_lock
  {
    MutexLocker mu(CodeCache_lock, Mutex::_no_safepoint_check_flag);
    stub = new (size) RuntimeStub(stub_name, cb, size, frame_complete, frame_size, oop_maps, caller_must_gc_arguments);
    if (stub == nullptr) {
      if (!alloc_fail_is_fatal) {
        return nullptr;
      }
      fatal("Initial size of CodeCache is too small");
    }
  }

  trace_new_stub(stub, "RuntimeStub - ", stub_name);

  return stub;
}


void* RuntimeStub::operator new(size_t s, unsigned size) throw() {
  return CodeCache::allocate(size, CodeBlobType::NonNMethod);
}

// operator new shared by all singletons:
void* SingletonBlob::operator new(size_t s, unsigned size, bool alloc_fail_is_fatal) throw() {
  void* p = CodeCache::allocate(size, CodeBlobType::NonNMethod);
  if (alloc_fail_is_fatal && !p) fatal("Initial size of CodeCache is too small");
  return p;
}


//----------------------------------------------------------------------------------------------------
// Implementation of DeoptimizationBlob

DeoptimizationBlob::DeoptimizationBlob(
  CodeBuffer* cb,
  int         size,
  OopMapSet*  oop_maps,
  int         unpack_offset,
  int         unpack_with_exception_offset,
  int         unpack_with_reexecution_offset,
  int         frame_size
)
: SingletonBlob("DeoptimizationBlob", CodeBlobKind::Deoptimization, cb,
                size, sizeof(DeoptimizationBlob), frame_size, oop_maps)
{
  _unpack_offset           = unpack_offset;
  _unpack_with_exception   = unpack_with_exception_offset;
  _unpack_with_reexecution = unpack_with_reexecution_offset;
#ifdef COMPILER1
  _unpack_with_exception_in_tls   = -1;
#endif
}


DeoptimizationBlob* DeoptimizationBlob::create(
  CodeBuffer* cb,
  OopMapSet*  oop_maps,
  int        unpack_offset,
  int        unpack_with_exception_offset,
  int        unpack_with_reexecution_offset,
  int        frame_size)
{
  DeoptimizationBlob* blob = nullptr;
  unsigned int size = CodeBlob::allocation_size(cb, sizeof(DeoptimizationBlob));
  ThreadInVMfromUnknown __tiv;  // get to VM state in case we block on CodeCache_lock
  {
    MutexLocker mu(CodeCache_lock, Mutex::_no_safepoint_check_flag);
    blob = new (size) DeoptimizationBlob(cb,
                                         size,
                                         oop_maps,
                                         unpack_offset,
                                         unpack_with_exception_offset,
                                         unpack_with_reexecution_offset,
                                         frame_size);
  }

  trace_new_stub(blob, "DeoptimizationBlob");

  return blob;
}

#ifdef COMPILER2

//----------------------------------------------------------------------------------------------------
// Implementation of UncommonTrapBlob

UncommonTrapBlob::UncommonTrapBlob(
  CodeBuffer* cb,
  int         size,
  OopMapSet*  oop_maps,
  int         frame_size
)
: SingletonBlob("UncommonTrapBlob", CodeBlobKind::UncommonTrap, cb,
                size, sizeof(UncommonTrapBlob), frame_size, oop_maps)
{}


UncommonTrapBlob* UncommonTrapBlob::create(
  CodeBuffer* cb,
  OopMapSet*  oop_maps,
  int        frame_size)
{
  UncommonTrapBlob* blob = nullptr;
  unsigned int size = CodeBlob::allocation_size(cb, sizeof(UncommonTrapBlob));
  ThreadInVMfromUnknown __tiv;  // get to VM state in case we block on CodeCache_lock
  {
    MutexLocker mu(CodeCache_lock, Mutex::_no_safepoint_check_flag);
    blob = new (size, false) UncommonTrapBlob(cb, size, oop_maps, frame_size);
  }

  trace_new_stub(blob, "UncommonTrapBlob");

  return blob;
}

//----------------------------------------------------------------------------------------------------
// Implementation of ExceptionBlob

ExceptionBlob::ExceptionBlob(
  CodeBuffer* cb,
  int         size,
  OopMapSet*  oop_maps,
  int         frame_size
)
: SingletonBlob("ExceptionBlob", CodeBlobKind::Exception, cb,
                size, sizeof(ExceptionBlob), frame_size, oop_maps)
{}


ExceptionBlob* ExceptionBlob::create(
  CodeBuffer* cb,
  OopMapSet*  oop_maps,
  int         frame_size)
{
  ExceptionBlob* blob = nullptr;
  unsigned int size = CodeBlob::allocation_size(cb, sizeof(ExceptionBlob));
  ThreadInVMfromUnknown __tiv;  // get to VM state in case we block on CodeCache_lock
  {
    MutexLocker mu(CodeCache_lock, Mutex::_no_safepoint_check_flag);
    blob = new (size, false) ExceptionBlob(cb, size, oop_maps, frame_size);
  }

  trace_new_stub(blob, "ExceptionBlob");

  return blob;
}

#endif // COMPILER2

//----------------------------------------------------------------------------------------------------
// Implementation of SafepointBlob

SafepointBlob::SafepointBlob(
  CodeBuffer* cb,
  int         size,
  OopMapSet*  oop_maps,
  int         frame_size
)
: SingletonBlob("SafepointBlob", CodeBlobKind::Safepoint, cb,
                size, sizeof(SafepointBlob), frame_size, oop_maps)
{}


SafepointBlob* SafepointBlob::create(
  CodeBuffer* cb,
  OopMapSet*  oop_maps,
  int         frame_size)
{
  SafepointBlob* blob = nullptr;
  unsigned int size = CodeBlob::allocation_size(cb, sizeof(SafepointBlob));
  ThreadInVMfromUnknown __tiv;  // get to VM state in case we block on CodeCache_lock
  {
    MutexLocker mu(CodeCache_lock, Mutex::_no_safepoint_check_flag);
    blob = new (size) SafepointBlob(cb, size, oop_maps, frame_size);
  }

  trace_new_stub(blob, "SafepointBlob");

  return blob;
}

//----------------------------------------------------------------------------------------------------
// Implementation of UpcallStub

UpcallStub::UpcallStub(const char* name, CodeBuffer* cb, int size, jobject receiver, ByteSize frame_data_offset) :
  RuntimeBlob(name, CodeBlobKind::Upcall, cb, size, sizeof(UpcallStub),
              CodeOffsets::frame_never_safe, 0 /* no frame size */,
              /* oop maps = */ nullptr, /* caller must gc arguments = */ false),
  _receiver(receiver),
  _frame_data_offset(frame_data_offset)
{
  CodeCache::commit(this);
}

void* UpcallStub::operator new(size_t s, unsigned size) throw() {
  return CodeCache::allocate(size, CodeBlobType::NonNMethod);
}

UpcallStub* UpcallStub::create(const char* name, CodeBuffer* cb, jobject receiver, ByteSize frame_data_offset) {
  ThreadInVMfromUnknown __tiv;  // get to VM state in case we block on CodeCache_lock

  UpcallStub* blob = nullptr;
  unsigned int size = CodeBlob::allocation_size(cb, sizeof(UpcallStub));
  {
    MutexLocker mu(CodeCache_lock, Mutex::_no_safepoint_check_flag);
    blob = new (size) UpcallStub(name, cb, size, receiver, frame_data_offset);
  }
  if (blob == nullptr) {
    return nullptr; // caller must handle this
  }

  // Track memory usage statistic after releasing CodeCache_lock
  MemoryService::track_code_cache_memory_usage();

  trace_new_stub(blob, "UpcallStub - ", name);

  return blob;
}

void UpcallStub::oops_do(OopClosure* f, const frame& frame) {
  frame_data_for_frame(frame)->old_handles->oops_do(f);
}

JavaFrameAnchor* UpcallStub::jfa_for_frame(const frame& frame) const {
  return &frame_data_for_frame(frame)->jfa;
}

void UpcallStub::free(UpcallStub* blob) {
  assert(blob != nullptr, "caller must check for nullptr");
  JNIHandles::destroy_global(blob->receiver());
  RuntimeBlob::free(blob);
}

//----------------------------------------------------------------------------------------------------
// Verification and printing

void CodeBlob::verify() {
  if (is_nmethod()) {
    as_nmethod()->verify();
  }
}

void CodeBlob::print_on(outputStream* st) const {
  vptr()->print_on(this, st);
}

void CodeBlob::print() const { print_on(tty); }

void CodeBlob::print_value_on(outputStream* st) const {
  vptr()->print_value_on(this, st);
}

void CodeBlob::print_on_impl(outputStream* st) const {
  st->print_cr("[CodeBlob kind:%d (" INTPTR_FORMAT ")]", (int)_kind, p2i(this));
  st->print_cr("Framesize: %d", _frame_size);
}

void CodeBlob::print_value_on_impl(outputStream* st) const {
  st->print_cr("[CodeBlob]");
}

void CodeBlob::print_block_comment(outputStream* stream, address block_begin) const {
#if defined(SUPPORT_ASSEMBLY) || defined(SUPPORT_ABSTRACT_ASSEMBLY)
  if (is_nmethod()) {
    as_nmethod()->print_nmethod_labels(stream, block_begin);
  }
#endif

#ifndef PRODUCT
  ptrdiff_t offset = block_begin - code_begin();
  assert(offset >= 0, "Expecting non-negative offset!");
  _asm_remarks.print(uint(offset), stream);
#endif
}

void CodeBlob::dump_for_addr(address addr, outputStream* st, bool verbose) const {
  if (is_buffer_blob() || is_adapter_blob() || is_vtable_blob() || is_method_handles_adapter_blob()) {
    // the interpreter is generated into a buffer blob
    InterpreterCodelet* i = Interpreter::codelet_containing(addr);
    if (i != nullptr) {
      st->print_cr(INTPTR_FORMAT " is at code_begin+%d in an Interpreter codelet", p2i(addr), (int)(addr - i->code_begin()));
      i->print_on(st);
      return;
    }
    if (Interpreter::contains(addr)) {
      st->print_cr(INTPTR_FORMAT " is pointing into interpreter code"
                   " (not bytecode specific)", p2i(addr));
      return;
    }
    // adapter blobs are tracked by the AdapterHandlerLibrary
    if (AdapterHandlerLibrary::contains(this)) {
      st->print_cr(INTPTR_FORMAT " is at code_begin+%d in an AdapterHandler", p2i(addr), (int)(addr - code_begin()));
      AdapterHandlerLibrary::print_handler_on(st, this);
    }
    // the stubroutines are generated into a buffer blob
    StubCodeDesc* d = StubCodeDesc::desc_for(addr);
    if (d != nullptr) {
      st->print_cr(INTPTR_FORMAT " is at begin+%d in a stub", p2i(addr), (int)(addr - d->begin()));
      d->print_on(st);
      st->cr();
      return;
    }
    if (StubRoutines::contains(addr)) {
      st->print_cr(INTPTR_FORMAT " is pointing to an (unnamed) stub routine", p2i(addr));
      return;
    }
    VtableStub* v = VtableStubs::stub_containing(addr);
    if (v != nullptr) {
      st->print_cr(INTPTR_FORMAT " is at entry_point+%d in a vtable stub", p2i(addr), (int)(addr - v->entry_point()));
      v->print_on(st);
      st->cr();
      return;
    }
  }
  if (is_nmethod()) {
    nmethod* nm = (nmethod*)this;
    ResourceMark rm;
    st->print(INTPTR_FORMAT " is at entry_point+%d in (nmethod*)" INTPTR_FORMAT,
              p2i(addr), (int)(addr - nm->entry_point()), p2i(nm));
    if (verbose) {
      st->print(" for ");
      nm->method()->print_value_on(st);
    }
    st->cr();
    if (verbose && st == tty) {
      // verbose is only ever true when called from findpc in debug.cpp
      nm->print_nmethod(true);
    } else {
      nm->print_on(st);
    }
    return;
  }
  st->print_cr(INTPTR_FORMAT " is at code_begin+%d in ", p2i(addr), (int)(addr - code_begin()));
  print_on(st);
}

void BufferBlob::print_on_impl(outputStream* st) const {
  RuntimeBlob::print_on_impl(st);
  print_value_on_impl(st);
}

void BufferBlob::print_value_on_impl(outputStream* st) const {
  st->print_cr("BufferBlob (" INTPTR_FORMAT  ") used for %s", p2i(this), name());
}

void RuntimeStub::print_on_impl(outputStream* st) const {
  ttyLocker ttyl;
  RuntimeBlob::print_on_impl(st);
  st->print("Runtime Stub (" INTPTR_FORMAT "): ", p2i(this));
  st->print_cr("%s", name());
  Disassembler::decode((RuntimeBlob*)this, st);
}

void RuntimeStub::print_value_on_impl(outputStream* st) const {
  st->print("RuntimeStub (" INTPTR_FORMAT "): ", p2i(this)); st->print("%s", name());
}

void SingletonBlob::print_on_impl(outputStream* st) const {
  ttyLocker ttyl;
  RuntimeBlob::print_on_impl(st);
  st->print_cr("%s", name());
  Disassembler::decode((RuntimeBlob*)this, st);
}

void SingletonBlob::print_value_on_impl(outputStream* st) const {
  st->print_cr("%s", name());
}

void DeoptimizationBlob::print_value_on_impl(outputStream* st) const {
  st->print_cr("Deoptimization (frame not available)");
}

void UpcallStub::print_on_impl(outputStream* st) const {
  RuntimeBlob::print_on_impl(st);
  print_value_on_impl(st);
  st->print_cr("Frame data offset: %d", (int) _frame_data_offset);
  oop recv = JNIHandles::resolve(_receiver);
  st->print("Receiver MH=");
  recv->print_on(st);
  Disassembler::decode((RuntimeBlob*)this, st);
}

void UpcallStub::print_value_on_impl(outputStream* st) const {
  st->print_cr("UpcallStub (" INTPTR_FORMAT  ") used for %s", p2i(this), name());
}