1 /*
  2  * Copyright (c) 1998, 2025, Oracle and/or its affiliates. All rights reserved.
  3  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  4  *
  5  * This code is free software; you can redistribute it and/or modify it
  6  * under the terms of the GNU General Public License version 2 only, as
  7  * published by the Free Software Foundation.
  8  *
  9  * This code is distributed in the hope that it will be useful, but WITHOUT
 10  * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 11  * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
 12  * version 2 for more details (a copy is included in the LICENSE file that
 13  * accompanied this code).
 14  *
 15  * You should have received a copy of the GNU General Public License version
 16  * 2 along with this work; if not, write to the Free Software Foundation,
 17  * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 18  *
 19  * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 20  * or visit www.oracle.com if you need additional information or have any
 21  * questions.
 22  *
 23  */
 24 
 25 #include "code/SCCache.hpp"
 26 #include "code/codeBlob.hpp"
 27 #include "code/codeCache.hpp"
 28 #include "code/relocInfo.hpp"
 29 #include "code/vtableStubs.hpp"
 30 #include "compiler/disassembler.hpp"
 31 #include "compiler/oopMap.hpp"
 32 #include "interpreter/bytecode.hpp"
 33 #include "interpreter/interpreter.hpp"
 34 #include "jvm.h"
 35 #include "memory/allocation.inline.hpp"
 36 #include "memory/heap.hpp"
 37 #include "memory/resourceArea.hpp"
 38 #include "oops/oop.inline.hpp"
 39 #include "prims/forte.hpp"
 40 #include "prims/jvmtiExport.hpp"
 41 #include "runtime/handles.inline.hpp"
 42 #include "runtime/interfaceSupport.inline.hpp"
 43 #include "runtime/javaFrameAnchor.hpp"
 44 #include "runtime/jniHandles.inline.hpp"
 45 #include "runtime/mutexLocker.hpp"
 46 #include "runtime/safepoint.hpp"
 47 #include "runtime/sharedRuntime.hpp"
 48 #include "runtime/stubCodeGenerator.hpp"
 49 #include "runtime/stubRoutines.hpp"
 50 #include "runtime/vframe.hpp"
 51 #include "services/memoryService.hpp"
 52 #include "utilities/align.hpp"
 53 #ifdef COMPILER1
 54 #include "c1/c1_Runtime1.hpp"
 55 #endif
 56 
 57 #include <type_traits>
 58 
// Virtual methods are not allowed in code blobs to simplify caching compiled code.
// Check all "leaf" subclasses of CodeBlob class.
// (A polymorphic class would embed a vtable pointer whose value differs between
// JVM runs, which would break byte-wise caching of compiled code.)

static_assert(!std::is_polymorphic<nmethod>::value,            "no virtual methods are allowed in nmethod");
static_assert(!std::is_polymorphic<AdapterBlob>::value,        "no virtual methods are allowed in code blobs");
static_assert(!std::is_polymorphic<VtableBlob>::value,         "no virtual methods are allowed in code blobs");
static_assert(!std::is_polymorphic<MethodHandlesAdapterBlob>::value, "no virtual methods are allowed in code blobs");
static_assert(!std::is_polymorphic<RuntimeStub>::value,        "no virtual methods are allowed in code blobs");
static_assert(!std::is_polymorphic<DeoptimizationBlob>::value, "no virtual methods are allowed in code blobs");
static_assert(!std::is_polymorphic<SafepointBlob>::value,      "no virtual methods are allowed in code blobs");
static_assert(!std::is_polymorphic<UpcallStub>::value,         "no virtual methods are allowed in code blobs");
#ifdef COMPILER2
static_assert(!std::is_polymorphic<ExceptionBlob>::value,      "no virtual methods are allowed in code blobs");
static_assert(!std::is_polymorphic<UncommonTrapBlob>::value,   "no virtual methods are allowed in code blobs");
#endif

// Add proxy vtables.
// We need only a few for now - they are used only from prints.
const nmethod::Vptr                  nmethod::_vpntr;
const BufferBlob::Vptr               BufferBlob::_vpntr;
const RuntimeStub::Vptr              RuntimeStub::_vpntr;
const SingletonBlob::Vptr            SingletonBlob::_vpntr;
const DeoptimizationBlob::Vptr       DeoptimizationBlob::_vpntr;
const UpcallStub::Vptr               UpcallStub::_vpntr;
 83 
// Returns the proxy vtable for this blob's kind. This hand-rolled dispatch
// replaces real C++ virtual functions (forbidden above) for the print methods.
const CodeBlob::Vptr* CodeBlob::vptr() const {
  // NOTE(review): the entries must stay in exactly the order of the
  // CodeBlobKind enum values (including the COMPILER2-only kinds) - confirm
  // against the enum whenever a kind is added or removed.
  constexpr const CodeBlob::Vptr* array[(size_t)CodeBlobKind::Number_Of_Kinds] = {
      nullptr/* None */,
      &nmethod::_vpntr,
      &BufferBlob::_vpntr,
      &AdapterBlob::_vpntr,
      &VtableBlob::_vpntr,
      &MethodHandlesAdapterBlob::_vpntr,
      &RuntimeStub::_vpntr,
      &DeoptimizationBlob::_vpntr,
      &SafepointBlob::_vpntr,
#ifdef COMPILER2
      &ExceptionBlob::_vpntr,
      &UncommonTrapBlob::_vpntr,
#endif
      &UpcallStub::_vpntr
  };

  return array[(size_t)_kind];
}
104 
105 unsigned int CodeBlob::align_code_offset(int offset) {
106   // align the size to CodeEntryAlignment
107   int header_size = (int)CodeHeap::header_size();
108   return align_up(offset + header_size, CodeEntryAlignment) - header_size;
109 }
110 
111 // This must be consistent with the CodeBlob constructor's layout actions.
112 unsigned int CodeBlob::allocation_size(CodeBuffer* cb, int header_size) {
113   // align the size to CodeEntryAlignment
114   unsigned int size = align_code_offset(header_size);
115   size += align_up(cb->total_content_size(), oopSize);
116   size += align_up(cb->total_oop_size(), oopSize);
117   return size;
118 }
119 
120 CodeBlob::CodeBlob(const char* name, CodeBlobKind kind, CodeBuffer* cb, int size, uint16_t header_size,
121                    int16_t frame_complete_offset, int frame_size, OopMapSet* oop_maps, bool caller_must_gc_arguments,
122                    int mutable_data_size) :
123   _oop_maps(nullptr), // will be set by set_oop_maps() call
124   _name(name),
125   _mutable_data(header_begin() + size), // default value is blob_end()
126   _size(size),
127   _relocation_size(align_up(cb->total_relocation_size(), oopSize)),
128   _content_offset(CodeBlob::align_code_offset(header_size)),
129   _code_offset(_content_offset + cb->total_offset_of(cb->insts())),
130   _data_offset(_content_offset + align_up(cb->total_content_size(), oopSize)),
131   _frame_size(frame_size),
132   _mutable_data_size(mutable_data_size),
133   S390_ONLY(_ctable_offset(0) COMMA)
134   _header_size(header_size),
135   _frame_complete_offset(frame_complete_offset),
136   _kind(kind),
137   _caller_must_gc_arguments(caller_must_gc_arguments)
138 {
139   assert(is_aligned(_size,            oopSize), "unaligned size");
140   assert(is_aligned(header_size,      oopSize), "unaligned size");
141   assert(is_aligned(_relocation_size, oopSize), "unaligned size");
142   assert(_data_offset <= _size, "codeBlob is too small: %d > %d", _data_offset, _size);
143   assert(is_nmethod() || (cb->total_oop_size() + cb->total_metadata_size() == 0), "must be nmethod");
144   assert(code_end() == content_end(), "must be the same - see code_end()");
145 #ifdef COMPILER1
146   // probably wrong for tiered
147   assert(_frame_size >= -1, "must use frame size or -1 for runtime stubs");
148 #endif // COMPILER1
149 
150   if (_mutable_data_size > 0) {
151     _mutable_data = (address)os::malloc(_mutable_data_size, mtCode);
152     if (_mutable_data == nullptr) {
153       vm_exit_out_of_memory(_mutable_data_size, OOM_MALLOC_ERROR, "codebuffer: no space for mutable data");
154     }
155   } else {
156     // We need unique and valid not null address
157     assert(_mutable_data = blob_end(), "sanity");
158   }
159 
160   set_oop_maps(oop_maps);
161 }
162 
163 // Simple CodeBlob used for simple BufferBlob.
164 CodeBlob::CodeBlob(const char* name, CodeBlobKind kind, int size, uint16_t header_size) :
165   _oop_maps(nullptr),
166   _name(name),
167   _mutable_data(header_begin() + size), // default value is blob_end()
168   _size(size),
169   _relocation_size(0),
170   _content_offset(CodeBlob::align_code_offset(header_size)),
171   _code_offset(_content_offset),
172   _data_offset(size),
173   _frame_size(0),
174   S390_ONLY(_ctable_offset(0) COMMA)
175   _header_size(header_size),
176   _frame_complete_offset(CodeOffsets::frame_never_safe),
177   _kind(kind),
178   _caller_must_gc_arguments(false)
179 {
180   assert(is_aligned(size,            oopSize), "unaligned size");
181   assert(is_aligned(header_size,     oopSize), "unaligned size");
182   assert(_mutable_data = blob_end(), "sanity");
183 }
184 
// Releases resources owned by this blob: the C-heap mutable data and the
// immutable oop map set (unless the latter lives inside the AOT code cache
// image, which this process does not own).
void CodeBlob::purge() {
  assert(_mutable_data != nullptr, "should never be null");
  if (_mutable_data != blob_end()) {
    // blob_end() is the sentinel meaning "no mutable data was allocated".
    os::free(_mutable_data);
    _mutable_data = blob_end(); // Valid not null address
  }
  if (_oop_maps != nullptr && !SCCache::is_address_in_aot_cache((address)_oop_maps)) {
    delete _oop_maps;
    _oop_maps = nullptr;
  }
  NOT_PRODUCT(_asm_remarks.clear());
  NOT_PRODUCT(_dbg_strings.clear());
}
198 
// Builds and installs an immutable copy of the given oop map set
// (nullptr clears the field).
void CodeBlob::set_oop_maps(OopMapSet* p) {
  // Danger Will Robinson! This method allocates a big
  // chunk of memory, it's your job to free it (see purge()).
  if (p != nullptr) {
    _oop_maps = ImmutableOopMapSet::build_from(p);
  } else {
    _oop_maps = nullptr;
  }
}
208 
209 const ImmutableOopMap* CodeBlob::oop_map_for_return_address(address return_address) const {
210   assert(_oop_maps != nullptr, "nope");
211   return _oop_maps->find_map_at_offset((intptr_t) return_address - (intptr_t) code_begin());
212 }
213 
214 void CodeBlob::print_code_on(outputStream* st) {
215   ResourceMark m;
216   Disassembler::decode(this, st);
217 }
218 
// Strips process-local state (name pointer, oop maps, mutable data pointer,
// debug remarks/strings) before the blob image is stored in the AOT cache.
void CodeBlob::prepare_for_archiving() {
  set_name(nullptr);
  _oop_maps = nullptr;
  _mutable_data = nullptr;
#ifndef PRODUCT
  asm_remarks().clear();
  dbg_strings().clear();
#endif /* PRODUCT */
}
228 
229 //-----------------------------------------------------------------------------------------
230 // Creates a RuntimeBlob from a CodeBuffer and copy code and relocation info.
231 
// Creates a RuntimeBlob from a CodeBuffer and copies the code and relocation
// info into it. The mutable data area is sized to hold exactly the
// (oop-aligned) relocation info.
RuntimeBlob::RuntimeBlob(
  const char* name,
  CodeBlobKind kind,
  CodeBuffer* cb,
  int         size,
  uint16_t    header_size,
  int16_t     frame_complete,
  int         frame_size,
  OopMapSet*  oop_maps,
  bool        caller_must_gc_arguments)
  : CodeBlob(name, kind, cb, size, header_size, frame_complete, frame_size, oop_maps, caller_must_gc_arguments,
             align_up(cb->total_relocation_size(), oopSize))
{
  cb->copy_code_and_locs_to(this);
}
247 
// Releases the blob's side allocations and returns its code cache storage.
// Must not be called while holding the CodeCache_lock.
void RuntimeBlob::free(RuntimeBlob* blob) {
  assert(blob != nullptr, "caller must check for nullptr");
  ThreadInVMfromUnknown __tiv;  // get to VM state in case we block on CodeCache_lock
  blob->purge();
  {
    MutexLocker mu(CodeCache_lock, Mutex::_no_safepoint_check_flag);
    CodeCache::free(blob);
  }
  // Track memory usage statistic after releasing CodeCache_lock
  MemoryService::track_code_cache_memory_usage();
}
259 
// Reports a freshly created stub to the various tracing/profiling consumers:
// -XX:+PrintStubCode disassembly, Forte (async profiler) registration, and
// JVMTI dynamic-code-generated events. The stub id is "name1" + "name2".
void RuntimeBlob::trace_new_stub(RuntimeBlob* stub, const char* name1, const char* name2) {
  // Do not hold the CodeCache lock during name formatting.
  assert(!CodeCache_lock->owned_by_self(), "release CodeCache before registering the stub");

  if (stub != nullptr && (PrintStubCode ||
                       Forte::is_enabled() ||
                       JvmtiExport::should_post_dynamic_code_generated())) {
    char stub_id[256];
    assert(strlen(name1) + strlen(name2) < sizeof(stub_id), "");
    // jio_snprintf truncates if the names ever exceed the buffer in product builds.
    jio_snprintf(stub_id, sizeof(stub_id), "%s%s", name1, name2);
    if (PrintStubCode) {
      ttyLocker ttyl;
      tty->print_cr("- - - [BEGIN] - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -");
      tty->print_cr("Decoding %s " PTR_FORMAT " [" PTR_FORMAT ", " PTR_FORMAT "] (%d bytes)",
                    stub_id, p2i(stub), p2i(stub->code_begin()), p2i(stub->code_end()), stub->code_size());
      Disassembler::decode(stub->code_begin(), stub->code_end(), tty
                           NOT_PRODUCT(COMMA &stub->asm_remarks()));
      if ((stub->oop_maps() != nullptr) && AbstractDisassembler::show_structs()) {
        tty->print_cr("- - - [OOP MAPS]- - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -");
        stub->oop_maps()->print();
      }
      tty->print_cr("- - - [END] - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -");
      tty->cr();
    }
    if (Forte::is_enabled()) {
      Forte::register_stub(stub_id, stub->code_begin(), stub->code_end());
    }

    if (JvmtiExport::should_post_dynamic_code_generated()) {
      // JVMTI gets the more specific name2 if present, else name1.
      const char* stub_name = name2;
      if (name2[0] == '\0')  stub_name = name1;
      JvmtiExport::post_dynamic_code_generated(stub_name, stub->code_begin(), stub->code_end());
    }
  }

  // Track memory usage statistic after releasing CodeCache_lock
  MemoryService::track_code_cache_memory_usage();
}
298 
299 //----------------------------------------------------------------------------------------------------
300 // Implementation of BufferBlob
301 
// Constructs an empty BufferBlob (no CodeBuffer); callers fill the code
// area themselves.
BufferBlob::BufferBlob(const char* name, CodeBlobKind kind, int size)
: RuntimeBlob(name, kind, size, sizeof(BufferBlob))
{}
305 
306 BufferBlob* BufferBlob::create(const char* name, uint buffer_size) {
307   ThreadInVMfromUnknown __tiv;  // get to VM state in case we block on CodeCache_lock
308 
309   BufferBlob* blob = nullptr;
310   unsigned int size = sizeof(BufferBlob);
311   // align the size to CodeEntryAlignment
312   size = CodeBlob::align_code_offset(size);
313   size += align_up(buffer_size, oopSize);
314   assert(name != nullptr, "must provide a name");
315   {
316     MutexLocker mu(CodeCache_lock, Mutex::_no_safepoint_check_flag);
317     blob = new (size) BufferBlob(name, CodeBlobKind::Buffer, size);
318   }
319   // Track memory usage statistic after releasing CodeCache_lock
320   MemoryService::track_code_cache_memory_usage();
321 
322   return blob;
323 }
324 
325 
// Constructs a BufferBlob whose code is copied from the given CodeBuffer;
// no frame and no oop maps.
BufferBlob::BufferBlob(const char* name, CodeBlobKind kind, CodeBuffer* cb, int size)
  : RuntimeBlob(name, kind, cb, size, sizeof(BufferBlob), CodeOffsets::frame_never_safe, 0, nullptr)
{}
329 
330 // Used by gtest
331 BufferBlob* BufferBlob::create(const char* name, CodeBuffer* cb) {
332   ThreadInVMfromUnknown __tiv;  // get to VM state in case we block on CodeCache_lock
333 
334   BufferBlob* blob = nullptr;
335   unsigned int size = CodeBlob::allocation_size(cb, sizeof(BufferBlob));
336   assert(name != nullptr, "must provide a name");
337   {
338     MutexLocker mu(CodeCache_lock, Mutex::_no_safepoint_check_flag);
339     blob = new (size) BufferBlob(name, CodeBlobKind::Buffer, cb, size);
340   }
341   // Track memory usage statistic after releasing CodeCache_lock
342   MemoryService::track_code_cache_memory_usage();
343 
344   return blob;
345 }
346 
// Placement allocator: carves 'size' bytes out of the non-nmethod section
// of the code cache. May return nullptr on exhaustion (the 's' parameter,
// the compiler-computed object size, is intentionally unused).
void* BufferBlob::operator new(size_t s, unsigned size) throw() {
  return CodeCache::allocate(size, CodeBlobType::NonNMethod);
}
350 
// Frees a BufferBlob via the common RuntimeBlob path (purge + code cache free).
void BufferBlob::free(BufferBlob *blob) {
  RuntimeBlob::free(blob);
}
354 
355 
356 //----------------------------------------------------------------------------------------------------
357 // Implementation of AdapterBlob
358 
// AdapterBlobs are committed to the code cache immediately on construction.
AdapterBlob::AdapterBlob(int size, CodeBuffer* cb) :
  BufferBlob("I2C/C2I adapters", CodeBlobKind::Adapter, cb, size) {
  CodeCache::commit(this);
}
363 
364 AdapterBlob* AdapterBlob::create(CodeBuffer* cb) {
365   ThreadInVMfromUnknown __tiv;  // get to VM state in case we block on CodeCache_lock
366 
367   CodeCache::gc_on_allocation();
368 
369   AdapterBlob* blob = nullptr;
370   unsigned int size = CodeBlob::allocation_size(cb, sizeof(AdapterBlob));
371   {
372     MutexLocker mu(CodeCache_lock, Mutex::_no_safepoint_check_flag);
373     blob = new (size) AdapterBlob(size, cb);
374   }
375   // Track memory usage statistic after releasing CodeCache_lock
376   MemoryService::track_code_cache_memory_usage();
377 
378   return blob;
379 }
380 
381 //----------------------------------------------------------------------------------------------------
382 // Implementation of VtableBlob
383 
// Placement allocator for VtableBlobs; never triggers the allocation-failure
// recovery path (see comment below).
void* VtableBlob::operator new(size_t s, unsigned size) throw() {
  // Handling of allocation failure stops compilation and prints a bunch of
  // stuff, which requires unlocking the CodeCache_lock, so that the Compile_lock
  // can be locked, and then re-locking the CodeCache_lock. That is not safe in
  // this context as we hold the CompiledICLocker. So we just don't handle code
  // cache exhaustion here; we leave that for a later allocation that does not
  // hold the CompiledICLocker.
  return CodeCache::allocate(size, CodeBlobType::NonNMethod, false /* handle_alloc_failure */);
}
393 
// Empty-buffer blob holding an IC vtable/itable dispatch stub.
VtableBlob::VtableBlob(const char* name, int size) :
  BufferBlob(name, CodeBlobKind::Vtable, size) {
}
397 
// Allocates a VtableBlob with room for 'buffer_size' bytes of stub code.
// Returns nullptr if the CodeCache_lock cannot be acquired without blocking
// (see comment below) or if the code cache is exhausted.
VtableBlob* VtableBlob::create(const char* name, int buffer_size) {
  assert(JavaThread::current()->thread_state() == _thread_in_vm, "called with the wrong state");

  VtableBlob* blob = nullptr;
  unsigned int size = sizeof(VtableBlob);
  // align the size to CodeEntryAlignment
  size = align_code_offset(size);
  size += align_up(buffer_size, oopSize);
  assert(name != nullptr, "must provide a name");
  {
    if (!CodeCache_lock->try_lock()) {
      // If we can't take the CodeCache_lock, then this is a bad time to perform the ongoing
      // IC transition to megamorphic, for which this stub will be needed. It is better to
      // bail out the transition, and wait for a more opportune moment. Not only is it not
      // worth waiting for the lock blockingly for the megamorphic transition, it might
      // also result in a deadlock to blockingly wait, when concurrent class unloading is
      // performed. At this point in time, the CompiledICLocker is taken, so we are not
      // allowed to blockingly wait for the CodeCache_lock, as these two locks are otherwise
      // consistently taken in the opposite order. Bailing out results in an IC transition to
      // the clean state instead, which will cause subsequent calls to retry the transitioning
      // eventually.
      return nullptr;
    }
    blob = new (size) VtableBlob(name, size);
    CodeCache_lock->unlock();
  }
  // Track memory usage statistic after releasing CodeCache_lock
  MemoryService::track_code_cache_memory_usage();

  return blob;
}
429 
430 //----------------------------------------------------------------------------------------------------
431 // Implementation of MethodHandlesAdapterBlob
432 
// Allocates a MethodHandlesAdapterBlob with room for 'buffer_size' bytes of
// code. Allocation failure is fatal (exits the VM) since these adapters are
// required for method handle support.
MethodHandlesAdapterBlob* MethodHandlesAdapterBlob::create(int buffer_size) {
  ThreadInVMfromUnknown __tiv;  // get to VM state in case we block on CodeCache_lock

  MethodHandlesAdapterBlob* blob = nullptr;
  unsigned int size = sizeof(MethodHandlesAdapterBlob);
  // align the size to CodeEntryAlignment
  size = CodeBlob::align_code_offset(size);
  size += align_up(buffer_size, oopSize);
  {
    MutexLocker mu(CodeCache_lock, Mutex::_no_safepoint_check_flag);
    blob = new (size) MethodHandlesAdapterBlob(size);
    if (blob == nullptr) {
      vm_exit_out_of_memory(size, OOM_MALLOC_ERROR, "CodeCache: no room for method handle adapter blob");
    }
  }
  // Track memory usage statistic after releasing CodeCache_lock
  MemoryService::track_code_cache_memory_usage();

  return blob;
}
453 
454 //----------------------------------------------------------------------------------------------------
455 // Implementation of RuntimeStub
456 
// A RuntimeStub wraps a non-compiled code fragment (e.g. a shared runtime
// entry) copied from the given CodeBuffer.
RuntimeStub::RuntimeStub(
  const char* name,
  CodeBuffer* cb,
  int         size,
  int16_t     frame_complete,
  int         frame_size,
  OopMapSet*  oop_maps,
  bool        caller_must_gc_arguments
)
: RuntimeBlob(name, CodeBlobKind::RuntimeStub, cb, size, sizeof(RuntimeStub),
              frame_complete, frame_size, oop_maps, caller_must_gc_arguments)
{
}
470 
// Allocates and traces a new RuntimeStub for the given CodeBuffer.
// If 'alloc_fail_is_fatal' is true, code cache exhaustion aborts the VM;
// otherwise nullptr is returned.
RuntimeStub* RuntimeStub::new_runtime_stub(const char* stub_name,
                                           CodeBuffer* cb,
                                           int16_t frame_complete,
                                           int frame_size,
                                           OopMapSet* oop_maps,
                                           bool caller_must_gc_arguments,
                                           bool alloc_fail_is_fatal)
{
  RuntimeStub* stub = nullptr;
  unsigned int size = CodeBlob::allocation_size(cb, sizeof(RuntimeStub));
  ThreadInVMfromUnknown __tiv;  // get to VM state in case we block on CodeCache_lock
  {
    MutexLocker mu(CodeCache_lock, Mutex::_no_safepoint_check_flag);
    stub = new (size) RuntimeStub(stub_name, cb, size, frame_complete, frame_size, oop_maps, caller_must_gc_arguments);
    if (stub == nullptr) {
      if (!alloc_fail_is_fatal) {
        return nullptr;
      }
      fatal("Initial size of CodeCache is too small");
    }
  }

  // trace_new_stub must run after the CodeCache_lock is released.
  trace_new_stub(stub, "RuntimeStub - ", stub_name);

  return stub;
}
497 
498 
// Placement allocator: RuntimeStubs live in the non-nmethod section of the
// code cache. May return nullptr on exhaustion.
void* RuntimeStub::operator new(size_t s, unsigned size) throw() {
  return CodeCache::allocate(size, CodeBlobType::NonNMethod);
}
502 
503 // operator new shared by all singletons:
504 void* SingletonBlob::operator new(size_t s, unsigned size, bool alloc_fail_is_fatal) throw() {
505   void* p = CodeCache::allocate(size, CodeBlobType::NonNMethod);
506   if (alloc_fail_is_fatal && !p) fatal("Initial size of CodeCache is too small");
507   return p;
508 }
509 
510 
511 //----------------------------------------------------------------------------------------------------
512 // Implementation of DeoptimizationBlob
513 
// Records the entry offsets into the deoptimization handler: the plain
// unpack entry, the unpack-with-pending-exception entry, and the
// unpack-with-reexecution entry.
DeoptimizationBlob::DeoptimizationBlob(
  CodeBuffer* cb,
  int         size,
  OopMapSet*  oop_maps,
  int         unpack_offset,
  int         unpack_with_exception_offset,
  int         unpack_with_reexecution_offset,
  int         frame_size
)
: SingletonBlob("DeoptimizationBlob", CodeBlobKind::Deoptimization, cb,
                size, sizeof(DeoptimizationBlob), frame_size, oop_maps)
{
  _unpack_offset           = unpack_offset;
  _unpack_with_exception   = unpack_with_exception_offset;
  _unpack_with_reexecution = unpack_with_reexecution_offset;
#ifdef COMPILER1
  // C1 sets this later via set_unpack_with_exception_in_tls_offset().
  _unpack_with_exception_in_tls   = -1;
#endif
}
533 
534 
// Allocates the (singleton) DeoptimizationBlob from the given CodeBuffer.
// Allocation failure is fatal (SingletonBlob::operator new defaults to fatal).
DeoptimizationBlob* DeoptimizationBlob::create(
  CodeBuffer* cb,
  OopMapSet*  oop_maps,
  int        unpack_offset,
  int        unpack_with_exception_offset,
  int        unpack_with_reexecution_offset,
  int        frame_size)
{
  DeoptimizationBlob* blob = nullptr;
  unsigned int size = CodeBlob::allocation_size(cb, sizeof(DeoptimizationBlob));
  ThreadInVMfromUnknown __tiv;  // get to VM state in case we block on CodeCache_lock
  {
    MutexLocker mu(CodeCache_lock, Mutex::_no_safepoint_check_flag);
    blob = new (size) DeoptimizationBlob(cb,
                                         size,
                                         oop_maps,
                                         unpack_offset,
                                         unpack_with_exception_offset,
                                         unpack_with_reexecution_offset,
                                         frame_size);
  }

  // trace_new_stub must run after the CodeCache_lock is released.
  trace_new_stub(blob, "DeoptimizationBlob");

  return blob;
}
561 
562 #ifdef COMPILER2
563 
564 //----------------------------------------------------------------------------------------------------
565 // Implementation of UncommonTrapBlob
566 
// C2-only singleton: entry point for uncommon trap handling.
UncommonTrapBlob::UncommonTrapBlob(
  CodeBuffer* cb,
  int         size,
  OopMapSet*  oop_maps,
  int         frame_size
)
: SingletonBlob("UncommonTrapBlob", CodeBlobKind::UncommonTrap, cb,
                size, sizeof(UncommonTrapBlob), frame_size, oop_maps)
{}
576 
577 
// Allocates the UncommonTrapBlob; allocation failure is non-fatal
// (note the 'false' passed to SingletonBlob::operator new) so this may
// return nullptr.
UncommonTrapBlob* UncommonTrapBlob::create(
  CodeBuffer* cb,
  OopMapSet*  oop_maps,
  int        frame_size)
{
  UncommonTrapBlob* blob = nullptr;
  unsigned int size = CodeBlob::allocation_size(cb, sizeof(UncommonTrapBlob));
  ThreadInVMfromUnknown __tiv;  // get to VM state in case we block on CodeCache_lock
  {
    MutexLocker mu(CodeCache_lock, Mutex::_no_safepoint_check_flag);
    blob = new (size, false) UncommonTrapBlob(cb, size, oop_maps, frame_size);
  }

  // trace_new_stub must run after the CodeCache_lock is released.
  trace_new_stub(blob, "UncommonTrapBlob");

  return blob;
}
595 
596 //----------------------------------------------------------------------------------------------------
597 // Implementation of ExceptionBlob
598 
// C2-only singleton: entry point for exception handling in compiled code.
ExceptionBlob::ExceptionBlob(
  CodeBuffer* cb,
  int         size,
  OopMapSet*  oop_maps,
  int         frame_size
)
: SingletonBlob("ExceptionBlob", CodeBlobKind::Exception, cb,
                size, sizeof(ExceptionBlob), frame_size, oop_maps)
{}
608 
609 
// Allocates the ExceptionBlob; allocation failure is non-fatal (note the
// 'false' passed to SingletonBlob::operator new) so this may return nullptr.
ExceptionBlob* ExceptionBlob::create(
  CodeBuffer* cb,
  OopMapSet*  oop_maps,
  int         frame_size)
{
  ExceptionBlob* blob = nullptr;
  unsigned int size = CodeBlob::allocation_size(cb, sizeof(ExceptionBlob));
  ThreadInVMfromUnknown __tiv;  // get to VM state in case we block on CodeCache_lock
  {
    MutexLocker mu(CodeCache_lock, Mutex::_no_safepoint_check_flag);
    blob = new (size, false) ExceptionBlob(cb, size, oop_maps, frame_size);
  }

  // trace_new_stub must run after the CodeCache_lock is released.
  trace_new_stub(blob, "ExceptionBlob");

  return blob;
}
627 
628 #endif // COMPILER2
629 
630 //----------------------------------------------------------------------------------------------------
631 // Implementation of SafepointBlob
632 
// Singleton: handler invoked when a thread is stopped at a safepoint poll.
SafepointBlob::SafepointBlob(
  CodeBuffer* cb,
  int         size,
  OopMapSet*  oop_maps,
  int         frame_size
)
: SingletonBlob("SafepointBlob", CodeBlobKind::Safepoint, cb,
                size, sizeof(SafepointBlob), frame_size, oop_maps)
{}
642 
643 
// Allocates the (singleton) SafepointBlob. Uses the default fatal-on-failure
// SingletonBlob allocator (no 'false' argument).
SafepointBlob* SafepointBlob::create(
  CodeBuffer* cb,
  OopMapSet*  oop_maps,
  int         frame_size)
{
  SafepointBlob* blob = nullptr;
  unsigned int size = CodeBlob::allocation_size(cb, sizeof(SafepointBlob));
  ThreadInVMfromUnknown __tiv;  // get to VM state in case we block on CodeCache_lock
  {
    MutexLocker mu(CodeCache_lock, Mutex::_no_safepoint_check_flag);
    blob = new (size) SafepointBlob(cb, size, oop_maps, frame_size);
  }

  // trace_new_stub must run after the CodeCache_lock is released.
  trace_new_stub(blob, "SafepointBlob");

  return blob;
}
661 
662 //----------------------------------------------------------------------------------------------------
663 // Implementation of UpcallStub
664 
// Stub for native-to-Java upcalls (FFM API). 'receiver' is a global JNI
// handle kept alive by this stub and released in UpcallStub::free().
UpcallStub::UpcallStub(const char* name, CodeBuffer* cb, int size, jobject receiver, ByteSize frame_data_offset) :
  RuntimeBlob(name, CodeBlobKind::Upcall, cb, size, sizeof(UpcallStub),
              CodeOffsets::frame_never_safe, 0 /* no frame size */,
              /* oop maps = */ nullptr, /* caller must gc arguments = */ false),
  _receiver(receiver),
  _frame_data_offset(frame_data_offset)
{
  CodeCache::commit(this);
}
674 
// Placement allocator: UpcallStubs live in the non-nmethod section of the
// code cache. May return nullptr on exhaustion.
void* UpcallStub::operator new(size_t s, unsigned size) throw() {
  return CodeCache::allocate(size, CodeBlobType::NonNMethod);
}
678 
// Allocates and traces a new UpcallStub. Returns nullptr on code cache
// exhaustion; the caller must handle that.
UpcallStub* UpcallStub::create(const char* name, CodeBuffer* cb, jobject receiver, ByteSize frame_data_offset) {
  ThreadInVMfromUnknown __tiv;  // get to VM state in case we block on CodeCache_lock

  UpcallStub* blob = nullptr;
  unsigned int size = CodeBlob::allocation_size(cb, sizeof(UpcallStub));
  {
    MutexLocker mu(CodeCache_lock, Mutex::_no_safepoint_check_flag);
    blob = new (size) UpcallStub(name, cb, size, receiver, frame_data_offset);
  }
  if (blob == nullptr) {
    return nullptr; // caller must handle this
  }

  // Track memory usage statistic after releasing CodeCache_lock
  MemoryService::track_code_cache_memory_usage();

  trace_new_stub(blob, "UpcallStub - ", name);

  return blob;
}
699 
// Applies the closure to the oops held in this stub's per-frame handle block.
void UpcallStub::oops_do(OopClosure* f, const frame& frame) {
  frame_data_for_frame(frame)->old_handles->oops_do(f);
}
703 
// Returns the JavaFrameAnchor stored in the given upcall frame's frame data.
JavaFrameAnchor* UpcallStub::jfa_for_frame(const frame& frame) const {
  return &frame_data_for_frame(frame)->jfa;
}
707 
// Releases the global JNI handle on the receiver before freeing the blob.
void UpcallStub::free(UpcallStub* blob) {
  assert(blob != nullptr, "caller must check for nullptr");
  JNIHandles::destroy_global(blob->receiver());
  RuntimeBlob::free(blob);
}
713 
714 //----------------------------------------------------------------------------------------------------
715 // Verification and printing
716 
717 void CodeBlob::verify() {
718   if (is_nmethod()) {
719     as_nmethod()->verify();
720   }
721 }
722 
// Dispatches to the kind-specific printer via the proxy vtable.
void CodeBlob::print_on(outputStream* st) const {
  vptr()->print_on(this, st);
}
726 
// Convenience: print to the default tty stream.
void CodeBlob::print() const { print_on(tty); }
728 
// Dispatches to the kind-specific one-line printer via the proxy vtable.
void CodeBlob::print_value_on(outputStream* st) const {
  vptr()->print_value_on(this, st);
}
732 
// Generic fallback printer: blob identity and frame size only.
void CodeBlob::print_on_impl(outputStream* st) const {
  st->print_cr("[CodeBlob (" INTPTR_FORMAT ")]", p2i(this));
  st->print_cr("Framesize: %d", _frame_size);
}
737 
// Generic fallback one-line printer.
void CodeBlob::print_value_on_impl(outputStream* st) const {
  st->print_cr("[CodeBlob]");
}
741 
742 void CodeBlob::print_block_comment(outputStream* stream, address block_begin) const {
743 #if defined(SUPPORT_ASSEMBLY) || defined(SUPPORT_ABSTRACT_ASSEMBLY)
744   if (is_nmethod()) {
745     as_nmethod()->print_nmethod_labels(stream, block_begin);
746   }
747 #endif
748 
749 #ifndef PRODUCT
750   ptrdiff_t offset = block_begin - code_begin();
751   assert(offset >= 0, "Expecting non-negative offset!");
752   _asm_remarks.print(uint(offset), stream);
753 #endif
754   }
755 
// Debugger helper (used by findpc in debug.cpp): prints a human-readable
// description of what 'addr' points at inside this blob, trying progressively
// more specific interpretations (interpreter codelet, adapter, stub, vtable
// stub, nmethod) before falling back to a generic dump.
void CodeBlob::dump_for_addr(address addr, outputStream* st, bool verbose) const {
  if (is_buffer_blob() || is_adapter_blob() || is_vtable_blob() || is_method_handles_adapter_blob()) {
    // the interpreter is generated into a buffer blob
    InterpreterCodelet* i = Interpreter::codelet_containing(addr);
    if (i != nullptr) {
      st->print_cr(INTPTR_FORMAT " is at code_begin+%d in an Interpreter codelet", p2i(addr), (int)(addr - i->code_begin()));
      i->print_on(st);
      return;
    }
    if (Interpreter::contains(addr)) {
      st->print_cr(INTPTR_FORMAT " is pointing into interpreter code"
                   " (not bytecode specific)", p2i(addr));
      return;
    }
    // i2c/c2i adapters live in buffer blobs too; note: no early return here,
    // so the stub checks below still run for adapter addresses.
    if (AdapterHandlerLibrary::contains(this)) {
      st->print_cr(INTPTR_FORMAT " is at code_begin+%d in an AdapterHandler", p2i(addr), (int)(addr - code_begin()));
      AdapterHandlerLibrary::print_handler_on(st, this);
    }
    // the stubroutines are generated into a buffer blob
    StubCodeDesc* d = StubCodeDesc::desc_for(addr);
    if (d != nullptr) {
      st->print_cr(INTPTR_FORMAT " is at begin+%d in a stub", p2i(addr), (int)(addr - d->begin()));
      d->print_on(st);
      st->cr();
      return;
    }
    if (StubRoutines::contains(addr)) {
      st->print_cr(INTPTR_FORMAT " is pointing to an (unnamed) stub routine", p2i(addr));
      return;
    }
    VtableStub* v = VtableStubs::stub_containing(addr);
    if (v != nullptr) {
      st->print_cr(INTPTR_FORMAT " is at entry_point+%d in a vtable stub", p2i(addr), (int)(addr - v->entry_point()));
      v->print_on(st);
      st->cr();
      return;
    }
  }
  if (is_nmethod()) {
    nmethod* nm = (nmethod*)this;
    ResourceMark rm;
    st->print(INTPTR_FORMAT " is at entry_point+%d in (nmethod*)" INTPTR_FORMAT,
              p2i(addr), (int)(addr - nm->entry_point()), p2i(nm));
    if (verbose) {
      st->print(" for ");
      nm->method()->print_value_on(st);
    }
    st->cr();
    if (verbose && st == tty) {
      // verbose is only ever true when called from findpc in debug.cpp
      nm->print_nmethod(true);
    } else {
      nm->print_on(st);
    }
    return;
  }
  // Fallback: generic blob dump.
  st->print_cr(INTPTR_FORMAT " is at code_begin+%d in ", p2i(addr), (int)(addr - code_begin()));
  print_on(st);
}
816 
// Prints the generic blob info followed by the buffer blob's one-liner.
void BufferBlob::print_on_impl(outputStream* st) const {
  RuntimeBlob::print_on_impl(st);
  print_value_on_impl(st);
}
821 
// One-line summary: address and the blob's registered name.
void BufferBlob::print_value_on_impl(outputStream* st) const {
  st->print_cr("BufferBlob (" INTPTR_FORMAT  ") used for %s", p2i(this), name());
}
825 
// Prints blob info, the stub's name, and a full disassembly.
void RuntimeStub::print_on_impl(outputStream* st) const {
  ttyLocker ttyl;
  RuntimeBlob::print_on_impl(st);
  st->print("Runtime Stub (" INTPTR_FORMAT "): ", p2i(this));
  st->print_cr("%s", name());
  Disassembler::decode((RuntimeBlob*)this, st);
}
833 
// One-line summary: address and stub name.
void RuntimeStub::print_value_on_impl(outputStream* st) const {
  st->print("RuntimeStub (" INTPTR_FORMAT "): ", p2i(this)); st->print("%s", name());
}
837 
// Prints blob info, the singleton's name, and a full disassembly.
void SingletonBlob::print_on_impl(outputStream* st) const {
  ttyLocker ttyl;
  RuntimeBlob::print_on_impl(st);
  st->print_cr("%s", name());
  Disassembler::decode((RuntimeBlob*)this, st);
}
844 
// One-line summary: just the singleton's name.
void SingletonBlob::print_value_on_impl(outputStream* st) const {
  st->print_cr("%s", name());
}
848 
// One-line summary used in stack traces where no frame info is available.
void DeoptimizationBlob::print_value_on_impl(outputStream* st) const {
  st->print_cr("Deoptimization (frame not available)");
}
852 
853 void UpcallStub::print_on_impl(outputStream* st) const {
854   RuntimeBlob::print_on_impl(st);
855   print_value_on_impl(st);
856   st->print_cr("Frame data offset: %d", (int) _frame_data_offset);
857   oop recv = JNIHandles::resolve(_receiver);
858   st->print("Receiver MH=");
859   recv->print_on(st);
860   Disassembler::decode((RuntimeBlob*)this, st);
861 }
862 
// One-line summary: address and the stub's registered name.
void UpcallStub::print_value_on_impl(outputStream* st) const {
  st->print_cr("UpcallStub (" INTPTR_FORMAT  ") used for %s", p2i(this), name());
}