< prev index next > src/hotspot/share/code/codeBlob.cpp
Print this page
bool caller_must_gc_arguments
) : CodeBlob(name, compiler_none, CodeBlobLayout((address) this, size, header_size, cb), cb, frame_complete, frame_size, oop_maps, caller_must_gc_arguments) {
cb->copy_code_and_locs_to(this);
}
+ // Shared teardown path for RuntimeBlob subclasses: flush the blob's side data,
+ // then return its storage to the CodeCache. Callable from an unknown thread
+ // state; transitions into VM state first since freeing may block on CodeCache_lock.
+ void RuntimeBlob::free(RuntimeBlob* blob) {
+ assert(blob != NULL, "caller must check for NULL");
+ ThreadInVMfromUnknown __tiv; // get to VM state in case we block on CodeCache_lock
+ blob->flush();
+ {
+ // CodeCache mutation must happen under CodeCache_lock (no safepoint check here).
+ MutexLocker mu(CodeCache_lock, Mutex::_no_safepoint_check_flag);
+ CodeCache::free(blob);
+ }
+ // Track memory usage statistic after releasing CodeCache_lock
+ MemoryService::track_code_cache_memory_usage();
+ }
+
void CodeBlob::flush() {
FREE_C_HEAP_ARRAY(unsigned char, _oop_maps);
_oop_maps = NULL;
NOT_PRODUCT(_asm_remarks.clear());
NOT_PRODUCT(_dbg_strings.clear());
// Placement allocator for BufferBlob: carve 'size' bytes out of the
// non-nmethod section of the CodeCache. NOTE(review): presumably
// CodeCache::allocate can return NULL on exhaustion — callers must check.
void* BufferBlob::operator new(size_t s, unsigned size) throw() {
return CodeCache::allocate(size, CodeBlobType::NonNMethod);
}
// Old-side ("-") version of BufferBlob::free in this diff: the flush /
// CodeCache::free / memory-tracking sequence shown removed here is being
// hoisted into the shared RuntimeBlob::free.
void BufferBlob::free(BufferBlob *blob) {
! assert(blob != NULL, "caller must check for NULL");
- ThreadInVMfromUnknown __tiv; // get to VM state in case we block on CodeCache_lock
- blob->flush();
- {
- MutexLocker mu(CodeCache_lock, Mutex::_no_safepoint_check_flag);
- CodeCache::free((RuntimeBlob*)blob);
- }
- // Track memory usage statistic after releasing CodeCache_lock
- MemoryService::track_code_cache_memory_usage();
}
//----------------------------------------------------------------------------------------------------
// Implementation of AdapterBlob
// Placement allocator for BufferBlob (repeated context on the new side of
// this webrev diff): allocate from the non-nmethod code heap.
void* BufferBlob::operator new(size_t s, unsigned size) throw() {
return CodeCache::allocate(size, CodeBlobType::NonNMethod);
}
// New-side version: the whole free sequence (flush, CodeCache::free under
// CodeCache_lock, memory tracking) is delegated to RuntimeBlob::free.
void BufferBlob::free(BufferBlob *blob) {
! RuntimeBlob::free(blob);
}
//----------------------------------------------------------------------------------------------------
// Implementation of AdapterBlob
st->print_cr("Deoptimization (frame not available)");
}
// Implementation of OptimizedEntryBlob
// Old-side constructor in this diff: OptimizedEntryBlob still derives from
// BufferBlob here; records the exception-handler offset, the receiver handle,
// and the frame-data offset, then commits the blob to the CodeCache.
! OptimizedEntryBlob::OptimizedEntryBlob(const char* name, int size, CodeBuffer* cb, intptr_t exception_handler_offset,
jobject receiver, ByteSize frame_data_offset) :
! BufferBlob(name, size, cb),
_exception_handler_offset(exception_handler_offset),
_receiver(receiver),
_frame_data_offset(frame_data_offset) {
CodeCache::commit(this);
}
// Old-side factory: size the allocation from the CodeBuffer, construct the
// blob under CodeCache_lock, and record code-cache memory usage afterwards.
! OptimizedEntryBlob* OptimizedEntryBlob::create(const char* name, CodeBuffer* cb, intptr_t exception_handler_offset,
jobject receiver, ByteSize frame_data_offset) {
ThreadInVMfromUnknown __tiv; // get to VM state in case we block on CodeCache_lock
OptimizedEntryBlob* blob = nullptr;
unsigned int size = CodeBlob::allocation_size(cb, sizeof(OptimizedEntryBlob));
{
MutexLocker mu(CodeCache_lock, Mutex::_no_safepoint_check_flag);
! blob = new (size) OptimizedEntryBlob(name, size, cb, exception_handler_offset, receiver, frame_data_offset);
}
// Track memory usage statistic after releasing CodeCache_lock
MemoryService::track_code_cache_memory_usage();
return blob;
}
// Apply the closure to the oops held in this blob's per-frame data
// (the old_handles set recorded for the given frame).
void OptimizedEntryBlob::oops_do(OopClosure* f, const frame& frame) {
frame_data_for_frame(frame)->old_handles->oops_do(f);
}
// Return the JavaFrameAnchor stored in the frame data associated with 'frame'.
JavaFrameAnchor* OptimizedEntryBlob::jfa_for_frame(const frame& frame) const {
return &frame_data_for_frame(frame)->jfa;
}
st->print_cr("Deoptimization (frame not available)");
}
// Implementation of OptimizedEntryBlob
// New-side constructor: OptimizedEntryBlob now derives directly from
// RuntimeBlob — built with no oop maps, frame_never_safe, zero frame size,
// and no caller-gc'd arguments — then commits itself to the CodeCache.
! OptimizedEntryBlob::OptimizedEntryBlob(const char* name, CodeBuffer* cb, int size,
+ intptr_t exception_handler_offset,
jobject receiver, ByteSize frame_data_offset) :
! RuntimeBlob(name, cb, sizeof(OptimizedEntryBlob), size, CodeOffsets::frame_never_safe, 0 /* no frame size */,
+ /* oop maps = */ nullptr, /* caller must gc arguments = */ false),
_exception_handler_offset(exception_handler_offset),
_receiver(receiver),
_frame_data_offset(frame_data_offset) {
CodeCache::commit(this);
}
+ // Placement allocator: OptimizedEntryBlob storage comes from the
+ // non-nmethod section of the CodeCache, same as BufferBlob.
! void* OptimizedEntryBlob::operator new(size_t s, unsigned size) throw() {
+ return CodeCache::allocate(size, CodeBlobType::NonNMethod);
+ }
+
+ // New-side factory: same allocate-under-CodeCache_lock pattern as before,
+ // with the constructor's new argument order, plus a trace_new_stub call so
+ // the new blob shows up in stub tracing.
+ OptimizedEntryBlob* OptimizedEntryBlob::create(const char* name, CodeBuffer* cb,
+ intptr_t exception_handler_offset,
jobject receiver, ByteSize frame_data_offset) {
ThreadInVMfromUnknown __tiv; // get to VM state in case we block on CodeCache_lock
OptimizedEntryBlob* blob = nullptr;
unsigned int size = CodeBlob::allocation_size(cb, sizeof(OptimizedEntryBlob));
{
MutexLocker mu(CodeCache_lock, Mutex::_no_safepoint_check_flag);
! blob = new (size) OptimizedEntryBlob(name, cb, size,
+ exception_handler_offset, receiver, frame_data_offset);
}
// Track memory usage statistic after releasing CodeCache_lock
MemoryService::track_code_cache_memory_usage();
+ trace_new_stub(blob, "OptimizedEntryBlob");
+
return blob;
}
// Apply the closure to the oops held in this blob's per-frame data
// (the old_handles set recorded for the given frame).
void OptimizedEntryBlob::oops_do(OopClosure* f, const frame& frame) {
frame_data_for_frame(frame)->old_handles->oops_do(f);
}
// Return the JavaFrameAnchor stored in the frame data associated with 'frame'.
JavaFrameAnchor* OptimizedEntryBlob::jfa_for_frame(const frame& frame) const {
return &frame_data_for_frame(frame)->jfa;
}
+
+ // Free an OptimizedEntryBlob: destroy the global JNI handle that pins the
+ // receiver first, then release the blob through the shared RuntimeBlob::free
+ // path (flush + CodeCache::free + memory tracking).
+ void OptimizedEntryBlob::free(OptimizedEntryBlob* blob) {
+ assert(blob != nullptr, "caller must check for NULL");
+ JNIHandles::destroy_global(blob->receiver());
+ RuntimeBlob::free(blob);
+ }
+
+ // Required RuntimeBlob virtual; this blob has no callee argument oops to
+ // preserve, so this is deliberately a no-op.
+ void OptimizedEntryBlob::preserve_callee_argument_oops(frame fr, const RegisterMap* reg_map, OopClosure* f) {
+ // do nothing for now
+ }
+
+ // Misc.
+ // Debug-time self-check hook; intentionally empty for this blob type.
+ void OptimizedEntryBlob::verify() {
+ // unimplemented
+ }
+
+ // Full printout: the generic RuntimeBlob header followed by this blob's
+ // one-line value description.
+ void OptimizedEntryBlob::print_on(outputStream* st) const {
+ RuntimeBlob::print_on(st);
+ print_value_on(st);
+ }
+
+ // One-line description: blob address and the name it was created with.
+ void OptimizedEntryBlob::print_value_on(outputStream* st) const {
+ st->print_cr("OptimizedEntryBlob (" INTPTR_FORMAT ") used for %s", p2i(this), name());
+ }
< prev index next >