src/hotspot/share/code/codeBlob.cpp

142 {
143   assert(is_aligned(locs_size, oopSize), "unaligned size");
144 }
145 
146 
147 // Creates a RuntimeBlob from a CodeBuffer
148 // and copies code and relocation info.
149 RuntimeBlob::RuntimeBlob(
150   const char* name,
151   CodeBuffer* cb,
152   int         header_size,
153   int         size,
154   int         frame_complete,
155   int         frame_size,
156   OopMapSet*  oop_maps,
157   bool        caller_must_gc_arguments
158 ) : CodeBlob(name, compiler_none, CodeBlobLayout((address) this, size, header_size, cb), cb, frame_complete, frame_size, oop_maps, caller_must_gc_arguments) {
159   cb->copy_code_and_locs_to(this);
160 }
161 
162 void CodeBlob::flush() {
163   FREE_C_HEAP_ARRAY(unsigned char, _oop_maps);
164   _oop_maps = NULL;
165   NOT_PRODUCT(_asm_remarks.clear());
166   NOT_PRODUCT(_dbg_strings.clear());
167 }
168 
169 void CodeBlob::set_oop_maps(OopMapSet* p) {
170   // Danger Will Robinson! This method allocates a big
171   // chunk of memory, it's your job to free it.
172   if (p != NULL) {
173     _oop_maps = ImmutableOopMapSet::build_from(p);
174   } else {
175     _oop_maps = NULL;
176   }
177 }
178 
179 
180 void RuntimeBlob::trace_new_stub(RuntimeBlob* stub, const char* name1, const char* name2) {
181   // Do not hold the CodeCache lock during name formatting.

257   ThreadInVMfromUnknown __tiv;  // get to VM state in case we block on CodeCache_lock
258 
259   BufferBlob* blob = NULL;
260   unsigned int size = CodeBlob::allocation_size(cb, sizeof(BufferBlob));
261   assert(name != NULL, "must provide a name");
262   {
263     MutexLocker mu(CodeCache_lock, Mutex::_no_safepoint_check_flag);
264     blob = new (size) BufferBlob(name, size, cb);
265   }
266   // Track memory usage statistic after releasing CodeCache_lock
267   MemoryService::track_code_cache_memory_usage();
268 
269   return blob;
270 }
271 
272 void* BufferBlob::operator new(size_t s, unsigned size) throw() {
273   return CodeCache::allocate(size, CodeBlobType::NonNMethod);
274 }
275 
276 void BufferBlob::free(BufferBlob *blob) {
277   assert(blob != NULL, "caller must check for NULL");
278   ThreadInVMfromUnknown __tiv;  // get to VM state in case we block on CodeCache_lock
279   blob->flush();
280   {
281     MutexLocker mu(CodeCache_lock, Mutex::_no_safepoint_check_flag);
282     CodeCache::free((RuntimeBlob*)blob);
283   }
284   // Track memory usage statistic after releasing CodeCache_lock
285   MemoryService::track_code_cache_memory_usage();
286 }
287 
288 
289 //----------------------------------------------------------------------------------------------------
290 // Implementation of AdapterBlob
291 
292 AdapterBlob::AdapterBlob(int size, CodeBuffer* cb) :
293   BufferBlob("I2C/C2I adapters", size, cb) {
294   CodeCache::commit(this);
295 }
296 
297 AdapterBlob* AdapterBlob::create(CodeBuffer* cb) {
298   ThreadInVMfromUnknown __tiv;  // get to VM state in case we block on CodeCache_lock
299 
300   AdapterBlob* blob = NULL;
301   unsigned int size = CodeBlob::allocation_size(cb, sizeof(AdapterBlob));
302   {
303     MutexLocker mu(CodeCache_lock, Mutex::_no_safepoint_check_flag);
304     blob = new (size) AdapterBlob(size, cb);
305   }

702   // unimplemented
703 }
704 
705 void SingletonBlob::print_on(outputStream* st) const {
706   ttyLocker ttyl;
707   RuntimeBlob::print_on(st);
708   st->print_cr("%s", name());
709   Disassembler::decode((RuntimeBlob*)this, st);
710 }
711 
712 void SingletonBlob::print_value_on(outputStream* st) const {
713   st->print_cr("%s", name());
714 }
715 
716 void DeoptimizationBlob::print_value_on(outputStream* st) const {
717   st->print_cr("Deoptimization (frame not available)");
718 }
719 
720 // Implementation of OptimizedEntryBlob
721 
722 OptimizedEntryBlob::OptimizedEntryBlob(const char* name, int size, CodeBuffer* cb, intptr_t exception_handler_offset,
723                                        jobject receiver, ByteSize frame_data_offset) :
724   BufferBlob(name, size, cb),
725   _exception_handler_offset(exception_handler_offset),
726   _receiver(receiver),
727   _frame_data_offset(frame_data_offset) {
728   CodeCache::commit(this);
729 }
730 
731 OptimizedEntryBlob* OptimizedEntryBlob::create(const char* name, CodeBuffer* cb, intptr_t exception_handler_offset,
732                                                jobject receiver, ByteSize frame_data_offset) {
733   ThreadInVMfromUnknown __tiv;  // get to VM state in case we block on CodeCache_lock
734 
735   OptimizedEntryBlob* blob = nullptr;
736   unsigned int size = CodeBlob::allocation_size(cb, sizeof(OptimizedEntryBlob));
737   {
738     MutexLocker mu(CodeCache_lock, Mutex::_no_safepoint_check_flag);
739     blob = new (size) OptimizedEntryBlob(name, size, cb, exception_handler_offset, receiver, frame_data_offset);
740   }
741   // Track memory usage statistic after releasing CodeCache_lock
742   MemoryService::track_code_cache_memory_usage();
743 
744   return blob;
745 }
746 
747 void OptimizedEntryBlob::oops_do(OopClosure* f, const frame& frame) {
748   frame_data_for_frame(frame)->old_handles->oops_do(f);
749 }
750 
751 JavaFrameAnchor* OptimizedEntryBlob::jfa_for_frame(const frame& frame) const {
752   return &frame_data_for_frame(frame)->jfa;
753 }

142 {
143   assert(is_aligned(locs_size, oopSize), "unaligned size");
144 }
145 
146 
147 // Creates a RuntimeBlob from a CodeBuffer
148 // and copies code and relocation info.
149 RuntimeBlob::RuntimeBlob(
150   const char* name,
151   CodeBuffer* cb,
152   int         header_size,
153   int         size,
154   int         frame_complete,
155   int         frame_size,
156   OopMapSet*  oop_maps,
157   bool        caller_must_gc_arguments
158 ) : CodeBlob(name, compiler_none, CodeBlobLayout((address) this, size, header_size, cb), cb, frame_complete, frame_size, oop_maps, caller_must_gc_arguments) {
159   cb->copy_code_and_locs_to(this);
160 }
161 
162 void RuntimeBlob::free(RuntimeBlob* blob) {
163   assert(blob != NULL, "caller must check for NULL");
164   ThreadInVMfromUnknown __tiv;  // get to VM state in case we block on CodeCache_lock
165   blob->flush();
166   {
167     MutexLocker mu(CodeCache_lock, Mutex::_no_safepoint_check_flag);
168     CodeCache::free(blob);
169   }
170   // Track memory usage statistic after releasing CodeCache_lock
171   MemoryService::track_code_cache_memory_usage();
172 }
173 
174 void CodeBlob::flush() {
175   FREE_C_HEAP_ARRAY(unsigned char, _oop_maps);
176   _oop_maps = NULL;
177   NOT_PRODUCT(_asm_remarks.clear());
178   NOT_PRODUCT(_dbg_strings.clear());
179 }
180 
181 void CodeBlob::set_oop_maps(OopMapSet* p) {
182   // Danger Will Robinson! This method allocates a big
183   // chunk of memory, it's your job to free it.
184   if (p != NULL) {
185     _oop_maps = ImmutableOopMapSet::build_from(p);
186   } else {
187     _oop_maps = NULL;
188   }
189 }
190 
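As the comment above warns, set_oop_maps() builds a C-heap ImmutableOopMapSet that the blob owns and that is only released in CodeBlob::flush(). A minimal, hypothetical sketch of how a stub generator typically supplies such a set (frame_size_in_slots, oop_slot and pc_offset are made-up placeholders, not values from this file):

    // Hypothetical sketch -- all sizes and offsets below are placeholders.
    OopMapSet* oop_maps = new OopMapSet();
    OopMap*    map      = new OopMap(frame_size_in_slots, 0 /* no argument slots */);
    map->set_oop(VMRegImpl::stack2reg(oop_slot));   // stack slot known to hold an oop
    oop_maps->add_gc_map(pc_offset, map);           // associate it with a safepoint pc
    // Passing the set through a RuntimeBlob constructor (or set_oop_maps() directly)
    // creates the ImmutableOopMapSet that CodeBlob::flush() later frees.
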
191 
192 void RuntimeBlob::trace_new_stub(RuntimeBlob* stub, const char* name1, const char* name2) {
193   // Do not hold the CodeCache lock during name formatting.

269   ThreadInVMfromUnknown __tiv;  // get to VM state in case we block on CodeCache_lock
270 
271   BufferBlob* blob = NULL;
272   unsigned int size = CodeBlob::allocation_size(cb, sizeof(BufferBlob));
273   assert(name != NULL, "must provide a name");
274   {
275     MutexLocker mu(CodeCache_lock, Mutex::_no_safepoint_check_flag);
276     blob = new (size) BufferBlob(name, size, cb);
277   }
278   // Track memory usage statistic after releasing CodeCache_lock
279   MemoryService::track_code_cache_memory_usage();
280 
281   return blob;
282 }
283 
284 void* BufferBlob::operator new(size_t s, unsigned size) throw() {
285   return CodeCache::allocate(size, CodeBlobType::NonNMethod);
286 }
287 
288 void BufferBlob::free(BufferBlob *blob) {
289   RuntimeBlob::free(blob);
290 }
291 
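With the change above, BufferBlob::free() simply forwards to RuntimeBlob::free(). For context, a hypothetical usage sketch of the create/free pair (not part of this change; the buffer name and sizes are made up):

    CodeBuffer cb("example_stub", 1024 /* code size */, 512 /* locs size */);
    // ... emit code into cb, e.g. with a MacroAssembler ...
    BufferBlob* bb = BufferBlob::create("example_stub", &cb);
    if (bb != NULL) {
      // ... use the blob ...
      BufferBlob::free(bb);   // flushes the blob and returns its space to the code cache
    }
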
292 
293 //----------------------------------------------------------------------------------------------------
294 // Implementation of AdapterBlob
295 
296 AdapterBlob::AdapterBlob(int size, CodeBuffer* cb) :
297   BufferBlob("I2C/C2I adapters", size, cb) {
298   CodeCache::commit(this);
299 }
300 
301 AdapterBlob* AdapterBlob::create(CodeBuffer* cb) {
302   ThreadInVMfromUnknown __tiv;  // get to VM state in case we block on CodeCache_lock
303 
304   AdapterBlob* blob = NULL;
305   unsigned int size = CodeBlob::allocation_size(cb, sizeof(AdapterBlob));
306   {
307     MutexLocker mu(CodeCache_lock, Mutex::_no_safepoint_check_flag);
308     blob = new (size) AdapterBlob(size, cb);
309   }

706   // unimplemented
707 }
708 
709 void SingletonBlob::print_on(outputStream* st) const {
710   ttyLocker ttyl;
711   RuntimeBlob::print_on(st);
712   st->print_cr("%s", name());
713   Disassembler::decode((RuntimeBlob*)this, st);
714 }
715 
716 void SingletonBlob::print_value_on(outputStream* st) const {
717   st->print_cr("%s", name());
718 }
719 
720 void DeoptimizationBlob::print_value_on(outputStream* st) const {
721   st->print_cr("Deoptimization (frame not available)");
722 }
723 
724 // Implementation of OptimizedEntryBlob
725 
726 OptimizedEntryBlob::OptimizedEntryBlob(const char* name, CodeBuffer* cb, int size,
727                                        intptr_t exception_handler_offset,
728                                        jobject receiver, ByteSize frame_data_offset) :
729   RuntimeBlob(name, cb, sizeof(OptimizedEntryBlob), size, CodeOffsets::frame_never_safe, 0 /* no frame size */,
730               /* oop maps = */ nullptr, /* caller must gc arguments = */ false),
731   _exception_handler_offset(exception_handler_offset),
732   _receiver(receiver),
733   _frame_data_offset(frame_data_offset) {
734   CodeCache::commit(this);
735 }
736 
737 void* OptimizedEntryBlob::operator new(size_t s, unsigned size) throw() {
738   return CodeCache::allocate(size, CodeBlobType::NonNMethod);
739 }
740 
741 OptimizedEntryBlob* OptimizedEntryBlob::create(const char* name, CodeBuffer* cb,
742                                                intptr_t exception_handler_offset,
743                                                jobject receiver, ByteSize frame_data_offset) {
744   ThreadInVMfromUnknown __tiv;  // get to VM state in case we block on CodeCache_lock
745 
746   OptimizedEntryBlob* blob = nullptr;
747   unsigned int size = CodeBlob::allocation_size(cb, sizeof(OptimizedEntryBlob));
748   {
749     MutexLocker mu(CodeCache_lock, Mutex::_no_safepoint_check_flag);
750     blob = new (size) OptimizedEntryBlob(name, cb, size,
751                                          exception_handler_offset, receiver, frame_data_offset);
752   }
753   // Track memory usage statistic after releasing CodeCache_lock
754   MemoryService::track_code_cache_memory_usage();
755 
756   trace_new_stub(blob, "OptimizedEntryBlob");
757 
758   return blob;
759 }
760 
761 void OptimizedEntryBlob::oops_do(OopClosure* f, const frame& frame) {
762   frame_data_for_frame(frame)->old_handles->oops_do(f);
763 }
764 
765 JavaFrameAnchor* OptimizedEntryBlob::jfa_for_frame(const frame& frame) const {
766   return &frame_data_for_frame(frame)->jfa;
767 }
768 
769 void OptimizedEntryBlob::free(OptimizedEntryBlob* blob) {
770   assert(blob != nullptr, "caller must check for NULL");
771   JNIHandles::destroy_global(blob->receiver());
772   RuntimeBlob::free(blob);
773 }
774 
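As OptimizedEntryBlob::free() shows, the receiver passed to create() is expected to be a global JNI handle, which free() destroys before releasing the blob itself. A hypothetical lifecycle sketch (names and offsets below are placeholders):

    jobject receiver = JNIHandles::make_global(receiver_handle /* assumed Handle */);
    OptimizedEntryBlob* blob = OptimizedEntryBlob::create("example_entry", &cb,
                                                          exception_handler_offset,
                                                          receiver, frame_data_offset);
    // ... use blob->code_begin() as the native entry point while it is needed ...
    OptimizedEntryBlob::free(blob);   // destroys the global handle, then frees the blob
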
775 void OptimizedEntryBlob::preserve_callee_argument_oops(frame fr, const RegisterMap* reg_map, OopClosure* f) {
776   // do nothing for now
777 }
778 
779 // Misc.
780 void OptimizedEntryBlob::verify() {
781   // unimplemented
782 }
783 
784 void OptimizedEntryBlob::print_on(outputStream* st) const {
785   RuntimeBlob::print_on(st);
786   print_value_on(st);
787 }
788 
789 void OptimizedEntryBlob::print_value_on(outputStream* st) const {
790   st->print_cr("OptimizedEntryBlob (" INTPTR_FORMAT  ") used for %s", p2i(this), name());
791 }