
src/hotspot/share/code/codeBlob.cpp (before the change)


143   _caller_must_gc_arguments(caller_must_gc_arguments)
144 {
145   assert(is_aligned(_size,            oopSize), "unaligned size");
146   assert(is_aligned(header_size,      oopSize), "unaligned size");
147   assert(is_aligned(_relocation_size, oopSize), "unaligned size");
148   assert(_data_offset <= _size, "codeBlob is too small: %d > %d", _data_offset, _size);
149   assert(is_nmethod() || (cb->total_oop_size() + cb->total_metadata_size() == 0), "must be nmethod");
150   assert(code_end() == content_end(), "must be the same - see code_end()");
151 #ifdef COMPILER1
152   // probably wrong for tiered
153   assert(_frame_size >= -1, "must use frame size or -1 for runtime stubs");
154 #endif // COMPILER1
155 
156   if (_mutable_data_size > 0) {
157     _mutable_data = (address)os::malloc(_mutable_data_size, mtCode);
158     if (_mutable_data == nullptr) {
159       vm_exit_out_of_memory(_mutable_data_size, OOM_MALLOC_ERROR, "codebuffer: no space for mutable data");
160     }
161   } else {
162     // We need a unique and valid non-null address
163     assert(_mutable_data = blob_end(), "sanity");
164   }
165 
166   set_oop_maps(oop_maps);
167 }
168 
169 // Simple CodeBlob used for simple BufferBlob.
170 CodeBlob::CodeBlob(const char* name, CodeBlobKind kind, int size, uint16_t header_size) :
171   _oop_maps(nullptr),
172   _name(name),
173   _mutable_data(header_begin() + size), // default value is blob_end()
174   _size(size),
175   _relocation_size(0),
176   _content_offset(CodeBlob::align_code_offset(header_size)),
177   _code_offset(_content_offset),
178   _data_offset(size),
179   _frame_size(0),
180   S390_ONLY(_ctable_offset(0) COMMA)
181   _header_size(header_size),
182   _frame_complete_offset(CodeOffsets::frame_never_safe),
183   _kind(kind),
184   _caller_must_gc_arguments(false)
185 {
186   assert(is_aligned(size,            oopSize), "unaligned size");
187   assert(is_aligned(header_size,     oopSize), "unaligned size");
188   assert(_mutable_data = blob_end(), "sanity");
189 }
190 
191 void CodeBlob::restore_mutable_data(address reloc_data) {
192   // Relocation data is now stored as part of the mutable data area; allocate it before copying relocations
193   if (_mutable_data_size > 0) {
194     _mutable_data = (address)os::malloc(_mutable_data_size, mtCode);
195     if (_mutable_data == nullptr) {
196       vm_exit_out_of_memory(_mutable_data_size, OOM_MALLOC_ERROR, "codebuffer: no space for mutable data");
197     }
198   }
199   if (_relocation_size > 0) {
200     memcpy((address)relocation_begin(), reloc_data, relocation_size());
201   }
202 }
203 
204 void CodeBlob::purge() {
205   assert(_mutable_data != nullptr, "should never be null");
206   if (_mutable_data != blob_end()) {
207     os::free(_mutable_data);
208     _mutable_data = blob_end(); // Valid non-null address
209   }
210   if (_oop_maps != nullptr) {
211     delete _oop_maps;
212     _oop_maps = nullptr;
213   }
214   NOT_PRODUCT(_asm_remarks.clear());
215   NOT_PRODUCT(_dbg_strings.clear());
216 }
217 
218 void CodeBlob::set_oop_maps(OopMapSet* p) {
219   // Danger Will Robinson! This method allocates a big
220   // chunk of memory, it's your job to free it.
221   if (p != nullptr) {
222     _oop_maps = ImmutableOopMapSet::build_from(p);
223   } else {
224     _oop_maps = nullptr;
225   }
226 }
227 
228 const ImmutableOopMap* CodeBlob::oop_map_for_return_address(address return_address) const {
229   assert(_oop_maps != nullptr, "nope");
230   return _oop_maps->find_map_at_offset((intptr_t) return_address - (intptr_t) code_begin());
231 }
232 
233 void CodeBlob::print_code_on(outputStream* st) {
234   ResourceMark m;
235   Disassembler::decode(this, st);
236 }
237 
238 void CodeBlob::prepare_for_archiving_impl() {
239   set_name(nullptr);
240   _oop_maps = nullptr;
241   _mutable_data = nullptr;
242 #ifndef PRODUCT
243   asm_remarks().clear();
244   dbg_strings().clear();
245 #endif /* PRODUCT */
246 }
247 
248 void CodeBlob::prepare_for_archiving() {
249   vptr(_kind)->prepare_for_archiving(this);
250 }
251 
252 void CodeBlob::archive_blob(CodeBlob* blob, address archive_buffer) {
253   blob->copy_to(archive_buffer);
254   CodeBlob* archived_blob = (CodeBlob*)archive_buffer;
255   archived_blob->prepare_for_archiving();
256 }
257 
258 void CodeBlob::post_restore_impl() {
259   // Track memory usage statistics after releasing CodeCache_lock
260   MemoryService::track_code_cache_memory_usage();
261 }
262 
263 void CodeBlob::post_restore() {
264   vptr(_kind)->post_restore(this);
265 }
266 
267 CodeBlob* CodeBlob::restore(address code_cache_buffer,
268                             const char* name,
269                             address archived_reloc_data,
270                             ImmutableOopMapSet* archived_oop_maps)
271 {
272   copy_to(code_cache_buffer);
273   CodeBlob* code_blob = (CodeBlob*)code_cache_buffer;
274   code_blob->set_name(name);
275   code_blob->restore_mutable_data(archived_reloc_data);
276   code_blob->set_oop_maps(archived_oop_maps);
277   return code_blob;
278 }
279 
280 CodeBlob* CodeBlob::create(CodeBlob* archived_blob,
281                            const char* name,
282                            address archived_reloc_data,
283                            ImmutableOopMapSet* archived_oop_maps
284 #ifndef PRODUCT
285                            , AsmRemarks& archived_asm_remarks
286                            , DbgStrings& archived_dbg_strings
287 #endif // PRODUCT
288                           )
289 {
290   ThreadInVMfromUnknown __tiv;  // get to VM state in case we block on CodeCache_lock
291 
292   CodeCache::gc_on_allocation();
293 
294   CodeBlob* blob = nullptr;
295   unsigned int size = archived_blob->size();
296   {
297     MutexLocker mu(CodeCache_lock, Mutex::_no_safepoint_check_flag);
298     address code_cache_buffer = (address)CodeCache::allocate(size, CodeBlobType::NonNMethod);
299     if (code_cache_buffer != nullptr) {
300       blob = archived_blob->restore(code_cache_buffer,
301                                     name,
302                                     archived_reloc_data,
303                                     archived_oop_maps);
304 #ifndef PRODUCT
305       blob->use_remarks(archived_asm_remarks);
306       archived_asm_remarks.clear();
307       blob->use_strings(archived_dbg_strings);
308       archived_dbg_strings.clear();
309 #endif // PRODUCT
310 
311       assert(blob != nullptr, "sanity check");
312       // Flush the code block
313       ICache::invalidate_range(blob->code_begin(), blob->code_size());
314       CodeCache::commit(blob); // Count adapters
315     }
316   }
317   if (blob != nullptr) {
318     blob->post_restore();
319   }
320   return blob;
321 }
322 
323 //-----------------------------------------------------------------------------------------
324 // Creates a RuntimeBlob from a CodeBuffer and copies code and relocation info.
325 
326 RuntimeBlob::RuntimeBlob(
327   const char* name,
328   CodeBlobKind kind,
329   CodeBuffer* cb,

src/hotspot/share/code/codeBlob.cpp (after the change)

143   _caller_must_gc_arguments(caller_must_gc_arguments)
144 {
145   assert(is_aligned(_size,            oopSize), "unaligned size");
146   assert(is_aligned(header_size,      oopSize), "unaligned size");
147   assert(is_aligned(_relocation_size, oopSize), "unaligned size");
148   assert(_data_offset <= _size, "codeBlob is too small: %d > %d", _data_offset, _size);
149   assert(is_nmethod() || (cb->total_oop_size() + cb->total_metadata_size() == 0), "must be nmethod");
150   assert(code_end() == content_end(), "must be the same - see code_end()");
151 #ifdef COMPILER1
152   // probably wrong for tiered
153   assert(_frame_size >= -1, "must use frame size or -1 for runtime stubs");
154 #endif // COMPILER1
155 
156   if (_mutable_data_size > 0) {
157     _mutable_data = (address)os::malloc(_mutable_data_size, mtCode);
158     if (_mutable_data == nullptr) {
159       vm_exit_out_of_memory(_mutable_data_size, OOM_MALLOC_ERROR, "codebuffer: no space for mutable data");
160     }
161   } else {
162     // We need a unique and valid non-null address
163     assert(_mutable_data == blob_end(), "sanity");
164   }
165 
166   set_oop_maps(oop_maps);
167 }
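
The _mutable_data handling above, together with purge() further down, follows an allocate-or-sentinel pattern: malloc only when the mutable section is non-empty, otherwise point the field at blob_end() so it is always a unique, valid, non-null address and never needs a null check. Below is a minimal standalone sketch of that pattern; Blob, init_mutable_data and the other names are illustrative stand-ins, not HotSpot APIs.

// Minimal standalone sketch of the allocate-or-sentinel pattern used for
// _mutable_data. All names here are illustrative, not HotSpot APIs.
#include <cstdio>
#include <cstdlib>
#include <cstddef>

struct Blob {
  std::size_t size;            // size of the fixed part of the blob
  std::size_t mutable_size;    // size of the separately allocated mutable area
  unsigned char* base;         // start of the fixed part
  unsigned char* mutable_data;

  unsigned char* blob_end() { return base + size; }

  void init_mutable_data() {
    if (mutable_size > 0) {
      mutable_data = static_cast<unsigned char*>(std::malloc(mutable_size));
      if (mutable_data == nullptr) {
        std::fprintf(stderr, "no space for mutable data\n");
        std::exit(1);                  // HotSpot calls vm_exit_out_of_memory here
      }
    } else {
      mutable_data = blob_end();       // unique, valid, non-null sentinel
    }
  }

  void purge() {
    if (mutable_data != blob_end()) {  // only free what was really malloc'ed
      std::free(mutable_data);
      mutable_data = blob_end();       // field stays valid and non-null
    }
  }
};

int main() {
  unsigned char storage[64];
  Blob blob{sizeof(storage), 16, storage, nullptr};
  blob.init_mutable_data();
  blob.purge();   // safe to call again: the sentinel prevents a double free
  return 0;
}

Freeing only when the pointer differs from the sentinel is what makes purge() safe to run more than once.
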
168 
169 // Simple CodeBlob used for simple BufferBlob.
170 CodeBlob::CodeBlob(const char* name, CodeBlobKind kind, int size, uint16_t header_size) :
171   _oop_maps(nullptr),
172   _name(name),
173   _mutable_data(header_begin() + size), // default value is blob_end()
174   _size(size),
175   _relocation_size(0),
176   _content_offset(CodeBlob::align_code_offset(header_size)),
177   _code_offset(_content_offset),
178   _data_offset(size),
179   _frame_size(0),
180   _mutable_data_size(0),
181   S390_ONLY(_ctable_offset(0) COMMA)
182   _header_size(header_size),
183   _frame_complete_offset(CodeOffsets::frame_never_safe),
184   _kind(kind),
185   _caller_must_gc_arguments(false)
186 {
187   assert(is_aligned(size,            oopSize), "unaligned size");
188   assert(is_aligned(header_size,     oopSize), "unaligned size");
189   assert(_mutable_data == blob_end(), "sanity");
190 }
191 
192 void CodeBlob::restore_mutable_data(address reloc_data) {
193   // Relocation data is now stored as part of the mutable data area; allocate it before copying relocations
194   if (_mutable_data_size > 0) {
195     _mutable_data = (address)os::malloc(_mutable_data_size, mtCode);
196     if (_mutable_data == nullptr) {
197       vm_exit_out_of_memory(_mutable_data_size, OOM_MALLOC_ERROR, "codebuffer: no space for mutable data");
198     }
199   } else {
200     _mutable_data = blob_end(); // default value
201   }
202   if (_relocation_size > 0) {
203     assert(_mutable_data_size > 0, "relocation is part of mutable data section");
204     memcpy((address)relocation_begin(), reloc_data, relocation_size());
205   }
206 }
207 
208 void CodeBlob::purge() {
209   assert(_mutable_data != nullptr, "should never be null");
210   if (_mutable_data != blob_end()) {
211     os::free(_mutable_data);
212     _mutable_data = blob_end(); // Valid non-null address
213   }
214   if (_oop_maps != nullptr && !AOTCodeCache::is_address_in_aot_cache((address)_oop_maps)) {
215     delete _oop_maps;
216     _oop_maps = nullptr;
217   }
218   NOT_PRODUCT(_asm_remarks.clear());
219   NOT_PRODUCT(_dbg_strings.clear());
220 }
221 
222 void CodeBlob::set_oop_maps(OopMapSet* p) {
223   // Danger Will Robinson! This method allocates a big
224   // chunk of memory, it's your job to free it.
225   if (p != nullptr) {
226     _oop_maps = ImmutableOopMapSet::build_from(p);
227   } else {
228     _oop_maps = nullptr;
229   }
230 }
231 
232 const ImmutableOopMap* CodeBlob::oop_map_for_return_address(address return_address) const {
233   assert(_oop_maps != nullptr, "nope");
234   return _oop_maps->find_map_at_offset((intptr_t) return_address - (intptr_t) code_begin());
235 }
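
oop_map_for_return_address converts an absolute return address into an offset from code_begin() and asks the ImmutableOopMapSet for the map recorded at that offset. Below is a small standalone sketch of that address-to-offset lookup, with a plain table standing in for ImmutableOopMapSet; every name in it is illustrative.

// Standalone sketch: translate an absolute return address into an offset from
// the start of the code and look up the entry recorded at that offset. The
// table stands in for ImmutableOopMapSet; every name here is illustrative.
#include <cstdint>
#include <cassert>

struct MapEntry { std::intptr_t offset; int map_id; };

static const MapEntry table[] = { {16, 1}, {48, 2} };

int map_for_return_address(const unsigned char* code_begin,
                           const unsigned char* return_address) {
  std::intptr_t offset =
      (std::intptr_t)return_address - (std::intptr_t)code_begin;
  for (const MapEntry& e : table) {
    if (e.offset == offset) {
      return e.map_id;
    }
  }
  assert(false && "no oop map recorded at this return address");
  return -1;
}

int main() {
  unsigned char code[64] = {};
  return map_for_return_address(code, code + 16) == 1 ? 0 : 1;
}
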
236 
237 void CodeBlob::print_code_on(outputStream* st) {
238   ResourceMark m;
239   Disassembler::decode(this, st);
240 }
241 
242 void CodeBlob::prepare_for_archiving_impl() {
243   set_name(nullptr);
244   _oop_maps = nullptr;
245   _mutable_data = nullptr;
246 #ifndef PRODUCT
247   asm_remarks().clear_ref();
248   dbg_strings().clear_ref();
249 #endif /* PRODUCT */
250 }
251 
252 void CodeBlob::prepare_for_archiving() {
253   vptr(_kind)->prepare_for_archiving(this);
254 }
255 
256 void CodeBlob::archive_blob(CodeBlob* blob, address archive_buffer) {
257   blob->copy_to(archive_buffer);
258   CodeBlob* archived_blob = (CodeBlob*)archive_buffer;
259   archived_blob->prepare_for_archiving();
260 }
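
archive_blob copies the blob into the archive buffer and then calls prepare_for_archiving on the copy, clearing the fields that only make sense in the current process (name, oop maps, mutable data, and the remark/string collections). Below is a standalone sketch of that copy-then-scrub step, using made-up types rather than the real CodeBlob layout.

// Standalone sketch of "copy into the archive buffer, then clear process-local
// pointers on the copy". Types and fields are illustrative, not CodeBlob's.
#include <cstring>
#include <cstddef>

struct ArchivableBlob {
  const char*    name;          // points into this process only
  void*          oop_maps;      // heap allocation, not archivable as-is
  unsigned char* mutable_data;  // reallocated and refilled on restore
  std::size_t    size;

  // The real code copies the whole blob (header plus code); this sketch copies
  // only the header struct itself.
  void copy_to(void* dst) const { std::memcpy(dst, this, sizeof(*this)); }

  // Run on the copy that lives in the archive buffer.
  void prepare_for_archiving() {
    name = nullptr;
    oop_maps = nullptr;
    mutable_data = nullptr;
  }
};

void archive_blob(const ArchivableBlob& blob, void* archive_buffer) {
  blob.copy_to(archive_buffer);                                   // 1. copy
  auto* archived = static_cast<ArchivableBlob*>(archive_buffer);  // 2. scrub the copy;
  archived->prepare_for_archiving();                              //    the original is untouched
}

int main() {
  alignas(ArchivableBlob) unsigned char buffer[sizeof(ArchivableBlob)];
  ArchivableBlob blob{"stub", nullptr, nullptr, sizeof(ArchivableBlob)};
  archive_blob(blob, buffer);
  return 0;
}

Scrubbing the copy rather than the original keeps the live blob usable while the archived image carries no dangling process-local pointers.
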
261 
262 void CodeBlob::post_restore_impl() {
263   // Track memory usage statistics after releasing CodeCache_lock
264   MemoryService::track_code_cache_memory_usage();
265 }
266 
267 void CodeBlob::post_restore() {
268   vptr(_kind)->post_restore(this);
269 }
270 
271 CodeBlob* CodeBlob::restore(address code_cache_buffer,
272                             const char* name,
273                             address archived_reloc_data,
274                             ImmutableOopMapSet* archived_oop_maps)
275 {
276   copy_to(code_cache_buffer);
277   CodeBlob* code_blob = (CodeBlob*)code_cache_buffer;
278   code_blob->set_name(name);
279   code_blob->restore_mutable_data(archived_reloc_data);
280   code_blob->set_oop_maps(archived_oop_maps);
281   return code_blob;
282 }
283 
284 CodeBlob* CodeBlob::create(CodeBlob* archived_blob,
285                            const char* name,
286                            address archived_reloc_data,
287                            ImmutableOopMapSet* archived_oop_maps
288                           )
289 {
290   ThreadInVMfromUnknown __tiv;  // get to VM state in case we block on CodeCache_lock
291 
292   CodeCache::gc_on_allocation();
293 
294   CodeBlob* blob = nullptr;
295   unsigned int size = archived_blob->size();
296   {
297     MutexLocker mu(CodeCache_lock, Mutex::_no_safepoint_check_flag);
298     address code_cache_buffer = (address)CodeCache::allocate(size, CodeBlobType::NonNMethod);
299     if (code_cache_buffer != nullptr) {
300       blob = archived_blob->restore(code_cache_buffer,
301                                     name,
302                                     archived_reloc_data,
303                                     archived_oop_maps);
304 
305       assert(blob != nullptr, "sanity check");
306       // Flush the code block
307       ICache::invalidate_range(blob->code_begin(), blob->code_size());
308       CodeCache::commit(blob); // Count adapters
309     }
310   }
311   if (blob != nullptr) {
312     blob->post_restore();
313   }
314   return blob;
315 }
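
create allocates from the code cache and restores the blob inside the MutexLocker scope (with safepoint checks disabled), but post_restore, which tracks code-cache memory usage, runs only after that scope closes and CodeCache_lock has been released. Below is a standalone sketch of that shape, with std::mutex standing in for the VM mutex and all names being placeholders.

// Standalone sketch: allocate and initialize under a lock, run the follow-up
// work only after the lock has been released. std::mutex stands in for
// CodeCache_lock; Blob and track_usage are placeholders.
#include <mutex>
#include <cstdio>

static std::mutex cache_lock;

struct Blob { int id; };

static Blob* allocate_locked(int id) { return new Blob{id}; }        // pretend cache allocation
static void track_usage() { std::puts("tracking usage after unlock"); }

Blob* create(int id) {
  Blob* blob = nullptr;
  {
    std::lock_guard<std::mutex> guard(cache_lock);  // scope mirrors the MutexLocker block
    blob = allocate_locked(id);
    // ... restore contents, flush the icache, commit ...
  }                                                 // lock released here
  if (blob != nullptr) {
    track_usage();                                  // post_restore() analogue
  }
  return blob;
}

int main() {
  delete create(1);
  return 0;
}

Keeping the usage tracking outside the locked region avoids holding CodeCache_lock across work that does not need it.
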
316 
317 //-----------------------------------------------------------------------------------------
318 // Creates a RuntimeBlob from a CodeBuffer and copies code and relocation info.
319 
320 RuntimeBlob::RuntimeBlob(
321   const char* name,
322   CodeBlobKind kind,
323   CodeBuffer* cb,