
src/hotspot/share/runtime/stubRoutines.cpp


// Returns true if pc falls inside one of the registered
// unsafe-memory-access stub ranges.
bool UnsafeMemoryAccess::contains_pc(address pc) {
  for (int i = 0; i < UnsafeMemoryAccess::_table_length; i++) {
    UnsafeMemoryAccess* entry = &UnsafeMemoryAccess::_table[i];
    if (pc >= entry->start_pc() && pc < entry->end_pc()) {
      return true;
    }
  }
  return false;
}

// Returns the recorded error-exit pc for the entry whose
// [start_pc, end_pc) range contains pc, or nullptr if pc is not
// inside any registered range.
address UnsafeMemoryAccess::page_error_continue_pc(address pc) {
  for (int i = 0; i < UnsafeMemoryAccess::_table_length; i++) {
    UnsafeMemoryAccess* entry = &UnsafeMemoryAccess::_table[i];
    if (pc >= entry->start_pc() && pc < entry->end_pc()) {
      return entry->error_exit_pc();
    }
  }
  return nullptr;
}
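These two lookups exist for the benefit of the platform fault handlers: when a hardware fault lands inside a registered unsafe-copy stub, the handler redirects execution to that stub's recorded error exit instead of treating the fault as a VM crash. A minimal sketch of that pattern, assuming a POSIX-style handler (the helper function itself is hypothetical; the real per-platform code lives under os_cpu/):

// Hypothetical helper illustrating how a fault handler consumes the
// table queried above; simplified from the per-platform handlers.
static bool handle_unsafe_copy_fault(address pc, ucontext_t* uc) {
  if (UnsafeMemoryAccess::contains_pc(pc)) {
    // The faulting pc is inside a registered stub range, so resume at
    // that entry's recorded error-exit pc instead of crashing.
    address next_pc = UnsafeMemoryAccess::page_error_continue_pc(pc);
    assert(next_pc != nullptr, "matching entry must supply an exit pc");
    os::Posix::ucontext_set_pc(uc, next_pc);
    return true;  // fault handled
  }
  return false;   // not in any stub range; use normal fault handling
}
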
static BufferBlob* initialize_stubs(StubCodeGenerator::StubsKind kind,
                                    int code_size, int max_aligned_stubs,
                                    const char* timer_msg,
                                    const char* buffer_name,
                                    const char* assert_msg) {
  ResourceMark rm;
  TraceTime timer(timer_msg, TRACETIME_LOG(Info, startuptime));
  // Add extra space for large CodeEntryAlignment
  int size = code_size + CodeEntryAlignment * max_aligned_stubs;
  BufferBlob* stubs_code = BufferBlob::create(buffer_name, size);
  if (stubs_code == nullptr) {
    vm_exit_out_of_memory(code_size, OOM_MALLOC_ERROR, "CodeCache: no room for %s", buffer_name);
  }
  CodeBuffer buffer(stubs_code);
  StubGenerator_generate(&buffer, kind);
  // When new stubs are added, we need to make sure there is some space left
  // to catch the situation when we should increase the size again.
  assert(code_size == 0 || buffer.insts_remaining() > 200, "increase %s", assert_msg);

  LogTarget(Info, stubs) lt;
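
For context, each stub-generation phase declared in this file calls this helper with its own StubsKind, size constant, and message strings. A sketch of such a caller, assuming JDK-mainline naming, with the field name, size constant, and aligned-stub count as illustrative placeholders:

// Illustrative per-phase initializer modeled on the callers in this
// file; the field name, size constant, and stub count are placeholders.
void StubRoutines::initialize_initial_stubs() {
  if (_initial_stubs_code == nullptr) {
    _initial_stubs_code = initialize_stubs(StubCodeGenerator::Initial_stubs,
                                           _initial_stubs_code_size,
                                           10, // max_aligned_stubs, placeholder
                                           "StubRoutines generation initial stubs",
                                           "StubRoutines (initial stubs)",
                                           "_initial_stubs_code_size");
  }
}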
