// Returns true if pc lies inside one of the registered unsafe-memory-access
// stub regions, i.e. generated code that may fault on a bad address.
bool UnsafeMemoryAccess::contains_pc(address pc) {
  for (int i = 0; i < UnsafeMemoryAccess::_table_length; i++) {
    UnsafeMemoryAccess* entry = &UnsafeMemoryAccess::_table[i];
    if (pc >= entry->start_pc() && pc < entry->end_pc()) {
      return true;
    }
  }
  return false;
}

// Returns the continuation pc for a fault at pc inside a registered
// unsafe-memory-access stub region, or nullptr if pc is in no such region.
address UnsafeMemoryAccess::page_error_continue_pc(address pc) {
  for (int i = 0; i < UnsafeMemoryAccess::_table_length; i++) {
    UnsafeMemoryAccess* entry = &UnsafeMemoryAccess::_table[i];
    if (pc >= entry->start_pc() && pc < entry->end_pc()) {
      return entry->error_exit_pc();
    }
  }
  return nullptr;
}
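
// Illustrative usage sketch (assumed caller shape, not code from this file):
// a platform fault handler can use the two helpers above to step over a
// faulting unsafe access. The handler variables and the ucontext_set_pc call
// are assumptions for illustration only.
//
//   if (UnsafeMemoryAccess::contains_pc(pc)) {
//     if (address next_pc = UnsafeMemoryAccess::page_error_continue_pc(pc)) {
//       os::Posix::ucontext_set_pc(uc, next_pc); // resume at the stub's error exit
//       return true;                             // fault handled
//     }
//   }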

// Allocate a code blob for the given kind of stubs and run the platform stub
// generator into it. Exits the VM if the code cache has no room for the blob.
static BufferBlob* initialize_stubs(StubCodeGenerator::StubsKind kind,
                                    int code_size, int max_aligned_stubs,
                                    const char* timer_msg,
                                    const char* buffer_name,
                                    const char* assert_msg) {
  ResourceMark rm;
  TraceTime timer(timer_msg, TRACETIME_LOG(Info, startuptime));
  // Add extra space to allow for a large CodeEntryAlignment
  int size = code_size + CodeEntryAlignment * max_aligned_stubs;
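  // For example (illustrative numbers, not values from this file): with
  // CodeEntryAlignment = 64 and max_aligned_stubs = 10, this reserves an extra
  // 640 bytes so each aligned stub can pad up to the next alignment boundary.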
  BufferBlob* stubs_code = BufferBlob::create(buffer_name, size);
  if (stubs_code == nullptr) {
    vm_exit_out_of_memory(size, OOM_MALLOC_ERROR, "CodeCache: no room for %s", buffer_name);
  }
  CodeBuffer buffer(stubs_code);
  StubGenerator_generate(&buffer, kind);
  // When new stubs are added, make sure some space is left over so we can
  // catch the situation where the size should be increased again.
  assert(code_size == 0 || buffer.insts_remaining() > 200, "increase %s", assert_msg);

  LogTarget(Info, stubs) lt;