
src/hotspot/share/runtime/stubRoutines.cpp


#define BLOB_CHECK_OFFSET(blob_name)                                  \
  if (id < _blob_limits[((int)blobId) + 1]) { return blobId; }        \
  blobId = StubGenBlobId:: blob_name ## _id;                          \

// translate a global stub id to an associated blob id based on the
// computed blob limits

StubGenBlobId StubRoutines::stub_to_blob(StubGenStubId stubId) {
  int id = (int)stubId;
  assert(id > ((int)StubGenStubId::NO_STUBID) && id < ((int)StubGenStubId::NUM_STUBIDS), "stub id out of range!");
  // start with no blob to catch stub id == -1
  StubGenBlobId blobId = StubGenBlobId::NO_BLOBID;
  STUBGEN_BLOBS_DO(BLOB_CHECK_OFFSET);
  // if we reach here we should have the last blob id
  assert(blobId == StubGenBlobId::NUM_BLOBIDS - 1, "unexpected blob id");
  return blobId;
}
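For readers unfamiliar with the X-macro idiom used here: STUBGEN_BLOBS_DO applies BLOB_CHECK_OFFSET once per declared blob, so the single invocation above expands into one check-and-advance pair per blob. A minimal sketch of the expansion, assuming hypothetical blob names initial and continuation (the real list lives in the STUBGEN_BLOBS_DO declaration):

  // Hypothetical expansion of STUBGEN_BLOBS_DO(BLOB_CHECK_OFFSET);
  // the blob names here are placeholders, not the actual declared list.
  if (id < _blob_limits[((int)blobId) + 1]) { return blobId; }
  blobId = StubGenBlobId::initial_id;
  if (id < _blob_limits[((int)blobId) + 1]) { return blobId; }
  blobId = StubGenBlobId::continuation_id;
  // ... one check-and-advance pair per remaining blob ...

Each check returns as soon as the stub id falls below the next blob's cumulative limit; if none fires, blobId ends up on the last declared blob, which the assert after the expansion verifies.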

#endif // ASSERT

// Initialization

extern void StubGenerator_generate(CodeBuffer* code, StubGenBlobId blob_id); // only interface to generators

void UnsafeMemoryAccess::create_table(int max_size) {
  UnsafeMemoryAccess::_table = new UnsafeMemoryAccess[max_size];
  UnsafeMemoryAccess::_table_max_length = max_size;
}
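The table is populated by the stub generators while they emit code that may fault on user-supplied addresses. A hedged sketch of one registration (the setter names mirror the start_pc()/end_pc()/error_exit_pc() accessors used below but are assumptions, as is the bounds handling):

  // Hypothetical registration of one faulting region during stub generation.
  assert(UnsafeMemoryAccess::_table_length < UnsafeMemoryAccess::_table_max_length, "table full");
  UnsafeMemoryAccess* entry = &UnsafeMemoryAccess::_table[UnsafeMemoryAccess::_table_length++];
  entry->set_start_pc(first_faulting_pc);    // first instruction that may fault
  entry->set_end_pc(after_last_faulting_pc); // exclusive end of the region
  entry->set_error_exit_pc(resume_pc);       // where the fault handler resumes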

bool UnsafeMemoryAccess::contains_pc(address pc) {
  assert(UnsafeMemoryAccess::_table != nullptr, "");
  for (int i = 0; i < UnsafeMemoryAccess::_table_length; i++) {
    UnsafeMemoryAccess* entry = &UnsafeMemoryAccess::_table[i];
    if (pc >= entry->start_pc() && pc < entry->end_pc()) {
      return true;
    }
  }
  return false;
}

address UnsafeMemoryAccess::page_error_continue_pc(address pc) {
  assert(UnsafeMemoryAccess::_table != nullptr, "");
  for (int i = 0; i < UnsafeMemoryAccess::_table_length; i++) {
    UnsafeMemoryAccess* entry = &UnsafeMemoryAccess::_table[i];
    if (pc >= entry->start_pc() && pc < entry->end_pc()) {
      return entry->error_exit_pc();
    }
  }
  return nullptr;
}
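Together these two lookups serve the platform fault handlers: when a stub touches an unmapped page on behalf of Unsafe, the handler consults the table and resumes the stub at its recorded exit point instead of treating the fault as a crash. Roughly (a simplified sketch; the real handlers live in platform-specific code and differ in detail):

  // Inside a fault handler, with pc taken from the saved signal context:
  if (UnsafeMemoryAccess::contains_pc(pc)) {
    address next_pc = UnsafeMemoryAccess::page_error_continue_pc(pc);
    // ... redirect the saved context so execution continues at next_pc ...
  }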


static BufferBlob* initialize_stubs(StubGenBlobId blob_id,
                                    int code_size, int max_aligned_stubs,
                                    const char* timer_msg,
                                    const char* buffer_name,
                                    const char* assert_msg) {
  ResourceMark rm;
  if (code_size == 0) {
    LogTarget(Info, stubs) lt;
    if (lt.is_enabled()) {
      LogStream ls(lt);
      ls.print_cr("%s\t not generated", buffer_name);
    }
    return nullptr;
  }
  TraceTime timer(timer_msg, TRACETIME_LOG(Info, startuptime));
  // Add extra space for large CodeEntryAlignment
  int size = code_size + CodeEntryAlignment * max_aligned_stubs;
  BufferBlob* stubs_code = BufferBlob::create(buffer_name, size);
  if (stubs_code == nullptr) {
    vm_exit_out_of_memory(code_size, OOM_MALLOC_ERROR, "CodeCache: no room for %s", buffer_name);
  }
  CodeBuffer buffer(stubs_code);
  StubGenerator_generate(&buffer, blob_id);
  // When new stubs are added we need to make sure there is some space left
  // to catch the situation when we should increase the size again.
  assert(code_size == 0 || buffer.insts_remaining() > 200, "increase %s", assert_msg);

  LogTarget(Info, stubs) lt;
  if (lt.is_enabled()) {
    LogStream ls(lt);
    ls.print_cr("%s\t [" INTPTR_FORMAT ", " INTPTR_FORMAT "] used: %d, free: %d",
                buffer_name, p2i(stubs_code->content_begin()), p2i(stubs_code->content_end()),
                buffer.total_content_size(), buffer.insts_remaining());
  }
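  // For context, a hedged sketch of a typical caller of initialize_stubs;
  // the wrapper name, field names and argument values below are assumptions,
  // not code from this file:
  //
  //   void StubRoutines::initialize_initial_stubs() {
  //     if (_initial_stubs_code == nullptr) {
  //       _initial_stubs_code = initialize_stubs(StubGenBlobId::initial_id,
  //                                              _initial_stubs_code_size, ...);
  //     }
  //   }
  //
  // One such wrapper exists per blob, invoked at the matching startup phase.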