src/hotspot/share/runtime/stubRoutines.cpp

165 #define BLOB_CHECK_OFFSET(blob_name)                                \
166   if (id < _blob_limits[((int)blobId) + 1]) { return blobId; }      \
167   blobId = StubGenBlobId:: blob_name ## _id;                        \
168 
169 // translate a global stub id to an associated blob id based on the
170 // computed blob limits
171 
172 StubGenBlobId StubRoutines::stub_to_blob(StubGenStubId stubId) {
173   int id = (int)stubId;
174   assert(id > ((int)StubGenStubId::NO_STUBID) && id < ((int)StubGenStubId::NUM_STUBIDS), "stub id out of range!");
175   // start with no blob to catch stub id == -1
176   StubGenBlobId blobId = StubGenBlobId::NO_BLOBID;
177   STUBGEN_BLOBS_DO(BLOB_CHECK_OFFSET);
178   // if we reach here we should have the last blob id
179   assert(blobId == StubGenBlobId::NUM_BLOBIDS - 1, "unexpected blob id");
180   return blobId;
181 }
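
STUBGEN_BLOBS_DO applies BLOB_CHECK_OFFSET once per declared blob, so stub_to_blob unrolls into one cumulative range check per blob against _blob_limits. The same pattern in miniature, as a standalone sketch with invented blob names and limits (not HotSpot code):

    // Standalone illustration of the cumulative-limit lookup (invented names).
    #include <cassert>

    enum class BlobId : int { NO_BLOBID = -1, foo_id, bar_id, NUM_BLOBIDS };

    // blob_limits[i + 1] is the number of stubs in blobs 0..i:
    // stubs [0, 3) belong to foo, stubs [3, 7) belong to bar.
    static const int blob_limits[] = { 0, 3, 7 };

    #define BLOBS_DO(do_blob) do_blob(foo) do_blob(bar)

    #define BLOB_CHECK(name)                                        \
      if (id < blob_limits[(int)blob + 1]) { return blob; }         \
      blob = BlobId::name##_id;

    static BlobId stub_to_blob_sketch(int id) {
      assert(id >= 0 && id < blob_limits[2]);
      BlobId blob = BlobId::NO_BLOBID;
      BLOBS_DO(BLOB_CHECK)   // one range check per blob, in declaration order
      // falling through means the stub belongs to the last blob
      return blob;
    }

    int main() {
      assert(stub_to_blob_sketch(2) == BlobId::foo_id);
      assert(stub_to_blob_sketch(5) == BlobId::bar_id);
      return 0;
    }

In the real code the blob names come from STUBGEN_BLOBS_DO and the limits from the computed _blob_limits array; the trailing assert in stub_to_blob checks that the fall-through really landed on the last blob.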
182 
183 #endif // ASSERT
184 
185 // TODO: update with 8343767
186 address StubRoutines::_load_inline_type_fields_in_regs = nullptr;
187 address StubRoutines::_store_inline_type_fields_to_buf = nullptr;
188 
189 
190 // Initialization
191 //
192 // Note: to break a cycle with universe initialization, stubs are generated in two phases.
193 // The first phase generates stubs needed during universe init (e.g., _handle_must_compile_first_entry).
194 // The second phase includes all other stubs (which may depend on the universe being initialized).
195 
196 extern void StubGenerator_generate(CodeBuffer* code, StubGenBlobId blob_id); // only interface to generators
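
As a rough sketch of how each phase drives the generator declared above: allocate a buffer in the code cache and pass it to StubGenerator_generate for the blob being built. The helper name and parameters here are assumptions for illustration; the real initialization also sizes each blob per platform and records the generated entry points.

    // Illustrative only, not the actual initialization routine.
    static void generate_one_blob_sketch(StubGenBlobId blob_id, int code_size, const char* name) {
      // Reserve space in the code cache for this phase's stubs.
      BufferBlob* blob = BufferBlob::create(name, code_size);
      assert(blob != nullptr, "CodeCache: no room for %s", name);
      CodeBuffer buffer(blob);
      // The generator emits every stub that belongs to blob_id into the buffer.
      StubGenerator_generate(&buffer, blob_id);
    }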
197 
198 void UnsafeMemoryAccess::create_table(int max_size) {
199   UnsafeMemoryAccess::_table = new UnsafeMemoryAccess[max_size];
200   UnsafeMemoryAccess::_table_max_length = max_size;
201 }
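
For context on how the table built here is consumed: contains_pc() below answers whether a pc lies in one of the registered [start_pc, end_pc) ranges, which the VM's fault handling typically uses to decide that a fault inside an unsafe memory copy should be recovered at the entry's error exit instead of crashing. A hedged sketch with a hypothetical wrapper name:

    // Hypothetical helper (illustrative only).
    static bool is_registered_unsafe_access(address faulting_pc) {
      // Scans UnsafeMemoryAccess::_table for a range containing faulting_pc.
      return UnsafeMemoryAccess::contains_pc(faulting_pc);
    }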
202 
203 bool UnsafeMemoryAccess::contains_pc(address pc) {
204   for (int i = 0; i < UnsafeMemoryAccess::_table_length; i++) {
205     UnsafeMemoryAccess* entry = &UnsafeMemoryAccess::_table[i];
206     if (pc >= entry->start_pc() && pc < entry->end_pc()) {
207       return true;
208     }
209   }