
src/hotspot/share/runtime/stubRoutines.cpp

163 #define BLOB_CHECK_OFFSET(blob_name)                                \
164   if (id < _blob_limits[((int)blobId) + 1]) { return blobId; }      \
165   blobId = StubGenBlobId:: blob_name ## _id;                        \
166 
167 // translate a global stub id to an associated blob id based on the
168 // computed blob limits
169 
170 StubGenBlobId StubRoutines::stub_to_blob(StubGenStubId stubId) {
171   int id = (int)stubId;
172   assert(id > ((int)StubGenStubId::NO_STUBID) && id < ((int)StubGenStubId::NUM_STUBIDS), "stub id out of range!");
173   // start with no blob to catch stub id == -1
174   StubGenBlobId blobId = StubGenBlobId::NO_BLOBID;
175   STUBGEN_BLOBS_DO(BLOB_CHECK_OFFSET);
176   // if we reach here we should have the last blob id
177   assert(blobId == StubGenBlobId::NUM_BLOBIDS - 1, "unexpected blob id");
178   return blobId;
179 }
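
To make the lookup concrete, here is a hedged sketch of what STUBGEN_BLOBS_DO(BLOB_CHECK_OFFSET) expands to for three hypothetical blob names initial, continuation and final (the real names and count are supplied by STUBGEN_BLOBS_DO); the function returns at the first blob whose limit lies above the stub id:

    // sketch of the macro expansion for hypothetical blobs initial, continuation, final
    if (id < _blob_limits[((int)blobId) + 1]) { return blobId; }   // blobId == NO_BLOBID, checks _blob_limits[0]
    blobId = StubGenBlobId::initial_id;
    if (id < _blob_limits[((int)blobId) + 1]) { return blobId; }   // blobId == initial_id, checks _blob_limits[1]
    blobId = StubGenBlobId::continuation_id;
    if (id < _blob_limits[((int)blobId) + 1]) { return blobId; }   // blobId == continuation_id, checks _blob_limits[2]
    blobId = StubGenBlobId::final_id;
    // falls through: the stub must belong to the last blob, which the assert below checks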
180 
181 #endif // ASSERT
182 
183 // TODO: update with 8343767
184 address StubRoutines::_load_inline_type_fields_in_regs = nullptr;
185 address StubRoutines::_store_inline_type_fields_to_buf = nullptr;
186 
187 
188 // Initialization
189 //
190 // Note: to break cycle with universe initialization, stubs are generated in two phases.
191 // The first one generates stubs needed during universe init (e.g., _handle_must_compile_first_entry).
192 // The second phase includes all other stubs (which may depend on universe being initialized.)
193 
194 extern void StubGenerator_generate(CodeBuffer* code, StubGenBlobId blob_id); // only interface to generators
195 
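As a rough sketch of the two-phase scheme described above, each phase hands a fresh CodeBuffer for its blob to StubGenerator_generate; the helper name, the blob ids chosen and the buffer sizes below are illustrative assumptions, not the actual HotSpot initialization code:

    // illustrative sketch only -- generate_stubs_for, the blob ids and the sizes are assumptions
    static void generate_stubs_for(StubGenBlobId blob_id, int code_size) {
      ResourceMark rm;
      BufferBlob* stub_blob = BufferBlob::create("StubRoutines (sketch)", code_size);
      guarantee(stub_blob != nullptr, "CodeCache: no room for stub routines");
      CodeBuffer code(stub_blob);
      StubGenerator_generate(&code, blob_id);          // only interface to generators
    }

    void stubRoutines_init1() {
      // phase 1: just the stubs needed while the universe is being initialized
      generate_stubs_for(StubGenBlobId::initial_id, 20000 /* assumed size */);
    }

    void stubRoutines_init2() {
      // phase 2: the remaining stubs, which may rely on an initialized universe
      generate_stubs_for(StubGenBlobId::final_id, 60000 /* assumed size */);
    }
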
196 void UnsafeMemoryAccess::create_table(int max_size) {
197   UnsafeMemoryAccess::_table = new UnsafeMemoryAccess[max_size];
198   UnsafeMemoryAccess::_table_max_length = max_size;
199 }
200 
201 bool UnsafeMemoryAccess::contains_pc(address pc) {
202   for (int i = 0; i < UnsafeMemoryAccess::_table_length; i++) {
203     UnsafeMemoryAccess* entry = &UnsafeMemoryAccess::_table[i];
204     if (pc >= entry->start_pc() && pc < entry->end_pc()) {
205       return true;
206     }
207   }
208   return false;
209 }
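
For context, a hedged sketch of how a fault handler might consult this table; handle_unsafe_access_fault is a hypothetical helper (the real per-platform signal handlers do an equivalent check and use the companion UnsafeMemoryAccess::page_error_continue_pc lookup to pick the resume address):

    // illustrative sketch only -- not the actual per-platform signal handler code
    static bool handle_unsafe_access_fault(address faulting_pc, address* resume_pc) {
      if (UnsafeMemoryAccess::contains_pc(faulting_pc)) {
        // the fault occurred inside a guarded unsafe copy/set stub: resume at the
        // recorded error exit instead of treating it as a VM crash
        *resume_pc = UnsafeMemoryAccess::page_error_continue_pc(faulting_pc);
        return *resume_pc != nullptr;
      }
      return false;  // not inside any guarded region; handle the fault normally
    }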