< prev index next >

src/hotspot/share/code/vtableStubs.cpp

Print this page

190                            (int)(masm->pc() - s->code_begin()),
191                            stub_length,
192                            (int)(s->code_end() - masm->pc()));
193   }
194   guarantee(masm->pc() <= s->code_end(), "%s #%d: overflowed buffer, estimated len: %d, actual len: %d, overrun: %d",
195                                          name, index, stub_length,
196                                          (int)(masm->pc() - s->code_begin()),
197                                          (int)(masm->pc() - s->code_end()));
198   assert((masm->pc() + index_dependent_slop) <= s->code_end(), "%s #%d: spare space for 32-bit offset: required = %d, available = %d",
199                                          name, index, index_dependent_slop,
200                                          (int)(s->code_end() - masm->pc()));
201 
202   // After the first vtable/itable stub is generated, we have a much
203   // better estimate for the stub size. Remember/update this
204   // estimate after some sanity checks.
205   check_and_set_size_limit(is_vtable_stub, masm->offset(), slop_bytes);
206   s->set_exception_points(npe_addr, ame_addr);
207 }
208 
209 
// Find (or create on first use) the vtable/itable stub for the given index and
// return its entry point. Returns nullptr only when stub creation fails, which
// happens if there is not enough free space in the code cache.
210 address VtableStubs::find_stub(bool is_vtable_stub, int vtable_index) {
211   assert(vtable_index >= 0, "must be positive");
212 
213   VtableStub* s;
214   {
      // Lookup and insertion are serialized by this lock; note that enter()
      // uses a release_store so that readers not holding the lock still see a
      // fully-linked stub.
215     MutexLocker ml(VtableStubs_lock, Mutex::_no_safepoint_check_flag);
216     s = lookup(is_vtable_stub, vtable_index);
217     if (s == nullptr) {
218       if (is_vtable_stub) {
219         s = create_vtable_stub(vtable_index);
220       } else {
221         s = create_itable_stub(vtable_index);
222       }
223 
224       // Creation of vtable or itable can fail if there is not enough free space in the code cache.
225       if (s == nullptr) {
226         return nullptr;
227       }
228 
      // Publish the new stub in the hash table while still holding the lock.
229       enter(is_vtable_stub, vtable_index, s);
230       if (PrintAdapterHandlers) {
231         tty->print_cr("Decoding VtableStub %s[%d]@" PTR_FORMAT " [" PTR_FORMAT ", " PTR_FORMAT "] (%zu bytes)",

232                       is_vtable_stub? "vtbl": "itbl", vtable_index, p2i(VtableStub::receiver_location()),
233                       p2i(s->code_begin()), p2i(s->code_end()), pointer_delta(s->code_end(), s->code_begin(), 1));
234         Disassembler::decode(s->code_begin(), s->code_end());
235       }
236       // Notify JVMTI about this stub. The event will be recorded by the enclosing
237       // JvmtiDynamicCodeEventCollector and posted when this thread has released
238       // all locks. Only post this event if a new state is not required. Creating a new state would
239       // cause a safepoint and the caller of this code has a NoSafepointVerifier.
240       if (JvmtiExport::should_post_dynamic_code_generated()) {
241         JvmtiExport::post_dynamic_code_generated_while_holding_locks(is_vtable_stub? "vtable stub": "itable stub",
242                                                                      s->code_begin(), s->code_end());
243       }
244     }
245   }
246   return s->entry_point();
247 }
248 
249 
250 inline uint VtableStubs::hash(bool is_vtable_stub, int vtable_index){
251   // Assumption: receiver_location < 4 in most cases.
252   int hash = ((vtable_index << 2) ^ VtableStub::receiver_location()->value()) + vtable_index;



253   return (is_vtable_stub ? ~hash : hash)  & mask;
254 }
255 
256 
257 inline uint VtableStubs::unsafe_hash(address entry_point) {
258   // The entrypoint may or may not be a VtableStub. Generate a hash as if it was.
259   address vtable_stub_addr = entry_point - VtableStub::entry_offset();
260   assert(CodeCache::contains(vtable_stub_addr), "assumed to always be the case");
261   address vtable_type_addr = vtable_stub_addr + offset_of(VtableStub, _type);
262   address vtable_index_addr = vtable_stub_addr + offset_of(VtableStub, _index);
263   bool is_vtable_stub = *vtable_type_addr == static_cast<uint8_t>(VtableStub::Type::vtable_stub);
264   short vtable_index;
265   static_assert(sizeof(VtableStub::_index) == sizeof(vtable_index), "precondition");
266   memcpy(&vtable_index, vtable_index_addr, sizeof(vtable_index));
267   return hash(is_vtable_stub, vtable_index);
268 }
269 
270 
271 VtableStub* VtableStubs::lookup(bool is_vtable_stub, int vtable_index) {
272   assert_lock_strong(VtableStubs_lock);
273   unsigned hash = VtableStubs::hash(is_vtable_stub, vtable_index);
274   VtableStub* s = AtomicAccess::load(&_table[hash]);
275   while( s && !s->matches(is_vtable_stub, vtable_index)) s = s->next();
276   return s;
277 }
278 
279 
280 void VtableStubs::enter(bool is_vtable_stub, int vtable_index, VtableStub* s) {
281   assert_lock_strong(VtableStubs_lock);
282   assert(s->matches(is_vtable_stub, vtable_index), "bad vtable stub");
283   unsigned int h = VtableStubs::hash(is_vtable_stub, vtable_index);
284   // Insert s at the beginning of the corresponding list.
285   s->set_next(AtomicAccess::load(&_table[h]));
286   // Make sure that concurrent readers not taking the mutex observe the writing of "next".
287   AtomicAccess::release_store(&_table[h], s);
288 }
289 
290 VtableStub* VtableStubs::entry_point(address pc) {
291   // The pc may or may not be the entry point for a VtableStub. Use unsafe_hash
292   // to generate the hash that would have been used if it was. The lookup in the
293   // _table will only succeed if there is a VtableStub with an entry point at
294   // the pc.
295   MutexLocker ml(VtableStubs_lock, Mutex::_no_safepoint_check_flag);
296   uint hash = VtableStubs::unsafe_hash(pc);

297   VtableStub* s;
298   for (s = AtomicAccess::load(&_table[hash]); s != nullptr && s->entry_point() != pc; s = s->next()) {}
299   return (s != nullptr && s->entry_point() == pc) ? s : nullptr;
300 }
301 
302 bool VtableStubs::contains(address pc) {
303   // simple solution for now - we may want to use
304   // a faster way if this function is called often
305   return stub_containing(pc) != nullptr;
306 }
307 
308 
309 VtableStub* VtableStubs::stub_containing(address pc) {
310   for (int i = 0; i < N; i++) {
311     for (VtableStub* s = AtomicAccess::load_acquire(&_table[i]); s != nullptr; s = s->next()) {
312       if (s->contains(pc)) return s;
313     }
314   }
315   return nullptr;
316 }

190                            (int)(masm->pc() - s->code_begin()),
191                            stub_length,
192                            (int)(s->code_end() - masm->pc()));
193   }
194   guarantee(masm->pc() <= s->code_end(), "%s #%d: overflowed buffer, estimated len: %d, actual len: %d, overrun: %d",
195                                          name, index, stub_length,
196                                          (int)(masm->pc() - s->code_begin()),
197                                          (int)(masm->pc() - s->code_end()));
198   assert((masm->pc() + index_dependent_slop) <= s->code_end(), "%s #%d: spare space for 32-bit offset: required = %d, available = %d",
199                                          name, index, index_dependent_slop,
200                                          (int)(s->code_end() - masm->pc()));
201 
202   // After the first vtable/itable stub is generated, we have a much
203   // better estimate for the stub size. Remember/update this
204   // estimate after some sanity checks.
205   check_and_set_size_limit(is_vtable_stub, masm->offset(), slop_bytes);
206   s->set_exception_points(npe_addr, ame_addr);
207 }
208 
209 
// Find (or create on first use) the vtable/itable stub for the given index and
// return its entry point. caller_is_c1 selects between stubs generated for C1
// callers ("c1") and fully optimized callers ("full opt"); the two are kept
// as distinct table entries. Returns nullptr only when stub creation fails,
// which happens if there is not enough free space in the code cache.
210 address VtableStubs::find_stub(bool is_vtable_stub, int vtable_index, bool caller_is_c1) {
211   assert(vtable_index >= 0, "must be positive");
212 
213   VtableStub* s;
214   {
      // Lookup and insertion are serialized by this lock; note that enter()
      // uses a release_store so that readers not holding the lock still see a
      // fully-linked stub.
215     MutexLocker ml(VtableStubs_lock, Mutex::_no_safepoint_check_flag);
216     s = lookup(is_vtable_stub, vtable_index, caller_is_c1);
217     if (s == nullptr) {
218       if (is_vtable_stub) {
219         s = create_vtable_stub(vtable_index, caller_is_c1);
220       } else {
221         s = create_itable_stub(vtable_index, caller_is_c1);
222       }
223 
224       // Creation of vtable or itable can fail if there is not enough free space in the code cache.
225       if (s == nullptr) {
226         return nullptr;
227       }
228 
      // Publish the new stub in the hash table while still holding the lock.
229       enter(is_vtable_stub, vtable_index, caller_is_c1, s);
230       if (PrintAdapterHandlers) {
231         tty->print_cr("Decoding VtableStub (%s) %s[%d]@" PTR_FORMAT " [" PTR_FORMAT ", " PTR_FORMAT "] (%zu bytes)",
232                       caller_is_c1 ? "c1" : "full opt",
233                       is_vtable_stub? "vtbl": "itbl", vtable_index, p2i(VtableStub::receiver_location()),
234                       p2i(s->code_begin()), p2i(s->code_end()), pointer_delta(s->code_end(), s->code_begin(), 1));
235         Disassembler::decode(s->code_begin(), s->code_end());
236       }
237       // Notify JVMTI about this stub. The event will be recorded by the enclosing
238       // JvmtiDynamicCodeEventCollector and posted when this thread has released
239       // all locks. Only post this event if a new state is not required. Creating a new state would
240       // cause a safepoint and the caller of this code has a NoSafepointVerifier.
241       if (JvmtiExport::should_post_dynamic_code_generated()) {
242         JvmtiExport::post_dynamic_code_generated_while_holding_locks(is_vtable_stub? "vtable stub": "itable stub",  // FIXME: need to pass caller_is_c1??
243                                                                      s->code_begin(), s->code_end());
244       }
245     }
246   }
247   return s->entry_point();
248 }
249 
250 
251 inline uint VtableStubs::hash(bool is_vtable_stub, int vtable_index, bool caller_is_c1) {
252   // Assumption: receiver_location < 4 in most cases.
253   int hash = ((vtable_index << 2) ^ VtableStub::receiver_location()->value()) + vtable_index;
254   if (caller_is_c1) {
255     hash = 7 - hash;
256   }
257   return (is_vtable_stub ? ~hash : hash)  & mask;
258 }
259 
260 
261 inline uint VtableStubs::unsafe_hash(address entry_point, bool caller_is_c1) {
262   // The entrypoint may or may not be a VtableStub. Generate a hash as if it was.
263   address vtable_stub_addr = entry_point - VtableStub::entry_offset();
264   assert(CodeCache::contains(vtable_stub_addr), "assumed to always be the case");
265   address vtable_type_addr = vtable_stub_addr + offset_of(VtableStub, _type);
266   address vtable_index_addr = vtable_stub_addr + offset_of(VtableStub, _index);
267   bool is_vtable_stub = *vtable_type_addr == static_cast<uint8_t>(VtableStub::Type::vtable_stub);
268   short vtable_index;
269   static_assert(sizeof(VtableStub::_index) == sizeof(vtable_index), "precondition");
270   memcpy(&vtable_index, vtable_index_addr, sizeof(vtable_index));
271   return hash(is_vtable_stub, vtable_index, caller_is_c1);
272 }
273 
274 VtableStub* VtableStubs::lookup(bool is_vtable_stub, int vtable_index, bool caller_is_c1) {

275   assert_lock_strong(VtableStubs_lock);
276   unsigned hash = VtableStubs::hash(is_vtable_stub, vtable_index, caller_is_c1);
277   VtableStub* s = AtomicAccess::load(&_table[hash]);
278   while( s && !s->matches(is_vtable_stub, vtable_index, caller_is_c1)) s = s->next();
279   return s;
280 }
281 
282 
283 void VtableStubs::enter(bool is_vtable_stub, int vtable_index, bool caller_is_c1, VtableStub* s) {
284   assert_lock_strong(VtableStubs_lock);
285   assert(s->matches(is_vtable_stub, vtable_index, caller_is_c1), "bad vtable stub");
286   unsigned int h = VtableStubs::hash(is_vtable_stub, vtable_index, caller_is_c1);
287   // Insert s at the beginning of the corresponding list.
288   s->set_next(AtomicAccess::load(&_table[h]));
289   // Make sure that concurrent readers not taking the mutex observe the writing of "next".
290   AtomicAccess::release_store(&_table[h], s);
291 }
292 
// Return the VtableStub whose entry point is exactly pc, or nullptr if pc is
// not a stub entry point.
293 VtableStub* VtableStubs::entry_point(address pc) {
294   // The pc may or may not be the entry point for a VtableStub. Use unsafe_hash
295   // to generate the hash that would have been used if it was. The lookup in the
296   // _table will only succeed if there is a VtableStub with an entry point at
297   // the pc.
298   MutexLocker ml(VtableStubs_lock, Mutex::_no_safepoint_check_flag);
    // NOTE(review): this casts candidate memory to VtableStub* and calls
    // caller_is_c1() on it even though pc may NOT be a stub, whereas
    // unsafe_hash deliberately reads _type/_index via raw byte addresses and
    // memcpy for that same reason — confirm this typed access is safe (layout,
    // alignment) for arbitrary code-cache pcs.
299   VtableStub* stub = (VtableStub*)(pc - VtableStub::entry_offset());
300   uint hash = VtableStubs::unsafe_hash(pc, stub->caller_is_c1());
301   VtableStub* s;
    // Walk the bucket; only an exact entry-point match terminates the loop
    // with a non-null s.
302   for (s = AtomicAccess::load(&_table[hash]); s != nullptr && s->entry_point() != pc; s = s->next()) {}
303   return (s != nullptr && s->entry_point() == pc) ? s : nullptr;
304 }
305 
306 bool VtableStubs::contains(address pc) {
307   // simple solution for now - we may want to use
308   // a faster way if this function is called often
309   return stub_containing(pc) != nullptr;
310 }
311 
312 
313 VtableStub* VtableStubs::stub_containing(address pc) {
314   for (int i = 0; i < N; i++) {
315     for (VtableStub* s = AtomicAccess::load_acquire(&_table[i]); s != nullptr; s = s->next()) {
316       if (s->contains(pc)) return s;
317     }
318   }
319   return nullptr;
320 }
< prev index next >