< prev index next >

src/hotspot/share/code/vtableStubs.cpp

Print this page

191                            (int)(masm->pc() - s->code_begin()),
192                            stub_length,
193                            (int)(s->code_end() - masm->pc()));
194   }
195   guarantee(masm->pc() <= s->code_end(), "%s #%d: overflowed buffer, estimated len: %d, actual len: %d, overrun: %d",
196                                          name, index, stub_length,
197                                          (int)(masm->pc() - s->code_begin()),
198                                          (int)(masm->pc() - s->code_end()));
199   assert((masm->pc() + index_dependent_slop) <= s->code_end(), "%s #%d: spare space for 32-bit offset: required = %d, available = %d",
200                                          name, index, index_dependent_slop,
201                                          (int)(s->code_end() - masm->pc()));
202 
203   // After the first vtable/itable stub is generated, we have a much
204   // better estimate for the stub size. Remember/update this
205   // estimate after some sanity checks.
206   check_and_set_size_limit(is_vtable_stub, masm->offset(), slop_bytes);
207   s->set_exception_points(npe_addr, ame_addr);
208 }
209 
210 
// Returns the entry point of the (vtable or itable) dispatch stub for the
// given vtable_index, creating and registering the stub on first use.
// Returns nullptr if stub creation fails (not enough code cache space).
address VtableStubs::find_stub(bool is_vtable_stub, int vtable_index) {
  assert(vtable_index >= 0, "must be positive");

  VtableStub* s;
  {
    // Serialize lookup/creation. No safepoint check: callers may hold a
    // NoSafepointVerifier (see the JVMTI comment below).
    MutexLocker ml(VtableStubs_lock, Mutex::_no_safepoint_check_flag);
    s = lookup(is_vtable_stub, vtable_index);
    if (s == nullptr) {
      if (is_vtable_stub) {
        s = create_vtable_stub(vtable_index);
      } else {
        s = create_itable_stub(vtable_index);
      }

      // Creation of vtable or itable can fail if there is not enough free space in the code cache.
      if (s == nullptr) {
        return nullptr;
      }

      // Publish the new stub in the hash table before releasing the lock.
      enter(is_vtable_stub, vtable_index, s);
      if (PrintAdapterHandlers) {
        tty->print_cr("Decoding VtableStub %s[%d]@" PTR_FORMAT " [" PTR_FORMAT ", " PTR_FORMAT "] (" SIZE_FORMAT " bytes)",
                      is_vtable_stub? "vtbl": "itbl", vtable_index, p2i(VtableStub::receiver_location()),
                      p2i(s->code_begin()), p2i(s->code_end()), pointer_delta(s->code_end(), s->code_begin(), 1));
        Disassembler::decode(s->code_begin(), s->code_end());
      }
      // Notify JVMTI about this stub. The event will be recorded by the enclosing
      // JvmtiDynamicCodeEventCollector and posted when this thread has released
      // all locks. Only post this event if a new state is not required. Creating a new state would
      // cause a safepoint and the caller of this code has a NoSafepointVerifier.
      if (JvmtiExport::should_post_dynamic_code_generated()) {
        JvmtiExport::post_dynamic_code_generated_while_holding_locks(is_vtable_stub? "vtable stub": "itable stub",
                                                                     s->code_begin(), s->code_end());
      }
    }
  }
  return s->entry_point();
}
249 
250 
251 inline uint VtableStubs::hash(bool is_vtable_stub, int vtable_index){
252   // Assumption: receiver_location < 4 in most cases.
253   int hash = ((vtable_index << 2) ^ VtableStub::receiver_location()->value()) + vtable_index;



254   return (is_vtable_stub ? ~hash : hash)  & mask;
255 }
256 
257 
258 inline uint VtableStubs::unsafe_hash(address entry_point) {
259   // The entrypoint may or may not be a VtableStub. Generate a hash as if it was.
260   address vtable_stub_addr = entry_point - VtableStub::entry_offset();
261   assert(CodeCache::contains(vtable_stub_addr), "assumed to always be the case");
262   address vtable_type_addr = vtable_stub_addr + offset_of(VtableStub, _type);
263   address vtable_index_addr = vtable_stub_addr + offset_of(VtableStub, _index);
264   bool is_vtable_stub = *vtable_type_addr == static_cast<uint8_t>(VtableStub::Type::vtable_stub);
265   short vtable_index;
266   static_assert(sizeof(VtableStub::_index) == sizeof(vtable_index), "precondition");
267   memcpy(&vtable_index, vtable_index_addr, sizeof(vtable_index));
268   return hash(is_vtable_stub, vtable_index);
269 }
270 
271 
272 VtableStub* VtableStubs::lookup(bool is_vtable_stub, int vtable_index) {
273   assert_lock_strong(VtableStubs_lock);
274   unsigned hash = VtableStubs::hash(is_vtable_stub, vtable_index);
275   VtableStub* s = Atomic::load(&_table[hash]);
276   while( s && !s->matches(is_vtable_stub, vtable_index)) s = s->next();
277   return s;
278 }
279 
280 
281 void VtableStubs::enter(bool is_vtable_stub, int vtable_index, VtableStub* s) {
282   assert_lock_strong(VtableStubs_lock);
283   assert(s->matches(is_vtable_stub, vtable_index), "bad vtable stub");
284   unsigned int h = VtableStubs::hash(is_vtable_stub, vtable_index);
285   // Insert s at the beginning of the corresponding list.
286   s->set_next(Atomic::load(&_table[h]));
287   // Make sure that concurrent readers not taking the mutex observe the writing of "next".
288   Atomic::release_store(&_table[h], s);
289 }
290 
291 VtableStub* VtableStubs::entry_point(address pc) {
292   // The pc may or may not be the entry point for a VtableStub. Use unsafe_hash
293   // to generate the hash that would have been used if it was. The lookup in the
294   // _table will only succeed if there is a VtableStub with an entry point at
295   // the pc.
296   MutexLocker ml(VtableStubs_lock, Mutex::_no_safepoint_check_flag);
297   uint hash = VtableStubs::unsafe_hash(pc);

298   VtableStub* s;
299   for (s = Atomic::load(&_table[hash]); s != nullptr && s->entry_point() != pc; s = s->next()) {}
300   return (s != nullptr && s->entry_point() == pc) ? s : nullptr;
301 }
302 
// Returns true iff pc lies inside the code of any generated stub.
bool VtableStubs::contains(address pc) {
  // simple solution for now - we may want to use
  // a faster way if this function is called often
  return stub_containing(pc) != nullptr;
}
308 
309 
310 VtableStub* VtableStubs::stub_containing(address pc) {
311   for (int i = 0; i < N; i++) {
312     for (VtableStub* s = Atomic::load_acquire(&_table[i]); s != nullptr; s = s->next()) {
313       if (s->contains(pc)) return s;
314     }
315   }
316   return nullptr;
317 }

191                            (int)(masm->pc() - s->code_begin()),
192                            stub_length,
193                            (int)(s->code_end() - masm->pc()));
194   }
195   guarantee(masm->pc() <= s->code_end(), "%s #%d: overflowed buffer, estimated len: %d, actual len: %d, overrun: %d",
196                                          name, index, stub_length,
197                                          (int)(masm->pc() - s->code_begin()),
198                                          (int)(masm->pc() - s->code_end()));
199   assert((masm->pc() + index_dependent_slop) <= s->code_end(), "%s #%d: spare space for 32-bit offset: required = %d, available = %d",
200                                          name, index, index_dependent_slop,
201                                          (int)(s->code_end() - masm->pc()));
202 
203   // After the first vtable/itable stub is generated, we have a much
204   // better estimate for the stub size. Remember/update this
205   // estimate after some sanity checks.
206   check_and_set_size_limit(is_vtable_stub, masm->offset(), slop_bytes);
207   s->set_exception_points(npe_addr, ame_addr);
208 }
209 
210 
// Returns the entry point of the (vtable or itable) dispatch stub for the
// given vtable_index and calling-convention variant (caller_is_c1),
// creating and registering the stub on first use.
// Returns nullptr if stub creation fails (not enough code cache space).
address VtableStubs::find_stub(bool is_vtable_stub, int vtable_index, bool caller_is_c1) {
  assert(vtable_index >= 0, "must be positive");

  VtableStub* s;
  {
    // Serialize lookup/creation. No safepoint check: callers may hold a
    // NoSafepointVerifier (see the JVMTI comment below).
    MutexLocker ml(VtableStubs_lock, Mutex::_no_safepoint_check_flag);
    s = lookup(is_vtable_stub, vtable_index, caller_is_c1);
    if (s == nullptr) {
      if (is_vtable_stub) {
        s = create_vtable_stub(vtable_index, caller_is_c1);
      } else {
        s = create_itable_stub(vtable_index, caller_is_c1);
      }

      // Creation of vtable or itable can fail if there is not enough free space in the code cache.
      if (s == nullptr) {
        return nullptr;
      }

      // Publish the new stub in the hash table before releasing the lock.
      enter(is_vtable_stub, vtable_index, caller_is_c1, s);
      if (PrintAdapterHandlers) {
        tty->print_cr("Decoding VtableStub (%s) %s[%d]@" PTR_FORMAT " [" PTR_FORMAT ", " PTR_FORMAT "] (" SIZE_FORMAT " bytes)",
                      caller_is_c1 ? "c1" : "full opt",
                      is_vtable_stub? "vtbl": "itbl", vtable_index, p2i(VtableStub::receiver_location()),
                      p2i(s->code_begin()), p2i(s->code_end()), pointer_delta(s->code_end(), s->code_begin(), 1));
        Disassembler::decode(s->code_begin(), s->code_end());
      }
      // Notify JVMTI about this stub. The event will be recorded by the enclosing
      // JvmtiDynamicCodeEventCollector and posted when this thread has released
      // all locks. Only post this event if a new state is not required. Creating a new state would
      // cause a safepoint and the caller of this code has a NoSafepointVerifier.
      if (JvmtiExport::should_post_dynamic_code_generated()) {
        JvmtiExport::post_dynamic_code_generated_while_holding_locks(is_vtable_stub? "vtable stub": "itable stub",  // FIXME: need to pass caller_is_c1??
                                                                     s->code_begin(), s->code_end());
      }
    }
  }
  return s->entry_point();
}
250 
251 
252 inline uint VtableStubs::hash(bool is_vtable_stub, int vtable_index, bool caller_is_c1) {
253   // Assumption: receiver_location < 4 in most cases.
254   int hash = ((vtable_index << 2) ^ VtableStub::receiver_location()->value()) + vtable_index;
255   if (caller_is_c1) {
256     hash = 7 - hash;
257   }
258   return (is_vtable_stub ? ~hash : hash)  & mask;
259 }
260 
261 
262 inline uint VtableStubs::unsafe_hash(address entry_point, bool caller_is_c1) {
263   // The entrypoint may or may not be a VtableStub. Generate a hash as if it was.
264   address vtable_stub_addr = entry_point - VtableStub::entry_offset();
265   assert(CodeCache::contains(vtable_stub_addr), "assumed to always be the case");
266   address vtable_type_addr = vtable_stub_addr + offset_of(VtableStub, _type);
267   address vtable_index_addr = vtable_stub_addr + offset_of(VtableStub, _index);
268   bool is_vtable_stub = *vtable_type_addr == static_cast<uint8_t>(VtableStub::Type::vtable_stub);
269   short vtable_index;
270   static_assert(sizeof(VtableStub::_index) == sizeof(vtable_index), "precondition");
271   memcpy(&vtable_index, vtable_index_addr, sizeof(vtable_index));
272   return hash(is_vtable_stub, vtable_index, caller_is_c1);
273 }
274 
275 VtableStub* VtableStubs::lookup(bool is_vtable_stub, int vtable_index, bool caller_is_c1) {

276   assert_lock_strong(VtableStubs_lock);
277   unsigned hash = VtableStubs::hash(is_vtable_stub, vtable_index, caller_is_c1);
278   VtableStub* s = Atomic::load(&_table[hash]);
279   while( s && !s->matches(is_vtable_stub, vtable_index, caller_is_c1)) s = s->next();
280   return s;
281 }
282 
283 
284 void VtableStubs::enter(bool is_vtable_stub, int vtable_index, bool caller_is_c1, VtableStub* s) {
285   assert_lock_strong(VtableStubs_lock);
286   assert(s->matches(is_vtable_stub, vtable_index, caller_is_c1), "bad vtable stub");
287   unsigned int h = VtableStubs::hash(is_vtable_stub, vtable_index, caller_is_c1);
288   // Insert s at the beginning of the corresponding list.
289   s->set_next(Atomic::load(&_table[h]));
290   // Make sure that concurrent readers not taking the mutex observe the writing of "next".
291   Atomic::release_store(&_table[h], s);
292 }
293 
// Return the VtableStub whose entry point is exactly pc, or nullptr.
VtableStub* VtableStubs::entry_point(address pc) {
  // The pc may or may not be the entry point for a VtableStub. Use unsafe_hash
  // to generate the hash that would have been used if it was. The lookup in the
  // _table will only succeed if there is a VtableStub with an entry point at
  // the pc.
  MutexLocker ml(VtableStubs_lock, Mutex::_no_safepoint_check_flag);
  // NOTE(review): caller_is_c1() is read from speculative memory before we
  // know that pc really is a stub entry point; presumably safe for the same
  // reason unsafe_hash's raw reads are (CodeCache-resident, validated by the
  // table lookup below) — confirm.
  VtableStub* stub = (VtableStub*)(pc - VtableStub::entry_offset());
  uint hash = VtableStubs::unsafe_hash(pc, stub->caller_is_c1());
  VtableStub* s;
  for (s = Atomic::load(&_table[hash]); s != nullptr && s->entry_point() != pc; s = s->next()) {}
  return (s != nullptr && s->entry_point() == pc) ? s : nullptr;
}
306 
// Returns true iff pc lies inside the code of any generated stub.
bool VtableStubs::contains(address pc) {
  // simple solution for now - we may want to use
  // a faster way if this function is called often
  return stub_containing(pc) != nullptr;
}
312 
313 
314 VtableStub* VtableStubs::stub_containing(address pc) {
315   for (int i = 0; i < N; i++) {
316     for (VtableStub* s = Atomic::load_acquire(&_table[i]); s != nullptr; s = s->next()) {
317       if (s->contains(pc)) return s;
318     }
319   }
320   return nullptr;
321 }
< prev index next >