190 (int)(masm->pc() - s->code_begin()),
191 stub_length,
192 (int)(s->code_end() - masm->pc()));
193 }
194 guarantee(masm->pc() <= s->code_end(), "%s #%d: overflowed buffer, estimated len: %d, actual len: %d, overrun: %d",
195 name, index, stub_length,
196 (int)(masm->pc() - s->code_begin()),
197 (int)(masm->pc() - s->code_end()));
198 assert((masm->pc() + index_dependent_slop) <= s->code_end(), "%s #%d: spare space for 32-bit offset: required = %d, available = %d",
199 name, index, index_dependent_slop,
200 (int)(s->code_end() - masm->pc()));
201
202 // After the first vtable/itable stub is generated, we have a much
203 // better estimate for the stub size. Remember/update this
204 // estimate after some sanity checks.
205 check_and_set_size_limit(is_vtable_stub, masm->offset(), slop_bytes);
206 s->set_exception_points(npe_addr, ame_addr);
207 }
208
209
210 address VtableStubs::find_stub(bool is_vtable_stub, int vtable_index) {
211 assert(vtable_index >= 0, "must be positive");
212
213 VtableStub* s;
214 {
215 MutexLocker ml(VtableStubs_lock, Mutex::_no_safepoint_check_flag);
216 s = lookup(is_vtable_stub, vtable_index);
217 if (s == nullptr) {
218 if (is_vtable_stub) {
219 s = create_vtable_stub(vtable_index);
220 } else {
221 s = create_itable_stub(vtable_index);
222 }
223
224 // Creation of vtable or itable can fail if there is not enough free space in the code cache.
225 if (s == nullptr) {
226 return nullptr;
227 }
228
229 enter(is_vtable_stub, vtable_index, s);
230 if (PrintAdapterHandlers) {
231 tty->print_cr("Decoding VtableStub %s[%d]@" PTR_FORMAT " [" PTR_FORMAT ", " PTR_FORMAT "] (%zu bytes)",
232 is_vtable_stub? "vtbl": "itbl", vtable_index, p2i(VtableStub::receiver_location()),
233 p2i(s->code_begin()), p2i(s->code_end()), pointer_delta(s->code_end(), s->code_begin(), 1));
234 Disassembler::decode(s->code_begin(), s->code_end());
235 }
236 // Notify JVMTI about this stub. The event will be recorded by the enclosing
237 // JvmtiDynamicCodeEventCollector and posted when this thread has released
238 // all locks. Only post this event if a new state is not required. Creating a new state would
239 // cause a safepoint and the caller of this code has a NoSafepointVerifier.
240 if (JvmtiExport::should_post_dynamic_code_generated()) {
241 JvmtiExport::post_dynamic_code_generated_while_holding_locks(is_vtable_stub? "vtable stub": "itable stub",
242 s->code_begin(), s->code_end());
243 }
244 }
245 }
246 return s->entry_point();
247 }
248
249
250 inline uint VtableStubs::hash(bool is_vtable_stub, int vtable_index){
251 // Assumption: receiver_location < 4 in most cases.
252 int hash = ((vtable_index << 2) ^ VtableStub::receiver_location()->value()) + vtable_index;
253 return (is_vtable_stub ? ~hash : hash) & mask;
254 }
255
256
// Hash for a code address that may (or may not) be a VtableStub entry point.
// Reads the would-be _type and _index fields as raw bytes instead of going
// through a VtableStub*, because the memory may not actually hold one.
inline uint VtableStubs::unsafe_hash(address entry_point) {
  // The entrypoint may or may not be a VtableStub. Generate a hash as if it was.
  address vtable_stub_addr = entry_point - VtableStub::entry_offset();
  assert(CodeCache::contains(vtable_stub_addr), "assumed to always be the case");
  // Compute the raw addresses of the fields we need to peek at.
  address vtable_type_addr = vtable_stub_addr + offset_of(VtableStub, _type);
  address vtable_index_addr = vtable_stub_addr + offset_of(VtableStub, _index);
  bool is_vtable_stub = *vtable_type_addr == static_cast<uint8_t>(VtableStub::Type::vtable_stub);
  short vtable_index;
  // Guard the raw copy below: _index must have exactly this width.
  static_assert(sizeof(VtableStub::_index) == sizeof(vtable_index), "precondition");
  // memcpy avoids forming a typed pointer into possibly-unrelated memory
  // (safe with respect to alignment and aliasing).
  memcpy(&vtable_index, vtable_index_addr, sizeof(vtable_index));
  return hash(is_vtable_stub, vtable_index);
}
269
270
271 VtableStub* VtableStubs::lookup(bool is_vtable_stub, int vtable_index) {
272 assert_lock_strong(VtableStubs_lock);
273 unsigned hash = VtableStubs::hash(is_vtable_stub, vtable_index);
274 VtableStub* s = AtomicAccess::load(&_table[hash]);
275 while( s && !s->matches(is_vtable_stub, vtable_index)) s = s->next();
276 return s;
277 }
278
279
// Register stub s in the hash table under VtableStubs_lock. The stub is
// linked in at the head of its bucket; the order of the two stores below is
// deliberate and must not be changed (next is written before s is published).
void VtableStubs::enter(bool is_vtable_stub, int vtable_index, VtableStub* s) {
  assert_lock_strong(VtableStubs_lock);
  assert(s->matches(is_vtable_stub, vtable_index), "bad vtable stub");
  unsigned int h = VtableStubs::hash(is_vtable_stub, vtable_index);
  // Insert s at the beginning of the corresponding list.
  s->set_next(AtomicAccess::load(&_table[h]));
  // Make sure that concurrent readers not taking the mutex observe the writing of "next".
  AtomicAccess::release_store(&_table[h], s);
}
289
290 VtableStub* VtableStubs::entry_point(address pc) {
291 // The pc may or may not be the entry point for a VtableStub. Use unsafe_hash
292 // to generate the hash that would have been used if it was. The lookup in the
293 // _table will only succeed if there is a VtableStub with an entry point at
294 // the pc.
295 MutexLocker ml(VtableStubs_lock, Mutex::_no_safepoint_check_flag);
296 uint hash = VtableStubs::unsafe_hash(pc);
297 VtableStub* s;
298 for (s = AtomicAccess::load(&_table[hash]); s != nullptr && s->entry_point() != pc; s = s->next()) {}
299 return (s != nullptr && s->entry_point() == pc) ? s : nullptr;
300 }
301
302 bool VtableStubs::contains(address pc) {
303 // simple solution for now - we may want to use
|
190 (int)(masm->pc() - s->code_begin()),
191 stub_length,
192 (int)(s->code_end() - masm->pc()));
193 }
194 guarantee(masm->pc() <= s->code_end(), "%s #%d: overflowed buffer, estimated len: %d, actual len: %d, overrun: %d",
195 name, index, stub_length,
196 (int)(masm->pc() - s->code_begin()),
197 (int)(masm->pc() - s->code_end()));
198 assert((masm->pc() + index_dependent_slop) <= s->code_end(), "%s #%d: spare space for 32-bit offset: required = %d, available = %d",
199 name, index, index_dependent_slop,
200 (int)(s->code_end() - masm->pc()));
201
202 // After the first vtable/itable stub is generated, we have a much
203 // better estimate for the stub size. Remember/update this
204 // estimate after some sanity checks.
205 check_and_set_size_limit(is_vtable_stub, masm->offset(), slop_bytes);
206 s->set_exception_points(npe_addr, ame_addr);
207 }
208
209
210 address VtableStubs::find_stub(bool is_vtable_stub, int vtable_index, bool caller_is_c1) {
211 assert(vtable_index >= 0, "must be positive");
212
213 VtableStub* s;
214 {
215 MutexLocker ml(VtableStubs_lock, Mutex::_no_safepoint_check_flag);
216 s = lookup(is_vtable_stub, vtable_index, caller_is_c1);
217 if (s == nullptr) {
218 if (is_vtable_stub) {
219 s = create_vtable_stub(vtable_index, caller_is_c1);
220 } else {
221 s = create_itable_stub(vtable_index, caller_is_c1);
222 }
223
224 // Creation of vtable or itable can fail if there is not enough free space in the code cache.
225 if (s == nullptr) {
226 return nullptr;
227 }
228
229 enter(is_vtable_stub, vtable_index, caller_is_c1, s);
230 if (PrintAdapterHandlers) {
231 tty->print_cr("Decoding VtableStub (%s) %s[%d]@" PTR_FORMAT " [" PTR_FORMAT ", " PTR_FORMAT "] (%zu bytes)",
232 caller_is_c1 ? "c1" : "full opt",
233 is_vtable_stub? "vtbl": "itbl", vtable_index, p2i(VtableStub::receiver_location()),
234 p2i(s->code_begin()), p2i(s->code_end()), pointer_delta(s->code_end(), s->code_begin(), 1));
235 Disassembler::decode(s->code_begin(), s->code_end());
236 }
237 // Notify JVMTI about this stub. The event will be recorded by the enclosing
238 // JvmtiDynamicCodeEventCollector and posted when this thread has released
239 // all locks. Only post this event if a new state is not required. Creating a new state would
240 // cause a safepoint and the caller of this code has a NoSafepointVerifier.
241 if (JvmtiExport::should_post_dynamic_code_generated()) {
242 JvmtiExport::post_dynamic_code_generated_while_holding_locks(is_vtable_stub? "vtable stub": "itable stub", // FIXME: need to pass caller_is_c1??
243 s->code_begin(), s->code_end());
244 }
245 }
246 }
247 return s->entry_point();
248 }
249
250
251 inline uint VtableStubs::hash(bool is_vtable_stub, int vtable_index, bool caller_is_c1) {
252 // Assumption: receiver_location < 4 in most cases.
253 int hash = ((vtable_index << 2) ^ VtableStub::receiver_location()->value()) + vtable_index;
254 if (caller_is_c1) {
255 hash = 7 - hash;
256 }
257 return (is_vtable_stub ? ~hash : hash) & mask;
258 }
259
260
// Hash for a code address that may (or may not) be a VtableStub entry point.
// Reads the would-be _type, _caller_type, and _index fields as raw bytes
// instead of going through a VtableStub*, because the memory may not
// actually hold one.
inline uint VtableStubs::unsafe_hash(address entry_point) {
  // The entrypoint may or may not be a VtableStub. Generate a hash as if it was.
  address vtable_stub_addr = entry_point - VtableStub::entry_offset();
  assert(CodeCache::contains(vtable_stub_addr), "assumed to always be the case");
  // Compute the raw addresses of the fields we need to peek at.
  address vtable_type_addr = vtable_stub_addr + offset_of(VtableStub, _type);
  address vtable_caller_type_addr = vtable_stub_addr + offset_of(VtableStub, _caller_type);
  address vtable_index_addr = vtable_stub_addr + offset_of(VtableStub, _index);
  bool is_vtable_stub = *vtable_type_addr == static_cast<uint8_t>(VtableStub::Type::vtable_stub);
  bool caller_is_c1 = (*vtable_caller_type_addr == static_cast<uint8_t>(VtableStub::CallerType::c1));
  short vtable_index;
  // Guard the raw copy below: _index must have exactly this width.
  static_assert(sizeof(VtableStub::_index) == sizeof(vtable_index), "precondition");
  // memcpy avoids forming a typed pointer into possibly-unrelated memory
  // (safe with respect to alignment and aliasing).
  memcpy(&vtable_index, vtable_index_addr, sizeof(vtable_index));
  return hash(is_vtable_stub, vtable_index, caller_is_c1);
}
275
276 VtableStub* VtableStubs::lookup(bool is_vtable_stub, int vtable_index, bool caller_is_c1) {
277 assert_lock_strong(VtableStubs_lock);
278 unsigned hash = VtableStubs::hash(is_vtable_stub, vtable_index, caller_is_c1);
279 VtableStub* s = AtomicAccess::load(&_table[hash]);
280 while( s && !s->matches(is_vtable_stub, vtable_index, caller_is_c1)) s = s->next();
281 return s;
282 }
283
284
// Register stub s in the hash table under VtableStubs_lock. The stub is
// linked in at the head of its bucket; the order of the two stores below is
// deliberate and must not be changed (next is written before s is published).
void VtableStubs::enter(bool is_vtable_stub, int vtable_index, bool caller_is_c1, VtableStub* s) {
  assert_lock_strong(VtableStubs_lock);
  assert(s->matches(is_vtable_stub, vtable_index, caller_is_c1), "bad vtable stub");
  unsigned int h = VtableStubs::hash(is_vtable_stub, vtable_index, caller_is_c1);
  // Insert s at the beginning of the corresponding list.
  s->set_next(AtomicAccess::load(&_table[h]));
  // Make sure that concurrent readers not taking the mutex observe the writing of "next".
  AtomicAccess::release_store(&_table[h], s);
}
294
295 VtableStub* VtableStubs::entry_point(address pc) {
296 // The pc may or may not be the entry point for a VtableStub. Use unsafe_hash
297 // to generate the hash that would have been used if it was. The lookup in the
298 // _table will only succeed if there is a VtableStub with an entry point at
299 // the pc.
300 MutexLocker ml(VtableStubs_lock, Mutex::_no_safepoint_check_flag);
301 uint hash = VtableStubs::unsafe_hash(pc);
302 VtableStub* s;
303 for (s = AtomicAccess::load(&_table[hash]); s != nullptr && s->entry_point() != pc; s = s->next()) {}
304 return (s != nullptr && s->entry_point() == pc) ? s : nullptr;
305 }
306
307 bool VtableStubs::contains(address pc) {
308 // simple solution for now - we may want to use
|