                 (int)(masm->pc() - s->code_begin()),
                 stub_length,
                 (int)(s->code_end() - masm->pc()));
  }
  guarantee(masm->pc() <= s->code_end(), "%s #%d: overflowed buffer, estimated len: %d, actual len: %d, overrun: %d",
            name, index, stub_length,
            (int)(masm->pc() - s->code_begin()),
            (int)(masm->pc() - s->code_end()));
  assert((masm->pc() + index_dependent_slop) <= s->code_end(), "%s #%d: spare space for 32-bit offset: required = %d, available = %d",
         name, index, index_dependent_slop,
         (int)(s->code_end() - masm->pc()));

  // After the first vtable/itable stub is generated, we have a much
  // better estimate for the stub size. Remember/update this
  // estimate after some sanity checks.
  check_and_set_size_limit(is_vtable_stub, masm->offset(), slop_bytes);
  s->set_exception_points(npe_addr, ame_addr);
}
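
// A minimal sketch of the feedback loop above (hypothetical names, not the
// actual implementation of check_and_set_size_limit): the measured size of
// the first stub of each kind, plus some slop, becomes the lower bound used
// when sizing the code buffer for later stubs of that kind, roughly
//   size_limit = MAX2(size_limit, code_size + slop_bytes);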


address VtableStubs::find_stub(bool is_vtable_stub, int vtable_index, bool caller_is_c1) {
  assert(vtable_index >= 0, "must be non-negative");

  VtableStub* s;
  {
    MutexLocker ml(VtableStubs_lock, Mutex::_no_safepoint_check_flag);
    s = lookup(is_vtable_stub, vtable_index, caller_is_c1);
    if (s == nullptr) {
      if (is_vtable_stub) {
        s = create_vtable_stub(vtable_index, caller_is_c1);
      } else {
        s = create_itable_stub(vtable_index, caller_is_c1);
      }

      // Creation of a vtable or itable stub may fail if there is not enough free space in the code cache.
      if (s == nullptr) {
        return nullptr;
      }

      enter(is_vtable_stub, vtable_index, caller_is_c1, s);
      if (PrintAdapterHandlers) {
        tty->print_cr("Decoding VtableStub (%s) %s[%d]@" PTR_FORMAT " [" PTR_FORMAT ", " PTR_FORMAT "] (" SIZE_FORMAT " bytes)",
                      caller_is_c1 ? "c1" : "full opt",
                      is_vtable_stub ? "vtbl" : "itbl", vtable_index, p2i(VtableStub::receiver_location()),
                      p2i(s->code_begin()), p2i(s->code_end()), pointer_delta(s->code_end(), s->code_begin(), 1));
        Disassembler::decode(s->code_begin(), s->code_end());
      }
      // Notify JVMTI about this stub. The event will be recorded by the enclosing
      // JvmtiDynamicCodeEventCollector and posted when this thread has released
      // all locks. Only post this event if a new state is not required. Creating a new
      // state would cause a safepoint, and the caller of this code has a NoSafepointVerifier.
      if (JvmtiExport::should_post_dynamic_code_generated()) {
        JvmtiExport::post_dynamic_code_generated_while_holding_locks(is_vtable_stub ? "vtable stub" : "itable stub", // FIXME: need to pass caller_is_c1??
                                                                     s->code_begin(), s->code_end());
      }
    }
  }
  return s->entry_point();
}


inline uint VtableStubs::hash(bool is_vtable_stub, int vtable_index, bool caller_is_c1) {
  // Assumption: receiver_location < 4 in most cases.
  int hash = ((vtable_index << 2) ^ VtableStub::receiver_location()->value()) + vtable_index;
  if (caller_is_c1) {
    // Perturb the hash so that stubs specialized for C1 callers land in different buckets.
    hash = 7 - hash;
  }
  return (is_vtable_stub ? ~hash : hash) & mask;
}
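
// A worked example for illustration (hypothetical values, assuming
// receiver_location()->value() == 0 and caller_is_c1 == false): with
// vtable_index == 5, the raw hash is ((5 << 2) ^ 0) + 5 == 25. The itable
// variant maps to bucket 25 & mask, the vtable variant to ~25 & mask; since
// the complement flips every bit covered by the mask, the two variants of
// the same raw hash always land in different buckets.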


VtableStub* VtableStubs::lookup(bool is_vtable_stub, int vtable_index, bool caller_is_c1) {
  assert_lock_strong(VtableStubs_lock);
  unsigned hash = VtableStubs::hash(is_vtable_stub, vtable_index, caller_is_c1);
  VtableStub* s = Atomic::load(&_table[hash]);
  while (s != nullptr && !s->matches(is_vtable_stub, vtable_index, caller_is_c1)) {
    s = s->next();
  }
  return s;
}


void VtableStubs::enter(bool is_vtable_stub, int vtable_index, bool caller_is_c1, VtableStub* s) {
  assert_lock_strong(VtableStubs_lock);
  assert(s->matches(is_vtable_stub, vtable_index, caller_is_c1), "bad vtable stub");
  unsigned int h = VtableStubs::hash(is_vtable_stub, vtable_index, caller_is_c1);
  // Insert s at the beginning of the corresponding list.
  s->set_next(Atomic::load(&_table[h]));
  // Release store: concurrent readers that do not take the mutex must observe
  // the write of "next" before they can see s as the new list head.
  Atomic::release_store(&_table[h], s);
}
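
// Note: the release_store above pairs with the Atomic::load_acquire used by
// lock-free readers such as stub_containing() below, so a reader that sees
// the new head is guaranteed to also see its initialized "next" pointer.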

VtableStub* VtableStubs::entry_point(address pc) {
  MutexLocker ml(VtableStubs_lock, Mutex::_no_safepoint_check_flag);
  // Map the entry point back to the VtableStub header that precedes it.
  VtableStub* stub = (VtableStub*)(pc - VtableStub::entry_offset());
  uint hash = VtableStubs::hash(stub->is_vtable_stub(), stub->index(), stub->caller_is_c1());
  // The pointer arithmetic above is only meaningful if pc really is a stub
  // entry point; confirm membership by searching the stub's hash chain.
  VtableStub* s;
  for (s = Atomic::load(&_table[hash]); s != nullptr && s != stub; s = s->next()) {}
  return (s == stub) ? s : nullptr;
}

bool VtableStubs::contains(address pc) {
  // Simple solution for now; we may want a faster scheme if this
  // function turns out to be called often.
  return stub_containing(pc) != nullptr;
}


VtableStub* VtableStubs::stub_containing(address pc) {
  for (int i = 0; i < N; i++) {
    for (VtableStub* s = Atomic::load_acquire(&_table[i]); s != nullptr; s = s->next()) {
      if (s->contains(pc)) return s;
    }
  }
  return nullptr;
}