/*
 * Copyright (c) 1997, 2025, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#include "code/vtableStubs.hpp"
#include "compiler/compileBroker.hpp"
#include "compiler/disassembler.hpp"
#include "logging/log.hpp"
#include "memory/allocation.inline.hpp"
#include "memory/resourceArea.hpp"
#include "oops/instanceKlass.hpp"
#include "oops/klass.inline.hpp"
#include "oops/klassVtable.hpp"
#include "oops/oop.inline.hpp"
#include "prims/forte.hpp"
#include "prims/jvmtiExport.hpp"
#include "runtime/handles.inline.hpp"
#include "runtime/mutexLocker.hpp"
#include "runtime/sharedRuntime.hpp"
#include "utilities/align.hpp"
#include "utilities/powerOfTwo.hpp"

// -----------------------------------------------------------------------------------------
// Implementation of VtableStub

address VtableStub::_chunk             = nullptr;
address VtableStub::_chunk_end         = nullptr;
VMReg   VtableStub::_receiver_location = VMRegImpl::Bad();


void* VtableStub::operator new(size_t size, int code_size) throw() {
  assert_lock_strong(VtableStubs_lock);
  assert(size == sizeof(VtableStub), "mismatched size");
  // Compute the real VtableStub size (header + code), rounded up to a whole word.
  const int real_size = align_up(code_size + (int)sizeof(VtableStub), wordSize);
  // Allocate stubs in chunks to minimize the per-CodeBlob header overhead.
  const int chunk_factor = 32;
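  // Illustrative arithmetic (the sizes below are assumptions, not the real
  // values): on a 64-bit VM with sizeof(VtableStub) == 24 and code_size == 40,
  // real_size = align_up(64, 8) = 64, so one chunk provides room for 32 such
  // stubs plus pd_code_alignment() bytes of alignment slack.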
  if (_chunk == nullptr || _chunk + real_size > _chunk_end) {
    const int bytes = chunk_factor * real_size + pd_code_alignment();

    // There is a dependency on the name of the blob in src/share/vm/prims/jvmtiCodeBlobEvents.cpp
    // If changing the name, update the other file accordingly.
    VtableBlob* blob = VtableBlob::create("vtable chunks", bytes);
    if (blob == nullptr) {
      return nullptr;
    }
    _chunk = blob->content_begin();
    _chunk_end = _chunk + bytes;
    Forte::register_stub("vtable stub", _chunk, _chunk_end);
    align_chunk();
  }
  assert(_chunk + real_size <= _chunk_end, "bad allocation");
  void* res = _chunk;
  _chunk += real_size;
  align_chunk();
  return res;
}


void VtableStub::print_on(outputStream* st) const {
  st->print("vtable stub (index = %d, receiver_location = %zd, code = [" INTPTR_FORMAT ", " INTPTR_FORMAT "])",
            index(), p2i(receiver_location()), p2i(code_begin()), p2i(code_end()));
}

void VtableStub::print() const { print_on(tty); }

// -----------------------------------------------------------------------------------------
// Implementation of VtableStubs
//
// For each hash value there's a linked list of vtable stubs (with that
// hash value). Each list is anchored in a little hash _table, indexed
// by that hash value.
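//
// Schematically (illustrative, not an actual memory layout):
//
//   _table[h] --> VtableStub --> VtableStub --> nullptr
//
// Stubs are only ever pushed onto the front of a list (under VtableStubs_lock)
// and never removed, which is what permits the lock-free traversals below.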

VtableStub* volatile VtableStubs::_table[VtableStubs::N];
int VtableStubs::_vtab_stub_size = 0;
int VtableStubs::_itab_stub_size = 0;

#if defined(PRODUCT)
// These values are good for the PRODUCT case (no tracing).
static const int first_vtableStub_size =  64;
static const int first_itableStub_size = 256;
#else
// These values are good for the non-PRODUCT case (when tracing can be switched on).
// To find out, run test workload with
//   -Xlog:vtablestubs=Trace -XX:+CountCompiledCalls -XX:+DebugVtables
// and use the reported "estimate" value.
// Here is a list of observed worst-case values:
//                  vtable  itable
// aarch64:            460     324
// arm:                  ?       ?
// ppc (linux, BE):    404     288
// ppc (linux, LE):    356     276
// ppc (AIX):          416     296
// s390x:              408     256
// Solaris-sparc:      792     348
// x86 (Linux):        670     309
// x86 (MacOS):        682     321
static const int first_vtableStub_size = 1024;
static const int first_itableStub_size =  512;
#endif


void VtableStubs::initialize() {
  assert(VtableStub::_receiver_location == VMRegImpl::Bad(), "initialized multiple times?");

  VtableStub::_receiver_location = SharedRuntime::name_for_receiver();
  {
    MutexLocker ml(VtableStubs_lock, Mutex::_no_safepoint_check_flag);
    for (int i = 0; i < N; i++) {
      AtomicAccess::store(&_table[i], (VtableStub*)nullptr);
    }
  }
}


int VtableStubs::code_size_limit(bool is_vtable_stub) {
  if (is_vtable_stub) {
    return _vtab_stub_size > 0 ? _vtab_stub_size : first_vtableStub_size;
  } else { // itable stub
    return _itab_stub_size > 0 ? _itab_stub_size : first_itableStub_size;
  }
} // code_size_limit


void VtableStubs::check_and_set_size_limit(bool is_vtable_stub,
                                           int  code_size,
                                           int  padding) {
  const char* name = is_vtable_stub ? "vtable" : "itable";

  guarantee(code_size <= code_size_limit(is_vtable_stub),
            "buffer overflow in %s stub, code_size is %d, limit is %d", name, code_size, code_size_limit(is_vtable_stub));

  if (is_vtable_stub) {
    if (log_is_enabled(Trace, vtablestubs)) {
      if ((_vtab_stub_size > 0) && ((code_size + padding) > _vtab_stub_size)) {
        log_trace(vtablestubs)("%s size estimate needed adjustment from %d to %d bytes",
                               name, _vtab_stub_size, code_size + padding);
      }
    }
    if ((code_size + padding) > _vtab_stub_size) {
      _vtab_stub_size = code_size + padding;
    }
  } else { // itable stub
    if (log_is_enabled(Trace, vtablestubs)) {
      if ((_itab_stub_size > 0) && ((code_size + padding) > _itab_stub_size)) {
        log_trace(vtablestubs)("%s size estimate needed adjustment from %d to %d bytes",
                               name, _itab_stub_size, code_size + padding);
      }
    }
    if ((code_size + padding) > _itab_stub_size) {
      _itab_stub_size = code_size + padding;
    }
  }
} // check_and_set_size_limit


void VtableStubs::bookkeeping(MacroAssembler* masm, outputStream* out, VtableStub* s,
                              address npe_addr, address ame_addr, bool is_vtable_stub,
                              int index, int slop_bytes, int index_dependent_slop) {
  const char* name = is_vtable_stub ? "vtable" : "itable";
  const int   stub_length = code_size_limit(is_vtable_stub);

  if (log_is_enabled(Trace, vtablestubs)) {
    log_trace(vtablestubs)("%s #%d at " PTR_FORMAT ": size: %d, estimate: %d, slop area: %d",
                           name, index, p2i(s->code_begin()),
                           (int)(masm->pc() - s->code_begin()),
                           stub_length,
                           (int)(s->code_end() - masm->pc()));
  }
  guarantee(masm->pc() <= s->code_end(), "%s #%d: overflowed buffer, estimated len: %d, actual len: %d, overrun: %d",
            name, index, stub_length,
            (int)(masm->pc() - s->code_begin()),
            (int)(masm->pc() - s->code_end()));
  assert((masm->pc() + index_dependent_slop) <= s->code_end(), "%s #%d: spare space for 32-bit offset: required = %d, available = %d",
         name, index, index_dependent_slop,
         (int)(s->code_end() - masm->pc()));

  // After the first vtable/itable stub is generated, we have a much
  // better estimate for the stub size. Remember/update this
  // estimate after some sanity checks.
  check_and_set_size_limit(is_vtable_stub, masm->offset(), slop_bytes);
  s->set_exception_points(npe_addr, ame_addr);
}


address VtableStubs::find_stub(bool is_vtable_stub, int vtable_index, bool caller_is_c1) {
  assert(vtable_index >= 0, "must be non-negative");

  VtableStub* s;
  {
    MutexLocker ml(VtableStubs_lock, Mutex::_no_safepoint_check_flag);
    s = lookup(is_vtable_stub, vtable_index, caller_is_c1);
    if (s == nullptr) {
      if (is_vtable_stub) {
        s = create_vtable_stub(vtable_index, caller_is_c1);
      } else {
        s = create_itable_stub(vtable_index, caller_is_c1);
      }

      // Creation of vtable or itable can fail if there is not enough free space in the code cache.
      if (s == nullptr) {
        return nullptr;
      }

      enter(is_vtable_stub, vtable_index, caller_is_c1, s);
      if (PrintAdapterHandlers) {
        tty->print_cr("Decoding VtableStub (%s) %s[%d]@" PTR_FORMAT " [" PTR_FORMAT ", " PTR_FORMAT "] (%zu bytes)",
                      caller_is_c1 ? "c1" : "full opt",
                      is_vtable_stub ? "vtbl" : "itbl", vtable_index, p2i(VtableStub::receiver_location()),
                      p2i(s->code_begin()), p2i(s->code_end()), pointer_delta(s->code_end(), s->code_begin(), 1));
        Disassembler::decode(s->code_begin(), s->code_end());
      }
      // Notify JVMTI about this stub. The event will be recorded by the enclosing
      // JvmtiDynamicCodeEventCollector and posted when this thread has released
      // all locks. Only post this event if a new state is not required. Creating a new state would
      // cause a safepoint and the caller of this code has a NoSafepointVerifier.
      if (JvmtiExport::should_post_dynamic_code_generated()) {
        JvmtiExport::post_dynamic_code_generated_while_holding_locks(is_vtable_stub ? "vtable stub" : "itable stub", // FIXME: need to pass caller_is_c1??
                                                                     s->code_begin(), s->code_end());
      }
    }
  }
  return s->entry_point();
}


inline uint VtableStubs::hash(bool is_vtable_stub, int vtable_index, bool caller_is_c1) {
  // Assumption: receiver_location < 4 in most cases.
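  //
  // Illustrative example (assumed values): with vtable_index == 5 and a
  // receiver_location value of 1,
  //   hash = ((5 << 2) ^ 1) + 5 = 21 + 5 = 26
  //   if caller_is_c1:   hash = 7 - 26 = -19
  //   if is_vtable_stub: ~(-19) = 18, then masked to the table range.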
  int hash = ((vtable_index << 2) ^ VtableStub::receiver_location()->value()) + vtable_index;
  if (caller_is_c1) {
    hash = 7 - hash;
  }
  return (is_vtable_stub ? ~hash : hash) & mask;
}


inline uint VtableStubs::unsafe_hash(address entry_point, bool caller_is_c1) {
  // The entry point may or may not be that of a VtableStub. Generate a hash as if it were.
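  // Because the memory might not actually contain a VtableStub, the fields are
  // read as raw bytes (memcpy for the two-byte _index avoids a potentially
  // misaligned typed load); the result is only trusted once the subsequent
  // table lookup confirms a genuine match.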
  address vtable_stub_addr = entry_point - VtableStub::entry_offset();
  assert(CodeCache::contains(vtable_stub_addr), "assumed to always be the case");
  address vtable_type_addr = vtable_stub_addr + offset_of(VtableStub, _type);
  address vtable_index_addr = vtable_stub_addr + offset_of(VtableStub, _index);
  bool is_vtable_stub = *vtable_type_addr == static_cast<uint8_t>(VtableStub::Type::vtable_stub);
  short vtable_index;
  static_assert(sizeof(VtableStub::_index) == sizeof(vtable_index), "precondition");
  memcpy(&vtable_index, vtable_index_addr, sizeof(vtable_index));
  return hash(is_vtable_stub, vtable_index, caller_is_c1);
}

VtableStub* VtableStubs::lookup(bool is_vtable_stub, int vtable_index, bool caller_is_c1) {
  assert_lock_strong(VtableStubs_lock);
  unsigned hash = VtableStubs::hash(is_vtable_stub, vtable_index, caller_is_c1);
  VtableStub* s = AtomicAccess::load(&_table[hash]);
  while (s != nullptr && !s->matches(is_vtable_stub, vtable_index, caller_is_c1)) s = s->next();
  return s;
}


void VtableStubs::enter(bool is_vtable_stub, int vtable_index, bool caller_is_c1, VtableStub* s) {
  assert_lock_strong(VtableStubs_lock);
  assert(s->matches(is_vtable_stub, vtable_index, caller_is_c1), "bad vtable stub");
  unsigned int h = VtableStubs::hash(is_vtable_stub, vtable_index, caller_is_c1);
  // Insert s at the beginning of the corresponding list.
  s->set_next(AtomicAccess::load(&_table[h]));
  // Make sure that concurrent readers not taking the mutex observe the writing of "next".
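  // (Lock-free readers pair with this release_store via the load_acquire in
  // stub_containing() and vtable_stub_do() below.)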
  AtomicAccess::release_store(&_table[h], s);
}

VtableStub* VtableStubs::entry_point(address pc) {
  // The pc may or may not be the entry point of a VtableStub. Use unsafe_hash
  // to generate the hash that would have been used if it were. The lookup in
  // the _table will only succeed if there is a VtableStub with an entry point
  // at the pc.
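  // Note: the caller_is_c1() read below is the same kind of unchecked access
  // as in unsafe_hash(); it is validated only by the entry-point comparison
  // that follows.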
  MutexLocker ml(VtableStubs_lock, Mutex::_no_safepoint_check_flag);
  VtableStub* stub = (VtableStub*)(pc - VtableStub::entry_offset());
  uint hash = VtableStubs::unsafe_hash(pc, stub->caller_is_c1());
  VtableStub* s;
  for (s = AtomicAccess::load(&_table[hash]); s != nullptr && s->entry_point() != pc; s = s->next()) {}
  return (s != nullptr && s->entry_point() == pc) ? s : nullptr;
}

bool VtableStubs::contains(address pc) {
  // Simple solution for now; we may want a faster way if this function is called often.
  return stub_containing(pc) != nullptr;
}


VtableStub* VtableStubs::stub_containing(address pc) {
  for (int i = 0; i < N; i++) {
    for (VtableStub* s = AtomicAccess::load_acquire(&_table[i]); s != nullptr; s = s->next()) {
      if (s->contains(pc)) return s;
    }
  }
  return nullptr;
}

void vtableStubs_init() {
  VtableStubs::initialize();
}

void VtableStubs::vtable_stub_do(void f(VtableStub*)) {
  for (int i = 0; i < N; i++) {
    for (VtableStub* s = AtomicAccess::load_acquire(&_table[i]); s != nullptr; s = s->next()) {
      f(s);
    }
  }
}


//-----------------------------------------------------------------------------------------------------
// Non-product code
#ifndef PRODUCT

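// Called from generated vtable stubs when dispatch sanity checking
// (-XX:+DebugVtables) detects an out-of-range vtable index: print the
// receiver's klass and abort with a fatal error.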
336 extern "C" void bad_compiled_vtable_index(JavaThread* thread, oop receiver, int index) {
337 ResourceMark rm;
338 Klass* klass = receiver->klass();
339 InstanceKlass* ik = InstanceKlass::cast(klass);
340 klassVtable vt = ik->vtable();
341 ik->print();
342 fatal("bad compiled vtable dispatch: receiver " INTPTR_FORMAT ", "
343 "index %d (vtable length %d)",
344 p2i(receiver), index, vt.length());
345 }
346
347 #endif // PRODUCT