1 /*
  2  * Copyright (c) 1997, 2025, Oracle and/or its affiliates. All rights reserved.
  3  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  4  *
  5  * This code is free software; you can redistribute it and/or modify it
  6  * under the terms of the GNU General Public License version 2 only, as
  7  * published by the Free Software Foundation.
  8  *
  9  * This code is distributed in the hope that it will be useful, but WITHOUT
 10  * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 11  * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
 12  * version 2 for more details (a copy is included in the LICENSE file that
 13  * accompanied this code).
 14  *
 15  * You should have received a copy of the GNU General Public License version
 16  * 2 along with this work; if not, write to the Free Software Foundation,
 17  * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 18  *
 19  * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 20  * or visit www.oracle.com if you need additional information or have any
 21  * questions.
 22  *
 23  */
 24 
 25 #include "code/vtableStubs.hpp"
 26 #include "compiler/compileBroker.hpp"
 27 #include "compiler/disassembler.hpp"
 28 #include "logging/log.hpp"
 29 #include "memory/allocation.inline.hpp"
 30 #include "memory/resourceArea.hpp"
 31 #include "oops/instanceKlass.hpp"
 32 #include "oops/klass.inline.hpp"
 33 #include "oops/klassVtable.hpp"
 34 #include "oops/oop.inline.hpp"
 35 #include "prims/forte.hpp"
 36 #include "prims/jvmtiExport.hpp"
 37 #include "runtime/handles.inline.hpp"
 38 #include "runtime/mutexLocker.hpp"
 39 #include "runtime/sharedRuntime.hpp"
 40 #include "utilities/align.hpp"
 41 #include "utilities/powerOfTwo.hpp"
 42 
 43 // -----------------------------------------------------------------------------------------
 44 // Implementation of VtableStub
 45 
// Bump-pointer allocation state for stub storage; guarded by VtableStubs_lock.
address VtableStub::_chunk             = nullptr;  // next free byte in the current VtableBlob chunk
address VtableStub::_chunk_end         = nullptr;  // one past the last usable byte of the current chunk
VMReg   VtableStub::_receiver_location = VMRegImpl::Bad();  // set once in VtableStubs::initialize()
 49 
 50 
 51 void* VtableStub::operator new(size_t size, int code_size) throw() {
 52   assert_lock_strong(VtableStubs_lock);
 53   assert(size == sizeof(VtableStub), "mismatched size");
 54 
 55   MACOS_AARCH64_ONLY(os::thread_wx_enable_write());
 56 
 57   // compute real VtableStub size (rounded to nearest word)
 58   const int real_size = align_up(code_size + (int)sizeof(VtableStub), wordSize);
 59   // malloc them in chunks to minimize header overhead
 60   const int chunk_factor = 32;
 61   if (_chunk == nullptr || _chunk + real_size > _chunk_end) {
 62     const int bytes = chunk_factor * real_size + pd_code_alignment();
 63 
 64    // There is a dependency on the name of the blob in src/share/vm/prims/jvmtiCodeBlobEvents.cpp
 65    // If changing the name, update the other file accordingly.
 66     VtableBlob* blob = VtableBlob::create("vtable chunks", bytes);
 67     if (blob == nullptr) {
 68       return nullptr;
 69     }
 70     _chunk = blob->content_begin();
 71     _chunk_end = _chunk + bytes;
 72     Forte::register_stub("vtable stub", _chunk, _chunk_end);
 73     align_chunk();
 74   }
 75   assert(_chunk + real_size <= _chunk_end, "bad allocation");
 76   void* res = _chunk;
 77   _chunk += real_size;
 78   align_chunk();
 79  return res;
 80 }
 81 
 82 
// Prints a one-line human-readable summary of this stub (index, receiver
// register, and code address range) to the given stream.
void VtableStub::print_on(outputStream* st) const {
  st->print("vtable stub (index = %d, receiver_location = %zd, code = [" INTPTR_FORMAT ", " INTPTR_FORMAT "])",
             index(), p2i(receiver_location()), p2i(code_begin()), p2i(code_end()));
}
 87 
 88 void VtableStub::print() const { print_on(tty); }
 89 
 90 // -----------------------------------------------------------------------------------------
 91 // Implementation of VtableStubs
 92 //
 93 // For each hash value there's a linked list of vtable stubs (with that
 94 // hash value). Each list is anchored in a little hash _table, indexed
 95 // by that hash value.
 96 
// Hash table of stub chains, indexed by hash(is_vtable_stub, index, caller_is_c1).
VtableStub* volatile VtableStubs::_table[VtableStubs::N];
// Measured size limits; 0 until the first stub of each kind has been generated,
// after which code_size_limit() returns these instead of the static estimates.
int VtableStubs::_vtab_stub_size = 0;
int VtableStubs::_itab_stub_size = 0;
100 
// Initial buffer-size estimates used for the very first vtable/itable stub,
// before a measured size is available (see code_size_limit()).
#if defined(PRODUCT)
  // These values are good for the PRODUCT case (no tracing).
  static const int first_vtableStub_size =  64;
  static const int first_itableStub_size = 256;
#else
  // These values are good for the non-PRODUCT case (when tracing can be switched on).
  // To find out, run test workload with
  //   -Xlog:vtablestubs=Trace -XX:+CountCompiledCalls -XX:+DebugVtables
  // and use the reported "estimate" value.
  // Here is a list of observed worst-case values:
  //               vtable  itable
  // aarch64:         460     324
  // arm:               ?       ?
  // ppc (linux, BE): 404     288
  // ppc (linux, LE): 356     276
  // ppc (AIX):       416     296
  // s390x:           408     256
  // Solaris-sparc:   792     348
  // x86 (Linux):     670     309
  // x86 (MacOS):     682     321
  static const int first_vtableStub_size = 1024;
  static const int first_itableStub_size =  512;
#endif
124 
125 
126 void VtableStubs::initialize() {
127   assert(VtableStub::_receiver_location == VMRegImpl::Bad(), "initialized multiple times?");
128 
129   VtableStub::_receiver_location = SharedRuntime::name_for_receiver();
130   {
131     MutexLocker ml(VtableStubs_lock, Mutex::_no_safepoint_check_flag);
132     for (int i = 0; i < N; i++) {
133       AtomicAccess::store(&_table[i], (VtableStub*)nullptr);
134     }
135   }
136 }
137 
138 
139 int VtableStubs::code_size_limit(bool is_vtable_stub) {
140   if (is_vtable_stub) {
141     return _vtab_stub_size > 0 ? _vtab_stub_size : first_vtableStub_size;
142   } else { // itable stub
143     return _itab_stub_size > 0 ? _itab_stub_size : first_itableStub_size;
144   }
145 }   // code_size_limit
146 
147 
148 void VtableStubs::check_and_set_size_limit(bool is_vtable_stub,
149                                            int  code_size,
150                                            int  padding) {
151   const char* name = is_vtable_stub ? "vtable" : "itable";
152 
153   guarantee(code_size <= code_size_limit(is_vtable_stub),
154             "buffer overflow in %s stub, code_size is %d, limit is %d", name, code_size, code_size_limit(is_vtable_stub));
155 
156   if (is_vtable_stub) {
157     if (log_is_enabled(Trace, vtablestubs)) {
158       if ( (_vtab_stub_size > 0) && ((code_size + padding) > _vtab_stub_size) ) {
159         log_trace(vtablestubs)("%s size estimate needed adjustment from %d to %d bytes",
160                                name, _vtab_stub_size, code_size + padding);
161       }
162     }
163     if ( (code_size + padding) > _vtab_stub_size ) {
164       _vtab_stub_size = code_size + padding;
165     }
166   } else {  // itable stub
167     if (log_is_enabled(Trace, vtablestubs)) {
168       if ( (_itab_stub_size > 0) && ((code_size + padding) > _itab_stub_size) ) {
169         log_trace(vtablestubs)("%s size estimate needed adjustment from %d to %d bytes",
170                                name, _itab_stub_size, code_size + padding);
171       }
172     }
173     if ( (code_size + padding) > _itab_stub_size ) {
174       _itab_stub_size = code_size + padding;
175     }
176   }
177   return;
178 }   // check_and_set_size_limit
179 
180 
// Shared epilogue for the platform-specific stub generators: logs the final
// code size, verifies the generated code fit into the pre-sized buffer
// (including the index-dependent slop reserved for larger offsets), updates
// the global size estimate, and records the NPE/AME fault addresses in the stub.
//   masm                 - assembler positioned just past the emitted stub code
//   npe_addr / ame_addr  - addresses where a NullPointerException /
//                          AbstractMethodError may be raised inside the stub
//   slop_bytes           - padding added to the size estimate for future stubs
//   index_dependent_slop - extra space that must remain free for stubs with
//                          large vtable/itable indices (wider encodings)
void VtableStubs::bookkeeping(MacroAssembler* masm, outputStream* out, VtableStub* s,
                              address npe_addr, address ame_addr,   bool is_vtable_stub,
                              int     index,    int     slop_bytes, int  index_dependent_slop) {
  const char* name        = is_vtable_stub ? "vtable" : "itable";
  const int   stub_length = code_size_limit(is_vtable_stub);

  if (log_is_enabled(Trace, vtablestubs)) {
    log_trace(vtablestubs)("%s #%d at " PTR_FORMAT ": size: %d, estimate: %d, slop area: %d",
                           name, index, p2i(s->code_begin()),
                           (int)(masm->pc() - s->code_begin()),
                           stub_length,
                           (int)(s->code_end() - masm->pc()));
  }
  // Hard failure if the code overran the buffer ...
  guarantee(masm->pc() <= s->code_end(), "%s #%d: overflowed buffer, estimated len: %d, actual len: %d, overrun: %d",
                                         name, index, stub_length,
                                         (int)(masm->pc() - s->code_begin()),
                                         (int)(masm->pc() - s->code_end()));
  // ... debug-only check that the reserved slop for wider index encodings remains free.
  assert((masm->pc() + index_dependent_slop) <= s->code_end(), "%s #%d: spare space for 32-bit offset: required = %d, available = %d",
                                         name, index, index_dependent_slop,
                                         (int)(s->code_end() - masm->pc()));

  // After the first vtable/itable stub is generated, we have a much
  // better estimate for the stub size. Remember/update this
  // estimate after some sanity checks.
  check_and_set_size_limit(is_vtable_stub, masm->offset(), slop_bytes);
  s->set_exception_points(npe_addr, ame_addr);
}
208 
209 
// Returns the entry point of the stub for (kind, vtable_index, caller_is_c1),
// creating and registering it under VtableStubs_lock if it does not exist yet.
// Returns nullptr only if stub creation failed (code cache full).
address VtableStubs::find_stub(bool is_vtable_stub, int vtable_index, bool caller_is_c1) {
  assert(vtable_index >= 0, "must be positive");

  VtableStub* s;
  {
    // Lookup and insert must be atomic with respect to other creators;
    // readers of _table may run lock-free (see enter()).
    MutexLocker ml(VtableStubs_lock, Mutex::_no_safepoint_check_flag);
    s = lookup(is_vtable_stub, vtable_index, caller_is_c1);
    if (s == nullptr) {
      if (is_vtable_stub) {
        s = create_vtable_stub(vtable_index, caller_is_c1);
      } else {
        s = create_itable_stub(vtable_index, caller_is_c1);
      }

      // Creation of vtable or itable can fail if there is not enough free space in the code cache.
      if (s == nullptr) {
        return nullptr;
      }

      enter(is_vtable_stub, vtable_index, caller_is_c1, s);
      if (PrintAdapterHandlers) {
        tty->print_cr("Decoding VtableStub (%s) %s[%d]@" PTR_FORMAT " [" PTR_FORMAT ", " PTR_FORMAT "] (%zu bytes)",
                      caller_is_c1 ? "c1" : "full opt",
                      is_vtable_stub? "vtbl": "itbl", vtable_index, p2i(VtableStub::receiver_location()),
                      p2i(s->code_begin()), p2i(s->code_end()), pointer_delta(s->code_end(), s->code_begin(), 1));
        Disassembler::decode(s->code_begin(), s->code_end());
      }
      // Notify JVMTI about this stub. The event will be recorded by the enclosing
      // JvmtiDynamicCodeEventCollector and posted when this thread has released
      // all locks. Only post this event if a new state is not required. Creating a new state would
      // cause a safepoint and the caller of this code has a NoSafepointVerifier.
      if (JvmtiExport::should_post_dynamic_code_generated()) {
        JvmtiExport::post_dynamic_code_generated_while_holding_locks(is_vtable_stub? "vtable stub": "itable stub",  // FIXME: need to pass caller_is_c1??
                                                                     s->code_begin(), s->code_end());
      }
    }
  }
  return s->entry_point();
}
249 
250 
251 inline uint VtableStubs::hash(bool is_vtable_stub, int vtable_index, bool caller_is_c1) {
252   // Assumption: receiver_location < 4 in most cases.
253   int hash = ((vtable_index << 2) ^ VtableStub::receiver_location()->value()) + vtable_index;
254   if (caller_is_c1) {
255     hash = 7 - hash;
256   }
257   return (is_vtable_stub ? ~hash : hash)  & mask;
258 }
259 
260 
// Computes the hash a VtableStub with the given entry point WOULD have, by
// reading the _type and _index fields at the offsets a VtableStub header
// would occupy. NOTE(review): "unsafe" because entry_point may not actually
// be a stub — the bytes read are then arbitrary code-cache content, which is
// harmless since the caller only uses the hash for a verified table lookup.
inline uint VtableStubs::unsafe_hash(address entry_point, bool caller_is_c1) {
  // The entrypoint may or may not be a VtableStub. Generate a hash as if it was.
  address vtable_stub_addr = entry_point - VtableStub::entry_offset();
  assert(CodeCache::contains(vtable_stub_addr), "assumed to always be the case");
  address vtable_type_addr = vtable_stub_addr + offset_of(VtableStub, _type);
  address vtable_index_addr = vtable_stub_addr + offset_of(VtableStub, _index);
  bool is_vtable_stub = *vtable_type_addr == static_cast<uint8_t>(VtableStub::Type::vtable_stub);
  short vtable_index;
  static_assert(sizeof(VtableStub::_index) == sizeof(vtable_index), "precondition");
  // memcpy avoids a potentially misaligned direct load of the index field.
  memcpy(&vtable_index, vtable_index_addr, sizeof(vtable_index));
  return hash(is_vtable_stub, vtable_index, caller_is_c1);
}
273 
274 VtableStub* VtableStubs::lookup(bool is_vtable_stub, int vtable_index, bool caller_is_c1) {
275   assert_lock_strong(VtableStubs_lock);
276   unsigned hash = VtableStubs::hash(is_vtable_stub, vtable_index, caller_is_c1);
277   VtableStub* s = AtomicAccess::load(&_table[hash]);
278   while( s && !s->matches(is_vtable_stub, vtable_index, caller_is_c1)) s = s->next();
279   return s;
280 }
281 
282 
283 void VtableStubs::enter(bool is_vtable_stub, int vtable_index, bool caller_is_c1, VtableStub* s) {
284   assert_lock_strong(VtableStubs_lock);
285   assert(s->matches(is_vtable_stub, vtable_index, caller_is_c1), "bad vtable stub");
286   unsigned int h = VtableStubs::hash(is_vtable_stub, vtable_index, caller_is_c1);
287   // Insert s at the beginning of the corresponding list.
288   s->set_next(AtomicAccess::load(&_table[h]));
289   // Make sure that concurrent readers not taking the mutex observe the writing of "next".
290   AtomicAccess::release_store(&_table[h], s);
291 }
292 
// Returns the VtableStub whose entry point is exactly pc, or nullptr if pc is
// not a stub entry point. Works by speculatively treating pc as a stub entry
// (safe: the speculative reads only feed a hash whose lookup is then verified
// against the real table).
VtableStub* VtableStubs::entry_point(address pc) {
  // The pc may or may not be the entry point for a VtableStub. Use unsafe_hash
  // to generate the hash that would have been used if it was. The lookup in the
  // _table will only succeed if there is a VtableStub with an entry point at
  // the pc.
  MutexLocker ml(VtableStubs_lock, Mutex::_no_safepoint_check_flag);
  VtableStub* stub = (VtableStub*)(pc - VtableStub::entry_offset());
  uint hash = VtableStubs::unsafe_hash(pc, stub->caller_is_c1());
  VtableStub* s;
  for (s = AtomicAccess::load(&_table[hash]); s != nullptr && s->entry_point() != pc; s = s->next()) {}
  return (s != nullptr && s->entry_point() == pc) ? s : nullptr;
}
305 
306 bool VtableStubs::contains(address pc) {
307   // simple solution for now - we may want to use
308   // a faster way if this function is called often
309   return stub_containing(pc) != nullptr;
310 }
311 
312 
313 VtableStub* VtableStubs::stub_containing(address pc) {
314   for (int i = 0; i < N; i++) {
315     for (VtableStub* s = AtomicAccess::load_acquire(&_table[i]); s != nullptr; s = s->next()) {
316       if (s->contains(pc)) return s;
317     }
318   }
319   return nullptr;
320 }
321 
// VM startup hook: delegates to VtableStubs::initialize().
void vtableStubs_init() {
  VtableStubs::initialize();
}
325 
326 void VtableStubs::vtable_stub_do(void f(VtableStub*)) {
327   for (int i = 0; i < N; i++) {
328     for (VtableStub* s = AtomicAccess::load_acquire(&_table[i]); s != nullptr; s = s->next()) {
329       f(s);
330     }
331   }
332 }
333 
334 
335 //-----------------------------------------------------------------------------------------------------
336 // Non-product code
337 #ifndef PRODUCT
338 
339 extern "C" void bad_compiled_vtable_index(JavaThread* thread, oop receiver, int index) {
340   ResourceMark rm;
341   Klass* klass = receiver->klass();
342   InstanceKlass* ik = InstanceKlass::cast(klass);
343   klassVtable vt = ik->vtable();
344   ik->print();
345   fatal("bad compiled vtable dispatch: receiver " INTPTR_FORMAT ", "
346         "index %d (vtable length %d)",
347         p2i(receiver), index, vt.length());
348 }
349 
350 #endif // PRODUCT