/*
 * Copyright (c) 1997, 2025, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#include "code/vtableStubs.hpp"
#include "compiler/compileBroker.hpp"
#include "compiler/disassembler.hpp"
#include "logging/log.hpp"
#include "memory/allocation.inline.hpp"
#include "memory/resourceArea.hpp"
#include "oops/instanceKlass.hpp"
#include "oops/klass.inline.hpp"
#include "oops/klassVtable.hpp"
#include "oops/oop.inline.hpp"
#include "prims/forte.hpp"
#include "prims/jvmtiExport.hpp"
#include "runtime/handles.inline.hpp"
#include "runtime/mutexLocker.hpp"
#include "runtime/sharedRuntime.hpp"
#include "utilities/align.hpp"
#include "utilities/powerOfTwo.hpp"

// -----------------------------------------------------------------------------------------
// Implementation of VtableStub

address VtableStub::_chunk             = nullptr;
address VtableStub::_chunk_end         = nullptr;
VMReg   VtableStub::_receiver_location = VMRegImpl::Bad();


void* VtableStub::operator new(size_t size, int code_size) throw() {
  assert_lock_strong(VtableStubs_lock);
  assert(size == sizeof(VtableStub), "mismatched size");
  // Compute the real VtableStub size (rounded up to a whole word).
  const int real_size = align_up(code_size + (int)sizeof(VtableStub), wordSize);
  // Allocate stubs in chunks to minimize header overhead.
  const int chunk_factor = 32;
  if (_chunk == nullptr || _chunk + real_size > _chunk_end) {
    const int bytes = chunk_factor * real_size + pd_code_alignment();

    // There is a dependency on the name of the blob in src/share/vm/prims/jvmtiCodeBlobEvents.cpp
    // If changing the name, update the other file accordingly.
    VtableBlob* blob = VtableBlob::create("vtable chunks", bytes);
    if (blob == nullptr) {
      return nullptr;
    }
    _chunk = blob->content_begin();
    _chunk_end = _chunk + bytes;
    Forte::register_stub("vtable stub", _chunk, _chunk_end);
    align_chunk();
  }
  assert(_chunk + real_size <= _chunk_end, "bad allocation");
  void* res = _chunk;
  _chunk += real_size;
  align_chunk();
  return res;
}
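// Illustrative sizing note (an editorial sketch, not normative; the actual
// numbers depend on the platform word size and the requested code_size):
// with wordSize == 8 and code_size == 64,
//   real_size = align_up(64 + sizeof(VtableStub), 8)
// and a fresh chunk provides room for chunk_factor (32) stubs of that size
// plus pd_code_alignment() slack. Most allocations therefore reduce to
// bumping _chunk; a new VtableBlob is requested only when the current chunk
// is exhausted.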
void VtableStub::print_on(outputStream* st) const {
  st->print("vtable stub (index = %d, receiver_location = %zd, code = [" INTPTR_FORMAT ", " INTPTR_FORMAT "])",
            index(), p2i(receiver_location()), p2i(code_begin()), p2i(code_end()));
}

void VtableStub::print() const { print_on(tty); }

// -----------------------------------------------------------------------------------------
// Implementation of VtableStubs
//
// For each hash value there's a linked list of vtable stubs (with that
// hash value). Each list is anchored in a little hash _table, indexed
// by that hash value.

VtableStub* volatile VtableStubs::_table[VtableStubs::N];
int VtableStubs::_vtab_stub_size = 0;
int VtableStubs::_itab_stub_size = 0;

#if defined(PRODUCT)
// These values are good for the PRODUCT case (no tracing).
static const int first_vtableStub_size =  64;
static const int first_itableStub_size = 256;
#else
// These values are good for the non-PRODUCT case (when tracing can be switched on).
// To find out, run test workload with
//   -Xlog:vtablestubs=Trace -XX:+CountCompiledCalls -XX:+DebugVtables
// and use the reported "estimate" value.
// Here is a list of observed worst-case values:
//                  vtable  itable
// aarch64:            460     324
// arm:                  ?       ?
// ppc (linux, BE):    404     288
// ppc (linux, LE):    356     276
// ppc (AIX):          416     296
// s390x:              408     256
// Solaris-sparc:      792     348
// x86 (Linux):        670     309
// x86 (MacOS):        682     321
static const int first_vtableStub_size = 1024;
static const int first_itableStub_size =  512;
#endif


void VtableStubs::initialize() {
  assert(VtableStub::_receiver_location == VMRegImpl::Bad(), "initialized multiple times?");

  VtableStub::_receiver_location = SharedRuntime::name_for_receiver();
  {
    MutexLocker ml(VtableStubs_lock, Mutex::_no_safepoint_check_flag);
    for (int i = 0; i < N; i++) {
      Atomic::store(&_table[i], (VtableStub*)nullptr);
    }
  }
}


int VtableStubs::code_size_limit(bool is_vtable_stub) {
  if (is_vtable_stub) {
    return _vtab_stub_size > 0 ? _vtab_stub_size : first_vtableStub_size;
  } else { // itable stub
    return _itab_stub_size > 0 ? _itab_stub_size : first_itableStub_size;
  }
} // code_size_limit


void VtableStubs::check_and_set_size_limit(bool is_vtable_stub,
                                           int  code_size,
                                           int  padding) {
  const char* name = is_vtable_stub ? "vtable" : "itable";

  guarantee(code_size <= code_size_limit(is_vtable_stub),
            "buffer overflow in %s stub, code_size is %d, limit is %d", name, code_size, code_size_limit(is_vtable_stub));

  if (is_vtable_stub) {
    if (log_is_enabled(Trace, vtablestubs)) {
      if ((_vtab_stub_size > 0) && ((code_size + padding) > _vtab_stub_size)) {
        log_trace(vtablestubs)("%s size estimate needed adjustment from %d to %d bytes",
                               name, _vtab_stub_size, code_size + padding);
      }
    }
    if ((code_size + padding) > _vtab_stub_size) {
      _vtab_stub_size = code_size + padding;
    }
  } else { // itable stub
    if (log_is_enabled(Trace, vtablestubs)) {
      if ((_itab_stub_size > 0) && ((code_size + padding) > _itab_stub_size)) {
        log_trace(vtablestubs)("%s size estimate needed adjustment from %d to %d bytes",
                               name, _itab_stub_size, code_size + padding);
      }
    }
    if ((code_size + padding) > _itab_stub_size) {
      _itab_stub_size = code_size + padding;
    }
  }
  return;
} // check_and_set_size_limit


void VtableStubs::bookkeeping(MacroAssembler* masm, outputStream* out, VtableStub* s,
                              address npe_addr, address ame_addr, bool is_vtable_stub,
                              int index, int slop_bytes, int index_dependent_slop) {
  const char* name = is_vtable_stub ? "vtable" : "itable";
  const int stub_length = code_size_limit(is_vtable_stub);

  if (log_is_enabled(Trace, vtablestubs)) {
    log_trace(vtablestubs)("%s #%d at " PTR_FORMAT ": size: %d, estimate: %d, slop area: %d",
                           name, index, p2i(s->code_begin()),
                           (int)(masm->pc() - s->code_begin()),
                           stub_length,
                           (int)(s->code_end() - masm->pc()));
  }
  guarantee(masm->pc() <= s->code_end(), "%s #%d: overflowed buffer, estimated len: %d, actual len: %d, overrun: %d",
            name, index, stub_length,
            (int)(masm->pc() - s->code_begin()),
            (int)(masm->pc() - s->code_end()));
  assert((masm->pc() + index_dependent_slop) <= s->code_end(), "%s #%d: spare space for 32-bit offset: required = %d, available = %d",
         name, index, index_dependent_slop,
         (int)(s->code_end() - masm->pc()));

  // After the first vtable/itable stub is generated, we have a much
  // better estimate for the stub size. Remember/update this
  // estimate after some sanity checks.
  check_and_set_size_limit(is_vtable_stub, masm->offset(), slop_bytes);
  s->set_exception_points(npe_addr, ame_addr);
}


address VtableStubs::find_stub(bool is_vtable_stub, int vtable_index) {
  assert(vtable_index >= 0, "must be non-negative");

  VtableStub* s;
  {
    MutexLocker ml(VtableStubs_lock, Mutex::_no_safepoint_check_flag);
    s = lookup(is_vtable_stub, vtable_index);
    if (s == nullptr) {
      if (is_vtable_stub) {
        s = create_vtable_stub(vtable_index);
      } else {
        s = create_itable_stub(vtable_index);
      }

      // Creation of a vtable or itable stub can fail if there is not enough free space in the code cache.
      if (s == nullptr) {
        return nullptr;
      }

      enter(is_vtable_stub, vtable_index, s);
      if (PrintAdapterHandlers) {
        tty->print_cr("Decoding VtableStub %s[%d]@" PTR_FORMAT " [" PTR_FORMAT ", " PTR_FORMAT "] (%zu bytes)",
                      is_vtable_stub? "vtbl": "itbl", vtable_index, p2i(VtableStub::receiver_location()),
                      p2i(s->code_begin()), p2i(s->code_end()), pointer_delta(s->code_end(), s->code_begin(), 1));
        Disassembler::decode(s->code_begin(), s->code_end());
      }
      // Notify JVMTI about this stub. The event will be recorded by the enclosing
      // JvmtiDynamicCodeEventCollector and posted when this thread has released
      // all locks. Only post this event if a new state is not required. Creating a new state would
      // cause a safepoint and the caller of this code has a NoSafepointVerifier.
      if (JvmtiExport::should_post_dynamic_code_generated()) {
        JvmtiExport::post_dynamic_code_generated_while_holding_locks(is_vtable_stub? "vtable stub": "itable stub",
                                                                     s->code_begin(), s->code_end());
      }
    }
  }
  return s->entry_point();
}
inline uint VtableStubs::hash(bool is_vtable_stub, int vtable_index) {
  // Assumption: receiver_location < 4 in most cases.
  int hash = ((vtable_index << 2) ^ VtableStub::receiver_location()->value()) + vtable_index;
  return (is_vtable_stub ? ~hash : hash) & mask;
}


inline uint VtableStubs::unsafe_hash(address entry_point) {
  // The entry point may or may not be a VtableStub. Generate a hash as if it were.
  address vtable_stub_addr = entry_point - VtableStub::entry_offset();
  assert(CodeCache::contains(vtable_stub_addr), "assumed to always be the case");
  address vtable_type_addr = vtable_stub_addr + offset_of(VtableStub, _type);
  address vtable_index_addr = vtable_stub_addr + offset_of(VtableStub, _index);
  bool is_vtable_stub = *vtable_type_addr == static_cast<uint8_t>(VtableStub::Type::vtable_stub);
  short vtable_index;
  static_assert(sizeof(VtableStub::_index) == sizeof(vtable_index), "precondition");
  memcpy(&vtable_index, vtable_index_addr, sizeof(vtable_index));
  return hash(is_vtable_stub, vtable_index);
}


VtableStub* VtableStubs::lookup(bool is_vtable_stub, int vtable_index) {
  assert_lock_strong(VtableStubs_lock);
  unsigned hash = VtableStubs::hash(is_vtable_stub, vtable_index);
  VtableStub* s = Atomic::load(&_table[hash]);
  while (s != nullptr && !s->matches(is_vtable_stub, vtable_index)) s = s->next();
  return s;
}


void VtableStubs::enter(bool is_vtable_stub, int vtable_index, VtableStub* s) {
  assert_lock_strong(VtableStubs_lock);
  assert(s->matches(is_vtable_stub, vtable_index), "bad vtable stub");
  unsigned int h = VtableStubs::hash(is_vtable_stub, vtable_index);
  // Insert s at the beginning of the corresponding list.
  s->set_next(Atomic::load(&_table[h]));
  // Make sure that concurrent readers not taking the mutex observe the writing of "next".
  Atomic::release_store(&_table[h], s);
}

VtableStub* VtableStubs::entry_point(address pc) {
  // The pc may or may not be the entry point for a VtableStub. Use unsafe_hash
  // to generate the hash that would have been used if it was. The lookup in the
  // _table will only succeed if there is a VtableStub with an entry point at
  // the pc.
  MutexLocker ml(VtableStubs_lock, Mutex::_no_safepoint_check_flag);
  uint hash = VtableStubs::unsafe_hash(pc);
  VtableStub* s;
  for (s = Atomic::load(&_table[hash]); s != nullptr && s->entry_point() != pc; s = s->next()) {}
  return (s != nullptr && s->entry_point() == pc) ? s : nullptr;
}
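// Concurrency note: enter() links a new stub into its bucket and publishes it
// with Atomic::release_store, which pairs with the Atomic::load_acquire used
// by stub_containing() and vtable_stub_do() below. Readers that do not take
// VtableStubs_lock therefore observe a fully initialized stub; lookup() and
// entry_point() access the table under the lock.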
bool VtableStubs::contains(address pc) {
  // Simple solution for now - we may want to use
  // a faster way if this function is called often.
  return stub_containing(pc) != nullptr;
}


VtableStub* VtableStubs::stub_containing(address pc) {
  for (int i = 0; i < N; i++) {
    for (VtableStub* s = Atomic::load_acquire(&_table[i]); s != nullptr; s = s->next()) {
      if (s->contains(pc)) return s;
    }
  }
  return nullptr;
}

void vtableStubs_init() {
  VtableStubs::initialize();
}

void VtableStubs::vtable_stub_do(void f(VtableStub*)) {
  for (int i = 0; i < N; i++) {
    for (VtableStub* s = Atomic::load_acquire(&_table[i]); s != nullptr; s = s->next()) {
      f(s);
    }
  }
}


//-----------------------------------------------------------------------------------------------------
// Non-product code
#ifndef PRODUCT

extern "C" void bad_compiled_vtable_index(JavaThread* thread, oop receiver, int index) {
  ResourceMark rm;
  Klass* klass = receiver->klass();
  InstanceKlass* ik = InstanceKlass::cast(klass);
  klassVtable vt = ik->vtable();
  ik->print();
  fatal("bad compiled vtable dispatch: receiver " INTPTR_FORMAT ", "
        "index %d (vtable length %d)",
        p2i(receiver), index, vt.length());
}

#endif // PRODUCT