1 /* 2 * Copyright (c) 1997, 2025, Oracle and/or its affiliates. All rights reserved. 3 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. 4 * 5 * This code is free software; you can redistribute it and/or modify it 6 * under the terms of the GNU General Public License version 2 only, as 7 * published by the Free Software Foundation. 8 * 9 * This code is distributed in the hope that it will be useful, but WITHOUT 10 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or 11 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License 12 * version 2 for more details (a copy is included in the LICENSE file that 13 * accompanied this code). 14 * 15 * You should have received a copy of the GNU General Public License version 16 * 2 along with this work; if not, write to the Free Software Foundation, 17 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. 18 * 19 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA 20 * or visit www.oracle.com if you need additional information or have any 21 * questions. 22 * 23 */ 24 25 #ifndef SHARE_RUNTIME_STUBROUTINES_HPP 26 #define SHARE_RUNTIME_STUBROUTINES_HPP 27 28 #include "code/codeBlob.hpp" 29 #include "memory/allocation.hpp" 30 #include "prims/vectorSupport.hpp" 31 #include "runtime/frame.hpp" 32 #include "runtime/mutexLocker.hpp" 33 #include "runtime/stubCodeGenerator.hpp" 34 #include "runtime/stubDeclarations.hpp" 35 #include "runtime/threadWXSetters.inline.hpp" 36 #include "utilities/macros.hpp" 37 38 // StubRoutines provides entry points to assembly routines used by 39 // compiled code and the run-time system. Platform-specific entry 40 // points are defined in the platform-specific inner class. Most 41 // routines have a single (main) entry point. However, a few routines 42 // do provide alternative entry points. 
//
// Stub routines whose entries are advertised via class StubRoutines
// are generated in batches at well-defined stages during JVM init:
// initial stubs, continuation stubs, compiler stubs, final stubs.
// Each batch is embedded in a single, associated blob (an instance of
// BufferBlob) i.e. the blob to entry relationship is 1-m.
//
// Note that this contrasts with the much smaller number of stub
// routines generated via classes SharedRuntime, c1_Runtime1 and
// OptoRuntime. The latter routines are also generated at well-defined
// points during JVM init. However, each stub routine has its own
// unique blob (various subclasses of RuntimeBlob) i.e. the blob to
// entry relationship is 1-1. The difference arises because
// SharedRuntime routines may need to be relocatable or advertise
// properties such as a frame size via their blob.
//
// Staging of stub routine generation is needed in order to manage
// init dependencies between 1) stubs and other stubs or 2) stubs and
// other runtime components. For example, some exception throw stubs
// need to be generated before compiler stubs (such as the
// deoptimization stub) so that the latter can invoke the throw
// routine in bail-out code. Likewise, stubs that access objects (such
// as the object array copy stub) need to be created after
// initialization of some GC constants and generation of the GC
// barrier stubs they might need to invoke.
68 // 69 // Class scheme: 70 // 71 // platform-independent platform-dependent 72 // 73 // stubRoutines.hpp <-- included -- stubRoutines_<arch>.hpp 74 // ^ ^ 75 // | | 76 // implements implements 77 // | | 78 // | | 79 // stubRoutines.cpp stubRoutines_<arch>.cpp 80 // stubGenerator_<arch>.cpp 81 // 82 // Note 1: The important thing is a clean decoupling between stub 83 // entry points (interfacing to the whole vm; i.e., 1-to-n 84 // relationship) and stub generators (interfacing only to 85 // the entry points implementation; i.e., 1-to-1 relationship). 86 // This significantly simplifies changes in the generator 87 // structure since the rest of the vm is not affected. 88 // 89 // Note 2: stubGenerator_<arch>.cpp contains a minimal portion of 90 // machine-independent code; namely the generator calls of 91 // the generator functions that are used platform-independently. 92 // However, it comes with the advantage of having a 1-file 93 // implementation of the generator. It should be fairly easy 94 // to change, should it become a problem later. 95 // 96 // Scheme for adding a new entry point: 97 // 98 // 1. determine if it's a platform-dependent or independent entry point 99 // a) if platform independent: make subsequent changes in the independent files 100 // b) if platform dependent: make subsequent changes in the dependent files 101 // 2. add a private instance variable holding the entry point address 102 // 3. add a public accessor function to the instance variable 103 // 4. implement the corresponding generator function in the platform-dependent 104 // stubGenerator_<arch>.cpp file and call the function in generate_all() of that file 105 // 5. ensure the entry is generated in the right blob to satisfy initialization 106 // dependencies between it and other stubs or runtime components. 

// Records the pc range [_start_pc, _end_pc) of a generated stub that
// performs an unsafe (raw) memory access, together with the pc to
// continue at (_error_exit_pc) when an access inside that range
// faults. Entries live in a single statically-sized C-heap table.
// NOTE(review): presumably consulted by the JVM's fault handler via
// page_error_continue_pc() — confirm against stubRoutines.cpp.
class UnsafeMemoryAccess : public CHeapObj<mtCode> {
 private:
  address _start_pc;       // first pc of the covered access region
  address _end_pc;         // pc one past the covered access region
  address _error_exit_pc;  // pc to resume at on a fault in the region
 public:
  // Shared exit stub used when no per-entry error exit is supplied.
  static address _common_exit_stub_pc;
  // Global table of entries; filled by add_to_table(), sized by create_table().
  static UnsafeMemoryAccess* _table;
  static int _table_length;      // number of entries currently in use
  static int _table_max_length;  // capacity fixed by create_table()

  UnsafeMemoryAccess() : _start_pc(nullptr), _end_pc(nullptr), _error_exit_pc(nullptr) {}

  void set_start_pc(address pc)      { _start_pc = pc; }
  void set_end_pc(address pc)        { _end_pc = pc; }
  void set_error_exit_pc(address pc) { _error_exit_pc = pc; }
  address start_pc() const      { return _start_pc; }
  address end_pc() const        { return _end_pc; }
  address error_exit_pc() const { return _error_exit_pc; }

  static void set_common_exit_stub_pc(address pc) { _common_exit_stub_pc = pc; }
  static address common_exit_stub_pc()            { return _common_exit_stub_pc; }

  // Appends a new entry describing [start_pc, end_pc) with the given
  // error exit. Guarantees (fatal if violated) that the fixed-size
  // table still has room; returns the freshly initialized entry.
  static UnsafeMemoryAccess* add_to_table(address start_pc, address end_pc, address error_exit_pc) {
    guarantee(_table_length < _table_max_length, "Incorrect UnsafeMemoryAccess::_table_max_length");
    UnsafeMemoryAccess* entry = &_table[_table_length];
    entry->set_start_pc(start_pc);
    entry->set_end_pc(end_pc);
    entry->set_error_exit_pc(error_exit_pc);

    _table_length++;
    return entry;
  }

  // True iff pc lies in some registered access region (defined in .cpp).
  static bool contains_pc(address pc);
  // Returns the continuation pc for a fault at pc (defined in .cpp).
  static address page_error_continue_pc(address pc);
  // Allocates the global table with capacity max_size (defined in .cpp).
  static void create_table(int max_size);
};

// Scoped (StackObj) helper used by stub generators: its constructor
// and destructor (defined in .cpp) bracket the code emitted in
// between, optionally registering it as an UnsafeMemoryAccess entry.
class UnsafeMemoryAccessMark : public StackObj {
 private:
  UnsafeMemoryAccess* _ucm_entry;  // entry created for this mark, if any
  StubCodeGenerator* _cgen;        // generator whose code the mark brackets
 public:
  UnsafeMemoryAccessMark(StubCodeGenerator* cgen, bool add_entry, bool continue_at_scope_end, address error_exit_pc = nullptr);
  ~UnsafeMemoryAccessMark();
};

// declare stubgen blob id enum
//
// Expands the x-macro STUBGEN_BLOBS_DO (from stubDeclarations.hpp) to
// one enum tag per stub blob, bracketed by NO_BLOBID/NUM_BLOBIDS
// sentinels so the ids can index arrays of size NUM_BLOBIDS.

#define BLOB_ENUM_DECLARE(blob_name) \
  STUB_ID_NAME(blob_name),

enum StubGenBlobId : int {
  NO_BLOBID = -1,
  STUBGEN_BLOBS_DO(BLOB_ENUM_DECLARE)
  NUM_BLOBIDS
};

#undef BLOB_ENUM_DECLARE

// declare blob local stub id enums
//
// For each blob this generates a separate enum type
// StubGenStubId_<blob> whose members enumerate only that blob's
// stubs, again with NO_STUBID_*/NUM_STUBIDS_* sentinels.

#define BLOB_LOCAL_ENUM_START(blob_name) \
  enum StubGenStubId_ ## blob_name {     \
    NO_STUBID_ ## blob_name = -1,

#define BLOB_LOCAL_ENUM_END(blob_name) \
    NUM_STUBIDS_ ## blob_name          \
  };

#define BLOB_LOCAL_STUB_ENUM_DECLARE(blob_name, stub_name) \
  blob_name ## _ ## stub_name ## _id,

STUBGEN_BLOBS_STUBS_DO(BLOB_LOCAL_ENUM_START, BLOB_LOCAL_ENUM_END, BLOB_LOCAL_STUB_ENUM_DECLARE)

#undef BLOB_LOCAL_ENUM_START
#undef BLOB_LOCAL_ENUM_END
#undef BLOB_LOCAL_STUB_ENUM_DECLARE

// declare global stub id enum
//
// A single flat enum covering every stub across all blobs.

#define STUB_ENUM_DECLARE(blob_name, stub_name) \
  STUB_ID_NAME(stub_name) ,

enum StubGenStubId : int {
  NO_STUBID = -1,
  STUBGEN_STUBS_DO(STUB_ENUM_DECLARE)
  NUM_STUBIDS
};

#undef STUB_ENUM_DECLARE

// All-static holder for the entry points of the generated stub
// routines. Blob fields, entry-address fields and their accessors are
// declared by expanding the STUBGEN_* x-macros from
// stubDeclarations.hpp; the entries themselves are generated by the
// platform stubGenerator_<arch>.cpp files.
class StubRoutines: AllStatic {

 public:
  // Dependencies
  friend class StubGenerator;
  friend class VMStructs;
#if INCLUDE_JVMCI
  friend class JVMCIVMStructs;
#endif

#include CPU_HEADER(stubRoutines)

  // declare blob and stub name storage and associated lookup methods

 private:
  static bool _inited_names;  // guards one-time population of the name tables
  static const char* _blob_names[StubGenBlobId::NUM_BLOBIDS];
  static const char* _stub_names[StubGenStubId::NUM_STUBIDS];

 public:
  static bool init_names();
  static const char* get_blob_name(StubGenBlobId id);
  static const char* get_stub_name(StubGenStubId id);

  // declare blob fields
  //
  // One BufferBlob* per stub batch (see the staging comment at the
  // top of this file).

#define DECLARE_BLOB_FIELD(blob_name) \
  static BufferBlob* STUBGEN_BLOB_FIELD_NAME(blob_name);

 private:
  STUBGEN_BLOBS_DO(DECLARE_BLOB_FIELD);

#undef DECLARE_BLOB_FIELD

  // declare fields to store entry addresses
  //
  // _INIT entries differ from plain ones only by carrying an init
  // function in the declaration macro (unused here); _ARRAY entries
  // declare a fixed-size array of addresses.

#define DECLARE_ENTRY_FIELD(blob_name, stub_name, field_name, getter_name) \
  static address STUB_FIELD_NAME(field_name);

#define DECLARE_ENTRY_FIELD_INIT(blob_name, stub_name, field_name, getter_name, init_function) \
  DECLARE_ENTRY_FIELD(blob_name, stub_name, field_name, getter_name)

#define DECLARE_ENTRY_FIELD_ARRAY(blob_name, stub_name, field_name, getter_name, count) \
  static address STUB_FIELD_NAME(field_name)[count];

 private:
  STUBGEN_ENTRIES_DO(DECLARE_ENTRY_FIELD, DECLARE_ENTRY_FIELD_INIT, DECLARE_ENTRY_FIELD_ARRAY);

#undef DECLARE_ENTRY_FIELD_ARRAY
#undef DECLARE_ENTRY_FIELD_INIT
#undef DECLARE_ENTRY_FIELD

  // declare getters and setters for entry addresses
  //
  // (Only getters are declared here; the generators write the fields
  // directly as friends.) Array getters assert the index bound.

#define DEFINE_ENTRY_GETTER(blob_name, stub_name, field_name, getter_name) \
  static address getter_name() { return STUB_FIELD_NAME(field_name); }     \

#define DEFINE_ENTRY_GETTER_INIT(blob_name, stub_name, field_name, getter_name, init_function) \
  DEFINE_ENTRY_GETTER(blob_name, stub_name, field_name, getter_name)

#define DEFINE_ENTRY_GETTER_ARRAY(blob_name, stub_name, field_name, getter_name, count) \
  static address getter_name(int idx) {                                                 \
    assert(idx < count, "out of bounds");                                               \
    return STUB_FIELD_NAME(field_name)[idx];                                            \
  }                                                                                     \

 public:
  STUBGEN_ENTRIES_DO(DEFINE_ENTRY_GETTER, DEFINE_ENTRY_GETTER_INIT, DEFINE_ENTRY_GETTER_ARRAY);

#undef DEFINE_ENTRY_GETTER_ARRAY
#undef DEFINE_ENTRY_GETTER_INIT
#undef DEFINE_ENTRY_GETTER

 public:

  // One initialize_<blob>_stubs() entry point per generation stage.

#define DECLARE_BLOB_INIT_METHOD(blob_name) \
  static void initialize_ ## blob_name ## _stubs();

  STUBGEN_BLOBS_DO(DECLARE_BLOB_INIT_METHOD)

#undef DECLARE_BLOB_INIT_METHOD

 public:

  // Calls to Java
  typedef void (*CallStub)(
    address   link,
    intptr_t* result,
    int       result_type, /* BasicType on 4 bytes */
    Method*   method,
    address   entry_point,
    intptr_t* parameters,
    int       size_of_parameters,
    TRAPS
  );

  static jint _verify_oop_count;

 public:
  // this is used by x86_64 to expose string index stubs to the opto
  // library as a target to a call planted before back end lowering.
  // all other arches plant the call to the stub during back end
  // lowering and use arch-specific entries. we really need to
  // rationalise this at some point.

  static address _string_indexof_array[4];

  /* special case: stub employs array of entries */

  // Vector Math Routines
  // Indexed by [vector size][math operation]; float and double variants.
  static address _vector_f_math[VectorSupport::NUM_VEC_SIZES][VectorSupport::NUM_VECTOR_OP_MATH];
  static address _vector_d_math[VectorSupport::NUM_VEC_SIZES][VectorSupport::NUM_VECTOR_OP_MATH];

  static bool is_stub_code(address addr) { return contains(addr); }

  // generate code to implement method contains
  //
  // The macro expands to a containment check against every stub blob;
  // contains() answers whether addr lies in any of them.

#define CHECK_ADDRESS_IN_BLOB(blob_name)                                \
  blob = STUBGEN_BLOB_FIELD_NAME(blob_name);                            \
  if (blob != nullptr && blob->blob_contains(addr)) { return true; }

  static bool contains(address addr) {
    BufferBlob *blob;
    STUBGEN_BLOBS_DO(CHECK_ADDRESS_IN_BLOB)
    return false;
  }
#undef CHECK_ADDRESS_IN_BLOB

  // define getters for stub code blobs

#define DEFINE_BLOB_GETTER(blob_name) \
  static RuntimeBlob* blob_name ## _stubs_code() { return _ ## blob_name ## _stubs_code; }

  STUBGEN_BLOBS_DO(DEFINE_BLOB_GETTER);

#undef DEFINE_BLOB_GETTER

#ifdef ASSERT
  // provide a translation from stub id to its associated blob id
  static StubGenBlobId stub_to_blob(StubGenStubId stubId);
#endif

  // Debugging
  static jint verify_oop_count()       { return _verify_oop_count; }
  static jint* verify_oop_count_addr() { return &_verify_oop_count; }
  // a subroutine for debugging the GC
  static address verify_oop_subroutine_entry_address() { return (address)&_verify_oop_subroutine_entry; }

  static CallStub call_stub() { return CAST_TO_FN_PTR(CallStub, _call_stub_entry); }

  // Picks the arraycopy stub for element type t and the given
  // alignment/overlap/initialization properties; also reports the
  // stub's name through `name` (defined in .cpp).
  static address select_arraycopy_function(BasicType t, bool aligned, bool disjoint, const char* &name, bool dest_uninitialized);

  // The *_uninit variants below are for copies into not-yet-initialized
  // destination arrays (they differ in GC barrier treatment).

  static address oop_arraycopy(bool dest_uninitialized = false) {
    return dest_uninitialized ? _oop_arraycopy_uninit : _oop_arraycopy;
  }

  static address oop_disjoint_arraycopy(bool dest_uninitialized = false) {
    return dest_uninitialized ? _oop_disjoint_arraycopy_uninit : _oop_disjoint_arraycopy;
  }

  static address arrayof_oop_arraycopy(bool dest_uninitialized = false) {
    return dest_uninitialized ? _arrayof_oop_arraycopy_uninit : _arrayof_oop_arraycopy;
  }

  static address arrayof_oop_disjoint_arraycopy(bool dest_uninitialized = false) {
    return dest_uninitialized ? _arrayof_oop_disjoint_arraycopy_uninit : _arrayof_oop_disjoint_arraycopy;
  }

  typedef void (*DataCacheWritebackStub)(void *);
  static DataCacheWritebackStub DataCacheWriteback_stub()         { return CAST_TO_FN_PTR(DataCacheWritebackStub,     _data_cache_writeback); }
  typedef void (*DataCacheWritebackSyncStub)(bool);
  static DataCacheWritebackSyncStub DataCacheWritebackSync_stub() { return CAST_TO_FN_PTR(DataCacheWritebackSyncStub, _data_cache_writeback_sync); }

  static address checkcast_arraycopy(bool dest_uninitialized = false) {
    return dest_uninitialized ? _checkcast_arraycopy_uninit : _checkcast_arraycopy;
  }

  typedef void (*UnsafeArrayCopyStub)(const void* src, void* dst, size_t count);
  static UnsafeArrayCopyStub UnsafeArrayCopy_stub() { return CAST_TO_FN_PTR(UnsafeArrayCopyStub, _unsafe_arraycopy); }

  typedef void (*UnsafeSetMemoryStub)(void* dst, size_t count, char byte);
  static UnsafeSetMemoryStub UnsafeSetMemory_stub() { return CAST_TO_FN_PTR(UnsafeSetMemoryStub, _unsafe_setmemory); }

  // float -> half-float conversion via the generated stub; asserts the
  // stub exists on this platform before calling into the code cache.
  static jshort f2hf(jfloat x) {
    assert(_f2hf != nullptr, "stub is not implemented on this platform");
    MACOS_AARCH64_ONLY(ThreadWXEnable wx(WXExec, Thread::current());) // About to call into code cache
    typedef jshort (*f2hf_stub_t)(jfloat x);
    return ((f2hf_stub_t)_f2hf)(x);
  }
  // half-float -> float conversion via the generated stub.
  static jfloat hf2f(jshort x) {
    assert(_hf2f != nullptr, "stub is not implemented on this platform");
    MACOS_AARCH64_ONLY(ThreadWXEnable wx(WXExec, Thread::current());) // About to call into code cache
    typedef jfloat (*hf2f_stub_t)(jshort x);
    return ((hf2f_stub_t)_hf2f)(x);
  }

  // Picks the array-fill stub for element type t and alignment; also
  // reports the stub's name through `name` (defined in .cpp).
  static address select_fill_function(BasicType t, bool aligned, const char* &name);

  // Default versions of some of the arraycopy functions for platforms
  // which do not have specialized versions
  //
  static void jbyte_copy     (jbyte*  src, jbyte*  dest, size_t count);
  static void jshort_copy    (jshort* src, jshort* dest, size_t count);
  static void jint_copy      (jint*   src, jint*   dest, size_t count);
  static void jlong_copy     (jlong*  src, jlong*  dest, size_t count);
  static void oop_copy       (oop*    src, oop*    dest, size_t count);
  static void oop_copy_uninit(oop*    src, oop*    dest, size_t count);

  static void arrayof_jbyte_copy     (HeapWord* src, HeapWord* dest, size_t count);
  static void arrayof_jshort_copy    (HeapWord* src, HeapWord* dest, size_t count);
  static void arrayof_jint_copy      (HeapWord* src, HeapWord* dest, size_t count);
  static void arrayof_jlong_copy     (HeapWord* src, HeapWord* dest, size_t count);
  static void arrayof_oop_copy       (HeapWord* src, HeapWord* dest, size_t count);
  static void arrayof_oop_copy_uninit(HeapWord* src, HeapWord* dest, size_t count);

};

#endif // SHARE_RUNTIME_STUBROUTINES_HPP