1 /*
  2  * Copyright (c) 1997, 2025, Oracle and/or its affiliates. All rights reserved.
  3  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  4  *
  5  * This code is free software; you can redistribute it and/or modify it
  6  * under the terms of the GNU General Public License version 2 only, as
  7  * published by the Free Software Foundation.
  8  *
  9  * This code is distributed in the hope that it will be useful, but WITHOUT
 10  * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 11  * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
 12  * version 2 for more details (a copy is included in the LICENSE file that
 13  * accompanied this code).
 14  *
 15  * You should have received a copy of the GNU General Public License version
 16  * 2 along with this work; if not, write to the Free Software Foundation,
 17  * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 18  *
 19  * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 20  * or visit www.oracle.com if you need additional information or have any
 21  * questions.
 22  *
 23  */
 24 
 25 #ifndef SHARE_RUNTIME_STUBROUTINES_HPP
 26 #define SHARE_RUNTIME_STUBROUTINES_HPP
 27 
 28 #include "code/codeBlob.hpp"
 29 #include "memory/allocation.hpp"
 30 #include "prims/vectorSupport.hpp"
 31 #include "runtime/frame.hpp"
 32 #include "runtime/mutexLocker.hpp"
 33 #include "runtime/stubCodeGenerator.hpp"
 34 #include "runtime/stubDeclarations.hpp"
 35 #include "runtime/threadWXSetters.inline.hpp"
 36 #include "utilities/macros.hpp"
 37 
 38 // StubRoutines provides entry points to assembly routines used by
 39 // compiled code and the run-time system. Platform-specific entry
 40 // points are defined in the platform-specific inner class. Most
 41 // routines have a single (main) entry point. However, a few routines
 42 // do provide alternative entry points.
 43 //
 44 // Stub routines whose entries are advertised via class StubRoutines
 45 // are generated in batches at well-defined stages during JVM init:
 46 // initial stubs, continuation stubs, compiler stubs, final stubs.
 47 // Each batch is embedded in a single, associated blob (an instance of
 48 // BufferBlob) i.e. the blob to entry relationship is 1-m.
 49 //
// Note that this contrasts with the much smaller number of stub
 51 // routines generated via classes SharedRuntime, c1_Runtime1 and
 52 // OptoRuntime. The latter routines are also generated at well-defined
 53 // points during JVM init. However, each stub routine has its own
 54 // unique blob (various subclasses of RuntimeBlob) i.e. the blob to
 55 // entry relationship is 1-1. The difference arises because
 56 // SharedRuntime routines may need to be relocatable or advertise
 57 // properties such as a frame size via their blob.
 58 //
 59 // Staging of stub routine generation is needed in order to manage
 60 // init dependencies between 1) stubs and other stubs or 2) stubs and
 61 // other runtime components. For example, some exception throw stubs
 62 // need to be generated before compiler stubs (such as the
// deoptimization stub) so that the latter can invoke the throw routine
 64 // in bail-out code. Likewise, stubs that access objects (such as the
 65 // object array copy stub) need to be created after initialization of
 66 // some GC constants and generation of the GC barrier stubs they might
 67 // need to invoke.
 68 //
 69 // Class scheme:
 70 //
 71 //    platform-independent               platform-dependent
 72 //
 73 //    stubRoutines.hpp  <-- included --  stubRoutines_<arch>.hpp
 74 //           ^                                  ^
 75 //           |                                  |
 76 //       implements                         implements
 77 //           |                                  |
 78 //           |                                  |
 79 //    stubRoutines.cpp                   stubRoutines_<arch>.cpp
 80 //                                       stubGenerator_<arch>.cpp
 81 //
 82 // Note 1: The important thing is a clean decoupling between stub
 83 //         entry points (interfacing to the whole vm; i.e., 1-to-n
 84 //         relationship) and stub generators (interfacing only to
 85 //         the entry points implementation; i.e., 1-to-1 relationship).
 86 //         This significantly simplifies changes in the generator
 87 //         structure since the rest of the vm is not affected.
 88 //
 89 // Note 2: stubGenerator_<arch>.cpp contains a minimal portion of
 90 //         machine-independent code; namely the generator calls of
 91 //         the generator functions that are used platform-independently.
 92 //         However, it comes with the advantage of having a 1-file
 93 //         implementation of the generator. It should be fairly easy
 94 //         to change, should it become a problem later.
 95 //
 96 // Scheme for adding a new entry point:
 97 //
 98 // 1. determine if it's a platform-dependent or independent entry point
 99 //    a) if platform independent: make subsequent changes in the independent files
100 //    b) if platform   dependent: make subsequent changes in the   dependent files
101 // 2. add a private instance variable holding the entry point address
102 // 3. add a public accessor function to the instance variable
103 // 4. implement the corresponding generator function in the platform-dependent
104 //    stubGenerator_<arch>.cpp file and call the function in generate_all() of that file
105 // 5. ensure the entry is generated in the right blob to satisfy initialization
106 //    dependencies between it and other stubs or runtime components.
107 
// Records one generated code region that performs a raw ("unsafe")
// memory access, together with the pc at which execution should
// resume if a fault is taken inside that region. Entries are kept in
// a global table which the fault-handling code can query via
// contains_pc() / page_error_continue_pc().
class UnsafeMemoryAccess : public CHeapObj<mtCode> {
 private:
  address _start_pc;       // first pc of the guarded code region
  address _end_pc;         // pc marking the end of the guarded region
  address _error_exit_pc;  // pc to continue at after a fault in the region
 public:
  // shared exit stub pc, used when an entry has no dedicated error exit
  static address           _common_exit_stub_pc;
  // global table of guarded regions, allocated by create_table()
  static UnsafeMemoryAccess* _table;
  static int               _table_length;      // number of entries in use
  static int               _table_max_length;  // capacity of _table
  UnsafeMemoryAccess() : _start_pc(nullptr), _end_pc(nullptr), _error_exit_pc(nullptr) {}
  void    set_start_pc(address pc)      { _start_pc = pc; }
  void    set_end_pc(address pc)        { _end_pc = pc; }
  void    set_error_exit_pc(address pc) { _error_exit_pc = pc; }
  address start_pc()      const { return _start_pc; }
  address end_pc()        const { return _end_pc; }
  address error_exit_pc() const { return _error_exit_pc; }

  static void    set_common_exit_stub_pc(address pc) { _common_exit_stub_pc = pc; }
  static address common_exit_stub_pc()               { return _common_exit_stub_pc; }

  // Append a new entry describing the region [start_pc, end_pc] with
  // the given error exit and return it. The table must have been
  // created with sufficient capacity up front; the guarantee fires if
  // create_table() was called with too small a max_size.
  static UnsafeMemoryAccess* add_to_table(address start_pc, address end_pc, address error_exit_pc) {
    guarantee(_table_length < _table_max_length, "Incorrect UnsafeMemoryAccess::_table_max_length");
    UnsafeMemoryAccess* entry = &_table[_table_length];
    entry->set_start_pc(start_pc);
    entry->set_end_pc(end_pc);
    entry->set_error_exit_pc(error_exit_pc);

    _table_length++;
    return entry;
  }

  // true iff pc lies within one of the recorded regions (defined in .cpp)
  static bool    contains_pc(address pc);
  // resume pc to use after a page error at pc (defined in .cpp)
  static address page_error_continue_pc(address pc);
  // allocate the global table with capacity max_size (defined in .cpp)
  static void    create_table(int max_size);
};
144 
// Stack-allocated (RAII) helper used while generating stub code.
// Judging by its members, the constructor opens an UnsafeMemoryAccess
// table entry (when add_entry is set) bounded by the generator's
// current code position, and the destructor closes the region at
// scope end — see the definitions in stubRoutines.cpp to confirm.
class UnsafeMemoryAccessMark : public StackObj {
 private:
  UnsafeMemoryAccess*  _ucm_entry; // table entry opened by the constructor, if any
  StubCodeGenerator* _cgen;        // generator whose code stream bounds the region
 public:
  UnsafeMemoryAccessMark(StubCodeGenerator* cgen, bool add_entry, bool continue_at_scope_end, address error_exit_pc = nullptr);
  ~UnsafeMemoryAccessMark();
};
153 
154 // declare stubgen blob id enum
155 
// expands to one enum tag per blob name
#define BLOB_ENUM_DECLARE(blob_name) \
  STUB_ID_NAME(blob_name),

// Global id for each stubgen blob; tags are generated from the blob
// list in stubDeclarations.hpp via STUBGEN_BLOBS_DO. NO_BLOBID marks
// "no blob" and NUM_BLOBIDS is the tag count.
enum StubGenBlobId : int {
  NO_BLOBID = -1,
  STUBGEN_BLOBS_DO(BLOB_ENUM_DECLARE)
  NUM_BLOBIDS
};

#undef BLOB_ENUM_DECLARE
166 
167 // declare blob local stub id enums
168 
// opens enum StubGenStubId_<blob> with sentinel NO_STUBID_<blob>
#define BLOB_LOCAL_ENUM_START(blob_name)        \
  enum StubGenStubId_ ## blob_name {            \
    NO_STUBID_ ## blob_name = -1,

// closes the per-blob enum with its count, NUM_STUBIDS_<blob>
#define BLOB_LOCAL_ENUM_END(blob_name)   \
    NUM_STUBIDS_ ## blob_name            \
  };

// expands to one tag, <blob>_<stub>_id, per stub in the blob
#define BLOB_LOCAL_STUB_ENUM_DECLARE(blob_name, stub_name) \
  blob_name ## _ ## stub_name ## _id,

// generate one blob-local stub id enum per blob, driven by the
// blob/stub lists in stubDeclarations.hpp
STUBGEN_BLOBS_STUBS_DO(BLOB_LOCAL_ENUM_START, BLOB_LOCAL_ENUM_END, BLOB_LOCAL_STUB_ENUM_DECLARE)

#undef BLOB_LOCAL_ENUM_START
#undef BLOB_LOCAL_ENUM_END
#undef BLOB_LOCAL_STUB_ENUM_DECLARE
185 
186 // declare global stub id enum
187 
// expands to one enum tag per stub name
#define STUB_ENUM_DECLARE(blob_name, stub_name) \
  STUB_ID_NAME(stub_name) ,

// Global id covering every stub across all blobs; tags are generated
// from stubDeclarations.hpp via STUBGEN_STUBS_DO. NO_STUBID marks
// "no stub" and NUM_STUBIDS is the tag count.
enum StubGenStubId : int {
  NO_STUBID = -1,
  STUBGEN_STUBS_DO(STUB_ENUM_DECLARE)
  NUM_STUBIDS
};

#undef STUB_ENUM_DECLARE
198 
// Holder for all stubgen-generated entry points and their blobs (see
// the file header comment for the staging/blob scheme). All members
// are static: blob fields and entry-address fields are declared here
// via the X-macros from stubDeclarations.hpp and filled in by the
// initialize_<blob>_stubs() methods during JVM init.
class StubRoutines: AllStatic {

public:
  // Dependencies
  friend class StubGenerator;
  friend class VMStructs;
  friend class SCAddressTable;
#if INCLUDE_JVMCI
  friend class JVMCIVMStructs;
#endif

// platform-specific entry points and helpers (see stubRoutines_<arch>.hpp)
#include CPU_HEADER(stubRoutines)

// declare blob and stub name storage and associated lookup methods

private:
  static bool _inited_names;  // set once init_names() has filled the tables below
  static const char* _blob_names[StubGenBlobId::NUM_BLOBIDS];  // printable name per blob id
  static const char* _stub_names[StubGenStubId::NUM_STUBIDS];  // printable name per stub id

public:
  static bool init_names();
  static const char* get_blob_name(StubGenBlobId id);
  static const char* get_stub_name(StubGenStubId id);

// declare blob fields

// one BufferBlob* field per stubgen blob (initial, continuation,
// compiler, final, ...)
#define DECLARE_BLOB_FIELD(blob_name) \
  static BufferBlob* STUBGEN_BLOB_FIELD_NAME(blob_name);

private:
  STUBGEN_BLOBS_DO(DECLARE_BLOB_FIELD);

#undef DECLARE_BLOB_FIELD

// declare fields to store entry addresses

// simple entry: one address field per stub entry
#define DECLARE_ENTRY_FIELD(blob_name, stub_name, field_name, getter_name) \
  static address STUB_FIELD_NAME(field_name);

// entry with an init function: declaration is identical, the init
// function only matters where the field is defined/initialized
#define DECLARE_ENTRY_FIELD_INIT(blob_name, stub_name, field_name, getter_name, init_function) \
  DECLARE_ENTRY_FIELD(blob_name, stub_name, field_name, getter_name)

// array entry: a stub advertising 'count' related entry points
#define DECLARE_ENTRY_FIELD_ARRAY(blob_name, stub_name, field_name, getter_name, count) \
  static address STUB_FIELD_NAME(field_name)[count];

private:
  STUBGEN_ENTRIES_DO(DECLARE_ENTRY_FIELD, DECLARE_ENTRY_FIELD_INIT, DECLARE_ENTRY_FIELD_ARRAY);

#undef DECLARE_ENTRY_FIELD_ARRAY
#undef DECLARE_ENTRY_FIELD_INIT
#undef DECLARE_ENTRY_FIELD

// declare getters and setters for entry addresses

#define DEFINE_ENTRY_GETTER(blob_name, stub_name, field_name, getter_name) \
  static address getter_name() { return STUB_FIELD_NAME(field_name); } \

#define DEFINE_ENTRY_GETTER_INIT(blob_name, stub_name, field_name, getter_name, init_function) \
  DEFINE_ENTRY_GETTER(blob_name, stub_name, field_name, getter_name)

// array entries are accessed by index, asserted in bounds
#define DEFINE_ENTRY_GETTER_ARRAY(blob_name, stub_name, field_name, getter_name, count) \
  static address getter_name(int idx) {                                 \
    assert(idx < count, "out of bounds");                               \
    return STUB_FIELD_NAME(field_name)[idx];                            \
  }                                                                     \

public:
  STUBGEN_ENTRIES_DO(DEFINE_ENTRY_GETTER, DEFINE_ENTRY_GETTER_INIT, DEFINE_ENTRY_GETTER_ARRAY);

#undef DEFINE_ENTRY_GETTER_ARRAY
#undef DEFINE_ENTRY_GETTER_INIT
#undef DEFINE_ENTRY_GETTER

public:

// per-blob generation methods, invoked at the corresponding JVM init
// stage (definitions live in the platform stub generator machinery)
#define DECLARE_BLOB_INIT_METHOD(blob_name)     \
  static void initialize_ ## blob_name ## _stubs();

  STUBGEN_BLOBS_DO(DECLARE_BLOB_INIT_METHOD)

#undef DECLARE_BLOB_INIT_METHOD

public:

  // Calls to Java
  // Signature of the generated call stub used to enter Java code;
  // obtained via call_stub() below.
  typedef void (*CallStub)(
    address   link,
    intptr_t* result,
    int       result_type, /* BasicType on 4 bytes */
    Method* method,
    address   entry_point,
    intptr_t* parameters,
    int       size_of_parameters,
    TRAPS
  );

  // debugging counter, exposed to generated code via verify_oop_count_addr()
  static jint    _verify_oop_count;

public:
  // this is used by x86_64 to expose string index stubs to the opto
  // library as a target to a call planted before back end lowering.
  // all other arches plant the call to the stub during back end
  // lowering and use arch-specific entries. we really need to
  // rationalise this at some point.

  static address _string_indexof_array[4];

  /* special case: stub employs array of entries */

  // Vector Math Routines
  // one entry per [vector size][vector math operation] for float and
  // double variants (see VectorSupport for the dimensions)
  static address _vector_f_math[VectorSupport::NUM_VEC_SIZES][VectorSupport::NUM_VECTOR_OP_MATH];
  static address _vector_d_math[VectorSupport::NUM_VEC_SIZES][VectorSupport::NUM_VECTOR_OP_MATH];

  // true iff addr lies within one of the stubgen code blobs
  static bool is_stub_code(address addr)                   { return contains(addr); }

  // generate code to implement method contains

// expands, per blob, to a null-checked containment test on the blob field
#define CHECK_ADDRESS_IN_BLOB(blob_name) \
  blob = STUBGEN_BLOB_FIELD_NAME(blob_name); \
  if (blob != nullptr && blob->blob_contains(addr)) { return true; }

  // check each generated blob in turn for one containing addr
  static bool contains(address addr) {
    BufferBlob *blob;
    STUBGEN_BLOBS_DO(CHECK_ADDRESS_IN_BLOB)
    return false;
  }
#undef CHECK_ADDRESS_IN_BLOB
// define getters for stub code blobs

// NOTE(review): assumes STUBGEN_BLOB_FIELD_NAME(blob_name) expands to
// _<blob_name>_stubs_code — confirm against stubDeclarations.hpp
#define DEFINE_BLOB_GETTER(blob_name) \
  static RuntimeBlob* blob_name ## _stubs_code() { return _ ## blob_name ## _stubs_code; }

  STUBGEN_BLOBS_DO(DEFINE_BLOB_GETTER);

#undef DEFINE_BLOB_GETTER

#ifdef ASSERT
  // provide a translation from stub id to its associated blob id
  static StubGenBlobId stub_to_blob(StubGenStubId stubId);
#endif

  // Debugging
  static jint    verify_oop_count()                        { return _verify_oop_count; }
  static jint*   verify_oop_count_addr()                   { return &_verify_oop_count; }
  // a subroutine for debugging the GC
  static address verify_oop_subroutine_entry_address()     { return (address)&_verify_oop_subroutine_entry; }

  // the generated Java call stub entry, cast to its function type
  static CallStub call_stub()                              { return CAST_TO_FN_PTR(CallStub, _call_stub_entry); }

  // pick the best arraycopy stub for element type t and the given
  // alignment/disjointness properties; also reports the stub's name
  static address select_arraycopy_function(BasicType t, bool aligned, bool disjoint, const char* &name, bool dest_uninitialized);

  // oop arraycopy selectors: dest_uninitialized picks the *_uninit
  // variant generated for copies into uninitialized destinations

  static address oop_arraycopy(bool dest_uninitialized = false) {
    return dest_uninitialized ? _oop_arraycopy_uninit : _oop_arraycopy;
  }

  static address oop_disjoint_arraycopy(bool dest_uninitialized = false) {
    return dest_uninitialized ?  _oop_disjoint_arraycopy_uninit : _oop_disjoint_arraycopy;
  }

  static address arrayof_oop_arraycopy(bool dest_uninitialized = false) {
    return dest_uninitialized ? _arrayof_oop_arraycopy_uninit : _arrayof_oop_arraycopy;
  }

  static address arrayof_oop_disjoint_arraycopy(bool dest_uninitialized = false) {
    return dest_uninitialized ? _arrayof_oop_disjoint_arraycopy_uninit : _arrayof_oop_disjoint_arraycopy;
  }

  // data cache writeback stubs, cast to their function types
  typedef void (*DataCacheWritebackStub)(void *);
  static DataCacheWritebackStub DataCacheWriteback_stub()         { return CAST_TO_FN_PTR(DataCacheWritebackStub,  _data_cache_writeback); }
  typedef void (*DataCacheWritebackSyncStub)(bool);
  static DataCacheWritebackSyncStub DataCacheWritebackSync_stub() { return CAST_TO_FN_PTR(DataCacheWritebackSyncStub,  _data_cache_writeback_sync); }

  static address checkcast_arraycopy(bool dest_uninitialized = false) {
    return dest_uninitialized ? _checkcast_arraycopy_uninit : _checkcast_arraycopy;
  }

  typedef void (*UnsafeArrayCopyStub)(const void* src, void* dst, size_t count);
  static UnsafeArrayCopyStub UnsafeArrayCopy_stub()         { return CAST_TO_FN_PTR(UnsafeArrayCopyStub,  _unsafe_arraycopy); }

  typedef void (*UnsafeSetMemoryStub)(void* dst, size_t count, char byte);
  static UnsafeSetMemoryStub UnsafeSetMemory_stub()         { return CAST_TO_FN_PTR(UnsafeSetMemoryStub,  _unsafe_setmemory); }

  // invoke the generated float -> half-float conversion stub; the WX
  // enable is needed on macOS/AArch64 before executing code-cache code
  static jshort f2hf(jfloat x) {
    assert(_f2hf != nullptr, "stub is not implemented on this platform");
    MACOS_AARCH64_ONLY(ThreadWXEnable wx(WXExec, Thread::current());) // About to call into code cache
    typedef jshort (*f2hf_stub_t)(jfloat x);
    return ((f2hf_stub_t)_f2hf)(x);
  }
  // invoke the generated half-float -> float conversion stub
  static jfloat hf2f(jshort x) {
    assert(_hf2f != nullptr, "stub is not implemented on this platform");
    MACOS_AARCH64_ONLY(ThreadWXEnable wx(WXExec, Thread::current());) // About to call into code cache
    typedef jfloat (*hf2f_stub_t)(jshort x);
    return ((hf2f_stub_t)_hf2f)(x);
  }

  // pick the fill stub for element type t and alignment; also reports
  // the stub's name
  static address select_fill_function(BasicType t, bool aligned, const char* &name);

  // Default versions of some of the arraycopy functions for platforms
  // which do not have specialized versions
  //
  static void jbyte_copy     (jbyte*  src, jbyte*  dest, size_t count);
  static void jshort_copy    (jshort* src, jshort* dest, size_t count);
  static void jint_copy      (jint*   src, jint*   dest, size_t count);
  static void jlong_copy     (jlong*  src, jlong*  dest, size_t count);
  static void oop_copy       (oop*    src, oop*    dest, size_t count);
  static void oop_copy_uninit(oop*    src, oop*    dest, size_t count);

  static void arrayof_jbyte_copy     (HeapWord* src, HeapWord* dest, size_t count);
  static void arrayof_jshort_copy    (HeapWord* src, HeapWord* dest, size_t count);
  static void arrayof_jint_copy      (HeapWord* src, HeapWord* dest, size_t count);
  static void arrayof_jlong_copy     (HeapWord* src, HeapWord* dest, size_t count);
  static void arrayof_oop_copy       (HeapWord* src, HeapWord* dest, size_t count);
  static void arrayof_oop_copy_uninit(HeapWord* src, HeapWord* dest, size_t count);

};
415 
416 #endif // SHARE_RUNTIME_STUBROUTINES_HPP