/*
 * Copyright (c) 1997, 2025, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#ifndef SHARE_RUNTIME_STUBROUTINES_HPP
#define SHARE_RUNTIME_STUBROUTINES_HPP

#include "code/codeBlob.hpp"
#include "memory/allocation.hpp"
#include "prims/vectorSupport.hpp"
#include "runtime/frame.hpp"
#include "runtime/mutexLocker.hpp"
#include "runtime/stubCodeGenerator.hpp"
#include "runtime/stubInfo.hpp"
#include "runtime/threadWXSetters.inline.hpp"
#include "utilities/growableArray.hpp"
#include "utilities/macros.hpp"

// StubRoutines provides entry points to assembly routines used by
// compiled code and the run-time system. Platform-specific entry
// points are defined in the platform-specific inner class. Most
// routines have a single (main) entry point. However, a few routines
// do provide alternative entry points.
//
// Stub routines whose entries are advertised via class StubRoutines
// are generated in batches at well-defined stages during JVM init:
// initial stubs, continuation stubs, compiler stubs, final stubs.
// Each batch is embedded in a single, associated blob (an instance of
// BufferBlob) i.e. the blob to entry relationship is 1-m.
//
// Note that this contrasts with the much smaller number of stub
// routines generated via classes SharedRuntime, c1_Runtime1 and
// OptoRuntime. The latter routines are also generated at well-defined
// points during JVM init. However, each stub routine has its own
// unique blob (various subclasses of RuntimeBlob) i.e. the blob to
// entry relationship is 1-1. The difference arises because
// SharedRuntime routines may need to be relocatable or advertise
// properties such as a frame size via their blob.
//
// Staging of stub routine generation is needed in order to manage
// init dependencies between 1) stubs and other stubs or 2) stubs and
// other runtime components. For example, some exception throw stubs
// need to be generated before compiler stubs (such as the
// deoptimization stub) so that the latter can invoke the throw
// routine in bail-out code. Likewise, stubs that access objects (such
// as the object array copy stub) need to be created after
// initialization of some GC constants and generation of the GC
// barrier stubs they might need to invoke.
//
// Class scheme:
//
//    platform-independent               platform-dependent
//
//    stubRoutines.hpp  <-- included --  stubRoutines_<arch>.hpp
//           ^                                  ^
//           |                                  |
//       implements                         implements
//           |                                  |
//           |                                  |
//    stubRoutines.cpp                   stubRoutines_<arch>.cpp
//                                       stubGenerator_<arch>.cpp
//
// Note 1: The important thing is a clean decoupling between stub
//         entry points (interfacing to the whole vm; i.e., 1-to-n
//         relationship) and stub generators (interfacing only to
//         the entry points implementation; i.e., 1-to-1 relationship).
//         This significantly simplifies changes in the generator
//         structure since the rest of the vm is not affected.
//
// Note 2: stubGenerator_<arch>.cpp contains a minimal portion of
//         machine-independent code; namely the generator calls of
//         the generator functions that are used platform-independently.
//         However, it comes with the advantage of having a 1-file
//         implementation of the generator. It should be fairly easy
//         to change, should it become a problem later.
//
// Scheme for adding a new entry point:
//
// 1. determine if it's a platform-dependent or independent entry point
//    a) if platform independent: make subsequent changes in the independent files
//    b) if platform   dependent: make subsequent changes in the   dependent files
// 2. add a private instance variable holding the entry point address
// 3. add a public accessor function to the instance variable
// 4. implement the corresponding generator function in the platform-dependent
//    stubGenerator_<arch>.cpp file and call the function in generate_all() of that file
// 5. ensure the entry is generated in the right blob to satisfy initialization
//    dependencies between it and other stubs or runtime components.
//
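// A minimal sketch of those steps for a hypothetical entry point
// "fancy_fill" (all names below are illustrative; real entries are
// registered through the STUBGEN_* tables used further down):
//
//   // steps 2 and 3: field plus accessor (platform-independent case)
//   static address _fancy_fill;
//   static address fancy_fill() { return _fancy_fill; }
//
//   // step 4: generator function in stubGenerator_<arch>.cpp
//   address generate_fancy_fill() {
//     StubCodeMark mark(this, ...);   // advertise the stub's name/id
//     address start = __ pc();
//     ...                             // emit the routine's code
//     __ ret(0);                      // return sequence varies by arch
//     return start;
//   }
//
//   // steps 4 and 5: call it from generate_all() while the blob that
//   // satisfies its init dependencies is being populated
//   StubRoutines::_fancy_fill = generate_fancy_fill();
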
class UnsafeMemoryAccess : public CHeapObj<mtCode> {
 private:
  address _start_pc;
  address _end_pc;
  address _error_exit_pc;
 public:
  static address           _common_exit_stub_pc;
  static UnsafeMemoryAccess* _table;
  static int               _table_length;
  static int               _table_max_length;
  UnsafeMemoryAccess() : _start_pc(nullptr), _end_pc(nullptr), _error_exit_pc(nullptr) {}
  void    set_start_pc(address pc)      { _start_pc = pc; }
  void    set_end_pc(address pc)        { _end_pc = pc; }
  void    set_error_exit_pc(address pc) { _error_exit_pc = pc; }
  address start_pc()      const { return _start_pc; }
  address end_pc()        const { return _end_pc; }
  address error_exit_pc() const { return _error_exit_pc; }

  static void    set_common_exit_stub_pc(address pc) { _common_exit_stub_pc = pc; }
  static address common_exit_stub_pc()               { return _common_exit_stub_pc; }

  static UnsafeMemoryAccess* add_to_table(address start_pc, address end_pc, address error_exit_pc) {
    guarantee(_table_length < _table_max_length, "Incorrect UnsafeMemoryAccess::_table_max_length");
    UnsafeMemoryAccess* entry = &_table[_table_length];
    entry->set_start_pc(start_pc);
    entry->set_end_pc(end_pc);
    entry->set_error_exit_pc(error_exit_pc);

    _table_length++;
    return entry;
  }

  static bool    contains_pc(address pc);
  static address page_error_continue_pc(address pc);
  static void    create_table(int max_size);
  // Append to the entries array the start, end and exit pcs of all
  // table entries that identify a sub-interval of the range
  // (range_start, range_end). Append nullptr if the exit pc is not
  // in the range.
  static void collect_entries(address range_start, address range_end, GrowableArray<address>& entries);
};
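
// A sketch of how a fault handler might consult this table when a
// memory error is raised inside an unsafe-access stub (the handler
// logic around it is illustrative only):
//
//   if (UnsafeMemoryAccess::contains_pc(pc)) {
//     // resume at the recorded error exit rather than crashing
//     address next_pc = UnsafeMemoryAccess::page_error_continue_pc(pc);
//     ... // redirect the thread's saved pc to next_pc
//   }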

class UnsafeMemoryAccessMark : public StackObj {
 private:
  UnsafeMemoryAccess*  _ucm_entry;
  StubCodeGenerator* _cgen;
 public:
  UnsafeMemoryAccessMark(StubCodeGenerator* cgen, bool add_entry, bool continue_at_scope_end, address error_exit_pc = nullptr);
  ~UnsafeMemoryAccessMark();
};
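
// Typical generator-side use (a sketch, not a specific stub): wrap the
// potentially faulting instructions in a mark so their pc range is
// recorded in the table above:
//
//   {
//     UnsafeMemoryAccessMark umam(this, add_entry, true);
//     __ ...  // emit the copy/set instructions that may fault
//   }         // destructor records the end pc of the marked region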

class StubRoutines: AllStatic {

public:
  // Dependencies
  friend class StubGenerator;
  friend class VMStructs;
  friend class AOTCodeAddressTable;
#if INCLUDE_JVMCI
  friend class JVMCIVMStructs;
#endif

#include CPU_HEADER(stubRoutines)

  static const char* get_blob_name(BlobId id);
  static const char* get_stub_name(StubId id);

// declare blob fields

#define DECLARE_BLOB_FIELD(blob_name) \
  static BufferBlob* STUBGEN_BLOB_FIELD_NAME(blob_name);

private:
  STUBGEN_BLOBS_DO(DECLARE_BLOB_FIELD);

#undef DECLARE_BLOB_FIELD
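
// For illustration: given the blob stages listed earlier (initial,
// continuation, compiler, final), the expansion for the initial blob
// is expected to be
//
//   static BufferBlob* _initial_stubs_code;
//
// i.e. the _<blob_name>_stubs_code naming that the blob getters below
// rely on.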

// declare fields to store entry addresses

#define DECLARE_ENTRY_FIELD(blob_name, stub_name, field_name, getter_name) \
  static address STUB_FIELD_NAME(field_name);

#define DECLARE_ENTRY_FIELD_INIT(blob_name, stub_name, field_name, getter_name, init_function) \
  DECLARE_ENTRY_FIELD(blob_name, stub_name, field_name, getter_name)

#define DECLARE_ENTRY_FIELD_ARRAY(blob_name, stub_name, field_name, getter_name, count) \
  static address STUB_FIELD_NAME(field_name)[count];

private:
  STUBGEN_ENTRIES_DO(DECLARE_ENTRY_FIELD, DECLARE_ENTRY_FIELD_INIT, DECLARE_ENTRY_FIELD_ARRAY);

#undef DECLARE_ENTRY_FIELD_ARRAY
#undef DECLARE_ENTRY_FIELD_INIT
#undef DECLARE_ENTRY_FIELD

// declare getters for entry addresses

#define DEFINE_ENTRY_GETTER(blob_name, stub_name, field_name, getter_name) \
  static address getter_name() { return STUB_FIELD_NAME(field_name); } \

#define DEFINE_ENTRY_GETTER_INIT(blob_name, stub_name, field_name, getter_name, init_function) \
  DEFINE_ENTRY_GETTER(blob_name, stub_name, field_name, getter_name)

#define DEFINE_ENTRY_GETTER_ARRAY(blob_name, stub_name, field_name, getter_name, count) \
  static address getter_name(int idx) {                                 \
    assert(idx < count, "out of bounds");                               \
    return STUB_FIELD_NAME(field_name)[idx];                            \
  }                                                                     \

public:
  STUBGEN_ENTRIES_DO(DEFINE_ENTRY_GETTER, DEFINE_ENTRY_GETTER_INIT, DEFINE_ENTRY_GETTER_ARRAY);

#undef DEFINE_ENTRY_GETTER_ARRAY
#undef DEFINE_ENTRY_GETTER_INIT
#undef DEFINE_ENTRY_GETTER
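
// For illustration, a single-address entry with field name _fancy_fill
// and getter fancy_fill (hypothetical names) expands to roughly:
//
//   static address _fancy_fill;                             // private
//   static address fancy_fill() { return _fancy_fill; }     // public
//
// while an _ARRAY entry instead declares an address[count] field plus
// the bounds-checked, index-taking getter shown above.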

public:

#define DECLARE_BLOB_INIT_METHOD(blob_name)     \
  static void initialize_ ## blob_name ## _stubs();

  STUBGEN_BLOBS_DO(DECLARE_BLOB_INIT_METHOD)

#undef DECLARE_BLOB_INIT_METHOD

public:

  // Calls to Java
  typedef void (*CallStub)(
    address   link,
    intptr_t* result,
    int       result_type, /* BasicType on 4 bytes */
    Method* method,
    address   entry_point,
    intptr_t* parameters,
    int       size_of_parameters,
    TRAPS
  );
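
  // A sketch of how the runtime enters Java through this stub; the
  // argument names are illustrative, and the real call site lives in
  // JavaCalls:
  //
  //   StubRoutines::call_stub()(
  //     (address)&link,              // frame link
  //     result_val_address,          // where the stub stores the result
  //     result_type,                 // BasicType, widened to 4 bytes
  //     method(),                    // Method* to invoke
  //     entry_point,                 // entry (e.g. interpreter) to run
  //     args->parameters(),          // flattened argument words
  //     args->size_of_parameters(),  // number of argument words
  //     CHECK);                      // TRAPS: propagates exceptions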

  static jint    _verify_oop_count;

public:
  // this is used by x86_64 to expose string index stubs to the opto
  // library as a target to a call planted before back end lowering.
  // all other arches plant the call to the stub during back end
  // lowering and use arch-specific entries. we really need to
  // rationalise this at some point.

  static address _string_indexof_array[4];

  /* special case: stub employs array of entries */

  static bool is_stub_code(address addr)                   { return contains(addr); }

  // generate code to implement method contains

#define CHECK_ADDRESS_IN_BLOB(blob_name) \
  blob = STUBGEN_BLOB_FIELD_NAME(blob_name); \
  if (blob != nullptr && blob->blob_contains(addr)) { return true; }

  static bool contains(address addr) {
    BufferBlob *blob;
    STUBGEN_BLOBS_DO(CHECK_ADDRESS_IN_BLOB)
    return false;
  }
#undef CHECK_ADDRESS_IN_BLOB
// define getters for stub code blobs

#define DEFINE_BLOB_GETTER(blob_name) \
  static RuntimeBlob* blob_name ## _stubs_code() { return _ ## blob_name ## _stubs_code; }

  STUBGEN_BLOBS_DO(DEFINE_BLOB_GETTER);

#undef DEFINE_BLOB_GETTER

#ifdef ASSERT
  static BlobId stub_to_blob(StubId id);
#endif

  // Debugging
  static jint    verify_oop_count()                        { return _verify_oop_count; }
  static jint*   verify_oop_count_addr()                   { return &_verify_oop_count; }
  // a subroutine for debugging the GC
  static address verify_oop_subroutine_entry_address()     { return (address)&_verify_oop_subroutine_entry; }

  static CallStub call_stub()                              { assert(_call_stub_entry != nullptr, ""); return CAST_TO_FN_PTR(CallStub, _call_stub_entry); }

  static address select_arraycopy_function(BasicType t, bool aligned, bool disjoint, const char* &name, bool dest_uninitialized);
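
  // Typical use (a sketch): a caller passes the element type and
  // layout flags and receives the stub address, with the stub's name
  // returned through the reference parameter for logging:
  //
  //   const char* name = nullptr;
  //   address stub = StubRoutines::select_arraycopy_function(
  //       T_INT, /*aligned*/ true, /*disjoint*/ true, name,
  //       /*dest_uninitialized*/ false);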

  static address oop_arraycopy(bool dest_uninitialized = false) {
    return dest_uninitialized ? _oop_arraycopy_uninit : _oop_arraycopy;
  }

  static address oop_disjoint_arraycopy(bool dest_uninitialized = false) {
    return dest_uninitialized ? _oop_disjoint_arraycopy_uninit : _oop_disjoint_arraycopy;
  }

  static address arrayof_oop_arraycopy(bool dest_uninitialized = false) {
    return dest_uninitialized ? _arrayof_oop_arraycopy_uninit : _arrayof_oop_arraycopy;
  }

  static address arrayof_oop_disjoint_arraycopy(bool dest_uninitialized = false) {
    return dest_uninitialized ? _arrayof_oop_disjoint_arraycopy_uninit : _arrayof_oop_disjoint_arraycopy;
  }

  // These methods are implemented in architecture-specific code.
  // Any table that is returned must be allocated once-only in
  // foreign memory (or C heap) rather than generated in the code cache.
  static address crc_table_addr();
  static address crc32c_table_addr();

  typedef void (*DataCacheWritebackStub)(void *);
  static DataCacheWritebackStub DataCacheWriteback_stub()         { return CAST_TO_FN_PTR(DataCacheWritebackStub,  _data_cache_writeback); }
  typedef void (*DataCacheWritebackSyncStub)(bool);
  static DataCacheWritebackSyncStub DataCacheWritebackSync_stub() { return CAST_TO_FN_PTR(DataCacheWritebackSyncStub,  _data_cache_writeback_sync); }

  static address checkcast_arraycopy(bool dest_uninitialized = false) {
    return dest_uninitialized ? _checkcast_arraycopy_uninit : _checkcast_arraycopy;
  }

  typedef void (*UnsafeArrayCopyStub)(const void* src, void* dst, size_t count);
  static UnsafeArrayCopyStub UnsafeArrayCopy_stub()         { return CAST_TO_FN_PTR(UnsafeArrayCopyStub,  _unsafe_arraycopy); }

  typedef void (*UnsafeSetMemoryStub)(void* dst, size_t count, char byte);
  static UnsafeSetMemoryStub UnsafeSetMemory_stub()         { return CAST_TO_FN_PTR(UnsafeSetMemoryStub,  _unsafe_setmemory); }
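
  // A sketch of invoking these stubs through the typed accessors
  // (pointer and count variables are illustrative; real callers sit
  // behind the Unsafe.copyMemory/setMemory intrinsics):
  //
  //   StubRoutines::UnsafeArrayCopy_stub()(src_addr, dst_addr, byte_count);
  //   StubRoutines::UnsafeSetMemory_stub()(dst_addr, byte_count, (char)value);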

  static jshort f2hf(jfloat x) {
    assert(_f2hf != nullptr, "stub is not implemented on this platform");
    MACOS_AARCH64_ONLY(ThreadWXEnable wx(WXExec, Thread::current());) // About to call into code cache
    typedef jshort (*f2hf_stub_t)(jfloat x);
    return ((f2hf_stub_t)_f2hf)(x);
  }
  static jfloat hf2f(jshort x) {
    assert(_hf2f != nullptr, "stub is not implemented on this platform");
    MACOS_AARCH64_ONLY(ThreadWXEnable wx(WXExec, Thread::current());) // About to call into code cache
    typedef jfloat (*hf2f_stub_t)(jshort x);
    return ((hf2f_stub_t)_hf2f)(x);
  }
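
  // Example round-trip through the half-precision conversion stubs (a
  // sketch; only valid on platforms where the stubs are implemented):
  //
  //   jshort h = StubRoutines::f2hf(1.5f);  // float -> IEEE 754 binary16
  //   jfloat f = StubRoutines::hf2f(h);     // back to float; f == 1.5f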

  static address select_fill_function(BasicType t, bool aligned, const char* &name);

  // Default versions of some of the arraycopy functions for platforms
  // which do not have specialized versions
  //
  static void jbyte_copy     (jbyte*  src, jbyte*  dest, size_t count);
  static void jshort_copy    (jshort* src, jshort* dest, size_t count);
  static void jint_copy      (jint*   src, jint*   dest, size_t count);
  static void jlong_copy     (jlong*  src, jlong*  dest, size_t count);
  static void oop_copy       (oop*    src, oop*    dest, size_t count);
  static void oop_copy_uninit(oop*    src, oop*    dest, size_t count);

  static void arrayof_jbyte_copy     (HeapWord* src, HeapWord* dest, size_t count);
  static void arrayof_jshort_copy    (HeapWord* src, HeapWord* dest, size_t count);
  static void arrayof_jint_copy      (HeapWord* src, HeapWord* dest, size_t count);
  static void arrayof_jlong_copy     (HeapWord* src, HeapWord* dest, size_t count);
  static void arrayof_oop_copy       (HeapWord* src, HeapWord* dest, size_t count);
  static void arrayof_oop_copy_uninit(HeapWord* src, HeapWord* dest, size_t count);

};

#endif // SHARE_RUNTIME_STUBROUTINES_HPP