1 /*
2 * Copyright (c) 1997, 2025, Oracle and/or its affiliates. All rights reserved.
3 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
4 *
5 * This code is free software; you can redistribute it and/or modify it
6 * under the terms of the GNU General Public License version 2 only, as
7 * published by the Free Software Foundation.
8 *
9 * This code is distributed in the hope that it will be useful, but WITHOUT
10 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
11 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
12 * version 2 for more details (a copy is included in the LICENSE file that
13 * accompanied this code).
14 *
15 * You should have received a copy of the GNU General Public License version
16 * 2 along with this work; if not, write to the Free Software Foundation,
17 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
18 *
19 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
20 * or visit www.oracle.com if you need additional information or have any
21 * questions.
22 *
23 */
24
25 #ifndef SHARE_RUNTIME_STUBROUTINES_HPP
26 #define SHARE_RUNTIME_STUBROUTINES_HPP
27
28 #include "code/codeBlob.hpp"
29 #include "memory/allocation.hpp"
30 #include "prims/vectorSupport.hpp"
31 #include "runtime/frame.hpp"
32 #include "runtime/mutexLocker.hpp"
33 #include "runtime/stubCodeGenerator.hpp"
34 #include "runtime/stubInfo.hpp"
35 #include "runtime/threadWXSetters.inline.hpp"
36 #include "utilities/growableArray.hpp"
37 #include "utilities/macros.hpp"
38
39 // StubRoutines provides entry points to assembly routines used by
40 // compiled code and the run-time system. Platform-specific entry
41 // points are defined in the platform-specific inner class. Most
42 // routines have a single (main) entry point. However, a few routines
43 // do provide alternative entry points.
44 //
45 // Stub routines whose entries are advertised via class StubRoutines
46 // are generated in batches at well-defined stages during JVM init:
47 // initial stubs, continuation stubs, compiler stubs, final stubs.
48 // Each batch is embedded in a single, associated blob (an instance of
49 // BufferBlob) i.e. the blob to entry relationship is 1-m.
50 //
// Note that this contrasts with the much smaller number of stub
52 // routines generated via classes SharedRuntime, c1_Runtime1 and
53 // OptoRuntime. The latter routines are also generated at well-defined
54 // points during JVM init. However, each stub routine has its own
55 // unique blob (various subclasses of RuntimeBlob) i.e. the blob to
56 // entry relationship is 1-1. The difference arises because
57 // SharedRuntime routines may need to be relocatable or advertise
58 // properties such as a frame size via their blob.
59 //
60 // Staging of stub routine generation is needed in order to manage
61 // init dependencies between 1) stubs and other stubs or 2) stubs and
62 // other runtime components. For example, some exception throw stubs
63 // need to be generated before compiler stubs (such as the
// deoptimization stub) so that the latter can invoke the throw routine
65 // in bail-out code. Likewise, stubs that access objects (such as the
66 // object array copy stub) need to be created after initialization of
67 // some GC constants and generation of the GC barrier stubs they might
68 // need to invoke.
69 //
70 // Class scheme:
71 //
72 // platform-independent platform-dependent
73 //
74 // stubRoutines.hpp <-- included -- stubRoutines_<arch>.hpp
75 // ^ ^
76 // | |
77 // implements implements
78 // | |
79 // | |
80 // stubRoutines.cpp stubRoutines_<arch>.cpp
81 // stubGenerator_<arch>.cpp
82 //
83 // Note 1: The important thing is a clean decoupling between stub
84 // entry points (interfacing to the whole vm; i.e., 1-to-n
85 // relationship) and stub generators (interfacing only to
86 // the entry points implementation; i.e., 1-to-1 relationship).
87 // This significantly simplifies changes in the generator
88 // structure since the rest of the vm is not affected.
89 //
90 // Note 2: stubGenerator_<arch>.cpp contains a minimal portion of
91 // machine-independent code; namely the generator calls of
92 // the generator functions that are used platform-independently.
93 // However, it comes with the advantage of having a 1-file
94 // implementation of the generator. It should be fairly easy
95 // to change, should it become a problem later.
96 //
97 // Scheme for adding a new entry point:
98 //
99 // 1. determine if it's a platform-dependent or independent entry point
100 // a) if platform independent: make subsequent changes in the independent files
101 // b) if platform dependent: make subsequent changes in the dependent files
102 // 2. add a private instance variable holding the entry point address
103 // 3. add a public accessor function to the instance variable
104 // 4. implement the corresponding generator function in the platform-dependent
105 // stubGenerator_<arch>.cpp file and call the function in generate_all() of that file
106 // 5. ensure the entry is generated in the right blob to satisfy initialization
107 // dependencies between it and other stubs or runtime components.
108
// Table entry describing a code section of an unsafe-memory-access
// stub (e.g. a copy or set routine) that may fault when touching
// caller-supplied addresses. Each entry records the [start, end) pc
// range of the guarded code plus the pc at which to resume execution
// if a fault occurs inside that range (lookup helpers are implemented
// in stubRoutines.cpp; presumably consulted from the fault handler --
// confirm against callers).
class UnsafeMemoryAccess : public CHeapObj<mtCode> {
 private:
  address _start_pc;       // first pc of the guarded code section
  address _end_pc;         // pc just past the guarded code section
  address _error_exit_pc;  // pc to resume at after a fault in [start, end)
 public:
  // each table entry requires 3 addresses
  static const int COLUMN_COUNT = 3;
  static address _common_exit_stub_pc;   // shared exit stub pc (set once via setter below)
  static UnsafeMemoryAccess* _table;     // global table, allocated by create_table()
  static int _table_length;              // number of entries currently in use
  static int _table_max_length;          // capacity, fixed when create_table() runs
  UnsafeMemoryAccess() : _start_pc(nullptr), _end_pc(nullptr), _error_exit_pc(nullptr) {}
  void set_start_pc(address pc) { _start_pc = pc; }
  void set_end_pc(address pc) { _end_pc = pc; }
  void set_error_exit_pc(address pc) { _error_exit_pc = pc; }
  address start_pc() const { return _start_pc; }
  address end_pc() const { return _end_pc; }
  address error_exit_pc() const { return _error_exit_pc; }

  static void set_common_exit_stub_pc(address pc) { _common_exit_stub_pc = pc; }
  static address common_exit_stub_pc() { return _common_exit_stub_pc; }

  // Claim the next free table slot and record the given pcs in it.
  // Guarantees failure if the table is already at capacity; a null
  // start pc is a caller error (asserted).
  static UnsafeMemoryAccess* add_to_table(address start_pc, address end_pc, address error_exit_pc) {
    guarantee(_table_length < _table_max_length, "Incorrect UnsafeMemoryAccess::_table_max_length");
    UnsafeMemoryAccess* entry = &_table[_table_length];
    assert(start_pc != nullptr, "invalid start address");
    entry->set_start_pc(start_pc);
    entry->set_end_pc(end_pc);
    entry->set_error_exit_pc(error_exit_pc);

    _table_length++;
    return entry;
  }

  // True if pc lies within any recorded guarded section (see .cpp).
  static bool contains_pc(address pc);
  // Continuation pc for a fault at pc (semantics defined in .cpp).
  static address page_error_continue_pc(address pc);
  // One-time allocation of the global table with capacity max_size.
  static void create_table(int max_size);
  // Append to entries array start, end and exit pcs of all table
  // entries that identify a sub-interval of range (range_start,
  // range_end). Append nullptr if the exit pc is not in the range.
  static void collect_entries(address range_start, address range_end, GrowableArray<address>& entries);
};
152
// Scoped (RAII) helper for stub generators: brackets a stretch of
// generated code as an unsafe memory access region and manages the
// associated UnsafeMemoryAccess table entry. The exact bookkeeping
// controlled by add_entry / continue_at_scope_end / error_exit_pc is
// defined with the constructor and destructor in stubRoutines.cpp.
class UnsafeMemoryAccessMark : public StackObj {
 private:
  UnsafeMemoryAccess* _ucm_entry;  // table entry opened by this mark, if any
  StubCodeGenerator* _cgen;        // generator emitting the bracketed code
 public:
  UnsafeMemoryAccessMark(StubCodeGenerator* cgen, bool add_entry, bool continue_at_scope_end, address error_exit_pc = nullptr);
  ~UnsafeMemoryAccessMark();
};
161
// Static holder for the entry points of all batch-generated stub
// routines and the BufferBlobs that contain them. Fields, getters and
// per-blob initializers are expanded from the STUBGEN_*_DO macro
// tables declared in stubInfo.hpp (included transitively above).
class StubRoutines: AllStatic {

 public:
  // Dependencies
  friend class StubGenerator;
  friend class VMStructs;
#if INCLUDE_JVMCI
  friend class JVMCIVMStructs;
#endif

#include CPU_HEADER(stubRoutines)

  // Human-readable names for blob and stub ids (logging/debugging).
  static const char* get_blob_name(BlobId id);
  static const char* get_stub_name(StubId id);

  // declare blob fields -- one BufferBlob* per generated stub batch

#define DECLARE_BLOB_FIELD(blob_name) \
  static BufferBlob* STUBGEN_BLOB_FIELD_NAME(blob_name);

 private:
  STUBGEN_BLOBS_DO(DECLARE_BLOB_FIELD);

#undef DECLARE_BLOB_FIELD

  // declare fields to store entry addresses

#define DECLARE_ENTRY_FIELD(blob_name, stub_name, field_name, getter_name) \
  static address STUB_FIELD_NAME(field_name);

#define DECLARE_ENTRY_FIELD_INIT(blob_name, stub_name, field_name, getter_name, init_function) \
  DECLARE_ENTRY_FIELD(blob_name, stub_name, field_name, getter_name)

#define DECLARE_ENTRY_FIELD_ARRAY(blob_name, stub_name, field_name, getter_name, count) \
  static address STUB_FIELD_NAME(field_name)[count];

 private:
  STUBGEN_ENTRIES_DO(DECLARE_ENTRY_FIELD, DECLARE_ENTRY_FIELD_INIT, DECLARE_ENTRY_FIELD_ARRAY);

#undef DECLARE_ENTRY_FIELD_ARRAY
#undef DECLARE_ENTRY_FIELD_INIT
#undef DECLARE_ENTRY_FIELD

  // declare getters and setters for entry addresses

#define DEFINE_ENTRY_GETTER(blob_name, stub_name, field_name, getter_name) \
  static address getter_name() { return STUB_FIELD_NAME(field_name); } \

#define DEFINE_ENTRY_GETTER_INIT(blob_name, stub_name, field_name, getter_name, init_function) \
  DEFINE_ENTRY_GETTER(blob_name, stub_name, field_name, getter_name)

#define DEFINE_ENTRY_GETTER_ARRAY(blob_name, stub_name, field_name, getter_name, count) \
  static address getter_name(int idx) { \
    assert(idx < count, "out of bounds"); \
    return STUB_FIELD_NAME(field_name)[idx]; \
  } \

 public:
  STUBGEN_ENTRIES_DO(DEFINE_ENTRY_GETTER, DEFINE_ENTRY_GETTER_INIT, DEFINE_ENTRY_GETTER_ARRAY);

#undef DEFINE_ENTRY_GETTER_ARRAY
#undef DEFINE_ENTRY_GETTER_INIT
#undef DEFINE_ENTRY_GETTER

 public:

  // declare one initialize_<blob_name>_stubs() method per blob; these
  // are called at the corresponding JVM init stages (see file header)

#define DECLARE_BLOB_INIT_METHOD(blob_name) \
  static void initialize_ ## blob_name ## _stubs();

  STUBGEN_BLOBS_DO(DECLARE_BLOB_INIT_METHOD)

#undef DECLARE_BLOB_INIT_METHOD

 public:

  // Calls to Java -- signature of the generated call stub used to
  // enter Java code from the VM
  typedef void (*CallStub)(
    address link,
    intptr_t* result,
    int result_type, /* BasicType on 4 bytes */
    Method* method,
    address entry_point,
    intptr_t* parameters,
    int size_of_parameters,
    TRAPS
  );

  static jint _verify_oop_count;

 public:
  // this is used by x86_64 to expose string index stubs to the opto
  // library as a target to a call planted before back end lowering.
  // all other arches plant the call to the stub during back end
  // lowering and use arch-specific entries. we really need to
  // rationalise this at some point.

  static address _string_indexof_array[4];

  /* special case: stub employs array of entries */

  static bool is_stub_code(address addr) { return contains(addr); }

  // generate code to implement method contains

#define CHECK_ADDRESS_IN_BLOB(blob_name) \
  blob = STUBGEN_BLOB_FIELD_NAME(blob_name); \
  if (blob != nullptr && blob->blob_contains(addr)) { return true; }

  // True if addr lies within any of the generated stub blobs.
  static bool contains(address addr) {
    BufferBlob *blob;
    STUBGEN_BLOBS_DO(CHECK_ADDRESS_IN_BLOB)
    return false;
  }
#undef CHECK_ADDRESS_IN_BLOB

  // define getters for stub code blobs

#define DEFINE_BLOB_GETTER(blob_name) \
  static RuntimeBlob* blob_name ## _stubs_code() { return _ ## blob_name ## _stubs_code; }

  STUBGEN_BLOBS_DO(DEFINE_BLOB_GETTER);

#undef DEFINE_BLOB_GETTER

#ifdef ASSERT
  // Debug-only mapping from a stub id to its owning blob id.
  static BlobId stub_to_blob(StubId id);
#endif

#if INCLUDE_CDS
  // AOT initialization -- implementation is arch-specific
  static void init_AOTAddressTable();
#endif // INCLUDE_CDS

  // Debugging
  static jint verify_oop_count() { return _verify_oop_count; }
  static jint* verify_oop_count_addr() { return &_verify_oop_count; }
  // a subroutine for debugging the GC
  static address verify_oop_subroutine_entry_address() { return (address)&_verify_oop_subroutine_entry; }

  // Entry of the generated call stub, cast to its function type;
  // asserts that the stub has already been generated.
  static CallStub call_stub() { assert(_call_stub_entry != nullptr, ""); return CAST_TO_FN_PTR(CallStub, _call_stub_entry); }

  // Select the arraycopy stub entry matching the element type and the
  // aligned/disjoint/dest_uninitialized properties; the stub's name is
  // returned through the name out-parameter (see stubRoutines.cpp).
  static address select_arraycopy_function(BasicType t, bool aligned, bool disjoint, const char* &name, bool dest_uninitialized);

  // The *_uninit variants below skip GC pre/post work appropriate only
  // when the destination is known to be uninitialized.
  static address oop_arraycopy(bool dest_uninitialized = false) {
    return dest_uninitialized ? _oop_arraycopy_uninit : _oop_arraycopy;
  }

  static address oop_disjoint_arraycopy(bool dest_uninitialized = false) {
    return dest_uninitialized ? _oop_disjoint_arraycopy_uninit : _oop_disjoint_arraycopy;
  }

  static address arrayof_oop_arraycopy(bool dest_uninitialized = false) {
    return dest_uninitialized ? _arrayof_oop_arraycopy_uninit : _arrayof_oop_arraycopy;
  }

  static address arrayof_oop_disjoint_arraycopy(bool dest_uninitialized = false) {
    return dest_uninitialized ? _arrayof_oop_disjoint_arraycopy_uninit : _arrayof_oop_disjoint_arraycopy;
  }

  // These methods are implemented in architecture-specific code.
  // Any table that is returned must be allocated once-only in
  // foreign memory (or C heap) rather than generated in the code cache.
  static address crc_table_addr();
  static address crc32c_table_addr();

  typedef void (*DataCacheWritebackStub)(void *);
  static DataCacheWritebackStub DataCacheWriteback_stub() { return CAST_TO_FN_PTR(DataCacheWritebackStub, _data_cache_writeback); }
  typedef void (*DataCacheWritebackSyncStub)(bool);
  static DataCacheWritebackSyncStub DataCacheWritebackSync_stub() { return CAST_TO_FN_PTR(DataCacheWritebackSyncStub, _data_cache_writeback_sync); }

  static address checkcast_arraycopy(bool dest_uninitialized = false) {
    return dest_uninitialized ? _checkcast_arraycopy_uninit : _checkcast_arraycopy;
  }

  typedef void (*UnsafeArrayCopyStub)(const void* src, void* dst, size_t count);
  static UnsafeArrayCopyStub UnsafeArrayCopy_stub() { return CAST_TO_FN_PTR(UnsafeArrayCopyStub, _unsafe_arraycopy); }

  typedef void (*UnsafeSetMemoryStub)(void* dst, size_t count, char byte);
  static UnsafeSetMemoryStub UnsafeSetMemory_stub() { return CAST_TO_FN_PTR(UnsafeSetMemoryStub, _unsafe_setmemory); }

  // float -> half-float conversion via the generated stub
  static jshort f2hf(jfloat x) {
    assert(_f2hf != nullptr, "stub is not implemented on this platform");
    MACOS_AARCH64_ONLY(ThreadWXEnable wx(WXExec, Thread::current());) // About to call into code cache
    typedef jshort (*f2hf_stub_t)(jfloat x);
    return ((f2hf_stub_t)_f2hf)(x);
  }
  // half-float -> float conversion via the generated stub
  static jfloat hf2f(jshort x) {
    assert(_hf2f != nullptr, "stub is not implemented on this platform");
    MACOS_AARCH64_ONLY(ThreadWXEnable wx(WXExec, Thread::current());) // About to call into code cache
    typedef jfloat (*hf2f_stub_t)(jshort x);
    return ((hf2f_stub_t)_hf2f)(x);
  }

  // Select the fill stub entry for element type t; the stub's name is
  // returned through the name out-parameter (see stubRoutines.cpp).
  static address select_fill_function(BasicType t, bool aligned, const char* &name);

  // Default versions of some of the arraycopy functions for platforms
  // which do not have specialized versions
  //
  static void jbyte_copy (jbyte* src, jbyte* dest, size_t count);
  static void jshort_copy (jshort* src, jshort* dest, size_t count);
  static void jint_copy (jint* src, jint* dest, size_t count);
  static void jlong_copy (jlong* src, jlong* dest, size_t count);
  static void oop_copy (oop* src, oop* dest, size_t count);
  static void oop_copy_uninit(oop* src, oop* dest, size_t count);

  static void arrayof_jbyte_copy (HeapWord* src, HeapWord* dest, size_t count);
  static void arrayof_jshort_copy (HeapWord* src, HeapWord* dest, size_t count);
  static void arrayof_jint_copy (HeapWord* src, HeapWord* dest, size_t count);
  static void arrayof_jlong_copy (HeapWord* src, HeapWord* dest, size_t count);
  static void arrayof_oop_copy (HeapWord* src, HeapWord* dest, size_t count);
  static void arrayof_oop_copy_uninit(HeapWord* src, HeapWord* dest, size_t count);

};
374
375 #endif // SHARE_RUNTIME_STUBROUTINES_HPP