1 /*
2 * Copyright (c) 1997, 2026, Oracle and/or its affiliates. All rights reserved.
3 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
4 *
5 * This code is free software; you can redistribute it and/or modify it
6 * under the terms of the GNU General Public License version 2 only, as
7 * published by the Free Software Foundation.
8 *
9 * This code is distributed in the hope that it will be useful, but WITHOUT
10 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
11 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
12 * version 2 for more details (a copy is included in the LICENSE file that
13 * accompanied this code).
14 *
15 * You should have received a copy of the GNU General Public License version
16 * 2 along with this work; if not, write to the Free Software Foundation,
17 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
18 *
19 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
20 * or visit www.oracle.com if you need additional information or have any
21 * questions.
22 *
23 */
24
25 #include "asm/codeBuffer.hpp"
26 #include "asm/macroAssembler.inline.hpp"
27 #include "memory/resourceArea.hpp"
28 #include "oops/access.inline.hpp"
29 #include "oops/klass.hpp"
30 #include "oops/oop.inline.hpp"
31 #include "prims/vectorSupport.hpp"
32 #include "runtime/continuation.hpp"
33 #include "runtime/interfaceSupport.inline.hpp"
34 #include "runtime/sharedRuntime.hpp"
35 #include "runtime/stubRoutines.hpp"
36 #include "runtime/timerTrace.hpp"
37 #include "utilities/align.hpp"
38 #include "utilities/copy.hpp"
39 #ifdef COMPILER2
40 #include "opto/runtime.hpp"
41 #endif
42
// Table of code regions in generated stubs that perform unsafe memory
// accesses, each paired with the pc to continue at if the access faults.
// Storage is allocated by create_table(); entries are appended via
// add_to_table() (see UnsafeMemoryAccessMark below).
UnsafeMemoryAccess* UnsafeMemoryAccess::_table = nullptr;
int UnsafeMemoryAccess::_table_length = 0;       // number of entries in use
int UnsafeMemoryAccess::_table_max_length = 0;   // allocated capacity of _table
// Shared error-exit stub used as the continuation for entries that do not
// carry their own in-stub exit pc (see collect_entries()).
address UnsafeMemoryAccess::_common_exit_stub_pc = nullptr;
47
// Implementation of StubRoutines - for a description of how to
// declare new blobs, stubs and entries, see stubDefinitions.hpp.
50
51 // Define fields used to store blobs
52
// Define one BufferBlob* field per stubgen blob; the field identifier is
// derived from the blob name via STUBGEN_BLOB_FIELD_NAME. All start out
// nullptr and are installed by the initialize_xxx_stubs() methods below.
#define DEFINE_STUBGEN_BLOB_FIELD(blob_name) \
  BufferBlob* StubRoutines:: STUBGEN_BLOB_FIELD_NAME(blob_name) = nullptr;

STUBGEN_BLOBS_DO(DEFINE_STUBGEN_BLOB_FIELD)

#undef DEFINE_STUBGEN_BLOB_FIELD
59
60 // Define fields used to store stubgen stub entries
61
// Define one address field per stubgen stub entry. Three shapes exist:
//  - plain entries, initialized to nullptr;
//  - entries with a static init function, initialized to its address;
//  - array entries holding `count` related entry points.
#define DEFINE_STUBGEN_ENTRY_FIELD(blob_name, stub_name, field_name, getter_name) \
  address StubRoutines:: STUB_FIELD_NAME(field_name) = nullptr;

#define DEFINE_STUBGEN_ENTRY_FIELD_INIT(blob_name, stub_name, field_name, getter_name, init_function) \
  address StubRoutines:: STUB_FIELD_NAME(field_name) = CAST_FROM_FN_PTR(address, init_function);

#define DEFINE_STUBGEN_ENTRY_FIELD_ARRAY(blob_name, stub_name, field_name, getter_name, count) \
  address StubRoutines:: STUB_FIELD_NAME(field_name)[count] = { nullptr };

STUBGEN_ENTRIES_DO(DEFINE_STUBGEN_ENTRY_FIELD, DEFINE_STUBGEN_ENTRY_FIELD_INIT, DEFINE_STUBGEN_ENTRY_FIELD_ARRAY)

#undef DEFINE_STUBGEN_ENTRY_FIELD_ARRAY
#undef DEFINE_STUBGEN_ENTRY_FIELD_INIT
#undef DEFINE_STUBGEN_ENTRY_FIELD
76
// NOTE(review): presumably incremented by the generated verify_oop stub —
// confirm against the platform stub generator.
jint StubRoutines::_verify_oop_count = 0;


// Entry points for string-indexof stub variants; NOTE(review): the meaning
// of the four slots is defined by the platform stub generator — confirm there.
address StubRoutines::_string_indexof_array[4] = { nullptr };
81
82 const char* StubRoutines::get_blob_name(BlobId id) {
83 assert(StubInfo::is_stubgen(id), "not a stubgen blob %s", StubInfo::name(id));
84 return StubInfo::name(id);
85 }
86
87 const char* StubRoutines::get_stub_name(StubId id) {
88 assert(StubInfo::is_stubgen(id), "not a stubgen stub %s", StubInfo::name(id));
89 return StubInfo::name(id);
90 }
91
92 #ifdef ASSERT
93 // translate a stub id to an associated blob id while checking that it
94 // is a stubgen stub
95
96 BlobId StubRoutines::stub_to_blob(StubId id) {
97 assert(StubInfo::is_stubgen(id), "not a stubgen stub %s", StubInfo::name(id));
98 return StubInfo::blob(id);
99 }
100
101 #endif // ASSERT
102
// TODO: update with 8343767
// Entry points used for inline-type calling convention support;
// NOTE(review): set up elsewhere — confirm against JDK-8343767.
address StubRoutines::_load_inline_type_fields_in_regs = nullptr;
address StubRoutines::_store_inline_type_fields_to_buf = nullptr;
106
107 // Initialization
108
109 extern void StubGenerator_generate(CodeBuffer* code, BlobId blob_id, AOTStubData* stub_data); // only interface to generators
110 void UnsafeMemoryAccess::create_table(int max_size) {
111 UnsafeMemoryAccess::_table = new UnsafeMemoryAccess[max_size];
112 UnsafeMemoryAccess::_table_max_length = max_size;
113 }
114
115 bool UnsafeMemoryAccess::contains_pc(address pc) {
116 assert(UnsafeMemoryAccess::_table != nullptr, "");
117 for (int i = 0; i < UnsafeMemoryAccess::_table_length; i++) {
118 UnsafeMemoryAccess* entry = &UnsafeMemoryAccess::_table[i];
119 if (pc >= entry->start_pc() && pc < entry->end_pc()) {
120 return true;
121 }
122 }
123 return false;
124 }
125
126 address UnsafeMemoryAccess::page_error_continue_pc(address pc) {
127 assert(UnsafeMemoryAccess::_table != nullptr, "");
128 for (int i = 0; i < UnsafeMemoryAccess::_table_length; i++) {
129 UnsafeMemoryAccess* entry = &UnsafeMemoryAccess::_table[i];
130 if (pc >= entry->start_pc() && pc < entry->end_pc()) {
131 return entry->error_exit_pc();
132 }
133 }
134 return nullptr;
135 }
136
137 // Used to retrieve mark regions that lie within a generated stub so
138 // they can be saved along with the stub and used to reinit the table
139 // when the stub is reloaded.
140
141 void UnsafeMemoryAccess::collect_entries(address range_start, address range_end, GrowableArray<address>& entries)
142 {
143 for (int i = 0; i < _table_length; i++) {
144 UnsafeMemoryAccess& e = _table[i];
145 assert((e._start_pc != nullptr &&
146 e._end_pc != nullptr &&
147 e._error_exit_pc != nullptr),
148 "search for entries found incomplete table entry");
149 if (e._start_pc >= range_start && e._end_pc <= range_end) {
150 assert(((e._error_exit_pc >= range_start &&
151 e._error_exit_pc <= range_end) ||
152 e._error_exit_pc == _common_exit_stub_pc),
153 "unexpected error exit pc");
154 entries.append(e._start_pc);
155 entries.append(e._end_pc);
156 // only return an exit pc when it is within the range of the stub
157 if (e._error_exit_pc != _common_exit_stub_pc) {
158 entries.append(e._error_exit_pc);
159 } else {
160 // an address outside the stub must be the common exit stub
161 // address which is marked with a null address
162 entries.append(nullptr);
163 }
164 }
165 }
166 }
167
168 static BufferBlob* initialize_stubs(BlobId blob_id,
169 int code_size, int max_aligned_stubs,
170 const char* timer_msg,
171 const char* buffer_name,
172 const char* assert_msg) {
173 assert(StubInfo::is_stubgen(blob_id), "not a stubgen blob %s", StubInfo::name(blob_id));
174 ResourceMark rm;
175 TraceTime timer(timer_msg, TRACETIME_LOG(Info, startuptime));
176 // If we are loading stubs we need to check if we can retrieve a
177 // blob and/or an associated archived stub descriptor from the
178 // AOTCodeCache. If we are storing stubs we need to create a blob
179 // but we still need a stub data descriptor to fill in during
180 // generation.
181 AOTStubData stub_data(blob_id);
182 AOTStubData* stub_data_p = nullptr;
183 LogTarget(Info, stubs) lt;
184
185 // we need to track and publish details of stubs in a stubgen blob
186 // when we are 1) using stubs from the cache 2) dumping stubs to the
187 // cache 3) generating stubs that may be needed by other cache
188 // elements.
189
190 if (stub_data.is_open()) {
191 stub_data_p = &stub_data;
192 }
193 if (code_size > 0 && stub_data.is_using()) {
194 // try to load the blob and details of its stubs from cache. if
195 // that fails we will still generate all necessary stubs
196 if (stub_data.load_code_blob()) {
197 if (lt.is_enabled()) {
198 LogStream ls(lt);
199 ls.print_cr("Found blob %s in AOT cache", StubInfo::name(blob_id));
200 }
201 }
202 }
203
204 // Even if we managed to load a blob from the AOT cache we still
205 // need to allocate a code blob and associated buffer. The AOT blob
206 // may not include all the stubs we need for this runtime.
207
208 // Add extra space for large CodeEntryAlignment
209 int size = code_size + CodeEntryAlignment * max_aligned_stubs;
210 BufferBlob* stubs_code = BufferBlob::create(buffer_name, size);
211 if (stubs_code == nullptr) {
212 // The compiler blob may be created late by a C2 compiler thread
213 // rather than during normal initialization by the initial thread.
214 // In that case we can tolerate an allocation failure because the
215 // compiler will have been shut down and we have no need of the
216 // blob.
217 // TODO: Ideally we would still like to try to use any AOT cached
218 // blob here but we don't have a fallback if we find that it is
219 // missing stubs we need so for now we exit. This should only
220 // happen in cases where we have a very small code cache.
221 if (Thread::current()->is_Compiler_thread()) {
222 assert(blob_id == BlobId::stubgen_compiler_id, "sanity");
223 assert(DelayCompilerStubsGeneration, "sanity");
224 log_warning(stubs)("%s\t not generated:\t no space left in CodeCache", buffer_name);
225 return nullptr;
226 }
227 vm_exit_out_of_memory(code_size, OOM_MALLOC_ERROR, "CodeCache: no room for %s", buffer_name);
228 }
229 CodeBuffer buffer(stubs_code);
230 short buffer_locs[20];
231 buffer.insts()->initialize_shared_locs((relocInfo*)buffer_locs,
232 sizeof(buffer_locs)/sizeof(relocInfo));
233 StubGenerator_generate(&buffer, blob_id, stub_data_p);
234 if (code_size == 0) {
235 assert(buffer.insts_size() == 0, "should not write into buffer when bob size declared as 0");
236 if (lt.is_enabled()) {
237 LogStream ls(lt);
238 ls.print_cr("%s\t not generated", buffer_name);
239 }
240 return nullptr;
241 }
242 // When new stubs added we need to make sure there is some space left
243 // to catch situation when we should increase size again.
244 assert(buffer.insts_remaining() > 200,
245 "increase %s, code_size: %d, used: %d, free: %d",
246 assert_msg, code_size, buffer.total_content_size(), buffer.insts_remaining());
247
248 if (stub_data.is_dumping()) {
249 // save the blob and publish the entry addresses
250 if (stub_data.store_code_blob(*stubs_code, &buffer)) {
251 if (lt.is_enabled()) {
252 LogStream ls(lt);
253 ls.print_cr("Stored blob '%s' to Startup Code Cache", buffer_name);
254 }
255 } else {
256 if (lt.is_enabled()) {
257 LogStream ls(lt);
258 ls.print_cr("Failed to store blob '%s' to Startup Code Cache", buffer_name);
259 }
260 }
261 } else if (stub_data.is_open()) {
262 // we either loaded some entries or generated new entries so
263 // publish all entries
264 //
265 // TODO - ensure we publish collect and publish the preuniverse
266 // stubs but don't try to save them
267 AOTCodeCache::publish_stub_addresses(*stubs_code, blob_id, &stub_data);
268 if (lt.is_enabled()) {
269 LogStream ls(lt);
270 ls.print_cr("Republished entries for blob '%s'", buffer_name);
271 }
272 }
273
274 // close off recording of any further stubgen generation
275 if (blob_id == BlobId::stubgen_final_id) {
276 AOTCodeCache::set_stubgen_stubs_complete();
277 }
278
279 if (lt.is_enabled()) {
280 LogStream ls(lt);
281 ls.print_cr("%s\t [" INTPTR_FORMAT ", " INTPTR_FORMAT "] used: %d, free: %d",
282 buffer_name, p2i(stubs_code->content_begin()), p2i(stubs_code->content_end()),
283 buffer.total_content_size(), buffer.insts_remaining());
284 }
285
286 return stubs_code;
287 }
288
289 // per blob initializer methods StubRoutines::initialize_xxx_stubs()
290
// Define StubRoutines::initialize_<blob>_stubs() for each stubgen blob.
// Each method is idempotent: it only builds the blob when its field is
// still nullptr. The declared code size comes from the per-blob
// _<blob>_code_size constant and the stub count bounds alignment padding.
#define DEFINE_BLOB_INIT_METHOD(blob_name)                              \
  void StubRoutines::initialize_ ## blob_name ## _stubs() {             \
    if (STUBGEN_BLOB_FIELD_NAME(blob_name) == nullptr) {                \
      BlobId blob_id = BlobId:: JOIN3(stubgen, blob_name, id);          \
      int size = _ ## blob_name ## _code_size;                          \
      int max_aligned_stubs = StubInfo::stub_count(blob_id);            \
      const char* timer_msg = "StubRoutines generation " # blob_name " stubs"; \
      const char* name = "StubRoutines (" # blob_name " stubs)";        \
      const char* assert_msg = "_" # blob_name "_code_size";            \
      STUBGEN_BLOB_FIELD_NAME(blob_name) =                              \
        initialize_stubs(blob_id, size, max_aligned_stubs, timer_msg,   \
                         name, assert_msg);                             \
    }                                                                   \
  }


STUBGEN_BLOBS_DO(DEFINE_BLOB_INIT_METHOD)

#undef DEFINE_BLOB_INIT_METHOD
310
311 // external driver API functions for per blob init: xxx_stubs_init()
312
// Define the free function <blob>_stubs_init() for each stubgen blob;
// these are the entry points called by the VM init sequence and simply
// forward to the corresponding StubRoutines initializer.
#define DEFINE_BLOB_INIT_FUNCTION(blob_name)            \
  void blob_name ## _stubs_init() {                     \
    StubRoutines::initialize_ ## blob_name ## _stubs(); \
  }

STUBGEN_BLOBS_DO(DEFINE_BLOB_INIT_FUNCTION)

#undef DEFINE_BLOB_INIT_FUNCTION
321
322
#if INCLUDE_CDS
// Non-generated external API init driver function: forwards to
// StubRoutines::init_AOTAddressTable() (CDS builds only).

void stubs_AOTAddressTable_init() { StubRoutines::init_AOTAddressTable(); }
#endif // INCLUDE_CDS
328
329 /*
330 * we generate the underlying driver function compiler_stubs_init()
331 * but this wrapper is needed to perform special handling depending on
332 * where the compiler init gets called from. it ought to be possible
333 * to remove this at some point and have a determinate ordered init.
334 */
335
336 void compiler_stubs_init(bool in_compiler_thread) {
337 if (in_compiler_thread && DelayCompilerStubsGeneration) {
338 // Temporarily revert state of stubs generation because
339 // it is called after final_stubs_init() finished
340 // during compiler runtime initialization.
341 // It is fine because these stubs are only used by
342 // compiled code and compiler is not running yet.
343 StubCodeDesc::unfreeze();
344 StubRoutines::initialize_compiler_stubs();
345 StubCodeDesc::freeze();
346 } else if (!in_compiler_thread && !DelayCompilerStubsGeneration) {
347 StubRoutines::initialize_compiler_stubs();
348 }
349 }
350
351 //
352 // Default versions of arraycopy functions
353 //
354
// Default conjoint byte array copy; delegates to Copy::conjoint_jbytes_atomic().
JRT_LEAF(void, StubRoutines::jbyte_copy(jbyte* src, jbyte* dest, size_t count))
#ifndef PRODUCT
  SharedRuntime::_jbyte_array_copy_ctr++;      // Slow-path byte array copy
#endif // !PRODUCT
  Copy::conjoint_jbytes_atomic(src, dest, count);
JRT_END
361
// Default conjoint short/char array copy; delegates to Copy::conjoint_jshorts_atomic().
JRT_LEAF(void, StubRoutines::jshort_copy(jshort* src, jshort* dest, size_t count))
#ifndef PRODUCT
  SharedRuntime::_jshort_array_copy_ctr++;     // Slow-path short/char array copy
#endif // !PRODUCT
  Copy::conjoint_jshorts_atomic(src, dest, count);
JRT_END
368
// Default conjoint int/float array copy; delegates to Copy::conjoint_jints_atomic().
JRT_LEAF(void, StubRoutines::jint_copy(jint* src, jint* dest, size_t count))
#ifndef PRODUCT
  SharedRuntime::_jint_array_copy_ctr++;       // Slow-path int/float array copy
#endif // !PRODUCT
  Copy::conjoint_jints_atomic(src, dest, count);
JRT_END
375
// Default conjoint long/double array copy; delegates to Copy::conjoint_jlongs_atomic().
JRT_LEAF(void, StubRoutines::jlong_copy(jlong* src, jlong* dest, size_t count))
#ifndef PRODUCT
  SharedRuntime::_jlong_array_copy_ctr++;      // Slow-path long/double array copy
#endif // !PRODUCT
  Copy::conjoint_jlongs_atomic(src, dest, count);
JRT_END
382
// Default conjoint oop array copy; goes through ArrayAccess so the
// appropriate GC barriers are applied. Callers must pass count != 0.
JRT_LEAF(void, StubRoutines::oop_copy(oop* src, oop* dest, size_t count))
#ifndef PRODUCT
  SharedRuntime::_oop_array_copy_ctr++;        // Slow-path oop array copy
#endif // !PRODUCT
  assert(count != 0, "count should be non-zero");
  ArrayAccess<>::oop_arraycopy_raw((HeapWord*)src, (HeapWord*)dest, count);
JRT_END
390
// As oop_copy(), but for destinations known to be uninitialized
// (IS_DEST_UNINITIALIZED decorator). Callers must pass count != 0.
JRT_LEAF(void, StubRoutines::oop_copy_uninit(oop* src, oop* dest, size_t count))
#ifndef PRODUCT
  SharedRuntime::_oop_array_copy_ctr++;        // Slow-path oop array copy
#endif // !PRODUCT
  assert(count != 0, "count should be non-zero");
  ArrayAccess<IS_DEST_UNINITIALIZED>::oop_arraycopy_raw((HeapWord*)src, (HeapWord*)dest, count);
JRT_END
398
// Default conjoint byte array copy for HeapWord-aligned operands.
JRT_LEAF(void, StubRoutines::arrayof_jbyte_copy(HeapWord* src, HeapWord* dest, size_t count))
#ifndef PRODUCT
  SharedRuntime::_jbyte_array_copy_ctr++;      // Slow-path byte array copy
#endif // !PRODUCT
  Copy::arrayof_conjoint_jbytes(src, dest, count);
JRT_END
405
// Default conjoint short/char array copy for HeapWord-aligned operands.
JRT_LEAF(void, StubRoutines::arrayof_jshort_copy(HeapWord* src, HeapWord* dest, size_t count))
#ifndef PRODUCT
  SharedRuntime::_jshort_array_copy_ctr++;     // Slow-path short/char array copy
#endif // !PRODUCT
  Copy::arrayof_conjoint_jshorts(src, dest, count);
JRT_END
412
// Default conjoint int/float array copy for HeapWord-aligned operands.
JRT_LEAF(void, StubRoutines::arrayof_jint_copy(HeapWord* src, HeapWord* dest, size_t count))
#ifndef PRODUCT
  SharedRuntime::_jint_array_copy_ctr++;       // Slow-path int/float array copy
#endif // !PRODUCT
  Copy::arrayof_conjoint_jints(src, dest, count);
JRT_END
419
// Default conjoint long/double array copy for HeapWord-aligned operands.
JRT_LEAF(void, StubRoutines::arrayof_jlong_copy(HeapWord* src, HeapWord* dest, size_t count))
#ifndef PRODUCT
  SharedRuntime::_jlong_array_copy_ctr++;      // Slow-path long/double array copy
#endif // !PRODUCT
  Copy::arrayof_conjoint_jlongs(src, dest, count);
JRT_END
426
// Default conjoint oop array copy for HeapWord-aligned operands; goes
// through ArrayAccess so GC barriers apply. Callers must pass count != 0.
JRT_LEAF(void, StubRoutines::arrayof_oop_copy(HeapWord* src, HeapWord* dest, size_t count))
#ifndef PRODUCT
  SharedRuntime::_oop_array_copy_ctr++;        // Slow-path oop array copy
#endif // !PRODUCT
  assert(count != 0, "count should be non-zero");
  ArrayAccess<ARRAYCOPY_ARRAYOF>::oop_arraycopy_raw(src, dest, count);
JRT_END
434
// As arrayof_oop_copy(), but for destinations known to be uninitialized
// (IS_DEST_UNINITIALIZED decorator). Callers must pass count != 0.
JRT_LEAF(void, StubRoutines::arrayof_oop_copy_uninit(HeapWord* src, HeapWord* dest, size_t count))
#ifndef PRODUCT
  SharedRuntime::_oop_array_copy_ctr++;        // Slow-path oop array copy
#endif // !PRODUCT
  assert(count != 0, "count should be non-zero");
  ArrayAccess<ARRAYCOPY_ARRAYOF | IS_DEST_UNINITIALIZED>::oop_arraycopy_raw(src, dest, count);
JRT_END
442
443 address StubRoutines::select_fill_function(BasicType t, bool aligned, const char* &name) {
444 #define RETURN_STUB(xxx_fill) { \
445 name = #xxx_fill; \
446 return StubRoutines::xxx_fill(); }
447
448 switch (t) {
449 case T_BYTE:
450 case T_BOOLEAN:
451 if (!aligned) RETURN_STUB(jbyte_fill);
452 RETURN_STUB(arrayof_jbyte_fill);
453 case T_CHAR:
454 case T_SHORT:
455 if (!aligned) RETURN_STUB(jshort_fill);
456 RETURN_STUB(arrayof_jshort_fill);
457 case T_INT:
458 case T_FLOAT:
459 if (!aligned) RETURN_STUB(jint_fill);
460 RETURN_STUB(arrayof_jint_fill);
461 case T_DOUBLE:
462 case T_LONG:
463 case T_ARRAY:
464 case T_OBJECT:
465 case T_NARROWOOP:
466 case T_NARROWKLASS:
467 case T_ADDRESS:
468 case T_VOID:
469 // Currently unsupported
470 return nullptr;
471
472 default:
473 ShouldNotReachHere();
474 return nullptr;
475 }
476
477 #undef RETURN_STUB
478 }
479
// Constants used to compose the selector index in
// select_arraycopy_function(): selector = alignment bit + disjoint bit,
// yielding the four values 0..3.
enum {
  COPYFUNC_UNALIGNED = 0,
  COPYFUNC_ALIGNED = 1,                 // src, dest aligned to HeapWordSize
  COPYFUNC_CONJOINT = 0,
  COPYFUNC_DISJOINT = 2                 // src != dest, or transfer can descend
};
487
488 // Note: The condition "disjoint" applies also for overlapping copies
489 // where an descending copy is permitted (i.e., dest_offset <= src_offset).
// Select the arraycopy stub for element type t given alignment,
// disjointness and (for oop copies) destination-initialization state.
// Sets name to the stub's printable name and returns its entry point.
address
StubRoutines::select_arraycopy_function(BasicType t, bool aligned, bool disjoint, const char* &name, bool dest_uninitialized) {
  // selector takes one of the four values 0..3 (see the COPYFUNC enum)
  int selector =
    (aligned ? COPYFUNC_ALIGNED : COPYFUNC_UNALIGNED) +
    (disjoint ? COPYFUNC_DISJOINT : COPYFUNC_CONJOINT);

#define RETURN_STUB(xxx_arraycopy) { \
  name = #xxx_arraycopy; \
  return StubRoutines::xxx_arraycopy(); }

#define RETURN_STUB_PARM(xxx_arraycopy, parm) { \
  name = parm ? #xxx_arraycopy "_uninit": #xxx_arraycopy; \
  return StubRoutines::xxx_arraycopy(parm); }

  // Each inner switch covers all four selector values and always
  // returns, so the lexical fall-through between the outer type cases
  // is unreachable.
  switch (t) {
  case T_BYTE:
  case T_BOOLEAN:
    switch (selector) {
    case COPYFUNC_CONJOINT | COPYFUNC_UNALIGNED:  RETURN_STUB(jbyte_arraycopy);
    case COPYFUNC_CONJOINT | COPYFUNC_ALIGNED:    RETURN_STUB(arrayof_jbyte_arraycopy);
    case COPYFUNC_DISJOINT | COPYFUNC_UNALIGNED:  RETURN_STUB(jbyte_disjoint_arraycopy);
    case COPYFUNC_DISJOINT | COPYFUNC_ALIGNED:    RETURN_STUB(arrayof_jbyte_disjoint_arraycopy);
    }
  case T_CHAR:
  case T_SHORT:
    switch (selector) {
    case COPYFUNC_CONJOINT | COPYFUNC_UNALIGNED:  RETURN_STUB(jshort_arraycopy);
    case COPYFUNC_CONJOINT | COPYFUNC_ALIGNED:    RETURN_STUB(arrayof_jshort_arraycopy);
    case COPYFUNC_DISJOINT | COPYFUNC_UNALIGNED:  RETURN_STUB(jshort_disjoint_arraycopy);
    case COPYFUNC_DISJOINT | COPYFUNC_ALIGNED:    RETURN_STUB(arrayof_jshort_disjoint_arraycopy);
    }
  case T_INT:
  case T_FLOAT:
    switch (selector) {
    case COPYFUNC_CONJOINT | COPYFUNC_UNALIGNED:  RETURN_STUB(jint_arraycopy);
    case COPYFUNC_CONJOINT | COPYFUNC_ALIGNED:    RETURN_STUB(arrayof_jint_arraycopy);
    case COPYFUNC_DISJOINT | COPYFUNC_UNALIGNED:  RETURN_STUB(jint_disjoint_arraycopy);
    case COPYFUNC_DISJOINT | COPYFUNC_ALIGNED:    RETURN_STUB(arrayof_jint_disjoint_arraycopy);
    }
  case T_DOUBLE:
  case T_LONG:
    switch (selector) {
    case COPYFUNC_CONJOINT | COPYFUNC_UNALIGNED:  RETURN_STUB(jlong_arraycopy);
    case COPYFUNC_CONJOINT | COPYFUNC_ALIGNED:    RETURN_STUB(arrayof_jlong_arraycopy);
    case COPYFUNC_DISJOINT | COPYFUNC_UNALIGNED:  RETURN_STUB(jlong_disjoint_arraycopy);
    case COPYFUNC_DISJOINT | COPYFUNC_ALIGNED:    RETURN_STUB(arrayof_jlong_disjoint_arraycopy);
    }
  case T_ARRAY:
  case T_OBJECT:
    switch (selector) {
    case COPYFUNC_CONJOINT | COPYFUNC_UNALIGNED:  RETURN_STUB_PARM(oop_arraycopy, dest_uninitialized);
    case COPYFUNC_CONJOINT | COPYFUNC_ALIGNED:    RETURN_STUB_PARM(arrayof_oop_arraycopy, dest_uninitialized);
    case COPYFUNC_DISJOINT | COPYFUNC_UNALIGNED:  RETURN_STUB_PARM(oop_disjoint_arraycopy, dest_uninitialized);
    case COPYFUNC_DISJOINT | COPYFUNC_ALIGNED:    RETURN_STUB_PARM(arrayof_oop_disjoint_arraycopy, dest_uninitialized);
    }
  default:
    ShouldNotReachHere();
    return nullptr;
  }

#undef RETURN_STUB
#undef RETURN_STUB_PARM
}
553
554 UnsafeMemoryAccessMark::UnsafeMemoryAccessMark(StubCodeGenerator* cgen, bool add_entry, bool continue_at_scope_end, address error_exit_pc) {
555 _cgen = cgen;
556 _ucm_entry = nullptr;
557 if (add_entry) {
558 address err_exit_pc = nullptr;
559 if (!continue_at_scope_end) {
560 err_exit_pc = error_exit_pc != nullptr ? error_exit_pc : UnsafeMemoryAccess::common_exit_stub_pc();
561 }
562 assert(err_exit_pc != nullptr || continue_at_scope_end, "error exit not set");
563 _ucm_entry = UnsafeMemoryAccess::add_to_table(_cgen->assembler()->pc(), nullptr, err_exit_pc);
564 }
565 }
566
567 UnsafeMemoryAccessMark::~UnsafeMemoryAccessMark() {
568 if (_ucm_entry != nullptr) {
569 _ucm_entry->set_end_pc(_cgen->assembler()->pc());
570 if (_ucm_entry->error_exit_pc() == nullptr) {
571 _ucm_entry->set_error_exit_pc(_cgen->assembler()->pc());
572 }
573 }
574 }