1 /*
  2  * Copyright (c) 1997, 2025, Oracle and/or its affiliates. All rights reserved.
  3  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  4  *
  5  * This code is free software; you can redistribute it and/or modify it
  6  * under the terms of the GNU General Public License version 2 only, as
  7  * published by the Free Software Foundation.
  8  *
  9  * This code is distributed in the hope that it will be useful, but WITHOUT
 10  * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 11  * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
 12  * version 2 for more details (a copy is included in the LICENSE file that
 13  * accompanied this code).
 14  *
 15  * You should have received a copy of the GNU General Public License version
 16  * 2 along with this work; if not, write to the Free Software Foundation,
 17  * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 18  *
 19  * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 20  * or visit www.oracle.com if you need additional information or have any
 21  * questions.
 22  *
 23  */
 24 
 25 #include "asm/codeBuffer.hpp"
 26 #include "asm/macroAssembler.inline.hpp"
 27 #include "memory/resourceArea.hpp"
 28 #include "oops/access.inline.hpp"
 29 #include "oops/klass.hpp"
 30 #include "oops/oop.inline.hpp"
 31 #include "prims/vectorSupport.hpp"
 32 #include "runtime/continuation.hpp"
 33 #include "runtime/interfaceSupport.inline.hpp"
 34 #include "runtime/sharedRuntime.hpp"
 35 #include "runtime/stubRoutines.hpp"
 36 #include "runtime/timerTrace.hpp"
 37 #include "utilities/align.hpp"
 38 #include "utilities/copy.hpp"
 39 #ifdef COMPILER2
 40 #include "opto/runtime.hpp"
 41 #endif
 42 
// Backing table recording the code regions of generated stubs that
// perform unsafe memory accesses, together with the pc to resume at
// when such an access faults. Entries are added during stub
// generation (see UnsafeMemoryAccess::add_to_table below).
UnsafeMemoryAccess* UnsafeMemoryAccess::_table                  = nullptr;
int UnsafeMemoryAccess::_table_length                           = 0;  // entries currently in use
int UnsafeMemoryAccess::_table_max_length                       = 0;  // allocated capacity (set by create_table)
address UnsafeMemoryAccess::_common_exit_stub_pc                = nullptr; // shared error-exit stub pc
 47 
// Implementation of StubRoutines - for a description of how to
// declare new blobs, stubs and entries, see stubDefinitions.hpp.
 50 
 51 // Define fields used to store blobs
 52 
// Expands to one null-initialized BufferBlob* field definition per
// stubgen blob; the blobs are created lazily by the generated
// initialize_<blob>_stubs() methods further down this file.
#define DEFINE_STUBGEN_BLOB_FIELD(blob_name)                            \
  BufferBlob* StubRoutines:: STUBGEN_BLOB_FIELD_NAME(blob_name) = nullptr;

STUBGEN_BLOBS_DO(DEFINE_STUBGEN_BLOB_FIELD)

#undef DEFINE_STUBGEN_BLOB_FIELD
 59 
 60 // Define fields used to store stubgen stub entries
 61 
// One null-initialized address field per stubgen stub entry.
#define DEFINE_STUBGEN_ENTRY_FIELD(blob_name, stub_name, field_name, getter_name) \
  address StubRoutines:: STUB_FIELD_NAME(field_name) = nullptr;

// Variant for entries with a preset fallback: the field starts out
// pointing at init_function rather than nullptr.
#define DEFINE_STUBGEN_ENTRY_FIELD_INIT(blob_name, stub_name, field_name, getter_name, init_function) \
  address StubRoutines:: STUB_FIELD_NAME(field_name) = CAST_FROM_FN_PTR(address, init_function);

// Variant for array-valued entries: count slots, all null-initialized.
#define DEFINE_STUBGEN_ENTRY_FIELD_ARRAY(blob_name, stub_name, field_name, getter_name, count) \
  address StubRoutines:: STUB_FIELD_NAME(field_name)[count] = { nullptr };

STUBGEN_ENTRIES_DO(DEFINE_STUBGEN_ENTRY_FIELD, DEFINE_STUBGEN_ENTRY_FIELD_INIT, DEFINE_STUBGEN_ENTRY_FIELD_ARRAY)

#undef DEFINE_STUBGEN_ENTRY_FIELD_ARRAY
#undef DEFINE_STUBGEN_ENTRY_FIELD_INIT
#undef DEFINE_STUBGEN_ENTRY_FIELD
 76 
// Counter slot shared with the verify_oop stubs (incremented by
// generated code — see the platform stub generators; not visible here).
jint    StubRoutines::_verify_oop_count                         = 0;


// Entry array for the string-indexof stub variants; presumably filled
// in by the platform stub generators — not populated in this file.
address StubRoutines::_string_indexof_array[4]   =    { nullptr };
 81 
 82 const char* StubRoutines::get_blob_name(BlobId id) {
 83   assert(StubInfo::is_stubgen(id), "not a stubgen blob %s", StubInfo::name(id));
 84   return StubInfo::name(id);
 85 }
 86 
 87 const char* StubRoutines::get_stub_name(StubId id) {
 88   assert(StubInfo::is_stubgen(id), "not a stubgen stub %s", StubInfo::name(id));
 89   return StubInfo::name(id);
 90 }
 91 
 92 #ifdef ASSERT
 93 // translate a stub id to an associated blob id while checking that it
 94 // is a stubgen stub
 95 
 96 BlobId StubRoutines::stub_to_blob(StubId id) {
 97   assert(StubInfo::is_stubgen(id), "not a stubgen stub %s", StubInfo::name(id));
 98   return StubInfo::blob(id);
 99 }
100 
101 #endif // ASSERT
102 
103 // Initialization
104 
105 extern void StubGenerator_generate(CodeBuffer* code, BlobId blob_id, AOTStubData* stub_data); // only interface to generators
106 void UnsafeMemoryAccess::create_table(int max_size) {
107   UnsafeMemoryAccess::_table = new UnsafeMemoryAccess[max_size];
108   UnsafeMemoryAccess::_table_max_length = max_size;
109 }
110 
111 bool UnsafeMemoryAccess::contains_pc(address pc) {
112   assert(UnsafeMemoryAccess::_table != nullptr, "");
113   for (int i = 0; i < UnsafeMemoryAccess::_table_length; i++) {
114     UnsafeMemoryAccess* entry = &UnsafeMemoryAccess::_table[i];
115     if (pc >= entry->start_pc() && pc < entry->end_pc()) {
116       return true;
117     }
118   }
119   return false;
120 }
121 
122 address UnsafeMemoryAccess::page_error_continue_pc(address pc) {
123   assert(UnsafeMemoryAccess::_table != nullptr, "");
124   for (int i = 0; i < UnsafeMemoryAccess::_table_length; i++) {
125     UnsafeMemoryAccess* entry = &UnsafeMemoryAccess::_table[i];
126     if (pc >= entry->start_pc() && pc < entry->end_pc()) {
127       return entry->error_exit_pc();
128     }
129   }
130   return nullptr;
131 }
132 
133 // Used to retrieve mark regions that lie within a generated stub so
134 // they can be saved along with the stub and used to reinit the table
135 // when the stub is reloaded.
136 
137 void UnsafeMemoryAccess::collect_entries(address range_start, address range_end, GrowableArray<address>& entries)
138 {
139   for (int i = 0; i < _table_length; i++) {
140     UnsafeMemoryAccess& e = _table[i];
141     assert((e._start_pc != nullptr &&
142             e._end_pc != nullptr &&
143             e._error_exit_pc != nullptr),
144            "search for entries found incomplete table entry");
145     if (e._start_pc >= range_start && e._end_pc <= range_end) {
146       assert(((e._error_exit_pc >= range_start &&
147                e._error_exit_pc <= range_end) ||
148               e._error_exit_pc == _common_exit_stub_pc),
149              "unexpected error exit pc");
150       entries.append(e._start_pc);
151       entries.append(e._end_pc);
152       // only return an exit pc when it is within the range of the stub
153       if (e._error_exit_pc != _common_exit_stub_pc) {
154         entries.append(e._error_exit_pc);
155       } else {
156         // an address outside the stub must be the common exit stub
157         // address which is marked with a null address
158         entries.append(nullptr);
159       }
160     }
161   }
162 }
163 
164 static BufferBlob* initialize_stubs(BlobId blob_id,
165                                     int code_size, int max_aligned_stubs,
166                                     const char* timer_msg,
167                                     const char* buffer_name,
168                                     const char* assert_msg) {
169   assert(StubInfo::is_stubgen(blob_id), "not a stubgen blob %s", StubInfo::name(blob_id));
170   ResourceMark rm;
171   TraceTime timer(timer_msg, TRACETIME_LOG(Info, startuptime));
172   // If we are loading stubs we need to check if we can retrieve a
173   // blob and/or an associated archived stub descriptor from the
174   // AOTCodeCache. If we are storing stubs we need to create a blob
175   // but we still need a stub data descriptor to fill in during
176   // generation.
177   AOTStubData stub_data(blob_id);
178   AOTStubData* stub_data_p = nullptr;
179   LogTarget(Info, stubs) lt;
180 
181   // we need to track and publish details of stubs in a stubgen blob
182   // when we are 1) using stubs from the cache 2) dumping stubs to the
183   // cache 3) generating stubs that may be needed by other cache
184   // elements.
185 
186   if (stub_data.is_open()) {
187     stub_data_p = &stub_data;
188   }
189   if (code_size > 0 && stub_data.is_using()) {
190     // try to load the blob and details of its stubs from cache. if
191     // that fails we will still generate all necessary stubs
192     if (stub_data.load_code_blob()) {
193       if (lt.is_enabled()) {
194         LogStream ls(lt);
195         ls.print_cr("Found blob %s in AOT cache", StubInfo::name(blob_id));
196       }
197     }
198   }
199 
200   // Even if we managed to load a blob from the AOT cache we still
201   // need to allocate a code blob and associated buffer. The AOT blob
202   // may not include all the stubs we need for this runtime.
203 
204   // Add extra space for large CodeEntryAlignment
205   int size = code_size + CodeEntryAlignment * max_aligned_stubs;
206   BufferBlob* stubs_code = BufferBlob::create(buffer_name, size);
207   if (stubs_code == nullptr) {
208     // The compiler blob may be created late by a C2 compiler thread
209     // rather than during normal initialization by the initial thread.
210     // In that case we can tolerate an allocation failure because the
211     // compiler will have been shut down and we have no need of the
212     // blob.
213     // TODO: Ideally we would still like to try to use any AOT cached
214     // blob here but we don't have a fallback if we find that it is
215     // missing stubs we need so for now we exit. This should only
216     // happen in cases where we have a very small code cache.
217     if (Thread::current()->is_Compiler_thread()) {
218       assert(blob_id == BlobId::stubgen_compiler_id, "sanity");
219       assert(DelayCompilerStubsGeneration, "sanity");
220       log_warning(stubs)("%s\t not generated:\t no space left in CodeCache", buffer_name);
221       return nullptr;
222     }
223     vm_exit_out_of_memory(code_size, OOM_MALLOC_ERROR, "CodeCache: no room for %s", buffer_name);
224   }
225   CodeBuffer buffer(stubs_code);
226   short buffer_locs[20];
227   buffer.insts()->initialize_shared_locs((relocInfo*)buffer_locs,
228                                          sizeof(buffer_locs)/sizeof(relocInfo));
229   StubGenerator_generate(&buffer, blob_id, stub_data_p);
230   if (code_size == 0) {
231     assert(buffer.insts_size() == 0, "should not write into buffer when bob size declared as 0");
232     if (lt.is_enabled()) {
233       LogStream ls(lt);
234       ls.print_cr("%s\t not generated", buffer_name);
235     }
236     return nullptr;
237   }
238   // When new stubs added we need to make sure there is some space left
239   // to catch situation when we should increase size again.
240   assert(buffer.insts_remaining() > 200,
241          "increase %s, code_size: %d, used: %d, free: %d",
242          assert_msg, code_size, buffer.total_content_size(), buffer.insts_remaining());
243 
244   if (stub_data.is_dumping()) {
245     // save the blob and publish the entry addresses
246     if (stub_data.store_code_blob(*stubs_code, &buffer)) {
247       if (lt.is_enabled()) {
248         LogStream ls(lt);
249         ls.print_cr("Stored blob '%s' to Startup Code Cache", buffer_name);
250       }
251     } else {
252       if (lt.is_enabled()) {
253         LogStream ls(lt);
254         ls.print_cr("Failed to store blob '%s' to Startup Code Cache", buffer_name);
255       }
256     }
257   } else if (stub_data.is_open()) {
258     // we either loaded some entries or generated new entries so
259     // publish all entries
260     //
261     // TODO - ensure we publish collect and publish the preuniverse
262     // stubs but don't try to save them
263     AOTCodeCache::publish_stub_addresses(*stubs_code, blob_id, &stub_data);
264     if (lt.is_enabled()) {
265       LogStream ls(lt);
266       ls.print_cr("Republished entries for blob '%s'", buffer_name);
267     }
268   }
269 
270   // close off recording of any further stubgen generation
271   if (blob_id == BlobId::stubgen_final_id) {
272     AOTCodeCache::set_stubgen_stubs_complete();
273   }
274 
275   if (lt.is_enabled()) {
276     LogStream ls(lt);
277     ls.print_cr("%s\t [" INTPTR_FORMAT ", " INTPTR_FORMAT "] used: %d, free: %d",
278                 buffer_name, p2i(stubs_code->content_begin()), p2i(stubs_code->content_end()),
279                 buffer.total_content_size(), buffer.insts_remaining());
280   }
281 
282   return stubs_code;
283 }
284 
285 // per blob initializer methods StubRoutines::initialize_xxx_stubs()
286 
// For each stubgen blob this expands to
// StubRoutines::initialize_<blob>_stubs(), which lazily creates the
// blob via initialize_stubs() above using the blob's declared code
// size and stub count; a second call is a no-op once the blob field
// is set.
#define DEFINE_BLOB_INIT_METHOD(blob_name)                              \
  void StubRoutines::initialize_ ## blob_name ## _stubs() {             \
    if (STUBGEN_BLOB_FIELD_NAME(blob_name) == nullptr) {                \
      BlobId blob_id = BlobId:: JOIN3(stubgen, blob_name, id);          \
      int size = _ ## blob_name ## _code_size;                          \
      int max_aligned_stubs = StubInfo::stub_count(blob_id);            \
      const char* timer_msg = "StubRoutines generation " # blob_name " stubs"; \
      const char* name = "StubRoutines (" # blob_name " stubs)";        \
      const char* assert_msg = "_" # blob_name "_code_size";            \
      STUBGEN_BLOB_FIELD_NAME(blob_name) =                              \
        initialize_stubs(blob_id, size, max_aligned_stubs, timer_msg,   \
                         name, assert_msg);                             \
    }                                                                   \
  }


STUBGEN_BLOBS_DO(DEFINE_BLOB_INIT_METHOD)

#undef DEFINE_BLOB_INIT_METHOD
306 
307 // external driver API functions for per blob init: xxx_stubs_init()
308 
// For each stubgen blob this expands to a free function
// <blob>_stubs_init() that simply forwards to the corresponding
// StubRoutines::initialize_<blob>_stubs() method defined above.
#define DEFINE_BLOB_INIT_FUNCTION(blob_name)            \
  void blob_name ## _stubs_init()  {                    \
    StubRoutines::initialize_ ## blob_name ## _stubs(); \
  }

STUBGEN_BLOBS_DO(DEFINE_BLOB_INIT_FUNCTION)

#undef DEFINE_BLOB_INIT_FUNCTION
317 
318 
319 #if INCLUDE_CDS
320 // non-generated external API init driver function
321 
322 void stubs_AOTAddressTable_init() { StubRoutines::init_AOTAddressTable(); }
323 #endif // INCLUDE_CDS
324 
325 /*
326  * we generate the underlying driver function compiler_stubs_init()
327  * but this wrapper is needed to perform special handling depending on
328  * where the compiler init gets called from. it ought to be possible
329  * to remove this at some point and have a determinate ordered init.
330  */
331 
332 void compiler_stubs_init(bool in_compiler_thread) {
333   if (in_compiler_thread && DelayCompilerStubsGeneration) {
334     // Temporarily revert state of stubs generation because
335     // it is called after final_stubs_init() finished
336     // during compiler runtime initialization.
337     // It is fine because these stubs are only used by
338     // compiled code and compiler is not running yet.
339     StubCodeDesc::unfreeze();
340     StubRoutines::initialize_compiler_stubs();
341     StubCodeDesc::freeze();
342   } else if (!in_compiler_thread && !DelayCompilerStubsGeneration) {
343     StubRoutines::initialize_compiler_stubs();
344   }
345 }
346 
347 //
348 // Default versions of arraycopy functions
349 //
350 
// Element-atomic conjoint byte copy (default/slow-path version).
JRT_LEAF(void, StubRoutines::jbyte_copy(jbyte* src, jbyte* dest, size_t count))
#ifndef PRODUCT
  SharedRuntime::_jbyte_array_copy_ctr++;      // Slow-path byte array copy
#endif // !PRODUCT
  Copy::conjoint_jbytes_atomic(src, dest, count);
JRT_END

// Element-atomic conjoint short/char copy (default/slow-path version).
JRT_LEAF(void, StubRoutines::jshort_copy(jshort* src, jshort* dest, size_t count))
#ifndef PRODUCT
  SharedRuntime::_jshort_array_copy_ctr++;     // Slow-path short/char array copy
#endif // !PRODUCT
  Copy::conjoint_jshorts_atomic(src, dest, count);
JRT_END

// Element-atomic conjoint int/float copy (default/slow-path version).
JRT_LEAF(void, StubRoutines::jint_copy(jint* src, jint* dest, size_t count))
#ifndef PRODUCT
  SharedRuntime::_jint_array_copy_ctr++;       // Slow-path int/float array copy
#endif // !PRODUCT
  Copy::conjoint_jints_atomic(src, dest, count);
JRT_END

// Element-atomic conjoint long/double copy (default/slow-path version).
JRT_LEAF(void, StubRoutines::jlong_copy(jlong* src, jlong* dest, size_t count))
#ifndef PRODUCT
  SharedRuntime::_jlong_array_copy_ctr++;      // Slow-path long/double array copy
#endif // !PRODUCT
  Copy::conjoint_jlongs_atomic(src, dest, count);
JRT_END

// Conjoint oop copy routed through the Access API; count must be > 0.
JRT_LEAF(void, StubRoutines::oop_copy(oop* src, oop* dest, size_t count))
#ifndef PRODUCT
  SharedRuntime::_oop_array_copy_ctr++;        // Slow-path oop array copy
#endif // !PRODUCT
  assert(count != 0, "count should be non-zero");
  ArrayAccess<>::oop_arraycopy_raw((HeapWord*)src, (HeapWord*)dest, count);
JRT_END

// As oop_copy, but the destination is known to be uninitialized.
JRT_LEAF(void, StubRoutines::oop_copy_uninit(oop* src, oop* dest, size_t count))
#ifndef PRODUCT
  SharedRuntime::_oop_array_copy_ctr++;        // Slow-path oop array copy
#endif // !PRODUCT
  assert(count != 0, "count should be non-zero");
  ArrayAccess<IS_DEST_UNINITIALIZED>::oop_arraycopy_raw((HeapWord*)src, (HeapWord*)dest, count);
JRT_END
394 
// arrayof_* variants: operands are passed as HeapWord* — the copy may
// assume HeapWord alignment of src and dest.

// HeapWord-aligned conjoint byte copy (default/slow-path version).
JRT_LEAF(void, StubRoutines::arrayof_jbyte_copy(HeapWord* src, HeapWord* dest, size_t count))
#ifndef PRODUCT
  SharedRuntime::_jbyte_array_copy_ctr++;      // Slow-path byte array copy
#endif // !PRODUCT
  Copy::arrayof_conjoint_jbytes(src, dest, count);
JRT_END

// HeapWord-aligned conjoint short/char copy (default/slow-path version).
JRT_LEAF(void, StubRoutines::arrayof_jshort_copy(HeapWord* src, HeapWord* dest, size_t count))
#ifndef PRODUCT
  SharedRuntime::_jshort_array_copy_ctr++;     // Slow-path short/char array copy
#endif // !PRODUCT
  Copy::arrayof_conjoint_jshorts(src, dest, count);
JRT_END

// HeapWord-aligned conjoint int/float copy (default/slow-path version).
JRT_LEAF(void, StubRoutines::arrayof_jint_copy(HeapWord* src, HeapWord* dest, size_t count))
#ifndef PRODUCT
  SharedRuntime::_jint_array_copy_ctr++;       // Slow-path int/float array copy
#endif // !PRODUCT
  Copy::arrayof_conjoint_jints(src, dest, count);
JRT_END

// HeapWord-aligned conjoint long/double copy (default/slow-path version).
JRT_LEAF(void, StubRoutines::arrayof_jlong_copy(HeapWord* src, HeapWord* dest, size_t count))
#ifndef PRODUCT
  SharedRuntime::_jlong_array_copy_ctr++;       // Slow-path long/double array copy
#endif // !PRODUCT
  Copy::arrayof_conjoint_jlongs(src, dest, count);
JRT_END

// HeapWord-aligned conjoint oop copy via the Access API; count must be > 0.
JRT_LEAF(void, StubRoutines::arrayof_oop_copy(HeapWord* src, HeapWord* dest, size_t count))
#ifndef PRODUCT
  SharedRuntime::_oop_array_copy_ctr++;        // Slow-path oop array copy
#endif // !PRODUCT
  assert(count != 0, "count should be non-zero");
  ArrayAccess<ARRAYCOPY_ARRAYOF>::oop_arraycopy_raw(src, dest, count);
JRT_END

// As arrayof_oop_copy, but the destination is known to be uninitialized.
JRT_LEAF(void, StubRoutines::arrayof_oop_copy_uninit(HeapWord* src, HeapWord* dest, size_t count))
#ifndef PRODUCT
  SharedRuntime::_oop_array_copy_ctr++;        // Slow-path oop array copy
#endif // !PRODUCT
  assert(count != 0, "count should be non-zero");
  ArrayAccess<ARRAYCOPY_ARRAYOF | IS_DEST_UNINITIALIZED>::oop_arraycopy_raw(src, dest, count);
JRT_END
438 
439 address StubRoutines::select_fill_function(BasicType t, bool aligned, const char* &name) {
440 #define RETURN_STUB(xxx_fill) { \
441   name = #xxx_fill; \
442   return StubRoutines::xxx_fill(); }
443 
444   switch (t) {
445   case T_BYTE:
446   case T_BOOLEAN:
447     if (!aligned) RETURN_STUB(jbyte_fill);
448     RETURN_STUB(arrayof_jbyte_fill);
449   case T_CHAR:
450   case T_SHORT:
451     if (!aligned) RETURN_STUB(jshort_fill);
452     RETURN_STUB(arrayof_jshort_fill);
453   case T_INT:
454   case T_FLOAT:
455     if (!aligned) RETURN_STUB(jint_fill);
456     RETURN_STUB(arrayof_jint_fill);
457   case T_DOUBLE:
458   case T_LONG:
459   case T_ARRAY:
460   case T_OBJECT:
461   case T_NARROWOOP:
462   case T_NARROWKLASS:
463   case T_ADDRESS:
464   case T_VOID:
465     // Currently unsupported
466     return nullptr;
467 
468   default:
469     ShouldNotReachHere();
470     return nullptr;
471   }
472 
473 #undef RETURN_STUB
474 }
475 
476 // constants for computing the copy function
// constants for computing the copy function: alignment and
// disjointness are independent flag bits combined into a 4-way selector
enum {
  COPYFUNC_UNALIGNED = 0,
  COPYFUNC_ALIGNED = 1,                 // src, dest aligned to HeapWordSize
  COPYFUNC_CONJOINT = 0,
  COPYFUNC_DISJOINT = 2                 // src != dest, or transfer can descend
};
483 
// Note:  The condition "disjoint" applies also for overlapping copies
// where a descending copy is permitted (i.e., dest_offset <= src_offset).
486 address
487 StubRoutines::select_arraycopy_function(BasicType t, bool aligned, bool disjoint, const char* &name, bool dest_uninitialized) {
488   int selector =
489     (aligned  ? COPYFUNC_ALIGNED  : COPYFUNC_UNALIGNED) +
490     (disjoint ? COPYFUNC_DISJOINT : COPYFUNC_CONJOINT);
491 
492 #define RETURN_STUB(xxx_arraycopy) { \
493   name = #xxx_arraycopy; \
494   return StubRoutines::xxx_arraycopy(); }
495 
496 #define RETURN_STUB_PARM(xxx_arraycopy, parm) { \
497   name = parm ? #xxx_arraycopy "_uninit": #xxx_arraycopy; \
498   return StubRoutines::xxx_arraycopy(parm); }
499 
500   switch (t) {
501   case T_BYTE:
502   case T_BOOLEAN:
503     switch (selector) {
504     case COPYFUNC_CONJOINT | COPYFUNC_UNALIGNED:  RETURN_STUB(jbyte_arraycopy);
505     case COPYFUNC_CONJOINT | COPYFUNC_ALIGNED:    RETURN_STUB(arrayof_jbyte_arraycopy);
506     case COPYFUNC_DISJOINT | COPYFUNC_UNALIGNED:  RETURN_STUB(jbyte_disjoint_arraycopy);
507     case COPYFUNC_DISJOINT | COPYFUNC_ALIGNED:    RETURN_STUB(arrayof_jbyte_disjoint_arraycopy);
508     }
509   case T_CHAR:
510   case T_SHORT:
511     switch (selector) {
512     case COPYFUNC_CONJOINT | COPYFUNC_UNALIGNED:  RETURN_STUB(jshort_arraycopy);
513     case COPYFUNC_CONJOINT | COPYFUNC_ALIGNED:    RETURN_STUB(arrayof_jshort_arraycopy);
514     case COPYFUNC_DISJOINT | COPYFUNC_UNALIGNED:  RETURN_STUB(jshort_disjoint_arraycopy);
515     case COPYFUNC_DISJOINT | COPYFUNC_ALIGNED:    RETURN_STUB(arrayof_jshort_disjoint_arraycopy);
516     }
517   case T_INT:
518   case T_FLOAT:
519     switch (selector) {
520     case COPYFUNC_CONJOINT | COPYFUNC_UNALIGNED:  RETURN_STUB(jint_arraycopy);
521     case COPYFUNC_CONJOINT | COPYFUNC_ALIGNED:    RETURN_STUB(arrayof_jint_arraycopy);
522     case COPYFUNC_DISJOINT | COPYFUNC_UNALIGNED:  RETURN_STUB(jint_disjoint_arraycopy);
523     case COPYFUNC_DISJOINT | COPYFUNC_ALIGNED:    RETURN_STUB(arrayof_jint_disjoint_arraycopy);
524     }
525   case T_DOUBLE:
526   case T_LONG:
527     switch (selector) {
528     case COPYFUNC_CONJOINT | COPYFUNC_UNALIGNED:  RETURN_STUB(jlong_arraycopy);
529     case COPYFUNC_CONJOINT | COPYFUNC_ALIGNED:    RETURN_STUB(arrayof_jlong_arraycopy);
530     case COPYFUNC_DISJOINT | COPYFUNC_UNALIGNED:  RETURN_STUB(jlong_disjoint_arraycopy);
531     case COPYFUNC_DISJOINT | COPYFUNC_ALIGNED:    RETURN_STUB(arrayof_jlong_disjoint_arraycopy);
532     }
533   case T_ARRAY:
534   case T_OBJECT:
535     switch (selector) {
536     case COPYFUNC_CONJOINT | COPYFUNC_UNALIGNED:  RETURN_STUB_PARM(oop_arraycopy, dest_uninitialized);
537     case COPYFUNC_CONJOINT | COPYFUNC_ALIGNED:    RETURN_STUB_PARM(arrayof_oop_arraycopy, dest_uninitialized);
538     case COPYFUNC_DISJOINT | COPYFUNC_UNALIGNED:  RETURN_STUB_PARM(oop_disjoint_arraycopy, dest_uninitialized);
539     case COPYFUNC_DISJOINT | COPYFUNC_ALIGNED:    RETURN_STUB_PARM(arrayof_oop_disjoint_arraycopy, dest_uninitialized);
540     }
541   default:
542     ShouldNotReachHere();
543     return nullptr;
544   }
545 
546 #undef RETURN_STUB
547 #undef RETURN_STUB_PARM
548 }
549 
550 UnsafeMemoryAccessMark::UnsafeMemoryAccessMark(StubCodeGenerator* cgen, bool add_entry, bool continue_at_scope_end, address error_exit_pc) {
551   _cgen = cgen;
552   _ucm_entry = nullptr;
553   if (add_entry) {
554     address err_exit_pc = nullptr;
555     if (!continue_at_scope_end) {
556       err_exit_pc = error_exit_pc != nullptr ? error_exit_pc : UnsafeMemoryAccess::common_exit_stub_pc();
557     }
558     assert(err_exit_pc != nullptr || continue_at_scope_end, "error exit not set");
559     _ucm_entry = UnsafeMemoryAccess::add_to_table(_cgen->assembler()->pc(), nullptr, err_exit_pc);
560   }
561 }
562 
563 UnsafeMemoryAccessMark::~UnsafeMemoryAccessMark() {
564   if (_ucm_entry != nullptr) {
565     _ucm_entry->set_end_pc(_cgen->assembler()->pc());
566     if (_ucm_entry->error_exit_pc() == nullptr) {
567       _ucm_entry->set_error_exit_pc(_cgen->assembler()->pc());
568     }
569   }
570 }