1 /*
  2  * Copyright (c) 1997, 2025, Oracle and/or its affiliates. All rights reserved.
  3  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  4  *
  5  * This code is free software; you can redistribute it and/or modify it
  6  * under the terms of the GNU General Public License version 2 only, as
  7  * published by the Free Software Foundation.
  8  *
  9  * This code is distributed in the hope that it will be useful, but WITHOUT
 10  * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 11  * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
 12  * version 2 for more details (a copy is included in the LICENSE file that
 13  * accompanied this code).
 14  *
 15  * You should have received a copy of the GNU General Public License version
 16  * 2 along with this work; if not, write to the Free Software Foundation,
 17  * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 18  *
 19  * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 20  * or visit www.oracle.com if you need additional information or have any
 21  * questions.
 22  *
 23  */
 24 
 25 #include "asm/codeBuffer.hpp"
 26 #include "asm/macroAssembler.inline.hpp"
 27 #include "memory/resourceArea.hpp"
 28 #include "oops/access.inline.hpp"
 29 #include "oops/klass.hpp"
 30 #include "oops/oop.inline.hpp"
 31 #include "prims/vectorSupport.hpp"
 32 #include "runtime/continuation.hpp"
 33 #include "runtime/interfaceSupport.inline.hpp"
 34 #include "runtime/timerTrace.hpp"
 35 #include "runtime/sharedRuntime.hpp"
 36 #include "runtime/stubRoutines.hpp"
 37 #include "utilities/align.hpp"
 38 #include "utilities/copy.hpp"
 39 #ifdef COMPILER2
 40 #include "opto/runtime.hpp"
 41 #endif
 42 
// Static state for the unsafe-memory-access table: an array of entries,
// each mapping a [start_pc, end_pc) code range to an error-exit pc
// (see contains_pc()/page_error_continue_pc() below).
UnsafeMemoryAccess* UnsafeMemoryAccess::_table                  = nullptr;
int UnsafeMemoryAccess::_table_length                           = 0;
int UnsafeMemoryAccess::_table_max_length                       = 0;
address UnsafeMemoryAccess::_common_exit_stub_pc                = nullptr;
 47 
// Implementation of StubRoutines - for a description of how to
// declare new blobs, stubs and entries, see stubDefinitions.hpp.
 50 
 51 // define arrays to hold stub and blob names
 52 
 53 // use a template to generate the initializer for the blob names array
 54 
// Stringize each blob name so the array below is indexed by blob id.
#define DEFINE_BLOB_NAME(blob_name)             \
  # blob_name,

const char* StubRoutines::_blob_names[StubGenBlobId::NUM_BLOBIDS] = {
  STUBGEN_BLOBS_DO(DEFINE_BLOB_NAME)
};

#undef DEFINE_BLOB_NAME
 63 
// Stringize each stub name so the array below is indexed by stub id.
#define DEFINE_STUB_NAME(blob_name, stub_name)          \
  # stub_name ,                                         \

// use a template to generate the initializer for the stub names array
const char* StubRoutines::_stub_names[StubGenStubId::NUM_STUBIDS] = {
  STUBGEN_STUBS_DO(DEFINE_STUB_NAME)
};

#undef DEFINE_STUB_NAME
 73 
 74 // Define fields used to store blobs
 75 
// One BufferBlob* field per blob; all start out null and are filled
// in by the initialize_<blob>_stubs() methods generated below.
#define DEFINE_BLOB_FIELD(blob_name) \
  BufferBlob* StubRoutines:: STUBGEN_BLOB_FIELD_NAME(blob_name) = nullptr;

STUBGEN_BLOBS_DO(DEFINE_BLOB_FIELD)

#undef DEFINE_BLOB_FIELD
 82 
 83 // Define fields used to store stub entries
 84 
// Plain entries start out null until the corresponding stub is generated.
#define DEFINE_ENTRY_FIELD(blob_name, stub_name, field_name, getter_name) \
  address StubRoutines:: STUB_FIELD_NAME(field_name) = nullptr;

// _INIT entries default to an ordinary C function so they are callable
// even before (or without) stub generation.
#define DEFINE_ENTRY_FIELD_INIT(blob_name, stub_name, field_name, getter_name, init_function) \
  address StubRoutines:: STUB_FIELD_NAME(field_name) = CAST_FROM_FN_PTR(address, init_function);

// _ARRAY entries hold `count` variants of one stub, all initially null.
#define DEFINE_ENTRY_FIELD_ARRAY(blob_name, stub_name, field_name, getter_name, count) \
  address StubRoutines:: STUB_FIELD_NAME(field_name)[count] = { nullptr };

STUBGEN_ENTRIES_DO(DEFINE_ENTRY_FIELD, DEFINE_ENTRY_FIELD_INIT, DEFINE_ENTRY_FIELD_ARRAY)

#undef DEFINE_ENTRY_FIELD_ARRAY
#undef DEFINE_ENTRY_FIELD_INIT
#undef DEFINE_ENTRY_FIELD
 99 
// Counter bumped by the verify_oop stub (debug builds).
jint    StubRoutines::_verify_oop_count                         = 0;


// Entry tables not covered by the stub generator templates above.
address StubRoutines::_string_indexof_array[4]   =    { nullptr };
address StubRoutines::_vector_f_math[VectorSupport::NUM_VEC_SIZES][VectorSupport::NUM_VECTOR_OP_MATH] = {{nullptr}, {nullptr}};
address StubRoutines::_vector_d_math[VectorSupport::NUM_VEC_SIZES][VectorSupport::NUM_VECTOR_OP_MATH] = {{nullptr}, {nullptr}};
106 
107 const char* StubRoutines::get_blob_name(StubGenBlobId id) {
108   assert(0 <= id && id < StubGenBlobId::NUM_BLOBIDS, "invalid blob id");
109   return _blob_names[id];
110 }
111 
112 const char* StubRoutines::get_stub_name(StubGenStubId id) {
113   assert(0 <= id && id < StubGenStubId::NUM_STUBIDS, "invalid stub id");
114   return _stub_names[id];
115 }
116 
117 #ifdef ASSERT
118 
119 // array holding start and end indices for stub ids associated with a
120 // given blob. Given a blob with id (StubGenBlobId) blob_id for any
121 // stub with id (StubGenStubId) stub_id declared within the blob:
122 // _blob_offsets[blob_id] <= stub_id < _blob_offsets[blob_id+1]
123 
static int _blob_limits[StubGenBlobId::NUM_BLOBIDS + 1];

// macro used to compute blob limits
// NOTE: expands in verifyStubIds() and relies on its locals `counter`
// and `index` by name; do not rename them without updating this macro.
#define BLOB_COUNT(blob_name)                                           \
  counter += StubGenStubId_ ## blob_name :: NUM_STUBIDS_ ## blob_name;  \
  _blob_limits[++index] = counter;                                      \

// macro that checks stubs are associated with the correct blobs
// NOTE: expands in verifyStubIds() and relies on its locals
// `localStubId`, `globalStubId` and `blobId` by name.
#define STUB_VERIFY(blob_name, stub_name)                               \
  localStubId = (int) (StubGenStubId_ ## blob_name :: blob_name ## _ ## stub_name ## _id); \
  globalStubId = (int) (StubGenStubId:: stub_name ## _id);              \
  blobId = (int) (StubGenBlobId:: blob_name ## _id);                    \
  assert((globalStubId >= _blob_limits[blobId] &&                       \
          globalStubId < _blob_limits[blobId+1]),                       \
         "stub " # stub_name " uses incorrect blob name " # blob_name); \
  assert(globalStubId == _blob_limits[blobId] + localStubId,            \
         "stub " # stub_name " id found at wrong offset!");             \

// Cross-checks the per-blob (local) stub id enums against the global
// stub id enum; always returns true so it can seed a static initializer.
bool verifyStubIds() {
  // first compute the blob limits
  int counter = 0;
  int index = 0;
  // populate offsets table with cumulative total of local enum counts
  STUBGEN_BLOBS_DO(BLOB_COUNT);

  // ensure 1) global stub ids lie in the range of the associated blob
  // and 2) each blob's base + local stub id == global stub id
  int globalStubId, blobId, localStubId;
  STUBGEN_STUBS_DO(STUB_VERIFY);
  return true;
}

#undef BLOB_COUNT
#undef STUB_VERIFY

// ensure we verify the blob ids when this compile unit is first entered
bool _verified_stub_ids = verifyStubIds();
161 
162 
163 // macro used by stub to blob translation
164 
// macro used by stub to blob translation
// Expanded once per blob in declaration order: first test whether `id`
// lies below the upper limit of the blob recorded by the *previous*
// expansion, then advance `blobId` to the current blob. Relies on the
// locals `id` and `blobId` of stub_to_blob() by name.
#define BLOB_CHECK_OFFSET(blob_name)                                \
  if (id < _blob_limits[((int)blobId) + 1]) { return blobId; }      \
  blobId = StubGenBlobId:: blob_name ## _id;                        \

// translate a global stub id to an associated blob id based on the
// computed blob limits

StubGenBlobId StubRoutines::stub_to_blob(StubGenStubId stubId) {
  int id = (int)stubId;
  assert(id > ((int)StubGenStubId::NO_STUBID) && id < ((int)StubGenStubId::NUM_STUBIDS), "stub id out of range!");
  // start with no blob to catch stub id == -1
  StubGenBlobId blobId = StubGenBlobId::NO_BLOBID;
  STUBGEN_BLOBS_DO(BLOB_CHECK_OFFSET);
  // if we reach here we should have the last blob id
  assert(blobId == StubGenBlobId::NUM_BLOBIDS - 1, "unexpected blob id");
  return blobId;
}
182 
183 #endif // ASSERT
184 
// TODO: update with 8343767
// Entries for inline-type field load/store helpers, declared outside
// the stub generator templates above (presumably pending migration —
// see the JBS issue referenced in the TODO).
address StubRoutines::_load_inline_type_fields_in_regs = nullptr;
address StubRoutines::_store_inline_type_fields_to_buf = nullptr;
188 
189 
190 // Initialization
191 //
192 // Note: to break cycle with universe initialization, stubs are generated in two phases.
193 // The first one generates stubs needed during universe init (e.g., _handle_must_compile_first_entry).
194 // The second phase includes all other stubs (which may depend on universe being initialized.)
195 
196 extern void StubGenerator_generate(CodeBuffer* code, StubGenBlobId blob_id); // only interface to generators
197 
198 void UnsafeMemoryAccess::create_table(int max_size) {
199   UnsafeMemoryAccess::_table = new UnsafeMemoryAccess[max_size];
200   UnsafeMemoryAccess::_table_max_length = max_size;
201 }
202 
203 bool UnsafeMemoryAccess::contains_pc(address pc) {
204   for (int i = 0; i < UnsafeMemoryAccess::_table_length; i++) {
205     UnsafeMemoryAccess* entry = &UnsafeMemoryAccess::_table[i];
206     if (pc >= entry->start_pc() && pc < entry->end_pc()) {
207       return true;
208     }
209   }
210   return false;
211 }
212 
213 address UnsafeMemoryAccess::page_error_continue_pc(address pc) {
214   for (int i = 0; i < UnsafeMemoryAccess::_table_length; i++) {
215     UnsafeMemoryAccess* entry = &UnsafeMemoryAccess::_table[i];
216     if (pc >= entry->start_pc() && pc < entry->end_pc()) {
217       return entry->error_exit_pc();
218     }
219   }
220   return nullptr;
221 }
222 
223 
224 static BufferBlob* initialize_stubs(StubGenBlobId blob_id,
225                                     int code_size, int max_aligned_stubs,
226                                     const char* timer_msg,
227                                     const char* buffer_name,
228                                     const char* assert_msg) {
229   ResourceMark rm;
230   TraceTime timer(timer_msg, TRACETIME_LOG(Info, startuptime));
231   // Add extra space for large CodeEntryAlignment
232   int size = code_size + CodeEntryAlignment * max_aligned_stubs;
233   BufferBlob* stubs_code = BufferBlob::create(buffer_name, size);
234   if (stubs_code == nullptr) {
235     vm_exit_out_of_memory(code_size, OOM_MALLOC_ERROR, "CodeCache: no room for %s", buffer_name);
236   }
237   CodeBuffer buffer(stubs_code);
238   StubGenerator_generate(&buffer, blob_id);
239   // When new stubs added we need to make sure there is some space left
240   // to catch situation when we should increase size again.
241   assert(code_size == 0 || buffer.insts_remaining() > 200, "increase %s", assert_msg);
242 
243   LogTarget(Info, stubs) lt;
244   if (lt.is_enabled()) {
245     LogStream ls(lt);
246     ls.print_cr("%s\t [" INTPTR_FORMAT ", " INTPTR_FORMAT "] used: %d, free: %d",
247                 buffer_name, p2i(stubs_code->content_begin()), p2i(stubs_code->content_end()),
248                 buffer.total_content_size(), buffer.insts_remaining());
249   }
250   return stubs_code;
251 }
252 
// Generates one StubRoutines::initialize_<blob>_stubs() method per blob.
// Each is idempotent: it generates the blob only if its field is still
// null, passing the blob's configured code size and naming strings
// through to the shared initialize_stubs() driver above.
#define DEFINE_BLOB_INIT_METHOD(blob_name)                              \
  void StubRoutines::initialize_ ## blob_name ## _stubs() {             \
    if (STUBGEN_BLOB_FIELD_NAME(blob_name) == nullptr) {                \
      StubGenBlobId blob_id = StubGenBlobId:: STUB_ID_NAME(blob_name);  \
      int size = _ ## blob_name ## _code_size;                          \
      int max_aligned_size = 10;                                        \
      const char* timer_msg = "StubRoutines generation " # blob_name " stubs"; \
      const char* name = "StubRoutines (" # blob_name "stubs)";         \
      const char* assert_msg = "_" # blob_name "_code_size";            \
      STUBGEN_BLOB_FIELD_NAME(blob_name) =                              \
        initialize_stubs(blob_id, size, max_aligned_size, timer_msg,    \
                         name, assert_msg);                             \
    }                                                                   \
  }


STUBGEN_BLOBS_DO(DEFINE_BLOB_INIT_METHOD)

#undef DEFINE_BLOB_INIT_METHOD
272 
273 
// Generates a free function <blob>_stubs_init() per blob that simply
// forwards to the corresponding StubRoutines initializer, for callers
// that cannot (or prefer not to) name the class method directly.
#define DEFINE_BLOB_INIT_FUNCTION(blob_name)            \
void blob_name ## _stubs_init()  {                      \
  StubRoutines::initialize_ ## blob_name ## _stubs();   \
}

STUBGEN_BLOBS_DO(DEFINE_BLOB_INIT_FUNCTION)

#undef DEFINE_BLOB_INIT_FUNCTION
282 
283 /*
284  * we generate the underlying driver method but this wrapper is needed
285  * to perform special handling depending on where the compiler init
286  * gets called from. it ought to be possible to remove this at some
287  * point and have adeterminate ordered init.
288  */
289 
290 void compiler_stubs_init(bool in_compiler_thread) {
291   if (in_compiler_thread && DelayCompilerStubsGeneration) {
292     // Temporarily revert state of stubs generation because
293     // it is called after final_stubs_init() finished
294     // during compiler runtime initialization.
295     // It is fine because these stubs are only used by
296     // compiled code and compiler is not running yet.
297     StubCodeDesc::unfreeze();
298     StubRoutines::initialize_compiler_stubs();
299     StubCodeDesc::freeze();
300   } else if (!in_compiler_thread && !DelayCompilerStubsGeneration) {
301     StubRoutines::initialize_compiler_stubs();
302   }
303 }
304 
305 
306 //
307 // Default versions of arraycopy functions
308 //
309 
// Runtime (non-stub) fallback for conjoint byte array copies.
JRT_LEAF(void, StubRoutines::jbyte_copy(jbyte* src, jbyte* dest, size_t count))
#ifndef PRODUCT
  SharedRuntime::_jbyte_array_copy_ctr++;      // Slow-path byte array copy
#endif // !PRODUCT
  Copy::conjoint_jbytes_atomic(src, dest, count);
JRT_END
316 
// Runtime (non-stub) fallback for conjoint short/char array copies.
JRT_LEAF(void, StubRoutines::jshort_copy(jshort* src, jshort* dest, size_t count))
#ifndef PRODUCT
  SharedRuntime::_jshort_array_copy_ctr++;     // Slow-path short/char array copy
#endif // !PRODUCT
  Copy::conjoint_jshorts_atomic(src, dest, count);
JRT_END
323 
// Runtime (non-stub) fallback for conjoint int/float array copies.
JRT_LEAF(void, StubRoutines::jint_copy(jint* src, jint* dest, size_t count))
#ifndef PRODUCT
  SharedRuntime::_jint_array_copy_ctr++;       // Slow-path int/float array copy
#endif // !PRODUCT
  Copy::conjoint_jints_atomic(src, dest, count);
JRT_END
330 
// Runtime (non-stub) fallback for conjoint long/double array copies.
JRT_LEAF(void, StubRoutines::jlong_copy(jlong* src, jlong* dest, size_t count))
#ifndef PRODUCT
  SharedRuntime::_jlong_array_copy_ctr++;      // Slow-path long/double array copy
#endif // !PRODUCT
  Copy::conjoint_jlongs_atomic(src, dest, count);
JRT_END
337 
// Runtime (non-stub) fallback for conjoint oop array copies; delegates
// to the Access API rather than copying raw words.
JRT_LEAF(void, StubRoutines::oop_copy(oop* src, oop* dest, size_t count))
#ifndef PRODUCT
  SharedRuntime::_oop_array_copy_ctr++;        // Slow-path oop array copy
#endif // !PRODUCT
  assert(count != 0, "count should be non-zero");
  ArrayAccess<>::oop_arraycopy_raw((HeapWord*)src, (HeapWord*)dest, count);
JRT_END
345 
// As oop_copy(), but the destination is known to be uninitialized
// (IS_DEST_UNINITIALIZED is passed through to the Access API).
JRT_LEAF(void, StubRoutines::oop_copy_uninit(oop* src, oop* dest, size_t count))
#ifndef PRODUCT
  SharedRuntime::_oop_array_copy_ctr++;        // Slow-path oop array copy
#endif // !PRODUCT
  assert(count != 0, "count should be non-zero");
  ArrayAccess<IS_DEST_UNINITIALIZED>::oop_arraycopy_raw((HeapWord*)src, (HeapWord*)dest, count);
JRT_END
353 
// Runtime fallback for conjoint byte array copies, HeapWord-aligned variant.
JRT_LEAF(void, StubRoutines::arrayof_jbyte_copy(HeapWord* src, HeapWord* dest, size_t count))
#ifndef PRODUCT
  SharedRuntime::_jbyte_array_copy_ctr++;      // Slow-path byte array copy
#endif // !PRODUCT
  Copy::arrayof_conjoint_jbytes(src, dest, count);
JRT_END
360 
// Runtime fallback for conjoint short/char array copies, HeapWord-aligned variant.
JRT_LEAF(void, StubRoutines::arrayof_jshort_copy(HeapWord* src, HeapWord* dest, size_t count))
#ifndef PRODUCT
  SharedRuntime::_jshort_array_copy_ctr++;     // Slow-path short/char array copy
#endif // !PRODUCT
  Copy::arrayof_conjoint_jshorts(src, dest, count);
JRT_END
367 
// Runtime fallback for conjoint int/float array copies, HeapWord-aligned variant.
JRT_LEAF(void, StubRoutines::arrayof_jint_copy(HeapWord* src, HeapWord* dest, size_t count))
#ifndef PRODUCT
  SharedRuntime::_jint_array_copy_ctr++;       // Slow-path int/float array copy
#endif // !PRODUCT
  Copy::arrayof_conjoint_jints(src, dest, count);
JRT_END
374 
// Runtime fallback for conjoint long/double array copies, HeapWord-aligned variant.
JRT_LEAF(void, StubRoutines::arrayof_jlong_copy(HeapWord* src, HeapWord* dest, size_t count))
#ifndef PRODUCT
  SharedRuntime::_jlong_array_copy_ctr++;       // Slow-path long/double array copy
#endif // !PRODUCT
  Copy::arrayof_conjoint_jlongs(src, dest, count);
JRT_END
381 
// Runtime fallback for conjoint oop array copies, HeapWord-aligned
// variant; delegates to the Access API with ARRAYCOPY_ARRAYOF.
JRT_LEAF(void, StubRoutines::arrayof_oop_copy(HeapWord* src, HeapWord* dest, size_t count))
#ifndef PRODUCT
  SharedRuntime::_oop_array_copy_ctr++;        // Slow-path oop array copy
#endif // !PRODUCT
  assert(count != 0, "count should be non-zero");
  ArrayAccess<ARRAYCOPY_ARRAYOF>::oop_arraycopy_raw(src, dest, count);
JRT_END
389 
// As arrayof_oop_copy(), but the destination is known to be
// uninitialized (IS_DEST_UNINITIALIZED added to the decorators).
JRT_LEAF(void, StubRoutines::arrayof_oop_copy_uninit(HeapWord* src, HeapWord* dest, size_t count))
#ifndef PRODUCT
  SharedRuntime::_oop_array_copy_ctr++;        // Slow-path oop array copy
#endif // !PRODUCT
  assert(count != 0, "count should be non-zero");
  ArrayAccess<ARRAYCOPY_ARRAYOF | IS_DEST_UNINITIALIZED>::oop_arraycopy_raw(src, dest, count);
JRT_END
397 
398 address StubRoutines::select_fill_function(BasicType t, bool aligned, const char* &name) {
399 #define RETURN_STUB(xxx_fill) { \
400   name = #xxx_fill; \
401   return StubRoutines::xxx_fill(); }
402 
403   switch (t) {
404   case T_BYTE:
405   case T_BOOLEAN:
406     if (!aligned) RETURN_STUB(jbyte_fill);
407     RETURN_STUB(arrayof_jbyte_fill);
408   case T_CHAR:
409   case T_SHORT:
410     if (!aligned) RETURN_STUB(jshort_fill);
411     RETURN_STUB(arrayof_jshort_fill);
412   case T_INT:
413   case T_FLOAT:
414     if (!aligned) RETURN_STUB(jint_fill);
415     RETURN_STUB(arrayof_jint_fill);
416   case T_DOUBLE:
417   case T_LONG:
418   case T_ARRAY:
419   case T_OBJECT:
420   case T_NARROWOOP:
421   case T_NARROWKLASS:
422   case T_ADDRESS:
423   case T_VOID:
424     // Currently unsupported
425     return nullptr;
426 
427   default:
428     ShouldNotReachHere();
429     return nullptr;
430   }
431 
432 #undef RETURN_STUB
433 }
434 
435 // constants for computing the copy function
436 enum {
437   COPYFUNC_UNALIGNED = 0,
438   COPYFUNC_ALIGNED = 1,                 // src, dest aligned to HeapWordSize
439   COPYFUNC_CONJOINT = 0,
440   COPYFUNC_DISJOINT = 2                 // src != dest, or transfer can descend
441 };
442 
// Note:  The condition "disjoint" applies also for overlapping copies
// where a descending copy is permitted (i.e., dest_offset <= src_offset).
// Selects the arraycopy stub matching basic type t and the requested
// alignment/disjointness (and, for oops, destination initialization
// state), reporting the chosen stub's name via the out-parameter.
// Note: each inner switch covers all four selector values and every
// arm returns, so the apparent fall-through between outer cases is
// unreachable.
address
StubRoutines::select_arraycopy_function(BasicType t, bool aligned, bool disjoint, const char* &name, bool dest_uninitialized) {
  int selector =
    (aligned  ? COPYFUNC_ALIGNED  : COPYFUNC_UNALIGNED) +
    (disjoint ? COPYFUNC_DISJOINT : COPYFUNC_CONJOINT);

#define RETURN_STUB(xxx_arraycopy) { \
  name = #xxx_arraycopy; \
  return StubRoutines::xxx_arraycopy(); }

// As RETURN_STUB, but forwards `parm` (dest_uninitialized) to the
// getter and reflects it in the reported name.
#define RETURN_STUB_PARM(xxx_arraycopy, parm) { \
  name = parm ? #xxx_arraycopy "_uninit": #xxx_arraycopy; \
  return StubRoutines::xxx_arraycopy(parm); }

  switch (t) {
  case T_BYTE:
  case T_BOOLEAN:
    switch (selector) {
    case COPYFUNC_CONJOINT | COPYFUNC_UNALIGNED:  RETURN_STUB(jbyte_arraycopy);
    case COPYFUNC_CONJOINT | COPYFUNC_ALIGNED:    RETURN_STUB(arrayof_jbyte_arraycopy);
    case COPYFUNC_DISJOINT | COPYFUNC_UNALIGNED:  RETURN_STUB(jbyte_disjoint_arraycopy);
    case COPYFUNC_DISJOINT | COPYFUNC_ALIGNED:    RETURN_STUB(arrayof_jbyte_disjoint_arraycopy);
    }
  case T_CHAR:
  case T_SHORT:
    switch (selector) {
    case COPYFUNC_CONJOINT | COPYFUNC_UNALIGNED:  RETURN_STUB(jshort_arraycopy);
    case COPYFUNC_CONJOINT | COPYFUNC_ALIGNED:    RETURN_STUB(arrayof_jshort_arraycopy);
    case COPYFUNC_DISJOINT | COPYFUNC_UNALIGNED:  RETURN_STUB(jshort_disjoint_arraycopy);
    case COPYFUNC_DISJOINT | COPYFUNC_ALIGNED:    RETURN_STUB(arrayof_jshort_disjoint_arraycopy);
    }
  case T_INT:
  case T_FLOAT:
    switch (selector) {
    case COPYFUNC_CONJOINT | COPYFUNC_UNALIGNED:  RETURN_STUB(jint_arraycopy);
    case COPYFUNC_CONJOINT | COPYFUNC_ALIGNED:    RETURN_STUB(arrayof_jint_arraycopy);
    case COPYFUNC_DISJOINT | COPYFUNC_UNALIGNED:  RETURN_STUB(jint_disjoint_arraycopy);
    case COPYFUNC_DISJOINT | COPYFUNC_ALIGNED:    RETURN_STUB(arrayof_jint_disjoint_arraycopy);
    }
  case T_DOUBLE:
  case T_LONG:
    switch (selector) {
    case COPYFUNC_CONJOINT | COPYFUNC_UNALIGNED:  RETURN_STUB(jlong_arraycopy);
    case COPYFUNC_CONJOINT | COPYFUNC_ALIGNED:    RETURN_STUB(arrayof_jlong_arraycopy);
    case COPYFUNC_DISJOINT | COPYFUNC_UNALIGNED:  RETURN_STUB(jlong_disjoint_arraycopy);
    case COPYFUNC_DISJOINT | COPYFUNC_ALIGNED:    RETURN_STUB(arrayof_jlong_disjoint_arraycopy);
    }
  case T_ARRAY:
  case T_OBJECT:
    switch (selector) {
    case COPYFUNC_CONJOINT | COPYFUNC_UNALIGNED:  RETURN_STUB_PARM(oop_arraycopy, dest_uninitialized);
    case COPYFUNC_CONJOINT | COPYFUNC_ALIGNED:    RETURN_STUB_PARM(arrayof_oop_arraycopy, dest_uninitialized);
    case COPYFUNC_DISJOINT | COPYFUNC_UNALIGNED:  RETURN_STUB_PARM(oop_disjoint_arraycopy, dest_uninitialized);
    case COPYFUNC_DISJOINT | COPYFUNC_ALIGNED:    RETURN_STUB_PARM(arrayof_oop_disjoint_arraycopy, dest_uninitialized);
    }
  default:
    ShouldNotReachHere();
    return nullptr;
  }

#undef RETURN_STUB
#undef RETURN_STUB_PARM
}
508 
509 UnsafeMemoryAccessMark::UnsafeMemoryAccessMark(StubCodeGenerator* cgen, bool add_entry, bool continue_at_scope_end, address error_exit_pc) {
510   _cgen = cgen;
511   _ucm_entry = nullptr;
512   if (add_entry) {
513     address err_exit_pc = nullptr;
514     if (!continue_at_scope_end) {
515       err_exit_pc = error_exit_pc != nullptr ? error_exit_pc : UnsafeMemoryAccess::common_exit_stub_pc();
516     }
517     assert(err_exit_pc != nullptr || continue_at_scope_end, "error exit not set");
518     _ucm_entry = UnsafeMemoryAccess::add_to_table(_cgen->assembler()->pc(), nullptr, err_exit_pc);
519   }
520 }
521 
522 UnsafeMemoryAccessMark::~UnsafeMemoryAccessMark() {
523   if (_ucm_entry != nullptr) {
524     _ucm_entry->set_end_pc(_cgen->assembler()->pc());
525     if (_ucm_entry->error_exit_pc() == nullptr) {
526       _ucm_entry->set_error_exit_pc(_cgen->assembler()->pc());
527     }
528   }
529 }