1 /*
  2  * Copyright (c) 1997, 2025, Oracle and/or its affiliates. All rights reserved.
  3  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  4  *
  5  * This code is free software; you can redistribute it and/or modify it
  6  * under the terms of the GNU General Public License version 2 only, as
  7  * published by the Free Software Foundation.
  8  *
  9  * This code is distributed in the hope that it will be useful, but WITHOUT
 10  * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 11  * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
 12  * version 2 for more details (a copy is included in the LICENSE file that
 13  * accompanied this code).
 14  *
 15  * You should have received a copy of the GNU General Public License version
 16  * 2 along with this work; if not, write to the Free Software Foundation,
 17  * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 18  *
 19  * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 20  * or visit www.oracle.com if you need additional information or have any
 21  * questions.
 22  *
 23  */
 24 
 25 #include "asm/codeBuffer.hpp"
 26 #include "asm/macroAssembler.inline.hpp"
 27 #include "memory/resourceArea.hpp"
 28 #include "oops/access.inline.hpp"
 29 #include "oops/klass.hpp"
 30 #include "oops/oop.inline.hpp"
 31 #include "prims/vectorSupport.hpp"
 32 #include "runtime/continuation.hpp"
 33 #include "runtime/interfaceSupport.inline.hpp"
 34 #include "runtime/timerTrace.hpp"
 35 #include "runtime/sharedRuntime.hpp"
 36 #include "runtime/stubRoutines.hpp"
 37 #include "utilities/align.hpp"
 38 #include "utilities/copy.hpp"
 39 #ifdef COMPILER2
 40 #include "opto/runtime.hpp"
 41 #endif
 42 
// Static backing state for the UnsafeMemoryAccess table, which records the
// PC ranges of generated stub code that may fault during unsafe (raw) memory
// copies. Entries are appended during stub generation (see add_to_table(),
// called from UnsafeMemoryAccessMark below).
UnsafeMemoryAccess* UnsafeMemoryAccess::_table                  = nullptr; // allocated by create_table()
int UnsafeMemoryAccess::_table_length                           = 0;       // number of entries in use
int UnsafeMemoryAccess::_table_max_length                       = 0;       // allocated capacity
address UnsafeMemoryAccess::_common_exit_stub_pc                = nullptr; // shared error-exit stub pc
 47 
 48 // Implementation of StubRoutines - for a description of how to
// declare new blobs, stubs and entries, see stubDefinitions.hpp.
 50 
 51 // define arrays to hold stub and blob names
 52 
 53 // use a template to generate the initializer for the blob names array
 54 
// DEFINE_BLOB_NAME stringizes each blob name into one initializer element.
#define DEFINE_BLOB_NAME(blob_name)             \
  # blob_name,

// One name per blob id, indexed by StubGenBlobId (order comes from the
// STUBGEN_BLOBS_DO template, which must match the enum declaration order).
const char* StubRoutines::_blob_names[StubGenBlobId::NUM_BLOBIDS] = {
  STUBGEN_BLOBS_DO(DEFINE_BLOB_NAME)
};

#undef DEFINE_BLOB_NAME

// DEFINE_STUB_NAME stringizes each stub name into one initializer element.
#define DEFINE_STUB_NAME(blob_name, stub_name)          \
  # stub_name ,                                         \

// use a template to generate the initializer for the stub names array
// (indexed by StubGenStubId, again in enum declaration order)
const char* StubRoutines::_stub_names[StubGenStubId::NUM_STUBIDS] = {
  STUBGEN_STUBS_DO(DEFINE_STUB_NAME)
};

#undef DEFINE_STUB_NAME

// Define fields used to store blobs

// Each blob gets a BufferBlob* field, initialized lazily by the generated
// initialize_<blob>_stubs() methods further below.
#define DEFINE_BLOB_FIELD(blob_name) \
  BufferBlob* StubRoutines:: STUBGEN_BLOB_FIELD_NAME(blob_name) = nullptr;

STUBGEN_BLOBS_DO(DEFINE_BLOB_FIELD)

#undef DEFINE_BLOB_FIELD

// Define fields used to store stub entries

// Plain entry: a single address, filled in when the stub is generated.
#define DEFINE_ENTRY_FIELD(blob_name, stub_name, field_name, getter_name) \
  address StubRoutines:: STUB_FIELD_NAME(field_name) = nullptr;

// Entry with a static initializer: points at a C function until (unless) a
// generated stub replaces it.
#define DEFINE_ENTRY_FIELD_INIT(blob_name, stub_name, field_name, getter_name, init_function) \
  address StubRoutines:: STUB_FIELD_NAME(field_name) = CAST_FROM_FN_PTR(address, init_function);

// Array entry: `count` addresses for a family of related stub variants.
#define DEFINE_ENTRY_FIELD_ARRAY(blob_name, stub_name, field_name, getter_name, count) \
  address StubRoutines:: STUB_FIELD_NAME(field_name)[count] = { nullptr };

STUBGEN_ENTRIES_DO(DEFINE_ENTRY_FIELD, DEFINE_ENTRY_FIELD_INIT, DEFINE_ENTRY_FIELD_ARRAY)

#undef DEFINE_ENTRY_FIELD_ARRAY
#undef DEFINE_ENTRY_FIELD_INIT
#undef DEFINE_ENTRY_FIELD
 99 
// Counter bumped by generated verify_oop code — NOTE(review): the updating
// stub is generated elsewhere; confirm against the platform stub generator.
jint    StubRoutines::_verify_oop_count                         = 0;


// Entry points for string indexOf stub variants (presumably one per
// element-size combination — set during stub generation, not visible here).
address StubRoutines::_string_indexof_array[4]   =    { nullptr };
104 
105 const char* StubRoutines::get_blob_name(StubGenBlobId id) {
106   assert(0 <= id && id < StubGenBlobId::NUM_BLOBIDS, "invalid blob id");
107   return _blob_names[id];
108 }
109 
110 const char* StubRoutines::get_stub_name(StubGenStubId id) {
111   assert(0 <= id && id < StubGenStubId::NUM_STUBIDS, "invalid stub id");
112   return _stub_names[id];
113 }
114 
115 #ifdef ASSERT
116 
// array holding start and end indices for stub ids associated with a
// given blob. Given a blob with id (StubGenBlobId) blob_id for any
// stub with id (StubGenStubId) stub_id declared within the blob:
// _blob_offsets[blob_id] <= stub_id < _blob_offsets[blob_id+1]

// Note: element 0 is never written by BLOB_COUNT below; as a static it is
// zero-initialized, which is the correct lower limit for the first blob.
static int _blob_limits[StubGenBlobId::NUM_BLOBIDS + 1];

// macro used to compute blob limits
// Expands in verifyStubIds() and relies on its locals `counter` and `index`:
// accumulates each blob's local stub count and records the running total.
#define BLOB_COUNT(blob_name)                                           \
  counter += StubGenStubId_ ## blob_name :: NUM_STUBIDS_ ## blob_name;  \
  _blob_limits[++index] = counter;                                      \

// macro that checks stubs are associated with the correct blobs
// Expands in verifyStubIds() and relies on its locals `localStubId`,
// `globalStubId` and `blobId`.
#define STUB_VERIFY(blob_name, stub_name)                               \
  localStubId = (int) (StubGenStubId_ ## blob_name :: blob_name ## _ ## stub_name ## _id); \
  globalStubId = (int) (StubGenStubId:: stub_name ## _id);              \
  blobId = (int) (StubGenBlobId:: blob_name ## _id);                    \
  assert((globalStubId >= _blob_limits[blobId] &&                       \
          globalStubId < _blob_limits[blobId+1]),                       \
         "stub " # stub_name " uses incorrect blob name " # blob_name); \
  assert(globalStubId == _blob_limits[blobId] + localStubId,            \
         "stub " # stub_name " id found at wrong offset!");             \

// Sanity-check the generated stub id enums: fills _blob_limits, then asserts
// that every stub's global id lies inside its blob's range and equals the
// blob base plus its local id. Always returns true (failures assert).
bool verifyStubIds() {
  // first compute the blob limits
  int counter = 0;
  int index = 0;
  // populate offsets table with cumulative total of local enum counts
  STUBGEN_BLOBS_DO(BLOB_COUNT);

  // ensure 1) global stub ids lie in the range of the associated blob
  // and 2) each blob's base + local stub id == global stub id
  int globalStubId, blobId, localStubId;
  STUBGEN_STUBS_DO(STUB_VERIFY);
  return true;
}

#undef BLOB_COUNT
#undef STUB_VERIFY

// ensure we verify the blob ids when this compile unit is first entered
// (static initializer runs verifyStubIds() once at VM load, debug builds only)
bool _verified_stub_ids = verifyStubIds();
159 
160 
// macro used by stub to blob translation

// Expands in stub_to_blob() and relies on its locals `id` and `blobId`:
// if `id` is below the next blob's lower limit it belongs to the current
// blob, otherwise advance `blobId` to this blob and keep scanning. The
// first expansion tests _blob_limits[0] (always 0, see above) so it never
// returns the initial NO_BLOBID.
#define BLOB_CHECK_OFFSET(blob_name)                                \
  if (id < _blob_limits[((int)blobId) + 1]) { return blobId; }      \
  blobId = StubGenBlobId:: blob_name ## _id;                        \

// translate a global stub id to an associated blob id based on the
// computed blob limits

StubGenBlobId StubRoutines::stub_to_blob(StubGenStubId stubId) {
  int id = (int)stubId;
  assert(id > ((int)StubGenStubId::NO_STUBID) && id < ((int)StubGenStubId::NUM_STUBIDS), "stub id out of range!");
  // start with no blob to catch stub id == -1
  StubGenBlobId blobId = StubGenBlobId::NO_BLOBID;
  STUBGEN_BLOBS_DO(BLOB_CHECK_OFFSET);
  // if we reach here we should have the last blob id
  assert(blobId == StubGenBlobId::NUM_BLOBIDS - 1, "unexpected blob id");
  return blobId;
}
180 
181 #endif // ASSERT
182 
// TODO: update with 8343767
// Entry points for inline-type (value object) field load/buffer stubs —
// presumably set during stub generation; the TODO above tracks folding
// these into the generated entry declarations.
address StubRoutines::_load_inline_type_fields_in_regs = nullptr;
address StubRoutines::_store_inline_type_fields_to_buf = nullptr;
186 
187 
188 // Initialization
189 
190 extern void StubGenerator_generate(CodeBuffer* code, StubGenBlobId blob_id); // only interface to generators
191 
192 void UnsafeMemoryAccess::create_table(int max_size) {
193   UnsafeMemoryAccess::_table = new UnsafeMemoryAccess[max_size];
194   UnsafeMemoryAccess::_table_max_length = max_size;
195 }
196 
197 bool UnsafeMemoryAccess::contains_pc(address pc) {
198   assert(UnsafeMemoryAccess::_table != nullptr, "");
199   for (int i = 0; i < UnsafeMemoryAccess::_table_length; i++) {
200     UnsafeMemoryAccess* entry = &UnsafeMemoryAccess::_table[i];
201     if (pc >= entry->start_pc() && pc < entry->end_pc()) {
202       return true;
203     }
204   }
205   return false;
206 }
207 
208 address UnsafeMemoryAccess::page_error_continue_pc(address pc) {
209   assert(UnsafeMemoryAccess::_table != nullptr, "");
210   for (int i = 0; i < UnsafeMemoryAccess::_table_length; i++) {
211     UnsafeMemoryAccess* entry = &UnsafeMemoryAccess::_table[i];
212     if (pc >= entry->start_pc() && pc < entry->end_pc()) {
213       return entry->error_exit_pc();
214     }
215   }
216   return nullptr;
217 }
218 
219 
220 static BufferBlob* initialize_stubs(StubGenBlobId blob_id,
221                                     int code_size, int max_aligned_stubs,
222                                     const char* timer_msg,
223                                     const char* buffer_name,
224                                     const char* assert_msg) {
225   ResourceMark rm;
226   if (code_size == 0) {
227     LogTarget(Info, stubs) lt;
228     if (lt.is_enabled()) {
229       LogStream ls(lt);
230       ls.print_cr("%s\t not generated", buffer_name);
231       return nullptr;
232     }
233   }
234   TraceTime timer(timer_msg, TRACETIME_LOG(Info, startuptime));
235   // Add extra space for large CodeEntryAlignment
236   int size = code_size + CodeEntryAlignment * max_aligned_stubs;
237   BufferBlob* stubs_code = BufferBlob::create(buffer_name, size);
238   if (stubs_code == nullptr) {
239     vm_exit_out_of_memory(code_size, OOM_MALLOC_ERROR, "CodeCache: no room for %s", buffer_name);
240   }
241   CodeBuffer buffer(stubs_code);
242   StubGenerator_generate(&buffer, blob_id);
243   // When new stubs added we need to make sure there is some space left
244   // to catch situation when we should increase size again.
245   assert(code_size == 0 || buffer.insts_remaining() > 200, "increase %s", assert_msg);
246 
247   LogTarget(Info, stubs) lt;
248   if (lt.is_enabled()) {
249     LogStream ls(lt);
250     ls.print_cr("%s\t [" INTPTR_FORMAT ", " INTPTR_FORMAT "] used: %d, free: %d",
251                 buffer_name, p2i(stubs_code->content_begin()), p2i(stubs_code->content_end()),
252                 buffer.total_content_size(), buffer.insts_remaining());
253   }
254   return stubs_code;
255 }
256 
257 #define DEFINE_BLOB_INIT_METHOD(blob_name)                              \
258   void StubRoutines::initialize_ ## blob_name ## _stubs() {             \
259     if (STUBGEN_BLOB_FIELD_NAME(blob_name) == nullptr) {                \
260       StubGenBlobId blob_id = StubGenBlobId:: STUB_ID_NAME(blob_name);  \
261       int size = _ ## blob_name ## _code_size;                          \
262       int max_aligned_size = 10;                                        \
263       const char* timer_msg = "StubRoutines generation " # blob_name " stubs"; \
264       const char* name = "StubRoutines (" # blob_name "stubs)";         \
265       const char* assert_msg = "_" # blob_name "_code_size";            \
266       STUBGEN_BLOB_FIELD_NAME(blob_name) =                              \
267         initialize_stubs(blob_id, size, max_aligned_size, timer_msg,    \
268                          name, assert_msg);                             \
269     }                                                                   \
270   }
271 
272 
273 STUBGEN_BLOBS_DO(DEFINE_BLOB_INIT_METHOD)
274 
275 #undef DEFINE_BLOB_INIT_METHOD
276 
277 
// Generate a free function <blob_name>_stubs_init() per blob — thin wrappers
// over the StubRoutines init methods, used as VM init-sequence entry points.
#define DEFINE_BLOB_INIT_FUNCTION(blob_name)            \
void blob_name ## _stubs_init()  {                      \
  StubRoutines::initialize_ ## blob_name ## _stubs();   \
}

STUBGEN_BLOBS_DO(DEFINE_BLOB_INIT_FUNCTION)

#undef DEFINE_BLOB_INIT_FUNCTION
286 
287 /*
288  * we generate the underlying driver method but this wrapper is needed
289  * to perform special handling depending on where the compiler init
290  * gets called from. it ought to be possible to remove this at some
 * point and have a determinate ordered init.
292  */
293 
294 void compiler_stubs_init(bool in_compiler_thread) {
295   if (in_compiler_thread && DelayCompilerStubsGeneration) {
296     // Temporarily revert state of stubs generation because
297     // it is called after final_stubs_init() finished
298     // during compiler runtime initialization.
299     // It is fine because these stubs are only used by
300     // compiled code and compiler is not running yet.
301     StubCodeDesc::unfreeze();
302     StubRoutines::initialize_compiler_stubs();
303     StubCodeDesc::freeze();
304   } else if (!in_compiler_thread && !DelayCompilerStubsGeneration) {
305     StubRoutines::initialize_compiler_stubs();
306   }
307 }
308 
309 
310 //
311 // Default versions of arraycopy functions
312 //
313 
// Slow-path, element-wise (per-element atomic) conjoint copies. Each bumps a
// diagnostic counter in non-product builds and delegates to the matching
// Copy::conjoint_*_atomic routine.

JRT_LEAF(void, StubRoutines::jbyte_copy(jbyte* src, jbyte* dest, size_t count))
#ifndef PRODUCT
  SharedRuntime::_jbyte_array_copy_ctr++;      // Slow-path byte array copy
#endif // !PRODUCT
  Copy::conjoint_jbytes_atomic(src, dest, count);
JRT_END

JRT_LEAF(void, StubRoutines::jshort_copy(jshort* src, jshort* dest, size_t count))
#ifndef PRODUCT
  SharedRuntime::_jshort_array_copy_ctr++;     // Slow-path short/char array copy
#endif // !PRODUCT
  Copy::conjoint_jshorts_atomic(src, dest, count);
JRT_END

JRT_LEAF(void, StubRoutines::jint_copy(jint* src, jint* dest, size_t count))
#ifndef PRODUCT
  SharedRuntime::_jint_array_copy_ctr++;       // Slow-path int/float array copy
#endif // !PRODUCT
  Copy::conjoint_jints_atomic(src, dest, count);
JRT_END

JRT_LEAF(void, StubRoutines::jlong_copy(jlong* src, jlong* dest, size_t count))
#ifndef PRODUCT
  SharedRuntime::_jlong_array_copy_ctr++;      // Slow-path long/double array copy
#endif // !PRODUCT
  Copy::conjoint_jlongs_atomic(src, dest, count);
JRT_END

// Oop copies go through the Access API so GC barriers are applied; callers
// must guarantee a non-zero count.
JRT_LEAF(void, StubRoutines::oop_copy(oop* src, oop* dest, size_t count))
#ifndef PRODUCT
  SharedRuntime::_oop_array_copy_ctr++;        // Slow-path oop array copy
#endif // !PRODUCT
  assert(count != 0, "count should be non-zero");
  ArrayAccess<>::oop_arraycopy_raw((HeapWord*)src, (HeapWord*)dest, count);
JRT_END

// Variant for destinations known to hold no previous oops (skips pre-barrier
// work via IS_DEST_UNINITIALIZED).
JRT_LEAF(void, StubRoutines::oop_copy_uninit(oop* src, oop* dest, size_t count))
#ifndef PRODUCT
  SharedRuntime::_oop_array_copy_ctr++;        // Slow-path oop array copy
#endif // !PRODUCT
  assert(count != 0, "count should be non-zero");
  ArrayAccess<IS_DEST_UNINITIALIZED>::oop_arraycopy_raw((HeapWord*)src, (HeapWord*)dest, count);
JRT_END
357 
// "arrayof" variants: src/dest are HeapWord-aligned (hence the HeapWord*
// signatures), allowing the wider Copy::arrayof_conjoint_* routines.

JRT_LEAF(void, StubRoutines::arrayof_jbyte_copy(HeapWord* src, HeapWord* dest, size_t count))
#ifndef PRODUCT
  SharedRuntime::_jbyte_array_copy_ctr++;      // Slow-path byte array copy
#endif // !PRODUCT
  Copy::arrayof_conjoint_jbytes(src, dest, count);
JRT_END

JRT_LEAF(void, StubRoutines::arrayof_jshort_copy(HeapWord* src, HeapWord* dest, size_t count))
#ifndef PRODUCT
  SharedRuntime::_jshort_array_copy_ctr++;     // Slow-path short/char array copy
#endif // !PRODUCT
  Copy::arrayof_conjoint_jshorts(src, dest, count);
JRT_END

JRT_LEAF(void, StubRoutines::arrayof_jint_copy(HeapWord* src, HeapWord* dest, size_t count))
#ifndef PRODUCT
  SharedRuntime::_jint_array_copy_ctr++;       // Slow-path int/float array copy
#endif // !PRODUCT
  Copy::arrayof_conjoint_jints(src, dest, count);
JRT_END

JRT_LEAF(void, StubRoutines::arrayof_jlong_copy(HeapWord* src, HeapWord* dest, size_t count))
#ifndef PRODUCT
  SharedRuntime::_jlong_array_copy_ctr++;       // Slow-path long/double array copy
#endif // !PRODUCT
  Copy::arrayof_conjoint_jlongs(src, dest, count);
JRT_END

// Aligned oop copy with GC barriers (ARRAYCOPY_ARRAYOF decorator); callers
// must guarantee a non-zero count.
JRT_LEAF(void, StubRoutines::arrayof_oop_copy(HeapWord* src, HeapWord* dest, size_t count))
#ifndef PRODUCT
  SharedRuntime::_oop_array_copy_ctr++;        // Slow-path oop array copy
#endif // !PRODUCT
  assert(count != 0, "count should be non-zero");
  ArrayAccess<ARRAYCOPY_ARRAYOF>::oop_arraycopy_raw(src, dest, count);
JRT_END

// As above, for destinations known to hold no previous oops.
JRT_LEAF(void, StubRoutines::arrayof_oop_copy_uninit(HeapWord* src, HeapWord* dest, size_t count))
#ifndef PRODUCT
  SharedRuntime::_oop_array_copy_ctr++;        // Slow-path oop array copy
#endif // !PRODUCT
  assert(count != 0, "count should be non-zero");
  ArrayAccess<ARRAYCOPY_ARRAYOF | IS_DEST_UNINITIALIZED>::oop_arraycopy_raw(src, dest, count);
JRT_END
401 
// Return the entry point of the fill stub for element type t, storing the
// stub's name through `name` as a side effect. `aligned` selects the
// HeapWord-aligned ("arrayof") variant. Returns nullptr for element types
// with no fill stub.
address StubRoutines::select_fill_function(BasicType t, bool aligned, const char* &name) {
// RETURN_STUB sets `name` to the stringized stub identifier and returns the
// corresponding StubRoutines accessor's value.
#define RETURN_STUB(xxx_fill) { \
  name = #xxx_fill; \
  return StubRoutines::xxx_fill(); }

  switch (t) {
  case T_BYTE:
  case T_BOOLEAN:
    if (!aligned) RETURN_STUB(jbyte_fill);
    RETURN_STUB(arrayof_jbyte_fill);
  case T_CHAR:
  case T_SHORT:
    if (!aligned) RETURN_STUB(jshort_fill);
    RETURN_STUB(arrayof_jshort_fill);
  case T_INT:
  case T_FLOAT:
    if (!aligned) RETURN_STUB(jint_fill);
    RETURN_STUB(arrayof_jint_fill);
  case T_DOUBLE:
  case T_LONG:
  case T_ARRAY:
  case T_OBJECT:
  case T_NARROWOOP:
  case T_NARROWKLASS:
  case T_ADDRESS:
  case T_VOID:
    // Currently unsupported
    return nullptr;

  default:
    ShouldNotReachHere();
    return nullptr;
  }

#undef RETURN_STUB
}
438 
// constants for computing the copy function
// Combined as a 2-bit selector: bit 0 = alignment, bit 1 = disjointness.
enum {
  COPYFUNC_UNALIGNED = 0,
  COPYFUNC_ALIGNED = 1,                 // src, dest aligned to HeapWordSize
  COPYFUNC_CONJOINT = 0,
  COPYFUNC_DISJOINT = 2                 // src != dest, or transfer can descend
};
446 
447 // Note:  The condition "disjoint" applies also for overlapping copies
// where a descending copy is permitted (i.e., dest_offset <= src_offset).
// Return the arraycopy stub entry point for element type t, selected by
// alignment and (dis)jointness, storing the stub's name through `name`.
// `dest_uninitialized` picks the *_uninit oop variants. Note: each inner
// switch covers all four selector values and returns, so the apparent
// fall-through between outer cases is unreachable.
address
StubRoutines::select_arraycopy_function(BasicType t, bool aligned, bool disjoint, const char* &name, bool dest_uninitialized) {
  // 2-bit selector, see the COPYFUNC_* enum above
  int selector =
    (aligned  ? COPYFUNC_ALIGNED  : COPYFUNC_UNALIGNED) +
    (disjoint ? COPYFUNC_DISJOINT : COPYFUNC_CONJOINT);

// RETURN_STUB sets `name` and returns the accessor's value.
#define RETURN_STUB(xxx_arraycopy) { \
  name = #xxx_arraycopy; \
  return StubRoutines::xxx_arraycopy(); }

// As above for accessors parameterized on dest_uninitialized; the name gets
// a "_uninit" suffix when the parameter is true.
#define RETURN_STUB_PARM(xxx_arraycopy, parm) { \
  name = parm ? #xxx_arraycopy "_uninit": #xxx_arraycopy; \
  return StubRoutines::xxx_arraycopy(parm); }

  switch (t) {
  case T_BYTE:
  case T_BOOLEAN:
    switch (selector) {
    case COPYFUNC_CONJOINT | COPYFUNC_UNALIGNED:  RETURN_STUB(jbyte_arraycopy);
    case COPYFUNC_CONJOINT | COPYFUNC_ALIGNED:    RETURN_STUB(arrayof_jbyte_arraycopy);
    case COPYFUNC_DISJOINT | COPYFUNC_UNALIGNED:  RETURN_STUB(jbyte_disjoint_arraycopy);
    case COPYFUNC_DISJOINT | COPYFUNC_ALIGNED:    RETURN_STUB(arrayof_jbyte_disjoint_arraycopy);
    }
  case T_CHAR:
  case T_SHORT:
    switch (selector) {
    case COPYFUNC_CONJOINT | COPYFUNC_UNALIGNED:  RETURN_STUB(jshort_arraycopy);
    case COPYFUNC_CONJOINT | COPYFUNC_ALIGNED:    RETURN_STUB(arrayof_jshort_arraycopy);
    case COPYFUNC_DISJOINT | COPYFUNC_UNALIGNED:  RETURN_STUB(jshort_disjoint_arraycopy);
    case COPYFUNC_DISJOINT | COPYFUNC_ALIGNED:    RETURN_STUB(arrayof_jshort_disjoint_arraycopy);
    }
  case T_INT:
  case T_FLOAT:
    switch (selector) {
    case COPYFUNC_CONJOINT | COPYFUNC_UNALIGNED:  RETURN_STUB(jint_arraycopy);
    case COPYFUNC_CONJOINT | COPYFUNC_ALIGNED:    RETURN_STUB(arrayof_jint_arraycopy);
    case COPYFUNC_DISJOINT | COPYFUNC_UNALIGNED:  RETURN_STUB(jint_disjoint_arraycopy);
    case COPYFUNC_DISJOINT | COPYFUNC_ALIGNED:    RETURN_STUB(arrayof_jint_disjoint_arraycopy);
    }
  case T_DOUBLE:
  case T_LONG:
    switch (selector) {
    case COPYFUNC_CONJOINT | COPYFUNC_UNALIGNED:  RETURN_STUB(jlong_arraycopy);
    case COPYFUNC_CONJOINT | COPYFUNC_ALIGNED:    RETURN_STUB(arrayof_jlong_arraycopy);
    case COPYFUNC_DISJOINT | COPYFUNC_UNALIGNED:  RETURN_STUB(jlong_disjoint_arraycopy);
    case COPYFUNC_DISJOINT | COPYFUNC_ALIGNED:    RETURN_STUB(arrayof_jlong_disjoint_arraycopy);
    }
  case T_ARRAY:
  case T_OBJECT:
    switch (selector) {
    case COPYFUNC_CONJOINT | COPYFUNC_UNALIGNED:  RETURN_STUB_PARM(oop_arraycopy, dest_uninitialized);
    case COPYFUNC_CONJOINT | COPYFUNC_ALIGNED:    RETURN_STUB_PARM(arrayof_oop_arraycopy, dest_uninitialized);
    case COPYFUNC_DISJOINT | COPYFUNC_UNALIGNED:  RETURN_STUB_PARM(oop_disjoint_arraycopy, dest_uninitialized);
    case COPYFUNC_DISJOINT | COPYFUNC_ALIGNED:    RETURN_STUB_PARM(arrayof_oop_disjoint_arraycopy, dest_uninitialized);
    }
  default:
    ShouldNotReachHere();
    return nullptr;
  }

#undef RETURN_STUB
#undef RETURN_STUB_PARM
}
512 
513 UnsafeMemoryAccessMark::UnsafeMemoryAccessMark(StubCodeGenerator* cgen, bool add_entry, bool continue_at_scope_end, address error_exit_pc) {
514   _cgen = cgen;
515   _ucm_entry = nullptr;
516   if (add_entry) {
517     address err_exit_pc = nullptr;
518     if (!continue_at_scope_end) {
519       err_exit_pc = error_exit_pc != nullptr ? error_exit_pc : UnsafeMemoryAccess::common_exit_stub_pc();
520     }
521     assert(err_exit_pc != nullptr || continue_at_scope_end, "error exit not set");
522     _ucm_entry = UnsafeMemoryAccess::add_to_table(_cgen->assembler()->pc(), nullptr, err_exit_pc);
523   }
524 }
525 
526 UnsafeMemoryAccessMark::~UnsafeMemoryAccessMark() {
527   if (_ucm_entry != nullptr) {
528     _ucm_entry->set_end_pc(_cgen->assembler()->pc());
529     if (_ucm_entry->error_exit_pc() == nullptr) {
530       _ucm_entry->set_error_exit_pc(_cgen->assembler()->pc());
531     }
532   }
533 }