1 /*
  2  * Copyright (c) 1997, 2025, Oracle and/or its affiliates. All rights reserved.
  3  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  4  *
  5  * This code is free software; you can redistribute it and/or modify it
  6  * under the terms of the GNU General Public License version 2 only, as
  7  * published by the Free Software Foundation.
  8  *
  9  * This code is distributed in the hope that it will be useful, but WITHOUT
 10  * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 11  * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
 12  * version 2 for more details (a copy is included in the LICENSE file that
 13  * accompanied this code).
 14  *
 15  * You should have received a copy of the GNU General Public License version
 16  * 2 along with this work; if not, write to the Free Software Foundation,
 17  * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 18  *
 19  * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 20  * or visit www.oracle.com if you need additional information or have any
 21  * questions.
 22  *
 23  */
 24 
 25 #include "asm/codeBuffer.hpp"
 26 #include "asm/macroAssembler.inline.hpp"
 27 #include "memory/resourceArea.hpp"
 28 #include "oops/access.inline.hpp"
 29 #include "oops/klass.hpp"
 30 #include "oops/oop.inline.hpp"
 31 #include "prims/vectorSupport.hpp"
 32 #include "runtime/continuation.hpp"
 33 #include "runtime/interfaceSupport.inline.hpp"
 34 #include "runtime/timerTrace.hpp"
 35 #include "runtime/sharedRuntime.hpp"
 36 #include "runtime/stubRoutines.hpp"
 37 #include "utilities/align.hpp"
 38 #include "utilities/copy.hpp"
 39 #ifdef COMPILER2
 40 #include "opto/runtime.hpp"
 41 #endif
 42 
// Static backing storage for the table of unsafe-memory-access code ranges.
// The table itself is allocated lazily by UnsafeMemoryAccess::create_table().
UnsafeMemoryAccess* UnsafeMemoryAccess::_table                  = nullptr;
int UnsafeMemoryAccess::_table_length                           = 0;
int UnsafeMemoryAccess::_table_max_length                       = 0;
address UnsafeMemoryAccess::_common_exit_stub_pc                = nullptr;
 47 
 48 // Implementation of StubRoutines - for a description of how to
 49 // declare new blobs, stubs and entries , see stubDefinitions.hpp.
 50 
 51 // define arrays to hold stub and blob names
 52 
 53 // use a template to generate the initializer for the blob names array
 54 
// Stringize each blob name into one entry of the name table.
#define DEFINE_BLOB_NAME(blob_name)             \
  # blob_name,

// one name string per blob, indexed by StubGenBlobId
const char* StubRoutines::_blob_names[StubGenBlobId::NUM_BLOBIDS] = {
  STUBGEN_BLOBS_DO(DEFINE_BLOB_NAME)
};

#undef DEFINE_BLOB_NAME
 63 
// Stringize each stub name into one entry of the name table (the owning
// blob name is ignored here).
#define DEFINE_STUB_NAME(blob_name, stub_name)          \
  # stub_name ,                                         \

// use a template to generate the initializer for the stub names array
const char* StubRoutines::_stub_names[StubGenStubId::NUM_STUBIDS] = {
  STUBGEN_STUBS_DO(DEFINE_STUB_NAME)
};

#undef DEFINE_STUB_NAME
 73 
 74 // Define fields used to store blobs
 75 
// One BufferBlob* field per blob, initially null; set when the blob's
// initialize_<blob_name>_stubs() method generates the code.
#define DEFINE_BLOB_FIELD(blob_name) \
  BufferBlob* StubRoutines:: STUBGEN_BLOB_FIELD_NAME(blob_name) = nullptr;

STUBGEN_BLOBS_DO(DEFINE_BLOB_FIELD)

#undef DEFINE_BLOB_FIELD
 82 
 83 // Define fields used to store stub entries
 84 
// Plain entry: a single address, initially null, filled in at generation time.
#define DEFINE_ENTRY_FIELD(blob_name, stub_name, field_name, getter_name) \
  address StubRoutines:: STUB_FIELD_NAME(field_name) = nullptr;

// Entry with a static default: points at init_function until (and unless)
// a generated stub replaces it.
#define DEFINE_ENTRY_FIELD_INIT(blob_name, stub_name, field_name, getter_name, init_function) \
  address StubRoutines:: STUB_FIELD_NAME(field_name) = CAST_FROM_FN_PTR(address, init_function);

// Array entry: 'count' addresses; aggregate init null-fills every slot.
#define DEFINE_ENTRY_FIELD_ARRAY(blob_name, stub_name, field_name, getter_name, count) \
  address StubRoutines:: STUB_FIELD_NAME(field_name)[count] = { nullptr };

STUBGEN_ENTRIES_DO(DEFINE_ENTRY_FIELD, DEFINE_ENTRY_FIELD_INIT, DEFINE_ENTRY_FIELD_ARRAY)

#undef DEFINE_ENTRY_FIELD_ARRAY
#undef DEFINE_ENTRY_FIELD_INIT
#undef DEFINE_ENTRY_FIELD
 99 
// Count of verify_oop checks; presumably incremented by the generated
// verify_oop stub — confirm against the platform stub generator.
jint    StubRoutines::_verify_oop_count                         = 0;


// Entry points for the string indexOf stub variants (4 slots, initially null).
address StubRoutines::_string_indexof_array[4]   =    { nullptr };
104 
105 const char* StubRoutines::get_blob_name(StubGenBlobId id) {
106   assert(0 <= id && id < StubGenBlobId::NUM_BLOBIDS, "invalid blob id");
107   return _blob_names[id];
108 }
109 
110 const char* StubRoutines::get_stub_name(StubGenStubId id) {
111   assert(0 <= id && id < StubGenStubId::NUM_STUBIDS, "invalid stub id");
112   return _stub_names[id];
113 }
114 
115 #ifdef ASSERT
116 
// array holding start and end indices for stub ids associated with a
// given blob. Given a blob with id (StubGenBlobId) blob_id for any
// stub with id (StubGenStubId) stub_id declared within the blob:
// _blob_offsets[blob_id] <= stub_id < _blob_offsets[blob_id+1]

static int _blob_limits[StubGenBlobId::NUM_BLOBIDS + 1];

// macro used to compute blob limits
// Expanded once per blob (in declaration order) by STUBGEN_BLOBS_DO:
// accumulates each blob's local stub count into a running total and
// records it as that blob's upper limit. Relies on locals 'counter'
// and 'index' declared in verifyStubIds() below.
#define BLOB_COUNT(blob_name)                                           \
  counter += StubGenStubId_ ## blob_name :: NUM_STUBIDS_ ## blob_name;  \
  _blob_limits[++index] = counter;                                      \

// macro that checks stubs are associated with the correct blobs
// Expanded once per stub by STUBGEN_STUBS_DO; relies on the locals
// declared in verifyStubIds() below.
#define STUB_VERIFY(blob_name, stub_name)                               \
  localStubId = (int) (StubGenStubId_ ## blob_name :: blob_name ## _ ## stub_name ## _id); \
  globalStubId = (int) (StubGenStubId:: stub_name ## _id);              \
  blobId = (int) (StubGenBlobId:: blob_name ## _id);                    \
  assert((globalStubId >= _blob_limits[blobId] &&                       \
          globalStubId < _blob_limits[blobId+1]),                       \
         "stub " # stub_name " uses incorrect blob name " # blob_name); \
  assert(globalStubId == _blob_limits[blobId] + localStubId,            \
         "stub " # stub_name " id found at wrong offset!");             \
139 
// Debug-only consistency check of the generated stub/blob enums. Runs once
// during static initialization (see _verified_stub_ids below) and also
// fills in the _blob_limits table used by stub_to_blob().
bool verifyStubIds() {
  // first compute the blob limits
  int counter = 0;
  int index = 0;
  // populate offsets table with cumulative total of local enum counts
  STUBGEN_BLOBS_DO(BLOB_COUNT);

  // ensure 1) global stub ids lie in the range of the associated blob
  // and 2) each blob's base + local stub id == global stub id
  int globalStubId, blobId, localStubId;
  STUBGEN_STUBS_DO(STUB_VERIFY);
  return true;
}
153 
#undef BLOB_COUNT
#undef STUB_VERIFY

// ensure we verify the blob ids when this compile unit is first entered
// (the dynamic initializer of this global invokes verifyStubIds() once)
bool _verified_stub_ids = verifyStubIds();


// macro used by stub to blob translation

// If 'id' lies below the upper limit of the blob currently held in
// 'blobId' we have found the owning blob and return it; otherwise
// advance 'blobId' to this blob and keep scanning.
#define BLOB_CHECK_OFFSET(blob_name)                                \
  if (id < _blob_limits[((int)blobId) + 1]) { return blobId; }      \
  blobId = StubGenBlobId:: blob_name ## _id;                        \
166 
// translate a global stub id to an associated blob id based on the
// computed blob limits

StubGenBlobId StubRoutines::stub_to_blob(StubGenStubId stubId) {
  int id = (int)stubId;
  assert(id > ((int)StubGenStubId::NO_STUBID) && id < ((int)StubGenStubId::NUM_STUBIDS), "stub id out of range!");
  // start with no blob to catch stub id == -1
  StubGenBlobId blobId = StubGenBlobId::NO_BLOBID;
  // each BLOB_CHECK_OFFSET expansion returns from this function as soon
  // as id falls below the current blob's upper limit
  STUBGEN_BLOBS_DO(BLOB_CHECK_OFFSET);
  // if we reach here we should have the last blob id
  assert(blobId == StubGenBlobId::NUM_BLOBIDS - 1, "unexpected blob id");
  return blobId;
}
180 
181 #endif // ASSERT
182 
183 // Initialization
184 //
185 // Note: to break cycle with universe initialization, stubs are generated in two phases.
186 // The first one generates stubs needed during universe init (e.g., _handle_must_compile_first_entry).
187 // The second phase includes all other stubs (which may depend on universe being initialized.)
188 
189 extern void StubGenerator_generate(CodeBuffer* code, StubGenBlobId blob_id); // only interface to generators
190 
191 void UnsafeMemoryAccess::create_table(int max_size) {
192   UnsafeMemoryAccess::_table = new UnsafeMemoryAccess[max_size];
193   UnsafeMemoryAccess::_table_max_length = max_size;
194 }
195 
196 bool UnsafeMemoryAccess::contains_pc(address pc) {
197   for (int i = 0; i < UnsafeMemoryAccess::_table_length; i++) {
198     UnsafeMemoryAccess* entry = &UnsafeMemoryAccess::_table[i];
199     if (pc >= entry->start_pc() && pc < entry->end_pc()) {
200       return true;
201     }
202   }
203   return false;
204 }
205 
206 address UnsafeMemoryAccess::page_error_continue_pc(address pc) {
207   for (int i = 0; i < UnsafeMemoryAccess::_table_length; i++) {
208     UnsafeMemoryAccess* entry = &UnsafeMemoryAccess::_table[i];
209     if (pc >= entry->start_pc() && pc < entry->end_pc()) {
210       return entry->error_exit_pc();
211     }
212   }
213   return nullptr;
214 }
215 
216 
217 static BufferBlob* initialize_stubs(StubGenBlobId blob_id,
218                                     int code_size, int max_aligned_stubs,
219                                     const char* timer_msg,
220                                     const char* buffer_name,
221                                     const char* assert_msg) {
222   ResourceMark rm;
223   TraceTime timer(timer_msg, TRACETIME_LOG(Info, startuptime));
224   // Add extra space for large CodeEntryAlignment
225   int size = code_size + CodeEntryAlignment * max_aligned_stubs;
226   BufferBlob* stubs_code = BufferBlob::create(buffer_name, size);
227   if (stubs_code == nullptr) {
228     vm_exit_out_of_memory(code_size, OOM_MALLOC_ERROR, "CodeCache: no room for %s", buffer_name);
229   }
230   CodeBuffer buffer(stubs_code);
231   StubGenerator_generate(&buffer, blob_id);
232   // When new stubs added we need to make sure there is some space left
233   // to catch situation when we should increase size again.
234   assert(code_size == 0 || buffer.insts_remaining() > 200, "increase %s", assert_msg);
235 
236   LogTarget(Info, stubs) lt;
237   if (lt.is_enabled()) {
238     LogStream ls(lt);
239     ls.print_cr("%s\t [" INTPTR_FORMAT ", " INTPTR_FORMAT "] used: %d, free: %d",
240                 buffer_name, p2i(stubs_code->content_begin()), p2i(stubs_code->content_end()),
241                 buffer.total_content_size(), buffer.insts_remaining());
242   }
243   return stubs_code;
244 }
245 
246 #define DEFINE_BLOB_INIT_METHOD(blob_name)                              \
247   void StubRoutines::initialize_ ## blob_name ## _stubs() {             \
248     if (STUBGEN_BLOB_FIELD_NAME(blob_name) == nullptr) {                \
249       StubGenBlobId blob_id = StubGenBlobId:: STUB_ID_NAME(blob_name);  \
250       int size = _ ## blob_name ## _code_size;                          \
251       int max_aligned_size = 10;                                        \
252       const char* timer_msg = "StubRoutines generation " # blob_name " stubs"; \
253       const char* name = "StubRoutines (" # blob_name "stubs)";         \
254       const char* assert_msg = "_" # blob_name "_code_size";            \
255       STUBGEN_BLOB_FIELD_NAME(blob_name) =                              \
256         initialize_stubs(blob_id, size, max_aligned_size, timer_msg,    \
257                          name, assert_msg);                             \
258     }                                                                   \
259   }
260 
261 
262 STUBGEN_BLOBS_DO(DEFINE_BLOB_INIT_METHOD)
263 
264 #undef DEFINE_BLOB_INIT_METHOD
265 
266 
// Generate a free function <blob_name>_stubs_init() per blob that simply
// forwards to the corresponding StubRoutines initialize method.
#define DEFINE_BLOB_INIT_FUNCTION(blob_name)            \
void blob_name ## _stubs_init()  {                      \
  StubRoutines::initialize_ ## blob_name ## _stubs();   \
}

STUBGEN_BLOBS_DO(DEFINE_BLOB_INIT_FUNCTION)

#undef DEFINE_BLOB_INIT_FUNCTION
275 
276 /*
277  * we generate the underlying driver method but this wrapper is needed
278  * to perform special handling depending on where the compiler init
279  * gets called from. it ought to be possible to remove this at some
280  * point and have adeterminate ordered init.
281  */
282 
283 void compiler_stubs_init(bool in_compiler_thread) {
284   if (in_compiler_thread && DelayCompilerStubsGeneration) {
285     // Temporarily revert state of stubs generation because
286     // it is called after final_stubs_init() finished
287     // during compiler runtime initialization.
288     // It is fine because these stubs are only used by
289     // compiled code and compiler is not running yet.
290     StubCodeDesc::unfreeze();
291     StubRoutines::initialize_compiler_stubs();
292     StubCodeDesc::freeze();
293   } else if (!in_compiler_thread && !DelayCompilerStubsGeneration) {
294     StubRoutines::initialize_compiler_stubs();
295   }
296 }
297 
298 
299 //
300 // Default versions of arraycopy functions
301 //
302 
// Conjoint (overlap-safe), element-atomic byte copy.
JRT_LEAF(void, StubRoutines::jbyte_copy(jbyte* src, jbyte* dest, size_t count))
#ifndef PRODUCT
  SharedRuntime::_jbyte_array_copy_ctr++;      // Slow-path byte array copy
#endif // !PRODUCT
  Copy::conjoint_jbytes_atomic(src, dest, count);
JRT_END

// Conjoint (overlap-safe), element-atomic short/char copy.
JRT_LEAF(void, StubRoutines::jshort_copy(jshort* src, jshort* dest, size_t count))
#ifndef PRODUCT
  SharedRuntime::_jshort_array_copy_ctr++;     // Slow-path short/char array copy
#endif // !PRODUCT
  Copy::conjoint_jshorts_atomic(src, dest, count);
JRT_END

// Conjoint (overlap-safe), element-atomic int/float copy.
JRT_LEAF(void, StubRoutines::jint_copy(jint* src, jint* dest, size_t count))
#ifndef PRODUCT
  SharedRuntime::_jint_array_copy_ctr++;       // Slow-path int/float array copy
#endif // !PRODUCT
  Copy::conjoint_jints_atomic(src, dest, count);
JRT_END

// Conjoint (overlap-safe), element-atomic long/double copy.
JRT_LEAF(void, StubRoutines::jlong_copy(jlong* src, jlong* dest, size_t count))
#ifndef PRODUCT
  SharedRuntime::_jlong_array_copy_ctr++;      // Slow-path long/double array copy
#endif // !PRODUCT
  Copy::conjoint_jlongs_atomic(src, dest, count);
JRT_END
330 
// Conjoint oop copy, routed through the Access API (ArrayAccess) rather
// than raw Copy so the GC can interpose on the stores.
JRT_LEAF(void, StubRoutines::oop_copy(oop* src, oop* dest, size_t count))
#ifndef PRODUCT
  SharedRuntime::_oop_array_copy_ctr++;        // Slow-path oop array copy
#endif // !PRODUCT
  assert(count != 0, "count should be non-zero");
  ArrayAccess<>::oop_arraycopy_raw((HeapWord*)src, (HeapWord*)dest, count);
JRT_END

// Same as oop_copy but the destination is known to be uninitialized
// (IS_DEST_UNINITIALIZED decorator).
JRT_LEAF(void, StubRoutines::oop_copy_uninit(oop* src, oop* dest, size_t count))
#ifndef PRODUCT
  SharedRuntime::_oop_array_copy_ctr++;        // Slow-path oop array copy
#endif // !PRODUCT
  assert(count != 0, "count should be non-zero");
  ArrayAccess<IS_DEST_UNINITIALIZED>::oop_arraycopy_raw((HeapWord*)src, (HeapWord*)dest, count);
JRT_END
346 
// "arrayof" variants: src/dest are HeapWord-aligned array bodies.
JRT_LEAF(void, StubRoutines::arrayof_jbyte_copy(HeapWord* src, HeapWord* dest, size_t count))
#ifndef PRODUCT
  SharedRuntime::_jbyte_array_copy_ctr++;      // Slow-path byte array copy
#endif // !PRODUCT
  Copy::arrayof_conjoint_jbytes(src, dest, count);
JRT_END

JRT_LEAF(void, StubRoutines::arrayof_jshort_copy(HeapWord* src, HeapWord* dest, size_t count))
#ifndef PRODUCT
  SharedRuntime::_jshort_array_copy_ctr++;     // Slow-path short/char array copy
#endif // !PRODUCT
  Copy::arrayof_conjoint_jshorts(src, dest, count);
JRT_END

JRT_LEAF(void, StubRoutines::arrayof_jint_copy(HeapWord* src, HeapWord* dest, size_t count))
#ifndef PRODUCT
  SharedRuntime::_jint_array_copy_ctr++;       // Slow-path int/float array copy
#endif // !PRODUCT
  Copy::arrayof_conjoint_jints(src, dest, count);
JRT_END

JRT_LEAF(void, StubRoutines::arrayof_jlong_copy(HeapWord* src, HeapWord* dest, size_t count))
#ifndef PRODUCT
  SharedRuntime::_jlong_array_copy_ctr++;       // Slow-path long/double array copy
#endif // !PRODUCT
  Copy::arrayof_conjoint_jlongs(src, dest, count);
JRT_END
374 
// HeapWord-aligned oop copy via the Access API (ARRAYCOPY_ARRAYOF decorator).
JRT_LEAF(void, StubRoutines::arrayof_oop_copy(HeapWord* src, HeapWord* dest, size_t count))
#ifndef PRODUCT
  SharedRuntime::_oop_array_copy_ctr++;        // Slow-path oop array copy
#endif // !PRODUCT
  assert(count != 0, "count should be non-zero");
  ArrayAccess<ARRAYCOPY_ARRAYOF>::oop_arraycopy_raw(src, dest, count);
JRT_END

// Same as arrayof_oop_copy but the destination is known to be uninitialized.
JRT_LEAF(void, StubRoutines::arrayof_oop_copy_uninit(HeapWord* src, HeapWord* dest, size_t count))
#ifndef PRODUCT
  SharedRuntime::_oop_array_copy_ctr++;        // Slow-path oop array copy
#endif // !PRODUCT
  assert(count != 0, "count should be non-zero");
  ArrayAccess<ARRAYCOPY_ARRAYOF | IS_DEST_UNINITIALIZED>::oop_arraycopy_raw(src, dest, count);
JRT_END
390 
// Select the fill stub entry point for element type t (aligned or not);
// sets 'name' to the chosen stub's name. Returns null for types that
// currently have no fill stub.
address StubRoutines::select_fill_function(BasicType t, bool aligned, const char* &name) {
// Helper: record the stub's name and return its entry point.
#define RETURN_STUB(xxx_fill) { \
  name = #xxx_fill; \
  return StubRoutines::xxx_fill(); }

  switch (t) {
  case T_BYTE:
  case T_BOOLEAN:
    if (!aligned) RETURN_STUB(jbyte_fill);
    RETURN_STUB(arrayof_jbyte_fill);
  case T_CHAR:
  case T_SHORT:
    if (!aligned) RETURN_STUB(jshort_fill);
    RETURN_STUB(arrayof_jshort_fill);
  case T_INT:
  case T_FLOAT:
    if (!aligned) RETURN_STUB(jint_fill);
    RETURN_STUB(arrayof_jint_fill);
  case T_DOUBLE:
  case T_LONG:
  case T_ARRAY:
  case T_OBJECT:
  case T_NARROWOOP:
  case T_NARROWKLASS:
  case T_ADDRESS:
  case T_VOID:
    // Currently unsupported
    return nullptr;

  default:
    ShouldNotReachHere();
    return nullptr;
  }

#undef RETURN_STUB
}
427 
// constants for computing the copy function
// The aligned and disjoint flags are OR-ed together to form a selector
// in the range [0, 3] — see select_arraycopy_function below.
enum {
  COPYFUNC_UNALIGNED = 0,
  COPYFUNC_ALIGNED = 1,                 // src, dest aligned to HeapWordSize
  COPYFUNC_CONJOINT = 0,
  COPYFUNC_DISJOINT = 2                 // src != dest, or transfer can descend
};
435 
// Note:  The condition "disjoint" applies also for overlapping copies
// where a descending copy is permitted (i.e., dest_offset <= src_offset).
//
// Select the arraycopy stub for element type t and the given alignment /
// disjointness; sets 'name' to the chosen stub's name and returns its
// entry point. For oop copies, dest_uninitialized selects the *_uninit
// stub variant.
address
StubRoutines::select_arraycopy_function(BasicType t, bool aligned, bool disjoint, const char* &name, bool dest_uninitialized) {
  // selector is in [0, 3]; each inner switch below covers all four
  // values, so every type case returns and the apparent fallthrough
  // between the outer case labels is unreachable.
  int selector =
    (aligned  ? COPYFUNC_ALIGNED  : COPYFUNC_UNALIGNED) +
    (disjoint ? COPYFUNC_DISJOINT : COPYFUNC_CONJOINT);

// Helper: record the stub's name and return its entry point.
#define RETURN_STUB(xxx_arraycopy) { \
  name = #xxx_arraycopy; \
  return StubRoutines::xxx_arraycopy(); }

// Helper for parameterized (oop) stubs: appends "_uninit" to the name
// when the uninitialized variant is selected.
#define RETURN_STUB_PARM(xxx_arraycopy, parm) { \
  name = parm ? #xxx_arraycopy "_uninit": #xxx_arraycopy; \
  return StubRoutines::xxx_arraycopy(parm); }

  switch (t) {
  case T_BYTE:
  case T_BOOLEAN:
    switch (selector) {
    case COPYFUNC_CONJOINT | COPYFUNC_UNALIGNED:  RETURN_STUB(jbyte_arraycopy);
    case COPYFUNC_CONJOINT | COPYFUNC_ALIGNED:    RETURN_STUB(arrayof_jbyte_arraycopy);
    case COPYFUNC_DISJOINT | COPYFUNC_UNALIGNED:  RETURN_STUB(jbyte_disjoint_arraycopy);
    case COPYFUNC_DISJOINT | COPYFUNC_ALIGNED:    RETURN_STUB(arrayof_jbyte_disjoint_arraycopy);
    }
  case T_CHAR:
  case T_SHORT:
    switch (selector) {
    case COPYFUNC_CONJOINT | COPYFUNC_UNALIGNED:  RETURN_STUB(jshort_arraycopy);
    case COPYFUNC_CONJOINT | COPYFUNC_ALIGNED:    RETURN_STUB(arrayof_jshort_arraycopy);
    case COPYFUNC_DISJOINT | COPYFUNC_UNALIGNED:  RETURN_STUB(jshort_disjoint_arraycopy);
    case COPYFUNC_DISJOINT | COPYFUNC_ALIGNED:    RETURN_STUB(arrayof_jshort_disjoint_arraycopy);
    }
  case T_INT:
  case T_FLOAT:
    switch (selector) {
    case COPYFUNC_CONJOINT | COPYFUNC_UNALIGNED:  RETURN_STUB(jint_arraycopy);
    case COPYFUNC_CONJOINT | COPYFUNC_ALIGNED:    RETURN_STUB(arrayof_jint_arraycopy);
    case COPYFUNC_DISJOINT | COPYFUNC_UNALIGNED:  RETURN_STUB(jint_disjoint_arraycopy);
    case COPYFUNC_DISJOINT | COPYFUNC_ALIGNED:    RETURN_STUB(arrayof_jint_disjoint_arraycopy);
    }
  case T_DOUBLE:
  case T_LONG:
    switch (selector) {
    case COPYFUNC_CONJOINT | COPYFUNC_UNALIGNED:  RETURN_STUB(jlong_arraycopy);
    case COPYFUNC_CONJOINT | COPYFUNC_ALIGNED:    RETURN_STUB(arrayof_jlong_arraycopy);
    case COPYFUNC_DISJOINT | COPYFUNC_UNALIGNED:  RETURN_STUB(jlong_disjoint_arraycopy);
    case COPYFUNC_DISJOINT | COPYFUNC_ALIGNED:    RETURN_STUB(arrayof_jlong_disjoint_arraycopy);
    }
  case T_ARRAY:
  case T_OBJECT:
    switch (selector) {
    case COPYFUNC_CONJOINT | COPYFUNC_UNALIGNED:  RETURN_STUB_PARM(oop_arraycopy, dest_uninitialized);
    case COPYFUNC_CONJOINT | COPYFUNC_ALIGNED:    RETURN_STUB_PARM(arrayof_oop_arraycopy, dest_uninitialized);
    case COPYFUNC_DISJOINT | COPYFUNC_UNALIGNED:  RETURN_STUB_PARM(oop_disjoint_arraycopy, dest_uninitialized);
    case COPYFUNC_DISJOINT | COPYFUNC_ALIGNED:    RETURN_STUB_PARM(arrayof_oop_disjoint_arraycopy, dest_uninitialized);
    }
  default:
    ShouldNotReachHere();
    return nullptr;
  }

#undef RETURN_STUB
#undef RETURN_STUB_PARM
}
501 
502 UnsafeMemoryAccessMark::UnsafeMemoryAccessMark(StubCodeGenerator* cgen, bool add_entry, bool continue_at_scope_end, address error_exit_pc) {
503   _cgen = cgen;
504   _ucm_entry = nullptr;
505   if (add_entry) {
506     address err_exit_pc = nullptr;
507     if (!continue_at_scope_end) {
508       err_exit_pc = error_exit_pc != nullptr ? error_exit_pc : UnsafeMemoryAccess::common_exit_stub_pc();
509     }
510     assert(err_exit_pc != nullptr || continue_at_scope_end, "error exit not set");
511     _ucm_entry = UnsafeMemoryAccess::add_to_table(_cgen->assembler()->pc(), nullptr, err_exit_pc);
512   }
513 }
514 
515 UnsafeMemoryAccessMark::~UnsafeMemoryAccessMark() {
516   if (_ucm_entry != nullptr) {
517     _ucm_entry->set_end_pc(_cgen->assembler()->pc());
518     if (_ucm_entry->error_exit_pc() == nullptr) {
519       _ucm_entry->set_error_exit_pc(_cgen->assembler()->pc());
520     }
521   }
522 }