1 /*
  2  * Copyright (c) 1997, 2025, Oracle and/or its affiliates. All rights reserved.
  3  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  4  *
  5  * This code is free software; you can redistribute it and/or modify it
  6  * under the terms of the GNU General Public License version 2 only, as
  7  * published by the Free Software Foundation.
  8  *
  9  * This code is distributed in the hope that it will be useful, but WITHOUT
 10  * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 11  * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
 12  * version 2 for more details (a copy is included in the LICENSE file that
 13  * accompanied this code).
 14  *
 15  * You should have received a copy of the GNU General Public License version
 16  * 2 along with this work; if not, write to the Free Software Foundation,
 17  * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 18  *
 19  * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 20  * or visit www.oracle.com if you need additional information or have any
 21  * questions.
 22  *
 23  */
 24 
 25 #include "asm/codeBuffer.hpp"
 26 #include "asm/macroAssembler.inline.hpp"
 27 #include "memory/resourceArea.hpp"
 28 #include "oops/access.inline.hpp"
 29 #include "oops/klass.hpp"
 30 #include "oops/oop.inline.hpp"
 31 #include "prims/vectorSupport.hpp"
 32 #include "runtime/continuation.hpp"
 33 #include "runtime/interfaceSupport.inline.hpp"
 34 #include "runtime/timerTrace.hpp"
 35 #include "runtime/sharedRuntime.hpp"
 36 #include "runtime/stubRoutines.hpp"
 37 #include "utilities/align.hpp"
 38 #include "utilities/copy.hpp"
 39 #ifdef COMPILER2
 40 #include "opto/runtime.hpp"
 41 #endif
 42 
// Static state for the table recording unsafe memory access regions.
// The table itself is allocated lazily by create_table() below;
// _table_length tracks how many entries are in use.
UnsafeMemoryAccess* UnsafeMemoryAccess::_table                  = nullptr;
int UnsafeMemoryAccess::_table_length                           = 0;
int UnsafeMemoryAccess::_table_max_length                       = 0;
address UnsafeMemoryAccess::_common_exit_stub_pc                = nullptr;
 47 
// Implementation of StubRoutines - for a description of how to
// declare new blobs, stubs and entries, see stubDefinitions.hpp.

// define arrays to hold stub and blob names

// use a template to generate the initializer for the blob names array

#define DEFINE_BLOB_NAME(blob_name)             \
  # blob_name,

// one name string per blob, indexed by StubGenBlobId
const char* StubRoutines::_blob_names[StubGenBlobId::NUM_BLOBIDS] = {
  STUBGEN_BLOBS_DO(DEFINE_BLOB_NAME)
};

#undef DEFINE_BLOB_NAME

#define DEFINE_STUB_NAME(blob_name, stub_name)          \
  # stub_name ,                                         \

// use a template to generate the initializer for the stub names array
const char* StubRoutines::_stub_names[StubGenStubId::NUM_STUBIDS] = {
  STUBGEN_STUBS_DO(DEFINE_STUB_NAME)
};

#undef DEFINE_STUB_NAME

// Define fields used to store blobs

#define DEFINE_BLOB_FIELD(blob_name) \
  BufferBlob* StubRoutines:: STUBGEN_BLOB_FIELD_NAME(blob_name) = nullptr;

STUBGEN_BLOBS_DO(DEFINE_BLOB_FIELD)

#undef DEFINE_BLOB_FIELD

// Define fields used to store stub entries

// simple entry fields start out null and are filled in when stubs
// are generated
#define DEFINE_ENTRY_FIELD(blob_name, stub_name, field_name, getter_name) \
  address StubRoutines:: STUB_FIELD_NAME(field_name) = nullptr;

// some entry fields are pre-initialized with the address of a C function
#define DEFINE_ENTRY_FIELD_INIT(blob_name, stub_name, field_name, getter_name, init_function) \
  address StubRoutines:: STUB_FIELD_NAME(field_name) = CAST_FROM_FN_PTR(address, init_function);

// array entry fields hold a fixed count of related entry points
#define DEFINE_ENTRY_FIELD_ARRAY(blob_name, stub_name, field_name, getter_name, count) \
  address StubRoutines:: STUB_FIELD_NAME(field_name)[count] = { nullptr };

STUBGEN_ENTRIES_DO(DEFINE_ENTRY_FIELD, DEFINE_ENTRY_FIELD_INIT, DEFINE_ENTRY_FIELD_ARRAY)

#undef DEFINE_ENTRY_FIELD_ARRAY
#undef DEFINE_ENTRY_FIELD_INIT
#undef DEFINE_ENTRY_FIELD
 99 
// verify_oop call counter; declared separately from the macro-generated
// entry fields above
jint    StubRoutines::_verify_oop_count                         = 0;

// four string-indexof entry points, also kept outside the
// macro-generated entries
address StubRoutines::_string_indexof_array[4]   =    { nullptr };
104 
105 const char* StubRoutines::get_blob_name(StubGenBlobId id) {
106   assert(0 <= id && id < StubGenBlobId::NUM_BLOBIDS, "invalid blob id");
107   return _blob_names[id];
108 }
109 
110 const char* StubRoutines::get_stub_name(StubGenStubId id) {
111   assert(0 <= id && id < StubGenStubId::NUM_STUBIDS, "invalid stub id");
112   return _stub_names[id];
113 }
114 
115 #ifdef ASSERT
116 
// array holding start and end indices for stub ids associated with a
// given blob. Given a blob with id (StubGenBlobId) blob_id for any
// stub with id (StubGenStubId) stub_id declared within the blob:
// _blob_limits[blob_id] <= stub_id < _blob_limits[blob_id+1]

static int _blob_limits[StubGenBlobId::NUM_BLOBIDS + 1];

// macro used to compute blob limits; relies on locals 'counter' and
// 'index' declared in verifyStubIds below
#define BLOB_COUNT(blob_name)                                           \
  counter += StubGenStubId_ ## blob_name :: NUM_STUBIDS_ ## blob_name;  \
  _blob_limits[++index] = counter;                                      \

// macro that checks stubs are associated with the correct blobs; relies
// on locals 'localStubId', 'globalStubId' and 'blobId' declared in
// verifyStubIds below
#define STUB_VERIFY(blob_name, stub_name)                               \
  localStubId = (int) (StubGenStubId_ ## blob_name :: blob_name ## _ ## stub_name ## _id); \
  globalStubId = (int) (StubGenStubId:: stub_name ## _id);              \
  blobId = (int) (StubGenBlobId:: blob_name ## _id);                    \
  assert((globalStubId >= _blob_limits[blobId] &&                       \
          globalStubId < _blob_limits[blobId+1]),                       \
         "stub " # stub_name " uses incorrect blob name " # blob_name); \
  assert(globalStubId == _blob_limits[blobId] + localStubId,            \
         "stub " # stub_name " id found at wrong offset!");             \

// Cross-check the global stub id enum against the per-blob local stub id
// enums. Always returns true so it can be used as a static initializer.
bool verifyStubIds() {
  // first compute the blob limits
  int counter = 0;
  int index = 0;
  // populate offsets table with cumulative total of local enum counts
  STUBGEN_BLOBS_DO(BLOB_COUNT);

  // ensure 1) global stub ids lie in the range of the associated blob
  // and 2) each blob's base + local stub id == global stub id
  int globalStubId, blobId, localStubId;
  STUBGEN_STUBS_DO(STUB_VERIFY);
  return true;
}

#undef BLOB_COUNT
#undef STUB_VERIFY

// ensure we verify the blob ids when this compile unit is first entered
bool _verified_stub_ids = verifyStubIds();
159 
160 
// macro used by stub to blob translation; relies on locals 'id' and
// 'blobId' declared in stub_to_blob below

#define BLOB_CHECK_OFFSET(blob_name)                                \
  if (id < _blob_limits[((int)blobId) + 1]) { return blobId; }      \
  blobId = StubGenBlobId:: blob_name ## _id;                        \

// translate a global stub id to an associated blob id based on the
// computed blob limits

StubGenBlobId StubRoutines::stub_to_blob(StubGenStubId stubId) {
  int id = (int)stubId;
  assert(id > ((int)StubGenStubId::NO_STUBID) && id < ((int)StubGenStubId::NUM_STUBIDS), "stub id out of range!");
  // start with no blob to catch stub id == -1
  StubGenBlobId blobId = StubGenBlobId::NO_BLOBID;
  // each expansion checks the previous blob's upper limit and then records
  // its own blob id, returning as soon as the stub id falls below a limit
  STUBGEN_BLOBS_DO(BLOB_CHECK_OFFSET);
  // if we reach here we should have the last blob id
  assert(blobId == StubGenBlobId::NUM_BLOBIDS - 1, "unexpected blob id");
  return blobId;
}
180 
181 #endif // ASSERT
182 
// Initialization

// implemented by the stub generator component; invoked from
// initialize_stubs() below to fill a freshly allocated code buffer
extern void StubGenerator_generate(CodeBuffer* code, StubGenBlobId blob_id); // only interface to generators
186 
187 void UnsafeMemoryAccess::create_table(int max_size) {
188   UnsafeMemoryAccess::_table = new UnsafeMemoryAccess[max_size];
189   UnsafeMemoryAccess::_table_max_length = max_size;
190 }
191 
192 bool UnsafeMemoryAccess::contains_pc(address pc) {
193   assert(UnsafeMemoryAccess::_table != nullptr, "");
194   for (int i = 0; i < UnsafeMemoryAccess::_table_length; i++) {
195     UnsafeMemoryAccess* entry = &UnsafeMemoryAccess::_table[i];
196     if (pc >= entry->start_pc() && pc < entry->end_pc()) {
197       return true;
198     }
199   }
200   return false;
201 }
202 
203 address UnsafeMemoryAccess::page_error_continue_pc(address pc) {
204   assert(UnsafeMemoryAccess::_table != nullptr, "");
205   for (int i = 0; i < UnsafeMemoryAccess::_table_length; i++) {
206     UnsafeMemoryAccess* entry = &UnsafeMemoryAccess::_table[i];
207     if (pc >= entry->start_pc() && pc < entry->end_pc()) {
208       return entry->error_exit_pc();
209     }
210   }
211   return nullptr;
212 }
213 
214 
215 static BufferBlob* initialize_stubs(StubGenBlobId blob_id,
216                                     int code_size, int max_aligned_stubs,
217                                     const char* timer_msg,
218                                     const char* buffer_name,
219                                     const char* assert_msg) {
220   ResourceMark rm;
221   if (code_size == 0) {
222     LogTarget(Info, stubs) lt;
223     if (lt.is_enabled()) {
224       LogStream ls(lt);
225       ls.print_cr("%s\t not generated", buffer_name);
226       return nullptr;
227     }
228   }
229   TraceTime timer(timer_msg, TRACETIME_LOG(Info, startuptime));
230   // Add extra space for large CodeEntryAlignment
231   int size = code_size + CodeEntryAlignment * max_aligned_stubs;
232   BufferBlob* stubs_code = BufferBlob::create(buffer_name, size);
233   if (stubs_code == nullptr) {
234     vm_exit_out_of_memory(code_size, OOM_MALLOC_ERROR, "CodeCache: no room for %s", buffer_name);
235   }
236   CodeBuffer buffer(stubs_code);
237   StubGenerator_generate(&buffer, blob_id);
238   // When new stubs added we need to make sure there is some space left
239   // to catch situation when we should increase size again.
240   assert(code_size == 0 || buffer.insts_remaining() > 200, "increase %s", assert_msg);
241 
242   LogTarget(Info, stubs) lt;
243   if (lt.is_enabled()) {
244     LogStream ls(lt);
245     ls.print_cr("%s\t [" INTPTR_FORMAT ", " INTPTR_FORMAT "] used: %d, free: %d",
246                 buffer_name, p2i(stubs_code->content_begin()), p2i(stubs_code->content_end()),
247                 buffer.total_content_size(), buffer.insts_remaining());
248   }
249   return stubs_code;
250 }
251 
252 #define DEFINE_BLOB_INIT_METHOD(blob_name)                              \
253   void StubRoutines::initialize_ ## blob_name ## _stubs() {             \
254     if (STUBGEN_BLOB_FIELD_NAME(blob_name) == nullptr) {                \
255       StubGenBlobId blob_id = StubGenBlobId:: STUB_ID_NAME(blob_name);  \
256       int size = _ ## blob_name ## _code_size;                          \
257       int max_aligned_size = 10;                                        \
258       const char* timer_msg = "StubRoutines generation " # blob_name " stubs"; \
259       const char* name = "StubRoutines (" # blob_name "stubs)";         \
260       const char* assert_msg = "_" # blob_name "_code_size";            \
261       STUBGEN_BLOB_FIELD_NAME(blob_name) =                              \
262         initialize_stubs(blob_id, size, max_aligned_size, timer_msg,    \
263                          name, assert_msg);                             \
264     }                                                                   \
265   }
266 
267 
268 STUBGEN_BLOBS_DO(DEFINE_BLOB_INIT_METHOD)
269 
270 #undef DEFINE_BLOB_INIT_METHOD
271 
272 
// define a free function <blob_name>_stubs_init() for each blob that
// simply forwards to the corresponding
// StubRoutines::initialize_<blob_name>_stubs() method
#define DEFINE_BLOB_INIT_FUNCTION(blob_name)            \
void blob_name ## _stubs_init()  {                      \
  StubRoutines::initialize_ ## blob_name ## _stubs();   \
}

STUBGEN_BLOBS_DO(DEFINE_BLOB_INIT_FUNCTION)

#undef DEFINE_BLOB_INIT_FUNCTION
281 
282 /*
283  * we generate the underlying driver method but this wrapper is needed
284  * to perform special handling depending on where the compiler init
285  * gets called from. it ought to be possible to remove this at some
286  * point and have adeterminate ordered init.
287  */
288 
289 void compiler_stubs_init(bool in_compiler_thread) {
290   if (in_compiler_thread && DelayCompilerStubsGeneration) {
291     // Temporarily revert state of stubs generation because
292     // it is called after final_stubs_init() finished
293     // during compiler runtime initialization.
294     // It is fine because these stubs are only used by
295     // compiled code and compiler is not running yet.
296     StubCodeDesc::unfreeze();
297     StubRoutines::initialize_compiler_stubs();
298     StubCodeDesc::freeze();
299   } else if (!in_compiler_thread && !DelayCompilerStubsGeneration) {
300     StubRoutines::initialize_compiler_stubs();
301   }
302 }
303 
304 
305 //
306 // Default versions of arraycopy functions
307 //
308 
// Slow-path conjoint (overlap-safe) byte array copy.
JRT_LEAF(void, StubRoutines::jbyte_copy(jbyte* src, jbyte* dest, size_t count))
#ifndef PRODUCT
  SharedRuntime::_jbyte_array_copy_ctr++;      // Slow-path byte array copy
#endif // !PRODUCT
  Copy::conjoint_jbytes_atomic(src, dest, count);
JRT_END
315 
// Slow-path conjoint (overlap-safe) short/char array copy.
JRT_LEAF(void, StubRoutines::jshort_copy(jshort* src, jshort* dest, size_t count))
#ifndef PRODUCT
  SharedRuntime::_jshort_array_copy_ctr++;     // Slow-path short/char array copy
#endif // !PRODUCT
  Copy::conjoint_jshorts_atomic(src, dest, count);
JRT_END
322 
// Slow-path conjoint (overlap-safe) int/float array copy.
JRT_LEAF(void, StubRoutines::jint_copy(jint* src, jint* dest, size_t count))
#ifndef PRODUCT
  SharedRuntime::_jint_array_copy_ctr++;       // Slow-path int/float array copy
#endif // !PRODUCT
  Copy::conjoint_jints_atomic(src, dest, count);
JRT_END
329 
// Slow-path conjoint (overlap-safe) long/double array copy.
JRT_LEAF(void, StubRoutines::jlong_copy(jlong* src, jlong* dest, size_t count))
#ifndef PRODUCT
  SharedRuntime::_jlong_array_copy_ctr++;      // Slow-path long/double array copy
#endif // !PRODUCT
  Copy::conjoint_jlongs_atomic(src, dest, count);
JRT_END
336 
// Slow-path conjoint oop array copy, routed through the Access API.
// Precondition: count != 0.
JRT_LEAF(void, StubRoutines::oop_copy(oop* src, oop* dest, size_t count))
#ifndef PRODUCT
  SharedRuntime::_oop_array_copy_ctr++;        // Slow-path oop array copy
#endif // !PRODUCT
  assert(count != 0, "count should be non-zero");
  ArrayAccess<>::oop_arraycopy_raw((HeapWord*)src, (HeapWord*)dest, count);
JRT_END
344 
// Slow-path conjoint oop array copy into an uninitialized destination
// (IS_DEST_UNINITIALIZED decorator). Precondition: count != 0.
JRT_LEAF(void, StubRoutines::oop_copy_uninit(oop* src, oop* dest, size_t count))
#ifndef PRODUCT
  SharedRuntime::_oop_array_copy_ctr++;        // Slow-path oop array copy
#endif // !PRODUCT
  assert(count != 0, "count should be non-zero");
  ArrayAccess<IS_DEST_UNINITIALIZED>::oop_arraycopy_raw((HeapWord*)src, (HeapWord*)dest, count);
JRT_END
352 
// Slow-path conjoint byte array copy, HeapWord-aligned variant.
JRT_LEAF(void, StubRoutines::arrayof_jbyte_copy(HeapWord* src, HeapWord* dest, size_t count))
#ifndef PRODUCT
  SharedRuntime::_jbyte_array_copy_ctr++;      // Slow-path byte array copy
#endif // !PRODUCT
  Copy::arrayof_conjoint_jbytes(src, dest, count);
JRT_END
359 
// Slow-path conjoint short/char array copy, HeapWord-aligned variant.
JRT_LEAF(void, StubRoutines::arrayof_jshort_copy(HeapWord* src, HeapWord* dest, size_t count))
#ifndef PRODUCT
  SharedRuntime::_jshort_array_copy_ctr++;     // Slow-path short/char array copy
#endif // !PRODUCT
  Copy::arrayof_conjoint_jshorts(src, dest, count);
JRT_END
366 
// Slow-path conjoint int/float array copy, HeapWord-aligned variant.
JRT_LEAF(void, StubRoutines::arrayof_jint_copy(HeapWord* src, HeapWord* dest, size_t count))
#ifndef PRODUCT
  SharedRuntime::_jint_array_copy_ctr++;       // Slow-path int/float array copy
#endif // !PRODUCT
  Copy::arrayof_conjoint_jints(src, dest, count);
JRT_END
373 
// Slow-path conjoint long/double array copy, HeapWord-aligned variant.
JRT_LEAF(void, StubRoutines::arrayof_jlong_copy(HeapWord* src, HeapWord* dest, size_t count))
#ifndef PRODUCT
  SharedRuntime::_jlong_array_copy_ctr++;       // Slow-path long/double array copy
#endif // !PRODUCT
  Copy::arrayof_conjoint_jlongs(src, dest, count);
JRT_END
380 
// Slow-path conjoint oop array copy, HeapWord-aligned variant, routed
// through the Access API. Precondition: count != 0.
JRT_LEAF(void, StubRoutines::arrayof_oop_copy(HeapWord* src, HeapWord* dest, size_t count))
#ifndef PRODUCT
  SharedRuntime::_oop_array_copy_ctr++;        // Slow-path oop array copy
#endif // !PRODUCT
  assert(count != 0, "count should be non-zero");
  ArrayAccess<ARRAYCOPY_ARRAYOF>::oop_arraycopy_raw(src, dest, count);
JRT_END
388 
// Slow-path conjoint oop array copy, HeapWord-aligned variant, into an
// uninitialized destination. Precondition: count != 0.
JRT_LEAF(void, StubRoutines::arrayof_oop_copy_uninit(HeapWord* src, HeapWord* dest, size_t count))
#ifndef PRODUCT
  SharedRuntime::_oop_array_copy_ctr++;        // Slow-path oop array copy
#endif // !PRODUCT
  assert(count != 0, "count should be non-zero");
  ArrayAccess<ARRAYCOPY_ARRAYOF | IS_DEST_UNINITIALIZED>::oop_arraycopy_raw(src, dest, count);
JRT_END
396 
397 address StubRoutines::select_fill_function(BasicType t, bool aligned, const char* &name) {
398 #define RETURN_STUB(xxx_fill) { \
399   name = #xxx_fill; \
400   return StubRoutines::xxx_fill(); }
401 
402   switch (t) {
403   case T_BYTE:
404   case T_BOOLEAN:
405     if (!aligned) RETURN_STUB(jbyte_fill);
406     RETURN_STUB(arrayof_jbyte_fill);
407   case T_CHAR:
408   case T_SHORT:
409     if (!aligned) RETURN_STUB(jshort_fill);
410     RETURN_STUB(arrayof_jshort_fill);
411   case T_INT:
412   case T_FLOAT:
413     if (!aligned) RETURN_STUB(jint_fill);
414     RETURN_STUB(arrayof_jint_fill);
415   case T_DOUBLE:
416   case T_LONG:
417   case T_ARRAY:
418   case T_OBJECT:
419   case T_NARROWOOP:
420   case T_NARROWKLASS:
421   case T_ADDRESS:
422   case T_VOID:
423     // Currently unsupported
424     return nullptr;
425 
426   default:
427     ShouldNotReachHere();
428     return nullptr;
429   }
430 
431 #undef RETURN_STUB
432 }
433 
// constants for computing the copy function: the selector is the sum
// (equivalently, bitwise OR) of an alignment flag (0/1) and a
// disjointness flag (0/2), giving values 0..3
enum {
  COPYFUNC_UNALIGNED = 0,
  COPYFUNC_ALIGNED = 1,                 // src, dest aligned to HeapWordSize
  COPYFUNC_CONJOINT = 0,
  COPYFUNC_DISJOINT = 2                 // src != dest, or transfer can descend
};
441 
// Note:  The condition "disjoint" applies also for overlapping copies
// where a descending copy is permitted (i.e., dest_offset <= src_offset).
//
// Select the arraycopy stub entry point for element type t, reporting the
// chosen stub's name through 'name'. The selector combines the alignment
// and disjointness flags declared above; since every selector value 0..3
// is matched inside each inner switch, the apparent fall-through between
// the outer case groups is unreachable.
address
StubRoutines::select_arraycopy_function(BasicType t, bool aligned, bool disjoint, const char* &name, bool dest_uninitialized) {
  int selector =
    (aligned  ? COPYFUNC_ALIGNED  : COPYFUNC_UNALIGNED) +
    (disjoint ? COPYFUNC_DISJOINT : COPYFUNC_CONJOINT);

// sets 'name' to the stringized stub identifier and returns its entry
#define RETURN_STUB(xxx_arraycopy) { \
  name = #xxx_arraycopy; \
  return StubRoutines::xxx_arraycopy(); }

// as RETURN_STUB but passes 'parm' to the getter and appends "_uninit"
// to the reported name when parm is set
#define RETURN_STUB_PARM(xxx_arraycopy, parm) { \
  name = parm ? #xxx_arraycopy "_uninit": #xxx_arraycopy; \
  return StubRoutines::xxx_arraycopy(parm); }

  switch (t) {
  case T_BYTE:
  case T_BOOLEAN:
    switch (selector) {
    case COPYFUNC_CONJOINT | COPYFUNC_UNALIGNED:  RETURN_STUB(jbyte_arraycopy);
    case COPYFUNC_CONJOINT | COPYFUNC_ALIGNED:    RETURN_STUB(arrayof_jbyte_arraycopy);
    case COPYFUNC_DISJOINT | COPYFUNC_UNALIGNED:  RETURN_STUB(jbyte_disjoint_arraycopy);
    case COPYFUNC_DISJOINT | COPYFUNC_ALIGNED:    RETURN_STUB(arrayof_jbyte_disjoint_arraycopy);
    }
  case T_CHAR:
  case T_SHORT:
    switch (selector) {
    case COPYFUNC_CONJOINT | COPYFUNC_UNALIGNED:  RETURN_STUB(jshort_arraycopy);
    case COPYFUNC_CONJOINT | COPYFUNC_ALIGNED:    RETURN_STUB(arrayof_jshort_arraycopy);
    case COPYFUNC_DISJOINT | COPYFUNC_UNALIGNED:  RETURN_STUB(jshort_disjoint_arraycopy);
    case COPYFUNC_DISJOINT | COPYFUNC_ALIGNED:    RETURN_STUB(arrayof_jshort_disjoint_arraycopy);
    }
  case T_INT:
  case T_FLOAT:
    switch (selector) {
    case COPYFUNC_CONJOINT | COPYFUNC_UNALIGNED:  RETURN_STUB(jint_arraycopy);
    case COPYFUNC_CONJOINT | COPYFUNC_ALIGNED:    RETURN_STUB(arrayof_jint_arraycopy);
    case COPYFUNC_DISJOINT | COPYFUNC_UNALIGNED:  RETURN_STUB(jint_disjoint_arraycopy);
    case COPYFUNC_DISJOINT | COPYFUNC_ALIGNED:    RETURN_STUB(arrayof_jint_disjoint_arraycopy);
    }
  case T_DOUBLE:
  case T_LONG:
    switch (selector) {
    case COPYFUNC_CONJOINT | COPYFUNC_UNALIGNED:  RETURN_STUB(jlong_arraycopy);
    case COPYFUNC_CONJOINT | COPYFUNC_ALIGNED:    RETURN_STUB(arrayof_jlong_arraycopy);
    case COPYFUNC_DISJOINT | COPYFUNC_UNALIGNED:  RETURN_STUB(jlong_disjoint_arraycopy);
    case COPYFUNC_DISJOINT | COPYFUNC_ALIGNED:    RETURN_STUB(arrayof_jlong_disjoint_arraycopy);
    }
  case T_ARRAY:
  case T_OBJECT:
    switch (selector) {
    case COPYFUNC_CONJOINT | COPYFUNC_UNALIGNED:  RETURN_STUB_PARM(oop_arraycopy, dest_uninitialized);
    case COPYFUNC_CONJOINT | COPYFUNC_ALIGNED:    RETURN_STUB_PARM(arrayof_oop_arraycopy, dest_uninitialized);
    case COPYFUNC_DISJOINT | COPYFUNC_UNALIGNED:  RETURN_STUB_PARM(oop_disjoint_arraycopy, dest_uninitialized);
    case COPYFUNC_DISJOINT | COPYFUNC_ALIGNED:    RETURN_STUB_PARM(arrayof_oop_disjoint_arraycopy, dest_uninitialized);
    }
  default:
    ShouldNotReachHere();
    return nullptr;
  }

#undef RETURN_STUB
#undef RETURN_STUB_PARM
}
507 
508 UnsafeMemoryAccessMark::UnsafeMemoryAccessMark(StubCodeGenerator* cgen, bool add_entry, bool continue_at_scope_end, address error_exit_pc) {
509   _cgen = cgen;
510   _ucm_entry = nullptr;
511   if (add_entry) {
512     address err_exit_pc = nullptr;
513     if (!continue_at_scope_end) {
514       err_exit_pc = error_exit_pc != nullptr ? error_exit_pc : UnsafeMemoryAccess::common_exit_stub_pc();
515     }
516     assert(err_exit_pc != nullptr || continue_at_scope_end, "error exit not set");
517     _ucm_entry = UnsafeMemoryAccess::add_to_table(_cgen->assembler()->pc(), nullptr, err_exit_pc);
518   }
519 }
520 
521 UnsafeMemoryAccessMark::~UnsafeMemoryAccessMark() {
522   if (_ucm_entry != nullptr) {
523     _ucm_entry->set_end_pc(_cgen->assembler()->pc());
524     if (_ucm_entry->error_exit_pc() == nullptr) {
525       _ucm_entry->set_error_exit_pc(_cgen->assembler()->pc());
526     }
527   }
528 }