/*
 * Copyright (c) 1997, 2025, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#include "asm/codeBuffer.hpp"
#include "asm/macroAssembler.inline.hpp"
#include "memory/resourceArea.hpp"
#include "oops/access.inline.hpp"
#include "oops/klass.hpp"
#include "oops/oop.inline.hpp"
#include "prims/vectorSupport.hpp"
#include "runtime/continuation.hpp"
#include "runtime/interfaceSupport.inline.hpp"
#include "runtime/sharedRuntime.hpp"
#include "runtime/stubRoutines.hpp"
#include "runtime/timerTrace.hpp"
#include "utilities/align.hpp"
#include "utilities/copy.hpp"
#ifdef COMPILER2
#include "opto/runtime.hpp"
#endif

UnsafeMemoryAccess* UnsafeMemoryAccess::_table                  = nullptr;
int UnsafeMemoryAccess::_table_length                           = 0;
int UnsafeMemoryAccess::_table_max_length                       = 0;
address UnsafeMemoryAccess::_common_exit_stub_pc                = nullptr;
// Implementation of StubRoutines - for a description of how to
// declare new blobs, stubs and entries, see stubDefinitions.hpp.

// Define fields used to store blobs

#define DEFINE_STUBGEN_BLOB_FIELD(blob_name)                            \
  BufferBlob* StubRoutines:: STUBGEN_BLOB_FIELD_NAME(blob_name) = nullptr;

STUBGEN_BLOBS_DO(DEFINE_STUBGEN_BLOB_FIELD)
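// For illustration only: assuming STUBGEN_BLOB_FIELD_NAME maps a blob name
// like 'initial' to a field name along the lines of '_initial_stubs_code',
// the invocation above expands to one null-initialized field definition per
// stubgen blob, e.g.
//
//   BufferBlob* StubRoutines::_initial_stubs_code = nullptr;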

#undef DEFINE_STUBGEN_BLOB_FIELD

// Define fields used to store stubgen stub entries

#define DEFINE_STUBGEN_ENTRY_FIELD(blob_name, stub_name, field_name, getter_name) \
  address StubRoutines:: STUB_FIELD_NAME(field_name) = nullptr;

#define DEFINE_STUBGEN_ENTRY_FIELD_INIT(blob_name, stub_name, field_name, getter_name, init_function) \
  address StubRoutines:: STUB_FIELD_NAME(field_name) = CAST_FROM_FN_PTR(address, init_function);

#define DEFINE_STUBGEN_ENTRY_FIELD_ARRAY(blob_name, stub_name, field_name, getter_name, count) \
  address StubRoutines:: STUB_FIELD_NAME(field_name)[count] = { nullptr };

STUBGEN_ENTRIES_DO(DEFINE_STUBGEN_ENTRY_FIELD, DEFINE_STUBGEN_ENTRY_FIELD_INIT, DEFINE_STUBGEN_ENTRY_FIELD_ARRAY)
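// For illustration only: assuming STUB_FIELD_NAME simply prefixes the field
// name with an underscore, a plain entry with field name 'call_stub_entry'
// would expand to
//
//   address StubRoutines::_call_stub_entry = nullptr;
//
// while the _INIT variant initializes the field from a function pointer and
// the _ARRAY variant declares a fixed-size array of entry addresses.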

#undef DEFINE_STUBGEN_ENTRY_FIELD_ARRAY
#undef DEFINE_STUBGEN_ENTRY_FIELD_INIT
#undef DEFINE_STUBGEN_ENTRY_FIELD

jint    StubRoutines::_verify_oop_count                         = 0;


address StubRoutines::_string_indexof_array[4] = { nullptr };

const char* StubRoutines::get_blob_name(BlobId id) {
  assert(StubInfo::is_stubgen(id), "not a stubgen blob %s", StubInfo::name(id));
  return StubInfo::name(id);
}

const char* StubRoutines::get_stub_name(StubId id) {
  assert(StubInfo::is_stubgen(id), "not a stubgen stub %s", StubInfo::name(id));
  return StubInfo::name(id);
}

#ifdef ASSERT
// translate a stub id to an associated blob id while checking that it
// is a stubgen stub

BlobId StubRoutines::stub_to_blob(StubId id) {
  assert(StubInfo::is_stubgen(id), "not a stubgen stub %s", StubInfo::name(id));
  return StubInfo::blob(id);
}

#endif // ASSERT

// TODO: update with 8343767
address StubRoutines::_load_inline_type_fields_in_regs = nullptr;
address StubRoutines::_store_inline_type_fields_to_buf = nullptr;

// Initialization

extern void StubGenerator_generate(CodeBuffer* code, BlobId blob_id); // only interface to generators

void UnsafeMemoryAccess::create_table(int max_size) {
  UnsafeMemoryAccess::_table = new UnsafeMemoryAccess[max_size];
  UnsafeMemoryAccess::_table_max_length = max_size;
}
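
// Note: create_table only allocates the (empty) table. Entries are added
// one at a time via UnsafeMemoryAccess::add_to_table (see the
// UnsafeMemoryAccessMark helper at the end of this file), which is expected
// to advance _table_length up to at most _table_max_length.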

bool UnsafeMemoryAccess::contains_pc(address pc) {
  assert(UnsafeMemoryAccess::_table != nullptr, "");
  for (int i = 0; i < UnsafeMemoryAccess::_table_length; i++) {
    UnsafeMemoryAccess* entry = &UnsafeMemoryAccess::_table[i];
    if (pc >= entry->start_pc() && pc < entry->end_pc()) {
      return true;
    }
  }
  return false;
}

address UnsafeMemoryAccess::page_error_continue_pc(address pc) {
  assert(UnsafeMemoryAccess::_table != nullptr, "");
  for (int i = 0; i < UnsafeMemoryAccess::_table_length; i++) {
    UnsafeMemoryAccess* entry = &UnsafeMemoryAccess::_table[i];
    if (pc >= entry->start_pc() && pc < entry->end_pc()) {
      return entry->error_exit_pc();
    }
  }
  return nullptr;
}

// Used to retrieve mark regions that lie within a generated stub so
// they can be saved along with the stub and used to reinit the table
// when the stub is reloaded.

void UnsafeMemoryAccess::collect_entries(address range_start, address range_end, GrowableArray<address>& entries)
{
  for (int i = 0; i < _table_length; i++) {
    UnsafeMemoryAccess& e = _table[i];
    assert((e._start_pc != nullptr &&
            e._end_pc != nullptr &&
            e._error_exit_pc != nullptr),
           "search for entries found incomplete table entry");
    if (e._start_pc >= range_start && e._end_pc <= range_end) {
      assert(((e._error_exit_pc >= range_start &&
               e._error_exit_pc <= range_end) ||
              e._error_exit_pc == _common_exit_stub_pc),
             "unexpected error exit pc");
      entries.append(e._start_pc);
      entries.append(e._end_pc);
      // only record the exit pc when it lies within the range of the stub
      if (e._error_exit_pc != _common_exit_stub_pc) {
        entries.append(e._error_exit_pc);
      } else {
        // the exit pc is the common exit stub, which lies outside the
        // stub; record a nullptr marker in its place
        entries.append(nullptr);
      }
    }
  }
}
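
// Note: the nullptr marker matters on reload, when the common exit stub
// will live at a different address. A plausible reading (not shown in this
// file) is that each saved (start, end, exit) triple is re-registered via
// add_to_table, with a nullptr exit rebound to the then-current
// common_exit_stub_pc.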

static BufferBlob* initialize_stubs(BlobId blob_id,
                                    int code_size, int max_aligned_stubs,
                                    const char* timer_msg,
                                    const char* buffer_name,
                                    const char* assert_msg) {
  assert(StubInfo::is_stubgen(blob_id), "not a stubgen blob %s", StubInfo::name(blob_id));
  ResourceMark rm;
  TraceTime timer(timer_msg, TRACETIME_LOG(Info, startuptime));
  // Add extra space for large CodeEntryAlignment
  int size = code_size + CodeEntryAlignment * max_aligned_stubs;
  BufferBlob* stubs_code = BufferBlob::create(buffer_name, size);
  if (stubs_code == nullptr) {
    // The compiler blob may be created late by a C2 compiler thread
    // rather than during normal initialization by the initial thread.
    // In that case we can tolerate an allocation failure because the
    // compiler will have been shut down and we have no need of the
    // blob.
    if (Thread::current()->is_Compiler_thread()) {
      assert(blob_id == BlobId::stubgen_compiler_id, "sanity");
      assert(DelayCompilerStubsGeneration, "sanity");
      log_warning(stubs)("%s\t not generated:\t no space left in CodeCache", buffer_name);
      return nullptr;
    }
    vm_exit_out_of_memory(code_size, OOM_MALLOC_ERROR, "CodeCache: no room for %s", buffer_name);
  }
  CodeBuffer buffer(stubs_code);
  StubGenerator_generate(&buffer, blob_id);
  if (code_size == 0) {
    assert(buffer.insts_size() == 0, "should not write into buffer when blob size is declared as 0");
    LogTarget(Info, stubs) lt;
    if (lt.is_enabled()) {
      LogStream ls(lt);
      ls.print_cr("%s\t not generated", buffer_name);
    }
    return nullptr;
  }
  // When new stubs are added we need to make sure there is some space
  // left to catch the situation where the declared code size should be
  // increased.
  assert(buffer.insts_remaining() > 200,
         "increase %s, code_size: %d, used: %d, free: %d",
         assert_msg, code_size, buffer.total_content_size(), buffer.insts_remaining());

  LogTarget(Info, stubs) lt;
  if (lt.is_enabled()) {
    LogStream ls(lt);
    ls.print_cr("%s\t [" INTPTR_FORMAT ", " INTPTR_FORMAT "] used: %d, free: %d",
                buffer_name, p2i(stubs_code->content_begin()), p2i(stubs_code->content_end()),
                buffer.total_content_size(), buffer.insts_remaining());
  }

  return stubs_code;
}

#define DEFINE_BLOB_INIT_METHOD(blob_name)                              \
  void StubRoutines::initialize_ ## blob_name ## _stubs() {             \
    if (STUBGEN_BLOB_FIELD_NAME(blob_name) == nullptr) {                \
      BlobId blob_id = BlobId:: JOIN3(stubgen, blob_name, id);          \
      int size = _ ## blob_name ## _code_size;                          \
      int max_aligned_size = 10;                                        \
      const char* timer_msg = "StubRoutines generation " # blob_name " stubs"; \
      const char* name = "StubRoutines (" # blob_name " stubs)";        \
      const char* assert_msg = "_" # blob_name "_code_size";            \
      STUBGEN_BLOB_FIELD_NAME(blob_name) =                              \
        initialize_stubs(blob_id, size, max_aligned_size, timer_msg,    \
                         name, assert_msg);                             \
    }                                                                   \
  }
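
// For illustration only: for the blob named 'compiler' the method generated
// above should look roughly like
//
//   void StubRoutines::initialize_compiler_stubs() {
//     if (_compiler_stubs_code == nullptr) {
//       BlobId blob_id = BlobId::stubgen_compiler_id;
//       int size = _compiler_code_size;
//       ...
//       _compiler_stubs_code = initialize_stubs(blob_id, size, ...);
//     }
//   }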


STUBGEN_BLOBS_DO(DEFINE_BLOB_INIT_METHOD)

#undef DEFINE_BLOB_INIT_METHOD


#define DEFINE_BLOB_INIT_FUNCTION(blob_name)            \
  void blob_name ## _stubs_init()  {                    \
    StubRoutines::initialize_ ## blob_name ## _stubs(); \
  }
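
// For illustration only: assuming a blob named 'initial', this expands to
//
//   void initial_stubs_init() {
//     StubRoutines::initialize_initial_stubs();
//   }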

STUBGEN_BLOBS_DO(DEFINE_BLOB_INIT_FUNCTION)

#undef DEFINE_BLOB_INIT_FUNCTION

/*
 * We generate the underlying driver method, but this wrapper is needed
 * to perform special handling depending on where the compiler init
 * gets called from. It ought to be possible to remove this at some
 * point and have a deterministic, ordered init.
 */

void compiler_stubs_init(bool in_compiler_thread) {
  if (in_compiler_thread && DelayCompilerStubsGeneration) {
    // Temporarily revert the frozen state of stub generation because
    // this is called after final_stubs_init() has finished, during
    // compiler runtime initialization.
    // That is fine because these stubs are only used by compiled code
    // and the compiler is not running yet.
    StubCodeDesc::unfreeze();
    StubRoutines::initialize_compiler_stubs();
    StubCodeDesc::freeze();
  } else if (!in_compiler_thread && !DelayCompilerStubsGeneration) {
    StubRoutines::initialize_compiler_stubs();
  }
}

//
// Default versions of arraycopy functions
//

JRT_LEAF(void, StubRoutines::jbyte_copy(jbyte* src, jbyte* dest, size_t count))
#ifndef PRODUCT
  SharedRuntime::_jbyte_array_copy_ctr++;      // Slow-path byte array copy
#endif // !PRODUCT
  Copy::conjoint_jbytes_atomic(src, dest, count);
JRT_END

JRT_LEAF(void, StubRoutines::jshort_copy(jshort* src, jshort* dest, size_t count))
#ifndef PRODUCT
  SharedRuntime::_jshort_array_copy_ctr++;     // Slow-path short/char array copy
#endif // !PRODUCT
  Copy::conjoint_jshorts_atomic(src, dest, count);
JRT_END

JRT_LEAF(void, StubRoutines::jint_copy(jint* src, jint* dest, size_t count))
#ifndef PRODUCT
  SharedRuntime::_jint_array_copy_ctr++;       // Slow-path int/float array copy
#endif // !PRODUCT
  Copy::conjoint_jints_atomic(src, dest, count);
JRT_END

JRT_LEAF(void, StubRoutines::jlong_copy(jlong* src, jlong* dest, size_t count))
#ifndef PRODUCT
  SharedRuntime::_jlong_array_copy_ctr++;      // Slow-path long/double array copy
#endif // !PRODUCT
  Copy::conjoint_jlongs_atomic(src, dest, count);
JRT_END

JRT_LEAF(void, StubRoutines::oop_copy(oop* src, oop* dest, size_t count))
#ifndef PRODUCT
  SharedRuntime::_oop_array_copy_ctr++;        // Slow-path oop array copy
#endif // !PRODUCT
  assert(count != 0, "count should be non-zero");
  ArrayAccess<>::oop_arraycopy_raw((HeapWord*)src, (HeapWord*)dest, count);
JRT_END

JRT_LEAF(void, StubRoutines::oop_copy_uninit(oop* src, oop* dest, size_t count))
#ifndef PRODUCT
  SharedRuntime::_oop_array_copy_ctr++;        // Slow-path oop array copy
#endif // !PRODUCT
  assert(count != 0, "count should be non-zero");
  ArrayAccess<IS_DEST_UNINITIALIZED>::oop_arraycopy_raw((HeapWord*)src, (HeapWord*)dest, count);
JRT_END

JRT_LEAF(void, StubRoutines::arrayof_jbyte_copy(HeapWord* src, HeapWord* dest, size_t count))
#ifndef PRODUCT
  SharedRuntime::_jbyte_array_copy_ctr++;      // Slow-path byte array copy
#endif // !PRODUCT
  Copy::arrayof_conjoint_jbytes(src, dest, count);
JRT_END

JRT_LEAF(void, StubRoutines::arrayof_jshort_copy(HeapWord* src, HeapWord* dest, size_t count))
#ifndef PRODUCT
  SharedRuntime::_jshort_array_copy_ctr++;     // Slow-path short/char array copy
#endif // !PRODUCT
  Copy::arrayof_conjoint_jshorts(src, dest, count);
JRT_END

JRT_LEAF(void, StubRoutines::arrayof_jint_copy(HeapWord* src, HeapWord* dest, size_t count))
#ifndef PRODUCT
  SharedRuntime::_jint_array_copy_ctr++;       // Slow-path int/float array copy
#endif // !PRODUCT
  Copy::arrayof_conjoint_jints(src, dest, count);
JRT_END

JRT_LEAF(void, StubRoutines::arrayof_jlong_copy(HeapWord* src, HeapWord* dest, size_t count))
#ifndef PRODUCT
  SharedRuntime::_jlong_array_copy_ctr++;       // Slow-path long/double array copy
#endif // !PRODUCT
  Copy::arrayof_conjoint_jlongs(src, dest, count);
JRT_END

JRT_LEAF(void, StubRoutines::arrayof_oop_copy(HeapWord* src, HeapWord* dest, size_t count))
#ifndef PRODUCT
  SharedRuntime::_oop_array_copy_ctr++;        // Slow-path oop array copy
#endif // !PRODUCT
  assert(count != 0, "count should be non-zero");
  ArrayAccess<ARRAYCOPY_ARRAYOF>::oop_arraycopy_raw(src, dest, count);
JRT_END

JRT_LEAF(void, StubRoutines::arrayof_oop_copy_uninit(HeapWord* src, HeapWord* dest, size_t count))
#ifndef PRODUCT
  SharedRuntime::_oop_array_copy_ctr++;        // Slow-path oop array copy
#endif // !PRODUCT
  assert(count != 0, "count should be non-zero");
  ArrayAccess<ARRAYCOPY_ARRAYOF | IS_DEST_UNINITIALIZED>::oop_arraycopy_raw(src, dest, count);
JRT_END

address StubRoutines::select_fill_function(BasicType t, bool aligned, const char* &name) {
#define RETURN_STUB(xxx_fill) { \
  name = #xxx_fill; \
  return StubRoutines::xxx_fill(); }
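
// For example, RETURN_STUB(jbyte_fill) expands to
//   { name = "jbyte_fill"; return StubRoutines::jbyte_fill(); }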

  switch (t) {
  case T_BYTE:
  case T_BOOLEAN:
    if (!aligned) RETURN_STUB(jbyte_fill);
    RETURN_STUB(arrayof_jbyte_fill);
  case T_CHAR:
  case T_SHORT:
    if (!aligned) RETURN_STUB(jshort_fill);
    RETURN_STUB(arrayof_jshort_fill);
  case T_INT:
  case T_FLOAT:
    if (!aligned) RETURN_STUB(jint_fill);
    RETURN_STUB(arrayof_jint_fill);
  case T_DOUBLE:
  case T_LONG:
  case T_ARRAY:
  case T_OBJECT:
  case T_NARROWOOP:
  case T_NARROWKLASS:
  case T_ADDRESS:
  case T_VOID:
    // Currently unsupported
    return nullptr;

  default:
    ShouldNotReachHere();
    return nullptr;
  }

#undef RETURN_STUB
}

// constants for computing the copy function
enum {
  COPYFUNC_UNALIGNED = 0,
  COPYFUNC_ALIGNED = 1,                 // src, dest aligned to HeapWordSize
  COPYFUNC_CONJOINT = 0,
  COPYFUNC_DISJOINT = 2                 // src != dest, or transfer can descend
};
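
// For example, an aligned, disjoint copy selects
//   COPYFUNC_ALIGNED + COPYFUNC_DISJOINT == 3,
// which matches the (COPYFUNC_DISJOINT | COPYFUNC_ALIGNED) cases below.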

// Note:  The condition "disjoint" applies also for overlapping copies
// where a descending copy is permitted (i.e., dest_offset <= src_offset).
address
StubRoutines::select_arraycopy_function(BasicType t, bool aligned, bool disjoint, const char* &name, bool dest_uninitialized) {
  int selector =
    (aligned  ? COPYFUNC_ALIGNED  : COPYFUNC_UNALIGNED) +
    (disjoint ? COPYFUNC_DISJOINT : COPYFUNC_CONJOINT);

#define RETURN_STUB(xxx_arraycopy) { \
  name = #xxx_arraycopy; \
  return StubRoutines::xxx_arraycopy(); }

#define RETURN_STUB_PARM(xxx_arraycopy, parm) { \
  name = parm ? #xxx_arraycopy "_uninit" : #xxx_arraycopy; \
  return StubRoutines::xxx_arraycopy(parm); }
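
// For example, RETURN_STUB_PARM(oop_arraycopy, dest_uninitialized) expands to
//   { name = dest_uninitialized ? "oop_arraycopy" "_uninit" : "oop_arraycopy";
//     return StubRoutines::oop_arraycopy(dest_uninitialized); }
// where adjacent string literals concatenate to "oop_arraycopy_uninit".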

  switch (t) {
  case T_BYTE:
  case T_BOOLEAN:
    switch (selector) {
    case COPYFUNC_CONJOINT | COPYFUNC_UNALIGNED:  RETURN_STUB(jbyte_arraycopy);
    case COPYFUNC_CONJOINT | COPYFUNC_ALIGNED:    RETURN_STUB(arrayof_jbyte_arraycopy);
    case COPYFUNC_DISJOINT | COPYFUNC_UNALIGNED:  RETURN_STUB(jbyte_disjoint_arraycopy);
    case COPYFUNC_DISJOINT | COPYFUNC_ALIGNED:    RETURN_STUB(arrayof_jbyte_disjoint_arraycopy);
    }
  case T_CHAR:
  case T_SHORT:
    switch (selector) {
    case COPYFUNC_CONJOINT | COPYFUNC_UNALIGNED:  RETURN_STUB(jshort_arraycopy);
    case COPYFUNC_CONJOINT | COPYFUNC_ALIGNED:    RETURN_STUB(arrayof_jshort_arraycopy);
    case COPYFUNC_DISJOINT | COPYFUNC_UNALIGNED:  RETURN_STUB(jshort_disjoint_arraycopy);
    case COPYFUNC_DISJOINT | COPYFUNC_ALIGNED:    RETURN_STUB(arrayof_jshort_disjoint_arraycopy);
    }
  case T_INT:
  case T_FLOAT:
    switch (selector) {
    case COPYFUNC_CONJOINT | COPYFUNC_UNALIGNED:  RETURN_STUB(jint_arraycopy);
    case COPYFUNC_CONJOINT | COPYFUNC_ALIGNED:    RETURN_STUB(arrayof_jint_arraycopy);
    case COPYFUNC_DISJOINT | COPYFUNC_UNALIGNED:  RETURN_STUB(jint_disjoint_arraycopy);
    case COPYFUNC_DISJOINT | COPYFUNC_ALIGNED:    RETURN_STUB(arrayof_jint_disjoint_arraycopy);
    }
  case T_DOUBLE:
  case T_LONG:
    switch (selector) {
    case COPYFUNC_CONJOINT | COPYFUNC_UNALIGNED:  RETURN_STUB(jlong_arraycopy);
    case COPYFUNC_CONJOINT | COPYFUNC_ALIGNED:    RETURN_STUB(arrayof_jlong_arraycopy);
    case COPYFUNC_DISJOINT | COPYFUNC_UNALIGNED:  RETURN_STUB(jlong_disjoint_arraycopy);
    case COPYFUNC_DISJOINT | COPYFUNC_ALIGNED:    RETURN_STUB(arrayof_jlong_disjoint_arraycopy);
    }
  case T_ARRAY:
  case T_OBJECT:
    switch (selector) {
    case COPYFUNC_CONJOINT | COPYFUNC_UNALIGNED:  RETURN_STUB_PARM(oop_arraycopy, dest_uninitialized);
    case COPYFUNC_CONJOINT | COPYFUNC_ALIGNED:    RETURN_STUB_PARM(arrayof_oop_arraycopy, dest_uninitialized);
    case COPYFUNC_DISJOINT | COPYFUNC_UNALIGNED:  RETURN_STUB_PARM(oop_disjoint_arraycopy, dest_uninitialized);
    case COPYFUNC_DISJOINT | COPYFUNC_ALIGNED:    RETURN_STUB_PARM(arrayof_oop_disjoint_arraycopy, dest_uninitialized);
    }
  default:
    ShouldNotReachHere();
    return nullptr;
  }

#undef RETURN_STUB
#undef RETURN_STUB_PARM
}

UnsafeMemoryAccessMark::UnsafeMemoryAccessMark(StubCodeGenerator* cgen, bool add_entry, bool continue_at_scope_end, address error_exit_pc) {
  _cgen = cgen;
  _ucm_entry = nullptr;
  if (add_entry) {
    address err_exit_pc = nullptr;
    if (!continue_at_scope_end) {
      err_exit_pc = error_exit_pc != nullptr ? error_exit_pc : UnsafeMemoryAccess::common_exit_stub_pc();
    }
    assert(err_exit_pc != nullptr || continue_at_scope_end, "error exit not set");
    _ucm_entry = UnsafeMemoryAccess::add_to_table(_cgen->assembler()->pc(), nullptr, err_exit_pc);
  }
}

UnsafeMemoryAccessMark::~UnsafeMemoryAccessMark() {
  if (_ucm_entry != nullptr) {
    _ucm_entry->set_end_pc(_cgen->assembler()->pc());
    if (_ucm_entry->error_exit_pc() == nullptr) {
      _ucm_entry->set_error_exit_pc(_cgen->assembler()->pc());
    }
  }
}
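
// Typical (hypothetical) use inside a platform stub generator: open a scope
// around the instructions that can fault on unmapped memory, so that the
// destructor records the end pc and, with continue_at_scope_end, makes a
// fault resume right after the scope:
//
//   {
//     UnsafeMemoryAccessMark umam(this, true /* add_entry */, true /* continue_at_scope_end */);
//     // ... emit the potentially faulting copy instructions ...
//   }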