1 /*
  2  * Copyright (c) 1997, 2026, Oracle and/or its affiliates. All rights reserved.
  3  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  4  *
  5  * This code is free software; you can redistribute it and/or modify it
  6  * under the terms of the GNU General Public License version 2 only, as
  7  * published by the Free Software Foundation.
  8  *
  9  * This code is distributed in the hope that it will be useful, but WITHOUT
 10  * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 11  * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
 12  * version 2 for more details (a copy is included in the LICENSE file that
 13  * accompanied this code).
 14  *
 15  * You should have received a copy of the GNU General Public License version
 16  * 2 along with this work; if not, write to the Free Software Foundation,
 17  * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 18  *
 19  * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 20  * or visit www.oracle.com if you need additional information or have any
 21  * questions.
 22  *
 23  */
 24 
 25 #include "asm/codeBuffer.hpp"
 26 #include "asm/macroAssembler.inline.hpp"
 27 #include "memory/resourceArea.hpp"
 28 #include "oops/access.inline.hpp"
 29 #include "oops/klass.hpp"
 30 #include "oops/oop.inline.hpp"
 31 #include "prims/vectorSupport.hpp"
 32 #include "runtime/continuation.hpp"
 33 #include "runtime/interfaceSupport.inline.hpp"
 34 #include "runtime/sharedRuntime.hpp"
 35 #include "runtime/stubRoutines.hpp"
 36 #include "runtime/timerTrace.hpp"
 37 #include "utilities/align.hpp"
 38 #include "utilities/copy.hpp"
 39 #ifdef COMPILER2
 40 #include "opto/runtime.hpp"
 41 #endif
 42 
// Static state for the table of unsafe-memory-access regions recorded
// while generating stubs. page_error_continue_pc() (below) uses the
// table to map a faulting pc inside a region to that region's error exit.
UnsafeMemoryAccess* UnsafeMemoryAccess::_table                  = nullptr;
int UnsafeMemoryAccess::_table_length                           = 0;  // number of entries in use
int UnsafeMemoryAccess::_table_max_length                       = 0;  // capacity allocated by create_table()
address UnsafeMemoryAccess::_common_exit_stub_pc                = nullptr;  // shared error-exit stub, may serve many regions
 47 
 48 // Implementation of StubRoutines - for a description of how to
 49 // declare new blobs, stubs and entries , see stubDefinitions.hpp.
 50 
 51 // Define fields used to store blobs
 52 
// Expand one null-initialized BufferBlob* static field of StubRoutines
// per stubgen blob. (Comments are kept outside the macro body because a
// // comment would swallow the line-continuation backslash.)
#define DEFINE_STUBGEN_BLOB_FIELD(blob_name)                            \
  BufferBlob* StubRoutines:: STUBGEN_BLOB_FIELD_NAME(blob_name) = nullptr;

STUBGEN_BLOBS_DO(DEFINE_STUBGEN_BLOB_FIELD)

#undef DEFINE_STUBGEN_BLOB_FIELD
 59 
 60 // Define fields used to store stubgen stub entries
 61 
// Expand one static entry field of StubRoutines per stubgen stub entry.
// Three variants: plain fields default to nullptr ...
#define DEFINE_STUBGEN_ENTRY_FIELD(blob_name, stub_name, field_name, getter_name) \
  address StubRoutines:: STUB_FIELD_NAME(field_name) = nullptr;

// ... fields with a declared init function start out pointing at that
// (native) function rather than at generated code ...
#define DEFINE_STUBGEN_ENTRY_FIELD_INIT(blob_name, stub_name, field_name, getter_name, init_function) \
  address StubRoutines:: STUB_FIELD_NAME(field_name) = CAST_FROM_FN_PTR(address, init_function);

// ... and array-valued fields get `count` null entries.
#define DEFINE_STUBGEN_ENTRY_FIELD_ARRAY(blob_name, stub_name, field_name, getter_name, count) \
  address StubRoutines:: STUB_FIELD_NAME(field_name)[count] = { nullptr };

STUBGEN_ENTRIES_DO(DEFINE_STUBGEN_ENTRY_FIELD, DEFINE_STUBGEN_ENTRY_FIELD_INIT, DEFINE_STUBGEN_ENTRY_FIELD_ARRAY)

#undef DEFINE_STUBGEN_ENTRY_FIELD_ARRAY
#undef DEFINE_STUBGEN_ENTRY_FIELD_INIT
#undef DEFINE_STUBGEN_ENTRY_FIELD
 76 
// Counter presumably bumped by generated verify_oop stubs — confirm
// against the platform StubGenerator.
jint    StubRoutines::_verify_oop_count                         = 0;


// Entry points for string-indexof stub variants; filled in by stub
// generation (one slot per variant — TODO confirm variant mapping).
address StubRoutines::_string_indexof_array[4]   =    { nullptr };
 81 
 82 const char* StubRoutines::get_blob_name(BlobId id) {
 83   assert(StubInfo::is_stubgen(id), "not a stubgen blob %s", StubInfo::name(id));
 84   return StubInfo::name(id);
 85 }
 86 
 87 const char* StubRoutines::get_stub_name(StubId id) {
 88   assert(StubInfo::is_stubgen(id), "not a stubgen stub %s", StubInfo::name(id));
 89   return StubInfo::name(id);
 90 }
 91 
 92 #ifdef ASSERT
 93 // translate a stub id to an associated blob id while checking that it
 94 // is a stubgen stub
 95 
 96 BlobId StubRoutines::stub_to_blob(StubId id) {
 97   assert(StubInfo::is_stubgen(id), "not a stubgen stub %s", StubInfo::name(id));
 98   return StubInfo::blob(id);
 99 }
100 
101 #endif // ASSERT
102 
// Entry points presumably supporting the inline-type (value object)
// calling convention — confirm against the generator that fills them.
// TODO: update with 8343767
address StubRoutines::_load_inline_type_fields_in_regs = nullptr;
address StubRoutines::_store_inline_type_fields_to_buf = nullptr;
106 
107 // Initialization
108 
109 extern void StubGenerator_generate(CodeBuffer* code, BlobId blob_id); // only interface to generators
110 
111 void UnsafeMemoryAccess::create_table(int max_size) {
112   UnsafeMemoryAccess::_table = new UnsafeMemoryAccess[max_size];
113   UnsafeMemoryAccess::_table_max_length = max_size;
114 }
115 
116 bool UnsafeMemoryAccess::contains_pc(address pc) {
117   assert(UnsafeMemoryAccess::_table != nullptr, "");
118   for (int i = 0; i < UnsafeMemoryAccess::_table_length; i++) {
119     UnsafeMemoryAccess* entry = &UnsafeMemoryAccess::_table[i];
120     if (pc >= entry->start_pc() && pc < entry->end_pc()) {
121       return true;
122     }
123   }
124   return false;
125 }
126 
127 address UnsafeMemoryAccess::page_error_continue_pc(address pc) {
128   assert(UnsafeMemoryAccess::_table != nullptr, "");
129   for (int i = 0; i < UnsafeMemoryAccess::_table_length; i++) {
130     UnsafeMemoryAccess* entry = &UnsafeMemoryAccess::_table[i];
131     if (pc >= entry->start_pc() && pc < entry->end_pc()) {
132       return entry->error_exit_pc();
133     }
134   }
135   return nullptr;
136 }
137 
138 // Used to retrieve mark regions that lie within a generated stub so
139 // they can be saved along with the stub and used to reinit the table
140 // when the stub is reloaded.
141 
142 void UnsafeMemoryAccess::collect_entries(address range_start, address range_end, GrowableArray<address>& entries)
143 {
144   for (int i = 0; i < _table_length; i++) {
145     UnsafeMemoryAccess& e = _table[i];
146     assert((e._start_pc != nullptr &&
147             e._end_pc != nullptr &&
148             e._error_exit_pc != nullptr),
149            "search for entries found incomplete table entry");
150     if (e._start_pc >= range_start && e._end_pc <= range_end) {
151       assert(((e._error_exit_pc >= range_start &&
152                e._error_exit_pc <= range_end) ||
153               e._error_exit_pc == _common_exit_stub_pc),
154              "unexpected error exit pc");
155       entries.append(e._start_pc);
156       entries.append(e._end_pc);
157       // only return an exit pc when it is within the range of the stub
158       if (e._error_exit_pc != _common_exit_stub_pc) {
159         entries.append(e._error_exit_pc);
160       } else {
161         // an address outside the stub must be the common exit stub address
162         entries.append(nullptr);
163       }
164     }
165   }
166 }
167 
168 static BufferBlob* initialize_stubs(BlobId blob_id,
169                                     int code_size, int max_aligned_stubs,
170                                     const char* timer_msg,
171                                     const char* buffer_name,
172                                     const char* assert_msg) {
173   assert(StubInfo::is_stubgen(blob_id), "not a stubgen blob %s", StubInfo::name(blob_id));
174   ResourceMark rm;
175   TraceTime timer(timer_msg, TRACETIME_LOG(Info, startuptime));
176   // Add extra space for large CodeEntryAlignment
177   int size = code_size + CodeEntryAlignment * max_aligned_stubs;
178   BufferBlob* stubs_code = BufferBlob::create(buffer_name, size);
179   if (stubs_code == nullptr) {
180     // The compiler blob may be created late by a C2 compiler thread
181     // rather than during normal initialization by the initial thread.
182     // In that case we can tolerate an allocation failure because the
183     // compiler will have been shut down and we have no need of the
184     // blob.
185     if (Thread::current()->is_Compiler_thread()) {
186       assert(blob_id == BlobId::stubgen_compiler_id, "sanity");
187       assert(DelayCompilerStubsGeneration, "sanity");
188       log_warning(stubs)("%s\t not generated:\t no space left in CodeCache", buffer_name);
189       return nullptr;
190     }
191     vm_exit_out_of_memory(code_size, OOM_MALLOC_ERROR, "CodeCache: no room for %s", buffer_name);
192   }
193   CodeBuffer buffer(stubs_code);
194   StubGenerator_generate(&buffer, blob_id);
195   if (code_size == 0) {
196     assert(buffer.insts_size() == 0, "should not write into buffer when bob size declared as 0");
197     LogTarget(Info, stubs) lt;
198     if (lt.is_enabled()) {
199       LogStream ls(lt);
200       ls.print_cr("%s\t not generated", buffer_name);
201     }
202     return nullptr;
203   }
204   // When new stubs added we need to make sure there is some space left
205   // to catch situation when we should increase size again.
206   assert(buffer.insts_remaining() > 200,
207          "increase %s, code_size: %d, used: %d, free: %d",
208          assert_msg, code_size, buffer.total_content_size(), buffer.insts_remaining());
209 
210   LogTarget(Info, stubs) lt;
211   if (lt.is_enabled()) {
212     LogStream ls(lt);
213     ls.print_cr("%s\t [" INTPTR_FORMAT ", " INTPTR_FORMAT "] used: %d, free: %d",
214                 buffer_name, p2i(stubs_code->content_begin()), p2i(stubs_code->content_end()),
215                 buffer.total_content_size(), buffer.insts_remaining());
216   }
217 
218   return stubs_code;
219 }
220 
// Expand one lazy initializer per stubgen blob:
// StubRoutines::initialize_<blob>_stubs() creates and populates the
// blob on first call and is a no-op once the field is non-null.
#define DEFINE_BLOB_INIT_METHOD(blob_name)                              \
  void StubRoutines::initialize_ ## blob_name ## _stubs() {             \
    if (STUBGEN_BLOB_FIELD_NAME(blob_name) == nullptr) {                \
      BlobId blob_id = BlobId:: JOIN3(stubgen, blob_name, id);          \
      int size = _ ## blob_name ## _code_size;                          \
      int max_aligned_stubs = StubInfo::stub_count(blob_id);            \
      const char* timer_msg = "StubRoutines generation " # blob_name " stubs"; \
      const char* name = "StubRoutines (" # blob_name " stubs)";        \
      const char* assert_msg = "_" # blob_name "_code_size";            \
      STUBGEN_BLOB_FIELD_NAME(blob_name) =                              \
        initialize_stubs(blob_id, size, max_aligned_stubs, timer_msg,   \
                         name, assert_msg);                             \
    }                                                                   \
  }


STUBGEN_BLOBS_DO(DEFINE_BLOB_INIT_METHOD)

#undef DEFINE_BLOB_INIT_METHOD
240 
241 
// Expand one free function per stubgen blob (<blob>_stubs_init()) that
// forwards to the corresponding StubRoutines initializer; these are the
// entry points used by the VM init sequence.
#define DEFINE_BLOB_INIT_FUNCTION(blob_name)            \
  void blob_name ## _stubs_init()  {                    \
    StubRoutines::initialize_ ## blob_name ## _stubs(); \
  }

STUBGEN_BLOBS_DO(DEFINE_BLOB_INIT_FUNCTION)

#undef DEFINE_BLOB_INIT_FUNCTION
250 
251 /*
252  * we generate the underlying driver method but this wrapper is needed
253  * to perform special handling depending on where the compiler init
254  * gets called from. it ought to be possible to remove this at some
255  * point and have a determinate ordered init.
256  */
257 
258 void compiler_stubs_init(bool in_compiler_thread) {
259   if (in_compiler_thread && DelayCompilerStubsGeneration) {
260     // Temporarily revert state of stubs generation because
261     // it is called after final_stubs_init() finished
262     // during compiler runtime initialization.
263     // It is fine because these stubs are only used by
264     // compiled code and compiler is not running yet.
265     StubCodeDesc::unfreeze();
266     StubRoutines::initialize_compiler_stubs();
267     StubCodeDesc::freeze();
268   } else if (!in_compiler_thread && !DelayCompilerStubsGeneration) {
269     StubRoutines::initialize_compiler_stubs();
270   }
271 }
272 
273 //
274 // Default versions of arraycopy functions
275 //
276 
// Default (slow-path) conjoint byte array copy.
JRT_LEAF(void, StubRoutines::jbyte_copy(jbyte* src, jbyte* dest, size_t count))
#ifndef PRODUCT
  SharedRuntime::_jbyte_array_copy_ctr++;      // Slow-path byte array copy
#endif // !PRODUCT
  Copy::conjoint_jbytes_atomic(src, dest, count);
JRT_END
283 
// Default (slow-path) conjoint short/char array copy.
JRT_LEAF(void, StubRoutines::jshort_copy(jshort* src, jshort* dest, size_t count))
#ifndef PRODUCT
  SharedRuntime::_jshort_array_copy_ctr++;     // Slow-path short/char array copy
#endif // !PRODUCT
  Copy::conjoint_jshorts_atomic(src, dest, count);
JRT_END
290 
// Default (slow-path) conjoint int/float array copy.
JRT_LEAF(void, StubRoutines::jint_copy(jint* src, jint* dest, size_t count))
#ifndef PRODUCT
  SharedRuntime::_jint_array_copy_ctr++;       // Slow-path int/float array copy
#endif // !PRODUCT
  Copy::conjoint_jints_atomic(src, dest, count);
JRT_END
297 
// Default (slow-path) conjoint long/double array copy.
JRT_LEAF(void, StubRoutines::jlong_copy(jlong* src, jlong* dest, size_t count))
#ifndef PRODUCT
  SharedRuntime::_jlong_array_copy_ctr++;      // Slow-path long/double array copy
#endif // !PRODUCT
  Copy::conjoint_jlongs_atomic(src, dest, count);
JRT_END
304 
// Default (slow-path) conjoint oop array copy; count must be non-zero.
JRT_LEAF(void, StubRoutines::oop_copy(oop* src, oop* dest, size_t count))
#ifndef PRODUCT
  SharedRuntime::_oop_array_copy_ctr++;        // Slow-path oop array copy
#endif // !PRODUCT
  assert(count != 0, "count should be non-zero");
  ArrayAccess<>::oop_arraycopy_raw((HeapWord*)src, (HeapWord*)dest, count);
JRT_END
312 
// As oop_copy, but the destination is uninitialized (no pre-barrier on
// old destination values); count must be non-zero.
JRT_LEAF(void, StubRoutines::oop_copy_uninit(oop* src, oop* dest, size_t count))
#ifndef PRODUCT
  SharedRuntime::_oop_array_copy_ctr++;        // Slow-path oop array copy
#endif // !PRODUCT
  assert(count != 0, "count should be non-zero");
  ArrayAccess<IS_DEST_UNINITIALIZED>::oop_arraycopy_raw((HeapWord*)src, (HeapWord*)dest, count);
JRT_END
320 
// Default (slow-path) byte array copy for HeapWord-aligned operands.
JRT_LEAF(void, StubRoutines::arrayof_jbyte_copy(HeapWord* src, HeapWord* dest, size_t count))
#ifndef PRODUCT
  SharedRuntime::_jbyte_array_copy_ctr++;      // Slow-path byte array copy
#endif // !PRODUCT
  Copy::arrayof_conjoint_jbytes(src, dest, count);
JRT_END
327 
// Default (slow-path) short/char array copy for HeapWord-aligned operands.
JRT_LEAF(void, StubRoutines::arrayof_jshort_copy(HeapWord* src, HeapWord* dest, size_t count))
#ifndef PRODUCT
  SharedRuntime::_jshort_array_copy_ctr++;     // Slow-path short/char array copy
#endif // !PRODUCT
  Copy::arrayof_conjoint_jshorts(src, dest, count);
JRT_END
334 
// Default (slow-path) int/float array copy for HeapWord-aligned operands.
JRT_LEAF(void, StubRoutines::arrayof_jint_copy(HeapWord* src, HeapWord* dest, size_t count))
#ifndef PRODUCT
  SharedRuntime::_jint_array_copy_ctr++;       // Slow-path int/float array copy
#endif // !PRODUCT
  Copy::arrayof_conjoint_jints(src, dest, count);
JRT_END
341 
// Default (slow-path) long/double array copy for HeapWord-aligned operands.
JRT_LEAF(void, StubRoutines::arrayof_jlong_copy(HeapWord* src, HeapWord* dest, size_t count))
#ifndef PRODUCT
  SharedRuntime::_jlong_array_copy_ctr++;       // Slow-path long/double array copy
#endif // !PRODUCT
  Copy::arrayof_conjoint_jlongs(src, dest, count);
JRT_END
348 
// Default (slow-path) oop array copy for HeapWord-aligned operands;
// count must be non-zero.
JRT_LEAF(void, StubRoutines::arrayof_oop_copy(HeapWord* src, HeapWord* dest, size_t count))
#ifndef PRODUCT
  SharedRuntime::_oop_array_copy_ctr++;        // Slow-path oop array copy
#endif // !PRODUCT
  assert(count != 0, "count should be non-zero");
  ArrayAccess<ARRAYCOPY_ARRAYOF>::oop_arraycopy_raw(src, dest, count);
JRT_END
356 
// As arrayof_oop_copy, but the destination is uninitialized; count must
// be non-zero.
JRT_LEAF(void, StubRoutines::arrayof_oop_copy_uninit(HeapWord* src, HeapWord* dest, size_t count))
#ifndef PRODUCT
  SharedRuntime::_oop_array_copy_ctr++;        // Slow-path oop array copy
#endif // !PRODUCT
  assert(count != 0, "count should be non-zero");
  ArrayAccess<ARRAYCOPY_ARRAYOF | IS_DEST_UNINITIALIZED>::oop_arraycopy_raw(src, dest, count);
JRT_END
364 
365 address StubRoutines::select_fill_function(BasicType t, bool aligned, const char* &name) {
366 #define RETURN_STUB(xxx_fill) { \
367   name = #xxx_fill; \
368   return StubRoutines::xxx_fill(); }
369 
370   switch (t) {
371   case T_BYTE:
372   case T_BOOLEAN:
373     if (!aligned) RETURN_STUB(jbyte_fill);
374     RETURN_STUB(arrayof_jbyte_fill);
375   case T_CHAR:
376   case T_SHORT:
377     if (!aligned) RETURN_STUB(jshort_fill);
378     RETURN_STUB(arrayof_jshort_fill);
379   case T_INT:
380   case T_FLOAT:
381     if (!aligned) RETURN_STUB(jint_fill);
382     RETURN_STUB(arrayof_jint_fill);
383   case T_DOUBLE:
384   case T_LONG:
385   case T_ARRAY:
386   case T_OBJECT:
387   case T_NARROWOOP:
388   case T_NARROWKLASS:
389   case T_ADDRESS:
390   case T_VOID:
391     // Currently unsupported
392     return nullptr;
393 
394   default:
395     ShouldNotReachHere();
396     return nullptr;
397   }
398 
399 #undef RETURN_STUB
400 }
401 
// Constants for computing the copy-function selector. ALIGNED and
// DISJOINT occupy distinct bits, so the two properties combine into a
// single small integer.
enum {
  COPYFUNC_UNALIGNED = 0,
  COPYFUNC_ALIGNED = 1,                 // src, dest aligned to HeapWordSize
  COPYFUNC_CONJOINT = 0,
  COPYFUNC_DISJOINT = 2                 // src != dest, or transfer can descend
};
409 
410 // Note:  The condition "disjoint" applies also for overlapping copies
411 // where an descending copy is permitted (i.e., dest_offset <= src_offset).
412 address
413 StubRoutines::select_arraycopy_function(BasicType t, bool aligned, bool disjoint, const char* &name, bool dest_uninitialized) {
414   int selector =
415     (aligned  ? COPYFUNC_ALIGNED  : COPYFUNC_UNALIGNED) +
416     (disjoint ? COPYFUNC_DISJOINT : COPYFUNC_CONJOINT);
417 
418 #define RETURN_STUB(xxx_arraycopy) { \
419   name = #xxx_arraycopy; \
420   return StubRoutines::xxx_arraycopy(); }
421 
422 #define RETURN_STUB_PARM(xxx_arraycopy, parm) { \
423   name = parm ? #xxx_arraycopy "_uninit": #xxx_arraycopy; \
424   return StubRoutines::xxx_arraycopy(parm); }
425 
426   switch (t) {
427   case T_BYTE:
428   case T_BOOLEAN:
429     switch (selector) {
430     case COPYFUNC_CONJOINT | COPYFUNC_UNALIGNED:  RETURN_STUB(jbyte_arraycopy);
431     case COPYFUNC_CONJOINT | COPYFUNC_ALIGNED:    RETURN_STUB(arrayof_jbyte_arraycopy);
432     case COPYFUNC_DISJOINT | COPYFUNC_UNALIGNED:  RETURN_STUB(jbyte_disjoint_arraycopy);
433     case COPYFUNC_DISJOINT | COPYFUNC_ALIGNED:    RETURN_STUB(arrayof_jbyte_disjoint_arraycopy);
434     }
435   case T_CHAR:
436   case T_SHORT:
437     switch (selector) {
438     case COPYFUNC_CONJOINT | COPYFUNC_UNALIGNED:  RETURN_STUB(jshort_arraycopy);
439     case COPYFUNC_CONJOINT | COPYFUNC_ALIGNED:    RETURN_STUB(arrayof_jshort_arraycopy);
440     case COPYFUNC_DISJOINT | COPYFUNC_UNALIGNED:  RETURN_STUB(jshort_disjoint_arraycopy);
441     case COPYFUNC_DISJOINT | COPYFUNC_ALIGNED:    RETURN_STUB(arrayof_jshort_disjoint_arraycopy);
442     }
443   case T_INT:
444   case T_FLOAT:
445     switch (selector) {
446     case COPYFUNC_CONJOINT | COPYFUNC_UNALIGNED:  RETURN_STUB(jint_arraycopy);
447     case COPYFUNC_CONJOINT | COPYFUNC_ALIGNED:    RETURN_STUB(arrayof_jint_arraycopy);
448     case COPYFUNC_DISJOINT | COPYFUNC_UNALIGNED:  RETURN_STUB(jint_disjoint_arraycopy);
449     case COPYFUNC_DISJOINT | COPYFUNC_ALIGNED:    RETURN_STUB(arrayof_jint_disjoint_arraycopy);
450     }
451   case T_DOUBLE:
452   case T_LONG:
453     switch (selector) {
454     case COPYFUNC_CONJOINT | COPYFUNC_UNALIGNED:  RETURN_STUB(jlong_arraycopy);
455     case COPYFUNC_CONJOINT | COPYFUNC_ALIGNED:    RETURN_STUB(arrayof_jlong_arraycopy);
456     case COPYFUNC_DISJOINT | COPYFUNC_UNALIGNED:  RETURN_STUB(jlong_disjoint_arraycopy);
457     case COPYFUNC_DISJOINT | COPYFUNC_ALIGNED:    RETURN_STUB(arrayof_jlong_disjoint_arraycopy);
458     }
459   case T_ARRAY:
460   case T_OBJECT:
461     switch (selector) {
462     case COPYFUNC_CONJOINT | COPYFUNC_UNALIGNED:  RETURN_STUB_PARM(oop_arraycopy, dest_uninitialized);
463     case COPYFUNC_CONJOINT | COPYFUNC_ALIGNED:    RETURN_STUB_PARM(arrayof_oop_arraycopy, dest_uninitialized);
464     case COPYFUNC_DISJOINT | COPYFUNC_UNALIGNED:  RETURN_STUB_PARM(oop_disjoint_arraycopy, dest_uninitialized);
465     case COPYFUNC_DISJOINT | COPYFUNC_ALIGNED:    RETURN_STUB_PARM(arrayof_oop_disjoint_arraycopy, dest_uninitialized);
466     }
467   default:
468     ShouldNotReachHere();
469     return nullptr;
470   }
471 
472 #undef RETURN_STUB
473 #undef RETURN_STUB_PARM
474 }
475 
476 UnsafeMemoryAccessMark::UnsafeMemoryAccessMark(StubCodeGenerator* cgen, bool add_entry, bool continue_at_scope_end, address error_exit_pc) {
477   _cgen = cgen;
478   _ucm_entry = nullptr;
479   if (add_entry) {
480     address err_exit_pc = nullptr;
481     if (!continue_at_scope_end) {
482       err_exit_pc = error_exit_pc != nullptr ? error_exit_pc : UnsafeMemoryAccess::common_exit_stub_pc();
483     }
484     assert(err_exit_pc != nullptr || continue_at_scope_end, "error exit not set");
485     _ucm_entry = UnsafeMemoryAccess::add_to_table(_cgen->assembler()->pc(), nullptr, err_exit_pc);
486   }
487 }
488 
489 UnsafeMemoryAccessMark::~UnsafeMemoryAccessMark() {
490   if (_ucm_entry != nullptr) {
491     _ucm_entry->set_end_pc(_cgen->assembler()->pc());
492     if (_ucm_entry->error_exit_pc() == nullptr) {
493       _ucm_entry->set_error_exit_pc(_cgen->assembler()->pc());
494     }
495   }
496 }