1 /*
2 * Copyright (c) 1998, 2026, Oracle and/or its affiliates. All rights reserved.
3 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
4 *
5 * This code is free software; you can redistribute it and/or modify it
6 * under the terms of the GNU General Public License version 2 only, as
7 * published by the Free Software Foundation.
8 *
9 * This code is distributed in the hope that it will be useful, but WITHOUT
10 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
11 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
12 * version 2 for more details (a copy is included in the LICENSE file that
13 * accompanied this code).
14 *
15 * You should have received a copy of the GNU General Public License version
16 * 2 along with this work; if not, write to the Free Software Foundation,
17 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
18 *
19 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
20 * or visit www.oracle.com if you need additional information or have any
21 * questions.
22 *
23 */
24
25 #include "classfile/vmClasses.hpp"
26 #include "classfile/vmSymbols.hpp"
27 #include "code/codeCache.hpp"
28 #include "code/compiledIC.hpp"
29 #include "code/nmethod.hpp"
30 #include "code/pcDesc.hpp"
31 #include "code/scopeDesc.hpp"
32 #include "code/vtableStubs.hpp"
33 #include "compiler/compilationMemoryStatistic.hpp"
34 #include "compiler/compileBroker.hpp"
35 #include "compiler/oopMap.hpp"
36 #include "gc/g1/g1HeapRegion.hpp"
37 #include "gc/shared/barrierSet.hpp"
38 #include "gc/shared/collectedHeap.hpp"
39 #include "gc/shared/gcLocker.hpp"
40 #include "interpreter/bytecode.hpp"
41 #include "interpreter/interpreter.hpp"
42 #include "interpreter/linkResolver.hpp"
43 #include "logging/log.hpp"
44 #include "logging/logStream.hpp"
45 #include "memory/oopFactory.hpp"
46 #include "memory/resourceArea.hpp"
47 #include "oops/flatArrayKlass.hpp"
48 #include "oops/flatArrayOop.inline.hpp"
49 #include "oops/inlineKlass.inline.hpp"
50 #include "oops/klass.inline.hpp"
51 #include "oops/objArrayKlass.hpp"
52 #include "oops/oop.inline.hpp"
53 #include "oops/typeArrayOop.inline.hpp"
54 #include "oops/valuePayload.inline.hpp"
55 #include "opto/ad.hpp"
56 #include "opto/addnode.hpp"
57 #include "opto/callnode.hpp"
58 #include "opto/cfgnode.hpp"
59 #include "opto/graphKit.hpp"
60 #include "opto/machnode.hpp"
61 #include "opto/matcher.hpp"
62 #include "opto/memnode.hpp"
63 #include "opto/mulnode.hpp"
64 #include "opto/output.hpp"
65 #include "opto/runtime.hpp"
66 #include "opto/subnode.hpp"
67 #include "prims/jvmtiExport.hpp"
68 #include "runtime/atomicAccess.hpp"
69 #include "runtime/frame.inline.hpp"
70 #include "runtime/handles.inline.hpp"
71 #include "runtime/interfaceSupport.inline.hpp"
72 #include "runtime/javaCalls.hpp"
73 #include "runtime/mountUnmountDisabler.hpp"
74 #include "runtime/sharedRuntime.hpp"
75 #include "runtime/signature.hpp"
76 #include "runtime/stackWatermarkSet.hpp"
77 #include "runtime/synchronizer.hpp"
78 #include "runtime/threadWXSetters.inline.hpp"
79 #include "runtime/vframe.hpp"
80 #include "runtime/vframe_hp.hpp"
81 #include "runtime/vframeArray.hpp"
82 #include "utilities/copy.hpp"
83 #include "utilities/preserveException.hpp"
84
85
86 // For debugging purposes:
87 // To force FullGCALot inside a runtime function, add the following two lines
88 //
89 // Universe::release_fullgc_alot_dummy();
90 // Universe::heap()->collect();
91 //
92 // At command line specify the parameters: -XX:+FullGCALot -XX:FullGCALotStart=100000000
93
94
// Define the static blob/stub entry fields of OptoRuntime enumerated by the
// C2_STUBS_DO iterator macro. All fields start out nullptr and are filled in
// by OptoRuntime::generate() below.
#define C2_BLOB_FIELD_DEFINE(name, type) \
  type* OptoRuntime:: BLOB_FIELD_NAME(name) = nullptr;
#define C2_STUB_FIELD_NAME(name) _ ## name ## _Java
#define C2_STUB_FIELD_DEFINE(name, f, t, r) \
  address OptoRuntime:: C2_STUB_FIELD_NAME(name) = nullptr;
C2_STUBS_DO(C2_BLOB_FIELD_DEFINE, C2_STUB_FIELD_DEFINE)
#undef C2_BLOB_FIELD_DEFINE
#undef C2_STUB_FIELD_DEFINE
103
// This should be called in an assertion at the start of OptoRuntime routines
// which are entered from compiled code (all of them). It verifies that the
// thread's last frame is a runtime frame whose sender is compiled code.
#ifdef ASSERT
static bool check_compiled_frame(JavaThread* thread) {
  assert(thread->last_frame().is_runtime_frame(), "cannot call runtime directly from compiled code");
  RegisterMap reg_map(thread,
                      RegisterMap::UpdateMap::skip,
                      RegisterMap::ProcessFrames::include,
                      RegisterMap::WalkContinuation::skip);
  frame caller_frame = thread->last_frame().sender(&reg_map);
  assert(caller_frame.is_compiled_frame(), "not being called from compiled like code");
  return true;
}
#endif // ASSERT
118
119 /*
120 #define gen(env, var, type_func_gen, c_func, fancy_jump, pass_tls, return_pc) \
121 var = generate_stub(env, type_func_gen, CAST_FROM_FN_PTR(address, c_func), #var, fancy_jump, pass_tls, return_pc); \
122 if (var == nullptr) { return false; }
123 */
124
125 #define GEN_C2_BLOB(name, type) \
126 BLOB_FIELD_NAME(name) = \
127 generate_ ## name ## _blob(); \
128 if (BLOB_FIELD_NAME(name) == nullptr) { return false; }
129
130 // a few helper macros to conjure up generate_stub call arguments
131 #define C2_STUB_FIELD_NAME(name) _ ## name ## _Java
132 #define C2_STUB_TYPEFUNC(name) name ## _Type
133 #define C2_STUB_C_FUNC(name) CAST_FROM_FN_PTR(address, name ## _C)
134 #define C2_STUB_ID(name) StubId:: JOIN3(c2, name, id)
135 #define C2_STUB_NAME(name) stub_name(C2_STUB_ID(name))
136
137 // Almost all the C functions targeted from the generated stubs are
138 // implemented locally to OptoRuntime with names that can be generated
139 // from the stub name by appending suffix '_C'. However, in two cases
140 // a common target method also needs to be called from shared runtime
141 // stubs. In these two cases the opto stubs rely on method
142 // imlementations defined in class SharedRuntime. The following
143 // defines temporarily rebind the generated names to reference the
144 // relevant implementations.
145
146 #define GEN_C2_STUB(name, fancy_jump, pass_tls, pass_retpc ) \
147 C2_STUB_FIELD_NAME(name) = \
148 generate_stub(env, \
149 C2_STUB_TYPEFUNC(name), \
150 C2_STUB_C_FUNC(name), \
151 C2_STUB_NAME(name), \
152 C2_STUB_ID(name), \
153 fancy_jump, \
154 pass_tls, \
155 pass_retpc); \
156 if (C2_STUB_FIELD_NAME(name) == nullptr) { return false; } \
157
// Generate all C2 runtime blobs and Java-entry stubs enumerated by
// C2_STUBS_DO. Returns false on the first failed generation (the GEN_*
// macros each embed an early return) so the caller can abort C2 setup.
bool OptoRuntime::generate(ciEnv* env) {

  C2_STUBS_DO(GEN_C2_BLOB, GEN_C2_STUB)

  return true;
}

#undef GEN_C2_BLOB

#undef C2_STUB_FIELD_NAME
#undef C2_STUB_TYPEFUNC
#undef C2_STUB_C_FUNC
#undef C2_STUB_NAME
#undef GEN_C2_STUB

// #undef gen
174
// Cached TypeFunc signature singletons for the C2 runtime entry points.
// All start as nullptr; they are built by the make_*_Type() helpers below
// (or elsewhere in OptoRuntime) before use.
const TypeFunc* OptoRuntime::_new_instance_Type                   = nullptr;
const TypeFunc* OptoRuntime::_new_array_Type                      = nullptr;
const TypeFunc* OptoRuntime::_new_array_nozero_Type               = nullptr;
const TypeFunc* OptoRuntime::_multianewarray2_Type                = nullptr;
const TypeFunc* OptoRuntime::_multianewarray3_Type                = nullptr;
const TypeFunc* OptoRuntime::_multianewarray4_Type                = nullptr;
const TypeFunc* OptoRuntime::_multianewarray5_Type                = nullptr;
const TypeFunc* OptoRuntime::_multianewarrayN_Type                = nullptr;
const TypeFunc* OptoRuntime::_complete_monitor_enter_Type         = nullptr;
const TypeFunc* OptoRuntime::_complete_monitor_exit_Type          = nullptr;
const TypeFunc* OptoRuntime::_monitor_notify_Type                 = nullptr;
const TypeFunc* OptoRuntime::_uncommon_trap_Type                  = nullptr;
const TypeFunc* OptoRuntime::_athrow_Type                         = nullptr;
const TypeFunc* OptoRuntime::_rethrow_Type                        = nullptr;
const TypeFunc* OptoRuntime::_Math_D_D_Type                       = nullptr;
const TypeFunc* OptoRuntime::_Math_DD_D_Type                      = nullptr;
const TypeFunc* OptoRuntime::_modf_Type                           = nullptr;
const TypeFunc* OptoRuntime::_l2f_Type                            = nullptr;
const TypeFunc* OptoRuntime::_void_long_Type                      = nullptr;
const TypeFunc* OptoRuntime::_void_void_Type                      = nullptr;
const TypeFunc* OptoRuntime::_jfr_write_checkpoint_Type           = nullptr;
const TypeFunc* OptoRuntime::_flush_windows_Type                  = nullptr;
const TypeFunc* OptoRuntime::_fast_arraycopy_Type                 = nullptr;
const TypeFunc* OptoRuntime::_checkcast_arraycopy_Type            = nullptr;
const TypeFunc* OptoRuntime::_generic_arraycopy_Type              = nullptr;
const TypeFunc* OptoRuntime::_slow_arraycopy_Type                 = nullptr;
const TypeFunc* OptoRuntime::_unsafe_setmemory_Type               = nullptr;
const TypeFunc* OptoRuntime::_array_fill_Type                     = nullptr;
const TypeFunc* OptoRuntime::_array_sort_Type                     = nullptr;
const TypeFunc* OptoRuntime::_array_partition_Type                = nullptr;
const TypeFunc* OptoRuntime::_aescrypt_block_Type                 = nullptr;
const TypeFunc* OptoRuntime::_cipherBlockChaining_aescrypt_Type   = nullptr;
const TypeFunc* OptoRuntime::_electronicCodeBook_aescrypt_Type    = nullptr;
const TypeFunc* OptoRuntime::_counterMode_aescrypt_Type           = nullptr;
const TypeFunc* OptoRuntime::_galoisCounterMode_aescrypt_Type     = nullptr;
const TypeFunc* OptoRuntime::_digestBase_implCompress_with_sha3_Type      = nullptr;
const TypeFunc* OptoRuntime::_digestBase_implCompress_without_sha3_Type   = nullptr;
const TypeFunc* OptoRuntime::_digestBase_implCompressMB_with_sha3_Type    = nullptr;
const TypeFunc* OptoRuntime::_digestBase_implCompressMB_without_sha3_Type = nullptr;
const TypeFunc* OptoRuntime::_double_keccak_Type                  = nullptr;
const TypeFunc* OptoRuntime::_multiplyToLen_Type                  = nullptr;
const TypeFunc* OptoRuntime::_montgomeryMultiply_Type             = nullptr;
const TypeFunc* OptoRuntime::_montgomerySquare_Type               = nullptr;
const TypeFunc* OptoRuntime::_squareToLen_Type                    = nullptr;
const TypeFunc* OptoRuntime::_mulAdd_Type                         = nullptr;
const TypeFunc* OptoRuntime::_bigIntegerShift_Type                = nullptr;
const TypeFunc* OptoRuntime::_vectorizedMismatch_Type             = nullptr;
const TypeFunc* OptoRuntime::_ghash_processBlocks_Type            = nullptr;
const TypeFunc* OptoRuntime::_chacha20Block_Type                  = nullptr;
const TypeFunc* OptoRuntime::_kyberNtt_Type                       = nullptr;
const TypeFunc* OptoRuntime::_kyberInverseNtt_Type                = nullptr;
const TypeFunc* OptoRuntime::_kyberNttMult_Type                   = nullptr;
const TypeFunc* OptoRuntime::_kyberAddPoly_2_Type                 = nullptr;
const TypeFunc* OptoRuntime::_kyberAddPoly_3_Type                 = nullptr;
const TypeFunc* OptoRuntime::_kyber12To16_Type                    = nullptr;
const TypeFunc* OptoRuntime::_kyberBarrettReduce_Type             = nullptr;
const TypeFunc* OptoRuntime::_dilithiumAlmostNtt_Type             = nullptr;
const TypeFunc* OptoRuntime::_dilithiumAlmostInverseNtt_Type      = nullptr;
const TypeFunc* OptoRuntime::_dilithiumNttMult_Type               = nullptr;
const TypeFunc* OptoRuntime::_dilithiumMontMulByConstant_Type     = nullptr;
const TypeFunc* OptoRuntime::_dilithiumDecomposePoly_Type         = nullptr;
const TypeFunc* OptoRuntime::_base64_encodeBlock_Type             = nullptr;
const TypeFunc* OptoRuntime::_base64_decodeBlock_Type             = nullptr;
const TypeFunc* OptoRuntime::_string_IndexOf_Type                 = nullptr;
const TypeFunc* OptoRuntime::_poly1305_processBlocks_Type         = nullptr;
const TypeFunc* OptoRuntime::_intpoly_montgomeryMult_P256_Type    = nullptr;
const TypeFunc* OptoRuntime::_intpoly_assign_Type                 = nullptr;
const TypeFunc* OptoRuntime::_updateBytesCRC32_Type               = nullptr;
const TypeFunc* OptoRuntime::_updateBytesCRC32C_Type              = nullptr;
const TypeFunc* OptoRuntime::_updateBytesAdler32_Type             = nullptr;
const TypeFunc* OptoRuntime::_osr_end_Type                        = nullptr;
const TypeFunc* OptoRuntime::_register_finalizer_Type             = nullptr;
const TypeFunc* OptoRuntime::_vthread_transition_Type             = nullptr;
#if INCLUDE_JFR
const TypeFunc* OptoRuntime::_class_id_load_barrier_Type          = nullptr;
#endif // INCLUDE_JFR
const TypeFunc* OptoRuntime::_dtrace_method_entry_exit_Type       = nullptr;
const TypeFunc* OptoRuntime::_dtrace_object_alloc_Type            = nullptr;
253
254 // Helper method to do generation of RunTimeStub's
255 address OptoRuntime::generate_stub(ciEnv* env,
256 TypeFunc_generator gen, address C_function,
257 const char *name, StubId stub_id,
258 int is_fancy_jump, bool pass_tls,
259 bool return_pc) {
260
261 // Matching the default directive, we currently have no method to match.
262 DirectiveSet* directive = DirectivesStack::getDefaultDirective(CompileBroker::compiler(CompLevel_full_optimization));
263 CompilationMemoryStatisticMark cmsm(directive);
264 ResourceMark rm;
265 Compile C(env, gen, C_function, name, stub_id, is_fancy_jump, pass_tls, return_pc, directive);
266 DirectivesStack::release(directive);
267 return C.stub_entry_point();
268 }
269
270 const char* OptoRuntime::stub_name(address entry) {
271 #ifndef PRODUCT
272 CodeBlob* cb = CodeCache::find_blob(entry);
273 RuntimeStub* rs =(RuntimeStub *)cb;
274 assert(rs != nullptr && rs->is_runtime_stub(), "not a runtime stub");
275 return rs->name();
276 #else
277 // Fast implementation for product mode (maybe it should be inlined too)
278 return "runtime stub";
279 #endif
280 }
281
// local methods passed as arguments to stub generator that forward
// control to corresponding JRT methods of SharedRuntime

// Thin forwarding wrapper: the C2 slow-path arraycopy stub targets this,
// which delegates unchanged to SharedRuntime::slow_arraycopy_C.
void OptoRuntime::slow_arraycopy_C(oopDesc* src, jint src_pos,
                                   oopDesc* dest, jint dest_pos,
                                   jint length, JavaThread* thread) {
  SharedRuntime::slow_arraycopy_C(src, src_pos, dest, dest_pos, length, thread);
}
290
// Thin forwarding wrapper: delegates the monitor-enter slow path unchanged
// to SharedRuntime::complete_monitor_locking_C.
void OptoRuntime::complete_monitor_locking_C(oopDesc* obj, BasicLock* lock, JavaThread* current) {
  SharedRuntime::complete_monitor_locking_C(obj, lock, current);
}
294
295
296 //=============================================================================
297 // Opto compiler runtime routines
298 //=============================================================================
299
300
301 //=============================allocation======================================
302 // We failed the fast-path allocation. Now we need to do a scavenge or GC
303 // and try allocation again.
304
// object allocation
// Slow-path instance allocation, entered after the compiled fast path
// failed. May run class initialization, may GC, may raise an exception.
// The result is handed back via thread-local storage, not a C return value.
JRT_BLOCK_ENTRY(void, OptoRuntime::new_instance_C(Klass* klass, JavaThread* current))
  JRT_BLOCK;
#ifndef PRODUCT
  SharedRuntime::_new_instance_ctr++;  // new instance requires GC
#endif
  assert(check_compiled_frame(current), "incorrect caller");

  // These checks are cheap to make and support reflective allocation.
  int lh = klass->layout_helper();
  if (Klass::layout_helper_needs_slow_path(lh) || !InstanceKlass::cast(klass)->is_initialized()) {
    Handle holder(current, klass->klass_holder()); // keep the klass alive
    klass->check_valid_for_instantiation(false, THREAD);
    if (!HAS_PENDING_EXCEPTION) {
      InstanceKlass::cast(klass)->initialize(THREAD);
    }
  }

  if (!HAS_PENDING_EXCEPTION) {
    // Scavenge and allocate an instance.
    Handle holder(current, klass->klass_holder()); // keep the klass alive
    oop result = InstanceKlass::cast(klass)->allocate_instance(THREAD);
    current->set_vm_result_oop(result);

    // Pass oops back through thread local storage. Our apparent type to Java
    // is that we return an oop, but we can block on exit from this routine and
    // a GC can trash the oop in C's return register. The generated stub will
    // fetch the oop from TLS after any possible GC.
  }

  // Deoptimize the caller frame when an exception is pending.
  deoptimize_caller_frame(current, HAS_PENDING_EXCEPTION);
  JRT_BLOCK_END;

  // inform GC that we won't do card marks for initializing writes.
  SharedRuntime::on_slowpath_allocation_exit(current);
JRT_END
341
342
// array allocation
// Slow-path array allocation. For null-free object arrays with a non-null
// init_val, every element is initialized to init_val. May GC, may throw.
JRT_BLOCK_ENTRY(void, OptoRuntime::new_array_C(Klass* array_type, int len, oopDesc* init_val, JavaThread* current))
  JRT_BLOCK;
#ifndef PRODUCT
  SharedRuntime::_new_array_ctr++;  // new array requires GC
#endif
  assert(check_compiled_frame(current), "incorrect caller");

  // Scavenge and allocate an instance.
  oop result;
  Handle h_init_val(current, init_val); // keep the init_val object alive

  if (array_type->is_typeArray_klass()) {
    // The oopFactory likes to work with the element type.
    // (We could bypass the oopFactory, since it doesn't add much value.)
    BasicType elem_type = TypeArrayKlass::cast(array_type)->element_type();
    result = oopFactory::new_typeArray(elem_type, len, THREAD);
  } else {
    Handle holder(current, array_type->klass_holder()); // keep the array klass alive
    ObjArrayKlass* oak = ObjArrayKlass::cast(array_type);
    result = oopFactory::new_objArray(oak->element_klass(), len, oak->properties(), THREAD);
    if (!HAS_PENDING_EXCEPTION && array_type->is_null_free_array_klass() && !h_init_val.is_null()) {
      // Null-free arrays need to be initialized
#ifdef ASSERT
      ObjArrayKlass* result_oak = ObjArrayKlass::cast(result->klass());
      assert(result_oak->is_null_free_array_klass(), "Sanity check");
#endif
      for (int i = 0; i < len; i++) {
        ((objArrayOop)result)->obj_at_put(i, h_init_val());
      }
    }
  }

  // Pass oops back through thread local storage. Our apparent type to Java
  // is that we return an oop, but we can block on exit from this routine and
  // a GC can trash the oop in C's return register. The generated stub will
  // fetch the oop from TLS after any possible GC.
  deoptimize_caller_frame(current, HAS_PENDING_EXCEPTION);
  current->set_vm_result_oop(result);
  JRT_BLOCK_END;

  // inform GC that we won't do card marks for initializing writes.
  SharedRuntime::on_slowpath_allocation_exit(current);
JRT_END
387
// array allocation without zeroing
// Variant used when the compiled caller promises to initialize the array
// body itself. If the caller has since been deoptimized, the array is
// zeroed here after all — presumably because the re-executing code cannot
// be relied on to fill it (TODO confirm against the deopt path).
JRT_BLOCK_ENTRY(void, OptoRuntime::new_array_nozero_C(Klass* array_type, int len, JavaThread* current))
  JRT_BLOCK;
#ifndef PRODUCT
  SharedRuntime::_new_array_ctr++;  // new array requires GC
#endif
  assert(check_compiled_frame(current), "incorrect caller");

  // Scavenge and allocate an instance.
  oop result;

  assert(array_type->is_typeArray_klass(), "should be called only for type array");
  // The oopFactory likes to work with the element type.
  BasicType elem_type = TypeArrayKlass::cast(array_type)->element_type();
  result = oopFactory::new_typeArray_nozero(elem_type, len, THREAD);

  // Pass oops back through thread local storage. Our apparent type to Java
  // is that we return an oop, but we can block on exit from this routine and
  // a GC can trash the oop in C's return register. The generated stub will
  // fetch the oop from TLS after any possible GC.
  deoptimize_caller_frame(current, HAS_PENDING_EXCEPTION);
  current->set_vm_result_oop(result);
  JRT_BLOCK_END;


  // inform GC that we won't do card marks for initializing writes.
  SharedRuntime::on_slowpath_allocation_exit(current);

  oop result = current->vm_result_oop();
  if ((len > 0) && (result != nullptr) &&
      is_deoptimized_caller_frame(current)) {
    // Zero array here if the caller is deoptimized.
    const size_t size = TypeArrayKlass::cast(array_type)->oop_size(result);
    BasicType elem_type = TypeArrayKlass::cast(array_type)->element_type();
    size_t hs_bytes = arrayOopDesc::base_offset_in_bytes(elem_type);
    assert(is_aligned(hs_bytes, BytesPerInt), "must be 4 byte aligned");
    HeapWord* obj = cast_from_oop<HeapWord*>(result);
    // If the header size is only 4-byte aligned, zero the first jint of the
    // body by hand so the remaining fill can run on 8-byte-aligned words.
    if (!is_aligned(hs_bytes, BytesPerLong)) {
      *reinterpret_cast<jint*>(reinterpret_cast<char*>(obj) + hs_bytes) = 0;
      hs_bytes += BytesPerInt;
    }

    // Optimized zeroing.
    assert(is_aligned(hs_bytes, BytesPerLong), "must be 8-byte aligned");
    const size_t aligned_hs = hs_bytes / BytesPerLong;
    Copy::fill_to_aligned_words(obj+aligned_hs, size-aligned_hs);
  }

JRT_END
437
438 // Note: multianewarray for one dimension is handled inline by GraphKit::new_array.
439
// multianewarray for 2 dimensions
JRT_ENTRY(void, OptoRuntime::multianewarray2_C(Klass* elem_type, int len1, int len2, JavaThread* current))
#ifndef PRODUCT
  SharedRuntime::_multi2_ctr++;  // multianewarray for 2 dimensions
#endif
  assert(check_compiled_frame(current), "incorrect caller");
  assert(elem_type->is_klass(), "not a class");
  jint dims[2];
  dims[0] = len1;
  dims[1] = len2;
  Handle holder(current, elem_type->klass_holder()); // keep the klass alive
  oop obj = ArrayKlass::cast(elem_type)->multi_allocate(2, dims, THREAD);
  // Deoptimize the caller frame when allocation raised an exception;
  // the result comes back through thread-local storage.
  deoptimize_caller_frame(current, HAS_PENDING_EXCEPTION);
  current->set_vm_result_oop(obj);
JRT_END
455
// multianewarray for 3 dimensions
JRT_ENTRY(void, OptoRuntime::multianewarray3_C(Klass* elem_type, int len1, int len2, int len3, JavaThread* current))
#ifndef PRODUCT
  SharedRuntime::_multi3_ctr++;  // multianewarray for 3 dimensions
#endif
  assert(check_compiled_frame(current), "incorrect caller");
  assert(elem_type->is_klass(), "not a class");
  jint dims[3];
  dims[0] = len1;
  dims[1] = len2;
  dims[2] = len3;
  Handle holder(current, elem_type->klass_holder()); // keep the klass alive
  oop obj = ArrayKlass::cast(elem_type)->multi_allocate(3, dims, THREAD);
  // Deoptimize the caller frame when allocation raised an exception.
  deoptimize_caller_frame(current, HAS_PENDING_EXCEPTION);
  current->set_vm_result_oop(obj);
JRT_END
472
// multianewarray for 4 dimensions
JRT_ENTRY(void, OptoRuntime::multianewarray4_C(Klass* elem_type, int len1, int len2, int len3, int len4, JavaThread* current))
#ifndef PRODUCT
  SharedRuntime::_multi4_ctr++;  // multianewarray for 4 dimensions
#endif
  assert(check_compiled_frame(current), "incorrect caller");
  assert(elem_type->is_klass(), "not a class");
  jint dims[4];
  dims[0] = len1;
  dims[1] = len2;
  dims[2] = len3;
  dims[3] = len4;
  Handle holder(current, elem_type->klass_holder()); // keep the klass alive
  oop obj = ArrayKlass::cast(elem_type)->multi_allocate(4, dims, THREAD);
  // Deoptimize the caller frame when allocation raised an exception.
  deoptimize_caller_frame(current, HAS_PENDING_EXCEPTION);
  current->set_vm_result_oop(obj);
JRT_END
490
// multianewarray for 5 dimensions
JRT_ENTRY(void, OptoRuntime::multianewarray5_C(Klass* elem_type, int len1, int len2, int len3, int len4, int len5, JavaThread* current))
#ifndef PRODUCT
  SharedRuntime::_multi5_ctr++;  // multianewarray for 5 dimensions
#endif
  assert(check_compiled_frame(current), "incorrect caller");
  assert(elem_type->is_klass(), "not a class");
  jint dims[5];
  dims[0] = len1;
  dims[1] = len2;
  dims[2] = len3;
  dims[3] = len4;
  dims[4] = len5;
  Handle holder(current, elem_type->klass_holder()); // keep the klass alive
  oop obj = ArrayKlass::cast(elem_type)->multi_allocate(5, dims, THREAD);
  // Deoptimize the caller frame when allocation raised an exception.
  deoptimize_caller_frame(current, HAS_PENDING_EXCEPTION);
  current->set_vm_result_oop(obj);
JRT_END
509
// multianewarray for an arbitrary number of dimensions: the lengths arrive
// as a Java int array rather than as individual arguments.
JRT_ENTRY(void, OptoRuntime::multianewarrayN_C(Klass* elem_type, arrayOopDesc* dims, JavaThread* current))
  assert(check_compiled_frame(current), "incorrect caller");
  assert(elem_type->is_klass(), "not a class");
  assert(oop(dims)->is_typeArray(), "not an array");

  ResourceMark rm;
  jint len = dims->length();
  assert(len > 0, "Dimensions array should contain data");
  // Copy the dimension lengths out of the Java heap into a native buffer,
  // since multi_allocate takes a jint*.
  jint *c_dims = NEW_RESOURCE_ARRAY(jint, len);
  ArrayAccess<>::arraycopy_to_native<>(dims, typeArrayOopDesc::element_offset<jint>(0),
                                       c_dims, len);

  Handle holder(current, elem_type->klass_holder()); // keep the klass alive
  oop obj = ArrayKlass::cast(elem_type)->multi_allocate(len, c_dims, THREAD);
  // Deoptimize the caller frame when allocation raised an exception.
  deoptimize_caller_frame(current, HAS_PENDING_EXCEPTION);
  current->set_vm_result_oop(obj);
JRT_END
527
// Slow-path target for Object.notify() from compiled code. Tries a
// transition-free quick notify first; only falls into the JRT block
// (with its thread-state transition) when that is not possible.
JRT_BLOCK_ENTRY(void, OptoRuntime::monitor_notify_C(oopDesc* obj, JavaThread* current))

  // Very few notify/notifyAll operations find any threads on the waitset, so
  // the dominant fast-path is to simply return.
  // Relatedly, it's critical that notify/notifyAll be fast in order to
  // reduce lock hold times.
  if (!SafepointSynchronize::is_synchronizing()) {
    if (ObjectSynchronizer::quick_notify(obj, current, false)) {
      return;
    }
  }

  // This is the case the fast-path above isn't provisioned to handle.
  // The fast-path is designed to handle frequently arising cases in an efficient manner.
  // (The fast-path is just a degenerate variant of the slow-path).
  // Perform the dreaded state transition and pass control into the slow-path.
  JRT_BLOCK;
  Handle h_obj(current, obj);
  ObjectSynchronizer::notify(h_obj, CHECK);
  JRT_BLOCK_END;
JRT_END
549
// Slow-path target for Object.notifyAll() from compiled code; same shape
// as monitor_notify_C but with the all=true variants.
JRT_BLOCK_ENTRY(void, OptoRuntime::monitor_notifyAll_C(oopDesc* obj, JavaThread* current))

  // Fast path: try to notify without a thread-state transition.
  if (!SafepointSynchronize::is_synchronizing() ) {
    if (ObjectSynchronizer::quick_notify(obj, current, true)) {
      return;
    }
  }

  // This is the case the fast-path above isn't provisioned to handle.
  // The fast-path is designed to handle frequently arising cases in an efficient manner.
  // (The fast-path is just a degenerate variant of the slow-path).
  // Perform the dreaded state transition and pass control into the slow-path.
  JRT_BLOCK;
  Handle h_obj(current, obj);
  ObjectSynchronizer::notifyall(h_obj, CHECK);
  JRT_BLOCK_END;
JRT_END
567
// Ends the first transition of a virtual thread.
// NOTE(review): the is_mount parameter is unused — the call hard-codes
// is_mount=true and is_thread_start=true; presumably this stub is only
// emitted for the initial mount at virtual-thread start. Confirm.
JRT_ENTRY(void, OptoRuntime::vthread_end_first_transition_C(oopDesc* vt, jboolean is_mount, JavaThread* current))
  MountUnmountDisabler::end_transition(current, vt, true /*is_mount*/, true /*is_thread_start*/);
JRT_END
571
// Clears the in-transition flags on the java.lang.Thread oop and on the
// carrier JavaThread, then starts the virtual thread's final transition.
// NOTE(review): the is_mount parameter is unused — the call hard-codes
// is_mount=false and is_thread_end=true; presumably this is the final
// unmount at virtual-thread termination. Confirm.
JRT_ENTRY(void, OptoRuntime::vthread_start_final_transition_C(oopDesc* vt, jboolean is_mount, JavaThread* current))
  java_lang_Thread::set_is_in_vthread_transition(vt, false);
  current->set_is_in_vthread_transition(false);
  MountUnmountDisabler::start_transition(current, vt, false /*is_mount */, true /*is_thread_end*/);
JRT_END
577
// Clears the in-transition flags on the java.lang.Thread oop and on the
// carrier JavaThread, then starts a mount or unmount transition as
// selected by is_mount.
JRT_ENTRY(void, OptoRuntime::vthread_start_transition_C(oopDesc* vt, jboolean is_mount, JavaThread* current))
  java_lang_Thread::set_is_in_vthread_transition(vt, false);
  current->set_is_in_vthread_transition(false);
  MountUnmountDisabler::start_transition(current, vt, is_mount, false /*is_thread_end*/);
JRT_END
583
// Ends a mount or unmount transition (as selected by is_mount) for
// virtual thread 'vt' on carrier 'current'.
JRT_ENTRY(void, OptoRuntime::vthread_end_transition_C(oopDesc* vt, jboolean is_mount, JavaThread* current))
  MountUnmountDisabler::end_transition(current, vt, is_mount, false /*is_thread_start*/);
JRT_END
587
588 static const TypeFunc* make_new_instance_Type() {
589 // create input type (domain)
590 const Type **fields = TypeTuple::fields(1);
591 fields[TypeFunc::Parms+0] = TypeInstPtr::NOTNULL; // Klass to be allocated
592 const TypeTuple *domain = TypeTuple::make(TypeFunc::Parms+1, fields);
593
594 // create result type (range)
595 fields = TypeTuple::fields(1);
596 fields[TypeFunc::Parms+0] = TypeRawPtr::NOTNULL; // Returned oop
597
598 const TypeTuple *range = TypeTuple::make(TypeFunc::Parms+1, fields);
599
600 return TypeFunc::make(domain, range);
601 }
602
603 static const TypeFunc* make_vthread_transition_Type() {
604 // create input type (domain)
605 const Type **fields = TypeTuple::fields(2);
606 fields[TypeFunc::Parms+0] = TypeInstPtr::NOTNULL; // VirtualThread oop
607 fields[TypeFunc::Parms+1] = TypeInt::BOOL; // jboolean
608 const TypeTuple *domain = TypeTuple::make(TypeFunc::Parms+2,fields);
609
610 // no result type needed
611 fields = TypeTuple::fields(1);
612 fields[TypeFunc::Parms+0] = nullptr; // void
613 const TypeTuple* range = TypeTuple::make(TypeFunc::Parms, fields);
614
615 return TypeFunc::make(domain,range);
616 }
617
618 static const TypeFunc* make_athrow_Type() {
619 // create input type (domain)
620 const Type **fields = TypeTuple::fields(1);
621 fields[TypeFunc::Parms+0] = TypeInstPtr::NOTNULL; // Klass to be allocated
622 const TypeTuple *domain = TypeTuple::make(TypeFunc::Parms+1, fields);
623
624 // create result type (range)
625 fields = TypeTuple::fields(0);
626
627 const TypeTuple *range = TypeTuple::make(TypeFunc::Parms+0, fields);
628
629 return TypeFunc::make(domain, range);
630 }
631
632 static const TypeFunc* make_new_array_Type() {
633 // create input type (domain)
634 const Type **fields = TypeTuple::fields(3);
635 fields[TypeFunc::Parms+0] = TypeInstPtr::NOTNULL; // element klass
636 fields[TypeFunc::Parms+1] = TypeInt::INT; // array size
637 fields[TypeFunc::Parms+2] = TypeInstPtr::NOTNULL; // init value
638 const TypeTuple *domain = TypeTuple::make(TypeFunc::Parms+3, fields);
639
640 // create result type (range)
641 fields = TypeTuple::fields(1);
642 fields[TypeFunc::Parms+0] = TypeRawPtr::NOTNULL; // Returned oop
643
644 const TypeTuple *range = TypeTuple::make(TypeFunc::Parms+1, fields);
645
646 return TypeFunc::make(domain, range);
647 }
648
649 static const TypeFunc* make_new_array_nozero_Type() {
650 // create input type (domain)
651 const Type **fields = TypeTuple::fields(2);
652 fields[TypeFunc::Parms+0] = TypeInstPtr::NOTNULL; // element klass
653 fields[TypeFunc::Parms+1] = TypeInt::INT; // array size
654 const TypeTuple *domain = TypeTuple::make(TypeFunc::Parms+2, fields);
655
656 // create result type (range)
657 fields = TypeTuple::fields(1);
658 fields[TypeFunc::Parms+0] = TypeRawPtr::NOTNULL; // Returned oop
659
660 const TypeTuple *range = TypeTuple::make(TypeFunc::Parms+1, fields);
661
662 return TypeFunc::make(domain, range);
663 }
664
665 const TypeFunc* OptoRuntime::multianewarray_Type(int ndim) {
666 // create input type (domain)
667 const int nargs = ndim + 1;
668 const Type **fields = TypeTuple::fields(nargs);
669 fields[TypeFunc::Parms+0] = TypeInstPtr::NOTNULL; // element klass
670 for( int i = 1; i < nargs; i++ )
671 fields[TypeFunc::Parms + i] = TypeInt::INT; // array size
672 const TypeTuple *domain = TypeTuple::make(TypeFunc::Parms+nargs, fields);
673
674 // create result type (range)
675 fields = TypeTuple::fields(1);
676 fields[TypeFunc::Parms+0] = TypeRawPtr::NOTNULL; // Returned oop
677 const TypeTuple *range = TypeTuple::make(TypeFunc::Parms+1, fields);
678
679 return TypeFunc::make(domain, range);
680 }
681
682 static const TypeFunc* make_multianewarrayN_Type() {
683 // create input type (domain)
684 const Type **fields = TypeTuple::fields(2);
685 fields[TypeFunc::Parms+0] = TypeInstPtr::NOTNULL; // element klass
686 fields[TypeFunc::Parms+1] = TypeInstPtr::NOTNULL; // array of dim sizes
687 const TypeTuple *domain = TypeTuple::make(TypeFunc::Parms+2, fields);
688
689 // create result type (range)
690 fields = TypeTuple::fields(1);
691 fields[TypeFunc::Parms+0] = TypeRawPtr::NOTNULL; // Returned oop
692 const TypeTuple *range = TypeTuple::make(TypeFunc::Parms+1, fields);
693
694 return TypeFunc::make(domain, range);
695 }
696
697 static const TypeFunc* make_uncommon_trap_Type() {
698 // create input type (domain)
699 const Type **fields = TypeTuple::fields(1);
700 fields[TypeFunc::Parms+0] = TypeInt::INT; // trap_reason (deopt reason and action)
701 const TypeTuple *domain = TypeTuple::make(TypeFunc::Parms+1, fields);
702
703 // create result type (range)
704 fields = TypeTuple::fields(0);
705 const TypeTuple *range = TypeTuple::make(TypeFunc::Parms+0, fields);
706
707 return TypeFunc::make(domain, range);
708 }
709
710 //-----------------------------------------------------------------------------
711 // Monitor Handling
712
713 static const TypeFunc* make_complete_monitor_enter_Type() {
714 // create input type (domain)
715 const Type **fields = TypeTuple::fields(2);
716 fields[TypeFunc::Parms+0] = TypeInstPtr::NOTNULL; // Object to be Locked
717 fields[TypeFunc::Parms+1] = TypeRawPtr::BOTTOM; // Address of stack location for lock
718 const TypeTuple *domain = TypeTuple::make(TypeFunc::Parms+2,fields);
719
720 // create result type (range)
721 fields = TypeTuple::fields(0);
722
723 const TypeTuple *range = TypeTuple::make(TypeFunc::Parms+0,fields);
724
725 return TypeFunc::make(domain, range);
726 }
727
728 //-----------------------------------------------------------------------------
729
730 static const TypeFunc* make_complete_monitor_exit_Type() {
731 // create input type (domain)
732 const Type **fields = TypeTuple::fields(3);
733 fields[TypeFunc::Parms+0] = TypeInstPtr::NOTNULL; // Object to be Locked
734 fields[TypeFunc::Parms+1] = TypeRawPtr::BOTTOM; // Address of stack location for lock - BasicLock
735 fields[TypeFunc::Parms+2] = TypeRawPtr::BOTTOM; // Thread pointer (Self)
736 const TypeTuple *domain = TypeTuple::make(TypeFunc::Parms+3, fields);
737
738 // create result type (range)
739 fields = TypeTuple::fields(0);
740
741 const TypeTuple *range = TypeTuple::make(TypeFunc::Parms+0, fields);
742
743 return TypeFunc::make(domain, range);
744 }
745
746 static const TypeFunc* make_monitor_notify_Type() {
747 // create input type (domain)
748 const Type **fields = TypeTuple::fields(1);
749 fields[TypeFunc::Parms+0] = TypeInstPtr::NOTNULL; // Object to be Locked
750 const TypeTuple *domain = TypeTuple::make(TypeFunc::Parms+1, fields);
751
752 // create result type (range)
753 fields = TypeTuple::fields(0);
754 const TypeTuple *range = TypeTuple::make(TypeFunc::Parms+0, fields);
755 return TypeFunc::make(domain, range);
756 }
757
758 static const TypeFunc* make_flush_windows_Type() {
759 // create input type (domain)
760 const Type** fields = TypeTuple::fields(1);
761 fields[TypeFunc::Parms+0] = nullptr; // void
762 const TypeTuple *domain = TypeTuple::make(TypeFunc::Parms, fields);
763
764 // create result type
765 fields = TypeTuple::fields(1);
766 fields[TypeFunc::Parms+0] = nullptr; // void
767 const TypeTuple *range = TypeTuple::make(TypeFunc::Parms, fields);
768
769 return TypeFunc::make(domain, range);
770 }
771
772 static const TypeFunc* make_l2f_Type() {
773 // create input type (domain)
774 const Type **fields = TypeTuple::fields(2);
775 fields[TypeFunc::Parms+0] = TypeLong::LONG;
776 fields[TypeFunc::Parms+1] = Type::HALF;
777 const TypeTuple *domain = TypeTuple::make(TypeFunc::Parms+2, fields);
778
779 // create result type (range)
780 fields = TypeTuple::fields(1);
781 fields[TypeFunc::Parms+0] = Type::FLOAT;
782 const TypeTuple *range = TypeTuple::make(TypeFunc::Parms+1, fields);
783
784 return TypeFunc::make(domain, range);
785 }
786
787 static const TypeFunc* make_modf_Type() {
788 const Type **fields = TypeTuple::fields(2);
789 fields[TypeFunc::Parms+0] = Type::FLOAT;
790 fields[TypeFunc::Parms+1] = Type::FLOAT;
791 const TypeTuple *domain = TypeTuple::make(TypeFunc::Parms+2, fields);
792
793 // create result type (range)
794 fields = TypeTuple::fields(1);
795 fields[TypeFunc::Parms+0] = Type::FLOAT;
796
797 const TypeTuple *range = TypeTuple::make(TypeFunc::Parms+1, fields);
798
799 return TypeFunc::make(domain, range);
800 }
801
802 static const TypeFunc* make_Math_D_D_Type() {
803 // create input type (domain)
804 const Type **fields = TypeTuple::fields(2);
805 // Symbol* name of class to be loaded
806 fields[TypeFunc::Parms+0] = Type::DOUBLE;
807 fields[TypeFunc::Parms+1] = Type::HALF;
808 const TypeTuple *domain = TypeTuple::make(TypeFunc::Parms+2, fields);
809
810 // create result type (range)
811 fields = TypeTuple::fields(2);
812 fields[TypeFunc::Parms+0] = Type::DOUBLE;
813 fields[TypeFunc::Parms+1] = Type::HALF;
814 const TypeTuple *range = TypeTuple::make(TypeFunc::Parms+2, fields);
815
816 return TypeFunc::make(domain, range);
817 }
818
819 const TypeFunc* OptoRuntime::Math_Vector_Vector_Type(uint num_arg, const TypeVect* in_type, const TypeVect* out_type) {
820 // create input type (domain)
821 const Type **fields = TypeTuple::fields(num_arg);
822 // Symbol* name of class to be loaded
823 assert(num_arg > 0, "must have at least 1 input");
824 for (uint i = 0; i < num_arg; i++) {
825 fields[TypeFunc::Parms+i] = in_type;
826 }
827 const TypeTuple *domain = TypeTuple::make(TypeFunc::Parms+num_arg, fields);
828
829 // create result type (range)
830 const uint num_ret = 1;
831 fields = TypeTuple::fields(num_ret);
832 fields[TypeFunc::Parms+0] = out_type;
833 const TypeTuple *range = TypeTuple::make(TypeFunc::Parms+num_ret, fields);
834
835 return TypeFunc::make(domain, range);
836 }
837
838 static const TypeFunc* make_Math_DD_D_Type() {
839 const Type **fields = TypeTuple::fields(4);
840 fields[TypeFunc::Parms+0] = Type::DOUBLE;
841 fields[TypeFunc::Parms+1] = Type::HALF;
842 fields[TypeFunc::Parms+2] = Type::DOUBLE;
843 fields[TypeFunc::Parms+3] = Type::HALF;
844 const TypeTuple *domain = TypeTuple::make(TypeFunc::Parms+4, fields);
845
846 // create result type (range)
847 fields = TypeTuple::fields(2);
848 fields[TypeFunc::Parms+0] = Type::DOUBLE;
849 fields[TypeFunc::Parms+1] = Type::HALF;
850 const TypeTuple *range = TypeTuple::make(TypeFunc::Parms+2, fields);
851
852 return TypeFunc::make(domain, range);
853 }
854
855 //-------------- currentTimeMillis, currentTimeNanos, etc
856
857 static const TypeFunc* make_void_long_Type() {
858 // create input type (domain)
859 const Type **fields = TypeTuple::fields(0);
860 const TypeTuple *domain = TypeTuple::make(TypeFunc::Parms+0, fields);
861
862 // create result type (range)
863 fields = TypeTuple::fields(2);
864 fields[TypeFunc::Parms+0] = TypeLong::LONG;
865 fields[TypeFunc::Parms+1] = Type::HALF;
866 const TypeTuple *range = TypeTuple::make(TypeFunc::Parms+2, fields);
867
868 return TypeFunc::make(domain, range);
869 }
870
871 static const TypeFunc* make_void_void_Type() {
872 // create input type (domain)
873 const Type **fields = TypeTuple::fields(0);
874 const TypeTuple *domain = TypeTuple::make(TypeFunc::Parms+0, fields);
875
876 // create result type (range)
877 fields = TypeTuple::fields(0);
878 const TypeTuple *range = TypeTuple::make(TypeFunc::Parms+0, fields);
879 return TypeFunc::make(domain, range);
880 }
881
882 static const TypeFunc* make_jfr_write_checkpoint_Type() {
883 // create input type (domain)
884 const Type **fields = TypeTuple::fields(0);
885 const TypeTuple *domain = TypeTuple::make(TypeFunc::Parms, fields);
886
887 // create result type (range)
888 fields = TypeTuple::fields(1);
889 fields[TypeFunc::Parms] = TypeInstPtr::BOTTOM;
890 const TypeTuple *range = TypeTuple::make(TypeFunc::Parms + 1, fields);
891 return TypeFunc::make(domain, range);
892 }
893
894
895 // Takes as parameters:
896 // void *dest
897 // long size
898 // uchar byte
899
// Signature of the setMemory stub: (dest, size, bytevalue) -> void.
// On LP64 the size_t 'size' occupies two slots (value + HALF), so the
// argument count is platform-dependent; argp tracks the next free slot.
static const TypeFunc* make_setmemory_Type() {
  // create input type (domain)
  int argcnt = NOT_LP64(3) LP64_ONLY(4);
  const Type** fields = TypeTuple::fields(argcnt);
  int argp = TypeFunc::Parms;
  fields[argp++] = TypePtr::NOTNULL; // dest
  fields[argp++] = TypeX_X; // size
  LP64_ONLY(fields[argp++] = Type::HALF); // size: second (high) slot on 64-bit
  fields[argp++] = TypeInt::UBYTE; // bytevalue
  assert(argp == TypeFunc::Parms+argcnt, "correct decoding");
  const TypeTuple* domain = TypeTuple::make(TypeFunc::Parms+argcnt, fields);

  // no result type needed
  fields = TypeTuple::fields(1);
  fields[TypeFunc::Parms+0] = nullptr; // void
  const TypeTuple* range = TypeTuple::make(TypeFunc::Parms, fields);
  return TypeFunc::make(domain, range);
}
918
919 // arraycopy stub variations:
920 enum ArrayCopyType {
921 ac_fast, // void(ptr, ptr, size_t)
922 ac_checkcast, // int(ptr, ptr, size_t, size_t, ptr)
923 ac_slow, // void(ptr, int, ptr, int, int)
924 ac_generic // int(ptr, int, ptr, int, int)
925 };
926
// Builds the C2 signature for an arraycopy stub variant (see ArrayCopyType).
// ac_fast/ac_checkcast pass size_t lengths (each taking an extra HALF slot
// on LP64); ac_slow/ac_generic pass Java-level int positions and length.
// Only ac_checkcast and ac_generic have an int status result.
static const TypeFunc* make_arraycopy_Type(ArrayCopyType act) {
  // create input type (domain)
  int num_args = (act == ac_fast ? 3 : 5);
  // Number of size_t-typed length arguments (0 means Java-level int args).
  int num_size_args = (act == ac_fast ? 1 : act == ac_checkcast ? 2 : 0);
  int argcnt = num_args;
  LP64_ONLY(argcnt += num_size_args); // halfwords for lengths
  const Type** fields = TypeTuple::fields(argcnt);
  int argp = TypeFunc::Parms;
  fields[argp++] = TypePtr::NOTNULL; // src
  if (num_size_args == 0) {
    fields[argp++] = TypeInt::INT; // src_pos
  }
  fields[argp++] = TypePtr::NOTNULL; // dest
  if (num_size_args == 0) {
    fields[argp++] = TypeInt::INT; // dest_pos
    fields[argp++] = TypeInt::INT; // length
  }
  while (num_size_args-- > 0) {
    fields[argp++] = TypeX_X; // size in whatevers (size_t)
    LP64_ONLY(fields[argp++] = Type::HALF); // other half of long length
  }
  if (act == ac_checkcast) {
    fields[argp++] = TypePtr::NOTNULL; // super_klass
  }
  assert(argp == TypeFunc::Parms+argcnt, "correct decoding of act");
  const TypeTuple* domain = TypeTuple::make(TypeFunc::Parms+argcnt, fields);

  // create result type if needed
  int retcnt = (act == ac_checkcast || act == ac_generic ? 1 : 0);
  fields = TypeTuple::fields(1);
  if (retcnt == 0)
    fields[TypeFunc::Parms+0] = nullptr; // void
  else
    fields[TypeFunc::Parms+0] = TypeInt::INT; // status result, if needed
  const TypeTuple* range = TypeTuple::make(TypeFunc::Parms+retcnt, fields);
  return TypeFunc::make(domain, range);
}
964
// Signature of the array-fill stubs: (ptr, int value, size_t count) -> void.
// The size_t count takes an extra HALF slot on LP64.
static const TypeFunc* make_array_fill_Type() {
  const Type** fields;
  int argp = TypeFunc::Parms;
  // create input type (domain): pointer, int, size_t
  fields = TypeTuple::fields(3 LP64_ONLY( + 1));
  fields[argp++] = TypePtr::NOTNULL;
  fields[argp++] = TypeInt::INT;
  fields[argp++] = TypeX_X; // size in whatevers (size_t)
  LP64_ONLY(fields[argp++] = Type::HALF); // other half of long length
  // Note: argp already equals Parms + argcnt here, so it doubles as the count.
  const TypeTuple *domain = TypeTuple::make(argp, fields);

  // create result type
  fields = TypeTuple::fields(1);
  fields[TypeFunc::Parms+0] = nullptr; // void
  const TypeTuple *range = TypeTuple::make(TypeFunc::Parms, fields);

  return TypeFunc::make(domain, range);
}
983
984 static const TypeFunc* make_array_partition_Type() {
985 // create input type (domain)
986 int num_args = 7;
987 int argcnt = num_args;
988 const Type** fields = TypeTuple::fields(argcnt);
989 int argp = TypeFunc::Parms;
990 fields[argp++] = TypePtr::NOTNULL; // array
991 fields[argp++] = TypeInt::INT; // element type
992 fields[argp++] = TypeInt::INT; // low
993 fields[argp++] = TypeInt::INT; // end
994 fields[argp++] = TypePtr::NOTNULL; // pivot_indices (int array)
995 fields[argp++] = TypeInt::INT; // indexPivot1
996 fields[argp++] = TypeInt::INT; // indexPivot2
997 assert(argp == TypeFunc::Parms+argcnt, "correct decoding");
998 const TypeTuple* domain = TypeTuple::make(TypeFunc::Parms+argcnt, fields);
999
1000 // no result type needed
1001 fields = TypeTuple::fields(1);
1002 fields[TypeFunc::Parms+0] = nullptr; // void
1003 const TypeTuple* range = TypeTuple::make(TypeFunc::Parms, fields);
1004 return TypeFunc::make(domain, range);
1005 }
1006
1007 static const TypeFunc* make_array_sort_Type() {
1008 // create input type (domain)
1009 int num_args = 4;
1010 int argcnt = num_args;
1011 const Type** fields = TypeTuple::fields(argcnt);
1012 int argp = TypeFunc::Parms;
1013 fields[argp++] = TypePtr::NOTNULL; // array
1014 fields[argp++] = TypeInt::INT; // element type
1015 fields[argp++] = TypeInt::INT; // fromIndex
1016 fields[argp++] = TypeInt::INT; // toIndex
1017 assert(argp == TypeFunc::Parms+argcnt, "correct decoding");
1018 const TypeTuple* domain = TypeTuple::make(TypeFunc::Parms+argcnt, fields);
1019
1020 // no result type needed
1021 fields = TypeTuple::fields(1);
1022 fields[TypeFunc::Parms+0] = nullptr; // void
1023 const TypeTuple* range = TypeTuple::make(TypeFunc::Parms, fields);
1024 return TypeFunc::make(domain, range);
1025 }
1026
1027 static const TypeFunc* make_aescrypt_block_Type() {
1028 // create input type (domain)
1029 int num_args = 3;
1030 int argcnt = num_args;
1031 const Type** fields = TypeTuple::fields(argcnt);
1032 int argp = TypeFunc::Parms;
1033 fields[argp++] = TypePtr::NOTNULL; // src
1034 fields[argp++] = TypePtr::NOTNULL; // dest
1035 fields[argp++] = TypePtr::NOTNULL; // k array
1036 assert(argp == TypeFunc::Parms+argcnt, "correct decoding");
1037 const TypeTuple* domain = TypeTuple::make(TypeFunc::Parms+argcnt, fields);
1038
1039 // no result type needed
1040 fields = TypeTuple::fields(1);
1041 fields[TypeFunc::Parms+0] = nullptr; // void
1042 const TypeTuple* range = TypeTuple::make(TypeFunc::Parms, fields);
1043 return TypeFunc::make(domain, range);
1044 }
1045
1046 static const TypeFunc* make_updateBytesCRC32_Type() {
1047 // create input type (domain)
1048 int num_args = 3;
1049 int argcnt = num_args;
1050 const Type** fields = TypeTuple::fields(argcnt);
1051 int argp = TypeFunc::Parms;
1052 fields[argp++] = TypeInt::INT; // crc
1053 fields[argp++] = TypePtr::NOTNULL; // src
1054 fields[argp++] = TypeInt::INT; // len
1055 assert(argp == TypeFunc::Parms+argcnt, "correct decoding");
1056 const TypeTuple* domain = TypeTuple::make(TypeFunc::Parms+argcnt, fields);
1057
1058 // result type needed
1059 fields = TypeTuple::fields(1);
1060 fields[TypeFunc::Parms+0] = TypeInt::INT; // crc result
1061 const TypeTuple* range = TypeTuple::make(TypeFunc::Parms+1, fields);
1062 return TypeFunc::make(domain, range);
1063 }
1064
1065 static const TypeFunc* make_updateBytesCRC32C_Type() {
1066 // create input type (domain)
1067 int num_args = 4;
1068 int argcnt = num_args;
1069 const Type** fields = TypeTuple::fields(argcnt);
1070 int argp = TypeFunc::Parms;
1071 fields[argp++] = TypeInt::INT; // crc
1072 fields[argp++] = TypePtr::NOTNULL; // buf
1073 fields[argp++] = TypeInt::INT; // len
1074 fields[argp++] = TypePtr::NOTNULL; // table
1075 assert(argp == TypeFunc::Parms+argcnt, "correct decoding");
1076 const TypeTuple* domain = TypeTuple::make(TypeFunc::Parms+argcnt, fields);
1077
1078 // result type needed
1079 fields = TypeTuple::fields(1);
1080 fields[TypeFunc::Parms+0] = TypeInt::INT; // crc result
1081 const TypeTuple* range = TypeTuple::make(TypeFunc::Parms+1, fields);
1082 return TypeFunc::make(domain, range);
1083 }
1084
1085 static const TypeFunc* make_updateBytesAdler32_Type() {
1086 // create input type (domain)
1087 int num_args = 3;
1088 int argcnt = num_args;
1089 const Type** fields = TypeTuple::fields(argcnt);
1090 int argp = TypeFunc::Parms;
1091 fields[argp++] = TypeInt::INT; // crc
1092 fields[argp++] = TypePtr::NOTNULL; // src + offset
1093 fields[argp++] = TypeInt::INT; // len
1094 assert(argp == TypeFunc::Parms+argcnt, "correct decoding");
1095 const TypeTuple* domain = TypeTuple::make(TypeFunc::Parms+argcnt, fields);
1096
1097 // result type needed
1098 fields = TypeTuple::fields(1);
1099 fields[TypeFunc::Parms+0] = TypeInt::INT; // crc result
1100 const TypeTuple* range = TypeTuple::make(TypeFunc::Parms+1, fields);
1101 return TypeFunc::make(domain, range);
1102 }
1103
1104 static const TypeFunc* make_cipherBlockChaining_aescrypt_Type() {
1105 // create input type (domain)
1106 int num_args = 5;
1107 int argcnt = num_args;
1108 const Type** fields = TypeTuple::fields(argcnt);
1109 int argp = TypeFunc::Parms;
1110 fields[argp++] = TypePtr::NOTNULL; // src
1111 fields[argp++] = TypePtr::NOTNULL; // dest
1112 fields[argp++] = TypePtr::NOTNULL; // k array
1113 fields[argp++] = TypePtr::NOTNULL; // r array
1114 fields[argp++] = TypeInt::INT; // src len
1115 assert(argp == TypeFunc::Parms+argcnt, "correct decoding");
1116 const TypeTuple* domain = TypeTuple::make(TypeFunc::Parms+argcnt, fields);
1117
1118 // returning cipher len (int)
1119 fields = TypeTuple::fields(1);
1120 fields[TypeFunc::Parms+0] = TypeInt::INT;
1121 const TypeTuple* range = TypeTuple::make(TypeFunc::Parms+1, fields);
1122 return TypeFunc::make(domain, range);
1123 }
1124
1125 static const TypeFunc* make_electronicCodeBook_aescrypt_Type() {
1126 // create input type (domain)
1127 int num_args = 4;
1128 int argcnt = num_args;
1129 const Type** fields = TypeTuple::fields(argcnt);
1130 int argp = TypeFunc::Parms;
1131 fields[argp++] = TypePtr::NOTNULL; // src
1132 fields[argp++] = TypePtr::NOTNULL; // dest
1133 fields[argp++] = TypePtr::NOTNULL; // k array
1134 fields[argp++] = TypeInt::INT; // src len
1135 assert(argp == TypeFunc::Parms + argcnt, "correct decoding");
1136 const TypeTuple* domain = TypeTuple::make(TypeFunc::Parms + argcnt, fields);
1137
1138 // returning cipher len (int)
1139 fields = TypeTuple::fields(1);
1140 fields[TypeFunc::Parms + 0] = TypeInt::INT;
1141 const TypeTuple* range = TypeTuple::make(TypeFunc::Parms + 1, fields);
1142 return TypeFunc::make(domain, range);
1143 }
1144
1145 static const TypeFunc* make_counterMode_aescrypt_Type() {
1146 // create input type (domain)
1147 int num_args = 7;
1148 int argcnt = num_args;
1149 const Type** fields = TypeTuple::fields(argcnt);
1150 int argp = TypeFunc::Parms;
1151 fields[argp++] = TypePtr::NOTNULL; // src
1152 fields[argp++] = TypePtr::NOTNULL; // dest
1153 fields[argp++] = TypePtr::NOTNULL; // k array
1154 fields[argp++] = TypePtr::NOTNULL; // counter array
1155 fields[argp++] = TypeInt::INT; // src len
1156 fields[argp++] = TypePtr::NOTNULL; // saved_encCounter
1157 fields[argp++] = TypePtr::NOTNULL; // saved used addr
1158 assert(argp == TypeFunc::Parms + argcnt, "correct decoding");
1159 const TypeTuple* domain = TypeTuple::make(TypeFunc::Parms + argcnt, fields);
1160 // returning cipher len (int)
1161 fields = TypeTuple::fields(1);
1162 fields[TypeFunc::Parms + 0] = TypeInt::INT;
1163 const TypeTuple* range = TypeTuple::make(TypeFunc::Parms + 1, fields);
1164 return TypeFunc::make(domain, range);
1165 }
1166
1167 static const TypeFunc* make_galoisCounterMode_aescrypt_Type() {
1168 // create input type (domain)
1169 int num_args = 8;
1170 int argcnt = num_args;
1171 const Type** fields = TypeTuple::fields(argcnt);
1172 int argp = TypeFunc::Parms;
1173 fields[argp++] = TypePtr::NOTNULL; // byte[] in + inOfs
1174 fields[argp++] = TypeInt::INT; // int len
1175 fields[argp++] = TypePtr::NOTNULL; // byte[] ct + ctOfs
1176 fields[argp++] = TypePtr::NOTNULL; // byte[] out + outOfs
1177 fields[argp++] = TypePtr::NOTNULL; // byte[] key from AESCrypt obj
1178 fields[argp++] = TypePtr::NOTNULL; // long[] state from GHASH obj
1179 fields[argp++] = TypePtr::NOTNULL; // long[] subkeyHtbl from GHASH obj
1180 fields[argp++] = TypePtr::NOTNULL; // byte[] counter from GCTR obj
1181
1182 assert(argp == TypeFunc::Parms + argcnt, "correct decoding");
1183 const TypeTuple* domain = TypeTuple::make(TypeFunc::Parms + argcnt, fields);
1184 // returning cipher len (int)
1185 fields = TypeTuple::fields(1);
1186 fields[TypeFunc::Parms + 0] = TypeInt::INT;
1187 const TypeTuple* range = TypeTuple::make(TypeFunc::Parms + 1, fields);
1188 return TypeFunc::make(domain, range);
1189 }
1190
1191 static const TypeFunc* make_digestBase_implCompress_Type(bool is_sha3) {
1192 // create input type (domain)
1193 int num_args = is_sha3 ? 3 : 2;
1194 int argcnt = num_args;
1195 const Type** fields = TypeTuple::fields(argcnt);
1196 int argp = TypeFunc::Parms;
1197 fields[argp++] = TypePtr::NOTNULL; // buf
1198 fields[argp++] = TypePtr::NOTNULL; // state
1199 if (is_sha3) fields[argp++] = TypeInt::INT; // block_size
1200 assert(argp == TypeFunc::Parms+argcnt, "correct decoding");
1201 const TypeTuple* domain = TypeTuple::make(TypeFunc::Parms+argcnt, fields);
1202
1203 // no result type needed
1204 fields = TypeTuple::fields(1);
1205 fields[TypeFunc::Parms+0] = nullptr; // void
1206 const TypeTuple* range = TypeTuple::make(TypeFunc::Parms, fields);
1207 return TypeFunc::make(domain, range);
1208 }
1209
1210 /*
1211 * int implCompressMultiBlock(byte[] b, int ofs, int limit)
1212 */
1213 static const TypeFunc* make_digestBase_implCompressMB_Type(bool is_sha3) {
1214 // create input type (domain)
1215 int num_args = is_sha3 ? 5 : 4;
1216 int argcnt = num_args;
1217 const Type** fields = TypeTuple::fields(argcnt);
1218 int argp = TypeFunc::Parms;
1219 fields[argp++] = TypePtr::NOTNULL; // buf
1220 fields[argp++] = TypePtr::NOTNULL; // state
1221 if (is_sha3) fields[argp++] = TypeInt::INT; // block_size
1222 fields[argp++] = TypeInt::INT; // ofs
1223 fields[argp++] = TypeInt::INT; // limit
1224 assert(argp == TypeFunc::Parms+argcnt, "correct decoding");
1225 const TypeTuple* domain = TypeTuple::make(TypeFunc::Parms+argcnt, fields);
1226
1227 // returning ofs (int)
1228 fields = TypeTuple::fields(1);
1229 fields[TypeFunc::Parms+0] = TypeInt::INT; // ofs
1230 const TypeTuple* range = TypeTuple::make(TypeFunc::Parms+1, fields);
1231 return TypeFunc::make(domain, range);
1232 }
1233
1234 // SHAKE128Parallel doubleKeccak function
1235 static const TypeFunc* make_double_keccak_Type() {
1236 int argcnt = 2;
1237
1238 const Type** fields = TypeTuple::fields(argcnt);
1239 int argp = TypeFunc::Parms;
1240 fields[argp++] = TypePtr::NOTNULL; // status0
1241 fields[argp++] = TypePtr::NOTNULL; // status1
1242
1243 assert(argp == TypeFunc::Parms + argcnt, "correct decoding");
1244 const TypeTuple* domain = TypeTuple::make(TypeFunc::Parms + argcnt, fields);
1245
1246 // result type needed
1247 fields = TypeTuple::fields(1);
1248 fields[TypeFunc::Parms + 0] = TypeInt::INT;
1249 const TypeTuple* range = TypeTuple::make(TypeFunc::Parms + 1, fields);
1250 return TypeFunc::make(domain, range);
1251 }
1252
1253 static const TypeFunc* make_multiplyToLen_Type() {
1254 // create input type (domain)
1255 int num_args = 5;
1256 int argcnt = num_args;
1257 const Type** fields = TypeTuple::fields(argcnt);
1258 int argp = TypeFunc::Parms;
1259 fields[argp++] = TypePtr::NOTNULL; // x
1260 fields[argp++] = TypeInt::INT; // xlen
1261 fields[argp++] = TypePtr::NOTNULL; // y
1262 fields[argp++] = TypeInt::INT; // ylen
1263 fields[argp++] = TypePtr::NOTNULL; // z
1264 assert(argp == TypeFunc::Parms+argcnt, "correct decoding");
1265 const TypeTuple* domain = TypeTuple::make(TypeFunc::Parms+argcnt, fields);
1266
1267 // no result type needed
1268 fields = TypeTuple::fields(1);
1269 fields[TypeFunc::Parms+0] = nullptr;
1270 const TypeTuple* range = TypeTuple::make(TypeFunc::Parms, fields);
1271 return TypeFunc::make(domain, range);
1272 }
1273
1274 static const TypeFunc* make_squareToLen_Type() {
1275 // create input type (domain)
1276 int num_args = 4;
1277 int argcnt = num_args;
1278 const Type** fields = TypeTuple::fields(argcnt);
1279 int argp = TypeFunc::Parms;
1280 fields[argp++] = TypePtr::NOTNULL; // x
1281 fields[argp++] = TypeInt::INT; // len
1282 fields[argp++] = TypePtr::NOTNULL; // z
1283 fields[argp++] = TypeInt::INT; // zlen
1284 assert(argp == TypeFunc::Parms+argcnt, "correct decoding");
1285 const TypeTuple* domain = TypeTuple::make(TypeFunc::Parms+argcnt, fields);
1286
1287 // no result type needed
1288 fields = TypeTuple::fields(1);
1289 fields[TypeFunc::Parms+0] = nullptr;
1290 const TypeTuple* range = TypeTuple::make(TypeFunc::Parms, fields);
1291 return TypeFunc::make(domain, range);
1292 }
1293
1294 static const TypeFunc* make_mulAdd_Type() {
1295 // create input type (domain)
1296 int num_args = 5;
1297 int argcnt = num_args;
1298 const Type** fields = TypeTuple::fields(argcnt);
1299 int argp = TypeFunc::Parms;
1300 fields[argp++] = TypePtr::NOTNULL; // out
1301 fields[argp++] = TypePtr::NOTNULL; // in
1302 fields[argp++] = TypeInt::INT; // offset
1303 fields[argp++] = TypeInt::INT; // len
1304 fields[argp++] = TypeInt::INT; // k
1305 assert(argp == TypeFunc::Parms+argcnt, "correct decoding");
1306 const TypeTuple* domain = TypeTuple::make(TypeFunc::Parms+argcnt, fields);
1307
1308 // returning carry (int)
1309 fields = TypeTuple::fields(1);
1310 fields[TypeFunc::Parms+0] = TypeInt::INT;
1311 const TypeTuple* range = TypeTuple::make(TypeFunc::Parms+1, fields);
1312 return TypeFunc::make(domain, range);
1313 }
1314
// Signature of the Montgomery multiply stub:
// (a, b, n, len, jlong inv, result). The jlong 'inv' occupies two slots.
static const TypeFunc* make_montgomeryMultiply_Type() {
  // create input type (domain)
  int num_args = 7;
  int argcnt = num_args;
  const Type** fields = TypeTuple::fields(argcnt);
  int argp = TypeFunc::Parms;
  fields[argp++] = TypePtr::NOTNULL; // a
  fields[argp++] = TypePtr::NOTNULL; // b
  fields[argp++] = TypePtr::NOTNULL; // n
  fields[argp++] = TypeInt::INT; // len
  fields[argp++] = TypeLong::LONG; // inv
  fields[argp++] = Type::HALF; // second slot of the jlong 'inv'
  fields[argp++] = TypePtr::NOTNULL; // result
  assert(argp == TypeFunc::Parms+argcnt, "correct decoding");
  const TypeTuple* domain = TypeTuple::make(TypeFunc::Parms+argcnt, fields);

  // result type needed
  fields = TypeTuple::fields(1);
  fields[TypeFunc::Parms+0] = TypePtr::NOTNULL;

  // NOTE: the range tuple is made with cnt == TypeFunc::Parms, so the slot
  // prepared above is not part of the signature -- the declared return is
  // void and the product is delivered through the 'result' argument.
  const TypeTuple* range = TypeTuple::make(TypeFunc::Parms, fields);
  return TypeFunc::make(domain, range);
}
1338
// Signature of the Montgomery square stub:
// (a, n, len, jlong inv, result). The jlong 'inv' occupies two slots.
static const TypeFunc* make_montgomerySquare_Type() {
  // create input type (domain)
  int num_args = 6;
  int argcnt = num_args;
  const Type** fields = TypeTuple::fields(argcnt);
  int argp = TypeFunc::Parms;
  fields[argp++] = TypePtr::NOTNULL; // a
  fields[argp++] = TypePtr::NOTNULL; // n
  fields[argp++] = TypeInt::INT; // len
  fields[argp++] = TypeLong::LONG; // inv
  fields[argp++] = Type::HALF; // second slot of the jlong 'inv'
  fields[argp++] = TypePtr::NOTNULL; // result
  assert(argp == TypeFunc::Parms+argcnt, "correct decoding");
  const TypeTuple* domain = TypeTuple::make(TypeFunc::Parms+argcnt, fields);

  // result type needed
  fields = TypeTuple::fields(1);
  fields[TypeFunc::Parms+0] = TypePtr::NOTNULL;

  // NOTE: the range tuple is made with cnt == TypeFunc::Parms, so the slot
  // prepared above is not part of the signature -- the declared return is
  // void and the square is delivered through the 'result' argument.
  const TypeTuple* range = TypeTuple::make(TypeFunc::Parms, fields);
  return TypeFunc::make(domain, range);
}
1361
1362 static const TypeFunc* make_bigIntegerShift_Type() {
1363 int argcnt = 5;
1364 const Type** fields = TypeTuple::fields(argcnt);
1365 int argp = TypeFunc::Parms;
1366 fields[argp++] = TypePtr::NOTNULL; // newArr
1367 fields[argp++] = TypePtr::NOTNULL; // oldArr
1368 fields[argp++] = TypeInt::INT; // newIdx
1369 fields[argp++] = TypeInt::INT; // shiftCount
1370 fields[argp++] = TypeInt::INT; // numIter
1371 assert(argp == TypeFunc::Parms + argcnt, "correct decoding");
1372 const TypeTuple* domain = TypeTuple::make(TypeFunc::Parms + argcnt, fields);
1373
1374 // no result type needed
1375 fields = TypeTuple::fields(1);
1376 fields[TypeFunc::Parms + 0] = nullptr;
1377 const TypeTuple* range = TypeTuple::make(TypeFunc::Parms, fields);
1378 return TypeFunc::make(domain, range);
1379 }
1380
1381 static const TypeFunc* make_vectorizedMismatch_Type() {
1382 // create input type (domain)
1383 int num_args = 4;
1384 int argcnt = num_args;
1385 const Type** fields = TypeTuple::fields(argcnt);
1386 int argp = TypeFunc::Parms;
1387 fields[argp++] = TypePtr::NOTNULL; // obja
1388 fields[argp++] = TypePtr::NOTNULL; // objb
1389 fields[argp++] = TypeInt::INT; // length, number of elements
1390 fields[argp++] = TypeInt::INT; // log2scale, element size
1391 assert(argp == TypeFunc::Parms + argcnt, "correct decoding");
1392 const TypeTuple* domain = TypeTuple::make(TypeFunc::Parms + argcnt, fields);
1393
1394 //return mismatch index (int)
1395 fields = TypeTuple::fields(1);
1396 fields[TypeFunc::Parms + 0] = TypeInt::INT;
1397 const TypeTuple* range = TypeTuple::make(TypeFunc::Parms + 1, fields);
1398 return TypeFunc::make(domain, range);
1399 }
1400
1401 static const TypeFunc* make_ghash_processBlocks_Type() {
1402 int argcnt = 4;
1403
1404 const Type** fields = TypeTuple::fields(argcnt);
1405 int argp = TypeFunc::Parms;
1406 fields[argp++] = TypePtr::NOTNULL; // state
1407 fields[argp++] = TypePtr::NOTNULL; // subkeyH
1408 fields[argp++] = TypePtr::NOTNULL; // data
1409 fields[argp++] = TypeInt::INT; // blocks
1410 assert(argp == TypeFunc::Parms+argcnt, "correct decoding");
1411 const TypeTuple* domain = TypeTuple::make(TypeFunc::Parms+argcnt, fields);
1412
1413 // result type needed
1414 fields = TypeTuple::fields(1);
1415 fields[TypeFunc::Parms+0] = nullptr; // void
1416 const TypeTuple* range = TypeTuple::make(TypeFunc::Parms, fields);
1417 return TypeFunc::make(domain, range);
1418 }
1419
1420 static const TypeFunc* make_chacha20Block_Type() {
1421 int argcnt = 2;
1422
1423 const Type** fields = TypeTuple::fields(argcnt);
1424 int argp = TypeFunc::Parms;
1425 fields[argp++] = TypePtr::NOTNULL; // state
1426 fields[argp++] = TypePtr::NOTNULL; // result
1427
1428 assert(argp == TypeFunc::Parms + argcnt, "correct decoding");
1429 const TypeTuple* domain = TypeTuple::make(TypeFunc::Parms + argcnt, fields);
1430
1431 // result type needed
1432 fields = TypeTuple::fields(1);
1433 fields[TypeFunc::Parms + 0] = TypeInt::INT; // key stream outlen as int
1434 const TypeTuple* range = TypeTuple::make(TypeFunc::Parms + 1, fields);
1435 return TypeFunc::make(domain, range);
1436 }
1437
1438 // Kyber NTT function
1439 static const TypeFunc* make_kyberNtt_Type() {
1440 int argcnt = 2;
1441
1442 const Type** fields = TypeTuple::fields(argcnt);
1443 int argp = TypeFunc::Parms;
1444 fields[argp++] = TypePtr::NOTNULL; // coeffs
1445 fields[argp++] = TypePtr::NOTNULL; // NTT zetas
1446
1447 assert(argp == TypeFunc::Parms + argcnt, "correct decoding");
1448 const TypeTuple* domain = TypeTuple::make(TypeFunc::Parms + argcnt, fields);
1449
1450 // result type needed
1451 fields = TypeTuple::fields(1);
1452 fields[TypeFunc::Parms + 0] = TypeInt::INT;
1453 const TypeTuple* range = TypeTuple::make(TypeFunc::Parms + 1, fields);
1454 return TypeFunc::make(domain, range);
1455 }
1456
1457 // Kyber inverse NTT function
1458 static const TypeFunc* make_kyberInverseNtt_Type() {
1459 int argcnt = 2;
1460
1461 const Type** fields = TypeTuple::fields(argcnt);
1462 int argp = TypeFunc::Parms;
1463 fields[argp++] = TypePtr::NOTNULL; // coeffs
1464 fields[argp++] = TypePtr::NOTNULL; // inverse NTT zetas
1465
1466 assert(argp == TypeFunc::Parms + argcnt, "correct decoding");
1467 const TypeTuple* domain = TypeTuple::make(TypeFunc::Parms + argcnt, fields);
1468
1469 // result type needed
1470 fields = TypeTuple::fields(1);
1471 fields[TypeFunc::Parms + 0] = TypeInt::INT;
1472 const TypeTuple* range = TypeTuple::make(TypeFunc::Parms + 1, fields);
1473 return TypeFunc::make(domain, range);
1474 }
1475
1476 // Kyber NTT multiply function
1477 static const TypeFunc* make_kyberNttMult_Type() {
1478 int argcnt = 4;
1479
1480 const Type** fields = TypeTuple::fields(argcnt);
1481 int argp = TypeFunc::Parms;
1482 fields[argp++] = TypePtr::NOTNULL; // result
1483 fields[argp++] = TypePtr::NOTNULL; // ntta
1484 fields[argp++] = TypePtr::NOTNULL; // nttb
1485 fields[argp++] = TypePtr::NOTNULL; // NTT multiply zetas
1486
1487 assert(argp == TypeFunc::Parms + argcnt, "correct decoding");
1488 const TypeTuple* domain = TypeTuple::make(TypeFunc::Parms + argcnt, fields);
1489
1490 // result type needed
1491 fields = TypeTuple::fields(1);
1492 fields[TypeFunc::Parms + 0] = TypeInt::INT;
1493 const TypeTuple* range = TypeTuple::make(TypeFunc::Parms + 1, fields);
1494 return TypeFunc::make(domain, range);
1495 }
1496
1497 // Kyber add 2 polynomials function
1498 static const TypeFunc* make_kyberAddPoly_2_Type() {
1499 int argcnt = 3;
1500
1501 const Type** fields = TypeTuple::fields(argcnt);
1502 int argp = TypeFunc::Parms;
1503 fields[argp++] = TypePtr::NOTNULL; // result
1504 fields[argp++] = TypePtr::NOTNULL; // a
1505 fields[argp++] = TypePtr::NOTNULL; // b
1506
1507 assert(argp == TypeFunc::Parms + argcnt, "correct decoding");
1508 const TypeTuple* domain = TypeTuple::make(TypeFunc::Parms + argcnt, fields);
1509
1510 // result type needed
1511 fields = TypeTuple::fields(1);
1512 fields[TypeFunc::Parms + 0] = TypeInt::INT;
1513 const TypeTuple* range = TypeTuple::make(TypeFunc::Parms + 1, fields);
1514 return TypeFunc::make(domain, range);
1515 }
1516
1517
1518 // Kyber add 3 polynomials function
1519 static const TypeFunc* make_kyberAddPoly_3_Type() {
1520 int argcnt = 4;
1521
1522 const Type** fields = TypeTuple::fields(argcnt);
1523 int argp = TypeFunc::Parms;
1524 fields[argp++] = TypePtr::NOTNULL; // result
1525 fields[argp++] = TypePtr::NOTNULL; // a
1526 fields[argp++] = TypePtr::NOTNULL; // b
1527 fields[argp++] = TypePtr::NOTNULL; // c
1528
1529 assert(argp == TypeFunc::Parms + argcnt, "correct decoding");
1530 const TypeTuple* domain = TypeTuple::make(TypeFunc::Parms + argcnt, fields);
1531
1532 // result type needed
1533 fields = TypeTuple::fields(1);
1534 fields[TypeFunc::Parms + 0] = TypeInt::INT;
1535 const TypeTuple* range = TypeTuple::make(TypeFunc::Parms + 1, fields);
1536 return TypeFunc::make(domain, range);
1537 }
1538
1539
1540 // Kyber XOF output parsing into polynomial coefficients candidates
1541 // or decompress(12,...) function
1542 static const TypeFunc* make_kyber12To16_Type() {
1543 int argcnt = 4;
1544
1545 const Type** fields = TypeTuple::fields(argcnt);
1546 int argp = TypeFunc::Parms;
1547 fields[argp++] = TypePtr::NOTNULL; // condensed
1548 fields[argp++] = TypeInt::INT; // condensedOffs
1549 fields[argp++] = TypePtr::NOTNULL; // parsed
1550 fields[argp++] = TypeInt::INT; // parsedLength
1551
1552 assert(argp == TypeFunc::Parms + argcnt, "correct decoding");
1553 const TypeTuple* domain = TypeTuple::make(TypeFunc::Parms + argcnt, fields);
1554
1555 // result type needed
1556 fields = TypeTuple::fields(1);
1557 fields[TypeFunc::Parms + 0] = TypeInt::INT;
1558 const TypeTuple* range = TypeTuple::make(TypeFunc::Parms + 1, fields);
1559 return TypeFunc::make(domain, range);
1560 }
1561
1562 // Kyber Barrett reduce function
1563 static const TypeFunc* make_kyberBarrettReduce_Type() {
1564 int argcnt = 1;
1565
1566 const Type** fields = TypeTuple::fields(argcnt);
1567 int argp = TypeFunc::Parms;
1568 fields[argp++] = TypePtr::NOTNULL; // coeffs
1569
1570 assert(argp == TypeFunc::Parms + argcnt, "correct decoding");
1571 const TypeTuple* domain = TypeTuple::make(TypeFunc::Parms + argcnt, fields);
1572
1573 // result type needed
1574 fields = TypeTuple::fields(1);
1575 fields[TypeFunc::Parms + 0] = TypeInt::INT;
1576 const TypeTuple* range = TypeTuple::make(TypeFunc::Parms + 1, fields);
1577 return TypeFunc::make(domain, range);
1578 }
1579
1580 // Dilithium NTT function except for the final "normalization" to |coeff| < Q
1581 static const TypeFunc* make_dilithiumAlmostNtt_Type() {
1582 int argcnt = 2;
1583
1584 const Type** fields = TypeTuple::fields(argcnt);
1585 int argp = TypeFunc::Parms;
1586 fields[argp++] = TypePtr::NOTNULL; // coeffs
1587 fields[argp++] = TypePtr::NOTNULL; // NTT zetas
1588
1589 assert(argp == TypeFunc::Parms + argcnt, "correct decoding");
1590 const TypeTuple* domain = TypeTuple::make(TypeFunc::Parms + argcnt, fields);
1591
1592 // result type needed
1593 fields = TypeTuple::fields(1);
1594 fields[TypeFunc::Parms + 0] = TypeInt::INT;
1595 const TypeTuple* range = TypeTuple::make(TypeFunc::Parms + 1, fields);
1596 return TypeFunc::make(domain, range);
1597 }
1598
1599 // Dilithium inverse NTT function except the final mod Q division by 2^256
1600 static const TypeFunc* make_dilithiumAlmostInverseNtt_Type() {
1601 int argcnt = 2;
1602
1603 const Type** fields = TypeTuple::fields(argcnt);
1604 int argp = TypeFunc::Parms;
1605 fields[argp++] = TypePtr::NOTNULL; // coeffs
1606 fields[argp++] = TypePtr::NOTNULL; // inverse NTT zetas
1607
1608 assert(argp == TypeFunc::Parms + argcnt, "correct decoding");
1609 const TypeTuple* domain = TypeTuple::make(TypeFunc::Parms + argcnt, fields);
1610
1611 // result type needed
1612 fields = TypeTuple::fields(1);
1613 fields[TypeFunc::Parms + 0] = TypeInt::INT;
1614 const TypeTuple* range = TypeTuple::make(TypeFunc::Parms + 1, fields);
1615 return TypeFunc::make(domain, range);
1616 }
1617
1618 // Dilithium NTT multiply function
1619 static const TypeFunc* make_dilithiumNttMult_Type() {
1620 int argcnt = 3;
1621
1622 const Type** fields = TypeTuple::fields(argcnt);
1623 int argp = TypeFunc::Parms;
1624 fields[argp++] = TypePtr::NOTNULL; // result
1625 fields[argp++] = TypePtr::NOTNULL; // ntta
1626 fields[argp++] = TypePtr::NOTNULL; // nttb
1627
1628 assert(argp == TypeFunc::Parms + argcnt, "correct decoding");
1629 const TypeTuple* domain = TypeTuple::make(TypeFunc::Parms + argcnt, fields);
1630
1631 // result type needed
1632 fields = TypeTuple::fields(1);
1633 fields[TypeFunc::Parms + 0] = TypeInt::INT;
1634 const TypeTuple* range = TypeTuple::make(TypeFunc::Parms + 1, fields);
1635 return TypeFunc::make(domain, range);
1636 }
1637
1638 // Dilithium Montgomery multiply a polynome coefficient array by a constant
1639 static const TypeFunc* make_dilithiumMontMulByConstant_Type() {
1640 int argcnt = 2;
1641
1642 const Type** fields = TypeTuple::fields(argcnt);
1643 int argp = TypeFunc::Parms;
1644 fields[argp++] = TypePtr::NOTNULL; // coeffs
1645 fields[argp++] = TypeInt::INT; // constant multiplier
1646
1647 assert(argp == TypeFunc::Parms + argcnt, "correct decoding");
1648 const TypeTuple* domain = TypeTuple::make(TypeFunc::Parms + argcnt, fields);
1649
1650 // result type needed
1651 fields = TypeTuple::fields(1);
1652 fields[TypeFunc::Parms + 0] = TypeInt::INT;
1653 const TypeTuple* range = TypeTuple::make(TypeFunc::Parms + 1, fields);
1654 return TypeFunc::make(domain, range);
1655 }
1656
1657 // Dilithium decompose polynomial
1658 static const TypeFunc* make_dilithiumDecomposePoly_Type() {
1659 int argcnt = 5;
1660
1661 const Type** fields = TypeTuple::fields(argcnt);
1662 int argp = TypeFunc::Parms;
1663 fields[argp++] = TypePtr::NOTNULL; // input
1664 fields[argp++] = TypePtr::NOTNULL; // lowPart
1665 fields[argp++] = TypePtr::NOTNULL; // highPart
1666 fields[argp++] = TypeInt::INT; // 2 * gamma2
1667 fields[argp++] = TypeInt::INT; // multiplier
1668
1669 assert(argp == TypeFunc::Parms + argcnt, "correct decoding");
1670 const TypeTuple* domain = TypeTuple::make(TypeFunc::Parms + argcnt, fields);
1671
1672 // result type needed
1673 fields = TypeTuple::fields(1);
1674 fields[TypeFunc::Parms + 0] = TypeInt::INT;
1675 const TypeTuple* range = TypeTuple::make(TypeFunc::Parms + 1, fields);
1676 return TypeFunc::make(domain, range);
1677 }
1678
1679 static const TypeFunc* make_base64_encodeBlock_Type() {
1680 int argcnt = 6;
1681
1682 const Type** fields = TypeTuple::fields(argcnt);
1683 int argp = TypeFunc::Parms;
1684 fields[argp++] = TypePtr::NOTNULL; // src array
1685 fields[argp++] = TypeInt::INT; // offset
1686 fields[argp++] = TypeInt::INT; // length
1687 fields[argp++] = TypePtr::NOTNULL; // dest array
1688 fields[argp++] = TypeInt::INT; // dp
1689 fields[argp++] = TypeInt::BOOL; // isURL
1690 assert(argp == TypeFunc::Parms + argcnt, "correct decoding");
1691 const TypeTuple* domain = TypeTuple::make(TypeFunc::Parms+argcnt, fields);
1692
1693 // result type needed
1694 fields = TypeTuple::fields(1);
1695 fields[TypeFunc::Parms + 0] = nullptr; // void
1696 const TypeTuple* range = TypeTuple::make(TypeFunc::Parms, fields);
1697 return TypeFunc::make(domain, range);
1698 }
1699
1700 static const TypeFunc* make_string_IndexOf_Type() {
1701 int argcnt = 4;
1702
1703 const Type** fields = TypeTuple::fields(argcnt);
1704 int argp = TypeFunc::Parms;
1705 fields[argp++] = TypePtr::NOTNULL; // haystack array
1706 fields[argp++] = TypeInt::INT; // haystack length
1707 fields[argp++] = TypePtr::NOTNULL; // needle array
1708 fields[argp++] = TypeInt::INT; // needle length
1709 assert(argp == TypeFunc::Parms + argcnt, "correct decoding");
1710 const TypeTuple* domain = TypeTuple::make(TypeFunc::Parms+argcnt, fields);
1711
1712 // result type needed
1713 fields = TypeTuple::fields(1);
1714 fields[TypeFunc::Parms + 0] = TypeInt::INT; // Index of needle in haystack
1715 const TypeTuple* range = TypeTuple::make(TypeFunc::Parms + 1, fields);
1716 return TypeFunc::make(domain, range);
1717 }
1718
1719 static const TypeFunc* make_base64_decodeBlock_Type() {
1720 int argcnt = 7;
1721
1722 const Type** fields = TypeTuple::fields(argcnt);
1723 int argp = TypeFunc::Parms;
1724 fields[argp++] = TypePtr::NOTNULL; // src array
1725 fields[argp++] = TypeInt::INT; // src offset
1726 fields[argp++] = TypeInt::INT; // src length
1727 fields[argp++] = TypePtr::NOTNULL; // dest array
1728 fields[argp++] = TypeInt::INT; // dest offset
1729 fields[argp++] = TypeInt::BOOL; // isURL
1730 fields[argp++] = TypeInt::BOOL; // isMIME
1731 assert(argp == TypeFunc::Parms + argcnt, "correct decoding");
1732 const TypeTuple* domain = TypeTuple::make(TypeFunc::Parms+argcnt, fields);
1733
1734 // result type needed
1735 fields = TypeTuple::fields(1);
1736 fields[TypeFunc::Parms + 0] = TypeInt::INT; // count of bytes written to dst
1737 const TypeTuple* range = TypeTuple::make(TypeFunc::Parms + 1, fields);
1738 return TypeFunc::make(domain, range);
1739 }
1740
1741 static const TypeFunc* make_poly1305_processBlocks_Type() {
1742 int argcnt = 4;
1743
1744 const Type** fields = TypeTuple::fields(argcnt);
1745 int argp = TypeFunc::Parms;
1746 fields[argp++] = TypePtr::NOTNULL; // input array
1747 fields[argp++] = TypeInt::INT; // input length
1748 fields[argp++] = TypePtr::NOTNULL; // accumulator array
1749 fields[argp++] = TypePtr::NOTNULL; // r array
1750 assert(argp == TypeFunc::Parms + argcnt, "correct decoding");
1751 const TypeTuple* domain = TypeTuple::make(TypeFunc::Parms+argcnt, fields);
1752
1753 // result type needed
1754 fields = TypeTuple::fields(1);
1755 fields[TypeFunc::Parms + 0] = nullptr; // void
1756 const TypeTuple* range = TypeTuple::make(TypeFunc::Parms, fields);
1757 return TypeFunc::make(domain, range);
1758 }
1759
1760 static const TypeFunc* make_intpoly_montgomeryMult_P256_Type() {
1761 int argcnt = 3;
1762
1763 const Type** fields = TypeTuple::fields(argcnt);
1764 int argp = TypeFunc::Parms;
1765 fields[argp++] = TypePtr::NOTNULL; // a array
1766 fields[argp++] = TypePtr::NOTNULL; // b array
1767 fields[argp++] = TypePtr::NOTNULL; // r(esult) array
1768 assert(argp == TypeFunc::Parms + argcnt, "correct decoding");
1769 const TypeTuple* domain = TypeTuple::make(TypeFunc::Parms+argcnt, fields);
1770
1771 // result type needed
1772 fields = TypeTuple::fields(1);
1773 fields[TypeFunc::Parms + 0] = nullptr; // void
1774 const TypeTuple* range = TypeTuple::make(TypeFunc::Parms, fields);
1775 return TypeFunc::make(domain, range);
1776 }
1777
1778 static const TypeFunc* make_intpoly_assign_Type() {
1779 int argcnt = 4;
1780
1781 const Type** fields = TypeTuple::fields(argcnt);
1782 int argp = TypeFunc::Parms;
1783 fields[argp++] = TypeInt::INT; // set flag
1784 fields[argp++] = TypePtr::NOTNULL; // a array (result)
1785 fields[argp++] = TypePtr::NOTNULL; // b array (if set is set)
1786 fields[argp++] = TypeInt::INT; // array length
1787 assert(argp == TypeFunc::Parms + argcnt, "correct decoding");
1788 const TypeTuple* domain = TypeTuple::make(TypeFunc::Parms+argcnt, fields);
1789
1790 // result type needed
1791 fields = TypeTuple::fields(1);
1792 fields[TypeFunc::Parms + 0] = nullptr; // void
1793 const TypeTuple* range = TypeTuple::make(TypeFunc::Parms, fields);
1794 return TypeFunc::make(domain, range);
1795 }
1796
1797 //------------- Interpreter state for on stack replacement
1798 static const TypeFunc* make_osr_end_Type() {
1799 // create input type (domain)
1800 const Type **fields = TypeTuple::fields(1);
1801 fields[TypeFunc::Parms+0] = TypeRawPtr::BOTTOM; // OSR temp buf
1802 const TypeTuple *domain = TypeTuple::make(TypeFunc::Parms+1, fields);
1803
1804 // create result type
1805 fields = TypeTuple::fields(1);
1806 // fields[TypeFunc::Parms+0] = TypeInstPtr::NOTNULL; // locked oop
1807 fields[TypeFunc::Parms+0] = nullptr; // void
1808 const TypeTuple *range = TypeTuple::make(TypeFunc::Parms, fields);
1809 return TypeFunc::make(domain, range);
1810 }
1811
1812 #ifndef PRODUCT
1813 static void debug_print_convert_type(const Type** fields, int* argp, Node *parm) {
1814 const BasicType bt = parm->bottom_type()->basic_type();
1815 fields[(*argp)++] = Type::get_const_basic_type(bt);
1816 if (bt == T_LONG || bt == T_DOUBLE) {
1817 fields[(*argp)++] = Type::HALF;
1818 }
1819 }
1820
1821 static void update_arg_cnt(const Node* parm, int* arg_cnt) {
1822 (*arg_cnt)++;
1823 const BasicType bt = parm->bottom_type()->basic_type();
1824 if (bt == T_LONG || bt == T_DOUBLE) {
1825 (*arg_cnt)++;
1826 }
1827 }
1828
1829 const TypeFunc* OptoRuntime::debug_print_Type(Node* parm0, Node* parm1,
1830 Node* parm2, Node* parm3,
1831 Node* parm4, Node* parm5,
1832 Node* parm6) {
1833 int argcnt = 1;
1834 if (parm0 != nullptr) { update_arg_cnt(parm0, &argcnt);
1835 if (parm1 != nullptr) { update_arg_cnt(parm1, &argcnt);
1836 if (parm2 != nullptr) { update_arg_cnt(parm2, &argcnt);
1837 if (parm3 != nullptr) { update_arg_cnt(parm3, &argcnt);
1838 if (parm4 != nullptr) { update_arg_cnt(parm4, &argcnt);
1839 if (parm5 != nullptr) { update_arg_cnt(parm5, &argcnt);
1840 if (parm6 != nullptr) { update_arg_cnt(parm6, &argcnt);
1841 /* close each nested if ===> */ } } } } } } }
1842
1843 // create input type (domain)
1844 const Type** fields = TypeTuple::fields(argcnt);
1845 int argp = TypeFunc::Parms;
1846 fields[argp++] = TypePtr::NOTNULL; // static string pointer
1847
1848 if (parm0 != nullptr) { debug_print_convert_type(fields, &argp, parm0);
1849 if (parm1 != nullptr) { debug_print_convert_type(fields, &argp, parm1);
1850 if (parm2 != nullptr) { debug_print_convert_type(fields, &argp, parm2);
1851 if (parm3 != nullptr) { debug_print_convert_type(fields, &argp, parm3);
1852 if (parm4 != nullptr) { debug_print_convert_type(fields, &argp, parm4);
1853 if (parm5 != nullptr) { debug_print_convert_type(fields, &argp, parm5);
1854 if (parm6 != nullptr) { debug_print_convert_type(fields, &argp, parm6);
1855 /* close each nested if ===> */ } } } } } } }
1856
1857 assert(argp == TypeFunc::Parms+argcnt, "correct decoding");
1858 const TypeTuple* domain = TypeTuple::make(TypeFunc::Parms+argcnt, fields);
1859
1860 // no result type needed
1861 fields = TypeTuple::fields(1);
1862 fields[TypeFunc::Parms+0] = nullptr; // void
1863 const TypeTuple* range = TypeTuple::make(TypeFunc::Parms, fields);
1864 return TypeFunc::make(domain, range);
1865 }
1866 #endif // PRODUCT
1867
1868 //-------------------------------------------------------------------------------------
1869 // register policy
1870
1871 bool OptoRuntime::is_callee_saved_register(MachRegisterNumbers reg) {
1872 assert(reg >= 0 && reg < _last_Mach_Reg, "must be a machine register");
1873 switch (register_save_policy[reg]) {
1874 case 'C': return false; //SOC
1875 case 'E': return true ; //SOE
1876 case 'N': return false; //NS
1877 case 'A': return false; //AS
1878 }
1879 ShouldNotReachHere();
1880 return false;
1881 }
1882
1883 //-----------------------------------------------------------------------
1884 // Exceptions
1885 //
1886
1887 static void trace_exception(outputStream* st, oop exception_oop, address exception_pc, const char* msg);
1888
1889 // The method is an entry that is always called by a C++ method not
1890 // directly from compiled code. Compiled code will call the C++ method following.
1891 // We can't allow async exception to be installed during exception processing.
// Look up the continuation address for the exception currently installed in
// the thread (as exception_oop/exception_pc), setting nm to the nmethod that
// raised it. Returns either a compiled handler in that nmethod or the deopt
// blob's unpack-with-exception entry.
JRT_ENTRY_NO_ASYNC(address, OptoRuntime::handle_exception_C_helper(JavaThread* current, nmethod* &nm))
  // The frame we rethrow the exception to might not have been processed by the GC yet.
  // The stack watermark barrier takes care of detecting that and ensuring the frame
  // has updated oops.
  StackWatermarkSet::after_unwind(current);

  MACOS_AARCH64_ONLY(os::thread_wx_enable_write());

  // Do not confuse exception_oop with pending_exception. The exception_oop
  // is only used to pass arguments into the method. Not for general
  // exception handling. DO NOT CHANGE IT to use pending_exception, since
  // the runtime stubs checks this on exit.
  assert(current->exception_oop() != nullptr, "exception oop is found");
  address handler_address = nullptr;

  // Handle-ize the oop: the lookup below can trigger class loading and GC.
  Handle exception(current, current->exception_oop());
  address pc = current->exception_pc();

  // Clear out the exception oop and pc since looking up an
  // exception handler can cause class loading, which might throw an
  // exception and those fields are expected to be clear during
  // normal bytecode execution.
  current->clear_exception_oop_and_pc();

  LogTarget(Info, exceptions) lt;
  if (lt.is_enabled()) {
    LogStream ls(lt);
    trace_exception(&ls, exception(), pc, "");
  }

  // for AbortVMOnException flag
  Exceptions::debug_check_abort(exception);

#ifdef ASSERT
  if (!(exception->is_a(vmClasses::Throwable_klass()))) {
    // should throw an exception here
    ShouldNotReachHere();
  }
#endif

  // new exception handling: this method is entered only from adapters
  // exceptions from compiled java methods are handled in compiled code
  // using rethrow node

  nm = CodeCache::find_nmethod(pc);
  assert(nm != nullptr, "No NMethod found");
  if (nm->is_native_method()) {
    fatal("Native method should not have path to exception handling");
  } else {
    // we are switching to old paradigm: search for exception handler in caller_frame
    // instead in exception handler of caller_frame.sender()

    if (JvmtiExport::can_post_on_exceptions()) {
      // "Full-speed catching" is not necessary here,
      // since we're notifying the VM on every catch.
      // Force deoptimization and the rest of the lookup
      // will be fine.
      deoptimize_caller_frame(current);
    }

    // Check the stack guard pages. If enabled, look for handler in this frame;
    // otherwise, forcibly unwind the frame.
    //
    // 4826555: use default current sp for reguard_stack instead of &nm: it's more accurate.
    bool force_unwind = !current->stack_overflow_state()->reguard_stack();
    bool deopting = false;
    if (nm->is_deopt_pc(pc)) {
      deopting = true;
      RegisterMap map(current,
                      RegisterMap::UpdateMap::skip,
                      RegisterMap::ProcessFrames::include,
                      RegisterMap::WalkContinuation::skip);
      frame deoptee = current->last_frame().sender(&map);
      assert(deoptee.is_deoptimized_frame(), "must be deopted");
      // Adjust the pc back to the original throwing pc
      pc = deoptee.pc();
    }

    // If we are forcing an unwind because of stack overflow then deopt is
    // irrelevant since we are throwing the frame away anyway.

    if (deopting && !force_unwind) {
      handler_address = SharedRuntime::deopt_blob()->unpack_with_exception();
    } else {

      // Fast path: consult the nmethod's exception cache first.
      handler_address =
        force_unwind ? nullptr : nm->handler_for_exception_and_pc(exception, pc);

      if (handler_address == nullptr) {
        bool recursive_exception = false;
        handler_address = SharedRuntime::compute_compiled_exc_handler(nm, pc, exception, force_unwind, true, recursive_exception);
        assert (handler_address != nullptr, "must have compiled handler");
        // Update the exception cache only when the unwind was not forced
        // and there didn't happen another exception during the computation of the
        // compiled exception handler. Checking for exception oop equality is not
        // sufficient because some exceptions are pre-allocated and reused.
        if (!force_unwind && !recursive_exception) {
          nm->add_handler_for_exception_and_pc(exception,pc,handler_address);
        }
      } else {
#ifdef ASSERT
        // Cross-check the cached handler against a fresh computation.
        bool recursive_exception = false;
        address computed_address = SharedRuntime::compute_compiled_exc_handler(nm, pc, exception, force_unwind, true, recursive_exception);
        vmassert(recursive_exception || (handler_address == computed_address), "Handler address inconsistency: " PTR_FORMAT " != " PTR_FORMAT,
                 p2i(handler_address), p2i(computed_address));
#endif
      }
    }

    current->set_exception_pc(pc);
    current->set_exception_handler_pc(handler_address);
  }

  // Restore correct return pc. Was saved above.
  current->set_exception_oop(exception());
  return handler_address;

JRT_END
2010
2011 // We are entering here from exception_blob
2012 // If there is a compiled exception handler in this method, we will continue there;
2013 // otherwise we will unwind the stack and continue at the caller of top frame method
2014 // Note we enter without the usual JRT wrapper. We will call a helper routine that
2015 // will do the normal VM entry. We do it this way so that we can see if the nmethod
2016 // we looked up the handler for has been deoptimized in the meantime. If it has been
2017 // we must not use the handler and instead return the deopt blob.
address OptoRuntime::handle_exception_C(JavaThread* current) {
  //
  // We are in Java not VM and in debug mode we have a NoHandleMark
  //
#ifndef PRODUCT
  SharedRuntime::_find_handler_ctr++; // find exception handler
#endif
  DEBUG_ONLY(NoHandleMark __hm;)
  nmethod* nm = nullptr;
  address handler_address = nullptr;
  {
    // Enter the VM
    // ResetNoHandleMark temporarily lifts the NoHandleMark so the helper
    // (a JRT entry) may create handles.
    ResetNoHandleMark rnhm;
    handler_address = handle_exception_C_helper(current, nm);
  }

  // Back in java: Use no oops, DON'T safepoint

  // Now check to see if the handler we are returning is in a now
  // deoptimized frame

  if (nm != nullptr) {
    RegisterMap map(current,
                    RegisterMap::UpdateMap::skip,
                    RegisterMap::ProcessFrames::skip,
                    RegisterMap::WalkContinuation::skip);
    frame caller = current->last_frame().sender(&map);
#ifdef ASSERT
    assert(caller.is_compiled_frame(), "must be");
#endif // ASSERT
    // The nmethod may have been deoptimized while we were in the VM above;
    // in that case the handler we looked up must not be used and we
    // continue in the deopt blob instead.
    if (caller.is_deoptimized_frame()) {
      handler_address = SharedRuntime::deopt_blob()->unpack_with_exception();
    }
  }
  return handler_address;
}
2055
2056 //------------------------------rethrow----------------------------------------
2057 // We get here after compiled code has executed a 'RethrowNode'. The callee
2058 // is either throwing or rethrowing an exception. The callee-save registers
2059 // have been restored, synchronized objects have been unlocked and the callee
2060 // stack frame has been removed. The return address was passed in.
2061 // Exception oop is passed as the 1st argument. This routine is then called
2062 // from the stub. On exit, we know where to jump in the caller's code.
2063 // After this C code exits, the stub will pop his frame and end in a jump
2064 // (instead of a return). We enter the caller's default handler.
2065 //
2066 // This must be JRT_LEAF:
2067 // - caller will not change its state as we cannot block on exit,
2068 // therefore raw_exception_handler_for_return_address is all it takes
2069 // to handle deoptimized blobs
2070 //
2071 // However, there needs to be a safepoint check in the middle! So compiled
2072 // safepoints are completely watertight.
2073 //
2074 // Thus, it cannot be a leaf since it contains the NoSafepointVerifier.
2075 //
2076 // *THIS IS NOT RECOMMENDED PROGRAMMING STYLE*
2077 //
// Find the continuation address for a (re)thrown exception: store the
// exception oop in the thread's vm_result_oop and return the handler for
// the caller identified by ret_pc. See the block comment above for why this
// is deliberately not a JRT_LEAF.
address OptoRuntime::rethrow_C(oopDesc* exception, JavaThread* thread, address ret_pc) {
  // ret_pc will have been loaded from the stack, so for AArch64 will be signed.
  AARCH64_PORT_ONLY(ret_pc = pauth_strip_verifiable(ret_pc));

#ifndef PRODUCT
  SharedRuntime::_rethrow_ctr++; // count rethrows
#endif
  assert (exception != nullptr, "should have thrown a NullPointerException");
#ifdef ASSERT
  if (!(exception->is_a(vmClasses::Throwable_klass()))) {
    // should throw an exception here
    ShouldNotReachHere();
  }
#endif

  // Pass the exception oop to the stub via the thread; the stub reads it back.
  thread->set_vm_result_oop(exception);
  // Frame not compiled (handles deoptimization blob)
  return SharedRuntime::raw_exception_handler_for_return_address(thread, ret_pc);
}
2097
2098 static const TypeFunc* make_rethrow_Type() {
2099 // create input type (domain)
2100 const Type **fields = TypeTuple::fields(1);
2101 fields[TypeFunc::Parms+0] = TypeInstPtr::NOTNULL; // Exception oop
2102 const TypeTuple *domain = TypeTuple::make(TypeFunc::Parms+1,fields);
2103
2104 // create result type (range)
2105 fields = TypeTuple::fields(1);
2106 fields[TypeFunc::Parms+0] = TypeInstPtr::NOTNULL; // Exception oop
2107 const TypeTuple *range = TypeTuple::make(TypeFunc::Parms+1, fields);
2108
2109 return TypeFunc::make(domain, range);
2110 }
2111
2112
2113 void OptoRuntime::deoptimize_caller_frame(JavaThread *thread, bool doit) {
2114 // Deoptimize the caller before continuing, as the compiled
2115 // exception handler table may not be valid.
2116 if (DeoptimizeOnAllocationException && doit) {
2117 deoptimize_caller_frame(thread);
2118 }
2119 }
2120
2121 void OptoRuntime::deoptimize_caller_frame(JavaThread *thread) {
2122 // Called from within the owner thread, so no need for safepoint
2123 RegisterMap reg_map(thread,
2124 RegisterMap::UpdateMap::include,
2125 RegisterMap::ProcessFrames::include,
2126 RegisterMap::WalkContinuation::skip);
2127 frame stub_frame = thread->last_frame();
2128 assert(stub_frame.is_runtime_frame() || exception_blob()->contains(stub_frame.pc()), "sanity check");
2129 frame caller_frame = stub_frame.sender(®_map);
2130
2131 // Deoptimize the caller frame.
2132 Deoptimization::deoptimize_frame(thread, caller_frame.id());
2133 }
2134
2135
2136 bool OptoRuntime::is_deoptimized_caller_frame(JavaThread *thread) {
2137 // Called from within the owner thread, so no need for safepoint
2138 RegisterMap reg_map(thread,
2139 RegisterMap::UpdateMap::include,
2140 RegisterMap::ProcessFrames::include,
2141 RegisterMap::WalkContinuation::skip);
2142 frame stub_frame = thread->last_frame();
2143 assert(stub_frame.is_runtime_frame() || exception_blob()->contains(stub_frame.pc()), "sanity check");
2144 frame caller_frame = stub_frame.sender(®_map);
2145 return caller_frame.is_deoptimized_frame();
2146 }
2147
2148 static const TypeFunc* make_register_finalizer_Type() {
2149 // create input type (domain)
2150 const Type **fields = TypeTuple::fields(1);
2151 fields[TypeFunc::Parms+0] = TypeInstPtr::NOTNULL; // oop; Receiver
2152 // // The JavaThread* is passed to each routine as the last argument
2153 // fields[TypeFunc::Parms+1] = TypeRawPtr::NOTNULL; // JavaThread *; Executing thread
2154 const TypeTuple *domain = TypeTuple::make(TypeFunc::Parms+1,fields);
2155
2156 // create result type (range)
2157 fields = TypeTuple::fields(0);
2158
2159 const TypeTuple *range = TypeTuple::make(TypeFunc::Parms+0,fields);
2160
2161 return TypeFunc::make(domain, range);
2162 }
2163
2164 #if INCLUDE_JFR
2165 static const TypeFunc* make_class_id_load_barrier_Type() {
2166 // create input type (domain)
2167 const Type **fields = TypeTuple::fields(1);
2168 fields[TypeFunc::Parms+0] = TypeInstPtr::KLASS;
2169 const TypeTuple *domain = TypeTuple::make(TypeFunc::Parms + 1, fields);
2170
2171 // create result type (range)
2172 fields = TypeTuple::fields(0);
2173
2174 const TypeTuple *range = TypeTuple::make(TypeFunc::Parms + 0, fields);
2175
2176 return TypeFunc::make(domain,range);
2177 }
2178 #endif // INCLUDE_JFR
2179
2180 //-----------------------------------------------------------------------------
2181 static const TypeFunc* make_dtrace_method_entry_exit_Type() {
2182 // create input type (domain)
2183 const Type **fields = TypeTuple::fields(2);
2184 fields[TypeFunc::Parms+0] = TypeRawPtr::BOTTOM; // Thread-local storage
2185 fields[TypeFunc::Parms+1] = TypeMetadataPtr::BOTTOM; // Method*; Method we are entering
2186 const TypeTuple *domain = TypeTuple::make(TypeFunc::Parms+2,fields);
2187
2188 // create result type (range)
2189 fields = TypeTuple::fields(0);
2190
2191 const TypeTuple *range = TypeTuple::make(TypeFunc::Parms+0,fields);
2192
2193 return TypeFunc::make(domain, range);
2194 }
2195
2196 static const TypeFunc* make_dtrace_object_alloc_Type() {
2197 // create input type (domain)
2198 const Type **fields = TypeTuple::fields(2);
2199 fields[TypeFunc::Parms+0] = TypeRawPtr::BOTTOM; // Thread-local storage
2200 fields[TypeFunc::Parms+1] = TypeInstPtr::NOTNULL; // oop; newly allocated object
2201
2202 const TypeTuple *domain = TypeTuple::make(TypeFunc::Parms+2,fields);
2203
2204 // create result type (range)
2205 fields = TypeTuple::fields(0);
2206
2207 const TypeTuple *range = TypeTuple::make(TypeFunc::Parms+0,fields);
2208
2209 return TypeFunc::make(domain, range);
2210 }
2211
// Slow path from compiled code: register a newly allocated object whose
// class declares a finalizer. CHECK propagates any pending exception raised
// during registration back to the caller.
JRT_ENTRY_NO_ASYNC(void, OptoRuntime::register_finalizer_C(oopDesc* obj, JavaThread* current))
  assert(oopDesc::is_oop(obj), "must be a valid oop");
  assert(obj->klass()->has_finalizer(), "shouldn't be here otherwise");
  InstanceKlass::register_finalizer(instanceOop(obj), CHECK);
JRT_END
2217
2218 //-----------------------------------------------------------------------------
2219
2220 NamedCounter * volatile OptoRuntime::_named_counters = nullptr;
2221
2222 //
2223 // dump the collected NamedCounters.
2224 //
2225 void OptoRuntime::print_named_counters() {
2226 int total_lock_count = 0;
2227 int eliminated_lock_count = 0;
2228
2229 NamedCounter* c = _named_counters;
2230 while (c) {
2231 if (c->tag() == NamedCounter::LockCounter || c->tag() == NamedCounter::EliminatedLockCounter) {
2232 int count = c->count();
2233 if (count > 0) {
2234 bool eliminated = c->tag() == NamedCounter::EliminatedLockCounter;
2235 if (Verbose) {
2236 tty->print_cr("%d %s%s", count, c->name(), eliminated ? " (eliminated)" : "");
2237 }
2238 total_lock_count += count;
2239 if (eliminated) {
2240 eliminated_lock_count += count;
2241 }
2242 }
2243 }
2244 c = c->next();
2245 }
2246 if (total_lock_count > 0) {
2247 tty->print_cr("dynamic locks: %d", total_lock_count);
2248 if (eliminated_lock_count) {
2249 tty->print_cr("eliminated locks: %d (%d%%)", eliminated_lock_count,
2250 (int)(eliminated_lock_count * 100.0 / total_lock_count));
2251 }
2252 }
2253 }
2254
2255 //
2256 // Allocate a new NamedCounter. The JVMState is used to generate the
2257 // name which consists of method@line for the inlining tree.
2258 //
2259
2260 NamedCounter* OptoRuntime::new_named_counter(JVMState* youngest_jvms, NamedCounter::CounterTag tag) {
2261 int max_depth = youngest_jvms->depth();
2262
2263 // Visit scopes from youngest to oldest.
2264 bool first = true;
2265 stringStream st;
2266 for (int depth = max_depth; depth >= 1; depth--) {
2267 JVMState* jvms = youngest_jvms->of_depth(depth);
2268 ciMethod* m = jvms->has_method() ? jvms->method() : nullptr;
2269 if (!first) {
2270 st.print(" ");
2271 } else {
2272 first = false;
2273 }
2274 int bci = jvms->bci();
2275 if (bci < 0) bci = 0;
2276 if (m != nullptr) {
2277 st.print("%s.%s", m->holder()->name()->as_utf8(), m->name()->as_utf8());
2278 } else {
2279 st.print("no method");
2280 }
2281 st.print("@%d", bci);
2282 // To print linenumbers instead of bci use: m->line_number_from_bci(bci)
2283 }
2284 NamedCounter* c = new NamedCounter(st.freeze(), tag);
2285
2286 // atomically add the new counter to the head of the list. We only
2287 // add counters so this is safe.
2288 NamedCounter* head;
2289 do {
2290 c->set_next(nullptr);
2291 head = _named_counters;
2292 c->set_next(head);
2293 } while (AtomicAccess::cmpxchg(&_named_counters, head, c) != head);
2294 return c;
2295 }
2296
2297 void OptoRuntime::initialize_types() {
2298 _new_instance_Type = make_new_instance_Type();
2299 _new_array_Type = make_new_array_Type();
2300 _new_array_nozero_Type = make_new_array_nozero_Type();
2301 _multianewarray2_Type = multianewarray_Type(2);
2302 _multianewarray3_Type = multianewarray_Type(3);
2303 _multianewarray4_Type = multianewarray_Type(4);
2304 _multianewarray5_Type = multianewarray_Type(5);
2305 _multianewarrayN_Type = make_multianewarrayN_Type();
2306 _complete_monitor_enter_Type = make_complete_monitor_enter_Type();
2307 _complete_monitor_exit_Type = make_complete_monitor_exit_Type();
2308 _monitor_notify_Type = make_monitor_notify_Type();
2309 _uncommon_trap_Type = make_uncommon_trap_Type();
2310 _athrow_Type = make_athrow_Type();
2311 _rethrow_Type = make_rethrow_Type();
2312 _Math_D_D_Type = make_Math_D_D_Type();
2313 _Math_DD_D_Type = make_Math_DD_D_Type();
2314 _modf_Type = make_modf_Type();
2315 _l2f_Type = make_l2f_Type();
2316 _void_long_Type = make_void_long_Type();
2317 _void_void_Type = make_void_void_Type();
2318 _jfr_write_checkpoint_Type = make_jfr_write_checkpoint_Type();
2319 _flush_windows_Type = make_flush_windows_Type();
2320 _fast_arraycopy_Type = make_arraycopy_Type(ac_fast);
2321 _checkcast_arraycopy_Type = make_arraycopy_Type(ac_checkcast);
2322 _generic_arraycopy_Type = make_arraycopy_Type(ac_generic);
2323 _slow_arraycopy_Type = make_arraycopy_Type(ac_slow);
2324 _unsafe_setmemory_Type = make_setmemory_Type();
2325 _array_fill_Type = make_array_fill_Type();
2326 _array_sort_Type = make_array_sort_Type();
2327 _array_partition_Type = make_array_partition_Type();
2328 _aescrypt_block_Type = make_aescrypt_block_Type();
2329 _cipherBlockChaining_aescrypt_Type = make_cipherBlockChaining_aescrypt_Type();
2330 _electronicCodeBook_aescrypt_Type = make_electronicCodeBook_aescrypt_Type();
2331 _counterMode_aescrypt_Type = make_counterMode_aescrypt_Type();
2332 _galoisCounterMode_aescrypt_Type = make_galoisCounterMode_aescrypt_Type();
2333 _digestBase_implCompress_with_sha3_Type = make_digestBase_implCompress_Type( /* is_sha3= */ true);
2334 _digestBase_implCompress_without_sha3_Type = make_digestBase_implCompress_Type( /* is_sha3= */ false);;
2335 _digestBase_implCompressMB_with_sha3_Type = make_digestBase_implCompressMB_Type(/* is_sha3= */ true);
2336 _digestBase_implCompressMB_without_sha3_Type = make_digestBase_implCompressMB_Type(/* is_sha3= */ false);
2337 _double_keccak_Type = make_double_keccak_Type();
2338 _multiplyToLen_Type = make_multiplyToLen_Type();
2339 _montgomeryMultiply_Type = make_montgomeryMultiply_Type();
2340 _montgomerySquare_Type = make_montgomerySquare_Type();
2341 _squareToLen_Type = make_squareToLen_Type();
2342 _mulAdd_Type = make_mulAdd_Type();
2343 _bigIntegerShift_Type = make_bigIntegerShift_Type();
2344 _vectorizedMismatch_Type = make_vectorizedMismatch_Type();
2345 _ghash_processBlocks_Type = make_ghash_processBlocks_Type();
2346 _chacha20Block_Type = make_chacha20Block_Type();
2347 _kyberNtt_Type = make_kyberNtt_Type();
2348 _kyberInverseNtt_Type = make_kyberInverseNtt_Type();
2349 _kyberNttMult_Type = make_kyberNttMult_Type();
2350 _kyberAddPoly_2_Type = make_kyberAddPoly_2_Type();
2351 _kyberAddPoly_3_Type = make_kyberAddPoly_3_Type();
2352 _kyber12To16_Type = make_kyber12To16_Type();
2353 _kyberBarrettReduce_Type = make_kyberBarrettReduce_Type();
2354 _dilithiumAlmostNtt_Type = make_dilithiumAlmostNtt_Type();
2355 _dilithiumAlmostInverseNtt_Type = make_dilithiumAlmostInverseNtt_Type();
2356 _dilithiumNttMult_Type = make_dilithiumNttMult_Type();
2357 _dilithiumMontMulByConstant_Type = make_dilithiumMontMulByConstant_Type();
2358 _dilithiumDecomposePoly_Type = make_dilithiumDecomposePoly_Type();
2359 _base64_encodeBlock_Type = make_base64_encodeBlock_Type();
2360 _base64_decodeBlock_Type = make_base64_decodeBlock_Type();
2361 _string_IndexOf_Type = make_string_IndexOf_Type();
2362 _poly1305_processBlocks_Type = make_poly1305_processBlocks_Type();
2363 _intpoly_montgomeryMult_P256_Type = make_intpoly_montgomeryMult_P256_Type();
2364 _intpoly_assign_Type = make_intpoly_assign_Type();
2365 _updateBytesCRC32_Type = make_updateBytesCRC32_Type();
2366 _updateBytesCRC32C_Type = make_updateBytesCRC32C_Type();
2367 _updateBytesAdler32_Type = make_updateBytesAdler32_Type();
2368 _osr_end_Type = make_osr_end_Type();
2369 _register_finalizer_Type = make_register_finalizer_Type();
2370 _vthread_transition_Type = make_vthread_transition_Type();
2371 JFR_ONLY(
2372 _class_id_load_barrier_Type = make_class_id_load_barrier_Type();
2373 )
2374 _dtrace_method_entry_exit_Type = make_dtrace_method_entry_exit_Type();
2375 _dtrace_object_alloc_Type = make_dtrace_object_alloc_Type();
2376 }
2377
2378 int trace_exception_counter = 0;
2379 static void trace_exception(outputStream* st, oop exception_oop, address exception_pc, const char* msg) {
2380 trace_exception_counter++;
2381 stringStream tempst;
2382
2383 tempst.print("%d [Exception (%s): ", trace_exception_counter, msg);
2384 exception_oop->print_value_on(&tempst);
2385 tempst.print(" in ");
2386 CodeBlob* blob = CodeCache::find_blob(exception_pc);
2387 if (blob->is_nmethod()) {
2388 blob->as_nmethod()->method()->print_value_on(&tempst);
2389 } else if (blob->is_runtime_stub()) {
2390 tempst.print("<runtime-stub>");
2391 } else {
2392 tempst.print("<unknown>");
2393 }
2394 tempst.print(" at " INTPTR_FORMAT, p2i(exception_pc));
2395 tempst.print("]");
2396
2397 st->print_raw_cr(tempst.freeze());
2398 }
2399
2400 const TypeFunc *OptoRuntime::store_inline_type_fields_Type() {
2401 // create input type (domain)
2402 uint total = SharedRuntime::java_return_convention_max_int + SharedRuntime::java_return_convention_max_float*2;
2403 const Type **fields = TypeTuple::fields(total);
2404 // We don't know the number of returned values and their
2405 // types. Assume all registers available to the return convention
2406 // are used.
2407 fields[TypeFunc::Parms] = TypePtr::BOTTOM;
2408 uint i = 1;
2409 for (; i < SharedRuntime::java_return_convention_max_int; i++) {
2410 fields[TypeFunc::Parms+i] = TypeInt::INT;
2411 }
2412 for (; i < total; i+=2) {
2413 fields[TypeFunc::Parms+i] = Type::DOUBLE;
2414 fields[TypeFunc::Parms+i+1] = Type::HALF;
2415 }
2416 const TypeTuple* domain = TypeTuple::make(TypeFunc::Parms + total, fields);
2417
2418 // create result type (range)
2419 fields = TypeTuple::fields(1);
2420 fields[TypeFunc::Parms+0] = TypeInstPtr::BOTTOM;
2421
2422 const TypeTuple *range = TypeTuple::make(TypeFunc::Parms+1,fields);
2423
2424 return TypeFunc::make(domain, range);
2425 }
2426
2427 const TypeFunc *OptoRuntime::pack_inline_type_Type() {
2428 // create input type (domain)
2429 uint total = 1 + SharedRuntime::java_return_convention_max_int + SharedRuntime::java_return_convention_max_float*2;
2430 const Type **fields = TypeTuple::fields(total);
2431 // We don't know the number of returned values and their
2432 // types. Assume all registers available to the return convention
2433 // are used.
2434 fields[TypeFunc::Parms] = TypeRawPtr::BOTTOM;
2435 fields[TypeFunc::Parms+1] = TypeRawPtr::BOTTOM;
2436 uint i = 2;
2437 for (; i < SharedRuntime::java_return_convention_max_int+1; i++) {
2438 fields[TypeFunc::Parms+i] = TypeInt::INT;
2439 }
2440 for (; i < total; i+=2) {
2441 fields[TypeFunc::Parms+i] = Type::DOUBLE;
2442 fields[TypeFunc::Parms+i+1] = Type::HALF;
2443 }
2444 const TypeTuple* domain = TypeTuple::make(TypeFunc::Parms + total, fields);
2445
2446 // create result type (range)
2447 fields = TypeTuple::fields(1);
2448 fields[TypeFunc::Parms+0] = TypeInstPtr::NOTNULL;
2449
2450 const TypeTuple *range = TypeTuple::make(TypeFunc::Parms+1,fields);
2451
2452 return TypeFunc::make(domain, range);
2453 }
2454
// Runtime entry: load an element from a flat array whose exact layout is not
// known to the compiled caller. The element is materialized as an oop via
// obj_at and returned through the thread-local vm_result_oop slot.
JRT_BLOCK_ENTRY(void, OptoRuntime::load_unknown_inline_C(flatArrayOopDesc* array, int index, JavaThread* current))
  JRT_BLOCK;
  // obj_at is passed THREAD, so it may leave a pending exception
  // (e.g. on allocation failure — TODO confirm the failure modes).
  oop buffer = array->obj_at(index, THREAD);
  // Deoptimize the compiled caller frame; whether an exception is pending
  // is forwarded as the second argument.
  deoptimize_caller_frame(current, HAS_PENDING_EXCEPTION);
  // Hand the loaded element back to the (now deoptimized) caller.
  current->set_vm_result_oop(buffer);
  JRT_BLOCK_END;
JRT_END
2462
2463 const TypeFunc* OptoRuntime::load_unknown_inline_Type() {
2464 // create input type (domain)
2465 const Type** fields = TypeTuple::fields(2);
2466 fields[TypeFunc::Parms] = TypeOopPtr::NOTNULL;
2467 fields[TypeFunc::Parms+1] = TypeInt::POS;
2468
2469 const TypeTuple* domain = TypeTuple::make(TypeFunc::Parms+2, fields);
2470
2471 // create result type (range)
2472 fields = TypeTuple::fields(1);
2473 fields[TypeFunc::Parms] = TypeInstPtr::BOTTOM;
2474
2475 const TypeTuple* range = TypeTuple::make(TypeFunc::Parms+1, fields);
2476
2477 return TypeFunc::make(domain, range);
2478 }
2479
// Runtime entry: store buffer into a flat array whose exact layout is not
// known to the compiled caller, via the generic obj_at_put path.
JRT_BLOCK_ENTRY(void, OptoRuntime::store_unknown_inline_C(instanceOopDesc* buffer, flatArrayOopDesc* array, int index, JavaThread* current))
  JRT_BLOCK;
  // obj_at_put is passed THREAD and so could in principle raise an
  // exception; the guard below asserts that it currently never does.
  array->obj_at_put(index, buffer, THREAD);
  if (HAS_PENDING_EXCEPTION) {
    // Deliberate hard stop: compiled callers treat this entry as
    // non-throwing, so a pending exception here means the entry's
    // classification must be revisited rather than silently ignored.
    fatal("This entry must be changed to be a non-leaf entry because writing to a flat array can now throw an exception");
  }
  JRT_BLOCK_END;
JRT_END
2488
2489 const TypeFunc* OptoRuntime::store_unknown_inline_Type() {
2490 // create input type (domain)
2491 const Type** fields = TypeTuple::fields(3);
2492 fields[TypeFunc::Parms] = TypeInstPtr::NOTNULL;
2493 fields[TypeFunc::Parms+1] = TypeOopPtr::NOTNULL;
2494 fields[TypeFunc::Parms+2] = TypeInt::POS;
2495
2496 const TypeTuple* domain = TypeTuple::make(TypeFunc::Parms+3, fields);
2497
2498 // create result type (range)
2499 fields = TypeTuple::fields(0);
2500 const TypeTuple* range = TypeTuple::make(TypeFunc::Parms, fields);
2501
2502 return TypeFunc::make(domain, range);
2503 }