1 /*
2 * Copyright (c) 1998, 2026, Oracle and/or its affiliates. All rights reserved.
3 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
4 *
5 * This code is free software; you can redistribute it and/or modify it
6 * under the terms of the GNU General Public License version 2 only, as
7 * published by the Free Software Foundation.
8 *
9 * This code is distributed in the hope that it will be useful, but WITHOUT
10 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
11 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
12 * version 2 for more details (a copy is included in the LICENSE file that
13 * accompanied this code).
14 *
15 * You should have received a copy of the GNU General Public License version
16 * 2 along with this work; if not, write to the Free Software Foundation,
17 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
18 *
19 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
20 * or visit www.oracle.com if you need additional information or have any
21 * questions.
22 *
23 */
24
25 #include "classfile/vmClasses.hpp"
26 #include "classfile/vmSymbols.hpp"
27 #include "code/aotCodeCache.hpp"
28 #include "code/codeCache.hpp"
29 #include "code/compiledIC.hpp"
30 #include "code/nmethod.hpp"
31 #include "code/pcDesc.hpp"
32 #include "code/scopeDesc.hpp"
33 #include "code/vtableStubs.hpp"
34 #include "compiler/compilationMemoryStatistic.hpp"
35 #include "compiler/compileBroker.hpp"
36 #include "compiler/oopMap.hpp"
37 #include "gc/g1/g1HeapRegion.hpp"
38 #include "gc/shared/barrierSet.hpp"
39 #include "gc/shared/collectedHeap.hpp"
40 #include "gc/shared/gcLocker.hpp"
41 #include "interpreter/bytecode.hpp"
42 #include "interpreter/interpreter.hpp"
43 #include "interpreter/linkResolver.hpp"
44 #include "logging/log.hpp"
45 #include "logging/logStream.hpp"
46 #include "memory/oopFactory.hpp"
47 #include "memory/resourceArea.hpp"
48 #include "oops/flatArrayKlass.hpp"
49 #include "oops/flatArrayOop.inline.hpp"
50 #include "oops/inlineKlass.inline.hpp"
51 #include "oops/klass.inline.hpp"
52 #include "oops/objArrayKlass.hpp"
53 #include "oops/oop.inline.hpp"
54 #include "oops/typeArrayOop.inline.hpp"
55 #include "oops/valuePayload.inline.hpp"
56 #include "opto/ad.hpp"
57 #include "opto/addnode.hpp"
58 #include "opto/callnode.hpp"
59 #include "opto/cfgnode.hpp"
60 #include "opto/graphKit.hpp"
61 #include "opto/machnode.hpp"
62 #include "opto/matcher.hpp"
63 #include "opto/memnode.hpp"
64 #include "opto/mulnode.hpp"
65 #include "opto/output.hpp"
66 #include "opto/runtime.hpp"
67 #include "opto/subnode.hpp"
68 #include "prims/jvmtiExport.hpp"
69 #include "runtime/atomicAccess.hpp"
70 #include "runtime/frame.inline.hpp"
71 #include "runtime/handles.inline.hpp"
72 #include "runtime/interfaceSupport.inline.hpp"
73 #include "runtime/javaCalls.hpp"
74 #include "runtime/mountUnmountDisabler.hpp"
75 #include "runtime/sharedRuntime.hpp"
76 #include "runtime/signature.hpp"
77 #include "runtime/stackWatermarkSet.hpp"
78 #include "runtime/synchronizer.hpp"
79 #include "runtime/threadWXSetters.inline.hpp"
80 #include "runtime/vframe.hpp"
81 #include "runtime/vframe_hp.hpp"
82 #include "runtime/vframeArray.hpp"
83 #include "utilities/copy.hpp"
84 #include "utilities/preserveException.hpp"
85
86
87 // For debugging purposes:
88 // To force FullGCALot inside a runtime function, add the following two lines
89 //
90 // Universe::release_fullgc_alot_dummy();
91 // Universe::heap()->collect();
92 //
93 // At command line specify the parameters: -XX:+FullGCALot -XX:FullGCALotStart=100000000
94
95
// Define storage for the blob and stub fields declared in OptoRuntime by
// C2_STUBS_DO. Every field starts out nullptr and is populated later by
// OptoRuntime::generate().
#define C2_BLOB_FIELD_DEFINE(name, type) \
  type* OptoRuntime:: BLOB_FIELD_NAME(name) = nullptr;
// Stub entry fields follow the naming convention _<name>_Java.
#define C2_STUB_FIELD_NAME(name) _ ## name ## _Java
#define C2_STUB_FIELD_DEFINE(name, f, t, r) \
  address OptoRuntime:: C2_STUB_FIELD_NAME(name) = nullptr;
C2_STUBS_DO(C2_BLOB_FIELD_DEFINE, C2_STUB_FIELD_DEFINE)
#undef C2_BLOB_FIELD_DEFINE
#undef C2_STUB_FIELD_DEFINE
104
105 // This should be called in an assertion at the start of OptoRuntime routines
106 // which are entered from compiled code (all of them)
107 #ifdef ASSERT
108 static bool check_compiled_frame(JavaThread* thread) {
109 assert(thread->last_frame().is_runtime_frame(), "cannot call runtime directly from compiled code");
110 RegisterMap map(thread,
111 RegisterMap::UpdateMap::skip,
112 RegisterMap::ProcessFrames::include,
113 RegisterMap::WalkContinuation::skip);
114 frame caller = thread->last_frame().sender(&map);
115 assert(caller.is_compiled_frame(), "not being called from compiled like code");
116 return true;
117 }
118 #endif // ASSERT
119
120 /*
121 #define gen(env, var, type_func_gen, c_func, fancy_jump, pass_tls, return_pc) \
122 var = generate_stub(env, type_func_gen, CAST_FROM_FN_PTR(address, c_func), #var, fancy_jump, pass_tls, return_pc); \
123 if (var == nullptr) { return false; }
124 */
125
// Generate one blob via its dedicated generate_<name>_blob() routine and
// bail out of OptoRuntime::generate() if generation failed.
#define GEN_C2_BLOB(name, type) \
  BLOB_FIELD_NAME(name) = \
    generate_ ## name ## _blob(); \
  if (BLOB_FIELD_NAME(name) == nullptr) { return false; }

// a few helper macros to conjure up generate_stub call arguments
#define C2_STUB_FIELD_NAME(name) _ ## name ## _Java
#define C2_STUB_TYPEFUNC(name) name ## _Type
#define C2_STUB_C_FUNC(name) CAST_FROM_FN_PTR(address, name ## _C)
#define C2_STUB_ID(name) StubId:: JOIN3(c2, name, id)
#define C2_STUB_NAME(name) stub_name(C2_STUB_ID(name))

// Almost all the C functions targeted from the generated stubs are
// implemented locally to OptoRuntime with names that can be generated
// from the stub name by appending suffix '_C'. However, in two cases
// a common target method also needs to be called from shared runtime
// stubs. In these two cases the opto stubs rely on method
// implementations defined in class SharedRuntime. The following
// defines temporarily rebind the generated names to reference the
// relevant implementations.
// NOTE(review): no rebinding #defines appear between here and
// GEN_C2_STUB in this chunk — confirm the forwarders below
// (OptoRuntime::slow_arraycopy_C etc.) now serve that purpose.

// Generate one stub via generate_stub() and bail out of
// OptoRuntime::generate() if generation failed.
#define GEN_C2_STUB(name, fancy_jump, pass_tls, pass_retpc ) \
  C2_STUB_FIELD_NAME(name) = \
    generate_stub(env, \
                  C2_STUB_TYPEFUNC(name), \
                  C2_STUB_C_FUNC(name), \
                  C2_STUB_NAME(name), \
                  C2_STUB_ID(name), \
                  fancy_jump, \
                  pass_tls, \
                  pass_retpc); \
  if (C2_STUB_FIELD_NAME(name) == nullptr) { return false; } \
// Generate all C2 runtime blobs and stubs. Returns false as soon as any
// single blob or stub fails to generate (see GEN_C2_BLOB / GEN_C2_STUB).
bool OptoRuntime::generate(ciEnv* env) {

  C2_STUBS_DO(GEN_C2_BLOB, GEN_C2_STUB)
  // disallow any further c2 stub generation
  AOTCodeCache::set_c2_stubs_complete();
  return true;
}
166
// Retire the generator helper macros now that all stubs are emitted.
#undef GEN_C2_BLOB

#undef C2_STUB_FIELD_NAME
#undef C2_STUB_TYPEFUNC
#undef C2_STUB_C_FUNC
#undef C2_STUB_NAME
#undef GEN_C2_STUB

// #undef gen
176
// Cached TypeFunc signatures used by C2 for runtime and intrinsic call
// sites. All start as nullptr; they are built by the make_*_Type()
// helpers defined later in this file (the wiring that installs them is
// not visible in this chunk).
const TypeFunc* OptoRuntime::_new_instance_Type                   = nullptr;
const TypeFunc* OptoRuntime::_new_array_Type                      = nullptr;
const TypeFunc* OptoRuntime::_new_array_nozero_Type               = nullptr;
const TypeFunc* OptoRuntime::_multianewarray2_Type                = nullptr;
const TypeFunc* OptoRuntime::_multianewarray3_Type                = nullptr;
const TypeFunc* OptoRuntime::_multianewarray4_Type                = nullptr;
const TypeFunc* OptoRuntime::_multianewarray5_Type                = nullptr;
const TypeFunc* OptoRuntime::_multianewarrayN_Type                = nullptr;
const TypeFunc* OptoRuntime::_complete_monitor_enter_Type         = nullptr;
const TypeFunc* OptoRuntime::_complete_monitor_exit_Type          = nullptr;
const TypeFunc* OptoRuntime::_monitor_notify_Type                 = nullptr;
const TypeFunc* OptoRuntime::_uncommon_trap_Type                  = nullptr;
const TypeFunc* OptoRuntime::_athrow_Type                         = nullptr;
const TypeFunc* OptoRuntime::_rethrow_Type                        = nullptr;
const TypeFunc* OptoRuntime::_Math_D_D_Type                       = nullptr;
const TypeFunc* OptoRuntime::_Math_DD_D_Type                      = nullptr;
const TypeFunc* OptoRuntime::_modf_Type                           = nullptr;
const TypeFunc* OptoRuntime::_l2f_Type                            = nullptr;
const TypeFunc* OptoRuntime::_void_long_Type                      = nullptr;
const TypeFunc* OptoRuntime::_void_void_Type                      = nullptr;
const TypeFunc* OptoRuntime::_jfr_write_checkpoint_Type           = nullptr;
const TypeFunc* OptoRuntime::_flush_windows_Type                  = nullptr;
const TypeFunc* OptoRuntime::_fast_arraycopy_Type                 = nullptr;
const TypeFunc* OptoRuntime::_checkcast_arraycopy_Type            = nullptr;
const TypeFunc* OptoRuntime::_generic_arraycopy_Type              = nullptr;
const TypeFunc* OptoRuntime::_slow_arraycopy_Type                 = nullptr;
const TypeFunc* OptoRuntime::_unsafe_setmemory_Type               = nullptr;
const TypeFunc* OptoRuntime::_array_fill_Type                     = nullptr;
const TypeFunc* OptoRuntime::_array_sort_Type                     = nullptr;
const TypeFunc* OptoRuntime::_array_partition_Type                = nullptr;
const TypeFunc* OptoRuntime::_aescrypt_block_Type                 = nullptr;
const TypeFunc* OptoRuntime::_cipherBlockChaining_aescrypt_Type   = nullptr;
const TypeFunc* OptoRuntime::_electronicCodeBook_aescrypt_Type    = nullptr;
const TypeFunc* OptoRuntime::_counterMode_aescrypt_Type           = nullptr;
const TypeFunc* OptoRuntime::_galoisCounterMode_aescrypt_Type     = nullptr;
const TypeFunc* OptoRuntime::_digestBase_implCompress_with_sha3_Type      = nullptr;
const TypeFunc* OptoRuntime::_digestBase_implCompress_without_sha3_Type   = nullptr;
const TypeFunc* OptoRuntime::_digestBase_implCompressMB_with_sha3_Type    = nullptr;
const TypeFunc* OptoRuntime::_digestBase_implCompressMB_without_sha3_Type = nullptr;
const TypeFunc* OptoRuntime::_double_keccak_Type                  = nullptr;
const TypeFunc* OptoRuntime::_multiplyToLen_Type                  = nullptr;
const TypeFunc* OptoRuntime::_montgomeryMultiply_Type             = nullptr;
const TypeFunc* OptoRuntime::_montgomerySquare_Type               = nullptr;
const TypeFunc* OptoRuntime::_squareToLen_Type                    = nullptr;
const TypeFunc* OptoRuntime::_mulAdd_Type                         = nullptr;
const TypeFunc* OptoRuntime::_bigIntegerShift_Type                = nullptr;
const TypeFunc* OptoRuntime::_vectorizedMismatch_Type             = nullptr;
const TypeFunc* OptoRuntime::_ghash_processBlocks_Type            = nullptr;
const TypeFunc* OptoRuntime::_chacha20Block_Type                  = nullptr;
const TypeFunc* OptoRuntime::_kyberNtt_Type                       = nullptr;
const TypeFunc* OptoRuntime::_kyberInverseNtt_Type                = nullptr;
const TypeFunc* OptoRuntime::_kyberNttMult_Type                   = nullptr;
const TypeFunc* OptoRuntime::_kyberAddPoly_2_Type                 = nullptr;
const TypeFunc* OptoRuntime::_kyberAddPoly_3_Type                 = nullptr;
const TypeFunc* OptoRuntime::_kyber12To16_Type                    = nullptr;
const TypeFunc* OptoRuntime::_kyberBarrettReduce_Type             = nullptr;
const TypeFunc* OptoRuntime::_dilithiumAlmostNtt_Type             = nullptr;
const TypeFunc* OptoRuntime::_dilithiumAlmostInverseNtt_Type      = nullptr;
const TypeFunc* OptoRuntime::_dilithiumNttMult_Type               = nullptr;
const TypeFunc* OptoRuntime::_dilithiumMontMulByConstant_Type     = nullptr;
const TypeFunc* OptoRuntime::_dilithiumDecomposePoly_Type         = nullptr;
const TypeFunc* OptoRuntime::_base64_encodeBlock_Type             = nullptr;
const TypeFunc* OptoRuntime::_base64_decodeBlock_Type             = nullptr;
const TypeFunc* OptoRuntime::_string_IndexOf_Type                 = nullptr;
const TypeFunc* OptoRuntime::_poly1305_processBlocks_Type         = nullptr;
const TypeFunc* OptoRuntime::_intpoly_montgomeryMult_P256_Type    = nullptr;
const TypeFunc* OptoRuntime::_intpoly_assign_Type                 = nullptr;
const TypeFunc* OptoRuntime::_updateBytesCRC32_Type               = nullptr;
const TypeFunc* OptoRuntime::_updateBytesCRC32C_Type              = nullptr;
const TypeFunc* OptoRuntime::_updateBytesAdler32_Type             = nullptr;
const TypeFunc* OptoRuntime::_osr_end_Type                        = nullptr;
const TypeFunc* OptoRuntime::_register_finalizer_Type             = nullptr;
const TypeFunc* OptoRuntime::_vthread_transition_Type             = nullptr;
#if INCLUDE_JFR
const TypeFunc* OptoRuntime::_class_id_load_barrier_Type          = nullptr;
#endif // INCLUDE_JFR
const TypeFunc* OptoRuntime::_dtrace_method_entry_exit_Type       = nullptr;
const TypeFunc* OptoRuntime::_dtrace_object_alloc_Type            = nullptr;
255
256 // Helper method to do generation of RunTimeStub's
257 address OptoRuntime::generate_stub(ciEnv* env,
258 TypeFunc_generator gen, address C_function,
259 const char *name, StubId stub_id,
260 int is_fancy_jump, bool pass_tls,
261 bool return_pc) {
262
263 // Matching the default directive, we currently have no method to match.
264 DirectiveSet* directive = DirectivesStack::getDefaultDirective(CompileBroker::compiler(CompLevel_full_optimization));
265 CompilationMemoryStatisticMark cmsm(directive);
266 ResourceMark rm;
267 Compile C(env, gen, C_function, name, stub_id, is_fancy_jump, pass_tls, return_pc, directive);
268 DirectivesStack::release(directive);
269 return C.stub_entry_point();
270 }
271
272 const char* OptoRuntime::stub_name(address entry) {
273 #ifndef PRODUCT
274 CodeBlob* cb = CodeCache::find_blob(entry);
275 RuntimeStub* rs =(RuntimeStub *)cb;
276 assert(rs != nullptr && rs->is_runtime_stub(), "not a runtime stub");
277 return rs->name();
278 #else
279 // Fast implementation for product mode (maybe it should be inlined too)
280 return "runtime stub";
281 #endif
282 }
283
// local methods passed as arguments to stub generator that forward
// control to corresponding JRT methods of SharedRuntime

// Thin forwarder: the slow arraycopy path is shared with the interpreter
// and C1, so the real work lives in SharedRuntime.
void OptoRuntime::slow_arraycopy_C(oopDesc* src, jint src_pos,
                                   oopDesc* dest, jint dest_pos,
                                   jint length, JavaThread* thread) {
  SharedRuntime::slow_arraycopy_C(src, src_pos, dest, dest_pos, length, thread);
}
292
// Thin forwarder: monitor entry is shared runtime functionality; see
// SharedRuntime::complete_monitor_locking_C for the actual logic.
void OptoRuntime::complete_monitor_locking_C(oopDesc* obj, BasicLock* lock, JavaThread* current) {
  SharedRuntime::complete_monitor_locking_C(obj, lock, current);
}
296
297
298 //=============================================================================
299 // Opto compiler runtime routines
300 //=============================================================================
301
302
303 //=============================allocation======================================
304 // We failed the fast-path allocation. Now we need to do a scavenge or GC
305 // and try allocation again.
306
// object allocation
// Slow-path instance allocation, called when compiled code fails the
// inline fast-path allocation (e.g. TLAB full). May trigger GC and class
// initialization, and may deoptimize the caller.
JRT_BLOCK_ENTRY(void, OptoRuntime::new_instance_C(Klass* klass, JavaThread* current))
  JRT_BLOCK;
#ifndef PRODUCT
  SharedRuntime::_new_instance_ctr++; // new instance requires GC
#endif
  assert(check_compiled_frame(current), "incorrect caller");

  // These checks are cheap to make and support reflective allocation.
  int lh = klass->layout_helper();
  if (Klass::layout_helper_needs_slow_path(lh) || !InstanceKlass::cast(klass)->is_initialized()) {
    Handle holder(current, klass->klass_holder()); // keep the klass alive
    klass->check_valid_for_instantiation(false, THREAD);
    if (!HAS_PENDING_EXCEPTION) {
      // Run static initialization if needed; can throw and can safepoint.
      InstanceKlass::cast(klass)->initialize(THREAD);
    }
  }

  if (!HAS_PENDING_EXCEPTION) {
    // Scavenge and allocate an instance.
    Handle holder(current, klass->klass_holder()); // keep the klass alive
    oop result = InstanceKlass::cast(klass)->allocate_instance(THREAD);
    current->set_vm_result_oop(result);

    // Pass oops back through thread local storage. Our apparent type to Java
    // is that we return an oop, but we can block on exit from this routine and
    // a GC can trash the oop in C's return register. The generated stub will
    // fetch the oop from TLS after any possible GC.
  }

  // Presumably deoptimizes the caller only when an exception is pending,
  // so the exception is handled at the interpreter level — TODO confirm.
  deoptimize_caller_frame(current, HAS_PENDING_EXCEPTION);
  JRT_BLOCK_END;

  // inform GC that we won't do card marks for initializing writes.
  SharedRuntime::on_slowpath_allocation_exit(current);
JRT_END
343
344
// array allocation
// Slow-path array allocation (zeroing variant). For null-free (flat-value)
// object arrays a non-null init_val is broadcast into every element.
JRT_BLOCK_ENTRY(void, OptoRuntime::new_array_C(Klass* array_type, int len, oopDesc* init_val, JavaThread* current))
  JRT_BLOCK;
#ifndef PRODUCT
  SharedRuntime::_new_array_ctr++; // new array requires GC
#endif
  assert(check_compiled_frame(current), "incorrect caller");

  // Scavenge and allocate an instance.
  oop result;
  Handle h_init_val(current, init_val); // keep the init_val object alive

  if (array_type->is_typeArray_klass()) {
    // The oopFactory likes to work with the element type.
    // (We could bypass the oopFactory, since it doesn't add much value.)
    BasicType elem_type = TypeArrayKlass::cast(array_type)->element_type();
    result = oopFactory::new_typeArray(elem_type, len, THREAD);
  } else {
    Handle holder(current, array_type->klass_holder()); // keep the array klass alive
    ObjArrayKlass* oak = ObjArrayKlass::cast(array_type);
    result = oopFactory::new_objArray(oak->element_klass(), len, oak->properties(), THREAD);
    if (!HAS_PENDING_EXCEPTION && array_type->is_null_free_array_klass() && !h_init_val.is_null()) {
      // Null-free arrays need to be initialized
#ifdef ASSERT
      ObjArrayKlass* result_oak = ObjArrayKlass::cast(result->klass());
      assert(result_oak->is_null_free_array_klass(), "Sanity check");
#endif
      // Store the initial value into every element slot.
      for (int i = 0; i < len; i++) {
        ((objArrayOop)result)->obj_at_put(i, h_init_val());
      }
    }
  }

  // Pass oops back through thread local storage. Our apparent type to Java
  // is that we return an oop, but we can block on exit from this routine and
  // a GC can trash the oop in C's return register. The generated stub will
  // fetch the oop from TLS after any possible GC.
  deoptimize_caller_frame(current, HAS_PENDING_EXCEPTION);
  current->set_vm_result_oop(result);
  JRT_BLOCK_END;

  // inform GC that we won't do card marks for initializing writes.
  SharedRuntime::on_slowpath_allocation_exit(current);
JRT_END
389
// array allocation without zeroing
// Slow-path primitive-array allocation that skips zeroing; the compiled
// caller promises to initialize every element. If the caller ends up
// deoptimized, that promise is void and we must zero the body here.
JRT_BLOCK_ENTRY(void, OptoRuntime::new_array_nozero_C(Klass* array_type, int len, JavaThread* current))
  JRT_BLOCK;
#ifndef PRODUCT
  SharedRuntime::_new_array_ctr++; // new array requires GC
#endif
  assert(check_compiled_frame(current), "incorrect caller");

  // Scavenge and allocate an instance.
  oop result;

  assert(array_type->is_typeArray_klass(), "should be called only for type array");
  // The oopFactory likes to work with the element type.
  BasicType elem_type = TypeArrayKlass::cast(array_type)->element_type();
  result = oopFactory::new_typeArray_nozero(elem_type, len, THREAD);

  // Pass oops back through thread local storage. Our apparent type to Java
  // is that we return an oop, but we can block on exit from this routine and
  // a GC can trash the oop in C's return register. The generated stub will
  // fetch the oop from TLS after any possible GC.
  deoptimize_caller_frame(current, HAS_PENDING_EXCEPTION);
  current->set_vm_result_oop(result);
  JRT_BLOCK_END;


  // inform GC that we won't do card marks for initializing writes.
  SharedRuntime::on_slowpath_allocation_exit(current);

  // Re-fetch the array from TLS: a GC inside the block above may have
  // moved it, so the local 'result' from that scope cannot be reused.
  oop result = current->vm_result_oop();
  if ((len > 0) && (result != nullptr) &&
      is_deoptimized_caller_frame(current)) {
    // Zero array here if the caller is deoptimized.
    const size_t size = TypeArrayKlass::cast(array_type)->oop_size(result);
    BasicType elem_type = TypeArrayKlass::cast(array_type)->element_type();
    size_t hs_bytes = arrayOopDesc::base_offset_in_bytes(elem_type);
    assert(is_aligned(hs_bytes, BytesPerInt), "must be 4 byte aligned");
    HeapWord* obj = cast_from_oop<HeapWord*>(result);
    // If the element data starts on a 4-byte (not 8-byte) boundary, clear
    // the odd leading word so the bulk fill below can work in longs.
    if (!is_aligned(hs_bytes, BytesPerLong)) {
      *reinterpret_cast<jint*>(reinterpret_cast<char*>(obj) + hs_bytes) = 0;
      hs_bytes += BytesPerInt;
    }

    // Optimized zeroing.
    assert(is_aligned(hs_bytes, BytesPerLong), "must be 8-byte aligned");
    const size_t aligned_hs = hs_bytes / BytesPerLong;
    Copy::fill_to_aligned_words(obj+aligned_hs, size-aligned_hs);
  }

JRT_END
439
440 // Note: multianewarray for one dimension is handled inline by GraphKit::new_array.
441
// multianewarray for 2 dimensions
JRT_ENTRY(void, OptoRuntime::multianewarray2_C(Klass* elem_type, int len1, int len2, JavaThread* current))
#ifndef PRODUCT
  SharedRuntime::_multi2_ctr++; // multianewarray for 2 dimensions
#endif
  assert(check_compiled_frame(current), "incorrect caller");
  assert(elem_type->is_klass(), "not a class");
  jint dims[2];
  dims[0] = len1;
  dims[1] = len2;
  Handle holder(current, elem_type->klass_holder()); // keep the klass alive
  oop obj = ArrayKlass::cast(elem_type)->multi_allocate(2, dims, THREAD);
  deoptimize_caller_frame(current, HAS_PENDING_EXCEPTION);
  // Result is handed back through TLS; see new_instance_C for why.
  current->set_vm_result_oop(obj);
JRT_END
457
// multianewarray for 3 dimensions
JRT_ENTRY(void, OptoRuntime::multianewarray3_C(Klass* elem_type, int len1, int len2, int len3, JavaThread* current))
#ifndef PRODUCT
  SharedRuntime::_multi3_ctr++; // multianewarray for 3 dimensions
#endif
  assert(check_compiled_frame(current), "incorrect caller");
  assert(elem_type->is_klass(), "not a class");
  jint dims[3];
  dims[0] = len1;
  dims[1] = len2;
  dims[2] = len3;
  Handle holder(current, elem_type->klass_holder()); // keep the klass alive
  oop obj = ArrayKlass::cast(elem_type)->multi_allocate(3, dims, THREAD);
  deoptimize_caller_frame(current, HAS_PENDING_EXCEPTION);
  // Result is handed back through TLS; see new_instance_C for why.
  current->set_vm_result_oop(obj);
JRT_END
474
// multianewarray for 4 dimensions
JRT_ENTRY(void, OptoRuntime::multianewarray4_C(Klass* elem_type, int len1, int len2, int len3, int len4, JavaThread* current))
#ifndef PRODUCT
  SharedRuntime::_multi4_ctr++; // multianewarray for 4 dimensions
#endif
  assert(check_compiled_frame(current), "incorrect caller");
  assert(elem_type->is_klass(), "not a class");
  jint dims[4];
  dims[0] = len1;
  dims[1] = len2;
  dims[2] = len3;
  dims[3] = len4;
  Handle holder(current, elem_type->klass_holder()); // keep the klass alive
  oop obj = ArrayKlass::cast(elem_type)->multi_allocate(4, dims, THREAD);
  deoptimize_caller_frame(current, HAS_PENDING_EXCEPTION);
  // Result is handed back through TLS; see new_instance_C for why.
  current->set_vm_result_oop(obj);
JRT_END
492
// multianewarray for 5 dimensions
JRT_ENTRY(void, OptoRuntime::multianewarray5_C(Klass* elem_type, int len1, int len2, int len3, int len4, int len5, JavaThread* current))
#ifndef PRODUCT
  SharedRuntime::_multi5_ctr++; // multianewarray for 5 dimensions
#endif
  assert(check_compiled_frame(current), "incorrect caller");
  assert(elem_type->is_klass(), "not a class");
  jint dims[5];
  dims[0] = len1;
  dims[1] = len2;
  dims[2] = len3;
  dims[3] = len4;
  dims[4] = len5;
  Handle holder(current, elem_type->klass_holder()); // keep the klass alive
  oop obj = ArrayKlass::cast(elem_type)->multi_allocate(5, dims, THREAD);
  deoptimize_caller_frame(current, HAS_PENDING_EXCEPTION);
  // Result is handed back through TLS; see new_instance_C for why.
  current->set_vm_result_oop(obj);
JRT_END
511
// multianewarray for more than 5 dimensions: the dimension lengths arrive
// packed in a Java int[] rather than as individual arguments.
JRT_ENTRY(void, OptoRuntime::multianewarrayN_C(Klass* elem_type, arrayOopDesc* dims, JavaThread* current))
  assert(check_compiled_frame(current), "incorrect caller");
  assert(elem_type->is_klass(), "not a class");
  assert(oop(dims)->is_typeArray(), "not an array");

  ResourceMark rm;
  jint len = dims->length();
  assert(len > 0, "Dimensions array should contain data");
  // Copy the dimensions out of the Java heap: multi_allocate can GC, which
  // would invalidate a raw pointer into the dims array.
  jint *c_dims = NEW_RESOURCE_ARRAY(jint, len);
  ArrayAccess<>::arraycopy_to_native<>(dims, typeArrayOopDesc::element_offset<jint>(0),
                                       c_dims, len);

  Handle holder(current, elem_type->klass_holder()); // keep the klass alive
  oop obj = ArrayKlass::cast(elem_type)->multi_allocate(len, c_dims, THREAD);
  deoptimize_caller_frame(current, HAS_PENDING_EXCEPTION);
  // Result is handed back through TLS; see new_instance_C for why.
  current->set_vm_result_oop(obj);
JRT_END
529
// Object.notify() slow path from compiled code. Tries a quick no-state-
// transition notify first and only enters the VM when that fails.
JRT_BLOCK_ENTRY(void, OptoRuntime::monitor_notify_C(oopDesc* obj, JavaThread* current))

  // Very few notify/notifyAll operations find any threads on the waitset, so
  // the dominant fast-path is to simply return.
  // Relatedly, it's critical that notify/notifyAll be fast in order to
  // reduce lock hold times.
  if (!SafepointSynchronize::is_synchronizing()) {
    if (ObjectSynchronizer::quick_notify(obj, current, false)) {
      return;
    }
  }

  // This is the case the fast-path above isn't provisioned to handle.
  // The fast-path is designed to handle frequently arising cases in an efficient manner.
  // (The fast-path is just a degenerate variant of the slow-path).
  // Perform the dreaded state transition and pass control into the slow-path.
  JRT_BLOCK;
  Handle h_obj(current, obj);
  ObjectSynchronizer::notify(h_obj, CHECK);
  JRT_BLOCK_END;
JRT_END
551
// Object.notifyAll() slow path from compiled code; mirrors monitor_notify_C
// with the all=true variant of quick_notify.
JRT_BLOCK_ENTRY(void, OptoRuntime::monitor_notifyAll_C(oopDesc* obj, JavaThread* current))

  if (!SafepointSynchronize::is_synchronizing() ) {
    if (ObjectSynchronizer::quick_notify(obj, current, true)) {
      return;
    }
  }

  // This is the case the fast-path above isn't provisioned to handle.
  // The fast-path is designed to handle frequently arising cases in an efficient manner.
  // (The fast-path is just a degenerate variant of the slow-path).
  // Perform the dreaded state transition and pass control into the slow-path.
  JRT_BLOCK;
  Handle h_obj(current, obj);
  ObjectSynchronizer::notifyall(h_obj, CHECK);
  JRT_BLOCK_END;
JRT_END
569
// Finish the initial mount transition of a freshly started virtual thread.
// The is_mount parameter is unused here: a thread-start transition is
// necessarily a mount, so true is passed explicitly.
// NOTE(review): confirm the unused is_mount parameter is intentional (it
// likely keeps the signature uniform with the other transition entries).
JRT_ENTRY(void, OptoRuntime::vthread_end_first_transition_C(oopDesc* vt, jboolean is_mount, JavaThread* current))
  MountUnmountDisabler::end_transition(current, vt, true /*is_mount*/, true /*is_thread_start*/);
JRT_END
573
// Begin the final transition of a terminating virtual thread. is_mount is
// ignored: the end-of-life transition is always an unmount (false).
JRT_ENTRY(void, OptoRuntime::vthread_start_final_transition_C(oopDesc* vt, jboolean is_mount, JavaThread* current))
  // Clear the in-transition markers on both the java.lang.Thread oop and
  // the carrier JavaThread before starting the final transition.
  java_lang_Thread::set_is_in_vthread_transition(vt, false);
  current->set_is_in_vthread_transition(false);
  MountUnmountDisabler::start_transition(current, vt, false /*is_mount*/, true /*is_thread_end*/);
JRT_END
579
// Begin a regular mount/unmount transition for a virtual thread (direction
// given by is_mount).
JRT_ENTRY(void, OptoRuntime::vthread_start_transition_C(oopDesc* vt, jboolean is_mount, JavaThread* current))
  // Clear the in-transition markers left over from the previous transition
  // on both the java.lang.Thread oop and the carrier JavaThread.
  java_lang_Thread::set_is_in_vthread_transition(vt, false);
  current->set_is_in_vthread_transition(false);
  MountUnmountDisabler::start_transition(current, vt, is_mount, false /*is_thread_end*/);
JRT_END
585
// Finish a regular mount/unmount transition for a virtual thread
// (direction given by is_mount).
JRT_ENTRY(void, OptoRuntime::vthread_end_transition_C(oopDesc* vt, jboolean is_mount, JavaThread* current))
  MountUnmountDisabler::end_transition(current, vt, is_mount, false /*is_thread_start*/);
JRT_END
589
590 static const TypeFunc* make_new_instance_Type() {
591 // create input type (domain)
592 const Type **fields = TypeTuple::fields(1);
593 fields[TypeFunc::Parms+0] = TypeInstPtr::NOTNULL; // Klass to be allocated
594 const TypeTuple *domain = TypeTuple::make(TypeFunc::Parms+1, fields);
595
596 // create result type (range)
597 fields = TypeTuple::fields(1);
598 fields[TypeFunc::Parms+0] = TypeRawPtr::NOTNULL; // Returned oop
599
600 const TypeTuple *range = TypeTuple::make(TypeFunc::Parms+1, fields);
601
602 return TypeFunc::make(domain, range);
603 }
604
605 static const TypeFunc* make_vthread_transition_Type() {
606 // create input type (domain)
607 const Type **fields = TypeTuple::fields(2);
608 fields[TypeFunc::Parms+0] = TypeInstPtr::NOTNULL; // VirtualThread oop
609 fields[TypeFunc::Parms+1] = TypeInt::BOOL; // jboolean
610 const TypeTuple *domain = TypeTuple::make(TypeFunc::Parms+2,fields);
611
612 // no result type needed
613 fields = TypeTuple::fields(1);
614 fields[TypeFunc::Parms+0] = nullptr; // void
615 const TypeTuple* range = TypeTuple::make(TypeFunc::Parms, fields);
616
617 return TypeFunc::make(domain,range);
618 }
619
620 static const TypeFunc* make_athrow_Type() {
621 // create input type (domain)
622 const Type **fields = TypeTuple::fields(1);
623 fields[TypeFunc::Parms+0] = TypeInstPtr::NOTNULL; // Klass to be allocated
624 const TypeTuple *domain = TypeTuple::make(TypeFunc::Parms+1, fields);
625
626 // create result type (range)
627 fields = TypeTuple::fields(0);
628
629 const TypeTuple *range = TypeTuple::make(TypeFunc::Parms+0, fields);
630
631 return TypeFunc::make(domain, range);
632 }
633
634 static const TypeFunc* make_new_array_Type() {
635 // create input type (domain)
636 const Type **fields = TypeTuple::fields(3);
637 fields[TypeFunc::Parms+0] = TypeInstPtr::NOTNULL; // element klass
638 fields[TypeFunc::Parms+1] = TypeInt::INT; // array size
639 fields[TypeFunc::Parms+2] = TypeInstPtr::NOTNULL; // init value
640 const TypeTuple *domain = TypeTuple::make(TypeFunc::Parms+3, fields);
641
642 // create result type (range)
643 fields = TypeTuple::fields(1);
644 fields[TypeFunc::Parms+0] = TypeRawPtr::NOTNULL; // Returned oop
645
646 const TypeTuple *range = TypeTuple::make(TypeFunc::Parms+1, fields);
647
648 return TypeFunc::make(domain, range);
649 }
650
651 static const TypeFunc* make_new_array_nozero_Type() {
652 // create input type (domain)
653 const Type **fields = TypeTuple::fields(2);
654 fields[TypeFunc::Parms+0] = TypeInstPtr::NOTNULL; // element klass
655 fields[TypeFunc::Parms+1] = TypeInt::INT; // array size
656 const TypeTuple *domain = TypeTuple::make(TypeFunc::Parms+2, fields);
657
658 // create result type (range)
659 fields = TypeTuple::fields(1);
660 fields[TypeFunc::Parms+0] = TypeRawPtr::NOTNULL; // Returned oop
661
662 const TypeTuple *range = TypeTuple::make(TypeFunc::Parms+1, fields);
663
664 return TypeFunc::make(domain, range);
665 }
666
667 const TypeFunc* OptoRuntime::multianewarray_Type(int ndim) {
668 // create input type (domain)
669 const int nargs = ndim + 1;
670 const Type **fields = TypeTuple::fields(nargs);
671 fields[TypeFunc::Parms+0] = TypeInstPtr::NOTNULL; // element klass
672 for( int i = 1; i < nargs; i++ )
673 fields[TypeFunc::Parms + i] = TypeInt::INT; // array size
674 const TypeTuple *domain = TypeTuple::make(TypeFunc::Parms+nargs, fields);
675
676 // create result type (range)
677 fields = TypeTuple::fields(1);
678 fields[TypeFunc::Parms+0] = TypeRawPtr::NOTNULL; // Returned oop
679 const TypeTuple *range = TypeTuple::make(TypeFunc::Parms+1, fields);
680
681 return TypeFunc::make(domain, range);
682 }
683
684 static const TypeFunc* make_multianewarrayN_Type() {
685 // create input type (domain)
686 const Type **fields = TypeTuple::fields(2);
687 fields[TypeFunc::Parms+0] = TypeInstPtr::NOTNULL; // element klass
688 fields[TypeFunc::Parms+1] = TypeInstPtr::NOTNULL; // array of dim sizes
689 const TypeTuple *domain = TypeTuple::make(TypeFunc::Parms+2, fields);
690
691 // create result type (range)
692 fields = TypeTuple::fields(1);
693 fields[TypeFunc::Parms+0] = TypeRawPtr::NOTNULL; // Returned oop
694 const TypeTuple *range = TypeTuple::make(TypeFunc::Parms+1, fields);
695
696 return TypeFunc::make(domain, range);
697 }
698
699 static const TypeFunc* make_uncommon_trap_Type() {
700 // create input type (domain)
701 const Type **fields = TypeTuple::fields(1);
702 fields[TypeFunc::Parms+0] = TypeInt::INT; // trap_reason (deopt reason and action)
703 const TypeTuple *domain = TypeTuple::make(TypeFunc::Parms+1, fields);
704
705 // create result type (range)
706 fields = TypeTuple::fields(0);
707 const TypeTuple *range = TypeTuple::make(TypeFunc::Parms+0, fields);
708
709 return TypeFunc::make(domain, range);
710 }
711
712 //-----------------------------------------------------------------------------
713 // Monitor Handling
714
715 static const TypeFunc* make_complete_monitor_enter_Type() {
716 // create input type (domain)
717 const Type **fields = TypeTuple::fields(2);
718 fields[TypeFunc::Parms+0] = TypeInstPtr::NOTNULL; // Object to be Locked
719 fields[TypeFunc::Parms+1] = TypeRawPtr::BOTTOM; // Address of stack location for lock
720 const TypeTuple *domain = TypeTuple::make(TypeFunc::Parms+2,fields);
721
722 // create result type (range)
723 fields = TypeTuple::fields(0);
724
725 const TypeTuple *range = TypeTuple::make(TypeFunc::Parms+0,fields);
726
727 return TypeFunc::make(domain, range);
728 }
729
730 //-----------------------------------------------------------------------------
731
732 static const TypeFunc* make_complete_monitor_exit_Type() {
733 // create input type (domain)
734 const Type **fields = TypeTuple::fields(3);
735 fields[TypeFunc::Parms+0] = TypeInstPtr::NOTNULL; // Object to be Locked
736 fields[TypeFunc::Parms+1] = TypeRawPtr::BOTTOM; // Address of stack location for lock - BasicLock
737 fields[TypeFunc::Parms+2] = TypeRawPtr::BOTTOM; // Thread pointer (Self)
738 const TypeTuple *domain = TypeTuple::make(TypeFunc::Parms+3, fields);
739
740 // create result type (range)
741 fields = TypeTuple::fields(0);
742
743 const TypeTuple *range = TypeTuple::make(TypeFunc::Parms+0, fields);
744
745 return TypeFunc::make(domain, range);
746 }
747
748 static const TypeFunc* make_monitor_notify_Type() {
749 // create input type (domain)
750 const Type **fields = TypeTuple::fields(1);
751 fields[TypeFunc::Parms+0] = TypeInstPtr::NOTNULL; // Object to be Locked
752 const TypeTuple *domain = TypeTuple::make(TypeFunc::Parms+1, fields);
753
754 // create result type (range)
755 fields = TypeTuple::fields(0);
756 const TypeTuple *range = TypeTuple::make(TypeFunc::Parms+0, fields);
757 return TypeFunc::make(domain, range);
758 }
759
760 static const TypeFunc* make_flush_windows_Type() {
761 // create input type (domain)
762 const Type** fields = TypeTuple::fields(1);
763 fields[TypeFunc::Parms+0] = nullptr; // void
764 const TypeTuple *domain = TypeTuple::make(TypeFunc::Parms, fields);
765
766 // create result type
767 fields = TypeTuple::fields(1);
768 fields[TypeFunc::Parms+0] = nullptr; // void
769 const TypeTuple *range = TypeTuple::make(TypeFunc::Parms, fields);
770
771 return TypeFunc::make(domain, range);
772 }
773
774 static const TypeFunc* make_l2f_Type() {
775 // create input type (domain)
776 const Type **fields = TypeTuple::fields(2);
777 fields[TypeFunc::Parms+0] = TypeLong::LONG;
778 fields[TypeFunc::Parms+1] = Type::HALF;
779 const TypeTuple *domain = TypeTuple::make(TypeFunc::Parms+2, fields);
780
781 // create result type (range)
782 fields = TypeTuple::fields(1);
783 fields[TypeFunc::Parms+0] = Type::FLOAT;
784 const TypeTuple *range = TypeTuple::make(TypeFunc::Parms+1, fields);
785
786 return TypeFunc::make(domain, range);
787 }
788
789 static const TypeFunc* make_modf_Type() {
790 const Type **fields = TypeTuple::fields(2);
791 fields[TypeFunc::Parms+0] = Type::FLOAT;
792 fields[TypeFunc::Parms+1] = Type::FLOAT;
793 const TypeTuple *domain = TypeTuple::make(TypeFunc::Parms+2, fields);
794
795 // create result type (range)
796 fields = TypeTuple::fields(1);
797 fields[TypeFunc::Parms+0] = Type::FLOAT;
798
799 const TypeTuple *range = TypeTuple::make(TypeFunc::Parms+1, fields);
800
801 return TypeFunc::make(domain, range);
802 }
803
// Call type for a unary double math routine: double -> double.
// A double occupies two type slots; Type::HALF marks the second slot.
static const TypeFunc* make_Math_D_D_Type() {
  // create input type (domain)
  const Type **fields = TypeTuple::fields(2);
  // one double argument (two slots)
  fields[TypeFunc::Parms+0] = Type::DOUBLE;
  fields[TypeFunc::Parms+1] = Type::HALF;
  const TypeTuple *domain = TypeTuple::make(TypeFunc::Parms+2, fields);

  // create result type (range): double result (two slots)
  fields = TypeTuple::fields(2);
  fields[TypeFunc::Parms+0] = Type::DOUBLE;
  fields[TypeFunc::Parms+1] = Type::HALF;
  const TypeTuple *range = TypeTuple::make(TypeFunc::Parms+2, fields);

  return TypeFunc::make(domain, range);
}
820
// Call type for a vector math routine: num_arg inputs of vector type
// in_type, returning a single vector of type out_type.
const TypeFunc* OptoRuntime::Math_Vector_Vector_Type(uint num_arg, const TypeVect* in_type, const TypeVect* out_type) {
  // create input type (domain)
  const Type **fields = TypeTuple::fields(num_arg);
  // all inputs share the same vector type
  assert(num_arg > 0, "must have at least 1 input");
  for (uint i = 0; i < num_arg; i++) {
    fields[TypeFunc::Parms+i] = in_type;
  }
  const TypeTuple *domain = TypeTuple::make(TypeFunc::Parms+num_arg, fields);

  // create result type (range): one output vector
  const uint num_ret = 1;
  fields = TypeTuple::fields(num_ret);
  fields[TypeFunc::Parms+0] = out_type;
  const TypeTuple *range = TypeTuple::make(TypeFunc::Parms+num_ret, fields);

  return TypeFunc::make(domain, range);
}
839
840 static const TypeFunc* make_Math_DD_D_Type() {
841 const Type **fields = TypeTuple::fields(4);
842 fields[TypeFunc::Parms+0] = Type::DOUBLE;
843 fields[TypeFunc::Parms+1] = Type::HALF;
844 fields[TypeFunc::Parms+2] = Type::DOUBLE;
845 fields[TypeFunc::Parms+3] = Type::HALF;
846 const TypeTuple *domain = TypeTuple::make(TypeFunc::Parms+4, fields);
847
848 // create result type (range)
849 fields = TypeTuple::fields(2);
850 fields[TypeFunc::Parms+0] = Type::DOUBLE;
851 fields[TypeFunc::Parms+1] = Type::HALF;
852 const TypeTuple *range = TypeTuple::make(TypeFunc::Parms+2, fields);
853
854 return TypeFunc::make(domain, range);
855 }
856
857 //-------------- currentTimeMillis, currentTimeNanos, etc
858
859 static const TypeFunc* make_void_long_Type() {
860 // create input type (domain)
861 const Type **fields = TypeTuple::fields(0);
862 const TypeTuple *domain = TypeTuple::make(TypeFunc::Parms+0, fields);
863
864 // create result type (range)
865 fields = TypeTuple::fields(2);
866 fields[TypeFunc::Parms+0] = TypeLong::LONG;
867 fields[TypeFunc::Parms+1] = Type::HALF;
868 const TypeTuple *range = TypeTuple::make(TypeFunc::Parms+2, fields);
869
870 return TypeFunc::make(domain, range);
871 }
872
873 static const TypeFunc* make_void_void_Type() {
874 // create input type (domain)
875 const Type **fields = TypeTuple::fields(0);
876 const TypeTuple *domain = TypeTuple::make(TypeFunc::Parms+0, fields);
877
878 // create result type (range)
879 fields = TypeTuple::fields(0);
880 const TypeTuple *range = TypeTuple::make(TypeFunc::Parms+0, fields);
881 return TypeFunc::make(domain, range);
882 }
883
884 static const TypeFunc* make_jfr_write_checkpoint_Type() {
885 // create input type (domain)
886 const Type **fields = TypeTuple::fields(0);
887 const TypeTuple *domain = TypeTuple::make(TypeFunc::Parms, fields);
888
889 // create result type (range)
890 fields = TypeTuple::fields(1);
891 fields[TypeFunc::Parms] = TypeInstPtr::BOTTOM;
892 const TypeTuple *range = TypeTuple::make(TypeFunc::Parms + 1, fields);
893 return TypeFunc::make(domain, range);
894 }
895
896
// Takes as parameters:
//   void *dest
//   size_t size
//   uchar byte
// On LP64 the size_t occupies two slots (TypeX_X plus a HALF).

static const TypeFunc* make_setmemory_Type() {
  // create input type (domain)
  int argcnt = NOT_LP64(3) LP64_ONLY(4);
  const Type** fields = TypeTuple::fields(argcnt);
  int argp = TypeFunc::Parms;
  fields[argp++] = TypePtr::NOTNULL;      // dest
  fields[argp++] = TypeX_X;               // size
  LP64_ONLY(fields[argp++] = Type::HALF); // other half of size
  fields[argp++] = TypeInt::UBYTE;        // bytevalue
  assert(argp == TypeFunc::Parms+argcnt, "correct decoding");
  const TypeTuple* domain = TypeTuple::make(TypeFunc::Parms+argcnt, fields);

  // no result type needed
  fields = TypeTuple::fields(1);
  fields[TypeFunc::Parms+0] = nullptr; // void
  const TypeTuple* range = TypeTuple::make(TypeFunc::Parms, fields);
  return TypeFunc::make(domain, range);
}
920
// arraycopy stub variations. The comment after each constant gives the
// stub's C-level signature; "checkcast" and "generic" variants return an
// int status, the others return void (see make_arraycopy_Type below).
enum ArrayCopyType {
  ac_fast, // void(ptr, ptr, size_t)
  ac_checkcast, // int(ptr, ptr, size_t, size_t, ptr)
  ac_slow, // void(ptr, int, ptr, int, int)
  ac_generic // int(ptr, int, ptr, int, int)
};
928
// Build the call type for one arraycopy stub variant (see ArrayCopyType):
//   ac_fast:      (src, dest, size)                      -> void
//   ac_checkcast: (src, dest, size, size, super_klass)   -> int status
//   ac_slow:      (src, src_pos, dest, dest_pos, length) -> void
//   ac_generic:   (src, src_pos, dest, dest_pos, length) -> int status
// size arguments are size_t (two slots on LP64); positions/length are ints.
static const TypeFunc* make_arraycopy_Type(ArrayCopyType act) {
  // create input type (domain)
  int num_args = (act == ac_fast ? 3 : 5);
  // number of size_t-typed length arguments for this variant
  int num_size_args = (act == ac_fast ? 1 : act == ac_checkcast ? 2 : 0);
  int argcnt = num_args;
  LP64_ONLY(argcnt += num_size_args); // halfwords for lengths
  const Type** fields = TypeTuple::fields(argcnt);
  int argp = TypeFunc::Parms;
  fields[argp++] = TypePtr::NOTNULL; // src
  if (num_size_args == 0) {
    // slow/generic variants pass explicit int positions
    fields[argp++] = TypeInt::INT; // src_pos
  }
  fields[argp++] = TypePtr::NOTNULL; // dest
  if (num_size_args == 0) {
    fields[argp++] = TypeInt::INT; // dest_pos
    fields[argp++] = TypeInt::INT; // length
  }
  while (num_size_args-- > 0) {
    fields[argp++] = TypeX_X; // size in whatevers (size_t)
    LP64_ONLY(fields[argp++] = Type::HALF); // other half of long length
  }
  if (act == ac_checkcast) {
    fields[argp++] = TypePtr::NOTNULL; // super_klass
  }
  assert(argp == TypeFunc::Parms+argcnt, "correct decoding of act");
  const TypeTuple* domain = TypeTuple::make(TypeFunc::Parms+argcnt, fields);

  // create result type if needed
  int retcnt = (act == ac_checkcast || act == ac_generic ? 1 : 0);
  fields = TypeTuple::fields(1);
  if (retcnt == 0)
    fields[TypeFunc::Parms+0] = nullptr; // void
  else
    fields[TypeFunc::Parms+0] = TypeInt::INT; // status result, if needed
  const TypeTuple* range = TypeTuple::make(TypeFunc::Parms+retcnt, fields);
  return TypeFunc::make(domain, range);
}
966
967 static const TypeFunc* make_array_fill_Type() {
968 const Type** fields;
969 int argp = TypeFunc::Parms;
970 // create input type (domain): pointer, int, size_t
971 fields = TypeTuple::fields(3 LP64_ONLY( + 1));
972 fields[argp++] = TypePtr::NOTNULL;
973 fields[argp++] = TypeInt::INT;
974 fields[argp++] = TypeX_X; // size in whatevers (size_t)
975 LP64_ONLY(fields[argp++] = Type::HALF); // other half of long length
976 const TypeTuple *domain = TypeTuple::make(argp, fields);
977
978 // create result type
979 fields = TypeTuple::fields(1);
980 fields[TypeFunc::Parms+0] = nullptr; // void
981 const TypeTuple *range = TypeTuple::make(TypeFunc::Parms, fields);
982
983 return TypeFunc::make(domain, range);
984 }
985
986 static const TypeFunc* make_array_partition_Type() {
987 // create input type (domain)
988 int num_args = 7;
989 int argcnt = num_args;
990 const Type** fields = TypeTuple::fields(argcnt);
991 int argp = TypeFunc::Parms;
992 fields[argp++] = TypePtr::NOTNULL; // array
993 fields[argp++] = TypeInt::INT; // element type
994 fields[argp++] = TypeInt::INT; // low
995 fields[argp++] = TypeInt::INT; // end
996 fields[argp++] = TypePtr::NOTNULL; // pivot_indices (int array)
997 fields[argp++] = TypeInt::INT; // indexPivot1
998 fields[argp++] = TypeInt::INT; // indexPivot2
999 assert(argp == TypeFunc::Parms+argcnt, "correct decoding");
1000 const TypeTuple* domain = TypeTuple::make(TypeFunc::Parms+argcnt, fields);
1001
1002 // no result type needed
1003 fields = TypeTuple::fields(1);
1004 fields[TypeFunc::Parms+0] = nullptr; // void
1005 const TypeTuple* range = TypeTuple::make(TypeFunc::Parms, fields);
1006 return TypeFunc::make(domain, range);
1007 }
1008
1009 static const TypeFunc* make_array_sort_Type() {
1010 // create input type (domain)
1011 int num_args = 4;
1012 int argcnt = num_args;
1013 const Type** fields = TypeTuple::fields(argcnt);
1014 int argp = TypeFunc::Parms;
1015 fields[argp++] = TypePtr::NOTNULL; // array
1016 fields[argp++] = TypeInt::INT; // element type
1017 fields[argp++] = TypeInt::INT; // fromIndex
1018 fields[argp++] = TypeInt::INT; // toIndex
1019 assert(argp == TypeFunc::Parms+argcnt, "correct decoding");
1020 const TypeTuple* domain = TypeTuple::make(TypeFunc::Parms+argcnt, fields);
1021
1022 // no result type needed
1023 fields = TypeTuple::fields(1);
1024 fields[TypeFunc::Parms+0] = nullptr; // void
1025 const TypeTuple* range = TypeTuple::make(TypeFunc::Parms, fields);
1026 return TypeFunc::make(domain, range);
1027 }
1028
1029 static const TypeFunc* make_aescrypt_block_Type() {
1030 // create input type (domain)
1031 int num_args = 3;
1032 int argcnt = num_args;
1033 const Type** fields = TypeTuple::fields(argcnt);
1034 int argp = TypeFunc::Parms;
1035 fields[argp++] = TypePtr::NOTNULL; // src
1036 fields[argp++] = TypePtr::NOTNULL; // dest
1037 fields[argp++] = TypePtr::NOTNULL; // k array
1038 assert(argp == TypeFunc::Parms+argcnt, "correct decoding");
1039 const TypeTuple* domain = TypeTuple::make(TypeFunc::Parms+argcnt, fields);
1040
1041 // no result type needed
1042 fields = TypeTuple::fields(1);
1043 fields[TypeFunc::Parms+0] = nullptr; // void
1044 const TypeTuple* range = TypeTuple::make(TypeFunc::Parms, fields);
1045 return TypeFunc::make(domain, range);
1046 }
1047
1048 static const TypeFunc* make_updateBytesCRC32_Type() {
1049 // create input type (domain)
1050 int num_args = 3;
1051 int argcnt = num_args;
1052 const Type** fields = TypeTuple::fields(argcnt);
1053 int argp = TypeFunc::Parms;
1054 fields[argp++] = TypeInt::INT; // crc
1055 fields[argp++] = TypePtr::NOTNULL; // src
1056 fields[argp++] = TypeInt::INT; // len
1057 assert(argp == TypeFunc::Parms+argcnt, "correct decoding");
1058 const TypeTuple* domain = TypeTuple::make(TypeFunc::Parms+argcnt, fields);
1059
1060 // result type needed
1061 fields = TypeTuple::fields(1);
1062 fields[TypeFunc::Parms+0] = TypeInt::INT; // crc result
1063 const TypeTuple* range = TypeTuple::make(TypeFunc::Parms+1, fields);
1064 return TypeFunc::make(domain, range);
1065 }
1066
1067 static const TypeFunc* make_updateBytesCRC32C_Type() {
1068 // create input type (domain)
1069 int num_args = 4;
1070 int argcnt = num_args;
1071 const Type** fields = TypeTuple::fields(argcnt);
1072 int argp = TypeFunc::Parms;
1073 fields[argp++] = TypeInt::INT; // crc
1074 fields[argp++] = TypePtr::NOTNULL; // buf
1075 fields[argp++] = TypeInt::INT; // len
1076 fields[argp++] = TypePtr::NOTNULL; // table
1077 assert(argp == TypeFunc::Parms+argcnt, "correct decoding");
1078 const TypeTuple* domain = TypeTuple::make(TypeFunc::Parms+argcnt, fields);
1079
1080 // result type needed
1081 fields = TypeTuple::fields(1);
1082 fields[TypeFunc::Parms+0] = TypeInt::INT; // crc result
1083 const TypeTuple* range = TypeTuple::make(TypeFunc::Parms+1, fields);
1084 return TypeFunc::make(domain, range);
1085 }
1086
// Adler32 byte-range update: (adler, src + offset, len) -> updated checksum.
static const TypeFunc* make_updateBytesAdler32_Type() {
  // create input type (domain)
  int num_args = 3;
  int argcnt = num_args;
  const Type** fields = TypeTuple::fields(argcnt);
  int argp = TypeFunc::Parms;
  fields[argp++] = TypeInt::INT;        // adler (current checksum)
  fields[argp++] = TypePtr::NOTNULL;    // src + offset
  fields[argp++] = TypeInt::INT;        // len
  assert(argp == TypeFunc::Parms+argcnt, "correct decoding");
  const TypeTuple* domain = TypeTuple::make(TypeFunc::Parms+argcnt, fields);

  // result type needed
  fields = TypeTuple::fields(1);
  fields[TypeFunc::Parms+0] = TypeInt::INT; // updated checksum result
  const TypeTuple* range = TypeTuple::make(TypeFunc::Parms+1, fields);
  return TypeFunc::make(domain, range);
}
1105
1106 static const TypeFunc* make_cipherBlockChaining_aescrypt_Type() {
1107 // create input type (domain)
1108 int num_args = 5;
1109 int argcnt = num_args;
1110 const Type** fields = TypeTuple::fields(argcnt);
1111 int argp = TypeFunc::Parms;
1112 fields[argp++] = TypePtr::NOTNULL; // src
1113 fields[argp++] = TypePtr::NOTNULL; // dest
1114 fields[argp++] = TypePtr::NOTNULL; // k array
1115 fields[argp++] = TypePtr::NOTNULL; // r array
1116 fields[argp++] = TypeInt::INT; // src len
1117 assert(argp == TypeFunc::Parms+argcnt, "correct decoding");
1118 const TypeTuple* domain = TypeTuple::make(TypeFunc::Parms+argcnt, fields);
1119
1120 // returning cipher len (int)
1121 fields = TypeTuple::fields(1);
1122 fields[TypeFunc::Parms+0] = TypeInt::INT;
1123 const TypeTuple* range = TypeTuple::make(TypeFunc::Parms+1, fields);
1124 return TypeFunc::make(domain, range);
1125 }
1126
1127 static const TypeFunc* make_electronicCodeBook_aescrypt_Type() {
1128 // create input type (domain)
1129 int num_args = 4;
1130 int argcnt = num_args;
1131 const Type** fields = TypeTuple::fields(argcnt);
1132 int argp = TypeFunc::Parms;
1133 fields[argp++] = TypePtr::NOTNULL; // src
1134 fields[argp++] = TypePtr::NOTNULL; // dest
1135 fields[argp++] = TypePtr::NOTNULL; // k array
1136 fields[argp++] = TypeInt::INT; // src len
1137 assert(argp == TypeFunc::Parms + argcnt, "correct decoding");
1138 const TypeTuple* domain = TypeTuple::make(TypeFunc::Parms + argcnt, fields);
1139
1140 // returning cipher len (int)
1141 fields = TypeTuple::fields(1);
1142 fields[TypeFunc::Parms + 0] = TypeInt::INT;
1143 const TypeTuple* range = TypeTuple::make(TypeFunc::Parms + 1, fields);
1144 return TypeFunc::make(domain, range);
1145 }
1146
1147 static const TypeFunc* make_counterMode_aescrypt_Type() {
1148 // create input type (domain)
1149 int num_args = 7;
1150 int argcnt = num_args;
1151 const Type** fields = TypeTuple::fields(argcnt);
1152 int argp = TypeFunc::Parms;
1153 fields[argp++] = TypePtr::NOTNULL; // src
1154 fields[argp++] = TypePtr::NOTNULL; // dest
1155 fields[argp++] = TypePtr::NOTNULL; // k array
1156 fields[argp++] = TypePtr::NOTNULL; // counter array
1157 fields[argp++] = TypeInt::INT; // src len
1158 fields[argp++] = TypePtr::NOTNULL; // saved_encCounter
1159 fields[argp++] = TypePtr::NOTNULL; // saved used addr
1160 assert(argp == TypeFunc::Parms + argcnt, "correct decoding");
1161 const TypeTuple* domain = TypeTuple::make(TypeFunc::Parms + argcnt, fields);
1162 // returning cipher len (int)
1163 fields = TypeTuple::fields(1);
1164 fields[TypeFunc::Parms + 0] = TypeInt::INT;
1165 const TypeTuple* range = TypeTuple::make(TypeFunc::Parms + 1, fields);
1166 return TypeFunc::make(domain, range);
1167 }
1168
1169 static const TypeFunc* make_galoisCounterMode_aescrypt_Type() {
1170 // create input type (domain)
1171 int num_args = 8;
1172 int argcnt = num_args;
1173 const Type** fields = TypeTuple::fields(argcnt);
1174 int argp = TypeFunc::Parms;
1175 fields[argp++] = TypePtr::NOTNULL; // byte[] in + inOfs
1176 fields[argp++] = TypeInt::INT; // int len
1177 fields[argp++] = TypePtr::NOTNULL; // byte[] ct + ctOfs
1178 fields[argp++] = TypePtr::NOTNULL; // byte[] out + outOfs
1179 fields[argp++] = TypePtr::NOTNULL; // byte[] key from AESCrypt obj
1180 fields[argp++] = TypePtr::NOTNULL; // long[] state from GHASH obj
1181 fields[argp++] = TypePtr::NOTNULL; // long[] subkeyHtbl from GHASH obj
1182 fields[argp++] = TypePtr::NOTNULL; // byte[] counter from GCTR obj
1183
1184 assert(argp == TypeFunc::Parms + argcnt, "correct decoding");
1185 const TypeTuple* domain = TypeTuple::make(TypeFunc::Parms + argcnt, fields);
1186 // returning cipher len (int)
1187 fields = TypeTuple::fields(1);
1188 fields[TypeFunc::Parms + 0] = TypeInt::INT;
1189 const TypeTuple* range = TypeTuple::make(TypeFunc::Parms + 1, fields);
1190 return TypeFunc::make(domain, range);
1191 }
1192
1193 static const TypeFunc* make_digestBase_implCompress_Type(bool is_sha3) {
1194 // create input type (domain)
1195 int num_args = is_sha3 ? 3 : 2;
1196 int argcnt = num_args;
1197 const Type** fields = TypeTuple::fields(argcnt);
1198 int argp = TypeFunc::Parms;
1199 fields[argp++] = TypePtr::NOTNULL; // buf
1200 fields[argp++] = TypePtr::NOTNULL; // state
1201 if (is_sha3) fields[argp++] = TypeInt::INT; // block_size
1202 assert(argp == TypeFunc::Parms+argcnt, "correct decoding");
1203 const TypeTuple* domain = TypeTuple::make(TypeFunc::Parms+argcnt, fields);
1204
1205 // no result type needed
1206 fields = TypeTuple::fields(1);
1207 fields[TypeFunc::Parms+0] = nullptr; // void
1208 const TypeTuple* range = TypeTuple::make(TypeFunc::Parms, fields);
1209 return TypeFunc::make(domain, range);
1210 }
1211
1212 /*
1213 * int implCompressMultiBlock(byte[] b, int ofs, int limit)
1214 */
1215 static const TypeFunc* make_digestBase_implCompressMB_Type(bool is_sha3) {
1216 // create input type (domain)
1217 int num_args = is_sha3 ? 5 : 4;
1218 int argcnt = num_args;
1219 const Type** fields = TypeTuple::fields(argcnt);
1220 int argp = TypeFunc::Parms;
1221 fields[argp++] = TypePtr::NOTNULL; // buf
1222 fields[argp++] = TypePtr::NOTNULL; // state
1223 if (is_sha3) fields[argp++] = TypeInt::INT; // block_size
1224 fields[argp++] = TypeInt::INT; // ofs
1225 fields[argp++] = TypeInt::INT; // limit
1226 assert(argp == TypeFunc::Parms+argcnt, "correct decoding");
1227 const TypeTuple* domain = TypeTuple::make(TypeFunc::Parms+argcnt, fields);
1228
1229 // returning ofs (int)
1230 fields = TypeTuple::fields(1);
1231 fields[TypeFunc::Parms+0] = TypeInt::INT; // ofs
1232 const TypeTuple* range = TypeTuple::make(TypeFunc::Parms+1, fields);
1233 return TypeFunc::make(domain, range);
1234 }
1235
1236 // SHAKE128Parallel doubleKeccak function
1237 static const TypeFunc* make_double_keccak_Type() {
1238 int argcnt = 2;
1239
1240 const Type** fields = TypeTuple::fields(argcnt);
1241 int argp = TypeFunc::Parms;
1242 fields[argp++] = TypePtr::NOTNULL; // status0
1243 fields[argp++] = TypePtr::NOTNULL; // status1
1244
1245 assert(argp == TypeFunc::Parms + argcnt, "correct decoding");
1246 const TypeTuple* domain = TypeTuple::make(TypeFunc::Parms + argcnt, fields);
1247
1248 // result type needed
1249 fields = TypeTuple::fields(1);
1250 fields[TypeFunc::Parms + 0] = TypeInt::INT;
1251 const TypeTuple* range = TypeTuple::make(TypeFunc::Parms + 1, fields);
1252 return TypeFunc::make(domain, range);
1253 }
1254
1255 static const TypeFunc* make_multiplyToLen_Type() {
1256 // create input type (domain)
1257 int num_args = 5;
1258 int argcnt = num_args;
1259 const Type** fields = TypeTuple::fields(argcnt);
1260 int argp = TypeFunc::Parms;
1261 fields[argp++] = TypePtr::NOTNULL; // x
1262 fields[argp++] = TypeInt::INT; // xlen
1263 fields[argp++] = TypePtr::NOTNULL; // y
1264 fields[argp++] = TypeInt::INT; // ylen
1265 fields[argp++] = TypePtr::NOTNULL; // z
1266 assert(argp == TypeFunc::Parms+argcnt, "correct decoding");
1267 const TypeTuple* domain = TypeTuple::make(TypeFunc::Parms+argcnt, fields);
1268
1269 // no result type needed
1270 fields = TypeTuple::fields(1);
1271 fields[TypeFunc::Parms+0] = nullptr;
1272 const TypeTuple* range = TypeTuple::make(TypeFunc::Parms, fields);
1273 return TypeFunc::make(domain, range);
1274 }
1275
1276 static const TypeFunc* make_squareToLen_Type() {
1277 // create input type (domain)
1278 int num_args = 4;
1279 int argcnt = num_args;
1280 const Type** fields = TypeTuple::fields(argcnt);
1281 int argp = TypeFunc::Parms;
1282 fields[argp++] = TypePtr::NOTNULL; // x
1283 fields[argp++] = TypeInt::INT; // len
1284 fields[argp++] = TypePtr::NOTNULL; // z
1285 fields[argp++] = TypeInt::INT; // zlen
1286 assert(argp == TypeFunc::Parms+argcnt, "correct decoding");
1287 const TypeTuple* domain = TypeTuple::make(TypeFunc::Parms+argcnt, fields);
1288
1289 // no result type needed
1290 fields = TypeTuple::fields(1);
1291 fields[TypeFunc::Parms+0] = nullptr;
1292 const TypeTuple* range = TypeTuple::make(TypeFunc::Parms, fields);
1293 return TypeFunc::make(domain, range);
1294 }
1295
1296 static const TypeFunc* make_mulAdd_Type() {
1297 // create input type (domain)
1298 int num_args = 5;
1299 int argcnt = num_args;
1300 const Type** fields = TypeTuple::fields(argcnt);
1301 int argp = TypeFunc::Parms;
1302 fields[argp++] = TypePtr::NOTNULL; // out
1303 fields[argp++] = TypePtr::NOTNULL; // in
1304 fields[argp++] = TypeInt::INT; // offset
1305 fields[argp++] = TypeInt::INT; // len
1306 fields[argp++] = TypeInt::INT; // k
1307 assert(argp == TypeFunc::Parms+argcnt, "correct decoding");
1308 const TypeTuple* domain = TypeTuple::make(TypeFunc::Parms+argcnt, fields);
1309
1310 // returning carry (int)
1311 fields = TypeTuple::fields(1);
1312 fields[TypeFunc::Parms+0] = TypeInt::INT;
1313 const TypeTuple* range = TypeTuple::make(TypeFunc::Parms+1, fields);
1314 return TypeFunc::make(domain, range);
1315 }
1316
// Montgomery multiply: (a, b, n, len, inv, result), with the long 'inv'
// occupying two slots. The range is built with count TypeFunc::Parms, i.e.
// the call returns void — the field assigned below is not included in the
// range tuple. Presumably the stub writes its output through the 'result'
// argument instead; confirm against the stub before changing this.
static const TypeFunc* make_montgomeryMultiply_Type() {
  // create input type (domain)
  int num_args = 7;
  int argcnt = num_args;
  const Type** fields = TypeTuple::fields(argcnt);
  int argp = TypeFunc::Parms;
  fields[argp++] = TypePtr::NOTNULL; // a
  fields[argp++] = TypePtr::NOTNULL; // b
  fields[argp++] = TypePtr::NOTNULL; // n
  fields[argp++] = TypeInt::INT; // len
  fields[argp++] = TypeLong::LONG; // inv
  fields[argp++] = Type::HALF; // second slot of the long 'inv'
  fields[argp++] = TypePtr::NOTNULL; // result
  assert(argp == TypeFunc::Parms+argcnt, "correct decoding");
  const TypeTuple* domain = TypeTuple::make(TypeFunc::Parms+argcnt, fields);

  // result type needed
  fields = TypeTuple::fields(1);
  fields[TypeFunc::Parms+0] = TypePtr::NOTNULL;

  // NOTE(review): count is TypeFunc::Parms, so the range is empty (void)
  // and the assignment above is unused.
  const TypeTuple* range = TypeTuple::make(TypeFunc::Parms, fields);
  return TypeFunc::make(domain, range);
}
1340
// Montgomery square: (a, n, len, inv, result), with the long 'inv'
// occupying two slots. As in make_montgomeryMultiply_Type, the range is
// built with count TypeFunc::Parms (void); the field assigned below is
// unused — presumably the result is written through the 'result' argument.
static const TypeFunc* make_montgomerySquare_Type() {
  // create input type (domain)
  int num_args = 6;
  int argcnt = num_args;
  const Type** fields = TypeTuple::fields(argcnt);
  int argp = TypeFunc::Parms;
  fields[argp++] = TypePtr::NOTNULL; // a
  fields[argp++] = TypePtr::NOTNULL; // n
  fields[argp++] = TypeInt::INT; // len
  fields[argp++] = TypeLong::LONG; // inv
  fields[argp++] = Type::HALF; // second slot of the long 'inv'
  fields[argp++] = TypePtr::NOTNULL; // result
  assert(argp == TypeFunc::Parms+argcnt, "correct decoding");
  const TypeTuple* domain = TypeTuple::make(TypeFunc::Parms+argcnt, fields);

  // result type needed
  fields = TypeTuple::fields(1);
  fields[TypeFunc::Parms+0] = TypePtr::NOTNULL;

  // NOTE(review): count is TypeFunc::Parms, so the range is empty (void).
  const TypeTuple* range = TypeTuple::make(TypeFunc::Parms, fields);
  return TypeFunc::make(domain, range);
}
1363
1364 static const TypeFunc* make_bigIntegerShift_Type() {
1365 int argcnt = 5;
1366 const Type** fields = TypeTuple::fields(argcnt);
1367 int argp = TypeFunc::Parms;
1368 fields[argp++] = TypePtr::NOTNULL; // newArr
1369 fields[argp++] = TypePtr::NOTNULL; // oldArr
1370 fields[argp++] = TypeInt::INT; // newIdx
1371 fields[argp++] = TypeInt::INT; // shiftCount
1372 fields[argp++] = TypeInt::INT; // numIter
1373 assert(argp == TypeFunc::Parms + argcnt, "correct decoding");
1374 const TypeTuple* domain = TypeTuple::make(TypeFunc::Parms + argcnt, fields);
1375
1376 // no result type needed
1377 fields = TypeTuple::fields(1);
1378 fields[TypeFunc::Parms + 0] = nullptr;
1379 const TypeTuple* range = TypeTuple::make(TypeFunc::Parms, fields);
1380 return TypeFunc::make(domain, range);
1381 }
1382
1383 static const TypeFunc* make_vectorizedMismatch_Type() {
1384 // create input type (domain)
1385 int num_args = 4;
1386 int argcnt = num_args;
1387 const Type** fields = TypeTuple::fields(argcnt);
1388 int argp = TypeFunc::Parms;
1389 fields[argp++] = TypePtr::NOTNULL; // obja
1390 fields[argp++] = TypePtr::NOTNULL; // objb
1391 fields[argp++] = TypeInt::INT; // length, number of elements
1392 fields[argp++] = TypeInt::INT; // log2scale, element size
1393 assert(argp == TypeFunc::Parms + argcnt, "correct decoding");
1394 const TypeTuple* domain = TypeTuple::make(TypeFunc::Parms + argcnt, fields);
1395
1396 //return mismatch index (int)
1397 fields = TypeTuple::fields(1);
1398 fields[TypeFunc::Parms + 0] = TypeInt::INT;
1399 const TypeTuple* range = TypeTuple::make(TypeFunc::Parms + 1, fields);
1400 return TypeFunc::make(domain, range);
1401 }
1402
// GHASH processBlocks: (state, subkeyH, data, blocks) -> void.
static const TypeFunc* make_ghash_processBlocks_Type() {
    int argcnt = 4;

    const Type** fields = TypeTuple::fields(argcnt);
    int argp = TypeFunc::Parms;
    fields[argp++] = TypePtr::NOTNULL; // state
    fields[argp++] = TypePtr::NOTNULL; // subkeyH
    fields[argp++] = TypePtr::NOTNULL; // data
    fields[argp++] = TypeInt::INT; // blocks
    assert(argp == TypeFunc::Parms+argcnt, "correct decoding");
    const TypeTuple* domain = TypeTuple::make(TypeFunc::Parms+argcnt, fields);

    // no result type needed
    fields = TypeTuple::fields(1);
    fields[TypeFunc::Parms+0] = nullptr; // void
    const TypeTuple* range = TypeTuple::make(TypeFunc::Parms, fields);
    return TypeFunc::make(domain, range);
}
1421
1422 static const TypeFunc* make_chacha20Block_Type() {
1423 int argcnt = 2;
1424
1425 const Type** fields = TypeTuple::fields(argcnt);
1426 int argp = TypeFunc::Parms;
1427 fields[argp++] = TypePtr::NOTNULL; // state
1428 fields[argp++] = TypePtr::NOTNULL; // result
1429
1430 assert(argp == TypeFunc::Parms + argcnt, "correct decoding");
1431 const TypeTuple* domain = TypeTuple::make(TypeFunc::Parms + argcnt, fields);
1432
1433 // result type needed
1434 fields = TypeTuple::fields(1);
1435 fields[TypeFunc::Parms + 0] = TypeInt::INT; // key stream outlen as int
1436 const TypeTuple* range = TypeTuple::make(TypeFunc::Parms + 1, fields);
1437 return TypeFunc::make(domain, range);
1438 }
1439
1440 // Kyber NTT function
1441 static const TypeFunc* make_kyberNtt_Type() {
1442 int argcnt = 2;
1443
1444 const Type** fields = TypeTuple::fields(argcnt);
1445 int argp = TypeFunc::Parms;
1446 fields[argp++] = TypePtr::NOTNULL; // coeffs
1447 fields[argp++] = TypePtr::NOTNULL; // NTT zetas
1448
1449 assert(argp == TypeFunc::Parms + argcnt, "correct decoding");
1450 const TypeTuple* domain = TypeTuple::make(TypeFunc::Parms + argcnt, fields);
1451
1452 // result type needed
1453 fields = TypeTuple::fields(1);
1454 fields[TypeFunc::Parms + 0] = TypeInt::INT;
1455 const TypeTuple* range = TypeTuple::make(TypeFunc::Parms + 1, fields);
1456 return TypeFunc::make(domain, range);
1457 }
1458
1459 // Kyber inverse NTT function
1460 static const TypeFunc* make_kyberInverseNtt_Type() {
1461 int argcnt = 2;
1462
1463 const Type** fields = TypeTuple::fields(argcnt);
1464 int argp = TypeFunc::Parms;
1465 fields[argp++] = TypePtr::NOTNULL; // coeffs
1466 fields[argp++] = TypePtr::NOTNULL; // inverse NTT zetas
1467
1468 assert(argp == TypeFunc::Parms + argcnt, "correct decoding");
1469 const TypeTuple* domain = TypeTuple::make(TypeFunc::Parms + argcnt, fields);
1470
1471 // result type needed
1472 fields = TypeTuple::fields(1);
1473 fields[TypeFunc::Parms + 0] = TypeInt::INT;
1474 const TypeTuple* range = TypeTuple::make(TypeFunc::Parms + 1, fields);
1475 return TypeFunc::make(domain, range);
1476 }
1477
1478 // Kyber NTT multiply function
1479 static const TypeFunc* make_kyberNttMult_Type() {
1480 int argcnt = 4;
1481
1482 const Type** fields = TypeTuple::fields(argcnt);
1483 int argp = TypeFunc::Parms;
1484 fields[argp++] = TypePtr::NOTNULL; // result
1485 fields[argp++] = TypePtr::NOTNULL; // ntta
1486 fields[argp++] = TypePtr::NOTNULL; // nttb
1487 fields[argp++] = TypePtr::NOTNULL; // NTT multiply zetas
1488
1489 assert(argp == TypeFunc::Parms + argcnt, "correct decoding");
1490 const TypeTuple* domain = TypeTuple::make(TypeFunc::Parms + argcnt, fields);
1491
1492 // result type needed
1493 fields = TypeTuple::fields(1);
1494 fields[TypeFunc::Parms + 0] = TypeInt::INT;
1495 const TypeTuple* range = TypeTuple::make(TypeFunc::Parms + 1, fields);
1496 return TypeFunc::make(domain, range);
1497 }
1498
1499 // Kyber add 2 polynomials function
1500 static const TypeFunc* make_kyberAddPoly_2_Type() {
1501 int argcnt = 3;
1502
1503 const Type** fields = TypeTuple::fields(argcnt);
1504 int argp = TypeFunc::Parms;
1505 fields[argp++] = TypePtr::NOTNULL; // result
1506 fields[argp++] = TypePtr::NOTNULL; // a
1507 fields[argp++] = TypePtr::NOTNULL; // b
1508
1509 assert(argp == TypeFunc::Parms + argcnt, "correct decoding");
1510 const TypeTuple* domain = TypeTuple::make(TypeFunc::Parms + argcnt, fields);
1511
1512 // result type needed
1513 fields = TypeTuple::fields(1);
1514 fields[TypeFunc::Parms + 0] = TypeInt::INT;
1515 const TypeTuple* range = TypeTuple::make(TypeFunc::Parms + 1, fields);
1516 return TypeFunc::make(domain, range);
1517 }
1518
1519
1520 // Kyber add 3 polynomials function
1521 static const TypeFunc* make_kyberAddPoly_3_Type() {
1522 int argcnt = 4;
1523
1524 const Type** fields = TypeTuple::fields(argcnt);
1525 int argp = TypeFunc::Parms;
1526 fields[argp++] = TypePtr::NOTNULL; // result
1527 fields[argp++] = TypePtr::NOTNULL; // a
1528 fields[argp++] = TypePtr::NOTNULL; // b
1529 fields[argp++] = TypePtr::NOTNULL; // c
1530
1531 assert(argp == TypeFunc::Parms + argcnt, "correct decoding");
1532 const TypeTuple* domain = TypeTuple::make(TypeFunc::Parms + argcnt, fields);
1533
1534 // result type needed
1535 fields = TypeTuple::fields(1);
1536 fields[TypeFunc::Parms + 0] = TypeInt::INT;
1537 const TypeTuple* range = TypeTuple::make(TypeFunc::Parms + 1, fields);
1538 return TypeFunc::make(domain, range);
1539 }
1540
1541
1542 // Kyber XOF output parsing into polynomial coefficients candidates
1543 // or decompress(12,...) function
1544 static const TypeFunc* make_kyber12To16_Type() {
1545 int argcnt = 4;
1546
1547 const Type** fields = TypeTuple::fields(argcnt);
1548 int argp = TypeFunc::Parms;
1549 fields[argp++] = TypePtr::NOTNULL; // condensed
1550 fields[argp++] = TypeInt::INT; // condensedOffs
1551 fields[argp++] = TypePtr::NOTNULL; // parsed
1552 fields[argp++] = TypeInt::INT; // parsedLength
1553
1554 assert(argp == TypeFunc::Parms + argcnt, "correct decoding");
1555 const TypeTuple* domain = TypeTuple::make(TypeFunc::Parms + argcnt, fields);
1556
1557 // result type needed
1558 fields = TypeTuple::fields(1);
1559 fields[TypeFunc::Parms + 0] = TypeInt::INT;
1560 const TypeTuple* range = TypeTuple::make(TypeFunc::Parms + 1, fields);
1561 return TypeFunc::make(domain, range);
1562 }
1563
1564 // Kyber Barrett reduce function
1565 static const TypeFunc* make_kyberBarrettReduce_Type() {
1566 int argcnt = 1;
1567
1568 const Type** fields = TypeTuple::fields(argcnt);
1569 int argp = TypeFunc::Parms;
1570 fields[argp++] = TypePtr::NOTNULL; // coeffs
1571
1572 assert(argp == TypeFunc::Parms + argcnt, "correct decoding");
1573 const TypeTuple* domain = TypeTuple::make(TypeFunc::Parms + argcnt, fields);
1574
1575 // result type needed
1576 fields = TypeTuple::fields(1);
1577 fields[TypeFunc::Parms + 0] = TypeInt::INT;
1578 const TypeTuple* range = TypeTuple::make(TypeFunc::Parms + 1, fields);
1579 return TypeFunc::make(domain, range);
1580 }
1581
1582 // Dilithium NTT function except for the final "normalization" to |coeff| < Q
1583 static const TypeFunc* make_dilithiumAlmostNtt_Type() {
1584 int argcnt = 2;
1585
1586 const Type** fields = TypeTuple::fields(argcnt);
1587 int argp = TypeFunc::Parms;
1588 fields[argp++] = TypePtr::NOTNULL; // coeffs
1589 fields[argp++] = TypePtr::NOTNULL; // NTT zetas
1590
1591 assert(argp == TypeFunc::Parms + argcnt, "correct decoding");
1592 const TypeTuple* domain = TypeTuple::make(TypeFunc::Parms + argcnt, fields);
1593
1594 // result type needed
1595 fields = TypeTuple::fields(1);
1596 fields[TypeFunc::Parms + 0] = TypeInt::INT;
1597 const TypeTuple* range = TypeTuple::make(TypeFunc::Parms + 1, fields);
1598 return TypeFunc::make(domain, range);
1599 }
1600
1601 // Dilithium inverse NTT function except the final mod Q division by 2^256
1602 static const TypeFunc* make_dilithiumAlmostInverseNtt_Type() {
1603 int argcnt = 2;
1604
1605 const Type** fields = TypeTuple::fields(argcnt);
1606 int argp = TypeFunc::Parms;
1607 fields[argp++] = TypePtr::NOTNULL; // coeffs
1608 fields[argp++] = TypePtr::NOTNULL; // inverse NTT zetas
1609
1610 assert(argp == TypeFunc::Parms + argcnt, "correct decoding");
1611 const TypeTuple* domain = TypeTuple::make(TypeFunc::Parms + argcnt, fields);
1612
1613 // result type needed
1614 fields = TypeTuple::fields(1);
1615 fields[TypeFunc::Parms + 0] = TypeInt::INT;
1616 const TypeTuple* range = TypeTuple::make(TypeFunc::Parms + 1, fields);
1617 return TypeFunc::make(domain, range);
1618 }
1619
1620 // Dilithium NTT multiply function
1621 static const TypeFunc* make_dilithiumNttMult_Type() {
1622 int argcnt = 3;
1623
1624 const Type** fields = TypeTuple::fields(argcnt);
1625 int argp = TypeFunc::Parms;
1626 fields[argp++] = TypePtr::NOTNULL; // result
1627 fields[argp++] = TypePtr::NOTNULL; // ntta
1628 fields[argp++] = TypePtr::NOTNULL; // nttb
1629
1630 assert(argp == TypeFunc::Parms + argcnt, "correct decoding");
1631 const TypeTuple* domain = TypeTuple::make(TypeFunc::Parms + argcnt, fields);
1632
1633 // result type needed
1634 fields = TypeTuple::fields(1);
1635 fields[TypeFunc::Parms + 0] = TypeInt::INT;
1636 const TypeTuple* range = TypeTuple::make(TypeFunc::Parms + 1, fields);
1637 return TypeFunc::make(domain, range);
1638 }
1639
1640 // Dilithium Montgomery multiply a polynome coefficient array by a constant
1641 static const TypeFunc* make_dilithiumMontMulByConstant_Type() {
1642 int argcnt = 2;
1643
1644 const Type** fields = TypeTuple::fields(argcnt);
1645 int argp = TypeFunc::Parms;
1646 fields[argp++] = TypePtr::NOTNULL; // coeffs
1647 fields[argp++] = TypeInt::INT; // constant multiplier
1648
1649 assert(argp == TypeFunc::Parms + argcnt, "correct decoding");
1650 const TypeTuple* domain = TypeTuple::make(TypeFunc::Parms + argcnt, fields);
1651
1652 // result type needed
1653 fields = TypeTuple::fields(1);
1654 fields[TypeFunc::Parms + 0] = TypeInt::INT;
1655 const TypeTuple* range = TypeTuple::make(TypeFunc::Parms + 1, fields);
1656 return TypeFunc::make(domain, range);
1657 }
1658
1659 // Dilithium decompose polynomial
1660 static const TypeFunc* make_dilithiumDecomposePoly_Type() {
1661 int argcnt = 5;
1662
1663 const Type** fields = TypeTuple::fields(argcnt);
1664 int argp = TypeFunc::Parms;
1665 fields[argp++] = TypePtr::NOTNULL; // input
1666 fields[argp++] = TypePtr::NOTNULL; // lowPart
1667 fields[argp++] = TypePtr::NOTNULL; // highPart
1668 fields[argp++] = TypeInt::INT; // 2 * gamma2
1669 fields[argp++] = TypeInt::INT; // multiplier
1670
1671 assert(argp == TypeFunc::Parms + argcnt, "correct decoding");
1672 const TypeTuple* domain = TypeTuple::make(TypeFunc::Parms + argcnt, fields);
1673
1674 // result type needed
1675 fields = TypeTuple::fields(1);
1676 fields[TypeFunc::Parms + 0] = TypeInt::INT;
1677 const TypeTuple* range = TypeTuple::make(TypeFunc::Parms + 1, fields);
1678 return TypeFunc::make(domain, range);
1679 }
1680
1681 static const TypeFunc* make_base64_encodeBlock_Type() {
1682 int argcnt = 6;
1683
1684 const Type** fields = TypeTuple::fields(argcnt);
1685 int argp = TypeFunc::Parms;
1686 fields[argp++] = TypePtr::NOTNULL; // src array
1687 fields[argp++] = TypeInt::INT; // offset
1688 fields[argp++] = TypeInt::INT; // length
1689 fields[argp++] = TypePtr::NOTNULL; // dest array
1690 fields[argp++] = TypeInt::INT; // dp
1691 fields[argp++] = TypeInt::BOOL; // isURL
1692 assert(argp == TypeFunc::Parms + argcnt, "correct decoding");
1693 const TypeTuple* domain = TypeTuple::make(TypeFunc::Parms+argcnt, fields);
1694
1695 // result type needed
1696 fields = TypeTuple::fields(1);
1697 fields[TypeFunc::Parms + 0] = nullptr; // void
1698 const TypeTuple* range = TypeTuple::make(TypeFunc::Parms, fields);
1699 return TypeFunc::make(domain, range);
1700 }
1701
1702 static const TypeFunc* make_string_IndexOf_Type() {
1703 int argcnt = 4;
1704
1705 const Type** fields = TypeTuple::fields(argcnt);
1706 int argp = TypeFunc::Parms;
1707 fields[argp++] = TypePtr::NOTNULL; // haystack array
1708 fields[argp++] = TypeInt::INT; // haystack length
1709 fields[argp++] = TypePtr::NOTNULL; // needle array
1710 fields[argp++] = TypeInt::INT; // needle length
1711 assert(argp == TypeFunc::Parms + argcnt, "correct decoding");
1712 const TypeTuple* domain = TypeTuple::make(TypeFunc::Parms+argcnt, fields);
1713
1714 // result type needed
1715 fields = TypeTuple::fields(1);
1716 fields[TypeFunc::Parms + 0] = TypeInt::INT; // Index of needle in haystack
1717 const TypeTuple* range = TypeTuple::make(TypeFunc::Parms + 1, fields);
1718 return TypeFunc::make(domain, range);
1719 }
1720
1721 static const TypeFunc* make_base64_decodeBlock_Type() {
1722 int argcnt = 7;
1723
1724 const Type** fields = TypeTuple::fields(argcnt);
1725 int argp = TypeFunc::Parms;
1726 fields[argp++] = TypePtr::NOTNULL; // src array
1727 fields[argp++] = TypeInt::INT; // src offset
1728 fields[argp++] = TypeInt::INT; // src length
1729 fields[argp++] = TypePtr::NOTNULL; // dest array
1730 fields[argp++] = TypeInt::INT; // dest offset
1731 fields[argp++] = TypeInt::BOOL; // isURL
1732 fields[argp++] = TypeInt::BOOL; // isMIME
1733 assert(argp == TypeFunc::Parms + argcnt, "correct decoding");
1734 const TypeTuple* domain = TypeTuple::make(TypeFunc::Parms+argcnt, fields);
1735
1736 // result type needed
1737 fields = TypeTuple::fields(1);
1738 fields[TypeFunc::Parms + 0] = TypeInt::INT; // count of bytes written to dst
1739 const TypeTuple* range = TypeTuple::make(TypeFunc::Parms + 1, fields);
1740 return TypeFunc::make(domain, range);
1741 }
1742
1743 static const TypeFunc* make_poly1305_processBlocks_Type() {
1744 int argcnt = 4;
1745
1746 const Type** fields = TypeTuple::fields(argcnt);
1747 int argp = TypeFunc::Parms;
1748 fields[argp++] = TypePtr::NOTNULL; // input array
1749 fields[argp++] = TypeInt::INT; // input length
1750 fields[argp++] = TypePtr::NOTNULL; // accumulator array
1751 fields[argp++] = TypePtr::NOTNULL; // r array
1752 assert(argp == TypeFunc::Parms + argcnt, "correct decoding");
1753 const TypeTuple* domain = TypeTuple::make(TypeFunc::Parms+argcnt, fields);
1754
1755 // result type needed
1756 fields = TypeTuple::fields(1);
1757 fields[TypeFunc::Parms + 0] = nullptr; // void
1758 const TypeTuple* range = TypeTuple::make(TypeFunc::Parms, fields);
1759 return TypeFunc::make(domain, range);
1760 }
1761
1762 static const TypeFunc* make_intpoly_montgomeryMult_P256_Type() {
1763 int argcnt = 3;
1764
1765 const Type** fields = TypeTuple::fields(argcnt);
1766 int argp = TypeFunc::Parms;
1767 fields[argp++] = TypePtr::NOTNULL; // a array
1768 fields[argp++] = TypePtr::NOTNULL; // b array
1769 fields[argp++] = TypePtr::NOTNULL; // r(esult) array
1770 assert(argp == TypeFunc::Parms + argcnt, "correct decoding");
1771 const TypeTuple* domain = TypeTuple::make(TypeFunc::Parms+argcnt, fields);
1772
1773 // result type needed
1774 fields = TypeTuple::fields(1);
1775 fields[TypeFunc::Parms + 0] = nullptr; // void
1776 const TypeTuple* range = TypeTuple::make(TypeFunc::Parms, fields);
1777 return TypeFunc::make(domain, range);
1778 }
1779
1780 static const TypeFunc* make_intpoly_assign_Type() {
1781 int argcnt = 4;
1782
1783 const Type** fields = TypeTuple::fields(argcnt);
1784 int argp = TypeFunc::Parms;
1785 fields[argp++] = TypeInt::INT; // set flag
1786 fields[argp++] = TypePtr::NOTNULL; // a array (result)
1787 fields[argp++] = TypePtr::NOTNULL; // b array (if set is set)
1788 fields[argp++] = TypeInt::INT; // array length
1789 assert(argp == TypeFunc::Parms + argcnt, "correct decoding");
1790 const TypeTuple* domain = TypeTuple::make(TypeFunc::Parms+argcnt, fields);
1791
1792 // result type needed
1793 fields = TypeTuple::fields(1);
1794 fields[TypeFunc::Parms + 0] = nullptr; // void
1795 const TypeTuple* range = TypeTuple::make(TypeFunc::Parms, fields);
1796 return TypeFunc::make(domain, range);
1797 }
1798
1799 //------------- Interpreter state for on stack replacement
1800 static const TypeFunc* make_osr_end_Type() {
1801 // create input type (domain)
1802 const Type **fields = TypeTuple::fields(1);
1803 fields[TypeFunc::Parms+0] = TypeRawPtr::BOTTOM; // OSR temp buf
1804 const TypeTuple *domain = TypeTuple::make(TypeFunc::Parms+1, fields);
1805
1806 // create result type
1807 fields = TypeTuple::fields(1);
1808 // fields[TypeFunc::Parms+0] = TypeInstPtr::NOTNULL; // locked oop
1809 fields[TypeFunc::Parms+0] = nullptr; // void
1810 const TypeTuple *range = TypeTuple::make(TypeFunc::Parms, fields);
1811 return TypeFunc::make(domain, range);
1812 }
1813
1814 #ifndef PRODUCT
1815 static void debug_print_convert_type(const Type** fields, int* argp, Node *parm) {
1816 const BasicType bt = parm->bottom_type()->basic_type();
1817 fields[(*argp)++] = Type::get_const_basic_type(bt);
1818 if (bt == T_LONG || bt == T_DOUBLE) {
1819 fields[(*argp)++] = Type::HALF;
1820 }
1821 }
1822
1823 static void update_arg_cnt(const Node* parm, int* arg_cnt) {
1824 (*arg_cnt)++;
1825 const BasicType bt = parm->bottom_type()->basic_type();
1826 if (bt == T_LONG || bt == T_DOUBLE) {
1827 (*arg_cnt)++;
1828 }
1829 }
1830
1831 const TypeFunc* OptoRuntime::debug_print_Type(Node* parm0, Node* parm1,
1832 Node* parm2, Node* parm3,
1833 Node* parm4, Node* parm5,
1834 Node* parm6) {
1835 int argcnt = 1;
1836 if (parm0 != nullptr) { update_arg_cnt(parm0, &argcnt);
1837 if (parm1 != nullptr) { update_arg_cnt(parm1, &argcnt);
1838 if (parm2 != nullptr) { update_arg_cnt(parm2, &argcnt);
1839 if (parm3 != nullptr) { update_arg_cnt(parm3, &argcnt);
1840 if (parm4 != nullptr) { update_arg_cnt(parm4, &argcnt);
1841 if (parm5 != nullptr) { update_arg_cnt(parm5, &argcnt);
1842 if (parm6 != nullptr) { update_arg_cnt(parm6, &argcnt);
1843 /* close each nested if ===> */ } } } } } } }
1844
1845 // create input type (domain)
1846 const Type** fields = TypeTuple::fields(argcnt);
1847 int argp = TypeFunc::Parms;
1848 fields[argp++] = TypePtr::NOTNULL; // static string pointer
1849
1850 if (parm0 != nullptr) { debug_print_convert_type(fields, &argp, parm0);
1851 if (parm1 != nullptr) { debug_print_convert_type(fields, &argp, parm1);
1852 if (parm2 != nullptr) { debug_print_convert_type(fields, &argp, parm2);
1853 if (parm3 != nullptr) { debug_print_convert_type(fields, &argp, parm3);
1854 if (parm4 != nullptr) { debug_print_convert_type(fields, &argp, parm4);
1855 if (parm5 != nullptr) { debug_print_convert_type(fields, &argp, parm5);
1856 if (parm6 != nullptr) { debug_print_convert_type(fields, &argp, parm6);
1857 /* close each nested if ===> */ } } } } } } }
1858
1859 assert(argp == TypeFunc::Parms+argcnt, "correct decoding");
1860 const TypeTuple* domain = TypeTuple::make(TypeFunc::Parms+argcnt, fields);
1861
1862 // no result type needed
1863 fields = TypeTuple::fields(1);
1864 fields[TypeFunc::Parms+0] = nullptr; // void
1865 const TypeTuple* range = TypeTuple::make(TypeFunc::Parms, fields);
1866 return TypeFunc::make(domain, range);
1867 }
1868 #endif // PRODUCT
1869
1870 //-------------------------------------------------------------------------------------
1871 // register policy
1872
1873 bool OptoRuntime::is_callee_saved_register(MachRegisterNumbers reg) {
1874 assert(reg >= 0 && reg < _last_Mach_Reg, "must be a machine register");
1875 switch (register_save_policy[reg]) {
1876 case 'C': return false; //SOC
1877 case 'E': return true ; //SOE
1878 case 'N': return false; //NS
1879 case 'A': return false; //AS
1880 }
1881 ShouldNotReachHere();
1882 return false;
1883 }
1884
1885 //-----------------------------------------------------------------------
1886 // Exceptions
1887 //
1888
1889 static void trace_exception(outputStream* st, oop exception_oop, address exception_pc, const char* msg);
1890
1891 // The method is an entry that is always called by a C++ method not
1892 // directly from compiled code. Compiled code will call the C++ method following.
1893 // We can't allow async exception to be installed during exception processing.
JRT_ENTRY_NO_ASYNC(address, OptoRuntime::handle_exception_C_helper(JavaThread* current, nmethod* &nm))
  // The frame we rethrow the exception to might not have been processed by the GC yet.
  // The stack watermark barrier takes care of detecting that and ensuring the frame
  // has updated oops.
  StackWatermarkSet::after_unwind(current);

  MACOS_AARCH64_ONLY(os::thread_wx_enable_write());

  // Do not confuse exception_oop with pending_exception. The exception_oop
  // is only used to pass arguments into the method. Not for general
  // exception handling. DO NOT CHANGE IT to use pending_exception, since
  // the runtime stubs check this on exit.
  assert(current->exception_oop() != nullptr, "exception oop is found");
  address handler_address = nullptr;

  // Keep the exception oop alive in a Handle across any safepoint below.
  Handle exception(current, current->exception_oop());
  address pc = current->exception_pc();

  // Clear out the exception oop and pc since looking up an
  // exception handler can cause class loading, which might throw an
  // exception and those fields are expected to be clear during
  // normal bytecode execution.
  current->clear_exception_oop_and_pc();

  LogTarget(Info, exceptions) lt;
  if (lt.is_enabled()) {
    LogStream ls(lt);
    trace_exception(&ls, exception(), pc, "");
  }

  // for AbortVMOnException flag
  Exceptions::debug_check_abort(exception);

#ifdef ASSERT
  if (!(exception->is_a(vmClasses::Throwable_klass()))) {
    // should throw an exception here
    ShouldNotReachHere();
  }
#endif

  // new exception handling: this method is entered only from adapters
  // exceptions from compiled java methods are handled in compiled code
  // using rethrow node

  // Locate the nmethod containing the throwing pc; the caller receives it
  // through the nm out-parameter for the post-VM deopt re-check.
  nm = CodeCache::find_nmethod(pc);
  assert(nm != nullptr, "No NMethod found");
  if (nm->is_native_method()) {
    fatal("Native method should not have path to exception handling");
  } else {
    // we are switching to old paradigm: search for exception handler in caller_frame
    // instead in exception handler of caller_frame.sender()

    if (JvmtiExport::can_post_on_exceptions()) {
      // "Full-speed catching" is not necessary here,
      // since we're notifying the VM on every catch.
      // Force deoptimization and the rest of the lookup
      // will be fine.
      deoptimize_caller_frame(current);
    }

    // Check the stack guard pages. If enabled, look for handler in this frame;
    // otherwise, forcibly unwind the frame.
    //
    // 4826555: use default current sp for reguard_stack instead of &nm: it's more accurate.
    bool force_unwind = !current->stack_overflow_state()->reguard_stack();
    bool deopting = false;
    if (nm->is_deopt_pc(pc)) {
      deopting = true;
      RegisterMap map(current,
                      RegisterMap::UpdateMap::skip,
                      RegisterMap::ProcessFrames::include,
                      RegisterMap::WalkContinuation::skip);
      frame deoptee = current->last_frame().sender(&map);
      assert(deoptee.is_deoptimized_frame(), "must be deopted");
      // Adjust the pc back to the original throwing pc
      pc = deoptee.pc();
    }

    // If we are forcing an unwind because of stack overflow then deopt is
    // irrelevant since we are throwing the frame away anyway.

    if (deopting && !force_unwind) {
      // Frame is mid-deoptimization: resume in the deopt blob's exception
      // unpacker rather than in a (stale) compiled handler.
      handler_address = SharedRuntime::deopt_blob()->unpack_with_exception();
    } else {

      // Fast path: consult the nmethod's per-(exception, pc) handler cache.
      handler_address =
        force_unwind ? nullptr : nm->handler_for_exception_and_pc(exception, pc);

      if (handler_address == nullptr) {
        bool recursive_exception = false;
        handler_address = SharedRuntime::compute_compiled_exc_handler(nm, pc, exception, force_unwind, true, recursive_exception);
        assert (handler_address != nullptr, "must have compiled handler");
        // Update the exception cache only when the unwind was not forced
        // and there didn't happen another exception during the computation of the
        // compiled exception handler. Checking for exception oop equality is not
        // sufficient because some exceptions are pre-allocated and reused.
        if (!force_unwind && !recursive_exception) {
          nm->add_handler_for_exception_and_pc(exception,pc,handler_address);
        }
      } else {
#ifdef ASSERT
        // Cross-check the cached handler against a fresh computation.
        bool recursive_exception = false;
        address computed_address = SharedRuntime::compute_compiled_exc_handler(nm, pc, exception, force_unwind, true, recursive_exception);
        vmassert(recursive_exception || (handler_address == computed_address), "Handler address inconsistency: " PTR_FORMAT " != " PTR_FORMAT,
                 p2i(handler_address), p2i(computed_address));
#endif
      }
    }

    current->set_exception_pc(pc);
    current->set_exception_handler_pc(handler_address);
  }

  // Restore correct return pc. Was saved above.
  current->set_exception_oop(exception());
  return handler_address;

JRT_END
2012
2013 // We are entering here from exception_blob
2014 // If there is a compiled exception handler in this method, we will continue there;
2015 // otherwise we will unwind the stack and continue at the caller of top frame method
2016 // Note we enter without the usual JRT wrapper. We will call a helper routine that
2017 // will do the normal VM entry. We do it this way so that we can see if the nmethod
2018 // we looked up the handler for has been deoptimized in the meantime. If it has been
2019 // we must not use the handler and instead return the deopt blob.
address OptoRuntime::handle_exception_C(JavaThread* current) {
  //
  // We are in Java not VM and in debug mode we have a NoHandleMark
  //
#ifndef PRODUCT
  SharedRuntime::_find_handler_ctr++;          // find exception handler
#endif
  DEBUG_ONLY(NoHandleMark __hm;)
  nmethod* nm = nullptr;
  address handler_address = nullptr;
  {
    // Enter the VM

    ResetNoHandleMark rnhm;
    handler_address = handle_exception_C_helper(current, nm);
  }

  // Back in java: Use no oops, DON'T safepoint

  // Now check to see if the handler we are returning is in a now
  // deoptimized frame

  if (nm != nullptr) {
    RegisterMap map(current,
                    RegisterMap::UpdateMap::skip,
                    RegisterMap::ProcessFrames::skip,
                    RegisterMap::WalkContinuation::skip);
    frame caller = current->last_frame().sender(&map);
#ifdef ASSERT
    assert(caller.is_compiled_frame(), "must be");
#endif // ASSERT
    if (caller.is_deoptimized_frame()) {
      // The frame was deoptimized while we were in the VM (see the header
      // comment above): the looked-up handler must not be used; continue in
      // the deopt blob's exception unpacker instead.
      handler_address = SharedRuntime::deopt_blob()->unpack_with_exception();
    }
  }
  return handler_address;
}
2057
2058 //------------------------------rethrow----------------------------------------
2059 // We get here after compiled code has executed a 'RethrowNode'. The callee
2060 // is either throwing or rethrowing an exception. The callee-save registers
2061 // have been restored, synchronized objects have been unlocked and the callee
2062 // stack frame has been removed. The return address was passed in.
2063 // Exception oop is passed as the 1st argument. This routine is then called
2064 // from the stub. On exit, we know where to jump in the caller's code.
2065 // After this C code exits, the stub will pop his frame and end in a jump
2066 // (instead of a return). We enter the caller's default handler.
2067 //
2068 // This must be JRT_LEAF:
2069 // - caller will not change its state as we cannot block on exit,
2070 // therefore raw_exception_handler_for_return_address is all it takes
2071 // to handle deoptimized blobs
2072 //
2073 // However, there needs to be a safepoint check in the middle! So compiled
2074 // safepoints are completely watertight.
2075 //
2076 // Thus, it cannot be a leaf since it contains the NoSafepointVerifier.
2077 //
2078 // *THIS IS NOT RECOMMENDED PROGRAMMING STYLE*
2079 //
address OptoRuntime::rethrow_C(oopDesc* exception, JavaThread* thread, address ret_pc) {
  // ret_pc will have been loaded from the stack, so for AArch64 will be signed.
  AARCH64_PORT_ONLY(ret_pc = pauth_strip_verifiable(ret_pc));

#ifndef PRODUCT
  SharedRuntime::_rethrow_ctr++;               // count rethrows
#endif
  assert (exception != nullptr, "should have thrown a NullPointerException");
#ifdef ASSERT
  if (!(exception->is_a(vmClasses::Throwable_klass()))) {
    // should throw an exception here
    ShouldNotReachHere();
  }
#endif

  // Pass the exception oop back to the rethrow stub via the thread's
  // vm_result slot; the stub will jump to the returned handler address.
  thread->set_vm_result_oop(exception);
  // Frame not compiled (handles deoptimization blob)
  return SharedRuntime::raw_exception_handler_for_return_address(thread, ret_pc);
}
2099
2100 static const TypeFunc* make_rethrow_Type() {
2101 // create input type (domain)
2102 const Type **fields = TypeTuple::fields(1);
2103 fields[TypeFunc::Parms+0] = TypeInstPtr::NOTNULL; // Exception oop
2104 const TypeTuple *domain = TypeTuple::make(TypeFunc::Parms+1,fields);
2105
2106 // create result type (range)
2107 fields = TypeTuple::fields(1);
2108 fields[TypeFunc::Parms+0] = TypeInstPtr::NOTNULL; // Exception oop
2109 const TypeTuple *range = TypeTuple::make(TypeFunc::Parms+1, fields);
2110
2111 return TypeFunc::make(domain, range);
2112 }
2113
2114
2115 void OptoRuntime::deoptimize_caller_frame(JavaThread *thread, bool doit) {
2116 // Deoptimize the caller before continuing, as the compiled
2117 // exception handler table may not be valid.
2118 if (DeoptimizeOnAllocationException && doit) {
2119 deoptimize_caller_frame(thread);
2120 }
2121 }
2122
2123 void OptoRuntime::deoptimize_caller_frame(JavaThread *thread) {
2124 // Called from within the owner thread, so no need for safepoint
2125 RegisterMap reg_map(thread,
2126 RegisterMap::UpdateMap::include,
2127 RegisterMap::ProcessFrames::include,
2128 RegisterMap::WalkContinuation::skip);
2129 frame stub_frame = thread->last_frame();
2130 assert(stub_frame.is_runtime_frame() || exception_blob()->contains(stub_frame.pc()), "sanity check");
2131 frame caller_frame = stub_frame.sender(®_map);
2132
2133 // Deoptimize the caller frame.
2134 Deoptimization::deoptimize_frame(thread, caller_frame.id());
2135 }
2136
2137
2138 bool OptoRuntime::is_deoptimized_caller_frame(JavaThread *thread) {
2139 // Called from within the owner thread, so no need for safepoint
2140 RegisterMap reg_map(thread,
2141 RegisterMap::UpdateMap::include,
2142 RegisterMap::ProcessFrames::include,
2143 RegisterMap::WalkContinuation::skip);
2144 frame stub_frame = thread->last_frame();
2145 assert(stub_frame.is_runtime_frame() || exception_blob()->contains(stub_frame.pc()), "sanity check");
2146 frame caller_frame = stub_frame.sender(®_map);
2147 return caller_frame.is_deoptimized_frame();
2148 }
2149
2150 static const TypeFunc* make_register_finalizer_Type() {
2151 // create input type (domain)
2152 const Type **fields = TypeTuple::fields(1);
2153 fields[TypeFunc::Parms+0] = TypeInstPtr::NOTNULL; // oop; Receiver
2154 // // The JavaThread* is passed to each routine as the last argument
2155 // fields[TypeFunc::Parms+1] = TypeRawPtr::NOTNULL; // JavaThread *; Executing thread
2156 const TypeTuple *domain = TypeTuple::make(TypeFunc::Parms+1,fields);
2157
2158 // create result type (range)
2159 fields = TypeTuple::fields(0);
2160
2161 const TypeTuple *range = TypeTuple::make(TypeFunc::Parms+0,fields);
2162
2163 return TypeFunc::make(domain, range);
2164 }
2165
2166 #if INCLUDE_JFR
2167 static const TypeFunc* make_class_id_load_barrier_Type() {
2168 // create input type (domain)
2169 const Type **fields = TypeTuple::fields(1);
2170 fields[TypeFunc::Parms+0] = TypeInstPtr::KLASS;
2171 const TypeTuple *domain = TypeTuple::make(TypeFunc::Parms + 1, fields);
2172
2173 // create result type (range)
2174 fields = TypeTuple::fields(0);
2175
2176 const TypeTuple *range = TypeTuple::make(TypeFunc::Parms + 0, fields);
2177
2178 return TypeFunc::make(domain,range);
2179 }
2180 #endif // INCLUDE_JFR
2181
2182 //-----------------------------------------------------------------------------
2183 static const TypeFunc* make_dtrace_method_entry_exit_Type() {
2184 // create input type (domain)
2185 const Type **fields = TypeTuple::fields(2);
2186 fields[TypeFunc::Parms+0] = TypeRawPtr::BOTTOM; // Thread-local storage
2187 fields[TypeFunc::Parms+1] = TypeMetadataPtr::BOTTOM; // Method*; Method we are entering
2188 const TypeTuple *domain = TypeTuple::make(TypeFunc::Parms+2,fields);
2189
2190 // create result type (range)
2191 fields = TypeTuple::fields(0);
2192
2193 const TypeTuple *range = TypeTuple::make(TypeFunc::Parms+0,fields);
2194
2195 return TypeFunc::make(domain, range);
2196 }
2197
2198 static const TypeFunc* make_dtrace_object_alloc_Type() {
2199 // create input type (domain)
2200 const Type **fields = TypeTuple::fields(2);
2201 fields[TypeFunc::Parms+0] = TypeRawPtr::BOTTOM; // Thread-local storage
2202 fields[TypeFunc::Parms+1] = TypeInstPtr::NOTNULL; // oop; newly allocated object
2203
2204 const TypeTuple *domain = TypeTuple::make(TypeFunc::Parms+2,fields);
2205
2206 // create result type (range)
2207 fields = TypeTuple::fields(0);
2208
2209 const TypeTuple *range = TypeTuple::make(TypeFunc::Parms+0,fields);
2210
2211 return TypeFunc::make(domain, range);
2212 }
2213
// Slow-path runtime entry: delegate to InstanceKlass::register_finalizer for
// a newly allocated object whose klass has a finalizer.  CHECK propagates any
// pending exception raised during registration.
JRT_ENTRY_NO_ASYNC(void, OptoRuntime::register_finalizer_C(oopDesc* obj, JavaThread* current))
  assert(oopDesc::is_oop(obj), "must be a valid oop");
  assert(obj->klass()->has_finalizer(), "shouldn't be here otherwise");
  InstanceKlass::register_finalizer(instanceOop(obj), CHECK);
JRT_END
2219
2220 //-----------------------------------------------------------------------------
2221
2222 NamedCounter * volatile OptoRuntime::_named_counters = nullptr;
2223
2224 //
2225 // dump the collected NamedCounters.
2226 //
2227 void OptoRuntime::print_named_counters() {
2228 int total_lock_count = 0;
2229 int eliminated_lock_count = 0;
2230
2231 NamedCounter* c = _named_counters;
2232 while (c) {
2233 if (c->tag() == NamedCounter::LockCounter || c->tag() == NamedCounter::EliminatedLockCounter) {
2234 int count = c->count();
2235 if (count > 0) {
2236 bool eliminated = c->tag() == NamedCounter::EliminatedLockCounter;
2237 if (Verbose) {
2238 tty->print_cr("%d %s%s", count, c->name(), eliminated ? " (eliminated)" : "");
2239 }
2240 total_lock_count += count;
2241 if (eliminated) {
2242 eliminated_lock_count += count;
2243 }
2244 }
2245 }
2246 c = c->next();
2247 }
2248 if (total_lock_count > 0) {
2249 tty->print_cr("dynamic locks: %d", total_lock_count);
2250 if (eliminated_lock_count) {
2251 tty->print_cr("eliminated locks: %d (%d%%)", eliminated_lock_count,
2252 (int)(eliminated_lock_count * 100.0 / total_lock_count));
2253 }
2254 }
2255 }
2256
2257 //
2258 // Allocate a new NamedCounter. The JVMState is used to generate the
2259 // name which consists of method@line for the inlining tree.
2260 //
2261
2262 NamedCounter* OptoRuntime::new_named_counter(JVMState* youngest_jvms, NamedCounter::CounterTag tag) {
2263 int max_depth = youngest_jvms->depth();
2264
2265 // Visit scopes from youngest to oldest.
2266 bool first = true;
2267 stringStream st;
2268 for (int depth = max_depth; depth >= 1; depth--) {
2269 JVMState* jvms = youngest_jvms->of_depth(depth);
2270 ciMethod* m = jvms->has_method() ? jvms->method() : nullptr;
2271 if (!first) {
2272 st.print(" ");
2273 } else {
2274 first = false;
2275 }
2276 int bci = jvms->bci();
2277 if (bci < 0) bci = 0;
2278 if (m != nullptr) {
2279 st.print("%s.%s", m->holder()->name()->as_utf8(), m->name()->as_utf8());
2280 } else {
2281 st.print("no method");
2282 }
2283 st.print("@%d", bci);
2284 // To print linenumbers instead of bci use: m->line_number_from_bci(bci)
2285 }
2286 NamedCounter* c = new NamedCounter(st.freeze(), tag);
2287
2288 // atomically add the new counter to the head of the list. We only
2289 // add counters so this is safe.
2290 NamedCounter* head;
2291 do {
2292 c->set_next(nullptr);
2293 head = _named_counters;
2294 c->set_next(head);
2295 } while (AtomicAccess::cmpxchg(&_named_counters, head, c) != head);
2296 return c;
2297 }
2298
2299 void OptoRuntime::initialize_types() {
2300 _new_instance_Type = make_new_instance_Type();
2301 _new_array_Type = make_new_array_Type();
2302 _new_array_nozero_Type = make_new_array_nozero_Type();
2303 _multianewarray2_Type = multianewarray_Type(2);
2304 _multianewarray3_Type = multianewarray_Type(3);
2305 _multianewarray4_Type = multianewarray_Type(4);
2306 _multianewarray5_Type = multianewarray_Type(5);
2307 _multianewarrayN_Type = make_multianewarrayN_Type();
2308 _complete_monitor_enter_Type = make_complete_monitor_enter_Type();
2309 _complete_monitor_exit_Type = make_complete_monitor_exit_Type();
2310 _monitor_notify_Type = make_monitor_notify_Type();
2311 _uncommon_trap_Type = make_uncommon_trap_Type();
2312 _athrow_Type = make_athrow_Type();
2313 _rethrow_Type = make_rethrow_Type();
2314 _Math_D_D_Type = make_Math_D_D_Type();
2315 _Math_DD_D_Type = make_Math_DD_D_Type();
2316 _modf_Type = make_modf_Type();
2317 _l2f_Type = make_l2f_Type();
2318 _void_long_Type = make_void_long_Type();
2319 _void_void_Type = make_void_void_Type();
2320 _jfr_write_checkpoint_Type = make_jfr_write_checkpoint_Type();
2321 _flush_windows_Type = make_flush_windows_Type();
2322 _fast_arraycopy_Type = make_arraycopy_Type(ac_fast);
2323 _checkcast_arraycopy_Type = make_arraycopy_Type(ac_checkcast);
2324 _generic_arraycopy_Type = make_arraycopy_Type(ac_generic);
2325 _slow_arraycopy_Type = make_arraycopy_Type(ac_slow);
2326 _unsafe_setmemory_Type = make_setmemory_Type();
2327 _array_fill_Type = make_array_fill_Type();
2328 _array_sort_Type = make_array_sort_Type();
2329 _array_partition_Type = make_array_partition_Type();
2330 _aescrypt_block_Type = make_aescrypt_block_Type();
2331 _cipherBlockChaining_aescrypt_Type = make_cipherBlockChaining_aescrypt_Type();
2332 _electronicCodeBook_aescrypt_Type = make_electronicCodeBook_aescrypt_Type();
2333 _counterMode_aescrypt_Type = make_counterMode_aescrypt_Type();
2334 _galoisCounterMode_aescrypt_Type = make_galoisCounterMode_aescrypt_Type();
2335 _digestBase_implCompress_with_sha3_Type = make_digestBase_implCompress_Type( /* is_sha3= */ true);
2336 _digestBase_implCompress_without_sha3_Type = make_digestBase_implCompress_Type( /* is_sha3= */ false);;
2337 _digestBase_implCompressMB_with_sha3_Type = make_digestBase_implCompressMB_Type(/* is_sha3= */ true);
2338 _digestBase_implCompressMB_without_sha3_Type = make_digestBase_implCompressMB_Type(/* is_sha3= */ false);
2339 _double_keccak_Type = make_double_keccak_Type();
2340 _multiplyToLen_Type = make_multiplyToLen_Type();
2341 _montgomeryMultiply_Type = make_montgomeryMultiply_Type();
2342 _montgomerySquare_Type = make_montgomerySquare_Type();
2343 _squareToLen_Type = make_squareToLen_Type();
2344 _mulAdd_Type = make_mulAdd_Type();
2345 _bigIntegerShift_Type = make_bigIntegerShift_Type();
2346 _vectorizedMismatch_Type = make_vectorizedMismatch_Type();
2347 _ghash_processBlocks_Type = make_ghash_processBlocks_Type();
2348 _chacha20Block_Type = make_chacha20Block_Type();
2349 _kyberNtt_Type = make_kyberNtt_Type();
2350 _kyberInverseNtt_Type = make_kyberInverseNtt_Type();
2351 _kyberNttMult_Type = make_kyberNttMult_Type();
2352 _kyberAddPoly_2_Type = make_kyberAddPoly_2_Type();
2353 _kyberAddPoly_3_Type = make_kyberAddPoly_3_Type();
2354 _kyber12To16_Type = make_kyber12To16_Type();
2355 _kyberBarrettReduce_Type = make_kyberBarrettReduce_Type();
2356 _dilithiumAlmostNtt_Type = make_dilithiumAlmostNtt_Type();
2357 _dilithiumAlmostInverseNtt_Type = make_dilithiumAlmostInverseNtt_Type();
2358 _dilithiumNttMult_Type = make_dilithiumNttMult_Type();
2359 _dilithiumMontMulByConstant_Type = make_dilithiumMontMulByConstant_Type();
2360 _dilithiumDecomposePoly_Type = make_dilithiumDecomposePoly_Type();
2361 _base64_encodeBlock_Type = make_base64_encodeBlock_Type();
2362 _base64_decodeBlock_Type = make_base64_decodeBlock_Type();
2363 _string_IndexOf_Type = make_string_IndexOf_Type();
2364 _poly1305_processBlocks_Type = make_poly1305_processBlocks_Type();
2365 _intpoly_montgomeryMult_P256_Type = make_intpoly_montgomeryMult_P256_Type();
2366 _intpoly_assign_Type = make_intpoly_assign_Type();
2367 _updateBytesCRC32_Type = make_updateBytesCRC32_Type();
2368 _updateBytesCRC32C_Type = make_updateBytesCRC32C_Type();
2369 _updateBytesAdler32_Type = make_updateBytesAdler32_Type();
2370 _osr_end_Type = make_osr_end_Type();
2371 _register_finalizer_Type = make_register_finalizer_Type();
2372 _vthread_transition_Type = make_vthread_transition_Type();
2373 JFR_ONLY(
2374 _class_id_load_barrier_Type = make_class_id_load_barrier_Type();
2375 )
2376 _dtrace_method_entry_exit_Type = make_dtrace_method_entry_exit_Type();
2377 _dtrace_object_alloc_Type = make_dtrace_object_alloc_Type();
2378 }
2379
2380 int trace_exception_counter = 0;
2381 static void trace_exception(outputStream* st, oop exception_oop, address exception_pc, const char* msg) {
2382 trace_exception_counter++;
2383 stringStream tempst;
2384
2385 tempst.print("%d [Exception (%s): ", trace_exception_counter, msg);
2386 exception_oop->print_value_on(&tempst);
2387 tempst.print(" in ");
2388 CodeBlob* blob = CodeCache::find_blob(exception_pc);
2389 if (blob->is_nmethod()) {
2390 blob->as_nmethod()->method()->print_value_on(&tempst);
2391 } else if (blob->is_runtime_stub()) {
2392 tempst.print("<runtime-stub>");
2393 } else {
2394 tempst.print("<unknown>");
2395 }
2396 tempst.print(" at " INTPTR_FORMAT, p2i(exception_pc));
2397 tempst.print("]");
2398
2399 st->print_raw_cr(tempst.freeze());
2400 }
2401
2402 const TypeFunc *OptoRuntime::store_inline_type_fields_Type() {
2403 // create input type (domain)
2404 uint total = SharedRuntime::java_return_convention_max_int + SharedRuntime::java_return_convention_max_float*2;
2405 const Type **fields = TypeTuple::fields(total);
2406 // We don't know the number of returned values and their
2407 // types. Assume all registers available to the return convention
2408 // are used.
2409 fields[TypeFunc::Parms] = TypePtr::BOTTOM;
2410 uint i = 1;
2411 for (; i < SharedRuntime::java_return_convention_max_int; i++) {
2412 fields[TypeFunc::Parms+i] = TypeInt::INT;
2413 }
2414 for (; i < total; i+=2) {
2415 fields[TypeFunc::Parms+i] = Type::DOUBLE;
2416 fields[TypeFunc::Parms+i+1] = Type::HALF;
2417 }
2418 const TypeTuple* domain = TypeTuple::make(TypeFunc::Parms + total, fields);
2419
2420 // create result type (range)
2421 fields = TypeTuple::fields(1);
2422 fields[TypeFunc::Parms+0] = TypeInstPtr::BOTTOM;
2423
2424 const TypeTuple *range = TypeTuple::make(TypeFunc::Parms+1,fields);
2425
2426 return TypeFunc::make(domain, range);
2427 }
2428
2429 const TypeFunc *OptoRuntime::pack_inline_type_Type() {
2430 // create input type (domain)
2431 uint total = 1 + SharedRuntime::java_return_convention_max_int + SharedRuntime::java_return_convention_max_float*2;
2432 const Type **fields = TypeTuple::fields(total);
2433 // We don't know the number of returned values and their
2434 // types. Assume all registers available to the return convention
2435 // are used.
2436 fields[TypeFunc::Parms] = TypeRawPtr::BOTTOM;
2437 fields[TypeFunc::Parms+1] = TypeRawPtr::BOTTOM;
2438 uint i = 2;
2439 for (; i < SharedRuntime::java_return_convention_max_int+1; i++) {
2440 fields[TypeFunc::Parms+i] = TypeInt::INT;
2441 }
2442 for (; i < total; i+=2) {
2443 fields[TypeFunc::Parms+i] = Type::DOUBLE;
2444 fields[TypeFunc::Parms+i+1] = Type::HALF;
2445 }
2446 const TypeTuple* domain = TypeTuple::make(TypeFunc::Parms + total, fields);
2447
2448 // create result type (range)
2449 fields = TypeTuple::fields(1);
2450 fields[TypeFunc::Parms+0] = TypeInstPtr::NOTNULL;
2451
2452 const TypeTuple *range = TypeTuple::make(TypeFunc::Parms+1,fields);
2453
2454 return TypeFunc::make(domain, range);
2455 }
2456
// Runtime entry: load element `index` from a flat array whose element
// layout is not known to the compiled caller. obj_at() buffers the flat
// element as a heap instance (and may allocate / throw), hence the
// non-leaf JRT_BLOCK. The result is handed back via the thread-local
// vm_result_oop slot.
JRT_BLOCK_ENTRY(void, OptoRuntime::load_unknown_inline_C(flatArrayOopDesc* array, int index, JavaThread* current))
  JRT_BLOCK;
  oop buffer = array->obj_at(index, THREAD);
  // Deoptimize the caller frame; the second argument forwards whether
  // obj_at raised a pending exception — presumably so the interpreter
  // re-executes/dispatches it (TODO confirm against deoptimize_caller_frame).
  deoptimize_caller_frame(current, HAS_PENDING_EXCEPTION);
  current->set_vm_result_oop(buffer);
  JRT_BLOCK_END;
JRT_END
2464
2465 const TypeFunc* OptoRuntime::load_unknown_inline_Type() {
2466 // create input type (domain)
2467 const Type** fields = TypeTuple::fields(2);
2468 fields[TypeFunc::Parms] = TypeOopPtr::NOTNULL;
2469 fields[TypeFunc::Parms+1] = TypeInt::POS;
2470
2471 const TypeTuple* domain = TypeTuple::make(TypeFunc::Parms+2, fields);
2472
2473 // create result type (range)
2474 fields = TypeTuple::fields(1);
2475 fields[TypeFunc::Parms] = TypeInstPtr::BOTTOM;
2476
2477 const TypeTuple* range = TypeTuple::make(TypeFunc::Parms+1, fields);
2478
2479 return TypeFunc::make(domain, range);
2480 }
2481
// Runtime entry: store `buffer` into slot `index` of a flat array whose
// element layout is not known to the compiled caller. obj_at_put()
// flattens the buffered instance into the array.
JRT_BLOCK_ENTRY(void, OptoRuntime::store_unknown_inline_C(instanceOopDesc* buffer, flatArrayOopDesc* array, int index, JavaThread* current))
  JRT_BLOCK;
  array->obj_at_put(index, buffer, THREAD);
  // Guard the current assumption that a flat-array store cannot throw;
  // if obj_at_put ever can, this entry must stop being treated as a leaf.
  if (HAS_PENDING_EXCEPTION) {
    fatal("This entry must be changed to be a non-leaf entry because writing to a flat array can now throw an exception");
  }
  JRT_BLOCK_END;
JRT_END
2490
2491 const TypeFunc* OptoRuntime::store_unknown_inline_Type() {
2492 // create input type (domain)
2493 const Type** fields = TypeTuple::fields(3);
2494 fields[TypeFunc::Parms] = TypeInstPtr::NOTNULL;
2495 fields[TypeFunc::Parms+1] = TypeOopPtr::NOTNULL;
2496 fields[TypeFunc::Parms+2] = TypeInt::POS;
2497
2498 const TypeTuple* domain = TypeTuple::make(TypeFunc::Parms+3, fields);
2499
2500 // create result type (range)
2501 fields = TypeTuple::fields(0);
2502 const TypeTuple* range = TypeTuple::make(TypeFunc::Parms, fields);
2503
2504 return TypeFunc::make(domain, range);
2505 }