1 /*
2 * Copyright (c) 1998, 2025, Oracle and/or its affiliates. All rights reserved.
3 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
4 *
5 * This code is free software; you can redistribute it and/or modify it
6 * under the terms of the GNU General Public License version 2 only, as
7 * published by the Free Software Foundation.
8 *
9 * This code is distributed in the hope that it will be useful, but WITHOUT
10 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
11 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
12 * version 2 for more details (a copy is included in the LICENSE file that
13 * accompanied this code).
14 *
15 * You should have received a copy of the GNU General Public License version
16 * 2 along with this work; if not, write to the Free Software Foundation,
17 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
18 *
19 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
20 * or visit www.oracle.com if you need additional information or have any
21 * questions.
22 *
23 */
24
25 #include "classfile/vmClasses.hpp"
26 #include "classfile/vmSymbols.hpp"
27 #include "code/codeCache.hpp"
28 #include "code/compiledIC.hpp"
29 #include "code/nmethod.hpp"
30 #include "code/pcDesc.hpp"
31 #include "code/scopeDesc.hpp"
32 #include "code/vtableStubs.hpp"
33 #include "compiler/compilationMemoryStatistic.hpp"
34 #include "compiler/compileBroker.hpp"
35 #include "compiler/oopMap.hpp"
36 #include "gc/g1/g1HeapRegion.hpp"
37 #include "gc/shared/barrierSet.hpp"
38 #include "gc/shared/collectedHeap.hpp"
39 #include "gc/shared/gcLocker.hpp"
40 #include "interpreter/bytecode.hpp"
41 #include "interpreter/interpreter.hpp"
42 #include "interpreter/linkResolver.hpp"
43 #include "logging/log.hpp"
44 #include "logging/logStream.hpp"
45 #include "memory/oopFactory.hpp"
46 #include "memory/resourceArea.hpp"
47 #include "oops/klass.inline.hpp"
48 #include "oops/objArrayKlass.hpp"
49 #include "oops/oop.inline.hpp"
50 #include "oops/typeArrayOop.inline.hpp"
51 #include "opto/ad.hpp"
52 #include "opto/addnode.hpp"
53 #include "opto/callnode.hpp"
54 #include "opto/cfgnode.hpp"
55 #include "opto/graphKit.hpp"
56 #include "opto/machnode.hpp"
57 #include "opto/matcher.hpp"
58 #include "opto/memnode.hpp"
59 #include "opto/mulnode.hpp"
60 #include "opto/output.hpp"
61 #include "opto/runtime.hpp"
62 #include "opto/subnode.hpp"
63 #include "prims/jvmtiExport.hpp"
64 #include "runtime/atomic.hpp"
65 #include "runtime/frame.inline.hpp"
66 #include "runtime/handles.inline.hpp"
67 #include "runtime/interfaceSupport.inline.hpp"
68 #include "runtime/javaCalls.hpp"
69 #include "runtime/sharedRuntime.hpp"
70 #include "runtime/signature.hpp"
71 #include "runtime/stackWatermarkSet.hpp"
72 #include "runtime/synchronizer.hpp"
73 #include "runtime/threadWXSetters.inline.hpp"
74 #include "runtime/vframe.hpp"
75 #include "runtime/vframe_hp.hpp"
76 #include "runtime/vframeArray.hpp"
77 #include "utilities/copy.hpp"
78 #include "utilities/preserveException.hpp"
79
80
81 // For debugging purposes:
82 // To force FullGCALot inside a runtime function, add the following two lines
83 //
84 // Universe::release_fullgc_alot_dummy();
85 // Universe::heap()->collect();
86 //
87 // On the command line, specify the parameters: -XX:+FullGCALot -XX:FullGCALotStart=100000000
88
89
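// Define the static blob and stub entry-point fields of OptoRuntime that are
// enumerated by C2_STUBS_DO. Each field starts out null and is installed by
// OptoRuntime::generate() below.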
90 #define C2_BLOB_FIELD_DEFINE(name, type) \
91 type* OptoRuntime:: BLOB_FIELD_NAME(name) = nullptr;
92 #define C2_STUB_FIELD_NAME(name) _ ## name ## _Java
93 #define C2_STUB_FIELD_DEFINE(name, f, t, r) \
94 address OptoRuntime:: C2_STUB_FIELD_NAME(name) = nullptr;
95 #define C2_JVMTI_STUB_FIELD_DEFINE(name) \
96 address OptoRuntime:: STUB_FIELD_NAME(name) = nullptr;
97 C2_STUBS_DO(C2_BLOB_FIELD_DEFINE, C2_STUB_FIELD_DEFINE, C2_JVMTI_STUB_FIELD_DEFINE)
98 #undef C2_BLOB_FIELD_DEFINE
99 #undef C2_STUB_FIELD_DEFINE
100 #undef C2_JVMTI_STUB_FIELD_DEFINE
101
102 // This should be called in an assertion at the start of OptoRuntime routines
103 // which are entered from compiled code (all of them)
104 #ifdef ASSERT
105 static bool check_compiled_frame(JavaThread* thread) {
106 assert(thread->last_frame().is_runtime_frame(), "cannot call runtime directly from compiled code");
107 RegisterMap map(thread,
108 RegisterMap::UpdateMap::skip,
109 RegisterMap::ProcessFrames::include,
110 RegisterMap::WalkContinuation::skip);
111 frame caller = thread->last_frame().sender(&map);
112 assert(caller.is_compiled_frame(), "not being called from compiled code");
113 return true;
114 }
115 #endif // ASSERT
116
117 /*
118 #define gen(env, var, type_func_gen, c_func, fancy_jump, pass_tls, return_pc) \
119 var = generate_stub(env, type_func_gen, CAST_FROM_FN_PTR(address, c_func), #var, fancy_jump, pass_tls, return_pc); \
120 if (var == nullptr) { return false; }
121 */
122
123 #define GEN_C2_BLOB(name, type) \
124 BLOB_FIELD_NAME(name) = \
125 generate_ ## name ## _blob(); \
126 if (BLOB_FIELD_NAME(name) == nullptr) { return false; }
127
128 // a few helper macros to conjure up generate_stub call arguments
129 #define C2_STUB_FIELD_NAME(name) _ ## name ## _Java
130 #define C2_STUB_TYPEFUNC(name) name ## _Type
131 #define C2_STUB_C_FUNC(name) CAST_FROM_FN_PTR(address, name ## _C)
132 #define C2_STUB_ID(name) StubId:: JOIN3(c2, name, id)
133 #define C2_STUB_NAME(name) stub_name(C2_STUB_ID(name))
134
135 // Almost all the C functions targeted from the generated stubs are
136 // implemented locally to OptoRuntime with names that can be generated
137 // from the stub name by appending suffix '_C'. However, in two cases
138 // a common target method also needs to be called from shared runtime
139 // stubs. In these two cases the opto stubs rely on method
140 // implementations defined in class SharedRuntime. The following
141 // defines temporarily rebind the generated names to reference the
142 // relevant implementations.
143
144 #define GEN_C2_STUB(name, fancy_jump, pass_tls, pass_retpc ) \
145 C2_STUB_FIELD_NAME(name) = \
146 generate_stub(env, \
147 C2_STUB_TYPEFUNC(name), \
148 C2_STUB_C_FUNC(name), \
149 C2_STUB_NAME(name), \
150 C2_STUB_ID(name), \
151 fancy_jump, \
152 pass_tls, \
153 pass_retpc); \
154 if (C2_STUB_FIELD_NAME(name) == nullptr) { return false; } \
155
156 #define C2_JVMTI_STUB_C_FUNC(name) CAST_FROM_FN_PTR(address, SharedRuntime::name)
157
158 #define GEN_C2_JVMTI_STUB(name) \
159 STUB_FIELD_NAME(name) = \
160 generate_stub(env, \
161 notify_jvmti_vthread_Type, \
162 C2_JVMTI_STUB_C_FUNC(name), \
163 C2_STUB_NAME(name), \
164 C2_STUB_ID(name), \
165 0, \
166 true, \
167 false); \
168 if (STUB_FIELD_NAME(name) == nullptr) { return false; } \
169
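// Generate all of the C2 runtime blobs and stubs enumerated by C2_STUBS_DO,
// returning false if the generation of any blob or stub fails.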
170 bool OptoRuntime::generate(ciEnv* env) {
171
172 C2_STUBS_DO(GEN_C2_BLOB, GEN_C2_STUB, GEN_C2_JVMTI_STUB)
173
174 return true;
175 }
176
177 #undef GEN_C2_BLOB
178
179 #undef C2_STUB_FIELD_NAME
180 #undef C2_STUB_TYPEFUNC
181 #undef C2_STUB_C_FUNC
182 #undef C2_STUB_NAME
183 #undef GEN_C2_STUB
184
185 #undef C2_JVMTI_STUB_C_FUNC
186 #undef GEN_C2_JVMTI_STUB
187 // #undef gen
188
189 const TypeFunc* OptoRuntime::_new_instance_Type = nullptr;
190 const TypeFunc* OptoRuntime::_new_array_Type = nullptr;
191 const TypeFunc* OptoRuntime::_multianewarray2_Type = nullptr;
192 const TypeFunc* OptoRuntime::_multianewarray3_Type = nullptr;
193 const TypeFunc* OptoRuntime::_multianewarray4_Type = nullptr;
194 const TypeFunc* OptoRuntime::_multianewarray5_Type = nullptr;
195 const TypeFunc* OptoRuntime::_multianewarrayN_Type = nullptr;
196 const TypeFunc* OptoRuntime::_complete_monitor_enter_Type = nullptr;
197 const TypeFunc* OptoRuntime::_complete_monitor_exit_Type = nullptr;
198 const TypeFunc* OptoRuntime::_monitor_notify_Type = nullptr;
199 const TypeFunc* OptoRuntime::_uncommon_trap_Type = nullptr;
200 const TypeFunc* OptoRuntime::_athrow_Type = nullptr;
201 const TypeFunc* OptoRuntime::_rethrow_Type = nullptr;
202 const TypeFunc* OptoRuntime::_Math_D_D_Type = nullptr;
203 const TypeFunc* OptoRuntime::_Math_DD_D_Type = nullptr;
204 const TypeFunc* OptoRuntime::_modf_Type = nullptr;
205 const TypeFunc* OptoRuntime::_l2f_Type = nullptr;
206 const TypeFunc* OptoRuntime::_void_long_Type = nullptr;
207 const TypeFunc* OptoRuntime::_void_void_Type = nullptr;
208 const TypeFunc* OptoRuntime::_jfr_write_checkpoint_Type = nullptr;
209 const TypeFunc* OptoRuntime::_flush_windows_Type = nullptr;
210 const TypeFunc* OptoRuntime::_fast_arraycopy_Type = nullptr;
211 const TypeFunc* OptoRuntime::_checkcast_arraycopy_Type = nullptr;
212 const TypeFunc* OptoRuntime::_generic_arraycopy_Type = nullptr;
213 const TypeFunc* OptoRuntime::_slow_arraycopy_Type = nullptr;
214 const TypeFunc* OptoRuntime::_unsafe_setmemory_Type = nullptr;
215 const TypeFunc* OptoRuntime::_array_fill_Type = nullptr;
216 const TypeFunc* OptoRuntime::_array_sort_Type = nullptr;
217 const TypeFunc* OptoRuntime::_array_partition_Type = nullptr;
218 const TypeFunc* OptoRuntime::_aescrypt_block_Type = nullptr;
219 const TypeFunc* OptoRuntime::_cipherBlockChaining_aescrypt_Type = nullptr;
220 const TypeFunc* OptoRuntime::_electronicCodeBook_aescrypt_Type = nullptr;
221 const TypeFunc* OptoRuntime::_counterMode_aescrypt_Type = nullptr;
222 const TypeFunc* OptoRuntime::_galoisCounterMode_aescrypt_Type = nullptr;
223 const TypeFunc* OptoRuntime::_digestBase_implCompress_with_sha3_Type = nullptr;
224 const TypeFunc* OptoRuntime::_digestBase_implCompress_without_sha3_Type = nullptr;
225 const TypeFunc* OptoRuntime::_digestBase_implCompressMB_with_sha3_Type = nullptr;
226 const TypeFunc* OptoRuntime::_digestBase_implCompressMB_without_sha3_Type = nullptr;
227 const TypeFunc* OptoRuntime::_double_keccak_Type = nullptr;
228 const TypeFunc* OptoRuntime::_multiplyToLen_Type = nullptr;
229 const TypeFunc* OptoRuntime::_montgomeryMultiply_Type = nullptr;
230 const TypeFunc* OptoRuntime::_montgomerySquare_Type = nullptr;
231 const TypeFunc* OptoRuntime::_squareToLen_Type = nullptr;
232 const TypeFunc* OptoRuntime::_mulAdd_Type = nullptr;
233 const TypeFunc* OptoRuntime::_bigIntegerShift_Type = nullptr;
234 const TypeFunc* OptoRuntime::_vectorizedMismatch_Type = nullptr;
235 const TypeFunc* OptoRuntime::_ghash_processBlocks_Type = nullptr;
236 const TypeFunc* OptoRuntime::_chacha20Block_Type = nullptr;
237 const TypeFunc* OptoRuntime::_kyberNtt_Type = nullptr;
238 const TypeFunc* OptoRuntime::_kyberInverseNtt_Type = nullptr;
239 const TypeFunc* OptoRuntime::_kyberNttMult_Type = nullptr;
240 const TypeFunc* OptoRuntime::_kyberAddPoly_2_Type = nullptr;
241 const TypeFunc* OptoRuntime::_kyberAddPoly_3_Type = nullptr;
242 const TypeFunc* OptoRuntime::_kyber12To16_Type = nullptr;
243 const TypeFunc* OptoRuntime::_kyberBarrettReduce_Type = nullptr;
244 const TypeFunc* OptoRuntime::_dilithiumAlmostNtt_Type = nullptr;
245 const TypeFunc* OptoRuntime::_dilithiumAlmostInverseNtt_Type = nullptr;
246 const TypeFunc* OptoRuntime::_dilithiumNttMult_Type = nullptr;
247 const TypeFunc* OptoRuntime::_dilithiumMontMulByConstant_Type = nullptr;
248 const TypeFunc* OptoRuntime::_dilithiumDecomposePoly_Type = nullptr;
249 const TypeFunc* OptoRuntime::_base64_encodeBlock_Type = nullptr;
250 const TypeFunc* OptoRuntime::_base64_decodeBlock_Type = nullptr;
251 const TypeFunc* OptoRuntime::_string_IndexOf_Type = nullptr;
252 const TypeFunc* OptoRuntime::_poly1305_processBlocks_Type = nullptr;
253 const TypeFunc* OptoRuntime::_intpoly_montgomeryMult_P256_Type = nullptr;
254 const TypeFunc* OptoRuntime::_intpoly_assign_Type = nullptr;
255 const TypeFunc* OptoRuntime::_updateBytesCRC32_Type = nullptr;
256 const TypeFunc* OptoRuntime::_updateBytesCRC32C_Type = nullptr;
257 const TypeFunc* OptoRuntime::_updateBytesAdler32_Type = nullptr;
258 const TypeFunc* OptoRuntime::_osr_end_Type = nullptr;
259 const TypeFunc* OptoRuntime::_register_finalizer_Type = nullptr;
260 #if INCLUDE_JFR
261 const TypeFunc* OptoRuntime::_class_id_load_barrier_Type = nullptr;
262 #endif // INCLUDE_JFR
263 #if INCLUDE_JVMTI
264 const TypeFunc* OptoRuntime::_notify_jvmti_vthread_Type = nullptr;
265 #endif // INCLUDE_JVMTI
266 const TypeFunc* OptoRuntime::_dtrace_method_entry_exit_Type = nullptr;
267 const TypeFunc* OptoRuntime::_dtrace_object_alloc_Type = nullptr;
268
269 // Helper method to generate RuntimeStubs
270 address OptoRuntime::generate_stub(ciEnv* env,
271 TypeFunc_generator gen, address C_function,
272 const char *name, StubId stub_id,
273 int is_fancy_jump, bool pass_tls,
274 bool return_pc) {
275
276 // Matching the default directive, we currently have no method to match.
277 DirectiveSet* directive = DirectivesStack::getDefaultDirective(CompileBroker::compiler(CompLevel_full_optimization));
278 CompilationMemoryStatisticMark cmsm(directive);
279 ResourceMark rm;
280 Compile C(env, gen, C_function, name, stub_id, is_fancy_jump, pass_tls, return_pc, directive);
281 DirectivesStack::release(directive);
282 return C.stub_entry_point();
283 }
284
285 const char* OptoRuntime::stub_name(address entry) {
286 #ifndef PRODUCT
287 CodeBlob* cb = CodeCache::find_blob(entry);
288 RuntimeStub* rs = (RuntimeStub*)cb;
289 assert(rs != nullptr && rs->is_runtime_stub(), "not a runtime stub");
290 return rs->name();
291 #else
292 // Fast implementation for product mode (maybe it should be inlined too)
293 return "runtime stub";
294 #endif
295 }
296
297 // Local methods passed as arguments to the stub generator; they forward
298 // control to the corresponding JRT methods of SharedRuntime.
299
300 void OptoRuntime::slow_arraycopy_C(oopDesc* src, jint src_pos,
301 oopDesc* dest, jint dest_pos,
302 jint length, JavaThread* thread) {
303 SharedRuntime::slow_arraycopy_C(src, src_pos, dest, dest_pos, length, thread);
304 }
305
306 void OptoRuntime::complete_monitor_locking_C(oopDesc* obj, BasicLock* lock, JavaThread* current) {
307 SharedRuntime::complete_monitor_locking_C(obj, lock, current);
308 }
309
310
311 //=============================================================================
312 // Opto compiler runtime routines
313 //=============================================================================
314
315
316 //=============================allocation======================================
317 // We failed the fast-path allocation. Now we need to do a scavenge or GC
318 // and try allocation again.
319
320 // object allocation
321 JRT_BLOCK_ENTRY(void, OptoRuntime::new_instance_C(Klass* klass, JavaThread* current))
322 JRT_BLOCK;
323 #ifndef PRODUCT
324 SharedRuntime::_new_instance_ctr++; // new instance requires GC
325 #endif
326 assert(check_compiled_frame(current), "incorrect caller");
327
328 // These checks are cheap to make and support reflective allocation.
329 int lh = klass->layout_helper();
330 if (Klass::layout_helper_needs_slow_path(lh) || !InstanceKlass::cast(klass)->is_initialized()) {
331 Handle holder(current, klass->klass_holder()); // keep the klass alive
332 klass->check_valid_for_instantiation(false, THREAD);
333 if (!HAS_PENDING_EXCEPTION) {
334 InstanceKlass::cast(klass)->initialize(THREAD);
335 }
336 }
337
338 if (!HAS_PENDING_EXCEPTION) {
339 // Scavenge and allocate an instance.
340 Handle holder(current, klass->klass_holder()); // keep the klass alive
341 oop result = InstanceKlass::cast(klass)->allocate_instance(THREAD);
342 current->set_vm_result_oop(result);
343
344 // Pass oops back through thread local storage. Our apparent type to Java
345 // is that we return an oop, but we can block on exit from this routine and
346 // a GC can trash the oop in C's return register. The generated stub will
347 // fetch the oop from TLS after any possible GC.
348 }
349
350 deoptimize_caller_frame(current, HAS_PENDING_EXCEPTION);
351 JRT_BLOCK_END;
352
353 // inform GC that we won't do card marks for initializing writes.
354 SharedRuntime::on_slowpath_allocation_exit(current);
355 JRT_END
356
357
358 // array allocation
359 JRT_BLOCK_ENTRY(void, OptoRuntime::new_array_C(Klass* array_type, int len, JavaThread* current))
360 JRT_BLOCK;
361 #ifndef PRODUCT
362 SharedRuntime::_new_array_ctr++; // new array requires GC
363 #endif
364 assert(check_compiled_frame(current), "incorrect caller");
365
366 // Scavenge and allocate an instance.
367 oop result;
368
369 if (array_type->is_typeArray_klass()) {
370 // The oopFactory likes to work with the element type.
371 // (We could bypass the oopFactory, since it doesn't add much value.)
372 BasicType elem_type = TypeArrayKlass::cast(array_type)->element_type();
373 result = oopFactory::new_typeArray(elem_type, len, THREAD);
374 } else {
375 // Although the oopFactory likes to work with the elem_type,
376 // the compiler prefers the array_type, since it must already have
377 // that latter value in hand for the fast path.
378 Handle holder(current, array_type->klass_holder()); // keep the array klass alive
379 Klass* elem_type = ObjArrayKlass::cast(array_type)->element_klass();
380 result = oopFactory::new_objArray(elem_type, len, THREAD);
381 }
382
383 // Pass oops back through thread local storage. Our apparent type to Java
384 // is that we return an oop, but we can block on exit from this routine and
385 // a GC can trash the oop in C's return register. The generated stub will
386 // fetch the oop from TLS after any possible GC.
387 deoptimize_caller_frame(current, HAS_PENDING_EXCEPTION);
388 current->set_vm_result_oop(result);
389 JRT_BLOCK_END;
390
391 // inform GC that we won't do card marks for initializing writes.
392 SharedRuntime::on_slowpath_allocation_exit(current);
393 JRT_END
394
395 // array allocation without zeroing
396 JRT_BLOCK_ENTRY(void, OptoRuntime::new_array_nozero_C(Klass* array_type, int len, JavaThread* current))
397 JRT_BLOCK;
398 #ifndef PRODUCT
399 SharedRuntime::_new_array_ctr++; // new array requires GC
400 #endif
401 assert(check_compiled_frame(current), "incorrect caller");
402
403 // Scavenge and allocate an instance.
404 oop result;
405
406 assert(array_type->is_typeArray_klass(), "should be called only for type array");
407 // The oopFactory likes to work with the element type.
408 BasicType elem_type = TypeArrayKlass::cast(array_type)->element_type();
409 result = oopFactory::new_typeArray_nozero(elem_type, len, THREAD);
410
411 // Pass oops back through thread local storage. Our apparent type to Java
412 // is that we return an oop, but we can block on exit from this routine and
413 // a GC can trash the oop in C's return register. The generated stub will
414 // fetch the oop from TLS after any possible GC.
415 deoptimize_caller_frame(current, HAS_PENDING_EXCEPTION);
416 current->set_vm_result_oop(result);
417 JRT_BLOCK_END;
418
419
420 // inform GC that we won't do card marks for initializing writes.
421 SharedRuntime::on_slowpath_allocation_exit(current);
422
423 oop result = current->vm_result_oop();
424 if ((len > 0) && (result != nullptr) &&
425 is_deoptimized_caller_frame(current)) {
426 // Zero array here if the caller is deoptimized.
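// (If the caller is deoptimized, the compiled code that was expected to
// initialize every element will not run, so the array must be zeroed here
// before the interpreter can see it.)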
427 const size_t size = TypeArrayKlass::cast(array_type)->oop_size(result);
428 BasicType elem_type = TypeArrayKlass::cast(array_type)->element_type();
429 size_t hs_bytes = arrayOopDesc::base_offset_in_bytes(elem_type);
430 assert(is_aligned(hs_bytes, BytesPerInt), "must be 4 byte aligned");
431 HeapWord* obj = cast_from_oop<HeapWord*>(result);
432 if (!is_aligned(hs_bytes, BytesPerLong)) {
433 *reinterpret_cast<jint*>(reinterpret_cast<char*>(obj) + hs_bytes) = 0;
434 hs_bytes += BytesPerInt;
435 }
436
437 // Optimized zeroing.
438 assert(is_aligned(hs_bytes, BytesPerLong), "must be 8-byte aligned");
439 const size_t aligned_hs = hs_bytes / BytesPerLong;
440 Copy::fill_to_aligned_words(obj+aligned_hs, size-aligned_hs);
441 }
442
443 JRT_END
444
445 // Note: multianewarray for one dimension is handled inline by GraphKit::new_array.
446
447 // multianewarray for 2 dimensions
448 JRT_ENTRY(void, OptoRuntime::multianewarray2_C(Klass* elem_type, int len1, int len2, JavaThread* current))
449 #ifndef PRODUCT
450 SharedRuntime::_multi2_ctr++; // multianewarray for 2 dimensions
451 #endif
452 assert(check_compiled_frame(current), "incorrect caller");
453 assert(elem_type->is_klass(), "not a class");
454 jint dims[2];
455 dims[0] = len1;
456 dims[1] = len2;
457 Handle holder(current, elem_type->klass_holder()); // keep the klass alive
458 oop obj = ArrayKlass::cast(elem_type)->multi_allocate(2, dims, THREAD);
459 deoptimize_caller_frame(current, HAS_PENDING_EXCEPTION);
460 current->set_vm_result_oop(obj);
461 JRT_END
462
463 // multianewarray for 3 dimensions
464 JRT_ENTRY(void, OptoRuntime::multianewarray3_C(Klass* elem_type, int len1, int len2, int len3, JavaThread* current))
465 #ifndef PRODUCT
466 SharedRuntime::_multi3_ctr++; // multianewarray for 3 dimensions
467 #endif
468 assert(check_compiled_frame(current), "incorrect caller");
469 assert(elem_type->is_klass(), "not a class");
470 jint dims[3];
471 dims[0] = len1;
472 dims[1] = len2;
473 dims[2] = len3;
474 Handle holder(current, elem_type->klass_holder()); // keep the klass alive
475 oop obj = ArrayKlass::cast(elem_type)->multi_allocate(3, dims, THREAD);
476 deoptimize_caller_frame(current, HAS_PENDING_EXCEPTION);
477 current->set_vm_result_oop(obj);
478 JRT_END
479
480 // multianewarray for 4 dimensions
481 JRT_ENTRY(void, OptoRuntime::multianewarray4_C(Klass* elem_type, int len1, int len2, int len3, int len4, JavaThread* current))
482 #ifndef PRODUCT
483 SharedRuntime::_multi4_ctr++; // multianewarray for 4 dimensions
484 #endif
485 assert(check_compiled_frame(current), "incorrect caller");
486 assert(elem_type->is_klass(), "not a class");
487 jint dims[4];
488 dims[0] = len1;
489 dims[1] = len2;
490 dims[2] = len3;
491 dims[3] = len4;
492 Handle holder(current, elem_type->klass_holder()); // keep the klass alive
493 oop obj = ArrayKlass::cast(elem_type)->multi_allocate(4, dims, THREAD);
494 deoptimize_caller_frame(current, HAS_PENDING_EXCEPTION);
495 current->set_vm_result_oop(obj);
496 JRT_END
497
498 // multianewarray for 5 dimensions
499 JRT_ENTRY(void, OptoRuntime::multianewarray5_C(Klass* elem_type, int len1, int len2, int len3, int len4, int len5, JavaThread* current))
500 #ifndef PRODUCT
501 SharedRuntime::_multi5_ctr++; // multianewarray for 5 dimensions
502 #endif
503 assert(check_compiled_frame(current), "incorrect caller");
504 assert(elem_type->is_klass(), "not a class");
505 jint dims[5];
506 dims[0] = len1;
507 dims[1] = len2;
508 dims[2] = len3;
509 dims[3] = len4;
510 dims[4] = len5;
511 Handle holder(current, elem_type->klass_holder()); // keep the klass alive
512 oop obj = ArrayKlass::cast(elem_type)->multi_allocate(5, dims, THREAD);
513 deoptimize_caller_frame(current, HAS_PENDING_EXCEPTION);
514 current->set_vm_result_oop(obj);
515 JRT_END
516
517 JRT_ENTRY(void, OptoRuntime::multianewarrayN_C(Klass* elem_type, arrayOopDesc* dims, JavaThread* current))
518 assert(check_compiled_frame(current), "incorrect caller");
519 assert(elem_type->is_klass(), "not a class");
520 assert(oop(dims)->is_typeArray(), "not an array");
521
522 ResourceMark rm;
523 jint len = dims->length();
524 assert(len > 0, "Dimensions array should contain data");
525 jint *c_dims = NEW_RESOURCE_ARRAY(jint, len);
526 ArrayAccess<>::arraycopy_to_native<>(dims, typeArrayOopDesc::element_offset<jint>(0),
527 c_dims, len);
528
529 Handle holder(current, elem_type->klass_holder()); // keep the klass alive
530 oop obj = ArrayKlass::cast(elem_type)->multi_allocate(len, c_dims, THREAD);
531 deoptimize_caller_frame(current, HAS_PENDING_EXCEPTION);
532 current->set_vm_result_oop(obj);
533 JRT_END
534
535 JRT_BLOCK_ENTRY(void, OptoRuntime::monitor_notify_C(oopDesc* obj, JavaThread* current))
536
537 // Very few notify/notifyAll operations find any threads on the waitset, so
538 // the dominant fast-path is to simply return.
539 // Relatedly, it's critical that notify/notifyAll be fast in order to
540 // reduce lock hold times.
541 if (!SafepointSynchronize::is_synchronizing()) {
542 if (ObjectSynchronizer::quick_notify(obj, current, false)) {
543 return;
544 }
545 }
546
547 // This is the case the fast-path above isn't provisioned to handle.
548 // The fast-path is designed to handle frequently arising cases in an efficient manner.
549 // (The fast-path is just a degenerate variant of the slow-path).
550 // Perform the dreaded state transition and pass control into the slow-path.
551 JRT_BLOCK;
552 Handle h_obj(current, obj);
553 ObjectSynchronizer::notify(h_obj, CHECK);
554 JRT_BLOCK_END;
555 JRT_END
556
557 JRT_BLOCK_ENTRY(void, OptoRuntime::monitor_notifyAll_C(oopDesc* obj, JavaThread* current))
558
559 if (!SafepointSynchronize::is_synchronizing()) {
560 if (ObjectSynchronizer::quick_notify(obj, current, true)) {
561 return;
562 }
563 }
564
565 // This is the case the fast-path above isn't provisioned to handle.
566 // The fast-path is designed to handle frequently arising cases in an efficient manner.
567 // (The fast-path is just a degenerate variant of the slow-path).
568 // Perform the dreaded state transition and pass control into the slow-path.
569 JRT_BLOCK;
570 Handle h_obj(current, obj);
571 ObjectSynchronizer::notifyall(h_obj, CHECK);
572 JRT_BLOCK_END;
573 JRT_END
574
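// The make_*_Type() helpers below construct the TypeFunc signatures (argument
// domain and result range tuples) used by the C2 runtime calls and stubs.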
575 static const TypeFunc* make_new_instance_Type() {
576 // create input type (domain)
577 const Type **fields = TypeTuple::fields(1);
578 fields[TypeFunc::Parms+0] = TypeInstPtr::NOTNULL; // Klass to be allocated
579 const TypeTuple *domain = TypeTuple::make(TypeFunc::Parms+1, fields);
580
581 // create result type (range)
582 fields = TypeTuple::fields(1);
583 fields[TypeFunc::Parms+0] = TypeRawPtr::NOTNULL; // Returned oop
584
585 const TypeTuple *range = TypeTuple::make(TypeFunc::Parms+1, fields);
586
587 return TypeFunc::make(domain, range);
588 }
589
590 #if INCLUDE_JVMTI
591 static const TypeFunc* make_notify_jvmti_vthread_Type() {
592 // create input type (domain)
593 const Type **fields = TypeTuple::fields(2);
594 fields[TypeFunc::Parms+0] = TypeInstPtr::NOTNULL; // VirtualThread oop
595 fields[TypeFunc::Parms+1] = TypeInt::BOOL; // jboolean
596 const TypeTuple *domain = TypeTuple::make(TypeFunc::Parms+2,fields);
597
598 // no result type needed
599 fields = TypeTuple::fields(1);
600 fields[TypeFunc::Parms+0] = nullptr; // void
601 const TypeTuple* range = TypeTuple::make(TypeFunc::Parms, fields);
602
603 return TypeFunc::make(domain,range);
604 }
605 #endif
606
607 static const TypeFunc* make_athrow_Type() {
608 // create input type (domain)
609 const Type **fields = TypeTuple::fields(1);
610 fields[TypeFunc::Parms+0] = TypeInstPtr::NOTNULL; // Exception oop to be thrown
611 const TypeTuple *domain = TypeTuple::make(TypeFunc::Parms+1, fields);
612
613 // create result type (range)
614 fields = TypeTuple::fields(0);
615
616 const TypeTuple *range = TypeTuple::make(TypeFunc::Parms+0, fields);
617
618 return TypeFunc::make(domain, range);
619 }
620
621 static const TypeFunc* make_new_array_Type() {
622 // create input type (domain)
623 const Type **fields = TypeTuple::fields(2);
624 fields[TypeFunc::Parms+0] = TypeInstPtr::NOTNULL; // element klass
625 fields[TypeFunc::Parms+1] = TypeInt::INT; // array size
626 const TypeTuple *domain = TypeTuple::make(TypeFunc::Parms+2, fields);
627
628 // create result type (range)
629 fields = TypeTuple::fields(1);
630 fields[TypeFunc::Parms+0] = TypeRawPtr::NOTNULL; // Returned oop
631
632 const TypeTuple *range = TypeTuple::make(TypeFunc::Parms+1, fields);
633
634 return TypeFunc::make(domain, range);
635 }
636
637 const TypeFunc* OptoRuntime::multianewarray_Type(int ndim) {
638 // create input type (domain)
639 const int nargs = ndim + 1;
640 const Type **fields = TypeTuple::fields(nargs);
641 fields[TypeFunc::Parms+0] = TypeInstPtr::NOTNULL; // element klass
642 for( int i = 1; i < nargs; i++ )
643 fields[TypeFunc::Parms + i] = TypeInt::INT; // array size
644 const TypeTuple *domain = TypeTuple::make(TypeFunc::Parms+nargs, fields);
645
646 // create result type (range)
647 fields = TypeTuple::fields(1);
648 fields[TypeFunc::Parms+0] = TypeRawPtr::NOTNULL; // Returned oop
649 const TypeTuple *range = TypeTuple::make(TypeFunc::Parms+1, fields);
650
651 return TypeFunc::make(domain, range);
652 }
653
654 static const TypeFunc* make_multianewarrayN_Type() {
655 // create input type (domain)
656 const Type **fields = TypeTuple::fields(2);
657 fields[TypeFunc::Parms+0] = TypeInstPtr::NOTNULL; // element klass
658 fields[TypeFunc::Parms+1] = TypeInstPtr::NOTNULL; // array of dim sizes
659 const TypeTuple *domain = TypeTuple::make(TypeFunc::Parms+2, fields);
660
661 // create result type (range)
662 fields = TypeTuple::fields(1);
663 fields[TypeFunc::Parms+0] = TypeRawPtr::NOTNULL; // Returned oop
664 const TypeTuple *range = TypeTuple::make(TypeFunc::Parms+1, fields);
665
666 return TypeFunc::make(domain, range);
667 }
668
669 static const TypeFunc* make_uncommon_trap_Type() {
670 // create input type (domain)
671 const Type **fields = TypeTuple::fields(1);
672 fields[TypeFunc::Parms+0] = TypeInt::INT; // trap_reason (deopt reason and action)
673 const TypeTuple *domain = TypeTuple::make(TypeFunc::Parms+1, fields);
674
675 // create result type (range)
676 fields = TypeTuple::fields(0);
677 const TypeTuple *range = TypeTuple::make(TypeFunc::Parms+0, fields);
678
679 return TypeFunc::make(domain, range);
680 }
681
682 //-----------------------------------------------------------------------------
683 // Monitor Handling
684
685 static const TypeFunc* make_complete_monitor_enter_Type() {
686 // create input type (domain)
687 const Type **fields = TypeTuple::fields(2);
688 fields[TypeFunc::Parms+0] = TypeInstPtr::NOTNULL; // Object to be Locked
689 fields[TypeFunc::Parms+1] = TypeRawPtr::BOTTOM; // Address of stack location for lock
690 const TypeTuple *domain = TypeTuple::make(TypeFunc::Parms+2,fields);
691
692 // create result type (range)
693 fields = TypeTuple::fields(0);
694
695 const TypeTuple *range = TypeTuple::make(TypeFunc::Parms+0,fields);
696
697 return TypeFunc::make(domain,range);
698 }
699
700 //-----------------------------------------------------------------------------
701
702 static const TypeFunc* make_complete_monitor_exit_Type() {
703 // create input type (domain)
704 const Type **fields = TypeTuple::fields(3);
705 fields[TypeFunc::Parms+0] = TypeInstPtr::NOTNULL; // Object to be Locked
706 fields[TypeFunc::Parms+1] = TypeRawPtr::BOTTOM; // Address of stack location for lock - BasicLock
707 fields[TypeFunc::Parms+2] = TypeRawPtr::BOTTOM; // Thread pointer (Self)
708 const TypeTuple *domain = TypeTuple::make(TypeFunc::Parms+3, fields);
709
710 // create result type (range)
711 fields = TypeTuple::fields(0);
712
713 const TypeTuple *range = TypeTuple::make(TypeFunc::Parms+0, fields);
714
715 return TypeFunc::make(domain, range);
716 }
717
718 static const TypeFunc* make_monitor_notify_Type() {
719 // create input type (domain)
720 const Type **fields = TypeTuple::fields(1);
721 fields[TypeFunc::Parms+0] = TypeInstPtr::NOTNULL; // Object to be Locked
722 const TypeTuple *domain = TypeTuple::make(TypeFunc::Parms+1, fields);
723
724 // create result type (range)
725 fields = TypeTuple::fields(0);
726 const TypeTuple *range = TypeTuple::make(TypeFunc::Parms+0, fields);
727 return TypeFunc::make(domain, range);
728 }
729
730 static const TypeFunc* make_flush_windows_Type() {
731 // create input type (domain)
732 const Type** fields = TypeTuple::fields(1);
733 fields[TypeFunc::Parms+0] = nullptr; // void
734 const TypeTuple *domain = TypeTuple::make(TypeFunc::Parms, fields);
735
736 // create result type
737 fields = TypeTuple::fields(1);
738 fields[TypeFunc::Parms+0] = nullptr; // void
739 const TypeTuple *range = TypeTuple::make(TypeFunc::Parms, fields);
740
741 return TypeFunc::make(domain, range);
742 }
743
744 static const TypeFunc* make_l2f_Type() {
745 // create input type (domain)
746 const Type **fields = TypeTuple::fields(2);
747 fields[TypeFunc::Parms+0] = TypeLong::LONG;
748 fields[TypeFunc::Parms+1] = Type::HALF;
749 const TypeTuple *domain = TypeTuple::make(TypeFunc::Parms+2, fields);
750
751 // create result type (range)
752 fields = TypeTuple::fields(1);
753 fields[TypeFunc::Parms+0] = Type::FLOAT;
754 const TypeTuple *range = TypeTuple::make(TypeFunc::Parms+1, fields);
755
756 return TypeFunc::make(domain, range);
757 }
758
759 static const TypeFunc* make_modf_Type() {
760 const Type **fields = TypeTuple::fields(2);
761 fields[TypeFunc::Parms+0] = Type::FLOAT;
762 fields[TypeFunc::Parms+1] = Type::FLOAT;
763 const TypeTuple *domain = TypeTuple::make(TypeFunc::Parms+2, fields);
764
765 // create result type (range)
766 fields = TypeTuple::fields(1);
767 fields[TypeFunc::Parms+0] = Type::FLOAT;
768
769 const TypeTuple *range = TypeTuple::make(TypeFunc::Parms+1, fields);
770
771 return TypeFunc::make(domain, range);
772 }
773
774 static const TypeFunc* make_Math_D_D_Type() {
775 // create input type (domain)
776 const Type **fields = TypeTuple::fields(2);
777 // double argument (occupies two slots)
778 fields[TypeFunc::Parms+0] = Type::DOUBLE;
779 fields[TypeFunc::Parms+1] = Type::HALF;
780 const TypeTuple *domain = TypeTuple::make(TypeFunc::Parms+2, fields);
781
782 // create result type (range)
783 fields = TypeTuple::fields(2);
784 fields[TypeFunc::Parms+0] = Type::DOUBLE;
785 fields[TypeFunc::Parms+1] = Type::HALF;
786 const TypeTuple *range = TypeTuple::make(TypeFunc::Parms+2, fields);
787
788 return TypeFunc::make(domain, range);
789 }
790
791 const TypeFunc* OptoRuntime::Math_Vector_Vector_Type(uint num_arg, const TypeVect* in_type, const TypeVect* out_type) {
792 // create input type (domain)
793 const Type **fields = TypeTuple::fields(num_arg);
794 // all inputs share the same vector type
795 assert(num_arg > 0, "must have at least 1 input");
796 for (uint i = 0; i < num_arg; i++) {
797 fields[TypeFunc::Parms+i] = in_type;
798 }
799 const TypeTuple *domain = TypeTuple::make(TypeFunc::Parms+num_arg, fields);
800
801 // create result type (range)
802 const uint num_ret = 1;
803 fields = TypeTuple::fields(num_ret);
804 fields[TypeFunc::Parms+0] = out_type;
805 const TypeTuple *range = TypeTuple::make(TypeFunc::Parms+num_ret, fields);
806
807 return TypeFunc::make(domain, range);
808 }
809
810 static const TypeFunc* make_Math_DD_D_Type() {
811 const Type **fields = TypeTuple::fields(4);
812 fields[TypeFunc::Parms+0] = Type::DOUBLE;
813 fields[TypeFunc::Parms+1] = Type::HALF;
814 fields[TypeFunc::Parms+2] = Type::DOUBLE;
815 fields[TypeFunc::Parms+3] = Type::HALF;
816 const TypeTuple *domain = TypeTuple::make(TypeFunc::Parms+4, fields);
817
818 // create result type (range)
819 fields = TypeTuple::fields(2);
820 fields[TypeFunc::Parms+0] = Type::DOUBLE;
821 fields[TypeFunc::Parms+1] = Type::HALF;
822 const TypeTuple *range = TypeTuple::make(TypeFunc::Parms+2, fields);
823
824 return TypeFunc::make(domain, range);
825 }
826
827 //-------------- currentTimeMillis, currentTimeNanos, etc
828
829 static const TypeFunc* make_void_long_Type() {
830 // create input type (domain)
831 const Type **fields = TypeTuple::fields(0);
832 const TypeTuple *domain = TypeTuple::make(TypeFunc::Parms+0, fields);
833
834 // create result type (range)
835 fields = TypeTuple::fields(2);
836 fields[TypeFunc::Parms+0] = TypeLong::LONG;
837 fields[TypeFunc::Parms+1] = Type::HALF;
838 const TypeTuple *range = TypeTuple::make(TypeFunc::Parms+2, fields);
839
840 return TypeFunc::make(domain, range);
841 }
842
843 static const TypeFunc* make_void_void_Type() {
844 // create input type (domain)
845 const Type **fields = TypeTuple::fields(0);
846 const TypeTuple *domain = TypeTuple::make(TypeFunc::Parms+0, fields);
847
848 // create result type (range)
849 fields = TypeTuple::fields(0);
850 const TypeTuple *range = TypeTuple::make(TypeFunc::Parms+0, fields);
851 return TypeFunc::make(domain, range);
852 }
853
854 static const TypeFunc* make_jfr_write_checkpoint_Type() {
855 // create input type (domain)
856 const Type **fields = TypeTuple::fields(0);
857 const TypeTuple *domain = TypeTuple::make(TypeFunc::Parms, fields);
858
859 // create result type (range)
860 fields = TypeTuple::fields(0);
861 const TypeTuple *range = TypeTuple::make(TypeFunc::Parms, fields);
862 return TypeFunc::make(domain, range);
863 }
864
865
866 // Takes as parameters:
867 // void *dest
868 // long size
869 // uchar byte
870
871 static const TypeFunc* make_setmemory_Type() {
872 // create input type (domain)
873 int argcnt = NOT_LP64(3) LP64_ONLY(4);
874 const Type** fields = TypeTuple::fields(argcnt);
875 int argp = TypeFunc::Parms;
876 fields[argp++] = TypePtr::NOTNULL; // dest
877 fields[argp++] = TypeX_X; // size
878 LP64_ONLY(fields[argp++] = Type::HALF); // other half of size
879 fields[argp++] = TypeInt::UBYTE; // bytevalue
880 assert(argp == TypeFunc::Parms+argcnt, "correct decoding");
881 const TypeTuple* domain = TypeTuple::make(TypeFunc::Parms+argcnt, fields);
882
883 // no result type needed
884 fields = TypeTuple::fields(1);
885 fields[TypeFunc::Parms+0] = nullptr; // void
886 const TypeTuple* range = TypeTuple::make(TypeFunc::Parms, fields);
887 return TypeFunc::make(domain, range);
888 }
889
890 // arraycopy stub variations:
891 enum ArrayCopyType {
892 ac_fast, // void(ptr, ptr, size_t)
893 ac_checkcast, // int(ptr, ptr, size_t, size_t, ptr)
894 ac_slow, // void(ptr, int, ptr, int, int)
895 ac_generic // int(ptr, int, ptr, int, int)
896 };
897
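// Build the TypeFunc for one of the arraycopy stub variants above: the fast
// and checkcast variants take size_t lengths (plus a HALF slot each on LP64),
// while the slow and generic variants take int positions and an int length.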
898 static const TypeFunc* make_arraycopy_Type(ArrayCopyType act) {
899 // create input type (domain)
900 int num_args = (act == ac_fast ? 3 : 5);
901 int num_size_args = (act == ac_fast ? 1 : act == ac_checkcast ? 2 : 0);
902 int argcnt = num_args;
903 LP64_ONLY(argcnt += num_size_args); // halfwords for lengths
904 const Type** fields = TypeTuple::fields(argcnt);
905 int argp = TypeFunc::Parms;
906 fields[argp++] = TypePtr::NOTNULL; // src
907 if (num_size_args == 0) {
908 fields[argp++] = TypeInt::INT; // src_pos
909 }
910 fields[argp++] = TypePtr::NOTNULL; // dest
911 if (num_size_args == 0) {
912 fields[argp++] = TypeInt::INT; // dest_pos
913 fields[argp++] = TypeInt::INT; // length
914 }
915 while (num_size_args-- > 0) {
916 fields[argp++] = TypeX_X; // size in whatevers (size_t)
917 LP64_ONLY(fields[argp++] = Type::HALF); // other half of long length
918 }
919 if (act == ac_checkcast) {
920 fields[argp++] = TypePtr::NOTNULL; // super_klass
921 }
922 assert(argp == TypeFunc::Parms+argcnt, "correct decoding of act");
923 const TypeTuple* domain = TypeTuple::make(TypeFunc::Parms+argcnt, fields);
924
925 // create result type if needed
926 int retcnt = (act == ac_checkcast || act == ac_generic ? 1 : 0);
927 fields = TypeTuple::fields(1);
928 if (retcnt == 0)
929 fields[TypeFunc::Parms+0] = nullptr; // void
930 else
931 fields[TypeFunc::Parms+0] = TypeInt::INT; // status result, if needed
932 const TypeTuple* range = TypeTuple::make(TypeFunc::Parms+retcnt, fields);
933 return TypeFunc::make(domain, range);
934 }
935
936 static const TypeFunc* make_array_fill_Type() {
937 const Type** fields;
938 int argp = TypeFunc::Parms;
939 // create input type (domain): pointer, int, size_t
940 fields = TypeTuple::fields(3 LP64_ONLY( + 1));
941 fields[argp++] = TypePtr::NOTNULL;
942 fields[argp++] = TypeInt::INT;
943 fields[argp++] = TypeX_X; // size in whatevers (size_t)
944 LP64_ONLY(fields[argp++] = Type::HALF); // other half of long length
945 const TypeTuple *domain = TypeTuple::make(argp, fields);
946
947 // create result type
948 fields = TypeTuple::fields(1);
949 fields[TypeFunc::Parms+0] = nullptr; // void
950 const TypeTuple *range = TypeTuple::make(TypeFunc::Parms, fields);
951
952 return TypeFunc::make(domain, range);
953 }
954
955 static const TypeFunc* make_array_partition_Type() {
956 // create input type (domain)
957 int num_args = 7;
958 int argcnt = num_args;
959 const Type** fields = TypeTuple::fields(argcnt);
960 int argp = TypeFunc::Parms;
961 fields[argp++] = TypePtr::NOTNULL; // array
962 fields[argp++] = TypeInt::INT; // element type
963 fields[argp++] = TypeInt::INT; // low
964 fields[argp++] = TypeInt::INT; // end
965 fields[argp++] = TypePtr::NOTNULL; // pivot_indices (int array)
966 fields[argp++] = TypeInt::INT; // indexPivot1
967 fields[argp++] = TypeInt::INT; // indexPivot2
968 assert(argp == TypeFunc::Parms+argcnt, "correct decoding");
969 const TypeTuple* domain = TypeTuple::make(TypeFunc::Parms+argcnt, fields);
970
971 // no result type needed
972 fields = TypeTuple::fields(1);
973 fields[TypeFunc::Parms+0] = nullptr; // void
974 const TypeTuple* range = TypeTuple::make(TypeFunc::Parms, fields);
975 return TypeFunc::make(domain, range);
976 }
977
978 static const TypeFunc* make_array_sort_Type() {
979 // create input type (domain)
980 int num_args = 4;
981 int argcnt = num_args;
982 const Type** fields = TypeTuple::fields(argcnt);
983 int argp = TypeFunc::Parms;
984 fields[argp++] = TypePtr::NOTNULL; // array
985 fields[argp++] = TypeInt::INT; // element type
986 fields[argp++] = TypeInt::INT; // fromIndex
987 fields[argp++] = TypeInt::INT; // toIndex
988 assert(argp == TypeFunc::Parms+argcnt, "correct decoding");
989 const TypeTuple* domain = TypeTuple::make(TypeFunc::Parms+argcnt, fields);
990
991 // no result type needed
992 fields = TypeTuple::fields(1);
993 fields[TypeFunc::Parms+0] = nullptr; // void
994 const TypeTuple* range = TypeTuple::make(TypeFunc::Parms, fields);
995 return TypeFunc::make(domain, range);
996 }
997
998 static const TypeFunc* make_aescrypt_block_Type() {
999 // create input type (domain)
1000 int num_args = 3;
1001 int argcnt = num_args;
1002 const Type** fields = TypeTuple::fields(argcnt);
1003 int argp = TypeFunc::Parms;
1004 fields[argp++] = TypePtr::NOTNULL; // src
1005 fields[argp++] = TypePtr::NOTNULL; // dest
1006 fields[argp++] = TypePtr::NOTNULL; // k array
1007 assert(argp == TypeFunc::Parms+argcnt, "correct decoding");
1008 const TypeTuple* domain = TypeTuple::make(TypeFunc::Parms+argcnt, fields);
1009
1010 // no result type needed
1011 fields = TypeTuple::fields(1);
1012 fields[TypeFunc::Parms+0] = nullptr; // void
1013 const TypeTuple* range = TypeTuple::make(TypeFunc::Parms, fields);
1014 return TypeFunc::make(domain, range);
1015 }
1016
1017 static const TypeFunc* make_updateBytesCRC32_Type() {
1018 // create input type (domain)
1019 int num_args = 3;
1020 int argcnt = num_args;
1021 const Type** fields = TypeTuple::fields(argcnt);
1022 int argp = TypeFunc::Parms;
1023 fields[argp++] = TypeInt::INT; // crc
1024 fields[argp++] = TypePtr::NOTNULL; // src
1025 fields[argp++] = TypeInt::INT; // len
1026 assert(argp == TypeFunc::Parms+argcnt, "correct decoding");
1027 const TypeTuple* domain = TypeTuple::make(TypeFunc::Parms+argcnt, fields);
1028
1029 // result type needed
1030 fields = TypeTuple::fields(1);
1031 fields[TypeFunc::Parms+0] = TypeInt::INT; // crc result
1032 const TypeTuple* range = TypeTuple::make(TypeFunc::Parms+1, fields);
1033 return TypeFunc::make(domain, range);
1034 }
1035
1036 static const TypeFunc* make_updateBytesCRC32C_Type() {
1037 // create input type (domain)
1038 int num_args = 4;
1039 int argcnt = num_args;
1040 const Type** fields = TypeTuple::fields(argcnt);
1041 int argp = TypeFunc::Parms;
1042 fields[argp++] = TypeInt::INT; // crc
1043 fields[argp++] = TypePtr::NOTNULL; // buf
1044 fields[argp++] = TypeInt::INT; // len
1045 fields[argp++] = TypePtr::NOTNULL; // table
1046 assert(argp == TypeFunc::Parms+argcnt, "correct decoding");
1047 const TypeTuple* domain = TypeTuple::make(TypeFunc::Parms+argcnt, fields);
1048
1049 // result type needed
1050 fields = TypeTuple::fields(1);
1051 fields[TypeFunc::Parms+0] = TypeInt::INT; // crc result
1052 const TypeTuple* range = TypeTuple::make(TypeFunc::Parms+1, fields);
1053 return TypeFunc::make(domain, range);
1054 }
1055
1056 static const TypeFunc* make_updateBytesAdler32_Type() {
1057 // create input type (domain)
1058 int num_args = 3;
1059 int argcnt = num_args;
1060 const Type** fields = TypeTuple::fields(argcnt);
1061 int argp = TypeFunc::Parms;
1062 fields[argp++] = TypeInt::INT; // crc
1063 fields[argp++] = TypePtr::NOTNULL; // src + offset
1064 fields[argp++] = TypeInt::INT; // len
1065 assert(argp == TypeFunc::Parms+argcnt, "correct decoding");
1066 const TypeTuple* domain = TypeTuple::make(TypeFunc::Parms+argcnt, fields);
1067
1068 // result type needed
1069 fields = TypeTuple::fields(1);
1070 fields[TypeFunc::Parms+0] = TypeInt::INT; // crc result
1071 const TypeTuple* range = TypeTuple::make(TypeFunc::Parms+1, fields);
1072 return TypeFunc::make(domain, range);
1073 }
1074
1075 static const TypeFunc* make_cipherBlockChaining_aescrypt_Type() {
1076 // create input type (domain)
1077 int num_args = 5;
1078 int argcnt = num_args;
1079 const Type** fields = TypeTuple::fields(argcnt);
1080 int argp = TypeFunc::Parms;
1081 fields[argp++] = TypePtr::NOTNULL; // src
1082 fields[argp++] = TypePtr::NOTNULL; // dest
1083 fields[argp++] = TypePtr::NOTNULL; // k array
1084 fields[argp++] = TypePtr::NOTNULL; // r array
1085 fields[argp++] = TypeInt::INT; // src len
1086 assert(argp == TypeFunc::Parms+argcnt, "correct decoding");
1087 const TypeTuple* domain = TypeTuple::make(TypeFunc::Parms+argcnt, fields);
1088
1089 // returning cipher len (int)
1090 fields = TypeTuple::fields(1);
1091 fields[TypeFunc::Parms+0] = TypeInt::INT;
1092 const TypeTuple* range = TypeTuple::make(TypeFunc::Parms+1, fields);
1093 return TypeFunc::make(domain, range);
1094 }
1095
1096 static const TypeFunc* make_electronicCodeBook_aescrypt_Type() {
1097 // create input type (domain)
1098 int num_args = 4;
1099 int argcnt = num_args;
1100 const Type** fields = TypeTuple::fields(argcnt);
1101 int argp = TypeFunc::Parms;
1102 fields[argp++] = TypePtr::NOTNULL; // src
1103 fields[argp++] = TypePtr::NOTNULL; // dest
1104 fields[argp++] = TypePtr::NOTNULL; // k array
1105 fields[argp++] = TypeInt::INT; // src len
1106 assert(argp == TypeFunc::Parms + argcnt, "correct decoding");
1107 const TypeTuple* domain = TypeTuple::make(TypeFunc::Parms + argcnt, fields);
1108
1109 // returning cipher len (int)
1110 fields = TypeTuple::fields(1);
1111 fields[TypeFunc::Parms + 0] = TypeInt::INT;
1112 const TypeTuple* range = TypeTuple::make(TypeFunc::Parms + 1, fields);
1113 return TypeFunc::make(domain, range);
1114 }
1115
1116 static const TypeFunc* make_counterMode_aescrypt_Type() {
1117 // create input type (domain)
1118 int num_args = 7;
1119 int argcnt = num_args;
1120 const Type** fields = TypeTuple::fields(argcnt);
1121 int argp = TypeFunc::Parms;
1122 fields[argp++] = TypePtr::NOTNULL; // src
1123 fields[argp++] = TypePtr::NOTNULL; // dest
1124 fields[argp++] = TypePtr::NOTNULL; // k array
1125 fields[argp++] = TypePtr::NOTNULL; // counter array
1126 fields[argp++] = TypeInt::INT; // src len
1127 fields[argp++] = TypePtr::NOTNULL; // saved_encCounter
1128 fields[argp++] = TypePtr::NOTNULL; // saved used addr
1129 assert(argp == TypeFunc::Parms + argcnt, "correct decoding");
1130 const TypeTuple* domain = TypeTuple::make(TypeFunc::Parms + argcnt, fields);
1131 // returning cipher len (int)
1132 fields = TypeTuple::fields(1);
1133 fields[TypeFunc::Parms + 0] = TypeInt::INT;
1134 const TypeTuple* range = TypeTuple::make(TypeFunc::Parms + 1, fields);
1135 return TypeFunc::make(domain, range);
1136 }
1137
1138 static const TypeFunc* make_galoisCounterMode_aescrypt_Type() {
1139 // create input type (domain)
1140 int num_args = 8;
1141 int argcnt = num_args;
1142 const Type** fields = TypeTuple::fields(argcnt);
1143 int argp = TypeFunc::Parms;
1144 fields[argp++] = TypePtr::NOTNULL; // byte[] in + inOfs
1145 fields[argp++] = TypeInt::INT; // int len
1146 fields[argp++] = TypePtr::NOTNULL; // byte[] ct + ctOfs
1147 fields[argp++] = TypePtr::NOTNULL; // byte[] out + outOfs
1148 fields[argp++] = TypePtr::NOTNULL; // byte[] key from AESCrypt obj
1149 fields[argp++] = TypePtr::NOTNULL; // long[] state from GHASH obj
1150 fields[argp++] = TypePtr::NOTNULL; // long[] subkeyHtbl from GHASH obj
1151 fields[argp++] = TypePtr::NOTNULL; // byte[] counter from GCTR obj
1152
1153 assert(argp == TypeFunc::Parms + argcnt, "correct decoding");
1154 const TypeTuple* domain = TypeTuple::make(TypeFunc::Parms + argcnt, fields);
1155 // returning cipher len (int)
1156 fields = TypeTuple::fields(1);
1157 fields[TypeFunc::Parms + 0] = TypeInt::INT;
1158 const TypeTuple* range = TypeTuple::make(TypeFunc::Parms + 1, fields);
1159 return TypeFunc::make(domain, range);
1160 }
1161
1162 static const TypeFunc* make_digestBase_implCompress_Type(bool is_sha3) {
1163 // create input type (domain)
1164 int num_args = is_sha3 ? 3 : 2;
1165 int argcnt = num_args;
1166 const Type** fields = TypeTuple::fields(argcnt);
1167 int argp = TypeFunc::Parms;
1168 fields[argp++] = TypePtr::NOTNULL; // buf
1169 fields[argp++] = TypePtr::NOTNULL; // state
1170 if (is_sha3) fields[argp++] = TypeInt::INT; // block_size
1171 assert(argp == TypeFunc::Parms+argcnt, "correct decoding");
1172 const TypeTuple* domain = TypeTuple::make(TypeFunc::Parms+argcnt, fields);
1173
1174 // no result type needed
1175 fields = TypeTuple::fields(1);
1176 fields[TypeFunc::Parms+0] = nullptr; // void
1177 const TypeTuple* range = TypeTuple::make(TypeFunc::Parms, fields);
1178 return TypeFunc::make(domain, range);
1179 }
1180
1181 /*
1182 * int implCompressMultiBlock(byte[] b, int ofs, int limit)
1183 */
1184 static const TypeFunc* make_digestBase_implCompressMB_Type(bool is_sha3) {
1185 // create input type (domain)
1186 int num_args = is_sha3 ? 5 : 4;
1187 int argcnt = num_args;
1188 const Type** fields = TypeTuple::fields(argcnt);
1189 int argp = TypeFunc::Parms;
1190 fields[argp++] = TypePtr::NOTNULL; // buf
1191 fields[argp++] = TypePtr::NOTNULL; // state
1192 if (is_sha3) fields[argp++] = TypeInt::INT; // block_size
1193 fields[argp++] = TypeInt::INT; // ofs
1194 fields[argp++] = TypeInt::INT; // limit
1195 assert(argp == TypeFunc::Parms+argcnt, "correct decoding");
1196 const TypeTuple* domain = TypeTuple::make(TypeFunc::Parms+argcnt, fields);
1197
1198 // returning ofs (int)
1199 fields = TypeTuple::fields(1);
1200 fields[TypeFunc::Parms+0] = TypeInt::INT; // ofs
1201 const TypeTuple* range = TypeTuple::make(TypeFunc::Parms+1, fields);
1202 return TypeFunc::make(domain, range);
1203 }
1204
1205 // SHAKE128Parallel doubleKeccak function
1206 static const TypeFunc* make_double_keccak_Type() {
1207 int argcnt = 2;
1208
1209 const Type** fields = TypeTuple::fields(argcnt);
1210 int argp = TypeFunc::Parms;
1211 fields[argp++] = TypePtr::NOTNULL; // status0
1212 fields[argp++] = TypePtr::NOTNULL; // status1
1213
1214 assert(argp == TypeFunc::Parms + argcnt, "correct decoding");
1215 const TypeTuple* domain = TypeTuple::make(TypeFunc::Parms + argcnt, fields);
1216
1217 // result type needed
1218 fields = TypeTuple::fields(1);
1219 fields[TypeFunc::Parms + 0] = TypeInt::INT;
1220 const TypeTuple* range = TypeTuple::make(TypeFunc::Parms + 1, fields);
1221 return TypeFunc::make(domain, range);
1222 }
1223
1224 static const TypeFunc* make_multiplyToLen_Type() {
1225 // create input type (domain)
1226 int num_args = 5;
1227 int argcnt = num_args;
1228 const Type** fields = TypeTuple::fields(argcnt);
1229 int argp = TypeFunc::Parms;
1230 fields[argp++] = TypePtr::NOTNULL; // x
1231 fields[argp++] = TypeInt::INT; // xlen
1232 fields[argp++] = TypePtr::NOTNULL; // y
1233 fields[argp++] = TypeInt::INT; // ylen
1234 fields[argp++] = TypePtr::NOTNULL; // z
1235 assert(argp == TypeFunc::Parms+argcnt, "correct decoding");
1236 const TypeTuple* domain = TypeTuple::make(TypeFunc::Parms+argcnt, fields);
1237
1238 // no result type needed
1239 fields = TypeTuple::fields(1);
1240 fields[TypeFunc::Parms+0] = nullptr;
1241 const TypeTuple* range = TypeTuple::make(TypeFunc::Parms, fields);
1242 return TypeFunc::make(domain, range);
1243 }
1244
1245 static const TypeFunc* make_squareToLen_Type() {
1246 // create input type (domain)
1247 int num_args = 4;
1248 int argcnt = num_args;
1249 const Type** fields = TypeTuple::fields(argcnt);
1250 int argp = TypeFunc::Parms;
1251 fields[argp++] = TypePtr::NOTNULL; // x
1252 fields[argp++] = TypeInt::INT; // len
1253 fields[argp++] = TypePtr::NOTNULL; // z
1254 fields[argp++] = TypeInt::INT; // zlen
1255 assert(argp == TypeFunc::Parms+argcnt, "correct decoding");
1256 const TypeTuple* domain = TypeTuple::make(TypeFunc::Parms+argcnt, fields);
1257
1258 // no result type needed
1259 fields = TypeTuple::fields(1);
1260 fields[TypeFunc::Parms+0] = nullptr;
1261 const TypeTuple* range = TypeTuple::make(TypeFunc::Parms, fields);
1262 return TypeFunc::make(domain, range);
1263 }
1264
1265 static const TypeFunc* make_mulAdd_Type() {
1266 // create input type (domain)
1267 int num_args = 5;
1268 int argcnt = num_args;
1269 const Type** fields = TypeTuple::fields(argcnt);
1270 int argp = TypeFunc::Parms;
1271 fields[argp++] = TypePtr::NOTNULL; // out
1272 fields[argp++] = TypePtr::NOTNULL; // in
1273 fields[argp++] = TypeInt::INT; // offset
1274 fields[argp++] = TypeInt::INT; // len
1275 fields[argp++] = TypeInt::INT; // k
1276 assert(argp == TypeFunc::Parms+argcnt, "correct decoding");
1277 const TypeTuple* domain = TypeTuple::make(TypeFunc::Parms+argcnt, fields);
1278
1279 // returning carry (int)
1280 fields = TypeTuple::fields(1);
1281 fields[TypeFunc::Parms+0] = TypeInt::INT;
1282 const TypeTuple* range = TypeTuple::make(TypeFunc::Parms+1, fields);
1283 return TypeFunc::make(domain, range);
1284 }
1285
1286 static const TypeFunc* make_montgomeryMultiply_Type() {
1287 // create input type (domain)
1288 int num_args = 7;
1289 int argcnt = num_args;
1290 const Type** fields = TypeTuple::fields(argcnt);
1291 int argp = TypeFunc::Parms;
1292 fields[argp++] = TypePtr::NOTNULL; // a
1293 fields[argp++] = TypePtr::NOTNULL; // b
1294 fields[argp++] = TypePtr::NOTNULL; // n
1295 fields[argp++] = TypeInt::INT; // len
1296 fields[argp++] = TypeLong::LONG; // inv
1297 fields[argp++] = Type::HALF;
1298 fields[argp++] = TypePtr::NOTNULL; // result
1299 assert(argp == TypeFunc::Parms+argcnt, "correct decoding");
1300 const TypeTuple* domain = TypeTuple::make(TypeFunc::Parms+argcnt, fields);
1301
1302 // result type needed
1303 fields = TypeTuple::fields(1);
1304 fields[TypeFunc::Parms+0] = TypePtr::NOTNULL;
1305
1306 const TypeTuple* range = TypeTuple::make(TypeFunc::Parms, fields);
1307 return TypeFunc::make(domain, range);
1308 }
1309
1310 static const TypeFunc* make_montgomerySquare_Type() {
1311 // create input type (domain)
1312 int num_args = 6;
1313 int argcnt = num_args;
1314 const Type** fields = TypeTuple::fields(argcnt);
1315 int argp = TypeFunc::Parms;
1316 fields[argp++] = TypePtr::NOTNULL; // a
1317 fields[argp++] = TypePtr::NOTNULL; // n
1318 fields[argp++] = TypeInt::INT; // len
1319 fields[argp++] = TypeLong::LONG; // inv
1320 fields[argp++] = Type::HALF;
1321 fields[argp++] = TypePtr::NOTNULL; // result
1322 assert(argp == TypeFunc::Parms+argcnt, "correct decoding");
1323 const TypeTuple* domain = TypeTuple::make(TypeFunc::Parms+argcnt, fields);
1324
1325 // result type needed
1326 fields = TypeTuple::fields(1);
1327 fields[TypeFunc::Parms+0] = TypePtr::NOTNULL;
1328
1329 const TypeTuple* range = TypeTuple::make(TypeFunc::Parms, fields);
1330 return TypeFunc::make(domain, range);
1331 }
1332
1333 static const TypeFunc* make_bigIntegerShift_Type() {
1334 int argcnt = 5;
1335 const Type** fields = TypeTuple::fields(argcnt);
1336 int argp = TypeFunc::Parms;
1337 fields[argp++] = TypePtr::NOTNULL; // newArr
1338 fields[argp++] = TypePtr::NOTNULL; // oldArr
1339 fields[argp++] = TypeInt::INT; // newIdx
1340 fields[argp++] = TypeInt::INT; // shiftCount
1341 fields[argp++] = TypeInt::INT; // numIter
1342 assert(argp == TypeFunc::Parms + argcnt, "correct decoding");
1343 const TypeTuple* domain = TypeTuple::make(TypeFunc::Parms + argcnt, fields);
1344
1345 // no result type needed
1346 fields = TypeTuple::fields(1);
1347 fields[TypeFunc::Parms + 0] = nullptr;
1348 const TypeTuple* range = TypeTuple::make(TypeFunc::Parms, fields);
1349 return TypeFunc::make(domain, range);
1350 }
1351
1352 static const TypeFunc* make_vectorizedMismatch_Type() {
1353 // create input type (domain)
1354 int num_args = 4;
1355 int argcnt = num_args;
1356 const Type** fields = TypeTuple::fields(argcnt);
1357 int argp = TypeFunc::Parms;
1358 fields[argp++] = TypePtr::NOTNULL; // obja
1359 fields[argp++] = TypePtr::NOTNULL; // objb
1360 fields[argp++] = TypeInt::INT; // length, number of elements
1361 fields[argp++] = TypeInt::INT; // log2scale, element size
1362 assert(argp == TypeFunc::Parms + argcnt, "correct decoding");
1363 const TypeTuple* domain = TypeTuple::make(TypeFunc::Parms + argcnt, fields);
1364
1365 // return mismatch index (int)
1366 fields = TypeTuple::fields(1);
1367 fields[TypeFunc::Parms + 0] = TypeInt::INT;
1368 const TypeTuple* range = TypeTuple::make(TypeFunc::Parms + 1, fields);
1369 return TypeFunc::make(domain, range);
1370 }
1371
1372 static const TypeFunc* make_ghash_processBlocks_Type() {
1373 int argcnt = 4;
1374
1375 const Type** fields = TypeTuple::fields(argcnt);
1376 int argp = TypeFunc::Parms;
1377 fields[argp++] = TypePtr::NOTNULL; // state
1378 fields[argp++] = TypePtr::NOTNULL; // subkeyH
1379 fields[argp++] = TypePtr::NOTNULL; // data
1380 fields[argp++] = TypeInt::INT; // blocks
1381 assert(argp == TypeFunc::Parms+argcnt, "correct decoding");
1382 const TypeTuple* domain = TypeTuple::make(TypeFunc::Parms+argcnt, fields);
1383
1384 // result type needed
1385 fields = TypeTuple::fields(1);
1386 fields[TypeFunc::Parms+0] = nullptr; // void
1387 const TypeTuple* range = TypeTuple::make(TypeFunc::Parms, fields);
1388 return TypeFunc::make(domain, range);
1389 }
1390
1391 static const TypeFunc* make_chacha20Block_Type() {
1392 int argcnt = 2;
1393
1394 const Type** fields = TypeTuple::fields(argcnt);
1395 int argp = TypeFunc::Parms;
1396 fields[argp++] = TypePtr::NOTNULL; // state
1397 fields[argp++] = TypePtr::NOTNULL; // result
1398
1399 assert(argp == TypeFunc::Parms + argcnt, "correct decoding");
1400 const TypeTuple* domain = TypeTuple::make(TypeFunc::Parms + argcnt, fields);
1401
1402 // result type needed
1403 fields = TypeTuple::fields(1);
1404 fields[TypeFunc::Parms + 0] = TypeInt::INT; // key stream outlen as int
1405 const TypeTuple* range = TypeTuple::make(TypeFunc::Parms + 1, fields);
1406 return TypeFunc::make(domain, range);
1407 }
1408
1409 // Kyber NTT function
1410 static const TypeFunc* make_kyberNtt_Type() {
1411 int argcnt = 2;
1412
1413 const Type** fields = TypeTuple::fields(argcnt);
1414 int argp = TypeFunc::Parms;
1415 fields[argp++] = TypePtr::NOTNULL; // coeffs
1416 fields[argp++] = TypePtr::NOTNULL; // NTT zetas
1417
1418 assert(argp == TypeFunc::Parms + argcnt, "correct decoding");
1419 const TypeTuple* domain = TypeTuple::make(TypeFunc::Parms + argcnt, fields);
1420
1421 // result type needed
1422 fields = TypeTuple::fields(1);
1423 fields[TypeFunc::Parms + 0] = TypeInt::INT;
1424 const TypeTuple* range = TypeTuple::make(TypeFunc::Parms + 1, fields);
1425 return TypeFunc::make(domain, range);
1426 }
1427
1428 // Kyber inverse NTT function
1429 static const TypeFunc* make_kyberInverseNtt_Type() {
1430 int argcnt = 2;
1431
1432 const Type** fields = TypeTuple::fields(argcnt);
1433 int argp = TypeFunc::Parms;
1434 fields[argp++] = TypePtr::NOTNULL; // coeffs
1435 fields[argp++] = TypePtr::NOTNULL; // inverse NTT zetas
1436
1437 assert(argp == TypeFunc::Parms + argcnt, "correct decoding");
1438 const TypeTuple* domain = TypeTuple::make(TypeFunc::Parms + argcnt, fields);
1439
1440 // result type needed
1441 fields = TypeTuple::fields(1);
1442 fields[TypeFunc::Parms + 0] = TypeInt::INT;
1443 const TypeTuple* range = TypeTuple::make(TypeFunc::Parms + 1, fields);
1444 return TypeFunc::make(domain, range);
1445 }
1446
1447 // Kyber NTT multiply function
1448 static const TypeFunc* make_kyberNttMult_Type() {
1449 int argcnt = 4;
1450
1451 const Type** fields = TypeTuple::fields(argcnt);
1452 int argp = TypeFunc::Parms;
1453 fields[argp++] = TypePtr::NOTNULL; // result
1454 fields[argp++] = TypePtr::NOTNULL; // ntta
1455 fields[argp++] = TypePtr::NOTNULL; // nttb
1456 fields[argp++] = TypePtr::NOTNULL; // NTT multiply zetas
1457
1458 assert(argp == TypeFunc::Parms + argcnt, "correct decoding");
1459 const TypeTuple* domain = TypeTuple::make(TypeFunc::Parms + argcnt, fields);
1460
1461 // result type needed
1462 fields = TypeTuple::fields(1);
1463 fields[TypeFunc::Parms + 0] = TypeInt::INT;
1464 const TypeTuple* range = TypeTuple::make(TypeFunc::Parms + 1, fields);
1465 return TypeFunc::make(domain, range);
1466 }
1467
1468 // Kyber add 2 polynomials function
1469 static const TypeFunc* make_kyberAddPoly_2_Type() {
1470 int argcnt = 3;
1471
1472 const Type** fields = TypeTuple::fields(argcnt);
1473 int argp = TypeFunc::Parms;
1474 fields[argp++] = TypePtr::NOTNULL; // result
1475 fields[argp++] = TypePtr::NOTNULL; // a
1476 fields[argp++] = TypePtr::NOTNULL; // b
1477
1478 assert(argp == TypeFunc::Parms + argcnt, "correct decoding");
1479 const TypeTuple* domain = TypeTuple::make(TypeFunc::Parms + argcnt, fields);
1480
1481 // result type needed
1482 fields = TypeTuple::fields(1);
1483 fields[TypeFunc::Parms + 0] = TypeInt::INT;
1484 const TypeTuple* range = TypeTuple::make(TypeFunc::Parms + 1, fields);
1485 return TypeFunc::make(domain, range);
1486 }
1487
1488
1489 // Kyber add 3 polynomials function
1490 static const TypeFunc* make_kyberAddPoly_3_Type() {
1491 int argcnt = 4;
1492
1493 const Type** fields = TypeTuple::fields(argcnt);
1494 int argp = TypeFunc::Parms;
1495 fields[argp++] = TypePtr::NOTNULL; // result
1496 fields[argp++] = TypePtr::NOTNULL; // a
1497 fields[argp++] = TypePtr::NOTNULL; // b
1498 fields[argp++] = TypePtr::NOTNULL; // c
1499
1500 assert(argp == TypeFunc::Parms + argcnt, "correct decoding");
1501 const TypeTuple* domain = TypeTuple::make(TypeFunc::Parms + argcnt, fields);
1502
1503 // result type needed
1504 fields = TypeTuple::fields(1);
1505 fields[TypeFunc::Parms + 0] = TypeInt::INT;
1506 const TypeTuple* range = TypeTuple::make(TypeFunc::Parms + 1, fields);
1507 return TypeFunc::make(domain, range);
1508 }
1509
1510
1511 // Kyber XOF output parsing into polynomial coefficient candidates
1512 // or decompress(12,...) function
1513 static const TypeFunc* make_kyber12To16_Type() {
1514 int argcnt = 4;
1515
1516 const Type** fields = TypeTuple::fields(argcnt);
1517 int argp = TypeFunc::Parms;
1518 fields[argp++] = TypePtr::NOTNULL; // condensed
1519 fields[argp++] = TypeInt::INT; // condensedOffs
1520 fields[argp++] = TypePtr::NOTNULL; // parsed
1521 fields[argp++] = TypeInt::INT; // parsedLength
1522
1523 assert(argp == TypeFunc::Parms + argcnt, "correct decoding");
1524 const TypeTuple* domain = TypeTuple::make(TypeFunc::Parms + argcnt, fields);
1525
1526 // result type needed
1527 fields = TypeTuple::fields(1);
1528 fields[TypeFunc::Parms + 0] = TypeInt::INT;
1529 const TypeTuple* range = TypeTuple::make(TypeFunc::Parms + 1, fields);
1530 return TypeFunc::make(domain, range);
1531 }
1532
1533 // Kyber Barrett reduce function
1534 static const TypeFunc* make_kyberBarrettReduce_Type() {
1535 int argcnt = 1;
1536
1537 const Type** fields = TypeTuple::fields(argcnt);
1538 int argp = TypeFunc::Parms;
1539 fields[argp++] = TypePtr::NOTNULL; // coeffs
1540
1541 assert(argp == TypeFunc::Parms + argcnt, "correct decoding");
1542 const TypeTuple* domain = TypeTuple::make(TypeFunc::Parms + argcnt, fields);
1543
1544 // result type needed
1545 fields = TypeTuple::fields(1);
1546 fields[TypeFunc::Parms + 0] = TypeInt::INT;
1547 const TypeTuple* range = TypeTuple::make(TypeFunc::Parms + 1, fields);
1548 return TypeFunc::make(domain, range);
1549 }
1550
1551 // Dilithium NTT function except for the final "normalization" to |coeff| < Q
1552 static const TypeFunc* make_dilithiumAlmostNtt_Type() {
1553 int argcnt = 2;
1554
1555 const Type** fields = TypeTuple::fields(argcnt);
1556 int argp = TypeFunc::Parms;
1557 fields[argp++] = TypePtr::NOTNULL; // coeffs
1558 fields[argp++] = TypePtr::NOTNULL; // NTT zetas
1559
1560 assert(argp == TypeFunc::Parms + argcnt, "correct decoding");
1561 const TypeTuple* domain = TypeTuple::make(TypeFunc::Parms + argcnt, fields);
1562
1563 // result type needed
1564 fields = TypeTuple::fields(1);
1565 fields[TypeFunc::Parms + 0] = TypeInt::INT;
1566 const TypeTuple* range = TypeTuple::make(TypeFunc::Parms + 1, fields);
1567 return TypeFunc::make(domain, range);
1568 }
1569
1570 // Dilithium inverse NTT function, except for the final division by 2^256 (mod Q)
1571 static const TypeFunc* make_dilithiumAlmostInverseNtt_Type() {
1572 int argcnt = 2;
1573
1574 const Type** fields = TypeTuple::fields(argcnt);
1575 int argp = TypeFunc::Parms;
1576 fields[argp++] = TypePtr::NOTNULL; // coeffs
1577 fields[argp++] = TypePtr::NOTNULL; // inverse NTT zetas
1578
1579 assert(argp == TypeFunc::Parms + argcnt, "correct decoding");
1580 const TypeTuple* domain = TypeTuple::make(TypeFunc::Parms + argcnt, fields);
1581
1582 // result type needed
1583 fields = TypeTuple::fields(1);
1584 fields[TypeFunc::Parms + 0] = TypeInt::INT;
1585 const TypeTuple* range = TypeTuple::make(TypeFunc::Parms + 1, fields);
1586 return TypeFunc::make(domain, range);
1587 }
1588
1589 // Dilithium NTT multiply function
1590 static const TypeFunc* make_dilithiumNttMult_Type() {
1591 int argcnt = 3;
1592
1593 const Type** fields = TypeTuple::fields(argcnt);
1594 int argp = TypeFunc::Parms;
1595 fields[argp++] = TypePtr::NOTNULL; // result
1596 fields[argp++] = TypePtr::NOTNULL; // ntta
1597 fields[argp++] = TypePtr::NOTNULL; // nttb
1598
1599 assert(argp == TypeFunc::Parms + argcnt, "correct decoding");
1600 const TypeTuple* domain = TypeTuple::make(TypeFunc::Parms + argcnt, fields);
1601
1602 // result type needed
1603 fields = TypeTuple::fields(1);
1604 fields[TypeFunc::Parms + 0] = TypeInt::INT;
1605 const TypeTuple* range = TypeTuple::make(TypeFunc::Parms + 1, fields);
1606 return TypeFunc::make(domain, range);
1607 }
1608
1609 // Dilithium Montgomery multiply of a polynomial coefficient array by a constant
1610 static const TypeFunc* make_dilithiumMontMulByConstant_Type() {
1611 int argcnt = 2;
1612
1613 const Type** fields = TypeTuple::fields(argcnt);
1614 int argp = TypeFunc::Parms;
1615 fields[argp++] = TypePtr::NOTNULL; // coeffs
1616 fields[argp++] = TypeInt::INT; // constant multiplier
1617
1618 assert(argp == TypeFunc::Parms + argcnt, "correct decoding");
1619 const TypeTuple* domain = TypeTuple::make(TypeFunc::Parms + argcnt, fields);
1620
1621 // result type needed
1622 fields = TypeTuple::fields(1);
1623 fields[TypeFunc::Parms + 0] = TypeInt::INT;
1624 const TypeTuple* range = TypeTuple::make(TypeFunc::Parms + 1, fields);
1625 return TypeFunc::make(domain, range);
1626 }
1627
1628 // Dilithium decompose polynomial
1629 static const TypeFunc* make_dilithiumDecomposePoly_Type() {
1630 int argcnt = 5;
1631
1632 const Type** fields = TypeTuple::fields(argcnt);
1633 int argp = TypeFunc::Parms;
1634 fields[argp++] = TypePtr::NOTNULL; // input
1635 fields[argp++] = TypePtr::NOTNULL; // lowPart
1636 fields[argp++] = TypePtr::NOTNULL; // highPart
1637 fields[argp++] = TypeInt::INT; // 2 * gamma2
1638 fields[argp++] = TypeInt::INT; // multiplier
1639
1640 assert(argp == TypeFunc::Parms + argcnt, "correct decoding");
1641 const TypeTuple* domain = TypeTuple::make(TypeFunc::Parms + argcnt, fields);
1642
1643 // result type needed
1644 fields = TypeTuple::fields(1);
1645 fields[TypeFunc::Parms + 0] = TypeInt::INT;
1646 const TypeTuple* range = TypeTuple::make(TypeFunc::Parms + 1, fields);
1647 return TypeFunc::make(domain, range);
1648 }
1649
1650 static const TypeFunc* make_base64_encodeBlock_Type() {
1651 int argcnt = 6;
1652
1653 const Type** fields = TypeTuple::fields(argcnt);
1654 int argp = TypeFunc::Parms;
1655 fields[argp++] = TypePtr::NOTNULL; // src array
1656 fields[argp++] = TypeInt::INT; // offset
1657 fields[argp++] = TypeInt::INT; // length
1658 fields[argp++] = TypePtr::NOTNULL; // dest array
1659 fields[argp++] = TypeInt::INT; // dp
1660 fields[argp++] = TypeInt::BOOL; // isURL
1661 assert(argp == TypeFunc::Parms + argcnt, "correct decoding");
1662 const TypeTuple* domain = TypeTuple::make(TypeFunc::Parms+argcnt, fields);
1663
1664 // result type needed
1665 fields = TypeTuple::fields(1);
1666 fields[TypeFunc::Parms + 0] = nullptr; // void
1667 const TypeTuple* range = TypeTuple::make(TypeFunc::Parms, fields);
1668 return TypeFunc::make(domain, range);
1669 }
1670
1671 static const TypeFunc* make_string_IndexOf_Type() {
1672 int argcnt = 4;
1673
1674 const Type** fields = TypeTuple::fields(argcnt);
1675 int argp = TypeFunc::Parms;
1676 fields[argp++] = TypePtr::NOTNULL; // haystack array
1677 fields[argp++] = TypeInt::INT; // haystack length
1678 fields[argp++] = TypePtr::NOTNULL; // needle array
1679 fields[argp++] = TypeInt::INT; // needle length
1680 assert(argp == TypeFunc::Parms + argcnt, "correct decoding");
1681 const TypeTuple* domain = TypeTuple::make(TypeFunc::Parms+argcnt, fields);
1682
1683 // result type needed
1684 fields = TypeTuple::fields(1);
1685 fields[TypeFunc::Parms + 0] = TypeInt::INT; // Index of needle in haystack
1686 const TypeTuple* range = TypeTuple::make(TypeFunc::Parms + 1, fields);
1687 return TypeFunc::make(domain, range);
1688 }
1689
1690 static const TypeFunc* make_base64_decodeBlock_Type() {
1691 int argcnt = 7;
1692
1693 const Type** fields = TypeTuple::fields(argcnt);
1694 int argp = TypeFunc::Parms;
1695 fields[argp++] = TypePtr::NOTNULL; // src array
1696 fields[argp++] = TypeInt::INT; // src offset
1697 fields[argp++] = TypeInt::INT; // src length
1698 fields[argp++] = TypePtr::NOTNULL; // dest array
1699 fields[argp++] = TypeInt::INT; // dest offset
1700 fields[argp++] = TypeInt::BOOL; // isURL
1701 fields[argp++] = TypeInt::BOOL; // isMIME
1702 assert(argp == TypeFunc::Parms + argcnt, "correct decoding");
1703 const TypeTuple* domain = TypeTuple::make(TypeFunc::Parms+argcnt, fields);
1704
1705 // result type needed
1706 fields = TypeTuple::fields(1);
1707 fields[TypeFunc::Parms + 0] = TypeInt::INT; // count of bytes written to dst
1708 const TypeTuple* range = TypeTuple::make(TypeFunc::Parms + 1, fields);
1709 return TypeFunc::make(domain, range);
1710 }
1711
1712 static const TypeFunc* make_poly1305_processBlocks_Type() {
1713 int argcnt = 4;
1714
1715 const Type** fields = TypeTuple::fields(argcnt);
1716 int argp = TypeFunc::Parms;
1717 fields[argp++] = TypePtr::NOTNULL; // input array
1718 fields[argp++] = TypeInt::INT; // input length
1719 fields[argp++] = TypePtr::NOTNULL; // accumulator array
1720 fields[argp++] = TypePtr::NOTNULL; // r array
1721 assert(argp == TypeFunc::Parms + argcnt, "correct decoding");
1722 const TypeTuple* domain = TypeTuple::make(TypeFunc::Parms+argcnt, fields);
1723
1724 // result type needed
1725 fields = TypeTuple::fields(1);
1726 fields[TypeFunc::Parms + 0] = nullptr; // void
1727 const TypeTuple* range = TypeTuple::make(TypeFunc::Parms, fields);
1728 return TypeFunc::make(domain, range);
1729 }
1730
1731 static const TypeFunc* make_intpoly_montgomeryMult_P256_Type() {
1732 int argcnt = 3;
1733
1734 const Type** fields = TypeTuple::fields(argcnt);
1735 int argp = TypeFunc::Parms;
1736 fields[argp++] = TypePtr::NOTNULL; // a array
1737 fields[argp++] = TypePtr::NOTNULL; // b array
1738 fields[argp++] = TypePtr::NOTNULL; // r(esult) array
1739 assert(argp == TypeFunc::Parms + argcnt, "correct decoding");
1740 const TypeTuple* domain = TypeTuple::make(TypeFunc::Parms+argcnt, fields);
1741
1742 // result type needed
1743 fields = TypeTuple::fields(1);
1744 fields[TypeFunc::Parms + 0] = nullptr; // void
1745 const TypeTuple* range = TypeTuple::make(TypeFunc::Parms, fields);
1746 return TypeFunc::make(domain, range);
1747 }
1748
1749 static const TypeFunc* make_intpoly_assign_Type() {
1750 int argcnt = 4;
1751
1752 const Type** fields = TypeTuple::fields(argcnt);
1753 int argp = TypeFunc::Parms;
1754 fields[argp++] = TypeInt::INT; // set flag
1755 fields[argp++] = TypePtr::NOTNULL; // a array (result)
1756 fields[argp++] = TypePtr::NOTNULL; // b array (used only if the set flag is set)
1757 fields[argp++] = TypeInt::INT; // array length
1758 assert(argp == TypeFunc::Parms + argcnt, "correct decoding");
1759 const TypeTuple* domain = TypeTuple::make(TypeFunc::Parms+argcnt, fields);
1760
1761 // result type needed
1762 fields = TypeTuple::fields(1);
1763 fields[TypeFunc::Parms + 0] = nullptr; // void
1764 const TypeTuple* range = TypeTuple::make(TypeFunc::Parms, fields);
1765 return TypeFunc::make(domain, range);
1766 }
1767
1768 //------------- Interpreter state for on stack replacement
1769 static const TypeFunc* make_osr_end_Type() {
1770 // create input type (domain)
1771 const Type **fields = TypeTuple::fields(1);
1772 fields[TypeFunc::Parms+0] = TypeRawPtr::BOTTOM; // OSR temp buf
1773 const TypeTuple *domain = TypeTuple::make(TypeFunc::Parms+1, fields);
1774
1775 // create result type
1776 fields = TypeTuple::fields(1);
1777 // fields[TypeFunc::Parms+0] = TypeInstPtr::NOTNULL; // locked oop
1778 fields[TypeFunc::Parms+0] = nullptr; // void
1779 const TypeTuple *range = TypeTuple::make(TypeFunc::Parms, fields);
1780 return TypeFunc::make(domain, range);
1781 }
1782
1783 //-------------------------------------------------------------------------------------
1784 // register policy
1785
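// register_save_policy[] is generated by the ADLC from the platform .ad file.
// Each entry is a one-character code: 'C' = save-on-call (caller-saved),
// 'E' = save-on-entry (callee-saved), 'N' = no-save, 'A' = always-save.
// Only 'E' registers are treated as callee-saved by this query.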
1786 bool OptoRuntime::is_callee_saved_register(MachRegisterNumbers reg) {
1787 assert(reg >= 0 && reg < _last_Mach_Reg, "must be a machine register");
1788 switch (register_save_policy[reg]) {
1789 case 'C': return false; //SOC
1790 case 'E': return true ; //SOE
1791 case 'N': return false; //NS
1792 case 'A': return false; //AS
1793 }
1794 ShouldNotReachHere();
1795 return false;
1796 }
1797
1798 //-----------------------------------------------------------------------
1799 // Exceptions
1800 //
1801
1802 static void trace_exception(outputStream* st, oop exception_oop, address exception_pc, const char* msg);
1803
1804 // This method is an entry point that is always called from a C++ method, not
1805 // directly from compiled code. Compiled code will call the C++ method that follows.
1806 // We can't allow an async exception to be installed during exception processing.
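// Outline of the lookup below: find the nmethod containing the throwing pc,
// then either reuse a cached handler for (exception, pc) or compute one via
// SharedRuntime::compute_compiled_exc_handler() and cache it. Two cases bypass
// the normal lookup: if the caller is already being deoptimized we return the
// deopt blob's unpack_with_exception entry, and if a stack overflow prevents
// re-guarding the stack we force the frame to be unwound.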
1807 JRT_ENTRY_NO_ASYNC(address, OptoRuntime::handle_exception_C_helper(JavaThread* current, nmethod* &nm))
1808 // The frame we rethrow the exception to might not have been processed by the GC yet.
1809 // The stack watermark barrier takes care of detecting that and ensuring the frame
1810 // has updated oops.
1811 StackWatermarkSet::after_unwind(current);
1812
1813 // Do not confuse exception_oop with pending_exception. The exception_oop
1814 // is only used to pass arguments into the method, not for general
1815 // exception handling. DO NOT CHANGE IT to use pending_exception, since
1816 // the runtime stubs check this on exit.
1817 assert(current->exception_oop() != nullptr, "exception oop is found");
1818 address handler_address = nullptr;
1819
1820 Handle exception(current, current->exception_oop());
1821 address pc = current->exception_pc();
1822
1823 // Clear out the exception oop and pc since looking up an
1824 // exception handler can cause class loading, which might throw an
1825 // exception and those fields are expected to be clear during
1826 // normal bytecode execution.
1827 current->clear_exception_oop_and_pc();
1828
1829 LogTarget(Info, exceptions) lt;
1830 if (lt.is_enabled()) {
1831 LogStream ls(lt);
1832 trace_exception(&ls, exception(), pc, "");
1833 }
1834
1835 // for AbortVMOnException flag
1836 Exceptions::debug_check_abort(exception);
1837
1838 #ifdef ASSERT
1839 if (!(exception->is_a(vmClasses::Throwable_klass()))) {
1840 // should throw an exception here
1841 ShouldNotReachHere();
1842 }
1843 #endif
1844
1845 // New exception handling: this method is entered only from adapters;
1846 // exceptions from compiled Java methods are handled in compiled code
1847 // using the rethrow node.
1848
1849 nm = CodeCache::find_nmethod(pc);
1850 assert(nm != nullptr, "No NMethod found");
1851 if (nm->is_native_method()) {
1852 fatal("Native method should not have path to exception handling");
1853 } else {
1854 // we are switching to the old paradigm: search for the exception handler in caller_frame
1855 // instead of in the exception handler of caller_frame.sender()
1856
1857 if (JvmtiExport::can_post_on_exceptions()) {
1858 // "Full-speed catching" is not necessary here,
1859 // since we're notifying the VM on every catch.
1860 // Force deoptimization and the rest of the lookup
1861 // will be fine.
1862 deoptimize_caller_frame(current);
1863 }
1864
1865 // Check the stack guard pages. If enabled, look for handler in this frame;
1866 // otherwise, forcibly unwind the frame.
1867 //
1868 // 4826555: use default current sp for reguard_stack instead of &nm: it's more accurate.
1869 bool force_unwind = !current->stack_overflow_state()->reguard_stack();
1870 bool deopting = false;
1871 if (nm->is_deopt_pc(pc)) {
1872 deopting = true;
1873 RegisterMap map(current,
1874 RegisterMap::UpdateMap::skip,
1875 RegisterMap::ProcessFrames::include,
1876 RegisterMap::WalkContinuation::skip);
1877 frame deoptee = current->last_frame().sender(&map);
1878 assert(deoptee.is_deoptimized_frame(), "must be deopted");
1879 // Adjust the pc back to the original throwing pc
1880 pc = deoptee.pc();
1881 }
1882
1883 // If we are forcing an unwind because of stack overflow then deopt is
1884 // irrelevant since we are throwing the frame away anyway.
1885
1886 if (deopting && !force_unwind) {
1887 handler_address = SharedRuntime::deopt_blob()->unpack_with_exception();
1888 } else {
1889
1890 handler_address =
1891 force_unwind ? nullptr : nm->handler_for_exception_and_pc(exception, pc);
1892
1893 if (handler_address == nullptr) {
1894 bool recursive_exception = false;
1895 handler_address = SharedRuntime::compute_compiled_exc_handler(nm, pc, exception, force_unwind, true, recursive_exception);
1896 assert (handler_address != nullptr, "must have compiled handler");
1897 // Update the exception cache only when the unwind was not forced
1898 // and no other exception occurred during the computation of the
1899 // compiled exception handler. Checking for exception oop equality is not
1900 // sufficient because some exceptions are pre-allocated and reused.
1901 if (!force_unwind && !recursive_exception) {
1902 nm->add_handler_for_exception_and_pc(exception,pc,handler_address);
1903 }
1904 } else {
1905 #ifdef ASSERT
1906 bool recursive_exception = false;
1907 address computed_address = SharedRuntime::compute_compiled_exc_handler(nm, pc, exception, force_unwind, true, recursive_exception);
1908 vmassert(recursive_exception || (handler_address == computed_address), "Handler address inconsistency: " PTR_FORMAT " != " PTR_FORMAT,
1909 p2i(handler_address), p2i(computed_address));
1910 #endif
1911 }
1912 }
1913
1914 current->set_exception_pc(pc);
1915 current->set_exception_handler_pc(handler_address);
1916
1917 // Check if the exception PC is a MethodHandle call site.
1918 current->set_is_method_handle_return(nm->is_method_handle_return(pc));
1919 }
1920
1921 // Restore the exception oop that was cleared above; the stub reads it on exit.
1922 current->set_exception_oop(exception());
1923 return handler_address;
1924
1925 JRT_END
1926
1927 // We are entering here from exception_blob
1928 // If there is a compiled exception handler in this method, we will continue there;
1929 // otherwise we will unwind the stack and continue at the caller of top frame method
1930 // Note we enter without the usual JRT wrapper. We will call a helper routine that
1931 // will do the normal VM entry. We do it this way so that we can see if the nmethod
1932 // we looked up the handler for has been deoptimized in the meantime. If it has been
1933 // we must not use the handler and instead return the deopt blob.
1934 address OptoRuntime::handle_exception_C(JavaThread* current) {
1935 //
1936 // We are in Java not VM and in debug mode we have a NoHandleMark
1937 //
1938 #ifndef PRODUCT
1939 SharedRuntime::_find_handler_ctr++; // find exception handler
1940 #endif
1941 DEBUG_ONLY(NoHandleMark __hm;)
1942 nmethod* nm = nullptr;
1943 address handler_address = nullptr;
1944 {
1945 // Enter the VM
1946
1947 ResetNoHandleMark rnhm;
1948 handler_address = handle_exception_C_helper(current, nm);
1949 }
1950
1951 // Back in java: Use no oops, DON'T safepoint
1952
1953 // Now check to see if the handler we are returning is in a now
1954 // deoptimized frame
1955
1956 if (nm != nullptr) {
1957 RegisterMap map(current,
1958 RegisterMap::UpdateMap::skip,
1959 RegisterMap::ProcessFrames::skip,
1960 RegisterMap::WalkContinuation::skip);
1961 frame caller = current->last_frame().sender(&map);
1962 #ifdef ASSERT
1963 assert(caller.is_compiled_frame(), "must be");
1964 #endif // ASSERT
1965 if (caller.is_deoptimized_frame()) {
1966 handler_address = SharedRuntime::deopt_blob()->unpack_with_exception();
1967 }
1968 }
1969 return handler_address;
1970 }
1971
1972 //------------------------------rethrow----------------------------------------
1973 // We get here after compiled code has executed a 'RethrowNode'. The callee
1974 // is either throwing or rethrowing an exception. The callee-save registers
1975 // have been restored, synchronized objects have been unlocked and the callee
1976 // stack frame has been removed. The return address was passed in.
1977 // Exception oop is passed as the 1st argument. This routine is then called
1978 // from the stub. On exit, we know where to jump in the caller's code.
1979 // After this C code exits, the stub will pop its frame and end in a jump
1980 // (instead of a return). We enter the caller's default handler.
1981 //
1982 // This must be JRT_LEAF:
1983 // - caller will not change its state as we cannot block on exit,
1984 // therefore raw_exception_handler_for_return_address is all it takes
1985 // to handle deoptimized blobs
1986 //
1987 // However, there needs to be a safepoint check in the middle! So compiled
1988 // safepoints are completely watertight.
1989 //
1990 // Thus, it cannot be a leaf since it contains the NoSafepointVerifier.
1991 //
1992 // *THIS IS NOT RECOMMENDED PROGRAMMING STYLE*
1993 //
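// In concrete terms: strip the AArch64 PAC signature from ret_pc where needed,
// stash the exception oop in vm_result_oop for the stub to pick up, and ask
// SharedRuntime for the exception handler entry that corresponds to the
// caller's return address (which also copes with a deoptimized caller).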
1994 address OptoRuntime::rethrow_C(oopDesc* exception, JavaThread* thread, address ret_pc) {
1995 // ret_pc will have been loaded from the stack, so for AArch64 will be signed.
1996 AARCH64_PORT_ONLY(ret_pc = pauth_strip_verifiable(ret_pc));
1997
1998 #ifndef PRODUCT
1999 SharedRuntime::_rethrow_ctr++; // count rethrows
2000 #endif
2001 assert (exception != nullptr, "should have thrown a NullPointerException");
2002 #ifdef ASSERT
2003 if (!(exception->is_a(vmClasses::Throwable_klass()))) {
2004 // should throw an exception here
2005 ShouldNotReachHere();
2006 }
2007 #endif
2008
2009 thread->set_vm_result_oop(exception);
2010 // Frame not compiled (handles deoptimization blob)
2011 return SharedRuntime::raw_exception_handler_for_return_address(thread, ret_pc);
2012 }
2013
2014 static const TypeFunc* make_rethrow_Type() {
2015 // create input type (domain)
2016 const Type **fields = TypeTuple::fields(1);
2017 fields[TypeFunc::Parms+0] = TypeInstPtr::NOTNULL; // Exception oop
2018 const TypeTuple *domain = TypeTuple::make(TypeFunc::Parms+1,fields);
2019
2020 // create result type (range)
2021 fields = TypeTuple::fields(1);
2022 fields[TypeFunc::Parms+0] = TypeInstPtr::NOTNULL; // Exception oop
2023 const TypeTuple *range = TypeTuple::make(TypeFunc::Parms+1, fields);
2024
2025 return TypeFunc::make(domain, range);
2026 }
2027
2028
2029 void OptoRuntime::deoptimize_caller_frame(JavaThread *thread, bool doit) {
2030 // Deoptimize the caller before continuing, as the compiled
2031 // exception handler table may not be valid.
2032 if (DeoptimizeOnAllocationException && doit) {
2033 deoptimize_caller_frame(thread);
2034 }
2035 }
2036
2037 void OptoRuntime::deoptimize_caller_frame(JavaThread *thread) {
2038 // Called from within the owner thread, so no need for safepoint
2039 RegisterMap reg_map(thread,
2040 RegisterMap::UpdateMap::include,
2041 RegisterMap::ProcessFrames::include,
2042 RegisterMap::WalkContinuation::skip);
2043 frame stub_frame = thread->last_frame();
2044 assert(stub_frame.is_runtime_frame() || exception_blob()->contains(stub_frame.pc()), "sanity check");
2045 frame caller_frame = stub_frame.sender(&reg_map);
2046
2047 // Deoptimize the caller frame.
2048 Deoptimization::deoptimize_frame(thread, caller_frame.id());
2049 }
2050
2051
2052 bool OptoRuntime::is_deoptimized_caller_frame(JavaThread *thread) {
2053 // Called from within the owner thread, so no need for safepoint
2054 RegisterMap reg_map(thread,
2055 RegisterMap::UpdateMap::include,
2056 RegisterMap::ProcessFrames::include,
2057 RegisterMap::WalkContinuation::skip);
2058 frame stub_frame = thread->last_frame();
2059 assert(stub_frame.is_runtime_frame() || exception_blob()->contains(stub_frame.pc()), "sanity check");
2060 frame caller_frame = stub_frame.sender(&reg_map);
2061 return caller_frame.is_deoptimized_frame();
2062 }
2063
2064 static const TypeFunc* make_register_finalizer_Type() {
2065 // create input type (domain)
2066 const Type **fields = TypeTuple::fields(1);
2067 fields[TypeFunc::Parms+0] = TypeInstPtr::NOTNULL; // oop; Receiver
2068 // // The JavaThread* is passed to each routine as the last argument
2069 // fields[TypeFunc::Parms+1] = TypeRawPtr::NOTNULL; // JavaThread *; Executing thread
2070 const TypeTuple *domain = TypeTuple::make(TypeFunc::Parms+1,fields);
2071
2072 // create result type (range)
2073 fields = TypeTuple::fields(0);
2074
2075 const TypeTuple *range = TypeTuple::make(TypeFunc::Parms+0,fields);
2076
2077 return TypeFunc::make(domain,range);
2078 }
2079
2080 #if INCLUDE_JFR
2081 static const TypeFunc* make_class_id_load_barrier_Type() {
2082 // create input type (domain)
2083 const Type **fields = TypeTuple::fields(1);
2084 fields[TypeFunc::Parms+0] = TypeInstPtr::KLASS;
2085 const TypeTuple *domain = TypeTuple::make(TypeFunc::Parms + 1, fields);
2086
2087 // create result type (range)
2088 fields = TypeTuple::fields(0);
2089
2090 const TypeTuple *range = TypeTuple::make(TypeFunc::Parms + 0, fields);
2091
2092 return TypeFunc::make(domain,range);
2093 }
2094 #endif // INCLUDE_JFR
2095
2096 //-----------------------------------------------------------------------------
2097 static const TypeFunc* make_dtrace_method_entry_exit_Type() {
2098 // create input type (domain)
2099 const Type **fields = TypeTuple::fields(2);
2100 fields[TypeFunc::Parms+0] = TypeRawPtr::BOTTOM; // Thread-local storage
2101 fields[TypeFunc::Parms+1] = TypeMetadataPtr::BOTTOM; // Method*; Method we are entering
2102 const TypeTuple *domain = TypeTuple::make(TypeFunc::Parms+2,fields);
2103
2104 // create result type (range)
2105 fields = TypeTuple::fields(0);
2106
2107 const TypeTuple *range = TypeTuple::make(TypeFunc::Parms+0,fields);
2108
2109 return TypeFunc::make(domain,range);
2110 }
2111
2112 static const TypeFunc* make_dtrace_object_alloc_Type() {
2113 // create input type (domain)
2114 const Type **fields = TypeTuple::fields(2);
2115 fields[TypeFunc::Parms+0] = TypeRawPtr::BOTTOM; // Thread-local storage
2116 fields[TypeFunc::Parms+1] = TypeInstPtr::NOTNULL; // oop; newly allocated object
2117
2118 const TypeTuple *domain = TypeTuple::make(TypeFunc::Parms+2,fields);
2119
2120 // create result type (range)
2121 fields = TypeTuple::fields(0);
2122
2123 const TypeTuple *range = TypeTuple::make(TypeFunc::Parms+0,fields);
2124
2125 return TypeFunc::make(domain,range);
2126 }
2127
2128 JRT_ENTRY_NO_ASYNC(void, OptoRuntime::register_finalizer_C(oopDesc* obj, JavaThread* current))
2129 assert(oopDesc::is_oop(obj), "must be a valid oop");
2130 assert(obj->klass()->has_finalizer(), "shouldn't be here otherwise");
2131 InstanceKlass::register_finalizer(instanceOop(obj), CHECK);
2132 JRT_END
2133
2134 //-----------------------------------------------------------------------------
2135
2136 NamedCounter * volatile OptoRuntime::_named_counters = nullptr;
2137
2138 //
2139 // dump the collected NamedCounters.
2140 //
2141 void OptoRuntime::print_named_counters() {
2142 int total_lock_count = 0;
2143 int eliminated_lock_count = 0;
2144
2145 NamedCounter* c = _named_counters;
2146 while (c) {
2147 if (c->tag() == NamedCounter::LockCounter || c->tag() == NamedCounter::EliminatedLockCounter) {
2148 int count = c->count();
2149 if (count > 0) {
2150 bool eliminated = c->tag() == NamedCounter::EliminatedLockCounter;
2151 if (Verbose) {
2152 tty->print_cr("%d %s%s", count, c->name(), eliminated ? " (eliminated)" : "");
2153 }
2154 total_lock_count += count;
2155 if (eliminated) {
2156 eliminated_lock_count += count;
2157 }
2158 }
2159 }
2160 c = c->next();
2161 }
2162 if (total_lock_count > 0) {
2163 tty->print_cr("dynamic locks: %d", total_lock_count);
2164 if (eliminated_lock_count) {
2165 tty->print_cr("eliminated locks: %d (%d%%)", eliminated_lock_count,
2166 (int)(eliminated_lock_count * 100.0 / total_lock_count));
2167 }
2168 }
2169 }
2170
2171 //
2172 // Allocate a new NamedCounter. The JVMState is used to generate the
2173 // name, which consists of method@bci entries for the inlining tree.
2174 //
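// The resulting name is a space-separated list of "holder.method@bci" entries
// (holder in internal slash-separated form), youngest (most deeply inlined)
// scope first, e.g. (hypothetical names):
//   "java/util/HashMap.get@42 MyApp.lookup@7"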
2175
2176 NamedCounter* OptoRuntime::new_named_counter(JVMState* youngest_jvms, NamedCounter::CounterTag tag) {
2177 int max_depth = youngest_jvms->depth();
2178
2179 // Visit scopes from youngest to oldest.
2180 bool first = true;
2181 stringStream st;
2182 for (int depth = max_depth; depth >= 1; depth--) {
2183 JVMState* jvms = youngest_jvms->of_depth(depth);
2184 ciMethod* m = jvms->has_method() ? jvms->method() : nullptr;
2185 if (!first) {
2186 st.print(" ");
2187 } else {
2188 first = false;
2189 }
2190 int bci = jvms->bci();
2191 if (bci < 0) bci = 0;
2192 if (m != nullptr) {
2193 st.print("%s.%s", m->holder()->name()->as_utf8(), m->name()->as_utf8());
2194 } else {
2195 st.print("no method");
2196 }
2197 st.print("@%d", bci);
2198 // To print line numbers instead of bci use: m->line_number_from_bci(bci)
2199 }
2200 NamedCounter* c = new NamedCounter(st.freeze(), tag);
2201
2202 // atomically add the new counter to the head of the list. We only
2203 // add counters so this is safe.
2204 NamedCounter* head;
2205 do {
2206 c->set_next(nullptr);
2207 head = _named_counters;
2208 c->set_next(head);
2209 } while (Atomic::cmpxchg(&_named_counters, head, c) != head);
2210 return c;
2211 }
2212
2213 void OptoRuntime::initialize_types() {
2214 _new_instance_Type = make_new_instance_Type();
2215 _new_array_Type = make_new_array_Type();
2216 _multianewarray2_Type = multianewarray_Type(2);
2217 _multianewarray3_Type = multianewarray_Type(3);
2218 _multianewarray4_Type = multianewarray_Type(4);
2219 _multianewarray5_Type = multianewarray_Type(5);
2220 _multianewarrayN_Type = make_multianewarrayN_Type();
2221 _complete_monitor_enter_Type = make_complete_monitor_enter_Type();
2222 _complete_monitor_exit_Type = make_complete_monitor_exit_Type();
2223 _monitor_notify_Type = make_monitor_notify_Type();
2224 _uncommon_trap_Type = make_uncommon_trap_Type();
2225 _athrow_Type = make_athrow_Type();
2226 _rethrow_Type = make_rethrow_Type();
2227 _Math_D_D_Type = make_Math_D_D_Type();
2228 _Math_DD_D_Type = make_Math_DD_D_Type();
2229 _modf_Type = make_modf_Type();
2230 _l2f_Type = make_l2f_Type();
2231 _void_long_Type = make_void_long_Type();
2232 _void_void_Type = make_void_void_Type();
2233 _jfr_write_checkpoint_Type = make_jfr_write_checkpoint_Type();
2234 _flush_windows_Type = make_flush_windows_Type();
2235 _fast_arraycopy_Type = make_arraycopy_Type(ac_fast);
2236 _checkcast_arraycopy_Type = make_arraycopy_Type(ac_checkcast);
2237 _generic_arraycopy_Type = make_arraycopy_Type(ac_generic);
2238 _slow_arraycopy_Type = make_arraycopy_Type(ac_slow);
2239 _unsafe_setmemory_Type = make_setmemory_Type();
2240 _array_fill_Type = make_array_fill_Type();
2241 _array_sort_Type = make_array_sort_Type();
2242 _array_partition_Type = make_array_partition_Type();
2243 _aescrypt_block_Type = make_aescrypt_block_Type();
2244 _cipherBlockChaining_aescrypt_Type = make_cipherBlockChaining_aescrypt_Type();
2245 _electronicCodeBook_aescrypt_Type = make_electronicCodeBook_aescrypt_Type();
2246 _counterMode_aescrypt_Type = make_counterMode_aescrypt_Type();
2247 _galoisCounterMode_aescrypt_Type = make_galoisCounterMode_aescrypt_Type();
2248 _digestBase_implCompress_with_sha3_Type = make_digestBase_implCompress_Type( /* is_sha3= */ true);
2249 _digestBase_implCompress_without_sha3_Type = make_digestBase_implCompress_Type( /* is_sha3= */ false);
2250 _digestBase_implCompressMB_with_sha3_Type = make_digestBase_implCompressMB_Type(/* is_sha3= */ true);
2251 _digestBase_implCompressMB_without_sha3_Type = make_digestBase_implCompressMB_Type(/* is_sha3= */ false);
2252 _double_keccak_Type = make_double_keccak_Type();
2253 _multiplyToLen_Type = make_multiplyToLen_Type();
2254 _montgomeryMultiply_Type = make_montgomeryMultiply_Type();
2255 _montgomerySquare_Type = make_montgomerySquare_Type();
2256 _squareToLen_Type = make_squareToLen_Type();
2257 _mulAdd_Type = make_mulAdd_Type();
2258 _bigIntegerShift_Type = make_bigIntegerShift_Type();
2259 _vectorizedMismatch_Type = make_vectorizedMismatch_Type();
2260 _ghash_processBlocks_Type = make_ghash_processBlocks_Type();
2261 _chacha20Block_Type = make_chacha20Block_Type();
2262 _kyberNtt_Type = make_kyberNtt_Type();
2263 _kyberInverseNtt_Type = make_kyberInverseNtt_Type();
2264 _kyberNttMult_Type = make_kyberNttMult_Type();
2265 _kyberAddPoly_2_Type = make_kyberAddPoly_2_Type();
2266 _kyberAddPoly_3_Type = make_kyberAddPoly_3_Type();
2267 _kyber12To16_Type = make_kyber12To16_Type();
2268 _kyberBarrettReduce_Type = make_kyberBarrettReduce_Type();
2269 _dilithiumAlmostNtt_Type = make_dilithiumAlmostNtt_Type();
2270 _dilithiumAlmostInverseNtt_Type = make_dilithiumAlmostInverseNtt_Type();
2271 _dilithiumNttMult_Type = make_dilithiumNttMult_Type();
2272 _dilithiumMontMulByConstant_Type = make_dilithiumMontMulByConstant_Type();
2273 _dilithiumDecomposePoly_Type = make_dilithiumDecomposePoly_Type();
2274 _base64_encodeBlock_Type = make_base64_encodeBlock_Type();
2275 _base64_decodeBlock_Type = make_base64_decodeBlock_Type();
2276 _string_IndexOf_Type = make_string_IndexOf_Type();
2277 _poly1305_processBlocks_Type = make_poly1305_processBlocks_Type();
2278 _intpoly_montgomeryMult_P256_Type = make_intpoly_montgomeryMult_P256_Type();
2279 _intpoly_assign_Type = make_intpoly_assign_Type();
2280 _updateBytesCRC32_Type = make_updateBytesCRC32_Type();
2281 _updateBytesCRC32C_Type = make_updateBytesCRC32C_Type();
2282 _updateBytesAdler32_Type = make_updateBytesAdler32_Type();
2283 _osr_end_Type = make_osr_end_Type();
2284 _register_finalizer_Type = make_register_finalizer_Type();
2285 JFR_ONLY(
2286 _class_id_load_barrier_Type = make_class_id_load_barrier_Type();
2287 )
2288 #if INCLUDE_JVMTI
2289 _notify_jvmti_vthread_Type = make_notify_jvmti_vthread_Type();
2290 #endif // INCLUDE_JVMTI
2291 _dtrace_method_entry_exit_Type = make_dtrace_method_entry_exit_Type();
2292 _dtrace_object_alloc_Type = make_dtrace_object_alloc_Type();
2293 }
2294
2295 int trace_exception_counter = 0;
2296 static void trace_exception(outputStream* st, oop exception_oop, address exception_pc, const char* msg) {
2297 trace_exception_counter++;
2298 stringStream tempst;
2299
2300 tempst.print("%d [Exception (%s): ", trace_exception_counter, msg);
2301 exception_oop->print_value_on(&tempst);
2302 tempst.print(" in ");
2303 CodeBlob* blob = CodeCache::find_blob(exception_pc);
2304 if (blob->is_nmethod()) {
2305 blob->as_nmethod()->method()->print_value_on(&tempst);
2306 } else if (blob->is_runtime_stub()) {
2307 tempst.print("<runtime-stub>");
2308 } else {
2309 tempst.print("<unknown>");
2310 }
2311 tempst.print(" at " INTPTR_FORMAT, p2i(exception_pc));
2312 tempst.print("]");
2313
2314 st->print_raw_cr(tempst.freeze());
2315 }